// NOTE: dataset-extraction artifact ("CombinedText stringlengths …" table header) — not part of the original source.
package main
import (
"bytes"
"encoding/hex"
"encoding/xml"
"errors"
"flag"
"fmt"
"github.com/FactomProject/FactomCode/notaryapi"
"github.com/conformal/btcrpcclient"
"github.com/conformal/btcutil"
"github.com/FactomProject/dynrsrc"
"github.com/FactomProject/gobundle"
"github.com/FactomProject/gocoding"
"io/ioutil"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"sync"
"os"
"time"
"log"
"encoding/binary"
"encoding/csv"
"github.com/FactomProject/FactomCode/database"
"github.com/FactomProject/FactomCode/database/ldb"
"code.google.com/p/gcfg"
"reflect"
"github.com/FactomProject/FactomCode/factomapi"
)
// Package-level runtime state shared by the handlers and the ticker goroutines.
var (
	wclient *btcrpcclient.Client //rpc client for btcwallet rpc server
	dclient *btcrpcclient.Client //rpc client for btcd rpc server

	currentAddr btcutil.Address
	// tickers[0] cuts entry/directory blocks, tickers[1] anchors batches to BTC.
	tickers [2]*time.Ticker
	db      database.Db // database handle shared by all handlers

	chainIDMap map[string]*notaryapi.EChain // ChainIDMap with chainID string([32]byte) as key
	//chainNameMap map[string]*notaryapi.Chain // ChainNameMap with chain name string as key

	dchain *notaryapi.DChain //Directory Block Chain
	cchain *notaryapi.CChain //Entry Credit Chain

	// Credit prices are negative because they are debited from a balance.
	creditsPerChain   int32  = -5
	creditsPerEntry   int32  = -1
	creditsPerFactoid uint64 = 1000

	eCreditMap      map[string]int32 // eCreditMap with public key string([32]byte) as key, credit balance as value
	prePaidEntryMap map[string]int32 // Paid but unrevealed entries string(Entry Hash) as key, Number of payments as value

	// dbBatches []*notaryapi.FBBatch
	dbBatches *DBBatches
	dbBatch   *notaryapi.DBBatch // the batch currently being filled; swapped out by tickers[1]

	//Map to store export csv files, keyed by the SHA of the file name
	serverDataFileMap map[string]string
)
// Configuration defaults; loadConfigurations overrides them from restapi.conf.
// NOTE(review): wallet passphrase and RPC credentials are hard-coded defaults —
// confirm they are only used for test networks.
var (
	logLevel                = "DEBUG"
	portNumber          int = 8083
	sendToBTCinSeconds      = 600
	directoryBlockInSeconds = 60
	applicationName         = "factom/restapi"
	dataStorePath           = "/tmp/store/seed/"
	ldbpath                 = "/tmp/ldb9"
	//BTC:
	//	addrStr = "movaFTARmsaTMk3j71MpX8HtMURpsKhdra"
	walletPassphrase          = "lindasilva"
	certHomePath              = "btcwallet"
	rpcClientHost             = "localhost:18332" //btcwallet rpcserver address
	rpcClientEndpoint         = "ws"
	rpcClientUser             = "testuser"
	rpcClientPass             = "notarychain"
	btcTransFee       float64 = 0.0001
	certHomePathBtcd          = "btcd"
	rpcBtcdHost               = "localhost:18334" //btcd rpcserver address
)
// DBBatches accumulates directory-block batches queued for (or already sent
// to) BTC anchoring. batchMutex guards batches against concurrent access from
// the ticker goroutines started in init().
type DBBatches struct {
	batches    []*notaryapi.DBBatch
	batchMutex sync.Mutex
}
// loadConfigurations reads restapi.conf from the current working directory
// via gcfg and overrides the package-level defaults with its values. If the
// file is missing or unparsable, the compiled-in defaults are kept.
func loadConfigurations() {
	// The anonymous struct must mirror the [app]/[btc]/[log] sections of the
	// config file; gcfg maps section and field names onto it by name.
	cfg := struct {
		App struct {
			PortNumber              int
			ApplicationName         string
			LdbPath                 string
			DataStorePath           string
			DirectoryBlockInSeconds int
		}
		Btc struct {
			BTCPubAddr         string
			SendToBTCinSeconds int
			WalletPassphrase   string
			CertHomePath       string
			RpcClientHost      string
			RpcClientEndpoint  string
			RpcClientUser      string
			RpcClientPass      string
			BtcTransFee        float64
		}
		Log struct {
			LogLevel string
		}
	}{}

	wd, err := os.Getwd()
	if err != nil {
		log.Println(err)
	}
	err = gcfg.ReadFileInto(&cfg, wd+"/restapi.conf")
	if err != nil {
		log.Println(err)
		log.Println("Server starting with default settings...")
	} else {
		// set the package-level variables from the values in the config file
		logLevel = cfg.Log.LogLevel
		applicationName = cfg.App.ApplicationName
		portNumber = cfg.App.PortNumber
		dataStorePath = cfg.App.DataStorePath
		ldbpath = cfg.App.LdbPath
		directoryBlockInSeconds = cfg.App.DirectoryBlockInSeconds
		//	addrStr = cfg.Btc.BTCPubAddr
		sendToBTCinSeconds = cfg.Btc.SendToBTCinSeconds
		walletPassphrase = cfg.Btc.WalletPassphrase
		certHomePath = cfg.Btc.CertHomePath
		rpcClientHost = cfg.Btc.RpcClientHost
		rpcClientEndpoint = cfg.Btc.RpcClientEndpoint
		rpcClientUser = cfg.Btc.RpcClientUser
		rpcClientPass = cfg.Btc.RpcClientPass
		btcTransFee = cfg.Btc.BtcTransFee
	}
}
// watchError is the fatal-error callback handed to dynrsrc.Start: any
// watcher failure aborts the process.
func watchError(e error) {
	panic(e)
}
// readError is the non-fatal callback for dynamic-resource read errors:
// log to stdout and keep running.
func readError(e error) {
	fmt.Println("error: ", e)
}
// initWithBinary loads all sealed blocks for chain from
// dataStorePath/<chainID>/store.*.block, appends a fresh open block at the
// tail, and pulls any still-queued entries for that open block from the db.
// Panics on any read or unmarshal failure.
func initWithBinary(chain *notaryapi.EChain) {
	matches, err := filepath.Glob(dataStorePath + chain.ChainID.String() + "/store.*.block") // need to get it from a property file??
	if err != nil {
		panic(err)
	}
	chain.Blocks = make([]*notaryapi.EBlock, len(matches))
	num := 0
	for _, match := range matches {
		data, err := ioutil.ReadFile(match)
		if err != nil {
			panic(err)
		}
		block := new(notaryapi.EBlock)
		err = block.UnmarshalBinary(data)
		if err != nil {
			panic(err)
		}
		block.Chain = chain
		// Anything read back from disk is a completed block.
		block.IsSealed = true
		chain.Blocks[num] = block
		num++
	}
	//Create an empty block and append to the chain
	if len(chain.Blocks) == 0 {
		chain.NextBlockID = 0
		newblock, _ := notaryapi.CreateBlock(chain, nil, 10)
		chain.Blocks = append(chain.Blocks, newblock)
	} else {
		chain.NextBlockID = uint64(len(chain.Blocks))
		newblock, _ := notaryapi.CreateBlock(chain, chain.Blocks[len(chain.Blocks)-1], 10)
		chain.Blocks = append(chain.Blocks, newblock)
	}
	//Get the unprocessed entries in db for the past # of mins for the open block
	// NOTE(review): timestamp 0 presumably means "everything queued" — confirm
	// against FetchEBEntriesFromQueue.
	binaryTimestamp := make([]byte, 8)
	binary.BigEndian.PutUint64(binaryTimestamp, uint64(0))
	if chain.Blocks[chain.NextBlockID].IsSealed == true {
		panic("chain.Blocks[chain.NextBlockID].IsSealed for chain:" + chain.ChainID.String())
	}
	chain.Blocks[chain.NextBlockID].EBEntries, _ = db.FetchEBEntriesFromQueue(&chain.ChainID.Bytes, &binaryTimestamp)
}
// initDB opens the LevelDB store at ldbpath into the package-level db handle,
// creating a fresh database when none exists. Panics only when creation fails.
func initDB() {
	var err error
	db, err = ldb.OpenLevelDB(ldbpath, false)
	if err != nil {
		// BUG FIX: was log.Println("err opening db: %v", err) — Println does
		// not interpret format verbs; Printf does.
		log.Printf("err opening db: %v", err)
	}

	if db == nil {
		log.Println("Creating new db ...")
		db, err = ldb.OpenLevelDB(ldbpath, true)
		if err != nil {
			panic(err)
		}
	}
	log.Println("Database started from: " + ldbpath)
}
func init() {
loadConfigurations()
gobundle.Setup.Application.Name = applicationName
gobundle.Init()
initDB()
initChains()
dynrsrc.Start(watchError, readError)
notaryapi.StartDynamic(gobundle.DataFile("html.gwp"), readError)
for _, chain := range chainIDMap {
initWithBinary(chain)
fmt.Println("Loaded", len(chain.Blocks)-1, "blocks for chain: " + chain.ChainID.String())
for i := 0; i < len(chain.Blocks); i = i + 1 {
if uint64(i) != chain.Blocks[i].Header.BlockID {
panic(errors.New("BlockID does not equal index"))
}
}
}
// init Directory Block Chain
initDChain()
fmt.Println("Loaded", len(dchain.Blocks)-1, "Directory blocks for chain: "+ notaryapi.EncodeBinary(dchain.ChainID))
// init Entry Credit Chain
initCChain()
fmt.Println("Loaded", len(cchain.Blocks)-1, "Entry Credit blocks for chain: "+ cchain.ChainID.String())
// init dbBatches, dbBatch
dbBatches = &DBBatches {
batches: make([]*notaryapi.DBBatch, 0, 100),
}
dbBatch := ¬aryapi.DBBatch {
DBlocks: make([]*notaryapi.DBlock, 0, 10),
}
dbBatches.batches = append(dbBatches.batches, dbBatch)
// init the export file list for client distribution
initServerDataFileMap()
// create EBlocks and FBlock every 60 seconds
tickers[0] = time.NewTicker(time.Second * time.Duration(directoryBlockInSeconds))
// write 10 FBlock in a batch to BTC every 10 minutes
tickers[1] = time.NewTicker(time.Second * time.Duration(sendToBTCinSeconds))
go func() {
for _ = range tickers[0].C {
fmt.Println("in tickers[0]: newEntryBlock & newFactomBlock")
// Entry Chains
for _, chain := range chainIDMap {
eblock := newEntryBlock(chain)
if eblock != nil{
dchain.AddDBEntry(eblock)
}
save(chain)
}
// Entry Credit Chain
cBlock := newEntryCreditBlock(cchain)
if cBlock != nil{
dchain.AddCBlockToDBEntry(cBlock)
}
saveCChain(cchain)
// Directory Block chain
dbBlock := newDirectoryBlock(dchain)
if dbBlock != nil {
// mark the start block of a DBBatch
fmt.Println("in tickers[0]: len(dbBatch.DBlocks)=", len(dbBatch.DBlocks))
if len(dbBatch.DBlocks) == 0 {
dbBlock.Header.BatchFlag = byte(1)
}
dbBatch.DBlocks = append(dbBatch.DBlocks, dbBlock)
fmt.Println("in tickers[0]: ADDED FBBLOCK: len(dbBatch.DBlocks)=", len(dbBatch.DBlocks))
}
saveDChain(dchain)
}
}()
go func() {
for _ = range tickers[1].C {
fmt.Println("in tickers[1]: new FBBatch. len(dbBatch.DBlocks)=", len(dbBatch.DBlocks))
// skip empty dbBatch.
if len(dbBatch.DBlocks) > 0 {
doneBatch := dbBatch
dbBatch = ¬aryapi.DBBatch {
DBlocks: make([]*notaryapi.DBlock, 0, 10),
}
dbBatches.batchMutex.Lock()
dbBatches.batches = append(dbBatches.batches, doneBatch)
dbBatches.batchMutex.Unlock()
fmt.Printf("in tickers[1]: doneBatch=%#v\n", doneBatch)
// go routine here?
saveDBBatchMerkleRoottoBTC(doneBatch)
}
}
}()
}
// main starts the RESTful HTTP front end on portNumber and blocks until the
// listener fails, at which point it panics. The deferred cleanup stops the
// tickers, the dynamic-resource watcher, and the database.
func main() {
	flag.Parse()

	defer func() {
		tickers[0].Stop()
		tickers[1].Stop()
		dynrsrc.Stop()
		db.Close()
	}()

	http.HandleFunc("/", serveRESTfulHTTP)
	if err := http.ListenAndServe(":"+strconv.Itoa(portNumber), nil); err != nil {
		panic(err)
	}
}
// fileNotExists reports whether name cannot be stat'ed. Note that any Stat
// failure — not only "does not exist" — is treated as "not present", exactly
// as in the original (which returned true for IsNotExist and err != nil
// otherwise; IsNotExist implies a non-nil error, so this collapses to one test).
func fileNotExists(name string) bool {
	_, err := os.Stat(name)
	return err != nil
}
// save writes every sealed block of chain to dataStorePath/<chainID>/ as
// store.%09d.block files; the open tail block is skipped. The block slice is
// snapshotted under the chain mutex so marshalling runs outside the lock.
// Panics on marshal or write failure.
func save(chain *notaryapi.EChain) {
	if len(chain.Blocks) == 0 {
		log.Println("no blocks to save for chain: " + chain.ChainID.String())
		return
	}

	// Copy the slice header under the lock; block pointers are shared.
	bcp := make([]*notaryapi.EBlock, len(chain.Blocks))
	chain.BlockMutex.Lock()
	copy(bcp, chain.Blocks)
	chain.BlockMutex.Unlock()

	for i, block := range bcp {
		//the open block is not saved
		if block.IsSealed == false {
			continue
		}
		data, err := block.MarshalBinary()
		if err != nil {
			panic(err)
		}
		strChainID := chain.ChainID.String()
		if fileNotExists(dataStorePath + strChainID) {
			err := os.MkdirAll(dataStorePath+strChainID, 0777)
			if err == nil {
				log.Println("Created directory " + dataStorePath + strChainID)
			} else {
				log.Println(err)
			}
		}
		// %09d is the block's index within the chain.
		err = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+"/store.%09d.block", i), data, 0777)
		if err != nil {
			panic(err)
		}
	}
}
// serveRESTfulHTTP is the single HTTP handler for the REST API. It parses the
// request into (path, method, accept, form), dispatches POSTs on the
// "datatype" form field (commitentry, revealentry, chain, buycredit,
// getbalance, filelist, file, or a default entry post), then marshals the
// resource — or the accumulated *notaryapi.Error — back in the negotiated
// format.
//
// NOTE(review): several case bodies declare `err :=`, creating a new local
// `error` that shadows the outer *notaryapi.Error, so those failures are only
// printed and never reach the response-marshalling path below — confirm and
// fix separately.
func serveRESTfulHTTP(w http.ResponseWriter, r *http.Request) {
	var resource interface{}
	var err *notaryapi.Error
	var buf bytes.Buffer

	path, method, accept, form, err := parse(r)

	switch method {
	case "GET":
		//resource, err = getServerDataFileMap()
	case "POSTFORM":
		fmt.Println("POSTFORM")
	case "POST":
		fmt.Println("Got to POST")
		if len(path) != 1 {
			err = notaryapi.CreateError(notaryapi.ErrorBadMethod, `POST can only be used in the root context: /v1`)
			return
		}
		datatype := form.Get("datatype")
		fmt.Println("set datatype:", datatype)
		switch datatype {
		case "commitentry":
			fmt.Println("Got to commitentry")
			// NOTE(review): hash and pub are nil pointers; the writes to
			// pub.Bytes / hash.Bytes below dereference nil and will panic —
			// they need new(notaryapi.Hash) before use.
			var (
				hash, pub *notaryapi.Hash
				timestamp uint64
			)
			sig, err := hex.DecodeString(form.Get("signature"))
			if err != nil {
				fmt.Println("Commit: signature: ", err)
			}
			// First 32 bytes of the "signature" field carry the public key.
			sig, err := hex.DecodeString(form.Get("signature"))
			pub.Bytes = sig[:32]
			data, err := hex.DecodeString(form.Get("data"))
			if err != nil {
				fmt.Println("Commit: data: ", err)
			}
			// "data" layout: 8-byte big-endian timestamp || entry hash.
			timestamp = binary.BigEndian.Uint64(data[0:8])
			hash.Bytes = data[8:]
			_, err = processCommitEntry(hash, pub, int64(timestamp))
			fmt.Println("got Commit")
			fmt.Println("err=", err)
		case "revealentry":
			fmt.Println("got to revealentry")
			// NOTE(review): entry is a nil pointer; entry.UnmarshalBinary will
			// panic unless the method tolerates a nil receiver — should be
			// new(notaryapi.Entry).
			var entry *notaryapi.Entry
			data, err := hex.DecodeString(form.Get("data"))
			if err != nil {
				fmt.Println("Reveal: data: ", err)
			}
			entry.UnmarshalBinary(data)
			_, err = processRevealEntry(entry)
			fmt.Println("got Reveal")
			fmt.Println("err=", err)
		case "chain":
			resource, err = postChain("/"+strings.Join(path, "/"), form)
		case "buycredit":
			pubKey, err := notaryapi.HexToHash(form.Get("ECPubKey"))
			if err != nil {
				fmt.Println("Error in parsing pubKey:", err.Error())
			}
			value, err := strconv.ParseUint(form.Get("factoidbase"), 10, 64)
			if err != nil {
				fmt.Println("Error in parsing value:", err.Error())
			}
			// Convert the factoid base amount into entry credits.
			credits := value * creditsPerFactoid / 1000000000
			resource, err = processBuyEntryCredit(pubKey, int32(credits), pubKey)
			printCreditMap()
		case "getbalance":
			pubKey, err := notaryapi.HexToHash(form.Get("ECPubKey"))
			if err != nil {
				fmt.Println("Error in parsing pubKey:", err.Error())
			}
			resource, err = getEntryCreditBalance(pubKey)
		case "filelist":
			resource, err = getServerDataFileMapJSON()
		case "file":
			// Serve a previously exported CSV straight from disk and return.
			fileKey := form.Get("filekey")
			filename := serverDataFileMap[fileKey]
			http.ServeFile(w, r, dataStorePath+"csv/"+filename)
			return
		default:
			resource, err = postEntry("/"+strings.Join(path, "/"), form)
		}
	default:
		err = notaryapi.CreateError(notaryapi.ErrorBadMethod, fmt.Sprintf(`The HTTP %s method is not supported`, method))
		return
	}

	if err != nil {
		resource = err
	}

	// "byref=true" in the form switches Marshal into its alternate mode.
	alt := false
	for _, s := range form["byref"] {
		b, err := strconv.ParseBool(s)
		if err == nil {
			alt = b
			break
		}
	}

	err = notaryapi.Marshal(resource, accept, &buf, alt)

	switch accept {
	case "text":
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	case "json":
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
	case "xml":
		w.Header().Set("Content-Type", "application/xml; charset=utf-8")
	case "html":
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
	}

	if err != nil {
		var r *notaryapi.Error
		buf.Reset()
		r = notaryapi.Marshal(err, accept, &buf, false)
		if r != nil {
			err = r
		}
		w.WriteHeader(err.HTTPCode)
	}

	//buf.WriteTo(w)
	// NOTE(review): when resource holds a *notaryapi.Error (error path above),
	// this []byte type assertion panics — confirm and guard with `, ok`.
	if resource != nil {
		//Send back entry hash
		w.Write(resource.([]byte))
	} else {
		w.Write([]byte("\n\n"))
	}
}
// postEntry decodes an entry from the "entry" form field, in the format named
// by the "format" field (json by default, xml, or hex binary), and forwards it
// to processRevealEntry. Returns the entry hash bytes on success.
func postEntry(context string, form url.Values) (interface{}, *notaryapi.Error) {
	newEntry := new(notaryapi.Entry)
	format, data := form.Get("format"), form.Get("entry")

	switch format {
	case "", "json":
		reader := gocoding.ReadString(data)
		err := notaryapi.UnmarshalJSON(reader, newEntry)
		if err != nil {
			return nil, notaryapi.CreateError(notaryapi.ErrorJSONUnmarshal, err.Error())
		}
	case "xml":
		err := xml.Unmarshal([]byte(data), newEntry)
		if err != nil {
			return nil, notaryapi.CreateError(notaryapi.ErrorXMLUnmarshal, err.Error())
		}
	case "binary":
		binaryEntry, _ := notaryapi.DecodeBinary(&data)
		// BUG FIX: was fmt.Println("data:%v", data) — Println does not
		// interpret format verbs; use Printf.
		fmt.Printf("data: %v\n", data)
		err := newEntry.UnmarshalBinary(binaryEntry)
		if err != nil {
			// NOTE(review): reuses ErrorXMLUnmarshal for a binary failure —
			// confirm whether a dedicated error code exists.
			return nil, notaryapi.CreateError(notaryapi.ErrorXMLUnmarshal, err.Error())
		}
	default:
		return nil, notaryapi.CreateError(notaryapi.ErrorUnsupportedUnmarshal, fmt.Sprintf(`The format "%s" is not supported`, format))
	}

	return processRevealEntry(newEntry)
}
// processRevealEntry accepts a previously-committed entry: it persists the
// entry, consumes one pre-payment from prePaidEntryMap, and appends the entry
// to the chain's open block. Returns the entry hash bytes on success.
func processRevealEntry(newEntry *notaryapi.Entry) ([]byte, *notaryapi.Error) {
	if newEntry == nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `Entity to be POSTed is nil`)
	}
	fmt.Println("chainID:", newEntry.ChainID.String())
	chain := chainIDMap[newEntry.ChainID.String()]
	if chain == nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `This chain is not supported`) //ErrorInternal?
	}

	// store the new entry in db
	entryBinary, _ := newEntry.MarshalBinary()
	entryHash := notaryapi.Sha(entryBinary)
	db.InsertEntryAndQueue(entryHash, &entryBinary, newEntry, &chain.ChainID.Bytes)

	// Precalculate the key for prePaidEntryMap
	key := entryHash.String()

	chain.BlockMutex.Lock()
	// Consume one pre-payment for this entry hash.
	payments, ok := prePaidEntryMap[key]
	if !ok {
		// BUG FIX: the original returned here while still holding BlockMutex,
		// deadlocking every later operation on this chain. Unlock first.
		chain.BlockMutex.Unlock()
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `Credit needs to paid first before reveal an entry:`+entryHash.String())
	}
	if payments > 1 {
		prePaidEntryMap[key] = payments - 1
	} else {
		delete(prePaidEntryMap, key)
	}
	err := chain.Blocks[len(chain.Blocks)-1].AddEBEntry(newEntry)
	chain.BlockMutex.Unlock()

	if err != nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, fmt.Sprintf(`Error while adding Entity to Block: %s`, err.Error()))
	}

	return entryHash.Bytes, nil
}
// processCommitEntry pays for a future entry reveal: it debits
// creditsPerEntry from pubKey's balance, records a PayEntryCBEntry on the
// entry credit chain, and bumps prePaidEntryMap so a later reveal of
// entryHash is accepted. Returns an error when the balance is insufficient.
func processCommitEntry(entryHash *notaryapi.Hash, pubKey *notaryapi.Hash, timeStamp int64) ([]byte, error) {
	// Create PayEntryCBEntry
	cbEntry := notaryapi.NewPayEntryCBEntry(pubKey, entryHash, creditsPerEntry, timeStamp)

	cchain.BlockMutex.Lock()
	// Update the credit balance in memory (a missing key reads as zero).
	credits := eCreditMap[pubKey.String()]
	if credits+creditsPerEntry < 0 {
		// BUG FIX: the original returned while still holding BlockMutex,
		// deadlocking all later commits/reveals. Unlock before returning.
		cchain.BlockMutex.Unlock()
		return nil, errors.New("Not enough credit for public key:" + pubKey.String() + " Balance:" + fmt.Sprint(credits))
	}
	eCreditMap[pubKey.String()] = credits + creditsPerEntry
	err := cchain.Blocks[len(cchain.Blocks)-1].AddCBEntry(cbEntry)
	// Update the prePaidEntryMap in memory
	prePaidEntryMap[entryHash.String()]++
	cchain.BlockMutex.Unlock()

	return entryHash.Bytes, err
}
// processCommitChain pays for a future chain creation: it debits
// creditsPerChain from pubKey's balance, records a PayChainCBEntry on the
// entry credit chain, and bumps prePaidEntryMap under the combined
// chainID+entryHash key so a later reveal of the chain is accepted.
func processCommitChain(entryHash *notaryapi.Hash, chainIDHash *notaryapi.Hash, entryChainIDHash *notaryapi.Hash, pubKey *notaryapi.Hash) ([]byte, error) {
	// Precalculate the key and value pair for prePaidEntryMap
	key := getPrePaidChainKey(entryHash, chainIDHash)

	// Create PayChainCBEntry
	cbEntry := notaryapi.NewPayChainCBEntry(pubKey, entryHash, creditsPerChain, chainIDHash, entryChainIDHash)

	cchain.BlockMutex.Lock()
	credits := eCreditMap[pubKey.String()]
	// BUG FIX: the original tested credits+creditsPerEntry here while
	// charging creditsPerChain below, letting balances go negative.
	if credits+creditsPerChain < 0 {
		// BUG FIX: unlock before returning — the original kept BlockMutex
		// locked on this path, deadlocking the credit chain.
		cchain.BlockMutex.Unlock()
		return nil, errors.New("Insufficient credits for public key:" + pubKey.String() + " Balance:" + fmt.Sprint(credits))
	}
	eCreditMap[pubKey.String()] = credits + creditsPerChain
	err := cchain.Blocks[len(cchain.Blocks)-1].AddCBEntry(cbEntry)
	// Update the prePaidEntryMap in memory
	prePaidEntryMap[key]++
	cchain.BlockMutex.Unlock()

	return chainIDHash.Bytes, err
}
// processBuyEntryCredit credits pubKey with the given number of entry
// credits, backed by the factoid transaction factoidTxHash, recording a
// BuyCBEntry on the entry credit chain and updating the in-memory balance.
func processBuyEntryCredit(pubKey *notaryapi.Hash, credits int32, factoidTxHash *notaryapi.Hash) ([]byte, error) {
	cbEntry := notaryapi.NewBuyCBEntry(pubKey, factoidTxHash, credits)

	cchain.BlockMutex.Lock()
	err := cchain.Blocks[len(cchain.Blocks)-1].AddCBEntry(cbEntry)
	// Credit the in-memory balance (a missing key reads as zero).
	eCreditMap[pubKey.String()] += credits
	cchain.BlockMutex.Unlock()

	return pubKey.Bytes, err
}
// processRevealChain creates a previously-committed chain: it verifies the
// chain is new and pre-paid, persists chain and first entry, initializes the
// chain's block list, registers it in chainIDMap, and appends the first entry
// to the open block. Returns the chain ID bytes on success.
func processRevealChain(newChain *notaryapi.EChain) ([]byte, *notaryapi.Error) {
	chain := chainIDMap[newChain.ChainID.String()]
	if chain != nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `This chain is already existing`) //ErrorInternal?
	}
	if newChain.FirstEntry == nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `The first entry is required to create a new chain.`) //ErrorInternal?
	}
	// Remove the entry for prePaidEntryMap
	binaryEntry, _ := newChain.FirstEntry.MarshalBinary()
	firstEntryHash := notaryapi.Sha(binaryEntry)
	key := getPrePaidChainKey(firstEntryHash, newChain.ChainID)
	_, ok := prePaidEntryMap[key]
	if ok {
		delete(prePaidEntryMap, key)
	} else {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `Credit needs to paid first before creating a new chain:`+newChain.ChainID.String())
	}
	// Store the new chain in db
	db.InsertChain(newChain)

	// Chain initialization
	initWithBinary(newChain)
	fmt.Println("Loaded", len(newChain.Blocks)-1, "blocks for chain: "+newChain.ChainID.String())

	// Add the new chain in the chainIDMap
	chainIDMap[newChain.ChainID.String()] = newChain

	// store the new entry in db
	entryBinary, _ := newChain.FirstEntry.MarshalBinary()
	entryHash := notaryapi.Sha(entryBinary)
	db.InsertEntryAndQueue(entryHash, &entryBinary, newChain.FirstEntry, &newChain.ChainID.Bytes)

	newChain.BlockMutex.Lock()
	err := newChain.Blocks[len(newChain.Blocks)-1].AddEBEntry(newChain.FirstEntry)
	newChain.BlockMutex.Unlock()

	if err != nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, fmt.Sprintf(`Error while adding the First Entry to Block: %s`, err.Error()))
	}

	// Refresh the exported support-data CSV for clients.
	ExportDataFromDbToFile()

	return newChain.ChainID.Bytes, nil
}
// getEntryCreditBalance returns pubKey's current credit balance encoded as a
// 4-byte big-endian int32 (zero for unknown keys).
func getEntryCreditBalance(pubKey *notaryapi.Hash) ([]byte, error) {
	out := make([]byte, 4)
	binary.BigEndian.PutUint32(out, uint32(eCreditMap[pubKey.String()]))
	return out, nil
}
// postChain decodes a chain from the "chain" form field (only hex "binary"
// format is supported), derives its chain ID, and hands it to
// processRevealChain.
func postChain(context string, form url.Values) (interface{}, *notaryapi.Error) {
	newChain := new(notaryapi.EChain)
	format, data := form.Get("format"), form.Get("chain")

	switch format {
	case "binary":
		binaryChain, _ := notaryapi.DecodeBinary(&data)
		err := newChain.UnmarshalBinary(binaryChain)
		// BUG FIX: the original derived the ID before checking the unmarshal
		// error; check err first so a garbage payload never reaches
		// GenerateIDFromName.
		if err != nil {
			return nil, notaryapi.CreateError(notaryapi.ErrorInternal, err.Error())
		}
		newChain.GenerateIDFromName()
	default:
		return nil, notaryapi.CreateError(notaryapi.ErrorUnsupportedUnmarshal, fmt.Sprintf(`The format "%s" is not supported`, format))
	}

	// newChain came from new() and is never nil here (the original's nil
	// check was dead code), so hand it straight to the reveal path.
	return processRevealChain(newChain)
}
// saveDChain writes every sealed directory block of chain to
// dataStorePath/<hex chainID>/store.%09d.block; the open tail block is
// skipped. The block slice is snapshotted under the chain mutex so
// marshalling runs outside the lock. Panics on marshal or write failure.
func saveDChain(chain *notaryapi.DChain) {
	if len(chain.Blocks) == 0 {
		//log.Println("no blocks to save for chain: " + string (*chain.ChainID))
		return
	}

	// Copy the slice header under the lock; block pointers are shared.
	bcp := make([]*notaryapi.DBlock, len(chain.Blocks))
	chain.BlockMutex.Lock()
	copy(bcp, chain.Blocks)
	chain.BlockMutex.Unlock()

	for i, block := range bcp {
		//the open block is not saved
		if block.IsSealed == false {
			continue
		}
		data, err := block.MarshalBinary()
		if err != nil {
			panic(err)
		}
		strChainID := notaryapi.EncodeBinary(chain.ChainID)
		if fileNotExists(dataStorePath + strChainID) {
			err := os.MkdirAll(dataStorePath+strChainID, 0777)
			if err == nil {
				log.Println("Created directory " + dataStorePath + strChainID)
			} else {
				log.Println(err)
			}
		}
		// %09d is the block's index within the chain.
		err = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+"/store.%09d.block", i), data, 0777)
		if err != nil {
			panic(err)
		}
	}
}
// saveCChain writes every sealed credit block of chain to
// dataStorePath/<chainID>/store.%09d.block; the open tail block is skipped.
// The block slice is snapshotted under the chain mutex so marshalling runs
// outside the lock. Panics on marshal or write failure.
func saveCChain(chain *notaryapi.CChain) {
	if len(chain.Blocks) == 0 {
		//log.Println("no blocks to save for chain: " + string (*chain.ChainID))
		return
	}

	// Copy the slice header under the lock; block pointers are shared.
	bcp := make([]*notaryapi.CBlock, len(chain.Blocks))
	chain.BlockMutex.Lock()
	copy(bcp, chain.Blocks)
	chain.BlockMutex.Unlock()

	for i, block := range bcp {
		//the open block is not saved
		if block.IsSealed == false {
			continue
		}
		data, err := block.MarshalBinary()
		if err != nil {
			panic(err)
		}
		strChainID := chain.ChainID.String()
		if fileNotExists(dataStorePath + strChainID) {
			err := os.MkdirAll(dataStorePath+strChainID, 0777)
			if err == nil {
				log.Println("Created directory " + dataStorePath + strChainID)
			} else {
				log.Println(err)
			}
		}
		// %09d is the block's index within the chain.
		err = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+"/store.%09d.block", i), data, 0777)
		if err != nil {
			panic(err)
		}
	}
}
// initDChain loads the Directory Block Chain (its ID is the all-zero 32-byte
// array) from disk, appends a fresh open block at the tail, and pulls queued
// directory entries for it from the db. Panics on any read/unmarshal failure.
func initDChain() {
	dchain = new(notaryapi.DChain)

	// Directory chain ID: 32 zero bytes.
	barray := (make([]byte, 32))
	dchain.ChainID = &barray

	matches, err := filepath.Glob(dataStorePath + notaryapi.EncodeBinary(dchain.ChainID) + "/store.*.block") // need to get it from a property file??
	if err != nil {
		panic(err)
	}

	dchain.Blocks = make([]*notaryapi.DBlock, len(matches))
	num := 0
	for _, match := range matches {
		data, err := ioutil.ReadFile(match)
		if err != nil {
			panic(err)
		}
		block := new(notaryapi.DBlock)
		err = block.UnmarshalBinary(data)
		if err != nil {
			panic(err)
		}
		block.Chain = dchain
		// Anything read back from disk is a completed block.
		block.IsSealed = true
		dchain.Blocks[num] = block
		num++
	}

	//Create an empty block and append to the chain
	if len(dchain.Blocks) == 0 {
		dchain.NextBlockID = 0
		newblock, _ := notaryapi.CreateDBlock(dchain, nil, 10)
		dchain.Blocks = append(dchain.Blocks, newblock)
	} else {
		dchain.NextBlockID = uint64(len(dchain.Blocks))
		newblock, _ := notaryapi.CreateDBlock(dchain, dchain.Blocks[len(dchain.Blocks)-1], 10)
		dchain.Blocks = append(dchain.Blocks, newblock)
	}

	//Get the unprocessed entries in db for the past # of mins for the open block
	// NOTE(review): timestamp 0 presumably means "everything queued" — confirm
	// against FetchDBEntriesFromQueue.
	binaryTimestamp := make([]byte, 8)
	binary.BigEndian.PutUint64(binaryTimestamp, uint64(0))
	if dchain.Blocks[dchain.NextBlockID].IsSealed == true {
		panic("dchain.Blocks[dchain.NextBlockID].IsSealed for chain:" + notaryapi.EncodeBinary(dchain.ChainID))
	}
	dchain.Blocks[dchain.NextBlockID].DBEntries, _ = db.FetchDBEntriesFromQueue(&binaryTimestamp)
}
// initCChain loads the Entry Credit Chain (ID = 0x01 followed by 31 zero
// bytes) from disk, rebuilds the in-memory credit balances from the sealed
// blocks, and appends a fresh open block at the tail. Also (re)creates the
// eCreditMap and prePaidEntryMap. Panics on any read/unmarshal failure.
func initCChain() {

	eCreditMap = make(map[string]int32)
	prePaidEntryMap = make(map[string]int32)

	//init cchain
	cchain = new(notaryapi.CChain)

	//to be improved??
	barray := (make([]byte, 32))
	barray[0] = 1
	cchain.ChainID = new(notaryapi.Hash)
	cchain.ChainID.SetBytes(barray)

	matches, err := filepath.Glob(dataStorePath + cchain.ChainID.String() + "/store.*.block") // need to get it from a property file??
	if err != nil {
		panic(err)
	}

	cchain.Blocks = make([]*notaryapi.CBlock, len(matches))
	num := 0
	for _, match := range matches {
		data, err := ioutil.ReadFile(match)
		if err != nil {
			panic(err)
		}
		block := new(notaryapi.CBlock)
		err = block.UnmarshalBinary(data)
		if err != nil {
			panic(err)
		}
		block.Chain = cchain
		// Anything read back from disk is a completed block.
		block.IsSealed = true

		// Calculate the EC balance for each account
		initializeECreditMap(block)

		cchain.Blocks[num] = block
		num++
	}

	//Create an empty block and append to the chain
	if len(cchain.Blocks) == 0 {
		cchain.NextBlockID = 0
		newblock, _ := notaryapi.CreateCBlock(cchain, nil, 10)
		cchain.Blocks = append(cchain.Blocks, newblock)
	} else {
		cchain.NextBlockID = uint64(len(cchain.Blocks))
		newblock, _ := notaryapi.CreateCBlock(cchain, cchain.Blocks[len(cchain.Blocks)-1], 10)
		cchain.Blocks = append(cchain.Blocks, newblock)
	}

	//Get the unprocessed entries in db for the past # of mins for the open block
	/*	binaryTimestamp := make([]byte, 8)
		binary.BigEndian.PutUint64(binaryTimestamp, uint64(0))
		if cchain.Blocks[cchain.NextBlockID].IsSealed == true {
			panic ("dchain.Blocks[dchain.NextBlockID].IsSealed for chain:" + notaryapi.EncodeBinary(dchain.ChainID))
		}
		dchain.Blocks[dchain.NextBlockID].DBEntries, _ = db.FetchDBEntriesFromQueue(&binaryTimestamp)
	*/
}
// ExportDbToFile dumps every db record belonging to directory block dbHash
// into a timestamped CSV under dataStorePath/csv/ and registers the file in
// serverDataFileMap (keyed by the SHA of the file name) for client download.
func ExportDbToFile(dbHash *notaryapi.Hash) {

	if fileNotExists(dataStorePath + "csv/") {
		os.MkdirAll(dataStorePath+"csv/", 0755)
	}

	//write the records to a csv file:
	filename := time.Now().Format(time.RFC3339) + "." + dbHash.String() + ".csv"
	file, err := os.Create(dataStorePath + "csv/" + filename)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	writer := csv.NewWriter(file)

	ldbMap, err := db.FetchAllDBRecordsByDBHash(dbHash, cchain.ChainID)

	if err != nil {
		log.Println(err)
		return
	}

	// Map iteration order is random, so rows come out unordered.
	for key, value := range ldbMap {
		//csv header: key, value
		writer.Write([]string{key, value})
	}
	writer.Flush()
	// BUG FIX: the original never checked the buffered csv writer for errors,
	// so a failed dump could still be advertised to clients.
	if err := writer.Error(); err != nil {
		log.Println(err)
		return
	}

	// Add the file to the distribution list
	hash := notaryapi.Sha([]byte(filename))
	serverDataFileMap[hash.String()] = filename
}
// ExportDataFromDbToFile dumps the support db records into a timestamped
// "*.supportdata.csv" under dataStorePath/csv/ and registers the file in
// serverDataFileMap (keyed by the SHA of the file name) for client download.
func ExportDataFromDbToFile() {

	if fileNotExists(dataStorePath + "csv/") {
		os.MkdirAll(dataStorePath+"csv/", 0755)
	}

	//write the records to a csv file:
	filename := time.Now().Format(time.RFC3339) + ".supportdata.csv"
	file, err := os.Create(dataStorePath + "csv/" + filename)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	writer := csv.NewWriter(file)

	ldbMap, err := db.FetchSupportDBRecords()

	if err != nil {
		log.Println(err)
		return
	}

	// Map iteration order is random, so rows come out unordered.
	for key, value := range ldbMap {
		//csv header: key, value
		writer.Write([]string{key, value})
	}
	writer.Flush()
	// BUG FIX: the original never checked the buffered csv writer for errors,
	// so a failed dump could still be advertised to clients.
	if err := writer.Error(); err != nil {
		log.Println(err)
		return
	}

	// Add the file to the distribution list
	hash := notaryapi.Sha([]byte(filename))
	serverDataFileMap[hash.String()] = filename
}
// initChains loads every known entry chain from the database into the
// package-level chainIDMap, keyed by the chain ID string. Panics if the
// database fetch fails.
func initChains() {
	//initChainIDs()

	chainIDMap = make(map[string]*notaryapi.EChain)
	//chainNameMap = make(map[string]*notaryapi.Chain)

	chains, err := db.FetchAllChainsByName(nil)

	if err != nil {
		panic(err)
	}

	for _, chain := range *chains {
		// Copy the loop variable: before Go 1.22, taking &chain directly
		// would alias the single iteration variable shared by every pass.
		var newChain = chain
		chainIDMap[newChain.ChainID.String()] = &newChain
		//chainIDMap[string(chain.ChainID.Bytes)] = &chain
	}

}
// initializeECreditMap folds one credit block into the in-memory balances:
// each entry's credit delta is added to its public key's running total.
func initializeECreditMap(block *notaryapi.CBlock) {
	for _, entry := range block.CBEntries {
		key := entry.PublicKey().String()
		// A missing key reads as zero, so += covers first-seen keys too.
		eCreditMap[key] += entry.Credits()
	}
}
// getPrePaidChainKey builds the prePaidEntryMap key for a chain commitment.
// Key layout: <chainID hex><first-entry hash hex>.
func getPrePaidChainKey(entryHash *notaryapi.Hash, chainIDHash *notaryapi.Hash) string {
	prefix := chainIDHash.String()
	suffix := entryHash.String()
	return prefix + suffix
}
// printCreditMap dumps every entry-credit balance to stdout (debug helper;
// map iteration order is random).
func printCreditMap() {
	fmt.Println("eCreditMap:")
	for key, value := range eCreditMap {
		fmt.Println("Key:", key, "Value", value)
	}
}
// printPaidEntryMap dumps every outstanding pre-payment to stdout (debug
// helper; map iteration order is random).
func printPaidEntryMap() {
	fmt.Println("prePaidEntryMap:")
	for key, value := range prePaidEntryMap {
		fmt.Println("Key:", key, "Value", value)
	}
}
// printCChain dumps the entry credit chain to stdout for debugging: for each
// sealed block it prints the header and a JSON rendering of every credit
// entry, dispatched on the entry's concrete type name.
func printCChain() {
	fmt.Println("cchain:", cchain.ChainID.String())

	for i, block := range cchain.Blocks {
		if !block.IsSealed {
			continue
		}
		var buf bytes.Buffer
		err := factomapi.SafeMarshal(&buf, block.Header)

		// BUG FIX: was string(i), which converts the index to a rune
		// (e.g. block 65 printed as "A"); use strconv.Itoa.
		fmt.Println("block.Header", strconv.Itoa(i), ":", string(buf.Bytes()))

		for _, cbentry := range block.CBEntries {
			t := reflect.TypeOf(cbentry)
			fmt.Println("cbEntry Type:", t.Name(), t.String())

			if strings.Contains(t.String(), "PayChainCBEntry") {
				fmt.Println("PayChainCBEntry - pubkey:", cbentry.PublicKey().String(), " Credits:", cbentry.Credits())
				var buf bytes.Buffer
				err := factomapi.SafeMarshal(&buf, cbentry)
				if err != nil {
					// BUG FIX: Println does not interpret %v verbs.
					fmt.Println("Error:", err)
				}
				fmt.Println("PayChainCBEntry JSON", ":", string(buf.Bytes()))
			} else if strings.Contains(t.String(), "PayEntryCBEntry") {
				fmt.Println("PayEntryCBEntry - pubkey:", cbentry.PublicKey().String(), " Credits:", cbentry.Credits())
				var buf bytes.Buffer
				err := factomapi.SafeMarshal(&buf, cbentry)
				if err != nil {
					fmt.Println("Error:", err)
				}
				fmt.Println("PayEntryCBEntry JSON", ":", string(buf.Bytes()))
			} else if strings.Contains(t.String(), "BuyCBEntry") {
				fmt.Println("BuyCBEntry - pubkey:", cbentry.PublicKey().String(), " Credits:", cbentry.Credits())
				var buf bytes.Buffer
				err := factomapi.SafeMarshal(&buf, cbentry)
				if err != nil {
					fmt.Println("Error:", err)
				}
				fmt.Println("BuyCBEntry JSON", ":", string(buf.Bytes()))
			}
		}

		// Reports the header-marshal error from the top of the loop body
		// (kept in its original position to preserve output ordering).
		if err != nil {
			fmt.Println("Error:", err)
		}
	}
}
// initServerDataFileMap rebuilds the export-file index for client
// distribution: every *.csv file under dataStorePath/csv is registered under
// the SHA hash of its file name.
func initServerDataFileMap() error {
	serverDataFileMap = make(map[string]string)

	entries, err := ioutil.ReadDir(dataStorePath + "csv")
	if err != nil {
		fmt.Println("Error in initServerDataFileMap:", err.Error())
		return err
	}

	for _, entry := range entries {
		name := entry.Name()
		if entry.IsDir() || !strings.HasSuffix(name, ".csv") {
			continue
		}
		serverDataFileMap[notaryapi.Sha([]byte(name)).String()] = name
	}
	return nil
}
// getServerDataFileMapJSON renders the export-file index as JSON bytes. On a
// marshalling failure the partial buffer is returned together with a
// BadPOSTData error, matching the original behavior.
func getServerDataFileMapJSON() (interface{}, *notaryapi.Error) {
	var buf bytes.Buffer
	if err := factomapi.SafeMarshal(&buf, serverDataFileMap); err != nil {
		return buf.Bytes(), notaryapi.CreateError(notaryapi.ErrorBadPOSTData, err.Error())
	}
	return buf.Bytes(), nil
}
// NOTE: change-log residue from the dataset ("main.go:445 removed POSTFORM, added println of form") — everything below is a second, truncated copy of this file.
package main
import (
"bytes"
"encoding/hex"
"encoding/xml"
"errors"
"flag"
"fmt"
"github.com/FactomProject/FactomCode/notaryapi"
"github.com/conformal/btcrpcclient"
"github.com/conformal/btcutil"
"github.com/FactomProject/dynrsrc"
"github.com/FactomProject/gobundle"
"github.com/FactomProject/gocoding"
"io/ioutil"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"sync"
"os"
"time"
"log"
"encoding/binary"
"encoding/csv"
"github.com/FactomProject/FactomCode/database"
"github.com/FactomProject/FactomCode/database/ldb"
"code.google.com/p/gcfg"
"reflect"
"github.com/FactomProject/FactomCode/factomapi"
)
// Package-level runtime state shared by the handlers and the ticker goroutines.
// (Duplicate copy of this file's state block from the dataset concatenation.)
var (
	wclient *btcrpcclient.Client //rpc client for btcwallet rpc server
	dclient *btcrpcclient.Client //rpc client for btcd rpc server

	currentAddr btcutil.Address
	// tickers[0] cuts entry/directory blocks, tickers[1] anchors batches to BTC.
	tickers [2]*time.Ticker
	db      database.Db // database handle shared by all handlers

	chainIDMap map[string]*notaryapi.EChain // ChainIDMap with chainID string([32]byte) as key
	//chainNameMap map[string]*notaryapi.Chain // ChainNameMap with chain name string as key

	dchain *notaryapi.DChain //Directory Block Chain
	cchain *notaryapi.CChain //Entry Credit Chain

	// Credit prices are negative because they are debited from a balance.
	creditsPerChain   int32  = -5
	creditsPerEntry   int32  = -1
	creditsPerFactoid uint64 = 1000

	eCreditMap      map[string]int32 // eCreditMap with public key string([32]byte) as key, credit balance as value
	prePaidEntryMap map[string]int32 // Paid but unrevealed entries string(Entry Hash) as key, Number of payments as value

	// dbBatches []*notaryapi.FBBatch
	dbBatches *DBBatches
	dbBatch   *notaryapi.DBBatch // the batch currently being filled; swapped out by tickers[1]

	//Map to store export csv files, keyed by the SHA of the file name
	serverDataFileMap map[string]string
)
// Configuration defaults; loadConfigurations overrides them from restapi.conf.
// NOTE(review): wallet passphrase and RPC credentials are hard-coded defaults —
// confirm they are only used for test networks.
var (
	logLevel                = "DEBUG"
	portNumber          int = 8083
	sendToBTCinSeconds      = 600
	directoryBlockInSeconds = 60
	applicationName         = "factom/restapi"
	dataStorePath           = "/tmp/store/seed/"
	ldbpath                 = "/tmp/ldb9"
	//BTC:
	//	addrStr = "movaFTARmsaTMk3j71MpX8HtMURpsKhdra"
	walletPassphrase          = "lindasilva"
	certHomePath              = "btcwallet"
	rpcClientHost             = "localhost:18332" //btcwallet rpcserver address
	rpcClientEndpoint         = "ws"
	rpcClientUser             = "testuser"
	rpcClientPass             = "notarychain"
	btcTransFee       float64 = 0.0001
	certHomePathBtcd          = "btcd"
	rpcBtcdHost               = "localhost:18334" //btcd rpcserver address
)
// DBBatches accumulates directory-block batches queued for (or already sent
// to) BTC anchoring. batchMutex guards batches against concurrent access from
// the ticker goroutines started in init().
type DBBatches struct {
	batches    []*notaryapi.DBBatch
	batchMutex sync.Mutex
}
// loadConfigurations reads restapi.conf from the current working directory
// via gcfg and overrides the package-level defaults with its values. If the
// file is missing or unparsable, the compiled-in defaults are kept.
func loadConfigurations() {
	// The anonymous struct must mirror the [app]/[btc]/[log] sections of the
	// config file; gcfg maps section and field names onto it by name.
	cfg := struct {
		App struct {
			PortNumber              int
			ApplicationName         string
			LdbPath                 string
			DataStorePath           string
			DirectoryBlockInSeconds int
		}
		Btc struct {
			BTCPubAddr         string
			SendToBTCinSeconds int
			WalletPassphrase   string
			CertHomePath       string
			RpcClientHost      string
			RpcClientEndpoint  string
			RpcClientUser      string
			RpcClientPass      string
			BtcTransFee        float64
		}
		Log struct {
			LogLevel string
		}
	}{}

	wd, err := os.Getwd()
	if err != nil {
		log.Println(err)
	}
	err = gcfg.ReadFileInto(&cfg, wd+"/restapi.conf")
	if err != nil {
		log.Println(err)
		log.Println("Server starting with default settings...")
	} else {
		// set the package-level variables from the values in the config file
		logLevel = cfg.Log.LogLevel
		applicationName = cfg.App.ApplicationName
		portNumber = cfg.App.PortNumber
		dataStorePath = cfg.App.DataStorePath
		ldbpath = cfg.App.LdbPath
		directoryBlockInSeconds = cfg.App.DirectoryBlockInSeconds
		//	addrStr = cfg.Btc.BTCPubAddr
		sendToBTCinSeconds = cfg.Btc.SendToBTCinSeconds
		walletPassphrase = cfg.Btc.WalletPassphrase
		certHomePath = cfg.Btc.CertHomePath
		rpcClientHost = cfg.Btc.RpcClientHost
		rpcClientEndpoint = cfg.Btc.RpcClientEndpoint
		rpcClientUser = cfg.Btc.RpcClientUser
		rpcClientPass = cfg.Btc.RpcClientPass
		btcTransFee = cfg.Btc.BtcTransFee
	}
}
// watchError is the error callback handed to dynrsrc.Start for dynamic
// resource watching; any watcher error is treated as fatal.
func watchError(err error) {
	panic(err)
}
// readError is the error callback handed to dynrsrc/notaryapi for resource
// read failures; errors are printed to stdout and otherwise ignored.
func readError(err error) {
	fmt.Println("error: ", err)
}
// initWithBinary loads every persisted block of an entry chain from
// dataStorePath/<chainID>/store.*.block, marks them sealed, appends one
// fresh open block to receive new entries, and finally pulls any queued
// unprocessed entries from the database into that open block. Panics on any
// read/unmarshal failure, since a partially loaded chain would be unusable.
func initWithBinary(chain *notaryapi.EChain) {
	matches, err := filepath.Glob(dataStorePath + chain.ChainID.String() + "/store.*.block") // need to get it from a property file??
	if err != nil {
		panic(err)
	}

	// Load each on-disk block in glob order and seal it.
	chain.Blocks = make([]*notaryapi.EBlock, len(matches))
	num := 0
	for _, match := range matches {
		data, err := ioutil.ReadFile(match)
		if err != nil {
			panic(err)
		}

		block := new(notaryapi.EBlock)
		err = block.UnmarshalBinary(data)
		if err != nil {
			panic(err)
		}

		block.Chain = chain
		block.IsSealed = true
		chain.Blocks[num] = block
		num++
	}

	//Create an empty block and append to the chain
	if len(chain.Blocks) == 0 {
		chain.NextBlockID = 0
		newblock, _ := notaryapi.CreateBlock(chain, nil, 10)
		chain.Blocks = append(chain.Blocks, newblock)
	} else {
		// NextBlockID indexes the open block that is appended below.
		chain.NextBlockID = uint64(len(chain.Blocks))
		newblock, _ := notaryapi.CreateBlock(chain, chain.Blocks[len(chain.Blocks)-1], 10)
		chain.Blocks = append(chain.Blocks, newblock)
	}

	//Get the unprocessed entries in db for the past # of mins for the open block
	// A zero timestamp requests everything still queued — TODO confirm
	// FetchEBEntriesFromQueue treats 0 as "no lower bound".
	binaryTimestamp := make([]byte, 8)
	binary.BigEndian.PutUint64(binaryTimestamp, uint64(0))
	if chain.Blocks[chain.NextBlockID].IsSealed == true {
		panic("chain.Blocks[chain.NextBlockID].IsSealed for chain:" + chain.ChainID.String())
	}
	chain.Blocks[chain.NextBlockID].EBEntries, _ = db.FetchEBEntriesFromQueue(&chain.ChainID.Bytes, &binaryTimestamp)
}
// initDB opens (or, if it does not yet exist, creates) the LevelDB database
// at ldbpath and stores the handle in the package-level db variable. Panics
// if the database cannot be created.
func initDB() {
	var err error

	db, err = ldb.OpenLevelDB(ldbpath, false)
	if err != nil {
		// BUG FIX: the original used log.Println with a %v format verb,
		// which printed the verb literally; Printf interprets it.
		log.Printf("err opening db: %v", err)
	}

	if db == nil {
		log.Println("Creating new db ...")
		db, err = ldb.OpenLevelDB(ldbpath, true)
		if err != nil {
			panic(err)
		}
	}

	log.Println("Database started from: " + ldbpath)
}
func init() {
loadConfigurations()
gobundle.Setup.Application.Name = applicationName
gobundle.Init()
initDB()
initChains()
dynrsrc.Start(watchError, readError)
notaryapi.StartDynamic(gobundle.DataFile("html.gwp"), readError)
for _, chain := range chainIDMap {
initWithBinary(chain)
fmt.Println("Loaded", len(chain.Blocks)-1, "blocks for chain: " + chain.ChainID.String())
for i := 0; i < len(chain.Blocks); i = i + 1 {
if uint64(i) != chain.Blocks[i].Header.BlockID {
panic(errors.New("BlockID does not equal index"))
}
}
}
// init Directory Block Chain
initDChain()
fmt.Println("Loaded", len(dchain.Blocks)-1, "Directory blocks for chain: "+ notaryapi.EncodeBinary(dchain.ChainID))
// init Entry Credit Chain
initCChain()
fmt.Println("Loaded", len(cchain.Blocks)-1, "Entry Credit blocks for chain: "+ cchain.ChainID.String())
// init dbBatches, dbBatch
dbBatches = &DBBatches {
batches: make([]*notaryapi.DBBatch, 0, 100),
}
dbBatch := ¬aryapi.DBBatch {
DBlocks: make([]*notaryapi.DBlock, 0, 10),
}
dbBatches.batches = append(dbBatches.batches, dbBatch)
// init the export file list for client distribution
initServerDataFileMap()
// create EBlocks and FBlock every 60 seconds
tickers[0] = time.NewTicker(time.Second * time.Duration(directoryBlockInSeconds))
// write 10 FBlock in a batch to BTC every 10 minutes
tickers[1] = time.NewTicker(time.Second * time.Duration(sendToBTCinSeconds))
go func() {
for _ = range tickers[0].C {
fmt.Println("in tickers[0]: newEntryBlock & newFactomBlock")
// Entry Chains
for _, chain := range chainIDMap {
eblock := newEntryBlock(chain)
if eblock != nil{
dchain.AddDBEntry(eblock)
}
save(chain)
}
// Entry Credit Chain
cBlock := newEntryCreditBlock(cchain)
if cBlock != nil{
dchain.AddCBlockToDBEntry(cBlock)
}
saveCChain(cchain)
// Directory Block chain
dbBlock := newDirectoryBlock(dchain)
if dbBlock != nil {
// mark the start block of a DBBatch
fmt.Println("in tickers[0]: len(dbBatch.DBlocks)=", len(dbBatch.DBlocks))
if len(dbBatch.DBlocks) == 0 {
dbBlock.Header.BatchFlag = byte(1)
}
dbBatch.DBlocks = append(dbBatch.DBlocks, dbBlock)
fmt.Println("in tickers[0]: ADDED FBBLOCK: len(dbBatch.DBlocks)=", len(dbBatch.DBlocks))
}
saveDChain(dchain)
}
}()
go func() {
for _ = range tickers[1].C {
fmt.Println("in tickers[1]: new FBBatch. len(dbBatch.DBlocks)=", len(dbBatch.DBlocks))
// skip empty dbBatch.
if len(dbBatch.DBlocks) > 0 {
doneBatch := dbBatch
dbBatch = ¬aryapi.DBBatch {
DBlocks: make([]*notaryapi.DBlock, 0, 10),
}
dbBatches.batchMutex.Lock()
dbBatches.batches = append(dbBatches.batches, doneBatch)
dbBatches.batchMutex.Unlock()
fmt.Printf("in tickers[1]: doneBatch=%#v\n", doneBatch)
// go routine here?
saveDBBatchMerkleRoottoBTC(doneBatch)
}
}
}()
}
// main starts the REST API HTTP server. All heavy initialization (config,
// database, chains, ticker goroutines) happens in init; main only parses
// flags, installs the handler, and blocks in ListenAndServe. The deferred
// cleanup runs only if the server ever returns.
func main() {
	//addrStr := "muhXX7mXoMZUBvGLCgfjuoY2n2mziYETYC"
	//addrStr := "movaFTARmsaTMk3j71MpX8HtMURpsKhdra"

	// err := initRPCClient()
	// if err != nil {
	// 	log.Fatalf("cannot init rpc client: %s", err)
	// }
	// defer shutdown()
	//
	// if err := initWallet(); err != nil {
	// 	log.Fatalf("cannot init wallet: %s", err)
	// }

	//doEntries()

	flag.Parse()

	// Stop the block/anchor tickers, the resource watcher, and the database
	// on shutdown.
	defer func() {
		tickers[0].Stop()
		tickers[1].Stop()
		dynrsrc.Stop()
		db.Close()
	}()

	/*
		err :=http.ListenAndServe(":8081", http.FileServer(http.Dir("/tmp/store/seed/csv")))
		if err != nil {
			panic(err)
		}
	*/

	http.HandleFunc("/", serveRESTfulHTTP)
	err1 := http.ListenAndServe(":"+strconv.Itoa(portNumber), nil)
	if err1 != nil {
		panic(err1)
	}
}
// fileNotExists reports whether the file at name cannot be stat'ed: true
// when the path does not exist, and also when Stat fails for any other
// reason (e.g. permissions). A successful Stat yields false.
func fileNotExists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		return true
	}
	return false
}
// save persists every sealed block of an entry chain to
// dataStorePath/<chainID>/store.%09d.block, creating the chain directory on
// first use. The open (unsealed) block is skipped. The block slice is
// snapshotted under the chain's BlockMutex so marshalling and file I/O
// happen outside the lock.
func save(chain *notaryapi.EChain) {
	if len(chain.Blocks) == 0 {
		log.Println("no blocks to save for chain: " + chain.ChainID.String())
		return
	}

	// Snapshot the slice under the lock; do the slow work without it.
	snapshot := make([]*notaryapi.EBlock, len(chain.Blocks))
	chain.BlockMutex.Lock()
	copy(snapshot, chain.Blocks)
	chain.BlockMutex.Unlock()

	strChainID := chain.ChainID.String()
	for i, block := range snapshot {
		//the open block is not saved
		if !block.IsSealed {
			continue
		}

		data, err := block.MarshalBinary()
		if err != nil {
			panic(err)
		}

		if fileNotExists(dataStorePath + strChainID) {
			if mkErr := os.MkdirAll(dataStorePath+strChainID, 0777); mkErr == nil {
				log.Println("Created directory " + dataStorePath + strChainID)
			} else {
				log.Println(mkErr)
			}
		}

		err = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+"/store.%09d.block", i), data, 0777)
		if err != nil {
			panic(err)
		}
	}
}
// serveRESTfulHTTP is the single HTTP handler for the REST API. POST requests
// carry a "datatype" form value selecting the operation (commitentry,
// revealentry, chain, buycredit, getbalance, filelist, file, or a raw entry
// submission); the result (or an error) is marshalled back in the requested
// accept format.
func serveRESTfulHTTP(w http.ResponseWriter, r *http.Request) {
	var resource interface{}
	var err *notaryapi.Error
	var buf bytes.Buffer

	path, method, accept, form, err := parse(r)

	switch method {
	case "GET":
		//resource, err = getServerDataFileMap()
	case "POST":
		fmt.Println("Got to POST")
		fmt.Println(form)
		if len(path) != 1 {
			err = notaryapi.CreateError(notaryapi.ErrorBadMethod, `POST can only be used in the root context: /v1`)
			return
		}
		datatype := form.Get("datatype")
		fmt.Println("set datatype:", datatype)
		switch datatype {
		case "commitentry":
			fmt.Println("Got to commitentry")

			// BUG FIX: hash and pub were declared as nil *notaryapi.Hash
			// and then written through, panicking on every commit.
			// Allocate them before use.
			hash := new(notaryapi.Hash)
			pub := new(notaryapi.Hash)
			var timestamp uint64

			sig, err := hex.DecodeString(form.Get("signature"))
			if err != nil {
				fmt.Println("Commit: signature: ", err)
			}
			// Guard the slice bound before copying out the public key.
			if len(sig) >= 32 {
				pub.Bytes = sig[:32]
			}

			data, err := hex.DecodeString(form.Get("data"))
			if err != nil {
				fmt.Println("Commit: data: ", err)
			}
			// data is 8 bytes of big-endian timestamp followed by the
			// entry hash; guard against short input.
			if len(data) >= 8 {
				timestamp = binary.BigEndian.Uint64(data[0:8])
				hash.Bytes = data[8:]
			}

			_, err = processCommitEntry(hash, pub, int64(timestamp))
			fmt.Println("got Commit")
			fmt.Println("err=", err)

		case "revealentry":
			fmt.Println("got to revealentry")

			// BUG FIX: entry was a nil *notaryapi.Entry; calling
			// UnmarshalBinary on it panicked. Allocate it first.
			entry := new(notaryapi.Entry)
			data, err := hex.DecodeString(form.Get("data"))
			if err != nil {
				fmt.Println("Reveal: data: ", err)
			}
			entry.UnmarshalBinary(data)
			_, err = processRevealEntry(entry)
			fmt.Println("got Reveal")
			fmt.Println("err=", err)

		case "chain":
			resource, err = postChain("/"+strings.Join(path, "/"), form)

		case "buycredit":
			pubKey, err := notaryapi.HexToHash(form.Get("ECPubKey"))
			if err != nil {
				fmt.Println("Error in parsing pubKey:", err.Error())
			}
			value, err := strconv.ParseUint(form.Get("factoidbase"), 10, 64)
			if err != nil {
				fmt.Println("Error in parsing value:", err.Error())
			}
			credits := value * creditsPerFactoid / 1000000000
			// NOTE(review): pubKey doubles as the factoid tx hash here —
			// looks like a placeholder; confirm against the caller.
			resource, err = processBuyEntryCredit(pubKey, int32(credits), pubKey)
			printCreditMap()

		case "getbalance":
			pubKey, err := notaryapi.HexToHash(form.Get("ECPubKey"))
			if err != nil {
				fmt.Println("Error in parsing pubKey:", err.Error())
			}
			resource, err = getEntryCreditBalance(pubKey)

		case "filelist":
			resource, err = getServerDataFileMapJSON()

		case "file":
			fileKey := form.Get("filekey")
			filename := serverDataFileMap[fileKey]
			http.ServeFile(w, r, dataStorePath+"csv/"+filename)
			return

		default:
			resource, err = postEntry("/"+strings.Join(path, "/"), form)
		}
	default:
		err = notaryapi.CreateError(notaryapi.ErrorBadMethod, fmt.Sprintf(`The HTTP %s method is not supported`, method))
		return
	}

	if err != nil {
		resource = err
	}

	// "byref" toggles the alternate marshalling mode.
	alt := false
	for _, s := range form["byref"] {
		b, err := strconv.ParseBool(s)
		if err == nil {
			alt = b
			break
		}
	}

	err = notaryapi.Marshal(resource, accept, &buf, alt)

	switch accept {
	case "text":
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	case "json":
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
	case "xml":
		w.Header().Set("Content-Type", "application/xml; charset=utf-8")
	case "html":
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
	}

	if err != nil {
		var r *notaryapi.Error
		buf.Reset()
		r = notaryapi.Marshal(err, accept, &buf, false)
		if r != nil {
			err = r
		}
		w.WriteHeader(err.HTTPCode)
	}

	//buf.WriteTo(w)
	if resource != nil {
		//Send back entry hash
		// BUG FIX: resource may hold a *notaryapi.Error rather than
		// []byte; the unchecked assertion panicked on every error
		// response. Fall back to the marshalled buffer instead.
		if body, ok := resource.([]byte); ok {
			w.Write(body)
		} else {
			buf.WriteTo(w)
		}
	} else {
		w.Write([]byte("\n\n"))
	}
}
// postEntry decodes an entry submitted via form values ("format" selects
// json, xml, or binary encoding of the "entry" field) and forwards it to
// processRevealEntry. Returns the entry hash bytes on success.
func postEntry(context string, form url.Values) (interface{}, *notaryapi.Error) {
	newEntry := new(notaryapi.Entry)
	format, data := form.Get("format"), form.Get("entry")

	switch format {
	case "", "json":
		reader := gocoding.ReadString(data)
		err := notaryapi.UnmarshalJSON(reader, newEntry)
		if err != nil {
			return nil, notaryapi.CreateError(notaryapi.ErrorJSONUnmarshal, err.Error())
		}
	case "xml":
		err := xml.Unmarshal([]byte(data), newEntry)
		if err != nil {
			return nil, notaryapi.CreateError(notaryapi.ErrorXMLUnmarshal, err.Error())
		}
	case "binary":
		binaryEntry, _ := notaryapi.DecodeBinary(&data)
		// BUG FIX: fmt.Println does not interpret %v; use Printf so the
		// value is actually formatted.
		fmt.Printf("data:%v\n", data)
		err := newEntry.UnmarshalBinary(binaryEntry)
		if err != nil {
			// NOTE(review): ErrorXMLUnmarshal is reused for a binary
			// decode failure — confirm whether a dedicated error code
			// exists in notaryapi.
			return nil, notaryapi.CreateError(notaryapi.ErrorXMLUnmarshal, err.Error())
		}
	default:
		return nil, notaryapi.CreateError(notaryapi.ErrorUnsupportedUnmarshal, fmt.Sprintf(`The format "%s" is not supported`, format))
	}

	return processRevealEntry(newEntry)
}
// processRevealEntry validates and stores a revealed entry: the entry must
// belong to a known chain and must have been paid for in advance (a matching
// record in prePaidEntryMap). On success the entry is persisted, one
// prepayment is consumed, the entry is added to the chain's open block, and
// the entry hash bytes are returned.
func processRevealEntry(newEntry *notaryapi.Entry) ([]byte, *notaryapi.Error) {
	if newEntry == nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `Entity to be POSTed is nil`)
	}

	fmt.Println("chainID:", newEntry.ChainID.String())
	chain := chainIDMap[newEntry.ChainID.String()]
	if chain == nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `This chain is not supported`) //ErrorInternal?
	}

	// store the new entry in db
	entryBinary, _ := newEntry.MarshalBinary()
	entryHash := notaryapi.Sha(entryBinary)
	db.InsertEntryAndQueue(entryHash, &entryBinary, newEntry, &chain.ChainID.Bytes)

	// Precalculate the key for prePaidEntryMap
	key := entryHash.String()

	chain.BlockMutex.Lock()
	// BUG FIX: the original returned on the "not paid" path while still
	// holding BlockMutex, deadlocking every subsequent caller. defer
	// guarantees the unlock on every return path.
	defer chain.BlockMutex.Unlock()

	// Consume one prepayment for this entry, dropping the record once the
	// last payment is used.
	payments, ok := prePaidEntryMap[key]
	if !ok {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `Credit needs to paid first before reveal an entry:`+entryHash.String())
	}
	if payments > 1 {
		prePaidEntryMap[key] = payments - 1
	} else {
		delete(prePaidEntryMap, key)
	}

	err := chain.Blocks[len(chain.Blocks)-1].AddEBEntry(newEntry)
	if err != nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, fmt.Sprintf(`Error while adding Entity to Block: %s`, err.Error()))
	}

	return entryHash.Bytes, nil
}
// processCommitEntry pays for a future entry reveal: it debits
// creditsPerEntry (a negative value) from the public key's balance, records
// the commitment in the entry credit chain's open block, and counts the
// prepayment in prePaidEntryMap. Fails without mutating the balance if it
// would go negative.
func processCommitEntry(entryHash *notaryapi.Hash, pubKey *notaryapi.Hash, timeStamp int64) ([]byte, error) {
	// Create PayEntryCBEntry
	cbEntry := notaryapi.NewPayEntryCBEntry(pubKey, entryHash, creditsPerEntry, timeStamp)

	cchain.BlockMutex.Lock()
	// BUG FIX: the original returned on the insufficient-balance path while
	// still holding BlockMutex, deadlocking all later commits. defer
	// guarantees the unlock on every return path.
	defer cchain.BlockMutex.Unlock()

	// Update the credit balance in memory
	credits := eCreditMap[pubKey.String()]
	if credits+creditsPerEntry < 0 {
		return nil, errors.New("Not enough credit for public key:" + pubKey.String() + " Balance:" + fmt.Sprint(credits))
	}
	eCreditMap[pubKey.String()] = credits + creditsPerEntry

	err := cchain.Blocks[len(cchain.Blocks)-1].AddCBEntry(cbEntry)

	// Update the prePaidEntryMap in memory
	payments := prePaidEntryMap[entryHash.String()]
	prePaidEntryMap[entryHash.String()] = payments + 1

	return entryHash.Bytes, err
}
// processCommitChain pays for a future chain creation: it debits
// creditsPerChain (a negative value) from the public key's balance, records
// the payment in the entry credit chain's open block, and counts the
// prepayment in prePaidEntryMap under the chain+entry key.
func processCommitChain(entryHash *notaryapi.Hash, chainIDHash *notaryapi.Hash, entryChainIDHash *notaryapi.Hash, pubKey *notaryapi.Hash) ([]byte, error) {
	// Precalculate the key and value pair for prePaidEntryMap
	key := getPrePaidChainKey(entryHash, chainIDHash)

	// Create PayChainCBEntry
	cbEntry := notaryapi.NewPayChainCBEntry(pubKey, entryHash, creditsPerChain, chainIDHash, entryChainIDHash)

	cchain.BlockMutex.Lock()
	// BUG FIX: the original returned on the insufficient-balance path while
	// still holding BlockMutex, deadlocking all later commits. defer
	// guarantees the unlock on every return path.
	defer cchain.BlockMutex.Unlock()

	// Update the credit balance in memory.
	// BUG FIX: the original checked the balance against creditsPerEntry but
	// then deducted creditsPerChain; the check must use the amount actually
	// deducted.
	credits := eCreditMap[pubKey.String()]
	if credits+creditsPerChain < 0 {
		return nil, errors.New("Insufficient credits for public key:" + pubKey.String() + " Balance:" + fmt.Sprint(credits))
	}
	eCreditMap[pubKey.String()] = credits + creditsPerChain

	err := cchain.Blocks[len(cchain.Blocks)-1].AddCBEntry(cbEntry)

	// Update the prePaidEntryMap in memory
	payments := prePaidEntryMap[key]
	prePaidEntryMap[key] = payments + 1

	return chainIDHash.Bytes, err
}
// processBuyEntryCredit credits the given public key with purchased entry
// credits, recording the purchase in the entry credit chain's open block.
// Returns the public key bytes and any error from AddCBEntry.
func processBuyEntryCredit(pubKey *notaryapi.Hash, credits int32, factoidTxHash *notaryapi.Hash) ([]byte, error) {
	buyEntry := notaryapi.NewBuyCBEntry(pubKey, factoidTxHash, credits)

	cchain.BlockMutex.Lock()
	addErr := cchain.Blocks[len(cchain.Blocks)-1].AddCBEntry(buyEntry)

	// Credit the purchaser's in-memory balance.
	keyStr := pubKey.String()
	eCreditMap[keyStr] = eCreditMap[keyStr] + credits
	cchain.BlockMutex.Unlock()

	return pubKey.Bytes, addErr
}
// processRevealChain creates a brand-new entry chain from its prepaid first
// entry. The chain must not already exist, must carry a first entry, and a
// matching chain-creation payment must be present in prePaidEntryMap (keyed
// by first-entry hash + chain ID). On success the chain and its first entry
// are persisted, the chain is registered in chainIDMap, the support-data csv
// is re-exported, and the chain ID bytes are returned.
func processRevealChain(newChain *notaryapi.EChain) ([]byte, *notaryapi.Error) {
	chain := chainIDMap[newChain.ChainID.String()]
	if chain != nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `This chain is already existing`) //ErrorInternal?
	}
	if newChain.FirstEntry == nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `The first entry is required to create a new chain.`) //ErrorInternal?
	}

	// Remove the entry for prePaidEntryMap
	// The payment was recorded by processCommitChain under this same key.
	binaryEntry, _ := newChain.FirstEntry.MarshalBinary()
	firstEntryHash := notaryapi.Sha(binaryEntry)
	key := getPrePaidChainKey(firstEntryHash, newChain.ChainID)
	_, ok := prePaidEntryMap[key]
	if ok {
		delete(prePaidEntryMap, key)
	} else {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, `Credit needs to paid first before creating a new chain:`+newChain.ChainID.String())
	}

	// Store the new chain in db
	db.InsertChain(newChain)

	// Chain initialization
	// initWithBinary also appends the chain's first open block.
	initWithBinary(newChain)
	fmt.Println("Loaded", len(newChain.Blocks)-1, "blocks for chain: "+newChain.ChainID.String())

	// Add the new chain in the chainIDMap
	chainIDMap[newChain.ChainID.String()] = newChain

	// store the new entry in db
	entryBinary, _ := newChain.FirstEntry.MarshalBinary()
	entryHash := notaryapi.Sha(entryBinary)
	db.InsertEntryAndQueue(entryHash, &entryBinary, newChain.FirstEntry, &newChain.ChainID.Bytes)

	newChain.BlockMutex.Lock()
	err := newChain.Blocks[len(newChain.Blocks)-1].AddEBEntry(newChain.FirstEntry)
	newChain.BlockMutex.Unlock()

	if err != nil {
		return nil, notaryapi.CreateError(notaryapi.ErrorInternal, fmt.Sprintf(`Error while adding the First Entry to Block: %s`, err.Error()))
	}

	// Refresh the downloadable support-data export now that a chain exists.
	ExportDataFromDbToFile()

	return newChain.ChainID.Bytes, nil
}
// getEntryCreditBalance returns the in-memory entry credit balance for the
// given public key, encoded as big-endian int32 bytes.
func getEntryCreditBalance(pubKey *notaryapi.Hash) ([]byte, error) {
	var out bytes.Buffer
	// Writing a fixed-size int32 into a bytes.Buffer cannot fail.
	binary.Write(&out, binary.BigEndian, eCreditMap[pubKey.String()])
	return out.Bytes(), nil
}
// postChain decodes a new chain submitted via form values (only the "binary"
// format is supported, in the "chain" field) and forwards it to
// processRevealChain.
func postChain(context string, form url.Values) (interface{}, *notaryapi.Error) {
	newChain := new(notaryapi.EChain)
	format, data := form.Get("format"), form.Get("chain")

	switch format {
	case "binary":
		binaryChain, _ := notaryapi.DecodeBinary(&data)
		// BUG FIX: check the unmarshal error before deriving the chain ID;
		// the original called GenerateIDFromName on a possibly
		// half-populated chain and only then looked at the error.
		if err := newChain.UnmarshalBinary(binaryChain); err != nil {
			return nil, notaryapi.CreateError(notaryapi.ErrorInternal, err.Error())
		}
		newChain.GenerateIDFromName()
	default:
		return nil, notaryapi.CreateError(notaryapi.ErrorUnsupportedUnmarshal, fmt.Sprintf(`The format "%s" is not supported`, format))
	}

	// (The original also nil-checked newChain here, but new() never returns
	// nil, so that branch was dead code.)
	return processRevealChain(newChain)
}
// saveDChain persists every sealed directory block to
// dataStorePath/<hex chainID>/store.%09d.block, creating the directory on
// first use. The open (unsealed) block is skipped. The block slice is
// snapshotted under the chain's BlockMutex so file I/O happens unlocked.
func saveDChain(chain *notaryapi.DChain) {
	if len(chain.Blocks) == 0 {
		//log.Println("no blocks to save for chain: " + string (*chain.ChainID))
		return
	}

	// Snapshot the slice under the lock; do the slow work without it.
	snapshot := make([]*notaryapi.DBlock, len(chain.Blocks))
	chain.BlockMutex.Lock()
	copy(snapshot, chain.Blocks)
	chain.BlockMutex.Unlock()

	strChainID := notaryapi.EncodeBinary(chain.ChainID)
	for i, block := range snapshot {
		//the open block is not saved
		if !block.IsSealed {
			continue
		}

		data, err := block.MarshalBinary()
		if err != nil {
			panic(err)
		}

		if fileNotExists(dataStorePath + strChainID) {
			if mkErr := os.MkdirAll(dataStorePath+strChainID, 0777); mkErr == nil {
				log.Println("Created directory " + dataStorePath + strChainID)
			} else {
				log.Println(mkErr)
			}
		}

		err = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+"/store.%09d.block", i), data, 0777)
		if err != nil {
			panic(err)
		}
	}
}
// saveCChain persists every sealed entry credit block to
// dataStorePath/<chainID>/store.%09d.block, creating the directory on first
// use. The open (unsealed) block is skipped. The block slice is snapshotted
// under the chain's BlockMutex so file I/O happens unlocked.
func saveCChain(chain *notaryapi.CChain) {
	if len(chain.Blocks) == 0 {
		//log.Println("no blocks to save for chain: " + string (*chain.ChainID))
		return
	}

	// Snapshot the slice under the lock; do the slow work without it.
	snapshot := make([]*notaryapi.CBlock, len(chain.Blocks))
	chain.BlockMutex.Lock()
	copy(snapshot, chain.Blocks)
	chain.BlockMutex.Unlock()

	strChainID := chain.ChainID.String()
	for i, block := range snapshot {
		//the open block is not saved
		if !block.IsSealed {
			continue
		}

		data, err := block.MarshalBinary()
		if err != nil {
			panic(err)
		}

		if fileNotExists(dataStorePath + strChainID) {
			if mkErr := os.MkdirAll(dataStorePath+strChainID, 0777); mkErr == nil {
				log.Println("Created directory " + dataStorePath + strChainID)
			} else {
				log.Println(mkErr)
			}
		}

		err = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+"/store.%09d.block", i), data, 0777)
		if err != nil {
			panic(err)
		}
	}
}
// initDChain builds the Directory Block chain, whose chain ID is the
// all-zeros 32-byte value. Previously persisted directory blocks are loaded
// from disk and sealed, a fresh open block is appended, and any unprocessed
// directory entries are pulled from the database queue into it. Panics on
// any read/unmarshal failure.
func initDChain() {
	dchain = new(notaryapi.DChain)

	// The directory chain's ID is 32 zero bytes.
	barray := (make([]byte, 32))
	dchain.ChainID = &barray

	matches, err := filepath.Glob(dataStorePath + notaryapi.EncodeBinary(dchain.ChainID) + "/store.*.block") // need to get it from a property file??
	if err != nil {
		panic(err)
	}

	// Load each on-disk block in glob order and seal it.
	dchain.Blocks = make([]*notaryapi.DBlock, len(matches))
	num := 0
	for _, match := range matches {
		data, err := ioutil.ReadFile(match)
		if err != nil {
			panic(err)
		}

		block := new(notaryapi.DBlock)
		err = block.UnmarshalBinary(data)
		if err != nil {
			panic(err)
		}

		block.Chain = dchain
		block.IsSealed = true
		dchain.Blocks[num] = block
		num++
	}

	//Create an empty block and append to the chain
	if len(dchain.Blocks) == 0 {
		dchain.NextBlockID = 0
		newblock, _ := notaryapi.CreateDBlock(dchain, nil, 10)
		dchain.Blocks = append(dchain.Blocks, newblock)
	} else {
		// NextBlockID indexes the open block appended below.
		dchain.NextBlockID = uint64(len(dchain.Blocks))
		newblock, _ := notaryapi.CreateDBlock(dchain, dchain.Blocks[len(dchain.Blocks)-1], 10)
		dchain.Blocks = append(dchain.Blocks, newblock)
	}

	//Get the unprocessed entries in db for the past # of mins for the open block
	// A zero timestamp requests everything still queued — TODO confirm
	// FetchDBEntriesFromQueue treats 0 as "no lower bound".
	binaryTimestamp := make([]byte, 8)
	binary.BigEndian.PutUint64(binaryTimestamp, uint64(0))
	if dchain.Blocks[dchain.NextBlockID].IsSealed == true {
		panic("dchain.Blocks[dchain.NextBlockID].IsSealed for chain:" + notaryapi.EncodeBinary(dchain.ChainID))
	}
	dchain.Blocks[dchain.NextBlockID].DBEntries, _ = db.FetchDBEntriesFromQueue(&binaryTimestamp)
}
// initCChain builds the Entry Credit chain, whose chain ID is 32 bytes with
// the first byte set to 1. It resets the in-memory credit and prepayment
// maps, loads and seals all persisted credit blocks (replaying each one into
// eCreditMap so balances are reconstructed), and appends a fresh open block.
// Panics on any read/unmarshal failure.
func initCChain() {
	// Rebuild balance bookkeeping from scratch before replaying blocks.
	eCreditMap = make(map[string]int32)
	prePaidEntryMap = make(map[string]int32)

	cchain = new(notaryapi.CChain)

	//to be improved??
	// Chain ID: 0x01 followed by 31 zero bytes.
	barray := (make([]byte, 32))
	barray[0] = 1
	cchain.ChainID = new(notaryapi.Hash)
	cchain.ChainID.SetBytes(barray)

	matches, err := filepath.Glob(dataStorePath + cchain.ChainID.String() + "/store.*.block") // need to get it from a property file??
	if err != nil {
		panic(err)
	}

	// Load each on-disk block in glob order, seal it, and replay its
	// credit entries into eCreditMap.
	cchain.Blocks = make([]*notaryapi.CBlock, len(matches))
	num := 0
	for _, match := range matches {
		data, err := ioutil.ReadFile(match)
		if err != nil {
			panic(err)
		}

		block := new(notaryapi.CBlock)
		err = block.UnmarshalBinary(data)
		if err != nil {
			panic(err)
		}

		block.Chain = cchain
		block.IsSealed = true

		// Calculate the EC balance for each account
		initializeECreditMap(block)

		cchain.Blocks[num] = block
		num++
	}

	//Create an empty block and append to the chain
	if len(cchain.Blocks) == 0 {
		cchain.NextBlockID = 0
		newblock, _ := notaryapi.CreateCBlock(cchain, nil, 10)
		cchain.Blocks = append(cchain.Blocks, newblock)
	} else {
		cchain.NextBlockID = uint64(len(cchain.Blocks))
		newblock, _ := notaryapi.CreateCBlock(cchain, cchain.Blocks[len(cchain.Blocks)-1], 10)
		cchain.Blocks = append(cchain.Blocks, newblock)
	}

	//Get the unprocessed entries in db for the past # of mins for the open block
	/*	binaryTimestamp := make([]byte, 8)
		binary.BigEndian.PutUint64(binaryTimestamp, uint64(0))
		if cchain.Blocks[cchain.NextBlockID].IsSealed == true {
			panic ("dchain.Blocks[dchain.NextBlockID].IsSealed for chain:" + notaryapi.EncodeBinary(dchain.ChainID))
		}
		dchain.Blocks[dchain.NextBlockID].DBEntries, _ = db.FetchDBEntriesFromQueue(&binaryTimestamp)
	*/
}
// ExportDbToFile dumps every database record belonging to the directory
// block with the given hash into a timestamped csv file under
// dataStorePath/csv/, then registers the file in serverDataFileMap so
// clients can download it by hash key.
func ExportDbToFile(dbHash *notaryapi.Hash) {
	csvDir := dataStorePath + "csv/"
	if fileNotExists(csvDir) {
		os.MkdirAll(csvDir, 0755)
	}

	//write the records to a csv file:
	filename := time.Now().Format(time.RFC3339) + "." + dbHash.String() + ".csv"
	file, err := os.Create(csvDir + filename)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	writer := csv.NewWriter(file)

	ldbMap, err := db.FetchAllDBRecordsByDBHash(dbHash, cchain.ChainID)
	if err != nil {
		log.Println(err)
		return
	}

	// One row per record: key, value.
	for key, value := range ldbMap {
		writer.Write([]string{key, value})
	}
	writer.Flush()

	// Add the file to the distribution list
	hash := notaryapi.Sha([]byte(filename))
	serverDataFileMap[hash.String()] = filename
}
func ExportDataFromDbToFile() {
if fileNotExists( dataStorePath+"csv/") {
os.MkdirAll(dataStorePath+"csv/" , 0755)
}
//write the records to a csv file:
filename := time.Now().Format(time.RFC3339) + ".supportdata.csv"
file, err := os.Create(dataStorePath+"csv/" + filename)
if err != nil {panic(err)}
defer file.Close()
writer := csv.NewWriter(file)
ldbMap, err := db.FetchSupportDBRecords()
if err != nil{
log.Println(err)
return
}
for key, value := range ldbMap{
//csv header: key, value
writer.Write([]string {key, value})
}
writer.Flush()
// Add the file to the distribution list
hash := notaryapi.Sha([]byte(filename))
serverDataFileMap[hash.String()] = filename
}
// initChains populates chainIDMap with every entry chain stored in the
// database, keyed by chain ID string. Panics if the chains cannot be
// fetched.
func initChains() {
	//initChainIDs()

	chainIDMap = make(map[string]*notaryapi.EChain)
	//chainNameMap = make(map[string]*notaryapi.Chain)

	allChains, err := db.FetchAllChainsByName(nil)
	if err != nil {
		panic(err)
	}

	for i := range *allChains {
		// Copy the element before taking its address so each map entry
		// points at distinct storage rather than a shared loop variable.
		c := (*allChains)[i]
		chainIDMap[c.ChainID.String()] = &c
		//chainIDMap[string(chain.ChainID.Bytes)] = &chain
	}
}
// initializeECreditMap folds the credit deltas recorded in a sealed entry
// credit block into the in-memory eCreditMap balances.
func initializeECreditMap(block *notaryapi.CBlock) {
	for _, entry := range block.CBEntries {
		keyStr := entry.PublicKey().String()
		eCreditMap[keyStr] += entry.Credits()
	}
}
// getPrePaidChainKey builds the prePaidEntryMap key that pairs a chain
// creation payment with its later reveal: the chain ID hash string followed
// by the first-entry hash string.
func getPrePaidChainKey(entryHash *notaryapi.Hash, chainIDHash *notaryapi.Hash) string {
	return chainIDHash.String() + entryHash.String()
}
// printCreditMap dumps the in-memory entry credit balances to stdout for
// debugging.
func printCreditMap() {
	fmt.Println("eCreditMap:")
	for key, balance := range eCreditMap {
		fmt.Println("Key:", key, "Value", balance)
	}
}
// printPaidEntryMap dumps the in-memory prepaid-entry counters to stdout
// for debugging.
func printPaidEntryMap() {
	fmt.Println("prePaidEntryMap:")
	for key, payments := range prePaidEntryMap {
		fmt.Println("Key:", key, "Value", payments)
	}
}
// printCChain dumps every sealed block of the entry credit chain to stdout
// for debugging: the block header plus a JSON rendering of each credit
// entry, labelled by its concrete entry type.
func printCChain() {
	fmt.Println("cchain:", cchain.ChainID.String())

	for i, block := range cchain.Blocks {
		if !block.IsSealed {
			continue
		}

		var buf bytes.Buffer
		err := factomapi.SafeMarshal(&buf, block.Header)
		// BUG FIX: string(i) converts the int to a rune (e.g. 65 -> "A"),
		// not a decimal string; strconv.Itoa is the correct conversion.
		fmt.Println("block.Header", strconv.Itoa(i), ":", string(buf.Bytes()))

		for _, cbentry := range block.CBEntries {
			t := reflect.TypeOf(cbentry)
			fmt.Println("cbEntry Type:", t.Name(), t.String())

			// Pick the debug label from the concrete entry type; the
			// three branches were otherwise identical.
			var label string
			switch {
			case strings.Contains(t.String(), "PayChainCBEntry"):
				label = "PayChainCBEntry"
			case strings.Contains(t.String(), "PayEntryCBEntry"):
				label = "PayEntryCBEntry"
			case strings.Contains(t.String(), "BuyCBEntry"):
				label = "BuyCBEntry"
			default:
				continue
			}

			fmt.Println(label+" - pubkey:", cbentry.PublicKey().String(), " Credits:", cbentry.Credits())

			var entryBuf bytes.Buffer
			if err := factomapi.SafeMarshal(&entryBuf, cbentry); err != nil {
				// BUG FIX: Println does not interpret %v; use Printf.
				fmt.Printf("Error:%v\n", err)
			}
			fmt.Println(label+" JSON", ":", string(entryBuf.Bytes()))
		}

		if err != nil {
			fmt.Printf("Error:%v\n", err)
		}
	}
}
// initServerDataFileMap scans dataStorePath/csv for previously exported csv
// files and registers each one in serverDataFileMap, keyed by the sha hash
// of its filename, so clients can request them by key.
func initServerDataFileMap() error {
	serverDataFileMap = make(map[string]string)

	entries, err := ioutil.ReadDir(dataStorePath + "csv")
	if err != nil {
		fmt.Println("Error in initServerDataFileMap:", err.Error())
		return err
	}

	for _, entry := range entries {
		if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".csv") {
			continue
		}
		key := notaryapi.Sha([]byte(entry.Name()))
		serverDataFileMap[key.String()] = entry.Name()
	}

	return nil
}
func getServerDataFileMapJSON() (interface{}, *notaryapi.Error) {
buf := new(bytes.Buffer)
err := factomapi.SafeMarshal(buf, serverDataFileMap)
var e *notaryapi.Error
if err!=nil{
e = notaryapi.CreateError(notaryapi.ErrorBadPOSTData, err.Error())
}
return buf.Bytes(), e
} |
package routing
import (
"fmt"
"net"
"sync"
"sync/atomic"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
)
// TopologyClient represents an intent to receive notifications from the
// channel router regarding changes to the topology of the channel graph. The
// TopologyChanges channel will be sent upon with new updates to the channel
// graph in real-time as they're encountered.
type TopologyClient struct {
	// TopologyChanges is a receive-only channel that new channel graph
	// updates will be sent over.
	//
	// TODO(roasbeef): chan for each update type instead?
	TopologyChanges <-chan *TopologyChange

	// Cancel is a function closure that should be executed when the client
	// wishes to cancel their notification intent. Doing so allows the
	// ChannelRouter to free up resources.
	Cancel func()
}
// topologyClientUpdate is a message sent to the channel router to either
// register a new topology client or re-register an existing client.
type topologyClientUpdate struct {
	// cancel indicates if the update to the client is cancelling an
	// existing client's notifications. If not, then this update will be to
	// register a new set of notifications.
	cancel bool

	// clientID is the unique identifier for this client. Any further
	// updates (deleting or adding) to this notification client will be
	// dispatched according to the target clientID.
	clientID uint64

	// ntfnChan is a *send-only* channel in which notifications should be
	// sent over from router -> client.
	ntfnChan chan<- *TopologyChange
}
// SubscribeTopology returns a new topology client which can be used by the
// caller to receive notifications whenever a change in the channel graph
// topology occurs. Changes that will be sent as notifications include: new
// nodes appearing, nodes updating their attributes, new channels, channels
// closing, and updates in the routing policies of a channel's directed
// edges.
func (r *ChannelRouter) SubscribeTopology() (*TopologyClient, error) {
	// We'll first atomically obtain the next ID for this client from the
	// incrementing client ID counter.
	clientID := atomic.AddUint64(&r.ntfnClientCounter, 1)

	log.Debugf("New graph topology client subscription, client %v",
		clientID)

	// The channel is buffered so a slow consumer does not immediately
	// block the router's dispatch goroutines.
	ntfnChan := make(chan *TopologyChange, 10)

	// Register via the router's update channel, bailing out if the router
	// is shutting down.
	select {
	case r.ntfnClientUpdates <- &topologyClientUpdate{
		cancel:   false,
		clientID: clientID,
		ntfnChan: ntfnChan,
	}:
	case <-r.quit:
		return nil, errors.New("ChannelRouter shutting down")
	}

	return &TopologyClient{
		TopologyChanges: ntfnChan,
		Cancel: func() {
			select {
			case r.ntfnClientUpdates <- &topologyClientUpdate{
				cancel:   true,
				clientID: clientID,
			}:
			case <-r.quit:
				return
			}
		},
	}, nil
}
// topologyClient is a data-structure used by the channel router to couple
// the client's notification channel along with a special "exit" channel that
// can be used to cancel all lingering goroutines blocked on a send to the
// notification channel.
type topologyClient struct {
	// ntfnChan is a send-only channel that's used to propagate
	// notifications from the channel router to an instance of a
	// topologyClient client.
	ntfnChan chan<- *TopologyChange

	// exit is a channel that is used internally by the channel router to
	// cancel any active un-consumed goroutine notifications.
	exit chan struct{}

	// wg tracks the notification goroutines spawned on behalf of this
	// client (see notifyTopologyChange) so they can be waited out.
	wg sync.WaitGroup
}
// notifyTopologyChange notifies all registered clients of a new change in
// graph topology in a non-blocking fashion: each send happens in its own
// goroutine which gives up if the client cancels or the router quits.
func (r *ChannelRouter) notifyTopologyChange(topologyDiff *TopologyChange) {
	r.RLock()
	numClients := len(r.topologyClients)
	r.RUnlock()

	// Do not reacquire the lock twice unnecessarily.
	if numClients == 0 {
		return
	}

	log.Tracef("Sending topology notification to %v clients %v",
		numClients,
		newLogClosure(func() string {
			return spew.Sdump(topologyDiff)
		}),
	)

	r.RLock()
	for _, client := range r.topologyClients {
		client.wg.Add(1)

		go func(c *topologyClient) {
			defer c.wg.Done()

			select {

			// In this case we'll try to send the notification
			// directly to the upstream client consumer.
			case c.ntfnChan <- topologyDiff:

			// If the client cancels the notifications, then we'll
			// exit early.
			case <-c.exit:

			// Similarly, if the ChannelRouter itself exits early,
			// then we'll also exit ourselves.
			case <-r.quit:

			}
		}(client)
	}
	r.RUnlock()
}
// TopologyChange represents a new set of modifications to the channel graph.
// Topology changes will be dispatched in real-time as the ChannelGraph
// validates and processes modifications to the authenticated channel graph.
type TopologyChange struct {
	// NodeUpdates is a slice of nodes which are either new to the channel
	// graph, or have had their attributes updated in an authenticated
	// manner.
	NodeUpdates []*NetworkNodeUpdate

	// ChannelEdgeUpdates is a slice of channel edges which are either
	// newly opened and authenticated, or have had their routing policies
	// updated.
	ChannelEdgeUpdates []*ChannelEdgeUpdate

	// ClosedChannels contains a slice of close channel summaries which
	// describe which block a channel was closed at, and also carry
	// supplemental information such as the capacity of the former channel.
	ClosedChannels []*ClosedChanSummary
}
// isEmpty reports whether the TopologyChange carries no *new* updates of any
// type (no node updates, no edge updates, no closed channels).
func (t *TopologyChange) isEmpty() bool {
	switch {
	case len(t.NodeUpdates) != 0:
		return false
	case len(t.ChannelEdgeUpdates) != 0:
		return false
	case len(t.ClosedChannels) != 0:
		return false
	}
	return true
}
// ClosedChanSummary is a summary of a channel that was detected as being
// closed by monitoring the blockchain. Once a channel's funding point has
// been spent, the channel will automatically be marked as closed by the
// ChainNotifier.
//
// TODO(roasbeef): add nodes involved?
type ClosedChanSummary struct {
	// ChanID is the short-channel ID which uniquely identifies the
	// channel.
	ChanID uint64

	// Capacity was the total capacity of the channel before it was closed.
	Capacity btcutil.Amount

	// ClosedHeight is the height in the chain that the channel was closed
	// at.
	ClosedHeight uint32

	// ChanPoint is the funding point, or the multi-sig utxo which
	// previously represented the channel.
	ChanPoint wire.OutPoint
}
// createCloseSummaries takes in a slice of channels closed at the target
// block height and creates one ClosedChanSummary per channel closure.
func createCloseSummaries(blockHeight uint32,
	closedChans ...*channeldb.ChannelEdgeInfo) []*ClosedChanSummary {
	summaries := make([]*ClosedChanSummary, 0, len(closedChans))
	for _, edge := range closedChans {
		summaries = append(summaries, &ClosedChanSummary{
			ChanID:       edge.ChannelID,
			Capacity:     edge.Capacity,
			ClosedHeight: blockHeight,
			ChanPoint:    edge.ChannelPoint,
		})
	}
	return summaries
}
// NetworkNodeUpdate is an update for a node within the Lightning Network. A
// NetworkNodeUpdate is sent out either when a new node joins the network, or a
// node broadcasts a new update with a newer time stamp that supersedes its
// old update. All updates are properly authenticated.
type NetworkNodeUpdate struct {
	// Addresses is a slice of all the node's known addresses.
	Addresses []net.Addr
	// IdentityKey is the identity public key of the target node. This is
	// used to encrypt onion blobs as well as to authenticate any new
	// updates.
	IdentityKey *btcec.PublicKey
	// GlobalFeatures is a set of opaque bytes that describe the set of
	// features supported by the node.
	GlobalFeatures []byte
	// Alias is the alias or nick name of the node.
	Alias string
}
// ChannelEdgeUpdate is an update for a new channel within the ChannelGraph.
// This update is sent out once a new authenticated channel edge is discovered
// within the network. These updates are directional, so if a channel is fully
// public, then there will be two updates sent out: one for each direction
// within the channel. Each update will carry that particular routing edge
// policy for the channel direction.
//
// An edge is a channel in the direction of AdvertisingNode -> ConnectingNode.
type ChannelEdgeUpdate struct {
	// ChanID is the unique short channel ID for the channel. This encodes
	// where in the blockchain the channel's funding transaction was
	// originally confirmed.
	ChanID uint64
	// ChanPoint is the outpoint which represents the multi-sig funding
	// output for the channel.
	ChanPoint wire.OutPoint
	// Capacity is the capacity of the newly created channel.
	Capacity btcutil.Amount
	// MinHTLC is the minimum HTLC amount that this channel will forward.
	MinHTLC lnwire.MilliSatoshi
	// MaxHTLC is the maximum HTLC amount that this channel will forward.
	MaxHTLC lnwire.MilliSatoshi
	// BaseFee is the base fee that will be charged for all HTLC's
	// forwarded across this channel direction.
	BaseFee lnwire.MilliSatoshi
	// FeeRate is the fee rate that will be shared for all HTLC's forwarded
	// across this channel direction.
	FeeRate lnwire.MilliSatoshi
	// TimeLockDelta is the time-lock expressed in blocks that will be
	// added to outgoing HTLC's from incoming HTLC's. This value is the
	// difference of the incoming and outgoing HTLC's time-locks routed
	// through this hop.
	TimeLockDelta uint16
	// AdvertisingNode is the node that's advertising this edge.
	AdvertisingNode *btcec.PublicKey
	// ConnectingNode is the node that the advertising node connects to.
	ConnectingNode *btcec.PublicKey
	// Disabled, if true, signals that the channel is unavailable to relay
	// payments.
	Disabled bool
}
// addToTopologyChange appends the passed update message to the passed
// TopologyChange, properly identifying which type of update the message
// constitutes. This function will also fetch any required auxiliary
// information required to create the topology change update from the graph
// database.
func addToTopologyChange(graph *channeldb.ChannelGraph, update *TopologyChange,
	msg interface{}) error {
	switch m := msg.(type) {
	// Any node announcement maps directly to a NetworkNodeUpdate struct.
	// No further data munging or db queries are required.
	case *channeldb.LightningNode:
		pubKey, err := m.PubKey()
		if err != nil {
			return err
		}
		nodeUpdate := &NetworkNodeUpdate{
			Addresses:   m.Addresses,
			IdentityKey: pubKey,
			Alias:       m.Alias,
		}
		// NOTE(review): the curve is nilled out before the update is
		// handed to clients — presumably to keep serialized/dumped
		// output small; confirm consumers never need the curve back.
		nodeUpdate.IdentityKey.Curve = nil
		update.NodeUpdates = append(update.NodeUpdates, nodeUpdate)
		return nil
	// We ignore initial channel announcements as we'll only send out
	// updates once the individual edges themselves have been updated.
	case *channeldb.ChannelEdgeInfo:
		return nil
	// Any new ChannelUpdateAnnouncements will generate a corresponding
	// ChannelEdgeUpdate notification.
	case *channeldb.ChannelEdgePolicy:
		// We'll need to fetch the edge's information from the database
		// in order to get the information concerning which nodes are
		// being connected.
		edgeInfo, _, _, err := graph.FetchChannelEdgesByID(m.ChannelID)
		if err != nil {
			return errors.Errorf("unable fetch channel edge: %v",
				err)
		}
		// If the flag is one, then the advertising node is actually
		// the second node.
		//
		// NOTE(review): this assumes ChanUpdateDirection is the
		// least-significant bit, so the masked value can be compared
		// to 1 directly — confirm against lnwire.
		sourceNode := edgeInfo.NodeKey1
		connectingNode := edgeInfo.NodeKey2
		if m.ChannelFlags&lnwire.ChanUpdateDirection == 1 {
			sourceNode = edgeInfo.NodeKey2
			connectingNode = edgeInfo.NodeKey1
		}
		// NodeKey1/NodeKey2 are lazy accessors: calling them parses
		// and returns the public key, or an error on malformed data.
		aNode, err := sourceNode()
		if err != nil {
			return err
		}
		cNode, err := connectingNode()
		if err != nil {
			return err
		}
		edgeUpdate := &ChannelEdgeUpdate{
			ChanID:          m.ChannelID,
			ChanPoint:       edgeInfo.ChannelPoint,
			TimeLockDelta:   m.TimeLockDelta,
			Capacity:        edgeInfo.Capacity,
			MinHTLC:         m.MinHTLC,
			MaxHTLC:         m.MaxHTLC,
			BaseFee:         m.FeeBaseMSat,
			FeeRate:         m.FeeProportionalMillionths,
			AdvertisingNode: aNode,
			ConnectingNode:  cNode,
			Disabled:        m.ChannelFlags&lnwire.ChanUpdateDisabled != 0,
		}
		// As above, drop the curve parameters before notifying.
		edgeUpdate.AdvertisingNode.Curve = nil
		edgeUpdate.ConnectingNode.Curve = nil
		// TODO(roasbeef): add bit to toggle
		update.ChannelEdgeUpdates = append(update.ChannelEdgeUpdates,
			edgeUpdate)
		return nil
	default:
		return fmt.Errorf("Unable to add to topology change, "+
			"unknown message type %T", msg)
	}
}
routing/notifications: check router started on SubscribeTopology
If the router is not yet started, return an error to avoid a deadlock
waiting for it to handle the subscription request.
package routing
import (
"fmt"
"net"
"sync"
"sync/atomic"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
)
// TopologyClient represents an intent to receive notifications from the
// channel router regarding changes to the topology of the channel graph. The
// TopologyChanges channel will be sent upon with new updates to the channel
// graph in real-time as they're encountered.
type TopologyClient struct {
	// TopologyChanges is a receive only channel that new channel graph
	// updates will be sent over.
	//
	// TODO(roasbeef): chan for each update type instead?
	TopologyChanges <-chan *TopologyChange
	// Cancel is a function closure that should be executed when the client
	// wishes to cancel their notification intent. Doing so allows the
	// ChannelRouter to free up resources.
	Cancel func()
}
// topologyClientUpdate is a message sent to the channel router to either
// register a new topology client or re-register an existing client.
type topologyClientUpdate struct {
	// cancel indicates if the update to the client is cancelling an
	// existing client's notifications. If not then this update will be to
	// register a new set of notifications.
	cancel bool
	// clientID is the unique identifier for this client. Any further
	// updates (deleting or adding) to this notification client will be
	// dispatched according to the target clientID.
	clientID uint64
	// ntfnChan is a *send-only* channel in which notifications should be
	// sent over from router -> client.
	ntfnChan chan<- *TopologyChange
}
// SubscribeTopology returns a new topology client which can be used by the
// caller to receive notifications whenever a change in the channel graph
// topology occurs. Changes that will be sent as notifications include: new
// nodes appearing, nodes updating their attributes, new channels, channels
// closing, and updates in the routing policies of a channel's directed edges.
func (r *ChannelRouter) SubscribeTopology() (*TopologyClient, error) {
	// If the router is not yet started, return an error to avoid a
	// deadlock waiting for it to handle the subscription request.
	if atomic.LoadUint32(&r.started) == 0 {
		return nil, fmt.Errorf("router not started")
	}
	// We'll first atomically obtain the next ID for this client from the
	// incrementing client ID counter.
	clientID := atomic.AddUint64(&r.ntfnClientCounter, 1)
	log.Debugf("New graph topology client subscription, client %v",
		clientID)
	// A small buffer decouples the router's dispatch goroutines from a
	// momentarily slow consumer.
	ntfnChan := make(chan *TopologyChange, 10)
	// Hand the registration to the router's main loop; the quit case
	// prevents blocking forever if the router is shutting down.
	select {
	case r.ntfnClientUpdates <- &topologyClientUpdate{
		cancel:   false,
		clientID: clientID,
		ntfnChan: ntfnChan,
	}:
	case <-r.quit:
		return nil, errors.New("ChannelRouter shutting down")
	}
	return &TopologyClient{
		TopologyChanges: ntfnChan,
		// Cancel sends a cancellation update for this clientID; it is
		// a no-op if the router is already shutting down.
		Cancel: func() {
			select {
			case r.ntfnClientUpdates <- &topologyClientUpdate{
				cancel:   true,
				clientID: clientID,
			}:
			case <-r.quit:
				return
			}
		},
	}, nil
}
// topologyClient is a data-structure use by the channel router to couple the
// client's notification channel along with a special "exit" channel that can
// be used to cancel all lingering goroutines blocked on a send to the
// notification channel.
type topologyClient struct {
	// ntfnChan is a send-only channel that's used to propagate
	// notifications from the channel router to an instance of a
	// topologyClient client.
	ntfnChan chan<- *TopologyChange
	// exit is a channel that is used internally by the channel router to
	// cancel any active un-consumed goroutine notifications.
	exit chan struct{}
	// wg tracks the in-flight notification goroutines spawned for this
	// client so they can be waited on at cancellation time.
	wg sync.WaitGroup
}
// notifyTopologyChange notifies all registered clients of a new change in
// graph topology in a non-blocking manner: each client is served by its own
// goroutine so a slow consumer cannot stall the router.
func (r *ChannelRouter) notifyTopologyChange(topologyDiff *TopologyChange) {
	r.RLock()
	numClients := len(r.topologyClients)
	r.RUnlock()
	// Do not reacquire the lock twice unnecessarily.
	if numClients == 0 {
		return
	}
	log.Tracef("Sending topology notification to %v clients %v",
		numClients,
		newLogClosure(func() string {
			return spew.Sdump(topologyDiff)
		}),
	)
	// Hold the read lock only while iterating the client map; the actual
	// sends happen in per-client goroutines after the lock is released.
	r.RLock()
	for _, client := range r.topologyClients {
		client.wg.Add(1)
		go func(c *topologyClient) {
			defer c.wg.Done()
			select {
			// In this case we'll try to send the notification
			// directly to the upstream client consumer.
			case c.ntfnChan <- topologyDiff:
			// If the client cancels the notifications, then we'll
			// exit early.
			case <-c.exit:
			// Similarly, if the ChannelRouter itself exits early,
			// then we'll also exit ourselves.
			case <-r.quit:
			}
		}(client)
	}
	r.RUnlock()
}
// TopologyChange represents a new set of modifications to the channel graph.
// Topology changes will be dispatched in real-time as the ChannelGraph
// validates and processes modifications to the authenticated channel graph.
type TopologyChange struct {
	// NodeUpdates is a slice of nodes which are either new to the channel
	// graph, or have had their attributes updated in an authenticated
	// manner.
	NodeUpdates []*NetworkNodeUpdate
	// ChannelEdgeUpdates is a slice of channel edges which are either
	// newly opened and authenticated, or have had their routing policies
	// updated.
	ChannelEdgeUpdates []*ChannelEdgeUpdate
	// ClosedChannels contains a slice of close channel summaries which
	// describe which block a channel was closed at, and also carry
	// supplemental information such as the capacity of the former channel.
	ClosedChannels []*ClosedChanSummary
}
// isEmpty returns true if the TopologyChange carries no updates of any kind.
// A change is only considered non-empty when at least one node update,
// channel edge update, or closed channel summary is present.
func (t *TopologyChange) isEmpty() bool {
	switch {
	case len(t.NodeUpdates) != 0:
		return false
	case len(t.ChannelEdgeUpdates) != 0:
		return false
	case len(t.ClosedChannels) != 0:
		return false
	}
	return true
}
// ClosedChanSummary is a summary of a channel that was detected as being
// closed by monitoring the blockchain. Once a channel's funding point has been
// spent, the channel will automatically be marked as closed by the
// ChainNotifier.
//
// TODO(roasbeef): add nodes involved?
type ClosedChanSummary struct {
	// ChanID is the short-channel ID which uniquely identifies the
	// channel.
	ChanID uint64
	// Capacity was the total capacity of the channel before it was closed.
	Capacity btcutil.Amount
	// ClosedHeight is the height in the chain that the channel was closed
	// at.
	ClosedHeight uint32
	// ChanPoint is the funding point, or the multi-sig utxo which
	// previously represented the channel.
	ChanPoint wire.OutPoint
}
// createCloseSummaries takes in a slice of channels closed at the target
// block height and creates one ClosedChanSummary per channel closure.
func createCloseSummaries(blockHeight uint32,
	closedChans ...*channeldb.ChannelEdgeInfo) []*ClosedChanSummary {
	summaries := make([]*ClosedChanSummary, 0, len(closedChans))
	for _, edge := range closedChans {
		summaries = append(summaries, &ClosedChanSummary{
			ChanID:       edge.ChannelID,
			Capacity:     edge.Capacity,
			ClosedHeight: blockHeight,
			ChanPoint:    edge.ChannelPoint,
		})
	}
	return summaries
}
// NetworkNodeUpdate is an update for a node within the Lightning Network. A
// NetworkNodeUpdate is sent out either when a new node joins the network, or a
// node broadcasts a new update with a newer time stamp that supersedes its
// old update. All updates are properly authenticated.
type NetworkNodeUpdate struct {
	// Addresses is a slice of all the node's known addresses.
	Addresses []net.Addr
	// IdentityKey is the identity public key of the target node. This is
	// used to encrypt onion blobs as well as to authenticate any new
	// updates.
	IdentityKey *btcec.PublicKey
	// GlobalFeatures is a set of opaque bytes that describe the set of
	// features supported by the node.
	GlobalFeatures []byte
	// Alias is the alias or nick name of the node.
	Alias string
}
// ChannelEdgeUpdate is an update for a new channel within the ChannelGraph.
// This update is sent out once a new authenticated channel edge is discovered
// within the network. These updates are directional, so if a channel is fully
// public, then there will be two updates sent out: one for each direction
// within the channel. Each update will carry that particular routing edge
// policy for the channel direction.
//
// An edge is a channel in the direction of AdvertisingNode -> ConnectingNode.
type ChannelEdgeUpdate struct {
	// ChanID is the unique short channel ID for the channel. This encodes
	// where in the blockchain the channel's funding transaction was
	// originally confirmed.
	ChanID uint64
	// ChanPoint is the outpoint which represents the multi-sig funding
	// output for the channel.
	ChanPoint wire.OutPoint
	// Capacity is the capacity of the newly created channel.
	Capacity btcutil.Amount
	// MinHTLC is the minimum HTLC amount that this channel will forward.
	MinHTLC lnwire.MilliSatoshi
	// MaxHTLC is the maximum HTLC amount that this channel will forward.
	MaxHTLC lnwire.MilliSatoshi
	// BaseFee is the base fee that will be charged for all HTLC's
	// forwarded across this channel direction.
	BaseFee lnwire.MilliSatoshi
	// FeeRate is the fee rate that will be shared for all HTLC's forwarded
	// across this channel direction.
	FeeRate lnwire.MilliSatoshi
	// TimeLockDelta is the time-lock expressed in blocks that will be
	// added to outgoing HTLC's from incoming HTLC's. This value is the
	// difference of the incoming and outgoing HTLC's time-locks routed
	// through this hop.
	TimeLockDelta uint16
	// AdvertisingNode is the node that's advertising this edge.
	AdvertisingNode *btcec.PublicKey
	// ConnectingNode is the node that the advertising node connects to.
	ConnectingNode *btcec.PublicKey
	// Disabled, if true, signals that the channel is unavailable to relay
	// payments.
	Disabled bool
}
// addToTopologyChange appends the passed update message to the passed
// TopologyChange, properly identifying which type of update the message
// constitutes. This function will also fetch any required auxiliary
// information required to create the topology change update from the graph
// database.
func addToTopologyChange(graph *channeldb.ChannelGraph, update *TopologyChange,
	msg interface{}) error {
	switch m := msg.(type) {
	// Any node announcement maps directly to a NetworkNodeUpdate struct.
	// No further data munging or db queries are required.
	case *channeldb.LightningNode:
		pubKey, err := m.PubKey()
		if err != nil {
			return err
		}
		nodeUpdate := &NetworkNodeUpdate{
			Addresses:   m.Addresses,
			IdentityKey: pubKey,
			Alias:       m.Alias,
		}
		// NOTE(review): the curve is nilled out before the update is
		// handed to clients — presumably to keep serialized/dumped
		// output small; confirm consumers never need the curve back.
		nodeUpdate.IdentityKey.Curve = nil
		update.NodeUpdates = append(update.NodeUpdates, nodeUpdate)
		return nil
	// We ignore initial channel announcements as we'll only send out
	// updates once the individual edges themselves have been updated.
	case *channeldb.ChannelEdgeInfo:
		return nil
	// Any new ChannelUpdateAnnouncements will generate a corresponding
	// ChannelEdgeUpdate notification.
	case *channeldb.ChannelEdgePolicy:
		// We'll need to fetch the edge's information from the database
		// in order to get the information concerning which nodes are
		// being connected.
		edgeInfo, _, _, err := graph.FetchChannelEdgesByID(m.ChannelID)
		if err != nil {
			return errors.Errorf("unable fetch channel edge: %v",
				err)
		}
		// If the flag is one, then the advertising node is actually
		// the second node.
		//
		// NOTE(review): this assumes ChanUpdateDirection is the
		// least-significant bit, so the masked value can be compared
		// to 1 directly — confirm against lnwire.
		sourceNode := edgeInfo.NodeKey1
		connectingNode := edgeInfo.NodeKey2
		if m.ChannelFlags&lnwire.ChanUpdateDirection == 1 {
			sourceNode = edgeInfo.NodeKey2
			connectingNode = edgeInfo.NodeKey1
		}
		// NodeKey1/NodeKey2 are lazy accessors: calling them parses
		// and returns the public key, or an error on malformed data.
		aNode, err := sourceNode()
		if err != nil {
			return err
		}
		cNode, err := connectingNode()
		if err != nil {
			return err
		}
		edgeUpdate := &ChannelEdgeUpdate{
			ChanID:          m.ChannelID,
			ChanPoint:       edgeInfo.ChannelPoint,
			TimeLockDelta:   m.TimeLockDelta,
			Capacity:        edgeInfo.Capacity,
			MinHTLC:         m.MinHTLC,
			MaxHTLC:         m.MaxHTLC,
			BaseFee:         m.FeeBaseMSat,
			FeeRate:         m.FeeProportionalMillionths,
			AdvertisingNode: aNode,
			ConnectingNode:  cNode,
			Disabled:        m.ChannelFlags&lnwire.ChanUpdateDisabled != 0,
		}
		// As above, drop the curve parameters before notifying.
		edgeUpdate.AdvertisingNode.Curve = nil
		edgeUpdate.ConnectingNode.Curve = nil
		// TODO(roasbeef): add bit to toggle
		update.ChannelEdgeUpdates = append(update.ChannelEdgeUpdates,
			edgeUpdate)
		return nil
	default:
		return fmt.Errorf("Unable to add to topology change, "+
			"unknown message type %T", msg)
	}
}
|
//
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"bytes"
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strconv"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
"github.com/01org/ciao/payloads"
)
// scopedToken caches the keystone scoped token used to authenticate all
// subsequent compute API requests in this invocation.
var scopedToken string
// Location of the OpenStack compute endpoint this CLI talks to.
const openstackComputePort = 8774
const openstackComputeVersion = "v2.1"
// action enumerates the instance lifecycle operations supported by the
// compute "action" endpoint.
type action uint8
const (
	computeActionStart action = iota
	computeActionStop
)
// debugf logs a ciao-cli prefixed debug message at glog verbosity level 2.
func debugf(format string, args ...interface{}) {
	glog.V(2).Infof("ciao-cli DEBUG: "+format, args...)
}
// infof logs a ciao-cli prefixed informational message at glog verbosity
// level 1.
func infof(format string, args ...interface{}) {
	glog.V(1).Infof("ciao-cli INFO: "+format, args...)
}
// warningf logs a ciao-cli prefixed warning message.
func warningf(format string, args ...interface{}) {
	glog.Warningf("ciao-cli WARNING: "+format, args...)
}
// errorf logs a ciao-cli prefixed error message.
func errorf(format string, args ...interface{}) {
	glog.Errorf("ciao-cli ERROR: "+format, args...)
}
// fatalf logs a ciao-cli prefixed fatal message and terminates the process.
//
// glog.Fatalf itself logs the message (including stack traces) and then
// calls os.Exit, so the trailing os.Exit(1) that used to follow it was
// unreachable dead code and has been removed.
func fatalf(format string, args ...interface{}) {
	glog.Fatalf("ciao-cli FATAL: "+format, args...)
}
// Command-line flags controlling which ciao-cli operation runs and its
// parameters.
//
// Fixes: the -password flag's help text wrongly read "Openstack Service
// Username" (copy/paste from -username), and -list-length's help text had a
// typo ("reponse").
var (
	allInstances     = flag.Bool("all-instances", false, "Select all instances")
	instanceLabel    = flag.String("instance-label", "", "Set a frame label. This will trigger frame tracing")
	listInstances    = flag.Bool("list-instances", false, "List all instances for a tenant")
	listCNInstances  = flag.Bool("list-cn-instances", false, "List all instances for a compute node")
	listWlInstances  = flag.Bool("list-wl-instances", false, "List all instances for a workload")
	listQuotas       = flag.Bool("list-quotas", false, "List quotas status for a tenant")
	listResources    = flag.Bool("list-resources", false, "List consumed resources for a tenant for the past 15mn")
	listWorkloads    = flag.Bool("list-workloads", false, "List all workloads")
	listUserTenants  = flag.Bool("list-tenants", false, "List all tenants for a given user")
	listTenants      = flag.Bool("list-all-tenants", false, "List all tenants")
	listComputeNodes = flag.Bool("list-cns", false, "List all compute nodes")
	listCNCIs        = flag.Bool("list-cncis", false, "List all CNCIs")
	listLength       = flag.Int("list-length", 0, "Maximum number of items in the response")
	listLabels       = flag.Bool("list-labels", false, "List all trace labels")
	listAllEvents    = flag.Bool("list-all-events", false, "List all cluster events")
	listEvents       = flag.Bool("list-events", false, "List all events for a tenant")
	dumpCNCI         = flag.Bool("dump-cnci", false, "Dump a CNCI details")
	dumpToken        = flag.Bool("dump-token", false, "Dump keystone tokens")
	clusterStatus    = flag.Bool("cluster-status", false, "List all compute nodes")
	launchInstances  = flag.Bool("launch-instances", false, "Launch Ciao instances")
	deleteInstance   = flag.Bool("delete-instance", false, "Delete a Ciao instance")
	deleteEvents     = flag.Bool("delete-events", false, "Delete all stored Ciao events")
	stopInstance     = flag.Bool("stop-instance", false, "Stop a Ciao instance")
	restartInstance  = flag.Bool("restart-instance", false, "Restart a Ciao instance")
	workload         = flag.String("workload", "", "Workload UUID")
	instances        = flag.Int("instances", 1, "Number of instances to create")
	instance         = flag.String("instance", "", "Instance UUID")
	instanceMarker   = flag.String("instance-marker", "", "Show instance list starting from the next instance after instance-marker")
	instanceOffset   = flag.Int("instance-offset", 0, "Show instance list starting from instance #instance-offset")
	tenantID         = flag.String("tenant-id", "", "Tenant UUID")
	tenantName       = flag.String("tenant-name", "", "Tenant name")
	computeNode      = flag.String("cn", "", "Compute node UUID")
	cnci             = flag.String("cnci", "", "CNCI UUID")
	controllerURL    = flag.String("controller", "", "Controller URL")
	computePort      = flag.Int("computeport", openstackComputePort, "Openstack Compute API port")
	identityURL      = flag.String("identity", "", "Keystone URL")
	identityUser     = flag.String("username", "", "Openstack Service Username")
	identityPassword = flag.String("password", "", "Openstack Service Password")
	dumpLabel        = flag.String("dump-label", "", "Dump all trace data for a given label")
)
// Environment variables that may supply defaults for the corresponding
// command-line flags (identity URL, controller URL, credentials, port).
const (
	ciaoIdentityEnv    = "CIAO_IDENTITY"
	ciaoControllerEnv  = "CIAO_CONTROLLER"
	ciaoUsernameEnv    = "CIAO_USERNAME"
	ciaoPasswordEnv    = "CIAO_PASSWORD"
	ciaoComputePortEnv = "CIAO_COMPUTEPORT"
)
// queryValue is a single name=value pair appended to a request's URL query
// string.
type queryValue struct {
	name, value string
}
// buildComputeURL builds a fully-qualified compute endpoint URL from the
// configured controller host and port, the compute API version, and the
// caller-supplied path format plus its arguments.
func buildComputeURL(format string, args ...interface{}) string {
	endpoint := fmt.Sprintf("https://%s:%d/%s/", *controllerURL, *computePort, openstackComputeVersion) + format
	return fmt.Sprintf(endpoint, args...)
}
// sendHTTPRequestToken sends an HTTP request to url, optionally attaching
// query values, an X-Auth-Token header (when token is non-empty) and a JSON
// body. Environment variables in url are expanded. It returns the response;
// for status codes >= 400 the body text is folded into the returned error.
//
// NOTE(review): a fresh TLS transport and client are created for every call
// (no connection reuse), and certificate verification is disabled via
// InsecureSkipVerify. On the >= 400 path the response body is read but not
// closed here — presumably left to the caller; worth confirming.
func sendHTTPRequestToken(method string, url string, values []queryValue, token string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest(method, os.ExpandEnv(url), body)
	if err != nil {
		return nil, err
	}
	infof("Sending %s %s\n", method, url)
	if values != nil {
		v := req.URL.Query()
		for _, value := range values {
			infof("Adding URL query %s=%s\n", value.name, value.value)
			v.Add(value.name, value.value)
		}
		req.URL.RawQuery = v.Encode()
	}
	if token != "" {
		req.Header.Add("X-Auth-Token", token)
	}
	// A non-nil body implies a JSON payload; advertise it both ways.
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Accept", "application/json")
	}
	warningf("Skipping TLS verification\n")
	tlsConfig := &tls.Config{InsecureSkipVerify: true}
	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	client := &http.Client{Transport: transport}
	resp, err := client.Do(req)
	if err != nil {
		errorf("Could not send HTTP request %s\n", err)
		return nil, err
	}
	infof("Got HTTP response (status %s)\n", resp.Status)
	// Surface HTTP-level failures as Go errors, including the server's
	// response text for diagnosis.
	if resp.StatusCode >= http.StatusBadRequest {
		respBody, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			errorf("Could not read the HTTP response %s\n", err)
			spew.Dump(resp.Body)
			return resp, err
		}
		return resp, fmt.Errorf("HTTP Error [%d] for [%s %s]: %s", resp.StatusCode, method, url, respBody)
	}
	return resp, err
}
// sendHTTPRequest is a convenience wrapper around sendHTTPRequestToken that
// authenticates with the cached keystone scopedToken.
func sendHTTPRequest(method string, url string, values []queryValue, body io.Reader) (*http.Response, error) {
	return sendHTTPRequestToken(method, url, values, scopedToken, body)
}
// unmarshalHTTPResponse reads and closes the response body, then decodes the
// JSON payload into v. Read and decode errors are logged and returned; on
// success the decoded value is dumped at glog verbosity 2.
func unmarshalHTTPResponse(resp *http.Response, v interface{}) error {
	defer resp.Body.Close()
	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		errorf("Could not read the HTTP response %s\n", err)
		return err
	}
	if err = json.Unmarshal(payload, v); err != nil {
		errorf("Could not unmarshal the HTTP response %s\n", err)
		return err
	}
	if glog.V(2) {
		spew.Dump(v)
	}
	return nil
}
func listAllInstances(tenant string, workload string, marker string, offset int, limit int) {
var servers payloads.ComputeServers
var url string
if *listInstances == true && tenant == "" {
fatalf("Missing required -tenant-id parameter")
}
if *listWlInstances == true && workload == "" {
fatalf("Missing required -workload parameter")
}
if tenant != "" {
url = buildComputeURL("%s/servers/detail", tenant)
} else if workload != "" {
url = buildComputeURL("flavors/%s/servers/detail", workload)
}
var values []queryValue
if limit > 0 {
values = append(values, queryValue{
name: "limit",
value: fmt.Sprintf("%d", limit),
})
}
if offset > 0 {
values = append(values, queryValue{
name: "offset",
value: fmt.Sprintf("%d", offset),
})
}
if marker != "" {
values = append(values, queryValue{
name: "marker",
value: marker,
})
}
resp, err := sendHTTPRequest("GET", url, values, nil)
if err != nil {
fatalf(err.Error())
}
err = unmarshalHTTPResponse(resp, &servers)
if err != nil {
fatalf(err.Error())
}
for i, server := range servers.Servers {
fmt.Printf("Instance #%d\n", i+1)
fmt.Printf("\tUUID: %s\n", server.ID)
fmt.Printf("\tStatus: %s\n", server.Status)
fmt.Printf("\tPrivate IP: %s\n", server.Addresses.Private[0].Addr)
fmt.Printf("\tMAC Address: %s\n", server.Addresses.Private[0].OSEXTIPSMACMacAddr)
fmt.Printf("\tCN UUID: %s\n", server.HostID)
fmt.Printf("\tImage UUID: %s\n", server.Image.ID)
fmt.Printf("\tTenant UUID: %s\n", server.TenantID)
if server.SSHIP != "" {
fmt.Printf("\tSSH IP: %s\n", server.SSHIP)
fmt.Printf("\tSSH Port: %d\n", server.SSHPort)
}
}
}
// limitToString renders a quota limit for display, mapping the sentinel
// value -1 to "Unlimited" and any other value to its decimal form.
func limitToString(limit int) string {
	if limit != -1 {
		return strconv.Itoa(limit)
	}
	return "Unlimited"
}
// listTenantQuotas fetches and prints quota usage and limits (instances,
// CPUs, memory, disk) for the given tenant. Aborts the process on any error.
func listTenantQuotas(tenant string) {
	if tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	url := buildComputeURL("%s/quotas", tenant)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	var resources payloads.CiaoTenantResources
	if err = unmarshalHTTPResponse(resp, &resources); err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("Quotas for tenant %s:\n", resources.ID)
	fmt.Printf("\tInstances: %d | %s\n", resources.InstanceUsage, limitToString(resources.InstanceLimit))
	fmt.Printf("\tCPUs: %d | %s\n", resources.VCPUUsage, limitToString(resources.VCPULimit))
	fmt.Printf("\tMemory: %d | %s\n", resources.MemUsage, limitToString(resources.MemLimit))
	fmt.Printf("\tDisk: %d | %s\n", resources.DiskUsage, limitToString(resources.DiskLimit))
}
// listTenantResources fetches and prints the tenant's resource usage history
// for the trailing 15 minutes. Aborts the process on any error.
func listTenantResources(tenant string) {
	if tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	url := buildComputeURL("%s/resources", tenant)
	// Bound the query window to the last quarter hour, in RFC3339 form.
	now := time.Now()
	values := []queryValue{
		{name: "start_date", value: now.Add(-15 * time.Minute).Format(time.RFC3339)},
		{name: "end_date", value: now.Format(time.RFC3339)},
	}
	resp, err := sendHTTPRequest("GET", url, values, nil)
	if err != nil {
		fatalf(err.Error())
	}
	var usage payloads.CiaoUsageHistory
	if err = unmarshalHTTPResponse(resp, &usage); err != nil {
		fatalf(err.Error())
	}
	if len(usage.Usages) == 0 {
		fmt.Printf("No usage history for %s\n", tenant)
		return
	}
	fmt.Printf("Usage for tenant %s:\n", tenant)
	for _, u := range usage.Usages {
		fmt.Printf("\t%v: [%d CPUs] [%d MB memory] [%d MB disk]\n", u.Timestamp, u.VCPU, u.Memory, u.Disk)
	}
}
func workloadDetail(tenant string, workload string) string {
var flavor payloads.ComputeFlavorDetails
url := buildComputeURL("%s/flavors/%s", tenant, workload)
resp, err := sendHTTPRequest("GET", url, nil, nil)
if err != nil {
fatalf(err.Error())
}
err = unmarshalHTTPResponse(resp, &flavor)
if err != nil {
fatalf(err.Error())
}
return fmt.Sprintf("\tName: %s\n\tUUID:%s\n\tImage UUID: %s\n\tCPUs: %d\n\tMemory: %d MB\n",
flavor.Flavor.Name, flavor.Flavor.ID, flavor.Flavor.Disk, flavor.Flavor.Vcpus, flavor.Flavor.RAM)
}
// listTenantWorkloads lists all workloads (flavors) visible to the given
// tenant, printing the detailed description of each.
//
// Fixes: removed the unreachable "faketenant" fallback — the preceding
// empty-tenant check already aborts via fatalf, so that branch could never
// run — and replaced fmt.Printf with fmt.Print for the non-constant detail
// string, so '%' characters in workload details are not misinterpreted as
// format verbs (go vet: printf).
func listTenantWorkloads(tenant string) {
	if tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	url := buildComputeURL("%s/flavors", tenant)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	var flavors payloads.ComputeFlavors
	err = unmarshalHTTPResponse(resp, &flavors)
	if err != nil {
		fatalf(err.Error())
	}
	for i, flavor := range flavors.Flavors {
		fmt.Printf("Workload %d\n", i+1)
		fmt.Print(workloadDetail(tenant, flavor.ID))
	}
}
// listAllTenants prints the UUID and name of every tenant known to the
// identity service, using the configured service credentials.
func listAllTenants() {
	projects, err := getAllProjects(*identityUser, *identityPassword)
	if err != nil {
		fatalf(err.Error())
	}
	for i := range projects.Projects {
		p := projects.Projects[i]
		fmt.Printf("Tenant [%d]\n", i+1)
		fmt.Printf("\tUUID: %s\n", p.ID)
		fmt.Printf("\tName: %s\n", p.Name)
	}
}
// listUserSpecificTenants prints the UUID and name of every tenant (project)
// the given user belongs to.
func listUserSpecificTenants(username, password string) {
	projects, err := getUserProjects(username, password)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("Projects for user %s\n", username)
	for _, p := range projects {
		fmt.Printf("\tUUID: %s\n", p.ID)
		fmt.Printf("\tName: %s\n", p.Name)
	}
}
func listAllComputeNodes() {
var nodes payloads.CiaoComputeNodes
url := buildComputeURL("nodes")
resp, err := sendHTTPRequest("GET", url, nil, nil)
if err != nil {
fatalf(err.Error())
}
err = unmarshalHTTPResponse(resp, &nodes)
if err != nil {
fatalf(err.Error())
}
for i, node := range nodes.Nodes {
fmt.Printf("Compute Node %d\n", i+1)
fmt.Printf("\tUUID: %s\n", node.ID)
fmt.Printf("\tStatus: %s\n", node.Status)
fmt.Printf("\tLoad: %d\n", node.Load)
fmt.Printf("\tAvailable/Total memory: %d/%d MB\n", node.MemAvailable, node.MemTotal)
fmt.Printf("\tAvailable/Total disk: %d/%d MB\n", node.DiskAvailable, node.DiskTotal)
fmt.Printf("\tTotal Instances: %d\n", node.TotalInstances)
fmt.Printf("\t\tRunning Instances: %d\n", node.TotalRunningInstances)
fmt.Printf("\t\tPending Instances: %d\n", node.TotalPendingInstances)
fmt.Printf("\t\tPaused Instances: %d\n", node.TotalPausedInstances)
}
}
func listAllCNCIs() {
var cncis payloads.CiaoCNCIs
url := buildComputeURL("cncis")
resp, err := sendHTTPRequest("GET", url, nil, nil)
if err != nil {
fatalf(err.Error())
}
err = unmarshalHTTPResponse(resp, &cncis)
if err != nil {
fatalf(err.Error())
}
for i, cnci := range cncis.CNCIs {
fmt.Printf("CNCI %d\n", i+1)
fmt.Printf("\tCNCI UUID: %s\n", cnci.ID)
fmt.Printf("\tTenant UUID: %s\n", cnci.TenantID)
fmt.Printf("\tIPv4: %s\n", cnci.IPv4)
fmt.Printf("\tSubnets:\n")
for _, subnet := range cnci.Subnets {
fmt.Printf("\t\t%s\n", subnet.Subnet)
}
}
}
func dumpCNCIDetails(cnciID string) {
var cnci payloads.CiaoCNCI
url := buildComputeURL("cncis/%s/detail", cnciID)
resp, err := sendHTTPRequest("GET", url, nil, nil)
if err != nil {
fatalf(err.Error())
}
err = unmarshalHTTPResponse(resp, &cnci)
if err != nil {
fatalf(err.Error())
}
fmt.Printf("\tCNCI UUID: %s\n", cnci.ID)
fmt.Printf("\tTenant UUID: %s\n", cnci.TenantID)
fmt.Printf("\tIPv4: %s\n", cnci.IPv4)
fmt.Printf("\tSubnets:\n")
for _, subnet := range cnci.Subnets {
fmt.Printf("\t\t%s\n", subnet.Subnet)
}
}
// createTenantInstance asks the controller to create up to 'instances'
// (minimum 1) instances of the given workload for the tenant, then
// prints the UUID of each created instance.  A non-empty label enables
// frame tracing on the new instances.
// Exits the process via fatalf on missing parameters or any error.
func createTenantInstance(tenant string, workload string, instances int, label string) {
	if tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	if workload == "" {
		fatalf("Missing required -workload parameter")
	}
	var server payloads.ComputeCreateServer
	var servers payloads.ComputeServers
	server.Server.Name = label
	server.Server.Workload = workload
	server.Server.MaxInstances = instances
	server.Server.MinInstances = 1
	serverBytes, err := json.Marshal(server)
	if err != nil {
		fatalf(err.Error())
	}
	body := bytes.NewReader(serverBytes)
	url := buildComputeURL("%s/servers", tenant)
	resp, err := sendHTTPRequest("POST", url, nil, body)
	if err != nil {
		fatalf(err.Error())
	}
	if resp.StatusCode != http.StatusAccepted {
		fatalf("Instance creation failed: %s", resp.Status)
	}
	// unmarshalHTTPResponse closes resp.Body.
	err = unmarshalHTTPResponse(resp, &servers)
	if err != nil {
		fatalf(err.Error())
	}
	for _, server := range servers.Servers {
		fmt.Printf("Created new instance: %s\n", server.ID)
	}
}
// deleteTenantInstance removes one instance belonging to the tenant
// and reports the deletion on stdout.
func deleteTenantInstance(tenant string, instance string) {
	target := buildComputeURL("%s/servers/%s", tenant, instance)

	response, err := sendHTTPRequest("DELETE", target, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusAccepted {
		fatalf("Instance deletion failed: %s", response.Status)
	}

	fmt.Printf("Deleted instance: %s\n", instance)
}
// actionAllTenantInstance POSTs the given os-* action (osStart, osStop
// or osDelete) against every instance of a tenant via the
// "<tenant>/servers/action" endpoint.
// Exits the process via fatalf on any error.
func actionAllTenantInstance(tenant string, osAction string) {
	var action payloads.CiaoServersAction
	url := buildComputeURL("%s/servers/action", tenant)
	action.Action = osAction
	actionBytes, err := json.Marshal(action)
	if err != nil {
		fatalf(err.Error())
	}
	body := bytes.NewReader(actionBytes)
	resp, err := sendHTTPRequest("POST", url, nil, body)
	if err != nil {
		fatalf(err.Error())
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		fatalf("Action %s on all instances failed: %s", osAction, resp.Status)
	}
	fmt.Printf("%s all instances for tenant %s\n", osAction, tenant)
}
// listNodeInstances prints per-instance usage statistics (CPU, memory,
// disk) for every instance hosted on the given compute node.
// Exits the process via fatalf when node is empty or on any error.
func listNodeInstances(node string) {
	if node == "" {
		fatalf("Missing required -cn parameter")
	}
	var servers payloads.CiaoServersStats
	url := buildComputeURL("nodes/%s/servers/detail", node)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &servers)
	if err != nil {
		fatalf(err.Error())
	}
	for i, server := range servers.Servers {
		fmt.Printf("Instance #%d\n", i+1)
		fmt.Printf("\tUUID: %s\n", server.ID)
		fmt.Printf("\tStatus: %s\n", server.Status)
		fmt.Printf("\tTenant UUID: %s\n", server.TenantID)
		fmt.Printf("\tIPv4: %s\n", server.IPv4)
		fmt.Printf("\tCPUs used: %d\n", server.VCPUUsage)
		fmt.Printf("\tMemory used: %d MB\n", server.MemUsage)
		fmt.Printf("\tDisk used: %d MB\n", server.DiskUsage)
	}
}
// dumpClusterStatus prints the cluster-wide node summary (total,
// ready, full, offline, maintenance) from the "nodes/summary" endpoint.
// Exits the process via fatalf on any request or decode error.
func dumpClusterStatus() {
	var status payloads.CiaoClusterStatus
	url := buildComputeURL("nodes/summary")
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &status)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("Total Nodes %d\n", status.Status.TotalNodes)
	fmt.Printf("\tReady %d\n", status.Status.TotalNodesReady)
	fmt.Printf("\tFull %d\n", status.Status.TotalNodesFull)
	fmt.Printf("\tOffline %d\n", status.Status.TotalNodesOffline)
	fmt.Printf("\tMaintenance %d\n", status.Status.TotalNodesMaintenance)
}
// OpenStack server action names accepted by the compute API's
// server action endpoints.
const (
	osStart  = "os-start"
	osStop   = "os-stop"
	osDelete = "os-delete"
)
// startStopInstance sends an os-start or os-stop action to a single
// instance and reports the result on stdout.
// Exits via fatalf on an unsupported action or any request failure.
func startStopInstance(tenant, instance string, action action) {
	var actionBytes []byte

	switch action {
	case computeActionStart:
		actionBytes = []byte(osStart)
	case computeActionStop:
		actionBytes = []byte(osStop)
	default:
		fatalf("Unsupported action %d\n", action)
	}

	body := bytes.NewReader(actionBytes)
	url := buildComputeURL("%s/servers/%s/action", tenant, instance)

	resp, err := sendHTTPRequest("POST", url, nil, body)
	if err != nil {
		fatalf(err.Error())
	}
	// The body is never read here, but it must still be closed or the
	// underlying connection is leaked (siblings like deleteTenantInstance
	// already defer Close).
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusAccepted {
		fatalf("Instance action failed: %s", resp.Status)
	}

	switch action {
	case computeActionStart:
		fmt.Printf("Instance %s restarted\n", instance)
	case computeActionStop:
		fmt.Printf("Instance %s stopped\n", instance)
	}
}
// listAllLabels prints every known trace label together with the
// number of instances currently running under it.
// Exits the process via fatalf on any request or decode error.
func listAllLabels() {
	var traces payloads.CiaoTracesSummary
	url := buildComputeURL("traces")
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &traces)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("%d trace label(s) available\n", len(traces.Summaries))
	for i, summary := range traces.Summaries {
		fmt.Printf("\tLabel #%d: %s (%d instances running)\n", i+1, summary.Label, summary.Instances)
	}
}
// listClusterEvents prints stored events: cluster-wide when tenant is
// empty, otherwise only the given tenant's events.
// Exits the process via fatalf on any request or decode error.
func listClusterEvents(tenant string) {
	var events payloads.CiaoEvents
	var url string
	if tenant == "" {
		url = buildComputeURL("events")
	} else {
		url = buildComputeURL("%s/events", tenant)
	}
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &events)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("%d Ciao event(s):\n", len(events.Events))
	for i, event := range events.Events {
		fmt.Printf("\t[%d] %v: %s:%s (Tenant %s)\n", i+1, event.Timestamp, event.EventType, event.Message, event.TenantID)
	}
}
// deleteAllEvents clears the stored cluster event log via a DELETE on
// the "events" endpoint.
func deleteAllEvents() {
	target := buildComputeURL("events")

	response, err := sendHTTPRequest("DELETE", target, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusAccepted {
		fatalf("Events log deletion failed: %s", response.Status)
	}

	fmt.Printf("Deleted all event logs\n")
}
// dumpTraceData prints the aggregated trace statistics (instance
// count, elapsed times and per-component variances) recorded under the
// given label.
// Exits the process via fatalf on any request or decode error.
func dumpTraceData(label string) {
	var traceData payloads.CiaoTraceData
	url := buildComputeURL("traces/%s", label)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &traceData)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("Trace data for [%s]:\n", label)
	fmt.Printf("\tNumber of instances: %d\n", traceData.Summary.NumInstances)
	fmt.Printf("\tTotal time elapsed : %f seconds\n", traceData.Summary.TotalElapsed)
	fmt.Printf("\tAverage time elapsed : %f seconds\n", traceData.Summary.AverageElapsed)
	fmt.Printf("\tAverage Controller time: %f seconds\n", traceData.Summary.AverageControllerElapsed)
	fmt.Printf("\tAverage Scheduler time : %f seconds\n", traceData.Summary.AverageSchedulerElapsed)
	fmt.Printf("\tAverage Launcher time : %f seconds\n", traceData.Summary.AverageLauncherElapsed)
	fmt.Printf("\tController variance : %f seconds²\n", traceData.Summary.VarianceController)
	fmt.Printf("\tScheduler variance : %f seconds²\n", traceData.Summary.VarianceScheduler)
	fmt.Printf("\tLauncher variance : %f seconds²\n", traceData.Summary.VarianceLauncher)
}
// getCiaoEnvVariables seeds unset command line flags from the CIAO_*
// environment variables.  A value from the environment never overrides
// a flag that was explicitly set on the command line.
func getCiaoEnvVariables() {
	identity := os.Getenv(ciaoIdentityEnv)
	controller := os.Getenv(ciaoControllerEnv)
	username := os.Getenv(ciaoUsernameEnv)
	password := os.Getenv(ciaoPasswordEnv)
	port := os.Getenv(ciaoComputePortEnv)

	infof("Ciao environment variables:\n")
	infof("\t%s:%s\n", ciaoIdentityEnv, identity)
	infof("\t%s:%s\n", ciaoControllerEnv, controller)
	infof("\t%s:%s\n", ciaoUsernameEnv, username)
	infof("\t%s:%s\n", ciaoPasswordEnv, password)
	infof("\t%s:%s\n", ciaoComputePortEnv, port)

	if identity != "" && *identityURL == "" {
		*identityURL = identity
	}

	if controller != "" && *controllerURL == "" {
		*controllerURL = controller
	}

	if username != "" && *identityUser == "" {
		*identityUser = username
	}

	if password != "" && *identityPassword == "" {
		*identityPassword = password
	}

	if port != "" && *computePort == openstackComputePort {
		// The Atoi error used to be discarded, which silently set the
		// port to 0 on a malformed value.  Keep the default instead.
		p, err := strconv.Atoi(port)
		if err != nil {
			warningf("Ignoring invalid %s value %q\n", ciaoComputePortEnv, port)
		} else {
			*computePort = p
		}
	}
}
// checkCompulsoryOptions verifies that the identity URL, username,
// password and controller URL are all set, exiting with a combined
// message listing everything that is missing.
func checkCompulsoryOptions() {
	fatal := ""

	if *identityURL == "" {
		fatal += "Missing required identity URL\n"
	}

	if *identityUser == "" {
		fatal += "Missing required username\n"
	}

	if *identityPassword == "" {
		fatal += "Missing required password\n"
	}

	if *controllerURL == "" {
		fatal += "Missing required Ciao controller URL\n"
	}

	if fatal != "" {
		// fatalf treats its first argument as a format string; pass the
		// message as a value so a stray '%' can never be misinterpreted.
		fatalf("%s", fatal)
	}
}
// main parses flags and CIAO_* environment variables, resolves the
// tenant scope, obtains a scoped Keystone token, and then runs every
// requested operation in sequence.  All helpers exit the process via
// fatalf on error.
func main() {
	var err error
	flag.Parse()
	getCiaoEnvVariables()
	checkCompulsoryOptions()
	/* First check if we're being asked for a tenants list */
	if *listTenants == true {
		listAllTenants()
		return
	}
	if *listUserTenants == true {
		listUserSpecificTenants(*identityUser, *identityPassword)
		return
	}
	/* If we're missing the tenant name let's try to fetch one */
	if *tenantName == "" {
		*tenantName, *tenantID, err = getTenant(*identityUser, *identityPassword, *tenantID)
		if err != nil {
			fatalf(err.Error())
		}
		warningf("Unspecified scope, using (%s, %s)", *tenantName, *tenantID)
	}
	// Every operation below needs a token scoped to the tenant.
	scopedToken, *tenantID, _, err = getScopedToken(*identityUser, *identityPassword, *tenantName)
	if err != nil {
		fatalf(err.Error())
	}
	// Operations are not mutually exclusive: each requested one runs in turn.
	if *listInstances == true {
		listAllInstances(*tenantID, "", *instanceMarker, *instanceOffset, *listLength)
	}
	if *listWlInstances == true {
		listAllInstances("", *workload, *instanceMarker, *instanceOffset, *listLength)
	}
	if *listCNInstances == true {
		listNodeInstances(*computeNode)
	}
	if *listQuotas == true {
		listTenantQuotas(*tenantID)
	}
	if *listResources == true {
		listTenantResources(*tenantID)
	}
	if *listWorkloads == true {
		listTenantWorkloads(*tenantID)
	}
	if *listComputeNodes == true {
		listAllComputeNodes()
	}
	if *listCNCIs == true {
		listAllCNCIs()
	}
	if *clusterStatus == true {
		dumpClusterStatus()
	}
	if *launchInstances == true {
		createTenantInstance(*tenantID, *workload, *instances, *instanceLabel)
	}
	if *deleteInstance == true {
		if len(*tenantID) == 0 {
			fatalf("Missing required -tenant-id parameter")
		}
		if len(*instance) == 0 && *allInstances == false {
			fatalf("Missing required -instance parameter")
		}
		// -all-instances deletes the tenant's whole fleet in one action.
		if *allInstances == false {
			deleteTenantInstance(*tenantID, *instance)
		} else {
			actionAllTenantInstance(*tenantID, osDelete)
		}
	}
	if *dumpCNCI == true {
		if len(*cnci) == 0 {
			fatalf("Missing required -cnci parameter")
		}
		dumpCNCIDetails(*cnci)
	}
	if *stopInstance == true || *restartInstance == true {
		if len(*tenantID) == 0 {
			fatalf("Missing required -tenant-id parameter")
		}
		if len(*instance) == 0 {
			fatalf("Missing required -instance parameter")
		}
		// -stop-instance wins if both flags were given.
		action := computeActionStart
		if *stopInstance == true {
			action = computeActionStop
		}
		startStopInstance(*tenantID, *instance, action)
	}
	if *listLabels == true {
		listAllLabels()
	}
	if *dumpLabel != "" {
		dumpTraceData(*dumpLabel)
	}
	if *listEvents == true {
		if len(*tenantID) == 0 {
			fatalf("Missing required -tenant-id parameter")
		}
		listClusterEvents(*tenantID)
	}
	if *listAllEvents == true {
		listClusterEvents("")
	}
	if *deleteEvents == true {
		deleteAllEvents()
	}
}
ciao-cli: Simplify dumpCNCIDetails call
Exit from dumpCNCIDetails itself when the given CNCI parameter is invalid, so callers no longer need to validate it first.
Signed-off-by: Samuel Ortiz <0ba86cb3f08bbb861958e54bd3438887adb4263c@linux.intel.com>
//
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"bytes"
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strconv"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
"github.com/01org/ciao/payloads"
)
// scopedToken is the Keystone token scoped to the selected tenant; it
// is assigned once in main() and then attached to every compute
// request by sendHTTPRequest.
var scopedToken string

// Defaults for the OpenStack compute API endpoint.
const openstackComputePort = 8774
const openstackComputeVersion = "v2.1"

// action selects which os-* operation startStopInstance sends.
type action uint8

const (
	computeActionStart action = iota
	computeActionStop
)
// debugf logs at glog verbosity level 2 with a ciao-cli DEBUG prefix.
func debugf(format string, args ...interface{}) {
	glog.V(2).Infof("ciao-cli DEBUG: "+format, args...)
}
// infof logs at glog verbosity level 1 with a ciao-cli INFO prefix.
func infof(format string, args ...interface{}) {
	glog.V(1).Infof("ciao-cli INFO: "+format, args...)
}
// warningf logs a warning with a ciao-cli WARNING prefix.
func warningf(format string, args ...interface{}) {
	glog.Warningf("ciao-cli WARNING: "+format, args...)
}
// errorf logs an error with a ciao-cli ERROR prefix.
func errorf(format string, args ...interface{}) {
	glog.Errorf("ciao-cli ERROR: "+format, args...)
}
// fatalf logs a fatal ciao-cli message and terminates the process.
// glog.Fatalf itself calls os.Exit after logging, so the explicit
// os.Exit(1) that used to follow it was unreachable dead code.
func fatalf(format string, args ...interface{}) {
	glog.Fatalf("ciao-cli FATAL: "+format, args...)
}
// Command line flags.  Some values may also be seeded from CIAO_*
// environment variables (see getCiaoEnvVariables).
var (
	allInstances     = flag.Bool("all-instances", false, "Select all instances")
	instanceLabel    = flag.String("instance-label", "", "Set a frame label. This will trigger frame tracing")
	listInstances    = flag.Bool("list-instances", false, "List all instances for a tenant")
	listCNInstances  = flag.Bool("list-cn-instances", false, "List all instances for a compute node")
	listWlInstances  = flag.Bool("list-wl-instances", false, "List all instances for a workload")
	listQuotas       = flag.Bool("list-quotas", false, "List quotas status for a tenant")
	listResources    = flag.Bool("list-resources", false, "List consumed resources for a tenant for the past 15mn")
	listWorkloads    = flag.Bool("list-workloads", false, "List all workloads")
	listUserTenants  = flag.Bool("list-tenants", false, "List all tenants for a given user")
	listTenants      = flag.Bool("list-all-tenants", false, "List all tenants")
	listComputeNodes = flag.Bool("list-cns", false, "List all compute nodes")
	listCNCIs        = flag.Bool("list-cncis", false, "List all CNCIs")
	// "reponse" typo fixed in the help text.
	listLength    = flag.Int("list-length", 0, "Maximum number of items in the response")
	listLabels    = flag.Bool("list-labels", false, "List all trace labels")
	listAllEvents = flag.Bool("list-all-events", false, "List all cluster events")
	listEvents    = flag.Bool("list-events", false, "List all events for a tenant")
	dumpCNCI      = flag.Bool("dump-cnci", false, "Dump a CNCI details")
	dumpToken     = flag.Bool("dump-token", false, "Dump keystone tokens")
	// Help text previously duplicated -list-cns; this flag drives dumpClusterStatus.
	clusterStatus   = flag.Bool("cluster-status", false, "Dump the cluster status summary")
	launchInstances = flag.Bool("launch-instances", false, "Launch Ciao instances")
	deleteInstance  = flag.Bool("delete-instance", false, "Delete a Ciao instance")
	deleteEvents    = flag.Bool("delete-events", false, "Delete all stored Ciao events")
	stopInstance    = flag.Bool("stop-instance", false, "Stop a Ciao instance")
	restartInstance = flag.Bool("restart-instance", false, "Restart a Ciao instance")
	workload        = flag.String("workload", "", "Workload UUID")
	instances       = flag.Int("instances", 1, "Number of instances to create")
	instance        = flag.String("instance", "", "Instance UUID")
	instanceMarker  = flag.String("instance-marker", "", "Show instance list starting from the next instance after instance-marker")
	instanceOffset  = flag.Int("instance-offset", 0, "Show instance list starting from instance #instance-offset")
	tenantID        = flag.String("tenant-id", "", "Tenant UUID")
	tenantName      = flag.String("tenant-name", "", "Tenant name")
	computeNode     = flag.String("cn", "", "Compute node UUID")
	cnci            = flag.String("cnci", "", "CNCI UUID")
	controllerURL   = flag.String("controller", "", "Controller URL")
	computePort     = flag.Int("computeport", openstackComputePort, "Openstack Compute API port")
	identityURL     = flag.String("identity", "", "Keystone URL")
	identityUser    = flag.String("username", "", "Openstack Service Username")
	// Help text previously said "Username" — copy/paste error.
	identityPassword = flag.String("password", "", "Openstack Service Password")
	dumpLabel        = flag.String("dump-label", "", "Dump all trace data for a given label")
)
// Environment variables that can seed the corresponding command line
// flags (consumed by getCiaoEnvVariables).
const (
	ciaoIdentityEnv    = "CIAO_IDENTITY"
	ciaoControllerEnv  = "CIAO_CONTROLLER"
	ciaoUsernameEnv    = "CIAO_USERNAME"
	ciaoPasswordEnv    = "CIAO_PASSWORD"
	ciaoComputePortEnv = "CIAO_COMPUTEPORT"
)
// queryValue is a single name=value URL query parameter appended to a
// compute API request by sendHTTPRequestToken.
type queryValue struct {
	name, value string
}
// buildComputeURL returns the full HTTPS URL of a compute API
// resource: controller host, compute port and API version, followed by
// the formatted resource path.
func buildComputeURL(format string, args ...interface{}) string {
	base := fmt.Sprintf("https://%s:%d/%s/", *controllerURL, *computePort, openstackComputeVersion)
	return fmt.Sprintf(base+format, args...)
}
// sendHTTPRequestToken sends an HTTP request to url with optional
// query values, an optional X-Auth-Token header and an optional JSON
// body.  TLS certificate verification is deliberately skipped.  For a
// status >= 400 the response body is read and folded into the returned
// error.
// NOTE(review): a fresh Transport/Client is built on every call, so
// connections are never reused — consider a package-level client.
func sendHTTPRequestToken(method string, url string, values []queryValue, token string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest(method, os.ExpandEnv(url), body)
	if err != nil {
		return nil, err
	}
	infof("Sending %s %s\n", method, url)
	if values != nil {
		v := req.URL.Query()
		for _, value := range values {
			infof("Adding URL query %s=%s\n", value.name, value.value)
			v.Add(value.name, value.value)
		}
		req.URL.RawQuery = v.Encode()
	}
	if token != "" {
		req.Header.Add("X-Auth-Token", token)
	}
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Accept", "application/json")
	}
	warningf("Skipping TLS verification\n")
	tlsConfig := &tls.Config{InsecureSkipVerify: true}
	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	client := &http.Client{Transport: transport}
	resp, err := client.Do(req)
	if err != nil {
		errorf("Could not send HTTP request %s\n", err)
		return nil, err
	}
	infof("Got HTTP response (status %s)\n", resp.Status)
	if resp.StatusCode >= http.StatusBadRequest {
		respBody, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			errorf("Could not read the HTTP response %s\n", err)
			spew.Dump(resp.Body)
			return resp, err
		}
		return resp, fmt.Errorf("HTTP Error [%d] for [%s %s]: %s", resp.StatusCode, method, url, respBody)
	}
	return resp, err
}
// sendHTTPRequest is sendHTTPRequestToken using the package-level
// tenant-scoped Keystone token.
func sendHTTPRequest(method string, url string, values []queryValue, body io.Reader) (*http.Response, error) {
	return sendHTTPRequestToken(method, url, values, scopedToken, body)
}
// unmarshalHTTPResponse reads and closes the response body and decodes
// it as JSON into v, dumping the result at verbosity level 2.
func unmarshalHTTPResponse(resp *http.Response, v interface{}) error {
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		errorf("Could not read the HTTP response %s\n", err)
		return err
	}
	err = json.Unmarshal(body, v)
	if err != nil {
		errorf("Could not unmarshal the HTTP response %s\n", err)
		return err
	}
	if glog.V(2) {
		spew.Dump(v)
	}
	return nil
}
// listAllInstances lists instances either for a tenant or for a
// workload (flavor), with optional pagination through the limit,
// offset and marker query parameters.
// NOTE(review): when both tenant and workload are empty, url is never
// assigned and the request would go to ""; callers must pass at least
// one of them.
func listAllInstances(tenant string, workload string, marker string, offset int, limit int) {
	var servers payloads.ComputeServers
	var url string
	if *listInstances == true && tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	if *listWlInstances == true && workload == "" {
		fatalf("Missing required -workload parameter")
	}
	if tenant != "" {
		url = buildComputeURL("%s/servers/detail", tenant)
	} else if workload != "" {
		url = buildComputeURL("flavors/%s/servers/detail", workload)
	}
	var values []queryValue
	if limit > 0 {
		values = append(values, queryValue{
			name:  "limit",
			value: fmt.Sprintf("%d", limit),
		})
	}
	if offset > 0 {
		values = append(values, queryValue{
			name:  "offset",
			value: fmt.Sprintf("%d", offset),
		})
	}
	if marker != "" {
		values = append(values, queryValue{
			name:  "marker",
			value: marker,
		})
	}
	resp, err := sendHTTPRequest("GET", url, values, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &servers)
	if err != nil {
		fatalf(err.Error())
	}
	for i, server := range servers.Servers {
		fmt.Printf("Instance #%d\n", i+1)
		fmt.Printf("\tUUID: %s\n", server.ID)
		fmt.Printf("\tStatus: %s\n", server.Status)
		// Indexing Private[0] assumes at least one private address per
		// server — TODO confirm against the controller's response.
		fmt.Printf("\tPrivate IP: %s\n", server.Addresses.Private[0].Addr)
		fmt.Printf("\tMAC Address: %s\n", server.Addresses.Private[0].OSEXTIPSMACMacAddr)
		fmt.Printf("\tCN UUID: %s\n", server.HostID)
		fmt.Printf("\tImage UUID: %s\n", server.Image.ID)
		fmt.Printf("\tTenant UUID: %s\n", server.TenantID)
		if server.SSHIP != "" {
			fmt.Printf("\tSSH IP: %s\n", server.SSHIP)
			fmt.Printf("\tSSH Port: %d\n", server.SSHPort)
		}
	}
}
// limitToString renders a quota limit for display: -1 means the quota
// is unlimited, any other value is shown as a plain decimal number.
func limitToString(limit int) string {
	switch limit {
	case -1:
		return "Unlimited"
	default:
		return fmt.Sprintf("%d", limit)
	}
}
// listTenantQuotas prints usage versus limit for instances, CPUs,
// memory and disk for the given tenant (a -1 limit renders as
// "Unlimited", see limitToString).
// Exits the process via fatalf when tenant is empty or on any error.
func listTenantQuotas(tenant string) {
	if tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	var resources payloads.CiaoTenantResources
	url := buildComputeURL("%s/quotas", tenant)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &resources)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("Quotas for tenant %s:\n", resources.ID)
	fmt.Printf("\tInstances: %d | %s\n", resources.InstanceUsage, limitToString(resources.InstanceLimit))
	fmt.Printf("\tCPUs: %d | %s\n", resources.VCPUUsage, limitToString(resources.VCPULimit))
	fmt.Printf("\tMemory: %d | %s\n", resources.MemUsage, limitToString(resources.MemLimit))
	fmt.Printf("\tDisk: %d | %s\n", resources.DiskUsage, limitToString(resources.DiskLimit))
}
// listTenantResources prints the tenant's resource usage samples for
// the last 15 minutes (start_date/end_date query window, RFC3339).
// Exits the process via fatalf when tenant is empty or on any error.
func listTenantResources(tenant string) {
	if tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	var usage payloads.CiaoUsageHistory
	url := buildComputeURL("%s/resources", tenant)
	now := time.Now()
	values := []queryValue{
		{
			name:  "start_date",
			value: now.Add(-15 * time.Minute).Format(time.RFC3339),
		},
		{
			name:  "end_date",
			value: now.Format(time.RFC3339),
		},
	}
	resp, err := sendHTTPRequest("GET", url, values, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &usage)
	if err != nil {
		fatalf(err.Error())
	}
	if len(usage.Usages) == 0 {
		fmt.Printf("No usage history for %s\n", tenant)
		return
	}
	fmt.Printf("Usage for tenant %s:\n", tenant)
	for _, u := range usage.Usages {
		fmt.Printf("\t%v: [%d CPUs] [%d MB memory] [%d MB disk]\n", u.Timestamp, u.VCPU, u.Memory, u.Disk)
	}
}
// workloadDetail fetches one flavor of the tenant and renders it as a
// multi-line human readable string.
// NOTE(review): the "Image UUID" field is filled from
// flavor.Flavor.Disk — presumably the Disk field carries the image
// UUID; confirm against the payloads definition.  Also "UUID:%s" lacks
// a space after the colon, unlike the other labels.
func workloadDetail(tenant string, workload string) string {
	var flavor payloads.ComputeFlavorDetails
	url := buildComputeURL("%s/flavors/%s", tenant, workload)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &flavor)
	if err != nil {
		fatalf(err.Error())
	}
	return fmt.Sprintf("\tName: %s\n\tUUID:%s\n\tImage UUID: %s\n\tCPUs: %d\n\tMemory: %d MB\n",
		flavor.Flavor.Name, flavor.Flavor.ID, flavor.Flavor.Disk, flavor.Flavor.Vcpus, flavor.Flavor.RAM)
}
// listTenantWorkloads prints a detailed entry for every workload
// (flavor) available to the given tenant.
// Exits via fatalf when tenant is empty or on any request/decode error.
func listTenantWorkloads(tenant string) {
	if tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	// The old `if tenant == "" { tenant = "faketenant" }` branch was
	// unreachable after the guard above and has been removed.

	var flavors payloads.ComputeFlavors
	url := buildComputeURL("%s/flavors", tenant)

	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}

	err = unmarshalHTTPResponse(resp, &flavors)
	if err != nil {
		fatalf(err.Error())
	}

	for i, flavor := range flavors.Flavors {
		fmt.Printf("Workload %d\n", i+1)
		// Print, not Printf: the detail string is data, not a format
		// string (a '%' in a flavor name would otherwise be mangled).
		fmt.Print(workloadDetail(tenant, flavor.ID))
	}
}
// listAllTenants prints the UUID and name of every project known to
// the identity service.
func listAllTenants() {
	projects, err := getAllProjects(*identityUser, *identityPassword)
	if err != nil {
		fatalf(err.Error())
	}

	for i, p := range projects.Projects {
		fmt.Printf("Tenant [%d]\n\tUUID: %s\n\tName: %s\n", i+1, p.ID, p.Name)
	}
}
// listUserSpecificTenants prints the UUID and name of every project
// the given user is a member of.
// Exits the process via fatalf on any identity service error.
func listUserSpecificTenants(username, password string) {
	projects, err := getUserProjects(username, password)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("Projects for user %s\n", username)
	for _, project := range projects {
		fmt.Printf("\tUUID: %s\n", project.ID)
		fmt.Printf("\tName: %s\n", project.Name)
	}
}
// listAllComputeNodes fetches the "nodes" compute endpoint and prints
// a numbered status/load/capacity summary for every compute node.
// Exits the process via fatalf on any request or decode error.
func listAllComputeNodes() {
	var nodes payloads.CiaoComputeNodes
	url := buildComputeURL("nodes")
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &nodes)
	if err != nil {
		fatalf(err.Error())
	}
	for i, node := range nodes.Nodes {
		fmt.Printf("Compute Node %d\n", i+1)
		fmt.Printf("\tUUID: %s\n", node.ID)
		fmt.Printf("\tStatus: %s\n", node.Status)
		fmt.Printf("\tLoad: %d\n", node.Load)
		fmt.Printf("\tAvailable/Total memory: %d/%d MB\n", node.MemAvailable, node.MemTotal)
		fmt.Printf("\tAvailable/Total disk: %d/%d MB\n", node.DiskAvailable, node.DiskTotal)
		fmt.Printf("\tTotal Instances: %d\n", node.TotalInstances)
		fmt.Printf("\t\tRunning Instances: %d\n", node.TotalRunningInstances)
		fmt.Printf("\t\tPending Instances: %d\n", node.TotalPendingInstances)
		fmt.Printf("\t\tPaused Instances: %d\n", node.TotalPausedInstances)
	}
}
// listAllCNCIs fetches the "cncis" endpoint and prints every CNCI with
// its tenant UUID, IPv4 address and hosted subnets.
// Exits the process via fatalf on any request or decode error.
func listAllCNCIs() {
	var cncis payloads.CiaoCNCIs
	url := buildComputeURL("cncis")
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &cncis)
	if err != nil {
		fatalf(err.Error())
	}
	for i, cnci := range cncis.CNCIs {
		fmt.Printf("CNCI %d\n", i+1)
		fmt.Printf("\tCNCI UUID: %s\n", cnci.ID)
		fmt.Printf("\tTenant UUID: %s\n", cnci.TenantID)
		fmt.Printf("\tIPv4: %s\n", cnci.IPv4)
		fmt.Printf("\tSubnets:\n")
		for _, subnet := range cnci.Subnets {
			fmt.Printf("\t\t%s\n", subnet.Subnet)
		}
	}
}
// dumpCNCIDetails prints the details (UUID, tenant, IPv4 address and
// subnets) of a single CNCI fetched from "cncis/<id>/detail".
// Exits via fatalf when cnciID is empty or on any request/decode error.
func dumpCNCIDetails(cnciID string) {
	if cnciID == "" {
		fatalf("Missing required -cnci parameter")
	}
	var cnci payloads.CiaoCNCI
	url := buildComputeURL("cncis/%s/detail", cnciID)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &cnci)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("\tCNCI UUID: %s\n", cnci.ID)
	fmt.Printf("\tTenant UUID: %s\n", cnci.TenantID)
	fmt.Printf("\tIPv4: %s\n", cnci.IPv4)
	fmt.Printf("\tSubnets:\n")
	for _, subnet := range cnci.Subnets {
		fmt.Printf("\t\t%s\n", subnet.Subnet)
	}
}
// createTenantInstance asks the controller to create up to 'instances'
// (minimum 1) instances of the given workload for the tenant, then
// prints the UUID of each created instance.  A non-empty label enables
// frame tracing on the new instances.
// Exits the process via fatalf on missing parameters or any error.
func createTenantInstance(tenant string, workload string, instances int, label string) {
	if tenant == "" {
		fatalf("Missing required -tenant-id parameter")
	}
	if workload == "" {
		fatalf("Missing required -workload parameter")
	}
	var server payloads.ComputeCreateServer
	var servers payloads.ComputeServers
	server.Server.Name = label
	server.Server.Workload = workload
	server.Server.MaxInstances = instances
	server.Server.MinInstances = 1
	serverBytes, err := json.Marshal(server)
	if err != nil {
		fatalf(err.Error())
	}
	body := bytes.NewReader(serverBytes)
	url := buildComputeURL("%s/servers", tenant)
	resp, err := sendHTTPRequest("POST", url, nil, body)
	if err != nil {
		fatalf(err.Error())
	}
	if resp.StatusCode != http.StatusAccepted {
		fatalf("Instance creation failed: %s", resp.Status)
	}
	// unmarshalHTTPResponse closes resp.Body.
	err = unmarshalHTTPResponse(resp, &servers)
	if err != nil {
		fatalf(err.Error())
	}
	for _, server := range servers.Servers {
		fmt.Printf("Created new instance: %s\n", server.ID)
	}
}
// deleteTenantInstance removes one instance belonging to the tenant
// and reports the deletion on stdout.
// Exits the process via fatalf on any error or non-202 status.
func deleteTenantInstance(tenant string, instance string) {
	url := buildComputeURL("%s/servers/%s", tenant, instance)
	resp, err := sendHTTPRequest("DELETE", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		fatalf("Instance deletion failed: %s", resp.Status)
	}
	fmt.Printf("Deleted instance: %s\n", instance)
}
// actionAllTenantInstance POSTs the given os-* action (osStart, osStop
// or osDelete) against every instance of a tenant via the
// "<tenant>/servers/action" endpoint.
// Exits the process via fatalf on any error or non-202 status.
func actionAllTenantInstance(tenant string, osAction string) {
	var action payloads.CiaoServersAction
	url := buildComputeURL("%s/servers/action", tenant)
	action.Action = osAction
	actionBytes, err := json.Marshal(action)
	if err != nil {
		fatalf(err.Error())
	}
	body := bytes.NewReader(actionBytes)
	resp, err := sendHTTPRequest("POST", url, nil, body)
	if err != nil {
		fatalf(err.Error())
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		fatalf("Action %s on all instances failed: %s", osAction, resp.Status)
	}
	fmt.Printf("%s all instances for tenant %s\n", osAction, tenant)
}
// listNodeInstances prints per-instance usage statistics (CPU, memory,
// disk) for every instance hosted on the given compute node.
// Exits the process via fatalf when node is empty or on any error.
func listNodeInstances(node string) {
	if node == "" {
		fatalf("Missing required -cn parameter")
	}
	var servers payloads.CiaoServersStats
	url := buildComputeURL("nodes/%s/servers/detail", node)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &servers)
	if err != nil {
		fatalf(err.Error())
	}
	for i, server := range servers.Servers {
		fmt.Printf("Instance #%d\n", i+1)
		fmt.Printf("\tUUID: %s\n", server.ID)
		fmt.Printf("\tStatus: %s\n", server.Status)
		fmt.Printf("\tTenant UUID: %s\n", server.TenantID)
		fmt.Printf("\tIPv4: %s\n", server.IPv4)
		fmt.Printf("\tCPUs used: %d\n", server.VCPUUsage)
		fmt.Printf("\tMemory used: %d MB\n", server.MemUsage)
		fmt.Printf("\tDisk used: %d MB\n", server.DiskUsage)
	}
}
// dumpClusterStatus prints the cluster-wide node summary (total,
// ready, full, offline, maintenance) from the "nodes/summary" endpoint.
// Exits the process via fatalf on any request or decode error.
func dumpClusterStatus() {
	var status payloads.CiaoClusterStatus
	url := buildComputeURL("nodes/summary")
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &status)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("Total Nodes %d\n", status.Status.TotalNodes)
	fmt.Printf("\tReady %d\n", status.Status.TotalNodesReady)
	fmt.Printf("\tFull %d\n", status.Status.TotalNodesFull)
	fmt.Printf("\tOffline %d\n", status.Status.TotalNodesOffline)
	fmt.Printf("\tMaintenance %d\n", status.Status.TotalNodesMaintenance)
}
// OpenStack server action names accepted by the compute API's
// server action endpoints.
const (
	osStart  = "os-start"
	osStop   = "os-stop"
	osDelete = "os-delete"
)
// startStopInstance sends an os-start or os-stop action to a single
// instance and reports the result on stdout.
// Exits via fatalf on an unsupported action or any request failure.
func startStopInstance(tenant, instance string, action action) {
	var actionBytes []byte

	switch action {
	case computeActionStart:
		actionBytes = []byte(osStart)
	case computeActionStop:
		actionBytes = []byte(osStop)
	default:
		fatalf("Unsupported action %d\n", action)
	}

	body := bytes.NewReader(actionBytes)
	url := buildComputeURL("%s/servers/%s/action", tenant, instance)

	resp, err := sendHTTPRequest("POST", url, nil, body)
	if err != nil {
		fatalf(err.Error())
	}
	// The body is never read here, but it must still be closed or the
	// underlying connection is leaked (siblings like deleteTenantInstance
	// already defer Close).
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusAccepted {
		fatalf("Instance action failed: %s", resp.Status)
	}

	switch action {
	case computeActionStart:
		fmt.Printf("Instance %s restarted\n", instance)
	case computeActionStop:
		fmt.Printf("Instance %s stopped\n", instance)
	}
}
// listAllLabels prints every known trace label together with the
// number of instances currently running under it.
// Exits the process via fatalf on any request or decode error.
func listAllLabels() {
	var traces payloads.CiaoTracesSummary
	url := buildComputeURL("traces")
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &traces)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("%d trace label(s) available\n", len(traces.Summaries))
	for i, summary := range traces.Summaries {
		fmt.Printf("\tLabel #%d: %s (%d instances running)\n", i+1, summary.Label, summary.Instances)
	}
}
// listClusterEvents prints stored events: cluster-wide when tenant is
// empty, otherwise only the given tenant's events.
// Exits the process via fatalf on any request or decode error.
func listClusterEvents(tenant string) {
	var events payloads.CiaoEvents
	var url string
	if tenant == "" {
		url = buildComputeURL("events")
	} else {
		url = buildComputeURL("%s/events", tenant)
	}
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &events)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("%d Ciao event(s):\n", len(events.Events))
	for i, event := range events.Events {
		fmt.Printf("\t[%d] %v: %s:%s (Tenant %s)\n", i+1, event.Timestamp, event.EventType, event.Message, event.TenantID)
	}
}
// deleteAllEvents clears the stored cluster event log via a DELETE on
// the "events" endpoint.
// Exits the process via fatalf on any error or non-202 status.
func deleteAllEvents() {
	url := buildComputeURL("events")
	resp, err := sendHTTPRequest("DELETE", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		fatalf("Events log deletion failed: %s", resp.Status)
	}
	fmt.Printf("Deleted all event logs\n")
}
// dumpTraceData prints the aggregated trace statistics (instance
// count, elapsed times and per-component variances) recorded under the
// given label.
// Exits the process via fatalf on any request or decode error.
func dumpTraceData(label string) {
	var traceData payloads.CiaoTraceData
	url := buildComputeURL("traces/%s", label)
	resp, err := sendHTTPRequest("GET", url, nil, nil)
	if err != nil {
		fatalf(err.Error())
	}
	err = unmarshalHTTPResponse(resp, &traceData)
	if err != nil {
		fatalf(err.Error())
	}
	fmt.Printf("Trace data for [%s]:\n", label)
	fmt.Printf("\tNumber of instances: %d\n", traceData.Summary.NumInstances)
	fmt.Printf("\tTotal time elapsed : %f seconds\n", traceData.Summary.TotalElapsed)
	fmt.Printf("\tAverage time elapsed : %f seconds\n", traceData.Summary.AverageElapsed)
	fmt.Printf("\tAverage Controller time: %f seconds\n", traceData.Summary.AverageControllerElapsed)
	fmt.Printf("\tAverage Scheduler time : %f seconds\n", traceData.Summary.AverageSchedulerElapsed)
	fmt.Printf("\tAverage Launcher time : %f seconds\n", traceData.Summary.AverageLauncherElapsed)
	fmt.Printf("\tController variance : %f seconds²\n", traceData.Summary.VarianceController)
	fmt.Printf("\tScheduler variance : %f seconds²\n", traceData.Summary.VarianceScheduler)
	fmt.Printf("\tLauncher variance : %f seconds²\n", traceData.Summary.VarianceLauncher)
}
// getCiaoEnvVariables seeds unset command line flags from the CIAO_*
// environment variables.  A value from the environment never overrides
// a flag that was explicitly set on the command line.
func getCiaoEnvVariables() {
	identity := os.Getenv(ciaoIdentityEnv)
	controller := os.Getenv(ciaoControllerEnv)
	username := os.Getenv(ciaoUsernameEnv)
	password := os.Getenv(ciaoPasswordEnv)
	port := os.Getenv(ciaoComputePortEnv)

	infof("Ciao environment variables:\n")
	infof("\t%s:%s\n", ciaoIdentityEnv, identity)
	infof("\t%s:%s\n", ciaoControllerEnv, controller)
	infof("\t%s:%s\n", ciaoUsernameEnv, username)
	infof("\t%s:%s\n", ciaoPasswordEnv, password)
	infof("\t%s:%s\n", ciaoComputePortEnv, port)

	if identity != "" && *identityURL == "" {
		*identityURL = identity
	}

	if controller != "" && *controllerURL == "" {
		*controllerURL = controller
	}

	if username != "" && *identityUser == "" {
		*identityUser = username
	}

	if password != "" && *identityPassword == "" {
		*identityPassword = password
	}

	if port != "" && *computePort == openstackComputePort {
		// The Atoi error used to be discarded, which silently set the
		// port to 0 on a malformed value.  Keep the default instead.
		p, err := strconv.Atoi(port)
		if err != nil {
			warningf("Ignoring invalid %s value %q\n", ciaoComputePortEnv, port)
		} else {
			*computePort = p
		}
	}
}
// checkCompulsoryOptions aborts the program with a message listing every
// required option that is still unset after flags and environment were
// merged.
func checkCompulsoryOptions() {
	missing := ""
	required := []struct {
		value *string
		msg   string
	}{
		{identityURL, "Missing required identity URL\n"},
		{identityUser, "Missing required username\n"},
		{identityPassword, "Missing required password\n"},
		{controllerURL, "Missing required Ciao controller URL\n"},
	}
	for _, opt := range required {
		if *opt.value == "" {
			missing += opt.msg
		}
	}
	if missing != "" {
		fatalf(missing)
	}
}
// main drives the ciao command line client: it merges environment
// variables with command line flags, authenticates against the identity
// service, then executes every action the user requested via flags.
// NOTE(review): the action flags are not mutually exclusive — each
// matching flag runs in the order coded below.
func main() {
	var err error

	flag.Parse()

	// Environment fallbacks first, then bail out early if a required
	// option is still missing.
	getCiaoEnvVariables()
	checkCompulsoryOptions()

	/* First check if we're being asked for a tenants list */
	if *listTenants == true {
		listAllTenants()
		return
	}
	if *listUserTenants == true {
		listUserSpecificTenants(*identityUser, *identityPassword)
		return
	}

	/* If we're missing the tenant name let's try to fetch one */
	if *tenantName == "" {
		*tenantName, *tenantID, err = getTenant(*identityUser, *identityPassword, *tenantID)
		if err != nil {
			fatalf(err.Error())
		}
		warningf("Unspecified scope, using (%s, %s)", *tenantName, *tenantID)
	}

	// Obtain a token scoped to the selected tenant; the compute calls
	// below rely on the package-level scopedToken being populated here.
	scopedToken, *tenantID, _, err = getScopedToken(*identityUser, *identityPassword, *tenantName)
	if err != nil {
		fatalf(err.Error())
	}

	// Listing/reporting actions.
	if *listInstances == true {
		listAllInstances(*tenantID, "", *instanceMarker, *instanceOffset, *listLength)
	}
	if *listWlInstances == true {
		listAllInstances("", *workload, *instanceMarker, *instanceOffset, *listLength)
	}
	if *listCNInstances == true {
		listNodeInstances(*computeNode)
	}
	if *listQuotas == true {
		listTenantQuotas(*tenantID)
	}
	if *listResources == true {
		listTenantResources(*tenantID)
	}
	if *listWorkloads == true {
		listTenantWorkloads(*tenantID)
	}
	if *listComputeNodes == true {
		listAllComputeNodes()
	}
	if *listCNCIs == true {
		listAllCNCIs()
	}
	if *clusterStatus == true {
		dumpClusterStatus()
	}

	// Instance lifecycle actions.
	if *launchInstances == true {
		createTenantInstance(*tenantID, *workload, *instances, *instanceLabel)
	}
	if *deleteInstance == true {
		if len(*tenantID) == 0 {
			fatalf("Missing required -tenant-id parameter")
		}
		// -all-instances makes -instance optional.
		if len(*instance) == 0 && *allInstances == false {
			fatalf("Missing required -instance parameter")
		}
		if *allInstances == false {
			deleteTenantInstance(*tenantID, *instance)
		} else {
			actionAllTenantInstance(*tenantID, osDelete)
		}
	}
	if *dumpCNCI == true {
		dumpCNCIDetails(*cnci)
	}
	if *stopInstance == true || *restartInstance == true {
		if len(*tenantID) == 0 {
			fatalf("Missing required -tenant-id parameter")
		}
		if len(*instance) == 0 {
			fatalf("Missing required -instance parameter")
		}
		// Restart is the default; -stop-instance overrides it.
		action := computeActionStart
		if *stopInstance == true {
			action = computeActionStop
		}
		startStopInstance(*tenantID, *instance, action)
	}

	// Tracing and event actions.
	if *listLabels == true {
		listAllLabels()
	}
	if *dumpLabel != "" {
		dumpTraceData(*dumpLabel)
	}
	if *listEvents == true {
		if len(*tenantID) == 0 {
			fatalf("Missing required -tenant-id parameter")
		}
		listClusterEvents(*tenantID)
	}
	if *listAllEvents == true {
		listClusterEvents("")
	}
	if *deleteEvents == true {
		deleteAllEvents()
	}
}
|
// Copyright 2015 Factom Foundation
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package wire
import (
// "bytes"
"encoding/binary"
"errors"
"fmt"
"io"
// "strconv"
"github.com/FactomProject/FactomCode/factoid"
"github.com/FactomProject/FactomCode/util"
)
const (
maxTxOutPerTX = 1 // need some upper limit
maxTxInPerTX = 1 // need some upper limit
)
// factoid_CountCheck is a sanity check to run after deserialization:
// a well-formed transaction carries exactly one signature per input.
func factoid_CountCheck(tx *MsgTx) bool {
	return len(tx.TxIn) == len(tx.TxSig)
}
// readRCD reads an RCD (redeem condition datastructure) from r.
// NOTE(review): currently a stub — it consumes nothing from r and leaves
// rcd untouched, so callers always get a zero-value RCD.
func readRCD(r io.Reader, pver uint32, rcd *RCD) error {
	return nil
}
/*
// readOutPoint reads the next sequence of bytes from r as an OutPoint.
func factom_readOutPoint(r io.Reader, pver uint32, op *OutPoint) error {
_, err := io.ReadFull(r, op.txid[:])
if err != nil {
return err
}
var buf [4]byte
_, err = io.ReadFull(r, buf[:])
if err != nil {
return err
}
op.idx, _ = readVarInt(r, pver)
return nil
}
*/
// readOutPoint reads the next sequence of bytes from r as an OutPoint:
// the 32-byte hash followed by a 4-byte little-endian index.
func readOutPoint(r io.Reader, pver uint32, op *OutPoint) error {
	if _, err := io.ReadFull(r, op.Hash[:]); err != nil {
		return err
	}
	var idx [4]byte
	if _, err := io.ReadFull(r, idx[:]); err != nil {
		return err
	}
	op.Index = binary.LittleEndian.Uint32(idx[:])
	return nil
}
// writeOutPoint encodes op to the bitcoin protocol encoding for an OutPoint
// to w: the 32-byte hash followed by a 4-byte little-endian index.
func writeOutPoint(w io.Writer, pver uint32, op *OutPoint) error {
	if _, err := w.Write(op.Hash[:]); err != nil {
		return err
	}
	var idx [4]byte
	binary.LittleEndian.PutUint32(idx[:], op.Index)
	if _, err := w.Write(idx[:]); err != nil {
		return err
	}
	return nil
}
// readTxIn reads the next sequence of bytes from r as a transaction input:
// an OutPoint followed by a single sighash byte.
func readTxIn(r io.Reader, pver uint32, ti *TxIn) error {
	var op OutPoint
	if err := readOutPoint(r, pver, &op); err != nil {
		return err
	}
	ti.PreviousOutPoint = op

	var sighash [1]byte
	if _, err := io.ReadFull(r, sighash[:]); err != nil {
		return err
	}
	ti.sighash = sighash[0]
	return nil
}
// readTxOut reads the next sequence of bytes from r as a transaction output
// (TxOut): a varint value followed by the 32-byte RCD hash.
func readTxOut(r io.Reader, pver uint32, to *TxOut) error {
	value, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	to.Value = int64(value)

	b := make([]byte, 32)
	// BUGFIX: the error from ReadFull was previously discarded, letting a
	// short read silently fill RCDHash with zero bytes.
	if _, err = io.ReadFull(r, b); err != nil {
		return err
	}
	copy(to.RCDHash[:], b)
	return nil
}
// readECOut reads the next sequence of bytes from r as an entry credit
// output: a varint value followed by the 32-byte EC public key.
func readECOut(r io.Reader, pver uint32, eco *TxEntryCreditOut) error {
	value, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	eco.Value = int64(value)

	b := make([]byte, 32)
	// BUGFIX: the error from ReadFull was previously discarded, letting a
	// short read silently fill ECpubkey with zero bytes.
	if _, err = io.ReadFull(r, b); err != nil {
		return err
	}
	copy(eco.ECpubkey[:], b)
	return nil
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding transactions stored to disk, such as in a
// database, as opposed to decoding transactions from the wire.
//
// Wire layout read here: version byte, locktime, TxOut count + entries,
// ECOut count + entries, TxIn count + entries, one byte, RCD count +
// entries, followed by a final in/sig count consistency check.
func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
	util.Trace()

	// Version: a single byte.
	var buf [1]byte
	_, err := io.ReadFull(r, buf[:])
	if err != nil {
		return err
	}
	msg.Version = uint8(buf[0])
	if !factoid.FactoidTx_VersionCheck(msg.Version) {
		return errors.New("fTx version check")
	}

	// FIXME: must do 5 bytes here.
	// NOTE(review): buf is only 1 byte but Uint64 requires 8, so this call
	// panics at runtime; kept as-is pending the locktime wire-format fix.
	msg.LockTime = int64(binary.BigEndian.Uint64(buf[:]))
	if !factoid.FactoidTx_LocktimeCheck(msg.LockTime) {
		return errors.New("fTx locktime check")
	}

	outcount, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	// Prevent more output transactions than could possibly fit into a
	// message. It would be possible to cause memory exhaustion and panics
	// without a sane upper bound on this count.
	if outcount > uint64(maxTxOutPerTX) {
		str := fmt.Sprintf("too many input transactions to fit into "+
			"max message size [count %d, max %d]", outcount,
			maxTxOutPerTX)
		return messageError("MsgTx.BtcDecode maxtxout", str)
	}
	msg.TxOut = make([]*TxOut, outcount)
	for i := uint64(0); i < outcount; i++ {
		to := TxOut{}
		err = readTxOut(r, pver, &to)
		if err != nil {
			return err
		}
		msg.TxOut[i] = &to
	}

	eccount, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	// Bound the entry credit output count as well.  (This used to be
	// checked twice, against maxTxOutPerTX and maxTxInPerTX; both limits
	// are identical so a single check suffices.)
	if eccount > uint64(maxTxOutPerTX) {
		str := fmt.Sprintf("too many input transactions to fit into "+
			"max message size [count %d, max %d]", eccount,
			maxTxOutPerTX)
		return messageError("MsgTx.BtcDecode maxtxout", str)
	}
	msg.ECOut = make([]*TxEntryCreditOut, eccount)
	for i := uint64(0); i < eccount; i++ {
		eco := TxEntryCreditOut{}
		err = readECOut(r, pver, &eco)
		if err != nil {
			return err
		}
		msg.ECOut[i] = &eco
	}

	incount, err := readVarInt(r, pver)
	// BUGFIX: this error was previously ignored.
	if err != nil {
		return err
	}
	// BUGFIX: incount was previously unbounded, allowing a hostile peer to
	// force a huge allocation below.
	if incount > uint64(maxTxInPerTX) {
		str := fmt.Sprintf("too many input transactions to fit into "+
			"max message size [count %d, max %d]", incount,
			maxTxInPerTX)
		return messageError("MsgTx.BtcDecode maxtxin", str)
	}
	msg.TxIn = make([]*TxIn, incount)
	for i := uint64(0); i < incount; i++ {
		ti := TxIn{}
		err = readTxIn(r, pver, &ti)
		if err != nil {
			return err
		}
		msg.TxIn[i] = &ti
	}

	// One byte between the inputs and the RCD list (purpose not evident
	// from this file — TODO confirm against the wire spec).
	_, err = io.ReadFull(r, buf[:])
	if err != nil {
		return err
	}

	rcdcount, err := readVarInt(r, pver)
	// BUGFIX: this error was previously ignored.
	if err != nil {
		return err
	}
	if rcdcount > uint64(maxTxInPerTX) {
		str := fmt.Sprintf("too many RCDs to fit into "+
			"max message size [count %d, max %d]", rcdcount,
			maxTxInPerTX)
		return messageError("MsgTx.BtcDecode max rcd", str)
	}
	msg.RCD = make([]*RCD, rcdcount)
	for i := uint64(0); i < rcdcount; i++ {
		rcd := RCD{}
		err = readRCD(r, pver, &rcd)
		if err != nil {
			return err
		}
		msg.RCD[i] = &rcd
	}

	// ----------------------------------------------
	// BUGFIX: the errors.New result was previously discarded, so an
	// input/signature count mismatch was never reported.
	if !factoid_CountCheck(msg) {
		return errors.New("Factoid check 1")
	}
	return nil
}
// FactoidEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding transactions to be stored to disk, such as in a
// database, as opposed to encoding transactions for the wire.
//
// NOTE(review): encoding is not implemented — the real body below is
// commented out, so this method writes nothing and always returns nil.
func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32) error {
	util.Trace(" NOT IMPLEMENTED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
	/*
		var buf [4]byte
		binary.BigEndian.PutUint32(buf[:], uint32(msg.Version))
		_, err := w.Write(buf[:])
		if err != nil {
			return err
		}
		count := uint64(len(msg.TxIn))
		err = writeVarInt(w, pver, count)
		if err != nil {
			return err
		}
		for _, ti := range msg.TxIn {
			err = writeTxIn(w, pver, ti)
			if err != nil {
				return err
			}
		}
		count = uint64(len(msg.TxOut))
		err = writeVarInt(w, pver, count)
		if err != nil {
			return err
		}
		for _, to := range msg.TxOut {
			err = writeTxOut(w, pver, to)
			if err != nil {
				return err
			}
		}
		binary.BigEndian.PutUint64(buf[:], uint64(msg.LockTime)) // FIXME: must do 5 bytes here
		_, err = w.Write(buf[:])
		if err != nil {
			return err
		}
	*/
	return nil
}
// writeTxIn encodes ti to the bitcoin protocol encoding for a transaction
// input (TxIn) to w.
//
// NOTE(review): currently a stub — the encoding below is commented out,
// so this writes nothing and always returns nil.
func writeTxIn(w io.Writer, pver uint32, ti *TxIn) error {
	util.Trace(" NOT IMPLEMENTED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
	/*
		err := writeOutPoint(w, pver, &ti.PreviousOutPoint)
		if err != nil {
			return err
		}
		err = writeVarBytes(w, pver, ti.SignatureScript)
		if err != nil {
			return err
		}
		var buf [4]byte
		binary.BigEndian.PutUint32(buf[:], ti.Sequence)
		_, err = w.Write(buf[:])
		if err != nil {
			return err
		}
	*/
	return nil
}
// writeTxOut encodes to into the bitcoin protocol encoding for a transaction
// output (TxOut) to w.
//
// NOTE(review): currently a stub — the encoding below is commented out,
// so this writes nothing and always returns nil.
func writeTxOut(w io.Writer, pver uint32, to *TxOut) error {
	util.Trace(" NOT IMPLEMENTED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
	/*
		var buf [8]byte
		binary.BigEndian.PutUint64(buf[:], uint64(to.Value))
		_, err := w.Write(buf[:])
		if err != nil {
			return err
		}
		err = writeVarBytes(w, pver, to.PkScript)
		if err != nil {
			return err
		}
	*/
	return nil
}
Marshalling support is still a work in progress.
// Copyright 2015 Factom Foundation
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package wire
import (
// "bytes"
"encoding/binary"
"errors"
"fmt"
"io"
// "strconv"
"github.com/FactomProject/FactomCode/factoid"
"github.com/FactomProject/FactomCode/util"
)
const (
inNout_cap = 16000 // per spec
)
// factoid_CountCheck is a sanity check to run after deserialization:
// a well-formed transaction carries exactly one signature per input.
func factoid_CountCheck(tx *MsgTx) bool {
	return len(tx.TxIn) == len(tx.TxSig)
}
// readRCD reads an RCD (redeem condition datastructure) from r.
// NOTE(review): currently a stub — it consumes nothing from r and leaves
// rcd untouched, so callers always get a zero-value RCD.
func readRCD(r io.Reader, pver uint32, rcd *RCD) error {
	return nil
}
// readOutPoint reads the next sequence of bytes from r as an OutPoint:
// the 32-byte hash followed by a varint index.
func readOutPoint(r io.Reader, pver uint32, op *OutPoint) error {
	_, err := io.ReadFull(r, op.Hash[:])
	if err != nil {
		return err
	}

	// The index is a varint on the wire, but easily fits into uint32.
	// BUGFIX: a leftover 4-byte raw read (from the old fixed-width index
	// encoding, see the previously commented-out LittleEndian line)
	// preceded this varint; writeOutPoint emits only hash+varint, so the
	// stray read broke round-tripping and has been removed.
	index, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	// BUGFIX: the cap check used to run before the error check, so a read
	// failure could be masked by a bogus "index too large" error.
	if inNout_cap < index {
		return fmt.Errorf("OutPoint trouble, index too large: %d", index)
	}
	op.Index = uint32(index)
	return nil
}
// writeOutPoint encodes op to the protocol encoding for an OutPoint to w:
// the 32-byte hash followed by the index as a varint.
func writeOutPoint(w io.Writer, pver uint32, op *OutPoint) error {
	if _, err := w.Write(op.Hash[:]); err != nil {
		return err
	}
	return writeVarInt(w, pver, uint64(op.Index))
}
// readTxIn reads the next sequence of bytes from r as a transaction input:
// an OutPoint followed by a single sighash byte.
func readTxIn(r io.Reader, pver uint32, ti *TxIn) error {
	var op OutPoint
	if err := readOutPoint(r, pver, &op); err != nil {
		return err
	}
	ti.PreviousOutPoint = op

	var sighash [1]byte
	if _, err := io.ReadFull(r, sighash[:]); err != nil {
		return err
	}
	ti.sighash = sighash[0]
	return nil
}
// readTxOut reads the next sequence of bytes from r as a transaction output
// (TxOut): a varint value followed by the 32-byte RCD hash.
func readTxOut(r io.Reader, pver uint32, to *TxOut) error {
	value, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	to.Value = int64(value)

	b := make([]byte, 32)
	// BUGFIX: the error from ReadFull was previously discarded, letting a
	// short read silently fill RCDHash with zero bytes.
	if _, err = io.ReadFull(r, b); err != nil {
		return err
	}
	copy(to.RCDHash[:], b)
	return nil
}
// readECOut reads the next sequence of bytes from r as an entry credit
// output: a varint value followed by the 32-byte EC public key.
func readECOut(r io.Reader, pver uint32, eco *TxEntryCreditOut) error {
	value, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	eco.Value = int64(value)

	b := make([]byte, 32)
	// BUGFIX: the error from ReadFull was previously discarded, letting a
	// short read silently fill ECpubkey with zero bytes.
	if _, err = io.ReadFull(r, b); err != nil {
		return err
	}
	copy(eco.ECpubkey[:], b)
	return nil
}
// BtcDecode decodes r using the protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding transactions stored to disk, such as in a
// database, as opposed to decoding transactions from the wire.
//
// Wire layout read here: version byte, locktime, TxOut count + entries,
// ECOut count + entries, TxIn count + entries, one byte, RCD count +
// entries, followed by a final in/sig count consistency check.
func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
	util.Trace()

	// Version: a single byte.
	var buf [1]byte
	_, err := io.ReadFull(r, buf[:])
	if err != nil {
		return err
	}
	msg.Version = uint8(buf[0])
	if !factoid.FactoidTx_VersionCheck(msg.Version) {
		return errors.New("fTx version check")
	}

	// FIXME: must do 5 bytes here.
	// NOTE(review): buf is only 1 byte but Uint64 requires 8, so this call
	// panics at runtime; kept as-is pending the locktime wire-format fix.
	msg.LockTime = int64(binary.BigEndian.Uint64(buf[:]))
	if !factoid.FactoidTx_LocktimeCheck(msg.LockTime) {
		return errors.New("fTx locktime check")
	}

	outcount, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	// Prevent more output transactions than could possibly fit into a
	// message. It would be possible to cause memory exhaustion and panics
	// without a sane upper bound on this count.
	if outcount > uint64(inNout_cap) {
		str := fmt.Sprintf("too many input transactions to fit into "+
			"max message size [count %d, max %d]", outcount,
			inNout_cap)
		return messageError("MsgTx.BtcDecode maxtxout", str)
	}
	msg.TxOut = make([]*TxOut, outcount)
	for i := uint64(0); i < outcount; i++ {
		to := TxOut{}
		err = readTxOut(r, pver, &to)
		if err != nil {
			return err
		}
		msg.TxOut[i] = &to
	}

	eccount, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	// Bound the entry credit output count as well.
	// BUGFIX: this identical check used to appear twice in a row; the
	// duplicate has been removed.
	if eccount > uint64(inNout_cap) {
		str := fmt.Sprintf("too many input transactions to fit into "+
			"max message size [count %d, max %d]", eccount,
			inNout_cap)
		return messageError("MsgTx.BtcDecode maxtxout", str)
	}
	msg.ECOut = make([]*TxEntryCreditOut, eccount)
	for i := uint64(0); i < eccount; i++ {
		eco := TxEntryCreditOut{}
		err = readECOut(r, pver, &eco)
		if err != nil {
			return err
		}
		msg.ECOut[i] = &eco
	}

	incount, err := readVarInt(r, pver)
	// BUGFIX: this error was previously ignored.
	if err != nil {
		return err
	}
	// BUGFIX: incount was previously unbounded, allowing a hostile peer to
	// force a huge allocation below.
	if incount > uint64(inNout_cap) {
		str := fmt.Sprintf("too many input transactions to fit into "+
			"max message size [count %d, max %d]", incount,
			inNout_cap)
		return messageError("MsgTx.BtcDecode maxtxin", str)
	}
	msg.TxIn = make([]*TxIn, incount)
	for i := uint64(0); i < incount; i++ {
		ti := TxIn{}
		err = readTxIn(r, pver, &ti)
		if err != nil {
			return err
		}
		msg.TxIn[i] = &ti
	}

	// One byte between the inputs and the RCD list (purpose not evident
	// from this file — TODO confirm against the wire spec).
	_, err = io.ReadFull(r, buf[:])
	if err != nil {
		return err
	}

	rcdcount, err := readVarInt(r, pver)
	// BUGFIX: this error was previously ignored.
	if err != nil {
		return err
	}
	if rcdcount > uint64(inNout_cap) {
		str := fmt.Sprintf("too many RCDs to fit into "+
			"max message size [count %d, max %d]", rcdcount,
			inNout_cap)
		return messageError("MsgTx.BtcDecode max rcd", str)
	}
	msg.RCD = make([]*RCD, rcdcount)
	for i := uint64(0); i < rcdcount; i++ {
		rcd := RCD{}
		err = readRCD(r, pver, &rcd)
		if err != nil {
			return err
		}
		msg.RCD[i] = &rcd
	}

	// ----------------------------------------------
	// BUGFIX: the errors.New result was previously discarded, so an
	// input/signature count mismatch was never reported.
	if !factoid_CountCheck(msg) {
		return errors.New("Factoid check 1")
	}
	return nil
}
// FactoidEncode encodes the receiver to w using the protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding transactions to be stored to disk, such as in a
// database, as opposed to encoding transactions for the wire.
//
// NOTE(review): work in progress — only the version byte, an 8-byte
// little-endian locktime, the TxOut list and the ECOut count are written;
// the ECOut entries themselves, the TxIn list and the RCDs are not yet
// emitted, so output does not round-trip with BtcDecode.
func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32) error {
	util.Trace(" WIP !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")

	// Version: a single byte.
	var buf [1]byte
	buf[0] = msg.Version
	_, err := w.Write(buf[:])
	if err != nil {
		return err
	}

	// Locktime as 8 little-endian bytes (BtcDecode reads it differently).
	var buf8 [8]byte
	binary.LittleEndian.PutUint64(buf8[:], uint64(msg.LockTime)) // FIXME: must do 5 bytes here
	_, err = w.Write(buf8[:])
	if err != nil {
		return err
	}

	// Transaction outputs: count then entries.
	txoutcount := uint64(len(msg.TxOut))
	err = writeVarInt(w, pver, txoutcount)
	if err != nil {
		return err
	}
	for _, to := range msg.TxOut {
		err = writeTxOut(w, pver, to)
		if err != nil {
			return err
		}
	}

	// Entry credit outputs: only the count is written so far.
	ecoutcount := uint64(len(msg.ECOut))
	err = writeVarInt(w, pver, ecoutcount)
	if err != nil {
		return err
	}

	/*
		count = uint64(len(msg.TxOut))
		err = writeVarInt(w, pver, count)
		if err != nil {
			return err
		}
		for _, to := range msg.TxOut {
			err = writeTxOut(w, pver, to)
			if err != nil {
				return err
			}
		}
		binary.BigEndian.PutUint64(buf[:], uint64(msg.LockTime)) // FIXME: must do 5 bytes here
		_, err = w.Write(buf[:])
		if err != nil {
			return err
		}
	*/
	return nil
}
// writeTxIn encodes ti to the protocol encoding for a transaction
// input (TxIn) to w.
//
// NOTE(review): currently a stub — the encoding below is commented out,
// so this writes nothing and always returns nil.
func writeTxIn(w io.Writer, pver uint32, ti *TxIn) error {
	util.Trace(" NOT IMPLEMENTED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
	/*
		err := writeOutPoint(w, pver, &ti.PreviousOutPoint)
		if err != nil {
			return err
		}
		err = writeVarBytes(w, pver, ti.SignatureScript)
		if err != nil {
			return err
		}
		var buf [4]byte
		binary.BigEndian.PutUint32(buf[:], ti.Sequence)
		_, err = w.Write(buf[:])
		if err != nil {
			return err
		}
	*/
	return nil
}
// writeTxOut encodes to into the protocol encoding for a transaction
// output (TxOut) to w.
//
// NOTE(review): currently a stub — the encoding below is commented out,
// so this writes nothing and always returns nil, even though BtcEncode
// already calls it for every TxOut.
func writeTxOut(w io.Writer, pver uint32, to *TxOut) error {
	util.Trace(" NOT IMPLEMENTED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
	/*
		var buf [8]byte
		binary.BigEndian.PutUint64(buf[:], uint64(to.Value))
		_, err := w.Write(buf[:])
		if err != nil {
			return err
		}
		err = writeVarBytes(w, pver, to.PkScript)
		if err != nil {
			return err
		}
	*/
	return nil
}
|
// Big monolithic binding file.
// Binds a ton of things.
package middleware
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"github.com/DeedleFake/Go-PhysicsFS/physfs"
"github.com/GeertJohan/go.linenoise"
"github.com/carbonsrv/carbon/modules/glue"
"github.com/carbonsrv/carbon/modules/helpers"
"github.com/carbonsrv/carbon/modules/scheduler"
"github.com/carbonsrv/carbon/modules/static"
"github.com/fzzy/radix/redis"
"github.com/gin-gonic/gin"
"github.com/pmylund/go-cache"
"github.com/shurcooL/github_flavored_markdown"
"github.com/vifino/contrib/gzip"
"github.com/vifino/golua/lua"
"github.com/vifino/luar"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"regexp"
"time"
)
// Vars
var webroot string

// Bind wires all carbon Lua bindings into the given state and remembers
// the web root for later use.
func Bind(L *lua.State, root string) {
	webroot = root
	luar.Register(L, "var", luar.Map{ // Vars
		"root": root,
	})

	// Registration order matches the original call sequence.
	for _, register := range []func(*lua.State){
		BindCarbon,
		BindMiddleware,
		BindRedis,
		BindKVStore,
		BindPhysFS,
		BindIOEnhancements,
		BindOSEnhancements,
		BindThread,
		BindNet,
		BindConversions,
		BindComs,
		BindEncoding,
		BindMarkdown,
		BindLinenoise,
		BindOther,
	} {
		register(L)
	}
}
// BindCarbon binds glue func
func BindCarbon(L *lua.State) {
	api := luar.Map{ // Carbon specific API
		"glue": glue.GetGlue,
	}
	luar.Register(L, "carbon", api)
}
// BindEngine binds the engine creation.
func BindEngine(L *lua.State) {
	api := luar.Map{
		"_gin_new": gin.New,
	}
	luar.Register(L, "carbon", api)
}
// BindMiddleware binds the middleware: the `mw` table (essential gin
// middleware, Lua runners, sub-routers, conditional runners and basic
// helpers) plus a few carbon-internal helpers, then loads the Lua
// routing glue.
func BindMiddleware(L *lua.State) {
	luar.Register(L, "mw", luar.Map{
		// Essentials
		"Logger":   gin.Logger,
		"Recovery": gin.Recovery,
		// Lua related stuff
		"Lua":       Lua,
		"DLR_NS":    DLR_NS,
		"DLR_RUS":   DLR_RUS,
		"DLRWS_NS":  DLRWS_NS,
		"DLRWS_RUS": DLRWS_RUS,
		// Custom sub-routers.  Lua hands us map[string]interface{}; each
		// closure re-types the values into a Plan before delegating.
		"ExtRoute": (func(plan map[string]interface{}) func(*gin.Context) {
			newplan := make(Plan, len(plan))
			for k, v := range plan {
				newplan[k] = v.(func(*gin.Context))
			}
			return ExtRoute(newplan)
		}),
		"VHOST": (func(plan map[string]interface{}) func(*gin.Context) {
			newplan := make(Plan, len(plan))
			for k, v := range plan {
				newplan[k] = v.(func(*gin.Context))
			}
			return VHOST(newplan)
		}),
		"VHOST_Middleware": (func(plan map[string]interface{}) gin.HandlerFunc {
			newplan := make(Plan, len(plan))
			for k, v := range plan {
				newplan[k] = v.(gin.HandlerFunc)
			}
			return VHOST_Middleware(newplan)
		}),
		// To run or not to run, that is the question!
		"if_regex":       If_Regexp,
		"if_written":     If_Written,
		"if_status":      If_Status,
		"if_not_regex":   If_Not_Regexp,
		"if_not_written": If_Not_Written,
		"if_not_status":  If_Not_Status,
		// Modification stuff.
		"GZip": func() func(*gin.Context) {
			return gzip.Gzip(gzip.DefaultCompression)
		},
		// Basic
		"Echo":     EchoHTML,
		"EchoText": Echo,
	})
	luar.Register(L, "carbon", luar.Map{
		"_mw_CGI":         CGI,         // Run an CGI App!
		"_mw_CGI_Dynamic": CGI_Dynamic, // Run CGI Apps based on path!
		"_mw_combine": (func(middlewares []interface{}) func(*gin.Context) { // Combine routes, doesn't properly route like middleware or anything.
			newmiddlewares := make([]func(*gin.Context), len(middlewares))
			for k, v := range middlewares {
				newmiddlewares[k] = v.(func(*gin.Context))
			}
			return Combine(newmiddlewares)
		}),
	})
	// NOTE(review): the DoString error is ignored here — a broken routing
	// glue script would fail silently.
	L.DoString(glue.RouteGlue())
}
// BindStatic binds the static file server thing.
func BindStatic(L *lua.State, cfe *cache.Cache) {
	serve := func(path, prefix string) func(*gin.Context) {
		return staticServe.ServeCached(prefix, staticServe.PhysFS(path, prefix, true, true), cfe)
	}
	luar.Register(L, "carbon", luar.Map{
		"_staticserve": serve,
	})
}
// BindPhysFS binds the physfs library functions.
func BindPhysFS(L *lua.State) {
	luar.Register(L, "carbon", luar.Map{ // PhysFS
		"_fs_mount":       physfs.Mount,
		"_fs_exists":      physfs.Exists,
		"_fs_getFS":       physfs.FileSystem,
		"_fs_mkdir":       physfs.Mkdir,
		"_fs_umount":      physfs.RemoveFromSearchPath,
		"_fs_delete":      physfs.Delete,
		"_fs_setWriteDir": physfs.SetWriteDir,
		"_fs_getWriteDir": physfs.GetWriteDir,
		// BUGFIX: Go-PhysicsFS's EnumerateFiles does not report an error
		// when the directory does not exist, so guard with an explicit
		// existence check and synthesize an os.Open-style error.
		"_fs_list": func(name string) ([]string, error) {
			if physfs.Exists(name) {
				return physfs.EnumerateFiles(name)
			}
			return nil, errors.New("open " + name + ": no such file or directory")
		},
		"_fs_readfile": func(name string) (string, error) {
			file, err := physfs.Open(name)
			if err != nil {
				return "", err
			}
			buf := bytes.NewBuffer(nil)
			io.Copy(buf, file)
			file.Close()
			return string(buf.Bytes()), nil
		},
	})
}
// BindIOEnhancements binds small functions to enhance the IO library
func BindIOEnhancements(L *lua.State) {
	luar.Register(L, "carbon", luar.Map{ // Small enhancements to the io stuff.
		// _io_list returns the names of all entries in a directory.
		"_io_list": (func(path string) ([]string, error) {
			entries, err := ioutil.ReadDir(path)
			if err != nil {
				return make([]string, 1), err
			}
			names := make([]string, len(entries))
			for i, entry := range entries {
				names[i] = entry.Name()
			}
			return names, nil
		}),
		"_io_glob": filepath.Glob,
		// _io_modtime returns a file's modification time as a UTC unix
		// timestamp, or -1 on error.
		"_io_modtime": (func(path string) (int, error) {
			info, err := os.Stat(path)
			if err != nil {
				return -1, err
			}
			return int(info.ModTime().UTC().Unix()), nil
		}),
	})
}
// BindOSEnhancements does the same as above, but for the OS library
func BindOSEnhancements(L *lua.State) {
	luar.Register(L, "carbon", luar.Map{ // Small enhancements to the io stuff.
		"_os_exists": (func(path string) bool {
			_, err := os.Stat(path)
			return err == nil
		}),
		"_os_sleep": (func(secs int64) {
			time.Sleep(time.Duration(secs) * time.Second)
		}),
		"_os_chdir":   os.Chdir,
		"_os_abspath": filepath.Abs,
		"_os_pwd":     os.Getwd,
	})
}
// BindRedis binds the redis library
func BindRedis(L *lua.State) {
	luar.Register(L, "redis", luar.Map{
		// connectTimeout dials a redis server with a timeout in seconds.
		"connectTimeout": (func(addr string, secs int) (*redis.Client, error) {
			return redis.DialTimeout("tcp", addr, time.Duration(secs)*time.Second)
		}),
		"connect": (func(addr string) (*redis.Client, error) {
			return redis.Dial("tcp", addr)
		}),
	})
}
// BindKVStore binds the kv store for internal carbon cache or similar.
func BindKVStore(L *lua.State) { // Thread safe Key Value Store that doesn't persist.
	luar.Register(L, "kvstore", luar.Map{
		// _set stores a value with no expiry.
		"_set": (func(key string, value interface{}) {
			kvstore.Set(key, value, -1)
		}),
		"_del": (func(key string) {
			kvstore.Delete(key)
		}),
		// _get returns the stored value, or nil if the key is absent.
		"_get": (func(key string) interface{} {
			if v, ok := kvstore.Get(key); ok {
				return v
			}
			return nil
		}),
		"_inc": (func(key string, delta int64) error {
			return kvstore.Increment(key, delta)
		}),
		"_dec": (func(key string, delta int64) error {
			return kvstore.Decrement(key, delta)
		}),
	})
}
// BindThread binds state creation and stuff.
// thread._spawn compiles bcode in a brand new Lua state (with the full
// carbon bindings), optionally injects vals as globals, and schedules
// the chunk for execution.  It returns a channel that is also exposed to
// the thread as the `threadcom` global for communication.
func BindThread(L *lua.State) {
	luar.Register(L, "thread", luar.Map{
		"_spawn": (func(bcode string, dobind bool, vals map[string]interface{}, buffer int) (chan interface{}, error) {
			// buffer == -1 selects an unbuffered channel.
			var ch chan interface{}
			if buffer == -1 {
				ch = make(chan interface{})
			} else {
				ch = make(chan interface{}, buffer)
			}
			// Fresh Lua state for the thread; shadows the outer L on purpose.
			L := luar.Init()
			Bind(L, webroot)
			err := L.DoString(glue.MainGlue())
			if err != nil {
				panic(err)
			}
			luar.Register(L, "", luar.Map{
				"threadcom": ch,
			})
			if dobind {
				luar.Register(L, "", vals)
			}
			// Non-zero return means the chunk failed to compile.
			if L.LoadBuffer(bcode, len(bcode), "thread") != 0 {
				return make(chan interface{}), errors.New(L.ToString(-1))
			}
			scheduler.Add(func() {
				if L.Pcall(0, 0, 0) != 0 { // != 0 means error in execution
					fmt.Println("thread error: " + L.ToString(-1))
				}
			})
			return ch, nil
		}),
	})
}
// BindComs binds the com.* funcs, exposing Go channels to Lua for
// inter-thread communication.
func BindComs(L *lua.State) {
	luar.Register(L, "com", luar.Map{
		"create": (func() chan interface{} {
			return make(chan interface{})
		}),
		"createBuffered": (func(n int) chan interface{} {
			return make(chan interface{}, n)
		}),
		// receive blocks until a value arrives.
		"receive": (func(ch chan interface{}) interface{} {
			return <-ch
		}),
		// try_receive returns nil immediately when nothing is pending.
		"try_receive": (func(ch chan interface{}) interface{} {
			select {
			case m := <-ch:
				return m
			default:
				return nil
			}
		}),
		"send": (func(ch chan interface{}, v interface{}) bool {
			ch <- v
			return true
		}),
		// try_send reports whether the value could be sent without blocking.
		"try_send": (func(ch chan interface{}, v interface{}) bool {
			select {
			case ch <- v:
				return true
			default:
				return false
			}
		}),
		"size": (func(ch chan interface{}) int {
			return len(ch)
		}),
		"cap": (func(ch chan interface{}) int {
			return cap(ch)
		}),
		// pipe forwards src to dst forever, blocking the caller.
		"pipe": (func(src, dst chan interface{}) {
			for {
				dst <- <-src
			}
		}),
		// pipe_background does the same on the scheduler.
		"pipe_background": (func(src, dst chan interface{}) {
			scheduler.Add(func() {
				for {
					dst <- <-src
				}
			})
		}),
	})
}
// BindNet binds sockets, not really that good. needs rework.
// Exposes dial/read/write helpers plus channel-based connection pipes.
func BindNet(L *lua.State) {
	luar.Register(L, "net", luar.Map{
		"dial": net.Dial,
		"dial_tls": func(proto, addr string) (net.Conn, error) {
			config := tls.Config{InsecureSkipVerify: true} // Because I'm not gonna bother with auth.
			return tls.Dial(proto, addr, &config)
		},
		// write sends str to the connection (str is used as the format
		// string — '%' characters will be interpreted by Fprintf).
		"write": (func(con interface{}, str string) {
			fmt.Fprintf(con.(net.Conn), str)
		}),
		// readline blocks until a '\n'-terminated line arrives.
		"readline": (func(con interface{}) (string, error) {
			return bufio.NewReader(con.(net.Conn)).ReadString('\n')
		}),
		// pipe_conn pumps received lines into output (in a goroutine) and
		// blocks the caller forever feeding input into the connection.
		"pipe_conn": (func(con interface{}, input, output chan interface{}) {
			go func() {
				reader := bufio.NewReader(con.(net.Conn))
				for {
					line, _ := reader.ReadString('\n')
					output <- line
				}
			}()
			for {
				line := <-input
				fmt.Fprintf(con.(net.Conn), line.(string))
			}
		}),
		// pipe_conn_background does the same but schedules both pumps so
		// the caller is not blocked.
		"pipe_conn_background": (func(con interface{}, input, output chan interface{}) {
			scheduler.Add(func() {
				reader := bufio.NewReader(con.(net.Conn))
				for {
					line, _ := reader.ReadString('\n')
					output <- line
				}
			})
			scheduler.Add(func() {
				for {
					line := <-input
					fmt.Fprintf(con.(net.Conn), line.(string))
				}
			})
		}),
	})
}
// BindConversions binds helpers to convert between go types.
func BindConversions(L *lua.State) {
	conv := luar.Map{
		"stringtocharslice": (func(s string) []byte {
			return []byte(s)
		}),
		"charslicetostring": (func(b []byte) string {
			return string(b)
		}),
	}
	luar.Register(L, "convert", conv)
}
// BindEncoding binds functions to encode and decode between things.
func BindEncoding(L *lua.State) {
	luar.Register(L, "carbon", luar.Map{
		"_enc_base64_enc": (func(plain string) string {
			return base64.StdEncoding.EncodeToString([]byte(plain))
		}),
		"_enc_base64_dec": (func(encoded string) (string, error) {
			raw, err := base64.StdEncoding.DecodeString(encoded)
			return string(raw), err
		}),
	})
}
// BindMarkdown binds a markdown renderer.
func BindMarkdown(L *lua.State) {
	render := func(source string) string {
		return string(github_flavored_markdown.Markdown([]byte(source)))
	}
	luar.Register(L, "markdown", luar.Map{
		"github": render,
	})
}
// BindLinenoise binds the linenoise library.
func BindLinenoise(L *lua.State) {
	api := luar.Map{
		"line":         linenoise.Line,
		"clear":        linenoise.Clear,
		"addHistory":   linenoise.AddHistory,
		"saveHistory":  linenoise.SaveHistory,
		"loadHistory":  linenoise.LoadHistory,
		"setMultiline": linenoise.SetMultiline,
	}
	luar.Register(L, "linenoise", api)
}
// BindOther binds misc things
func BindOther(L *lua.State) {
	globals := luar.Map{
		// unixtime returns the current UTC time as a unix timestamp.
		"unixtime": (func() int {
			return int(time.Now().UTC().Unix())
		}),
		"regexp": regexp.Compile,
	}
	luar.Register(L, "", globals)
	luar.Register(L, "carbon", luar.Map{
		"_syntaxhl": helpers.SyntaxHL,
	})
}
// BindContext binds the gin context.
func BindContext(L *lua.State, context *gin.Context) {
	req := context.Request
	luar.Register(L, "", luar.Map{
		"context": context,
		"req":     req,
		"host":    req.URL.Host,
		"path":    req.URL.Path,
		"scheme":  req.URL.Scheme,
	})
	luar.Register(L, "carbon", luar.Map{
		"_header_set": context.Header,
		"_header_get": req.Header.Get,
		"_paramfunc":  context.Param,
		"_formfunc":   context.PostForm,
		"_queryfunc":  context.Query,
	})
}
Fixes: Go-PhysicsFS's EnumerateFiles does not return an error when the directory does not exist, so _fs_list now checks for existence first.
// Big monolithic binding file.
// Binds a ton of things.
package middleware
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"github.com/DeedleFake/Go-PhysicsFS/physfs"
"github.com/GeertJohan/go.linenoise"
"github.com/carbonsrv/carbon/modules/glue"
"github.com/carbonsrv/carbon/modules/helpers"
"github.com/carbonsrv/carbon/modules/scheduler"
"github.com/carbonsrv/carbon/modules/static"
"github.com/fzzy/radix/redis"
"github.com/gin-gonic/gin"
"github.com/pmylund/go-cache"
"github.com/shurcooL/github_flavored_markdown"
"github.com/vifino/contrib/gzip"
"github.com/vifino/golua/lua"
"github.com/vifino/luar"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"regexp"
"time"
)
// Vars
var webroot string

// Bind wires all carbon Lua bindings into the given state and remembers
// the web root for later use.
func Bind(L *lua.State, root string) {
	webroot = root
	luar.Register(L, "var", luar.Map{ // Vars
		"root": root,
	})

	// Registration order matches the original call sequence.
	for _, register := range []func(*lua.State){
		BindCarbon,
		BindMiddleware,
		BindRedis,
		BindKVStore,
		BindPhysFS,
		BindIOEnhancements,
		BindOSEnhancements,
		BindThread,
		BindNet,
		BindConversions,
		BindComs,
		BindEncoding,
		BindMarkdown,
		BindLinenoise,
		BindOther,
	} {
		register(L)
	}
}
// BindCarbon binds glue func
func BindCarbon(L *lua.State) {
	api := luar.Map{ // Carbon specific API
		"glue": glue.GetGlue,
	}
	luar.Register(L, "carbon", api)
}
// BindEngine binds the engine creation.
func BindEngine(L *lua.State) {
	api := luar.Map{
		"_gin_new": gin.New,
	}
	luar.Register(L, "carbon", api)
}
// BindMiddleware binds the middleware: the `mw` table (essential gin
// middleware, Lua runners, sub-routers, conditional runners and basic
// helpers) plus a few carbon-internal helpers, then loads the Lua
// routing glue.
func BindMiddleware(L *lua.State) {
	luar.Register(L, "mw", luar.Map{
		// Essentials
		"Logger":   gin.Logger,
		"Recovery": gin.Recovery,
		// Lua related stuff
		"Lua":       Lua,
		"DLR_NS":    DLR_NS,
		"DLR_RUS":   DLR_RUS,
		"DLRWS_NS":  DLRWS_NS,
		"DLRWS_RUS": DLRWS_RUS,
		// Custom sub-routers.  Lua hands us map[string]interface{}; each
		// closure re-types the values into a Plan before delegating.
		"ExtRoute": (func(plan map[string]interface{}) func(*gin.Context) {
			newplan := make(Plan, len(plan))
			for k, v := range plan {
				newplan[k] = v.(func(*gin.Context))
			}
			return ExtRoute(newplan)
		}),
		"VHOST": (func(plan map[string]interface{}) func(*gin.Context) {
			newplan := make(Plan, len(plan))
			for k, v := range plan {
				newplan[k] = v.(func(*gin.Context))
			}
			return VHOST(newplan)
		}),
		"VHOST_Middleware": (func(plan map[string]interface{}) gin.HandlerFunc {
			newplan := make(Plan, len(plan))
			for k, v := range plan {
				newplan[k] = v.(gin.HandlerFunc)
			}
			return VHOST_Middleware(newplan)
		}),
		// To run or not to run, that is the question!
		"if_regex":       If_Regexp,
		"if_written":     If_Written,
		"if_status":      If_Status,
		"if_not_regex":   If_Not_Regexp,
		"if_not_written": If_Not_Written,
		"if_not_status":  If_Not_Status,
		// Modification stuff.
		"GZip": func() func(*gin.Context) {
			return gzip.Gzip(gzip.DefaultCompression)
		},
		// Basic
		"Echo":     EchoHTML,
		"EchoText": Echo,
	})
	luar.Register(L, "carbon", luar.Map{
		"_mw_CGI":         CGI,         // Run an CGI App!
		"_mw_CGI_Dynamic": CGI_Dynamic, // Run CGI Apps based on path!
		"_mw_combine": (func(middlewares []interface{}) func(*gin.Context) { // Combine routes, doesn't properly route like middleware or anything.
			newmiddlewares := make([]func(*gin.Context), len(middlewares))
			for k, v := range middlewares {
				newmiddlewares[k] = v.(func(*gin.Context))
			}
			return Combine(newmiddlewares)
		}),
	})
	// NOTE(review): the DoString error is ignored here — a broken routing
	// glue script would fail silently.
	L.DoString(glue.RouteGlue())
}
// BindStatic binds the cached PhysFS-backed static file server.
func BindStatic(L *lua.State, cfe *cache.Cache) {
	serve := func(path, prefix string) func(*gin.Context) {
		fs := staticServe.PhysFS(path, prefix, true, true)
		return staticServe.ServeCached(prefix, fs, cfe)
	}
	luar.Register(L, "carbon", luar.Map{
		"_staticserve": serve,
	})
}
// BindPhysFS binds the physfs library functions.
//
// Fixes:
//   - _fs_list returned a bare string where an error value is required,
//     which does not compile; it now returns a proper errors.New value.
//   - _fs_readfile ignored the io.Copy error and leaked the file handle on
//     that path; the copy error is now propagated and Close is deferred.
func BindPhysFS(L *lua.State) {
	luar.Register(L, "carbon", luar.Map{ // PhysFS
		"_fs_mount": physfs.Mount,
		"_fs_exists": physfs.Exists,
		"_fs_getFS": physfs.FileSystem,
		"_fs_mkdir": physfs.Mkdir,
		"_fs_umount": physfs.RemoveFromSearchPath,
		"_fs_delete": physfs.Delete,
		"_fs_setWriteDir": physfs.SetWriteDir,
		"_fs_getWriteDir": physfs.GetWriteDir,
		// _fs_list enumerates a directory inside the virtual filesystem.
		"_fs_list": func(name string) (fl []string, err error) {
			if physfs.Exists(name) {
				return physfs.EnumerateFiles(name)
			}
			// Mirror os.Open's "no such file" message shape.
			return nil, errors.New("open " + name + ": no such file or directory")
		},
		// _fs_readfile slurps an entire virtual file into a string.
		"_fs_readfile": func(name string) (string, error) {
			file, err := physfs.Open(name)
			if err != nil {
				return "", err
			}
			defer file.Close()
			buf := bytes.NewBuffer(nil)
			if _, err := io.Copy(buf, file); err != nil {
				return "", err
			}
			return buf.String(), nil
		},
	})
}
// BindIOEnhancements binds small functions to enhance the IO library.
//
// Fix: on a ReadDir failure _io_list used to return make([]string, 1) — a
// slice holding one empty string — alongside the error. The result of a
// failed call is meaningless, so return nil instead (and drop the
// else-after-return indentation while at it).
func BindIOEnhancements(L *lua.State) {
	luar.Register(L, "carbon", luar.Map{ // Small enhancements to the io stuff.
		// _io_list returns the names of all entries in a directory.
		"_io_list": (func(path string) ([]string, error) {
			files, err := ioutil.ReadDir(path)
			if err != nil {
				return nil, err
			}
			list := make([]string, len(files))
			for i := range files {
				list[i] = files[i].Name()
			}
			return list, nil
		}),
		"_io_glob": filepath.Glob,
		// _io_modtime reports a file's modification time as a Unix timestamp,
		// or -1 with an error if the file cannot be stat'ed.
		"_io_modtime": (func(path string) (int, error) {
			info, err := os.Stat(path)
			if err != nil {
				return -1, err
			}
			return int(info.ModTime().UTC().Unix()), nil
		}),
	})
}
// BindOSEnhancements adds small convenience functions to the OS library.
func BindOSEnhancements(L *lua.State) {
	// _os_exists reports whether a path can be stat'ed.
	exists := func(path string) bool {
		_, err := os.Stat(path)
		return err == nil
	}
	// _os_sleep pauses the caller for the given number of seconds.
	sleep := func(secs int64) {
		time.Sleep(time.Duration(secs) * time.Second)
	}
	luar.Register(L, "carbon", luar.Map{ // Small enhancements to the io stuff.
		"_os_exists":  exists,
		"_os_sleep":   sleep,
		"_os_chdir":   os.Chdir,
		"_os_abspath": filepath.Abs,
		"_os_pwd":     os.Getwd,
	})
}
// BindRedis binds the redis client constructors.
func BindRedis(L *lua.State) {
	// connect dials a redis server over TCP with no deadline.
	connect := func(host string) (*redis.Client, error) {
		return redis.Dial("tcp", host)
	}
	// connectTimeout dials with a connection timeout given in seconds.
	connectTimeout := func(host string, timeout int) (*redis.Client, error) {
		d := time.Duration(timeout) * time.Second
		return redis.DialTimeout("tcp", host, d)
	}
	luar.Register(L, "redis", luar.Map{
		"connectTimeout": connectTimeout,
		"connect":        connect,
	})
}
// BindKVStore binds the in-process key/value store (thread safe, does not
// persist across restarts).
func BindKVStore(L *lua.State) {
	luar.Register(L, "kvstore", luar.Map{
		// Store v under k with no expiry (-1).
		"_set": func(k string, v interface{}) {
			kvstore.Set(k, v, -1)
		},
		// Remove k entirely.
		"_del": func(k string) {
			kvstore.Delete(k)
		},
		// Fetch k; nil when the key is absent.
		"_get": func(k string) interface{} {
			v, ok := kvstore.Get(k)
			if !ok {
				return nil
			}
			return v
		},
		// Atomic add/subtract on a numeric value.
		"_inc": func(k string, n int64) error {
			return kvstore.Increment(k, n)
		},
		"_dec": func(k string, n int64) error {
			return kvstore.Decrement(k, n)
		},
	})
}
// BindThread binds state creation and stuff.
func BindThread(L *lua.State) {
	luar.Register(L, "thread", luar.Map{
		// _spawn compiles bcode into a brand-new Lua state and schedules it
		// for execution, returning a channel (exposed to the new state as
		// the global "threadcom") for communication with the caller.
		//   bcode:  Lua source to run in the new thread.
		//   dobind: when true, vals are registered as globals in the new state.
		//   buffer: channel buffer size; -1 means unbuffered.
		"_spawn": (func(bcode string, dobind bool, vals map[string]interface{}, buffer int) (chan interface{}, error) {
			var ch chan interface{}
			if buffer == -1 {
				ch = make(chan interface{})
			} else {
				ch = make(chan interface{}, buffer)
			}
			// NOTE: this L deliberately shadows the outer *lua.State — the
			// thread must run in its own, freshly bound state.
			L := luar.Init()
			Bind(L, webroot)
			err := L.DoString(glue.MainGlue())
			if err != nil {
				panic(err)
			}
			luar.Register(L, "", luar.Map{
				"threadcom": ch,
			})
			if dobind {
				luar.Register(L, "", vals)
			}
			// Non-zero return means the chunk failed to compile; surface the
			// Lua error message (top of stack) as a Go error.
			if L.LoadBuffer(bcode, len(bcode), "thread") != 0 {
				return make(chan interface{}), errors.New(L.ToString(-1))
			}
			// Run the compiled chunk on the shared scheduler; runtime errors
			// are reported to stdout rather than propagated.
			scheduler.Add(func() {
				if L.Pcall(0, 0, 0) != 0 { // != 0 means error in execution
					fmt.Println("thread error: " + L.ToString(-1))
				}
			})
			return ch, nil
		}),
	})
}
// BindComs binds the com.* channel primitives for inter-thread messaging.
func BindComs(L *lua.State) {
	luar.Register(L, "com", luar.Map{
		// Channel construction.
		"create": func() chan interface{} {
			return make(chan interface{})
		},
		"createBuffered": func(n int) chan interface{} {
			return make(chan interface{}, n)
		},
		// Blocking receive.
		"receive": func(ch chan interface{}) interface{} {
			return <-ch
		},
		// Non-blocking receive: nil when nothing is queued.
		"try_receive": func(ch chan interface{}) interface{} {
			select {
			case v := <-ch:
				return v
			default:
				return nil
			}
		},
		// Blocking send; always reports true once delivered.
		"send": func(ch chan interface{}, v interface{}) bool {
			ch <- v
			return true
		},
		// Non-blocking send: false when the channel would block.
		"try_send": func(ch chan interface{}, v interface{}) bool {
			select {
			case ch <- v:
				return true
			default:
				return false
			}
		},
		// Buffered element count and total capacity.
		"size": func(ch chan interface{}) int {
			return len(ch)
		},
		"cap": func(ch chan interface{}) int {
			return cap(ch)
		},
		// Forward everything from src to dst, blocking the caller forever.
		"pipe": func(src, dst chan interface{}) {
			for {
				dst <- <-src
			}
		},
		// Same forwarding loop, but run on the scheduler so the caller
		// keeps executing.
		"pipe_background": func(src, dst chan interface{}) {
			scheduler.Add(func() {
				for {
					dst <- <-src
				}
			})
		},
	})
}
// BindNet binds sockets, not really that good. needs rework.
//
// Fix: write, pipe_conn and pipe_conn_background passed user-supplied data
// as the *format* argument of fmt.Fprintf, so any '%' in the payload was
// interpreted as a formatting directive (go vet flags this). fmt.Fprint
// writes the string verbatim.
func BindNet(L *lua.State) {
	luar.Register(L, "net", luar.Map{
		"dial": net.Dial,
		"dial_tls": func(proto, addr string) (net.Conn, error) {
			config := tls.Config{InsecureSkipVerify: true} // Because I'm not gonna bother with auth.
			return tls.Dial(proto, addr, &config)
		},
		// write sends str to the connection verbatim.
		"write": (func(con interface{}, str string) {
			fmt.Fprint(con.(net.Conn), str)
		}),
		// readline reads up to and including the next newline.
		// NOTE(review): a fresh bufio.Reader per call can drop buffered
		// bytes between calls — part of why this binding "needs rework".
		"readline": (func(con interface{}) (string, error) {
			return bufio.NewReader(con.(net.Conn)).ReadString('\n')
		}),
		// pipe_conn shuttles lines between the connection and the given
		// channels; the read side runs in a goroutine, the write side
		// blocks the caller forever.
		"pipe_conn": (func(con interface{}, input, output chan interface{}) {
			go func() {
				reader := bufio.NewReader(con.(net.Conn))
				for {
					line, _ := reader.ReadString('\n')
					output <- line
				}
			}()
			for {
				line := <-input
				fmt.Fprint(con.(net.Conn), line.(string))
			}
		}),
		// pipe_conn_background does the same with both directions on the
		// scheduler, so the caller keeps running.
		"pipe_conn_background": (func(con interface{}, input, output chan interface{}) {
			scheduler.Add(func() {
				reader := bufio.NewReader(con.(net.Conn))
				for {
					line, _ := reader.ReadString('\n')
					output <- line
				}
			})
			scheduler.Add(func() {
				for {
					line := <-input
					fmt.Fprint(con.(net.Conn), line.(string))
				}
			})
		}),
	})
}
// BindConversions binds helpers to convert between Go string and byte-slice
// representations.
func BindConversions(L *lua.State) {
	toBytes := func(s string) []byte {
		return []byte(s)
	}
	toString := func(b []byte) string {
		return string(b)
	}
	luar.Register(L, "convert", luar.Map{
		"stringtocharslice": toBytes,
		"charslicetostring": toString,
	})
}
// BindEncoding binds base64 encode/decode helpers.
func BindEncoding(L *lua.State) {
	luar.Register(L, "carbon", luar.Map{
		// Encode str using standard base64.
		"_enc_base64_enc": func(str string) string {
			return base64.StdEncoding.EncodeToString([]byte(str))
		},
		// Decode standard base64; on failure the error is returned alongside
		// whatever prefix decoded.
		"_enc_base64_dec": func(str string) (string, error) {
			raw, err := base64.StdEncoding.DecodeString(str)
			return string(raw), err
		},
	})
}
// BindMarkdown binds a GitHub-flavored markdown renderer.
func BindMarkdown(L *lua.State) {
	render := func(source string) string {
		return string(github_flavored_markdown.Markdown([]byte(source)))
	}
	luar.Register(L, "markdown", luar.Map{
		"github": render,
	})
}
// BindLinenoise binds the linenoise readline library.
func BindLinenoise(L *lua.State) {
	api := luar.Map{
		"line":         linenoise.Line,
		"clear":        linenoise.Clear,
		"addHistory":   linenoise.AddHistory,
		"saveHistory":  linenoise.SaveHistory,
		"loadHistory":  linenoise.LoadHistory,
		"setMultiline": linenoise.SetMultiline,
	}
	luar.Register(L, "linenoise", api)
}
// BindOther binds miscellaneous globals: a Unix clock, a regexp compiler and
// the syntax highlighter.
func BindOther(L *lua.State) {
	// Current time as Unix seconds (UTC).
	now := func() int {
		return int(time.Now().UTC().Unix())
	}
	luar.Register(L, "", luar.Map{
		"unixtime": now,
		"regexp":   regexp.Compile,
	})
	luar.Register(L, "carbon", luar.Map{
		"_syntaxhl": helpers.SyntaxHL,
	})
}
// BindContext exposes the current gin request context and its accessors as
// Lua globals.
func BindContext(L *lua.State, context *gin.Context) {
	req := context.Request
	// Request basics as plain globals.
	luar.Register(L, "", luar.Map{
		"context": context,
		"req":     req,
		"host":    req.URL.Host,
		"path":    req.URL.Path,
		"scheme":  req.URL.Scheme,
	})
	// Accessor functions under the carbon namespace.
	luar.Register(L, "carbon", luar.Map{
		"_header_set": context.Header,
		"_header_get": req.Header.Get,
		"_paramfunc":  context.Param,
		"_formfunc":   context.PostForm,
		"_queryfunc":  context.Query,
	})
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package sandbox creates and manipulates sandboxes.
package sandbox
import (
"context"
"fmt"
"math"
"os"
"os/exec"
"strconv"
"syscall"
"time"
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/syndtr/gocapability/capability"
"gvisor.dev/gvisor/pkg/control/client"
"gvisor.dev/gvisor/pkg/control/server"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/urpc"
"gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/boot/platforms"
"gvisor.dev/gvisor/runsc/cgroup"
"gvisor.dev/gvisor/runsc/console"
"gvisor.dev/gvisor/runsc/specutils"
)
// Sandbox wraps a sandbox process.
//
// It is used to start/stop sandbox process (and associated processes like
// gofers), as well as for running and manipulating containers inside a running
// sandbox.
//
// Note: Sandbox must be immutable because a copy of it is saved for each
// container and changes would not be synchronized to all of them.
type Sandbox struct {
	// ID is the id of the sandbox (immutable). By convention, this is the same
	// ID as the first container run in the sandbox.
	ID string `json:"id"`
	// Pid is the pid of the running sandbox (immutable). May be 0 if the sandbox
	// is not running.
	Pid int `json:"pid"`
	// Cgroup has the cgroup configuration for the sandbox.
	Cgroup *cgroup.Cgroup `json:"cgroup"`
	// child is set if a sandbox process is a child of the current process.
	//
	// This field isn't saved to json, because only a creator of sandbox
	// will have it as a child process.
	child bool
	// status is an exit status of a sandbox process. Only meaningful when
	// child is true (see Wait).
	status syscall.WaitStatus
	// statusMu protects status.
	statusMu sync.Mutex
}
// Args is used to configure a new sandbox.
type Args struct {
	// ID is the sandbox unique identifier.
	ID string
	// Spec is the OCI spec that describes the container.
	Spec *specs.Spec
	// BundleDir is the directory containing the container bundle.
	BundleDir string
	// ConsoleSocket is the path to a unix domain socket that will receive
	// the console FD. It may be empty.
	ConsoleSocket string
	// UserLog is the filename to send user-visible logs to. It may be empty.
	UserLog string
	// IOFiles is the list of files that connect to a 9P endpoint for the mounts
	// points using Gofers. They must be in the same order as mounts appear in
	// the spec.
	IOFiles []*os.File
	// MountsFile is a file containing mount information from the spec. It's
	// equivalent to the mounts from the spec, except that all paths have been
	// resolved to their final absolute location.
	MountsFile *os.File
	// Cgroup is the cgroup that the sandbox is part of.
	Cgroup *cgroup.Cgroup
	// Attached indicates that the sandbox lifecycle is attached with the caller.
	// If the caller exits, the sandbox should exit too.
	Attached bool
}
// New creates the sandbox process. The caller must call Destroy() on the
// sandbox.
//
// Fix: the cleanup closure logged "error destroying sandbox" unconditionally
// — even when s.destroy() returned nil — producing a spurious warning on
// every failed creation. The log is now emitted only on an actual error.
func New(conf *boot.Config, args *Args) (*Sandbox, error) {
	s := &Sandbox{ID: args.ID, Cgroup: args.Cgroup}
	// The Cleanup object cleans up partially created sandboxes when an error
	// occurs. Any errors occurring during cleanup itself are ignored.
	c := specutils.MakeCleanup(func() {
		if err := s.destroy(); err != nil {
			log.Warningf("error destroying sandbox: %v", err)
		}
	})
	defer c.Clean()
	// Create pipe to synchronize when sandbox process has been booted.
	clientSyncFile, sandboxSyncFile, err := os.Pipe()
	if err != nil {
		return nil, fmt.Errorf("creating pipe for sandbox %q: %v", s.ID, err)
	}
	defer clientSyncFile.Close()
	// Create the sandbox process.
	err = s.createSandboxProcess(conf, args, sandboxSyncFile)
	// sandboxSyncFile has to be closed to be able to detect when the sandbox
	// process exits unexpectedly.
	sandboxSyncFile.Close()
	if err != nil {
		return nil, err
	}
	// Wait until the sandbox has booted: the boot process writes one byte to
	// the pipe when ready. A short read means it died before signaling.
	b := make([]byte, 1)
	if l, err := clientSyncFile.Read(b); err != nil || l != 1 {
		return nil, fmt.Errorf("waiting for sandbox to start: %v", err)
	}
	// Creation succeeded; disarm the cleanup.
	c.Release()
	return s, nil
}
// CreateContainer creates a non-root container inside the sandbox.
func (s *Sandbox) CreateContainer(cid string) error {
	log.Debugf("Create non-root container %q in sandbox %q, PID: %d", cid, s.ID, s.Pid)
	c, err := s.sandboxConnect()
	if err != nil {
		return fmt.Errorf("couldn't connect to sandbox: %v", err)
	}
	defer c.Close()
	// Ask the sandbox control server to set up the container.
	if err := c.Call(boot.ContainerCreate, &cid, nil); err != nil {
		return fmt.Errorf("creating non-root container %q: %v", cid, err)
	}
	return nil
}
// StartRoot starts running the root container process inside the sandbox.
func (s *Sandbox) StartRoot(spec *specs.Spec, conf *boot.Config) error {
	log.Debugf("Start root sandbox %q, PID: %d", s.ID, s.Pid)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// The network must be configured before the root container runs.
	if err := setupNetwork(c, s.Pid, spec, conf); err != nil {
		return fmt.Errorf("setting up network: %v", err)
	}
	// Tell the sandbox control server to start the root container.
	if err := c.Call(boot.RootContainerStart, &s.ID, nil); err != nil {
		return fmt.Errorf("starting root container: %v", err)
	}
	return nil
}
// StartContainer starts running a non-root container inside the sandbox.
func (s *Sandbox) StartContainer(spec *specs.Spec, conf *boot.Config, cid string, goferFiles []*os.File) error {
	// The gofer FDs are donated over the RPC; close our copies on return.
	for _, f := range goferFiles {
		defer f.Close()
	}
	log.Debugf("Start non-root container %q in sandbox %q, PID: %d", cid, s.ID, s.Pid)
	c, err := s.sandboxConnect()
	if err != nil {
		return fmt.Errorf("couldn't connect to sandbox: %v", err)
	}
	defer c.Close()
	// The payload must contain stdin/stdout/stderr followed by the gofer
	// files, in that order.
	stdio := []*os.File{os.Stdin, os.Stdout, os.Stderr}
	args := boot.StartArgs{
		Spec:        spec,
		Conf:        conf,
		CID:         cid,
		FilePayload: urpc.FilePayload{Files: append(stdio, goferFiles...)},
	}
	// Kick off the container.
	if err := c.Call(boot.ContainerStart, &args, nil); err != nil {
		return fmt.Errorf("starting non-root container %v: %v", spec.Process.Args, err)
	}
	return nil
}
// Restore sends the restore call for a container in the sandbox.
func (s *Sandbox) Restore(cid string, spec *specs.Spec, conf *boot.Config, filename string) error {
	log.Debugf("Restore sandbox %q", s.ID)
	rf, err := os.Open(filename)
	if err != nil {
		return fmt.Errorf("opening restore file %q failed: %v", filename, err)
	}
	defer rf.Close()
	opt := boot.RestoreOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{rf}},
		SandboxID:   s.ID,
	}
	// If the platform needs a device FD we must pass it in.
	deviceFile, err := deviceFileForPlatform(conf.Platform)
	if err != nil {
		return err
	}
	if deviceFile != nil {
		defer deviceFile.Close()
		opt.FilePayload.Files = append(opt.FilePayload.Files, deviceFile)
	}
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Configure the network before restoring.
	if err := setupNetwork(c, s.Pid, spec, conf); err != nil {
		return fmt.Errorf("setting up network: %v", err)
	}
	// Restore the container and start the root container.
	if err := c.Call(boot.ContainerRestore, &opt, nil); err != nil {
		return fmt.Errorf("restoring container %q: %v", cid, err)
	}
	return nil
}
// Processes retrieves the list of processes and associated metadata for a
// given container in this sandbox.
func (s *Sandbox) Processes(cid string) ([]*control.Process, error) {
	log.Debugf("Getting processes for container %q in sandbox %q", cid, s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return nil, err
	}
	defer c.Close()
	var processes []*control.Process
	if err := c.Call(boot.ContainerProcesses, &cid, &processes); err != nil {
		return nil, fmt.Errorf("retrieving process data from sandbox: %v", err)
	}
	return processes, nil
}
// Execute runs the specified command in the container. It returns the PID of
// the newly created process.
func (s *Sandbox) Execute(args *control.ExecArgs) (int32, error) {
	log.Debugf("Executing new process in container %q in sandbox %q", args.ContainerID, s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return 0, s.connError(err)
	}
	defer c.Close()
	// The control server forks the process and replies with its PID.
	var pid int32
	if err := c.Call(boot.ContainerExecuteAsync, args, &pid); err != nil {
		return 0, fmt.Errorf("executing command %q in sandbox: %v", args, err)
	}
	return pid, nil
}
// Event retrieves stats about the sandbox such as memory and CPU utilization.
func (s *Sandbox) Event(cid string) (*boot.Event, error) {
	log.Debugf("Getting events for container %q in sandbox %q", cid, s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return nil, err
	}
	defer c.Close()
	// TODO(b/129292330): Pass in the container id (cid) here. The sandbox
	// should return events only for that container.
	var e boot.Event
	if err := c.Call(boot.ContainerEvent, nil, &e); err != nil {
		return nil, fmt.Errorf("retrieving event data from sandbox: %v", err)
	}
	e.ID = cid
	return &e, nil
}
// sandboxConnect opens a uRPC client to this sandbox's control server.
func (s *Sandbox) sandboxConnect() (*urpc.Client, error) {
	log.Debugf("Connecting to sandbox %q", s.ID)
	c, err := client.ConnectTo(boot.ControlSocketAddr(s.ID))
	if err != nil {
		return nil, s.connError(err)
	}
	return c, nil
}
// connError wraps a control-server connection error with the sandbox PID for
// context.
func (s *Sandbox) connError(err error) error {
	return fmt.Errorf("connecting to control server at PID %d: %v", s.Pid, err)
}
// createSandboxProcess starts the sandbox as a subprocess by running the "boot"
// command, passing in the bundle dir.
//
// FDs donated via cmd.ExtraFiles appear in the child's FD table starting at 3
// in append order; nextFD mirrors that numbering, so every ExtraFiles append
// is paired with the matching --*-fd flag before nextFD is incremented. Do
// not reorder these sections.
func (s *Sandbox) createSandboxProcess(conf *boot.Config, args *Args, startSyncFile *os.File) error {
	// nextFD is used to get unused FDs that we can pass to the sandbox. It
	// starts at 3 because 0, 1, and 2 are taken by stdin/out/err.
	nextFD := 3
	binPath := specutils.ExePath
	cmd := exec.Command(binPath, conf.ToFlags()...)
	cmd.SysProcAttr = &syscall.SysProcAttr{}
	// Open the log files to pass to the sandbox as FDs.
	//
	// These flags must come BEFORE the "boot" command in cmd.Args.
	if conf.LogFilename != "" {
		logFile, err := os.OpenFile(conf.LogFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return fmt.Errorf("opening log file %q: %v", conf.LogFilename, err)
		}
		defer logFile.Close()
		cmd.ExtraFiles = append(cmd.ExtraFiles, logFile)
		cmd.Args = append(cmd.Args, "--log-fd="+strconv.Itoa(nextFD))
		nextFD++
	}
	if conf.DebugLog != "" {
		test := ""
		if len(conf.TestOnlyTestNameEnv) != 0 {
			// Fetch test name if one is provided and the test only flag was set.
			if t, ok := specutils.EnvVar(args.Spec.Process.Env, conf.TestOnlyTestNameEnv); ok {
				test = t
			}
		}
		debugLogFile, err := specutils.DebugLogFile(conf.DebugLog, "boot", test)
		if err != nil {
			return fmt.Errorf("opening debug log file in %q: %v", conf.DebugLog, err)
		}
		defer debugLogFile.Close()
		cmd.ExtraFiles = append(cmd.ExtraFiles, debugLogFile)
		cmd.Args = append(cmd.Args, "--debug-log-fd="+strconv.Itoa(nextFD))
		nextFD++
	}
	if conf.PanicLog != "" {
		test := ""
		if len(conf.TestOnlyTestNameEnv) != 0 {
			// Fetch test name if one is provided and the test only flag was set.
			if t, ok := specutils.EnvVar(args.Spec.Process.Env, conf.TestOnlyTestNameEnv); ok {
				test = t
			}
		}
		panicLogFile, err := specutils.DebugLogFile(conf.PanicLog, "panic", test)
		if err != nil {
			return fmt.Errorf("opening debug log file in %q: %v", conf.PanicLog, err)
		}
		defer panicLogFile.Close()
		cmd.ExtraFiles = append(cmd.ExtraFiles, panicLogFile)
		cmd.Args = append(cmd.Args, "--panic-log-fd="+strconv.Itoa(nextFD))
		nextFD++
	}
	cmd.Args = append(cmd.Args, "--panic-signal="+strconv.Itoa(int(syscall.SIGTERM)))
	// Add the "boot" command to the args.
	//
	// All flags after this must be for the boot command
	cmd.Args = append(cmd.Args, "boot", "--bundle="+args.BundleDir)
	// Create a socket for the control server and donate it to the sandbox.
	addr := boot.ControlSocketAddr(s.ID)
	sockFD, err := server.CreateSocket(addr)
	log.Infof("Creating sandbox process with addr: %s", addr[1:]) // skip "\00".
	if err != nil {
		return fmt.Errorf("creating control server socket for sandbox %q: %v", s.ID, err)
	}
	controllerFile := os.NewFile(uintptr(sockFD), "control_server_socket")
	defer controllerFile.Close()
	cmd.ExtraFiles = append(cmd.ExtraFiles, controllerFile)
	cmd.Args = append(cmd.Args, "--controller-fd="+strconv.Itoa(nextFD))
	nextFD++
	// Donate the resolved mounts file from the spec.
	defer args.MountsFile.Close()
	cmd.ExtraFiles = append(cmd.ExtraFiles, args.MountsFile)
	cmd.Args = append(cmd.Args, "--mounts-fd="+strconv.Itoa(nextFD))
	nextFD++
	// Donate the OCI spec itself.
	specFile, err := specutils.OpenSpec(args.BundleDir)
	if err != nil {
		return err
	}
	defer specFile.Close()
	cmd.ExtraFiles = append(cmd.ExtraFiles, specFile)
	cmd.Args = append(cmd.Args, "--spec-fd="+strconv.Itoa(nextFD))
	nextFD++
	// Donate the write end of the boot-synchronization pipe (see New).
	cmd.ExtraFiles = append(cmd.ExtraFiles, startSyncFile)
	cmd.Args = append(cmd.Args, "--start-sync-fd="+strconv.Itoa(nextFD))
	nextFD++
	// If there is a gofer, sends all socket ends to the sandbox.
	for _, f := range args.IOFiles {
		defer f.Close()
		cmd.ExtraFiles = append(cmd.ExtraFiles, f)
		cmd.Args = append(cmd.Args, "--io-fds="+strconv.Itoa(nextFD))
		nextFD++
	}
	// If the platform needs a device FD we must pass it in.
	if deviceFile, err := deviceFileForPlatform(conf.Platform); err != nil {
		return err
	} else if deviceFile != nil {
		defer deviceFile.Close()
		cmd.ExtraFiles = append(cmd.ExtraFiles, deviceFile)
		cmd.Args = append(cmd.Args, "--device-fd="+strconv.Itoa(nextFD))
		nextFD++
	}
	// TODO(b/151157106): syscall tests fail by timeout if asyncpreemptoff
	// isn't set.
	if conf.Platform == "kvm" {
		cmd.Env = append(cmd.Env, "GODEBUG=asyncpreemptoff=1")
	}
	// The current process' stdio must be passed to the application via the
	// --stdio-fds flag. The stdio of the sandbox process itself must not
	// be connected to the same FDs, otherwise we risk leaking sandbox
	// errors to the application, so we set the sandbox stdio to nil,
	// causing them to read/write from the null device.
	cmd.Stdin = nil
	cmd.Stdout = nil
	cmd.Stderr = nil
	// If the console control socket file is provided, then create a new
	// pty master/slave pair and set the TTY on the sandbox process.
	if args.ConsoleSocket != "" {
		cmd.Args = append(cmd.Args, "--console=true")
		// console.NewWithSocket will send the master on the given
		// socket, and return the slave.
		tty, err := console.NewWithSocket(args.ConsoleSocket)
		if err != nil {
			return fmt.Errorf("setting up console with socket %q: %v", args.ConsoleSocket, err)
		}
		defer tty.Close()
		// Set the TTY as a controlling TTY on the sandbox process.
		cmd.SysProcAttr.Setctty = true
		// The Ctty FD must be the FD in the child process's FD table,
		// which will be nextFD in this case.
		// See https://github.com/golang/go/issues/29458.
		cmd.SysProcAttr.Ctty = nextFD
		// Pass the tty as all stdio fds to sandbox.
		for i := 0; i < 3; i++ {
			cmd.ExtraFiles = append(cmd.ExtraFiles, tty)
			cmd.Args = append(cmd.Args, "--stdio-fds="+strconv.Itoa(nextFD))
			nextFD++
		}
		if conf.Debug {
			// If debugging, send the boot process stdio to the
			// TTY, so that it is easier to find.
			cmd.Stdin = tty
			cmd.Stdout = tty
			cmd.Stderr = tty
		}
	} else {
		// If not using a console, pass our current stdio as the
		// container stdio via flags.
		for _, f := range []*os.File{os.Stdin, os.Stdout, os.Stderr} {
			cmd.ExtraFiles = append(cmd.ExtraFiles, f)
			cmd.Args = append(cmd.Args, "--stdio-fds="+strconv.Itoa(nextFD))
			nextFD++
		}
		if conf.Debug {
			// If debugging, send the boot process stdio to
			// this process' stdio, so that it is easier to find.
			cmd.Stdin = os.Stdin
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr
		}
	}
	// Detach from this session, otherwise cmd will get SIGHUP and SIGCONT
	// when re-parented.
	cmd.SysProcAttr.Setsid = true
	// nss is the set of namespaces to join or create before starting the sandbox
	// process. Mount, IPC and UTS namespaces from the host are not used as they
	// are virtualized inside the sandbox. Be paranoid and run inside an empty
	// namespace for these. Don't unshare cgroup because sandbox is added to a
	// cgroup in the caller's namespace.
	log.Infof("Sandbox will be started in new mount, IPC and UTS namespaces")
	nss := []specs.LinuxNamespace{
		{Type: specs.IPCNamespace},
		{Type: specs.MountNamespace},
		{Type: specs.UTSNamespace},
	}
	if conf.Platform == platforms.Ptrace {
		// TODO(b/75837838): Also set a new PID namespace so that we limit
		// access to other host processes.
		log.Infof("Sandbox will be started in the current PID namespace")
	} else {
		log.Infof("Sandbox will be started in a new PID namespace")
		nss = append(nss, specs.LinuxNamespace{Type: specs.PIDNamespace})
		cmd.Args = append(cmd.Args, "--pidns=true")
	}
	// Joins the network namespace if network is enabled. the sandbox talks
	// directly to the host network, which may have been configured in the
	// namespace.
	if ns, ok := specutils.GetNS(specs.NetworkNamespace, args.Spec); ok && conf.Network != boot.NetworkNone {
		log.Infof("Sandbox will be started in the container's network namespace: %+v", ns)
		nss = append(nss, ns)
	} else if conf.Network == boot.NetworkHost {
		log.Infof("Sandbox will be started in the host network namespace")
	} else {
		log.Infof("Sandbox will be started in new network namespace")
		nss = append(nss, specs.LinuxNamespace{Type: specs.NetworkNamespace})
	}
	// User namespace depends on the network type. Host network requires to run
	// inside the user namespace specified in the spec or the current namespace
	// if none is configured.
	if conf.Network == boot.NetworkHost {
		if userns, ok := specutils.GetNS(specs.UserNamespace, args.Spec); ok {
			log.Infof("Sandbox will be started in container's user namespace: %+v", userns)
			nss = append(nss, userns)
			specutils.SetUIDGIDMappings(cmd, args.Spec)
		} else {
			log.Infof("Sandbox will be started in the current user namespace")
		}
		// When running in the caller's defined user namespace, apply the same
		// capabilities to the sandbox process to ensure it abides to the same
		// rules.
		cmd.Args = append(cmd.Args, "--apply-caps=true")
		// If we have CAP_SYS_ADMIN, we can create an empty chroot and
		// bind-mount the executable inside it.
		if conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {
			log.Warningf("Running sandbox in test mode without chroot. This is only safe in tests!")
		} else if specutils.HasCapabilities(capability.CAP_SYS_ADMIN) {
			log.Infof("Sandbox will be started in minimal chroot")
			cmd.Args = append(cmd.Args, "--setup-root")
		} else {
			return fmt.Errorf("can't run sandbox process in minimal chroot since we don't have CAP_SYS_ADMIN")
		}
	} else {
		// If we have CAP_SETUID and CAP_SETGID, then we can also run
		// as user nobody.
		if conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {
			log.Warningf("Running sandbox in test mode as current user (uid=%d gid=%d). This is only safe in tests!", os.Getuid(), os.Getgid())
			log.Warningf("Running sandbox in test mode without chroot. This is only safe in tests!")
		} else if specutils.HasCapabilities(capability.CAP_SETUID, capability.CAP_SETGID) {
			log.Infof("Sandbox will be started in new user namespace")
			nss = append(nss, specs.LinuxNamespace{Type: specs.UserNamespace})
			cmd.Args = append(cmd.Args, "--setup-root")
			if conf.Rootless {
				log.Infof("Rootless mode: sandbox will run as root inside user namespace, mapped to the current user, uid: %d, gid: %d", os.Getuid(), os.Getgid())
				cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
					{
						ContainerID: 0,
						HostID:      os.Getuid(),
						Size:        1,
					},
				}
				cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
					{
						ContainerID: 0,
						HostID:      os.Getgid(),
						Size:        1,
					},
				}
				cmd.SysProcAttr.Credential = &syscall.Credential{Uid: 0, Gid: 0}
			} else {
				// Map nobody in the new namespace to nobody in the parent namespace.
				//
				// A sandbox process will construct an empty
				// root for itself, so it has to have the CAP_SYS_ADMIN
				// capability.
				//
				// FIXME(b/122554829): The current implementations of
				// os/exec doesn't allow to set ambient capabilities if
				// a process is started in a new user namespace. As a
				// workaround, we start the sandbox process with the 0
				// UID and then it constructs a chroot and sets UID to
				// nobody. https://github.com/golang/go/issues/2315
				const nobody = 65534
				cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
					{
						ContainerID: 0,
						HostID:      nobody - 1,
						Size:        1,
					},
					{
						ContainerID: nobody,
						HostID:      nobody,
						Size:        1,
					},
				}
				cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
					{
						ContainerID: nobody,
						HostID:      nobody,
						Size:        1,
					},
				}
				// Set credentials to run as user and group nobody.
				cmd.SysProcAttr.Credential = &syscall.Credential{Uid: 0, Gid: nobody}
			}
		} else {
			return fmt.Errorf("can't run sandbox process as user nobody since we don't have CAP_SETUID or CAP_SETGID")
		}
	}
	// Rename argv[0] so the sandbox is identifiable in process listings.
	cmd.Args[0] = "runsc-sandbox"
	if s.Cgroup != nil {
		cpuNum, err := s.Cgroup.NumCPU()
		if err != nil {
			return fmt.Errorf("getting cpu count from cgroups: %v", err)
		}
		if conf.CPUNumFromQuota {
			// Dropping below 2 CPUs can trigger application to disable
			// locks that can lead do hard to debug errors, so just
			// leaving two cores as reasonable default.
			const minCPUs = 2
			quota, err := s.Cgroup.CPUQuota()
			if err != nil {
				return fmt.Errorf("getting cpu qouta from cgroups: %v", err)
			}
			if n := int(math.Ceil(quota)); n > 0 {
				if n < minCPUs {
					n = minCPUs
				}
				if n < cpuNum {
					// Only lower the cpu number.
					cpuNum = n
				}
			}
		}
		cmd.Args = append(cmd.Args, "--cpu-num", strconv.Itoa(cpuNum))
		mem, err := s.Cgroup.MemoryLimit()
		if err != nil {
			return fmt.Errorf("getting memory limit from cgroups: %v", err)
		}
		// When memory limit is unset, a "large" number is returned. In that case,
		// just stick with the default.
		if mem < 0x7ffffffffffff000 {
			cmd.Args = append(cmd.Args, "--total-memory", strconv.FormatUint(mem, 10))
		}
	}
	if args.UserLog != "" {
		f, err := os.OpenFile(args.UserLog, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)
		if err != nil {
			return fmt.Errorf("opening compat log file: %v", err)
		}
		defer f.Close()
		cmd.ExtraFiles = append(cmd.ExtraFiles, f)
		cmd.Args = append(cmd.Args, "--user-log-fd", strconv.Itoa(nextFD))
		nextFD++
	}
	if args.Attached {
		// Kill sandbox if parent process exits in attached mode.
		cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
		// Tells boot that any process it creates must have pdeathsig set.
		cmd.Args = append(cmd.Args, "--attached")
	}
	// Add container as the last argument.
	cmd.Args = append(cmd.Args, s.ID)
	// Log the FDs we are donating to the sandbox process.
	for i, f := range cmd.ExtraFiles {
		log.Debugf("Donating FD %d: %q", i+3, f.Name())
	}
	log.Debugf("Starting sandbox: %s %v", binPath, cmd.Args)
	log.Debugf("SysProcAttr: %+v", cmd.SysProcAttr)
	if err := specutils.StartInNS(cmd, nss); err != nil {
		return fmt.Errorf("Sandbox: %v", err)
	}
	s.child = true
	s.Pid = cmd.Process.Pid
	log.Infof("Sandbox started, PID: %d", s.Pid)
	return nil
}
// Wait waits for the containerized process to exit, and returns its WaitStatus.
func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {
	log.Debugf("Waiting for container %q in sandbox %q", cid, s.ID)
	var ws syscall.WaitStatus
	conn, err := s.sandboxConnect()
	if err != nil {
		// The sandbox may have exited while before we had a chance to
		// wait on it.
		log.Warningf("Wait on container %q failed: %v. Will try waiting on the sandbox process instead.", cid, err)
	} else {
		defer conn.Close()
		// Try the Wait RPC to the sandbox; success means we have the
		// real container exit status.
		rpcErr := conn.Call(boot.ContainerWait, &cid, &ws)
		if rpcErr == nil {
			return ws, nil
		}
		// The sandbox may have exited after we connected, but before
		// or during the Wait RPC.
		log.Warningf("Wait RPC to container %q failed: %v. Will try waiting on the sandbox process instead.", cid, rpcErr)
	}
	// The sandbox may have already exited, or exited while handling the
	// Wait RPC. The best we can do is ask Linux what the sandbox exit
	// status was, since in most cases that will be the same as the
	// container exit status.
	if err := s.waitForStopped(); err != nil {
		return ws, err
	}
	if !s.child {
		return ws, fmt.Errorf("sandbox no longer running and its exit status is unavailable")
	}
	return s.status, nil
}
// WaitPID waits for process 'pid' in the container's sandbox and returns its
// WaitStatus.
func (s *Sandbox) WaitPID(cid string, pid int32) (syscall.WaitStatus, error) {
	log.Debugf("Waiting for PID %d in sandbox %q", pid, s.ID)
	var ws syscall.WaitStatus
	c, err := s.sandboxConnect()
	if err != nil {
		return ws, err
	}
	defer c.Close()
	// Ask the sandbox control server to wait on the given process.
	req := &boot.WaitPIDArgs{
		CID: cid,
		PID: pid,
	}
	if err := c.Call(boot.ContainerWaitPID, req, &ws); err != nil {
		return ws, fmt.Errorf("waiting on PID %d in sandbox %q: %v", pid, s.ID, err)
	}
	return ws, nil
}
// IsRootContainer returns true if the specified container ID belongs to the
// root container.
func (s *Sandbox) IsRootContainer(cid string) bool {
	// By convention, the root container shares its ID with the sandbox.
	return cid == s.ID
}
// destroy frees all resources associated with the sandbox. It fails fast and
// is idempotent.
func (s *Sandbox) destroy() error {
	log.Debugf("Destroy sandbox %q", s.ID)
	if s.Pid != 0 {
		log.Debugf("Killing sandbox %q", s.ID)
		// ESRCH means the process is already gone, which is fine since
		// destroy is idempotent.
		if err := syscall.Kill(s.Pid, syscall.SIGKILL); err != nil && err != syscall.ESRCH {
			// Use %d for the PID: %q is not a valid verb for integers
			// and would render as %!q(int=...).
			return fmt.Errorf("killing sandbox %q PID %d: %v", s.ID, s.Pid, err)
		}
		if err := s.waitForStopped(); err != nil {
			return fmt.Errorf("waiting sandbox %q stop: %v", s.ID, err)
		}
	}
	return nil
}
// SignalContainer sends the signal to a container in the sandbox. If all is
// true and signal is SIGKILL, then waits for all processes to exit before
// returning.
func (s *Sandbox) SignalContainer(cid string, sig syscall.Signal, all bool) error {
	log.Debugf("Signal sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Deliver either to the container's init process alone or to every
	// process in the container, depending on the caller's request.
	mode := boot.DeliverToProcess
	if all {
		mode = boot.DeliverToAllProcesses
	}
	sigArgs := boot.SignalArgs{
		CID:   cid,
		Signo: int32(sig),
		Mode:  mode,
	}
	if err := c.Call(boot.ContainerSignal, &sigArgs, nil); err != nil {
		return fmt.Errorf("signaling container %q: %v", cid, err)
	}
	return nil
}
// SignalProcess sends the signal to a particular process in the container. If
// fgProcess is true, then the signal is sent to the foreground process group
// in the same session that PID belongs to. This is only valid if the process
// is attached to a host TTY.
func (s *Sandbox) SignalProcess(cid string, pid int32, sig syscall.Signal, fgProcess bool) error {
	log.Debugf("Signal sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Pick the delivery mode: the single process, or its foreground
	// process group when requested.
	mode := boot.DeliverToProcess
	if fgProcess {
		mode = boot.DeliverToForegroundProcessGroup
	}
	sigArgs := boot.SignalArgs{
		CID:   cid,
		Signo: int32(sig),
		PID:   pid,
		Mode:  mode,
	}
	if err := c.Call(boot.ContainerSignal, &sigArgs, nil); err != nil {
		return fmt.Errorf("signaling container %q PID %d: %v", cid, pid, err)
	}
	return nil
}
// Checkpoint sends the checkpoint call for a container in the sandbox.
// The statefile will be written to f.
func (s *Sandbox) Checkpoint(cid string, f *os.File) error {
	log.Debugf("Checkpoint sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Donate the statefile to the sandbox so it can write the image there.
	saveOpts := control.SaveOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if err := c.Call(boot.ContainerCheckpoint, &saveOpts, nil); err != nil {
		return fmt.Errorf("checkpointing container %q: %v", cid, err)
	}
	return nil
}
// Pause sends the pause call for a container in the sandbox.
func (s *Sandbox) Pause(cid string) error {
	log.Debugf("Pause sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	if callErr := c.Call(boot.ContainerPause, nil, nil); callErr != nil {
		return fmt.Errorf("pausing container %q: %v", cid, callErr)
	}
	return nil
}
// Resume sends the resume call for a container in the sandbox.
func (s *Sandbox) Resume(cid string) error {
	log.Debugf("Resume sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	if callErr := c.Call(boot.ContainerResume, nil, nil); callErr != nil {
		return fmt.Errorf("resuming container %q: %v", cid, callErr)
	}
	return nil
}
// IsRunning returns true if the sandbox process is running.
func (s *Sandbox) IsRunning() bool {
	if s.Pid == 0 {
		return false
	}
	// Signal 0 performs error checking only: it succeeds exactly when a
	// process with this PID exists and can be signaled.
	return syscall.Kill(s.Pid, 0) == nil
}
// Stacks collects and returns all stacks for the sandbox.
func (s *Sandbox) Stacks() (string, error) {
	log.Debugf("Stacks sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return "", err
	}
	defer c.Close()
	var out string
	if err := c.Call(boot.SandboxStacks, nil, &out); err != nil {
		return "", fmt.Errorf("getting sandbox %q stacks: %v", s.ID, err)
	}
	return out, nil
}
// HeapProfile writes a heap profile to the given file.
func (s *Sandbox) HeapProfile(f *os.File) error {
	log.Debugf("Heap profile %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Donate the destination file so the sandbox writes the profile directly.
	profileOpts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if err := c.Call(boot.HeapProfile, &profileOpts, nil); err != nil {
		return fmt.Errorf("getting sandbox %q heap profile: %v", s.ID, err)
	}
	return nil
}
// StartCPUProfile start CPU profile writing to the given file.
func (s *Sandbox) StartCPUProfile(f *os.File) error {
	log.Debugf("CPU profile start %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Donate the destination file so the sandbox writes the profile directly.
	profileOpts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if err := c.Call(boot.StartCPUProfile, &profileOpts, nil); err != nil {
		return fmt.Errorf("starting sandbox %q CPU profile: %v", s.ID, err)
	}
	return nil
}
// StopCPUProfile stops a previously started CPU profile.
func (s *Sandbox) StopCPUProfile() error {
	log.Debugf("CPU profile stop %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	if callErr := c.Call(boot.StopCPUProfile, nil, nil); callErr != nil {
		return fmt.Errorf("stopping sandbox %q CPU profile: %v", s.ID, callErr)
	}
	return nil
}
// GoroutineProfile writes a goroutine profile to the given file.
func (s *Sandbox) GoroutineProfile(f *os.File) error {
	log.Debugf("Goroutine profile %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Donate the destination file so the sandbox writes the profile directly.
	profileOpts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if err := c.Call(boot.GoroutineProfile, &profileOpts, nil); err != nil {
		return fmt.Errorf("getting sandbox %q goroutine profile: %v", s.ID, err)
	}
	return nil
}
// BlockProfile writes a block profile to the given file.
func (s *Sandbox) BlockProfile(f *os.File) error {
	log.Debugf("Block profile %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Donate the destination file so the sandbox writes the profile directly.
	profileOpts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if err := c.Call(boot.BlockProfile, &profileOpts, nil); err != nil {
		return fmt.Errorf("getting sandbox %q block profile: %v", s.ID, err)
	}
	return nil
}
// MutexProfile writes a mutex profile to the given file.
func (s *Sandbox) MutexProfile(f *os.File) error {
	log.Debugf("Mutex profile %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Donate the destination file so the sandbox writes the profile directly.
	profileOpts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if err := c.Call(boot.MutexProfile, &profileOpts, nil); err != nil {
		return fmt.Errorf("getting sandbox %q mutex profile: %v", s.ID, err)
	}
	return nil
}
// StartTrace start trace writing to the given file.
func (s *Sandbox) StartTrace(f *os.File) error {
	log.Debugf("Trace start %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	// Donate the destination file so the sandbox writes the trace directly.
	traceOpts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if err := c.Call(boot.StartTrace, &traceOpts, nil); err != nil {
		return fmt.Errorf("starting sandbox %q trace: %v", s.ID, err)
	}
	return nil
}
// StopTrace stops a previously started trace.
func (s *Sandbox) StopTrace() error {
	log.Debugf("Trace stop %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	if callErr := c.Call(boot.StopTrace, nil, nil); callErr != nil {
		return fmt.Errorf("stopping sandbox %q trace: %v", s.ID, callErr)
	}
	return nil
}
// ChangeLogging changes logging options.
func (s *Sandbox) ChangeLogging(args control.LoggingArgs) error {
	log.Debugf("Change logging start %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	if callErr := c.Call(boot.ChangeLogging, &args, nil); callErr != nil {
		return fmt.Errorf("changing sandbox %q logging: %v", s.ID, callErr)
	}
	return nil
}
// DestroyContainer destroys the given container. If it is the root container,
// then the entire sandbox is destroyed.
func (s *Sandbox) DestroyContainer(cid string) error {
	err := s.destroyContainer(cid)
	if err == nil {
		return nil
	}
	// If the sandbox isn't running, the container has already been
	// destroyed, so ignore the error in that case.
	if !s.IsRunning() {
		return nil
	}
	return err
}
// destroyContainer destroys a single container; destroying the root container
// tears down the entire sandbox.
func (s *Sandbox) destroyContainer(cid string) error {
	if s.IsRootContainer(cid) {
		log.Debugf("Destroying root container %q by destroying sandbox", cid)
		return s.destroy()
	}

	log.Debugf("Destroying container %q in sandbox %q", cid, s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	if callErr := c.Call(boot.ContainerDestroy, &cid, nil); callErr != nil {
		return fmt.Errorf("destroying container %q: %v", cid, callErr)
	}
	return nil
}
// waitForStopped polls until the sandbox process is no longer running,
// retrying every 100ms for up to 5 seconds. For child sandboxes it also reaps
// the process and records its exit status in s.status.
func (s *Sandbox) waitForStopped() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
	op := func() error {
		if s.child {
			// statusMu guards s.status, which Wait4 fills in below.
			s.statusMu.Lock()
			defer s.statusMu.Unlock()
			if s.Pid == 0 {
				// Already reaped by an earlier attempt; nothing to do.
				return nil
			}
			// The sandbox process is a child of the current process,
			// so we can wait it and collect its zombie.
			wpid, err := syscall.Wait4(int(s.Pid), &s.status, syscall.WNOHANG, nil)
			if err != nil {
				return fmt.Errorf("error waiting the sandbox process: %v", err)
			}
			if wpid == 0 {
				// WNOHANG returned with no state change; retry via backoff.
				return fmt.Errorf("sandbox is still running")
			}
			// Mark the process as reaped so subsequent calls return immediately.
			s.Pid = 0
		} else if s.IsRunning() {
			return fmt.Errorf("sandbox is still running")
		}
		return nil
	}
	return backoff.Retry(op, b)
}
// deviceFileForPlatform opens the device file for the given platform. If the
// platform does not need a device file, then nil is returned.
func deviceFileForPlatform(name string) (*os.File, error) {
	plat, err := platform.Lookup(name)
	if err != nil {
		return nil, err
	}
	// A nil file with a nil error is valid: some platforms need no device.
	devFile, err := plat.OpenDevice()
	if err != nil {
		return nil, fmt.Errorf("opening device file for platform %q: %v", plat, err)
	}
	return devFile, nil
}
Don't map the 0 uid into a sandbox user namespace
Starting with go1.13, we can specify ambient capabilities when we execute a new
process with os/exec.Cmd.
PiperOrigin-RevId: 305366706
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package sandbox creates and manipulates sandboxes.
package sandbox
import (
"context"
"fmt"
"math"
"os"
"os/exec"
"strconv"
"syscall"
"time"
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/syndtr/gocapability/capability"
"gvisor.dev/gvisor/pkg/control/client"
"gvisor.dev/gvisor/pkg/control/server"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/urpc"
"gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/boot/platforms"
"gvisor.dev/gvisor/runsc/cgroup"
"gvisor.dev/gvisor/runsc/console"
"gvisor.dev/gvisor/runsc/specutils"
)
// Sandbox wraps a sandbox process.
//
// It is used to start/stop sandbox process (and associated processes like
// gofers), as well as for running and manipulating containers inside a running
// sandbox.
//
// Note: Sandbox must be immutable because a copy of it is saved for each
// container and changes would not be synchronized to all of them.
type Sandbox struct {
	// ID is the id of the sandbox (immutable). By convention, this is the same
	// ID as the first container run in the sandbox.
	ID string `json:"id"`
	// Pid is the pid of the running sandbox (immutable). May be 0 if the sandbox
	// is not running.
	Pid int `json:"pid"`
	// Cgroup has the cgroup configuration for the sandbox.
	Cgroup *cgroup.Cgroup `json:"cgroup"`
	// child is set if a sandbox process is a child of the current process.
	//
	// This field isn't saved to json, because only a creator of sandbox
	// will have it as a child process.
	child bool
	// status is an exit status of a sandbox process. Only meaningful when
	// child is true; filled in by waitForStopped.
	status syscall.WaitStatus
	// statusMu protects status.
	statusMu sync.Mutex
}
// Args is used to configure a new sandbox.
type Args struct {
	// ID is the sandbox unique identifier.
	ID string
	// Spec is the OCI spec that describes the container.
	Spec *specs.Spec
	// BundleDir is the directory containing the container bundle.
	BundleDir string
	// ConsoleSocket is the path to a unix domain socket that will receive
	// the console FD. It may be empty.
	ConsoleSocket string
	// UserLog is the filename to send user-visible logs to. It may be empty.
	UserLog string
	// IOFiles is the list of files that connect to a 9P endpoint for the mounts
	// points using Gofers. They must be in the same order as mounts appear in
	// the spec.
	IOFiles []*os.File
	// MountsFile is a file containing mount information from the spec. It's
	// equivalent to the mounts from the spec, except that all paths have been
	// resolved to their final absolute location.
	MountsFile *os.File
	// Cgroup is the cgroup that the sandbox is part of.
	Cgroup *cgroup.Cgroup
	// Attached indicates that the sandbox lifecycle is attached with the caller.
	// If the caller exits, the sandbox should exit too.
	Attached bool
}
// New creates the sandbox process. The caller must call Destroy() on the
// sandbox.
func New(conf *boot.Config, args *Args) (*Sandbox, error) {
	s := &Sandbox{ID: args.ID, Cgroup: args.Cgroup}
	// The Cleanup object cleans up partially created sandboxes when an error
	// occurs. Any errors occurring during cleanup itself are logged and
	// otherwise ignored.
	c := specutils.MakeCleanup(func() {
		// Only warn when destroy actually failed; the original
		// unconditional log emitted a misleading
		// "error destroying sandbox: <nil>" on success.
		if err := s.destroy(); err != nil {
			log.Warningf("error destroying sandbox: %v", err)
		}
	})
	defer c.Clean()
	// Create pipe to synchronize when sandbox process has been booted.
	clientSyncFile, sandboxSyncFile, err := os.Pipe()
	if err != nil {
		return nil, fmt.Errorf("creating pipe for sandbox %q: %v", s.ID, err)
	}
	defer clientSyncFile.Close()
	// Create the sandbox process.
	err = s.createSandboxProcess(conf, args, sandboxSyncFile)
	// sandboxSyncFile has to be closed to be able to detect when the sandbox
	// process exits unexpectedly.
	sandboxSyncFile.Close()
	if err != nil {
		return nil, err
	}
	// Wait until the sandbox has booted: the boot process writes one byte
	// to the pipe when it is ready.
	b := make([]byte, 1)
	if l, err := clientSyncFile.Read(b); err != nil || l != 1 {
		return nil, fmt.Errorf("waiting for sandbox to start: %v", err)
	}
	// Success: disarm the cleanup so the sandbox stays alive for the caller.
	c.Release()
	return s, nil
}
// CreateContainer creates a non-root container inside the sandbox.
func (s *Sandbox) CreateContainer(cid string) error {
	log.Debugf("Create non-root container %q in sandbox %q, PID: %d", cid, s.ID, s.Pid)
	c, err := s.sandboxConnect()
	if err != nil {
		return fmt.Errorf("couldn't connect to sandbox: %v", err)
	}
	defer c.Close()
	if callErr := c.Call(boot.ContainerCreate, &cid, nil); callErr != nil {
		return fmt.Errorf("creating non-root container %q: %v", cid, callErr)
	}
	return nil
}
// StartRoot starts running the root container process inside the sandbox.
func (s *Sandbox) StartRoot(spec *specs.Spec, conf *boot.Config) error {
	log.Debugf("Start root sandbox %q, PID: %d", s.ID, s.Pid)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	// Configure the network before the container starts.
	if err := setupNetwork(c, s.Pid, spec, conf); err != nil {
		return fmt.Errorf("setting up network: %v", err)
	}

	// Ask the sandbox control server to start the root container.
	if err := c.Call(boot.RootContainerStart, &s.ID, nil); err != nil {
		return fmt.Errorf("starting root container: %v", err)
	}
	return nil
}
// StartContainer starts running a non-root container inside the sandbox.
func (s *Sandbox) StartContainer(spec *specs.Spec, conf *boot.Config, cid string, goferFiles []*os.File) error {
	// Close all gofer FDs when this function returns; by then they have
	// been donated to the sandbox process.
	for _, f := range goferFiles {
		defer f.Close()
	}
	log.Debugf("Start non-root container %q in sandbox %q, PID: %d", cid, s.ID, s.Pid)
	sandboxConn, err := s.sandboxConnect()
	if err != nil {
		return fmt.Errorf("couldn't connect to sandbox: %v", err)
	}
	defer sandboxConn.Close()
	// The payload must contain stdin/stdout/stderr followed by gofer
	// files.
	files := append([]*os.File{os.Stdin, os.Stdout, os.Stderr}, goferFiles...)
	// Start running the container.
	args := boot.StartArgs{
		Spec:        spec,
		Conf:        conf,
		CID:         cid,
		FilePayload: urpc.FilePayload{Files: files},
	}
	if err := sandboxConn.Call(boot.ContainerStart, &args, nil); err != nil {
		return fmt.Errorf("starting non-root container %v: %v", spec.Process.Args, err)
	}
	return nil
}
// Restore sends the restore call for a container in the sandbox, restoring
// sandbox state from the statefile at 'filename' and starting the root
// container.
func (s *Sandbox) Restore(cid string, spec *specs.Spec, conf *boot.Config, filename string) error {
	log.Debugf("Restore sandbox %q", s.ID)
	rf, err := os.Open(filename)
	if err != nil {
		return fmt.Errorf("opening restore file %q failed: %v", filename, err)
	}
	defer rf.Close()
	opt := boot.RestoreOpts{
		FilePayload: urpc.FilePayload{
			Files: []*os.File{rf},
		},
		SandboxID: s.ID,
	}
	// If the platform needs a device FD we must pass it in.
	// Note: err here is scoped to the if statement and does not shadow
	// anything used later.
	if deviceFile, err := deviceFileForPlatform(conf.Platform); err != nil {
		return err
	} else if deviceFile != nil {
		defer deviceFile.Close()
		opt.FilePayload.Files = append(opt.FilePayload.Files, deviceFile)
	}
	conn, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer conn.Close()
	// Configure the network.
	if err := setupNetwork(conn, s.Pid, spec, conf); err != nil {
		return fmt.Errorf("setting up network: %v", err)
	}
	// Restore the container and start the root container.
	if err := conn.Call(boot.ContainerRestore, &opt, nil); err != nil {
		return fmt.Errorf("restoring container %q: %v", cid, err)
	}
	return nil
}
// Processes retrieves the list of processes and associated metadata for a
// given container in this sandbox.
func (s *Sandbox) Processes(cid string) ([]*control.Process, error) {
	log.Debugf("Getting processes for container %q in sandbox %q", cid, s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return nil, err
	}
	defer c.Close()
	var procs []*control.Process
	if err := c.Call(boot.ContainerProcesses, &cid, &procs); err != nil {
		return nil, fmt.Errorf("retrieving process data from sandbox: %v", err)
	}
	return procs, nil
}
// Execute runs the specified command in the container. It returns the PID of
// the newly created process.
func (s *Sandbox) Execute(args *control.ExecArgs) (int32, error) {
	log.Debugf("Executing new process in container %q in sandbox %q", args.ContainerID, s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return 0, s.connError(err)
	}
	defer c.Close()
	// Ask the sandbox control server to spawn the process; the call
	// returns as soon as the process is created.
	var pid int32
	if err := c.Call(boot.ContainerExecuteAsync, args, &pid); err != nil {
		return 0, fmt.Errorf("executing command %q in sandbox: %v", args, err)
	}
	return pid, nil
}
// Event retrieves stats about the sandbox such as memory and CPU utilization.
func (s *Sandbox) Event(cid string) (*boot.Event, error) {
	log.Debugf("Getting events for container %q in sandbox %q", cid, s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return nil, err
	}
	defer c.Close()
	// TODO(b/129292330): Pass in the container id (cid) here. The sandbox
	// should return events only for that container.
	var evt boot.Event
	if err := c.Call(boot.ContainerEvent, nil, &evt); err != nil {
		return nil, fmt.Errorf("retrieving event data from sandbox: %v", err)
	}
	evt.ID = cid
	return &evt, nil
}
// sandboxConnect opens a urpc connection to the sandbox's control server.
func (s *Sandbox) sandboxConnect() (*urpc.Client, error) {
	log.Debugf("Connecting to sandbox %q", s.ID)
	c, err := client.ConnectTo(boot.ControlSocketAddr(s.ID))
	if err != nil {
		return nil, s.connError(err)
	}
	return c, nil
}
// connError wraps a control-server connection failure with the sandbox PID.
func (s *Sandbox) connError(err error) error {
	return fmt.Errorf("connecting to control server at PID %d: %v", s.Pid, err)
}
// createSandboxProcess starts the sandbox as a subprocess by running the "boot"
// command, passing in the bundle dir.
func (s *Sandbox) createSandboxProcess(conf *boot.Config, args *Args, startSyncFile *os.File) error {
// nextFD is used to get unused FDs that we can pass to the sandbox. It
// starts at 3 because 0, 1, and 2 are taken by stdin/out/err.
nextFD := 3
binPath := specutils.ExePath
cmd := exec.Command(binPath, conf.ToFlags()...)
cmd.SysProcAttr = &syscall.SysProcAttr{}
// Open the log files to pass to the sandbox as FDs.
//
// These flags must come BEFORE the "boot" command in cmd.Args.
if conf.LogFilename != "" {
logFile, err := os.OpenFile(conf.LogFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmt.Errorf("opening log file %q: %v", conf.LogFilename, err)
}
defer logFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, logFile)
cmd.Args = append(cmd.Args, "--log-fd="+strconv.Itoa(nextFD))
nextFD++
}
if conf.DebugLog != "" {
test := ""
if len(conf.TestOnlyTestNameEnv) != 0 {
// Fetch test name if one is provided and the test only flag was set.
if t, ok := specutils.EnvVar(args.Spec.Process.Env, conf.TestOnlyTestNameEnv); ok {
test = t
}
}
debugLogFile, err := specutils.DebugLogFile(conf.DebugLog, "boot", test)
if err != nil {
return fmt.Errorf("opening debug log file in %q: %v", conf.DebugLog, err)
}
defer debugLogFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, debugLogFile)
cmd.Args = append(cmd.Args, "--debug-log-fd="+strconv.Itoa(nextFD))
nextFD++
}
if conf.PanicLog != "" {
test := ""
if len(conf.TestOnlyTestNameEnv) != 0 {
// Fetch test name if one is provided and the test only flag was set.
if t, ok := specutils.EnvVar(args.Spec.Process.Env, conf.TestOnlyTestNameEnv); ok {
test = t
}
}
panicLogFile, err := specutils.DebugLogFile(conf.PanicLog, "panic", test)
if err != nil {
return fmt.Errorf("opening debug log file in %q: %v", conf.PanicLog, err)
}
defer panicLogFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, panicLogFile)
cmd.Args = append(cmd.Args, "--panic-log-fd="+strconv.Itoa(nextFD))
nextFD++
}
cmd.Args = append(cmd.Args, "--panic-signal="+strconv.Itoa(int(syscall.SIGTERM)))
// Add the "boot" command to the args.
//
// All flags after this must be for the boot command
cmd.Args = append(cmd.Args, "boot", "--bundle="+args.BundleDir)
// Create a socket for the control server and donate it to the sandbox.
addr := boot.ControlSocketAddr(s.ID)
sockFD, err := server.CreateSocket(addr)
log.Infof("Creating sandbox process with addr: %s", addr[1:]) // skip "\00".
if err != nil {
return fmt.Errorf("creating control server socket for sandbox %q: %v", s.ID, err)
}
controllerFile := os.NewFile(uintptr(sockFD), "control_server_socket")
defer controllerFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, controllerFile)
cmd.Args = append(cmd.Args, "--controller-fd="+strconv.Itoa(nextFD))
nextFD++
defer args.MountsFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, args.MountsFile)
cmd.Args = append(cmd.Args, "--mounts-fd="+strconv.Itoa(nextFD))
nextFD++
specFile, err := specutils.OpenSpec(args.BundleDir)
if err != nil {
return err
}
defer specFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, specFile)
cmd.Args = append(cmd.Args, "--spec-fd="+strconv.Itoa(nextFD))
nextFD++
cmd.ExtraFiles = append(cmd.ExtraFiles, startSyncFile)
cmd.Args = append(cmd.Args, "--start-sync-fd="+strconv.Itoa(nextFD))
nextFD++
// If there is a gofer, sends all socket ends to the sandbox.
for _, f := range args.IOFiles {
defer f.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, f)
cmd.Args = append(cmd.Args, "--io-fds="+strconv.Itoa(nextFD))
nextFD++
}
// If the platform needs a device FD we must pass it in.
if deviceFile, err := deviceFileForPlatform(conf.Platform); err != nil {
return err
} else if deviceFile != nil {
defer deviceFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, deviceFile)
cmd.Args = append(cmd.Args, "--device-fd="+strconv.Itoa(nextFD))
nextFD++
}
// TODO(b/151157106): syscall tests fail by timeout if asyncpreemptoff
// isn't set.
if conf.Platform == "kvm" {
cmd.Env = append(cmd.Env, "GODEBUG=asyncpreemptoff=1")
}
// The current process' stdio must be passed to the application via the
// --stdio-fds flag. The stdio of the sandbox process itself must not
// be connected to the same FDs, otherwise we risk leaking sandbox
// errors to the application, so we set the sandbox stdio to nil,
// causing them to read/write from the null device.
cmd.Stdin = nil
cmd.Stdout = nil
cmd.Stderr = nil
// If the console control socket file is provided, then create a new
// pty master/slave pair and set the TTY on the sandbox process.
if args.ConsoleSocket != "" {
cmd.Args = append(cmd.Args, "--console=true")
// console.NewWithSocket will send the master on the given
// socket, and return the slave.
tty, err := console.NewWithSocket(args.ConsoleSocket)
if err != nil {
return fmt.Errorf("setting up console with socket %q: %v", args.ConsoleSocket, err)
}
defer tty.Close()
// Set the TTY as a controlling TTY on the sandbox process.
cmd.SysProcAttr.Setctty = true
// The Ctty FD must be the FD in the child process's FD table,
// which will be nextFD in this case.
// See https://github.com/golang/go/issues/29458.
cmd.SysProcAttr.Ctty = nextFD
// Pass the tty as all stdio fds to sandbox.
for i := 0; i < 3; i++ {
cmd.ExtraFiles = append(cmd.ExtraFiles, tty)
cmd.Args = append(cmd.Args, "--stdio-fds="+strconv.Itoa(nextFD))
nextFD++
}
if conf.Debug {
// If debugging, send the boot process stdio to the
// TTY, so that it is easier to find.
cmd.Stdin = tty
cmd.Stdout = tty
cmd.Stderr = tty
}
} else {
// If not using a console, pass our current stdio as the
// container stdio via flags.
for _, f := range []*os.File{os.Stdin, os.Stdout, os.Stderr} {
cmd.ExtraFiles = append(cmd.ExtraFiles, f)
cmd.Args = append(cmd.Args, "--stdio-fds="+strconv.Itoa(nextFD))
nextFD++
}
if conf.Debug {
// If debugging, send the boot process stdio to the
// this process' stdio, so that is is easier to find.
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
}
// Detach from this session, otherwise cmd will get SIGHUP and SIGCONT
// when re-parented.
cmd.SysProcAttr.Setsid = true
// nss is the set of namespaces to join or create before starting the sandbox
// process. Mount, IPC and UTS namespaces from the host are not used as they
// are virtualized inside the sandbox. Be paranoid and run inside an empty
// namespace for these. Don't unshare cgroup because sandbox is added to a
// cgroup in the caller's namespace.
log.Infof("Sandbox will be started in new mount, IPC and UTS namespaces")
nss := []specs.LinuxNamespace{
{Type: specs.IPCNamespace},
{Type: specs.MountNamespace},
{Type: specs.UTSNamespace},
}
if conf.Platform == platforms.Ptrace {
// TODO(b/75837838): Also set a new PID namespace so that we limit
// access to other host processes.
log.Infof("Sandbox will be started in the current PID namespace")
} else {
log.Infof("Sandbox will be started in a new PID namespace")
nss = append(nss, specs.LinuxNamespace{Type: specs.PIDNamespace})
cmd.Args = append(cmd.Args, "--pidns=true")
}
// Joins the network namespace if network is enabled. the sandbox talks
// directly to the host network, which may have been configured in the
// namespace.
if ns, ok := specutils.GetNS(specs.NetworkNamespace, args.Spec); ok && conf.Network != boot.NetworkNone {
log.Infof("Sandbox will be started in the container's network namespace: %+v", ns)
nss = append(nss, ns)
} else if conf.Network == boot.NetworkHost {
log.Infof("Sandbox will be started in the host network namespace")
} else {
log.Infof("Sandbox will be started in new network namespace")
nss = append(nss, specs.LinuxNamespace{Type: specs.NetworkNamespace})
}
// User namespace depends on the network type. Host network requires to run
// inside the user namespace specified in the spec or the current namespace
// if none is configured.
if conf.Network == boot.NetworkHost {
if userns, ok := specutils.GetNS(specs.UserNamespace, args.Spec); ok {
log.Infof("Sandbox will be started in container's user namespace: %+v", userns)
nss = append(nss, userns)
specutils.SetUIDGIDMappings(cmd, args.Spec)
} else {
log.Infof("Sandbox will be started in the current user namespace")
}
// When running in the caller's defined user namespace, apply the same
// capabilities to the sandbox process to ensure it abides to the same
// rules.
cmd.Args = append(cmd.Args, "--apply-caps=true")
// If we have CAP_SYS_ADMIN, we can create an empty chroot and
// bind-mount the executable inside it.
if conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {
log.Warningf("Running sandbox in test mode without chroot. This is only safe in tests!")
} else if specutils.HasCapabilities(capability.CAP_SYS_ADMIN) {
log.Infof("Sandbox will be started in minimal chroot")
cmd.Args = append(cmd.Args, "--setup-root")
} else {
return fmt.Errorf("can't run sandbox process in minimal chroot since we don't have CAP_SYS_ADMIN")
}
} else {
// If we have CAP_SETUID and CAP_SETGID, then we can also run
// as user nobody.
if conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {
log.Warningf("Running sandbox in test mode as current user (uid=%d gid=%d). This is only safe in tests!", os.Getuid(), os.Getgid())
log.Warningf("Running sandbox in test mode without chroot. This is only safe in tests!")
} else if specutils.HasCapabilities(capability.CAP_SETUID, capability.CAP_SETGID) {
log.Infof("Sandbox will be started in new user namespace")
nss = append(nss, specs.LinuxNamespace{Type: specs.UserNamespace})
cmd.Args = append(cmd.Args, "--setup-root")
const nobody = 65534
if conf.Rootless {
log.Infof("Rootless mode: sandbox will run as nobody inside user namespace, mapped to the current user, uid: %d, gid: %d", os.Getuid(), os.Getgid())
cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
{
ContainerID: nobody,
HostID: os.Getuid(),
Size: 1,
},
}
cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
{
ContainerID: nobody,
HostID: os.Getgid(),
Size: 1,
},
}
} else {
// Map nobody in the new namespace to nobody in the parent namespace.
//
// A sandbox process will construct an empty
// root for itself, so it has to have
// CAP_SYS_ADMIN and CAP_SYS_CHROOT capabilities.
cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
{
ContainerID: nobody,
HostID: nobody,
Size: 1,
},
}
cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
{
ContainerID: nobody,
HostID: nobody,
Size: 1,
},
}
}
// Set credentials to run as user and group nobody.
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: nobody, Gid: nobody}
cmd.SysProcAttr.AmbientCaps = append(cmd.SysProcAttr.AmbientCaps, uintptr(capability.CAP_SYS_ADMIN), uintptr(capability.CAP_SYS_CHROOT))
} else {
return fmt.Errorf("can't run sandbox process as user nobody since we don't have CAP_SETUID or CAP_SETGID")
}
}
cmd.Args[0] = "runsc-sandbox"
if s.Cgroup != nil {
cpuNum, err := s.Cgroup.NumCPU()
if err != nil {
return fmt.Errorf("getting cpu count from cgroups: %v", err)
}
if conf.CPUNumFromQuota {
// Dropping below 2 CPUs can trigger application to disable
// locks that can lead do hard to debug errors, so just
// leaving two cores as reasonable default.
const minCPUs = 2
quota, err := s.Cgroup.CPUQuota()
if err != nil {
return fmt.Errorf("getting cpu qouta from cgroups: %v", err)
}
if n := int(math.Ceil(quota)); n > 0 {
if n < minCPUs {
n = minCPUs
}
if n < cpuNum {
// Only lower the cpu number.
cpuNum = n
}
}
}
cmd.Args = append(cmd.Args, "--cpu-num", strconv.Itoa(cpuNum))
mem, err := s.Cgroup.MemoryLimit()
if err != nil {
return fmt.Errorf("getting memory limit from cgroups: %v", err)
}
// When memory limit is unset, a "large" number is returned. In that case,
// just stick with the default.
if mem < 0x7ffffffffffff000 {
cmd.Args = append(cmd.Args, "--total-memory", strconv.FormatUint(mem, 10))
}
}
if args.UserLog != "" {
f, err := os.OpenFile(args.UserLog, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)
if err != nil {
return fmt.Errorf("opening compat log file: %v", err)
}
defer f.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, f)
cmd.Args = append(cmd.Args, "--user-log-fd", strconv.Itoa(nextFD))
nextFD++
}
if args.Attached {
// Kill sandbox if parent process exits in attached mode.
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
// Tells boot that any process it creates must have pdeathsig set.
cmd.Args = append(cmd.Args, "--attached")
}
// Add container as the last argument.
cmd.Args = append(cmd.Args, s.ID)
// Log the FDs we are donating to the sandbox process.
for i, f := range cmd.ExtraFiles {
log.Debugf("Donating FD %d: %q", i+3, f.Name())
}
log.Debugf("Starting sandbox: %s %v", binPath, cmd.Args)
log.Debugf("SysProcAttr: %+v", cmd.SysProcAttr)
if err := specutils.StartInNS(cmd, nss); err != nil {
return fmt.Errorf("Sandbox: %v", err)
}
s.child = true
s.Pid = cmd.Process.Pid
log.Infof("Sandbox started, PID: %d", s.Pid)
return nil
}
// Wait waits for the containerized process to exit, and returns its WaitStatus.
//
// It first tries the Wait RPC against the sandbox control server; if the
// sandbox is already gone (or dies mid-RPC) it falls back to waiting on the
// sandbox OS process itself and reporting that exit status, which in most
// cases matches the container's.
func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {
	log.Debugf("Waiting for container %q in sandbox %q", cid, s.ID)
	var ws syscall.WaitStatus
	if conn, err := s.sandboxConnect(); err != nil {
		// The sandbox may have exited while before we had a chance to
		// wait on it.
		log.Warningf("Wait on container %q failed: %v. Will try waiting on the sandbox process instead.", cid, err)
	} else {
		defer conn.Close()
		// Try the Wait RPC to the sandbox.
		err = conn.Call(boot.ContainerWait, &cid, &ws)
		if err == nil {
			// It worked!
			return ws, nil
		}
		// The sandbox may have exited after we connected, but before
		// or during the Wait RPC.
		log.Warningf("Wait RPC to container %q failed: %v. Will try waiting on the sandbox process instead.", cid, err)
	}
	// The sandbox may have already exited, or exited while handling the
	// Wait RPC. The best we can do is ask Linux what the sandbox exit
	// status was, since in most cases that will be the same as the
	// container exit status.
	if err := s.waitForStopped(); err != nil {
		return ws, err
	}
	// Only a sandbox we forked ourselves has a collectible exit status
	// (stored by waitForStopped); otherwise it is unavailable.
	if !s.child {
		return ws, fmt.Errorf("sandbox no longer running and its exit status is unavailable")
	}
	return s.status, nil
}
// WaitPID waits for process 'pid' in the container's sandbox and returns its
// WaitStatus.
//
// Unlike Wait, this has no fallback path: it requires a live connection to
// the sandbox control server.
func (s *Sandbox) WaitPID(cid string, pid int32) (syscall.WaitStatus, error) {
	log.Debugf("Waiting for PID %d in sandbox %q", pid, s.ID)
	var ws syscall.WaitStatus
	c, err := s.sandboxConnect()
	if err != nil {
		return ws, err
	}
	defer c.Close()

	args := boot.WaitPIDArgs{PID: pid, CID: cid}
	if callErr := c.Call(boot.ContainerWaitPID, &args, &ws); callErr != nil {
		return ws, fmt.Errorf("waiting on PID %d in sandbox %q: %v", pid, s.ID, callErr)
	}
	return ws, nil
}
// IsRootContainer returns true if the specified container ID belongs to the
// root container. The root container shares its ID with the sandbox itself.
func (s *Sandbox) IsRootContainer(cid string) bool {
	return cid == s.ID
}
// destroy frees all resources associated with the sandbox. It fails fast and
// is idempotent: calling it again after the sandbox is gone is a no-op.
func (s *Sandbox) destroy() error {
	log.Debugf("Destroy sandbox %q", s.ID)
	if s.Pid != 0 {
		log.Debugf("Killing sandbox %q", s.ID)
		// ESRCH means the process is already gone, which is fine for an
		// idempotent destroy.
		if err := syscall.Kill(s.Pid, syscall.SIGKILL); err != nil && err != syscall.ESRCH {
			// Bug fix: s.Pid is an int, so format it with %d. The previous
			// %q verb rendered the PID as a quoted character literal.
			return fmt.Errorf("killing sandbox %q PID %d: %v", s.ID, s.Pid, err)
		}
		if err := s.waitForStopped(); err != nil {
			return fmt.Errorf("waiting sandbox %q stop: %v", s.ID, err)
		}
	}
	return nil
}
// SignalContainer sends the signal to a container in the sandbox. If all is
// true and signal is SIGKILL, then waits for all processes to exit before
// returning.
func (s *Sandbox) SignalContainer(cid string, sig syscall.Signal, all bool) error {
	log.Debugf("Signal sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	// Deliver to every process in the container when requested, otherwise
	// only to the container's init process.
	mode := boot.DeliverToProcess
	if all {
		mode = boot.DeliverToAllProcesses
	}
	args := boot.SignalArgs{CID: cid, Signo: int32(sig), Mode: mode}
	if callErr := c.Call(boot.ContainerSignal, &args, nil); callErr != nil {
		return fmt.Errorf("signaling container %q: %v", cid, callErr)
	}
	return nil
}
// SignalProcess sends the signal to a particular process in the container. If
// fgProcess is true, then the signal is sent to the foreground process group
// in the same session that PID belongs to. This is only valid if the process
// is attached to a host TTY.
func (s *Sandbox) SignalProcess(cid string, pid int32, sig syscall.Signal, fgProcess bool) error {
	log.Debugf("Signal sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	// Target either the single process or its foreground process group.
	mode := boot.DeliverToProcess
	if fgProcess {
		mode = boot.DeliverToForegroundProcessGroup
	}
	args := boot.SignalArgs{CID: cid, Signo: int32(sig), PID: pid, Mode: mode}
	if callErr := c.Call(boot.ContainerSignal, &args, nil); callErr != nil {
		return fmt.Errorf("signaling container %q PID %d: %v", cid, pid, callErr)
	}
	return nil
}
// Checkpoint sends the checkpoint call for a container in the sandbox.
// The statefile will be written to f.
func (s *Sandbox) Checkpoint(cid string, f *os.File) error {
	log.Debugf("Checkpoint sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	// Donate the state file over the urpc connection so the sandbox can
	// write its image directly into it.
	opt := control.SaveOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if callErr := c.Call(boot.ContainerCheckpoint, &opt, nil); callErr != nil {
		return fmt.Errorf("checkpointing container %q: %v", cid, callErr)
	}
	return nil
}
// Pause sends the pause call for a container in the sandbox.
func (s *Sandbox) Pause(cid string) error {
	log.Debugf("Pause sandbox %q", s.ID)
	c, cerr := s.sandboxConnect()
	if cerr != nil {
		return cerr
	}
	defer c.Close()

	err := c.Call(boot.ContainerPause, nil, nil)
	if err == nil {
		return nil
	}
	return fmt.Errorf("pausing container %q: %v", cid, err)
}
// Resume sends the resume call for a container in the sandbox.
func (s *Sandbox) Resume(cid string) error {
	log.Debugf("Resume sandbox %q", s.ID)
	c, cerr := s.sandboxConnect()
	if cerr != nil {
		return cerr
	}
	defer c.Close()

	err := c.Call(boot.ContainerResume, nil, nil)
	if err == nil {
		return nil
	}
	return fmt.Errorf("resuming container %q: %v", cid, err)
}
// IsRunning returns true if the sandbox or gofer process is running.
func (s *Sandbox) IsRunning() bool {
	if s.Pid == 0 {
		return false
	}
	// Signal 0 performs error checking only: it reports whether a process
	// with this PID exists and can be signaled, without delivering anything.
	return syscall.Kill(s.Pid, 0) == nil
}
// Stacks collects and returns all stacks for the sandbox.
func (s *Sandbox) Stacks() (string, error) {
	log.Debugf("Stacks sandbox %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return "", err
	}
	defer c.Close()

	var stacks string
	if callErr := c.Call(boot.SandboxStacks, nil, &stacks); callErr != nil {
		return "", fmt.Errorf("getting sandbox %q stacks: %v", s.ID, callErr)
	}
	return stacks, nil
}
// HeapProfile writes a heap profile to the given file. The file is donated
// to the sandbox over the urpc connection so the profile is written directly
// into it.
func (s *Sandbox) HeapProfile(f *os.File) error {
	log.Debugf("Heap profile %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	opts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if callErr := c.Call(boot.HeapProfile, &opts, nil); callErr != nil {
		return fmt.Errorf("getting sandbox %q heap profile: %v", s.ID, callErr)
	}
	return nil
}
// StartCPUProfile start CPU profile writing to the given file. Stop with
// StopCPUProfile.
func (s *Sandbox) StartCPUProfile(f *os.File) error {
	log.Debugf("CPU profile start %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	opts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if callErr := c.Call(boot.StartCPUProfile, &opts, nil); callErr != nil {
		return fmt.Errorf("starting sandbox %q CPU profile: %v", s.ID, callErr)
	}
	return nil
}
// StopCPUProfile stops a previously started CPU profile.
func (s *Sandbox) StopCPUProfile() error {
	log.Debugf("CPU profile stop %q", s.ID)
	c, cerr := s.sandboxConnect()
	if cerr != nil {
		return cerr
	}
	defer c.Close()

	err := c.Call(boot.StopCPUProfile, nil, nil)
	if err == nil {
		return nil
	}
	return fmt.Errorf("stopping sandbox %q CPU profile: %v", s.ID, err)
}
// GoroutineProfile writes a goroutine profile to the given file, which is
// donated to the sandbox over the urpc connection.
func (s *Sandbox) GoroutineProfile(f *os.File) error {
	log.Debugf("Goroutine profile %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	opts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if callErr := c.Call(boot.GoroutineProfile, &opts, nil); callErr != nil {
		return fmt.Errorf("getting sandbox %q goroutine profile: %v", s.ID, callErr)
	}
	return nil
}
// BlockProfile writes a block profile to the given file, which is donated
// to the sandbox over the urpc connection.
func (s *Sandbox) BlockProfile(f *os.File) error {
	log.Debugf("Block profile %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	opts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if callErr := c.Call(boot.BlockProfile, &opts, nil); callErr != nil {
		return fmt.Errorf("getting sandbox %q block profile: %v", s.ID, callErr)
	}
	return nil
}
// MutexProfile writes a mutex profile to the given file, which is donated
// to the sandbox over the urpc connection.
func (s *Sandbox) MutexProfile(f *os.File) error {
	log.Debugf("Mutex profile %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	opts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if callErr := c.Call(boot.MutexProfile, &opts, nil); callErr != nil {
		return fmt.Errorf("getting sandbox %q mutex profile: %v", s.ID, callErr)
	}
	return nil
}
// StartTrace start trace writing to the given file. Stop with StopTrace.
func (s *Sandbox) StartTrace(f *os.File) error {
	log.Debugf("Trace start %q", s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()

	opts := control.ProfileOpts{
		FilePayload: urpc.FilePayload{Files: []*os.File{f}},
	}
	if callErr := c.Call(boot.StartTrace, &opts, nil); callErr != nil {
		return fmt.Errorf("starting sandbox %q trace: %v", s.ID, callErr)
	}
	return nil
}
// StopTrace stops a previously started trace.
func (s *Sandbox) StopTrace() error {
	log.Debugf("Trace stop %q", s.ID)
	c, cerr := s.sandboxConnect()
	if cerr != nil {
		return cerr
	}
	defer c.Close()

	err := c.Call(boot.StopTrace, nil, nil)
	if err == nil {
		return nil
	}
	return fmt.Errorf("stopping sandbox %q trace: %v", s.ID, err)
}
// ChangeLogging changes logging options of the running sandbox.
func (s *Sandbox) ChangeLogging(args control.LoggingArgs) error {
	log.Debugf("Change logging start %q", s.ID)
	c, cerr := s.sandboxConnect()
	if cerr != nil {
		return cerr
	}
	defer c.Close()

	err := c.Call(boot.ChangeLogging, &args, nil)
	if err == nil {
		return nil
	}
	return fmt.Errorf("changing sandbox %q logging: %v", s.ID, err)
}
// DestroyContainer destroys the given container. If it is the root container,
// then the entire sandbox is destroyed.
func (s *Sandbox) DestroyContainer(cid string) error {
	err := s.destroyContainer(cid)
	// If the sandbox isn't running, the container has already been
	// destroyed; ignore the error in that case.
	if err != nil && s.IsRunning() {
		return err
	}
	return nil
}
// destroyContainer tears down a single container. Destroying the root
// container is equivalent to destroying the whole sandbox.
func (s *Sandbox) destroyContainer(cid string) error {
	if s.IsRootContainer(cid) {
		log.Debugf("Destroying root container %q by destroying sandbox", cid)
		return s.destroy()
	}

	log.Debugf("Destroying container %q in sandbox %q", cid, s.ID)
	c, err := s.sandboxConnect()
	if err != nil {
		return err
	}
	defer c.Close()
	if callErr := c.Call(boot.ContainerDestroy, &cid, nil); callErr != nil {
		return fmt.Errorf("destroying container %q: %v", cid, callErr)
	}
	return nil
}
// waitForStopped polls (with a 5 second deadline and 100ms backoff) until
// the sandbox process is gone. If the sandbox is our own child, its zombie
// is reaped here and the exit status stored in s.status; otherwise we only
// wait until the process stops being signalable.
func (s *Sandbox) waitForStopped() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
	op := func() error {
		if s.child {
			// statusMu guards s.status against concurrent waiters.
			s.statusMu.Lock()
			defer s.statusMu.Unlock()
			// Pid == 0 means a previous attempt already reaped the child.
			if s.Pid == 0 {
				return nil
			}
			// The sandbox process is a child of the current process,
			// so we can wait it and collect its zombie.
			wpid, err := syscall.Wait4(int(s.Pid), &s.status, syscall.WNOHANG, nil)
			if err != nil {
				return fmt.Errorf("error waiting the sandbox process: %v", err)
			}
			if wpid == 0 {
				// WNOHANG: nothing has exited yet; retry after backoff.
				return fmt.Errorf("sandbox is still running")
			}
			s.Pid = 0
		} else if s.IsRunning() {
			return fmt.Errorf("sandbox is still running")
		}
		return nil
	}
	return backoff.Retry(op, b)
}
// deviceFileForPlatform opens the device file for the given platform. If the
// platform does not need a device file, then nil is returned.
func deviceFileForPlatform(name string) (*os.File, error) {
	p, err := platform.Lookup(name)
	if err != nil {
		return nil, err
	}

	dev, err := p.OpenDevice()
	if err != nil {
		return nil, fmt.Errorf("opening device file for platform %q: %v", p, err)
	}
	return dev, nil
}
|
// prints the content found at a URL
package main
import(
"fmt"
"io/ioutil"
"net/http"
"os"
)
// main fetches each URL given on the command line and prints the response
// body followed by the HTTP status line.
func main() {
	for _, u := range os.Args[1:] {
		resp, err := http.Get(u)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Fetch: %v\n", err)
			os.Exit(1)
		}
		// Read the body before closing; keep the status line around for
		// printing after the body.
		body, err := ioutil.ReadAll(resp.Body)
		status := resp.Status
		resp.Body.Close()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Fetch: reading %s: %v\n", u, err)
			os.Exit(1)
		}
		fmt.Printf("%s\n", body)
		fmt.Printf("%s\n", status)
	}
}
Check for an "http://" or "https://" prefix and prepend "http://" when the URL has neither.
// prints the content found at a URL
package main
import(
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
)
// main fetches each URL given on the command line, prepending "http://"
// when no scheme is present, and prints the response body followed by the
// HTTP status line.
func main() {
	for _, u := range os.Args[1:] {
		// Bug fix: the original condition used "||", which is always true
		// (no URL starts with both prefixes), so "https://..." URLs were
		// mangled into "http://https://...". Prepend the scheme only when
		// neither prefix is present.
		if !strings.HasPrefix(u, "http://") && !strings.HasPrefix(u, "https://") {
			u = "http://" + u
		}
		resp, err := http.Get(u)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Fetch: %v\n", err)
			os.Exit(1)
		}
		b, err := ioutil.ReadAll(resp.Body)
		s := resp.Status
		resp.Body.Close()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Fetch: reading %s: %v\n", u, err)
			os.Exit(1)
		}
		fmt.Printf("%s\n", b)
		fmt.Printf("%s\n", s)
	}
}
|
package main
import (
"chaos-galago/processor/Godeps/_workspace/src/chaos-galago/shared/utils"
"chaos-galago/processor/Godeps/_workspace/src/github.com/cloudfoundry-community/go-cfclient"
"chaos-galago/processor/utils"
"database/sql"
"fmt"
"os"
"strconv"
"time"
)
var (
dbConnectionString string
err error
config *cfclient.Config
)
// init loads the database connection string and the Cloud Foundry client
// configuration, exiting the process if the DB details are unavailable.
func init() {
	dbConnectionString, err = sharedUtils.GetDBConnectionDetails()
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
	config = utils.LoadCFConfig()
	fmt.Println("\nConfig loaded:")
	// Print only non-secret fields; dumping the whole struct would leak the
	// CF password into the logs.
	fmt.Println("ApiAddress: ", config.ApiAddress)
	fmt.Println("LoginAddress: ", config.LoginAddress)
	fmt.Println("Username: ", config.Username)
	fmt.Println("SkipSslValidation: ", config.SkipSslValidation)
}
// freakOut reports whether err is non-nil. On error it prints the message
// and sleeps for a minute so the caller's retry loop does not spin hot.
func freakOut(err error) bool {
	if err != nil {
		// Typo fix: "occured" -> "occurred".
		fmt.Println("An error has occurred")
		fmt.Println(err.Error())
		time.Sleep(60 * time.Second)
		return true
	}
	return false
}
// main runs the chaos processing loop: once a minute it loads all bound
// services from the database and, for each one that is due, may kill one
// instance of the bound app based on the configured probability.
func main() {
	cfClient := cfclient.NewClient(config)
OUTER:
	for {
		// Fresh DB handle per iteration; note sql.Open mostly validates
		// arguments, real connection errors surface on first use.
		db, err := sql.Open("mysql", dbConnectionString)
		if freakOut(err) {
			db.Close()
			continue OUTER
		}
		services := utils.GetBoundApps(db)
		if len(services) == 0 {
			db.Close()
			time.Sleep(60 * time.Second)
			continue OUTER
		}
	SERVICES:
		for _, service := range services {
			if utils.ShouldProcess(service.Frequency, service.LastProcessed) {
				fmt.Printf("\nProcessing chaos for %s", service.AppID)
				// Record the attempt before acting so failures below do not
				// cause the same service to be reprocessed immediately.
				err = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())
				if freakOut(err) {
					continue SERVICES
				}
				if utils.ShouldRun(service.Probability) {
					fmt.Printf("\nRunning chaos for %s", service.AppID)
					appInstances := cfClient.GetAppInstances(service.AppID)
					// Only inject chaos into apps reported healthy.
					if utils.IsAppHealthy(appInstances) {
						fmt.Printf("\nApp %s is Healthy\n", service.AppID)
						chaosInstance := strconv.Itoa(utils.PickAppInstance(appInstances))
						fmt.Printf("\nAbout to kill app instance: %s at index: %s", service.AppID, chaosInstance)
						cfClient.KillAppInstance(service.AppID, chaosInstance)
						err = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())
						if freakOut(err) {
							continue SERVICES
						}
					} else {
						fmt.Printf("\nApp %s is unhealthy, skipping\n", service.AppID)
						continue SERVICES
					}
				} else {
					// Probability roll decided against chaos this round.
					fmt.Printf("\nNot running chaos for %s", service.AppID)
					err = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())
					if freakOut(err) {
						continue SERVICES
					}
				}
			} else {
				fmt.Printf("\nSkipping processing chaos for %s", service.AppID)
				continue SERVICES
			}
		}
		db.Close()
		time.Sleep(60 * time.Second)
	}
}
Prevent the cfclient password from appearing in logs by printing individual config fields instead of the whole struct.
package main
import (
"chaos-galago/processor/Godeps/_workspace/src/chaos-galago/shared/utils"
"chaos-galago/processor/Godeps/_workspace/src/github.com/cloudfoundry-community/go-cfclient"
"chaos-galago/processor/utils"
"database/sql"
"fmt"
"os"
"strconv"
"time"
)
var (
dbConnectionString string
err error
config *cfclient.Config
)
// init loads the database connection string and the Cloud Foundry client
// configuration, exiting the process if the DB details are unavailable.
// Only non-secret config fields are printed so the CF password never
// appears in the logs.
func init() {
	dbConnectionString, err = sharedUtils.GetDBConnectionDetails()
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
	config = utils.LoadCFConfig()
	fmt.Println("\nConfig loaded:")
	fmt.Println("ApiAddress: ", config.ApiAddress)
	fmt.Println("LoginAddress: ", config.LoginAddress)
	fmt.Println("Username: ", config.Username)
	fmt.Println("SkipSslValidation: ", config.SkipSslValidation)
}
// freakOut reports whether err is non-nil. On error it prints the message
// and sleeps for a minute so the caller's retry loop does not spin hot.
func freakOut(err error) bool {
	if err != nil {
		// Typo fix: "occured" -> "occurred".
		fmt.Println("An error has occurred")
		fmt.Println(err.Error())
		time.Sleep(60 * time.Second)
		return true
	}
	return false
}
// main runs the chaos processing loop: once a minute it loads all bound
// services from the database and, for each one that is due, may kill one
// instance of the bound app based on the configured probability.
func main() {
	cfClient := cfclient.NewClient(config)
OUTER:
	for {
		// Fresh DB handle per iteration; note sql.Open mostly validates
		// arguments, real connection errors surface on first use.
		db, err := sql.Open("mysql", dbConnectionString)
		if freakOut(err) {
			db.Close()
			continue OUTER
		}
		services := utils.GetBoundApps(db)
		if len(services) == 0 {
			db.Close()
			time.Sleep(60 * time.Second)
			continue OUTER
		}
	SERVICES:
		for _, service := range services {
			if utils.ShouldProcess(service.Frequency, service.LastProcessed) {
				fmt.Printf("\nProcessing chaos for %s", service.AppID)
				// Record the attempt before acting so failures below do not
				// cause the same service to be reprocessed immediately.
				err = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())
				if freakOut(err) {
					continue SERVICES
				}
				if utils.ShouldRun(service.Probability) {
					fmt.Printf("\nRunning chaos for %s", service.AppID)
					appInstances := cfClient.GetAppInstances(service.AppID)
					// Only inject chaos into apps reported healthy.
					if utils.IsAppHealthy(appInstances) {
						fmt.Printf("\nApp %s is Healthy\n", service.AppID)
						chaosInstance := strconv.Itoa(utils.PickAppInstance(appInstances))
						fmt.Printf("\nAbout to kill app instance: %s at index: %s", service.AppID, chaosInstance)
						cfClient.KillAppInstance(service.AppID, chaosInstance)
						err = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())
						if freakOut(err) {
							continue SERVICES
						}
					} else {
						fmt.Printf("\nApp %s is unhealthy, skipping\n", service.AppID)
						continue SERVICES
					}
				} else {
					// Probability roll decided against chaos this round.
					fmt.Printf("\nNot running chaos for %s", service.AppID)
					err = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())
					if freakOut(err) {
						continue SERVICES
					}
				}
			} else {
				fmt.Printf("\nSkipping processing chaos for %s", service.AppID)
				continue SERVICES
			}
		}
		db.Close()
		time.Sleep(60 * time.Second)
	}
}
|
// Copyright 2015 trivago GmbH
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package producer
import (
kafka "github.com/shopify/sarama" // "gopkg.in/Shopify/sarama.v1"
"github.com/trivago/gollum/core"
"github.com/trivago/gollum/core/log"
"github.com/trivago/gollum/shared"
"strings"
"sync"
"sync/atomic"
"time"
)
// Recognized configuration values for the Partitioner and Compression
// settings (compared case-insensitively in Configure).
const (
	partRandom     = "random"
	partRoundrobin = "roundrobin"
	partHash       = "hash"
	compressNone   = "none"
	compressGZIP   = "zip"
	compressSnappy = "snappy"
)
// Kafka producer plugin
// Configuration example
//
// - "producer.Kafka":
// Enable: true
// ClientId: "weblog"
// Partitioner: "Roundrobin"
// RequiredAcks: 1
// TimeoutMs: 1500
// SendRetries: 3
// Compression: "None"
// MaxOpenRequests: 5
// BatchMinCount: 10
// BatchMaxCount: 1
// BatchSizeByte: 8192
// BatchSizeMaxKB: 1024
// BatchTimeoutSec: 3
// ServerTimeoutSec: 30
// SendTimeoutMs: 250
// ElectRetries: 3
// ElectTimeoutMs: 250
// MetadataRefreshMs: 10000
// Servers:
// - "localhost:9092"
// Topic:
// "console" : "console"
// Stream:
// - "console"
//
// The kafka producer writes messages to a kafka cluster. This producer is
// backed by the sarama library so most settings relate to that library.
// This producer uses a fuse breaker if the connection reports an error.
//
// ClientId sets the client id of this producer. By default this is "gollum".
//
// Partitioner sets the distribution algorithm to use. Valid values are:
// "Random","Roundrobin" and "Hash". By default "Hash" is set.
//
// RequiredAcks defines the acknowledgement level required by the broker.
// 0 = No responses required. 1 = wait for the local commit. -1 = wait for
// all replicas to commit. >1 = wait for a specific number of commits.
// By default this is set to 1.
//
// TimeoutMs denotes the maximum time the broker will wait for acks. This
// setting becomes active when RequiredAcks is set to wait for multiple commits.
// By default this is set to 1500.
//
// SendRetries defines how many times to retry sending data before marking a
// server as not reachable. By default this is set to 3.
//
// Compression sets the method of compression to use. Valid values are:
// "None","Zip" and "Snappy". By default "None" is set.
//
// MaxOpenRequests defines the number of simultaneous connections allowed.
// By default this is set to 5.
//
// BatchMinCount sets the minimum number of messages required to trigger a
// flush. By default this is set to 1.
//
// BatchMaxCount defines the maximum number of messages processed per
// request. By default this is set to 0 for "unlimited".
//
// BatchSizeByte sets the minimum number of bytes to collect before a new flush
// is triggered. By default this is set to 8192.
//
// BatchSizeMaxKB defines the maximum allowed message size. By default this is
// set to 1024.
//
// BatchTimeoutSec sets the minimum time in seconds to pass after which a new
// flush will be triggered. By default this is set to 3.
//
// MessageBufferCount sets the internal channel size for the kafka client.
// By default this is set to 256.
//
// ServerTimeoutSec defines the time after which a connection is set to timed
// out. By default this is set to 30 seconds.
//
// SendTimeoutMs defines the number of milliseconds to wait for a server to
// respond before triggering a timeout. Defaults to 250.
//
// ElectRetries defines how many times to retry during a leader election.
// By default this is set to 3.
//
// ElectTimeoutMs defines the number of milliseconds to wait for the cluster to
// elect a new leader. Defaults to 250.
//
// MetadataRefreshMs set the interval in seconds for fetching cluster metadata.
// By default this is set to 10000. This corresponds to the JVM setting
// `topic.metadata.refresh.interval.ms`.
//
// Servers contains the list of all kafka servers to connect to. By default this
// is set to contain only "localhost:9092".
//
// Topic maps a stream to a specific kafka topic. You can define the
// wildcard stream (*) here, too. If defined, all streams that do not have a
// specific mapping will go to this topic (including _GOLLUM_).
// If no topic mappings are set the stream names will be used as topic.
// Kafka is the producer state: connection configuration, the sarama client
// and async producer handles, the local message batch, and per-topic metric
// counters.
type Kafka struct {
	core.ProducerBase
	servers          []string                        // kafka broker addresses ("host:port")
	topic            map[core.MessageStreamID]string // stream ID -> kafka topic mapping
	clientID         string
	client           kafka.Client
	config           *kafka.Config
	batch            core.MessageBatch // local buffer flushed to the async producer
	producer         kafka.AsyncProducer
	counters         map[string]*int64 // per-topic message counters for metrics
	missCount        int64             // in-flight messages still awaiting ack/error
	lastMetricUpdate time.Time
}
// Metric name constants. The per-topic metrics are suffixed with the topic
// name at runtime.
const (
	kafkaMetricMessages    = "Kafka:Messages-"
	kafkaMetricMessagesSec = "Kafka:MessagesSec-"
	kafkaMetricMissCount   = "Kafka:ResponsesQueued"
)
// init registers the Kafka producer plugin with the global type registry so
// it can be instantiated from a gollum configuration file.
func init() {
	shared.TypeRegistry.Register(Kafka{})
}
// Configure initializes this producer with values from a plugin config and
// translates them into a sarama client configuration.
func (prod *Kafka) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.servers = conf.GetStringArray("Servers", []string{"localhost:9092"})
	prod.topic = conf.GetStreamMap("Topic", "")
	prod.clientID = conf.GetString("ClientId", "gollum")
	prod.lastMetricUpdate = time.Now()

	prod.config = kafka.NewConfig()
	prod.config.ClientID = conf.GetString("ClientId", "gollum")
	prod.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)

	prod.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
	prod.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
	prod.config.Net.ReadTimeout = prod.config.Net.DialTimeout
	prod.config.Net.WriteTimeout = prod.config.Net.DialTimeout

	prod.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
	prod.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
	prod.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond

	prod.config.Producer.MaxMessageBytes = conf.GetInt("BatchSizeMaxKB", 1<<10) << 10
	prod.config.Producer.RequiredAcks = kafka.RequiredAcks(conf.GetInt("RequiredAcks", int(kafka.WaitForLocal)))
	// Bug fix: the documented setting is "TimeoutMs" but the code only read
	// the misspelled "TimoutMs". Prefer the documented key, keeping the old
	// misspelling as a fallback so existing configs are not broken.
	prod.config.Producer.Timeout = time.Duration(conf.GetInt("TimeoutMs", conf.GetInt("TimoutMs", 1500))) * time.Millisecond
	prod.config.Producer.Return.Errors = true
	prod.config.Producer.Return.Successes = true

	// Compression and partitioner names are matched case-insensitively;
	// unknown values fall through to the documented defaults.
	switch strings.ToLower(conf.GetString("Compression", compressNone)) {
	default:
		fallthrough
	case compressNone:
		prod.config.Producer.Compression = kafka.CompressionNone
	case compressGZIP:
		prod.config.Producer.Compression = kafka.CompressionGZIP
	case compressSnappy:
		prod.config.Producer.Compression = kafka.CompressionSnappy
	}

	switch strings.ToLower(conf.GetString("Partitioner", partRandom)) {
	case partRandom:
		prod.config.Producer.Partitioner = kafka.NewRandomPartitioner
	case partRoundrobin:
		prod.config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
	default:
		fallthrough
	case partHash:
		prod.config.Producer.Partitioner = kafka.NewHashPartitioner
	}

	prod.config.Producer.Flush.Bytes = conf.GetInt("BatchSizeByte", 8192)
	prod.config.Producer.Flush.Messages = conf.GetInt("BatchMinCount", 1)
	prod.config.Producer.Flush.Frequency = time.Duration(conf.GetInt("BatchTimeoutSec", 3)) * time.Second
	prod.config.Producer.Flush.MaxMessages = conf.GetInt("BatchMaxCount", 0)
	prod.config.Producer.Retry.Max = conf.GetInt("SendRetries", 3)
	// NOTE(review): the plugin documentation says SendTimeoutMs defaults to
	// 250, but the code default has always been 100 — confirm which is
	// intended before changing either.
	prod.config.Producer.Retry.Backoff = time.Duration(conf.GetInt("SendTimeoutMs", 100)) * time.Millisecond

	prod.batch = core.NewMessageBatch(conf.GetInt("Channel", 8192))

	// Pre-register metrics and counters for all statically mapped topics;
	// dynamically discovered topics are added in transformMessages.
	prod.counters = make(map[string]*int64)
	for _, topic := range prod.topic {
		shared.Metric.New(kafkaMetricMessages + topic)
		shared.Metric.New(kafkaMetricMessagesSec + topic)
		prod.counters[topic] = new(int64)
	}
	shared.Metric.New(kafkaMetricMissCount)

	prod.SetCheckFuseCallback(prod.tryOpenConnection)
	return nil
}
// bufferMessage appends msg to the current batch, flushing first if the
// batch is full; messages are dropped when the producer is shutting down.
func (prod *Kafka) bufferMessage(msg core.Message) {
	prod.batch.AppendOrFlush(msg, prod.sendBatch, prod.IsActiveOrStopping, prod.Drop)
}
// sendBatchOnTimeOut flushes the batch if the configured flush frequency has
// elapsed or the batch has reached half of its capacity.
func (prod *Kafka) sendBatchOnTimeOut() {
	// Flush if necessary
	if prod.batch.ReachedTimeThreshold(prod.config.Producer.Flush.Frequency) || prod.batch.ReachedSizeThreshold(prod.batch.Len()/2) {
		prod.sendBatch()
	}
}
// sendBatch flushes the batch to kafka when a connection is available. If no
// connection can be opened while stopping, the messages are dropped instead;
// in any other disconnected state the batch is kept for a later retry.
func (prod *Kafka) sendBatch() {
	if prod.tryOpenConnection() {
		prod.batch.Flush(prod.transformMessages)
	} else if prod.IsStopping() {
		prod.batch.Flush(prod.dropMessages)
	} else {
		return // ### return, do not update metrics ###
	}
	// Update metrics
	duration := time.Since(prod.lastMetricUpdate)
	prod.lastMetricUpdate = time.Now()
	for category, counter := range prod.counters {
		// Swap resets the counter so each interval reports its own count.
		count := atomic.SwapInt64(counter, 0)
		shared.Metric.Add(kafkaMetricMessages+category, count)
		shared.Metric.SetF(kafkaMetricMessagesSec+category, float64(count)/duration.Seconds())
	}
}
// dropMessages routes every message of the given slice to the Drop handler.
func (prod *Kafka) dropMessages(messages []core.Message) {
	for i := range messages {
		prod.Drop(messages[i])
	}
}
// transformMessages formats the given messages and feeds them to the sarama
// async producer, then waits (bounded by the flush frequency) for acks and
// errors so metrics and drops can be accounted for. If every topic that saw
// an error never saw a success, the connection is torn down for a reconnect.
func (prod *Kafka) transformMessages(messages []core.Message) {
	defer func() { shared.Metric.Set(kafkaMetricMissCount, prod.missCount) }()

	for _, msg := range messages {
		originalMsg := msg
		msg.Data, msg.StreamID = prod.ProducerBase.Format(msg)

		// Store current client and producer to avoid races
		client := prod.client
		producer := prod.producer

		// Check if connected
		if client == nil || producer == nil {
			prod.Drop(originalMsg)
			continue // ### continue, not connected ###
		}

		// Send message
		topic, topicMapped := prod.topic[msg.StreamID]
		if !topicMapped {
			// Use wildcard fallback or stream name if not set
			topic, topicMapped = prod.topic[core.WildcardStreamID]
			if !topicMapped {
				topic = core.StreamRegistry.GetStreamName(msg.StreamID)
			}
			// Register metrics for the newly discovered topic and cache
			// the mapping for subsequent messages.
			shared.Metric.New(kafkaMetricMessages + topic)
			shared.Metric.New(kafkaMetricMessagesSec + topic)
			prod.counters[topic] = new(int64)
			prod.topic[msg.StreamID] = topic
		}

		producer.Input() <- &kafka.ProducerMessage{
			Topic:    topic,
			Value:    kafka.ByteEncoder(msg.Data),
			Metadata: originalMsg,
		}

		atomic.AddInt64(prod.counters[topic], 1)
		prod.missCount++
	}

	// Wait for errors to be returned
	errors := make(map[string]bool)
	topicState := make(map[string]bool)

	// Bug fix: the timeout case previously used a bare "break", which only
	// exits the select statement, not the loop — a flush timeout never
	// actually stopped the wait. Use a labeled break instead.
flushWait:
	for timeout := time.NewTimer(prod.config.Producer.Flush.Frequency); prod.missCount > 0; prod.missCount-- {
		select {
		case succ := <-prod.producer.Successes():
			topicState[succ.Topic] = true // overwrite negative states

		case err := <-prod.producer.Errors():
			if _, errorExists := errors[err.Error()]; !errorExists {
				Log.Error.Printf("Kafka producer error: %s", err.Error())
				errors[err.Error()] = true
				if _, stateSet := topicState[err.Msg.Topic]; !stateSet {
					topicState[err.Msg.Topic] = false
				}
			}
			if msg, hasMsg := err.Msg.Metadata.(core.Message); hasMsg {
				prod.Drop(msg)
			}

		case <-timeout.C:
			Log.Warning.Printf("Kafka flush timed out with %d messages left", prod.missCount)
			break flushWait // ### break, took too long ###
		}
	}

	if len(errors) > 0 {
		allTopicsOk := true
		for _, state := range topicState {
			allTopicsOk = state && allTopicsOk
		}
		if !allTopicsOk {
			// Only restart if all topics report an error
			// This is done to separate topic related errors from server related errors
			Log.Error.Printf("%d error type(s) for this batch. Triggering a reconnect", len(errors))
			prod.closeConnection()
		}
	}
}
// tryOpenConnection lazily (re)creates the kafka client and async producer,
// returning true when both are available. On success the fuse is marked
// active; on failure both handles are reset to nil so the next call starts
// from scratch.
func (prod *Kafka) tryOpenConnection() bool {
	// Reconnect the client first
	if prod.client == nil {
		if client, err := kafka.NewClient(prod.servers, prod.config); err == nil {
			prod.client = client
		} else {
			Log.Error.Print("Kafka client error:", err)
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
	}
	// Make sure we have a producer up and running
	if prod.producer == nil {
		if producer, err := kafka.NewAsyncProducerFromClient(prod.client); err == nil {
			prod.producer = producer
		} else {
			Log.Error.Print("Kafka producer error:", err)
			// A client without a producer is useless here; drop it too so
			// the next attempt rebuilds both.
			prod.client.Close()
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
	}
	prod.Control() <- core.PluginControlFuseActive
	return true
}
// closeConnection shuts down the async producer and the client. If the
// client was still open and the producer is not stopping, the fuse is burned
// (PluginControlFuseBurn) to signal the outage.
func (prod *Kafka) closeConnection() {
	if prod.producer != nil {
		prod.producer.Close()
		prod.producer = nil
	}
	if prod.client != nil && !prod.client.Closed() {
		prod.client.Close()
		prod.client = nil
		if !prod.IsStopping() {
			prod.Control() <- core.PluginControlFuseBurn
		}
	}
}
// close is the stop callback: it drains the message channel, flushes the
// remaining batch (bounded by the shutdown timeout) and tears down the
// connection. WorkerDone is always signalled on exit.
func (prod *Kafka) close() {
	defer prod.WorkerDone()
	prod.CloseMessageChannel(prod.bufferMessage)
	prod.batch.Close(prod.transformMessages, prod.GetShutdownTimeout())
	prod.closeConnection()
}
// Produce registers the main worker, opens the kafka connection and runs the
// ticker-driven control loop that buffers incoming messages and flushes the
// batch on timeout.
func (prod *Kafka) Produce(workers *sync.WaitGroup) {
	prod.AddMainWorker(workers)
	prod.tryOpenConnection()
	prod.TickerMessageControlLoop(prod.bufferMessage, prod.config.Producer.Timeout, prod.sendBatchOnTimeOut)
}
Fixed a logic problem with topic-based error handling in the Kafka producer
// Copyright 2015 trivago GmbH
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package producer
import (
kafka "github.com/shopify/sarama" // "gopkg.in/Shopify/sarama.v1"
"github.com/trivago/gollum/core"
"github.com/trivago/gollum/core/log"
"github.com/trivago/gollum/shared"
"strings"
"sync"
"sync/atomic"
"time"
)
// Recognized values for the "Partitioner" and "Compression" settings
// (compared case-insensitively in Configure).
const (
	partRandom     = "random"
	partRoundrobin = "roundrobin"
	partHash       = "hash"

	compressNone   = "none"
	compressGZIP   = "zip" // NOTE(review): value is "zip" (matching the documented "Zip"), not "gzip"
	compressSnappy = "snappy"
)
// Kafka producer plugin
// Configuration example
//
// - "producer.Kafka":
// Enable: true
// ClientId: "weblog"
// Partitioner: "Roundrobin"
// RequiredAcks: 1
// TimeoutMs: 1500
// SendRetries: 3
// Compression: "None"
// MaxOpenRequests: 5
// BatchMinCount: 10
// BatchMaxCount: 1
// BatchSizeByte: 8192
// BatchSizeMaxKB: 1024
// BatchTimeoutSec: 3
// ServerTimeoutSec: 30
// SendTimeoutMs: 250
// ElectRetries: 3
// ElectTimeoutMs: 250
// MetadataRefreshMs: 10000
// Servers:
// - "localhost:9092"
// Topic:
// "console" : "console"
// Stream:
// - "console"
//
// The kafka producer writes messages to a kafka cluster. This producer is
// backed by the sarama library so most settings relate to that library.
// This producer uses a fuse breaker if the connection reports an error.
//
// ClientId sets the client id of this producer. By default this is "gollum".
//
// Partitioner sets the distribution algorithm to use. Valid values are:
// "Random","Roundrobin" and "Hash". By default "Hash" is set.
//
// RequiredAcks defines the acknowledgement level required by the broker.
// 0 = No responses required. 1 = wait for the local commit. -1 = wait for
// all replicas to commit. >1 = wait for a specific number of commits.
// By default this is set to 1.
//
// TimeoutMs denotes the maximum time the broker will wait for acks. This
// setting becomes active when RequiredAcks is set to wait for multiple commits.
// By default this is set to 1500.
//
// SendRetries defines how many times to retry sending data before marking a
// server as not reachable. By default this is set to 3.
//
// Compression sets the method of compression to use. Valid values are:
// "None","Zip" and "Snappy". By default "None" is set.
//
// MaxOpenRequests defines the number of simultanious connections are allowed.
// By default this is set to 5.
//
// BatchMinCount sets the minimum number of messages required to trigger a
// flush. By default this is set to 1.
//
// BatchMaxCount defines the maximum number of messages processed per
// request. By default this is set to 0 for "unlimited".
//
// BatchSizeByte sets the mimimum number of bytes to collect before a new flush
// is triggered. By default this is set to 8192.
//
// BatchSizeMaxKB defines the maximum allowed message size. By default this is
// set to 1024.
//
// BatchTimeoutSec sets the minimum time in seconds to pass after wich a new
// flush will be triggered. By default this is set to 3.
//
// MessageBufferCount sets the internal channel size for the kafka client.
// By default this is set to 256.
//
// ServerTimeoutSec defines the time after which a connection is set to timed
// out. By default this is set to 30 seconds.
//
// SendTimeoutMs defines the number of milliseconds to wait for a server to
// resond before triggering a timeout. Defaults to 250.
//
// ElectRetries defines how many times to retry during a leader election.
// By default this is set to 3.
//
// ElectTimeoutMs defines the number of milliseconds to wait for the cluster to
// elect a new leader. Defaults to 250.
//
// MetadataRefreshMs set the interval in seconds for fetching cluster metadata.
// By default this is set to 10000. This corresponds to the JVM setting
// `topic.metadata.refresh.interval.ms`.
//
// Servers contains the list of all kafka servers to connect to. By default this
// is set to contain only "localhost:9092".
//
// Topic maps a stream to a specific kafka topic. You can define the
// wildcard stream (*) here, too. If defined, all streams that do not have a
// specific mapping will go to this topic (including _GOLLUM_).
// If no topic mappings are set the stream names will be used as topic.
type Kafka struct {
	core.ProducerBase                                 // embedded gollum producer base
	servers           []string                        // broker list ("Servers")
	topic             map[core.MessageStreamID]string // stream -> topic mapping ("Topic")
	clientID          string                          // sarama client id ("ClientId")
	client            kafka.Client                    // nil while disconnected
	config            *kafka.Config                   // sarama configuration built in Configure
	batch             core.MessageBatch               // local message buffer
	producer          kafka.AsyncProducer             // nil while disconnected
	counters          map[string]*int64               // per-topic message counters for metrics
	missCount         int64                           // responses still expected from sarama
	lastMetricUpdate  time.Time                       // last time per-topic rates were published
}
// Metric names used by this producer.
const (
	kafkaMetricMessages    = "Kafka:Messages-"       // total messages per topic (prefix)
	kafkaMetricMessagesSec = "Kafka:MessagesSec-"    // message rate per topic (prefix)
	kafkaMetricMissCount   = "Kafka:ResponsesQueued" // pending sarama responses
)

// init registers this plugin with the gollum type registry.
func init() {
	shared.TypeRegistry.Register(Kafka{})
}
// Configure initializes this producer with values from a plugin config.
// It builds the sarama client configuration, allocates the message batch and
// registers the per-topic metrics.
func (prod *Kafka) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.servers = conf.GetStringArray("Servers", []string{"localhost:9092"})
	prod.topic = conf.GetStreamMap("Topic", "")
	prod.clientID = conf.GetString("ClientId", "gollum")
	prod.lastMetricUpdate = time.Now()

	prod.config = kafka.NewConfig()
	prod.config.ClientID = prod.clientID
	prod.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)

	prod.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
	prod.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
	prod.config.Net.ReadTimeout = prod.config.Net.DialTimeout
	prod.config.Net.WriteTimeout = prod.config.Net.DialTimeout

	prod.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
	prod.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
	prod.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond

	prod.config.Producer.MaxMessageBytes = conf.GetInt("BatchSizeMaxKB", 1<<10) << 10
	prod.config.Producer.RequiredAcks = kafka.RequiredAcks(conf.GetInt("RequiredAcks", int(kafka.WaitForLocal)))
	// BUG fix: the documented setting is "TimeoutMs" but the code only read
	// the misspelled key "TimoutMs". Read the documented key first and keep
	// the old spelling as a backwards-compatible fallback.
	prod.config.Producer.Timeout = time.Duration(conf.GetInt("TimeoutMs", conf.GetInt("TimoutMs", 1500))) * time.Millisecond
	// Both channels are required by transformMessages' response loop.
	prod.config.Producer.Return.Errors = true
	prod.config.Producer.Return.Successes = true

	switch strings.ToLower(conf.GetString("Compression", compressNone)) {
	default:
		fallthrough
	case compressNone:
		prod.config.Producer.Compression = kafka.CompressionNone
	case compressGZIP:
		prod.config.Producer.Compression = kafka.CompressionGZIP
	case compressSnappy:
		prod.config.Producer.Compression = kafka.CompressionSnappy
	}

	switch strings.ToLower(conf.GetString("Partitioner", partRandom)) {
	case partRandom:
		prod.config.Producer.Partitioner = kafka.NewRandomPartitioner
	case partRoundrobin:
		prod.config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
	default:
		fallthrough
	case partHash:
		prod.config.Producer.Partitioner = kafka.NewHashPartitioner
	}

	prod.config.Producer.Flush.Bytes = conf.GetInt("BatchSizeByte", 8192)
	prod.config.Producer.Flush.Messages = conf.GetInt("BatchMinCount", 1)
	prod.config.Producer.Flush.Frequency = time.Duration(conf.GetInt("BatchTimeoutSec", 3)) * time.Second
	prod.config.Producer.Flush.MaxMessages = conf.GetInt("BatchMaxCount", 0)
	prod.config.Producer.Retry.Max = conf.GetInt("SendRetries", 3)
	// NOTE(review): the plugin doc comment says SendTimeoutMs defaults to
	// 250, but the code default is 100 — confirm which value is intended.
	prod.config.Producer.Retry.Backoff = time.Duration(conf.GetInt("SendTimeoutMs", 100)) * time.Millisecond

	prod.batch = core.NewMessageBatch(conf.GetInt("Channel", 8192))

	// Pre-register metrics and counters for all statically mapped topics.
	prod.counters = make(map[string]*int64)
	for _, topic := range prod.topic {
		shared.Metric.New(kafkaMetricMessages + topic)
		shared.Metric.New(kafkaMetricMessagesSec + topic)
		prod.counters[topic] = new(int64)
	}

	shared.Metric.New(kafkaMetricMissCount)
	prod.SetCheckFuseCallback(prod.tryOpenConnection)
	return nil
}
// bufferMessage appends a message to the current batch, flushing via
// sendBatch when the batch is full and dropping the message when the
// producer is no longer active.
func (prod *Kafka) bufferMessage(msg core.Message) {
	prod.batch.AppendOrFlush(msg, prod.sendBatch, prod.IsActiveOrStopping, prod.Drop)
}
// sendBatchOnTimeOut flushes the current batch when it is old enough or has
// grown past half of its current length threshold.
func (prod *Kafka) sendBatchOnTimeOut() {
	flushAge := prod.config.Producer.Flush.Frequency
	if prod.batch.ReachedTimeThreshold(flushAge) || prod.batch.ReachedSizeThreshold(prod.batch.Len()/2) {
		prod.sendBatch()
	}
}
// sendBatch flushes the batch to kafka when a connection is available,
// drops the buffered messages when the producer is shutting down, and
// publishes the per-topic throughput metrics afterwards.
func (prod *Kafka) sendBatch() {
	switch {
	case prod.tryOpenConnection():
		prod.batch.Flush(prod.transformMessages)
	case prod.IsStopping():
		prod.batch.Flush(prod.dropMessages)
	default:
		return // ### return, do not update metrics ###
	}

	// Update metrics
	elapsed := time.Since(prod.lastMetricUpdate)
	prod.lastMetricUpdate = time.Now()

	for topic, counter := range prod.counters {
		numSent := atomic.SwapInt64(counter, 0)
		shared.Metric.Add(kafkaMetricMessages+topic, numSent)
		shared.Metric.SetF(kafkaMetricMessagesSec+topic, float64(numSent)/elapsed.Seconds())
	}
}
// dropMessages routes every given message to the drop handler.
func (prod *Kafka) dropMessages(messages []core.Message) {
	for i := range messages {
		prod.Drop(messages[i])
	}
}
// transformMessages formats and pushes a batch of messages to the async
// producer, then waits for sarama's success/error responses. A topic is
// considered "bad" only if it produced errors and never a single success; a
// reconnect is triggered only when ALL involved topics are bad, to separate
// topic-related errors from server-related ones.
func (prod *Kafka) transformMessages(messages []core.Message) {
	defer func() { shared.Metric.Set(kafkaMetricMissCount, prod.missCount) }()

	for _, msg := range messages {
		originalMsg := msg
		msg.Data, msg.StreamID = prod.ProducerBase.Format(msg)

		// Store current client and producer to avoid races
		client := prod.client
		producer := prod.producer

		// Check if connected
		if client == nil || producer == nil {
			prod.Drop(originalMsg)
			continue // ### continue, not connected ###
		}

		// Send message
		topic, topicMapped := prod.topic[msg.StreamID]
		if !topicMapped {
			// Use wildcard fallback or stream name if not set, and register
			// metrics for the newly seen topic.
			topic, topicMapped = prod.topic[core.WildcardStreamID]
			if !topicMapped {
				topic = core.StreamRegistry.GetStreamName(msg.StreamID)
			}
			shared.Metric.New(kafkaMetricMessages + topic)
			shared.Metric.New(kafkaMetricMessagesSec + topic)
			prod.counters[topic] = new(int64)
			prod.topic[msg.StreamID] = topic
		}

		// The original message travels as metadata so it can be dropped
		// (i.e. rerouted) if sarama reports an error for it.
		producer.Input() <- &kafka.ProducerMessage{
			Topic:    topic,
			Value:    kafka.ByteEncoder(msg.Data),
			Metadata: originalMsg,
		}

		atomic.AddInt64(prod.counters[topic], 1)
		prod.missCount++
	}

	// Wait for errors to be returned
	errors := make(map[string]bool)
	topicsBad := make(map[string]bool)

	timeout := time.NewTimer(prod.config.Producer.Flush.Frequency)
	defer timeout.Stop()

responseLoop:
	for ; prod.missCount > 0; prod.missCount-- {
		select {
		case succ := <-prod.producer.Successes():
			topicsBad[succ.Topic] = false // ok overwrites bad

		case err := <-prod.producer.Errors():
			if _, errorExists := errors[err.Error()]; !errorExists {
				Log.Error.Printf("Kafka producer error: %s", err.Error())
				errors[err.Error()] = true
				// Do not overwrite ok states (one ok = server reachable)
				if _, stateSet := topicsBad[err.Msg.Topic]; !stateSet {
					topicsBad[err.Msg.Topic] = true
				}
			}
			if msg, hasMsg := err.Msg.Metadata.(core.Message); hasMsg {
				prod.Drop(msg)
			}

		case <-timeout.C:
			Log.Warning.Printf("Kafka flush timed out with %d messages left", prod.missCount)
			// BUG fix: a plain break only exits the select statement, not
			// the for loop, which could leave this loop blocked forever
			// after the timer fired. Use a labeled break instead.
			break responseLoop // ### break, took too long ###
		}
	}

	if len(errors) > 0 {
		allTopicsBad := true
		for _, topicBad := range topicsBad {
			allTopicsBad = topicBad && allTopicsBad
		}
		if allTopicsBad {
			// Only restart if all topics report an error
			// This is done to separate topic related errors from server related errors
			Log.Error.Printf("%d error type(s) for this batch. Triggering a reconnect", len(errors))
			prod.closeConnection()
		}
	}
}
// tryOpenConnection lazily creates the kafka client and the async producer
// if they do not exist yet. It returns true when both are available; on
// success the fuse is reactivated.
func (prod *Kafka) tryOpenConnection() bool {
	// Reconnect the client first
	if prod.client == nil {
		client, err := kafka.NewClient(prod.servers, prod.config)
		if err != nil {
			Log.Error.Print("Kafka client error:", err)
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
		prod.client = client
	}

	// Make sure we have a producer up and running
	if prod.producer == nil {
		producer, err := kafka.NewAsyncProducerFromClient(prod.client)
		if err != nil {
			Log.Error.Print("Kafka producer error:", err)
			prod.client.Close()
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
		prod.producer = producer
	}

	prod.Control() <- core.PluginControlFuseActive
	return true
}
// closeConnection shuts down the async producer and the client. If the
// client was still open and the producer is not stopping, the fuse is burned
// (PluginControlFuseBurn) to signal the outage to upstream plugins.
func (prod *Kafka) closeConnection() {
	if prod.producer != nil {
		prod.producer.Close()
		prod.producer = nil
	}
	if prod.client != nil && !prod.client.Closed() {
		prod.client.Close()
		prod.client = nil
		if !prod.IsStopping() {
			prod.Control() <- core.PluginControlFuseBurn
		}
	}
}
// close is the stop callback: it drains the message channel, flushes the
// remaining batch (bounded by the shutdown timeout) and closes the
// connection. WorkerDone is always signalled on exit.
func (prod *Kafka) close() {
	defer prod.WorkerDone()
	prod.CloseMessageChannel(prod.bufferMessage)
	prod.batch.Close(prod.transformMessages, prod.GetShutdownTimeout())
	prod.closeConnection()
}
// Produce registers the main worker, opens the kafka connection and runs the
// ticker-driven control loop that buffers incoming messages and flushes the
// batch on timeout.
func (prod *Kafka) Produce(workers *sync.WaitGroup) {
	prod.AddMainWorker(workers)
	prod.tryOpenConnection()
	prod.TickerMessageControlLoop(prod.bufferMessage, prod.config.Producer.Timeout, prod.sendBatchOnTimeOut)
}
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Download remote packages.
package main
import (
"http"
"os"
"path/filepath"
"regexp"
"strings"
)
// dashboardURL is the package dashboard endpoint that successful first-time
// installs are reported to.
const dashboardURL = "http://godashboard.appspot.com/package"

// maybeReportToDashboard reports path to dashboard unless
// -dashboard=false is on command line. It ignores errors.
func maybeReportToDashboard(path string) {
	// if -dashboard=false was on command line, do nothing
	if !*reportToDashboard {
		return
	}
	// otherwise lob url to dashboard
	r, _ := http.Post(dashboardURL, "application/x-www-form-urlencoded", strings.NewReader("path="+path))
	if r != nil && r.Body != nil {
		r.Body.Close()
	}
}
// vcsPatterns maps a hosting-site name to the regexp recognizing import
// paths hosted there. Each pattern's first submatch is the repository root.
var vcsPatterns = map[string]*regexp.Regexp{
	"googlecode": regexp.MustCompile(`^([a-z0-9\-]+\.googlecode\.com/(svn|hg))(/[a-z0-9A-Z_.\-/]*)?$`),
	"github":     regexp.MustCompile(`^(github\.com/[a-z0-9A-Z_.\-]+/[a-z0-9A-Z_.\-]+)(/[a-z0-9A-Z_.\-/]*)?$`),
	"bitbucket":  regexp.MustCompile(`^(bitbucket\.org/[a-z0-9A-Z_.\-]+/[a-z0-9A-Z_.\-]+)(/[a-z0-9A-Z_.\-/]*)?$`),
	"launchpad":  regexp.MustCompile(`^(launchpad\.net/([a-z0-9A-Z_.\-]+(/[a-z0-9A-Z_.\-]+)?|~[a-z0-9A-Z_.\-]+/(\+junk|[a-z0-9A-Z_.\-]+)/[a-z0-9A-Z_.\-]+))(/[a-z0-9A-Z_.\-/]+)?$`),
}
// isRemote returns true if the provided package path
// matches one of the supported remote repositories.
func isRemote(pkg string) bool {
	for _, hostPattern := range vcsPatterns {
		if hostPattern.MatchString(pkg) {
			return true
		}
	}
	return false
}
// download checks out or updates pkg from the remote server.
// The hosting site is identified by matching pkg against vcsPatterns; the
// repository root (first submatch) is then checked out under srcDir with the
// matching tool. Returns a non-nil os.Error (pre-Go1 error type) on failure
// or when no pattern matches.
func download(pkg, srcDir string) os.Error {
	// Reject paths that could escape srcDir.
	if strings.Contains(pkg, "..") {
		return os.ErrorString("invalid path (contains ..)")
	}
	if m := vcsPatterns["bitbucket"].FindStringSubmatch(pkg); m != nil {
		if err := vcsCheckout(&hg, srcDir, m[1], "http://"+m[1], m[1]); err != nil {
			return err
		}
		return nil
	}
	if m := vcsPatterns["googlecode"].FindStringSubmatch(pkg); m != nil {
		var v *vcs
		// m[2] is the "hg" or "svn" path component captured by the pattern.
		switch m[2] {
		case "hg":
			v = &hg
		case "svn":
			v = &svn
		default:
			// regexp only allows hg, svn to get through
			panic("missing case in download: " + pkg)
		}
		if err := vcsCheckout(v, srcDir, m[1], "https://"+m[1], m[1]); err != nil {
			return err
		}
		return nil
	}
	if m := vcsPatterns["github"].FindStringSubmatch(pkg); m != nil {
		if strings.HasSuffix(m[1], ".git") {
			return os.ErrorString("repository " + pkg + " should not have .git suffix")
		}
		// ".git" is appended to the clone URL, never to the import path.
		if err := vcsCheckout(&git, srcDir, m[1], "http://"+m[1]+".git", m[1]); err != nil {
			return err
		}
		return nil
	}
	if m := vcsPatterns["launchpad"].FindStringSubmatch(pkg); m != nil {
		// Either lp.net/<project>[/<series>[/<path>]]
		// or lp.net/~<user or team>/<project>/<branch>[/<path>]
		if err := vcsCheckout(&bzr, srcDir, m[1], "https://"+m[1], m[1]); err != nil {
			return err
		}
		return nil
	}
	return os.ErrorString("unknown repository: " + pkg)
}
// a vcs represents a version control system
// like Mercurial, Git, or Subversion.
// All fields except cmd and metadir hold subcommand names or flags passed
// verbatim to the tool; an empty field means "not supported".
type vcs struct {
	cmd               string // executable name
	metadir           string // metadata directory, e.g. ".hg"
	checkout          string // subcommand to switch to a revision
	clone             string // subcommand for the initial copy
	update            string // subcommand to update the working tree
	updateReleaseFlag string // flag/arg selecting the "release" revision
	pull              string // subcommand to fetch remote revisions ("" if none)
	pullForceFlag     string // extra flag forcing a pull ("" if none)
	log               string // subcommand used to probe for a release tag
	logLimitFlag      string // flag limiting log output ("" if none)
	logReleaseFlag    string // flag selecting the release tag in the log probe
}

// hg describes the Mercurial command set.
var hg = vcs{
	cmd:               "hg",
	metadir:           ".hg",
	checkout:          "checkout",
	clone:             "clone",
	update:            "update",
	updateReleaseFlag: "release",
	pull:              "pull",
	log:               "log",
	logLimitFlag:      "-l1",
	logReleaseFlag:    "-rrelease",
}

// git describes the Git command set ("update" maps to pull, "pull" to fetch).
var git = vcs{
	cmd:               "git",
	metadir:           ".git",
	checkout:          "checkout",
	clone:             "clone",
	update:            "pull",
	updateReleaseFlag: "release",
	pull:              "fetch",
	log:               "show-ref",
	logLimitFlag:      "",
	logReleaseFlag:    "release",
}

// svn describes the Subversion command set (no independent pull support).
var svn = vcs{
	cmd:               "svn",
	metadir:           ".svn",
	checkout:          "checkout",
	clone:             "checkout",
	update:            "update",
	updateReleaseFlag: "release",
	log:               "log",
	logLimitFlag:      "-l1",
	logReleaseFlag:    "release",
}

// bzr describes the Bazaar command set.
var bzr = vcs{
	cmd:               "bzr",
	metadir:           ".bzr",
	checkout:          "update",
	clone:             "branch",
	update:            "update",
	updateReleaseFlag: "-rrelease",
	pull:              "pull",
	pullForceFlag:     "--overwrite",
	log:               "log",
	logLimitFlag:      "-l1",
	logReleaseFlag:    "-rrelease",
}
// Try to detect if a "release" tag exists. If it does, update
// to the tagged version, otherwise just update the current branch.
// NOTE(_nil): svn will always fail because it is trying to get
// the revision history of a file named "release" instead of
// looking for a commit with a release tag
func (v *vcs) updateRepo(dst string) os.Error {
	// Probe quietly for the release tag; on success check out the release,
	// otherwise fall back to a plain update of the current branch. The inner
	// err deliberately shadows the probe's err.
	if err := quietRun(dst, nil, v.cmd, v.log, v.logLimitFlag, v.logReleaseFlag); err == nil {
		if err := run(dst, nil, v.cmd, v.checkout, v.updateReleaseFlag); err != nil {
			return err
		}
	} else if err := run(dst, nil, v.cmd, v.update); err != nil {
		return err
	}
	return nil
}
// vcsCheckout checks out repo into dst using vcs.
// It tries to check out (or update, if the dst already
// exists and -u was specified on the command line)
// the repository at tag/branch "release". If there is no
// such tag or branch, it falls back to the repository tip.
func vcsCheckout(vcs *vcs, srcDir, pkgprefix, repo, dashpath string) os.Error {
	dst := filepath.Join(srcDir, filepath.FromSlash(pkgprefix))
	// A stat error on the metadata dir is taken to mean "not checked out yet".
	dir, err := os.Stat(filepath.Join(dst, vcs.metadir))
	if err == nil && !dir.IsDirectory() {
		return os.ErrorString("not a directory: " + dst)
	}
	if err != nil {
		// First installation: create the parent directory, clone, then
		// switch to the release revision (or tip).
		parent, _ := filepath.Split(dst)
		if err := os.MkdirAll(parent, 0777); err != nil {
			return err
		}
		if err := run(string(filepath.Separator), nil, vcs.cmd, vcs.clone, repo, dst); err != nil {
			return err
		}
		if err := vcs.updateRepo(dst); err != nil {
			return err
		}
		// success on first installation - report
		maybeReportToDashboard(dashpath)
	} else if *update {
		// Retrieve new revisions from the remote branch, if the VCS
		// supports this operation independently (e.g. svn doesn't)
		if vcs.pull != "" {
			if vcs.pullForceFlag != "" {
				if err := run(dst, nil, vcs.cmd, vcs.pull, vcs.pullForceFlag); err != nil {
					return err
				}
			} else if err := run(dst, nil, vcs.cmd, vcs.pull); err != nil {
				return err
			}
		}
		// Update to release or latest revision
		if err := vcs.updateRepo(dst); err != nil {
			return err
		}
	}
	return nil
}
goinstall: Add support for arbitrary code repositories
Extend goinstall to support downloading from any hg/git/svn/bzr hosting
site, not just the standard ones. The type of hosting is automatically
checked by trying all the tools, so the import statement looks like:
import "example.com/mything"
Which will work for Mercurial (http), Subversion (http, svn), Git (http,
git) and Bazaar (http, bzr) hosting.
All the existing package imports will work through this new mechanism,
but the existing hard-coded host support is left in place to ensure
there is no change in behaviour.
R=golang-dev, bradfitz, fvbommel, go.peter.90, n13m3y3r, adg, duperray.olivier
CC=golang-dev
http://codereview.appspot.com/4650043
Committer: Andrew Gerrand <395a7d33bec8475c9b83b7d440f141bcbd994aa5@golang.org>
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Download remote packages.
package main
import (
"exec"
"http"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
)
// dashboardURL is the package dashboard endpoint that successful first-time
// installs are reported to.
const dashboardURL = "http://godashboard.appspot.com/package"

// maybeReportToDashboard reports path to dashboard unless
// -dashboard=false is on command line. It ignores errors.
func maybeReportToDashboard(path string) {
	// if -dashboard=false was on command line, do nothing
	if !*reportToDashboard {
		return
	}
	// otherwise lob url to dashboard
	r, _ := http.Post(dashboardURL, "application/x-www-form-urlencoded", strings.NewReader("path="+path))
	if r != nil && r.Body != nil {
		r.Body.Close()
	}
}
// host pairs a regexp recognizing import paths on a known hosting site with
// the URL scheme used to reach it. The pattern's first submatch is the
// repository root.
type host struct {
	pattern  *regexp.Regexp
	protocol string
}

// a vcs represents a version control system
// like Mercurial, Git, or Subversion.
type vcs struct {
	name              string   // human-readable tool name, e.g. "Mercurial"
	cmd               string   // executable name
	metadir           string   // metadata directory, e.g. ".hg"
	checkout          string   // subcommand to switch to a revision
	clone             string   // subcommand for the initial copy
	update            string   // subcommand to update the working tree
	updateReleaseFlag string   // flag/arg selecting the "release" revision
	pull              string   // subcommand to fetch remote revisions ("" if none)
	pullForceFlag     string   // extra flag forcing a pull ("" if none)
	log               string   // subcommand used to probe for a release tag
	logLimitFlag      string   // flag limiting log output ("" if none)
	logReleaseFlag    string   // flag selecting the release tag in the log probe
	check             string   // cheap subcommand used to probe a remote repo
	protocols         []string // URL schemes to try, in order
	suffix            string   // suffix appended to repo URLs, e.g. ".git"
	findRepos         bool     // whether unknown hosts may be probed for this vcs
	defaultHosts      []host   // built-in hosting sites served by this vcs

	// Is this tool present? (set by findTools)
	available bool
}

// vcsMatch ties a resolved vcs to the repository prefix (import path root)
// and the full clone URL.
type vcsMatch struct {
	*vcs
	prefix, repo string
}
// hg describes Mercurial; built-in hosts: Google Code (hg) and Bitbucket.
var hg = vcs{
	name:              "Mercurial",
	cmd:               "hg",
	metadir:           ".hg",
	checkout:          "checkout",
	clone:             "clone",
	update:            "update",
	updateReleaseFlag: "release",
	pull:              "pull",
	log:               "log",
	logLimitFlag:      "-l1",
	logReleaseFlag:    "-rrelease",
	check:             "identify",
	protocols:         []string{"http"},
	findRepos:         true,
	defaultHosts: []host{
		{regexp.MustCompile(`^([a-z0-9\-]+\.googlecode\.com/hg)(/[a-z0-9A-Z_.\-/]*)?$`), "https"},
		{regexp.MustCompile(`^(bitbucket\.org/[a-z0-9A-Z_.\-]+/[a-z0-9A-Z_.\-]+)(/[a-z0-9A-Z_.\-/]*)?$`), "http"},
	},
}

// git describes Git; built-in host: GitHub. Repo URLs get a ".git" suffix.
var git = vcs{
	name:              "Git",
	cmd:               "git",
	metadir:           ".git",
	checkout:          "checkout",
	clone:             "clone",
	update:            "pull",
	updateReleaseFlag: "release",
	pull:              "fetch",
	log:               "show-ref",
	logLimitFlag:      "",
	logReleaseFlag:    "release",
	check:             "peek-remote",
	protocols:         []string{"git", "http"},
	suffix:            ".git",
	findRepos:         true,
	defaultHosts: []host{
		{regexp.MustCompile(`^(github\.com/[a-z0-9A-Z_.\-]+/[a-z0-9A-Z_.\-]+)(/[a-z0-9A-Z_.\-/]*)?$`), "http"},
	},
}

// svn describes Subversion; built-in host: Google Code (svn). It cannot
// search for repository roots (findRepos: false).
var svn = vcs{
	name:              "Subversion",
	cmd:               "svn",
	metadir:           ".svn",
	checkout:          "checkout",
	clone:             "checkout",
	update:            "update",
	updateReleaseFlag: "release",
	log:               "log",
	logLimitFlag:      "-l1",
	logReleaseFlag:    "release",
	check:             "info",
	protocols:         []string{"http", "svn"},
	findRepos:         false,
	defaultHosts: []host{
		{regexp.MustCompile(`^([a-z0-9\-]+\.googlecode\.com/svn)(/[a-z0-9A-Z_.\-/]*)?$`), "https"},
	},
}

// bzr describes Bazaar; built-in host: Launchpad.
var bzr = vcs{
	name:              "Bazaar",
	cmd:               "bzr",
	metadir:           ".bzr",
	checkout:          "update",
	clone:             "branch",
	update:            "update",
	updateReleaseFlag: "-rrelease",
	pull:              "pull",
	pullForceFlag:     "--overwrite",
	log:               "log",
	logLimitFlag:      "-l1",
	logReleaseFlag:    "-rrelease",
	check:             "info",
	protocols:         []string{"http", "bzr"},
	findRepos:         true,
	defaultHosts: []host{
		{regexp.MustCompile(`^(launchpad\.net/([a-z0-9A-Z_.\-]+(/[a-z0-9A-Z_.\-]+)?|~[a-z0-9A-Z_.\-]+/(\+junk|[a-z0-9A-Z_.\-]+)/[a-z0-9A-Z_.\-]+))(/[a-z0-9A-Z_.\-/]+)?$`), "https"},
	},
}
// vcsList holds the supported tools in the order they are tried when probing
// an unknown host.
var vcsList = []*vcs{&git, &hg, &bzr, &svn}

// potentialPrefixes returns every multi-element prefix of pkg, shortest
// first; e.g. "a.com/x/y" yields ["a.com/x", "a.com/x/y"]. The bare first
// element alone is not included.
func potentialPrefixes(pkg string) []string {
	prefixes := []string{}
	parts := strings.Split(pkg, "/", -1)
	elem := parts[0]
	for _, part := range parts[1:] {
		elem = path.Join(elem, part)
		prefixes = append(prefixes, elem)
	}
	return prefixes
}
// tryCommand probes every protocol/prefix combination by running the tool's
// cheap check subcommand (e.g. "hg identify <url>") and sends the first
// combination that succeeds on c. Nothing is sent when all probes fail.
func tryCommand(c chan *vcsMatch, v *vcs, prefixes []string) {
	for _, proto := range v.protocols {
		for _, prefix := range prefixes {
			repo := proto + "://" + prefix + v.suffix
			if exec.Command(v.cmd, v.check, repo).Run() == nil {
				c <- &vcsMatch{v, prefix, repo}
				return
			}
		}
	}
}
// findToolsOnce guards the one-time detection of installed vcs tools.
var findToolsOnce sync.Once

// findTools marks each vcs as available if its executable responds to
// "<cmd> help".
func findTools() {
	for _, v := range vcsList {
		v.available = exec.Command(v.cmd, "help").Run() == nil
	}
}

// logMissingToolsOnce guards the one-time warning about missing tools.
var logMissingToolsOnce sync.Once

// logMissingTools reports every vcs whose executable was not found.
func logMissingTools() {
	for _, v := range vcsList {
		if !v.available {
			logf("%s not found; %s packages will be ignored\n", v.cmd, v.name)
		}
	}
}
// findVcs probes all available tools in parallel to find which vcs and
// repository root serve pkg. It returns nil if nothing answers within the
// 20 second timeout. Losing probe goroutines may still be running when this
// returns; the channel is buffered to len(vcsList) so they never block.
func findVcs(pkg string) *vcsMatch {
	c := make(chan *vcsMatch, len(vcsList))
	findToolsOnce.Do(findTools)
	// we don't know how much of the name constitutes the repository prefix, so
	// build a list of possibilities
	prefixes := potentialPrefixes(pkg)
	for _, v := range vcsList {
		if !v.available {
			continue
		}
		if v.findRepos {
			go tryCommand(c, v, prefixes)
		} else {
			// This vcs cannot search for repo roots; try the full path only.
			go tryCommand(c, v, []string{pkg})
		}
	}
	select {
	case m := <-c:
		return m
	case <-time.After(20 * 1e9): // 20 * 1e9 ns = 20s (pre-Go1 style duration)
	}
	logMissingToolsOnce.Do(logMissingTools)
	return nil
}
// isRemote returns true if the first part of the package name looks like a
// hostname - i.e. contains at least one '.' and the last part is at least 2
// characters.
func isRemote(pkg string) bool {
	firstSlash := strings.Split(pkg, "/", 2)
	if len(firstSlash) != 2 {
		return false
	}
	hostParts := strings.Split(firstSlash[0], ".", -1)
	return len(hostParts) >= 2 && len(hostParts[len(hostParts)-1]) >= 2
}
// download checks out or updates pkg from the remote server.
// It first matches pkg against each tool's built-in host patterns; failing
// that, it probes the remote host with every available tool (findVcs).
func download(pkg, srcDir string) os.Error {
	// Reject paths that could escape srcDir.
	if strings.Contains(pkg, "..") {
		return os.ErrorString("invalid path (contains ..)")
	}
	var m *vcsMatch
	// NOTE(review): these loops do not break on a match, so if several host
	// patterns matched, the last one would win — confirm the built-in
	// patterns are disjoint.
	for _, v := range vcsList {
		for _, host := range v.defaultHosts {
			if hm := host.pattern.FindStringSubmatch(pkg); hm != nil {
				if v.suffix != "" && strings.HasSuffix(hm[1], v.suffix) {
					return os.ErrorString("repository " + pkg + " should not have " + v.suffix + " suffix")
				}
				repo := host.protocol + "://" + hm[1] + v.suffix
				m = &vcsMatch{v, hm[1], repo}
			}
		}
	}
	// Fall back to probing the remote host directly.
	if m == nil {
		m = findVcs(pkg)
	}
	if m == nil {
		return os.ErrorString("cannot download: " + pkg)
	}
	return vcsCheckout(m.vcs, srcDir, m.prefix, m.repo, pkg)
}
// Try to detect if a "release" tag exists. If it does, update
// to the tagged version, otherwise just update the current branch.
// NOTE(_nil): svn will always fail because it is trying to get
// the revision history of a file named "release" instead of
// looking for a commit with a release tag
func (v *vcs) updateRepo(dst string) os.Error {
	// Probe quietly for the release tag; on success check out the release,
	// otherwise fall back to a plain update of the current branch. The inner
	// err deliberately shadows the probe's err.
	if err := quietRun(dst, nil, v.cmd, v.log, v.logLimitFlag, v.logReleaseFlag); err == nil {
		if err := run(dst, nil, v.cmd, v.checkout, v.updateReleaseFlag); err != nil {
			return err
		}
	} else if err := run(dst, nil, v.cmd, v.update); err != nil {
		return err
	}
	return nil
}
// vcsCheckout checks out repo into dst using vcs.
// It tries to check out (or update, if the dst already
// exists and -u was specified on the command line)
// the repository at tag/branch "release". If there is no
// such tag or branch, it falls back to the repository tip.
func vcsCheckout(vcs *vcs, srcDir, pkgprefix, repo, dashpath string) os.Error {
	dst := filepath.Join(srcDir, filepath.FromSlash(pkgprefix))
	// A stat error on the metadata dir is taken to mean "not checked out yet".
	dir, err := os.Stat(filepath.Join(dst, vcs.metadir))
	if err == nil && !dir.IsDirectory() {
		return os.ErrorString("not a directory: " + dst)
	}
	if err != nil {
		// First installation: create the parent directory, clone, then
		// switch to the release revision (or tip).
		parent, _ := filepath.Split(dst)
		if err := os.MkdirAll(parent, 0777); err != nil {
			return err
		}
		if err := run(string(filepath.Separator), nil, vcs.cmd, vcs.clone, repo, dst); err != nil {
			return err
		}
		if err := vcs.updateRepo(dst); err != nil {
			return err
		}
		// success on first installation - report
		maybeReportToDashboard(dashpath)
	} else if *update {
		// Retrieve new revisions from the remote branch, if the VCS
		// supports this operation independently (e.g. svn doesn't)
		if vcs.pull != "" {
			if vcs.pullForceFlag != "" {
				if err := run(dst, nil, vcs.cmd, vcs.pull, vcs.pullForceFlag); err != nil {
					return err
				}
			} else if err := run(dst, nil, vcs.cmd, vcs.pull); err != nil {
				return err
			}
		}
		// Update to release or latest revision
		if err := vcs.updateRepo(dst); err != nil {
			return err
		}
	}
	return nil
}
|
package http
import (
"sort"
"strconv"
"strings"
"time"
"github.com/Cepave/open-falcon-backend/modules/query/g"
log "github.com/Sirupsen/logrus"
"github.com/astaxie/beego/orm"
"github.com/jasonlvhit/gocron"
)
// Contacts mirrors a row of the boss.contacts table (see updateContactsTable).
type Contacts struct {
	Id      int
	Name    string
	Phone   string
	Email   string
	Updated string
}

// Hosts mirrors a row of the boss.hosts table (see updateHostsTable).
type Hosts struct {
	Id        int
	Hostname  string
	Exist     int // 1 = seen recently, 0 = stale/gone
	Activate  int
	Platform  string
	Platforms string
	Idc       string
	Ip        string
	Isp       string
	Province  string
	City      string
	Status    string
	Updated   string // "2006-01-02 15:04:05" formatted timestamp
}

// Idcs models an IDC record.
type Idcs struct {
	Id        int
	Popid     int
	Idc       string
	Bandwidth int
	Count     int
	Area      string
	Province  string
	City      string
	Updated   string
}

// Ips models an IP record (queried via the "ips" table).
type Ips struct {
	Id       int
	Ip       string
	Exist    int
	Status   int
	Hostname string
	Platform string
	Updated  string
}

// Platforms mirrors a row of the boss.platforms table (see updatePlatformsTable).
type Platforms struct {
	Id        int
	Platform  string
	Contacts  string
	Principal string
	Deputy    string
	Upgrader  string
	Count     int
	Updated   string
}
// SyncHostsAndContactsTable schedules the periodic jobs that keep the hosts
// and contacts tables in sync, as enabled in the configuration. Each enabled
// job runs once immediately and then on its configured interval. When at
// least one job is enabled this function blocks on the gocron scheduler.
func SyncHostsAndContactsTable() {
	if g.Config().Hosts.Enabled || g.Config().Contacts.Enabled {
		if g.Config().Hosts.Enabled {
			updateMapData()
			syncHostsTable()
			intervalToSyncHostsTable := uint64(g.Config().Hosts.Interval)
			gocron.Every(intervalToSyncHostsTable).Seconds().Do(syncHostsTable)
		}
		if g.Config().Contacts.Enabled {
			syncContactsTable()
			intervalToSyncContactsTable := uint64(g.Config().Contacts.Interval)
			gocron.Every(intervalToSyncContactsTable).Seconds().Do(syncContactsTable)
		}
		// gocron.Start returns a channel; receiving from it blocks here.
		<-gocron.Start()
	}
}
// getIDCMap loads all IDC rows from grafana.idc and returns them keyed by
// their pop_id rendered as a decimal string. Query errors are logged and an
// empty (or partial) map is returned.
func getIDCMap() map[string]interface{} {
	idcByPopID := map[string]interface{}{}
	ormer := orm.NewOrm()
	var rows []Idc
	query := "SELECT pop_id, name, province, city FROM grafana.idc ORDER BY pop_id ASC"
	if _, err := ormer.Raw(query).QueryRows(&rows); err != nil {
		log.Errorf(err.Error())
	}
	for _, row := range rows {
		idcByPopID[strconv.Itoa(row.Pop_id)] = row
	}
	return idcByPopID
}
// updateHostsTable synchronizes boss.hosts with the given host data. It
// first marks rows not updated for more than 10 minutes as gone (exist = 0),
// then inserts or updates one row per hostname from hostsMap.
// Assumes each hostsMap entry carries the keys "hostname", "activate",
// "platform", "ip" and "idcID" — TODO confirm against the caller.
func updateHostsTable(hostnames []string, hostsMap map[string]map[string]string) {
	log.Debugf("func updateHostsTable()")
	var hosts []Hosts
	o := orm.NewOrm()
	o.Using("boss")
	_, err := o.QueryTable("hosts").Limit(10000).All(&hosts)
	if err != nil {
		log.Errorf(err.Error())
	} else {
		format := "2006-01-02 15:04:05"
		for _, host := range hosts {
			updatedTime, _ := time.Parse(format, host.Updated)
			currentTime, _ := time.Parse(format, getNow())
			diff := currentTime.Unix() - updatedTime.Unix()
			// Stale for over 10 minutes -> flag the host as gone.
			if diff > 600 {
				host.Exist = 0
				_, err := o.Update(&host)
				if err != nil {
					log.Errorf(err.Error())
				}
			}
		}
	}
	// Rebuild the row list from the incoming host map.
	hosts = []Hosts{}
	idcMap := getIDCMap()
	var host Hosts
	// NOTE(review): host is reused across iterations; fields not assigned
	// below (e.g. Idc/Province/City when the idc lookup misses) keep the
	// value from the previous iteration — confirm this is intended.
	for _, hostname := range hostnames {
		item := hostsMap[hostname]
		activate, _ := strconv.Atoi(item["activate"])
		host.Hostname = item["hostname"]
		host.Exist = 1
		host.Activate = activate
		host.Platform = item["platform"]
		host.Ip = item["ip"]
		// ISP is taken as the hostname prefix before the first dash.
		host.Isp = strings.Split(item["hostname"], "-")[0]
		host.Updated = getNow()
		idcID := item["idcID"]
		if _, ok := idcMap[idcID]; ok {
			idc := idcMap[idcID]
			host.Idc = idc.(Idc).Name
			host.Province = idc.(Idc).Province
			host.City = idc.(Idc).City
		}
		hosts = append(hosts, host)
	}
	// Upsert: insert rows that do not exist yet, update the rest in place.
	for _, item := range hosts {
		err := o.QueryTable("hosts").Limit(10000).Filter("hostname", item.Hostname).One(&host)
		if err == orm.ErrNoRows {
			sql := "INSERT INTO boss.hosts("
			sql += "hostname, exist, activate, platform, idc, ip, "
			sql += "isp, province, city, updated) "
			sql += "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
			_, err := o.Raw(sql, item.Hostname, item.Exist, item.Activate, item.Platform, item.Idc, item.Ip, item.Isp, item.Province, item.City, item.Updated).Exec()
			if err != nil {
				log.Errorf(err.Error())
			}
		} else if err != nil {
			log.Errorf(err.Error())
		} else {
			// Keep the primary key so Update() hits the existing row.
			item.Id = host.Id
			_, err := o.Update(&item)
			if err != nil {
				log.Errorf(err.Error())
			}
		}
	}
}
// updatePlatformsTable refreshes the per-platform IP counts in
// boss.platforms: for every platform name the number of existing, active,
// named IPs is counted and the corresponding row is inserted or updated.
func updatePlatformsTable(platformNames []string, platformsMap map[string]map[string]interface{}) {
	log.Debugf("func updatePlatformsTable()")
	now := getNow()
	o := orm.NewOrm()
	o.Using("boss")
	var platform Platforms
	for _, platformName := range platformNames {
		// Count live IPs of this platform; treat query errors as zero.
		count, err := o.QueryTable("ips").Filter("platform", platformName).Filter("exist", 1).Filter("status", 1).Exclude("hostname__isnull", true).Count()
		if err != nil {
			count = 0
		}
		group := platformsMap[platformName]
		err = o.QueryTable("platforms").Filter("platform", group["platformName"]).One(&platform)
		if err == orm.ErrNoRows {
			sql := "INSERT INTO boss.platforms(platform, count, updated) VALUES(?, ?, ?)"
			_, err := o.Raw(sql, group["platformName"], count, now).Exec()
			if err != nil {
				log.Errorf(err.Error())
			}
		} else if err != nil {
			log.Errorf(err.Error())
		} else {
			platform.Platform = group["platformName"].(string)
			platform.Count = int(count)
			platform.Updated = now
			_, err := o.Update(&platform)
			if err != nil {
				log.Errorf(err.Error())
			}
		}
	}
}
// updateContactsTable upserts one row per contact (keyed by name) into
// boss.contacts, refreshing phone, email and the updated timestamp.
//
// NOTE(review): the sibling syncContactsTable builds its contact map as
// map[string]map[string]interface{}; verify the element type expected
// here against the actual caller.
func updateContactsTable(contactNames []string, contactsMap map[string]map[string]string) {
	log.Debugf("func updateContactsTable()")
	o := orm.NewOrm()
	o.Using("boss")
	var contact Contacts
	for _, contactName := range contactNames {
		user := contactsMap[contactName]
		err := o.QueryTable("contacts").Filter("name", user["name"]).One(&contact)
		if err == orm.ErrNoRows {
			sql := "INSERT INTO boss.contacts(name, phone, email, updated) VALUES(?, ?, ?, ?)"
			_, err := o.Raw(sql, user["name"], user["phone"], user["email"], getNow()).Exec()
			if err != nil {
				log.Errorf(err.Error())
			}
		} else if err != nil {
			log.Errorf(err.Error())
		} else {
			// Existing contact: refresh the mutable fields only.
			contact.Email = user["email"]
			contact.Phone = user["phone"]
			contact.Updated = getNow()
			_, err := o.Update(&contact)
			if err != nil {
				log.Errorf(err.Error())
			}
		}
	}
}
// addContactsToPlatformsTable joins the unique contact names for each
// platform into the Contacts column of boss.platforms.  Platforms with
// no matching entry in contacts are left untouched.
//
// contacts maps platform name -> []interface{} of user objects, each a
// map[string]interface{} with at least a "name" key.
func addContactsToPlatformsTable(contacts map[string]interface{}) {
	log.Debugf("func addContactsToPlatformsTable()")
	o := orm.NewOrm()
	o.Using("boss")
	var platforms []Platforms
	_, err := o.QueryTable("platforms").All(&platforms)
	if err != nil {
		log.Errorf(err.Error())
	} else {
		for _, platform := range platforms {
			contactsOfPlatform := []string{}
			platformName := platform.Platform
			if users, ok := contacts[platformName]; ok {
				for _, user := range users.([]interface{}) {
					contactName := user.(map[string]interface{})["name"].(string)
					// appendUniqueString dedupes repeated contact names.
					contactsOfPlatform = appendUniqueString(contactsOfPlatform, contactName)
				}
			}
			// Only write back when at least one contact was found.
			if len(contactsOfPlatform) > 0 {
				platform.Contacts = strings.Join(contactsOfPlatform, ",")
				platform.Updated = getNow()
				_, err := o.Update(&platform)
				if err != nil {
					log.Errorf(err.Error())
				}
			}
		}
	}
}
// syncHostsTable re-imports host and platform inventory from the
// external platform API into boss.hosts and boss.platforms.
//
// It is rate-limited: when the most recently updated existing host row
// is younger than the configured Hosts.Interval, the sync is skipped.
// Devices are only accepted when their declared IP matches the IP
// resolved from the hostname, and are deduplicated by hostname.
func syncHostsTable() {
	o := orm.NewOrm()
	o.Using("boss")
	var rows []orm.Params
	sql := "SELECT updated FROM boss.hosts WHERE exist = 1 ORDER BY updated DESC LIMIT 1"
	num, err := o.Raw(sql).Values(&rows)
	if err != nil {
		log.Errorf(err.Error())
		return
	} else if num > 0 {
		format := "2006-01-02 15:04:05"
		updatedTime, _ := time.Parse(format, rows[0]["updated"].(string))
		currentTime, _ := time.Parse(format, getNow())
		diff := currentTime.Unix() - updatedTime.Unix()
		if int(diff) < g.Config().Hosts.Interval {
			// Synced recently enough; nothing to do.
			return
		}
	}
	var nodes = make(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	getPlatformJSON(nodes, result)
	if nodes["status"] == nil {
		return
	} else if int(nodes["status"].(float64)) != 1 {
		return // API reported failure
	}
	platformNames := []string{}
	platformsMap := map[string]map[string]interface{}{}
	hostnames := []string{}
	hostsMap := map[string]map[string]string{}
	hostnamesMap := map[string]int{}
	idcIDs := []string{}
	hostname := ""
	for _, platform := range nodes["result"].([]interface{}) {
		countOfHosts := 0
		platformName := platform.(map[string]interface{})["platform"].(string)
		platformNames = appendUniqueString(platformNames, platformName)
		for _, device := range platform.(map[string]interface{})["ip_list"].([]interface{}) {
			hostname = device.(map[string]interface{})["hostname"].(string)
			ip := device.(map[string]interface{})["ip"].(string)
			// Accept the device only when its declared IP matches the IP
			// resolved from the hostname; dedupe by hostname.
			if len(ip) > 0 && ip == getIPFromHostname(hostname, result) {
				if _, ok := hostnamesMap[hostname]; !ok {
					hostnames = append(hostnames, hostname)
					idcID := device.(map[string]interface{})["pop_id"].(string)
					host := map[string]string{
						"hostname": hostname,
						"activate": device.(map[string]interface{})["ip_status"].(string),
						"platform": platformName,
						"idcID":    idcID,
						"ip":       ip,
					}
					hostsMap[hostname] = host
					idcIDs = appendUniqueString(idcIDs, idcID)
					hostnamesMap[hostname] = 1
					countOfHosts++
				}
			}
		}
		platformsMap[platformName] = map[string]interface{}{
			"platformName": platformName,
			"count":        countOfHosts,
			"contacts":     "",
		}
	}
	sort.Strings(hostnames)
	sort.Strings(platformNames)
	// Fixed: the original passed platformNames as an extra Debugf argument
	// with no format verb; use %v so the slice is actually rendered.
	log.Debugf("platformNames = %v", platformNames)
	updateHostsTable(hostnames, hostsMap)
	updatePlatformsTable(platformNames, platformsMap)
}
// syncContactsTable refreshes boss.contacts (and, via
// addContactsToPlatformsTable, the contact columns of boss.platforms)
// from the external contact API.
//
// Rate-limited: the sync is skipped when the newest contacts row is
// younger than the configured Contacts.Interval.  The platform list is
// taken from the distinct platforms already stored in boss.platforms.
func syncContactsTable() {
	log.Debugf("func syncContactsTable()")
	o := orm.NewOrm()
	o.Using("boss")
	var rows []orm.Params
	sql := "SELECT updated FROM boss.contacts ORDER BY updated DESC LIMIT 1"
	num, err := o.Raw(sql).Values(&rows)
	if err != nil {
		log.Errorf(err.Error())
		return
	} else if num > 0 {
		format := "2006-01-02 15:04:05"
		updatedTime, _ := time.Parse(format, rows[0]["updated"].(string))
		currentTime, _ := time.Parse(format, getNow())
		diff := currentTime.Unix() - updatedTime.Unix()
		if int(diff) < g.Config().Contacts.Interval {
			// Synced recently enough; nothing to do.
			return
		}
	}
	platformNames := []string{}
	sql = "SELECT DISTINCT platform FROM boss.platforms ORDER BY platform ASC"
	num, err = o.Raw(sql).Values(&rows)
	if err != nil {
		log.Errorf(err.Error())
		return
	} else if num > 0 {
		for _, row := range rows {
			platformNames = append(platformNames, row["platform"].(string))
		}
	}
	var nodes = make(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	getPlatformContact(strings.Join(platformNames, ","), nodes)
	contactNames := []string{}
	contactsMap := map[string]map[string]interface{}{}
	// NOTE(review): assumes nodes["result"]["items"] exists with this
	// shape; a failed API call would make these assertions panic — confirm
	// getPlatformContact's error behavior.
	contacts := nodes["result"].(map[string]interface{})["items"].(map[string]interface{})
	for _, platformName := range platformNames {
		if items, ok := contacts[platformName]; ok {
			for _, user := range items.([]interface{}) {
				contactName := user.(map[string]interface{})["name"].(string)
				// Dedupe contacts appearing under several platforms.
				if _, ok := contactsMap[contactName]; !ok {
					contactsMap[contactName] = user.(map[string]interface{})
					contactNames = append(contactNames, contactName)
				}
			}
		}
	}
	sort.Strings(contactNames)
	updateContactsTable(contactNames, contactsMap)
	addContactsToPlatformsTable(contacts)
}
[OWL-1165][query] refine func addContactsToPlatformsTable()
package http
import (
"sort"
"strconv"
"strings"
"time"
"github.com/Cepave/open-falcon-backend/modules/query/g"
log "github.com/Sirupsen/logrus"
"github.com/astaxie/beego/orm"
"github.com/jasonlvhit/gocron"
)
// Contacts mirrors a row of the boss.contacts table.
type Contacts struct {
	Id      int
	Name    string
	Phone   string
	Email   string
	Updated string // "2006-01-02 15:04:05"-formatted timestamp
}

// Hosts mirrors a row of the boss.hosts table.
type Hosts struct {
	Id        int
	Hostname  string
	Exist     int // 1 while seen in the latest sync, 0 once stale
	Activate  int
	Platform  string
	Platforms string
	Idc       string
	Ip        string
	Isp       string // hostname prefix before the first "-"
	Province  string
	City      string
	Status    string
	Updated   string // "2006-01-02 15:04:05"-formatted timestamp
}

// Idcs mirrors a row of the IDC table.
type Idcs struct {
	Id        int
	Popid     int
	Idc       string
	Bandwidth int
	Count     int
	Area      string
	Province  string
	City      string
	Updated   string
}

// Ips mirrors a row of the boss.ips table.
type Ips struct {
	Id       int
	Ip       string
	Exist    int
	Status   int
	Hostname string
	Platform string
	Updated  string
}

// Platforms mirrors a row of the boss.platforms table.
type Platforms struct {
	Id        int
	Platform  string
	Contacts  string // comma-joined contact names
	Principal string
	Deputy    string
	Upgrader  string
	Count     int
	Updated   string
}
// SyncHostsAndContactsTable runs the host and contact synchronization
// jobs once (when enabled), registers them with the gocron scheduler at
// their configured intervals, and then blocks on the scheduler channel.
// It returns immediately when neither feature is enabled.
func SyncHostsAndContactsTable() {
	if !g.Config().Hosts.Enabled && !g.Config().Contacts.Enabled {
		return
	}
	if g.Config().Hosts.Enabled {
		updateMapData()
		syncHostsTable()
		hostsInterval := uint64(g.Config().Hosts.Interval)
		gocron.Every(hostsInterval).Seconds().Do(syncHostsTable)
	}
	if g.Config().Contacts.Enabled {
		syncContactsTable()
		contactsInterval := uint64(g.Config().Contacts.Interval)
		gocron.Every(contactsInterval).Seconds().Do(syncContactsTable)
	}
	<-gocron.Start()
}
// getIDCMap loads all IDC records from grafana.idc and returns them
// keyed by their pop_id rendered as a decimal string.  Query failures
// are logged and yield an empty map.
func getIDCMap() map[string]interface{} {
	byPopID := map[string]interface{}{}
	ormer := orm.NewOrm()
	query := "SELECT pop_id, name, province, city FROM grafana.idc ORDER BY pop_id ASC"
	var records []Idc
	if _, err := ormer.Raw(query).QueryRows(&records); err != nil {
		log.Errorf(err.Error())
	}
	for _, record := range records {
		byPopID[strconv.Itoa(record.Pop_id)] = record
	}
	return byPopID
}
// updateHostsTable refreshes the boss.hosts table from freshly scraped
// host data in three passes:
//  1. Mark stale rows: any existing row whose "updated" timestamp is more
//     than 600 seconds old gets Exist = 0.
//  2. Build an in-memory Hosts slice from hostsMap, resolving IDC
//     name/province/city through getIDCMap().
//  3. Upsert each built row by hostname: raw INSERT when missing,
//     otherwise UPDATE reusing the existing row's Id.
//
// hostnames lists the keys of hostsMap in processing order; each hostsMap
// entry carries the string attributes hostname/activate/platform/idcID/ip.
func updateHostsTable(hostnames []string, hostsMap map[string]map[string]string) {
	log.Debugf("func updateHostsTable()")
	var hosts []Hosts
	o := orm.NewOrm()
	o.Using("boss") // all queries below hit the "boss" database alias
	_, err := o.QueryTable("hosts").Limit(10000).All(&hosts)
	if err != nil {
		log.Errorf(err.Error())
	} else {
		format := "2006-01-02 15:04:05"
		for _, host := range hosts {
			// Parse errors are ignored: a zero time yields a huge diff,
			// so unparseable rows are treated as stale.
			updatedTime, _ := time.Parse(format, host.Updated)
			currentTime, _ := time.Parse(format, getNow())
			diff := currentTime.Unix() - updatedTime.Unix()
			if diff > 600 { // not refreshed within the last 10 minutes
				host.Exist = 0
				_, err := o.Update(&host)
				if err != nil {
					log.Errorf(err.Error())
				}
			}
		}
	}
	// Pass 2: rebuild the slice from the scraped data.
	hosts = []Hosts{}
	idcMap := getIDCMap()
	var host Hosts
	for _, hostname := range hostnames {
		item := hostsMap[hostname]
		activate, _ := strconv.Atoi(item["activate"])
		host.Hostname = item["hostname"]
		host.Exist = 1
		host.Activate = activate
		host.Platform = item["platform"]
		host.Ip = item["ip"]
		// ISP is encoded as the hostname prefix before the first "-".
		host.Isp = strings.Split(item["hostname"], "-")[0]
		host.Updated = getNow()
		idcID := item["idcID"]
		if _, ok := idcMap[idcID]; ok {
			idc := idcMap[idcID]
			host.Idc = idc.(Idc).Name
			host.Province = idc.(Idc).Province
			host.City = idc.(Idc).City
		}
		hosts = append(hosts, host)
	}
	// Pass 3: upsert by hostname.
	for _, item := range hosts {
		err := o.QueryTable("hosts").Limit(10000).Filter("hostname", item.Hostname).One(&host)
		if err == orm.ErrNoRows {
			sql := "INSERT INTO boss.hosts("
			sql += "hostname, exist, activate, platform, idc, ip, "
			sql += "isp, province, city, updated) "
			sql += "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
			_, err := o.Raw(sql, item.Hostname, item.Exist, item.Activate, item.Platform, item.Idc, item.Ip, item.Isp, item.Province, item.City, item.Updated).Exec()
			if err != nil {
				log.Errorf(err.Error())
			}
		} else if err != nil {
			log.Errorf(err.Error())
		} else {
			// Row exists: carry over its primary key so Update targets it.
			item.Id = host.Id
			_, err := o.Update(&item)
			if err != nil {
				log.Errorf(err.Error())
			}
		}
	}
}
// updatePlatformsTable upserts one row per platform into boss.platforms.
// The "count" column is recomputed from boss.ips as the number of
// existing (exist=1), active (status=1) IPs with a non-null hostname for
// that platform; if the count query fails the count falls back to 0.
func updatePlatformsTable(platformNames []string, platformsMap map[string]map[string]interface{}) {
	log.Debugf("func updatePlatformsTable()")
	now := getNow() // single timestamp shared by every row written here
	o := orm.NewOrm()
	o.Using("boss")
	var platform Platforms
	for _, platformName := range platformNames {
		count, err := o.QueryTable("ips").Filter("platform", platformName).Filter("exist", 1).Filter("status", 1).Exclude("hostname__isnull", true).Count()
		if err != nil {
			count = 0 // best-effort: record the platform even without a count
		}
		group := platformsMap[platformName]
		err = o.QueryTable("platforms").Filter("platform", group["platformName"]).One(&platform)
		if err == orm.ErrNoRows {
			sql := "INSERT INTO boss.platforms(platform, count, updated) VALUES(?, ?, ?)"
			_, err := o.Raw(sql, group["platformName"], count, now).Exec()
			if err != nil {
				log.Errorf(err.Error())
			}
		} else if err != nil {
			log.Errorf(err.Error())
		} else {
			// Existing row: refresh name, count and timestamp in place.
			platform.Platform = group["platformName"].(string)
			platform.Count = int(count)
			platform.Updated = now
			_, err := o.Update(&platform)
			if err != nil {
				log.Errorf(err.Error())
			}
		}
	}
}
// updateContactsTable upserts one row per contact (keyed by name) into
// boss.contacts, refreshing phone, email and the updated timestamp.
//
// NOTE(review): the sibling syncContactsTable builds its contact map as
// map[string]map[string]interface{}; verify the element type expected
// here against the actual caller.
func updateContactsTable(contactNames []string, contactsMap map[string]map[string]string) {
	log.Debugf("func updateContactsTable()")
	o := orm.NewOrm()
	o.Using("boss")
	var contact Contacts
	for _, contactName := range contactNames {
		user := contactsMap[contactName]
		err := o.QueryTable("contacts").Filter("name", user["name"]).One(&contact)
		if err == orm.ErrNoRows {
			sql := "INSERT INTO boss.contacts(name, phone, email, updated) VALUES(?, ?, ?, ?)"
			_, err := o.Raw(sql, user["name"], user["phone"], user["email"], getNow()).Exec()
			if err != nil {
				log.Errorf(err.Error())
			}
		} else if err != nil {
			log.Errorf(err.Error())
		} else {
			// Existing contact: refresh the mutable fields only.
			contact.Email = user["email"]
			contact.Phone = user["phone"]
			contact.Updated = getNow()
			_, err := o.Update(&contact)
			if err != nil {
				log.Errorf(err.Error())
			}
		}
	}
}
// addContactsToPlatformsTable fills in the Principal/Deputy/Upgrader
// columns of boss.platforms from the given contacts payload and stores
// the joined non-empty names in the Contacts column.  Every platform
// row gets a refreshed Updated timestamp, even when no contacts match.
//
// NOTE(review): each platform entry is asserted to be
// map[string]map[string]string, while syncContactsTable iterates the
// same payload as []interface{} per platform — one of the two will
// panic on the same data; confirm the producer's actual shape.
func addContactsToPlatformsTable(contacts map[string]interface{}) {
	log.Debugf("func addContactsToPlatformsTable()")
	now := getNow()
	o := orm.NewOrm()
	o.Using("boss")
	var platforms []Platforms
	_, err := o.QueryTable("platforms").All(&platforms)
	if err != nil {
		log.Errorf(err.Error())
	} else {
		for _, platform := range platforms {
			platformName := platform.Platform
			if items, ok := contacts[platformName]; ok {
				// Renamed from "contacts" to stop shadowing the parameter.
				names := []string{}
				for role, user := range items.(map[string]map[string]string) {
					switch role {
					case "principal":
						platform.Principal = user["name"]
					case "deputy":
						platform.Deputy = user["name"]
					case "upgrader":
						platform.Upgrader = user["name"]
					}
				}
				if len(platform.Principal) > 0 {
					names = append(names, platform.Principal)
				}
				if len(platform.Deputy) > 0 {
					names = append(names, platform.Deputy)
				}
				if len(platform.Upgrader) > 0 {
					names = append(names, platform.Upgrader)
				}
				platform.Contacts = strings.Join(names, ",")
			}
			platform.Updated = now
			_, err := o.Update(&platform)
			if err != nil {
				log.Errorf(err.Error())
			}
		}
	}
}
// syncHostsTable re-imports host and platform inventory from the
// external platform API into boss.hosts and boss.platforms.
//
// It is rate-limited: when the most recently updated existing host row
// is younger than the configured Hosts.Interval, the sync is skipped.
// Devices are only accepted when their declared IP matches the IP
// resolved from the hostname, and are deduplicated by hostname.
func syncHostsTable() {
	o := orm.NewOrm()
	o.Using("boss")
	var rows []orm.Params
	sql := "SELECT updated FROM boss.hosts WHERE exist = 1 ORDER BY updated DESC LIMIT 1"
	num, err := o.Raw(sql).Values(&rows)
	if err != nil {
		log.Errorf(err.Error())
		return
	} else if num > 0 {
		format := "2006-01-02 15:04:05"
		updatedTime, _ := time.Parse(format, rows[0]["updated"].(string))
		currentTime, _ := time.Parse(format, getNow())
		diff := currentTime.Unix() - updatedTime.Unix()
		if int(diff) < g.Config().Hosts.Interval {
			// Synced recently enough; nothing to do.
			return
		}
	}
	var nodes = make(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	getPlatformJSON(nodes, result)
	if nodes["status"] == nil {
		return
	} else if int(nodes["status"].(float64)) != 1 {
		return // API reported failure
	}
	platformNames := []string{}
	platformsMap := map[string]map[string]interface{}{}
	hostnames := []string{}
	hostsMap := map[string]map[string]string{}
	hostnamesMap := map[string]int{}
	idcIDs := []string{}
	hostname := ""
	for _, platform := range nodes["result"].([]interface{}) {
		countOfHosts := 0
		platformName := platform.(map[string]interface{})["platform"].(string)
		platformNames = appendUniqueString(platformNames, platformName)
		for _, device := range platform.(map[string]interface{})["ip_list"].([]interface{}) {
			hostname = device.(map[string]interface{})["hostname"].(string)
			ip := device.(map[string]interface{})["ip"].(string)
			// Accept the device only when its declared IP matches the IP
			// resolved from the hostname; dedupe by hostname.
			if len(ip) > 0 && ip == getIPFromHostname(hostname, result) {
				if _, ok := hostnamesMap[hostname]; !ok {
					hostnames = append(hostnames, hostname)
					idcID := device.(map[string]interface{})["pop_id"].(string)
					host := map[string]string{
						"hostname": hostname,
						"activate": device.(map[string]interface{})["ip_status"].(string),
						"platform": platformName,
						"idcID":    idcID,
						"ip":       ip,
					}
					hostsMap[hostname] = host
					idcIDs = appendUniqueString(idcIDs, idcID)
					hostnamesMap[hostname] = 1
					countOfHosts++
				}
			}
		}
		platformsMap[platformName] = map[string]interface{}{
			"platformName": platformName,
			"count":        countOfHosts,
			"contacts":     "",
		}
	}
	sort.Strings(hostnames)
	sort.Strings(platformNames)
	// Fixed: the original passed platformNames as an extra Debugf argument
	// with no format verb; use %v so the slice is actually rendered.
	log.Debugf("platformNames = %v", platformNames)
	updateHostsTable(hostnames, hostsMap)
	updatePlatformsTable(platformNames, platformsMap)
}
// syncContactsTable refreshes boss.contacts (and, via
// addContactsToPlatformsTable, the contact columns of boss.platforms)
// from the external contact API.
//
// Rate-limited: the sync is skipped when the newest contacts row is
// younger than the configured Contacts.Interval.  The platform list is
// taken from the distinct platforms already stored in boss.platforms.
//
// NOTE(review): this loop asserts each platform entry of "contacts" to
// be []interface{}, while addContactsToPlatformsTable asserts
// map[string]map[string]string for the very same payload — one of the
// two will panic on the same data; verify the API response shape.
func syncContactsTable() {
	log.Debugf("func syncContactsTable()")
	o := orm.NewOrm()
	o.Using("boss")
	var rows []orm.Params
	sql := "SELECT updated FROM boss.contacts ORDER BY updated DESC LIMIT 1"
	num, err := o.Raw(sql).Values(&rows)
	if err != nil {
		log.Errorf(err.Error())
		return
	} else if num > 0 {
		format := "2006-01-02 15:04:05"
		updatedTime, _ := time.Parse(format, rows[0]["updated"].(string))
		currentTime, _ := time.Parse(format, getNow())
		diff := currentTime.Unix() - updatedTime.Unix()
		if int(diff) < g.Config().Contacts.Interval {
			// Synced recently enough; nothing to do.
			return
		}
	}
	platformNames := []string{}
	sql = "SELECT DISTINCT platform FROM boss.platforms ORDER BY platform ASC"
	num, err = o.Raw(sql).Values(&rows)
	if err != nil {
		log.Errorf(err.Error())
		return
	} else if num > 0 {
		for _, row := range rows {
			platformNames = append(platformNames, row["platform"].(string))
		}
	}
	var nodes = make(map[string]interface{})
	errors := []string{}
	var result = make(map[string]interface{})
	result["error"] = errors
	getPlatformContact(strings.Join(platformNames, ","), nodes)
	contactNames := []string{}
	contactsMap := map[string]map[string]interface{}{}
	contacts := nodes["result"].(map[string]interface{})["items"].(map[string]interface{})
	for _, platformName := range platformNames {
		if items, ok := contacts[platformName]; ok {
			for _, user := range items.([]interface{}) {
				contactName := user.(map[string]interface{})["name"].(string)
				// Dedupe contacts appearing under several platforms.
				if _, ok := contactsMap[contactName]; !ok {
					contactsMap[contactName] = user.(map[string]interface{})
					contactNames = append(contactNames, contactName)
				}
			}
		}
	}
	sort.Strings(contactNames)
	updateContactsTable(contactNames, contactsMap)
	addContactsToPlatformsTable(contacts)
}
|
package metalarchives
import (
"reflect"
"testing"
"github.com/StalkR/goircbot/lib/metal"
)
// TestSearch exercises Search with a few known band names, including a
// nonsense query that must yield no results.
func TestSearch(t *testing.T) {
	cases := []struct {
		query string
		bands []metal.Band
	}{
		{
			query: "Convergence",
			bands: []metal.Band{
				{Name: "Convergence", Genre: "Atmospheric/Industrial Dark/Death Metal", Country: "Austria"},
				{Name: "Convergence", Genre: "Melodic Death Metal (early), Nu-metal/Alternative Rock (later)", Country: "Italy"},
				{Name: "Convergence from Within", Genre: "Death Metal", Country: "United States"},
			},
		},
		{
			query: "Psycroptic",
			bands: []metal.Band{
				{Name: "Psycroptic", Genre: "Technical Death Metal", Country: "Australia"},
			},
		},
		{
			query: "sdfgsdfg",
			bands: nil,
		},
	}
	for _, c := range cases {
		got, err := Search(c.query)
		if err != nil {
			t.Errorf("Search(%s): err: %v", c.query, err)
			continue
		}
		if !reflect.DeepEqual(got, c.bands) {
			t.Errorf("Search(%s): got %s; want %s", c.query, got, c.bands)
		}
	}
}
lib/metal/metalarchives: update test
package metalarchives
import (
"reflect"
"testing"
"github.com/StalkR/goircbot/lib/metal"
)
// TestSearch exercises Search with a few known band names, including a
// nonsense query that must yield no results.
func TestSearch(t *testing.T) {
	cases := []struct {
		query string
		bands []metal.Band
	}{
		{
			query: "Convergence",
			bands: []metal.Band{
				{Name: "Convergence", Genre: "Atmospheric/Industrial Death Metal", Country: "Austria"},
				{Name: "Convergence", Genre: "Melodic Death Metal (early), Nu-metal/Alternative Rock (later)", Country: "Italy"},
				{Name: "Convergence from Within", Genre: "Death Metal", Country: "United States"},
			},
		},
		{
			query: "Psycroptic",
			bands: []metal.Band{
				{Name: "Psycroptic", Genre: "Technical Death Metal", Country: "Australia"},
			},
		},
		{
			query: "sdfgsdfg",
			bands: nil,
		},
	}
	for _, c := range cases {
		got, err := Search(c.query)
		if err != nil {
			t.Errorf("Search(%s): err: %v", c.query, err)
			continue
		}
		if !reflect.DeepEqual(got, c.bands) {
			t.Errorf("Search(%s): got %s; want %s", c.query, got, c.bands)
		}
	}
}
|
package repository_test
import (
"testing"
"github.com/syou6162/go-active-learning/lib/example"
"github.com/syou6162/go-active-learning/lib/model"
"github.com/syou6162/go-active-learning/lib/repository"
)
// TestUpdateRelatedExamples verifies that related-example links can be
// stored for an example and read back.  (Renamed from
// TestUpdateUpdateRelatedExamples — the duplicated "Update" was a typo.)
func TestUpdateRelatedExamples(t *testing.T) {
	repo, err := repository.New()
	if err != nil {
		// Fatal, not Error: continuing with a failed New would use an
		// unusable repo and panic in the deferred Close.
		t.Fatal(err)
	}
	defer repo.Close()
	e1 := example.NewExample("http://hoge1.com", model.POSITIVE)
	e2 := example.NewExample("http://hoge2.com", model.NEGATIVE)
	e3 := example.NewExample("http://hoge3.com", model.UNLABELED)
	examples := model.Examples{e1, e2, e3}
	// Persist the examples so they have database ids to link.
	for _, e := range examples {
		err = repo.UpdateOrCreateExample(e)
		if err != nil {
			t.Error(err)
		}
	}
	related := model.RelatedExamples{ExampleId: e1.Id, RelatedExampleIds: []int{e2.Id, e3.Id}}
	err = repo.UpdateRelatedExamples(related)
	if err != nil {
		t.Error(err)
	}
	{
		related, err := repo.FindRelatedExamples(e1)
		if err != nil {
			t.Error(err)
		}
		if len(related.RelatedExampleIds) != 2 {
			t.Error("len(related.RelatedExampleIds) must be 2")
		}
	}
}
制約のテスト
package repository_test
import (
"testing"
"github.com/syou6162/go-active-learning/lib/example"
"github.com/syou6162/go-active-learning/lib/model"
"github.com/syou6162/go-active-learning/lib/repository"
)
// TestUpdateRelatedExamples verifies that related-example links can be
// stored for an example and read back.
func TestUpdateRelatedExamples(t *testing.T) {
	repo, err := repository.New()
	if err != nil {
		// Fatal, not Error: continuing with a failed New would use an
		// unusable repo and panic in the deferred Close.
		t.Fatal(err)
	}
	defer repo.Close()
	e1 := example.NewExample("http://hoge1.com", model.POSITIVE)
	e2 := example.NewExample("http://hoge2.com", model.NEGATIVE)
	e3 := example.NewExample("http://hoge3.com", model.UNLABELED)
	examples := model.Examples{e1, e2, e3}
	// Persist the examples so they have database ids to link.
	for _, e := range examples {
		err = repo.UpdateOrCreateExample(e)
		if err != nil {
			t.Error(err)
		}
	}
	related := model.RelatedExamples{ExampleId: e1.Id, RelatedExampleIds: []int{e2.Id, e3.Id}}
	err = repo.UpdateRelatedExamples(related)
	if err != nil {
		t.Error(err)
	}
	{
		related, err := repo.FindRelatedExamples(e1)
		if err != nil {
			t.Error(err)
		}
		if len(related.RelatedExampleIds) != 2 {
			t.Error("len(related.RelatedExampleIds) must be 2")
		}
	}
}
// TestUpdateRelatedExamplesMyOwn checks the self-reference constraint:
// an example must not appear in its own related-example list.
func TestUpdateRelatedExamplesMyOwn(t *testing.T) {
	repo, err := repository.New()
	if err != nil {
		// Fatal, not Error: continuing with a failed New would use an
		// unusable repo and panic in the deferred Close.
		t.Fatal(err)
	}
	defer repo.Close()
	e1 := example.NewExample("http://hoge1.com", model.POSITIVE)
	e2 := example.NewExample("http://hoge2.com", model.NEGATIVE)
	e3 := example.NewExample("http://hoge3.com", model.UNLABELED)
	examples := model.Examples{e1, e2, e3}
	// Persist the examples so they have database ids to link.
	for _, e := range examples {
		err = repo.UpdateOrCreateExample(e)
		if err != nil {
			t.Error(err)
		}
	}
	// e1.Id appears in its own related list: the update must be rejected.
	related := model.RelatedExamples{ExampleId: e1.Id, RelatedExampleIds: []int{e1.Id, e2.Id, e3.Id}}
	err = repo.UpdateRelatedExamples(related)
	if err == nil {
		t.Error("自身と同一のexample_idを持つ事例はrelated_example_idに追加できない")
	}
}
|
package display
import (
"fmt"
"os"
"strings"
)
func MOTD() {
os.Stderr.WriteString(fmt.Sprintf(`
**
********
***************
*********************
*****************
:: ********* ::
:: *** ::
++ ::: ::: ++
++ ::: ++
++ ++
+
_ _ ____ _ _ ____ ___ ____ _ _
|\ | |__| |\ | | | |__) | | \/
| \| | | | \| |__| |__) |__| _/\_
`))
}
func InfoProductionHost() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ WARNING:
+ You are on a live, production Linux server.
+ This host is primarily responsible for running docker containers.
+ Changes made to this machine have real consequences.
+ Proceed at your own risk.
--------------------------------------------------------------------------------
`))
}
func InfoProductionContainer() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ WARNING:
+ You are in a live, production Linux container.
+ Changes made to this machine have real consequences.
+ Proceed at your own risk.
--------------------------------------------------------------------------------
`))
}
func InfoLocalContainer() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ You are inside a Linux container on your local machine.
+ Anything here can be undone, so have fun and explore!
--------------------------------------------------------------------------------
`))
}
func InfoDevContainer(ip string) {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ You are in a Linux container
+ Your local source code has been mounted into the container
+ Changes to your code in either the container or desktop will be mirrored
+ If you run a server, access it at >> %s
--------------------------------------------------------------------------------
`, ip))
}
func InfoDevRunContainer(cmd, ip string) {
os.Stderr.WriteString(fmt.Sprintf(`
**
*********
*************** Your command will run in an isolated Linux container
:: ********* :: Code changes in either the container or desktop are mirrored
" ::: *** ::: " ------------------------------------------------------------
"" ::: "" If you run a server, access it at >> %s
"" " ""
"
RUNNING > %s
`, ip, cmd))
os.Stderr.WriteString(fmt.Sprintf("%s\n", strings.Repeat("-", len(cmd)+10)))
}
func InfoSimDeploy(ip string) {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ Your app is running in simulated production environment
+ Access your app at >> %s
--------------------------------------------------------------------------------
`, ip))
}
func DevRunEmpty() {
os.Stderr.WriteString(fmt.Sprintf(`
! You don't have any web or worker start commands specified in your
boxfile.yml. More information about start commands is available here:
https://docs.nanobox.io/boxfile/web/#start-command
`))
}
func FirstDeploy() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ HEADS UP:
+ This is the first deploy to this app and the upload takes longer than usual.
+ Future deploys only sync the differences and will be much faster.
--------------------------------------------------------------------------------
`))
}
func FirstBuild() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ HEADS UP:
+ This is the first build for this project and will take longer than usual.
+ Future builds will pull from the cache and will be much faster.
--------------------------------------------------------------------------------
`))
}
func ProviderSetup() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ HEADS UP:
+ Nanobox will run a single VM transparently within VirtualBox.
+ All apps and containers will be launched within the same VM.
--------------------------------------------------------------------------------
`))
}
func MigrateOldRequired() {
os.Stderr.WriteString(fmt.Sprintf(`
Great news! Nanobox can now optionally run directly on top of Docker. This change constitutes a major architectural refactor as well as data re-structure. To use this version we need to purge your current apps. Fortunately, nanobox will re-build them for you the next time you use "nanobox run".
`))
}
func MigrateProviderRequired() {
os.Stderr.WriteString(fmt.Sprintf(`
To migrate between providers we need to implode.. LACE UP ITS GOIGN TO GET BUMPY!
`))
}
Add messages for upgrading
package display
import (
"fmt"
"os"
"strings"
)
func MOTD() {
os.Stderr.WriteString(fmt.Sprintf(`
**
********
***************
*********************
*****************
:: ********* ::
:: *** ::
++ ::: ::: ++
++ ::: ++
++ ++
+
_ _ ____ _ _ ____ ___ ____ _ _
|\ | |__| |\ | | | |__) | | \/
| \| | | | \| |__| |__) |__| _/\_
`))
}
func InfoProductionHost() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ WARNING:
+ You are on a live, production Linux server.
+ This host is primarily responsible for running docker containers.
+ Changes made to this machine have real consequences.
+ Proceed at your own risk.
--------------------------------------------------------------------------------
`))
}
func InfoProductionContainer() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ WARNING:
+ You are in a live, production Linux container.
+ Changes made to this machine have real consequences.
+ Proceed at your own risk.
--------------------------------------------------------------------------------
`))
}
func InfoLocalContainer() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ You are inside a Linux container on your local machine.
+ Anything here can be undone, so have fun and explore!
--------------------------------------------------------------------------------
`))
}
func InfoDevContainer(ip string) {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ You are in a Linux container
+ Your local source code has been mounted into the container
+ Changes to your code in either the container or desktop will be mirrored
+ If you run a server, access it at >> %s
--------------------------------------------------------------------------------
`, ip))
}
func InfoDevRunContainer(cmd, ip string) {
os.Stderr.WriteString(fmt.Sprintf(`
**
*********
*************** Your command will run in an isolated Linux container
:: ********* :: Code changes in either the container or desktop are mirrored
" ::: *** ::: " ------------------------------------------------------------
"" ::: "" If you run a server, access it at >> %s
"" " ""
"
RUNNING > %s
`, ip, cmd))
os.Stderr.WriteString(fmt.Sprintf("%s\n", strings.Repeat("-", len(cmd)+10)))
}
func InfoSimDeploy(ip string) {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ Your app is running in simulated production environment
+ Access your app at >> %s
--------------------------------------------------------------------------------
`, ip))
}
func DevRunEmpty() {
os.Stderr.WriteString(fmt.Sprintf(`
! You don't have any web or worker start commands specified in your
boxfile.yml. More information about start commands is available here:
https://docs.nanobox.io/boxfile/web/#start-command
`))
}
func FirstDeploy() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ HEADS UP:
+ This is the first deploy to this app and the upload takes longer than usual.
+ Future deploys only sync the differences and will be much faster.
--------------------------------------------------------------------------------
`))
}
func FirstBuild() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ HEADS UP:
+ This is the first build for this project and will take longer than usual.
+ Future builds will pull from the cache and will be much faster.
--------------------------------------------------------------------------------
`))
}
func ProviderSetup() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ HEADS UP:
+ Nanobox will run a single VM transparently within VirtualBox.
+ All apps and containers will be launched within the same VM.
--------------------------------------------------------------------------------
`))
}
func MigrateOldRequired() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ WARNING:
+ Nanobox has been successfully upgraded! This change constitutes a major
+ architectural refactor as well as data re-structure. To use this version we
+ need to purge your current apps. No worries, nanobox will re-build them for
+ you the next time you use "nanobox run".
--------------------------------------------------------------------------------
`))
}
func MigrateProviderRequired() {
os.Stderr.WriteString(fmt.Sprintf(`
--------------------------------------------------------------------------------
+ WARNING:
+ It looks like you want to use a different provider, cool! Just FYI, we have
+ to bring down your existing apps as providers are not compatible. No worries,
+ nanobox will re-build them for you the next time you use "nanobox run".
--------------------------------------------------------------------------------
`))
}
|
package github
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"time"
"github.com/github/hub/version"
)
const (
	// GitHubHost is the default hostname for github.com API access.
	GitHubHost string = "github.com"
	// OAuthAppURL identifies the hub OAuth application.
	OAuthAppURL string = "https://hub.github.com/"
)

// UserAgent is sent with every API request, e.g. "Hub <version>".
var UserAgent = "Hub " + version.Version
// NewClient returns a Client for the given hostname with default Host
// settings.
func NewClient(h string) *Client {
	return NewClientWithHost(&Host{Host: h})
}

// NewClientWithHost returns a Client bound to a fully configured Host.
func NewClientWithHost(host *Host) *Client {
	return &Client{Host: host}
}
// Client performs GitHub API requests against a single Host.
type Client struct {
	Host *Host
	// cachedClient presumably holds the underlying HTTP client reused
	// across calls (built by simpleApi) — confirm against that helper.
	cachedClient *simpleClient
}

// Gist models a GitHub gist as sent to / received from the API.
type Gist struct {
	Files       map[string]GistFile `json:"files"`
	Description string              `json:"description,omitempty"`
	Id          string              `json:"id,omitempty"`
	Public      bool                `json:"public"`
	HtmlUrl     string              `json:"html_url"`
}

// GistFile models a single file inside a Gist.
type GistFile struct {
	Type     string `json:"type,omitempty"`
	Language string `json:"language,omitempty"`
	Content  string `json:"content"`
	RawUrl   string `json:"raw_url"`
}
// FetchPullRequests lists pull requests for project, following pagination
// until limit results pass the optional filter (limit <= 0 means no limit).
func (client *Client) FetchPullRequests(project *Project, filterParams map[string]interface{}, limit int, filter func(*PullRequest) bool) (pulls []PullRequest, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	// Over-fetch per page (see perPage) since filtering may drop items.
	path := fmt.Sprintf("repos/%s/%s/pulls?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
	if filterParams != nil {
		path = addQuery(path, filterParams)
	}
	pulls = []PullRequest{}
	var res *simpleResponse
	for path != "" {
		// draftsType media type opts in to the draft-PR API preview.
		res, err = api.GetFile(path, draftsType)
		if err = checkStatus(200, "fetching pull requests", res, err); err != nil {
			return
		}
		path = res.Link("next") // empty when there is no next page
		pullsPage := []PullRequest{}
		if err = res.Unmarshal(&pullsPage); err != nil {
			return
		}
		for _, pr := range pullsPage {
			if filter == nil || filter(&pr) {
				pulls = append(pulls, pr)
				if limit > 0 && len(pulls) == limit {
					path = "" // stop pagination once the limit is reached
					break
				}
			}
		}
	}
	return
}
// PullRequest retrieves a single pull request by its number.
func (client *Client) PullRequest(project *Project, id string) (pr *PullRequest, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/pulls/%s", project.Owner, project.Name, id)
	response, err := api.Get(path)
	if err = checkStatus(200, "getting pull request", response, err); err != nil {
		return
	}
	pr = &PullRequest{}
	err = response.Unmarshal(pr)
	return
}

// PullRequestPatch streams the patch representation of a pull request; the
// caller is responsible for closing the returned reader.
func (client *Client) PullRequestPatch(project *Project, id string) (patch io.ReadCloser, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/pulls/%s", project.Owner, project.Name, id)
	response, err := api.GetFile(path, patchMediaType)
	if err = checkStatus(200, "getting pull request patch", response, err); err != nil {
		return
	}
	patch = response.Body
	return
}
// CreatePullRequest opens a new pull request. A 404 from the API is
// annotated with a hint, since it usually means the base repository is not
// visible to the authenticated user.
func (client *Client) CreatePullRequest(project *Project, params map[string]interface{}) (pr *PullRequest, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	// draftsType opts in to the draft pull request API preview.
	res, err := api.PostJSONPreview(fmt.Sprintf("repos/%s/%s/pulls", project.Owner, project.Name), params, draftsType)
	if err = checkStatus(201, "creating pull request", res, err); err != nil {
		if res != nil && res.StatusCode == 404 {
			// Strip the scheme from the web URL for the hint message.
			projectUrl := strings.SplitN(project.WebURL("", "", ""), "://", 2)[1]
			err = fmt.Errorf("%s\nAre you sure that %s exists?", err, projectUrl)
		}
		return
	}
	pr = &PullRequest{}
	err = res.Unmarshal(pr)
	return
}
// RequestReview asks users and/or teams (given in params) to review a pull
// request.
func (client *Client) RequestReview(project *Project, prNumber int, params map[string]interface{}) (err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", project.Owner, project.Name, prNumber)
	response, err := api.PostJSON(path, params)
	if err = checkStatus(201, "requesting reviewer", response, err); err != nil {
		return
	}
	response.Body.Close()
	return
}

// CommitPatch streams the patch representation of a commit; the caller is
// responsible for closing the returned reader.
func (client *Client) CommitPatch(project *Project, sha string) (patch io.ReadCloser, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/commits/%s", project.Owner, project.Name, sha)
	response, err := api.GetFile(path, patchMediaType)
	if err = checkStatus(200, "getting commit patch", response, err); err != nil {
		return
	}
	patch = response.Body
	return
}
// GistPatch streams the raw contents of the first file (by sorted name) in a
// gist; the caller is responsible for closing the returned reader.
func (client *Client) GistPatch(id string) (patch io.ReadCloser, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("gists/%s", id))
	if err = checkStatus(200, "getting gist patch", res, err); err != nil {
		return
	}
	gist := Gist{}
	if err = res.Unmarshal(&gist); err != nil {
		return
	}
	// Pick the file with the lexicographically first name so multi-file
	// gists behave deterministically (Go map iteration order is random).
	names := make([]string, 0, len(gist.Files))
	for name := range gist.Files {
		names = append(names, name)
	}
	sort.Strings(names)
	rawUrl := ""
	if len(names) > 0 {
		rawUrl = gist.Files[names[0]].RawUrl
	}
	res, err = api.GetFile(rawUrl, textMediaType)
	if err = checkStatus(200, "getting gist patch", res, err); err != nil {
		return
	}
	return res.Body, nil
}
// Repository fetches repository metadata for project.
func (client *Client) Repository(project *Project) (repo *Repository, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("repos/%s/%s", project.Owner, project.Name))
	if err = checkStatus(200, "getting repository info", res, err); err != nil {
		return
	}
	repo = &Repository{}
	// Unmarshal into repo (not &repo) like every sibling method; the extra
	// level of indirection was harmless but inconsistent.
	err = res.Unmarshal(repo)
	return
}
// CreateRepository creates a repository under the authenticated user, or
// under an organization when project.Owner differs from the current user.
func (client *Client) CreateRepository(project *Project, description, homepage string, isPrivate bool) (repo *Repository, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	repoURL := "user/repos"
	if project.Owner != client.Host.User {
		repoURL = fmt.Sprintf("orgs/%s/repos", project.Owner)
	}
	params := map[string]interface{}{
		"name":        project.Name,
		"description": description,
		"homepage":    homepage,
		"private":     isPrivate,
	}
	response, err := api.PostJSON(repoURL, params)
	if err = checkStatus(201, "creating repository", response, err); err != nil {
		return
	}
	repo = &Repository{}
	err = response.Unmarshal(repo)
	return
}

// DeleteRepository permanently deletes the repository for project.
func (client *Client) DeleteRepository(project *Project) error {
	api, err := client.simpleApi()
	if err != nil {
		return err
	}
	response, err := api.Delete(fmt.Sprintf("repos/%s/%s", project.Owner, project.Name))
	return checkStatus(204, "deleting repository", response, err)
}
// Release mirrors the GitHub release API resource.
type Release struct {
	Name            string         `json:"name"`
	TagName         string         `json:"tag_name"`
	TargetCommitish string         `json:"target_commitish"`
	Body            string         `json:"body"`
	Draft           bool           `json:"draft"`
	Prerelease      bool           `json:"prerelease"`
	Assets          []ReleaseAsset `json:"assets"`
	TarballUrl      string         `json:"tarball_url"`
	ZipballUrl      string         `json:"zipball_url"`
	HtmlUrl         string         `json:"html_url"`
	UploadUrl       string         `json:"upload_url"` // URI template; truncated at "{" before use
	ApiUrl          string         `json:"url"`
	CreatedAt       time.Time      `json:"created_at"`
	PublishedAt     time.Time      `json:"published_at"`
}

// ReleaseAsset is a downloadable file attached to a Release.
type ReleaseAsset struct {
	Name        string `json:"name"`
	Label       string `json:"label"`
	DownloadUrl string `json:"browser_download_url"`
	ApiUrl      string `json:"url"`
}
// FetchReleases lists releases for project, following pagination until limit
// results pass the optional filter (limit <= 0 means no limit).
func (client *Client) FetchReleases(project *Project, limit int, filter func(*Release) bool) (releases []Release, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	// Over-fetch per page (see perPage) since filtering may drop items.
	path := fmt.Sprintf("repos/%s/%s/releases?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
	releases = []Release{}
	var res *simpleResponse
	for path != "" {
		res, err = api.Get(path)
		if err = checkStatus(200, "fetching releases", res, err); err != nil {
			return
		}
		path = res.Link("next") // empty when there is no next page
		releasesPage := []Release{}
		if err = res.Unmarshal(&releasesPage); err != nil {
			return
		}
		for _, release := range releasesPage {
			if filter == nil || filter(&release) {
				releases = append(releases, release)
				if limit > 0 && len(releases) == limit {
					path = "" // stop pagination once the limit is reached
					break
				}
			}
		}
	}
	return
}
// FetchRelease finds a single release by its tag name, searching at most the
// first 100 matching releases. Returns an error when no release matches.
func (client *Client) FetchRelease(project *Project, tagName string) (*Release, error) {
	releases, err := client.FetchReleases(project, 100, func(release *Release) bool {
		return release.TagName == tagName
	})
	// Guard clauses instead of nested if/else keep the happy path flat.
	if err != nil {
		return nil, err
	}
	if len(releases) < 1 {
		return nil, fmt.Errorf("Unable to find release with tag name `%s'", tagName)
	}
	return &releases[0], nil
}
// CreateRelease publishes a new release on the project.
func (client *Client) CreateRelease(project *Project, releaseParams *Release) (release *Release, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/releases", project.Owner, project.Name)
	response, err := api.PostJSON(path, releaseParams)
	if err = checkStatus(201, "creating release", response, err); err != nil {
		return
	}
	release = &Release{}
	err = response.Unmarshal(release)
	return
}

// EditRelease applies partial edits to an existing release.
func (client *Client) EditRelease(release *Release, releaseParams map[string]interface{}) (updatedRelease *Release, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	response, err := api.PatchJSON(release.ApiUrl, releaseParams)
	if err = checkStatus(200, "editing release", response, err); err != nil {
		return
	}
	updatedRelease = &Release{}
	err = response.Unmarshal(updatedRelease)
	return
}

// DeleteRelease removes a release (its git tag is left intact).
func (client *Client) DeleteRelease(release *Release) (err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	response, err := api.Delete(release.ApiUrl)
	err = checkStatus(204, "deleting release", response, err)
	return
}

// LocalAsset describes a file on disk to be uploaded as a release asset.
type LocalAsset struct {
	Name     string    // local file path; its base name becomes the asset name
	Label    string    // optional display label
	Contents io.Reader // contents to upload
	Size     int64     // content length in bytes
}
// UploadReleaseAssets uploads assets to a release, replacing any existing
// assets with the same name and retrying up to 3 times on 5xx responses.
// It returns the assets created so far; on error, uploads already completed
// are included in doneAssets.
func (client *Client) UploadReleaseAssets(release *Release, assets []LocalAsset) (doneAssets []*ReleaseAsset, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	// UploadUrl is a URI template like ".../assets{?name,label}"; keep only
	// the literal part before "{".
	idx := strings.Index(release.UploadUrl, "{")
	uploadURL := release.UploadUrl[0:idx]

	// uploadAsset uploads a single asset. Files opened for retries are
	// closed when this helper returns, instead of accumulating deferred
	// closes until UploadReleaseAssets itself exits (the previous code
	// leaked one open handle per retry for the whole call).
	uploadAsset := func(asset LocalAsset) (*ReleaseAsset, error) {
		// Delete a pre-existing asset of the same name so the upload
		// does not fail with a name conflict.
		for _, existingAsset := range release.Assets {
			if existingAsset.Name == asset.Name {
				if err := client.DeleteReleaseAsset(&existingAsset); err != nil {
					return nil, err
				}
				break
			}
		}
		params := map[string]interface{}{"name": filepath.Base(asset.Name)}
		if asset.Label != "" {
			params["label"] = asset.Label
		}
		uploadPath := addQuery(uploadURL, params)

		var res *simpleResponse
		var uploadErr error
		attempts := 0
		maxAttempts := 3
		body := asset.Contents
		for {
			res, uploadErr = api.PostFile(uploadPath, body, asset.Size)
			if uploadErr == nil && res.StatusCode >= 500 && res.StatusCode < 600 && attempts < maxAttempts {
				attempts++
				// Linear backoff, then re-read the file from the start
				// since the previous reader was consumed.
				time.Sleep(time.Second * time.Duration(attempts))
				f, openErr := os.Open(asset.Name)
				if openErr != nil {
					return nil, openErr
				}
				defer f.Close() // released when uploadAsset returns
				body = f
				continue
			}
			if uploadErr = checkStatus(201, "uploading release asset", res, uploadErr); uploadErr != nil {
				return nil, uploadErr
			}
			break
		}
		newAsset := &ReleaseAsset{}
		if uploadErr = res.Unmarshal(newAsset); uploadErr != nil {
			return nil, uploadErr
		}
		return newAsset, nil
	}

	for _, asset := range assets {
		var newAsset *ReleaseAsset
		if newAsset, err = uploadAsset(asset); err != nil {
			return
		}
		doneAssets = append(doneAssets, newAsset)
	}
	return
}
// DeleteReleaseAsset removes a single asset from a release.
func (client *Client) DeleteReleaseAsset(asset *ReleaseAsset) (err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	response, err := api.Delete(asset.ApiUrl)
	err = checkStatus(204, "deleting release asset", response, err)
	return
}

// DownloadReleaseAsset streams the binary contents of a release asset; the
// caller is responsible for closing the returned reader.
func (client *Client) DownloadReleaseAsset(url string) (asset io.ReadCloser, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	response, err := api.GetFile(url, "application/octet-stream")
	if err = checkStatus(200, "downloading asset", response, err); err != nil {
		return
	}
	asset = response.Body
	return
}
// CIStatusResponse is the combined commit-status API payload.
type CIStatusResponse struct {
	State    string     `json:"state"`
	Statuses []CIStatus `json:"statuses"`
}

// CIStatus is one status entry attached to a commit.
type CIStatus struct {
	State     string `json:"state"`
	Context   string `json:"context"`
	TargetUrl string `json:"target_url"`
}

// CheckRunsResponse is the checks API payload for a commit.
type CheckRunsResponse struct {
	CheckRuns []CheckRun `json:"check_runs"`
}

// CheckRun is a single check-run result.
type CheckRun struct {
	Status     string `json:"status"`
	Conclusion string `json:"conclusion"`
	Name       string `json:"name"`
	HtmlUrl    string `json:"html_url"`
}

// FetchCIStatus merges legacy commit statuses and newer check-runs for a
// commit into a single CIStatusResponse, sorted by context then target URL.
func (client *Client) FetchCIStatus(project *Project, sha string) (status *CIStatusResponse, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("repos/%s/%s/commits/%s/status", project.Owner, project.Name, sha))
	if err = checkStatus(200, "fetching statuses", res, err); err != nil {
		return
	}
	status = &CIStatusResponse{}
	if err = res.Unmarshal(status); err != nil {
		return
	}
	// Case-insensitive sort by context, tie-broken by target URL, so output
	// is stable; run once now and again after check-runs are appended.
	sortStatuses := func() {
		sort.Slice(status.Statuses, func(a, b int) bool {
			sA := status.Statuses[a]
			sB := status.Statuses[b]
			cmp := strings.Compare(strings.ToLower(sA.Context), strings.ToLower(sB.Context))
			if cmp == 0 {
				return strings.Compare(sA.TargetUrl, sB.TargetUrl) < 0
			} else {
				return cmp < 0
			}
		})
	}
	sortStatuses()
	res, err = api.GetFile(fmt.Sprintf("repos/%s/%s/commits/%s/check-runs", project.Owner, project.Name, sha), checksType)
	// Checks may be unavailable (403/404/422) on some repos or hosts;
	// return just the legacy statuses in that case.
	if err == nil && (res.StatusCode == 403 || res.StatusCode == 404 || res.StatusCode == 422) {
		return
	}
	if err = checkStatus(200, "fetching checks", res, err); err != nil {
		return
	}
	checks := &CheckRunsResponse{}
	if err = res.Unmarshal(checks); err != nil {
		return
	}
	// Map each check-run onto the legacy status shape: in-progress runs
	// surface as "pending", completed runs use their conclusion.
	for _, checkRun := range checks.CheckRuns {
		state := "pending"
		if checkRun.Status == "completed" {
			state = checkRun.Conclusion
		}
		checkStatus := CIStatus{
			State:     state,
			Context:   checkRun.Name,
			TargetUrl: checkRun.HtmlUrl,
		}
		status.Statuses = append(status.Statuses, checkStatus)
	}
	sortStatuses()
	return
}

// Repository mirrors the GitHub repository API resource.
type Repository struct {
	Name          string                 `json:"name"`
	FullName      string                 `json:"full_name"`
	Parent        *Repository            `json:"parent"`
	Owner         *User                  `json:"owner"`
	Private       bool                   `json:"private"`
	HasWiki       bool                   `json:"has_wiki"`
	Permissions   *RepositoryPermissions `json:"permissions"`
	HtmlUrl       string                 `json:"html_url"`
	DefaultBranch string                 `json:"default_branch"`
}

// RepositoryPermissions describes the viewer's access to a repository.
type RepositoryPermissions struct {
	Admin bool `json:"admin"`
	Push  bool `json:"push"`
	Pull  bool `json:"pull"`
}
// ForkRepository creates a fork of project for the authenticated user (or a
// target organization given in params).
func (client *Client) ForkRepository(project *Project, params map[string]interface{}) (repo *Repository, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/forks", project.Owner, project.Name)
	response, err := api.PostJSON(path, params)
	if err = checkStatus(202, "creating fork", response, err); err != nil {
		return
	}
	repo = &Repository{}
	err = response.Unmarshal(repo)
	return
}
// Comment is a comment on an issue or pull request.
type Comment struct {
	Id        int       `json:"id"`
	Body      string    `json:"body"`
	User      *User     `json:"user"`
	CreatedAt time.Time `json:"created_at"`
}

// Issue mirrors the GitHub issue API resource; pull requests share the same
// shape (see the PullRequest alias below).
type Issue struct {
	Number int    `json:"number"`
	State  string `json:"state"`
	Title  string `json:"title"`
	Body   string `json:"body"`
	User   *User  `json:"user"`

	PullRequest *PullRequest     `json:"pull_request"` // set when the issue is actually a PR
	Head        *PullRequestSpec `json:"head"`
	Base        *PullRequestSpec `json:"base"`

	MergeCommitSha      string `json:"merge_commit_sha"`
	MaintainerCanModify bool   `json:"maintainer_can_modify"`
	Draft               bool   `json:"draft"`

	Comments  int          `json:"comments"`
	Labels    []IssueLabel `json:"labels"`
	Assignees []User       `json:"assignees"`
	Milestone *Milestone   `json:"milestone"`
	CreatedAt time.Time    `json:"created_at"`
	UpdatedAt time.Time    `json:"updated_at"`
	MergedAt  time.Time    `json:"merged_at"`

	RequestedReviewers []User `json:"requested_reviewers"`
	RequestedTeams     []Team `json:"requested_teams"`

	ApiUrl  string `json:"url"`
	HtmlUrl string `json:"html_url"`

	ClosedBy *User `json:"closed_by"`
}

// PullRequest is structurally identical to Issue in the GitHub API.
type PullRequest Issue

// PullRequestSpec identifies one side (head or base) of a pull request.
type PullRequestSpec struct {
	Label string      `json:"label"`
	Ref   string      `json:"ref"`
	Sha   string      `json:"sha"`
	Repo  *Repository `json:"repo"`
}
// IsSameRepo reports whether the pull request's head branch lives in the
// same repository as its base branch.
func (pr *PullRequest) IsSameRepo() bool {
	if pr.Head == nil || pr.Head.Repo == nil {
		return false
	}
	head, base := pr.Head.Repo, pr.Base.Repo
	return head.Name == base.Name && head.Owner.Login == base.Owner.Login
}

// HasRequestedReviewer reports whether the named user was asked for review
// (case-insensitive).
func (pr *PullRequest) HasRequestedReviewer(name string) bool {
	for _, reviewer := range pr.RequestedReviewers {
		if strings.EqualFold(reviewer.Login, name) {
			return true
		}
	}
	return false
}

// HasRequestedTeam reports whether the team slug was asked for review
// (case-insensitive).
func (pr *PullRequest) HasRequestedTeam(name string) bool {
	for _, requested := range pr.RequestedTeams {
		if strings.EqualFold(requested.Slug, name) {
			return true
		}
	}
	return false
}
// IssueLabel is a label attached to an issue or pull request.
type IssueLabel struct {
	Name  string `json:"name"`
	Color string `json:"color"`
}

// User identifies a GitHub account by login name.
type User struct {
	Login string `json:"login"`
}

// Team identifies an organization team.
type Team struct {
	Name string `json:"name"`
	Slug string `json:"slug"`
}

// Milestone identifies a repository milestone.
type Milestone struct {
	Number int    `json:"number"`
	Title  string `json:"title"`
}

// FetchIssues lists issues for project, following pagination until limit
// results pass the optional filter (limit <= 0 means no limit).
func (client *Client) FetchIssues(project *Project, filterParams map[string]interface{}, limit int, filter func(*Issue) bool) (issues []Issue, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	// Over-fetch per page (see perPage) since filtering may drop items.
	path := fmt.Sprintf("repos/%s/%s/issues?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
	if filterParams != nil {
		path = addQuery(path, filterParams)
	}
	issues = []Issue{}
	var res *simpleResponse
	for path != "" {
		res, err = api.Get(path)
		if err = checkStatus(200, "fetching issues", res, err); err != nil {
			return
		}
		path = res.Link("next") // empty when there is no next page
		issuesPage := []Issue{}
		if err = res.Unmarshal(&issuesPage); err != nil {
			return
		}
		for _, issue := range issuesPage {
			if filter == nil || filter(&issue) {
				issues = append(issues, issue)
				if limit > 0 && len(issues) == limit {
					path = "" // stop pagination once the limit is reached
					break
				}
			}
		}
	}
	return
}
// FetchIssue retrieves a single issue by its number.
func (client *Client) FetchIssue(project *Project, number string) (issue *Issue, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/issues/%s", project.Owner, project.Name, number)
	response, err := api.Get(path)
	if err = checkStatus(200, "fetching issue", response, err); err != nil {
		return nil, err
	}
	issue = &Issue{}
	err = response.Unmarshal(issue)
	return
}

// FetchComments lists the comments on an issue.
func (client *Client) FetchComments(project *Project, number string) (comments []Comment, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/issues/%s/comments", project.Owner, project.Name, number)
	response, err := api.Get(path)
	if err = checkStatus(200, "fetching comments for issue", response, err); err != nil {
		return nil, err
	}
	comments = []Comment{}
	err = response.Unmarshal(&comments)
	return
}

// CreateIssue opens a new issue in the project.
func (client *Client) CreateIssue(project *Project, params interface{}) (issue *Issue, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/issues", project.Owner, project.Name)
	response, err := api.PostJSON(path, params)
	if err = checkStatus(201, "creating issue", response, err); err != nil {
		return
	}
	issue = &Issue{}
	err = response.Unmarshal(issue)
	return
}

// UpdateIssue applies partial edits to an existing issue.
func (client *Client) UpdateIssue(project *Project, issueNumber int, params map[string]interface{}) (err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/issues/%d", project.Owner, project.Name, issueNumber)
	response, err := api.PatchJSON(path, params)
	if err = checkStatus(200, "updating issue", response, err); err != nil {
		return
	}
	response.Body.Close()
	return
}
// sortedLabels sorts IssueLabels case-insensitively by name.
type sortedLabels []IssueLabel

func (s sortedLabels) Len() int {
	return len(s)
}

func (s sortedLabels) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s sortedLabels) Less(i, j int) bool {
	return strings.Compare(strings.ToLower(s[i].Name), strings.ToLower(s[j].Name)) < 0
}

// FetchLabels lists every label defined on the project, following
// pagination, sorted case-insensitively by name.
func (client *Client) FetchLabels(project *Project) (labels []IssueLabel, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/labels?per_page=100", project.Owner, project.Name)
	labels = []IssueLabel{}
	var res *simpleResponse
	for path != "" {
		res, err = api.Get(path)
		if err = checkStatus(200, "fetching labels", res, err); err != nil {
			return
		}
		path = res.Link("next") // empty when there is no next page
		labelsPage := []IssueLabel{}
		if err = res.Unmarshal(&labelsPage); err != nil {
			return
		}
		labels = append(labels, labelsPage...)
	}
	sort.Sort(sortedLabels(labels))
	return
}

// FetchMilestones lists every milestone defined on the project, following
// pagination.
func (client *Client) FetchMilestones(project *Project) (milestones []Milestone, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/milestones?per_page=100", project.Owner, project.Name)
	milestones = []Milestone{}
	var res *simpleResponse
	for path != "" {
		res, err = api.Get(path)
		if err = checkStatus(200, "fetching milestones", res, err); err != nil {
			return
		}
		path = res.Link("next") // empty when there is no next page
		milestonesPage := []Milestone{}
		if err = res.Unmarshal(&milestonesPage); err != nil {
			return
		}
		milestones = append(milestones, milestonesPage...)
	}
	return
}
// GenericAPIRequest performs an arbitrary REST call. For GET requests, map
// data is encoded into the query string; for other methods a non-empty map
// is sent as a JSON body. io.Reader data is streamed as-is. ttl configures
// response caching on the underlying client.
func (client *Client) GenericAPIRequest(method, path string, data interface{}, headers map[string]string, ttl int) (*simpleResponse, error) {
	api, err := client.simpleApi()
	if err != nil {
		return nil, err
	}
	api.CacheTTL = ttl
	var body io.Reader
	switch d := data.(type) {
	case map[string]interface{}:
		if method == "GET" {
			path = addQuery(path, d)
		} else if len(d) > 0 {
			// Named "payload" so the local no longer shadows the
			// imported encoding/json package.
			payload, marshalErr := json.Marshal(d)
			if marshalErr != nil {
				return nil, marshalErr
			}
			body = bytes.NewBuffer(payload)
		}
	case io.Reader:
		body = d
	}
	return api.performRequest(method, path, body, func(req *http.Request) {
		if body != nil {
			req.Header.Set("Content-Type", "application/json; charset=utf-8")
		}
		for key, value := range headers {
			req.Header.Set(key, value)
		}
	})
}
// GraphQL facilitates performing a GraphQL request and parsing the response
// into data; GraphQL-level errors are joined into a single Go error.
func (client *Client) GraphQL(query string, variables interface{}, data interface{}) error {
	api, err := client.simpleApi()
	if err != nil {
		return err
	}
	payload := map[string]interface{}{
		"query":     query,
		"variables": variables,
	}
	resp, err := api.PostJSON("graphql", payload)
	if err = checkStatus(200, "performing GraphQL", resp, err); err != nil {
		return err
	}
	// GraphQL returns 200 even for query errors; they arrive in the
	// "errors" array alongside (possibly partial) "data".
	responseData := struct {
		Data   interface{}
		Errors []struct {
			Message string
		}
	}{
		Data: data,
	}
	err = resp.Unmarshal(&responseData)
	if err != nil {
		return err
	}
	if len(responseData.Errors) > 0 {
		messages := []string{}
		for _, e := range responseData.Errors {
			messages = append(messages, e.Message)
		}
		return fmt.Errorf("API error: %s", strings.Join(messages, "; "))
	}
	return nil
}
// CurrentUser returns the user the client is authenticated as.
func (client *Client) CurrentUser() (user *User, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	response, err := api.Get("user")
	if err = checkStatus(200, "getting current user", response, err); err != nil {
		return
	}
	user = &User{}
	err = response.Unmarshal(user)
	return
}
// AuthorizationEntry is the token portion of an OAuth authorization
// API response.
type AuthorizationEntry struct {
	Token string `json:"token"`
}

// isToken probes whether password is already a valid API token by using it
// as an Authorization header against the "user" endpoint.
func isToken(api *simpleClient, password string) bool {
	api.PrepareRequest = func(req *http.Request) {
		req.Header.Set("Authorization", "token "+password)
	}
	res, _ := api.Get("user")
	// Return the probe result directly instead of the former
	// if-true-else-false chain (staticcheck S1008).
	return res != nil && res.StatusCode == 200
}
// FindOrCreateToken returns password unchanged when it already is a valid
// API token; otherwise it creates an OAuth authorization via basic auth
// (optionally with a 2FA code), retrying with numbered note suffixes when a
// token note already exists (HTTP 422), up to 9 attempts.
func (client *Client) FindOrCreateToken(user, password, twoFactorCode string) (token string, err error) {
	api := client.apiClient()
	// Tokens are at least 40 chars; shorter strings can't be one.
	if len(password) >= 40 && isToken(api, password) {
		return password, nil
	}
	params := map[string]interface{}{
		"scopes":   []string{"repo", "gist"},
		"note_url": OAuthAppURL,
	}
	api.PrepareRequest = func(req *http.Request) {
		req.SetBasicAuth(user, password)
		if twoFactorCode != "" {
			req.Header.Set("X-GitHub-OTP", twoFactorCode)
		}
	}
	count := 1
	maxTries := 9
	for {
		// The note is "hub for user@host", suffixed with count when > 1.
		params["note"], err = authTokenNote(count)
		if err != nil {
			return
		}
		res, postErr := api.PostJSON("authorizations", params)
		if postErr != nil {
			err = postErr
			break
		}
		if res.StatusCode == 201 {
			auth := &AuthorizationEntry{}
			if err = res.Unmarshal(auth); err != nil {
				return
			}
			token = auth.Token
			break
		} else if res.StatusCode == 422 && count < maxTries {
			// 422 here means the note already exists; retry with the
			// next numbered note.
			count++
		} else {
			errInfo, e := res.ErrorInfo()
			if e == nil {
				err = errInfo
			} else {
				err = e
			}
			return
		}
	}
	return
}
// ensureAccessToken makes sure the client's Host carries an OAuth token,
// interactively prompting for host credentials when it does not.
func (client *Client) ensureAccessToken() error {
	if client.Host.AccessToken != "" {
		return nil
	}
	host, err := CurrentConfig().PromptForHost(client.Host.Host)
	if err != nil {
		return err
	}
	client.Host = host
	return nil
}
// simpleApi returns a cached, authenticated low-level API client, prompting
// for credentials on first use if necessary.
func (client *Client) simpleApi() (c *simpleClient, err error) {
	err = client.ensureAccessToken()
	if err != nil {
		return
	}
	if client.cachedClient != nil {
		c = client.cachedClient
		return
	}
	c = client.apiClient()
	c.PrepareRequest = func(req *http.Request) {
		clientDomain := normalizeHost(client.Host.Host)
		if strings.HasPrefix(clientDomain, "api.github.") {
			clientDomain = strings.TrimPrefix(clientDomain, "api.")
		}
		// Only attach the token to requests for this host or its
		// subdomains, so redirects elsewhere don't leak credentials.
		requestHost := strings.ToLower(req.URL.Host)
		if requestHost == clientDomain || strings.HasSuffix(requestHost, "."+clientDomain) {
			req.Header.Set("Authorization", "token "+client.Host.AccessToken)
		}
	}
	client.cachedClient = c
	return
}

// apiClient builds an unauthenticated low-level client rooted at the host's
// API endpoint ("/api/v3/" for GitHub Enterprise hosts).
func (client *Client) apiClient() *simpleClient {
	unixSocket := os.ExpandEnv(client.Host.UnixSocket)
	httpClient := newHttpClient(os.Getenv("HUB_TEST_HOST"), os.Getenv("HUB_VERBOSE") != "", unixSocket)
	apiRoot := client.absolute(normalizeHost(client.Host.Host))
	if !strings.HasPrefix(apiRoot.Host, "api.github.") {
		apiRoot.Path = "/api/v3/"
	}
	return &simpleClient{
		httpClient: httpClient,
		rootUrl:    apiRoot,
	}
}
// absolute builds the base "https://host/" URL, honoring a protocol
// override configured on the client's Host.
func (client *Client) absolute(host string) *url.URL {
	u, err := url.Parse("https://" + host + "/")
	if err != nil {
		// host comes from configuration; a parse failure is a bug.
		panic(err)
	}
	if client.Host != nil && client.Host.Protocol != "" {
		u.Scheme = client.Host.Protocol
	}
	return u
}
// FetchGist retrieves gist metadata by id.
func (client *Client) FetchGist(id string) (gist *Gist, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	response, err := api.Get(fmt.Sprintf("gists/%s", id))
	if err = checkStatus(200, "getting gist", response, err); err != nil {
		return
	}
	// Propagate decode failures instead of silently dropping them; the
	// Unmarshal error was previously ignored, unlike every sibling method.
	err = response.Unmarshal(&gist)
	return
}
// CreateGist uploads the named files as a new gist. A filename of "-" reads
// that file's contents from standard input (named "gistfile1.txt").
func (client *Client) CreateGist(filenames []string, public bool) (gist *Gist, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	files := map[string]GistFile{}
	var basename string
	var content []byte
	var gf GistFile
	for _, file := range filenames {
		if file == "-" {
			content, err = ioutil.ReadAll(os.Stdin)
			basename = "gistfile1.txt"
		} else {
			content, err = ioutil.ReadFile(file)
			basename = path.Base(file)
		}
		if err != nil {
			return
		}
		gf = GistFile{Content: string(content)}
		files[basename] = gf
	}
	g := Gist{
		Files:  files,
		Public: public,
	}
	res, err := api.PostJSON("gists", &g)
	if err = checkStatus(201, "creating gist", res, err); err != nil {
		return
	}
	err = res.Unmarshal(&gist)
	return
}
// normalizeHost maps a user-facing hostname to the hostname that serves its
// API; an empty host defaults to github.com.
func normalizeHost(host string) string {
	switch {
	case host == "":
		return GitHubHost
	case strings.EqualFold(host, GitHubHost):
		return "api.github.com"
	case strings.EqualFold(host, "github.localhost"):
		return "api.github.localhost"
	default:
		return strings.ToLower(host)
	}
}

// reverseNormalizeHost is the inverse of normalizeHost for the known API
// hostnames; any other host is returned unchanged.
func reverseNormalizeHost(host string) string {
	if host == "api.github.com" {
		return GitHubHost
	}
	if host == "api.github.localhost" {
		return "github.localhost"
	}
	return host
}
// checkStatus folds a transport error or an unexpected HTTP status into a
// single error describing the attempted action; it returns nil when the
// response carries exactly expectedStatus.
func checkStatus(expectedStatus int, action string, response *simpleResponse, err error) error {
	if err != nil {
		return fmt.Errorf("Error %s: %s", action, err.Error())
	} else if response.StatusCode != expectedStatus {
		errInfo, err := response.ErrorInfo()
		if err != nil {
			// The error body itself could not be parsed.
			return fmt.Errorf("Error %s: %s (HTTP %d)", action, err.Error(), response.StatusCode)
		}
		return FormatError(action, errInfo)
	}
	return nil
}
// FormatError annotates an HTTP response error with user-friendly messages
func FormatError(action string, err error) error {
	e, ok := err.(*errorInfo)
	if !ok {
		return err
	}
	return formatError(action, e)
}
// formatError renders an errorInfo into a multi-line, human-readable error:
// a headline with the HTTP status, per-field validation messages when
// present, and any SSO or OAuth-scope guidance derived from the response.
func formatError(action string, e *errorInfo) error {
	// Extract the reason phrase from a status line like "404 Not Found".
	var reason string
	if s := strings.SplitN(e.Response.Status, " ", 2); len(s) >= 2 {
		reason = strings.TrimSpace(s[1])
	}
	errStr := fmt.Sprintf("Error %s: %s (HTTP %d)", action, reason, e.Response.StatusCode)
	var errorSentences []string
	// Translate GitHub validation error codes into readable sentences.
	for _, err := range e.Errors {
		switch err.Code {
		case "custom":
			errorSentences = append(errorSentences, err.Message)
		case "missing_field":
			errorSentences = append(errorSentences, fmt.Sprintf("Missing field: \"%s\"", err.Field))
		case "already_exists":
			errorSentences = append(errorSentences, fmt.Sprintf("Duplicate value for \"%s\"", err.Field))
		case "invalid":
			errorSentences = append(errorSentences, fmt.Sprintf("Invalid value for \"%s\"", err.Field))
		case "unauthorized":
			errorSentences = append(errorSentences, fmt.Sprintf("Not allowed to change field \"%s\"", err.Field))
		}
	}
	var errorMessage string
	if len(errorSentences) > 0 {
		errorMessage = strings.Join(errorSentences, "\n")
	} else {
		errorMessage = e.Message
		if action == "getting current user" && e.Message == "Resource not accessible by integration" {
			errorMessage = errorMessage + "\nYou must specify GITHUB_USER via environment variable."
		}
	}
	if errorMessage != "" {
		errStr = fmt.Sprintf("%s\n%s", errStr, errorMessage)
	}
	if ssoErr := ValidateGitHubSSO(e.Response); ssoErr != nil {
		return fmt.Errorf("%s\n%s", errStr, ssoErr)
	}
	if scopeErr := ValidateSufficientOAuthScopes(e.Response); scopeErr != nil {
		return fmt.Errorf("%s\n%s", errStr, scopeErr)
	}
	return errors.New(errStr)
}
// ValidateGitHubSSO checks for the challenge via `X-Github-Sso` header
func ValidateGitHubSSO(res *http.Response) error {
if res.StatusCode != 403 {
return nil
}
sso := res.Header.Get("X-Github-Sso")
if !strings.HasPrefix(sso, "required; url=") {
return nil
}
url := sso[strings.IndexByte(sso, '=')+1:]
return fmt.Errorf("You must authorize your token to access this organization:\n%s", url)
}
// ValidateSufficientOAuthScopes warns about insufficient OAuth scopes on a
// 403/404 response by comparing the X-Accepted-Oauth-Scopes and
// X-Oauth-Scopes headers; it returns nil when scopes are sufficient or
// cannot be determined.
func ValidateSufficientOAuthScopes(res *http.Response) error {
	if res.StatusCode != 404 && res.StatusCode != 403 {
		return nil
	}
	needScopes := newScopeSet(res.Header.Get("X-Accepted-Oauth-Scopes"))
	if len(needScopes) == 0 && isGistWrite(res.Request) {
		// compensate for a GitHub bug: gist APIs omit proper `X-Accepted-Oauth-Scopes` in responses
		needScopes = newScopeSet("gist")
	}
	haveScopes := newScopeSet(res.Header.Get("X-Oauth-Scopes"))
	// Any single overlapping scope is considered sufficient.
	if len(needScopes) == 0 || needScopes.Intersects(haveScopes) {
		return nil
	}
	return fmt.Errorf("Your access token may have insufficient scopes. Visit %s://%s/settings/tokens\n"+
		"to edit the 'hub' token and enable one of the following scopes: %s",
		res.Request.URL.Scheme,
		reverseNormalizeHost(res.Request.Host),
		needScopes)
}
func isGistWrite(req *http.Request) bool {
if req.Method == "GET" {
return false
}
path := strings.TrimPrefix(req.URL.Path, "/v3")
return strings.HasPrefix(path, "/gists")
}
// scopeSet is a set of OAuth scope names.
type scopeSet map[string]struct{}

// String renders the scopes sorted and comma-separated, e.g. "gist, repo".
func (s scopeSet) String() string {
	scopes := make([]string, 0, len(s))
	for scope := range s {
		scopes = append(scopes, scope)
	}
	// sort.Strings replaces the former sort.Sort(sort.StringSlice(...)).
	sort.Strings(scopes)
	return strings.Join(scopes, ", ")
}

// Intersects reports whether s and other share at least one scope.
func (s scopeSet) Intersects(other scopeSet) bool {
	for scope := range s {
		if _, found := other[scope]; found {
			return true
		}
	}
	return false
}

// newScopeSet parses a comma-separated scope list (as found in the
// X-Oauth-Scopes response header), ignoring blank entries.
func newScopeSet(s string) scopeSet {
	scopes := scopeSet{}
	// strings.Split replaces the equivalent strings.SplitN(s, ",", -1).
	for _, scope := range strings.Split(s, ",") {
		if scope = strings.TrimSpace(scope); scope != "" {
			scopes[scope] = struct{}{}
		}
	}
	return scopes
}
// authTokenNote builds the "note" for a created OAuth token, of the form
// "hub for user@host" (suffixed with num when num > 1 to avoid duplicate
// notes). The user name comes from $USER, then $USERNAME, then `whoami`.
func authTokenNote(num int) (string, error) {
	n := os.Getenv("USER")
	if n == "" {
		n = os.Getenv("USERNAME") // Windows fallback
	}
	if n == "" {
		// Last resort: shell out to whoami.
		whoami := exec.Command("whoami")
		whoamiOut, err := whoami.Output()
		if err != nil {
			return "", err
		}
		n = strings.TrimSpace(string(whoamiOut))
	}
	h, err := os.Hostname()
	if err != nil {
		return "", err
	}
	if num > 1 {
		return fmt.Sprintf("hub for %s@%s %d", n, h, num), nil
	}
	return fmt.Sprintf("hub for %s@%s", n, h), nil
}
// perPage translates a desired result limit into a per-page size for the
// API: 50% extra headroom (to compensate for items dropped by filtering),
// capped at max. A non-positive limit requests the maximum page size.
func perPage(limit, max int) int {
	if limit <= 0 {
		return max
	}
	padded := limit + limit/2
	if padded >= max {
		return max
	}
	return padded
}
// addQuery appends params to path as a URL-encoded query string, using "&"
// when path already carries a query. Supported value types are string, int,
// bool, and nil (encoded as an empty value); other types are skipped.
func addQuery(path string, params map[string]interface{}) string {
	if len(params) == 0 {
		return path
	}
	query := url.Values{}
	for key, value := range params {
		switch v := value.(type) {
		case string:
			query.Add(key, v)
		case nil:
			query.Add(key, "")
		case int:
			query.Add(key, fmt.Sprintf("%d", v))
		case bool:
			query.Add(key, fmt.Sprintf("%v", v))
		}
	}
	separator := "?"
	if strings.Contains(path, "?") {
		separator = "&"
	}
	return path + separator + query.Encode()
}
Normalize `url.Error` message for Go 1.14
Go 1.13: `Post <URL>: <message>`
Go 1.14: `Post "<URL>": <message>`
Because an existing test asserts on the exact error-message text, normalize
`url.Error` output so the URL appears without quotes regardless of Go version.
package github
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"time"
"github.com/github/hub/version"
)
const (
GitHubHost string = "github.com"
OAuthAppURL string = "https://hub.github.com/"
)
var UserAgent = "Hub " + version.Version
// NewClient returns a Client for the given GitHub hostname.
func NewClient(h string) *Client {
	return NewClientWithHost(&Host{Host: h})
}

// NewClientWithHost returns a Client bound to a pre-built Host configuration.
func NewClientWithHost(host *Host) *Client {
	return &Client{Host: host}
}

// Client performs API requests against a single GitHub (or GitHub
// Enterprise) host.
type Client struct {
	Host         *Host
	cachedClient *simpleClient // lazily built by simpleApi and reused
}

// Gist mirrors the GitHub gist API resource.
type Gist struct {
	Files       map[string]GistFile `json:"files"`
	Description string              `json:"description,omitempty"`
	Id          string              `json:"id,omitempty"`
	Public      bool                `json:"public"`
	HtmlUrl     string              `json:"html_url"`
}

// GistFile is a single file within a Gist.
type GistFile struct {
	Type     string `json:"type,omitempty"`
	Language string `json:"language,omitempty"`
	Content  string `json:"content"`
	RawUrl   string `json:"raw_url"`
}
// FetchPullRequests lists pull requests for project, following pagination
// until limit results pass the optional filter (limit <= 0 means no limit).
func (client *Client) FetchPullRequests(project *Project, filterParams map[string]interface{}, limit int, filter func(*PullRequest) bool) (pulls []PullRequest, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	// Over-fetch per page (see perPage) since filtering may drop items.
	path := fmt.Sprintf("repos/%s/%s/pulls?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
	if filterParams != nil {
		path = addQuery(path, filterParams)
	}
	pulls = []PullRequest{}
	var res *simpleResponse
	for path != "" {
		// draftsType media type opts in to the draft-PR API preview.
		res, err = api.GetFile(path, draftsType)
		if err = checkStatus(200, "fetching pull requests", res, err); err != nil {
			return
		}
		path = res.Link("next") // empty when there is no next page
		pullsPage := []PullRequest{}
		if err = res.Unmarshal(&pullsPage); err != nil {
			return
		}
		for _, pr := range pullsPage {
			if filter == nil || filter(&pr) {
				pulls = append(pulls, pr)
				if limit > 0 && len(pulls) == limit {
					path = "" // stop pagination once the limit is reached
					break
				}
			}
		}
	}
	return
}
// PullRequest retrieves a single pull request by its number.
func (client *Client) PullRequest(project *Project, id string) (pr *PullRequest, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("repos/%s/%s/pulls/%s", project.Owner, project.Name, id))
	if err = checkStatus(200, "getting pull request", res, err); err != nil {
		return
	}
	pr = &PullRequest{}
	err = res.Unmarshal(pr)
	return
}

// PullRequestPatch streams the patch representation of a pull request; the
// caller is responsible for closing the returned reader.
func (client *Client) PullRequestPatch(project *Project, id string) (patch io.ReadCloser, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.GetFile(fmt.Sprintf("repos/%s/%s/pulls/%s", project.Owner, project.Name, id), patchMediaType)
	if err = checkStatus(200, "getting pull request patch", res, err); err != nil {
		return
	}
	return res.Body, nil
}
// CreatePullRequest opens a new pull request with the given params
// (title, head, base, etc.). On a 404 the error is annotated with a hint,
// since GitHub returns 404 when the target repo is missing or unwritable.
func (client *Client) CreatePullRequest(project *Project, params map[string]interface{}) (pr *PullRequest, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.PostJSONPreview(fmt.Sprintf("repos/%s/%s/pulls", project.Owner, project.Name), params, draftsType)
	if err = checkStatus(201, "creating pull request", res, err); err != nil {
		if res != nil && res.StatusCode == 404 {
			// Drop the scheme from the web URL for a friendlier message.
			projectUrl := strings.SplitN(project.WebURL("", "", ""), "://", 2)[1]
			err = fmt.Errorf("%s\nAre you sure that %s exists?", err, projectUrl)
		}
		return
	}
	pr = &PullRequest{}
	err = res.Unmarshal(pr)
	return
}
// RequestReview asks the reviewers/teams given in params to review a PR.
// Only the status code matters, so the response body is closed immediately.
func (client *Client) RequestReview(project *Project, prNumber int, params map[string]interface{}) (err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", project.Owner, project.Name, prNumber), params)
	if err = checkStatus(201, "requesting reviewer", res, err); err != nil {
		return
	}
	res.Body.Close()
	return
}
// CommitPatch streams a single commit in patch format.
// The caller must close the returned ReadCloser.
func (client *Client) CommitPatch(project *Project, sha string) (patch io.ReadCloser, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.GetFile(fmt.Sprintf("repos/%s/%s/commits/%s", project.Owner, project.Name, sha), patchMediaType)
	if err = checkStatus(200, "getting commit patch", res, err); err != nil {
		return
	}
	return res.Body, nil
}
// GistPatch streams the raw content of a gist's file.
// NOTE(review): Go map iteration order is random, so for a multi-file gist
// an arbitrary file is picked — presumably callers only use single-file
// gists here; confirm before relying on this for multi-file gists.
func (client *Client) GistPatch(id string) (patch io.ReadCloser, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("gists/%s", id))
	if err = checkStatus(200, "getting gist patch", res, err); err != nil {
		return
	}
	gist := Gist{}
	if err = res.Unmarshal(&gist); err != nil {
		return
	}
	rawUrl := ""
	for _, file := range gist.Files {
		rawUrl = file.RawUrl
		break
	}
	res, err = api.GetFile(rawUrl, textMediaType)
	if err = checkStatus(200, "getting gist patch", res, err); err != nil {
		return
	}
	return res.Body, nil
}
// Repository fetches metadata about a GitHub repository.
func (client *Client) Repository(project *Project) (repo *Repository, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("repos/%s/%s", project.Owner, project.Name))
	if err = checkStatus(200, "getting repository info", res, err); err != nil {
		return
	}
	repo = &Repository{}
	// Unmarshal into the allocated struct directly; the previous `&repo`
	// passed a needless **Repository, inconsistent with every sibling method.
	err = res.Unmarshal(repo)
	return
}
// CreateRepository creates a repo under the current user, or under an
// organization when the project owner differs from the authenticated user.
func (client *Client) CreateRepository(project *Project, description, homepage string, isPrivate bool) (repo *Repository, err error) {
	repoURL := "user/repos"
	if project.Owner != client.Host.User {
		repoURL = fmt.Sprintf("orgs/%s/repos", project.Owner)
	}
	params := map[string]interface{}{
		"name":        project.Name,
		"description": description,
		"homepage":    homepage,
		"private":     isPrivate,
	}
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.PostJSON(repoURL, params)
	if err = checkStatus(201, "creating repository", res, err); err != nil {
		return
	}
	repo = &Repository{}
	err = res.Unmarshal(repo)
	return
}
// DeleteRepository permanently deletes a repository; a 204 response
// indicates success.
func (client *Client) DeleteRepository(project *Project) error {
	api, err := client.simpleApi()
	if err != nil {
		return err
	}
	res, err := api.Delete(fmt.Sprintf("repos/%s/%s", project.Owner, project.Name))
	return checkStatus(204, "deleting repository", res, err)
}
// Release models a GitHub release, including upload/download URLs and
// its attached assets.
type Release struct {
	Name            string         `json:"name"`
	TagName         string         `json:"tag_name"`
	TargetCommitish string         `json:"target_commitish"`
	Body            string         `json:"body"`
	Draft           bool           `json:"draft"`
	Prerelease      bool           `json:"prerelease"`
	Assets          []ReleaseAsset `json:"assets"`
	TarballUrl      string         `json:"tarball_url"`
	ZipballUrl      string         `json:"zipball_url"`
	HtmlUrl         string         `json:"html_url"`
	// UploadUrl is an RFC 6570 URI template (".../assets{?name,label}").
	UploadUrl   string    `json:"upload_url"`
	ApiUrl      string    `json:"url"`
	CreatedAt   time.Time `json:"created_at"`
	PublishedAt time.Time `json:"published_at"`
}

// ReleaseAsset is a single uploaded file attached to a Release.
type ReleaseAsset struct {
	Name        string `json:"name"`
	Label       string `json:"label"`
	DownloadUrl string `json:"browser_download_url"`
	ApiUrl      string `json:"url"`
}
// FetchReleases lists releases, following pagination until `limit` matching
// releases are collected (limit <= 0 means unlimited). `filter`, when
// non-nil, selects which fetched releases to keep.
func (client *Client) FetchReleases(project *Project, limit int, filter func(*Release) bool) (releases []Release, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/releases?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
	releases = []Release{}
	var res *simpleResponse
	for path != "" {
		res, err = api.Get(path)
		if err = checkStatus(200, "fetching releases", res, err); err != nil {
			return
		}
		path = res.Link("next")
		releasesPage := []Release{}
		if err = res.Unmarshal(&releasesPage); err != nil {
			return
		}
		for _, release := range releasesPage {
			if filter == nil || filter(&release) {
				releases = append(releases, release)
				if limit > 0 && len(releases) == limit {
					// Reached the requested count; stop paginating.
					path = ""
					break
				}
			}
		}
	}
	return
}
// FetchRelease finds a single release by tag name, scanning up to 100
// matching releases; errors when no release carries that tag.
func (client *Client) FetchRelease(project *Project, tagName string) (*Release, error) {
	matchesTag := func(release *Release) bool {
		return release.TagName == tagName
	}
	releases, err := client.FetchReleases(project, 100, matchesTag)
	if err != nil {
		return nil, err
	}
	if len(releases) < 1 {
		return nil, fmt.Errorf("Unable to find release with tag name `%s'", tagName)
	}
	return &releases[0], nil
}
// CreateRelease creates a new release from the given parameters and
// returns the server's representation of it (including upload URLs).
func (client *Client) CreateRelease(project *Project, releaseParams *Release) (release *Release, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/releases", project.Owner, project.Name), releaseParams)
	if err = checkStatus(201, "creating release", res, err); err != nil {
		return
	}
	release = &Release{}
	err = res.Unmarshal(release)
	return
}
// EditRelease patches an existing release with the given fields and
// returns the updated representation.
func (client *Client) EditRelease(release *Release, releaseParams map[string]interface{}) (updatedRelease *Release, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.PatchJSON(release.ApiUrl, releaseParams)
	if err = checkStatus(200, "editing release", res, err); err != nil {
		return
	}
	updatedRelease = &Release{}
	err = res.Unmarshal(updatedRelease)
	return
}
// DeleteRelease removes a release (the underlying git tag is untouched);
// a 204 response indicates success.
func (client *Client) DeleteRelease(release *Release) error {
	api, err := client.simpleApi()
	if err != nil {
		return err
	}
	res, err := api.Delete(release.ApiUrl)
	return checkStatus(204, "deleting release", res, err)
}
// LocalAsset describes a local file to be uploaded as a release asset.
type LocalAsset struct {
	Name     string    // local file path; its base name becomes the asset name
	Label    string    // optional display label
	Contents io.Reader // file contents to upload
	Size     int64     // content length in bytes
}
// UploadReleaseAssets uploads local files as assets of an existing release,
// first deleting any existing asset with the same name. Server errors
// (HTTP 5xx) are retried up to 3 times with linear backoff, re-opening the
// file on each retry since the previous request consumed the body.
func (client *Client) UploadReleaseAssets(release *Release, assets []LocalAsset) (doneAssets []*ReleaseAsset, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	// UploadUrl is an RFC 6570 template (".../assets{?name,label}"); strip
	// the template suffix and append our own query string instead.
	idx := strings.Index(release.UploadUrl, "{")
	uploadURL := release.UploadUrl[0:idx]
	for _, asset := range assets {
		for _, existingAsset := range release.Assets {
			if existingAsset.Name == asset.Name {
				if err = client.DeleteReleaseAsset(&existingAsset); err != nil {
					return
				}
				break
			}
		}
		params := map[string]interface{}{"name": filepath.Base(asset.Name)}
		if asset.Label != "" {
			params["label"] = asset.Label
		}
		uploadPath := addQuery(uploadURL, params)
		var res *simpleResponse
		attempts := 0
		maxAttempts := 3
		body := asset.Contents
		// Close retry files promptly instead of `defer f.Close()` inside the
		// loop, which previously kept every retry file open until the whole
		// function returned (an fd leak across many assets/retries).
		var retryFile *os.File
		closeRetryFile := func() {
			if retryFile != nil {
				retryFile.Close()
				retryFile = nil
			}
		}
		for {
			res, err = api.PostFile(uploadPath, body, asset.Size)
			if err == nil && res.StatusCode >= 500 && res.StatusCode < 600 && attempts < maxAttempts {
				attempts++
				time.Sleep(time.Second * time.Duration(attempts))
				closeRetryFile()
				retryFile, err = os.Open(asset.Name)
				if err != nil {
					return
				}
				body = retryFile
				continue
			}
			if err = checkStatus(201, "uploading release asset", res, err); err != nil {
				closeRetryFile()
				return
			}
			break
		}
		closeRetryFile()
		newAsset := ReleaseAsset{}
		err = res.Unmarshal(&newAsset)
		if err != nil {
			return
		}
		doneAssets = append(doneAssets, &newAsset)
	}
	return
}
// DeleteReleaseAsset removes a single uploaded asset from a release;
// a 204 response indicates success.
func (client *Client) DeleteReleaseAsset(asset *ReleaseAsset) error {
	api, err := client.simpleApi()
	if err != nil {
		return err
	}
	res, err := api.Delete(asset.ApiUrl)
	return checkStatus(204, "deleting release asset", res, err)
}
// DownloadReleaseAsset streams the binary content of a release asset.
// The caller must close the returned ReadCloser.
func (client *Client) DownloadReleaseAsset(url string) (asset io.ReadCloser, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	resp, err := api.GetFile(url, "application/octet-stream")
	if err = checkStatus(200, "downloading asset", resp, err); err != nil {
		return
	}
	return resp.Body, err
}
// CIStatusResponse is the combined commit status: an overall state plus
// the individual status contexts.
type CIStatusResponse struct {
	State    string     `json:"state"`
	Statuses []CIStatus `json:"statuses"`
}

// CIStatus is one status context reported for a commit.
type CIStatus struct {
	State     string `json:"state"`
	Context   string `json:"context"`
	TargetUrl string `json:"target_url"`
}

// CheckRunsResponse is the Checks API listing for a commit.
type CheckRunsResponse struct {
	CheckRuns []CheckRun `json:"check_runs"`
}

// CheckRun is one check run reported for a commit.
type CheckRun struct {
	Status     string `json:"status"`
	Conclusion string `json:"conclusion"`
	Name       string `json:"name"`
	HtmlUrl    string `json:"html_url"`
}
// FetchCIStatus merges the commit-status API and the check-runs API into a
// single CIStatusResponse for a commit, sorted by context then target URL.
// Check-runs failures with 403/404/422 are tolerated (the Checks API may be
// unavailable for the repo), returning just the plain statuses.
func (client *Client) FetchCIStatus(project *Project, sha string) (status *CIStatusResponse, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("repos/%s/%s/commits/%s/status", project.Owner, project.Name, sha))
	if err = checkStatus(200, "fetching statuses", res, err); err != nil {
		return
	}
	status = &CIStatusResponse{}
	if err = res.Unmarshal(status); err != nil {
		return
	}
	// Case-insensitive sort by context, tie-broken by target URL.
	sortStatuses := func() {
		sort.Slice(status.Statuses, func(a, b int) bool {
			sA := status.Statuses[a]
			sB := status.Statuses[b]
			cmp := strings.Compare(strings.ToLower(sA.Context), strings.ToLower(sB.Context))
			if cmp == 0 {
				return strings.Compare(sA.TargetUrl, sB.TargetUrl) < 0
			} else {
				return cmp < 0
			}
		})
	}
	sortStatuses()
	res, err = api.GetFile(fmt.Sprintf("repos/%s/%s/commits/%s/check-runs", project.Owner, project.Name, sha), checksType)
	if err == nil && (res.StatusCode == 403 || res.StatusCode == 404 || res.StatusCode == 422) {
		// Checks API not available here; return statuses only.
		return
	}
	if err = checkStatus(200, "fetching checks", res, err); err != nil {
		return
	}
	checks := &CheckRunsResponse{}
	if err = res.Unmarshal(checks); err != nil {
		return
	}
	// Translate each check run into a CIStatus; incomplete runs map to "pending".
	for _, checkRun := range checks.CheckRuns {
		state := "pending"
		if checkRun.Status == "completed" {
			state = checkRun.Conclusion
		}
		checkStatus := CIStatus{
			State:     state,
			Context:   checkRun.Name,
			TargetUrl: checkRun.HtmlUrl,
		}
		status.Statuses = append(status.Statuses, checkStatus)
	}
	sortStatuses()
	return
}
// Repository models a GitHub repository as returned by the repos API.
type Repository struct {
	Name          string                 `json:"name"`
	FullName      string                 `json:"full_name"`
	Parent        *Repository            `json:"parent"` // set for forks
	Owner         *User                  `json:"owner"`
	Private       bool                   `json:"private"`
	HasWiki       bool                   `json:"has_wiki"`
	Permissions   *RepositoryPermissions `json:"permissions"`
	HtmlUrl       string                 `json:"html_url"`
	DefaultBranch string                 `json:"default_branch"`
}

// RepositoryPermissions describes the current user's access to a repository.
type RepositoryPermissions struct {
	Admin bool `json:"admin"`
	Push  bool `json:"push"`
	Pull  bool `json:"pull"`
}
// ForkRepository creates a fork of a repository. GitHub answers 202 since
// forking happens asynchronously; the returned repo may not be ready yet.
func (client *Client) ForkRepository(project *Project, params map[string]interface{}) (repo *Repository, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/forks", project.Owner, project.Name), params)
	if err = checkStatus(202, "creating fork", res, err); err != nil {
		return
	}
	repo = &Repository{}
	err = res.Unmarshal(repo)
	return
}
// Comment is a single comment on an issue or pull request.
type Comment struct {
	Id        int       `json:"id"`
	Body      string    `json:"body"`
	User      *User     `json:"user"`
	CreatedAt time.Time `json:"created_at"`
}

// Issue models a GitHub issue. Pull requests share this shape (see the
// PullRequest alias below); PR-only fields such as Head/Base are zero for
// plain issues.
type Issue struct {
	Number int    `json:"number"`
	State  string `json:"state"`
	Title  string `json:"title"`
	Body   string `json:"body"`
	User   *User  `json:"user"`

	PullRequest         *PullRequest     `json:"pull_request"`
	Head                *PullRequestSpec `json:"head"`
	Base                *PullRequestSpec `json:"base"`
	MergeCommitSha      string           `json:"merge_commit_sha"`
	MaintainerCanModify bool             `json:"maintainer_can_modify"`
	Draft               bool             `json:"draft"`

	Comments  int          `json:"comments"`
	Labels    []IssueLabel `json:"labels"`
	Assignees []User       `json:"assignees"`
	Milestone *Milestone   `json:"milestone"`
	CreatedAt time.Time    `json:"created_at"`
	UpdatedAt time.Time    `json:"updated_at"`
	MergedAt  time.Time    `json:"merged_at"`

	RequestedReviewers []User `json:"requested_reviewers"`
	RequestedTeams     []Team `json:"requested_teams"`

	ApiUrl   string `json:"url"`
	HtmlUrl  string `json:"html_url"`
	ClosedBy *User  `json:"closed_by"`
}

// PullRequest is structurally identical to Issue in the GitHub API.
type PullRequest Issue

// PullRequestSpec identifies one side (head or base) of a pull request.
type PullRequestSpec struct {
	Label string      `json:"label"`
	Ref   string      `json:"ref"`
	Sha   string      `json:"sha"`
	Repo  *Repository `json:"repo"`
}
// IsSameRepo reports whether the PR's head branch lives in the same
// repository as its base (i.e. not a cross-fork pull request).
func (pr *PullRequest) IsSameRepo() bool {
	return pr.Head != nil && pr.Head.Repo != nil &&
		pr.Head.Repo.Name == pr.Base.Repo.Name &&
		pr.Head.Repo.Owner.Login == pr.Base.Repo.Owner.Login
}
// HasRequestedReviewer reports whether a review was requested from the
// user with the given login (case-insensitive).
func (pr *PullRequest) HasRequestedReviewer(name string) bool {
	for i := range pr.RequestedReviewers {
		if strings.EqualFold(pr.RequestedReviewers[i].Login, name) {
			return true
		}
	}
	return false
}
// HasRequestedTeam reports whether a review was requested from the team
// with the given slug (case-insensitive).
func (pr *PullRequest) HasRequestedTeam(name string) bool {
	for i := range pr.RequestedTeams {
		if strings.EqualFold(pr.RequestedTeams[i].Slug, name) {
			return true
		}
	}
	return false
}
// IssueLabel is a label attached to an issue; Color is a hex RGB string.
type IssueLabel struct {
	Name  string `json:"name"`
	Color string `json:"color"`
}

// User is a GitHub account referenced by login name.
type User struct {
	Login string `json:"login"`
}

// Team is an organization team, addressable by its slug.
type Team struct {
	Name string `json:"name"`
	Slug string `json:"slug"`
}

// Milestone is an issue milestone within a repository.
type Milestone struct {
	Number int    `json:"number"`
	Title  string `json:"title"`
}
// FetchIssues lists issues, following pagination until `limit` matching
// issues are collected (limit <= 0 means unlimited). `filterParams` become
// query parameters; `filter`, when non-nil, selects which issues to keep.
func (client *Client) FetchIssues(project *Project, filterParams map[string]interface{}, limit int, filter func(*Issue) bool) (issues []Issue, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	// Over-fetch per page (see perPage) since client-side filtering may drop items.
	path := fmt.Sprintf("repos/%s/%s/issues?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
	if filterParams != nil {
		path = addQuery(path, filterParams)
	}
	issues = []Issue{}
	var res *simpleResponse
	for path != "" {
		res, err = api.Get(path)
		if err = checkStatus(200, "fetching issues", res, err); err != nil {
			return
		}
		path = res.Link("next")
		issuesPage := []Issue{}
		if err = res.Unmarshal(&issuesPage); err != nil {
			return
		}
		for _, issue := range issuesPage {
			if filter == nil || filter(&issue) {
				issues = append(issues, issue)
				if limit > 0 && len(issues) == limit {
					// Reached the requested count; stop paginating.
					path = ""
					break
				}
			}
		}
	}
	return
}
// FetchIssue retrieves a single issue by number (passed as a string).
func (client *Client) FetchIssue(project *Project, number string) (issue *Issue, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("repos/%s/%s/issues/%s", project.Owner, project.Name, number))
	if err = checkStatus(200, "fetching issue", res, err); err != nil {
		return nil, err
	}
	issue = &Issue{}
	err = res.Unmarshal(issue)
	return
}
// FetchComments retrieves the comments of an issue or pull request.
func (client *Client) FetchComments(project *Project, number string) (comments []Comment, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get(fmt.Sprintf("repos/%s/%s/issues/%s/comments", project.Owner, project.Name, number))
	if err = checkStatus(200, "fetching comments for issue", res, err); err != nil {
		return nil, err
	}
	comments = []Comment{}
	err = res.Unmarshal(&comments)
	return
}
// CreateIssue opens a new issue with the given params (title, body, etc.).
func (client *Client) CreateIssue(project *Project, params interface{}) (issue *Issue, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/issues", project.Owner, project.Name), params)
	if err = checkStatus(201, "creating issue", res, err); err != nil {
		return
	}
	issue = &Issue{}
	err = res.Unmarshal(issue)
	return
}
// UpdateIssue patches fields of an existing issue. Only the status code
// matters, so the response body is closed without decoding.
func (client *Client) UpdateIssue(project *Project, issueNumber int, params map[string]interface{}) (err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.PatchJSON(fmt.Sprintf("repos/%s/%s/issues/%d", project.Owner, project.Name, issueNumber), params)
	if err = checkStatus(200, "updating issue", res, err); err != nil {
		return
	}
	res.Body.Close()
	return
}
// sortedLabels implements sort.Interface to order labels by name,
// case-insensitively.
type sortedLabels []IssueLabel

func (s sortedLabels) Len() int { return len(s) }

func (s sortedLabels) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s sortedLabels) Less(i, j int) bool {
	return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name)
}
// FetchLabels lists all labels of a repository, following pagination,
// and returns them sorted case-insensitively by name.
func (client *Client) FetchLabels(project *Project) (labels []IssueLabel, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/labels?per_page=100", project.Owner, project.Name)
	labels = []IssueLabel{}
	var res *simpleResponse
	for path != "" {
		res, err = api.Get(path)
		if err = checkStatus(200, "fetching labels", res, err); err != nil {
			return
		}
		path = res.Link("next")
		labelsPage := []IssueLabel{}
		if err = res.Unmarshal(&labelsPage); err != nil {
			return
		}
		labels = append(labels, labelsPage...)
	}
	sort.Sort(sortedLabels(labels))
	return
}
// FetchMilestones lists all milestones of a repository, following pagination.
func (client *Client) FetchMilestones(project *Project) (milestones []Milestone, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	path := fmt.Sprintf("repos/%s/%s/milestones?per_page=100", project.Owner, project.Name)
	milestones = []Milestone{}
	var res *simpleResponse
	for path != "" {
		res, err = api.Get(path)
		if err = checkStatus(200, "fetching milestones", res, err); err != nil {
			return
		}
		path = res.Link("next")
		milestonesPage := []Milestone{}
		if err = res.Unmarshal(&milestonesPage); err != nil {
			return
		}
		milestones = append(milestones, milestonesPage...)
	}
	return
}
// GenericAPIRequest performs an arbitrary API request. `data` may be a map
// (sent as JSON, or merged into the query string for GET) or an io.Reader
// used directly as the request body. `ttl` sets the response cache TTL.
func (client *Client) GenericAPIRequest(method, path string, data interface{}, headers map[string]string, ttl int) (*simpleResponse, error) {
	api, err := client.simpleApi()
	if err != nil {
		return nil, err
	}
	api.CacheTTL = ttl
	var body io.Reader
	switch d := data.(type) {
	case map[string]interface{}:
		if method == "GET" {
			path = addQuery(path, d)
		} else if len(d) > 0 {
			// Named `payload` (not `json`) so the variable does not shadow
			// the encoding/json package within this scope.
			payload, err := json.Marshal(d)
			if err != nil {
				return nil, err
			}
			body = bytes.NewBuffer(payload)
		}
	case io.Reader:
		body = d
	}
	return api.performRequest(method, path, body, func(req *http.Request) {
		if body != nil {
			req.Header.Set("Content-Type", "application/json; charset=utf-8")
		}
		for key, value := range headers {
			req.Header.Set(key, value)
		}
	})
}
// GraphQL performs a GraphQL request and unmarshals the `data` portion of
// the response into `data`. GraphQL-level errors (HTTP 200 with an `errors`
// array) are joined into a single returned error.
func (client *Client) GraphQL(query string, variables interface{}, data interface{}) error {
	api, err := client.simpleApi()
	if err != nil {
		return err
	}
	payload := map[string]interface{}{
		"query":     query,
		"variables": variables,
	}
	resp, err := api.PostJSON("graphql", payload)
	if err = checkStatus(200, "performing GraphQL", resp, err); err != nil {
		return err
	}
	// Decode into an envelope whose Data field aliases the caller's target.
	responseData := struct {
		Data   interface{}
		Errors []struct {
			Message string
		}
	}{
		Data: data,
	}
	err = resp.Unmarshal(&responseData)
	if err != nil {
		return err
	}
	if len(responseData.Errors) > 0 {
		messages := []string{}
		for _, e := range responseData.Errors {
			messages = append(messages, e.Message)
		}
		return fmt.Errorf("API error: %s", strings.Join(messages, "; "))
	}
	return nil
}
// CurrentUser fetches the authenticated user.
func (client *Client) CurrentUser() (user *User, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	res, err := api.Get("user")
	if err = checkStatus(200, "getting current user", res, err); err != nil {
		return
	}
	user = &User{}
	err = res.Unmarshal(user)
	return
}
// AuthorizationEntry is an OAuth authorization created via the API.
type AuthorizationEntry struct {
	Token string `json:"token"`
}

// isToken reports whether `password` already works as an OAuth token by
// making an authenticated request for the current user.
// Note: it overwrites api.PrepareRequest as a side effect.
func isToken(api *simpleClient, password string) bool {
	api.PrepareRequest = func(req *http.Request) {
		req.Header.Set("Authorization", "token "+password)
	}
	res, _ := api.Get("user")
	// Return the condition directly instead of if/return true/return false.
	return res != nil && res.StatusCode == 200
}
// FindOrCreateToken returns an OAuth token for the user. If `password`
// already looks like a token (>= 40 chars and authenticates), it is used
// as-is; otherwise a new authorization is created with basic auth (plus an
// optional 2FA code). On a 422 (duplicate note), the note is re-generated
// with an incrementing suffix, up to 9 attempts.
func (client *Client) FindOrCreateToken(user, password, twoFactorCode string) (token string, err error) {
	api := client.apiClient()
	if len(password) >= 40 && isToken(api, password) {
		return password, nil
	}
	params := map[string]interface{}{
		"scopes":   []string{"repo", "gist"},
		"note_url": OAuthAppURL,
	}
	api.PrepareRequest = func(req *http.Request) {
		req.SetBasicAuth(user, password)
		if twoFactorCode != "" {
			req.Header.Set("X-GitHub-OTP", twoFactorCode)
		}
	}
	count := 1
	maxTries := 9
	for {
		params["note"], err = authTokenNote(count)
		if err != nil {
			return
		}
		res, postErr := api.PostJSON("authorizations", params)
		if postErr != nil {
			err = postErr
			break
		}
		if res.StatusCode == 201 {
			auth := &AuthorizationEntry{}
			if err = res.Unmarshal(auth); err != nil {
				return
			}
			token = auth.Token
			break
		} else if res.StatusCode == 422 && count < maxTries {
			// 422: an authorization with this note already exists; retry
			// with a numbered note.
			count++
		} else {
			errInfo, e := res.ErrorInfo()
			if e == nil {
				err = errInfo
			} else {
				err = e
			}
			return
		}
	}
	return
}
// ensureAccessToken guarantees client.Host carries an access token,
// interactively prompting (via the config) when none is stored.
func (client *Client) ensureAccessToken() error {
	if client.Host.AccessToken == "" {
		host, err := CurrentConfig().PromptForHost(client.Host.Host)
		if err != nil {
			return err
		}
		client.Host = host
	}
	return nil
}
// simpleApi returns a memoized, authenticated API client. The Authorization
// header is attached only for requests targeting the client's own host (or
// a subdomain of it), so tokens are never leaked to third-party hosts such
// as asset-download redirects.
func (client *Client) simpleApi() (c *simpleClient, err error) {
	err = client.ensureAccessToken()
	if err != nil {
		return
	}
	if client.cachedClient != nil {
		c = client.cachedClient
		return
	}
	c = client.apiClient()
	c.PrepareRequest = func(req *http.Request) {
		clientDomain := normalizeHost(client.Host.Host)
		if strings.HasPrefix(clientDomain, "api.github.") {
			clientDomain = strings.TrimPrefix(clientDomain, "api.")
		}
		requestHost := strings.ToLower(req.URL.Host)
		if requestHost == clientDomain || strings.HasSuffix(requestHost, "."+clientDomain) {
			req.Header.Set("Authorization", "token "+client.Host.AccessToken)
		}
	}
	client.cachedClient = c
	return
}
// apiClient builds an unauthenticated simpleClient rooted at the API
// endpoint for the client's host. Enterprise hosts (anything other than
// api.github.*) get the "/api/v3/" path prefix.
func (client *Client) apiClient() *simpleClient {
	unixSocket := os.ExpandEnv(client.Host.UnixSocket)
	// HUB_TEST_HOST / HUB_VERBOSE are test/debug hooks for the HTTP layer.
	httpClient := newHttpClient(os.Getenv("HUB_TEST_HOST"), os.Getenv("HUB_VERBOSE") != "", unixSocket)
	apiRoot := client.absolute(normalizeHost(client.Host.Host))
	if !strings.HasPrefix(apiRoot.Host, "api.github.") {
		apiRoot.Path = "/api/v3/"
	}
	return &simpleClient{
		httpClient: httpClient,
		rootUrl:    apiRoot,
	}
}
// absolute builds an absolute URL for the given host, defaulting to https
// but honoring a custom protocol configured on the client's host entry.
// Panics on an unparseable host, which indicates a programming error.
func (client *Client) absolute(host string) *url.URL {
	u, err := url.Parse("https://" + host + "/")
	if err != nil {
		panic(err)
	}
	if client.Host != nil && client.Host.Protocol != "" {
		u.Scheme = client.Host.Protocol
	}
	return u
}
// FetchGist retrieves a gist by its ID.
func (client *Client) FetchGist(id string) (gist *Gist, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	response, err := api.Get(fmt.Sprintf("gists/%s", id))
	if err = checkStatus(200, "getting gist", response, err); err != nil {
		return
	}
	// Propagate decode failures: the Unmarshal error was previously
	// discarded, so a malformed response yielded a nil gist and nil error.
	err = response.Unmarshal(&gist)
	return
}
// CreateGist creates a gist from the given local files. A filename of "-"
// reads content from standard input (named "gistfile1.txt").
func (client *Client) CreateGist(filenames []string, public bool) (gist *Gist, err error) {
	api, err := client.simpleApi()
	if err != nil {
		return
	}
	files := map[string]GistFile{}
	var basename string
	var content []byte
	var gf GistFile
	for _, file := range filenames {
		if file == "-" {
			content, err = ioutil.ReadAll(os.Stdin)
			basename = "gistfile1.txt"
		} else {
			content, err = ioutil.ReadFile(file)
			basename = path.Base(file)
		}
		if err != nil {
			return
		}
		gf = GistFile{Content: string(content)}
		files[basename] = gf
	}
	g := Gist{
		Files:  files,
		Public: public,
	}
	res, err := api.PostJSON("gists", &g)
	if err = checkStatus(201, "creating gist", res, err); err != nil {
		return
	}
	err = res.Unmarshal(&gist)
	return
}
// normalizeHost maps a user-supplied GitHub host to the host that serves
// its API, defaulting to GitHubHost when empty; other hosts are lowercased.
func normalizeHost(host string) string {
	switch {
	case host == "":
		return GitHubHost
	case strings.EqualFold(host, GitHubHost):
		return "api.github.com"
	case strings.EqualFold(host, "github.localhost"):
		return "api.github.localhost"
	default:
		return strings.ToLower(host)
	}
}
// reverseNormalizeHost is the inverse of normalizeHost for the two
// well-known API hosts; any other host is returned unchanged.
func reverseNormalizeHost(host string) string {
	if host == "api.github.com" {
		return GitHubHost
	}
	if host == "api.github.localhost" {
		return "github.localhost"
	}
	return host
}
// checkStatus converts a transport error or unexpected HTTP status into a
// single descriptive error, prefixed with the action being performed.
// Returns nil when err is nil and the status matches expectedStatus.
func checkStatus(expectedStatus int, action string, response *simpleResponse, err error) error {
	if err != nil {
		errStr := err.Error()
		// Flatten url.Error into "OP URL: cause" for readability.
		if urlErr, isURLErr := err.(*url.Error); isURLErr {
			errStr = fmt.Sprintf("%s %s: %s", urlErr.Op, urlErr.URL, urlErr.Err)
		}
		return fmt.Errorf("Error %s: %s", action, errStr)
	} else if response.StatusCode != expectedStatus {
		errInfo, err := response.ErrorInfo()
		if err != nil {
			return fmt.Errorf("Error %s: %s (HTTP %d)", action, err.Error(), response.StatusCode)
		}
		return FormatError(action, errInfo)
	}
	return nil
}
// FormatError annotates an HTTP response error with user-friendly messages
// FormatError annotates an HTTP response error with user-friendly messages.
// Non-*errorInfo errors are returned unchanged.
func FormatError(action string, err error) error {
	if e, ok := err.(*errorInfo); ok {
		return formatError(action, e)
	}
	return err
}
// formatError renders an API errorInfo into a multi-line message: the HTTP
// status reason, then per-field validation messages, then any SSO or OAuth
// scope hints derived from the response headers.
func formatError(action string, e *errorInfo) error {
	var reason string
	// Status is e.g. "404 Not Found"; keep only the textual reason.
	if s := strings.SplitN(e.Response.Status, " ", 2); len(s) >= 2 {
		reason = strings.TrimSpace(s[1])
	}
	errStr := fmt.Sprintf("Error %s: %s (HTTP %d)", action, reason, e.Response.StatusCode)
	var errorSentences []string
	for _, err := range e.Errors {
		switch err.Code {
		case "custom":
			errorSentences = append(errorSentences, err.Message)
		case "missing_field":
			errorSentences = append(errorSentences, fmt.Sprintf("Missing field: \"%s\"", err.Field))
		case "already_exists":
			errorSentences = append(errorSentences, fmt.Sprintf("Duplicate value for \"%s\"", err.Field))
		case "invalid":
			errorSentences = append(errorSentences, fmt.Sprintf("Invalid value for \"%s\"", err.Field))
		case "unauthorized":
			errorSentences = append(errorSentences, fmt.Sprintf("Not allowed to change field \"%s\"", err.Field))
		}
	}
	var errorMessage string
	if len(errorSentences) > 0 {
		errorMessage = strings.Join(errorSentences, "\n")
	} else {
		errorMessage = e.Message
		// GitHub Apps tokens can't resolve "the current user"; hint at the override.
		if action == "getting current user" && e.Message == "Resource not accessible by integration" {
			errorMessage = errorMessage + "\nYou must specify GITHUB_USER via environment variable."
		}
	}
	if errorMessage != "" {
		errStr = fmt.Sprintf("%s\n%s", errStr, errorMessage)
	}
	if ssoErr := ValidateGitHubSSO(e.Response); ssoErr != nil {
		return fmt.Errorf("%s\n%s", errStr, ssoErr)
	}
	if scopeErr := ValidateSufficientOAuthScopes(e.Response); scopeErr != nil {
		return fmt.Errorf("%s\n%s", errStr, scopeErr)
	}
	return errors.New(errStr)
}
// ValidateGitHubSSO checks for the challenge via `X-Github-Sso` header
func ValidateGitHubSSO(res *http.Response) error {
if res.StatusCode != 403 {
return nil
}
sso := res.Header.Get("X-Github-Sso")
if !strings.HasPrefix(sso, "required; url=") {
return nil
}
url := sso[strings.IndexByte(sso, '=')+1:]
return fmt.Errorf("You must authorize your token to access this organization:\n%s", url)
}
// ValidateSufficientOAuthScopes warns about insufficient OAuth scopes
func ValidateSufficientOAuthScopes(res *http.Response) error {
if res.StatusCode != 404 && res.StatusCode != 403 {
return nil
}
needScopes := newScopeSet(res.Header.Get("X-Accepted-Oauth-Scopes"))
if len(needScopes) == 0 && isGistWrite(res.Request) {
// compensate for a GitHub bug: gist APIs omit proper `X-Accepted-Oauth-Scopes` in responses
needScopes = newScopeSet("gist")
}
haveScopes := newScopeSet(res.Header.Get("X-Oauth-Scopes"))
if len(needScopes) == 0 || needScopes.Intersects(haveScopes) {
return nil
}
return fmt.Errorf("Your access token may have insufficient scopes. Visit %s://%s/settings/tokens\n"+
"to edit the 'hub' token and enable one of the following scopes: %s",
res.Request.URL.Scheme,
reverseNormalizeHost(res.Request.Host),
needScopes)
}
func isGistWrite(req *http.Request) bool {
if req.Method == "GET" {
return false
}
path := strings.TrimPrefix(req.URL.Path, "/v3")
return strings.HasPrefix(path, "/gists")
}
// scopeSet is a set of OAuth scope names.
type scopeSet map[string]struct{}

// String returns the scopes sorted and comma-joined, e.g. "gist, repo".
func (s scopeSet) String() string {
	scopes := make([]string, 0, len(s))
	for scope := range s {
		scopes = append(scopes, scope)
	}
	// sort.Strings is the idiomatic form of sort.Sort(sort.StringSlice(...)).
	sort.Strings(scopes)
	return strings.Join(scopes, ", ")
}

// Intersects reports whether the two sets share at least one scope.
func (s scopeSet) Intersects(other scopeSet) bool {
	for scope := range s {
		if _, found := other[scope]; found {
			return true
		}
	}
	return false
}

// newScopeSet parses a comma-separated scope list (as found in OAuth scope
// headers) into a scopeSet, trimming whitespace and skipping empty entries.
func newScopeSet(s string) scopeSet {
	scopes := scopeSet{}
	// strings.Split is equivalent to the previous SplitN(s, ",", -1).
	for _, scope := range strings.Split(s, ",") {
		if scope = strings.TrimSpace(scope); scope != "" {
			scopes[scope] = struct{}{}
		}
	}
	return scopes
}
// authTokenNote builds the "note" for a created OAuth authorization:
// "hub for user@host" with a numeric suffix when num > 1 (used to avoid
// duplicate-note 422s). The username comes from $USER, then $USERNAME,
// then `whoami` as a last resort.
func authTokenNote(num int) (string, error) {
	n := os.Getenv("USER")
	if n == "" {
		n = os.Getenv("USERNAME")
	}
	if n == "" {
		whoami := exec.Command("whoami")
		whoamiOut, err := whoami.Output()
		if err != nil {
			return "", err
		}
		n = strings.TrimSpace(string(whoamiOut))
	}
	h, err := os.Hostname()
	if err != nil {
		return "", err
	}
	if num > 1 {
		return fmt.Sprintf("hub for %s@%s %d", n, h, num), nil
	}
	return fmt.Sprintf("hub for %s@%s", n, h), nil
}
// perPage picks a page size for list requests: half again the requested
// limit (to leave room for client-side filtering), capped at max.
// A non-positive limit means "no limit" and yields max.
func perPage(limit, max int) int {
	if limit <= 0 {
		return max
	}
	padded := limit + limit/2
	if padded >= max {
		return max
	}
	return padded
}
// addQuery appends params to path as an encoded query string, using "&"
// when path already carries a query. Only string, nil, int, and bool
// values are encoded; values of other types are silently skipped.
func addQuery(path string, params map[string]interface{}) string {
	if len(params) == 0 {
		return path
	}
	query := url.Values{}
	for key, value := range params {
		switch v := value.(type) {
		case string:
			query.Add(key, v)
		case nil:
			query.Add(key, "")
		case int:
			query.Add(key, fmt.Sprintf("%d", v))
		case bool:
			query.Add(key, fmt.Sprintf("%v", v))
		}
	}
	joiner := "?"
	if strings.Contains(path, "?") {
		joiner = "&"
	}
	return path + joiner + query.Encode()
}
|
package sous
import (
"fmt"
"net/url"
"strings"
"github.com/samsalisbury/semv"
"golang.org/x/text/unicode/norm"
)
type (
	// SourceID identifies a specific snapshot of a body of source code,
	// including its location and version.
	SourceID struct {
		// Location is the repo/dir pair indicating the location of the source
		// code. Note that not all locations will be valid with all Versions.
		Location SourceLocation
		// Version identifies a specific version of the source code at Repo/Dir.
		Version semv.Version
	}
	// MissingRepo indicates that Sous couldn't determine which repo was
	// intended for this SourceLocation.
	MissingRepo struct {
		parsing string // the original string being parsed
	}
	// MissingVersion indicates that Sous couldn't determine what version was
	// intended for this SourceLocation.
	MissingVersion struct {
		repo    string // the repo that was successfully parsed
		parsing string // the original string being parsed
	}
	// MissingPath indicates that Sous couldn't determine what repo offset was
	// intended for this SourceLocation.
	MissingPath struct {
		repo    string // the repo that was successfully parsed
		parsing string // the original string being parsed
	}
	// IncludesVersion indicates that a canonical name unexpectedly contained
	// a version component.
	IncludesVersion struct {
		parsing string // the original string being parsed
	}
)
// DefaultDelim is the default delimiter between parts of the string
// representation of a SourceID or a SourceLocation.
const DefaultDelim = ","

// String returns "repo,version" (or "repo,version,dir" when an offset is
// set), joined with DefaultDelim so the format stays in lockstep with the
// parsing code instead of hard-coding ",".
func (sid SourceID) String() string {
	parts := []string{sid.Location.Repo, sid.Version.String()}
	if sid.Location.Dir != "" {
		parts = append(parts, sid.Location.Dir)
	}
	return strings.Join(parts, DefaultDelim)
}
// QueryValues encodes this SourceID as url.Values under the keys
// "repo", "offset", and "version".
func (sid SourceID) QueryValues() url.Values {
	values := make(url.Values)
	values.Set("repo", sid.Location.Repo)
	values.Set("offset", sid.Location.Dir)
	values.Set("version", sid.Version.String())
	return values
}
// Tag returns the version tag for this source ID in major.minor.patch form.
func (sid SourceID) Tag() string {
	return sid.Version.Format(semv.MajorMinorPatch)
}

// RevID returns the revision id for this SourceID.
// NOTE(review): this reads the semver build-metadata field; presumably the
// VCS revision is stored there by the tagging code — confirm at call sites.
func (sid SourceID) RevID() string {
	return sid.Version.Meta
}
// Equal tests the equality between this SourceID and another.
// NOTE(review): struct equality compares every Version field verbatim,
// which may differ from semv's own notion of version equality (cf. the
// "Correct SourceID.Equals logic" follow-up) — confirm against semv docs.
func (sid SourceID) Equal(o SourceID) bool {
	return sid == o
}
// Error describes an unexpected version component in a canonical name.
func (err *IncludesVersion) Error() string {
	return fmt.Sprintf("Three parts found (includes a version?) in a canonical name: %q", err.parsing)
}

// Error describes a source string lacking a repository component.
func (err *MissingRepo) Error() string {
	return fmt.Sprintf("No repository found in %q", err.parsing)
}

// Error describes a source string lacking a version component.
func (err *MissingVersion) Error() string {
	return fmt.Sprintf("No version found in %q (did find repo: %q)", err.parsing, err.repo)
}

// Error describes a source string lacking a repo-offset component.
func (err *MissingPath) Error() string {
	return fmt.Sprintf("No path found in %q (did find repo: %q)", err.parsing, err.repo)
}
// parseChunks splits a source string into its components. The string is
// NFC-normalized first. The delimiter is DefaultDelim unless the first
// character is non-alphabetic, in which case that character is consumed
// and used as the delimiter instead.
func parseChunks(sourceStr string) []string {
	if sourceStr == "" {
		return []string{}
	}
	normalized := norm.NFC.String(sourceStr)
	first := normalized[0]
	isAlpha := ('A' <= first && first <= 'Z') || ('a' <= first && first <= 'z')
	sep := DefaultDelim
	if !isAlpha {
		sep = normalized[:1]
		normalized = normalized[1:]
	}
	return strings.Split(normalized, sep)
}
// sourceIDFromChunks assembles a SourceID from parsed chunks in the order
// repo, version, [offset]. `source` is the original input, kept for error
// messages.
func sourceIDFromChunks(source string, chunks []string) (SourceID, error) {
	if len(chunks) == 0 || len(chunks[0]) == 0 {
		return SourceID{}, &MissingRepo{source}
	}
	repoURL := chunks[0]
	// Guard the version chunk: previously chunks[1] was indexed
	// unconditionally and panicked on input with no version part.
	if len(chunks) < 2 {
		return SourceID{}, &MissingVersion{repo: repoURL, parsing: source}
	}
	version, err := semv.Parse(chunks[1])
	if err != nil {
		return SourceID{}, err
	}
	repoOffset := ""
	if len(chunks) > 2 {
		repoOffset = chunks[2]
	}
	return SourceID{
		Location: SourceLocation{
			Dir:  repoOffset,
			Repo: repoURL,
		},
		Version: version,
	}, nil
}
// ParseSourceID parses an entire SourceID from its string representation.
func ParseSourceID(s string) (SourceID, error) {
	return sourceIDFromChunks(s, parseChunks(s))
}
// MustParseSourceID wraps ParseSourceID and panics if it returns an error.
func MustParseSourceID(s string) SourceID {
	id, err := ParseSourceID(s)
	if err != nil {
		panic(err)
	}
	return id
}
// NewSourceID attempts to create a new SourceID from strings representing
// the separate repo, offset, and version components.
func NewSourceID(repo, offset, version string) (SourceID, error) {
	parsed, parseErr := semv.Parse(version)
	if parseErr != nil {
		return SourceID{}, parseErr
	}
	loc := SourceLocation{Repo: repo, Dir: offset}
	return SourceID{Location: loc, Version: parsed}, nil
}
// MustNewSourceID wraps NewSourceID and panics if it returns an error.
func MustNewSourceID(repo, offset, version string) SourceID {
	id, err := NewSourceID(repo, offset, version)
	if err != nil {
		panic(err)
	}
	return id
}
Correct SourceID.Equal logic.
package sous
import (
"fmt"
"net/url"
"strings"
"github.com/samsalisbury/semv"
"golang.org/x/text/unicode/norm"
)
type (
	// SourceID identifies a specific snapshot of a body of source code,
	// including its location and version.
	SourceID struct {
		// Location is the repo/dir pair indicating the location of the source
		// code. Note that not all locations will be valid with all Versions.
		Location SourceLocation
		// Version identifies a specific version of the source code at Repo/Dir.
		Version semv.Version
	}

	// MissingRepo indicates that Sous couldn't determine which repo was intended for this SL.
	MissingRepo struct {
		parsing string // the original string being parsed
	}

	// MissingVersion indicates that Sous couldn't determine what version was intended for this SL.
	MissingVersion struct {
		repo    string // the repo that was successfully parsed
		parsing string // the original string being parsed
	}

	// MissingPath indicates that Sous couldn't determine what repo offset was intended for this SL.
	MissingPath struct {
		repo    string // the repo that was successfully parsed
		parsing string // the original string being parsed
	}

	// IncludesVersion indicates that a version part was found where a
	// two-part (repo + offset only) canonical name was expected.
	IncludesVersion struct {
		parsing string // the original string being parsed
	}
)

// DefaultDelim is the default delimiter between parts of the string
// representation of a SourceID or a SourceLocation.
const DefaultDelim = ","
// String returns the canonical string form of the SourceID:
// "repo,version" or "repo,version,dir". It joins with DefaultDelim so the
// rendered form stays consistent with the delimiter used when parsing,
// instead of hard-coding ",".
func (sid SourceID) String() string {
	parts := []string{sid.Location.Repo, sid.Version.String()}
	if sid.Location.Dir != "" {
		parts = append(parts, sid.Location.Dir)
	}
	return strings.Join(parts, DefaultDelim)
}
// QueryValues returns the url.Values (repo, offset, version) for this
// SourceID, suitable for building query strings.
func (sid SourceID) QueryValues() url.Values {
	return url.Values{
		"repo":    []string{sid.Location.Repo},
		"offset":  []string{sid.Location.Dir},
		"version": []string{sid.Version.String()},
	}
}
// Tag returns the version tag for this source ID, formatted as
// major.minor.patch (no prerelease or metadata).
func (sid SourceID) Tag() string {
	return sid.Version.Format(semv.MajorMinorPatch)
}

// RevID returns the revision id for this SourceID, carried in the semver
// metadata field.
func (sid SourceID) RevID() string {
	return sid.Version.Meta
}

// Equal tests the equality between this SourceID and another.
// Versions are compared semantically via semv's Equals; the remaining
// fields are then compared with a plain struct equality check.
func (sid SourceID) Equal(o SourceID) bool {
	if !sid.Version.Equals(o.Version) {
		return false
	}
	// Equalise the versions so we can do a simple equality test.
	// This is safe because sid and o are values not pointers.
	sid.Version = o.Version
	return sid == o
}
// Error implements error for IncludesVersion: three delimited parts were
// found where a two-part (no-version) canonical name was expected.
func (err *IncludesVersion) Error() string {
	return fmt.Sprintf("Three parts found (includes a version?) in a canonical name: %q", err.parsing)
}

// Error implements error for MissingRepo.
func (err *MissingRepo) Error() string {
	return fmt.Sprintf("No repository found in %q", err.parsing)
}

// Error implements error for MissingVersion.
func (err *MissingVersion) Error() string {
	return fmt.Sprintf("No version found in %q (did find repo: %q)", err.parsing, err.repo)
}

// Error implements error for MissingPath.
func (err *MissingPath) Error() string {
	return fmt.Sprintf("No path found in %q (did find repo: %q)", err.parsing, err.repo)
}

// parseChunks splits a SourceID/SourceLocation string into its parts.
// The input is NFC-normalized first. If the first byte is not an ASCII
// letter it is taken as a custom delimiter and stripped; otherwise
// DefaultDelim is used.
// NOTE(review): the custom delimiter is taken byte-wise (source[0:1]), so a
// multi-byte leading rune would be split mid-rune — confirm inputs are ASCII.
func parseChunks(sourceStr string) []string {
	if len(sourceStr) == 0 {
		return []string{}
	}
	source := norm.NFC.String(sourceStr)
	delim := DefaultDelim
	if !('A' <= source[0] && source[0] <= 'Z') && !('a' <= source[0] && source[0] <= 'z') {
		delim = source[0:1]
		source = source[1:]
	}
	return strings.Split(source, delim)
}
// sourceIDFromChunks assembles a SourceID from the delimiter-separated
// chunks of its string form: repo, version, and an optional repo offset.
//
// It returns MissingRepo when the repo chunk is absent or empty, and
// MissingVersion when no version chunk is present; version parse errors
// from semv are returned as-is.
func sourceIDFromChunks(source string, chunks []string) (SourceID, error) {
	// Guard the chunks[0] access: an empty input yields zero chunks, which
	// previously caused an index-out-of-range panic.
	if len(chunks) == 0 || len(chunks[0]) == 0 {
		return SourceID{}, &MissingRepo{source}
	}
	repoURL := chunks[0]
	// Guard the chunks[1] access: repo-only input previously panicked
	// instead of reporting the missing version.
	if len(chunks) < 2 {
		return SourceID{}, &MissingVersion{repo: repoURL, parsing: source}
	}
	version, err := semv.Parse(chunks[1])
	if err != nil {
		return SourceID{}, err
	}
	repoOffset := ""
	if len(chunks) > 2 {
		repoOffset = chunks[2]
	}
	return SourceID{
		Location: SourceLocation{
			Dir:  repoOffset,
			Repo: repoURL,
		},
		Version: version,
	}, nil
}
// ParseSourceID parses an entire SourceID from its delimited string form
// (see parseChunks for delimiter handling).
func ParseSourceID(s string) (SourceID, error) {
	chunks := parseChunks(s)
	return sourceIDFromChunks(s, chunks)
}

// MustParseSourceID wraps ParseSourceID and panics if it returns an error.
// Intended for package init and tests with known-good inputs.
func MustParseSourceID(s string) SourceID {
	sid, err := ParseSourceID(s)
	if err != nil {
		panic(err)
	}
	return sid
}

// NewSourceID attempts to create a new SourceID from strings representing the
// separate components. The version string must parse as a semver.
func NewSourceID(repo, offset, version string) (SourceID, error) {
	v, err := semv.Parse(version)
	if err != nil {
		return SourceID{}, err
	}
	return SourceID{
		Location: SourceLocation{
			Repo: repo, Dir: offset,
		},
		Version: v,
	}, nil
}

// MustNewSourceID wraps NewSourceID and panics if it returns an error.
// Intended for package init and tests with known-good inputs.
func MustNewSourceID(repo, offset, version string) SourceID {
	sid, err := NewSourceID(repo, offset, version)
	if err != nil {
		panic(err)
	}
	return sid
}
|
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"runtime"
"strings"
"time"
)
// unsetColumn is the sentinel type behind UnsetValue; it has no state and
// is recognized by value binding.
type unsetColumn struct{}

// UnsetValue represents a value used in a query binding that will be ignored by Cassandra.
//
// By setting a field to the unset value Cassandra will ignore the write completely.
// The main advantage is the ability to keep the same prepared statement even when you don't
// want to update some fields, where before you needed to make another prepared statement.
//
// UnsetValue is only available when using the version 4 of the protocol.
var UnsetValue = unsetColumn{}
// namedValue pairs a bind-parameter name with the value bound to it.
type namedValue struct {
	name  string
	value interface{}
}

// NamedValue produce a value which will bind to the named parameter in a query
func NamedValue(name string, value interface{}) interface{} {
	nv := new(namedValue)
	nv.name = name
	nv.value = value
	return nv
}
// Protocol-level constants: the high bit of the version byte carries the
// frame direction, the low 7 bits the protocol version number.
const (
	protoDirectionMask = 0x80
	protoVersionMask   = 0x7F
	protoVersion1      = 0x01
	protoVersion2      = 0x02
	protoVersion3      = 0x03
	protoVersion4      = 0x04
	protoVersion5      = 0x05

	maxFrameSize = 256 * 1024 * 1024
)

// protoVersion is the raw version byte from a frame header, direction bit
// included.
type protoVersion byte

// request reports whether the direction bit marks a client request.
func (p protoVersion) request() bool {
	return p&protoDirectionMask == 0
}

// response reports whether the direction bit marks a server response.
func (p protoVersion) response() bool {
	return p&protoDirectionMask != 0
}

// version strips the direction bit, leaving the numeric protocol version.
func (p protoVersion) version() byte {
	return byte(p) & protoVersionMask
}

// String renders the numeric version and direction for debugging.
func (p protoVersion) String() string {
	dir := "REQ"
	if !p.request() {
		dir = "RESP"
	}
	return fmt.Sprintf("[version=%d direction=%s]", p.version(), dir)
}
// frameOp is the opcode byte of a frame header.
type frameOp byte

const (
	// header ops
	opError         frameOp = 0x00
	opStartup       frameOp = 0x01
	opReady         frameOp = 0x02
	opAuthenticate  frameOp = 0x03
	opOptions       frameOp = 0x05
	opSupported     frameOp = 0x06
	opQuery         frameOp = 0x07
	opResult        frameOp = 0x08
	opPrepare       frameOp = 0x09
	opExecute       frameOp = 0x0A
	opRegister      frameOp = 0x0B
	opEvent         frameOp = 0x0C
	opBatch         frameOp = 0x0D
	opAuthChallenge frameOp = 0x0E
	opAuthResponse  frameOp = 0x0F
	opAuthSuccess   frameOp = 0x10
)

// frameOpNames maps each known opcode to its wire-protocol name.
var frameOpNames = map[frameOp]string{
	opError:         "ERROR",
	opStartup:       "STARTUP",
	opReady:         "READY",
	opAuthenticate:  "AUTHENTICATE",
	opOptions:       "OPTIONS",
	opSupported:     "SUPPORTED",
	opQuery:         "QUERY",
	opResult:        "RESULT",
	opPrepare:       "PREPARE",
	opExecute:       "EXECUTE",
	opRegister:      "REGISTER",
	opEvent:         "EVENT",
	opBatch:         "BATCH",
	opAuthChallenge: "AUTH_CHALLENGE",
	opAuthResponse:  "AUTH_RESPONSE",
	opAuthSuccess:   "AUTH_SUCCESS",
}

// String returns the opcode's protocol name, or UNKNOWN_OP_<n> for
// unrecognized values.
func (f frameOp) String() string {
	if name, ok := frameOpNames[f]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_OP_%d", f)
}
const (
	// result kind — the first int of a RESULT frame body selects which
	// parser handles the rest (see parseResultFrame).
	resultKindVoid          = 1
	resultKindRows          = 2
	resultKindKeyspace      = 3
	resultKindPrepared      = 4
	resultKindSchemaChanged = 5

	// rows flags — metadata flags in RESULT/Rows frames.
	flagGlobalTableSpec int = 0x01
	flagHasMorePages    int = 0x02
	flagNoMetaData      int = 0x04

	// query flags — per-query flags written by writeQueryParams.
	flagValues                byte = 0x01
	flagSkipMetaData          byte = 0x02
	flagPageSize              byte = 0x04
	flagWithPagingState       byte = 0x08
	flagWithSerialConsistency byte = 0x10
	flagDefaultTimestamp      byte = 0x20
	flagWithNameValues        byte = 0x40

	// header flags — frame-header flag byte.
	flagCompress      byte = 0x01
	flagTracing       byte = 0x02
	flagCustomPayload byte = 0x04
	flagWarning       byte = 0x08
)
// Consistency is the Cassandra consistency level used for queries.
type Consistency uint16

const (
	Any         Consistency = 0x00
	One         Consistency = 0x01
	Two         Consistency = 0x02
	Three       Consistency = 0x03
	Quorum      Consistency = 0x04
	All         Consistency = 0x05
	LocalQuorum Consistency = 0x06
	EachQuorum  Consistency = 0x07
	LocalOne    Consistency = 0x0A
)

// consistencyNames maps each known level to its canonical protocol name.
var consistencyNames = map[Consistency]string{
	Any:         "ANY",
	One:         "ONE",
	Two:         "TWO",
	Three:       "THREE",
	Quorum:      "QUORUM",
	All:         "ALL",
	LocalQuorum: "LOCAL_QUORUM",
	EachQuorum:  "EACH_QUORUM",
	LocalOne:    "LOCAL_ONE",
}

// String returns the level's protocol name, or UNKNOWN_CONS_0x<hex> for
// unrecognized values.
func (c Consistency) String() string {
	if name, ok := consistencyNames[c]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_CONS_0x%x", uint16(c))
}
// MarshalText implements encoding.TextMarshaler; it emits the same name
// String does and never fails.
func (c Consistency) MarshalText() (text []byte, err error) {
	return []byte(c.String()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler. It accepts exactly the
// upper-case names produced by String and returns an error for anything else.
func (c *Consistency) UnmarshalText(text []byte) error {
	lookup := map[string]Consistency{
		"ANY":          Any,
		"ONE":          One,
		"TWO":          Two,
		"THREE":        Three,
		"QUORUM":       Quorum,
		"ALL":          All,
		"LOCAL_QUORUM": LocalQuorum,
		"EACH_QUORUM":  EachQuorum,
		"LOCAL_ONE":    LocalOne,
	}
	v, ok := lookup[string(text)]
	if !ok {
		return fmt.Errorf("invalid consistency %q", string(text))
	}
	*c = v
	return nil
}
// ParseConsistency parses an (case-insensitive) consistency-level name and
// panics if the name is not recognized. Use ParseConsistencyWrapper for an
// error-returning variant.
func ParseConsistency(s string) Consistency {
	var c Consistency
	if err := c.UnmarshalText([]byte(strings.ToUpper(s))); err != nil {
		panic(err)
	}
	return c
}
// ParseConsistencyWrapper wraps gocql.ParseConsistency to provide an err
// return instead of a panic
func ParseConsistencyWrapper(s string) (consistency Consistency, err error) {
	err = consistency.UnmarshalText([]byte(strings.ToUpper(s)))
	return
}

// MustParseConsistency parses a consistency-level name, panicking on an
// unrecognized name; the returned error is always nil. The error return is
// kept only for backward compatibility.
// Deprecated: use ParseConsistency if you want a panic on parse error.
func MustParseConsistency(s string) (Consistency, error) {
	c, err := ParseConsistencyWrapper(s)
	if err != nil {
		panic(err)
	}
	return c, nil
}
// SerialConsistency is the consistency level used for the serial phase of
// lightweight transactions.
type SerialConsistency uint16

const (
	Serial      SerialConsistency = 0x08
	LocalSerial SerialConsistency = 0x09
)

// String returns the level's protocol name, or UNKNOWN_SERIAL_CONS_0x<hex>
// for unrecognized values.
func (s SerialConsistency) String() string {
	switch s {
	case Serial:
		return "SERIAL"
	case LocalSerial:
		return "LOCAL_SERIAL"
	}
	return fmt.Sprintf("UNKNOWN_SERIAL_CONS_0x%x", uint16(s))
}
// MarshalText implements encoding.TextMarshaler; it emits the same name
// String does and never fails.
func (s SerialConsistency) MarshalText() (text []byte, err error) {
	return []byte(s.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler. It accepts exactly
// "SERIAL" and "LOCAL_SERIAL" and errors on anything else.
func (s *SerialConsistency) UnmarshalText(text []byte) error {
	switch string(text) {
	case "SERIAL":
		*s = Serial
	case "LOCAL_SERIAL":
		*s = LocalSerial
	default:
		return fmt.Errorf("invalid consistency %q", string(text))
	}
	return nil
}
const (
	// apacheCassandraTypePrefix is stripped from fully-qualified marshal
	// class names when mapping them to native types.
	apacheCassandraTypePrefix = "org.apache.cassandra.db.marshal."
)

var (
	// ErrFrameTooBig is returned when a frame body exceeds maxFrameSize in
	// either direction (read or write).
	ErrFrameTooBig = errors.New("frame length is bigger than the maximum allowed")
)

// maxFrameHeaderSize is the largest header size across supported protocol
// versions (9 bytes for v3+; v1/v2 use 8).
const maxFrameHeaderSize = 9
// writeInt stores n into p[0:4] in big-endian byte order.
func writeInt(p []byte, n int32) {
	v := uint32(n)
	p[0] = byte(v >> 24)
	p[1] = byte(v >> 16)
	p[2] = byte(v >> 8)
	p[3] = byte(v)
}
// readInt decodes a big-endian int32 from p[0:4].
func readInt(p []byte) int32 {
	var v int32
	for _, b := range p[:4] {
		v = v<<8 | int32(b)
	}
	return v
}
// writeShort stores n into p[0:2] in big-endian byte order.
func writeShort(p []byte, n uint16) {
	p[0] = byte(n >> 8)
	p[1] = byte(n & 0xFF)
}
// readShort decodes a big-endian uint16 from p[0:2].
func readShort(p []byte) uint16 {
	return uint16(p[0])<<8 + uint16(p[1])
}
// frameHeader is the decoded fixed-size header of a wire frame, plus
// optional trailing metadata (custom payload, warnings) parsed from the body.
type frameHeader struct {
	version protoVersion
	flags   byte
	stream  int
	op      frameOp
	length  int

	// customPayload is set when flagCustomPayload is present (v4+).
	customPayload map[string][]byte
	// warnings is set when flagWarning is present (v4+).
	warnings []string
}

// String renders the header fields for debugging.
func (f frameHeader) String() string {
	return fmt.Sprintf("[header version=%s flags=0x%x stream=%d op=%s length=%d]", f.version, f.flags, f.stream, f.op, f.length)
}

// Header lets frameHeader satisfy the frame interface by embedding.
func (f frameHeader) Header() frameHeader {
	return f
}

// defaultBufSize is the initial capacity of a framer's read/write buffers.
const defaultBufSize = 128

// a framer is responsible for reading, writing and parsing frames on a single stream
type framer struct {
	r io.Reader
	w io.Writer

	proto byte
	// flags are for outgoing flags, enabling compression and tracing etc
	flags    byte
	compres  Compressor
	headSize int
	// if this frame was read then the header will be here
	header *frameHeader
	// if tracing flag is set this is not nil
	traceID []byte

	// holds a ref to the whole byte slice for rbuf so that it can be reset to
	// 0 after a read.
	readBuffer []byte

	rbuf []byte
	wbuf []byte
}
// newFramer builds a framer for one stream over r/w. The direction bit is
// masked off version; header size is 9 bytes for v3+ and 8 otherwise. A
// non-nil compressor turns on the compress flag for outgoing frames.
func newFramer(r io.Reader, w io.Writer, compressor Compressor, version byte) *framer {
	version &= protoVersionMask

	var flags byte
	if compressor != nil {
		flags |= flagCompress
	}

	headSize := 8
	if version > protoVersion2 {
		headSize = 9
	}

	readBuf := make([]byte, defaultBufSize)
	writeBuf := make([]byte, defaultBufSize)
	// header and traceID start out nil; they are populated per-frame.
	return &framer{
		r:          r,
		w:          w,
		proto:      version,
		flags:      flags,
		compres:    compressor,
		headSize:   headSize,
		readBuffer: readBuf,
		rbuf:       readBuf[:0],
		wbuf:       writeBuf[:0],
	}
}
// frame is any parsed frame; all concrete frames embed frameHeader, which
// supplies Header.
type frame interface {
	Header() frameHeader
}

// readHeader reads and decodes one frame header from r into p, which must
// have room for maxFrameHeaderSize bytes. It reads the version byte first to
// learn the header size (8 bytes pre-v3, 9 from v3 on), then the remainder.
// NOTE(review): versions outside 1..4 are rejected here even though
// protoVersion5 is declared above — confirm v5 support is intentionally off.
func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
	_, err = io.ReadFull(r, p[:1])
	if err != nil {
		return frameHeader{}, err
	}

	version := p[0] & protoVersionMask

	if version < protoVersion1 || version > protoVersion4 {
		return frameHeader{}, fmt.Errorf("gocql: unsupported protocol response version: %d", version)
	}

	headSize := 9
	if version < protoVersion3 {
		headSize = 8
	}

	_, err = io.ReadFull(r, p[1:headSize])
	if err != nil {
		return frameHeader{}, err
	}

	p = p[:headSize]

	head.version = protoVersion(p[0])
	head.flags = p[1]

	if version > protoVersion2 {
		// v3+: 2-byte stream id, opcode, 4-byte length.
		if len(p) != 9 {
			return frameHeader{}, fmt.Errorf("not enough bytes to read header require 9 got: %d", len(p))
		}
		head.stream = int(int16(p[2])<<8 | int16(p[3]))
		head.op = frameOp(p[4])
		head.length = int(readInt(p[5:]))
	} else {
		// v1/v2: 1-byte stream id, opcode, 4-byte length.
		if len(p) != 8 {
			return frameHeader{}, fmt.Errorf("not enough bytes to read header require 8 got: %d", len(p))
		}
		head.stream = int(int8(p[2]))
		head.op = frameOp(p[3])
		head.length = int(readInt(p[4:]))
	}

	return head, nil
}

// explicitly enables tracing for the framers outgoing requests
func (f *framer) trace() {
	f.flags |= flagTracing
}
// readFrame reads a frame body from the wire into the framer's buffer,
// reusing readBuffer when it is large enough. Oversized frames are drained
// from the connection (so it stays usable) and reported as ErrFrameTooBig.
// A compressed body is decompressed in place via the configured compressor.
func (f *framer) readFrame(head *frameHeader) error {
	if head.length < 0 {
		return fmt.Errorf("frame body length can not be less than 0: %d", head.length)
	} else if head.length > maxFrameSize {
		// need to free up the connection to be used again
		_, err := io.CopyN(ioutil.Discard, f.r, int64(head.length))
		if err != nil {
			return fmt.Errorf("error whilst trying to discard frame with invalid length: %v", err)
		}
		return ErrFrameTooBig
	}

	// Reuse the existing buffer when possible; otherwise grow it (and keep
	// the larger buffer for subsequent reads).
	if cap(f.readBuffer) >= head.length {
		f.rbuf = f.readBuffer[:head.length]
	} else {
		f.readBuffer = make([]byte, head.length)
		f.rbuf = f.readBuffer
	}

	// assume the underlying reader takes care of timeouts and retries
	n, err := io.ReadFull(f.r, f.rbuf)
	if err != nil {
		return fmt.Errorf("unable to read frame body: read %d/%d bytes: %v", n, head.length, err)
	}

	if head.flags&flagCompress == flagCompress {
		if f.compres == nil {
			return NewErrProtocol("no compressor available with compressed frame body")
		}

		f.rbuf, err = f.compres.Decode(f.rbuf)
		if err != nil {
			return err
		}
	}

	f.header = head
	return nil
}
// parseFrame dispatches on the opcode of the previously-read header and
// parses the body in rbuf into a typed frame. The read helpers panic on
// malformed input; the deferred recover converts those panics into the
// returned error. Runtime errors are re-panicked.
// NOTE(review): a recovered value that is not an error re-panics via the
// type assertion r.(error) — confirm all read-helper panics carry errors.
func (f *framer) parseFrame() (frame frame, err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			err = r.(error)
		}
	}()

	if f.header.version.request() {
		return nil, NewErrProtocol("got a request frame from server: %v", f.header.version)
	}

	// Optional body prefixes, in wire order: trace id, warnings, custom payload.
	if f.header.flags&flagTracing == flagTracing {
		f.readTrace()
	}

	if f.header.flags&flagWarning == flagWarning {
		f.header.warnings = f.readStringList()
	}

	if f.header.flags&flagCustomPayload == flagCustomPayload {
		f.header.customPayload = f.readBytesMap()
	}

	// assumes that the frame body has been read into rbuf
	switch f.header.op {
	case opError:
		frame = f.parseErrorFrame()
	case opReady:
		frame = f.parseReadyFrame()
	case opResult:
		frame, err = f.parseResultFrame()
	case opSupported:
		frame = f.parseSupportedFrame()
	case opAuthenticate:
		frame = f.parseAuthenticateFrame()
	case opAuthChallenge:
		frame = f.parseAuthChallengeFrame()
	case opAuthSuccess:
		frame = f.parseAuthSuccessFrame()
	case opEvent:
		frame = f.parseEventFrame()
	default:
		return nil, NewErrProtocol("unknown op in frame header: %s", f.header.op)
	}

	return
}
// parseErrorFrame decodes an ERROR frame body: a 4-byte error code, a
// message, then code-specific detail fields read in strict wire order.
// Unknown codes panic (converted to an error by parseFrame's recover).
func (f *framer) parseErrorFrame() frame {
	code := f.readInt()
	msg := f.readString()

	errD := errorFrame{
		frameHeader: *f.header,
		code:        code,
		message:     msg,
	}

	switch code {
	case errUnavailable:
		cl := f.readConsistency()
		required := f.readInt()
		alive := f.readInt()
		return &RequestErrUnavailable{
			errorFrame:  errD,
			Consistency: cl,
			Required:    required,
			Alive:       alive,
		}
	case errWriteTimeout:
		cl := f.readConsistency()
		received := f.readInt()
		blockfor := f.readInt()
		writeType := f.readString()
		return &RequestErrWriteTimeout{
			errorFrame:  errD,
			Consistency: cl,
			Received:    received,
			BlockFor:    blockfor,
			WriteType:   writeType,
		}
	case errReadTimeout:
		cl := f.readConsistency()
		received := f.readInt()
		blockfor := f.readInt()
		dataPresent := f.readByte()
		return &RequestErrReadTimeout{
			errorFrame:  errD,
			Consistency: cl,
			Received:    received,
			BlockFor:    blockfor,
			DataPresent: dataPresent,
		}
	case errAlreadyExists:
		ks := f.readString()
		table := f.readString()
		return &RequestErrAlreadyExists{
			errorFrame: errD,
			Keyspace:   ks,
			Table:      table,
		}
	case errUnprepared:
		stmtId := f.readShortBytes()
		return &RequestErrUnprepared{
			errorFrame:  errD,
			StatementId: copyBytes(stmtId), // defensively copy
		}
	case errReadFailure:
		res := &RequestErrReadFailure{
			errorFrame: errD,
		}
		res.Consistency = f.readConsistency()
		res.Received = f.readInt()
		res.BlockFor = f.readInt()
		// NOTE(review): no NumFailures read here, unlike errWriteFailure —
		// confirm against the protocol spec for the targeted version.
		res.DataPresent = f.readByte() != 0
		return res
	case errWriteFailure:
		res := &RequestErrWriteFailure{
			errorFrame: errD,
		}
		res.Consistency = f.readConsistency()
		res.Received = f.readInt()
		res.BlockFor = f.readInt()
		res.NumFailures = f.readInt()
		res.WriteType = f.readString()
		return res
	case errFunctionFailure:
		// NOTE(review): returned by value, unlike the pointer returns above —
		// confirm callers type-assert on the value type.
		res := RequestErrFunctionFailure{
			errorFrame: errD,
		}
		res.Keyspace = f.readString()
		res.Function = f.readString()
		res.ArgTypes = f.readStringList()
		return res
	case errInvalid, errBootstrapping, errConfig, errCredentials, errOverloaded,
		errProtocol, errServer, errSyntax, errTruncate, errUnauthorized:
		// TODO(zariel): we should have some distinct types for these errors
		return errD
	default:
		panic(fmt.Errorf("unknown error code: 0x%x", errD.code))
	}
}
// writeHeader resets wbuf and writes a frame header: version, flags, stream
// id (2 bytes for v3+, 1 byte otherwise), opcode, and a zeroed 4-byte length
// placeholder that setLength fills in later.
func (f *framer) writeHeader(flags byte, op frameOp, stream int) {
	f.wbuf = f.wbuf[:0]
	f.wbuf = append(f.wbuf,
		f.proto,
		flags,
	)

	if f.proto > protoVersion2 {
		f.wbuf = append(f.wbuf,
			byte(stream>>8),
			byte(stream),
		)
	} else {
		f.wbuf = append(f.wbuf,
			byte(stream),
		)
	}

	// pad out length
	f.wbuf = append(f.wbuf,
		byte(op),
		0,
		0,
		0,
		0,
	)
}
// setLength back-patches the 4-byte big-endian body length into the header
// placeholder written by writeHeader (offset 5 for v3+, 4 for v1/v2).
func (f *framer) setLength(length int) {
	off := 4
	if f.proto > protoVersion2 {
		off = 5
	}

	n := uint32(length)
	f.wbuf[off] = byte(n >> 24)
	f.wbuf[off+1] = byte(n >> 16)
	f.wbuf[off+2] = byte(n >> 8)
	f.wbuf[off+3] = byte(n)
}
// finishWrite completes an outgoing frame: optionally compresses the body,
// back-patches the length, and writes the whole buffer. Frames over
// maxFrameSize are dropped (buffer reset to its default size) and reported
// as ErrFrameTooBig.
func (f *framer) finishWrite() error {
	if len(f.wbuf) > maxFrameSize {
		// huge app frame, lets remove it so it doesn't bloat the heap
		f.wbuf = make([]byte, defaultBufSize)
		return ErrFrameTooBig
	}

	// wbuf[1] is the flags byte written by writeHeader.
	if f.wbuf[1]&flagCompress == flagCompress {
		if f.compres == nil {
			panic("compress flag set with no compressor")
		}

		// TODO: only compress frames which are big enough
		compressed, err := f.compres.Encode(f.wbuf[f.headSize:])
		if err != nil {
			return err
		}

		f.wbuf = append(f.wbuf[:f.headSize], compressed...)
	}
	length := len(f.wbuf) - f.headSize
	f.setLength(length)

	_, err := f.w.Write(f.wbuf)
	if err != nil {
		return err
	}

	return nil
}

// readTrace reads the 16-byte trace UUID that prefixes a traced response body.
func (f *framer) readTrace() {
	f.traceID = f.readUUID().Bytes()
}
// readyFrame is the (bodyless) READY response.
type readyFrame struct {
	frameHeader
}

func (f *framer) parseReadyFrame() frame {
	return &readyFrame{
		frameHeader: *f.header,
	}
}

// supportedFrame carries the SUPPORTED response: option name -> values.
type supportedFrame struct {
	frameHeader

	supported map[string][]string
}

// TODO: if we move the body buffer onto the frameHeader then we only need a single
// framer, and can move the methods onto the header.
func (f *framer) parseSupportedFrame() frame {
	return &supportedFrame{
		frameHeader: *f.header,

		supported: f.readStringMultiMap(),
	}
}

// writeStartupFrame is the outgoing STARTUP request with its option map.
type writeStartupFrame struct {
	opts map[string]string
}

func (w writeStartupFrame) String() string {
	return fmt.Sprintf("[startup opts=%+v]", w.opts)
}

func (w *writeStartupFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeStartupFrame(streamID, w.opts)
}

// writeStartupFrame writes a STARTUP frame; the compress flag is cleared
// because STARTUP itself is never compressed.
func (f *framer) writeStartupFrame(streamID int, options map[string]string) error {
	f.writeHeader(f.flags&^flagCompress, opStartup, streamID)
	f.writeStringMap(options)

	return f.finishWrite()
}

// writePrepareFrame is the outgoing PREPARE request for one statement.
type writePrepareFrame struct {
	statement string
}

func (w *writePrepareFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writePrepareFrame(streamID, w.statement)
}

func (f *framer) writePrepareFrame(stream int, statement string) error {
	f.writeHeader(f.flags, opPrepare, stream)
	f.writeLongString(statement)
	return f.finishWrite()
}
// readTypeInfo decodes one column type from the body: a short type id,
// then type-specific payload (custom class name, tuple/UDT element types,
// collection key/element types), recursing for nested types.
func (f *framer) readTypeInfo() TypeInfo {
	// TODO: factor this out so the same code paths can be used to parse custom
	// types and other types, as much of the logic will be duplicated.
	id := f.readShort()

	simple := NativeType{
		proto: f.proto,
		typ:   Type(id),
	}

	if simple.typ == TypeCustom {
		simple.custom = f.readString()
		// Some "custom" classes are really well-known marshal types; map them
		// back to the native type when recognized.
		if cassType := getApacheCassandraType(simple.custom); cassType != TypeCustom {
			simple.typ = cassType
		}
	}

	switch simple.typ {
	case TypeTuple:
		n := f.readShort()
		tuple := TupleTypeInfo{
			NativeType: simple,
			Elems:      make([]TypeInfo, n),
		}

		for i := 0; i < int(n); i++ {
			tuple.Elems[i] = f.readTypeInfo()
		}

		return tuple

	case TypeUDT:
		udt := UDTTypeInfo{
			NativeType: simple,
		}
		udt.KeySpace = f.readString()
		udt.Name = f.readString()

		n := f.readShort()
		udt.Elements = make([]UDTField, n)
		for i := 0; i < int(n); i++ {
			field := &udt.Elements[i]
			field.Name = f.readString()
			field.Type = f.readTypeInfo()
		}

		return udt
	case TypeMap, TypeList, TypeSet:
		collection := CollectionType{
			NativeType: simple,
		}

		// Maps carry a key type before the element type.
		if simple.typ == TypeMap {
			collection.Key = f.readTypeInfo()
		}

		collection.Elem = f.readTypeInfo()

		return collection
	}

	return simple
}
// preparedMetadata is resultMetadata extended with the partition-key column
// indexes that protocol v4+ includes in PREPARED results.
type preparedMetadata struct {
	resultMetadata

	// proto v4+
	pkeyColumns []int
}

func (r preparedMetadata) String() string {
	return fmt.Sprintf("[prepared flags=0x%x pkey=%v paging_state=% X columns=%v col_count=%d actual_col_count=%d]", r.flags, r.pkeyColumns, r.pagingState, r.columns, r.colCount, r.actualColCount)
}

// parsePreparedMetadata decodes the request metadata of a PREPARED result:
// flags, column count, v4+ pkey indexes, optional paging state, then the
// column specs (unless flagNoMetaData).
func (f *framer) parsePreparedMetadata() preparedMetadata {
	// TODO: deduplicate this from parseMetadata
	meta := preparedMetadata{}

	meta.flags = f.readInt()
	meta.colCount = f.readInt()
	if meta.colCount < 0 {
		panic(fmt.Errorf("received negative column count: %d", meta.colCount))
	}
	meta.actualColCount = meta.colCount

	if f.proto >= protoVersion4 {
		pkeyCount := f.readInt()
		pkeys := make([]int, pkeyCount)
		for i := 0; i < pkeyCount; i++ {
			pkeys[i] = int(f.readShort())
		}
		meta.pkeyColumns = pkeys
	}

	if meta.flags&flagHasMorePages == flagHasMorePages {
		meta.pagingState = copyBytes(f.readBytes())
	}

	if meta.flags&flagNoMetaData == flagNoMetaData {
		return meta
	}

	var keyspace, table string
	globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec
	if globalSpec {
		keyspace = f.readString()
		table = f.readString()
	}

	var cols []ColumnInfo
	if meta.colCount < 1000 {
		// preallocate columninfo to avoid excess copying
		cols = make([]ColumnInfo, meta.colCount)
		for i := 0; i < meta.colCount; i++ {
			f.readCol(&cols[i], &meta.resultMetadata, globalSpec, keyspace, table)
		}
	} else {
		// use append, huge number of columns usually indicates a corrupt frame or
		// just a huge row.
		for i := 0; i < meta.colCount; i++ {
			var col ColumnInfo
			f.readCol(&col, &meta.resultMetadata, globalSpec, keyspace, table)
			cols = append(cols, col)
		}
	}

	meta.columns = cols

	return meta
}
// resultMetadata describes the columns of a rows result.
type resultMetadata struct {
	flags int

	// only if flagPageState
	pagingState []byte

	columns  []ColumnInfo
	colCount int

	// this is a count of the total number of columns which can be scanned,
	// it is at minimum len(columns) but may be larger, for instance when a column
	// is a UDT or tuple.
	actualColCount int
}

func (r resultMetadata) String() string {
	return fmt.Sprintf("[metadata flags=0x%x paging_state=% X columns=%v]", r.flags, r.pagingState, r.columns)
}

// readCol decodes one column spec into col. When globalSpec is set, the
// keyspace/table come from the shared values instead of the wire. Tuple
// columns bump actualColCount by their element count.
func (f *framer) readCol(col *ColumnInfo, meta *resultMetadata, globalSpec bool, keyspace, table string) {
	if !globalSpec {
		col.Keyspace = f.readString()
		col.Table = f.readString()
	} else {
		col.Keyspace = keyspace
		col.Table = table
	}

	col.Name = f.readString()
	col.TypeInfo = f.readTypeInfo()
	switch v := col.TypeInfo.(type) {
	// maybe also UDT
	case TupleTypeInfo:
		// -1 because we already included the tuple column
		meta.actualColCount += len(v.Elems) - 1
	}
}

// parseResultMetadata decodes rows-result metadata: flags, column count,
// optional paging state, then column specs (unless flagNoMetaData).
func (f *framer) parseResultMetadata() resultMetadata {
	var meta resultMetadata

	meta.flags = f.readInt()
	meta.colCount = f.readInt()
	if meta.colCount < 0 {
		panic(fmt.Errorf("received negative column count: %d", meta.colCount))
	}
	meta.actualColCount = meta.colCount

	if meta.flags&flagHasMorePages == flagHasMorePages {
		meta.pagingState = copyBytes(f.readBytes())
	}

	if meta.flags&flagNoMetaData == flagNoMetaData {
		return meta
	}

	var keyspace, table string
	globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec
	if globalSpec {
		keyspace = f.readString()
		table = f.readString()
	}

	var cols []ColumnInfo
	if meta.colCount < 1000 {
		// preallocate columninfo to avoid excess copying
		cols = make([]ColumnInfo, meta.colCount)
		for i := 0; i < meta.colCount; i++ {
			f.readCol(&cols[i], &meta, globalSpec, keyspace, table)
		}
	} else {
		// use append, huge number of columns usually indicates a corrupt frame or
		// just a huge row.
		for i := 0; i < meta.colCount; i++ {
			var col ColumnInfo
			f.readCol(&col, &meta, globalSpec, keyspace, table)
			cols = append(cols, col)
		}
	}

	meta.columns = cols

	return meta
}
// resultVoidFrame is a RESULT frame with kind Void (no payload).
type resultVoidFrame struct {
	frameHeader
}

func (f *resultVoidFrame) String() string {
	return "[result_void]"
}

// parseResultFrame dispatches on the result-kind int at the start of a
// RESULT frame body.
func (f *framer) parseResultFrame() (frame, error) {
	kind := f.readInt()

	switch kind {
	case resultKindVoid:
		return &resultVoidFrame{frameHeader: *f.header}, nil
	case resultKindRows:
		return f.parseResultRows(), nil
	case resultKindKeyspace:
		return f.parseResultSetKeyspace(), nil
	case resultKindPrepared:
		return f.parseResultPrepared(), nil
	case resultKindSchemaChanged:
		return f.parseResultSchemaChange(), nil
	}

	return nil, NewErrProtocol("unknown result kind: %x", kind)
}

// resultRowsFrame is a Rows result: metadata plus the (unparsed) row count.
type resultRowsFrame struct {
	frameHeader

	meta resultMetadata
	// dont parse the rows here as we only need to do it once
	numRows int
}

func (f *resultRowsFrame) String() string {
	return fmt.Sprintf("[result_rows meta=%v]", f.meta)
}

// parseResultRows reads the metadata and row count; the row data itself is
// left in rbuf for later iteration.
// NOTE(review): unlike the sibling parsers, frameHeader is not copied into
// the result here — confirm callers do not rely on resultRowsFrame.Header().
func (f *framer) parseResultRows() frame {
	result := &resultRowsFrame{}
	result.meta = f.parseResultMetadata()

	result.numRows = f.readInt()
	if result.numRows < 0 {
		panic(fmt.Errorf("invalid row_count in result frame: %d", result.numRows))
	}

	return result
}
// resultKeyspaceFrame is a SetKeyspace result carrying the new keyspace name.
type resultKeyspaceFrame struct {
	frameHeader
	keyspace string
}

func (r *resultKeyspaceFrame) String() string {
	return fmt.Sprintf("[result_keyspace keyspace=%s]", r.keyspace)
}

func (f *framer) parseResultSetKeyspace() frame {
	return &resultKeyspaceFrame{
		frameHeader: *f.header,
		keyspace:    f.readString(),
	}
}

// resultPreparedFrame is a Prepared result: the statement id, the bind
// (request) metadata, and — from protocol v2 on — the result metadata.
type resultPreparedFrame struct {
	frameHeader

	preparedID []byte
	reqMeta    preparedMetadata
	respMeta   resultMetadata
}

func (f *framer) parseResultPrepared() frame {
	frame := &resultPreparedFrame{
		frameHeader: *f.header,
		preparedID:  f.readShortBytes(),
		reqMeta:     f.parsePreparedMetadata(),
	}

	// v1 has no response metadata section.
	if f.proto < protoVersion2 {
		return frame
	}

	frame.respMeta = f.parseResultMetadata()

	return frame
}
// schemaChangeKeyspace reports a schema change scoped to a keyspace.
type schemaChangeKeyspace struct {
	frameHeader

	change   string
	keyspace string
}

func (f schemaChangeKeyspace) String() string {
	return fmt.Sprintf("[event schema_change_keyspace change=%q keyspace=%q]", f.change, f.keyspace)
}

// schemaChangeTable reports a schema change to a table within a keyspace.
type schemaChangeTable struct {
	frameHeader

	change   string
	keyspace string
	object   string
}

func (f schemaChangeTable) String() string {
	return fmt.Sprintf("[event schema_change change=%q keyspace=%q object=%q]", f.change, f.keyspace, f.object)
}

// schemaChangeType reports a schema change to a user-defined type.
type schemaChangeType struct {
	frameHeader

	change   string
	keyspace string
	object   string
}

// schemaChangeFunction reports a schema change to a function (name + arg types).
type schemaChangeFunction struct {
	frameHeader

	change   string
	keyspace string
	name     string
	args     []string
}

// schemaChangeAggregate reports a schema change to an aggregate (name + arg types).
type schemaChangeAggregate struct {
	frameHeader

	change   string
	keyspace string
	name     string
	args     []string
}
// parseResultSchemaChange decodes a SchemaChange result/event. Pre-v3 the
// body is change/keyspace/table (empty table means keyspace scope); v3+
// adds an explicit target discriminator (KEYSPACE/TABLE/TYPE/FUNCTION/
// AGGREGATE). Unknown targets panic (recovered by parseFrame).
func (f *framer) parseResultSchemaChange() frame {
	if f.proto <= protoVersion2 {
		change := f.readString()
		keyspace := f.readString()
		table := f.readString()

		if table != "" {
			return &schemaChangeTable{
				frameHeader: *f.header,
				change:      change,
				keyspace:    keyspace,
				object:      table,
			}
		} else {
			return &schemaChangeKeyspace{
				frameHeader: *f.header,
				change:      change,
				keyspace:    keyspace,
			}
		}
	} else {
		change := f.readString()
		target := f.readString()

		// TODO: could just use a separate type for each target
		switch target {
		case "KEYSPACE":
			frame := &schemaChangeKeyspace{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()

			return frame
		case "TABLE":
			frame := &schemaChangeTable{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()
			frame.object = f.readString()

			return frame
		case "TYPE":
			frame := &schemaChangeType{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()
			frame.object = f.readString()

			return frame
		case "FUNCTION":
			frame := &schemaChangeFunction{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()
			frame.name = f.readString()
			frame.args = f.readStringList()

			return frame
		case "AGGREGATE":
			frame := &schemaChangeAggregate{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()
			frame.name = f.readString()
			frame.args = f.readStringList()

			return frame
		default:
			panic(fmt.Errorf("gocql: unknown SCHEMA_CHANGE target: %q change: %q", target, change))
		}
	}
}
// authenticateFrame is the AUTHENTICATE response naming the server's
// authenticator class.
type authenticateFrame struct {
	frameHeader

	class string
}

func (a *authenticateFrame) String() string {
	return fmt.Sprintf("[authenticate class=%q]", a.class)
}

func (f *framer) parseAuthenticateFrame() frame {
	return &authenticateFrame{
		frameHeader: *f.header,
		class:       f.readString(),
	}
}

// authSuccessFrame is the AUTH_SUCCESS response with optional final token.
type authSuccessFrame struct {
	frameHeader

	data []byte
}

func (a *authSuccessFrame) String() string {
	return fmt.Sprintf("[auth_success data=%q]", a.data)
}

func (f *framer) parseAuthSuccessFrame() frame {
	return &authSuccessFrame{
		frameHeader: *f.header,
		data:        f.readBytes(),
	}
}

// authChallengeFrame is the AUTH_CHALLENGE response with the server token.
type authChallengeFrame struct {
	frameHeader

	data []byte
}

func (a *authChallengeFrame) String() string {
	return fmt.Sprintf("[auth_challenge data=%q]", a.data)
}

func (f *framer) parseAuthChallengeFrame() frame {
	return &authChallengeFrame{
		frameHeader: *f.header,
		data:        f.readBytes(),
	}
}
// statusChangeEventFrame is a STATUS_CHANGE push event (node up/down).
type statusChangeEventFrame struct {
	frameHeader

	change string
	host   net.IP
	port   int
}

func (t statusChangeEventFrame) String() string {
	return fmt.Sprintf("[status_change change=%s host=%v port=%v]", t.change, t.host, t.port)
}

// essentially the same as statusChange
type topologyChangeEventFrame struct {
	frameHeader

	change string
	host   net.IP
	port   int
}

func (t topologyChangeEventFrame) String() string {
	return fmt.Sprintf("[topology_change change=%s host=%v port=%v]", t.change, t.host, t.port)
}

// parseEventFrame dispatches an EVENT frame on its event-type string.
// Unknown event types panic (recovered by parseFrame).
func (f *framer) parseEventFrame() frame {
	eventType := f.readString()

	switch eventType {
	case "TOPOLOGY_CHANGE":
		frame := &topologyChangeEventFrame{frameHeader: *f.header}
		frame.change = f.readString()
		frame.host, frame.port = f.readInet()

		return frame
	case "STATUS_CHANGE":
		frame := &statusChangeEventFrame{frameHeader: *f.header}
		frame.change = f.readString()
		frame.host, frame.port = f.readInet()

		return frame
	case "SCHEMA_CHANGE":
		// this should work for all versions
		return f.parseResultSchemaChange()
	default:
		panic(fmt.Errorf("gocql: unknown event type: %q", eventType))
	}
}
// writeAuthResponseFrame is the outgoing AUTH_RESPONSE carrying the client
// token for the current SASL round.
type writeAuthResponseFrame struct {
	data []byte
}

func (a *writeAuthResponseFrame) String() string {
	return fmt.Sprintf("[auth_response data=%q]", a.data)
}

func (a *writeAuthResponseFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeAuthResponseFrame(streamID, a.data)
}

func (f *framer) writeAuthResponseFrame(streamID int, data []byte) error {
	f.writeHeader(f.flags, opAuthResponse, streamID)
	f.writeBytes(data)
	return f.finishWrite()
}
// queryValues is one bound value of a query; either a concrete (possibly
// named) value or the explicit "unset" marker.
type queryValues struct {
	value []byte

	// optional name, will set With names for values flag
	name    string
	isUnset bool
}

// queryParams collects the per-query options serialized by writeQueryParams;
// fields below the version markers only apply from that protocol version on.
type queryParams struct {
	consistency Consistency
	// v2+
	skipMeta          bool
	values            []queryValues
	pageSize          int
	pagingState       []byte
	serialConsistency SerialConsistency
	// v3+
	defaultTimestamp      bool
	defaultTimestampValue int64
}

func (q queryParams) String() string {
	return fmt.Sprintf("[query_params consistency=%v skip_meta=%v page_size=%d paging_state=%q serial_consistency=%v default_timestamp=%v values=%v]",
		q.consistency, q.skipMeta, q.pageSize, q.pagingState, q.serialConsistency, q.defaultTimestamp, q.values)
}
// writeQueryParams serializes <query_parameters>. Protocol v1 carries only
// the consistency; v2+ adds a flags byte followed by the optional sections,
// which must appear in exactly the order written here (wire format).
func (f *framer) writeQueryParams(opts *queryParams) {
	f.writeConsistency(opts.consistency)
	if f.proto == protoVersion1 {
		// v1 has no flags byte and no optional sections
		return
	}
	// first pass: build the flags byte announcing which sections follow
	var flags byte
	if len(opts.values) > 0 {
		flags |= flagValues
	}
	if opts.skipMeta {
		flags |= flagSkipMetaData
	}
	if opts.pageSize > 0 {
		flags |= flagPageSize
	}
	if len(opts.pagingState) > 0 {
		flags |= flagWithPagingState
	}
	if opts.serialConsistency > 0 {
		flags |= flagWithSerialConsistency
	}
	names := false
	// protoV3 specific things
	if f.proto > protoVersion2 {
		if opts.defaultTimestamp {
			flags |= flagDefaultTimestamp
		}
		// named values: presence of a name on the first value decides for
		// all of them (they must be all named or all positional)
		if len(opts.values) > 0 && opts.values[0].name != "" {
			flags |= flagWithNameValues
			names = true
		}
	}
	f.writeByte(flags)
	// second pass: emit the optional sections in wire order
	if n := len(opts.values); n > 0 {
		f.writeShort(uint16(n))
		for i := 0; i < n; i++ {
			if names {
				f.writeString(opts.values[i].name)
			}
			if opts.values[i].isUnset {
				f.writeUnset()
			} else {
				f.writeBytes(opts.values[i].value)
			}
		}
	}
	if opts.pageSize > 0 {
		f.writeInt(int32(opts.pageSize))
	}
	if len(opts.pagingState) > 0 {
		f.writeBytes(opts.pagingState)
	}
	if opts.serialConsistency > 0 {
		f.writeConsistency(Consistency(opts.serialConsistency))
	}
	if f.proto > protoVersion2 && opts.defaultTimestamp {
		// timestamp in microseconds
		var ts int64
		if opts.defaultTimestampValue != 0 {
			ts = opts.defaultTimestampValue
		} else {
			ts = time.Now().UnixNano() / 1000
		}
		f.writeLong(ts)
	}
}
// writeQueryFrame is a QUERY request: a CQL statement plus its parameters.
type writeQueryFrame struct {
	statement string
	params    queryParams
}

// String renders the request for tracing/debugging.
func (w *writeQueryFrame) String() string {
	return fmt.Sprintf("[query statement=%q params=%v]", w.statement, w.params)
}

// writeFrame serializes the request onto the framer for the given stream.
func (w *writeQueryFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeQueryFrame(streamID, w.statement, &w.params)
}

// writeQueryFrame emits a QUERY request: header, [long string] statement,
// then the query parameters.
func (f *framer) writeQueryFrame(streamID int, statement string, params *queryParams) error {
	f.writeHeader(f.flags, opQuery, streamID)
	f.writeLongString(statement)
	f.writeQueryParams(params)
	return f.finishWrite()
}
// frameWriter is implemented by every request frame type; it serializes the
// request onto the framer for the given stream.
type frameWriter interface {
	writeFrame(framer *framer, streamID int) error
}

// frameWriterFunc adapts a plain function to the frameWriter interface.
type frameWriterFunc func(framer *framer, streamID int) error

func (f frameWriterFunc) writeFrame(framer *framer, streamID int) error {
	return f(framer, streamID)
}
// writeExecuteFrame is an EXECUTE request for a previously prepared statement.
type writeExecuteFrame struct {
	preparedID []byte
	params     queryParams
}

// String renders the request for tracing/debugging.
func (e *writeExecuteFrame) String() string {
	return fmt.Sprintf("[execute id=% X params=%v]", e.preparedID, &e.params)
}

// writeFrame serializes the request onto the framer for the given stream.
func (e *writeExecuteFrame) writeFrame(fr *framer, streamID int) error {
	return fr.writeExecuteFrame(streamID, e.preparedID, &e.params)
}
// writeExecuteFrame emits an EXECUTE request. v2+ reuses the full
// <query_parameters> encoding; v1 instead inlines the bound values followed
// by the consistency (v1 EXECUTE predates the flags byte).
func (f *framer) writeExecuteFrame(streamID int, preparedID []byte, params *queryParams) error {
	f.writeHeader(f.flags, opExecute, streamID)
	f.writeShortBytes(preparedID)
	if f.proto > protoVersion1 {
		f.writeQueryParams(params)
	} else {
		n := len(params.values)
		f.writeShort(uint16(n))
		for i := 0; i < n; i++ {
			// NOTE(review): unset values are a v4 feature; on v1 this
			// branch would emit the -2 sentinel anyway — presumably
			// callers never set isUnset on v1. TODO confirm.
			if params.values[i].isUnset {
				f.writeUnset()
			} else {
				f.writeBytes(params.values[i].value)
			}
		}
		f.writeConsistency(params.consistency)
	}
	return f.finishWrite()
}
// TODO: can we replace BatchStatemt with batchStatement? As they prety much
// duplicate each other
// batchStatment (sic — name kept for compatibility with existing callers)
// is one entry of a BATCH request: either a prepared-statement id or a raw
// CQL string, plus its bound values.
type batchStatment struct {
	preparedID []byte
	statement  string
	values     []queryValues
}

// writeBatchFrame is a BATCH request.
type writeBatchFrame struct {
	typ        BatchType
	statements []batchStatment
	consistency Consistency
	// v3+
	serialConsistency     SerialConsistency
	defaultTimestamp      bool
	defaultTimestampValue int64
}

// writeFrame serializes the batch onto the framer for the given stream.
func (w *writeBatchFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeBatchFrame(streamID, w)
}
// writeBatchFrame emits a BATCH request: type byte, statement count, the
// statements with their values, the consistency, and (v3+) a flags byte
// with optional serial consistency and default timestamp. Note that flags
// are accumulated while writing the statements but only emitted afterwards,
// which matches the v3+ wire layout.
func (f *framer) writeBatchFrame(streamID int, w *writeBatchFrame) error {
	f.writeHeader(f.flags, opBatch, streamID)
	f.writeByte(byte(w.typ))
	n := len(w.statements)
	f.writeShort(uint16(n))
	var flags byte
	for i := 0; i < n; i++ {
		b := &w.statements[i]
		// kind byte: 0 = raw CQL string, 1 = prepared-statement id
		if len(b.preparedID) == 0 {
			f.writeByte(0)
			f.writeLongString(b.statement)
		} else {
			f.writeByte(1)
			f.writeShortBytes(b.preparedID)
		}
		f.writeShort(uint16(len(b.values)))
		for j := range b.values {
			col := b.values[j]
			if f.proto > protoVersion2 && col.name != "" {
				// TODO: move this check into the caller and set a flag on writeBatchFrame
				// to indicate using named values
				// NOTE(review): this condition currently holds for every
				// supported protocol version, so named values in batches
				// always error out; the error aborts mid-write, which is
				// safe only because finishWrite is never reached.
				if f.proto <= protoVersion5 {
					return fmt.Errorf("gocql: named query values are not supported in batches, please see https://issues.apache.org/jira/browse/CASSANDRA-10246")
				}
				flags |= flagWithNameValues
				f.writeString(col.name)
			}
			if col.isUnset {
				f.writeUnset()
			} else {
				f.writeBytes(col.value)
			}
		}
	}
	f.writeConsistency(w.consistency)
	if f.proto > protoVersion2 {
		if w.serialConsistency > 0 {
			flags |= flagWithSerialConsistency
		}
		if w.defaultTimestamp {
			flags |= flagDefaultTimestamp
		}
		f.writeByte(flags)
		if w.serialConsistency > 0 {
			f.writeConsistency(Consistency(w.serialConsistency))
		}
		if w.defaultTimestamp {
			// timestamp in microseconds
			var ts int64
			if w.defaultTimestampValue != 0 {
				ts = w.defaultTimestampValue
			} else {
				ts = time.Now().UnixNano() / 1000
			}
			f.writeLong(ts)
		}
	}
	return f.finishWrite()
}
// writeOptionsFrame is an OPTIONS request; its body is empty.
type writeOptionsFrame struct{}

func (w *writeOptionsFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeOptionsFrame(streamID, w)
}

// writeOptionsFrame emits an OPTIONS request (header only, no body).
func (f *framer) writeOptionsFrame(stream int, _ *writeOptionsFrame) error {
	f.writeHeader(f.flags, opOptions, stream)
	return f.finishWrite()
}
// writeRegisterFrame is a REGISTER request subscribing this connection to
// the named server push events (e.g. "TOPOLOGY_CHANGE").
type writeRegisterFrame struct {
	events []string
}

func (w *writeRegisterFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeRegisterFrame(streamID, w)
}

// writeRegisterFrame emits a REGISTER request: header plus a [string list]
// of event names.
func (f *framer) writeRegisterFrame(streamID int, w *writeRegisterFrame) error {
	f.writeHeader(f.flags, opRegister, streamID)
	f.writeStringList(w.events)
	return f.finishWrite()
}
// readByte consumes one byte from the frame body; it panics on underflow
// (recovered by parseFrame).
func (f *framer) readByte() byte {
	if len(f.rbuf) == 0 {
		panic(fmt.Errorf("not enough bytes in buffer to read byte require 1 got: %d", len(f.rbuf)))
	}
	out := f.rbuf[0]
	f.rbuf = f.rbuf[1:]
	return out
}
// readInt consumes a big-endian signed 32-bit [int] from the frame body.
func (f *framer) readInt() (n int) {
	if len(f.rbuf) < 4 {
		panic(fmt.Errorf("not enough bytes in buffer to read int require 4 got: %d", len(f.rbuf)))
	}
	b := f.rbuf
	n = int(int32(b[0])<<24 | int32(b[1])<<16 | int32(b[2])<<8 | int32(b[3]))
	f.rbuf = b[4:]
	return
}
// readShort consumes a big-endian unsigned 16-bit [short] from the body.
func (f *framer) readShort() (n uint16) {
	if len(f.rbuf) < 2 {
		panic(fmt.Errorf("not enough bytes in buffer to read short require 2 got: %d", len(f.rbuf)))
	}
	b := f.rbuf
	n = uint16(b[0])<<8 | uint16(b[1])
	f.rbuf = b[2:]
	return
}
// readLong consumes a big-endian signed 64-bit [long] from the frame body.
func (f *framer) readLong() (n int64) {
	if len(f.rbuf) < 8 {
		panic(fmt.Errorf("not enough bytes in buffer to read long require 8 got: %d", len(f.rbuf)))
	}
	b := f.rbuf
	n = int64(b[0])<<56 | int64(b[1])<<48 | int64(b[2])<<40 | int64(b[3])<<32 |
		int64(b[4])<<24 | int64(b[5])<<16 | int64(b[6])<<8 | int64(b[7])
	f.rbuf = b[8:]
	return
}
// readString consumes a [string]: a [short] length followed by that many
// UTF-8 bytes.
func (f *framer) readString() (s string) {
	n := int(f.readShort())
	if len(f.rbuf) < n {
		panic(fmt.Errorf("not enough bytes in buffer to read string require %d got: %d", n, len(f.rbuf)))
	}
	s, f.rbuf = string(f.rbuf[:n]), f.rbuf[n:]
	return
}
// readLongString consumes a [long string]: an [int] length followed by that
// many UTF-8 bytes.
func (f *framer) readLongString() (s string) {
	n := f.readInt()
	if len(f.rbuf) < n {
		panic(fmt.Errorf("not enough bytes in buffer to read long string require %d got: %d", n, len(f.rbuf)))
	}
	s, f.rbuf = string(f.rbuf[:n]), f.rbuf[n:]
	return
}
// readUUID consumes a 16-byte [uuid] from the frame body.
func (f *framer) readUUID() *UUID {
	if len(f.rbuf) < 16 {
		panic(fmt.Errorf("not enough bytes in buffer to read uuid require %d got: %d", 16, len(f.rbuf)))
	}
	// TODO: how to handle this error, if it is a uuid, then sureley, problems?
	u, _ := UUIDFromBytes(f.rbuf[:16])
	f.rbuf = f.rbuf[16:]
	return &u
}
// readStringList consumes a [string list]: a [short] count followed by that
// many [string] values.
func (f *framer) readStringList() []string {
	list := make([]string, f.readShort())
	for i := range list {
		list[i] = f.readString()
	}
	return list
}
// readBytesInternal consumes a [bytes] value and returns an error instead of
// panicking on underflow. A negative length encodes null and yields nil, nil.
// The returned slice aliases the framer's read buffer.
func (f *framer) readBytesInternal() ([]byte, error) {
	size := f.readInt()
	if size < 0 {
		return nil, nil
	}
	if avail := len(f.rbuf); avail < size {
		return nil, fmt.Errorf("not enough bytes in buffer to read bytes require %d got: %d", size, avail)
	}
	out := f.rbuf[:size]
	f.rbuf = f.rbuf[size:]
	return out, nil
}
// readBytes consumes a [bytes] value, panicking on underflow (recovered by
// parseFrame). Returns nil for a null value.
func (f *framer) readBytes() []byte {
	out, err := f.readBytesInternal()
	if err != nil {
		panic(err)
	}
	return out
}
// readShortBytes consumes a [short bytes] value: a [short] length followed
// by that many bytes. The returned slice aliases the framer's read buffer.
func (f *framer) readShortBytes() []byte {
	n := int(f.readShort())
	if len(f.rbuf) < n {
		panic(fmt.Errorf("not enough bytes in buffer to read short bytes: require %d got %d", n, len(f.rbuf)))
	}
	out := f.rbuf[:n]
	f.rbuf = f.rbuf[n:]
	return out
}
// readInet decodes an [inet] value: one size byte (4 for IPv4, 16 for IPv6),
// the address bytes, then an [int] port.
func (f *framer) readInet() (net.IP, int) {
	if len(f.rbuf) < 1 {
		panic(fmt.Errorf("not enough bytes in buffer to read inet size require %d got: %d", 1, len(f.rbuf)))
	}
	size := f.rbuf[0]
	f.rbuf = f.rbuf[1:]
	if !(size == 4 || size == 16) {
		panic(fmt.Errorf("invalid IP size: %d", size))
	}
	// BUG FIX: this guard previously checked len(f.rbuf) < 1 instead of
	// < int(size). Since slicing is legal up to capacity, f.rbuf[:size] on
	// a truncated body could silently read stale bytes beyond the frame
	// rather than producing the intended diagnostic panic.
	if len(f.rbuf) < int(size) {
		panic(fmt.Errorf("not enough bytes in buffer to read inet require %d got: %d", size, len(f.rbuf)))
	}
	ip := make([]byte, size)
	copy(ip, f.rbuf[:size])
	f.rbuf = f.rbuf[size:]
	port := f.readInt()
	return net.IP(ip), port
}
// readConsistency consumes a [consistency], encoded as a [short].
func (f *framer) readConsistency() Consistency {
	return Consistency(f.readShort())
}
// readStringMap consumes a [string map]: a [short] entry count followed by
// key/value [string] pairs.
func (f *framer) readStringMap() map[string]string {
	n := int(f.readShort())
	m := make(map[string]string, n)
	for i := 0; i < n; i++ {
		key := f.readString()
		m[key] = f.readString()
	}
	return m
}
// readBytesMap consumes a [bytes map]: a [short] entry count followed by
// [string] key / [bytes] value pairs.
func (f *framer) readBytesMap() map[string][]byte {
	n := int(f.readShort())
	m := make(map[string][]byte, n)
	for i := 0; i < n; i++ {
		key := f.readString()
		m[key] = f.readBytes()
	}
	return m
}
// readStringMultiMap consumes a [string multimap]: a [short] entry count
// followed by [string] key / [string list] value pairs.
func (f *framer) readStringMultiMap() map[string][]string {
	n := int(f.readShort())
	m := make(map[string][]string, n)
	for i := 0; i < n; i++ {
		key := f.readString()
		m[key] = f.readStringList()
	}
	return m
}
// writeByte appends a single raw byte to the outgoing frame body.
func (f *framer) writeByte(b byte) {
	f.wbuf = append(f.wbuf, b)
}
// appendBytes appends a [bytes] value to p: an [int] length prefix followed
// by the data, or the length -1 alone when d is nil (wire null).
func appendBytes(p []byte, d []byte) []byte {
	if d == nil {
		return appendInt(p, -1)
	}
	return append(appendInt(p, int32(len(d))), d...)
}
// appendShort appends a big-endian unsigned 16-bit [short] to p.
func appendShort(p []byte, n uint16) []byte {
	return append(p, byte(n>>8), byte(n))
}
// appendInt appends a big-endian signed 32-bit [int] to p.
func appendInt(p []byte, n int32) []byte {
	return append(p, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}
// appendLong appends a big-endian signed 64-bit [long] to p,
// most-significant byte first.
func appendLong(p []byte, n int64) []byte {
	for shift := 56; shift >= 0; shift -= 8 {
		p = append(p, byte(n>>uint(shift)))
	}
	return p
}
// these are protocol level binary types

// writeInt appends a big-endian [int] to the outgoing frame body.
func (f *framer) writeInt(n int32) {
	f.wbuf = appendInt(f.wbuf, n)
}

// writeShort appends a big-endian [short] to the outgoing frame body.
func (f *framer) writeShort(n uint16) {
	f.wbuf = appendShort(f.wbuf, n)
}

// writeLong appends a big-endian [long] to the outgoing frame body.
func (f *framer) writeLong(n int64) {
	f.wbuf = appendLong(f.wbuf, n)
}
// writeString appends a [string]: a [short] length followed by the bytes.
func (f *framer) writeString(s string) {
	f.writeShort(uint16(len(s)))
	f.wbuf = append(f.wbuf, s...)
}

// writeLongString appends a [long string]: an [int] length plus the bytes.
func (f *framer) writeLongString(s string) {
	f.writeInt(int32(len(s)))
	f.wbuf = append(f.wbuf, s...)
}

// writeUUID appends a raw 16-byte [uuid].
func (f *framer) writeUUID(u *UUID) {
	f.wbuf = append(f.wbuf, u[:]...)
}

// writeStringList appends a [string list]: a [short] count then each string.
func (f *framer) writeStringList(l []string) {
	f.writeShort(uint16(len(l)))
	for _, s := range l {
		f.writeString(s)
	}
}

func (f *framer) writeUnset() {
	// Protocol version 4 specifies that bind variables do not require having a
	// value when executing a statement. Bind variables without a value are
	// called 'unset'. The 'unset' bind variable is serialized as the int
	// value '-2' without following bytes.
	f.writeInt(-2)
}
// writeBytes appends a [bytes] value: an [int] length then the data, or the
// length -1 alone when p is nil (wire null).
func (f *framer) writeBytes(p []byte) {
	// TODO: handle null case correctly,
	// [bytes] A [int] n, followed by n bytes if n >= 0. If n < 0,
	// no byte should follow and the value represented is `null`.
	if p == nil {
		f.writeInt(-1)
	} else {
		f.writeInt(int32(len(p)))
		f.wbuf = append(f.wbuf, p...)
	}
}

// writeShortBytes appends a [short bytes] value: [short] length plus data.
func (f *framer) writeShortBytes(p []byte) {
	f.writeShort(uint16(len(p)))
	f.wbuf = append(f.wbuf, p...)
}

// writeInet appends an [inet]: one size byte, the address bytes, then the
// port as an [int].
func (f *framer) writeInet(ip net.IP, port int) {
	f.wbuf = append(f.wbuf,
		byte(len(ip)),
	)
	f.wbuf = append(f.wbuf,
		[]byte(ip)...,
	)
	f.writeInt(int32(port))
}

// writeConsistency appends a [consistency], encoded as a [short].
func (f *framer) writeConsistency(cons Consistency) {
	f.writeShort(uint16(cons))
}

// writeStringMap appends a [string map]: a [short] entry count then
// key/value [string] pairs. Note: map iteration order is unspecified.
func (f *framer) writeStringMap(m map[string]string) {
	f.writeShort(uint16(len(m)))
	for k, v := range m {
		f.writeString(k)
		f.writeString(v)
	}
}
// framer: remove nested calls in write methods
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"runtime"
"strings"
"time"
)
// unsetColumn is the sentinel type behind UnsetValue.
type unsetColumn struct{}

// UnsetValue represents a value used in a query binding that will be ignored by Cassandra.
//
// By setting a field to the unset value Cassandra will ignore the write completely.
// The main advantage is the ability to keep the same prepared statement even when you don't
// want to update some fields, where before you needed to make another prepared statement.
//
// UnsetValue is only available when using the version 4 of the protocol.
var UnsetValue = unsetColumn{}
// namedValue pairs a bind-marker name with its value for queries that use
// named placeholders.
type namedValue struct {
	name  string
	value interface{}
}

// NamedValue produce a value which will bind to the named parameter in a query
func NamedValue(name string, value interface{}) interface{} {
	return &namedValue{name: name, value: value}
}
const (
	// high bit of the version byte encodes direction (0 = request,
	// 1 = response); the remaining 7 bits are the protocol version
	protoDirectionMask = 0x80
	protoVersionMask   = 0x7F
	protoVersion1      = 0x01
	protoVersion2      = 0x02
	protoVersion3      = 0x03
	protoVersion4      = 0x04
	protoVersion5      = 0x05

	// maxFrameSize is the hard cap on a frame body (256 MB)
	maxFrameSize = 256 * 1024 * 1024
)
// protoVersion is the raw version byte of a frame header: a direction bit
// plus a 7-bit protocol version number.
type protoVersion byte

// request reports whether the direction bit marks a client request.
func (p protoVersion) request() bool {
	return p&protoDirectionMask == 0x00
}

// response reports whether the direction bit marks a server response.
func (p protoVersion) response() bool {
	return p&protoDirectionMask == 0x80
}

// version extracts the numeric protocol version.
func (p protoVersion) version() byte {
	return byte(p) & protoVersionMask
}

// String renders the version byte for logging.
func (p protoVersion) String() string {
	if p.response() {
		return fmt.Sprintf("[version=%d direction=%s]", p.version(), "RESP")
	}
	return fmt.Sprintf("[version=%d direction=%s]", p.version(), "REQ")
}
// frameOp is the opcode byte of a frame header.
type frameOp byte

const (
	// header ops
	opError         frameOp = 0x00
	opStartup       frameOp = 0x01
	opReady         frameOp = 0x02
	opAuthenticate  frameOp = 0x03
	opOptions       frameOp = 0x05
	opSupported     frameOp = 0x06
	opQuery         frameOp = 0x07
	opResult        frameOp = 0x08
	opPrepare       frameOp = 0x09
	opExecute       frameOp = 0x0A
	opRegister      frameOp = 0x0B
	opEvent         frameOp = 0x0C
	opBatch         frameOp = 0x0D
	opAuthChallenge frameOp = 0x0E
	opAuthResponse  frameOp = 0x0F
	opAuthSuccess   frameOp = 0x10
)

// String returns the protocol name of the opcode, or UNKNOWN_OP_<n>.
func (f frameOp) String() string {
	names := map[frameOp]string{
		opError:         "ERROR",
		opStartup:       "STARTUP",
		opReady:         "READY",
		opAuthenticate:  "AUTHENTICATE",
		opOptions:       "OPTIONS",
		opSupported:     "SUPPORTED",
		opQuery:         "QUERY",
		opResult:        "RESULT",
		opPrepare:       "PREPARE",
		opExecute:       "EXECUTE",
		opRegister:      "REGISTER",
		opEvent:         "EVENT",
		opBatch:         "BATCH",
		opAuthChallenge: "AUTH_CHALLENGE",
		opAuthResponse:  "AUTH_RESPONSE",
		opAuthSuccess:   "AUTH_SUCCESS",
	}
	if name, ok := names[f]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_OP_%d", f)
}
const (
	// result kind — the [int] discriminator at the start of a RESULT body
	resultKindVoid          = 1
	resultKindRows          = 2
	resultKindKeyspace      = 3
	resultKindPrepared      = 4
	resultKindSchemaChanged = 5

	// rows flags — metadata flags in a Rows result
	flagGlobalTableSpec int = 0x01
	flagHasMorePages    int = 0x02
	flagNoMetaData      int = 0x04

	// query flags — the <query_parameters> flags byte (v2+)
	flagValues                byte = 0x01
	flagSkipMetaData          byte = 0x02
	flagPageSize              byte = 0x04
	flagWithPagingState       byte = 0x08
	flagWithSerialConsistency byte = 0x10
	flagDefaultTimestamp      byte = 0x20
	flagWithNameValues        byte = 0x40

	// header flags — the frame-header flags byte
	flagCompress      byte = 0x01
	flagTracing       byte = 0x02
	flagCustomPayload byte = 0x04
	flagWarning       byte = 0x08
)
// Consistency is the Cassandra consistency level of a query, encoded on the
// wire as a [short].
type Consistency uint16

const (
	Any         Consistency = 0x00
	One         Consistency = 0x01
	Two         Consistency = 0x02
	Three       Consistency = 0x03
	Quorum      Consistency = 0x04
	All         Consistency = 0x05
	LocalQuorum Consistency = 0x06
	EachQuorum  Consistency = 0x07
	LocalOne    Consistency = 0x0A
)

// String returns the canonical CQL name of the level, or UNKNOWN_CONS_0x<n>.
func (c Consistency) String() string {
	names := map[Consistency]string{
		Any:         "ANY",
		One:         "ONE",
		Two:         "TWO",
		Three:       "THREE",
		Quorum:      "QUORUM",
		All:         "ALL",
		LocalQuorum: "LOCAL_QUORUM",
		EachQuorum:  "EACH_QUORUM",
		LocalOne:    "LOCAL_ONE",
	}
	if name, ok := names[c]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_CONS_0x%x", uint16(c))
}
// MarshalText implements encoding.TextMarshaler using the canonical name.
func (c Consistency) MarshalText() (text []byte, err error) {
	return []byte(c.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler; input must be the
// exact upper-case canonical name.
func (c *Consistency) UnmarshalText(text []byte) error {
	lookup := map[string]Consistency{
		"ANY":          Any,
		"ONE":          One,
		"TWO":          Two,
		"THREE":        Three,
		"QUORUM":       Quorum,
		"ALL":          All,
		"LOCAL_QUORUM": LocalQuorum,
		"EACH_QUORUM":  EachQuorum,
		"LOCAL_ONE":    LocalOne,
	}
	v, ok := lookup[string(text)]
	if !ok {
		return fmt.Errorf("invalid consistency %q", string(text))
	}
	*c = v
	return nil
}

// ParseConsistency parses s case-insensitively, panicking on bad input.
func ParseConsistency(s string) Consistency {
	var c Consistency
	if err := c.UnmarshalText([]byte(strings.ToUpper(s))); err != nil {
		panic(err)
	}
	return c
}

// ParseConsistencyWrapper wraps gocql.ParseConsistency to provide an err
// return instead of a panic
func ParseConsistencyWrapper(s string) (consistency Consistency, err error) {
	err = consistency.UnmarshalText([]byte(strings.ToUpper(s)))
	return
}

// MustParseConsistency is the same as ParseConsistency except it returns
// an error (never). It is kept here since breaking changes are not good.
// DEPRECATED: use ParseConsistency if you want a panic on parse error.
func MustParseConsistency(s string) (Consistency, error) {
	c, err := ParseConsistencyWrapper(s)
	if err != nil {
		panic(err)
	}
	return c, nil
}
// SerialConsistency is the consistency level of the serial (paxos) phase of
// a lightweight transaction, encoded on the wire as a [short].
type SerialConsistency uint16

const (
	Serial      SerialConsistency = 0x08
	LocalSerial SerialConsistency = 0x09
)

// String returns the canonical name, or UNKNOWN_SERIAL_CONS_0x<n>.
func (s SerialConsistency) String() string {
	names := map[SerialConsistency]string{
		Serial:      "SERIAL",
		LocalSerial: "LOCAL_SERIAL",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_SERIAL_CONS_0x%x", uint16(s))
}
// MarshalText implements encoding.TextMarshaler using the canonical name.
func (s SerialConsistency) MarshalText() (text []byte, err error) {
	return []byte(s.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler; input must be the
// exact upper-case canonical name.
func (s *SerialConsistency) UnmarshalText(text []byte) error {
	lookup := map[string]SerialConsistency{
		"SERIAL":       Serial,
		"LOCAL_SERIAL": LocalSerial,
	}
	v, ok := lookup[string(text)]
	if !ok {
		return fmt.Errorf("invalid consistency %q", string(text))
	}
	*s = v
	return nil
}
const (
	// apacheCassandraTypePrefix is stripped from fully-qualified marshal
	// class names when mapping server types.
	apacheCassandraTypePrefix = "org.apache.cassandra.db.marshal."
)

var (
	// ErrFrameTooBig is returned when a frame body exceeds maxFrameSize.
	ErrFrameTooBig = errors.New("frame length is bigger than the maximum allowed")
)

// maxFrameHeaderSize is the v3+ header size; v1/v2 headers are 8 bytes.
const maxFrameHeaderSize = 9
// writeInt stores a big-endian int32 into the first four bytes of p.
func writeInt(p []byte, n int32) {
	p[0], p[1], p[2], p[3] = byte(n>>24), byte(n>>16), byte(n>>8), byte(n)
}
// readInt decodes a big-endian int32 from the first four bytes of p.
func readInt(p []byte) int32 {
	hi := uint32(p[0])<<24 | uint32(p[1])<<16
	lo := uint32(p[2])<<8 | uint32(p[3])
	return int32(hi | lo)
}
// writeShort stores a big-endian uint16 into the first two bytes of p.
func writeShort(p []byte, n uint16) {
	p[0], p[1] = byte(n>>8), byte(n)
}
// readShort decodes a big-endian uint16 from the first two bytes of p.
func readShort(p []byte) uint16 {
	return uint16(p[0])<<8 + uint16(p[1])
}
// frameHeader is the decoded fixed-size header of a native-protocol frame,
// plus any custom payload / warnings read from the body's prologue.
type frameHeader struct {
	version protoVersion
	flags   byte
	stream  int
	op      frameOp
	// length is the body length in bytes
	length        int
	customPayload map[string][]byte
	warnings      []string
}

// String renders the header for logging.
func (f frameHeader) String() string {
	return fmt.Sprintf("[header version=%s flags=0x%x stream=%d op=%s length=%d]", f.version, f.flags, f.stream, f.op, f.length)
}

// Header satisfies the frame interface by returning the header itself.
func (f frameHeader) Header() frameHeader {
	return f
}
// defaultBufSize is the initial capacity of a framer's read/write buffers.
const defaultBufSize = 128

// a framer is responsible for reading, writing and parsing frames on a single stream
type framer struct {
	r io.Reader
	w io.Writer

	proto byte
	// flags are for outgoing flags, enabling compression and tracing etc
	flags    byte
	compres  Compressor
	headSize int
	// if this frame was read then the header will be here
	header *frameHeader
	// if tracing flag is set this is not nil
	traceID []byte

	// holds a ref to the whole byte slice for rbuf so that it can be reset to
	// 0 after a read.
	readBuffer []byte

	rbuf []byte
	wbuf []byte
}
// newFramer builds a framer for one stream over r/w. The version byte is
// masked to the numeric protocol version; a non-nil compressor sets the
// compress flag on all outgoing frames. Header size is 8 bytes for v1/v2
// and 9 bytes for v3+ (16-bit stream ids).
func newFramer(r io.Reader, w io.Writer, compressor Compressor, version byte) *framer {
	f := &framer{
		wbuf:       make([]byte, defaultBufSize),
		readBuffer: make([]byte, defaultBufSize),
	}
	var flags byte
	if compressor != nil {
		flags |= flagCompress
	}
	version &= protoVersionMask
	headSize := 8
	if version > protoVersion2 {
		headSize = 9
	}
	f.compres = compressor
	f.proto = version
	f.flags = flags
	f.headSize = headSize
	f.r = r
	f.rbuf = f.readBuffer[:0]
	f.w = w
	f.wbuf = f.wbuf[:0]
	f.header = nil
	f.traceID = nil
	return f
}
// frame is any parsed response frame; all concrete frames embed frameHeader.
type frame interface {
	Header() frameHeader
}
// readHeader reads and decodes a frame header from r into the scratch buffer
// p (which must hold at least maxFrameHeaderSize bytes). The version byte is
// read first so the header size (8 for v1/v2, 9 for v3+) can be determined
// before reading the remainder.
func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
	_, err = io.ReadFull(r, p[:1])
	if err != nil {
		return frameHeader{}, err
	}
	version := p[0] & protoVersionMask
	if version < protoVersion1 || version > protoVersion4 {
		return frameHeader{}, fmt.Errorf("gocql: unsupported protocol response version: %d", version)
	}
	headSize := 9
	if version < protoVersion3 {
		headSize = 8
	}
	_, err = io.ReadFull(r, p[1:headSize])
	if err != nil {
		return frameHeader{}, err
	}
	p = p[:headSize]
	head.version = protoVersion(p[0])
	head.flags = p[1]
	if version > protoVersion2 {
		if len(p) != 9 {
			return frameHeader{}, fmt.Errorf("not enough bytes to read header require 9 got: %d", len(p))
		}
		// v3+: 16-bit signed stream id
		head.stream = int(int16(p[2])<<8 | int16(p[3]))
		head.op = frameOp(p[4])
		head.length = int(readInt(p[5:]))
	} else {
		if len(p) != 8 {
			return frameHeader{}, fmt.Errorf("not enough bytes to read header require 8 got: %d", len(p))
		}
		// v1/v2: 8-bit signed stream id
		head.stream = int(int8(p[2]))
		head.op = frameOp(p[3])
		head.length = int(readInt(p[4:]))
	}
	return head, nil
}
// explicitly enables tracing for the framers outgoing requests
func (f *framer) trace() {
	f.flags |= flagTracing
}
// reads a frame form the wire into the framers buffer
// On an over-size frame the body is drained from the connection (so the
// stream stays usable) and ErrFrameTooBig is returned. A compressed body is
// decompressed in place before the header is attached to the framer.
func (f *framer) readFrame(head *frameHeader) error {
	if head.length < 0 {
		return fmt.Errorf("frame body length can not be less than 0: %d", head.length)
	} else if head.length > maxFrameSize {
		// need to free up the connection to be used again
		_, err := io.CopyN(ioutil.Discard, f.r, int64(head.length))
		if err != nil {
			return fmt.Errorf("error whilst trying to discard frame with invalid length: %v", err)
		}
		return ErrFrameTooBig
	}
	// reuse the read buffer when it is large enough
	if cap(f.readBuffer) >= head.length {
		f.rbuf = f.readBuffer[:head.length]
	} else {
		f.readBuffer = make([]byte, head.length)
		f.rbuf = f.readBuffer
	}
	// assume the underlying reader takes care of timeouts and retries
	n, err := io.ReadFull(f.r, f.rbuf)
	if err != nil {
		return fmt.Errorf("unable to read frame body: read %d/%d bytes: %v", n, head.length, err)
	}
	if head.flags&flagCompress == flagCompress {
		if f.compres == nil {
			return NewErrProtocol("no compressor available with compressed frame body")
		}
		f.rbuf, err = f.compres.Decode(f.rbuf)
		if err != nil {
			return err
		}
	}
	f.header = head
	return nil
}
// parseFrame decodes the frame body previously read into rbuf, dispatching
// on the header opcode. The parse helpers signal malformed input by
// panicking with an error; that panic is converted to a returned error here
// (runtime errors are re-panicked). Tracing, warning, and custom-payload
// prologues are consumed before the op-specific body.
func (f *framer) parseFrame() (frame frame, err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			err = r.(error)
		}
	}()
	if f.header.version.request() {
		return nil, NewErrProtocol("got a request frame from server: %v", f.header.version)
	}
	if f.header.flags&flagTracing == flagTracing {
		f.readTrace()
	}
	if f.header.flags&flagWarning == flagWarning {
		f.header.warnings = f.readStringList()
	}
	if f.header.flags&flagCustomPayload == flagCustomPayload {
		f.header.customPayload = f.readBytesMap()
	}
	// assumes that the frame body has been read into rbuf
	switch f.header.op {
	case opError:
		frame = f.parseErrorFrame()
	case opReady:
		frame = f.parseReadyFrame()
	case opResult:
		frame, err = f.parseResultFrame()
	case opSupported:
		frame = f.parseSupportedFrame()
	case opAuthenticate:
		frame = f.parseAuthenticateFrame()
	case opAuthChallenge:
		frame = f.parseAuthChallengeFrame()
	case opAuthSuccess:
		frame = f.parseAuthSuccessFrame()
	case opEvent:
		frame = f.parseEventFrame()
	default:
		return nil, NewErrProtocol("unknown op in frame header: %s", f.header.op)
	}
	return
}
// parseErrorFrame decodes an ERROR body: an [int] code and [string] message,
// followed by code-specific detail fields. Each known code maps to a typed
// error frame; unknown codes panic (recovered by parseFrame).
func (f *framer) parseErrorFrame() frame {
	code := f.readInt()
	msg := f.readString()
	errD := errorFrame{
		frameHeader: *f.header,
		code:        code,
		message:     msg,
	}
	switch code {
	case errUnavailable:
		cl := f.readConsistency()
		required := f.readInt()
		alive := f.readInt()
		return &RequestErrUnavailable{
			errorFrame:  errD,
			Consistency: cl,
			Required:    required,
			Alive:       alive,
		}
	case errWriteTimeout:
		cl := f.readConsistency()
		received := f.readInt()
		blockfor := f.readInt()
		writeType := f.readString()
		return &RequestErrWriteTimeout{
			errorFrame:  errD,
			Consistency: cl,
			Received:    received,
			BlockFor:    blockfor,
			WriteType:   writeType,
		}
	case errReadTimeout:
		cl := f.readConsistency()
		received := f.readInt()
		blockfor := f.readInt()
		dataPresent := f.readByte()
		return &RequestErrReadTimeout{
			errorFrame:  errD,
			Consistency: cl,
			Received:    received,
			BlockFor:    blockfor,
			DataPresent: dataPresent,
		}
	case errAlreadyExists:
		ks := f.readString()
		table := f.readString()
		return &RequestErrAlreadyExists{
			errorFrame: errD,
			Keyspace:   ks,
			Table:      table,
		}
	case errUnprepared:
		stmtId := f.readShortBytes()
		return &RequestErrUnprepared{
			errorFrame:  errD,
			StatementId: copyBytes(stmtId), // defensively copy
		}
	case errReadFailure:
		res := &RequestErrReadFailure{
			errorFrame: errD,
		}
		res.Consistency = f.readConsistency()
		res.Received = f.readInt()
		res.BlockFor = f.readInt()
		res.DataPresent = f.readByte() != 0
		return res
	case errWriteFailure:
		res := &RequestErrWriteFailure{
			errorFrame: errD,
		}
		res.Consistency = f.readConsistency()
		res.Received = f.readInt()
		res.BlockFor = f.readInt()
		res.NumFailures = f.readInt()
		res.WriteType = f.readString()
		return res
	case errFunctionFailure:
		// NOTE(review): returned by value while the other cases return
		// pointers — presumably intentional, but worth confirming that
		// callers type-assert it consistently.
		res := RequestErrFunctionFailure{
			errorFrame: errD,
		}
		res.Keyspace = f.readString()
		res.Function = f.readString()
		res.ArgTypes = f.readStringList()
		return res
	case errInvalid, errBootstrapping, errConfig, errCredentials, errOverloaded,
		errProtocol, errServer, errSyntax, errTruncate, errUnauthorized:
		// TODO(zariel): we should have some distinct types for these errors
		return errD
	default:
		panic(fmt.Errorf("unknown error code: 0x%x", errD.code))
	}
}
// writeHeader resets the write buffer and emits a request header: version,
// flags, stream id (16-bit for v3+, 8-bit before), opcode, and a four-byte
// length placeholder that finishWrite later patches via setLength.
func (f *framer) writeHeader(flags byte, op frameOp, stream int) {
	f.wbuf = f.wbuf[:0]
	f.wbuf = append(f.wbuf,
		f.proto,
		flags,
	)
	if f.proto > protoVersion2 {
		f.wbuf = append(f.wbuf,
			byte(stream>>8),
			byte(stream),
		)
	} else {
		f.wbuf = append(f.wbuf,
			byte(stream),
		)
	}
	// pad out length
	f.wbuf = append(f.wbuf,
		byte(op),
		0,
		0,
		0,
		0,
	)
}
// setLength patches the 4-byte big-endian body length into the header
// already sitting in wbuf (length field at offset 4 for v1/v2, 5 for v3+).
func (f *framer) setLength(length int) {
	off := 4
	if f.proto > protoVersion2 {
		off = 5
	}
	writeInt(f.wbuf[off:], int32(length))
}
// finishWrite completes a request started by writeHeader: optionally
// compresses the body (the header's flags byte at wbuf[1] decides), patches
// the length field, and writes the whole frame to the underlying writer.
func (f *framer) finishWrite() error {
	if len(f.wbuf) > maxFrameSize {
		// huge app frame, lets remove it so it doesn't bloat the heap
		f.wbuf = make([]byte, defaultBufSize)
		return ErrFrameTooBig
	}
	if f.wbuf[1]&flagCompress == flagCompress {
		if f.compres == nil {
			panic("compress flag set with no compressor")
		}
		// TODO: only compress frames which are big enough
		compressed, err := f.compres.Encode(f.wbuf[f.headSize:])
		if err != nil {
			return err
		}
		f.wbuf = append(f.wbuf[:f.headSize], compressed...)
	}
	length := len(f.wbuf) - f.headSize
	f.setLength(length)
	_, err := f.w.Write(f.wbuf)
	if err != nil {
		return err
	}
	return nil
}
// readTrace consumes the tracing session [uuid] that prefixes the body when
// the tracing flag is set on a response.
func (f *framer) readTrace() {
	f.traceID = f.readUUID().Bytes()
}
// readyFrame is a READY response; its body is empty.
type readyFrame struct {
	frameHeader
}

// parseReadyFrame decodes a READY response (header only).
func (f *framer) parseReadyFrame() frame {
	return &readyFrame{
		frameHeader: *f.header,
	}
}
// supportedFrame is a SUPPORTED response listing the server's supported
// STARTUP options (e.g. CQL versions, compression algorithms).
type supportedFrame struct {
	frameHeader

	supported map[string][]string
}

// TODO: if we move the body buffer onto the frameHeader then we only need a single
// framer, and can move the methods onto the header.
// parseSupportedFrame decodes a SUPPORTED response: one [string multimap].
func (f *framer) parseSupportedFrame() frame {
	return &supportedFrame{
		frameHeader: *f.header,

		supported: f.readStringMultiMap(),
	}
}
// writeStartupFrame is a STARTUP request carrying connection options.
type writeStartupFrame struct {
	opts map[string]string
}

// String renders the request for tracing/debugging.
func (w writeStartupFrame) String() string {
	return fmt.Sprintf("[startup opts=%+v]", w.opts)
}

// writeFrame emits the STARTUP request. The compress flag is masked off the
// header because compression is only negotiated by this very frame.
func (w *writeStartupFrame) writeFrame(f *framer, streamID int) error {
	f.writeHeader(f.flags&^flagCompress, opStartup, streamID)
	f.writeStringMap(w.opts)
	return f.finishWrite()
}
// writePrepareFrame is a PREPARE request for a CQL statement.
type writePrepareFrame struct {
	statement string
}

// writeFrame emits the PREPARE request: header plus the statement as a
// [long string].
func (w *writePrepareFrame) writeFrame(f *framer, streamID int) error {
	f.writeHeader(f.flags, opPrepare, streamID)
	f.writeLongString(w.statement)
	return f.finishWrite()
}
// readTypeInfo recursively decodes an <option> type descriptor from result
// metadata: a [short] type id, then id-specific payload (custom class name,
// tuple/UDT element types, collection key/element types).
func (f *framer) readTypeInfo() TypeInfo {
	// TODO: factor this out so the same code paths can be used to parse custom
	// types and other types, as much of the logic will be duplicated.
	id := f.readShort()
	simple := NativeType{
		proto: f.proto,
		typ:   Type(id),
	}
	if simple.typ == TypeCustom {
		simple.custom = f.readString()
		// a custom class name may still map to a known native type
		if cassType := getApacheCassandraType(simple.custom); cassType != TypeCustom {
			simple.typ = cassType
		}
	}
	switch simple.typ {
	case TypeTuple:
		n := f.readShort()
		tuple := TupleTypeInfo{
			NativeType: simple,
			Elems:      make([]TypeInfo, n),
		}
		for i := 0; i < int(n); i++ {
			tuple.Elems[i] = f.readTypeInfo()
		}
		return tuple
	case TypeUDT:
		udt := UDTTypeInfo{
			NativeType: simple,
		}
		udt.KeySpace = f.readString()
		udt.Name = f.readString()
		n := f.readShort()
		udt.Elements = make([]UDTField, n)
		for i := 0; i < int(n); i++ {
			field := &udt.Elements[i]
			field.Name = f.readString()
			field.Type = f.readTypeInfo()
		}
		return udt
	case TypeMap, TypeList, TypeSet:
		collection := CollectionType{
			NativeType: simple,
		}
		// maps additionally carry a key type before the element type
		if simple.typ == TypeMap {
			collection.Key = f.readTypeInfo()
		}
		collection.Elem = f.readTypeInfo()
		return collection
	}
	return simple
}
// preparedMetadata is the metadata block of a Prepared result; v4+ adds the
// indexes of the partition-key columns.
type preparedMetadata struct {
	resultMetadata

	// proto v4+
	pkeyColumns []int
}

// String renders the metadata for tracing/debugging.
func (r preparedMetadata) String() string {
	return fmt.Sprintf("[prepared flags=0x%x pkey=%v paging_state=% X columns=%v col_count=%d actual_col_count=%d]", r.flags, r.pkeyColumns, r.pagingState, r.columns, r.colCount, r.actualColCount)
}
// parsePreparedMetadata decodes the metadata of a Prepared result: flags,
// column count, (v4+) partition-key indexes, optional paging state, and —
// unless flagNoMetaData — the column specs, with keyspace/table factored
// out when flagGlobalTableSpec is set.
func (f *framer) parsePreparedMetadata() preparedMetadata {
	// TODO: deduplicate this from parseMetadata
	meta := preparedMetadata{}
	meta.flags = f.readInt()
	meta.colCount = f.readInt()
	if meta.colCount < 0 {
		panic(fmt.Errorf("received negative column count: %d", meta.colCount))
	}
	meta.actualColCount = meta.colCount
	if f.proto >= protoVersion4 {
		pkeyCount := f.readInt()
		pkeys := make([]int, pkeyCount)
		for i := 0; i < pkeyCount; i++ {
			pkeys[i] = int(f.readShort())
		}
		meta.pkeyColumns = pkeys
	}
	if meta.flags&flagHasMorePages == flagHasMorePages {
		// copied because readBytes aliases the reusable frame buffer
		meta.pagingState = copyBytes(f.readBytes())
	}
	if meta.flags&flagNoMetaData == flagNoMetaData {
		return meta
	}
	var keyspace, table string
	globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec
	if globalSpec {
		keyspace = f.readString()
		table = f.readString()
	}
	var cols []ColumnInfo
	if meta.colCount < 1000 {
		// preallocate columninfo to avoid excess copying
		cols = make([]ColumnInfo, meta.colCount)
		for i := 0; i < meta.colCount; i++ {
			f.readCol(&cols[i], &meta.resultMetadata, globalSpec, keyspace, table)
		}
	} else {
		// use append, huge number of columns usually indicates a corrupt frame or
		// just a huge row.
		for i := 0; i < meta.colCount; i++ {
			var col ColumnInfo
			f.readCol(&col, &meta.resultMetadata, globalSpec, keyspace, table)
			cols = append(cols, col)
		}
	}
	meta.columns = cols
	return meta
}
// resultMetadata is the shared metadata block of Rows/Prepared results.
type resultMetadata struct {
	flags int

	// only if flagPageState
	pagingState []byte

	columns  []ColumnInfo
	colCount int

	// this is a count of the total number of columns which can be scanned,
	// it is at minimum len(columns) but may be larger, for instance when a column
	// is a UDT or tuple.
	actualColCount int
}

// String renders the metadata for tracing/debugging.
func (r resultMetadata) String() string {
	return fmt.Sprintf("[metadata flags=0x%x paging_state=% X columns=%v]", r.flags, r.pagingState, r.columns)
}
// readCol decodes one column spec into col. When globalSpec is set the
// keyspace/table were read once up front and are filled in here; otherwise
// they are read per column. Tuple columns widen meta.actualColCount since
// each element is scanned separately.
func (f *framer) readCol(col *ColumnInfo, meta *resultMetadata, globalSpec bool, keyspace, table string) {
	if !globalSpec {
		col.Keyspace = f.readString()
		col.Table = f.readString()
	} else {
		col.Keyspace = keyspace
		col.Table = table
	}
	col.Name = f.readString()
	col.TypeInfo = f.readTypeInfo()
	switch v := col.TypeInfo.(type) {
	// maybe also UDT
	case TupleTypeInfo:
		// -1 because we already included the tuple column
		meta.actualColCount += len(v.Elems) - 1
	}
}
// parseResultMetadata decodes the metadata of a Rows result: flags, column
// count, optional paging state, and — unless flagNoMetaData — the column
// specs, with keyspace/table factored out when flagGlobalTableSpec is set.
func (f *framer) parseResultMetadata() resultMetadata {
	var meta resultMetadata
	meta.flags = f.readInt()
	meta.colCount = f.readInt()
	if meta.colCount < 0 {
		panic(fmt.Errorf("received negative column count: %d", meta.colCount))
	}
	meta.actualColCount = meta.colCount
	if meta.flags&flagHasMorePages == flagHasMorePages {
		// copied because readBytes aliases the reusable frame buffer
		meta.pagingState = copyBytes(f.readBytes())
	}
	if meta.flags&flagNoMetaData == flagNoMetaData {
		return meta
	}
	var keyspace, table string
	globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec
	if globalSpec {
		keyspace = f.readString()
		table = f.readString()
	}
	var cols []ColumnInfo
	if meta.colCount < 1000 {
		// preallocate columninfo to avoid excess copying
		cols = make([]ColumnInfo, meta.colCount)
		for i := 0; i < meta.colCount; i++ {
			f.readCol(&cols[i], &meta, globalSpec, keyspace, table)
		}
	} else {
		// use append, huge number of columns usually indicates a corrupt frame or
		// just a huge row.
		for i := 0; i < meta.colCount; i++ {
			var col ColumnInfo
			f.readCol(&col, &meta, globalSpec, keyspace, table)
			cols = append(cols, col)
		}
	}
	meta.columns = cols
	return meta
}
// resultVoidFrame represents a RESULT frame of kind "void": the request
// completed successfully and carries no further payload.
type resultVoidFrame struct {
	frameHeader
}

func (f *resultVoidFrame) String() string {
	return "[result_void]"
}
// parseResultFrame reads the RESULT kind discriminator from the body and
// dispatches to the parser for that kind. An unrecognized kind produces a
// protocol error rather than a frame.
func (f *framer) parseResultFrame() (frame, error) {
	switch kind := f.readInt(); kind {
	case resultKindVoid:
		return &resultVoidFrame{frameHeader: *f.header}, nil
	case resultKindRows:
		return f.parseResultRows(), nil
	case resultKindKeyspace:
		return f.parseResultSetKeyspace(), nil
	case resultKindPrepared:
		return f.parseResultPrepared(), nil
	case resultKindSchemaChanged:
		return f.parseResultSchemaChange(), nil
	default:
		return nil, NewErrProtocol("unknown result kind: %x", kind)
	}
}
// resultRowsFrame represents a RESULT frame of kind "rows"; only the
// metadata and row count are decoded here.
type resultRowsFrame struct {
	frameHeader

	meta resultMetadata
	// dont parse the rows here as we only need to do it once
	numRows int
}

func (f *resultRowsFrame) String() string {
	return fmt.Sprintf("[result_rows meta=%v]", f.meta)
}

// parseResultRows decodes the metadata then the row count, leaving the row
// payload in the framer's read buffer for later consumption.
func (f *framer) parseResultRows() frame {
	result := &resultRowsFrame{}
	result.meta = f.parseResultMetadata()

	result.numRows = f.readInt()
	if result.numRows < 0 {
		// negative counts indicate frame corruption
		panic(fmt.Errorf("invalid row_count in result frame: %d", result.numRows))
	}

	return result
}
// resultKeyspaceFrame represents a RESULT frame of kind "set_keyspace",
// returned after a successful USE statement.
type resultKeyspaceFrame struct {
	frameHeader
	keyspace string
}

func (r *resultKeyspaceFrame) String() string {
	return fmt.Sprintf("[result_keyspace keyspace=%s]", r.keyspace)
}

// parseResultSetKeyspace decodes the keyspace name from the frame body.
func (f *framer) parseResultSetKeyspace() frame {
	return &resultKeyspaceFrame{
		frameHeader: *f.header,
		keyspace:    f.readString(),
	}
}
// resultPreparedFrame represents a RESULT frame of kind "prepared": the
// statement id plus the bind-parameter (request) and result metadata.
type resultPreparedFrame struct {
	frameHeader

	preparedID []byte
	reqMeta    preparedMetadata
	respMeta   resultMetadata
}

// parseResultPrepared decodes the prepared id and request metadata; result
// metadata only exists on the wire from protocol v2 onward.
func (f *framer) parseResultPrepared() frame {
	frame := &resultPreparedFrame{
		frameHeader: *f.header,
		preparedID:  f.readShortBytes(),
		reqMeta:     f.parsePreparedMetadata(),
	}

	if f.proto < protoVersion2 {
		return frame
	}

	frame.respMeta = f.parseResultMetadata()

	return frame
}
// schemaChangeKeyspace is a SCHEMA_CHANGE result/event scoped to a keyspace.
type schemaChangeKeyspace struct {
	frameHeader

	change   string
	keyspace string
}

func (f schemaChangeKeyspace) String() string {
	return fmt.Sprintf("[event schema_change_keyspace change=%q keyspace=%q]", f.change, f.keyspace)
}

// schemaChangeTable is a SCHEMA_CHANGE result/event for a table within a keyspace.
type schemaChangeTable struct {
	frameHeader

	change   string
	keyspace string
	object   string
}

func (f schemaChangeTable) String() string {
	return fmt.Sprintf("[event schema_change change=%q keyspace=%q object=%q]", f.change, f.keyspace, f.object)
}

// schemaChangeType is a SCHEMA_CHANGE result/event for a user-defined type.
// NOTE(review): unlike the keyspace/table variants it has no String method.
type schemaChangeType struct {
	frameHeader

	change   string
	keyspace string
	object   string
}

// schemaChangeFunction is a SCHEMA_CHANGE result/event for a function
// (name plus argument type names).
type schemaChangeFunction struct {
	frameHeader

	change   string
	keyspace string
	name     string
	args     []string
}

// schemaChangeAggregate is a SCHEMA_CHANGE result/event for an aggregate
// (name plus argument type names).
type schemaChangeAggregate struct {
	frameHeader

	change   string
	keyspace string
	name     string
	args     []string
}
// parseResultSchemaChange decodes a SCHEMA_CHANGE body. Protocol v1/v2
// encode change/keyspace/table strings (empty table means a keyspace-level
// change); v3+ encode change/target followed by target-specific fields.
func (f *framer) parseResultSchemaChange() frame {
	if f.proto <= protoVersion2 {
		change := f.readString()
		keyspace := f.readString()
		table := f.readString()

		if table != "" {
			return &schemaChangeTable{
				frameHeader: *f.header,
				change:      change,
				keyspace:    keyspace,
				object:      table,
			}
		} else {
			return &schemaChangeKeyspace{
				frameHeader: *f.header,
				change:      change,
				keyspace:    keyspace,
			}
		}
	} else {
		change := f.readString()
		target := f.readString()

		// TODO: could just use a separate type for each target
		switch target {
		case "KEYSPACE":
			frame := &schemaChangeKeyspace{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()

			return frame
		case "TABLE":
			frame := &schemaChangeTable{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()
			frame.object = f.readString()

			return frame
		case "TYPE":
			frame := &schemaChangeType{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()
			frame.object = f.readString()

			return frame
		case "FUNCTION":
			frame := &schemaChangeFunction{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()
			frame.name = f.readString()
			frame.args = f.readStringList()

			return frame
		case "AGGREGATE":
			frame := &schemaChangeAggregate{
				frameHeader: *f.header,
				change:      change,
			}

			frame.keyspace = f.readString()
			frame.name = f.readString()
			frame.args = f.readStringList()

			return frame
		default:
			panic(fmt.Errorf("gocql: unknown SCHEMA_CHANGE target: %q change: %q", target, change))
		}
	}
}
// authenticateFrame is the server's AUTHENTICATE message; class names the
// authenticator the client must satisfy.
type authenticateFrame struct {
	frameHeader

	class string
}

func (a *authenticateFrame) String() string {
	return fmt.Sprintf("[authenticate class=%q]", a.class)
}

// parseAuthenticateFrame decodes the authenticator class name from the body.
func (f *framer) parseAuthenticateFrame() frame {
	return &authenticateFrame{
		frameHeader: *f.header,
		class:       f.readString(),
	}
}

// authSuccessFrame is the server's AUTH_SUCCESS message with optional
// final SASL token bytes.
type authSuccessFrame struct {
	frameHeader

	data []byte
}

func (a *authSuccessFrame) String() string {
	return fmt.Sprintf("[auth_success data=%q]", a.data)
}

// parseAuthSuccessFrame decodes the optional token bytes from the body.
func (f *framer) parseAuthSuccessFrame() frame {
	return &authSuccessFrame{
		frameHeader: *f.header,
		data:        f.readBytes(),
	}
}

// authChallengeFrame is the server's AUTH_CHALLENGE message carrying a
// SASL challenge token.
type authChallengeFrame struct {
	frameHeader

	data []byte
}

func (a *authChallengeFrame) String() string {
	return fmt.Sprintf("[auth_challenge data=%q]", a.data)
}

// parseAuthChallengeFrame decodes the challenge token bytes from the body.
func (f *framer) parseAuthChallengeFrame() frame {
	return &authChallengeFrame{
		frameHeader: *f.header,
		data:        f.readBytes(),
	}
}
// statusChangeEventFrame is a STATUS_CHANGE push event (node up/down) with
// the affected node's address.
type statusChangeEventFrame struct {
	frameHeader

	change string
	host   net.IP
	port   int
}

func (t statusChangeEventFrame) String() string {
	return fmt.Sprintf("[status_change change=%s host=%v port=%v]", t.change, t.host, t.port)
}

// essentially the same as statusChange
type topologyChangeEventFrame struct {
	frameHeader

	change string
	host   net.IP
	port   int
}

func (t topologyChangeEventFrame) String() string {
	return fmt.Sprintf("[topology_change change=%s host=%v port=%v]", t.change, t.host, t.port)
}

// parseEventFrame decodes an EVENT push frame, dispatching on the event
// type string. Unknown event types panic.
func (f *framer) parseEventFrame() frame {
	eventType := f.readString()

	switch eventType {
	case "TOPOLOGY_CHANGE":
		frame := &topologyChangeEventFrame{frameHeader: *f.header}
		frame.change = f.readString()
		frame.host, frame.port = f.readInet()

		return frame
	case "STATUS_CHANGE":
		frame := &statusChangeEventFrame{frameHeader: *f.header}
		frame.change = f.readString()
		frame.host, frame.port = f.readInet()

		return frame
	case "SCHEMA_CHANGE":
		// this should work for all versions
		return f.parseResultSchemaChange()
	default:
		panic(fmt.Errorf("gocql: unknown event type: %q", eventType))
	}
}
// writeAuthResponseFrame is the client's AUTH_RESPONSE message carrying a
// SASL token to send to the server.
type writeAuthResponseFrame struct {
	data []byte
}

func (a *writeAuthResponseFrame) String() string {
	return fmt.Sprintf("[auth_response data=%q]", a.data)
}

// writeFrame implements frameWriter by delegating to the framer.
func (a *writeAuthResponseFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeAuthResponseFrame(streamID, a.data)
}

// writeAuthResponseFrame serializes an AUTH_RESPONSE: header then token bytes.
func (f *framer) writeAuthResponseFrame(streamID int, data []byte) error {
	f.writeHeader(f.flags, opAuthResponse, streamID)
	f.writeBytes(data)
	return f.finishWrite()
}
// queryValues is a single bound value for a query, optionally named.
type queryValues struct {
	value []byte

	// optional name, will set With names for values flag
	name string
	// when true the value is serialized as the protocol's "unset" marker
	// instead of value
	isUnset bool
}

// queryParams collects the serializable options of a QUERY/EXECUTE message.
// Fields below the version markers are only written for those protocol
// versions (see writeQueryParams).
type queryParams struct {
	consistency Consistency
	// v2+
	skipMeta          bool
	values            []queryValues
	pageSize          int
	pagingState       []byte
	serialConsistency SerialConsistency
	// v3+
	defaultTimestamp      bool
	defaultTimestampValue int64
}

func (q queryParams) String() string {
	return fmt.Sprintf("[query_params consistency=%v skip_meta=%v page_size=%d paging_state=%q serial_consistency=%v default_timestamp=%v values=%v]",
		q.consistency, q.skipMeta, q.pageSize, q.pagingState, q.serialConsistency, q.defaultTimestamp, q.values)
}
// writeQueryParams serializes opts in protocol order: consistency, then
// (v2+) a flags byte followed by each optional section in the fixed order
// values, page size, paging state, serial consistency, timestamp. Protocol
// v1 carries only the consistency.
func (f *framer) writeQueryParams(opts *queryParams) {
	f.writeConsistency(opts.consistency)

	if f.proto == protoVersion1 {
		return
	}

	// compute the flags byte before writing any optional section
	var flags byte
	if len(opts.values) > 0 {
		flags |= flagValues
	}

	if opts.skipMeta {
		flags |= flagSkipMetaData
	}

	if opts.pageSize > 0 {
		flags |= flagPageSize
	}

	if len(opts.pagingState) > 0 {
		flags |= flagWithPagingState
	}

	if opts.serialConsistency > 0 {
		flags |= flagWithSerialConsistency
	}

	names := false

	// protoV3 specific things
	if f.proto > protoVersion2 {
		if opts.defaultTimestamp {
			flags |= flagDefaultTimestamp
		}

		// named values: only set when the first value carries a name
		if len(opts.values) > 0 && opts.values[0].name != "" {
			flags |= flagWithNameValues
			names = true
		}
	}

	f.writeByte(flags)

	if n := len(opts.values); n > 0 {
		f.writeShort(uint16(n))

		for i := 0; i < n; i++ {
			if names {
				f.writeString(opts.values[i].name)
			}
			if opts.values[i].isUnset {
				f.writeUnset()
			} else {
				f.writeBytes(opts.values[i].value)
			}
		}
	}

	if opts.pageSize > 0 {
		f.writeInt(int32(opts.pageSize))
	}

	if len(opts.pagingState) > 0 {
		f.writeBytes(opts.pagingState)
	}

	if opts.serialConsistency > 0 {
		f.writeConsistency(Consistency(opts.serialConsistency))
	}

	if f.proto > protoVersion2 && opts.defaultTimestamp {
		// timestamp in microseconds
		var ts int64
		if opts.defaultTimestampValue != 0 {
			ts = opts.defaultTimestampValue
		} else {
			ts = time.Now().UnixNano() / 1000
		}
		f.writeLong(ts)
	}
}
// writeQueryFrame is a client QUERY message: a CQL statement plus its
// serialized query parameters.
type writeQueryFrame struct {
	statement string
	params    queryParams
}

func (w *writeQueryFrame) String() string {
	return fmt.Sprintf("[query statement=%q params=%v]", w.statement, w.params)
}

// writeFrame implements frameWriter by delegating to the framer.
func (w *writeQueryFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeQueryFrame(streamID, w.statement, &w.params)
}

// writeQueryFrame serializes a QUERY: header, long-string statement, params.
func (f *framer) writeQueryFrame(streamID int, statement string, params *queryParams) error {
	f.writeHeader(f.flags, opQuery, streamID)
	f.writeLongString(statement)
	f.writeQueryParams(params)

	return f.finishWrite()
}

// frameWriter is implemented by every outgoing message type; it serializes
// the message onto the given framer for the given stream.
type frameWriter interface {
	writeFrame(framer *framer, streamID int) error
}

// frameWriterFunc adapts a plain function to the frameWriter interface.
type frameWriterFunc func(framer *framer, streamID int) error

func (f frameWriterFunc) writeFrame(framer *framer, streamID int) error {
	return f(framer, streamID)
}
// writeExecuteFrame is a client EXECUTE message: a prepared statement id
// plus its query parameters.
type writeExecuteFrame struct {
	preparedID []byte
	params     queryParams
}

func (e *writeExecuteFrame) String() string {
	return fmt.Sprintf("[execute id=% X params=%v]", e.preparedID, &e.params)
}

// writeFrame implements frameWriter by delegating to the framer.
func (e *writeExecuteFrame) writeFrame(fr *framer, streamID int) error {
	return fr.writeExecuteFrame(streamID, e.preparedID, &e.params)
}

// writeExecuteFrame serializes an EXECUTE. Protocol v1 has no flags byte:
// it inlines the value count, values and consistency instead of using
// writeQueryParams.
func (f *framer) writeExecuteFrame(streamID int, preparedID []byte, params *queryParams) error {
	f.writeHeader(f.flags, opExecute, streamID)
	f.writeShortBytes(preparedID)
	if f.proto > protoVersion1 {
		f.writeQueryParams(params)
	} else {
		n := len(params.values)
		f.writeShort(uint16(n))
		for i := 0; i < n; i++ {
			if params.values[i].isUnset {
				f.writeUnset()
			} else {
				f.writeBytes(params.values[i].value)
			}
		}
		f.writeConsistency(params.consistency)
	}

	return f.finishWrite()
}
// TODO: can we replace BatchStatemt with batchStatement? As they prety much
// duplicate each other
type batchStatment struct {
	preparedID []byte
	statement  string
	values     []queryValues
}

// writeBatchFrame is a client BATCH message: a batch type, the statements
// (prepared or inline) and batch-wide consistency/timestamp options.
type writeBatchFrame struct {
	typ         BatchType
	statements  []batchStatment
	consistency Consistency

	// v3+
	serialConsistency     SerialConsistency
	defaultTimestamp      bool
	defaultTimestampValue int64
}

// writeFrame implements frameWriter by delegating to the framer.
func (w *writeBatchFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeBatchFrame(streamID, w)
}

// writeBatchFrame serializes a BATCH: header, batch type, statement count,
// then per statement a kind byte (0 = query string, 1 = prepared id), the
// statement/id and its values; finally the consistency and (v3+) a flags
// byte with the optional serial consistency and timestamp.
func (f *framer) writeBatchFrame(streamID int, w *writeBatchFrame) error {
	f.writeHeader(f.flags, opBatch, streamID)
	f.writeByte(byte(w.typ))

	n := len(w.statements)
	f.writeShort(uint16(n))

	var flags byte

	for i := 0; i < n; i++ {
		b := &w.statements[i]
		if len(b.preparedID) == 0 {
			f.writeByte(0)
			f.writeLongString(b.statement)
		} else {
			f.writeByte(1)
			f.writeShortBytes(b.preparedID)
		}

		f.writeShort(uint16(len(b.values)))
		for j := range b.values {
			col := b.values[j]
			if f.proto > protoVersion2 && col.name != "" {
				// TODO: move this check into the caller and set a flag on writeBatchFrame
				// to indicate using named values
				if f.proto <= protoVersion5 {
					return fmt.Errorf("gocql: named query values are not supported in batches, please see https://issues.apache.org/jira/browse/CASSANDRA-10246")
				}
				flags |= flagWithNameValues
				f.writeString(col.name)
			}
			if col.isUnset {
				f.writeUnset()
			} else {
				f.writeBytes(col.value)
			}
		}
	}

	f.writeConsistency(w.consistency)

	if f.proto > protoVersion2 {
		if w.serialConsistency > 0 {
			flags |= flagWithSerialConsistency
		}
		if w.defaultTimestamp {
			flags |= flagDefaultTimestamp
		}

		// note: for BATCH the flags byte comes after the consistency
		f.writeByte(flags)

		if w.serialConsistency > 0 {
			f.writeConsistency(Consistency(w.serialConsistency))
		}

		if w.defaultTimestamp {
			var ts int64
			if w.defaultTimestampValue != 0 {
				ts = w.defaultTimestampValue
			} else {
				ts = time.Now().UnixNano() / 1000
			}
			f.writeLong(ts)
		}
	}

	return f.finishWrite()
}
// writeOptionsFrame is a client OPTIONS message; it has an empty body.
type writeOptionsFrame struct{}

// writeFrame implements frameWriter by delegating to the framer.
func (w *writeOptionsFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeOptionsFrame(streamID, w)
}

// writeOptionsFrame serializes an OPTIONS message (header only).
func (f *framer) writeOptionsFrame(stream int, _ *writeOptionsFrame) error {
	f.writeHeader(f.flags, opOptions, stream)
	return f.finishWrite()
}

// writeRegisterFrame is a client REGISTER message listing the push event
// types the connection wants to receive.
type writeRegisterFrame struct {
	events []string
}

// writeFrame implements frameWriter by delegating to the framer.
func (w *writeRegisterFrame) writeFrame(framer *framer, streamID int) error {
	return framer.writeRegisterFrame(streamID, w)
}

// writeRegisterFrame serializes a REGISTER: header then the event name list.
func (f *framer) writeRegisterFrame(streamID int, w *writeRegisterFrame) error {
	f.writeHeader(f.flags, opRegister, streamID)
	f.writeStringList(w.events)

	return f.finishWrite()
}
// readByte consumes and returns one byte from the read buffer; panics when
// the buffer is exhausted.
func (f *framer) readByte() byte {
	if len(f.rbuf) < 1 {
		panic(fmt.Errorf("not enough bytes in buffer to read byte require 1 got: %d", len(f.rbuf)))
	}

	b := f.rbuf[0]
	f.rbuf = f.rbuf[1:]
	return b
}

// readInt consumes a 4-byte big-endian signed integer.
func (f *framer) readInt() (n int) {
	if len(f.rbuf) < 4 {
		panic(fmt.Errorf("not enough bytes in buffer to read int require 4 got: %d", len(f.rbuf)))
	}

	n = int(int32(f.rbuf[0])<<24 | int32(f.rbuf[1])<<16 | int32(f.rbuf[2])<<8 | int32(f.rbuf[3]))
	f.rbuf = f.rbuf[4:]
	return
}

// readShort consumes a 2-byte big-endian unsigned integer.
func (f *framer) readShort() (n uint16) {
	if len(f.rbuf) < 2 {
		panic(fmt.Errorf("not enough bytes in buffer to read short require 2 got: %d", len(f.rbuf)))
	}
	n = uint16(f.rbuf[0])<<8 | uint16(f.rbuf[1])
	f.rbuf = f.rbuf[2:]
	return
}

// readLong consumes an 8-byte big-endian signed integer.
func (f *framer) readLong() (n int64) {
	if len(f.rbuf) < 8 {
		panic(fmt.Errorf("not enough bytes in buffer to read long require 8 got: %d", len(f.rbuf)))
	}
	n = int64(f.rbuf[0])<<56 | int64(f.rbuf[1])<<48 | int64(f.rbuf[2])<<40 | int64(f.rbuf[3])<<32 |
		int64(f.rbuf[4])<<24 | int64(f.rbuf[5])<<16 | int64(f.rbuf[6])<<8 | int64(f.rbuf[7])
	f.rbuf = f.rbuf[8:]
	return
}

// readString consumes a [string]: a short length prefix followed by that
// many bytes of UTF-8.
func (f *framer) readString() (s string) {
	size := f.readShort()

	if len(f.rbuf) < int(size) {
		panic(fmt.Errorf("not enough bytes in buffer to read string require %d got: %d", size, len(f.rbuf)))
	}

	s = string(f.rbuf[:size])
	f.rbuf = f.rbuf[size:]
	return
}

// readLongString consumes a [long string]: an int length prefix followed by
// that many bytes.
func (f *framer) readLongString() (s string) {
	size := f.readInt()

	if len(f.rbuf) < size {
		panic(fmt.Errorf("not enough bytes in buffer to read long string require %d got: %d", size, len(f.rbuf)))
	}

	s = string(f.rbuf[:size])
	f.rbuf = f.rbuf[size:]
	return
}

// readUUID consumes 16 raw bytes and converts them to a UUID.
func (f *framer) readUUID() *UUID {
	if len(f.rbuf) < 16 {
		panic(fmt.Errorf("not enough bytes in buffer to read uuid require %d got: %d", 16, len(f.rbuf)))
	}

	// TODO: how to handle this error, if it is a uuid, then sureley, problems?
	u, _ := UUIDFromBytes(f.rbuf[:16])
	f.rbuf = f.rbuf[16:]
	return &u
}

// readStringList consumes a short count followed by that many [string]s.
func (f *framer) readStringList() []string {
	size := f.readShort()

	l := make([]string, size)
	for i := 0; i < int(size); i++ {
		l[i] = f.readString()
	}

	return l
}

// readBytesInternal consumes a [bytes] value: an int length then the data.
// A negative length encodes null and yields (nil, nil). Unlike the other
// readers it returns an error instead of panicking on a short buffer.
func (f *framer) readBytesInternal() ([]byte, error) {
	size := f.readInt()
	if size < 0 {
		return nil, nil
	}

	if len(f.rbuf) < size {
		return nil, fmt.Errorf("not enough bytes in buffer to read bytes require %d got: %d", size, len(f.rbuf))
	}

	// note: returns a sub-slice of rbuf; callers must copy if they retain it
	l := f.rbuf[:size]
	f.rbuf = f.rbuf[size:]

	return l, nil
}

// readBytes is the panicking wrapper around readBytesInternal.
func (f *framer) readBytes() []byte {
	l, err := f.readBytesInternal()
	if err != nil {
		panic(err)
	}

	return l
}

// readShortBytes consumes a [short bytes] value: a short length then the data.
func (f *framer) readShortBytes() []byte {
	size := f.readShort()
	if len(f.rbuf) < int(size) {
		panic(fmt.Errorf("not enough bytes in buffer to read short bytes: require %d got %d", size, len(f.rbuf)))
	}

	// note: returns a sub-slice of rbuf; callers must copy if they retain it
	l := f.rbuf[:size]
	f.rbuf = f.rbuf[size:]

	return l
}
// readInet decodes an [inet] value: a one-byte address length (4 for IPv4,
// 16 for IPv6), the address bytes, then a 4-byte port.
func (f *framer) readInet() (net.IP, int) {
	if len(f.rbuf) < 1 {
		panic(fmt.Errorf("not enough bytes in buffer to read inet size require %d got: %d", 1, len(f.rbuf)))
	}

	size := f.rbuf[0]
	f.rbuf = f.rbuf[1:]

	if !(size == 4 || size == 16) {
		panic(fmt.Errorf("invalid IP size: %d", size))
	}

	// bug fix: this previously checked len(f.rbuf) < 1 rather than the
	// declared address length, so a truncated frame reached the slice
	// expression below and died with a raw bounds panic instead of the
	// descriptive error the message already promises.
	if len(f.rbuf) < int(size) {
		panic(fmt.Errorf("not enough bytes in buffer to read inet require %d got: %d", size, len(f.rbuf)))
	}

	// copy out: the address must not alias the reusable read buffer
	ip := make([]byte, size)
	copy(ip, f.rbuf[:size])
	f.rbuf = f.rbuf[size:]

	port := f.readInt()
	return net.IP(ip), port
}
// readConsistency consumes a short and converts it to a Consistency.
func (f *framer) readConsistency() Consistency {
	return Consistency(f.readShort())
}

// readStringMap consumes a short count of string->string pairs.
func (f *framer) readStringMap() map[string]string {
	size := f.readShort()
	m := make(map[string]string, size)

	for i := 0; i < int(size); i++ {
		k := f.readString()
		v := f.readString()
		m[k] = v
	}

	return m
}

// readBytesMap consumes a short count of string->[bytes] pairs.
func (f *framer) readBytesMap() map[string][]byte {
	size := f.readShort()
	m := make(map[string][]byte, size)

	for i := 0; i < int(size); i++ {
		k := f.readString()
		v := f.readBytes()
		m[k] = v
	}

	return m
}

// readStringMultiMap consumes a short count of string->string-list pairs.
func (f *framer) readStringMultiMap() map[string][]string {
	size := f.readShort()
	m := make(map[string][]string, size)

	for i := 0; i < int(size); i++ {
		k := f.readString()
		v := f.readStringList()
		m[k] = v
	}

	return m
}

// writeByte appends a single byte to the write buffer.
func (f *framer) writeByte(b byte) {
	f.wbuf = append(f.wbuf, b)
}
// appendBytes appends d as a [bytes] value: a 4-byte length prefix then the
// data. A nil slice is encoded as length -1 (protocol null).
func appendBytes(p []byte, d []byte) []byte {
	if d == nil {
		return appendInt(p, -1)
	}
	p = appendInt(p, int32(len(d)))
	p = append(p, d...)
	return p
}
// appendShort appends n to p as 2 big-endian bytes and returns the
// extended slice.
func appendShort(p []byte, n uint16) []byte {
	return append(p, byte(n>>8), byte(n))
}
// appendInt appends n to p as 4 big-endian bytes and returns the
// extended slice.
func appendInt(p []byte, n int32) []byte {
	return append(p,
		byte(n>>24),
		byte(n>>16),
		byte(n>>8),
		byte(n),
	)
}
// appendLong appends n to p as 8 big-endian bytes and returns the
// extended slice.
func appendLong(p []byte, n int64) []byte {
	for shift := 56; shift >= 0; shift -= 8 {
		p = append(p, byte(n>>uint(shift)))
	}
	return p
}
// these are protocol level binary types

// writeInt appends a 4-byte big-endian integer to the write buffer.
func (f *framer) writeInt(n int32) {
	f.wbuf = appendInt(f.wbuf, n)
}

// writeShort appends a 2-byte big-endian integer to the write buffer.
func (f *framer) writeShort(n uint16) {
	f.wbuf = appendShort(f.wbuf, n)
}

// writeLong appends an 8-byte big-endian integer to the write buffer.
func (f *framer) writeLong(n int64) {
	f.wbuf = appendLong(f.wbuf, n)
}

// writeString appends a [string]: short length prefix plus the bytes.
func (f *framer) writeString(s string) {
	f.writeShort(uint16(len(s)))
	f.wbuf = append(f.wbuf, s...)
}

// writeLongString appends a [long string]: int length prefix plus the bytes.
func (f *framer) writeLongString(s string) {
	f.writeInt(int32(len(s)))
	f.wbuf = append(f.wbuf, s...)
}

// writeUUID appends the 16 raw UUID bytes (no length prefix).
func (f *framer) writeUUID(u *UUID) {
	f.wbuf = append(f.wbuf, u[:]...)
}

// writeStringList appends a short count followed by each [string].
func (f *framer) writeStringList(l []string) {
	f.writeShort(uint16(len(l)))
	for _, s := range l {
		f.writeString(s)
	}
}

func (f *framer) writeUnset() {
	// Protocol version 4 specifies that bind variables do not require having a
	// value when executing a statement.   Bind variables without a value are
	// called 'unset'. The 'unset' bind variable is serialized as the int
	// value '-2' without following bytes.
	f.writeInt(-2)
}

// writeBytes appends a [bytes] value; nil is encoded as length -1 (null).
func (f *framer) writeBytes(p []byte) {
	// TODO: handle null case correctly,
	//     [bytes]        A [int] n, followed by n bytes if n >= 0. If n < 0,
	//					  no byte should follow and the value represented is `null`.
	if p == nil {
		f.writeInt(-1)
	} else {
		f.writeInt(int32(len(p)))
		f.wbuf = append(f.wbuf, p...)
	}
}

// writeShortBytes appends a [short bytes] value: short length plus data.
func (f *framer) writeShortBytes(p []byte) {
	f.writeShort(uint16(len(p)))
	f.wbuf = append(f.wbuf, p...)
}

// writeInet appends an [inet]: one byte address length, the address bytes,
// then a 4-byte port.
func (f *framer) writeInet(ip net.IP, port int) {
	f.wbuf = append(f.wbuf,
		byte(len(ip)),
	)

	f.wbuf = append(f.wbuf,
		[]byte(ip)...,
	)

	f.writeInt(int32(port))
}

// writeConsistency appends a consistency level as a short.
func (f *framer) writeConsistency(cons Consistency) {
	f.writeShort(uint16(cons))
}

// writeStringMap appends a short count of string->string pairs. Note that
// Go map iteration order is random, so pair order on the wire varies.
func (f *framer) writeStringMap(m map[string]string) {
	f.writeShort(uint16(len(m)))
	for k, v := range m {
		f.writeString(k)
		f.writeString(v)
	}
}
|
package yum
import (
"compress/bzip2"
"database/sql"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"github.com/gonuts/logger"
_ "github.com/mattn/go-sqlite3"
)
// RepositorySQLiteBackend is Backend querying YUM SQLite repositories.
type RepositorySQLiteBackend struct {
	Name         string
	Packages     map[string][]*Package
	Provides     map[string][]*Provides
	DBNameCompr  string // file name of the bzip2-compressed primary DB
	DBName       string // file name of the uncompressed primary DB
	PrimaryCompr string // full path to the compressed DB in the cache dir
	Primary      string // full path to the uncompressed DB in the cache dir
	Repository   *Repository
	db           *sql.DB        // opened by LoadDB
	msg          *logger.Logger // shared with the owning Repository
}
// NewRepositorySQLiteBackend creates a SQLite backend for repo, rooting
// both the compressed and uncompressed primary DB paths in the
// repository's cache directory.
func NewRepositorySQLiteBackend(repo *Repository) (*RepositorySQLiteBackend, error) {
	const (
		comprdbname = "primary.sqlite.bz2"
		dbname      = "primary.sqlite"
	)
	backend := RepositorySQLiteBackend{
		Name:         "RepositorySQLiteBackend",
		Packages:     make(map[string][]*Package),
		Provides:     make(map[string][]*Provides),
		DBNameCompr:  comprdbname,
		DBName:       dbname,
		PrimaryCompr: filepath.Join(repo.CacheDir, comprdbname),
		Primary:      filepath.Join(repo.CacheDir, dbname),
		Repository:   repo,
		msg:          repo.msg,
	}
	return &backend, nil
}
// YumDataType returns the ID for the data type as used in the repomd.xml file.
func (repo *RepositorySQLiteBackend) YumDataType() string {
	return "primary_db"
}
// GetLatestDB downloads the compressed primary DB from url into
// repo.PrimaryCompr, then decompresses it into repo.Primary. The Sync/Seek
// pair is required: the same file handle that received the download is
// rewound and re-read as the bzip2 source.
func (repo *RepositorySQLiteBackend) GetLatestDB(url string) error {
	var err error
	repo.msg.Debugf("downloading latest version of SQLite DB\n")
	out, err := os.Create(repo.PrimaryCompr)
	if err != nil {
		return err
	}
	defer out.Close()

	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// stream the body straight to disk
	_, err = io.Copy(out, resp.Body)
	if err != nil {
		return err
	}

	repo.msg.Debugf("decompressing latest version of SQLite DB\n")
	dbfile, err := os.Create(repo.Primary)
	if err != nil {
		return err
	}
	defer dbfile.Close()

	// flush the download before re-reading the handle from the start
	err = out.Sync()
	if err != nil {
		return err
	}

	_, err = out.Seek(0, 0)
	if err != nil {
		return err
	}

	return repo.decompress(dbfile, out)
}
// HasDB reports whether the compressed primary DB exists in the cache dir.
func (repo *RepositorySQLiteBackend) HasDB() bool {
	return path_exists(repo.PrimaryCompr)
}
// LoadDB opens the primary SQLite database, first decompressing the
// downloaded .bz2 copy when the uncompressed file is not present yet.
func (repo *RepositorySQLiteBackend) LoadDB() error {
	if !path_exists(repo.Primary) {
		if err := repo.decompress2(repo.Primary, repo.PrimaryCompr); err != nil {
			return err
		}
	}

	db, err := sql.Open("sqlite3", repo.Primary)
	if err != nil {
		return err
	}
	repo.db = db
	return nil
}
// FindLatestMatchingName locates a package by name and returns the latest
// available version. version/release narrow the match via an exact-match
// ("EQ") requirement; an error is returned when nothing matches.
//
// Fix: the original shadowed its result variable `pkg` with the loop
// variable (`for _, pkg := range pkgs`), which is confusing and flagged by
// linters; the loop variable is now distinct.
func (repo *RepositorySQLiteBackend) FindLatestMatchingName(name, version string, release int) (*Package, error) {
	pkgs, err := repo.loadPackagesByName(name, version)
	if err != nil {
		return nil, err
	}

	// keep only the packages satisfying the exact-match requirement
	matching := make(RPMSlice, 0, len(pkgs))
	req := NewRequires(name, version, release, 0, "EQ", "")
	for _, p := range pkgs {
		if req.ProvideMatches(p) {
			matching = append(matching, p)
		}
	}

	if len(matching) == 0 {
		return nil, fmt.Errorf("no such package %q", name)
	}

	// after sorting, the last element is taken as the latest version
	sort.Sort(matching)
	return matching[len(matching)-1].(*Package), nil
}
// FindLatestMatchingRequire locates a package providing a given functionality.
// It first selects the best-matching Provides row, then resolves the
// packages supplying it and returns the latest of those.
func (repo *RepositorySQLiteBackend) FindLatestMatchingRequire(requirement *Requires) (*Package, error) {
	var pkg *Package
	var err error

	// list of all Provides with the same name
	provides, err := repo.findProvidesByName(requirement.Name())
	if err != nil {
		return nil, err
	}

	// keep only the Provides satisfying the requirement
	matching := make(RPMSlice, 0, len(provides))
	for _, pr := range provides {
		if requirement.ProvideMatches(pr) {
			matching = append(matching, pr)
		}
	}

	if len(matching) <= 0 {
		err = fmt.Errorf("no Provides for %q", requirement.Name())
		return nil, err
	}

	// now look-up the matching package
	sort.Sort(matching)
	prov := matching[len(matching)-1].(*Provides)
	pkgs, err := repo.loadPackagesProviding(prov)
	if err != nil {
		return nil, err
	}

	if len(pkgs) <= 0 {
		err = fmt.Errorf("no such package %q", requirement.Name())
		return nil, err
	}

	// reuse the slice to rank the candidate packages; the last after
	// sorting is taken as the latest
	matching = matching[:0]
	for _, p := range pkgs {
		matching = append(matching, p)
	}
	sort.Sort(matching)
	pkg = matching[len(matching)-1].(*Package)
	return pkg, err
}
// GetPackages returns all the packages known by a YUM repository.
// Prepare/Query failures are logged and yield nil; scan/iteration failures
// are logged and then panic (pre-existing behavior, kept for callers).
//
// Fix: removed the `return nil` statements that directly followed each
// panic — they were unreachable and rejected by `go vet`.
func (repo *RepositorySQLiteBackend) GetPackages() []*Package {
	query := "select pkgKey, name, version, release, epoch, rpm_group, arch, location_href from packages"
	stmt, err := repo.db.Prepare(query)
	if err != nil {
		repo.msg.Errorf("db-error: %v\n", err)
		return nil
	}
	defer stmt.Close()

	rows, err := stmt.Query()
	if err != nil {
		repo.msg.Errorf("db-error: %v\n", err)
		return nil
	}
	defer rows.Close()

	pkgs := make([]*Package, 0)
	for rows.Next() {
		pkg, err := repo.newPackageFromScan(rows)
		if err != nil {
			repo.msg.Errorf("db-error: %v\n", err)
			repo.msg.Errorf("query: %q\n", query)
			// FIXME: returning an error would be kinder to callers than panicking
			panic(err)
		}
		pkgs = append(pkgs, pkg)
	}

	if err := rows.Err(); err != nil {
		repo.msg.Errorf("db-error-err: %v\n", err)
		panic(err)
	}

	// explicit Close calls surface errors; the deferred Closes are no-ops after this
	if err := rows.Close(); err != nil {
		repo.msg.Errorf("db-error-close-row: %v\n", err)
		panic(err)
	}
	if err := stmt.Close(); err != nil {
		repo.msg.Errorf("db-error-close-stmt: %v\n", err)
		panic(err)
	}

	return pkgs
}
// newPackageFromScan builds a Package from the current row of a query over
// the packages table (pkgKey, name, version, release, epoch, rpm_group,
// arch, location_href — in that order), then loads the package's requires
// and provides by its pkgKey.
func (repo *RepositorySQLiteBackend) newPackageFromScan(rows *sql.Rows) (*Package, error) {
	var pkg Package
	pkg.repository = repo.Repository
	pkg.requires = make([]*Requires, 0)
	pkg.provides = make([]*Provides, 0)

	// scan into []byte so empty/NULL columns don't fail numeric conversion
	var pkgkey sql.NullInt64
	var name []byte
	var version []byte
	var rel_str []byte
	var epoch_str []byte
	var group []byte
	var arch []byte
	var location []byte

	err := rows.Scan(
		&pkgkey,
		&name,
		&version,
		&rel_str,
		&epoch_str,
		&group,
		&arch,
		&location,
	)
	if err != nil {
		repo.msg.Errorf("scan error: %v\n", err)
		return nil, err
	}

	pkg.rpmBase.name = string(name)
	pkg.rpmBase.version = string(version)

	// release/epoch are parsed only when non-empty; otherwise the zero
	// value is kept
	if string(rel_str) != "" {
		rel, err := strconv.Atoi(string(rel_str))
		if err != nil {
			return nil, err
		}
		pkg.rpmBase.release = rel
	}

	if string(epoch_str) != "" {
		epoch, err := strconv.Atoi(string(epoch_str))
		if err != nil {
			return nil, err
		}
		pkg.rpmBase.epoch = epoch
	}

	pkg.group = string(group)
	pkg.arch = string(arch)
	pkg.location = string(location)

	err = repo.loadRequires(int(pkgkey.Int64), &pkg)
	if err != nil {
		repo.msg.Errorf("load-requires error: %v\n", err)
		return nil, err
	}
	err = repo.loadProvides(int(pkgkey.Int64), &pkg)
	if err != nil {
		repo.msg.Errorf("load-provides error: %v\n", err)
		return nil, err
	}

	return &pkg, nil
}
// loadProvides fills pkg.provides with all Provides rows attached to pkgkey.
func (repo *RepositorySQLiteBackend) loadProvides(pkgkey int, pkg *Package) error {
	var err error
	stmt, err := repo.db.Prepare(
		"select name, version, release, epoch, flags from provides where pkgKey=?",
	)
	if err != nil {
		return err
	}
	defer stmt.Close()

	rows, err := stmt.Query(pkgkey)
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var p Provides
		// scan into []byte so empty columns don't fail numeric conversion
		var name []byte
		var version []byte
		var release []byte
		var epoch []byte
		var flags []byte
		err = rows.Scan(
			&name, &version, &release,
			&epoch, &flags,
		)
		if err != nil {
			return err
		}
		p.rpmBase.name = string(name)
		p.rpmBase.version = string(version)
		if string(release) != "" {
			rel, err := strconv.Atoi(string(release))
			if err != nil {
				return err
			}
			p.rpmBase.release = rel
		}
		if string(epoch) != "" {
			epo, err := strconv.Atoi(string(epoch))
			if err != nil {
				return err
			}
			p.rpmBase.epoch = epo
		}
		p.rpmBase.flags = string(flags)
		p.Package = pkg
		pkg.provides = append(pkg.provides, &p)
	}
	err = rows.Err()
	if err != nil {
		return err
	}

	// explicit Close calls surface errors; the deferred Closes become no-ops
	err = rows.Close()
	if err != nil {
		return err
	}

	err = stmt.Close()
	if err != nil {
		return err
	}

	return err
}
// loadRequires fills pkg.requires with all Requires rows attached to pkgkey.
// Empty flags default to "EQ".
//
// Fix: the scanned Requires was built and then discarded — it was never
// appended to pkg.requires, unlike loadProvides which appends each Provides.
// Also removed a dead mid-loop `if err != nil` check (err was already nil
// at that point). NOTE(review): confirm callers expect pkg.requires to be
// populated; newPackageFromScan pre-allocates it, which suggests they do.
func (repo *RepositorySQLiteBackend) loadRequires(pkgkey int, pkg *Package) error {
	stmt, err := repo.db.Prepare(
		"select name, version, release, epoch, flags, pre from requires where pkgKey=?",
	)
	if err != nil {
		return err
	}
	defer stmt.Close()

	rows, err := stmt.Query(pkgkey)
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var req Requires
		// scan into []byte so empty columns don't fail numeric conversion
		var name []byte
		var version []byte
		var release []byte
		var epoch []byte
		var flags []byte
		var pre []byte
		err = rows.Scan(
			&name, &version, &release,
			&epoch, &flags,
			&pre,
		)
		if err != nil {
			return err
		}
		req.rpmBase.name = string(name)
		req.rpmBase.version = string(version)
		if string(release) != "" {
			rel, err := strconv.Atoi(string(release))
			if err != nil {
				return err
			}
			req.rpmBase.release = rel
		}
		if string(epoch) != "" {
			epo, err := strconv.Atoi(string(epoch))
			if err != nil {
				return err
			}
			req.rpmBase.epoch = epo
		}
		req.rpmBase.flags = string(flags)
		req.pre = string(pre)
		if req.rpmBase.flags == "" {
			req.rpmBase.flags = "EQ"
		}

		// bug fix: record the requirement on the package (was discarded)
		pkg.requires = append(pkg.requires, &req)
	}

	if err := rows.Err(); err != nil {
		return err
	}

	// explicit Close calls surface errors; the deferred Closes become no-ops
	if err := rows.Close(); err != nil {
		return err
	}
	return stmt.Close()
}
// loadPackagesByName fetches every package row matching name (and, when
// version is non-empty, that exact version) and materializes each via
// newPackageFromScan.
//
// Fix: the two-part query string concatenated to
// "...location_hreffrom packages..." — the missing space made the SQL
// invalid. A leading space now separates the select list from the FROM
// clause.
func (repo *RepositorySQLiteBackend) loadPackagesByName(name, version string) ([]*Package, error) {
	var err error
	pkgs := make([]*Package, 0)
	args := []interface{}{name}
	query := "select pkgKey, name, version, release, epoch, rpm_group, arch, location_href" +
		" from packages where name = ?"
	if version != "" {
		query += " and version = ?"
		args = append(args, version)
	}

	stmt, err := repo.db.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	rows, err := stmt.Query(args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	for rows.Next() {
		pkg, err := repo.newPackageFromScan(rows)
		if err != nil {
			return nil, err
		}
		pkgs = append(pkgs, pkg)
	}
	err = rows.Err()
	if err != nil {
		return nil, err
	}

	// explicit Close calls surface errors; the deferred Closes become no-ops
	err = rows.Close()
	if err != nil {
		return nil, err
	}
	err = stmt.Close()
	if err != nil {
		return nil, err
	}
	return pkgs, err
}
// findProvidesByName returns all Provides rows with the given name.
//
// Fix: the query selected 6 columns (including pkgKey) while the Scan call
// below has only 5 destinations, so every call failed with a column-count
// mismatch from database/sql. pkgKey was never used, so it is dropped from
// the select list.
func (repo *RepositorySQLiteBackend) findProvidesByName(name string) ([]*Provides, error) {
	var err error
	provides := make([]*Provides, 0)
	query := "select name, version, release, epoch, flags from provides where name=?"
	stmt, err := repo.db.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	rows, err := stmt.Query(name)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	for rows.Next() {
		var p Provides
		err = rows.Scan(
			&p.rpmBase.name, &p.rpmBase.version, &p.rpmBase.release,
			&p.rpmBase.epoch,
			&p.rpmBase.flags,
		)
		if err != nil {
			return nil, err
		}
		// the providing package is resolved later (loadPackagesProviding)
		p.Package = nil
		provides = append(provides, &p)
	}
	err = rows.Err()
	if err != nil {
		return nil, err
	}

	// explicit Close calls surface errors; the deferred Closes become no-ops
	err = rows.Close()
	if err != nil {
		return nil, err
	}

	err = stmt.Close()
	if err != nil {
		return nil, err
	}

	return provides, err
}
// loadPackagesProviding returns the packages whose provides rows match the
// given Provides (by name, version and — when positive — release).
//
// Fix: the function ended with a duplicated `if err != nil` check (dead
// code: err had just been tested). The sibling loaders close the statement
// explicitly there; this now does the same.
func (repo *RepositorySQLiteBackend) loadPackagesProviding(prov *Provides) ([]*Package, error) {
	pkgs := make([]*Package, 0)
	var err error
	args := []interface{}{
		prov.Name(),
		prov.Version(),
	}
	query := `select p.pkgKey, p.name, p.version, p.release, p.epoch, p.rpm_group, p.arch, p.location_href
from packages p, provides r
where p.pkgKey = r.pkgKey
  and r.name = ?
  and r.version = ?`
	if prov.Release() > 0 {
		query += " and r.release = ?"
		args = append(args, prov.Release())
	}

	stmt, err := repo.db.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	rows, err := stmt.Query(args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	for rows.Next() {
		pkg, err := repo.newPackageFromScan(rows)
		if err != nil {
			return nil, err
		}
		pkgs = append(pkgs, pkg)
	}
	err = rows.Err()
	if err != nil {
		return nil, err
	}

	// explicit Close calls surface errors; the deferred Closes become no-ops
	err = rows.Close()
	if err != nil {
		return nil, err
	}
	err = stmt.Close()
	if err != nil {
		return nil, err
	}
	return pkgs, err
}
// decompress streams the bzip2-compressed src into dst.
func (repo *RepositorySQLiteBackend) decompress(dst io.Writer, src io.Reader) error {
	_, err := io.Copy(dst, bzip2.NewReader(src))
	return err
}
// decompress2 decompresses the bzip2 file at path src into a new file at
// path dst, syncing dst to disk before returning.
func (repo *RepositorySQLiteBackend) decompress2(dst string, src string) error {
	fdst, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer fdst.Close()

	fsrc, err := os.Open(src)
	if err != nil {
		return err
	}
	defer fsrc.Close()

	err = repo.decompress(fdst, fsrc)
	if err != nil {
		return err
	}

	// flush to disk so a later open sees the complete DB
	err = fdst.Sync()
	if err != nil {
		return err
	}

	return err
}
// init registers this backend's factory in the package-level backend table
// under its canonical name.
func init() {
	g_backends["RepositorySQLiteBackend"] = func(repo *Repository) (Backend, error) {
		return NewRepositorySQLiteBackend(repo)
	}
}
// EOF
yum.sql: pkgKey -> pkgkey
package yum
import (
"compress/bzip2"
"database/sql"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"github.com/gonuts/logger"
_ "github.com/mattn/go-sqlite3"
)
// RepositorySQLiteBackend is Backend querying YUM SQLite repositories.
type RepositorySQLiteBackend struct {
	Name         string
	Packages     map[string][]*Package
	Provides     map[string][]*Provides
	DBNameCompr  string // file name of the bzip2-compressed primary DB
	DBName       string // file name of the uncompressed primary DB
	PrimaryCompr string // full path to the compressed DB in the cache dir
	Primary      string // full path to the uncompressed DB in the cache dir
	Repository   *Repository
	db           *sql.DB        // opened by LoadDB
	msg          *logger.Logger // shared with the owning Repository
}
// NewRepositorySQLiteBackend creates a SQLite backend for repo, rooting
// both the compressed and uncompressed primary DB paths in the
// repository's cache directory.
func NewRepositorySQLiteBackend(repo *Repository) (*RepositorySQLiteBackend, error) {
	const comprdbname = "primary.sqlite.bz2"
	const dbname = "primary.sqlite"
	return &RepositorySQLiteBackend{
		Name:         "RepositorySQLiteBackend",
		Packages:     make(map[string][]*Package),
		Provides:     make(map[string][]*Provides),
		DBNameCompr:  comprdbname,
		DBName:       dbname,
		PrimaryCompr: filepath.Join(repo.CacheDir, comprdbname),
		Primary:      filepath.Join(repo.CacheDir, dbname),
		Repository:   repo,
		msg:          repo.msg,
	}, nil
}
// YumDataType returns the ID for the data type as used in the repomd.xml file.
func (repo *RepositorySQLiteBackend) YumDataType() string {
	const dataType = "primary_db"
	return dataType
}
// GetLatestDB downloads the compressed primary DB from url into
// repo.PrimaryCompr and decompresses it into repo.Primary.
func (repo *RepositorySQLiteBackend) GetLatestDB(url string) error {
	repo.msg.Debugf("downloading latest version of SQLite DB\n")
	out, err := os.Create(repo.PrimaryCompr)
	if err != nil {
		return err
	}
	defer out.Close()
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Without this check an HTTP error page would be written verbatim into
	// the cache file and later fail bzip2 decompression with a cryptic error.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("yum: could not download %q: %s", url, resp.Status)
	}
	if _, err = io.Copy(out, resp.Body); err != nil {
		return err
	}
	repo.msg.Debugf("decompressing latest version of SQLite DB\n")
	dbfile, err := os.Create(repo.Primary)
	if err != nil {
		return err
	}
	defer dbfile.Close()
	// flush the download to disk, then rewind before decompressing from it
	if err = out.Sync(); err != nil {
		return err
	}
	if _, err = out.Seek(0, 0); err != nil {
		return err
	}
	return repo.decompress(dbfile, out)
}
// HasDB reports whether the compressed primary DB is already present in
// the repository cache directory.
func (repo *RepositorySQLiteBackend) HasDB() bool {
return path_exists(repo.PrimaryCompr)
}
// LoadDB opens the primary SQLite database, first decompressing it from
// the cached .bz2 copy when the plain DB file is not present yet.
func (repo *RepositorySQLiteBackend) LoadDB() error {
	if !path_exists(repo.Primary) {
		if err := repo.decompress2(repo.Primary, repo.PrimaryCompr); err != nil {
			return err
		}
	}
	db, err := sql.Open("sqlite3", repo.Primary)
	if err != nil {
		return err
	}
	repo.db = db
	return nil
}
// FindLatestMatchingName locates a package by name and returns the latest
// available version.
func (repo *RepositorySQLiteBackend) FindLatestMatchingName(name, version string, release int) (*Package, error) {
	pkgs, err := repo.loadPackagesByName(name, version)
	if err != nil {
		return nil, err
	}
	req := NewRequires(name, version, release, 0, "EQ", "")
	matching := make(RPMSlice, 0, len(pkgs))
	for _, candidate := range pkgs {
		if req.ProvideMatches(candidate) {
			matching = append(matching, candidate)
		}
	}
	if len(matching) == 0 {
		return nil, fmt.Errorf("no such package %q", name)
	}
	// the highest version/release sorts last
	sort.Sort(matching)
	return matching[len(matching)-1].(*Package), nil
}
// FindLatestMatchingRequire locates a package providing a given functionality.
func (repo *RepositorySQLiteBackend) FindLatestMatchingRequire(requirement *Requires) (*Package, error) {
	// collect every Provides carrying the requested name
	provides, err := repo.findProvidesByName(requirement.Name())
	if err != nil {
		return nil, err
	}
	candidates := make(RPMSlice, 0, len(provides))
	for _, prov := range provides {
		if requirement.ProvideMatches(prov) {
			candidates = append(candidates, prov)
		}
	}
	if len(candidates) == 0 {
		return nil, fmt.Errorf("no Provides for %q", requirement.Name())
	}
	// the best (highest) match sorts last
	sort.Sort(candidates)
	best := candidates[len(candidates)-1].(*Provides)
	// now look up the package(s) carrying that Provides
	pkgs, err := repo.loadPackagesProviding(best)
	if err != nil {
		return nil, err
	}
	if len(pkgs) == 0 {
		return nil, fmt.Errorf("no such package %q", requirement.Name())
	}
	candidates = candidates[:0]
	for _, p := range pkgs {
		candidates = append(candidates, p)
	}
	sort.Sort(candidates)
	return candidates[len(candidates)-1].(*Package), nil
}
// GetPackages returns all the packages known by a YUM repository.
// On a database error it logs the problem and returns nil; the previous
// implementation panicked (with unreachable returns after each panic),
// which is inappropriate for library code.
func (repo *RepositorySQLiteBackend) GetPackages() []*Package {
	query := "select pkgkey, name, version, release, epoch, rpm_group, arch, location_href from packages"
	stmt, err := repo.db.Prepare(query)
	if err != nil {
		repo.msg.Errorf("db-error: %v\n", err)
		return nil
	}
	defer stmt.Close()
	rows, err := stmt.Query()
	if err != nil {
		repo.msg.Errorf("db-error: %v\n", err)
		return nil
	}
	defer rows.Close()
	pkgs := make([]*Package, 0)
	for rows.Next() {
		pkg, err := repo.newPackageFromScan(rows)
		if err != nil {
			repo.msg.Errorf("db-error: %v\n", err)
			repo.msg.Errorf("query: %q\n", query)
			return nil
		}
		pkgs = append(pkgs, pkg)
	}
	// report any error hit while iterating; Close is handled by the defers
	if err := rows.Err(); err != nil {
		repo.msg.Errorf("db-error-err: %v\n", err)
		return nil
	}
	return pkgs
}
// newPackageFromScan builds a *Package from the current row of rows, which
// must come from a query selecting, in this order:
// pkgkey, name, version, release, epoch, rpm_group, arch, location_href.
// The pkgkey is then used to load the package's requires and provides.
func (repo *RepositorySQLiteBackend) newPackageFromScan(rows *sql.Rows) (*Package, error) {
var pkg Package
pkg.repository = repo.Repository
pkg.requires = make([]*Requires, 0)
pkg.provides = make([]*Provides, 0)
// pkgkey is the surrogate key linking packages to requires/provides rows
var pkgkey sql.NullInt64
// textual columns are scanned as []byte so a NULL maps to an empty slice
var name []byte
var version []byte
var rel_str []byte
var epoch_str []byte
var group []byte
var arch []byte
var location []byte
err := rows.Scan(
&pkgkey,
&name,
&version,
&rel_str,
&epoch_str,
&group,
&arch,
&location,
)
if err != nil {
repo.msg.Errorf("scan error: %v\n", err)
return nil, err
}
pkg.rpmBase.name = string(name)
pkg.rpmBase.version = string(version)
// release/epoch are stored as text; empty means unset (keep the zero value)
if string(rel_str) != "" {
rel, err := strconv.Atoi(string(rel_str))
if err != nil {
return nil, err
}
pkg.rpmBase.release = rel
}
if string(epoch_str) != "" {
epoch, err := strconv.Atoi(string(epoch_str))
if err != nil {
return nil, err
}
pkg.rpmBase.epoch = epoch
}
pkg.group = string(group)
pkg.arch = string(arch)
pkg.location = string(location)
err = repo.loadRequires(int(pkgkey.Int64), &pkg)
if err != nil {
repo.msg.Errorf("load-requires error: %v\n", err)
return nil, err
}
err = repo.loadProvides(int(pkgkey.Int64), &pkg)
if err != nil {
repo.msg.Errorf("load-provides error: %v\n", err)
return nil, err
}
return &pkg, nil
}
// loadProvides appends to pkg.provides every Provides row attached to pkgkey.
// The redundant explicit rows.Close/stmt.Close calls (already covered by the
// defers) have been removed.
func (repo *RepositorySQLiteBackend) loadProvides(pkgkey int, pkg *Package) error {
	stmt, err := repo.db.Prepare(
		"select name, version, release, epoch, flags from provides where pkgkey=?",
	)
	if err != nil {
		return err
	}
	defer stmt.Close()
	rows, err := stmt.Query(pkgkey)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var (
			p       Provides
			name    []byte
			version []byte
			release []byte
			epoch   []byte
			flags   []byte
		)
		if err = rows.Scan(&name, &version, &release, &epoch, &flags); err != nil {
			return err
		}
		p.rpmBase.name = string(name)
		p.rpmBase.version = string(version)
		// release/epoch are text columns; empty means unset
		if string(release) != "" {
			rel, err := strconv.Atoi(string(release))
			if err != nil {
				return err
			}
			p.rpmBase.release = rel
		}
		if string(epoch) != "" {
			epo, err := strconv.Atoi(string(epoch))
			if err != nil {
				return err
			}
			p.rpmBase.epoch = epo
		}
		p.rpmBase.flags = string(flags)
		p.Package = pkg
		pkg.provides = append(pkg.provides, &p)
	}
	// surface any error hit during iteration
	return rows.Err()
}
// loadRequires appends to pkg.requires every Requires row attached to pkgkey.
// Bug fixed: the scanned requirement was built but never attached to the
// package (pkg.requires stayed empty); it is now appended like loadProvides
// does for provides. The redundant explicit Close calls were also dropped.
func (repo *RepositorySQLiteBackend) loadRequires(pkgkey int, pkg *Package) error {
	stmt, err := repo.db.Prepare(
		"select name, version, release, epoch, flags, pre from requires where pkgkey=?",
	)
	if err != nil {
		return err
	}
	defer stmt.Close()
	rows, err := stmt.Query(pkgkey)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var (
			req     Requires
			name    []byte
			version []byte
			release []byte
			epoch   []byte
			flags   []byte
			pre     []byte
		)
		if err = rows.Scan(&name, &version, &release, &epoch, &flags, &pre); err != nil {
			return err
		}
		req.rpmBase.name = string(name)
		req.rpmBase.version = string(version)
		// release/epoch are text columns; empty means unset
		if string(release) != "" {
			rel, err := strconv.Atoi(string(release))
			if err != nil {
				return err
			}
			req.rpmBase.release = rel
		}
		if string(epoch) != "" {
			epo, err := strconv.Atoi(string(epoch))
			if err != nil {
				return err
			}
			req.rpmBase.epoch = epo
		}
		req.rpmBase.flags = string(flags)
		req.pre = string(pre)
		// an empty flags column means an exact-version requirement
		if req.rpmBase.flags == "" {
			req.rpmBase.flags = "EQ"
		}
		// previously missing: attach the requirement to the package
		pkg.requires = append(pkg.requires, &req)
	}
	return rows.Err()
}
// loadPackagesByName returns the packages named name (optionally restricted
// to an exact version when version is non-empty).
// Bug fixed: the two-part query string was concatenated without a space,
// producing "...location_hreffrom packages..." — invalid SQL.
func (repo *RepositorySQLiteBackend) loadPackagesByName(name, version string) ([]*Package, error) {
	args := []interface{}{name}
	query := "select pkgkey, name, version, release, epoch, rpm_group, arch, location_href" +
		" from packages where name = ?"
	if version != "" {
		query += " and version = ?"
		args = append(args, version)
	}
	stmt, err := repo.db.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	pkgs := make([]*Package, 0)
	for rows.Next() {
		pkg, err := repo.newPackageFromScan(rows)
		if err != nil {
			return nil, err
		}
		pkgs = append(pkgs, pkg)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return pkgs, nil
}
// findProvidesByName returns every Provides row with the given name.
// Bug fixed: the query selected six columns (pkgkey, name, version, release,
// epoch, flags) but Scan had only five destinations, so every call failed at
// the first row. The unused pkgkey column is dropped from the SELECT, and
// release/epoch are parsed from text like the other load helpers.
func (repo *RepositorySQLiteBackend) findProvidesByName(name string) ([]*Provides, error) {
	query := "select name, version, release, epoch, flags from provides where name=?"
	stmt, err := repo.db.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(name)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	provides := make([]*Provides, 0)
	for rows.Next() {
		var (
			p       Provides
			pname   []byte
			version []byte
			release []byte
			epoch   []byte
			flags   []byte
		)
		if err = rows.Scan(&pname, &version, &release, &epoch, &flags); err != nil {
			return nil, err
		}
		p.rpmBase.name = string(pname)
		p.rpmBase.version = string(version)
		if string(release) != "" {
			rel, err := strconv.Atoi(string(release))
			if err != nil {
				return nil, err
			}
			p.rpmBase.release = rel
		}
		if string(epoch) != "" {
			epo, err := strconv.Atoi(string(epoch))
			if err != nil {
				return nil, err
			}
			p.rpmBase.epoch = epo
		}
		p.rpmBase.flags = string(flags)
		// owning package is resolved later via loadPackagesProviding
		p.Package = nil
		provides = append(provides, &p)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return provides, nil
}
// loadPackagesProviding returns the packages that carry the Provides prov
// (matched by name/version, and by release when prov has one).
// Cleanup: removed a duplicated, dead `if err != nil` check and the explicit
// rows.Close already covered by the defer.
func (repo *RepositorySQLiteBackend) loadPackagesProviding(prov *Provides) ([]*Package, error) {
	args := []interface{}{prov.Name(), prov.Version()}
	query := `select p.pkgkey, p.name, p.version, p.release, p.epoch, p.rpm_group, p.arch, p.location_href
from packages p, provides r
where p.pkgkey = r.pkgkey
and r.name = ?
and r.version = ?`
	if prov.Release() > 0 {
		query += " and r.release = ?"
		args = append(args, prov.Release())
	}
	stmt, err := repo.db.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	pkgs := make([]*Package, 0)
	for rows.Next() {
		pkg, err := repo.newPackageFromScan(rows)
		if err != nil {
			return nil, err
		}
		pkgs = append(pkgs, pkg)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return pkgs, nil
}
// decompress streams the bzip2-compressed bytes of src into dst,
// returning the first error hit while copying.
func (repo *RepositorySQLiteBackend) decompress(dst io.Writer, src io.Reader) error {
	_, err := io.Copy(dst, bzip2.NewReader(src))
	return err
}
// decompress2 decompresses the bzip2 file at path src into a newly
// created file at path dst, syncing the result to disk before returning.
func (repo *RepositorySQLiteBackend) decompress2(dst string, src string) error {
	fdst, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer fdst.Close()
	fsrc, err := os.Open(src)
	if err != nil {
		return err
	}
	defer fsrc.Close()
	if err := repo.decompress(fdst, fsrc); err != nil {
		return err
	}
	// make sure the decompressed bytes actually hit the disk
	return fdst.Sync()
}
// init registers the SQLite backend factory in the global backend
// registry so a Repository can instantiate it by name.
func init() {
g_backends["RepositorySQLiteBackend"] = func(repo *Repository) (Backend, error) {
return NewRepositorySQLiteBackend(repo)
}
}
// EOF
|
package cfs
import (
"bytes"
"errors"
"fmt"
"github.com/tiglabs/containerfs/logger"
"github.com/tiglabs/containerfs/proto/dp"
"github.com/tiglabs/containerfs/proto/mp"
"github.com/tiglabs/containerfs/proto/vp"
"github.com/tiglabs/containerfs/utils"
"golang.org/x/net/context"
"google.golang.org/grpc"
"io"
"math/rand"
"os"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
)
// chunksize for write
const (
chunkSize = 64 * 1024 * 1024 // 64 MiB per chunk
oneExpandSize = 30 * 1024 * 1024 * 1024 // 30 GiB added per volume expansion
BlockGroupSize = 5 * 1024 * 1024 * 1024 // 5 GiB capacity of one block group
)
// file health states (used as CFile status values)
const (
FileNormal = 0 // file is healthy
FileError = 2 // file hit a write error
)
// BufferSize ...
var BufferSize int32 // per-file write-buffer capacity (freeSize of a fresh wBuffer)
var VolMgrHosts []string // volume-manager endpoints to dial
var MetaNodeHosts []string // metanode endpoints (host:port; see GetVolumeMetaPeers)
// CFS is a client handle on one containerfs volume; it caches the gRPC
// connections to the current VolMgr and MetaNode leaders (refreshed in the
// background by CheckLeaderConns).
type CFS struct {
VolID string // UUID of the opened volume
VolMgrConn *grpc.ClientConn // connection to the current VolMgr leader
VolMgrLeader string // address of the current VolMgr leader
MetaNodeConn *grpc.ClientConn // connection to the MetaNode leader for this volume
MetaNodeLeader string // address of the current MetaNode leader
}
// GetAllDatanode asks the VolMgr leader for the list of all datanodes.
// Returns (0, nodes) on success and (-1, nil) on any failure.
func GetAllDatanode() (int32, []*vp.DataNode) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("GetAllDatanode failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetDataNodeReq := &vp.GetDataNodeReq{}
	// release the timeout's resources as soon as the RPC returns
	// (the cancel func was previously discarded — vet: lostcancel)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pGetDataNodeAck, err := vc.GetDataNode(ctx, pGetDataNodeReq)
	if err != nil {
		logger.Error("GetAllDatanode failed,grpc func err :%v", err)
		return -1, nil
	}
	if pGetDataNodeAck.Ret != 0 {
		logger.Error("GetAllDatanode failed,grpc func ret :%v", pGetDataNodeAck.Ret)
		return -1, nil
	}
	return 0, pGetDataNodeAck.DataNodes
}
// GetAllMetanode asks the VolMgr leader for the list of all metanodes.
// Returns (0, nodes) on success and (-1, nil) on any failure.
// Fixes: log prefix said "GetAllDatanode" (copy/paste slip); the context
// cancel func was discarded (vet: lostcancel).
func GetAllMetanode() (int32, []*vp.MetaNode) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("GetAllMetanode failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetAllMetaNodeReq := &vp.GetAllMetaNodeReq{}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pGetAllMetaNodeAck, err := vc.GetMetaNode(ctx, pGetAllMetaNodeReq)
	if err != nil {
		logger.Error("GetAllMetanode failed,grpc func err :%v", err)
		return -1, nil
	}
	if pGetAllMetaNodeAck.Ret != 0 {
		logger.Error("GetAllMetanode failed,grpc func ret :%v", pGetAllMetaNodeAck.Ret)
		return -1, nil
	}
	return 0, pGetAllMetaNodeAck.MetaNodes
}
// DelDatanode removes datanode host from the cluster via the VolMgr.
// Returns 0 on success, -1 on failure.
// Fixes: dial-failure log said "GetAllDatanode"; lost context cancel.
func DelDatanode(host string) int {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("DelDatanode failed,Dial to VolMgrHosts fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pDelDataNodeReq := &vp.DelDataNodeReq{
		Host: host,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ack, err := vc.DelDataNode(ctx, pDelDataNodeReq)
	if err != nil {
		logger.Error("DelDataNode failed,grpc func err :%v", err)
		return -1
	}
	if ack.Ret != 0 {
		logger.Error("DelDataNode failed,grpc func ret :%v", ack.Ret)
		return -1
	}
	return 0
}
// CreateVol creates a volume called name with the given capacity (numeric
// string) on the given storage tier, printing the new volume UUID on
// success. Returns 0 on success, the VolMgr error code or -1 on failure.
// A partially created volume is rolled back via DeleteVol.
func CreateVol(name string, capacity string, tier string) int32 {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("CreateVol failed,Dial to VolMgrHosts fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	// reject a malformed capacity instead of silently creating a 0-quota volume
	spaceQuota, err := strconv.Atoi(capacity)
	if err != nil {
		logger.Error("CreateVol failed,invalid capacity %q :%v", capacity, err)
		return -1
	}
	pCreateVolReq := &vp.CreateVolReq{
		VolName:    name,
		SpaceQuota: int32(spaceQuota),
		Tier:       tier,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ack, err := vc.CreateVol(ctx, pCreateVolReq)
	if err != nil {
		logger.Error("CreateVol failed, VolMgr Leader return failed, err:%v", err)
		if ack != nil && ack.UUID != "" {
			// clean up the half-created volume
			DeleteVol(ack.UUID)
		}
		return -1
	}
	if ack.Ret != 0 {
		logger.Error("CreateVol failed, VolMgr Leader return failed, ret:%v", ack.Ret)
		if ack.UUID != "" {
			DeleteVol(ack.UUID)
		}
		return ack.Ret
	}
	fmt.Println(ack.UUID)
	return 0
}
/* TODO:
// Expand volume once for fuseclient
func ExpandVolRS(UUID string, MtPath string) int32 {
path := MtPath + "/expanding"
fd, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
return -2
}
defer fd.Close()
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("ExpandVolRS failed,Dial to Cluster leader metanode fail :%v", err)
return -1
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pExpandVolRSReq := &mp.ExpandVolRSReq{
VolID: UUID,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
pExpandVolRSAck, err := mc.ExpandVolRS(ctx, pExpandVolRSReq)
if err != nil {
logger.Error("ExpandVol once volume:%v failed, Cluster leader metanode return error:%v", UUID, err)
os.Remove(path)
return -1
}
if pExpandVolRSAck.Ret == -1 {
logger.Error("ExpandVol once volume:%v failed, Cluster leader metanode return -1:%v", UUID)
os.Remove(path)
return -1
} else if pExpandVolRSAck.Ret == 0 {
logger.Error("ExpandVol volume:%v once failed, Cluster leader metanode return 0 because volume totalsize not enough expand", UUID)
os.Remove(path)
return 0
}
out := UpdateMetaForExpandVol(UUID, pExpandVolRSAck)
if out != 0 {
logger.Error("ExpandVol volume:%v once cluster leader metanode success but update volume leader metanode fail, so rollback cluster leader metanode this expand resource", UUID)
pDelReq := &mp.DelVolRSForExpandReq{
UUID: UUID,
BGPS: pExpandVolRSAck.BGPS,
}
pDelAck, err := mc.DelVolRSForExpand(ctx, pDelReq)
if err != nil || pDelAck.Ret != 0 {
logger.Error("ExpandVol once volume:%v success but update meta failed, then rollback cluster leader metanode error", UUID)
}
os.Remove(path)
return -1
}
os.Remove(path)
return 1
}
func UpdateMetaForExpandVol(UUID string, ack *mp.ExpandVolRSAck) int {
var mpBlockGroups []*mp.BlockGroup
for _, v := range ack.BGPS {
mpBlockGroup := &mp.BlockGroup{
BlockGroupID: v.Blocks[0].BGID,
FreeSize: BlockGroupSize,
}
mpBlockGroups = append(mpBlockGroups, mpBlockGroup)
}
logger.Debug("ExpandVolRS volume:%v to leader metanode BlockGroups Info:%v", UUID, mpBlockGroups)
// Meta handle
conn2, err := DialMeta(UUID)
if err != nil {
logger.Error("ExpandVol volume:%v once volmgr success but Dial to metanode fail :%v", UUID, err)
return -1
}
defer conn2.Close()
mc := mp.NewMetaNodeClient(conn2)
pmExpandNameSpaceReq := &mp.ExpandNameSpaceReq{
VolID: UUID,
BlockGroups: mpBlockGroups,
}
ctx2, _ := context.WithTimeout(context.Background(), 10*time.Second)
pmExpandNameSpaceAck, err := mc.ExpandNameSpace(ctx2, pmExpandNameSpaceReq)
if err != nil {
logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return error:%v", UUID, err)
return -1
}
if pmExpandNameSpaceAck.Ret != 0 {
logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return not equal 0:%v", UUID)
return -1
}
return 0
}
*/
// ExpandVol grows volume uuid by capacity (numeric string).
// Returns 0 on success, -1 on failure.
// Fixes: header comment and logs said "CreateVol" (copy/paste slip);
// unchecked Atoi silently expanded by 0; lost context cancel.
func ExpandVol(uuid string, capacity string) int32 {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("ExpandVol failed,Dial to VolMgrHosts fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	spaceQuota, err := strconv.Atoi(capacity)
	if err != nil {
		logger.Error("ExpandVol failed,invalid capacity %q :%v", capacity, err)
		return -1
	}
	pExpandVolReq := &vp.ExpandVolReq{
		UUID:  uuid,
		Space: int32(spaceQuota),
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ack, err := vc.ExpandVol(ctx, pExpandVolReq)
	if err != nil {
		logger.Error("ExpandVol failed, VolMgr Leader return failed, err:%v", err)
		return -1
	}
	if ack.Ret != 0 {
		logger.Error("ExpandVol failed, VolMgr Leader return failed, ret:%v", ack.Ret)
		return -1
	}
	return 0
}
// Migrate moves the block data held by the bad datanode host onto healthy
// datanodes. Returns 0 on success, -1 on failure.
func Migrate(host string) int32 {
	pMigrateReq := &vp.MigrateReq{
		DataNodeHost: host,
	}
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("Migrate failed,Dial to metanode fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	// cancel was previously discarded (vet: lostcancel)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pMigrateAck, err := vc.Migrate(ctx, pMigrateReq)
	if err != nil {
		logger.Error("Migrate failed: %v", err)
		return -1
	}
	if pMigrateAck.Ret != 0 {
		logger.Error("Migrate failed: %v", pMigrateAck.Ret)
		return -1
	}
	return 0
}
// GetAllVolumeInfos lists every volume known to the VolMgr leader.
// Returns (0, volumes) on success and (-1, nil) on any failure.
// Fixes: dial-failure log said "GetAllDatanode"; lost context cancel.
func GetAllVolumeInfos() (int32, []*vp.Volume) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("GetAllVolumeInfos failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pVolumeInfosReq := &vp.VolumeInfosReq{}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pVolumeInfosAck, err := vc.VolumeInfos(ctx, pVolumeInfosReq)
	if err != nil {
		logger.Error("GetAllVolumeInfos failed,grpc func err :%v", err)
		return -1, nil
	}
	if pVolumeInfosAck.Ret != 0 {
		logger.Error("GetAllVolumeInfos failed,grpc func ret :%v", pVolumeInfosAck.Ret)
		return -1, nil
	}
	return 0, pVolumeInfosAck.Volumes
}
// GetVolInfo returns detailed info for the volume whose UUID is name.
// Returns (0, ack) on success and (-1, empty ack) on failure.
func GetVolInfo(name string) (int32, *vp.GetVolInfoAck) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("GetVolInfo failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetVolInfoReq := &vp.GetVolInfoReq{
		UUID: name,
	}
	// cancel was previously discarded (vet: lostcancel)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ack, err := vc.GetVolInfo(ctx, pGetVolInfoReq)
	// on RPC error ack may be nil; the || short-circuit keeps this safe
	if err != nil || ack.Ret != 0 {
		return -1, &vp.GetVolInfoAck{}
	}
	return 0, ack
}
// GetBlockGroupInfo returns info for the block group whose decimal ID is
// idStr. Returns (0, ack) on success, (-1, nil or empty ack) on failure.
func GetBlockGroupInfo(idStr string) (int32, *vp.GetBlockGroupInfoAck) {
	bgID, err := strconv.ParseUint(idStr, 10, 64)
	if err != nil {
		// typo fixed: log said "bdID"
		logger.Error("GetBlockGroupInfo parse bgID failed:%v", err)
		return -1, nil
	}
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("GetBlockGroupInfo failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetBlockGroupInfoReq := &vp.GetBlockGroupInfoReq{
		BGID: bgID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ack, err := vc.GetBlockGroupInfo(ctx, pGetBlockGroupInfoReq)
	if err != nil {
		logger.Error("GetBlockGroupInfo failed: %v", err)
		return -1, &vp.GetBlockGroupInfoAck{}
	}
	if ack.Ret != 0 {
		logger.Error("GetBlockGroupInfo failed: %v", ack.Ret)
		return -1, &vp.GetBlockGroupInfoAck{}
	}
	return 0, ack
}
// SnapShotVol asks every metanode to snapshot the namespace of volume uuid.
// Returns 0 when all hosts succeed, -1 on the first failure.
// Fix: connections were deferred-closed inside the loop, so every
// connection (and its context) stayed open until the function returned;
// each host is now handled by a helper whose defers fire per-iteration.
func SnapShotVol(uuid string) int32 {
	for _, host := range MetaNodeHosts {
		if ret := snapShotVolOnHost(host, uuid); ret != 0 {
			return ret
		}
	}
	return 0
}

// snapShotVolOnHost sends one SnapShotNameSpace RPC for volume uuid to a
// single metanode host.
func snapShotVolOnHost(host string, uuid string) int32 {
	conn, err := utils.Dial(host)
	if err != nil {
		logger.Error("SnapShotVol failed,Dial to MetaNodeHosts %v fail :%v", host, err)
		return -1
	}
	defer conn.Close()
	mc := mp.NewMetaNodeClient(conn)
	pmSnapShotNameSpaceReq := &mp.SnapShotNameSpaceReq{
		VolID: uuid,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
	defer cancel()
	pmSnapShotNameSpaceAck, err := mc.SnapShotNameSpace(ctx, pmSnapShotNameSpaceReq)
	if err != nil {
		logger.Error("SnapShotVol failed,grpc func err :%v", err)
		return -1
	}
	if pmSnapShotNameSpaceAck.Ret != 0 {
		logger.Error("SnapShotVol failed,rpc func ret:%v", pmSnapShotNameSpaceAck.Ret)
		return -1
	}
	return 0
}
// SnapShotCluster asks every VolMgr to snapshot its cluster metadata.
// Returns 0 when all hosts succeed, -1 on the first failure.
// Fixes: deferred Close inside the loop kept all connections open until
// return (now handled per host in a helper); two logs said "SnapShotVol".
func SnapShotCluster() int32 {
	for _, host := range VolMgrHosts {
		if ret := snapShotClusterOnHost(host); ret != 0 {
			return ret
		}
	}
	return 0
}

// snapShotClusterOnHost triggers a cluster snapshot on one VolMgr host.
func snapShotClusterOnHost(host string) int32 {
	conn, err := utils.Dial(host)
	if err != nil {
		logger.Error("SnapShotCluster failed,Dial to VolMgrHosts %v fail :%v", host, err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pSnapShotClusterReq := &vp.SnapShotClusterReq{}
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
	defer cancel()
	pSnapShotClusterAck, err := vc.SnapShotCluster(ctx, pSnapShotClusterReq)
	if err != nil {
		logger.Error("SnapShotCluster failed,grpc func err :%v", err)
		return -1
	}
	if pSnapShotClusterAck.Ret != 0 {
		logger.Error("SnapShotCluster failed,rpc func ret:%v", pSnapShotClusterAck.Ret)
		return -1
	}
	return 0
}
// DeleteVol deletes volume uuid via the VolMgr leader.
// Returns 0 on success, -1 on failure.
func DeleteVol(uuid string) int32 {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("DeleteVol failed,Dial to VolMgrHosts fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pDeleteVolReq := &vp.DeleteVolReq{
		UUID: uuid,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	pDeleteVolAck, err := vc.DeleteVol(ctx, pDeleteVolReq)
	if err != nil {
		// previously swallowed silently; log before bailing out
		logger.Error("DeleteVol failed,grpc func err :%v", err)
		return -1
	}
	if pDeleteVolAck.Ret != 0 {
		logger.Error("DeleteVol failed :%v", pDeleteVolAck.Ret)
		return -1
	}
	return 0
}
// GetVolMetaLeader asks the VolMgr for the MetaNode raft-group leader of
// volume UUID and returns its address.
// Fixes: the dialed connection was never closed (one leaked grpc
// connection per call); the context cancel func was discarded.
func GetVolMetaLeader(UUID string) (string, error) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetMetaNodeRGReq := &vp.GetMetaNodeRGReq{
		UUID: UUID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	pGetMetaNodeRGAck, err := vc.GetMetaNodeRG(ctx, pGetMetaNodeRGReq)
	if err != nil {
		return "", err
	}
	if pGetMetaNodeRGAck.Ret != 0 {
		return "", fmt.Errorf("GetVolMetaLeader GetMetaNodeRG failed Ret:%v", pGetMetaNodeRGAck.Ret)
	}
	return pGetMetaNodeRGAck.Leader, nil
}
// OpenFileSystem opens volume uuid: it discovers the volume's metanode
// peers, connects to the VolMgr and MetaNode leaders, and starts the
// background leader watcher. Returns nil when the leader lookup fails.
func OpenFileSystem(uuid string) *CFS {
	cfs := CFS{VolID: uuid}
	// previously the error was silently discarded; the failure is still
	// non-fatal (GetLeaderInfo below will surface a dead cluster) but now
	// leaves a trace in the log
	if err := cfs.GetVolumeMetaPeers(uuid); err != nil {
		logger.Error("OpenFileSystem GetVolumeMetaPeers failed err:%v", err)
	}
	if err := cfs.GetLeaderInfo(uuid); err != nil {
		logger.Error("OpenFileSystem GetLeaderConn Failed err:%v", err)
		return nil
	}
	cfs.CheckLeaderConns()
	return &cfs
}
// GetLeaderHost resolves the current VolMgr leader address and the
// MetaNode leader address for this volume.
// Fix: the second failure log misspelled the function name ("GretLeaderHost").
func (cfs *CFS) GetLeaderHost() (volMgrLeader string, metaNodeLeader string, err error) {
	volMgrLeader, err = utils.GetVolMgrLeader(VolMgrHosts)
	if err != nil {
		logger.Error("GetLeaderHost failed: %v", err)
		return "", "", err
	}
	metaNodeLeader, err = utils.GetMetaNodeLeader(MetaNodeHosts, cfs.VolID)
	if err != nil {
		logger.Error("GetLeaderHost failed: %v", err)
		return "", "", err
	}
	return volMgrLeader, metaNodeLeader, nil
}
// GetLeaderInfo dials the VolMgr leader, resolves the MetaNode leader for
// uuid and connects to it, storing both connections and leader addresses
// on cfs (they are kept open for the lifetime of the handle).
func (cfs *CFS) GetLeaderInfo(uuid string) error {
	var err error
	cfs.VolMgrLeader, cfs.VolMgrConn, err = utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		return err
	}
	vc := vp.NewVolMgrClient(cfs.VolMgrConn)
	pGetMetaNodeRGReq := &vp.GetMetaNodeRGReq{
		UUID: uuid,
	}
	// cancel was previously discarded (vet: lostcancel)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	pGetMetaNodeRGAck, err := vc.GetMetaNodeRG(ctx, pGetMetaNodeRGReq)
	if err != nil {
		return err
	}
	if pGetMetaNodeRGAck.Ret != 0 {
		logger.Error("GetLeaderConn GetMetaNodeRG failed :%v", pGetMetaNodeRGAck.Ret)
		return fmt.Errorf("GetMetaNodeRG Failed Ret:%v", pGetMetaNodeRGAck.Ret)
	}
	cfs.MetaNodeLeader = pGetMetaNodeRGAck.Leader
	cfs.MetaNodeConn, err = utils.Dial(cfs.MetaNodeLeader)
	if err != nil {
		return err
	}
	return nil
}
// GetVolumeMetaPeers fetches the MetaNode raft-group members of uuid from
// the VolMgr and appends them (host:9901) to the global MetaNodeHosts.
// Fixes: the dialed connection was never closed (leak); lost context cancel.
func (cfs *CFS) GetVolumeMetaPeers(uuid string) error {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("DialVolMgr failed: %v", err)
		return err
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetMetaNodeRGReq := &vp.GetMetaNodeRGReq{
		UUID: uuid,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	pGetMetaNodeRGAck, err := vc.GetMetaNodeRG(ctx, pGetMetaNodeRGReq)
	if err != nil {
		return err
	}
	if pGetMetaNodeRGAck.Ret != 0 {
		logger.Error("GetLeaderConn GetMetaNodeRG failed :%v", pGetMetaNodeRGAck.Ret)
		return fmt.Errorf("GetMetaNodeRG Failed Ret:%v", pGetMetaNodeRGAck.Ret)
	}
	for _, v := range pGetMetaNodeRGAck.MetaNodes {
		// metanode RPC port is fixed at 9901
		MetaNodeHosts = append(MetaNodeHosts, v.Host+":9901")
	}
	return nil
}
// CheckLeaderConns starts a background goroutine that polls the cluster
// every 500ms and re-dials the cached VolMgr / MetaNode connections
// whenever the corresponding leader changes. The goroutine runs for the
// lifetime of the process (the ticker is deliberately never stopped).
// Fix: Dial errors were assigned to err but silently ignored; they are
// now logged so a failed re-dial is visible (the next tick retries).
func (cfs *CFS) CheckLeaderConns() {
	ticker := time.NewTicker(time.Millisecond * 500)
	go func() {
		for range ticker.C {
			vLeader, mLeader, err := cfs.GetLeaderHost()
			if err != nil {
				logger.Error("CheckLeaderConns GetLeaderHost err %v", err)
				continue
			}
			if vLeader != cfs.VolMgrLeader {
				logger.Error("VolMgr Leader Change! Old Leader %v,New Leader %v", cfs.VolMgrLeader, vLeader)
				if cfs.VolMgrConn != nil {
					cfs.VolMgrConn.Close()
					cfs.VolMgrConn = nil
				}
				cfs.VolMgrConn, err = utils.Dial(vLeader)
				if err != nil {
					logger.Error("CheckLeaderConns Dial VolMgr leader %v err %v", vLeader, err)
				}
				cfs.VolMgrLeader = vLeader
			}
			if mLeader != cfs.MetaNodeLeader {
				logger.Error("MetaNode Leader Change! Old Leader %v,New Leader %v", cfs.MetaNodeLeader, mLeader)
				if cfs.MetaNodeConn != nil {
					cfs.MetaNodeConn.Close()
					cfs.MetaNodeConn = nil
				}
				cfs.MetaNodeConn, err = utils.Dial(mLeader)
				if err != nil {
					logger.Error("CheckLeaderConns Dial MetaNode leader %v err %v", mLeader, err)
				}
				cfs.MetaNodeLeader = mLeader
			}
		}
	}()
}
// GetFSInfo returns filesystem statistics for the opened volume.
// Returns (0, ack) on success and (1, nil) on failure.
func (cfs *CFS) GetFSInfo() (int32, *mp.GetFSInfoAck) {
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pGetFSInfoReq := &mp.GetFSInfoReq{
		VolID: cfs.VolID,
	}
	// cancel was previously discarded (vet: lostcancel)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pGetFSInfoAck, err := mc.GetFSInfo(ctx, pGetFSInfoReq)
	if err != nil {
		logger.Error("GetFSInfo failed,grpc func err :%v", err)
		return 1, nil
	}
	if pGetFSInfoAck.Ret != 0 {
		logger.Error("GetFSInfo failed,grpc func ret :%v", pGetFSInfoAck.Ret)
		return 1, nil
	}
	return 0, pGetFSInfoAck
}
// checkMetaConn waits (up to ~3s, in 300ms steps) for the metanode
// connection to be re-established; returns 0 when usable, -1 when still nil.
func (cfs *CFS) checkMetaConn() int32 {
	for attempt := 0; attempt < 10; attempt++ {
		if cfs.MetaNodeConn != nil {
			return 0
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.MetaNodeConn == nil {
		return -1
	}
	return 0
}
// CreateDirDirect creates directory name under parent inode pinode.
// On a transport error it waits a second for the leader connection to
// recover and retries the RPC once.
// Returns (ret, newInode); (-1, 0) when the metanode is unreachable.
func (cfs *CFS) CreateDirDirect(pinode uint64, name string) (int32, uint64) {
	if ret := cfs.checkMetaConn(); ret != 0 {
		return -1, 0
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pCreateDirDirectReq := &mp.CreateDirDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pCreateDirDirectAck, err := mc.CreateDirDirect(ctx, pCreateDirDirectReq)
	if err != nil {
		// likely a leader change; give CheckLeaderConns time to re-dial
		time.Sleep(time.Second)
		if ret := cfs.checkMetaConn(); ret != 0 {
			return -1, 0
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		retryCtx, retryCancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer retryCancel()
		pCreateDirDirectAck, err = mc.CreateDirDirect(retryCtx, pCreateDirDirectReq)
		if err != nil {
			return -1, 0
		}
	}
	return pCreateDirDirectAck.Ret, pCreateDirDirectAck.Inode
}
// GetInodeInfoDirect looks up entry name under pinode and returns its
// inode number and metadata. On a transport error it waits for the leader
// connection to recover and retries once.
// Returns (-1, 0, nil) when the metanode is unreachable.
func (cfs *CFS) GetInodeInfoDirect(pinode uint64, name string) (int32, uint64, *mp.InodeInfo) {
	if ret := cfs.checkMetaConn(); ret != 0 {
		return -1, 0, nil
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pGetInodeInfoDirectReq := &mp.GetInodeInfoDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pGetInodeInfoDirectAck, err := mc.GetInodeInfoDirect(ctx, pGetInodeInfoDirectReq)
	if err != nil {
		// likely a leader change; give CheckLeaderConns time to re-dial
		time.Sleep(time.Second)
		if ret := cfs.checkMetaConn(); ret != 0 {
			return -1, 0, nil
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		retryCtx, retryCancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer retryCancel()
		pGetInodeInfoDirectAck, err = mc.GetInodeInfoDirect(retryCtx, pGetInodeInfoDirectReq)
		if err != nil {
			return -1, 0, nil
		}
	}
	return pGetInodeInfoDirectAck.Ret, pGetInodeInfoDirectAck.Inode, pGetInodeInfoDirectAck.InodeInfo
}
// StatDirect checks whether entry name exists under pinode.
// Returns (ret, inodeType, inode); ret 0 means the entry was found.
// On a transport error it waits for the leader connection to recover and
// retries the RPC once.
func (cfs *CFS) StatDirect(pinode uint64, name string) (int32, bool, uint64) {
	if ret := cfs.checkMetaConn(); ret != 0 {
		return -1, false, 0
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pStatDirectReq := &mp.StatDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pStatDirectAck, err := mc.StatDirect(ctx, pStatDirectReq)
	if err != nil {
		// likely a leader change; give CheckLeaderConns time to re-dial
		time.Sleep(time.Second)
		if ret := cfs.checkMetaConn(); ret != 0 {
			return -1, false, 0
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		retryCtx, retryCancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer retryCancel()
		pStatDirectAck, err = mc.StatDirect(retryCtx, pStatDirectReq)
		if err != nil {
			return -1, false, 0
		}
	}
	return pStatDirectAck.Ret, pStatDirectAck.InodeType, pStatDirectAck.Inode
}
// ListDirect returns the directory entries under inode pinode.
// Returns (-1, nil) when the metanode is unreachable or the RPC fails.
func (cfs *CFS) ListDirect(pinode uint64) (int32, []*mp.DirentN) {
	if ret := cfs.checkMetaConn(); ret != 0 {
		return -1, nil
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pListDirectReq := &mp.ListDirectReq{
		PInode: pinode,
		VolID:  cfs.VolID,
	}
	// long timeout: large directories can take a while to enumerate
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	pListDirectAck, err := mc.ListDirect(ctx, pListDirectReq)
	if err != nil {
		return -1, nil
	}
	return pListDirectAck.Ret, pListDirectAck.Dirents
}
// DeleteDirDirect recursively deletes directory name under pinode:
// all children are removed depth-first, then the directory entry itself.
// A missing directory is treated as success (returns 0).
func (cfs *CFS) DeleteDirDirect(pinode uint64, name string) int32 {
	ret, _, inode := cfs.StatDirect(pinode, name)
	if ret != 0 {
		logger.Debug("DeleteDirDirect StatDirect Failed , no such dir")
		return 0
	}
	if ret = cfs.checkMetaConn(); ret != 0 {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pListDirectReq := &mp.ListDirectReq{
		PInode: inode,
		VolID:  cfs.VolID,
	}
	listCtx, listCancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer listCancel()
	pListDirectAck, err := mc.ListDirect(listCtx, pListDirectReq)
	if err != nil {
		logger.Error("DeleteDirDirect ListDirect :%v\n", err)
		return -1
	}
	// depth-first: remove every child before the directory itself;
	// stop at the first child that fails to delete
	for _, v := range pListDirectAck.Dirents {
		if v.InodeType {
			// entry is a file (see InodeType usage in StatDirect callers)
			if ret := cfs.DeleteFileDirect(inode, v.Name); ret != 0 {
				return ret
			}
		} else {
			// entry is a sub-directory: recurse
			if ret := cfs.DeleteDirDirect(inode, v.Name); ret != 0 {
				return ret
			}
		}
	}
	pDeleteDirDirectReq := &mp.DeleteDirDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	delCtx, delCancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer delCancel()
	pDeleteDirDirectAck, err := mc.DeleteDirDirect(delCtx, pDeleteDirDirectReq)
	if err != nil {
		return -1
	}
	return pDeleteDirDirectAck.Ret
}
// RenameDirect renames/moves oldname under oldpinode to newname under
// newpinode. Returns the metanode result code, or -1 on RPC failure.
func (cfs *CFS) RenameDirect(oldpinode uint64, oldname string, newpinode uint64, newname string) int32 {
	if ret := cfs.checkMetaConn(); ret != 0 {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pRenameDirectReq := &mp.RenameDirectReq{
		OldPInode: oldpinode,
		OldName:   oldname,
		NewPInode: newpinode,
		NewName:   newname,
		VolID:     cfs.VolID,
	}
	// cancel was previously discarded (vet: lostcancel)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pRenameDirectAck, err := mc.RenameDirect(ctx, pRenameDirectReq)
	if err != nil {
		return -1
	}
	return pRenameDirectAck.Ret
}
// CreateFileDirect creates file @name under parent inode @pinode and returns
// an open *CFile with its background write thread started.
// With O_EXCL set, an existing entry yields (17, nil) — EEXIST.
// On failure the create RPC's return code is passed through with a nil file.
func (cfs *CFS) CreateFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
	// O_EXCL semantics: refuse to create over an existing entry.
	if flags&os.O_EXCL != 0 {
		if ret, _, _ := cfs.StatDirect(pinode, name); ret == 0 {
			return 17, nil
		}
	}

	ret, inode := cfs.createFileDirect(pinode, name)
	if ret != 0 {
		return ret, nil
	}

	// Fresh handle: zero sizes, empty caches, bounded signal channels.
	cfile := &CFile{
		OpenFlag:         flags,
		cfs:              cfs,
		Writer:           0,
		FileSize:         0,
		FileSizeInCache:  0,
		ParentInodeID:    pinode,
		Inode:            inode,
		Name:             name,
		wBuffer:          wBuffer{buffer: new(bytes.Buffer), freeSize: BufferSize},
		DataCache:        make(map[uint64]*Data),
		DataQueue:        make(chan *chanData, 1),
		CloseSignal:      make(chan struct{}, 10),
		WriteErrSignal:   make(chan bool, 2),
		DataConn:         make(map[string]*grpc.ClientConn),
		errDataNodeCache: make(map[string]bool),
	}
	go cfile.WriteThread()
	return 0, cfile
}
// OpenFileDirect opens the existing file @name under parent inode @pinode,
// loads its chunk list from the metanode, and returns an open *CFile with
// the background write thread running.
func (cfs *CFS) OpenFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
	logger.Debug("OpenFileDirect: name: %v, flags: %v\n", name, flags)
	ret, chunkInfos, inode := cfs.GetFileChunksDirect(pinode, name)
	if ret != 0 {
		return ret, nil
	}

	// The file size is the sum of all chunk sizes.
	var size int64
	for _, ci := range chunkInfos {
		size += int64(ci.ChunkSize)
	}

	cfile := &CFile{
		OpenFlag:         flags,
		cfs:              cfs,
		FileSize:         size,
		FileSizeInCache:  size,
		ParentInodeID:    pinode,
		Inode:            inode,
		wBuffer:          wBuffer{buffer: new(bytes.Buffer), freeSize: BufferSize},
		Name:             name,
		chunks:           chunkInfos,
		DataCache:        make(map[uint64]*Data),
		DataQueue:        make(chan *chanData, 1),
		CloseSignal:      make(chan struct{}, 10),
		WriteErrSignal:   make(chan bool, 2),
		DataConn:         make(map[string]*grpc.ClientConn),
		errDataNodeCache: make(map[string]bool),
	}
	go cfile.WriteThread()
	return 0, cfile
}
// UpdateOpenFileDirect ...
// NOTE(review): currently a no-op stub that always reports success; all
// parameters are unused. Kept for interface compatibility with callers.
func (cfs *CFS) UpdateOpenFileDirect(pinode uint64, name string, cfile *CFile, flags int) int32 {
	return 0
}
// createFileDirect issues the CreateFileDirect RPC to the metanode, retrying
// once after a one-second pause and a reconnect if the first attempt fails.
//
// Returns (0, inode) on success, or (code, 0) on failure, where code is -1
// for connection/RPC errors or the metanode's non-zero return code (e.g. 17
// for an already-existing entry).
func (cfs *CFS) createFileDirect(pinode uint64, name string) (int32, uint64) {
	if cfs.checkMetaConn() != 0 {
		return -1, 0
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pCreateFileDirectReq := &mp.CreateFileDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	// Cancel each timeout context as soon as its RPC returns; previously the
	// CancelFunc was discarded and the context leaked until expiry.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pCreateFileDirectAck, err := mc.CreateFileDirect(ctx, pCreateFileDirectReq)
	cancel()
	if err != nil || pCreateFileDirectAck.Ret != 0 {
		// One retry after a short pause and a reconnect attempt.
		time.Sleep(time.Second)
		if cfs.checkMetaConn() != 0 {
			return -1, 0
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		retryCtx, retryCancel := context.WithTimeout(context.Background(), 5*time.Second)
		pCreateFileDirectAck, err = mc.CreateFileDirect(retryCtx, pCreateFileDirectReq)
		retryCancel()
		if err != nil {
			logger.Error("CreateFileDirect failed,grpc func failed :%v\n", err)
			return -1, 0
		}
	}
	// Any non-zero metanode code is a failure. Previously only codes 1, 2
	// and 17 were recognized; any other non-zero code fell through and was
	// reported as success with inode 0.
	if pCreateFileDirectAck.Ret != 0 {
		return pCreateFileDirectAck.Ret, 0
	}
	return 0, pCreateFileDirectAck.Inode
}
// DeleteFileDirect removes file @name under parent inode @pinode. It first
// best-effort deletes every chunk replica on the datanodes (failures are
// logged and skipped so metadata removal still proceeds), then removes the
// metadata entry on the metanode, retrying that RPC once after a reconnect.
// Returns -1 on connection/RPC failure, otherwise the metanode's code.
func (cfs *CFS) DeleteFileDirect(pinode uint64, name string) int32 {
	ret, chunkInfos, _ := cfs.GetFileChunksDirect(pinode, name)
	if ret == 0 && chunkInfos != nil {
		for _, v1 := range chunkInfos {
			for _, v2 := range v1.BlockGroupWithHost.Hosts {
				conn, err := utils.Dial(v2)
				if err != nil || conn == nil {
					// One re-dial after a pause; then give up on this host.
					time.Sleep(time.Second)
					conn, err = utils.Dial(v2)
					if err != nil || conn == nil {
						logger.Error("DeleteFile failed,Dial to datanode fail :%v\n", err)
						continue
					}
				}
				dc := dp.NewDataNodeClient(conn)
				dpDeleteChunkReq := &dp.DeleteChunkReq{
					ChunkID:      v1.ChunkID,
					BlockGroupID: v1.BlockGroupWithHost.BlockGroupID,
				}
				// Cancel the timeout context inside the loop body (a defer
				// here would pile up until the function returns); previously
				// the CancelFunc was discarded and every context leaked.
				ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
				_, err = dc.DeleteChunk(ctx, dpDeleteChunkReq)
				cancel()
				if err != nil {
					logger.Error("DeleteFile failed,rpc to datanode fail :%v\n", err)
				}
				conn.Close()
			}
		}
	}

	if cfs.checkMetaConn() != 0 {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	mpDeleteFileDirectReq := &mp.DeleteFileDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	mpDeleteFileDirectAck, err := mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
	cancel()
	if err != nil || mpDeleteFileDirectAck.Ret != 0 {
		// Retry once after reconnecting to the metanode leader.
		time.Sleep(time.Second)
		if cfs.checkMetaConn() != 0 {
			return -1
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		retryCtx, retryCancel := context.WithTimeout(context.Background(), 5*time.Second)
		mpDeleteFileDirectAck, err = mc.DeleteFileDirect(retryCtx, mpDeleteFileDirectReq)
		retryCancel()
		if err != nil {
			logger.Error("DeleteFile failed,grpc func err :%v\n", err)
			return -1
		}
	}
	return mpDeleteFileDirectAck.Ret
}
// GetFileChunksDirect ...
// Fetches the chunk list and inode of file @name under parent inode @pinode
// from the metanode, retrying once after a reconnect when the first attempt
// fails. Returns (ret, chunks, inode); ret is -1 on connection/RPC failure,
// otherwise the metanode's return code.
func (cfs *CFS) GetFileChunksDirect(pinode uint64, name string) (int32, []*mp.ChunkInfoWithBG, uint64) {
	ret := cfs.checkMetaConn()
	if ret != 0 {
		logger.Error("GetFileChunksDirect cfs.Conn nil ...")
		return -1, nil, 0
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pGetFileChunksDirectReq := &mp.GetFileChunksDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pGetFileChunksDirectAck, err := mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
	if err != nil || pGetFileChunksDirectAck.Ret != 0 {
		// First attempt failed: pause, reconnect to the metanode, retry once.
		time.Sleep(time.Second)
		ret := cfs.checkMetaConn()
		if ret != 0 {
			logger.Error("GetFileChunksDirect cfs.Conn nil ...")
			return -1, nil, 0
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		pGetFileChunksDirectAck, err = mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
		if err != nil {
			logger.Error("GetFileChunks failed,grpc func failed :%v\n", err)
			return -1, nil, 0
		}
	}
	return pGetFileChunksDirectAck.Ret, pGetFileChunksDirectAck.ChunkInfos, pGetFileChunksDirectAck.Inode
}
// Data is one buffered write payload awaiting datanode acknowledgement.
// Entries live in CFile.DataCache, keyed by commit ID, until C2MRecv
// confirms them or Retry replays them.
type Data struct {
	DataBuf *bytes.Buffer // payload bytes to (re)send
	Status  int32         // set to 1 when queued by WriteThread
	timer   *time.Timer   // unused in this file (see commented-out Stop in C2MRecv)
	ID      uint64        // commit ID taken from CFile.atomicNum
}
// ReadCache is a single-entry read-ahead cache holding the most recently
// fetched byte range of the file.
type ReadCache struct {
	LastOffset int64              // file offset of the first byte in readBuf
	readBuf    []byte             // cached bytes; cleared when an overlapping write lands
	Ch         chan *bytes.Buffer // delivery channel filled by streamRead
}
// wBuffer stages pending write data for one chunk.
// NOTE(review): only the buffer field is used by code visible in this file
// (overwriteBuffer); the offsets appear to be maintained elsewhere — confirm.
type wBuffer struct {
	freeSize    int32               // chunk size
	chunkInfo   *mp.ChunkInfoWithBG // chunk info
	buffer      *bytes.Buffer       // chunk data
	startOffset int64
	endOffset   int64
}
// chanData is the unit passed through CFile.DataQueue: one write payload
// copied out of the caller's buffer by appendWrite.
type chanData struct {
	data []byte
}
// Chunk is the client-side write state for the chunk currently being
// appended to, including the C2M replication stream to the master datanode.
type Chunk struct {
	CFile         *CFile
	ChunkFreeSize int // remaining writable bytes in this chunk
	ChunkInfo     *mp.ChunkInfoWithBG
	// NOTE: "Steam" is a historical typo for "Stream"; the field name is
	// kept unchanged for compatibility with existing callers.
	ChunkWriteSteam          dp.DataNode_C2MReplClient
	ChunkWriteRecvExitSignal chan struct{}
}
// CFile ...
// CFile is an open file handle: write-path state (queue, replication stream,
// pending-ack cache) plus read-path state (chunk list, read cache).
type CFile struct {
	cfs             *CFS   // owning volume client
	ParentInodeID   uint64 // inode of the directory containing this file
	Name            string
	Inode           uint64
	OpenFlag        int
	FileSize        int64 // bytes confirmed by datanode ack + metanode sync
	FileSizeInCache int64 // bytes accepted, including not-yet-acked appends
	Status          int32 // 0 ok
	DataConnLocker  sync.RWMutex                // guards DataConn
	DataConn        map[string]*grpc.ClientConn // cached conns keyed by datanode addr
	// for write
	wBuffer          wBuffer
	wgWriteReps      sync.WaitGroup   // outstanding seekWriteChunk goroutines
	atomicNum        uint64           // last commit ID handed out
	curNum           uint64           // last commit ID acknowledged
	Writer           int32
	DataCacheLocker  sync.RWMutex     // guards DataCache
	DataCache        map[uint64]*Data // unacked payloads keyed by commit ID
	DataQueue        chan *chanData   // appendWrite -> WriteThread
	WriteErrSignal   chan bool        // raised when the write path fails
	WriteRetrySignal chan bool
	Closing          bool
	CloseSignal      chan struct{} // WriteThread -> CloseWrite handshake
	CurChunk         *Chunk        // chunk currently receiving appends
	WriteLocker      sync.Mutex
	// for read
	//lastoffset int64
	RMutex sync.Mutex
	chunks []*mp.ChunkInfoWithBG // chunkinfo
	//readBuf []byte
	readCache        ReadCache
	errDataNodeCache map[string]bool // datanode addrs that recently failed
}
// extentInfo maps a byte range of the file onto a single chunk: which chunk,
// where inside it, and how many bytes.
type extentInfo struct {
	pos    int32 //pos in chunks of cfile
	offset int32 //offset in chunk
	length int32 //length in chunk
}
// newDataConn returns a cached gRPC connection to datanode @addr, dialing
// and caching a new one on first use. Safe for concurrent callers: a
// double-checked read lock / write lock pattern keeps exactly one connection
// per address. Returns nil if dialing fails.
func (cfile *CFile) newDataConn(addr string) *grpc.ClientConn {
	// Fast path: connection already cached.
	cfile.DataConnLocker.RLock()
	if v, ok := cfile.DataConn[addr]; ok {
		cfile.DataConnLocker.RUnlock()
		return v
	}
	cfile.DataConnLocker.RUnlock()

	conn, err := utils.Dial(addr)
	if err != nil || conn == nil {
		logger.Error("Dial to %v failed! err: %v", addr, err)
		return nil
	}

	cfile.DataConnLocker.Lock()
	if v, ok := cfile.DataConn[addr]; ok {
		// Another goroutine raced us and cached a connection first: keep
		// theirs and discard ours.
		// BUG FIX: this previously called RUnlock() while holding the write
		// lock, which corrupts the RWMutex state (Unlock of an un-held read
		// lock); it must release the write lock.
		cfile.DataConnLocker.Unlock()
		conn.Close()
		return v
	}
	cfile.DataConn[addr] = conn
	cfile.DataConnLocker.Unlock()
	return conn
}
//close and delete conn when err
// delErrDataConn drops the cached connection for @addr (if present),
// closing it so the next use re-dials.
func (cfile *CFile) delErrDataConn(addr string) {
	cfile.DataConnLocker.Lock()
	defer cfile.DataConnLocker.Unlock()
	conn, cached := cfile.DataConn[addr]
	if !cached {
		return
	}
	conn.Close()
	delete(cfile.DataConn, addr)
}
//only delele all conn when closing file
// delAllDataConn closes and forgets every cached datanode connection.
func (cfile *CFile) delAllDataConn() {
	cfile.DataConnLocker.Lock()
	defer cfile.DataConnLocker.Unlock()
	for addr, conn := range cfile.DataConn {
		conn.Close()
		delete(cfile.DataConn, addr)
	}
}
// streamRead reads [offset, offset+size) of chunk @chunkidx from one of its
// replica datanodes and delivers the result on @ch exactly once. Replica
// order is randomized, with a host not present in errDataNodeCache moved to
// the front. On total failure an empty buffer is sent so the receiver in
// readChunk is never left blocking.
func (cfile *CFile) streamRead(chunkidx int, ch chan *bytes.Buffer, offset int64, size int64) {
	var conn *grpc.ClientConn
	var buffer *bytes.Buffer
	outflag := 0 // counts replicas that failed (dial or stream errors)
	inflag := 0  // non-zero when the current replica's Recv loop broke early
	idxs := utils.GenerateRandomNumber(0, 3, 3)
	// Prefer a replica that is not in the bad-host cache: swap the first
	// such host to the front of the randomized order.
	for n := 0; n < 3; n++ {
		i := idxs[n]
		addr := cfile.chunks[chunkidx].BlockGroupWithHost.Hosts[i]
		_, ok := cfile.errDataNodeCache[addr]
		if !ok {
			if n != 0 {
				tmp := idxs[0]
				idxs[0] = i
				idxs[n] = tmp
			}
			break
		}
	}
	for n := 0; n < len(cfile.chunks[chunkidx].BlockGroupWithHost.Hosts); n++ {
		i := idxs[n]
		buffer = new(bytes.Buffer)
		addr := cfile.chunks[chunkidx].BlockGroupWithHost.Hosts[i]
		conn = cfile.newDataConn(addr)
		if conn == nil {
			// Could not dial this replica: remember it and try the next one.
			cfile.errDataNodeCache[addr] = true
			outflag++
			continue
		}
		dc := dp.NewDataNodeClient(conn)
		streamreadChunkReq := &dp.StreamReadChunkReq{
			ChunkID:      cfile.chunks[chunkidx].ChunkID,
			BlockGroupID: cfile.chunks[chunkidx].BlockGroupWithHost.BlockGroupID,
			Offset:       offset,
			Readsize:     size,
		}
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		stream, err := dc.StreamReadChunk(ctx, streamreadChunkReq)
		if err != nil {
			// The cached connection may be stale: drop it, re-dial once and
			// retry the stream before giving up on this replica.
			cfile.delErrDataConn(addr)
			conn = cfile.newDataConn(addr)
			if conn == nil {
				logger.Error("StreamReadChunk return error:%v and re-dial failed, so retry other datanode!", err)
				cfile.errDataNodeCache[addr] = true
				outflag++
				continue
			} else {
				dc = dp.NewDataNodeClient(conn)
				streamreadChunkReq := &dp.StreamReadChunkReq{
					ChunkID:      cfile.chunks[chunkidx].ChunkID,
					BlockGroupID: cfile.chunks[chunkidx].BlockGroupWithHost.BlockGroupID,
					Offset:       offset,
					Readsize:     size,
				}
				ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
				stream, err = dc.StreamReadChunk(ctx, streamreadChunkReq)
				if err != nil {
					cfile.delErrDataConn(addr)
					logger.Error("StreamReadChunk StreamReadChunk error:%v, so retry other datanode!", err)
					cfile.errDataNodeCache[addr] = true
					outflag++
					continue
				}
			}
		}
		// Stream established: this host works again, clear it from the cache.
		delete(cfile.errDataNodeCache, addr)
		// Accumulate the streamed data until EOF or error.
		for {
			ack, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				logger.Error("=== streamreadChunkReq Recv err:%v ===", err)
				inflag++
				outflag++
				break
			}
			if ack != nil {
				if len(ack.Databuf) == 0 {
					continue
				} else {
					buffer.Write(ack.Databuf)
					inflag = 0
				}
			} else {
				continue
			}
		}
		if inflag == 0 {
			// Clean EOF: hand the data to the reader and stop.
			ch <- buffer
			break
		} else if inflag == 3 {
			// Repeated Recv failures: give up with an empty buffer.
			buffer = new(bytes.Buffer)
			buffer.Write([]byte{})
			logger.Error("Stream Read the chunk three copy Recv error")
			ch <- buffer
			break
		} else if inflag < 3 {
			logger.Error("Stream Read the chunk %v copy Recv error, so need retry other datanode!!!", inflag)
			continue
		}
	}
	if outflag >= 3 {
		// Every replica failed before any data arrived: unblock the reader
		// with an empty buffer (readChunk treats length 0 as an error).
		buffer = new(bytes.Buffer)
		buffer.Write([]byte{})
		logger.Error("Stream Read the chunk three copy Datanode error")
		ch <- buffer
	}
}
// readChunk appends the extent @eInfo of the file to *data, serving from the
// single-entry read cache when the requested range lies inside it, otherwise
// streaming at least BufferSize bytes from a datanode and refreshing the
// cache. Returns the number of bytes appended (may be short near the end of
// a chunk), or -1 when all replicas failed.
func (cfile *CFile) readChunk(eInfo extentInfo, data *[]byte, offset int64) int32 {
	//check if hit readBuf
	readBufOffset := cfile.readCache.LastOffset
	readBufLen := len(cfile.readCache.readBuf)
	if offset >= readBufOffset && offset+int64(eInfo.length) <= readBufOffset+int64(readBufLen) {
		// Cache hit: copy straight out of readBuf.
		pos := int32(offset - readBufOffset)
		*data = append(*data, cfile.readCache.readBuf[pos:pos+eInfo.length]...)
		logger.Debug("cfile %v hit read buffer, offset:%v len:%v, readBuf offset:%v, len:%v", cfile.Name, offset, eInfo.length, readBufOffset, readBufLen)
		return eInfo.length
	}
	//prepare to read from datanode
	cfile.readCache.readBuf = []byte{}
	buffer := new(bytes.Buffer)
	cfile.readCache.Ch = make(chan *bytes.Buffer)
	// Read ahead: fetch at least BufferSize bytes even for small extents.
	readSize := eInfo.length
	if readSize < int32(BufferSize) {
		readSize = int32(BufferSize)
	}
	//go streamRead
	go cfile.streamRead(int(eInfo.pos), cfile.readCache.Ch, int64(eInfo.offset), int64(readSize))
	buffer = <-cfile.readCache.Ch
	bLen := buffer.Len()
	if bLen == 0 {
		// streamRead delivers an empty buffer when every replica failed.
		logger.Error("try to read %v chunk:%v from datanode size:%v, but return:%v", cfile.Name, eInfo.pos, readSize, bLen)
		return -1
	}
	cfile.readCache.readBuf = buffer.Next(bLen)
	cfile.readCache.LastOffset = offset
	// The datanode may return fewer bytes than requested (end of chunk).
	appendLen := eInfo.length
	if appendLen > int32(bLen) {
		appendLen = int32(bLen)
	}
	*data = append(*data, cfile.readCache.readBuf[0:appendLen]...)
	buffer.Reset()
	buffer = nil
	return appendLen
}
// disableReadCache invalidates the read cache when a write covering
// [wOffset, wOffset+wLen) overlaps the cached byte range.
func (cfile *CFile) disableReadCache(wOffset int64, wLen int32) {
	cacheStart := cfile.readCache.LastOffset
	cacheLen := len(cfile.readCache.readBuf)
	if cacheLen == 0 {
		return
	}
	// No overlap: the write lies entirely before or after the cached range.
	noOverlap := wOffset >= cacheStart+int64(cacheLen) || wOffset+int64(wLen) <= cacheStart
	if noOverlap {
		return
	}
	// Overlapping write: drop the cached bytes.
	cfile.readCache.readBuf = []byte{}
	logger.Debug("cfile %v disableReadCache: offset: %v len %v --> %v", cfile.Name, cacheStart, cacheLen, len(cfile.readCache.readBuf))
}
//get extent info by [start, end)
// getExtentInfo partitions the file byte range [start, end) into per-chunk
// extents and appends them to *eInfo in chunk order. Ranges beyond the last
// chunk produce no extents.
func (cfile *CFile) getExtentInfo(start int64, end int64, eInfo *[]extentInfo) {
	var chunkStart int64 // file offset where the current chunk begins
	var chunkEnd int64   // file offset just past the current chunk
	for idx := 0; idx < len(cfile.chunks); idx++ {
		if start >= end {
			break
		}
		chunkEnd += int64(cfile.chunks[idx].ChunkSize)
		if start < chunkEnd {
			ext := extentInfo{pos: int32(idx), offset: int32(start - chunkStart)}
			if end > chunkEnd {
				// Range continues into the next chunk.
				ext.length = int32(chunkEnd - start)
				start = chunkEnd
			} else {
				// Range ends inside this chunk.
				ext.length = int32(end - start)
				start = end
			}
			*eInfo = append(*eInfo, ext)
		}
		chunkStart = chunkEnd
	}
}
// Read ...
// Read appends up to @readsize bytes starting at @offset to *data.
// Returns the number of bytes read, 0 at or beyond the cached file size, or
// -2 when the file is in error state. If the requested range extends past
// what has been flushed (FileSize) but within FileSizeInCache, it polls for
// up to ~1s per round waiting for the background writer to catch up.
func (cfile *CFile) Read(data *[]byte, offset int64, readsize int64) int64 {
	if cfile.Status != FileNormal {
		logger.Error("cfile %v status error , read func return -2 ", cfile.Name)
		return -2
	}
	if offset == cfile.FileSizeInCache {
		logger.Debug("cfile:%v read offset:%v equals file size in cache ", cfile.Name, offset)
		return 0
	} else if offset > cfile.FileSizeInCache {
		logger.Debug("cfile %v unsupport read beyond file size, offset:%v, filesize in cache:%v ", cfile.Name, offset, cfile.FileSizeInCache)
		return 0
	}
	var i int
	var ret int32
	var doneFlag bool
	start := offset
	end := offset + readsize
	logger.Debug("cfile %v Read start: offset: %v, len: %v", cfile.Name, offset, readsize)
	for start < end && cfile.Status == FileNormal {
		// Map [start, end) onto per-chunk extents and read each in turn.
		eInfo := make([]extentInfo, 0, 4)
		cfile.getExtentInfo(start, end, &eInfo)
		logger.Debug("cfile %v getExtentInfo: offset: %v, len: %v, eInfo: %v", cfile.Name, start, end, eInfo)
		for _, ei := range eInfo {
			ret = cfile.readChunk(ei, data, start)
			if ret != ei.length {
				// Short or failed chunk read: stop with what we have.
				logger.Error("cfile %v eInfo:%v, readChunk ret %v", cfile.Name, ei, ret)
				doneFlag = true
				break
			}
			start += int64(ret)
		}
		if doneFlag || start == end || start >= cfile.FileSizeInCache {
			break
		}
		//wait append write request in caches
		logger.Debug("cfile %v, start to wait append write..FileSize %v, FileSizeInCache %v", cfile.Name, cfile.FileSize, cfile.FileSizeInCache)
		for i = 0; i < 10; i++ {
			if cfile.FileSize >= end || cfile.FileSize == cfile.FileSizeInCache {
				break
			}
			if len(cfile.DataCache) == 0 {
				logger.Debug("cfile %v, FileSize %v, FileSizeInCache %v, but no DataCache", cfile.Name, cfile.FileSize, cfile.FileSizeInCache)
			}
			time.Sleep(100 * time.Millisecond)
		}
		logger.Debug("cfile %v, end waiting with FileSize %v, FileSizeInCache %v, time %v ms", cfile.Name, cfile.FileSize, cfile.FileSizeInCache, i*100)
	}
	if cfile.Status != FileNormal {
		logger.Error("cfile %v status error , read func return -2 ", cfile.Name)
		return -2
	}
	logger.Debug("cfile %v Read end: return %v", cfile.Name, start-offset)
	return start - offset
}
// Write ...
// Write stores @length bytes of @buf at @offset. A pure append (offset at
// FileSizeInCache) is queued for the background writer; overwrites of
// existing data are split into per-chunk extents and written synchronously
// via seekWrite. Writing beyond FileSizeInCache is rejected with -3; error
// state returns -2; otherwise the number of bytes accepted is returned.
func (cfile *CFile) Write(buf []byte, offset int64, length int32) int32 {
	if cfile.Status != 0 {
		logger.Error("cfile %v status error , Write func return -2 ", cfile.Name)
		return -2
	}
	if offset > cfile.FileSizeInCache {
		logger.Error("cfile %v unsupport write %v beyond file size %v return -3 ", cfile.Name, offset, cfile.FileSizeInCache)
		return -3
	}
	if offset == cfile.FileSizeInCache {
		// Pure append: hand off to the background write queue.
		logger.Debug("cfile %v write append only: offset %v, length %v", cfile.Name, offset, length)
		return cfile.appendWrite(buf, length)
	}
	// Overwrite path: invalidate any cached read data that overlaps.
	cfile.disableReadCache(offset, length)
	var i int
	var ret, pos int32
	start := offset
	end := offset + int64(length)
	logger.Debug("cfile %v write start: offset: %v, len: %v", cfile.Name, offset, length)
	for start < end && cfile.Status == FileNormal {
		eInfo := make([]extentInfo, 0, 4)
		cfile.getExtentInfo(start, end, &eInfo)
		logger.Debug("cfile %v getExtentInfo: offset: %v, len: %v, eInfo: %v", cfile.Name, start, end, eInfo)
		for _, ei := range eInfo {
			ret = cfile.seekWrite(ei, buf[pos:(pos+ei.length)])
			if ret < 0 {
				logger.Error("cfile %v seekWrite failed %v", cfile.Name, ei)
				return int32(start - offset)
			}
			start += int64(ei.length)
			pos += ei.length
		}
		if start == end {
			break
		}
		if start == cfile.FileSizeInCache {
			// Remaining bytes extend the file: switch to append mode.
			logger.Debug("cfile %v write append only: offset %v, length %v", cfile.Name, start, length-pos)
			ret = cfile.appendWrite(buf[pos:length], length-pos)
			if ret < 0 {
				logger.Error("cfile %v appendWrite failed %v", cfile.Name, ret)
				return int32(start - offset)
			}
			start = end
			break
		}
		//wait append write request in caches
		logger.Debug("cfile %v, start to wait append write..FileSize %v, FileSizeInCache %v", cfile.Name, cfile.FileSize, cfile.FileSizeInCache)
		for i = 0; i < 10; i++ {
			if cfile.FileSize >= end || cfile.FileSize == cfile.FileSizeInCache {
				break
			}
			if len(cfile.DataCache) == 0 {
				logger.Debug("cfile %v, FileSize %v, FileSizeInCache %v, but no DataCache", cfile.Name, cfile.FileSize, cfile.FileSizeInCache)
			}
			time.Sleep(100 * time.Millisecond)
		}
		logger.Debug("cfile %v, end waiting with FileSize %v, FileSizeInCache %v, time %v ms", cfile.Name, cfile.FileSize, cfile.FileSizeInCache, i*100)
	}
	logger.Debug("cfile %v Write end: return %v", cfile.Name, start-offset)
	return int32(start - offset)
}
// overwriteBuffer patches the pending write buffer in place: it drains the
// buffer, copies @buf over the range described by @eInfo, and refills the
// buffer with the patched bytes. Returns 0 on success, -1 on any shortfall.
func (cfile *CFile) overwriteBuffer(eInfo extentInfo, buf []byte) int32 {
	// Drain the staged bytes so they can be patched as a flat slice.
	bufLen := cfile.wBuffer.buffer.Len()
	tmpBuf := cfile.wBuffer.buffer.Next(bufLen)
	if len(tmpBuf) != bufLen {
		logger.Error("cfile %v read wBuffer len: %v return: %v ", cfile.Name, bufLen, len(tmpBuf))
		return -1
	}
	// Patch the overwritten range; a short copy means the extent did not
	// fit inside the staged data.
	if n := copy(tmpBuf[eInfo.offset:], buf); n != int(eInfo.length) {
		logger.Error("cfile %v copy to wBuffer len: %v return n: %v", cfile.Name, eInfo.length, n)
		return -1
	}
	// Refill the buffer with the patched bytes.
	cfile.wBuffer.buffer.Reset()
	n, err := cfile.wBuffer.buffer.Write(tmpBuf)
	if n != int(bufLen) || err != nil {
		logger.Error("cfile %v write wBuffer len: %v return n: %v err %v", cfile.Name, bufLen, n, err)
		return -1
	}
	return 0
}
// seekWriteChunk sends one SeekWriteChunk RPC to datanode @addr over @conn
// and atomically increments *copies on success. It runs as a goroutine from
// seekWrite and always signals completion on cfile.wgWriteReps.
// A nil conn or any RPC/remote failure simply leaves *copies unincremented.
func (cfile *CFile) seekWriteChunk(addr string, conn *grpc.ClientConn, req *dp.SeekWriteChunkReq, copies *uint64) {
	// Done() on every path (was a trailing Add(-1) after a nested if/else).
	defer cfile.wgWriteReps.Done()
	if conn == nil {
		return
	}
	dc := dp.NewDataNodeClient(conn)
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	ret, err := dc.SeekWriteChunk(ctx, req)
	if err != nil {
		// Drop the (possibly stale) cached connection so it gets re-dialed.
		cfile.delErrDataConn(addr)
		logger.Error("SeekWriteChunk err %v", err)
		return
	}
	if ret.Ret == 0 {
		atomic.AddUint64(copies, 1)
	}
}
// seekWrite overwrites the in-chunk extent @eInfo with @buf on every replica
// of the chunk's block group, in parallel goroutines. All copies must
// succeed; on any shortfall the file is marked FileError and -1 is returned.
// NOTE(review): assumes exactly 3 replicas per block group (conn slice and
// the copies < 3 check) — confirm against cluster configuration.
func (cfile *CFile) seekWrite(eInfo extentInfo, buf []byte) int32 {
	chunkInfo := cfile.chunks[eInfo.pos]
	var copies uint64
	// Dial (or reuse) a connection to each replica up front; any failure
	// aborts before sending partial writes.
	conn := make([]*grpc.ClientConn, 3)
	for i, v := range chunkInfo.BlockGroupWithHost.Hosts {
		conn[i] = cfile.newDataConn(v)
		if conn[i] == nil {
			logger.Error("cfile %v dial %v failed!", cfile.Name, v)
			return -1
		}
	}
	for i, v := range chunkInfo.BlockGroupWithHost.Hosts {
		pSeekWriteChunkReq := &dp.SeekWriteChunkReq{
			ChunkID:      chunkInfo.ChunkID,
			BlockGroupID: chunkInfo.BlockGroupWithHost.BlockGroupID,
			Databuf:      buf,
			ChunkOffset:  int64(eInfo.offset),
		}
		cfile.wgWriteReps.Add(1)
		go cfile.seekWriteChunk(v, conn[i], pSeekWriteChunkReq, &copies)
	}
	cfile.wgWriteReps.Wait()
	if copies < 3 {
		// At least one replica write failed: poison the file.
		cfile.Status = FileError
		logger.Error("cfile %v seekWriteChunk copies: %v, set error!", cfile.Name, copies)
		return -1
	}
	return 0
}
// Write ...
// appendWrite copies @buf and queues it for the background write thread,
// advancing FileSizeInCache on acceptance. Returns @length on success or -2
// when the file is in error state / the writer has signalled failure.
func (cfile *CFile) appendWrite(buf []byte, length int32) int32 {
	if cfile.Status == FileError {
		return -2
	}
	// Copy the caller's bytes so they remain valid after this call returns.
	pending := &chanData{data: append([]byte(nil), buf...)}
	select {
	case <-cfile.WriteErrSignal:
		logger.Error("Write recv WriteErrSignal ,volumeid %v , pid %v ,fname %v!", cfile.cfs.VolID, cfile.ParentInodeID, cfile.Name)
		return -2
	case cfile.DataQueue <- pending:
	}
	cfile.FileSizeInCache += int64(length)
	return length
}
// WriteThread is the per-file background writer. It consumes queued payloads
// from DataQueue and forwards them to WriteHandler under fresh commit IDs.
// When CloseWrite closes the queue the received value is nil: the thread
// then waits for DataCache to drain, closes the replication stream, signals
// CloseSignal and exits. On a WriteHandler failure the file is marked
// FileError and WriteErrSignal is raised so blocked writers give up.
func (cfile *CFile) WriteThread() {
	logger.Debug("Write Thread: file %v start writethread!\n", cfile.Name)
	// Idiom fix: was `for true` wrapping a single-case select; a plain
	// receive in an infinite loop is equivalent.
	for {
		chanData := <-cfile.DataQueue
		if chanData == nil {
			// Queue closed by CloseWrite: drain outstanding acks, then exit.
			logger.Debug("WriteThread file %v recv channel close, wait DataCache...", cfile.Name)
			var ti uint32
			for cfile.Status == FileNormal {
				if len(cfile.DataCache) == 0 {
					break
				}
				ti++
				time.Sleep(time.Millisecond * 5)
			}
			logger.Debug("WriteThread file %v wait DataCache == 0 done. loop times: %v", cfile.Name, ti)
			if cfile.CurChunk != nil {
				if cfile.CurChunk.ChunkWriteSteam != nil {
					cfile.CurChunk.ChunkWriteSteam.CloseSend()
				}
			}
			cfile.CloseSignal <- struct{}{}
			return
		}
		if cfile.Status == FileError {
			// File is poisoned: discard queued payloads.
			continue
		}
		newData := &Data{}
		newData.ID = atomic.AddUint64(&cfile.atomicNum, 1)
		newData.DataBuf = new(bytes.Buffer)
		newData.DataBuf.Write(chanData.data)
		newData.Status = 1
		if err := cfile.WriteHandler(newData); err != nil {
			logger.Error("WriteThread file %v WriteHandler err %v !", cfile.Name, err)
			cfile.Status = FileError
			cfile.WriteErrSignal <- true
		}
	}
}
// WriteHandler ships one buffered payload to the current chunk's replication
// stream. If the chunk lacks space it first waits for DataCache to drain
// (with stall detection), closes the stream, and allocates a fresh chunk
// (up to 5 attempts). The payload is registered in DataCache under the
// current atomicNum so C2MRecv can confirm it later.
// Returns an error only for error state or allocation failure; a failed
// Send just marks the chunk full so the next call rolls over, and the data
// is redelivered by the Retry path.
func (cfile *CFile) WriteHandler(newData *Data) error {
	length := newData.DataBuf.Len()
	logger.Debug("WriteHandler: file %v, num:%v, length: %v, \n", cfile.Name, cfile.atomicNum, length)
ALLOCATECHUNK:
	// Current chunk cannot hold this payload: retire it.
	if cfile.CurChunk != nil && cfile.CurChunk.ChunkFreeSize-length < 0 {
		if cfile.CurChunk.ChunkWriteSteam != nil {
			var ti uint32
			needClose := bool(true)
			logger.Debug("WriteHandler: file %v, begin waiting last chunk: %v\n", cfile.Name, len(cfile.DataCache))
			tmpDataCacheLen := len(cfile.DataCache)
			// Wait until all in-flight writes on the old chunk are acked.
			// @ti counts 10ms polls without progress; 500 stalled polls
			// (~5s) force-close the send side to unblock the receiver.
			for cfile.Status == FileNormal {
				if tmpDataCacheLen == 0 {
					break
				}
				time.Sleep(time.Millisecond * 10)
				if tmpDataCacheLen == len(cfile.DataCache) {
					ti++
				} else {
					tmpDataCacheLen = len(cfile.DataCache)
					ti = 0
				}
				if ti == 500 {
					if cfile.CurChunk.ChunkWriteSteam != nil {
						logger.Error("WriteHandler: file %v, dataCacheLen: %v wait last chunk timeout, CloseSend\n", cfile.Name, len(cfile.DataCache))
						cfile.CurChunk.ChunkWriteSteam.CloseSend()
						needClose = false
					}
				}
			}
			if cfile.Status == FileError {
				return errors.New("file status err")
			}
			logger.Debug("WriteHandler: file %v, end wait after loop times %v\n", cfile.Name, ti)
			if needClose && cfile.CurChunk.ChunkWriteSteam != nil {
				cfile.CurChunk.ChunkWriteSteam.CloseSend()
			}
		}
		cfile.CurChunk = nil
	}
	// Allocate a new chunk with a replication stream if we have none.
	if cfile.CurChunk == nil {
		for retryCnt := 0; retryCnt < 5; retryCnt++ {
			cfile.CurChunk = cfile.AllocateChunk(true)
			if cfile.CurChunk == nil {
				logger.Error("WriteHandler: file %v, alloc chunk failed for %v times\n", cfile.Name, retryCnt+1)
				time.Sleep(time.Millisecond * 500)
				continue
			}
			break
		}
		if cfile.CurChunk == nil {
			return errors.New("AllocateChunk failed for 5 times")
		}
	}
	// Track the payload until C2MRecv confirms it; keyed by the commit ID
	// that WriteThread assigned via atomicNum.
	cfile.DataCacheLocker.Lock()
	cfile.DataCache[cfile.atomicNum] = newData
	cfile.DataCacheLocker.Unlock()
	req := &dp.StreamWriteReq{
		ChunkID:      cfile.CurChunk.ChunkInfo.ChunkID,
		Master:       cfile.CurChunk.ChunkInfo.BlockGroupWithHost.Hosts[0],
		Slave:        cfile.CurChunk.ChunkInfo.BlockGroupWithHost.Hosts[1],
		Backup:       cfile.CurChunk.ChunkInfo.BlockGroupWithHost.Hosts[2],
		Databuf:      newData.DataBuf.Bytes(),
		DataLen:      uint32(length),
		CommitID:     cfile.atomicNum,
		BlockGroupID: cfile.CurChunk.ChunkInfo.BlockGroupWithHost.BlockGroupID,
	}
	if cfile.CurChunk != nil {
		if cfile.CurChunk.ChunkWriteSteam != nil {
			if err := cfile.CurChunk.ChunkWriteSteam.Send(req); err != nil {
				// Send failed: mark the chunk full so the next write rolls
				// over; the unacked payload stays in DataCache for Retry.
				logger.Debug("WriteHandler: send file %v, chunk %v len: %v failed\n", cfile.Name, cfile.CurChunk, length)
				cfile.CurChunk.ChunkFreeSize = 0
			} else {
				logger.Debug("WriteHandler: send file %v, chunk %v len: %v success\n", cfile.Name, cfile.CurChunk, length)
				cfile.CurChunk.ChunkFreeSize -= length
			}
		} else {
			// Stream vanished (e.g. cleared by Retry): start over.
			logger.Error("WriteHandler: file %v, CurChunk %v has no write stream\n", cfile.Name, cfile.CurChunk.ChunkInfo.ChunkID)
			goto ALLOCATECHUNK
		}
	} else {
		logger.Error("WriteHandler: file %v, CurChunk is nil\n", cfile.Name)
		goto ALLOCATECHUNK
	}
	return nil
}
// AllocateChunk ...
// AllocateChunk asks the metanode for a fresh chunk, retrying once after a
// reconnect. When @IsStream is true it also verifies reachability of the
// slave/backup replicas, opens the C2M replication stream to the master
// datanode (Hosts[0]) and starts the C2MRecv ack goroutine.
// Returns nil on any failure.
func (cfile *CFile) AllocateChunk(IsStream bool) *Chunk {
	logger.Debug("AllocateChunk file: %v begin\n", cfile.Name)
	ret := cfile.cfs.checkMetaConn()
	if ret != 0 {
		logger.Error("AllocateChunk file: %v failed\n", cfile.Name)
		return nil
	}
	mc := mp.NewMetaNodeClient(cfile.cfs.MetaNodeConn)
	pAllocateChunkReq := &mp.AllocateChunkReq{
		VolID: cfile.cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pAllocateChunkAck, err := mc.AllocateChunk(ctx, pAllocateChunkReq)
	if err != nil || pAllocateChunkAck.Ret != 0 {
		// Retry once after pausing and reconnecting to the metanode.
		time.Sleep(time.Second * 2)
		ret := cfile.cfs.checkMetaConn()
		if ret != 0 {
			logger.Error("AllocateChunk file: %v failed\n", cfile.Name)
			return nil
		}
		mc = mp.NewMetaNodeClient(cfile.cfs.MetaNodeConn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		pAllocateChunkAck, err = mc.AllocateChunk(ctx, pAllocateChunkReq)
		if err != nil || pAllocateChunkAck.Ret != 0 {
			logger.Error("AllocateChunk file: %v failed, err: %v ret: %v\n", cfile.Name, err, pAllocateChunkAck.Ret != 0)
			return nil
		}
	}
	curChunk := &Chunk{}
	curChunk.CFile = cfile
	curChunk.ChunkInfo = pAllocateChunkAck.ChunkInfo
	logger.Debug("AllocateChunk file: %v from metanode chunk info:%v\n", cfile.Name, curChunk.ChunkInfo)
	if IsStream {
		// Confirm the slave (Hosts[1]) and backup (Hosts[2]) replicas are
		// reachable before committing to this block group.
		err := utils.TryDial(curChunk.ChunkInfo.BlockGroupWithHost.Hosts[1])
		if err != nil {
			logger.Error("AllocateChunk file: %v new conn to %v failed, err: %v\n", cfile.Name, curChunk.ChunkInfo.BlockGroupWithHost.Hosts[1], err)
			return nil
		}
		err = utils.TryDial(curChunk.ChunkInfo.BlockGroupWithHost.Hosts[2])
		if err != nil {
			logger.Error("AllocateChunk file: %v new conn to %v failed, err: %v\n", cfile.Name, curChunk.ChunkInfo.BlockGroupWithHost.Hosts[2], err)
			return nil
		}
		C2Mconn := cfile.newDataConn(curChunk.ChunkInfo.BlockGroupWithHost.Hosts[0])
		if C2Mconn == nil {
			logger.Error("AllocateChunk file: %v new conn to %v failed\n", cfile.Name, curChunk.ChunkInfo.BlockGroupWithHost.Hosts[0])
			return nil
		}
		C2Mclient := dp.NewDataNodeClient(C2Mconn)
		// Open the client-to-master replication stream used for appends.
		curChunk.ChunkWriteSteam, err = C2Mclient.C2MRepl(context.Background())
		if err != nil {
			cfile.delErrDataConn(curChunk.ChunkInfo.BlockGroupWithHost.Hosts[0])
			logger.Error("AllocateChunk file: %v create stream to %v failed, err: %v\n", cfile.Name, curChunk.ChunkInfo.BlockGroupWithHost.Hosts[0], err)
			return nil
		}
		curChunk.ChunkFreeSize = chunkSize
		curChunk.ChunkWriteRecvExitSignal = make(chan struct{})
		go curChunk.C2MRecv()
	}
	logger.Debug("AllocateChunk file: %v success\n", cfile.Name)
	return curChunk
}
// Retry runs when C2MRecv exits. If unacknowledged payloads remain in
// DataCache it replays them through a fresh chunk (WriteRetryHandle, up to 5
// attempts). On total failure the file is marked FileError and the writer is
// signalled; on success DataCache is reset and this chunk is retired.
// Holds DataCacheLocker for the entire recovery.
func (chunk *Chunk) Retry() {
	chunk.CFile.DataCacheLocker.Lock()
	defer chunk.CFile.DataCacheLocker.Unlock()
	if len(chunk.CFile.DataCache) == 0 {
		// Nothing outstanding: the stream ended cleanly.
		logger.Debug("C2MRecv thread end success for file %v chunk %v", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
		return
	}
	logger.Debug("C2MRecv thread Retry write file %v chunk %v start", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
	retrySuccess := false
	var err error
	for retryCnt := 0; retryCnt < 5; retryCnt++ {
		err = chunk.WriteRetryHandle()
		if err != nil {
			logger.Error("WriteRetryHandle file %v chunk %v err: %v, try again for %v times!", chunk.CFile.Name, chunk.ChunkInfo.ChunkID, err, retryCnt+1)
			time.Sleep(time.Millisecond * 500)
			continue
		} else {
			retrySuccess = true
			break
		}
	}
	if !retrySuccess {
		// Give up: poison the file and wake any blocked appendWrite.
		chunk.CFile.Status = FileError
		chunk.CFile.WriteErrSignal <- true
		logger.Error("C2MRecv thread Retry write file %v chunk %v failed, set FileError!", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
	} else {
		// All pending data replayed: clear the cache and retire this chunk.
		chunk.CFile.DataCache = make(map[uint64]*Data)
		chunk.ChunkFreeSize = 0
		chunk.ChunkWriteSteam = nil
		logger.Debug("C2MRecv thread Retry write file %v chunk %v success", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
	}
}
// C2MRecv consumes acknowledgements from the chunk's replication stream: it
// checks commit ordering, records the committed size on the metanode
// (AsyncChunk), then drops the confirmed payload from DataCache and bumps
// the file size. Any error breaks the loop; the deferred Retry() replays
// whatever is still unconfirmed.
func (chunk *Chunk) C2MRecv() {
	logger.Debug("C2MRecv thread started success for file %v chunk %v", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
	defer chunk.Retry()
	for {
		in, err := chunk.ChunkWriteSteam.Recv()
		if err == io.EOF {
			logger.Debug("C2MRecv: file %v chunk %v stream %v EOF\n", chunk.CFile.Name, chunk.ChunkInfo.ChunkID, chunk.ChunkWriteSteam)
			break
		}
		if err != nil {
			logger.Debug("C2MRecv: file %v chunk %v stream %v error return : %v\n", chunk.CFile.Name, chunk.ChunkInfo.ChunkID, chunk.ChunkWriteSteam, err)
			break
		}
		if in.Ret == -1 {
			logger.Error("C2MRecv: file %v chunk %v ack.Ret -1 , means M2S2B stream err", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
			break
		}
		// NOTE(review): assigning AddUint64's result back to curNum is
		// redundant (the atomic add already stored it) and the plain store
		// races with the atomic op — confirm intent.
		chunk.CFile.curNum = atomic.AddUint64(&chunk.CFile.curNum, 1)
		if in.CommitID != chunk.CFile.curNum {
			// Acks must arrive strictly in commit-ID order.
			logger.Error("C2MRecv: write failed! file: %v, ID;%v != curNum: %v, chunk: %v, len: %v\n", chunk.CFile.Name, in.CommitID, chunk.CFile.curNum, in.ChunkID, in.DataLen)
			break
		}
		// update to metanode
		logger.Debug("C2MRecv: Write success! try to update metadata file: %v, ID;%v, chunk: %v, len: %v\n",
			chunk.CFile.Name, in.CommitID, in.ChunkID, in.DataLen)
		mc := mp.NewMetaNodeClient(chunk.CFile.cfs.MetaNodeConn)
		pAsyncChunkReq := &mp.AsyncChunkReq{
			VolID:         chunk.CFile.cfs.VolID,
			ParentInodeID: chunk.CFile.ParentInodeID,
			Name:          chunk.CFile.Name,
			ChunkID:       in.ChunkID,
			CommitSize:    in.DataLen,
			BlockGroupID:  in.BlockGroupID,
		}
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		_, err2 := mc.AsyncChunk(ctx, pAsyncChunkReq)
		if err2 != nil {
			break
		}
		// comfirm data
		chunk.CFile.DataCacheLocker.Lock()
		//cfile.DataCache[in.CommitID].timer.Stop()
		delete(chunk.CFile.DataCache, in.CommitID)
		chunk.CFile.DataCacheLocker.Unlock()
		chunk.CFile.updateChunkSize(chunk.ChunkInfo, int32(in.DataLen))
	}
}
// WriteRetryHandle replays every unacknowledged payload in DataCache, in
// ascending commit-ID order, to a newly allocated chunk using plain
// (non-stream) WriteChunk RPCs on all replica hosts, then records the total
// replayed size on the metanode in one AsyncChunk call.
// Returns nil on success; the caller (Retry) clears DataCache afterwards.
func (chunk *Chunk) WriteRetryHandle() error {
	length := len(chunk.CFile.DataCache)
	if length == 0 {
		return nil
	}
	// Allocate a replacement chunk without a replication stream.
	tmpchunk := chunk.CFile.AllocateChunk(false)
	if tmpchunk == nil {
		return errors.New("AllocateChunk error")
	}
	// Replay strictly in ascending commit-ID order.
	sortedKeys := make([]int, 0)
	for k := range chunk.CFile.DataCache {
		sortedKeys = append(sortedKeys, int(k))
	}
	sort.Ints(sortedKeys)
	logger.Debug("WriteRetryHandle AllocateChunk success, begin to retry item num:%v, commitIDs: %v", length, sortedKeys)
	// Total bytes replayed; note this local shadows the package-level
	// chunkSize constant.
	var chunkSize int
	for _, vv := range sortedKeys {
		bufLen := chunk.CFile.DataCache[uint64(vv)].DataBuf.Len()
		req := dp.WriteChunkReq{ChunkID: tmpchunk.ChunkInfo.ChunkID,
			BlockGroupID: tmpchunk.ChunkInfo.BlockGroupWithHost.BlockGroupID,
			Databuf:      chunk.CFile.DataCache[uint64(vv)].DataBuf.Bytes(),
			CommitID:     uint64(vv),
		}
		// Each payload must land on every replica host; any failure aborts
		// the whole replay so the caller can retry from scratch.
		for _, v := range tmpchunk.ChunkInfo.BlockGroupWithHost.Hosts {
			conn := chunk.CFile.newDataConn(v)
			if conn == nil {
				logger.Error("WriteRetryHandle newDataConn Failed err")
				return fmt.Errorf("WriteRetryHandle newDataConn Failed")
			}
			dc := dp.NewDataNodeClient(conn)
			ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
			_, err := dc.WriteChunk(ctx, &req)
			if err != nil {
				logger.Error("WriteRetryHandle WriteChunk to DataNode Host Failed err %v", err)
				chunk.CFile.delErrDataConn(v)
				return err
			}
		}
		logger.Debug("WriteRetryHandle write CommitID %v bufLen %v success", vv, bufLen)
		chunkSize += bufLen
		chunk.CFile.curNum = uint64(vv)
	}
	// Record all replayed bytes on the metanode in a single update.
	mc := mp.NewMetaNodeClient(chunk.CFile.cfs.MetaNodeConn)
	pAsyncChunkReq := &mp.AsyncChunkReq{
		VolID:         chunk.CFile.cfs.VolID,
		ParentInodeID: chunk.CFile.ParentInodeID,
		Name:          chunk.CFile.Name,
		ChunkID:       tmpchunk.ChunkInfo.ChunkID,
		CommitSize:    uint32(chunkSize),
		BlockGroupID:  tmpchunk.ChunkInfo.BlockGroupWithHost.BlockGroupID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	_, err2 := mc.AsyncChunk(ctx, pAsyncChunkReq)
	if err2 != nil {
		logger.Error("WriteRetryHandle AsyncChunk to MetaNode Failed err %v", err2)
		return err2
	}
	logger.Debug("WriteRetryHandle success with ChunkID %v ChunkSize %v", tmpchunk.ChunkInfo.ChunkID, chunkSize)
	chunk.CFile.updateChunkSize(tmpchunk.ChunkInfo, int32(chunkSize))
	return nil
}
// update ChunkSize and FileSize only if chunk's data has be writted to datanode and syn to metanode
func (cfile *CFile) updateChunkSize(chunkinfo *mp.ChunkInfoWithBG, length int32) {
	n := len(cfile.chunks)
	if n > 0 && cfile.chunks[n-1].ChunkID == chunkinfo.ChunkID {
		// Data landed in the current last chunk: just grow it.
		cfile.chunks[n-1].ChunkSize += length
	} else {
		// A new chunk started: record it at the tail of the list.
		cfile.chunks = append(cfile.chunks, &mp.ChunkInfoWithBG{
			ChunkID:            chunkinfo.ChunkID,
			ChunkSize:          length,
			BlockGroupWithHost: chunkinfo.BlockGroupWithHost,
		})
	}
	cfile.FileSize += int64(length)
}
// Sync ...
// Sync reports whether the file is still writable: 0 when healthy, -1 once
// the write path has failed.
func (cfile *CFile) Sync() int32 {
	if cfile.Status != FileError {
		return 0
	}
	return -1
}
// Flush reports the health of the file handle: 0 when the file is usable,
// -1 when a previous write moved it into the FileError state.
// (Fixes the copy-pasted doc comment that previously said "Sync".)
func (cfile *CFile) Flush() int32 {
	if cfile.Status == FileError {
		return -1
	}
	return 0
}
// CloseWrite shuts down the write path for this file handle: it marks the
// handle as closing, closes DataQueue so the background write goroutine
// (started via WriteThread) drains and exits, then blocks until that
// goroutine signals completion on CloseSignal. Always returns 0.
func (cfile *CFile) CloseWrite() int32 {
	// Error state is deliberately ignored here so the queue is always closed;
	// the original status check is kept commented out below.
	/*if cfile.Status == FileError {
		return -1
	} */
	cfile.Closing = true
	logger.Debug("CloseWrite close cfile.DataQueue")
	close(cfile.DataQueue)
	// Wait for the writer goroutine to acknowledge the shutdown.
	<-cfile.CloseSignal
	logger.Debug("CloseWrite recv CloseSignal!")
	return 0
}
// Close releases all cached datanode connections held by this file handle.
// Always returns 0.
func (cfile *CFile) Close() int32 {
	cfile.delAllDataConn()
	return 0
}
// fix compile bug
package cfs
import (
"bytes"
"errors"
"fmt"
"github.com/tiglabs/containerfs/logger"
"github.com/tiglabs/containerfs/proto/dp"
"github.com/tiglabs/containerfs/proto/mp"
"github.com/tiglabs/containerfs/proto/vp"
"github.com/tiglabs/containerfs/utils"
"golang.org/x/net/context"
"google.golang.org/grpc"
"io"
"os"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
)
// chunksize for write
const (
	chunkSize      = 64 * 1024 * 1024        // max payload of one chunk: 64 MiB
	oneExpandSize  = 30 * 1024 * 1024 * 1024 // 30 GiB — presumably one volume-expand step; TODO confirm against VolMgr
	BlockGroupSize = 5 * 1024 * 1024 * 1024  // 5 GiB per block group
)

// File-status values stored in CFile.Status (checked by Sync/Flush).
const (
	FileNormal = 0 // handle is healthy
	FileError  = 2 // handle is in an error state; Sync/Flush return -1
)

// BufferSize ...
// BufferSize is the write-buffer / read-ahead granularity (set elsewhere at startup).
var BufferSize int32

// Cluster endpoints; MetaNodeHosts is populated by GetVolumeMetaPeers.
var VolMgrHosts []string
var MetaNodeHosts []string
// CFS ...
// CFS is a client-side handle to one volume: it tracks the current VolMgr and
// MetaNode leaders together with live gRPC connections to each. The
// connections are refreshed in the background by CheckLeaderConns.
type CFS struct {
	VolID          string           // UUID of the volume this handle operates on
	VolMgrConn     *grpc.ClientConn // connection to the current VolMgr leader
	VolMgrLeader   string           // address of the current VolMgr leader
	MetaNodeConn   *grpc.ClientConn // connection to the volume's MetaNode leader
	MetaNodeLeader string           // address of the current MetaNode leader
}
// GetAllDatanode asks the VolMgr leader for the full list of datanodes.
// Returns (0, nodes) on success and (-1, nil) on any failure.
func GetAllDatanode() (int32, []*vp.DataNode) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("GetAllDatanode failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetDataNodeReq := &vp.GetDataNodeReq{}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // was discarded before; releasing it frees the timer promptly
	pGetDataNodeAck, err := vc.GetDataNode(ctx, pGetDataNodeReq)
	if err != nil {
		logger.Error("GetAllDatanode failed,grpc func err :%v", err)
		return -1, nil
	}
	if pGetDataNodeAck.Ret != 0 {
		logger.Error("GetAllDatanode failed,grpc func ret :%v", pGetDataNodeAck.Ret)
		return -1, nil
	}
	return 0, pGetDataNodeAck.DataNodes
}
// GetAllMetanode asks the VolMgr leader for the full list of metanodes.
// Returns (0, nodes) on success and (-1, nil) on any failure.
func GetAllMetanode() (int32, []*vp.MetaNode) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		// Fixed copy-pasted "GetAllDatanode" in the message below.
		logger.Error("GetAllMetanode failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetAllMetaNodeReq := &vp.GetAllMetaNodeReq{}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // was discarded before
	pGetAllMetaNodeAck, err := vc.GetMetaNode(ctx, pGetAllMetaNodeReq)
	if err != nil {
		logger.Error("GetAllMetanode failed,grpc func err :%v", err)
		return -1, nil
	}
	if pGetAllMetaNodeAck.Ret != 0 {
		logger.Error("GetAllMetanode failed,grpc func ret :%v", pGetAllMetaNodeAck.Ret)
		return -1, nil
	}
	return 0, pGetAllMetaNodeAck.MetaNodes
}
// DelDatanode asks the VolMgr leader to remove the datanode at the given
// host address from the cluster. Returns 0 on success, -1 on any failure.
func DelDatanode(host string) int {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		// Fixed copy-pasted "GetAllDatanode" in the message below.
		logger.Error("DelDatanode failed,Dial to VolMgrHosts fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pDelDataNodeReq := &vp.DelDataNodeReq{
		Host: host,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // was discarded before
	ack, err := vc.DelDataNode(ctx, pDelDataNodeReq)
	if err != nil {
		logger.Error("DelDataNode failed,grpc func err :%v", err)
		return -1
	}
	if ack.Ret != 0 {
		logger.Error("DelDataNode failed,grpc func ret :%v", ack.Ret)
		return -1
	}
	return 0
}
// CreateVol volume
func CreateVol(name string, capacity string, tier string) int32 {
_, conn, err := utils.DialVolMgr(VolMgrHosts)
if err != nil {
logger.Error("CreateVol failed,Dial to VolMgrHosts fail :%v", err)
return -1
}
defer conn.Close()
vc := vp.NewVolMgrClient(conn)
spaceQuota, _ := strconv.Atoi(capacity)
pCreateVolReq := &vp.CreateVolReq{
VolName: name,
SpaceQuota: int32(spaceQuota),
Tier: tier,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
ack, err := vc.CreateVol(ctx, pCreateVolReq)
if err != nil {
logger.Error("CreateVol failed, VolMgr Leader return failed, err:%v", err)
if ack != nil && ack.UUID != "" {
DeleteVol(ack.UUID)
}
return -1
}
if ack.Ret != 0 {
logger.Error("CreateVol failed, VolMgr Leader return failed, ret:%v", ack.Ret)
if ack.UUID != "" {
DeleteVol(ack.UUID)
}
return ack.Ret
}
fmt.Println(ack.UUID)
return 0
}
/* TODO:
// Expand volume once for fuseclient
func ExpandVolRS(UUID string, MtPath string) int32 {
path := MtPath + "/expanding"
fd, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
return -2
}
defer fd.Close()
conn, err := DialMeta("Cluster")
if err != nil {
logger.Error("ExpandVolRS failed,Dial to Cluster leader metanode fail :%v", err)
return -1
}
defer conn.Close()
mc := mp.NewMetaNodeClient(conn)
pExpandVolRSReq := &mp.ExpandVolRSReq{
VolID: UUID,
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
pExpandVolRSAck, err := mc.ExpandVolRS(ctx, pExpandVolRSReq)
if err != nil {
logger.Error("ExpandVol once volume:%v failed, Cluster leader metanode return error:%v", UUID, err)
os.Remove(path)
return -1
}
if pExpandVolRSAck.Ret == -1 {
logger.Error("ExpandVol once volume:%v failed, Cluster leader metanode return -1:%v", UUID)
os.Remove(path)
return -1
} else if pExpandVolRSAck.Ret == 0 {
logger.Error("ExpandVol volume:%v once failed, Cluster leader metanode return 0 because volume totalsize not enough expand", UUID)
os.Remove(path)
return 0
}
out := UpdateMetaForExpandVol(UUID, pExpandVolRSAck)
if out != 0 {
logger.Error("ExpandVol volume:%v once cluster leader metanode success but update volume leader metanode fail, so rollback cluster leader metanode this expand resource", UUID)
pDelReq := &mp.DelVolRSForExpandReq{
UUID: UUID,
BGPS: pExpandVolRSAck.BGPS,
}
pDelAck, err := mc.DelVolRSForExpand(ctx, pDelReq)
if err != nil || pDelAck.Ret != 0 {
logger.Error("ExpandVol once volume:%v success but update meta failed, then rollback cluster leader metanode error", UUID)
}
os.Remove(path)
return -1
}
os.Remove(path)
return 1
}
func UpdateMetaForExpandVol(UUID string, ack *mp.ExpandVolRSAck) int {
var mpBlockGroups []*mp.BlockGroup
for _, v := range ack.BGPS {
mpBlockGroup := &mp.BlockGroup{
BlockGroupID: v.Blocks[0].BGID,
FreeSize: BlockGroupSize,
}
mpBlockGroups = append(mpBlockGroups, mpBlockGroup)
}
logger.Debug("ExpandVolRS volume:%v to leader metanode BlockGroups Info:%v", UUID, mpBlockGroups)
// Meta handle
conn2, err := DialMeta(UUID)
if err != nil {
logger.Error("ExpandVol volume:%v once volmgr success but Dial to metanode fail :%v", UUID, err)
return -1
}
defer conn2.Close()
mc := mp.NewMetaNodeClient(conn2)
pmExpandNameSpaceReq := &mp.ExpandNameSpaceReq{
VolID: UUID,
BlockGroups: mpBlockGroups,
}
ctx2, _ := context.WithTimeout(context.Background(), 10*time.Second)
pmExpandNameSpaceAck, err := mc.ExpandNameSpace(ctx2, pmExpandNameSpaceReq)
if err != nil {
logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return error:%v", UUID, err)
return -1
}
if pmExpandNameSpaceAck.Ret != 0 {
logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return not equal 0:%v", UUID)
return -1
}
return 0
}
*/
// CreateVol volume
func ExpandVol(uuid string, capacity string) int32 {
_, conn, err := utils.DialVolMgr(VolMgrHosts)
if err != nil {
logger.Error("CreateVol failed,Dial to VolMgrHosts fail :%v", err)
return -1
}
defer conn.Close()
vc := vp.NewVolMgrClient(conn)
spaceQuota, _ := strconv.Atoi(capacity)
pExpandVolReq := &vp.ExpandVolReq{
UUID: uuid,
Space: int32(spaceQuota),
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
ack, err := vc.ExpandVol(ctx, pExpandVolReq)
if err != nil {
logger.Error("ExpandVol failed, VolMgr Leader return failed, err:%v", err)
return -1
}
if ack.Ret != 0 {
logger.Error("ExpandVol failed, VolMgr Leader return failed, ret:%v", ack.Ret)
return -1
}
return 0
}
// Migrate asks the VolMgr leader to move all block data off the (bad)
// datanode at <host> onto healthy datanodes. Returns 0 on success, -1 on
// any failure.
func Migrate(host string) int32 {
	pMigrateReq := &vp.MigrateReq{
		DataNodeHost: host,
	}
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		// Fixed: the old message said "metanode" although we dial VolMgr here.
		logger.Error("Migrate failed,Dial to VolMgrHosts fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // was discarded before
	pMigrateAck, err := vc.Migrate(ctx, pMigrateReq)
	if err != nil {
		logger.Error("Migrate failed: %v", err)
		return -1
	}
	if pMigrateAck.Ret != 0 {
		logger.Error("Migrate failed: %v", pMigrateAck.Ret)
		return -1
	}
	return 0
}
// GetAllVolumeInfos fetches the metadata of every volume known to the VolMgr
// leader. Returns (0, volumes) on success and (-1, nil) on any failure.
func GetAllVolumeInfos() (int32, []*vp.Volume) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		// Fixed copy-pasted "GetAllDatanode" in the message below.
		logger.Error("GetAllVolumeInfos failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pVolumeInfosReq := &vp.VolumeInfosReq{}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // was discarded before
	pVolumeInfosAck, err := vc.VolumeInfos(ctx, pVolumeInfosReq)
	if err != nil {
		logger.Error("GetAllVolumeInfos failed,grpc func err :%v", err)
		return -1, nil
	}
	if pVolumeInfosAck.Ret != 0 {
		logger.Error("GetAllVolumeInfos failed,grpc func ret :%v", pVolumeInfosAck.Ret)
		return -1, nil
	}
	return 0, pVolumeInfosAck.Volumes
}
// GetVolInfo fetches metadata of the volume identified by <name> (its UUID)
// from the VolMgr leader. Returns (0, ack) on success; on failure returns
// -1 with either nil (dial error) or an empty ack (RPC/Ret error) — callers
// appear to key off the first return value.
func GetVolInfo(name string) (int32, *vp.GetVolInfoAck) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("GetVolInfo failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetVolInfoReq := &vp.GetVolInfoReq{
		UUID: name,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // was discarded before
	ack, err := vc.GetVolInfo(ctx, pGetVolInfoReq)
	// Short-circuit keeps the ack.Ret access safe when err != nil.
	if err != nil || ack.Ret != 0 {
		return -1, &vp.GetVolInfoAck{}
	}
	return 0, ack
}
// GetBlockGroupInfo fetches info for the block group whose numeric ID is
// given as a decimal string. Returns (0, ack) on success; -1 with nil (bad
// ID / dial error) or an empty ack (RPC/Ret error) on failure.
func GetBlockGroupInfo(idStr string) (int32, *vp.GetBlockGroupInfoAck) {
	bgID, err := strconv.ParseUint(idStr, 10, 64)
	if err != nil {
		logger.Error("GetBlockGroupInfo parse bdID failed:%v", err)
		return -1, nil
	}
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("GetBlockGroupInfo failed,Dial to VolMgrHosts fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetBlockGroupInfoReq := &vp.GetBlockGroupInfoReq{
		BGID: bgID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // was discarded before
	ack, err := vc.GetBlockGroupInfo(ctx, pGetBlockGroupInfoReq)
	if err != nil {
		logger.Error("GetBlockGroupInfo failed: %v", err)
		return -1, &vp.GetBlockGroupInfoAck{}
	}
	if ack.Ret != 0 {
		logger.Error("GetBlockGroupInfo failed: %v", ack.Ret)
		return -1, &vp.GetBlockGroupInfoAck{}
	}
	return 0, ack
}
// SnapShotVol triggers a namespace snapshot of volume <uuid> on every
// metanode host; stops and returns -1 on the first failure, 0 when all
// hosts succeed. (Fixes the "SnapShootVol" typo in the doc comment, the
// per-iteration defer leak, and the discarded context cancel.)
func SnapShotVol(uuid string) int32 {
	// send to metadata to delete a map
	for _, v := range MetaNodeHosts {
		if snapShotVolOnHost(uuid, v) != 0 {
			return -1
		}
	}
	return 0
}

// snapShotVolOnHost snapshots volume <uuid> on a single metanode host.
// Extracted so conn.Close/cancel run per host instead of piling up as
// defers until the whole loop finishes.
func snapShotVolOnHost(uuid string, host string) int32 {
	conn, err := utils.Dial(host)
	if err != nil {
		logger.Error("SnapShotVol failed,Dial to MetaNodeHosts %v fail :%v", host, err)
		return -1
	}
	defer conn.Close()
	mc := mp.NewMetaNodeClient(conn)
	pmSnapShotNameSpaceReq := &mp.SnapShotNameSpaceReq{
		VolID: uuid,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
	defer cancel()
	pmSnapShotNameSpaceAck, err := mc.SnapShotNameSpace(ctx, pmSnapShotNameSpaceReq)
	if err != nil {
		logger.Error("SnapShotVol failed,grpc func err :%v", err)
		return -1
	}
	if pmSnapShotNameSpaceAck.Ret != 0 {
		logger.Error("SnapShotVol failed,rpc func ret:%v", pmSnapShotNameSpaceAck.Ret)
		return -1
	}
	return 0
}
// SnapShotCluster triggers a cluster-metadata snapshot on every VolMgr host;
// stops and returns -1 on the first failure, 0 when all hosts succeed.
// (Fixes copy-pasted "SnapShotVol" log prefixes, the per-iteration defer
// leak, and the discarded context cancel.)
func SnapShotCluster() int32 {
	for _, v := range VolMgrHosts {
		if snapShotClusterOnHost(v) != 0 {
			return -1
		}
	}
	return 0
}

// snapShotClusterOnHost snapshots cluster data on a single VolMgr host.
// Extracted so conn.Close/cancel run per host instead of accumulating.
func snapShotClusterOnHost(host string) int32 {
	conn, err := utils.Dial(host)
	if err != nil {
		logger.Error("SnapShotCluster failed,Dial to VolMgrHosts %v fail :%v", host, err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pSnapShotClusterReq := &vp.SnapShotClusterReq{}
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
	defer cancel()
	pSnapShotClusterAck, err := vc.SnapShotCluster(ctx, pSnapShotClusterReq)
	if err != nil {
		logger.Error("SnapShotCluster failed,grpc func err :%v", err)
		return -1
	}
	if pSnapShotClusterAck.Ret != 0 {
		logger.Error("SnapShotCluster failed,rpc func ret:%v", pSnapShotClusterAck.Ret)
		return -1
	}
	return 0
}
// DeleteVol asks the VolMgr leader to delete the volume with the given UUID.
// Returns 0 on success, -1 on any failure.
func DeleteVol(uuid string) int32 {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("DeleteVol failed,Dial to VolMgrHosts fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pDeleteVolReq := &vp.DeleteVolReq{
		UUID: uuid,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // was discarded before
	pDeleteVolAck, err := vc.DeleteVol(ctx, pDeleteVolReq)
	if err != nil {
		// Previously this failure path returned silently with no log.
		logger.Error("DeleteVol failed,grpc func err :%v", err)
		return -1
	}
	if pDeleteVolAck.Ret != 0 {
		logger.Error("DeleteVol failed :%v", pDeleteVolAck.Ret)
		return -1
	}
	return 0
}
// GetVolMetaLeader returns the address of the metanode raft-group leader for
// volume <UUID>, as reported by the VolMgr leader.
func GetVolMetaLeader(UUID string) (string, error) {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		return "", err
	}
	// BUG FIX: this connection was never closed before (leaked on every call).
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetMetaNodeRGReq := &vp.GetMetaNodeRGReq{
		UUID: UUID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // was discarded before
	pGetMetaNodeRGAck, err := vc.GetMetaNodeRG(ctx, pGetMetaNodeRGReq)
	if err != nil {
		return "", err
	}
	if pGetMetaNodeRGAck.Ret != 0 {
		return "", fmt.Errorf("GetVolMetaLeader GetMetaNodeRG failed Ret:%v", pGetMetaNodeRGAck.Ret)
	}
	return pGetMetaNodeRGAck.Leader, nil
}
// OpenFileSystem ...
// OpenFileSystem builds a CFS handle for volume <uuid>: it discovers the
// volume's metanode peers, dials the VolMgr and MetaNode leaders, and starts
// the background leader-watcher. Returns nil on any setup failure.
func OpenFileSystem(uuid string) *CFS {
	cfs := CFS{VolID: uuid}
	// Previously this error was silently ignored; without the peer list the
	// leader-watcher below can never find a metanode leader.
	if err := cfs.GetVolumeMetaPeers(uuid); err != nil {
		logger.Error("OpenFileSystem GetVolumeMetaPeers Failed err:%v", err)
		return nil
	}
	err := cfs.GetLeaderInfo(uuid)
	if err != nil {
		logger.Error("OpenFileSystem GetLeaderConn Failed err:%v", err)
		return nil
	}
	cfs.CheckLeaderConns()
	return &cfs
}
// GetLeaderHost resolves the current VolMgr leader and the MetaNode leader
// for this volume. Returns both addresses, or empty strings with the first
// error encountered.
func (cfs *CFS) GetLeaderHost() (volMgrLeader string, metaNodeLeader string, err error) {
	volMgrLeader, err = utils.GetVolMgrLeader(VolMgrHosts)
	if err != nil {
		logger.Error("GetLeaderHost failed: %v", err)
		return "", "", err
	}
	metaNodeLeader, err = utils.GetMetaNodeLeader(MetaNodeHosts, cfs.VolID)
	if err != nil {
		// Fixed the "GretLeaderHost" typo in this log message.
		logger.Error("GetLeaderHost failed: %v", err)
		return "", "", err
	}
	return volMgrLeader, metaNodeLeader, nil
}
// GetLeaderInfo dials the VolMgr leader, asks it for the metanode raft group
// of volume <uuid>, then dials that group's leader. On success it populates
// cfs.VolMgrLeader/Conn and cfs.MetaNodeLeader/Conn (both connections are
// long-lived and owned by the CFS handle — they must NOT be closed here).
func (cfs *CFS) GetLeaderInfo(uuid string) error {
	var err error
	cfs.VolMgrLeader, cfs.VolMgrConn, err = utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		return err
	}
	vc := vp.NewVolMgrClient(cfs.VolMgrConn)
	pGetMetaNodeRGReq := &vp.GetMetaNodeRGReq{
		UUID: uuid,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // was discarded before
	pGetMetaNodeRGAck, err := vc.GetMetaNodeRG(ctx, pGetMetaNodeRGReq)
	if err != nil {
		return err
	}
	if pGetMetaNodeRGAck.Ret != 0 {
		logger.Error("GetLeaderConn GetMetaNodeRG failed :%v", pGetMetaNodeRGAck.Ret)
		return fmt.Errorf("GetMetaNodeRG Failed Ret:%v", pGetMetaNodeRGAck.Ret)
	}
	cfs.MetaNodeLeader = pGetMetaNodeRGAck.Leader
	cfs.MetaNodeConn, err = utils.Dial(cfs.MetaNodeLeader)
	if err != nil {
		return err
	}
	return nil
}
// GetVolumeMetaPeers asks the VolMgr leader for the metanode raft group of
// volume <uuid> and appends each member (host:9901) to the package-level
// MetaNodeHosts list used by leader discovery.
// NOTE(review): repeated calls append duplicates — confirm callers only
// invoke this once per process.
func (cfs *CFS) GetVolumeMetaPeers(uuid string) error {
	_, conn, err := utils.DialVolMgr(VolMgrHosts)
	if err != nil {
		logger.Error("DialVolMgr failed: %v", err)
		return err
	}
	// BUG FIX: this temporary connection was never closed before.
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pGetMetaNodeRGReq := &vp.GetMetaNodeRGReq{
		UUID: uuid,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // was discarded before
	pGetMetaNodeRGAck, err := vc.GetMetaNodeRG(ctx, pGetMetaNodeRGReq)
	if err != nil {
		return err
	}
	if pGetMetaNodeRGAck.Ret != 0 {
		logger.Error("GetLeaderConn GetMetaNodeRG failed :%v", pGetMetaNodeRGAck.Ret)
		return fmt.Errorf("GetMetaNodeRG Failed Ret:%v", pGetMetaNodeRGAck.Ret)
	}
	for _, v := range pGetMetaNodeRGAck.MetaNodes {
		MetaNodeHosts = append(MetaNodeHosts, v.Host+":9901")
	}
	return nil
}
// CheckLeaderConns starts a background goroutine that polls leader addresses
// every 500ms and transparently re-dials the VolMgr / MetaNode connections
// when the respective leader changes. The goroutine runs for the lifetime of
// the CFS handle (the ticker is intentionally never stopped).
func (cfs *CFS) CheckLeaderConns() {
	ticker := time.NewTicker(time.Millisecond * 500)
	go func() {
		for range ticker.C {
			vLeader, mLeader, err := cfs.GetLeaderHost()
			if err != nil {
				logger.Error("CheckLeaderConns GetLeaderHost err %v", err)
				continue
			}
			if vLeader != cfs.VolMgrLeader {
				logger.Error("VolMgr Leader Change! Old Leader %v,New Leader %v", cfs.VolMgrLeader, vLeader)
				if cfs.VolMgrConn != nil {
					cfs.VolMgrConn.Close()
					cfs.VolMgrConn = nil
				}
				cfs.VolMgrConn, err = utils.Dial(vLeader)
				if err != nil {
					// BUG FIX: the dial error used to be ignored and the leader
					// recorded anyway, pairing a leader name with a nil conn.
					// Leave VolMgrLeader unchanged so the next tick retries.
					logger.Error("CheckLeaderConns Dial VolMgr leader %v err %v", vLeader, err)
					continue
				}
				cfs.VolMgrLeader = vLeader
			}
			if mLeader != cfs.MetaNodeLeader {
				logger.Error("MetaNode Leader Change! Old Leader %v,New Leader %v", cfs.MetaNodeLeader, mLeader)
				if cfs.MetaNodeConn != nil {
					cfs.MetaNodeConn.Close()
					cfs.MetaNodeConn = nil
				}
				cfs.MetaNodeConn, err = utils.Dial(mLeader)
				if err != nil {
					logger.Error("CheckLeaderConns Dial MetaNode leader %v err %v", mLeader, err)
					continue
				}
				cfs.MetaNodeLeader = mLeader
			}
		}
	}()
}
// GetFSInfo ...
// GetFSInfo fetches filesystem/volume statistics from the metanode leader.
// Returns (0, ack) on success. NOTE: failures return 1 (not -1) — kept as-is
// because existing callers may test for that exact value.
func (cfs *CFS) GetFSInfo() (int32, *mp.GetFSInfoAck) {
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pGetFSInfoReq := &mp.GetFSInfoReq{
		VolID: cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // was discarded before
	pGetFSInfoAck, err := mc.GetFSInfo(ctx, pGetFSInfoReq)
	if err != nil {
		logger.Error("GetFSInfo failed,grpc func err :%v", err)
		return 1, nil
	}
	if pGetFSInfoAck.Ret != 0 {
		logger.Error("GetFSInfo failed,grpc func ret :%v", pGetFSInfoAck.Ret)
		return 1, nil
	}
	return 0, pGetFSInfoAck
}
// checkMetaConn waits up to ~3 seconds (10 × 300ms) for the metanode
// connection to become available — CheckLeaderConns may be re-dialing it in
// the background. Returns 0 once a connection exists, -1 on timeout.
func (cfs *CFS) checkMetaConn() int32 {
	for retries := 0; retries < 10; retries++ {
		if cfs.MetaNodeConn != nil {
			return 0
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.MetaNodeConn != nil {
		return 0
	}
	return -1
}
// CreateDirDirect ...
// CreateDirDirect creates directory <name> under parent inode <pinode> on the
// metanode. Retries once after a one-second pause if the first RPC fails
// (e.g. the metanode leader moved and the watcher is re-dialing). Returns
// (Ret, new inode) on success, (-1, 0) on failure.
func (cfs *CFS) CreateDirDirect(pinode uint64, name string) (int32, uint64) {
	if cfs.checkMetaConn() != 0 {
		return -1, 0
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pCreateDirDirectReq := &mp.CreateDirDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pCreateDirDirectAck, err := mc.CreateDirDirect(ctx, pCreateDirDirectReq)
	cancel() // release the timer promptly (was leaked until timeout before)
	if err != nil {
		time.Sleep(time.Second)
		if cfs.checkMetaConn() != 0 {
			return -1, 0
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		pCreateDirDirectAck, err = mc.CreateDirDirect(ctx, pCreateDirDirectReq)
		if err != nil {
			return -1, 0
		}
	}
	return pCreateDirDirectAck.Ret, pCreateDirDirectAck.Inode
}
// GetInodeInfoDirect ...
// GetInodeInfoDirect looks up child <name> under parent inode <pinode> and
// returns (Ret, inode, inode info). Retries once after a one-second pause if
// the first RPC fails. Returns (-1, 0, nil) on failure.
func (cfs *CFS) GetInodeInfoDirect(pinode uint64, name string) (int32, uint64, *mp.InodeInfo) {
	if cfs.checkMetaConn() != 0 {
		return -1, 0, nil
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pGetInodeInfoDirectReq := &mp.GetInodeInfoDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pGetInodeInfoDirectAck, err := mc.GetInodeInfoDirect(ctx, pGetInodeInfoDirectReq)
	cancel() // release the timer promptly (was leaked until timeout before)
	if err != nil {
		time.Sleep(time.Second)
		if cfs.checkMetaConn() != 0 {
			return -1, 0, nil
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		pGetInodeInfoDirectAck, err = mc.GetInodeInfoDirect(ctx, pGetInodeInfoDirectReq)
		if err != nil {
			return -1, 0, nil
		}
	}
	return pGetInodeInfoDirectAck.Ret, pGetInodeInfoDirectAck.Inode, pGetInodeInfoDirectAck.InodeInfo
}
// StatDirect ...
// StatDirect checks whether <name> exists under parent inode <pinode> and
// returns (Ret, inodeType, inode). Retries once after a one-second pause if
// the first RPC fails. Returns (-1, false, 0) on failure.
func (cfs *CFS) StatDirect(pinode uint64, name string) (int32, bool, uint64) {
	if cfs.checkMetaConn() != 0 {
		return -1, false, 0
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pStatDirectReq := &mp.StatDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pStatDirectAck, err := mc.StatDirect(ctx, pStatDirectReq)
	cancel() // release the timer promptly (was leaked until timeout before)
	if err != nil {
		time.Sleep(time.Second)
		if cfs.checkMetaConn() != 0 {
			return -1, false, 0
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		pStatDirectAck, err = mc.StatDirect(ctx, pStatDirectReq)
		if err != nil {
			return -1, false, 0
		}
	}
	return pStatDirectAck.Ret, pStatDirectAck.InodeType, pStatDirectAck.Inode
}
// ListDirect ...
// ListDirect returns the directory entries of inode <pinode>. The 60s
// timeout accommodates very large directories. Returns (-1, nil) on failure.
func (cfs *CFS) ListDirect(pinode uint64) (int32, []*mp.DirentN) {
	if cfs.checkMetaConn() != 0 {
		return -1, nil
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pListDirectReq := &mp.ListDirectReq{
		PInode: pinode,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel() // was discarded before
	pListDirectAck, err := mc.ListDirect(ctx, pListDirectReq)
	if err != nil {
		return -1, nil
	}
	return pListDirectAck.Ret, pListDirectAck.Dirents
}
// DeleteDirDirect ...
// DeleteDirDirect removes directory <name> under parent inode <pinode>:
// it recursively deletes every child (files via DeleteFileDirect, subdirs
// via itself), then removes the directory entry on the metanode.
// A missing directory is treated as already deleted (returns 0).
// Returns the metanode Ret code, or -1 on RPC failure.
func (cfs *CFS) DeleteDirDirect(pinode uint64, name string) int32 {
	ret, _, inode := cfs.StatDirect(pinode, name)
	if ret != 0 {
		// Nothing to do — deleting a non-existent dir is a success.
		logger.Debug("DeleteDirDirect StatDirect Failed , no such dir")
		return 0
	}
	ret = cfs.checkMetaConn()
	if ret != 0 {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	// List the directory's own contents (PInode is the dir's inode here).
	pListDirectReq := &mp.ListDirectReq{
		PInode: inode,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pListDirectAck, err := mc.ListDirect(ctx, pListDirectReq)
	if err != nil {
		logger.Error("DeleteDirDirect ListDirect :%v\n", err)
		return -1
	}
	for _, v := range pListDirectAck.Dirents {
		/*
			if v.InodeType {
				cfs.DeleteFileDirect(inode, v.Name)
			} else {
				cfs.DeleteDirDirect(inode, v.Name)
			}
		*/
		// InodeType true appears to mean "regular file" here (files are
		// deleted, everything else recurses) — TODO confirm against mp proto.
		if v.InodeType {
			ret := cfs.DeleteFileDirect(inode, v.Name)
			if ret != 0 {
				// Abort on the first child that fails to delete.
				return ret
			}
		} else {
			ret := cfs.DeleteDirDirect(inode, v.Name)
			if ret != 0 {
				return ret
			}
		}
	}
	// All children gone — remove the directory entry itself.
	pDeleteDirDirectReq := &mp.DeleteDirDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ = context.WithTimeout(context.Background(), 60*time.Second)
	pDeleteDirDirectAck, err := mc.DeleteDirDirect(ctx, pDeleteDirDirectReq)
	if err != nil {
		return -1
	}
	return pDeleteDirDirectAck.Ret
}
// RenameDirect ...
// RenameDirect renames/moves the entry <oldname> under <oldpinode> to
// <newname> under <newpinode> on the metanode. Returns the metanode Ret
// code, or -1 on connection/RPC failure.
func (cfs *CFS) RenameDirect(oldpinode uint64, oldname string, newpinode uint64, newname string) int32 {
	if cfs.checkMetaConn() != 0 {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pRenameDirectReq := &mp.RenameDirectReq{
		OldPInode: oldpinode,
		OldName:   oldname,
		NewPInode: newpinode,
		NewName:   newname,
		VolID:     cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // was discarded before
	pRenameDirectAck, err := mc.RenameDirect(ctx, pRenameDirectReq)
	if err != nil {
		return -1
	}
	return pRenameDirectAck.Ret
}
// CreateFileDirect ...
// CreateFileDirect creates file <name> under parent inode <pinode> on the
// metanode and returns a CFile handle with its background write goroutine
// already running. With O_EXCL set, an existing file returns ret 17
// (mirrors errno EEXIST — TODO confirm callers rely on the exact value).
func (cfs *CFS) CreateFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
	var writer int32
	if flags&os.O_EXCL != 0 {
		// O_EXCL: fail when the name already exists (StatDirect ret 0 = found).
		if ret, _, _ := cfs.StatDirect(pinode, name); ret == 0 {
			return 17, nil
		}
	}
	ret, inode := cfs.createFileDirect(pinode, name)
	if ret != 0 {
		return ret, nil
	}
	// Fresh handle: empty write buffer, empty caches, channels for the
	// writer goroutine's shutdown/error signalling.
	cfile := CFile{
		OpenFlag:         flags,
		cfs:              cfs,
		Writer:           writer,
		FileSize:         0,
		FileSizeInCache:  0,
		ParentInodeID:    pinode,
		Inode:            inode,
		Name:             name,
		wBuffer:          wBuffer{buffer: new(bytes.Buffer), freeSize: BufferSize},
		DataCache:        make(map[uint64]*Data),
		DataQueue:        make(chan *chanData, 1),
		CloseSignal:      make(chan struct{}, 10),
		WriteErrSignal:   make(chan bool, 2),
		DataConn:         make(map[string]*grpc.ClientConn),
		errDataNodeCache: make(map[string]bool),
	}
	// Start the writer that drains DataQueue; CloseWrite shuts it down.
	go cfile.WriteThread()
	return 0, &cfile
}
// OpenFileDirect ...
// OpenFileDirect opens existing file <name> under parent inode <pinode>:
// it fetches the file's chunk list from the metanode, derives the file size
// as the sum of chunk sizes, and returns a CFile handle with its background
// write goroutine already running.
func (cfs *CFS) OpenFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
	logger.Debug("OpenFileDirect: name: %v, flags: %v\n", name, flags)
	ret, chunkInfos, inode := cfs.GetFileChunksDirect(pinode, name)
	if ret != 0 {
		return ret, nil
	}
	// File size = sum of all chunk sizes.
	var tmpFileSize int64
	if len(chunkInfos) > 0 {
		for i := range chunkInfos {
			tmpFileSize += int64(chunkInfos[i].ChunkSize)
		}
	}
	cfile := CFile{
		OpenFlag:         flags,
		cfs:              cfs,
		FileSize:         tmpFileSize,
		FileSizeInCache:  tmpFileSize,
		ParentInodeID:    pinode,
		Inode:            inode,
		wBuffer:          wBuffer{buffer: new(bytes.Buffer), freeSize: BufferSize},
		Name:             name,
		chunks:           chunkInfos,
		DataCache:        make(map[uint64]*Data),
		DataQueue:        make(chan *chanData, 1),
		CloseSignal:      make(chan struct{}, 10),
		WriteErrSignal:   make(chan bool, 2),
		DataConn:         make(map[string]*grpc.ClientConn),
		errDataNodeCache: make(map[string]bool),
	}
	// Start the writer that drains DataQueue; CloseWrite shuts it down.
	go cfile.WriteThread()
	return 0, &cfile
}
// UpdateOpenFileDirect ...
// UpdateOpenFileDirect is currently a no-op stub that always reports success;
// the parameters are accepted for interface compatibility only.
func (cfs *CFS) UpdateOpenFileDirect(pinode uint64, name string, cfile *CFile, flags int) int32 {
	return 0
}
// createFileDirect ...
// createFileDirect issues the CreateFileDirect RPC to the metanode, retrying
// once after a one-second pause when the call errors or returns non-zero.
// Returns (0, inode) on success, or one of the metanode status codes
// 1 / 2 / 17 (17 mirrors errno EEXIST), or (-1, 0) on RPC failure.
func (cfs *CFS) createFileDirect(pinode uint64, name string) (int32, uint64) {
	if cfs.checkMetaConn() != 0 {
		return -1, 0
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pCreateFileDirectReq := &mp.CreateFileDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pCreateFileDirectAck, err := mc.CreateFileDirect(ctx, pCreateFileDirectReq)
	cancel() // release the timer promptly (was leaked until timeout before)
	if err != nil || pCreateFileDirectAck.Ret != 0 {
		time.Sleep(time.Second)
		if cfs.checkMetaConn() != 0 {
			return -1, 0
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		pCreateFileDirectAck, err = mc.CreateFileDirect(ctx, pCreateFileDirectReq)
		if err != nil {
			logger.Error("CreateFileDirect failed,grpc func failed :%v\n", err)
			return -1, 0
		}
	}
	// Pass through the metanode's well-known status codes; any other Ret
	// value falls through to success, exactly as the original if-chain did.
	switch pCreateFileDirectAck.Ret {
	case 1:
		return 1, 0
	case 2:
		return 2, 0
	case 17:
		return 17, 0
	}
	return 0, pCreateFileDirectAck.Inode
}
// DeleteFileDirect ...
// DeleteFileDirect removes file <name> under parent inode <pinode>: it first
// best-effort deletes every chunk replica on the datanodes (failures are
// logged and skipped, not fatal), then removes the file entry on the
// metanode, retrying that RPC once. Returns the metanode Ret code or -1.
func (cfs *CFS) DeleteFileDirect(pinode uint64, name string) int32 {
	ret, chunkInfos, _ := cfs.GetFileChunksDirect(pinode, name)
	if ret == 0 && chunkInfos != nil {
		// Delete each chunk on each replica host. Errors here are tolerated
		// so a dead datanode cannot block the metadata delete below.
		for _, v1 := range chunkInfos {
			for _, v2 := range v1.BlockGroupWithHost.Hosts {
				conn, err := utils.Dial(v2)
				if err != nil || conn == nil {
					// One re-dial attempt after a pause, then give up on host.
					time.Sleep(time.Second)
					conn, err = utils.Dial(v2)
					if err != nil || conn == nil {
						logger.Error("DeleteFile failed,Dial to datanode fail :%v\n", err)
						continue
					}
				}
				dc := dp.NewDataNodeClient(conn)
				dpDeleteChunkReq := &dp.DeleteChunkReq{
					ChunkID:      v1.ChunkID,
					BlockGroupID: v1.BlockGroupWithHost.BlockGroupID,
				}
				ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
				_, err = dc.DeleteChunk(ctx, dpDeleteChunkReq)
				if err != nil {
					logger.Error("DeleteFile failed,rpc to datanode fail :%v\n", err)
				}
				conn.Close()
			}
		}
	}
	ret = cfs.checkMetaConn()
	if ret != 0 {
		return -1
	}
	// Remove the file entry from the namespace on the metanode.
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	mpDeleteFileDirectReq := &mp.DeleteFileDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	mpDeleteFileDirectAck, err := mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
	if err != nil || mpDeleteFileDirectAck.Ret != 0 {
		// One retry after a pause — the metanode leader may have moved.
		time.Sleep(time.Second)
		ret := cfs.checkMetaConn()
		if ret != 0 {
			return -1
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		mpDeleteFileDirectAck, err = mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
		if err != nil {
			logger.Error("DeleteFile failed,grpc func err :%v\n", err)
			return -1
		}
	}
	return mpDeleteFileDirectAck.Ret
}
// GetFileChunksDirect ...
// GetFileChunksDirect fetches the chunk list (with block-group placement)
// and inode of file <name> under parent inode <pinode>, retrying once after
// a one-second pause on error or non-zero Ret. Returns (-1, nil, 0) on
// failure.
func (cfs *CFS) GetFileChunksDirect(pinode uint64, name string) (int32, []*mp.ChunkInfoWithBG, uint64) {
	if cfs.checkMetaConn() != 0 {
		logger.Error("GetFileChunksDirect cfs.Conn nil ...")
		return -1, nil, 0
	}
	mc := mp.NewMetaNodeClient(cfs.MetaNodeConn)
	pGetFileChunksDirectReq := &mp.GetFileChunksDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pGetFileChunksDirectAck, err := mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
	cancel() // release the timer promptly (was leaked until timeout before)
	if err != nil || pGetFileChunksDirectAck.Ret != 0 {
		time.Sleep(time.Second)
		if cfs.checkMetaConn() != 0 {
			logger.Error("GetFileChunksDirect cfs.Conn nil ...")
			return -1, nil, 0
		}
		mc = mp.NewMetaNodeClient(cfs.MetaNodeConn)
		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		pGetFileChunksDirectAck, err = mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
		if err != nil {
			logger.Error("GetFileChunks failed,grpc func failed :%v\n", err)
			return -1, nil, 0
		}
	}
	return pGetFileChunksDirectAck.Ret, pGetFileChunksDirectAck.ChunkInfos, pGetFileChunksDirectAck.Inode
}
// Data is one buffered write unit awaiting commit to the datanodes.
type Data struct {
	DataBuf *bytes.Buffer // buffered payload
	Status  int32         // commit status — exact values set elsewhere; TODO confirm
	timer   *time.Timer   // flush/expiry timer for this buffer
	ID      uint64        // commit sequence number (keys DataCache)
}

// ReadCache ...
// ReadCache is a single-window read-ahead cache (see readChunk).
type ReadCache struct {
	LastOffset int64              // file offset where readBuf starts
	readBuf    []byte             // cached bytes; emptied to invalidate
	Ch         chan *bytes.Buffer // delivery channel from streamRead
}

// wBuffer is the in-flight write buffer for the current chunk.
type wBuffer struct {
	freeSize    int32               // chunk size
	chunkInfo   *mp.ChunkInfoWithBG // chunk info
	buffer      *bytes.Buffer       // chunk data
	startOffset int64
	endOffset   int64
}

// chanData is the unit passed through CFile.DataQueue to the write goroutine.
type chanData struct {
	data []byte
}

// Chunk couples a chunk's metadata with its replication write stream.
type Chunk struct {
	CFile                    *CFile
	ChunkFreeSize            int                        // remaining writable bytes in the chunk
	ChunkInfo                *mp.ChunkInfoWithBG        // chunk ID + block-group placement
	ChunkWriteSteam          dp.DataNode_C2MReplClient  // replication stream to the datanode
	ChunkWriteRecvExitSignal chan struct{}
}

// CFile ...
// CFile is an open-file handle: write pipeline state (buffers, queue,
// signals), cached datanode connections, and single-window read cache.
type CFile struct {
	cfs             *CFS
	ParentInodeID   uint64
	Name            string
	Inode           uint64
	OpenFlag        int
	FileSize        int64 // size as committed to the metanode
	FileSizeInCache int64 // size including not-yet-committed buffered writes
	Status          int32 // 0 ok
	DataConnLocker  sync.RWMutex // guards DataConn
	DataConn        map[string]*grpc.ClientConn
	// for write
	wBuffer          wBuffer
	wgWriteReps      sync.WaitGroup
	atomicNum        uint64 // commit-ID generator
	curNum           uint64 // highest commit ID confirmed written
	Writer           int32
	DataCacheLocker  sync.RWMutex // guards DataCache
	DataCache        map[uint64]*Data
	DataQueue        chan *chanData // feeds the background write goroutine
	WriteErrSignal   chan bool
	WriteRetrySignal chan bool
	Closing          bool          // set by CloseWrite before closing DataQueue
	CloseSignal      chan struct{} // write goroutine signals shutdown complete
	CurChunk         *Chunk
	WriteLocker      sync.Mutex
	// for read
	//lastoffset int64
	RMutex sync.Mutex
	chunks []*mp.ChunkInfoWithBG // chunkinfo
	//readBuf []byte
	readCache        ReadCache
	errDataNodeCache map[string]bool // datanode hosts recently seen failing
}

// extentInfo locates one contiguous run of bytes inside a single chunk.
type extentInfo struct {
	pos    int32 //pos in chunks of cfile
	offset int32 //offset in chunk
	length int32 //length in chunk
}
// newDataConn returns a cached gRPC connection to the datanode at <addr>,
// dialing and caching a new one on a miss. Returns nil when the dial fails.
func (cfile *CFile) newDataConn(addr string) *grpc.ClientConn {
	// Fast path: connection already cached.
	cfile.DataConnLocker.RLock()
	if v, ok := cfile.DataConn[addr]; ok {
		cfile.DataConnLocker.RUnlock()
		return v
	}
	cfile.DataConnLocker.RUnlock()
	conn, err := utils.Dial(addr)
	if err != nil || conn == nil {
		logger.Error("Dial to %v failed! err: %v", addr, err)
		return nil
	}
	cfile.DataConnLocker.Lock()
	if v, ok := cfile.DataConn[addr]; ok {
		// Another goroutine cached a connection while we were dialing:
		// keep theirs, discard ours.
		// BUG FIX: this path previously called RUnlock() on the write-locked
		// RWMutex, which panics at runtime ("sync: RUnlock of unlocked RWMutex").
		cfile.DataConnLocker.Unlock()
		conn.Close()
		return v
	}
	cfile.DataConn[addr] = conn
	cfile.DataConnLocker.Unlock()
	return conn
}
// delErrDataConn closes and evicts the cached connection for <addr>, if one
// exists. Called after an RPC over that connection has failed so the next
// use re-dials a fresh connection.
func (cfile *CFile) delErrDataConn(addr string) {
	cfile.DataConnLocker.Lock()
	defer cfile.DataConnLocker.Unlock()
	conn, ok := cfile.DataConn[addr]
	if !ok {
		return
	}
	conn.Close()
	delete(cfile.DataConn, addr)
}
//only delele all conn when closing file
func (cfile *CFile) delAllDataConn() {
cfile.DataConnLocker.Lock()
for k, v := range cfile.DataConn {
v.Close()
delete(cfile.DataConn, k)
}
cfile.DataConnLocker.Unlock()
}
// streamRead reads [offset, offset+size) of chunk cfile.chunks[chunkidx]
// from one of the chunk's replica datanodes and delivers the result on ch.
// Replica order is randomized, then a host not in errDataNodeCache is
// promoted to the front. Failed replicas are skipped; if all fail, an empty
// buffer is sent so the waiting reader unblocks.
func (cfile *CFile) streamRead(chunkidx int, ch chan *bytes.Buffer, offset int64, size int64) {
	var conn *grpc.ClientConn
	var buffer *bytes.Buffer
	outflag := 0 // replicas that failed outright (dial/stream-open/Recv error)
	inflag := 0  // replicas whose Recv loop broke mid-stream (reset on data)
	idxs := utils.GenerateRandomNumber(0, 3, 3)
	// Promote the first replica that is not cached as faulty to the front of
	// the random order.
	for n := 0; n < 3; n++ {
		i := idxs[n]
		addr := cfile.chunks[chunkidx].BlockGroupWithHost.Hosts[i]
		_, ok := cfile.errDataNodeCache[addr]
		if !ok {
			if n != 0 {
				tmp := idxs[0]
				idxs[0] = i
				idxs[n] = tmp
			}
			break
		}
	}
	for n := 0; n < len(cfile.chunks[chunkidx].BlockGroupWithHost.Hosts); n++ {
		i := idxs[n]
		buffer = new(bytes.Buffer)
		addr := cfile.chunks[chunkidx].BlockGroupWithHost.Hosts[i]
		conn = cfile.newDataConn(addr)
		if conn == nil {
			// Host unreachable: remember it and try the next replica.
			cfile.errDataNodeCache[addr] = true
			outflag++
			continue
		}
		dc := dp.NewDataNodeClient(conn)
		streamreadChunkReq := &dp.StreamReadChunkReq{
			ChunkID:      cfile.chunks[chunkidx].ChunkID,
			BlockGroupID: cfile.chunks[chunkidx].BlockGroupWithHost.BlockGroupID,
			Offset:       offset,
			Readsize:     size,
		}
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		stream, err := dc.StreamReadChunk(ctx, streamreadChunkReq)
		if err != nil {
			// Stream open failed: drop the cached conn, re-dial once, retry.
			cfile.delErrDataConn(addr)
			conn = cfile.newDataConn(addr)
			if conn == nil {
				logger.Error("StreamReadChunk return error:%v and re-dial failed, so retry other datanode!", err)
				cfile.errDataNodeCache[addr] = true
				outflag++
				continue
			} else {
				dc = dp.NewDataNodeClient(conn)
				streamreadChunkReq := &dp.StreamReadChunkReq{
					ChunkID:      cfile.chunks[chunkidx].ChunkID,
					BlockGroupID: cfile.chunks[chunkidx].BlockGroupWithHost.BlockGroupID,
					Offset:       offset,
					Readsize:     size,
				}
				ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
				stream, err = dc.StreamReadChunk(ctx, streamreadChunkReq)
				if err != nil {
					cfile.delErrDataConn(addr)
					logger.Error("StreamReadChunk StreamReadChunk error:%v, so retry other datanode!", err)
					cfile.errDataNodeCache[addr] = true
					outflag++
					continue
				}
			}
		}
		// The host answered: clear any stale fault-cache entry for it.
		delete(cfile.errDataNodeCache, addr)
		for {
			ack, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				logger.Error("=== streamreadChunkReq Recv err:%v ===", err)
				inflag++
				outflag++
				break
			}
			if ack != nil {
				if len(ack.Databuf) == 0 {
					continue
				} else {
					buffer.Write(ack.Databuf)
					inflag = 0
				}
			} else {
				continue
			}
		}
		if inflag == 0 {
			// Clean read: hand the data to the waiting reader and stop.
			ch <- buffer
			break
		} else if inflag == 3 {
			// Three replicas in a row broke mid-stream: give up with an
			// empty buffer so the reader unblocks.
			buffer = new(bytes.Buffer)
			buffer.Write([]byte{})
			logger.Error("Stream Read the chunk three copy Recv error")
			ch <- buffer
			break
		} else if inflag < 3 {
			logger.Error("Stream Read the chunk %v copy Recv error, so need retry other datanode!!!", inflag)
			continue
		}
	}
	if outflag >= 3 {
		// No replica could serve the read at all.
		buffer = new(bytes.Buffer)
		buffer.Write([]byte{})
		logger.Error("Stream Read the chunk three copy Datanode error")
		ch <- buffer
	}
}
// readChunk reads one extent (a byte range inside a single chunk) into *data.
// It first tries the single-entry read cache (cfile.readCache); on a miss it
// streams at least BufferSize bytes from a datanode via streamRead, refills
// the cache, and appends up to eInfo.length bytes to *data.
// Returns the number of bytes appended, or -1 when the datanode read failed.
func (cfile *CFile) readChunk(eInfo extentInfo, data *[]byte, offset int64) int32 {
	//check if hit readBuf
	readBufOffset := cfile.readCache.LastOffset
	readBufLen := len(cfile.readCache.readBuf)
	// Cache hit: the whole requested range lies inside the cached window.
	if offset >= readBufOffset && offset+int64(eInfo.length) <= readBufOffset+int64(readBufLen) {
		pos := int32(offset - readBufOffset)
		*data = append(*data, cfile.readCache.readBuf[pos:pos+eInfo.length]...)
		logger.Debug("cfile %v hit read buffer, offset:%v len:%v, readBuf offset:%v, len:%v", cfile.Name, offset, eInfo.length, readBufOffset, readBufLen)
		return eInfo.length
	}
	//prepare to read from datanode
	cfile.readCache.readBuf = []byte{}
	buffer := new(bytes.Buffer)
	cfile.readCache.Ch = make(chan *bytes.Buffer)
	// Read ahead: fetch at least BufferSize bytes so nearby reads hit the cache.
	readSize := eInfo.length
	if readSize < int32(BufferSize) {
		readSize = int32(BufferSize)
	}
	//go streamRead
	go cfile.streamRead(int(eInfo.pos), cfile.readCache.Ch, int64(eInfo.offset), int64(readSize))
	buffer = <-cfile.readCache.Ch
	bLen := buffer.Len()
	if bLen == 0 {
		// streamRead delivers an empty buffer when all replicas failed.
		logger.Error("try to read %v chunk:%v from datanode size:%v, but return:%v", cfile.Name, eInfo.pos, readSize, bLen)
		return -1
	}
	// Refill the read cache with everything received from the datanode.
	cfile.readCache.readBuf = buffer.Next(bLen)
	cfile.readCache.LastOffset = offset
	// The datanode may return fewer bytes than requested (e.g. near EOF).
	appendLen := eInfo.length
	if appendLen > int32(bLen) {
		appendLen = int32(bLen)
	}
	*data = append(*data, cfile.readCache.readBuf[0:appendLen]...)
	buffer.Reset()
	buffer = nil
	return appendLen
}
// disableReadCache drops the cached read window when a write covering
// [wOffset, wOffset+wLen) overlaps it, so stale bytes are never served.
func (cfile *CFile) disableReadCache(wOffset int64, wLen int32) {
	cacheStart := cfile.readCache.LastOffset
	cacheLen := len(cfile.readCache.readBuf)
	if cacheLen == 0 {
		return
	}
	// Overlap test: the write range intersects the cached window.
	overlaps := wOffset < cacheStart+int64(cacheLen) && wOffset+int64(wLen) > cacheStart
	if !overlaps {
		return
	}
	cfile.readCache.readBuf = []byte{}
	logger.Debug("cfile %v disableReadCache: offset: %v len %v --> %v", cfile.Name, cacheStart, cacheLen, len(cfile.readCache.readBuf))
}
// getExtentInfo maps the byte range [start, end) onto the file's chunk list,
// appending one extentInfo (chunk index, in-chunk offset, length) per chunk
// the range touches.
func (cfile *CFile) getExtentInfo(start int64, end int64, eInfo *[]extentInfo) {
	var chunkStart int64
	for idx := 0; idx < len(cfile.chunks); idx++ {
		if start >= end {
			break
		}
		chunkEnd := chunkStart + int64(cfile.chunks[idx].ChunkSize)
		if start < chunkEnd {
			ext := extentInfo{pos: int32(idx), offset: int32(start - chunkStart)}
			if end > chunkEnd {
				// Range continues into the next chunk.
				ext.length = int32(chunkEnd - start)
				start = chunkEnd
			} else {
				// Range finishes inside this chunk.
				ext.length = int32(end - start)
				start = end
			}
			*eInfo = append(*eInfo, ext)
		}
		chunkStart = chunkEnd
	}
}
// Read fills *data with up to readsize bytes starting at offset. The range is
// split per chunk (getExtentInfo) and served by readChunk (read cache plus
// datanode streaming). If the range is still being appended asynchronously,
// the loop polls up to 10x100ms for the data to land. Returns the number of
// bytes read, 0 at or beyond the cached EOF, -2 when the file is in error.
func (cfile *CFile) Read(data *[]byte, offset int64, readsize int64) int64 {
	if cfile.Status != FileNormal {
		logger.Error("cfile %v status error , read func return -2 ", cfile.Name)
		return -2
	}
	// At or beyond the cached EOF: nothing to read.
	if offset == cfile.FileSizeInCache {
		logger.Debug("cfile:%v read offset:%v equals file size in cache ", cfile.Name, offset)
		return 0
	} else if offset > cfile.FileSizeInCache {
		logger.Debug("cfile %v unsupport read beyond file size, offset:%v, filesize in cache:%v ", cfile.Name, offset, cfile.FileSizeInCache)
		return 0
	}
	var i int
	var ret int32
	var doneFlag bool
	start := offset
	end := offset + readsize
	logger.Debug("cfile %v Read start: offset: %v, len: %v", cfile.Name, offset, readsize)
	for start < end && cfile.Status == FileNormal {
		// Map the remaining range onto chunks and read each extent in turn.
		eInfo := make([]extentInfo, 0, 4)
		cfile.getExtentInfo(start, end, &eInfo)
		logger.Debug("cfile %v getExtentInfo: offset: %v, len: %v, eInfo: %v", cfile.Name, start, end, eInfo)
		for _, ei := range eInfo {
			ret = cfile.readChunk(ei, data, start)
			if ret != ei.length {
				// Short or failed read: stop and return what we have so far.
				logger.Error("cfile %v eInfo:%v, readChunk ret %v", cfile.Name, ei, ret)
				doneFlag = true
				break
			}
			start += int64(ret)
		}
		if doneFlag || start == end || start >= cfile.FileSizeInCache {
			break
		}
		//wait append write request in caches
		// Bytes past FileSize are still in flight through the async write
		// pipeline; poll for them instead of failing immediately.
		logger.Debug("cfile %v, start to wait append write..FileSize %v, FileSizeInCache %v", cfile.Name, cfile.FileSize, cfile.FileSizeInCache)
		for i = 0; i < 10; i++ {
			if cfile.FileSize >= end || cfile.FileSize == cfile.FileSizeInCache {
				break
			}
			if len(cfile.DataCache) == 0 {
				logger.Debug("cfile %v, FileSize %v, FileSizeInCache %v, but no DataCache", cfile.Name, cfile.FileSize, cfile.FileSizeInCache)
			}
			time.Sleep(100 * time.Millisecond)
		}
		logger.Debug("cfile %v, end waiting with FileSize %v, FileSizeInCache %v, time %v ms", cfile.Name, cfile.FileSize, cfile.FileSizeInCache, i*100)
	}
	if cfile.Status != FileNormal {
		logger.Error("cfile %v status error , read func return -2 ", cfile.Name)
		return -2
	}
	logger.Debug("cfile %v Read end: return %v", cfile.Name, start-offset)
	return start - offset
}
// Write stores length bytes of buf at offset. Pure appends (offset at the
// cached EOF) go through the async pipeline (appendWrite); overwrites of
// existing data are split per chunk and written synchronously (seekWrite);
// a write that starts inside existing data and runs past EOF does both.
// Returns bytes accepted, -2 on bad file status, -3 when offset is beyond
// the cached file size.
func (cfile *CFile) Write(buf []byte, offset int64, length int32) int32 {
	// NOTE(review): compares Status against literal 0 while Read uses
	// FileNormal — presumably FileNormal == 0; confirm at its declaration.
	if cfile.Status != 0 {
		logger.Error("cfile %v status error , Write func return -2 ", cfile.Name)
		return -2
	}
	if offset > cfile.FileSizeInCache {
		logger.Error("cfile %v unsupport write %v beyond file size %v return -3 ", cfile.Name, offset, cfile.FileSizeInCache)
		return -3
	}
	// Exactly at EOF: append-only fast path.
	if offset == cfile.FileSizeInCache {
		logger.Debug("cfile %v write append only: offset %v, length %v", cfile.Name, offset, length)
		return cfile.appendWrite(buf, length)
	}
	// Overwrite path: invalidate any cached read window we are about to dirty.
	cfile.disableReadCache(offset, length)
	var i int
	var ret, pos int32
	start := offset
	end := offset + int64(length)
	logger.Debug("cfile %v write start: offset: %v, len: %v", cfile.Name, offset, length)
	for start < end && cfile.Status == FileNormal {
		// Split the remaining range per chunk and overwrite each extent.
		eInfo := make([]extentInfo, 0, 4)
		cfile.getExtentInfo(start, end, &eInfo)
		logger.Debug("cfile %v getExtentInfo: offset: %v, len: %v, eInfo: %v", cfile.Name, start, end, eInfo)
		for _, ei := range eInfo {
			ret = cfile.seekWrite(ei, buf[pos:(pos+ei.length)])
			if ret < 0 {
				logger.Error("cfile %v seekWrite failed %v", cfile.Name, ei)
				return int32(start - offset)
			}
			start += int64(ei.length)
			pos += ei.length
		}
		if start == end {
			break
		}
		// Remaining bytes fall past the current EOF: append them.
		if start == cfile.FileSizeInCache {
			logger.Debug("cfile %v write append only: offset %v, length %v", cfile.Name, start, length-pos)
			ret = cfile.appendWrite(buf[pos:length], length-pos)
			if ret < 0 {
				logger.Error("cfile %v appendWrite failed %v", cfile.Name, ret)
				return int32(start - offset)
			}
			start = end
			break
		}
		//wait append write request in caches
		// Data between start and EOF is still in flight; poll up to 10x100ms.
		logger.Debug("cfile %v, start to wait append write..FileSize %v, FileSizeInCache %v", cfile.Name, cfile.FileSize, cfile.FileSizeInCache)
		for i = 0; i < 10; i++ {
			if cfile.FileSize >= end || cfile.FileSize == cfile.FileSizeInCache {
				break
			}
			if len(cfile.DataCache) == 0 {
				logger.Debug("cfile %v, FileSize %v, FileSizeInCache %v, but no DataCache", cfile.Name, cfile.FileSize, cfile.FileSizeInCache)
			}
			time.Sleep(100 * time.Millisecond)
		}
		logger.Debug("cfile %v, end waiting with FileSize %v, FileSizeInCache %v, time %v ms", cfile.Name, cfile.FileSize, cfile.FileSizeInCache, i*100)
	}
	logger.Debug("cfile %v Write end: return %v", cfile.Name, start-offset)
	return int32(start - offset)
}
// overwriteBuffer patches eInfo.length bytes of the pending write buffer
// (cfile.wBuffer) at eInfo.offset with buf, leaving the rest untouched.
// Returns 0 on success, -1 on any size mismatch or write error.
func (cfile *CFile) overwriteBuffer(eInfo extentInfo, buf []byte) int32 {
	// Drain the whole wBuffer into a scratch slice so it can be patched.
	total := cfile.wBuffer.buffer.Len()
	scratch := cfile.wBuffer.buffer.Next(total)
	if len(scratch) != total {
		logger.Error("cfile %v read wBuffer len: %v return: %v ", cfile.Name, total, len(scratch))
		return -1
	}
	// Patch the target range in place.
	if copied := copy(scratch[eInfo.offset:], buf); copied != int(eInfo.length) {
		logger.Error("cfile %v copy to wBuffer len: %v return n: %v", cfile.Name, eInfo.length, copied)
		return -1
	}
	// Write the patched bytes back into the (now empty) buffer.
	cfile.wBuffer.buffer.Reset()
	written, err := cfile.wBuffer.buffer.Write(scratch)
	if written != total || err != nil {
		logger.Error("cfile %v write wBuffer len: %v return n: %v err %v", cfile.Name, total, written, err)
		return -1
	}
	return 0
}
// seekWriteChunk sends one SeekWriteChunkReq to a single datanode replica and
// atomically bumps *copies on success. It always releases one wgWriteReps
// count so the caller's Wait() completes regardless of outcome. A nil conn
// (dial failed earlier) is treated as a silent failure.
func (cfile *CFile) seekWriteChunk(addr string, conn *grpc.ClientConn, req *dp.SeekWriteChunkReq, copies *uint64) {
	defer cfile.wgWriteReps.Add(-1)
	if conn == nil {
		return
	}
	dc := dp.NewDataNodeClient(conn)
	// Release the timeout context's resources when done (the cancel func was
	// discarded before, leaking the context until its timer fired).
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ret, err := dc.SeekWriteChunk(ctx, req)
	if err != nil {
		cfile.delErrDataConn(addr)
		logger.Error("SeekWriteChunk err %v", err)
		return
	}
	// Only a zero Ret counts as a successful replica write.
	if ret.Ret == 0 {
		atomic.AddUint64(copies, 1)
	}
}
// seekWrite synchronously overwrites buf at eInfo.offset inside the chunk at
// eInfo.pos, fanning the write out to every replica host of the chunk's block
// group in parallel. All replicas must acknowledge (3 copies) or the file is
// marked FileError. Returns 0 on success, -1 on failure.
func (cfile *CFile) seekWrite(eInfo extentInfo, buf []byte) int32 {
	chunkInfo := cfile.chunks[eInfo.pos]
	var copies uint64
	// Size the slice by the actual host count instead of a hard-coded 3, so a
	// block group with more replicas cannot cause an index-out-of-range panic.
	hosts := chunkInfo.BlockGroupWithHost.Hosts
	conn := make([]*grpc.ClientConn, len(hosts))
	// Dial every replica up front; any failure aborts the whole write.
	for i, v := range hosts {
		conn[i] = cfile.newDataConn(v)
		if conn[i] == nil {
			logger.Error("cfile %v dial %v failed!", cfile.Name, v)
			return -1
		}
	}
	// Fan out; each goroutine decrements wgWriteReps exactly once.
	for i, v := range hosts {
		pSeekWriteChunkReq := &dp.SeekWriteChunkReq{
			ChunkID:      chunkInfo.ChunkID,
			BlockGroupID: chunkInfo.BlockGroupWithHost.BlockGroupID,
			Databuf:      buf,
			ChunkOffset:  int64(eInfo.offset),
		}
		cfile.wgWriteReps.Add(1)
		go cfile.seekWriteChunk(v, conn[i], pSeekWriteChunkReq, &copies)
	}
	cfile.wgWriteReps.Wait()
	// Require full replication: fewer than 3 acks poisons the file handle.
	if copies < 3 {
		cfile.Status = FileError
		logger.Error("cfile %v seekWriteChunk copies: %v, set error!", cfile.Name, copies)
		return -1
	}
	return 0
}
// appendWrite queues buf for asynchronous append at the current end of file.
// FileSizeInCache grows immediately; WriteThread persists the bytes later.
// Returns length on success, -2 when the file is already in error or a
// previous asynchronous write has failed (WriteErrSignal).
func (cfile *CFile) appendWrite(buf []byte, length int32) int32 {
	if cfile.Status == FileError {
		return -2
	}
	item := &chanData{}
	item.data = append(item.data, buf...)
	select {
	case cfile.DataQueue <- item:
		// Queued for the write thread.
	case <-cfile.WriteErrSignal:
		logger.Error("Write recv WriteErrSignal ,volumeid %v , pid %v ,fname %v!", cfile.cfs.VolID, cfile.ParentInodeID, cfile.Name)
		return -2
	}
	cfile.FileSizeInCache += int64(length)
	return length
}
// WriteThread drains cfile.DataQueue and hands each buffered write to
// WriteHandler. A nil item (DataQueue closed by CloseWrite) starts shutdown:
// the thread waits for the in-flight DataCache to drain, closes the current
// chunk's write stream, and acknowledges via CloseSignal before exiting. On a
// handler error the file is marked FileError and WriteErrSignal unblocks any
// caller stuck in appendWrite.
func (cfile *CFile) WriteThread() {
	logger.Debug("Write Thread: file %v start writethread!\n", cfile.Name)
	// Idiom fix: `for true { select { case ... } }` with a single case is just
	// a blocking receive loop.
	for {
		chanData := <-cfile.DataQueue
		if chanData == nil {
			// Close requested: wait until all queued data is acknowledged.
			logger.Debug("WriteThread file %v recv channel close, wait DataCache...", cfile.Name)
			var ti uint32
			for cfile.Status == FileNormal {
				if len(cfile.DataCache) == 0 {
					break
				}
				ti++
				time.Sleep(time.Millisecond * 5)
			}
			logger.Debug("WriteThread file %v wait DataCache == 0 done. loop times: %v", cfile.Name, ti)
			if cfile.CurChunk != nil {
				if cfile.CurChunk.ChunkWriteSteam != nil {
					cfile.CurChunk.ChunkWriteSteam.CloseSend()
				}
			}
			cfile.CloseSignal <- struct{}{}
			return
		}
		// Drop writes silently once the file is poisoned.
		if cfile.Status == FileError {
			continue
		}
		newData := &Data{}
		newData.ID = atomic.AddUint64(&cfile.atomicNum, 1)
		newData.DataBuf = new(bytes.Buffer)
		newData.DataBuf.Write(chanData.data)
		newData.Status = 1
		if err := cfile.WriteHandler(newData); err != nil {
			logger.Error("WriteThread file %v WriteHandler err %v !", cfile.Name, err)
			cfile.Status = FileError
			cfile.WriteErrSignal <- true
		}
	}
}
// WriteHandler pushes one queued Data item onto the current chunk's write
// stream. If the current chunk lacks room it first drains the outstanding
// DataCache (force-closing the stream after ~5s without progress), then
// allocates a new chunk (up to 5 attempts). The item is registered in
// DataCache before Send so the C2MRecv/Retry path can replay it if the
// stream later fails. Returns an error only when the file is in error state
// or chunk allocation exhausts its retries.
func (cfile *CFile) WriteHandler(newData *Data) error {
	length := newData.DataBuf.Len()
	logger.Debug("WriteHandler: file %v, num:%v, length: %v, \n", cfile.Name, cfile.atomicNum, length)
ALLOCATECHUNK:
	// Current chunk cannot hold this payload: retire it first.
	if cfile.CurChunk != nil && cfile.CurChunk.ChunkFreeSize-length < 0 {
		if cfile.CurChunk.ChunkWriteSteam != nil {
			var ti uint32
			needClose := bool(true)
			logger.Debug("WriteHandler: file %v, begin waiting last chunk: %v\n", cfile.Name, len(cfile.DataCache))
			tmpDataCacheLen := len(cfile.DataCache)
			// Wait for in-flight acks to drain; ti counts consecutive
			// iterations with no progress (reset whenever the cache shrinks).
			for cfile.Status == FileNormal {
				if tmpDataCacheLen == 0 {
					break
				}
				time.Sleep(time.Millisecond * 10)
				if tmpDataCacheLen == len(cfile.DataCache) {
					ti++
				} else {
					tmpDataCacheLen = len(cfile.DataCache)
					ti = 0
				}
				// ~5s stalled: force-close the stream so C2MRecv unblocks.
				if ti == 500 {
					if cfile.CurChunk.ChunkWriteSteam != nil {
						logger.Error("WriteHandler: file %v, dataCacheLen: %v wait last chunk timeout, CloseSend\n", cfile.Name, len(cfile.DataCache))
						cfile.CurChunk.ChunkWriteSteam.CloseSend()
						needClose = false
					}
				}
			}
			if cfile.Status == FileError {
				return errors.New("file status err")
			}
			logger.Debug("WriteHandler: file %v, end wait after loop times %v\n", cfile.Name, ti)
			// Normal drain path: close the stream if the timeout didn't.
			if needClose && cfile.CurChunk.ChunkWriteSteam != nil {
				cfile.CurChunk.ChunkWriteSteam.CloseSend()
			}
		}
		cfile.CurChunk = nil
	}
	// No usable chunk: ask the metanode for one, retrying up to 5 times.
	if cfile.CurChunk == nil {
		for retryCnt := 0; retryCnt < 5; retryCnt++ {
			cfile.CurChunk = cfile.AllocateChunk(true)
			if cfile.CurChunk == nil {
				logger.Error("WriteHandler: file %v, alloc chunk failed for %v times\n", cfile.Name, retryCnt+1)
				time.Sleep(time.Millisecond * 500)
				continue
			}
			break
		}
		if cfile.CurChunk == nil {
			return errors.New("AllocateChunk failed for 5 times")
		}
	}
	// Register before Send: Retry() replays DataCache when the stream dies.
	cfile.DataCacheLocker.Lock()
	cfile.DataCache[cfile.atomicNum] = newData
	cfile.DataCacheLocker.Unlock()
	req := &dp.StreamWriteReq{
		ChunkID:      cfile.CurChunk.ChunkInfo.ChunkID,
		Master:       cfile.CurChunk.ChunkInfo.BlockGroupWithHost.Hosts[0],
		Slave:        cfile.CurChunk.ChunkInfo.BlockGroupWithHost.Hosts[1],
		Backup:       cfile.CurChunk.ChunkInfo.BlockGroupWithHost.Hosts[2],
		Databuf:      newData.DataBuf.Bytes(),
		DataLen:      uint32(length),
		CommitID:     cfile.atomicNum,
		BlockGroupID: cfile.CurChunk.ChunkInfo.BlockGroupWithHost.BlockGroupID,
	}
	if cfile.CurChunk != nil {
		if cfile.CurChunk.ChunkWriteSteam != nil {
			if err := cfile.CurChunk.ChunkWriteSteam.Send(req); err != nil {
				// Send failure: zero the free size so the next call retires
				// this chunk and allocates a fresh one.
				logger.Debug("WriteHandler: send file %v, chunk %v len: %v failed\n", cfile.Name, cfile.CurChunk, length)
				cfile.CurChunk.ChunkFreeSize = 0
			} else {
				logger.Debug("WriteHandler: send file %v, chunk %v len: %v success\n", cfile.Name, cfile.CurChunk, length)
				cfile.CurChunk.ChunkFreeSize -= length
			}
		} else {
			// Chunk lost its stream: start over with a new allocation.
			logger.Error("WriteHandler: file %v, CurChunk %v has no write stream\n", cfile.Name, cfile.CurChunk.ChunkInfo.ChunkID)
			goto ALLOCATECHUNK
		}
	} else {
		logger.Error("WriteHandler: file %v, CurChunk is nil\n", cfile.Name)
		goto ALLOCATECHUNK
	}
	return nil
}
// AllocateChunk asks the metanode for a fresh chunk in this file's volume,
// retrying once after a 2s back-off. With IsStream set it also probes the two
// replica hosts, opens the C2M (client-to-master) replication stream, and
// starts the ack receiver goroutine. Returns nil on any failure.
func (cfile *CFile) AllocateChunk(IsStream bool) *Chunk {
	logger.Debug("AllocateChunk file: %v begin\n", cfile.Name)
	ret := cfile.cfs.checkMetaConn()
	if ret != 0 {
		logger.Error("AllocateChunk file: %v failed\n", cfile.Name)
		return nil
	}
	mc := mp.NewMetaNodeClient(cfile.cfs.MetaNodeConn)
	pAllocateChunkReq := &mp.AllocateChunkReq{
		VolID: cfile.cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pAllocateChunkAck, err := mc.AllocateChunk(ctx, pAllocateChunkReq)
	cancel() // release the timeout context (was leaked before)
	if err != nil || pAllocateChunkAck.Ret != 0 {
		// One retry after re-checking the metanode connection.
		time.Sleep(time.Second * 2)
		if ret := cfile.cfs.checkMetaConn(); ret != 0 {
			logger.Error("AllocateChunk file: %v failed\n", cfile.Name)
			return nil
		}
		mc = mp.NewMetaNodeClient(cfile.cfs.MetaNodeConn)
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		pAllocateChunkAck, err = mc.AllocateChunk(ctx2, pAllocateChunkReq)
		cancel2()
		// Bug fix: the old log line evaluated pAllocateChunkAck.Ret even when
		// err != nil, dereferencing a possibly-nil ack and panicking.
		if err != nil {
			logger.Error("AllocateChunk file: %v failed, err: %v\n", cfile.Name, err)
			return nil
		}
		if pAllocateChunkAck.Ret != 0 {
			logger.Error("AllocateChunk file: %v failed, ret: %v\n", cfile.Name, pAllocateChunkAck.Ret)
			return nil
		}
	}
	curChunk := &Chunk{}
	curChunk.CFile = cfile
	curChunk.ChunkInfo = pAllocateChunkAck.ChunkInfo
	logger.Debug("AllocateChunk file: %v from metanode chunk info:%v\n", cfile.Name, curChunk.ChunkInfo)
	if IsStream {
		// Probe the slave and backup replicas before opening the stream.
		err := utils.TryDial(curChunk.ChunkInfo.BlockGroupWithHost.Hosts[1])
		if err != nil {
			logger.Error("AllocateChunk file: %v new conn to %v failed, err: %v\n", cfile.Name, curChunk.ChunkInfo.BlockGroupWithHost.Hosts[1], err)
			return nil
		}
		err = utils.TryDial(curChunk.ChunkInfo.BlockGroupWithHost.Hosts[2])
		if err != nil {
			logger.Error("AllocateChunk file: %v new conn to %v failed, err: %v\n", cfile.Name, curChunk.ChunkInfo.BlockGroupWithHost.Hosts[2], err)
			return nil
		}
		// The stream itself goes to the master replica (Hosts[0]).
		C2Mconn := cfile.newDataConn(curChunk.ChunkInfo.BlockGroupWithHost.Hosts[0])
		if C2Mconn == nil {
			logger.Error("AllocateChunk file: %v new conn to %v failed\n", cfile.Name, curChunk.ChunkInfo.BlockGroupWithHost.Hosts[0])
			return nil
		}
		C2Mclient := dp.NewDataNodeClient(C2Mconn)
		curChunk.ChunkWriteSteam, err = C2Mclient.C2MRepl(context.Background())
		if err != nil {
			cfile.delErrDataConn(curChunk.ChunkInfo.BlockGroupWithHost.Hosts[0])
			logger.Error("AllocateChunk file: %v create stream to %v failed, err: %v\n", cfile.Name, curChunk.ChunkInfo.BlockGroupWithHost.Hosts[0], err)
			return nil
		}
		curChunk.ChunkFreeSize = chunkSize
		curChunk.ChunkWriteRecvExitSignal = make(chan struct{})
		go curChunk.C2MRecv()
	}
	logger.Debug("AllocateChunk file: %v success\n", cfile.Name)
	return curChunk
}
// Retry runs when C2MRecv exits (deferred from it). Holding the DataCache
// lock, it replays any unacknowledged writes via WriteRetryHandle, retrying
// up to 5 times with a 500ms back-off. If every attempt fails the file is
// marked FileError and writers blocked in appendWrite are released through
// WriteErrSignal; on success the cache is cleared and this chunk is retired.
func (chunk *Chunk) Retry() {
	chunk.CFile.DataCacheLocker.Lock()
	defer chunk.CFile.DataCacheLocker.Unlock()
	// Nothing pending means the stream ended after all acks arrived.
	if len(chunk.CFile.DataCache) == 0 {
		logger.Debug("C2MRecv thread end success for file %v chunk %v", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
		return
	}
	logger.Debug("C2MRecv thread Retry write file %v chunk %v start", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
	retrySuccess := false
	var err error
	for retryCnt := 0; retryCnt < 5; retryCnt++ {
		err = chunk.WriteRetryHandle()
		if err != nil {
			logger.Error("WriteRetryHandle file %v chunk %v err: %v, try again for %v times!", chunk.CFile.Name, chunk.ChunkInfo.ChunkID, err, retryCnt+1)
			time.Sleep(time.Millisecond * 500)
			continue
		} else {
			retrySuccess = true
			break
		}
	}
	if !retrySuccess {
		chunk.CFile.Status = FileError
		chunk.CFile.WriteErrSignal <- true
		logger.Error("C2MRecv thread Retry write file %v chunk %v failed, set FileError!", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
	} else {
		// Everything replayed: drop the cache and retire this chunk's stream
		// so WriteHandler allocates a fresh chunk on the next write.
		chunk.CFile.DataCache = make(map[uint64]*Data)
		chunk.ChunkFreeSize = 0
		chunk.ChunkWriteSteam = nil
		logger.Debug("C2MRecv thread Retry write file %v chunk %v success", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
	}
}
// C2MRecv consumes acknowledgements from the C2M replication stream. Each ack
// confirms one queued Data item in commit order: the item is removed from
// DataCache, the commit is reported to the metanode (AsyncChunk), and the
// local chunk/file sizes are updated. On stream end, error, or a commit-order
// mismatch the loop exits and the deferred Retry replays whatever remains
// unconfirmed.
func (chunk *Chunk) C2MRecv() {
	logger.Debug("C2MRecv thread started success for file %v chunk %v", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
	defer chunk.Retry()
	for {
		in, err := chunk.ChunkWriteSteam.Recv()
		if err == io.EOF {
			logger.Debug("C2MRecv: file %v chunk %v stream %v EOF\n", chunk.CFile.Name, chunk.ChunkInfo.ChunkID, chunk.ChunkWriteSteam)
			break
		}
		if err != nil {
			logger.Debug("C2MRecv: file %v chunk %v stream %v error return : %v\n", chunk.CFile.Name, chunk.ChunkInfo.ChunkID, chunk.ChunkWriteSteam, err)
			break
		}
		if in.Ret == -1 {
			logger.Error("C2MRecv: file %v chunk %v ack.Ret -1 , means M2S2B stream err", chunk.CFile.Name, chunk.ChunkInfo.ChunkID)
			break
		}
		// Bug fix: the old code wrote the Add result back with a plain
		// assignment (curNum = atomic.AddUint64(&curNum, 1)) — a redundant
		// non-atomic store that races with concurrent readers of curNum.
		// AddUint64 already stores the incremented value.
		atomic.AddUint64(&chunk.CFile.curNum, 1)
		if in.CommitID != chunk.CFile.curNum {
			logger.Error("C2MRecv: write failed! file: %v, ID;%v != curNum: %v, chunk: %v, len: %v\n", chunk.CFile.Name, in.CommitID, chunk.CFile.curNum, in.ChunkID, in.DataLen)
			break
		}
		// update to metanode
		logger.Debug("C2MRecv: Write success! try to update metadata file: %v, ID;%v, chunk: %v, len: %v\n",
			chunk.CFile.Name, in.CommitID, in.ChunkID, in.DataLen)
		mc := mp.NewMetaNodeClient(chunk.CFile.cfs.MetaNodeConn)
		pAsyncChunkReq := &mp.AsyncChunkReq{
			VolID:         chunk.CFile.cfs.VolID,
			ParentInodeID: chunk.CFile.ParentInodeID,
			Name:          chunk.CFile.Name,
			ChunkID:       in.ChunkID,
			CommitSize:    in.DataLen,
			BlockGroupID:  in.BlockGroupID,
		}
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		_, err2 := mc.AsyncChunk(ctx, pAsyncChunkReq)
		cancel() // release the timeout context (was leaked before)
		if err2 != nil {
			break
		}
		// comfirm data
		chunk.CFile.DataCacheLocker.Lock()
		delete(chunk.CFile.DataCache, in.CommitID)
		chunk.CFile.DataCacheLocker.Unlock()
		chunk.CFile.updateChunkSize(chunk.ChunkInfo, int32(in.DataLen))
	}
}
// WriteRetryHandle replays every unacknowledged Data item in DataCache onto a
// brand-new chunk: items are sorted by CommitID, each written synchronously
// to all replica hosts (WriteChunk), then the whole replayed size is
// committed to the metanode in one AsyncChunk call. The caller (Retry) holds
// DataCacheLocker. Returns nil on success, or the first allocation, datanode,
// or metanode error encountered.
func (chunk *Chunk) WriteRetryHandle() error {
	length := len(chunk.CFile.DataCache)
	if length == 0 {
		return nil
	}
	// Replay onto a fresh chunk; no stream is needed for synchronous writes.
	tmpchunk := chunk.CFile.AllocateChunk(false)
	if tmpchunk == nil {
		return errors.New("AllocateChunk error")
	}
	// Replay strictly in commit-ID order.
	sortedKeys := make([]int, 0)
	for k := range chunk.CFile.DataCache {
		sortedKeys = append(sortedKeys, int(k))
	}
	sort.Ints(sortedKeys)
	logger.Debug("WriteRetryHandle AllocateChunk success, begin to retry item num:%v, commitIDs: %v", length, sortedKeys)
	var chunkSize int
	for _, vv := range sortedKeys {
		bufLen := chunk.CFile.DataCache[uint64(vv)].DataBuf.Len()
		req := dp.WriteChunkReq{ChunkID: tmpchunk.ChunkInfo.ChunkID,
			BlockGroupID: tmpchunk.ChunkInfo.BlockGroupWithHost.BlockGroupID,
			Databuf:      chunk.CFile.DataCache[uint64(vv)].DataBuf.Bytes(),
			CommitID:     uint64(vv),
		}
		// Write this item to every replica; any failure aborts the replay.
		for _, v := range tmpchunk.ChunkInfo.BlockGroupWithHost.Hosts {
			conn := chunk.CFile.newDataConn(v)
			if conn == nil {
				logger.Error("WriteRetryHandle newDataConn Failed err")
				return fmt.Errorf("WriteRetryHandle newDataConn Failed")
			}
			dc := dp.NewDataNodeClient(conn)
			ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
			_, err := dc.WriteChunk(ctx, &req)
			if err != nil {
				logger.Error("WriteRetryHandle WriteChunk to DataNode Host Failed err %v", err)
				chunk.CFile.delErrDataConn(v)
				return err
			}
		}
		logger.Debug("WriteRetryHandle write CommitID %v bufLen %v success", vv, bufLen)
		chunkSize += bufLen
		// Advance the confirmed commit counter past this replayed item.
		chunk.CFile.curNum = uint64(vv)
	}
	// Commit the whole replayed chunk to the metanode in a single call.
	mc := mp.NewMetaNodeClient(chunk.CFile.cfs.MetaNodeConn)
	pAsyncChunkReq := &mp.AsyncChunkReq{
		VolID:         chunk.CFile.cfs.VolID,
		ParentInodeID: chunk.CFile.ParentInodeID,
		Name:          chunk.CFile.Name,
		ChunkID:       tmpchunk.ChunkInfo.ChunkID,
		CommitSize:    uint32(chunkSize),
		BlockGroupID:  tmpchunk.ChunkInfo.BlockGroupWithHost.BlockGroupID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	_, err2 := mc.AsyncChunk(ctx, pAsyncChunkReq)
	if err2 != nil {
		logger.Error("WriteRetryHandle AsyncChunk to MetaNode Failed err %v", err2)
		return err2
	}
	logger.Debug("WriteRetryHandle success with ChunkID %v ChunkSize %v", tmpchunk.ChunkInfo.ChunkID, chunkSize)
	chunk.CFile.updateChunkSize(tmpchunk.ChunkInfo, int32(chunkSize))
	return nil
}
// updateChunkSize records length freshly committed bytes: it grows the last
// chunk when the ChunkID matches, otherwise appends a new chunk entry, then
// advances FileSize. Call only after the data has been written to the
// datanodes and synced to the metanode.
func (cfile *CFile) updateChunkSize(chunkinfo *mp.ChunkInfoWithBG, length int32) {
	n := len(cfile.chunks)
	if n > 0 && cfile.chunks[n-1].ChunkID == chunkinfo.ChunkID {
		cfile.chunks[n-1].ChunkSize += length
	} else {
		entry := &mp.ChunkInfoWithBG{
			ChunkID:            chunkinfo.ChunkID,
			ChunkSize:          length,
			BlockGroupWithHost: chunkinfo.BlockGroupWithHost,
		}
		cfile.chunks = append(cfile.chunks, entry)
	}
	cfile.FileSize += int64(length)
}
// Sync reports file health: 0 while the handle is usable, -1 once the file
// has entered FileError state. Data itself is flushed asynchronously by
// WriteThread, so no I/O happens here.
func (cfile *CFile) Sync() int32 {
	if cfile.Status != FileError {
		return 0
	}
	return -1
}
// Flush mirrors Sync: writes are pushed asynchronously by WriteThread, so it
// only reports whether the file has failed (-1) or is still healthy (0).
func (cfile *CFile) Flush() int32 {
	if cfile.Status != FileError {
		return 0
	}
	return -1
}
// CloseWrite tears down the asynchronous write path: it marks the handle as
// closing, closes DataQueue (WriteThread treats the resulting nil receive as
// the shutdown sentinel), and blocks until WriteThread confirms completion on
// CloseSignal. Always returns 0.
func (cfile *CFile) CloseWrite() int32 {
	/*if cfile.Status == FileError {
		return -1
	} */
	cfile.Closing = true
	logger.Debug("CloseWrite close cfile.DataQueue")
	close(cfile.DataQueue)
	// Wait for WriteThread to drain DataCache and close the write stream.
	<-cfile.CloseSignal
	logger.Debug("CloseWrite recv CloseSignal!")
	return 0
}
// Close releases the handle's cached datanode connections. Always returns 0.
func (cfile *CFile) Close() int32 {
	cfile.delAllDataConn()
	return 0
}
|
package cfs
import (
"bazil.org/fuse"
"bufio"
"bytes"
"errors"
"fmt"
"github.com/ipdcode/containerfs/logger"
dp "github.com/ipdcode/containerfs/proto/dp"
mp "github.com/ipdcode/containerfs/proto/mp"
vp "github.com/ipdcode/containerfs/proto/vp"
"github.com/ipdcode/containerfs/utils"
"golang.org/x/net/context"
"google.golang.org/grpc"
"io"
"math/rand"
"os"
"strconv"
"sync"
"sync/atomic"
"time"
)
// MetaNodePeers lists the addresses of the metanode replicas.
var MetaNodePeers []string

// VolMgrAddr is the volume-manager (volmgr) service address.
var VolMgrAddr string

// MetaNodeAddr is the metanode address registered for newly created volumes.
var MetaNodeAddr string

// chunksize for write
const (
	chunkSize     = 64 * 1024 * 1024        // capacity of one chunk (64 MiB)
	oneExpandSize = 30 * 1024 * 1024 * 1024 // 30 GiB — presumably one expansion step; not referenced in this chunk, confirm usage
)

// BufferSize is the minimum read-ahead size used by CFile.readChunk.
var BufferSize int32
// CFS is a client-side handle to one volume: the metanode leader connection
// (kept fresh by OpenFileSystem's background timer) plus a cache of
// per-datanode gRPC connections guarded by DataConnLocker.
type CFS struct {
	VolID  string           // volume UUID
	Leader string           // current metanode leader address
	Conn   *grpc.ClientConn // connection to the metanode leader

	DataConnLocker sync.RWMutex                // guards DataConn
	DataConn       map[string]*grpc.ClientConn // datanode address -> cached connection

	//Status int // 0 ok , 1 readonly 2 invaild
}
// GetDataConn returns the cached gRPC connection for the given datanode
// address, or an error when none has been stored yet.
func (cfs *CFS) GetDataConn(addr string) (*grpc.ClientConn, error) {
	cfs.DataConnLocker.RLock()
	defer cfs.DataConnLocker.RUnlock()
	conn, ok := cfs.DataConn[addr]
	if !ok {
		return nil, errors.New("Key not exists")
	}
	return conn, nil
}
// SetDataConn caches conn as the connection for the given datanode address,
// replacing any previous entry.
func (cfs *CFS) SetDataConn(addr string, conn *grpc.ClientConn) {
	cfs.DataConnLocker.Lock()
	defer cfs.DataConnLocker.Unlock()
	cfs.DataConn[addr] = conn
}
// DelDataConn removes the cached connection for the given datanode address;
// removing a missing key is a no-op.
func (cfs *CFS) DelDataConn(addr string) {
	cfs.DataConnLocker.Lock()
	defer cfs.DataConnLocker.Unlock()
	delete(cfs.DataConn, addr)
}
// CreateVol creates a new volume named name with the given capacity (an
// integer quota passed as a decimal string) at the volmgr, then registers the
// matching namespace on the metanode. Prints the new volume UUID to stdout on
// success. Returns 0 on success, -1 on any failure.
// NOTE(review): the two steps are not atomic — a metanode failure leaves the
// volmgr volume created without a namespace.
func CreateVol(name string, capacity string) int32 {
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("CreateVol failed,Dial to volmgr fail :%v\n", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	// NOTE(review): Atoi error ignored — an unparsable capacity silently
	// becomes a quota of 0.
	spaceQuota, _ := strconv.Atoi(capacity)
	pCreateVolReq := &vp.CreateVolReq{
		VolName:    name,
		SpaceQuota: int32(spaceQuota),
		MetaDomain: MetaNodeAddr,
	}
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Second)
	pCreateVolAck, err := vc.CreateVol(ctx, pCreateVolReq)
	if err != nil {
		return -1
	}
	if pCreateVolAck.Ret != 0 {
		return -1
	}
	// send to metadata to registry a new map
	conn2, err := grpc.Dial(MetaNodeAddr, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(time.Millisecond*300), grpc.FailOnNonTempDialError(true))
	if err != nil {
		logger.Error("CreateVol failed,Dial to metanode fail :%v\n", err)
		return -1
	}
	defer conn2.Close()
	mc := mp.NewMetaNodeClient(conn2)
	pmCreateNameSpaceReq := &mp.CreateNameSpaceReq{
		VolID:       pCreateVolAck.UUID,
		RaftGroupID: pCreateVolAck.RaftGroupID,
		Type:        0,
	}
	ctx2, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pmCreateNameSpaceAck, err := mc.CreateNameSpace(ctx2, pmCreateNameSpaceReq)
	if err != nil {
		return -1
	}
	if pmCreateNameSpaceAck.Ret != 0 {
		logger.Error("CreateNameSpace failed :%v\n", pmCreateNameSpaceAck.Ret)
		return -1
	}
	// The CLI consumes the UUID from stdout.
	fmt.Println(pCreateVolAck.UUID)
	return 0
}
// BlockGroupVp2Mp converts a volmgr-protocol BlockGroup into the equivalent
// metanode-protocol BlockGroup, deep-copying every BlockInfo.
func BlockGroupVp2Mp(in *vp.BlockGroup) *mp.BlockGroup {
	out := &mp.BlockGroup{
		BlockGroupID: in.BlockGroupID,
		FreeSize:     in.FreeSize,
		Status:       in.Status,
	}
	infos := make([]*mp.BlockInfo, len(in.BlockInfos))
	for i, src := range in.BlockInfos {
		infos[i] = &mp.BlockInfo{
			BlockID:      src.BlockID,
			DataNodeIP:   src.DataNodeIP,
			DataNodePort: src.DataNodePort,
		}
	}
	out.BlockInfos = infos
	return out
}
// ExpandVolRS expands volume UUID's resources once when free space has
// dropped to 10% or less. The lock file <MtPath>/expanding (O_EXCL) makes
// concurrent attempts mutually exclusive. Returns 1 after a successful
// expansion, 0 when no expansion is needed or possible, -1 on error, and -2
// when another expansion is already in progress.
func ExpandVolRS(UUID string, MtPath string) int32 {
	path := MtPath + "/expanding"
	fd, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		// Lock file already exists: an expand is in flight elsewhere.
		return -2
	}
	defer fd.Close()
	ok, ret := GetFSInfo(UUID)
	if ok != 0 {
		os.Remove(path)
		logger.Error("ExpandVol once volume:%v failed, GetFSInfo error", UUID)
		return -1
	}
	used := ret.TotalSpace - ret.FreeSpace
	if float64(ret.FreeSpace)/float64(ret.TotalSpace) > 0.1 {
		// More than 10% free: nothing to do.
		os.Remove(path)
		return 0
	}
	logger.Debug("Need ExpandVol once volume:%v -- totalsize:%v -- freesize:%v", UUID, ret.TotalSpace, ret.FreeSpace)
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("ExpandVol once volume:%v failed,Dial to volmgr error:%v", UUID, err)
		os.Remove(path)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pExpandVolRSReq := &vp.ExpandVolRSReq{
		VolID:  UUID,
		UsedRS: used,
	}
	// The context is shared with the rollback RPC below; cancel on return
	// (the cancel func was discarded before, leaking the context).
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
	defer cancel()
	pExpandVolRSAck, err := vc.ExpandVolRS(ctx, pExpandVolRSReq)
	if err != nil {
		logger.Error("ExpandVol once volume:%v failed, VolMgr return error:%v", UUID, err)
		os.Remove(path)
		return -1
	}
	if pExpandVolRSAck.Ret == -1 {
		// Bug fix: the old format string had a trailing %v with no matching
		// argument, logging "%!v(MISSING)".
		logger.Error("ExpandVol once volume:%v failed, VolMgr return -1", UUID)
		os.Remove(path)
		return -1
	} else if pExpandVolRSAck.Ret == 0 {
		logger.Error("ExpandVol volume:%v once failed, VolMgr return 0 because volume totalsize not enough expand", UUID)
		os.Remove(path)
		return 0
	}
	// VolMgr granted new block groups; register them on the metanode, or
	// roll the grant back if the metanode update fails.
	out := UpdateMetaForExpandVol(UUID, pExpandVolRSAck)
	if out != 0 {
		logger.Error("ExpandVol volume:%v once volmgr success but update metanode fail, so rollback volmgr this expand resource", UUID)
		pDelReq := &vp.DelVolRSForExpandReq{
			VolID:       UUID,
			BlockGroups: pExpandVolRSAck.BlockGroups,
		}
		pDelAck, err := vc.DelVolRSForExpand(ctx, pDelReq)
		if err != nil || pDelAck.Ret != 0 {
			logger.Error("ExpandVol once volume:%v volmgr success but update meta failed, then rollback volmgr error", UUID)
		}
		os.Remove(path)
		return -1
	}
	os.Remove(path)
	return 1
}
// UpdateMetaForExpandVol registers freshly granted block groups (from a
// volmgr expansion) with volume UUID's metanode namespace. Returns 0 on
// success, -1 on any dial/RPC failure; the caller is responsible for rolling
// back the volmgr grant when this fails.
func UpdateMetaForExpandVol(UUID string, ack *vp.ExpandVolRSAck) int {
	// Translate the volmgr block groups into the metanode protocol's shape.
	var mpBlockGroups []*mp.BlockGroup
	for _, v := range ack.BlockGroups {
		mpBlockGroups = append(mpBlockGroups, BlockGroupVp2Mp(v))
	}
	// Meta handle
	conn2, err := DialMeta(UUID)
	if err != nil {
		logger.Error("ExpandVol volume:%v once volmgr success but Dial to metanode fail :%v", UUID, err)
		return -1
	}
	defer conn2.Close()
	mc := mp.NewMetaNodeClient(conn2)
	pmExpandNameSpaceReq := &mp.ExpandNameSpaceReq{
		VolID:       UUID,
		BlockGroups: mpBlockGroups,
	}
	ctx2, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release the timeout context (was leaked before)
	pmExpandNameSpaceAck, err := mc.ExpandNameSpace(ctx2, pmExpandNameSpaceReq)
	if err != nil {
		logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return error:%v", UUID, err)
		return -1
	}
	if pmExpandNameSpaceAck.Ret != 0 {
		// Bug fix: the old format string had two %v verbs but only one
		// argument, so the non-zero Ret was never logged.
		logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return not equal 0:%v", UUID, pmExpandNameSpaceAck.Ret)
		return -1
	}
	return 0
}
// ExpandVolTS grows volume UUID's total-size quota by expandQuota (a decimal
// string), the CLI-driven expansion path. Returns 0 on success, -1 on any
// failure.
func ExpandVolTS(UUID string, expandQuota string) int32 {
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("ExpandVol failed,Dial to volmgr fail :%v", err)
		return -1
	}
	defer conn.Close()
	client := vp.NewVolMgrClient(conn)
	quota, _ := strconv.Atoi(expandQuota)
	req := &vp.ExpandVolTSReq{
		VolID:       UUID,
		ExpandQuota: int32(quota),
	}
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Second)
	ack, err := client.ExpandVolTS(ctx, req)
	if err != nil {
		logger.Error("Expand Vol:%v TotalSize:%v but VolMgr return error:%v", UUID, expandQuota, err)
		return -1
	}
	if ack.Ret != 0 {
		logger.Error("Expand Vol:%v TotalSize:%v but VolMgr return -1", UUID, expandQuota)
		return -1
	}
	return 0
}
// Migrate asks the volmgr to move all blocks off the bad datanode at ip:port
// onto healthy nodes. Returns 0 when the request was accepted, -1 on failure.
func Migrate(ip string, port string) int32 {
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("Migrate DataNode failed,Dial to volmgr fail :%v", err)
		return -1
	}
	defer conn.Close()
	client := vp.NewVolMgrClient(conn)
	portNum, _ := strconv.Atoi(port)
	req := &vp.MigrateReq{
		DataNodeIP:   ip,
		DataNodePort: int32(portNum),
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	if _, err = client.Migrate(ctx, req); err != nil {
		logger.Error("Migrate bad DataNode(%v:%v) all Blocks not finished err : %v", ip, port, err)
		return -1
	}
	return 0
}
// GetVolInfo fetches volume metadata for name from the volmgr.
// Returns (0, ack) on success, (-1, nil) when the volmgr is unreachable, and
// (1, nil) when the RPC fails or reports a non-zero Ret.
func GetVolInfo(name string) (int32, *vp.GetVolInfoAck) {
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("GetVolInfo failed,Dial to volmgr fail :%v", err)
		return -1, nil
	}
	defer conn.Close()
	client := vp.NewVolMgrClient(conn)
	req := &vp.GetVolInfoReq{UUID: name}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	ack, err := client.GetVolInfo(ctx, req)
	// Short-circuit keeps the Ret check safe when err != nil (ack may be nil).
	if err != nil || ack.Ret != 0 {
		return 1, nil
	}
	return 0, ack
}
// SnapShootVol asks volume uuid's metanode to snapshot its namespace.
// Returns 0 on success, -1 on any failure.
func SnapShootVol(uuid string) int32 {
	conn, err := DialMeta(uuid)
	if err != nil {
		logger.Error("SnapShootVol failed,Dial to metanode fail :%v", err)
		return -1
	}
	defer conn.Close()
	client := mp.NewMetaNodeClient(conn)
	req := &mp.SnapShootNameSpaceReq{
		VolID: uuid,
		Type:  0,
	}
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Second)
	ack, err := client.SnapShootNameSpace(ctx, req)
	if err != nil {
		logger.Error("SnapShootVol failed,grpc func err :%v", err)
		return -1
	}
	if ack.Ret != 0 {
		logger.Error("SnapShootVol failed,rpc func ret:%v", ack.Ret)
		return -1
	}
	return 0
}
// GetVolumeLeader resolves the metanode leader address for volume uuid,
// returning the literal string "no leader" when resolution fails.
func GetVolumeLeader(uuid string) string {
	if leader, err := GetLeader(uuid); err == nil {
		return leader
	}
	return "no leader"
}
// DeleteVol removes volume uuid in two steps: first its metanode namespace,
// then its resources at the volmgr. Returns 0 on success, -1 on any failure.
// NOTE(review): the steps are not atomic — if the volmgr call fails the
// namespace is already gone.
func DeleteVol(uuid string) int32 {
	// send to metadata to delete a map
	conn2, err := DialMeta(uuid)
	if err != nil {
		logger.Error("DeleteVol failed,Dial to metanode fail :%v\n", err)
		return -1
	}
	defer conn2.Close()
	mc := mp.NewMetaNodeClient(conn2)
	pmDeleteNameSpaceReq := &mp.DeleteNameSpaceReq{
		VolID: uuid,
		Type:  0,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pmDeleteNameSpaceAck, err := mc.DeleteNameSpace(ctx, pmDeleteNameSpaceReq)
	if err != nil {
		return -1
	}
	if pmDeleteNameSpaceAck.Ret != 0 {
		logger.Error("DeleteNameSpace failed :%v", pmDeleteNameSpaceAck.Ret)
		return -1
	}
	// Namespace gone; release the volume's resources at the volmgr.
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("deleteVol failed,Dial to volmgr fail :%v", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pDeleteVolReq := &vp.DeleteVolReq{
		UUID: uuid,
	}
	ctx, _ = context.WithTimeout(context.Background(), 5*time.Second)
	pDeleteVolAck, err := vc.DeleteVol(ctx, pDeleteVolReq)
	if err != nil {
		logger.Error("DeleteVol failed,grpc func err :%v", err)
		return -1
	}
	if pDeleteVolAck.Ret != 0 {
		logger.Error("DeleteVol failed,grpc func ret :%v", pDeleteVolAck.Ret)
		return -1
	}
	return 0
}
// GetFSInfo queries the metanode for space statistics of volume name.
// Returns (0, ack) on success and a non-zero code with nil otherwise.
// NOTE(review): dial failure returns -1 but RPC failure returns 1; callers
// only compare against 0, so the inconsistent codes are kept for
// compatibility.
func GetFSInfo(name string) (int32, *mp.GetFSInfoAck) {
	conn, err := DialMeta(name)
	if err != nil {
		logger.Error("GetFSInfo failed,Dial to metanode fail :%v\n", err)
		return -1, nil
	}
	defer conn.Close()
	mc := mp.NewMetaNodeClient(conn)
	pGetFSInfoReq := &mp.GetFSInfoReq{
		VolID: name,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pGetFSInfoAck, err := mc.GetFSInfo(ctx, pGetFSInfoReq)
	if err != nil {
		logger.Error("GetFSInfo failed,grpc func err :%v", err)
		return 1, nil
	}
	if pGetFSInfoAck.Ret != 0 {
		logger.Error("GetFSInfo failed,grpc func ret :%v", pGetFSInfoAck.Ret)
		return 1, nil
	}
	return 0, pGetFSInfoAck
}
// OpenFileSystem resolves and dials the metanode leader for volume UUID and
// returns a CFS handle, or nil if the leader cannot be resolved or dialed.
// A background goroutine re-checks the leader every 500ms and re-dials when
// it moves, swapping cfs.Conn in place.
// NOTE(review): the ticker goroutine is never stopped, so every CFS handle
// leaks one goroutine + ticker for process lifetime; cfs.Conn/cfs.Leader are
// also written here and read by other methods without synchronization —
// confirm whether handles are meant to live forever and whether the race is
// acceptable.
func OpenFileSystem(UUID string) *CFS {
	leader, err := GetLeader(UUID)
	if err != nil {
		return nil
	}
	conn, err := DialMeta(UUID)
	if conn == nil || err != nil {
		return nil
	}
	cfs := CFS{VolID: UUID, Conn: conn, Leader: leader, DataConn: make(map[string]*grpc.ClientConn)}
	ticker := time.NewTicker(time.Millisecond * 500)
	go func() {
		for range ticker.C {
			leader, err := GetLeader(UUID)
			if err != nil {
				// No leader known: drop the connection so RPC wrappers
				// block/retry until a new leader appears.
				cfs.Leader = ""
				if cfs.Conn != nil {
					cfs.Conn.Close()
				}
				cfs.Conn = nil
				logger.Error("Leader Timer : Get leader failed ,volumeID : %s", UUID)
				continue
			}
			if leader != cfs.Leader {
				// Leader moved: dial the new one before swapping it in.
				conn, err := DialMeta(UUID)
				if conn == nil || err != nil {
					logger.Error("Leader Timer : DialMeta failed ,volumeID : %s", UUID)
					continue
				}
				cfs.Leader = leader
				if cfs.Conn != nil {
					cfs.Conn.Close()
				}
				cfs.Conn = conn
			}
		}
	}()
	return &cfs
}
// CreateDirDirect creates directory name under parent inode pinode on the
// metanode. Returns the metanode result code and the new directory's inode.
// If the first RPC fails, it waits for the leader-refresh goroutine (see
// OpenFileSystem) to re-establish cfs.Conn and retries once; -1 means no
// connection could be obtained or both attempts failed.
func (cfs *CFS) CreateDirDirect(pinode uint64, name string) (int32, uint64) {
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1, 0
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pCreateDirDirectReq := &mp.CreateDirDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pCreateDirDirectAck, err := mc.CreateDirDirect(ctx, pCreateDirDirectReq)
	if err != nil {
		// One retry after giving the leader-refresh goroutine time to
		// replace the connection.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
		}
		if cfs.Conn == nil {
			return -1, 0
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel2()
		pCreateDirDirectAck, err = mc.CreateDirDirect(ctx2, pCreateDirDirectReq)
		if err != nil {
			return -1, 0
		}
	}
	return pCreateDirDirectAck.Ret, pCreateDirDirectAck.Inode
}
// GetInodeInfoDirect fetches inode number and inode metadata for entry name
// under pinode. Returns (ret, inode, info); -1 with zero values on
// connection or RPC failure. A failed first RPC is retried once after
// waiting for cfs.Conn to be re-established.
func (cfs *CFS) GetInodeInfoDirect(pinode uint64, name string) (int32, uint64, *mp.InodeInfo) {
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1, 0, nil
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pGetInodeInfoDirectReq := &mp.GetInodeInfoDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pGetInodeInfoDirectAck, err := mc.GetInodeInfoDirect(ctx, pGetInodeInfoDirectReq)
	if err != nil {
		// One retry after the leader-refresh goroutine has had a chance to
		// replace the connection.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
		}
		if cfs.Conn == nil {
			return -1, 0, nil
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel2()
		pGetInodeInfoDirectAck, err = mc.GetInodeInfoDirect(ctx2, pGetInodeInfoDirectReq)
		if err != nil {
			return -1, 0, nil
		}
	}
	return pGetInodeInfoDirectAck.Ret, pGetInodeInfoDirectAck.Inode, pGetInodeInfoDirectAck.InodeInfo
}
// StatDirect looks up entry name under pinode. Returns the metanode result
// code, the entry type flag (InodeType), and the inode number; -1 with zero
// values on connection or RPC failure. A failed first RPC is retried once.
func (cfs *CFS) StatDirect(pinode uint64, name string) (int32, bool, uint64) {
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1, false, 0
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pStatDirectReq := &mp.StatDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pStatDirectAck, err := mc.StatDirect(ctx, pStatDirectReq)
	if err != nil {
		// One retry after the leader-refresh goroutine has had a chance to
		// replace the connection.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
		}
		if cfs.Conn == nil {
			return -1, false, 0
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel2()
		pStatDirectAck, err = mc.StatDirect(ctx2, pStatDirectReq)
		if err != nil {
			return -1, false, 0
		}
	}
	return pStatDirectAck.Ret, pStatDirectAck.InodeType, pStatDirectAck.Inode
}
// ListDirect returns the directory entries of inode pinode, or (-1, nil) on
// connection or RPC failure.
func (cfs *CFS) ListDirect(pinode uint64) (int32, []*mp.DirentN) {
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1, nil
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pListDirectReq := &mp.ListDirectReq{
		PInode: pinode,
		VolID:  cfs.VolID,
	}
	// Listing large directories can be slow, hence the long deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pListDirectAck, err := mc.ListDirect(ctx, pListDirectReq)
	if err != nil {
		return -1, nil
	}
	return pListDirectAck.Ret, pListDirectAck.Dirents
}
// DeleteDirDirect recursively deletes directory name under pinode: all
// contained entries are removed depth-first, then the directory itself.
// A directory that does not stat counts as success (0). Returns -1 on
// connection/RPC failure, or the first non-zero result from a child delete.
func (cfs *CFS) DeleteDirDirect(pinode uint64, name string) int32 {
	ret, _, inode := cfs.StatDirect(pinode, name)
	if ret != 0 {
		logger.Debug("DeleteDirDirect StatDirect Failed , no such dir")
		return 0
	}
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pListDirectReq := &mp.ListDirectReq{
		PInode: inode,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pListDirectAck, err := mc.ListDirect(ctx, pListDirectReq)
	if err != nil {
		logger.Error("DeleteDirDirect ListDirect :%v\n", err)
		return -1
	}
	// Depth-first: children first, directory entry last. InodeType true is
	// treated as a file here, false as a subdirectory.
	for _, v := range pListDirectAck.Dirents {
		if v.InodeType {
			if ret := cfs.DeleteFileDirect(inode, v.Name); ret != 0 {
				return ret
			}
		} else {
			if ret := cfs.DeleteDirDirect(inode, v.Name); ret != 0 {
				return ret
			}
		}
	}
	pDeleteDirDirectReq := &mp.DeleteDirDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	// Removing the directory can cascade on the metanode side; allow longer.
	ctx2, cancel2 := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel2()
	pDeleteDirDirectAck, err := mc.DeleteDirDirect(ctx2, pDeleteDirDirectReq)
	if err != nil {
		return -1
	}
	return pDeleteDirDirectAck.Ret
}
// RenameDirect renames entry oldname under oldpinode to newname under
// newpinode. Returns the metanode result code, or -1 on connection/RPC
// failure.
func (cfs *CFS) RenameDirect(oldpinode uint64, oldname string, newpinode uint64, newname string) int32 {
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pRenameDirectReq := &mp.RenameDirectReq{
		OldPInode: oldpinode,
		OldName:   oldname,
		NewPInode: newpinode,
		NewName:   newname,
		VolID:     cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pRenameDirectAck, err := mc.RenameDirect(ctx, pRenameDirectReq)
	if err != nil {
		return -1
	}
	return pRenameDirectAck.Ret
}
// CreateFileDirect creates file name under pinode and returns an open CFile
// handle for it. With O_EXCL set, an already-existing file yields code 17
// (EEXIST) and a nil handle; any non-zero metanode code is passed through.
func (cfs *CFS) CreateFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
	if flags&os.O_EXCL != 0 {
		// Exclusive create: reject when the entry already stats cleanly.
		if ret, _, _ := cfs.StatDirect(pinode, name); ret == 0 {
			return 17, nil
		}
	}
	ret, inode := cfs.createFileDirect(pinode, name)
	if ret != 0 {
		return ret, nil
	}
	newFile := &CFile{
		OpenFlag:      flags,
		cfs:           cfs,
		FileSize:      0,
		ParentInodeID: pinode,
		Inode:         inode,
		Name:          name,
		ReaderMap:     make(map[fuse.HandleID]*ReaderInfo),
		wBuffer: wBuffer{
			buffer:   new(bytes.Buffer),
			freeSize: BufferSize,
		},
	}
	return 0, newFile
}
// OpenFileDirect opens the existing file name under pinode and returns a
// CFile handle along with the metanode result code (nil handle on failure).
// For writable opens (O_WRONLY/O_RDWR) the write buffer is positioned after
// the tail of the last existing chunk so appends continue there; read-only
// opens start with an empty write buffer.
func (cfs *CFS) OpenFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
	var ret int32
	var writer int32 // NOTE(review): never assigned, always 0 — confirm intent
	var tmpFileSize int64
	cfile := CFile{}
	if (flags&os.O_WRONLY) != 0 || (flags&os.O_RDWR) != 0 {
		chunkInfos := make([]*mp.ChunkInfoWithBG, 0)
		var inode uint64
		if ret, chunkInfos, inode = cfs.GetFileChunksDirect(pinode, name); ret != 0 {
			return ret, nil
		}
		if len(chunkInfos) > 0 {
			// File has data: size is the sum of chunk sizes, and writes
			// continue into the free tail of the last chunk.
			for i := range chunkInfos {
				tmpFileSize += int64(chunkInfos[i].ChunkSize)
			}
			lastChunk := chunkInfos[len(chunkInfos)-1]
			tmpBuffer := wBuffer{
				buffer:    new(bytes.Buffer),
				freeSize:  BufferSize - (lastChunk.ChunkSize % BufferSize),
				chunkInfo: lastChunk,
			}
			cfile = CFile{
				OpenFlag:      flags,
				cfs:           cfs,
				Writer:        writer,
				FileSize:      tmpFileSize,
				wBuffer:       tmpBuffer,
				ParentInodeID: pinode,
				Inode:         inode,
				Name:          name,
				chunks:        chunkInfos,
				ReaderMap:     make(map[fuse.HandleID]*ReaderInfo),
			}
		} else {
			// Empty file: fresh buffer, no chunk list.
			tmpBuffer := wBuffer{
				buffer:   new(bytes.Buffer),
				freeSize: BufferSize,
			}
			cfile = CFile{
				OpenFlag:      flags,
				cfs:           cfs,
				Writer:        writer,
				FileSize:      0,
				ParentInodeID: pinode,
				Inode:         inode,
				Name:          name,
				wBuffer:       tmpBuffer,
				ReaderMap:     make(map[fuse.HandleID]*ReaderInfo),
			}
		}
	} else {
		// Read-only open: fetch the chunk list for reads; the write buffer
		// is initialized but unused.
		chunkInfos := make([]*mp.ChunkInfoWithBG, 0)
		var inode uint64
		if ret, chunkInfos, inode = cfs.GetFileChunksDirect(pinode, name); ret != 0 {
			logger.Error("OpenFile failed , GetFileChunksDirect failed !")
			return ret, nil
		}
		for i := range chunkInfos {
			tmpFileSize += int64(chunkInfos[i].ChunkSize)
		}
		tmpBuffer := wBuffer{
			buffer:   new(bytes.Buffer),
			freeSize: BufferSize,
		}
		cfile = CFile{
			OpenFlag:      flags,
			cfs:           cfs,
			Writer:        writer,
			FileSize:      tmpFileSize,
			wBuffer:       tmpBuffer,
			ParentInodeID: pinode,
			Inode:         inode,
			Name:          name,
			chunks:        chunkInfos,
			ReaderMap:     make(map[fuse.HandleID]*ReaderInfo),
		}
	}
	return 0, &cfile
}
// UpdateOpenFileDirect refreshes cfile's write buffer from the file's
// current chunk list when it is (re)opened for writing, so appends resume
// at the tail of the last chunk. Read-only opens are a no-op. Returns 0 on
// success or the metanode result code.
func (cfs *CFS) UpdateOpenFileDirect(pinode uint64, name string, cfile *CFile, flags int) int32 {
	writable := (flags&os.O_WRONLY) != 0 || (flags&os.O_RDWR) != 0
	if !writable {
		return 0
	}
	ret, chunkInfos, _ := cfs.GetFileChunksDirect(pinode, name)
	if ret != 0 {
		return ret
	}
	if n := len(chunkInfos); n > 0 {
		last := chunkInfos[n-1]
		cfile.wBuffer = wBuffer{
			buffer:    new(bytes.Buffer),
			freeSize:  BufferSize - (last.ChunkSize % BufferSize),
			chunkInfo: last,
		}
	}
	return 0
}
// createFileDirect asks the metanode to create file name under pinode.
// Metanode result codes 1, 2 and 17 (EEXIST) are passed through with a zero
// inode; any other ack yields (0, inode). Returns (-1, 0) when no
// connection could be obtained or both RPC attempts failed.
func (cfs *CFS) createFileDirect(pinode uint64, name string) (int32, uint64) {
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1, 0
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pCreateFileDirectReq := &mp.CreateFileDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pCreateFileDirectAck, err := mc.CreateFileDirect(ctx, pCreateFileDirectReq)
	if err != nil || pCreateFileDirectAck.Ret != 0 {
		// One retry after the leader-refresh goroutine has had a chance to
		// replace the connection.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
		}
		if cfs.Conn == nil {
			return -1, 0
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel2()
		pCreateFileDirectAck, err = mc.CreateFileDirect(ctx2, pCreateFileDirectReq)
		if err != nil {
			logger.Error("CreateFileDirect failed,grpc func failed :%v\n", err)
			return -1, 0
		}
	}
	switch pCreateFileDirectAck.Ret {
	case 1:
		return 1, 0
	case 2:
		return 2, 0
	case 17: // EEXIST
		return 17, 0
	}
	return 0, pCreateFileDirectAck.Inode
}
// DeleteFileDirect deletes file name under pinode. It first best-effort
// deletes every chunk replica on the datanodes (failures are logged, not
// fatal), then removes the metadata entry on the metanode. Returns the
// metanode result code, or -1 on connection/RPC failure.
func (cfs *CFS) DeleteFileDirect(pinode uint64, name string) int32 {
	ret, chunkInfos, _ := cfs.GetFileChunksDirect(pinode, name)
	if ret == 0 && chunkInfos != nil {
		// Best-effort chunk cleanup: every replica of every chunk.
		for _, v1 := range chunkInfos {
			for _, v2 := range v1.BlockGroup.BlockInfos {
				addr := utils.InetNtoa(v2.DataNodeIP).String() + ":" + strconv.Itoa(int(v2.DataNodePort))
				conn, err := cfs.GetDataConn(addr)
				if err != nil || conn == nil {
					// No cached conn; dial and cache on success.
					conn, err = DialData(addr)
					if err != nil || conn == nil {
						logger.Error("DeleteFile failed,Dial to datanode fail :%v\n", err)
						//return 0
					} else {
						cfs.SetDataConn(addr, conn)
					}
				}
				dc := dp.NewDataNodeClient(conn)
				dpDeleteChunkReq := &dp.DeleteChunkReq{
					ChunkID: v1.ChunkID,
					BlockID: v2.BlockID,
				}
				ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
				_, err = dc.DeleteChunk(ctx, dpDeleteChunkReq)
				if err != nil {
					// One retry via the (possibly refreshed) cached conn;
					// a second failure drops the cached conn and moves on.
					time.Sleep(time.Second)
					conn, err = cfs.GetDataConn(addr)
					if err != nil || conn == nil {
						logger.Error("DeleteChunk failed,Dial to metanode fail :%v\n", err)
					} else {
						dc = dp.NewDataNodeClient(conn)
						ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
						_, err = dc.DeleteChunk(ctx, dpDeleteChunkReq)
						if err != nil {
							cfs.DelDataConn(addr)
							logger.Error("DeleteChunk failed,grpc func failed :%v\n", err)
						}
					}
				}
				//conn.Close()
			}
		}
	}
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfs.Conn == nil {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	mpDeleteFileDirectReq := &mp.DeleteFileDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	mpDeleteFileDirectAck, err := mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
	if err != nil || mpDeleteFileDirectAck.Ret != 0 {
		// One retry after giving the leader-refresh goroutine time to
		// replace the connection.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
			continue
		}
		if cfs.Conn == nil {
			return -1
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		mpDeleteFileDirectAck, err = mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
		if err != nil {
			logger.Error("DeleteFile failed,grpc func err :%v\n", err)
			return -1
		}
	}
	return mpDeleteFileDirectAck.Ret
}
// GetFileChunksDirect fetches the chunk list and inode number of file name
// under pinode. Returns (ret, chunks, inode); (-1, nil, 0) on connection or
// RPC failure. A failed first RPC is retried once after waiting for
// cfs.Conn to be re-established.
func (cfs *CFS) GetFileChunksDirect(pinode uint64, name string) (int32, []*mp.ChunkInfoWithBG, uint64) {
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1, nil, 0
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pGetFileChunksDirectReq := &mp.GetFileChunksDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pGetFileChunksDirectAck, err := mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
	if err != nil || pGetFileChunksDirectAck.Ret != 0 {
		// One retry after the leader-refresh goroutine has had a chance to
		// replace the connection.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
		}
		if cfs.Conn == nil {
			return -1, nil, 0
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel2()
		pGetFileChunksDirectAck, err = mc.GetFileChunksDirect(ctx2, pGetFileChunksDirectReq)
		if err != nil {
			logger.Error("GetFileChunks failed,grpc func failed :%v\n", err)
			return -1, nil, 0
		}
	}
	return pGetFileChunksDirectAck.Ret, pGetFileChunksDirectAck.ChunkInfos, pGetFileChunksDirectAck.Inode
}
// wBuffer accumulates written data for the chunk currently being filled and
// remembers which region of the file it covers.
type wBuffer struct {
	freeSize    int32               // bytes still free in the buffer (chunk size)
	chunkInfo   *mp.ChunkInfoWithBG // chunk info: the chunk this buffer flushes into
	buffer      *bytes.Buffer       // chunk data buffered so far
	startOffset int64               // file offset of the first buffered byte
	endOffset   int64               // file offset one past the last buffered byte
}
// ReaderInfo holds per-open-handle read state (one entry per fuse.HandleID
// in CFile.ReaderMap).
type ReaderInfo struct {
	LastOffset int64              // last read offset; not referenced in this part of the file
	readBuf    []byte             // cached data of the chunk being read sequentially
	Ch         chan *bytes.Buffer // delivers chunk data from streamread to Read
}
// CFile is an open file handle on a containerfs volume. It tracks the file's
// identity on the metanode, the buffered write state, and per-handle read
// caches.
type CFile struct {
	cfs           *CFS   // owning filesystem handle
	ParentInodeID uint64 // inode of the parent directory
	Name          string // entry name under the parent
	Inode         uint64 // file inode
	OpenFlag      int    // os.O_* flags the file was opened with
	FileSize      int64  // current logical size in bytes
	Status        int32  // 0 ok; set to 1 by send on unrecoverable write failure
	// for write
	//WMutex sync.Mutex
	Writer int32 // set at open time; not modified in this part of the file
	//FirstW bool
	wBuffer     wBuffer        // buffer for the chunk currently being written
	wgWriteReps sync.WaitGroup // tracks in-flight replica writes (see writeChunk)
	// for read
	//lastoffset int64
	RMutex sync.Mutex          // guards read state; usage not visible in this chunk of the file
	chunks []*mp.ChunkInfoWithBG // chunkinfo: ordered chunk list of the file
	//readBuf []byte
	ReaderMap map[fuse.HandleID]*ReaderInfo // per-handle read caches
}
// AllocateChunk asks the metanode for a fresh chunk (with its replica block
// group) for this file's volume. Returns the metanode result code and the
// chunk info; (-1, nil) on connection or RPC failure. A failed first RPC is
// retried once.
func (cfile *CFile) AllocateChunk() (int32, *mp.ChunkInfoWithBG) {
	// Wait up to ~3s for a usable metanode connection.
	for i := 0; i < 10; i++ {
		if cfile.cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfile.cfs.Conn == nil {
		return -1, nil
	}
	mc := mp.NewMetaNodeClient(cfile.cfs.Conn)
	pAllocateChunkReq := &mp.AllocateChunkReq{
		VolID: cfile.cfs.VolID,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pAllocateChunkAck, err := mc.AllocateChunk(ctx, pAllocateChunkReq)
	if err != nil || pAllocateChunkAck.Ret != 0 {
		// One retry after the leader-refresh goroutine has had a chance to
		// replace the connection.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfile.cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
		}
		if cfile.cfs.Conn == nil {
			return -1, nil
		}
		mc = mp.NewMetaNodeClient(cfile.cfs.Conn)
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel2()
		pAllocateChunkAck, err = mc.AllocateChunk(ctx2, pAllocateChunkReq)
		if err != nil {
			logger.Error("AllocateChunk failed,grpc func failed :%v\n", err)
			return -1, nil
		}
	}
	return pAllocateChunkAck.Ret, pAllocateChunkAck.ChunkInfo
}
// generateRandomNumber returns count distinct random integers in the
// half-open interval [start, end), in random order. It returns nil when the
// interval is invalid or too small to hold count distinct values.
//
// Uses rand.Perm instead of the previous rejection-sampling loop, which was
// O(count^2) and degraded badly as count approached end-start.
func generateRandomNumber(start int, end int, count int) []int {
	if end < start || (end-start) < count {
		return nil
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	nums := make([]int, count)
	// Perm yields a permutation of [0, end-start); the first count entries
	// are therefore distinct. Shift them into [start, end).
	for i, v := range r.Perm(end - start)[:count] {
		nums[i] = v + start
	}
	return nums
}
// streamread streams [offset, offset+size) of chunk chunkidx from one of its
// replica datanodes and sends the collected bytes on ch. Replicas are tried
// in random order; a replica is skipped on dial/stream failure. If every
// replica fails, an empty buffer is sent so the reader does not block.
// NOTE(review): generateRandomNumber(0, 3, 3) hard-codes three replicas; if
// len(BlockInfos) were ever > 3, idxs[n] would panic — confirm the replica
// count is fixed at 3.
func (cfile *CFile) streamread(chunkidx int, ch chan *bytes.Buffer, offset int64, size int64) {
	var conn *grpc.ClientConn
	var err error
	var buffer *bytes.Buffer
	outflag := 0 // replicas that failed outright (dial/stream/Recv)
	inflag := 0  // consecutive Recv failures; 0 means the last stream completed
	idxs := generateRandomNumber(0, 3, 3) // random replica visit order
	for n := 0; n < len(cfile.chunks[chunkidx].BlockGroup.BlockInfos); n++ {
		i := idxs[n]
		buffer = new(bytes.Buffer)
		addr := utils.InetNtoa(cfile.chunks[chunkidx].BlockGroup.BlockInfos[i].DataNodeIP).String() + ":" + strconv.Itoa(int(cfile.chunks[chunkidx].BlockGroup.BlockInfos[i].DataNodePort))
		conn, err = cfile.cfs.GetDataConn(addr)
		if err != nil || conn == nil {
			// No cached conn: dial and cache on success, else next replica.
			conn, err = DialData(addr)
			if err != nil || conn == nil {
				logger.Error("streamread failed,Dial to datanode fail :%v", err)
				outflag++
				continue
			} else {
				cfile.cfs.SetDataConn(addr, conn)
			}
		}
		dc := dp.NewDataNodeClient(conn)
		streamreadChunkReq := &dp.StreamReadChunkReq{
			ChunkID:  cfile.chunks[chunkidx].ChunkID,
			BlockID:  cfile.chunks[chunkidx].BlockGroup.BlockInfos[i].BlockID,
			Offset:   offset,
			Readsize: size,
		}
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		stream, err := dc.StreamReadChunk(ctx, streamreadChunkReq)
		if err != nil {
			// Stream open failed: re-dial once on a fresh conn before
			// giving up on this replica.
			conn, err = DialData(addr)
			if err != nil || conn == nil {
				cfile.cfs.DelDataConn(addr)
				logger.Error("StreamReadChunk DialData error:%v, so retry other datanode!", err)
				outflag++
				continue
			} else {
				cfile.cfs.SetDataConn(addr, conn)
				dc = dp.NewDataNodeClient(conn)
				streamreadChunkReq := &dp.StreamReadChunkReq{
					ChunkID:  cfile.chunks[chunkidx].ChunkID,
					BlockID:  cfile.chunks[chunkidx].BlockGroup.BlockInfos[i].BlockID,
					Offset:   offset,
					Readsize: size,
				}
				ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
				stream, err = dc.StreamReadChunk(ctx, streamreadChunkReq)
				if err != nil {
					logger.Error("StreamReadChunk StreamReadChunk error:%v, so retry other datanode!", err)
					outflag++
					continue
				}
			}
		}
		// Drain the stream into buffer until EOF or error.
		for {
			ack, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				logger.Error("=== streamreadChunkReq Recv err:%v ===", err)
				inflag++
				outflag++
				break
			}
			if ack != nil {
				if len(ack.Databuf) == 0 {
					continue
				} else {
					buffer.Write(ack.Databuf)
					inflag = 0
				}
			} else {
				continue
			}
		}
		if inflag == 0 {
			// Clean read from this replica: deliver and stop.
			ch <- buffer
			break
		} else if inflag == 3 {
			// All replicas failed mid-stream: deliver empty buffer.
			buffer = new(bytes.Buffer)
			buffer.Write([]byte{})
			logger.Error("Stream Read the chunk three copy Recv error")
			ch <- buffer
			break
		} else if inflag < 3 {
			logger.Error("Stream Read the chunk %v copy Recv error, so need retry other datanode!!!", inflag)
			continue
		}
	}
	if outflag >= 3 {
		// Never managed a clean read from any replica.
		buffer = new(bytes.Buffer)
		buffer.Write([]byte{})
		logger.Error("Stream Read the chunk three copy Datanode error")
		ch <- buffer
	}
}
// Read copies up to readsize bytes starting at file offset into *data and
// returns the number of bytes appended, or -1 on error. Data still sitting
// in the write buffer is served from memory; otherwise whole chunks are
// streamed from the datanodes and cached per handle in ReaderMap, which
// favors sequential reads.
func (cfile *CFile) Read(handleID fuse.HandleID, data *[]byte, offset int64, readsize int64) int64 {
	// read data from write buffer
	cache := cfile.wBuffer
	n := cache.buffer.Len()
	if n != 0 && offset >= cache.startOffset {
		// NOTE(review): readBuf is indexed with the absolute file offset
		// here, which only lines up when startOffset == 0 — confirm against
		// callers.
		cfile.ReaderMap[handleID].readBuf = cache.buffer.Bytes()
		if offset+readsize < cache.endOffset {
			*data = append(*data, cfile.ReaderMap[handleID].readBuf[offset:offset+readsize]...)
			return readsize
		}
		*data = append(*data, cfile.ReaderMap[handleID].readBuf[offset:cache.endOffset]...)
		return cache.endOffset - offset
	}
	if cfile.chunks == nil || len(cfile.chunks) == 0 {
		logger.Error("Read File but Chunks not exist")
		return -1
	}
	// Clamp the request to the end of the file.
	if offset+readsize > cfile.FileSize {
		readsize = cfile.FileSize - offset
	}
	var length int64
	var freeOffset int64
	var freeSize int64
	var beginChunkNum int
	var endChunkNum int
	// Locate the chunk containing offset; curOffset becomes the offset
	// within that chunk.
	curOffset := offset
	for i, v := range cfile.chunks {
		freeOffset = curOffset - int64(v.ChunkSize)
		if freeOffset < 0 {
			beginChunkNum = i
			break
		} else {
			curOffset = freeOffset
		}
	}
	// Locate the chunk containing the last requested byte.
	curSize := offset + readsize
	for i, v := range cfile.chunks {
		freeSize = curSize - int64(v.ChunkSize)
		if freeSize <= 0 {
			endChunkNum = i
			break
		} else {
			curSize = freeSize
		}
	}
	var eachReadLen int64
	freesize := readsize // bytes still to deliver
	if endChunkNum < beginChunkNum {
		logger.Error("This Read data from beginchunk:%v lager than endchunk:%v", beginChunkNum, endChunkNum)
		return -1
	}
	if beginChunkNum > len(cfile.chunks) || endChunkNum+1 > len(cfile.chunks) || beginChunkNum > cap(cfile.chunks) || endChunkNum+1 > cap(cfile.chunks) {
		logger.Error("Read begin or end chunk num not right")
		return -1
	}
	//for i, _ := range cfile.chunks[beginChunkNum : endChunkNum+1] {
	for i := 0; i < len(cfile.chunks[beginChunkNum:endChunkNum+1]); i++ {
		index := i + beginChunkNum
		// How much of this chunk the request needs.
		if curOffset+freesize < int64(cfile.chunks[index].ChunkSize) {
			eachReadLen = freesize
		} else {
			eachReadLen = int64(cfile.chunks[index].ChunkSize) - curOffset
		}
		if len(cfile.ReaderMap[handleID].readBuf) == 0 {
			// Cache miss: stream the whole chunk from a datanode replica.
			buffer := new(bytes.Buffer)
			cfile.ReaderMap[handleID].Ch = make(chan *bytes.Buffer)
			go cfile.streamread(index, cfile.ReaderMap[handleID].Ch, 0, int64(cfile.chunks[index].ChunkSize))
			buffer = <-cfile.ReaderMap[handleID].Ch
			if buffer.Len() == 0 {
				logger.Error("Recv chunk:%v from datanode size:%v , but retsize is 0", index, cfile.chunks[index].ChunkSize)
				return -1
			}
			cfile.ReaderMap[handleID].readBuf = buffer.Next(buffer.Len())
			buffer.Reset()
			buffer = nil
			//logger.Debug("#### Read chunk:%v == bufferlen:%v == curoffset:%v == eachlen:%v ==offset:%v == readsize:%v ####", index, len(cfile.ReaderMap[handleID].readBuf), curOffset, eachReadLen, offset, readsize)
		}
		buflen := int64(len(cfile.ReaderMap[handleID].readBuf))
		bufcap := int64(cap(cfile.ReaderMap[handleID].readBuf))
		if curOffset > buflen || curOffset > bufcap {
			logger.Error("== Read chunk:%v from datanode (offset:%v -- needreadsize:%v) lager than exist (buflen:%v -- bufcap:%v)\n", index, curOffset, eachReadLen, buflen, bufcap)
			return -1
		}
		// Copy out, truncating to what the cached chunk actually holds.
		if curOffset+eachReadLen > buflen {
			eachReadLen = buflen - curOffset
			*data = append(*data, cfile.ReaderMap[handleID].readBuf[curOffset:curOffset+eachReadLen]...)
		} else {
			*data = append(*data, cfile.ReaderMap[handleID].readBuf[curOffset:curOffset+eachReadLen]...)
		}
		curOffset += eachReadLen
		if curOffset == int64(len(cfile.ReaderMap[handleID].readBuf)) {
			// Chunk fully consumed: drop the cache so the next chunk loads.
			curOffset = 0
			cfile.ReaderMap[handleID].readBuf = []byte{}
		}
		freesize = freesize - eachReadLen
		length += eachReadLen
	}
	return length
}
// Write appends the first len bytes of buf to the file through the write
// buffer, allocating a new chunk whenever the file size crosses a chunkSize
// boundary and pushing the buffer to the datanodes each time it fills.
// Returns the number of bytes accepted, -1 on allocation/push failure
// (ENOSPC included), or -2 when the file is already in error state.
// NOTE: the parameter name `len` shadows the builtin; the builtin is never
// needed inside this function, so behavior is unaffected.
func (cfile *CFile) Write(buf []byte, len int32) int32 {
	if cfile.Status != 0 {
		logger.Error("cfile status error , Write func return -2 ")
		return -2
	}
	var w int32 // bytes of buf consumed so far
	w = 0
	for w < len {
		if (cfile.FileSize % chunkSize) == 0 {
			// File size sits on a chunk boundary: start a new chunk.
			logger.Debug("need a new chunk...")
			var ret int32
			ret, cfile.wBuffer.chunkInfo = cfile.AllocateChunk()
			if ret != 0 {
				if ret == 28 /*ENOSPC*/ {
					return -1
				}
				return -2
			}
		}
		if cfile.wBuffer.freeSize == 0 {
			// Previous buffer was flushed; start a fresh one.
			cfile.wBuffer.buffer = new(bytes.Buffer)
			cfile.wBuffer.freeSize = BufferSize
		}
		if len-w < cfile.wBuffer.freeSize {
			// Remainder fits in the current buffer: stage it and stop.
			if len != w {
				cfile.wBuffer.buffer.Write(buf[w:len])
				cfile.wBuffer.freeSize = cfile.wBuffer.freeSize - (len - w)
				cfile.wBuffer.startOffset = cfile.FileSize
				cfile.FileSize = cfile.FileSize + int64(len-w)
				cfile.wBuffer.endOffset = cfile.FileSize
				cfile.wBuffer.chunkInfo.ChunkSize = cfile.wBuffer.chunkInfo.ChunkSize + int32(len-w)
				w = len
			}
			break
		} else {
			// Fill the buffer completely; it will be pushed below.
			cfile.wBuffer.buffer.Write(buf[w : w+cfile.wBuffer.freeSize])
			w = w + cfile.wBuffer.freeSize
			cfile.wBuffer.startOffset = cfile.FileSize
			cfile.FileSize = cfile.FileSize + int64(cfile.wBuffer.freeSize)
			cfile.wBuffer.endOffset = cfile.FileSize
			cfile.wBuffer.chunkInfo.ChunkSize = cfile.wBuffer.chunkInfo.ChunkSize + int32(cfile.wBuffer.freeSize)
			cfile.wBuffer.freeSize = 0
		}
		if cfile.wBuffer.freeSize == 0 {
			// Buffer full: replicate it to the datanodes now.
			ret := cfile.push()
			if ret != 0 {
				return -1
			}
		}
	}
	return w
}
// push flushes the current write buffer to the datanodes via send. A file
// already in error state returns -1; a file that has never allocated a
// chunk is a no-op. Returns send's result otherwise.
func (cfile *CFile) push() int32 {
	switch {
	case cfile.Status != 0:
		logger.Error("cfile status error , push func return err ")
		return -1
	case cfile.wBuffer.chunkInfo == nil:
		return 0
	}
	snapshot := cfile.wBuffer // record cur buffer so send works on a stable copy
	return cfile.send(&snapshot)
}
// Flush pushes any partially-filled write buffer to the datanodes. A buffer
// with freeSize == 0 has already been pushed by Write, and a nil chunkInfo
// means nothing was ever written, so both are skipped. Returns 0 on
// success, or the file's error status.
func (cfile *CFile) Flush() int32 {
	if cfile.Status != 0 {
		logger.Error("cfile status error , Flush func return err ")
		return cfile.Status
	}
	//avoid repeat push for integer file ETC. 64MB , the last push has already done in Write func
	if cfile.wBuffer.freeSize == 0 || cfile.wBuffer.chunkInfo == nil {
		return 0
	}
	snapshot := cfile.wBuffer
	cfile.wBuffer.freeSize = 0
	logger.Debug("Flush!!!!! len %v name %v pinode %v", cfile.wBuffer.buffer.Len(), cfile.Name, cfile.ParentInodeID)
	return cfile.send(&snapshot)
}
// writeChunk sends one WriteChunk request to the datanode at addr and, on
// success, atomically increments *copies. It always balances the caller's
// wgWriteReps.Add(1) — the Done is deferred so even a panic in the gRPC
// call cannot deadlock send's Wait. A failed write drops the cached conn so
// a fresh one is dialed next time. blkgrpid is currently unused.
func (cfile *CFile) writeChunk(addr string, conn *grpc.ClientConn, req *dp.WriteChunkReq, blkgrpid uint32, copies *uint64) {
	defer cfile.wgWriteReps.Done() // was Add(-1) at the end; Done is the idiom and defer is panic-safe
	if conn == nil {
		return
	}
	dc := dp.NewDataNodeClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	ret, err := dc.WriteChunk(ctx, req)
	if err != nil {
		logger.Error("WriteChunk func err %v", err)
		cfile.cfs.DelDataConn(addr)
		return
	}
	if ret.Ret != 0 {
		logger.Error("WriteChunk Ret err %v", ret.Ret)
		cfile.cfs.DelDataConn(addr)
		return
	}
	atomic.AddUint64(copies, 1)
}
// send writes one buffered chunk to all three replica datanodes in parallel
// and then records the chunk on the metanode via SyncChunk. If fewer than 3
// replicas ack, a fresh chunk is allocated and the whole write is retried
// (up to 10 times); SyncChunk itself is retried up to 15 times. On any
// unrecoverable failure cfile.Status is set to 1 and returned; 0 means
// success.
func (cfile *CFile) send(v *wBuffer) int32 {
	sendLen := v.buffer.Len()
	if sendLen <= 0 {
		return 0
	}
	dataBuf := v.buffer.Next(v.buffer.Len())
	var chunkWriteFlag bool
	for cnt := 0; cnt < 10; cnt++ {
		var copies uint64
		// First ensure a live conn to every replica; any dial failure
		// abandons this chunk immediately.
		for i := range v.chunkInfo.BlockGroup.BlockInfos {
			ip := utils.InetNtoa(v.chunkInfo.BlockGroup.BlockInfos[i].DataNodeIP).String()
			port := int(v.chunkInfo.BlockGroup.BlockInfos[i].DataNodePort)
			addr := ip + ":" + strconv.Itoa(port)
			conn, err := cfile.cfs.GetDataConn(addr)
			if err != nil || conn == nil {
				conn, err = DialData(addr)
				if err == nil && conn != nil {
					logger.Debug("new datanode conn !!!")
					cfile.cfs.SetDataConn(addr, conn)
				} else {
					logger.Error("new conn to %v failed", addr)
					goto fail
				}
			}
		}
		// Fan the write out to all replicas in parallel; writeChunk counts
		// successful acks into copies.
		for i := range v.chunkInfo.BlockGroup.BlockInfos {
			ip := utils.InetNtoa(v.chunkInfo.BlockGroup.BlockInfos[i].DataNodeIP).String()
			port := int(v.chunkInfo.BlockGroup.BlockInfos[i].DataNodePort)
			addr := ip + ":" + strconv.Itoa(port)
			conn, _ := cfile.cfs.GetDataConn(addr)
			blockID := v.chunkInfo.BlockGroup.BlockInfos[i].BlockID
			chunkID := v.chunkInfo.ChunkID
			pWriteChunkReq := &dp.WriteChunkReq{
				ChunkID: chunkID,
				BlockID: blockID,
				Databuf: dataBuf,
			}
			cfile.wgWriteReps.Add(1)
			go cfile.writeChunk(addr, conn, pWriteChunkReq, v.chunkInfo.BlockGroup.BlockGroupID, &copies)
		}
		cfile.wgWriteReps.Wait()
	fail:
		if copies < 3 {
			// Fewer than 3 replicas acked: abandon this chunk and allocate
			// a fresh one. BUG FIX: ret is checked BEFORE chunkInfo is
			// dereferenced — AllocateChunk returns a nil chunkInfo on
			// failure and the old code wrote ChunkSize through it first.
			var ret int32
			ret, cfile.wBuffer.chunkInfo = cfile.AllocateChunk()
			if ret != 0 {
				cfile.Status = 1
				return cfile.Status
			}
			cfile.wBuffer.chunkInfo.ChunkSize = int32(sendLen)
			v.chunkInfo = cfile.wBuffer.chunkInfo
			logger.Debug("write 3 copies failed ,choose new chunk! cnt=%v", cnt)
			continue
		} else {
			chunkWriteFlag = true
			break
		}
	}
	if !chunkWriteFlag {
		cfile.Status = 1
		return cfile.Status
	}
	// Record the (now replicated) chunk on the metanode.
	pSyncChunkReq := &mp.SyncChunkReq{
		ParentInodeID: cfile.ParentInodeID,
		Name:          cfile.Name,
		VolID:         cfile.cfs.VolID,
	}
	var tmpChunkInfo mp.ChunkInfo
	tmpChunkInfo.ChunkSize = v.chunkInfo.ChunkSize
	tmpChunkInfo.ChunkID = v.chunkInfo.ChunkID
	tmpChunkInfo.BlockGroupID = v.chunkInfo.BlockGroup.BlockGroupID
	pSyncChunkReq.ChunkInfo = &tmpChunkInfo
	wflag := false
	for i := 0; i < 10; i++ {
		if cfile.cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
	}
	if cfile.cfs.Conn == nil {
		cfile.Status = 1
		return cfile.Status
	}
	mc := mp.NewMetaNodeClient(cfile.cfs.Conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pSyncChunkAck, err := mc.SyncChunk(ctx, pSyncChunkReq)
	cancel() // explicit cancel (go vet: lostcancel); defer would also be fine here
	if err == nil && pSyncChunkAck.Ret == 0 {
		wflag = true
	} else {
		logger.Error("SyncChunk failed start to try ,name %v,inode %v,pinode %v", cfile.Name, cfile.Inode, cfile.ParentInodeID)
		for i := 0; i < 15; i++ {
			time.Sleep(time.Second)
			// j, not i: the old code shadowed the retry counter here.
			for j := 0; j < 10; j++ {
				if cfile.cfs.Conn != nil {
					break
				}
				time.Sleep(300 * time.Millisecond)
			}
			if cfile.cfs.Conn == nil {
				cfile.Status = 1
				return cfile.Status
			}
			logger.Error("SyncChunk try %v times", i+1)
			mc := mp.NewMetaNodeClient(cfile.cfs.Conn)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			pSyncChunkAck, err := mc.SyncChunk(ctx, pSyncChunkReq)
			cancel() // in-loop: cancel immediately rather than defer
			if err == nil && pSyncChunkAck.Ret == 0 {
				wflag = true
				break
			}
			logger.Error("SyncChunk grpc func try %v times ,err %v", i+1, err)
			if pSyncChunkAck != nil {
				logger.Error("SyncChunk grpc func try %v times ,ret %v", i+1, pSyncChunkAck.Ret)
			}
		}
	}
	if !wflag {
		cfile.Status = 1
		return cfile.Status
	}
	// Update the in-memory chunk list: extend the last chunk if this flush
	// continued it, otherwise append a new entry.
	chunkNum := len(cfile.chunks)
	if chunkNum == 0 {
		cfile.chunks = append(cfile.chunks, v.chunkInfo)
	} else if cfile.chunks[chunkNum-1].ChunkID == v.chunkInfo.ChunkID {
		cfile.chunks[chunkNum-1].ChunkSize = v.chunkInfo.ChunkSize
	} else {
		cfile.chunks = append(cfile.chunks, v.chunkInfo)
	}
	return cfile.Status
}
// Sync is a no-op: data reaches the datanodes via Write/Flush, so there is
// nothing extra to sync here. Always returns 0.
func (cfile *CFile) Sync() int32 {
	return 0
}
// Close is a no-op; the handle holds no resources that need releasing here
// (datanode connections are cached and managed on the CFS). Always returns 0.
func (cfile *CFile) Close(flags int) int32 {
	return 0
}
// ProcessLocalBuffer writes the whole buffer to cfile; it is the hook
// passed to ReadLocalAndWriteCFS for copying a local file into CFS.
func ProcessLocalBuffer(buffer []byte, cfile *CFile) {
	cfile.Write(buffer, int32(len(buffer)))
}
// ReadLocalAndWriteCFS streams the local file at filePth through hookfn in
// chunks of bufSize bytes, passing cfile along. Returns nil on EOF, or the
// first open/read error encountered.
func ReadLocalAndWriteCFS(filePth string, bufSize int, hookfn func([]byte, *CFile), cfile *CFile) error {
	src, err := os.Open(filePth)
	if err != nil {
		return err
	}
	defer src.Close()
	reader := bufio.NewReader(src)
	chunk := make([]byte, bufSize)
	for {
		// Deliver whatever was read (possibly zero bytes) before deciding
		// what the read error means.
		n, readErr := reader.Read(chunk)
		hookfn(chunk[:n], cfile)
		if readErr == io.EOF {
			return nil
		}
		if readErr != nil {
			return readErr
		}
	}
}
Optimization: in the CLI CreateVol, try all metanodes when creating the namespace, so that a single unreachable metanode does not cause the operation to fail.
Closes: #130
package cfs
import (
"bazil.org/fuse"
"bufio"
"bytes"
"errors"
"fmt"
"github.com/ipdcode/containerfs/logger"
dp "github.com/ipdcode/containerfs/proto/dp"
mp "github.com/ipdcode/containerfs/proto/mp"
vp "github.com/ipdcode/containerfs/proto/vp"
"github.com/ipdcode/containerfs/utils"
"golang.org/x/net/context"
"google.golang.org/grpc"
"io"
"math/rand"
"os"
"strconv"
"sync"
"sync/atomic"
"time"
)
// MetaNodePeers lists the addresses of all metanode peers; CreateVol tries
// each in turn when registering a new namespace.
var MetaNodePeers []string

// VolMgrAddr is the address of the volume manager service.
var VolMgrAddr string

// MetaNodeAddr is the metanode domain recorded with newly created volumes.
var MetaNodeAddr string

// chunksize for write
const (
	chunkSize     = 64 * 1024 * 1024              // fixed logical chunk size
	oneExpandSize = 30 * 1024 * 1024 * 1024       // size added per volume expansion
)

// BufferSize is the write-buffer capacity; set elsewhere before use.
var BufferSize int32
// CFS is a client handle for one mounted volume: the metanode leader
// connection (kept fresh by OpenFileSystem's background goroutine) plus a
// cache of datanode connections keyed by "ip:port".
type CFS struct {
	VolID          string                      // volume UUID
	Leader         string                      // current metanode leader address
	Conn           *grpc.ClientConn            // connection to the metanode leader; nil while no leader
	DataConnLocker sync.RWMutex                // guards DataConn
	DataConn       map[string]*grpc.ClientConn // cached datanode connections by address
	//Status int // 0 ok , 1 readonly 2 invaild
}
// GetDataConn returns the cached datanode connection for addr, or an error
// when no connection has been registered for that address.
func (cfs *CFS) GetDataConn(addr string) (*grpc.ClientConn, error) {
	cfs.DataConnLocker.RLock()
	defer cfs.DataConnLocker.RUnlock()
	if conn, ok := cfs.DataConn[addr]; ok {
		return conn, nil
	}
	return nil, errors.New("Key not exists")
}
// SetDataConn caches conn as the datanode connection for addr, replacing
// any previous entry.
func (cfs *CFS) SetDataConn(addr string, conn *grpc.ClientConn) {
	cfs.DataConnLocker.Lock()
	defer cfs.DataConnLocker.Unlock()
	cfs.DataConn[addr] = conn
}
// DelDataConn drops the cached datanode connection for addr (no-op when
// absent) so the next caller dials a fresh one.
func (cfs *CFS) DelDataConn(addr string) {
	cfs.DataConnLocker.Lock()
	defer cfs.DataConnLocker.Unlock()
	delete(cfs.DataConn, addr)
}
// CreateVol creates a volume named name with the given capacity (parsed
// from string) on the volmgr, then registers its namespace on the first
// reachable metanode — every peer in MetaNodePeers is tried so one
// unreachable metanode does not fail the operation (issue #130). On success
// the new volume UUID is printed to stdout and 0 returned; otherwise -1.
func CreateVol(name string, capacity string) int32 {
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("CreateVol failed,Dial to volmgr fail :%v\n", err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	spaceQuota, _ := strconv.Atoi(capacity)
	pCreateVolReq := &vp.CreateVolReq{
		VolName:    name,
		SpaceQuota: int32(spaceQuota),
		MetaDomain: MetaNodeAddr,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
	defer cancel() // release context timer (go vet: lostcancel)
	pCreateVolAck, err := vc.CreateVol(ctx, pCreateVolReq)
	if err != nil {
		return -1
	}
	if pCreateVolAck.Ret != 0 {
		return -1
	}
	// send to metadata to registry a new map
	var flag bool
	for _, metaNodeIp := range MetaNodePeers {
		conn2, err := grpc.Dial(metaNodeIp, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(time.Millisecond*300), grpc.FailOnNonTempDialError(true))
		if err != nil {
			logger.Error("CreateVol failed,Dial to metanode fail, try another metanode :%v\n", err)
			continue
		}
		mc := mp.NewMetaNodeClient(conn2)
		pmCreateNameSpaceReq := &mp.CreateNameSpaceReq{
			VolID:       pCreateVolAck.UUID,
			RaftGroupID: pCreateVolAck.RaftGroupID,
			Type:        0,
		}
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		pmCreateNameSpaceAck, err := mc.CreateNameSpace(ctx2, pmCreateNameSpaceReq)
		cancel2()
		// BUG FIX: the old code used `defer conn2.Close()` inside the loop,
		// holding every peer connection open until function return; close
		// each connection as soon as its RPC finishes.
		conn2.Close()
		if err != nil {
			continue
		}
		if pmCreateNameSpaceAck.Ret != 0 {
			logger.Error("CreateNameSpace with metanode %s failed, try another metanode :%v\n", metaNodeIp, pmCreateNameSpaceAck.Ret)
			continue
		}
		flag = true
		break
	}
	if !flag {
		return -1
	}
	fmt.Println(pCreateVolAck.UUID)
	return 0
}
// BlockGroupVp2Mp converts a volmgr BlockGroup into its metanode counterpart,
// copying the group header and every block info entry.
func BlockGroupVp2Mp(in *vp.BlockGroup) *mp.BlockGroup {
	out := &mp.BlockGroup{
		BlockGroupID: in.BlockGroupID,
		FreeSize:     in.FreeSize,
		Status:       in.Status,
		BlockInfos:   make([]*mp.BlockInfo, len(in.BlockInfos)),
	}
	for i, src := range in.BlockInfos {
		out.BlockInfos[i] = &mp.BlockInfo{
			BlockID:      src.BlockID,
			DataNodeIP:   src.DataNodeIP,
			DataNodePort: src.DataNodePort,
		}
	}
	return out
}
// ExpandVolRS expands volume UUID's resources once, guarded by a lock file
// "<MtPath>/expanding" so only one expansion runs per mountpoint.
// Returns: -2 when the lock file already exists (expansion in progress),
// -1 on any failure, 0 when no expansion is needed (or volmgr has no room),
// and 1 when the volume was expanded and the metanode updated.
//
// Fixes: the six scattered os.Remove(path) calls are replaced by one defer
// (the lock is created before any other exit path), and the "VolMgr return
// -1" log line had a trailing %v with no matching argument.
func ExpandVolRS(UUID string, MtPath string) int32 {
	path := MtPath + "/expanding"
	// O_EXCL makes creation the lock acquisition: failure means another
	// expansion is already running.
	fd, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		return -2
	}
	defer os.Remove(path)
	defer fd.Close()

	ok, ret := GetFSInfo(UUID)
	if ok != 0 {
		logger.Error("ExpandVol once volume:%v failed, GetFSInfo error", UUID)
		return -1
	}
	used := ret.TotalSpace - ret.FreeSpace
	// Expand only when less than 10% of the volume is free.
	if float64(ret.FreeSpace)/float64(ret.TotalSpace) > 0.1 {
		return 0
	}
	logger.Debug("Need ExpandVol once volume:%v -- totalsize:%v -- freesize:%v", UUID, ret.TotalSpace, ret.FreeSpace)

	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("ExpandVol once volume:%v failed,Dial to volmgr error:%v", UUID, err)
		return -1
	}
	defer conn.Close()
	vc := vp.NewVolMgrClient(conn)
	pExpandVolRSReq := &vp.ExpandVolRSReq{
		VolID:  UUID,
		UsedRS: used,
	}
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Second)
	pExpandVolRSAck, err := vc.ExpandVolRS(ctx, pExpandVolRSReq)
	if err != nil {
		logger.Error("ExpandVol once volume:%v failed, VolMgr return error:%v", UUID, err)
		return -1
	}
	if pExpandVolRSAck.Ret == -1 {
		logger.Error("ExpandVol once volume:%v failed, VolMgr return -1", UUID)
		return -1
	} else if pExpandVolRSAck.Ret == 0 {
		logger.Error("ExpandVol volume:%v once failed, VolMgr return 0 because volume totalsize not enough expand", UUID)
		return 0
	}

	// Volmgr granted new block groups; mirror them into the metanode, and
	// roll back the volmgr allocation if that fails.
	if UpdateMetaForExpandVol(UUID, pExpandVolRSAck) != 0 {
		logger.Error("ExpandVol volume:%v once volmgr success but update metanode fail, so rollback volmgr this expand resource", UUID)
		pDelReq := &vp.DelVolRSForExpandReq{
			VolID:       UUID,
			BlockGroups: pExpandVolRSAck.BlockGroups,
		}
		pDelAck, err := vc.DelVolRSForExpand(ctx, pDelReq)
		if err != nil || pDelAck.Ret != 0 {
			logger.Error("ExpandVol once volume:%v volmgr success but update meta failed, then rollback volmgr error", UUID)
		}
		return -1
	}
	return 1
}
// UpdateMetaForExpandVol pushes the block groups newly granted by the volume
// manager (ack) into volume UUID's metanode namespace. Returns 0 on success,
// -1 on any failure.
//
// Fix: the "return not equal 0" log line had two %v verbs but only one
// argument; the metanode result code is now supplied.
func UpdateMetaForExpandVol(UUID string, ack *vp.ExpandVolRSAck) int {
	// Convert volmgr block groups to the metanode representation.
	var mpBlockGroups []*mp.BlockGroup
	for _, v := range ack.BlockGroups {
		mpBlockGroups = append(mpBlockGroups, BlockGroupVp2Mp(v))
	}

	conn2, err := DialMeta(UUID)
	if err != nil {
		logger.Error("ExpandVol volume:%v once volmgr success but Dial to metanode fail :%v", UUID, err)
		return -1
	}
	defer conn2.Close()
	mc := mp.NewMetaNodeClient(conn2)
	pmExpandNameSpaceReq := &mp.ExpandNameSpaceReq{
		VolID:       UUID,
		BlockGroups: mpBlockGroups,
	}
	ctx2, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pmExpandNameSpaceAck, err := mc.ExpandNameSpace(ctx2, pmExpandNameSpaceReq)
	if err != nil {
		logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return error:%v", UUID, err)
		return -1
	}
	if pmExpandNameSpaceAck.Ret != 0 {
		logger.Error("ExpandVol volume:%v once volmgr success but MetaNode return not equal 0:%v", UUID, pmExpandNameSpaceAck.Ret)
		return -1
	}
	return 0
}
// ExpandVolTS grows volume UUID's total-size quota by expandQuota (a numeric
// string) via the volume manager; used by the CLI. Returns 0 on success, -1
// on failure.
//
// Fix: the quota string is now validated — previously a malformed value was
// silently treated as 0.
func ExpandVolTS(UUID string, expandQuota string) int32 {
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("ExpandVol failed,Dial to volmgr fail :%v", err)
		return -1
	}
	defer conn.Close()

	tmpExpandQuota, err := strconv.Atoi(expandQuota)
	if err != nil {
		logger.Error("Expand Vol:%v TotalSize:%v invalid quota :%v", UUID, expandQuota, err)
		return -1
	}

	vc := vp.NewVolMgrClient(conn)
	pExpandVolTSReq := &vp.ExpandVolTSReq{
		VolID:       UUID,
		ExpandQuota: int32(tmpExpandQuota),
	}
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Second)
	pExpandVolTSAck, err := vc.ExpandVolTS(ctx, pExpandVolTSReq)
	if err != nil {
		logger.Error("Expand Vol:%v TotalSize:%v but VolMgr return error:%v", UUID, expandQuota, err)
		return -1
	}
	if pExpandVolTSAck.Ret != 0 {
		logger.Error("Expand Vol:%v TotalSize:%v but VolMgr return -1", UUID, expandQuota)
		return -1
	}
	return 0
}
// Migrate asks the volume manager to migrate all blocks off the datanode at
// ip:port (e.g. a failed node) onto healthy datanodes. Returns 0 on success,
// -1 on failure.
//
// Fix: the port string is now validated — previously a malformed port was
// silently treated as 0.
func Migrate(ip string, port string) int32 {
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("Migrate DataNode failed,Dial to volmgr fail :%v", err)
		return -1
	}
	defer conn.Close()

	dport, err := strconv.Atoi(port)
	if err != nil {
		logger.Error("Migrate bad DataNode(%v:%v) invalid port :%v", ip, port, err)
		return -1
	}

	vc := vp.NewVolMgrClient(conn)
	pMigrateReq := &vp.MigrateReq{
		DataNodeIP:   ip,
		DataNodePort: int32(dport),
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	_, err = vc.Migrate(ctx, pMigrateReq)
	if err != nil {
		logger.Error("Migrate bad DataNode(%v:%v) all Blocks not finished err : %v", ip, port, err)
		return -1
	}
	return 0
}
// GetVolInfo fetches metadata for the volume with the given UUID from the
// volume manager. Returns (0, ack) on success, (-1, nil) when the dial
// fails, and (1, nil) when the RPC fails or reports a non-zero result.
func GetVolInfo(name string) (int32, *vp.GetVolInfoAck) {
	conn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("GetVolInfo failed,Dial to volmgr fail :%v", err)
		return -1, nil
	}
	defer conn.Close()

	req := &vp.GetVolInfoReq{UUID: name}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	ack, err := vp.NewVolMgrClient(conn).GetVolInfo(ctx, req)
	if err != nil || ack.Ret != 0 {
		return 1, nil
	}
	return 0, ack
}
// SnapShootVol asks the metanode to snapshot volume uuid's namespace.
// Returns 0 on success, -1 on any failure.
func SnapShootVol(uuid string) int32 {
	conn, err := DialMeta(uuid)
	if err != nil {
		logger.Error("SnapShootVol failed,Dial to metanode fail :%v", err)
		return -1
	}
	defer conn.Close()

	req := &mp.SnapShootNameSpaceReq{VolID: uuid, Type: 0}
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Second)
	ack, err := mp.NewMetaNodeClient(conn).SnapShootNameSpace(ctx, req)
	if err != nil {
		logger.Error("SnapShootVol failed,grpc func err :%v", err)
		return -1
	}
	if ack.Ret != 0 {
		logger.Error("SnapShootVol failed,rpc func ret:%v", ack.Ret)
		return -1
	}
	return 0
}
// GetVolumeLeader resolves the metanode leader address for volume uuid,
// returning the literal "no leader" when resolution fails.
func GetVolumeLeader(uuid string) string {
	if leader, err := GetLeader(uuid); err == nil {
		return leader
	}
	return "no leader"
}
// DeleteVol removes volume uuid in two phases: first the metanode namespace,
// then the volume manager record. Returns 0 on success, -1 on any failure.
func DeleteVol(uuid string) int32 {
	// Phase 1: drop the namespace on the metanode.
	metaConn, err := DialMeta(uuid)
	if err != nil {
		logger.Error("DeleteVol failed,Dial to metanode fail :%v\n", err)
		return -1
	}
	defer metaConn.Close()

	delNsReq := &mp.DeleteNameSpaceReq{VolID: uuid, Type: 0}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	delNsAck, err := mp.NewMetaNodeClient(metaConn).DeleteNameSpace(ctx, delNsReq)
	if err != nil {
		return -1
	}
	if delNsAck.Ret != 0 {
		logger.Error("DeleteNameSpace failed :%v", delNsAck.Ret)
		return -1
	}

	// Phase 2: remove the volume from the volume manager.
	volConn, err := DialVolmgr(VolMgrAddr)
	if err != nil {
		logger.Error("deleteVol failed,Dial to volmgr fail :%v", err)
		return -1
	}
	defer volConn.Close()

	delVolReq := &vp.DeleteVolReq{UUID: uuid}
	ctx, _ = context.WithTimeout(context.Background(), 5*time.Second)
	delVolAck, err := vp.NewVolMgrClient(volConn).DeleteVol(ctx, delVolReq)
	if err != nil {
		logger.Error("DeleteVol failed,grpc func err :%v", err)
		return -1
	}
	if delVolAck.Ret != 0 {
		logger.Error("DeleteVol failed,grpc func ret :%v", delVolAck.Ret)
		return -1
	}
	return 0
}
// GetFSInfo fetches filesystem usage (total/free space) for volume name from
// its metanode. Returns (0, ack) on success, (-1, nil) when the dial fails,
// and (1, nil) when the RPC fails or reports a non-zero result.
func GetFSInfo(name string) (int32, *mp.GetFSInfoAck) {
	conn, err := DialMeta(name)
	if err != nil {
		logger.Error("GetFSInfo failed,Dial to metanode fail :%v\n", err)
		return -1, nil
	}
	defer conn.Close()

	req := &mp.GetFSInfoReq{VolID: name}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	ack, err := mp.NewMetaNodeClient(conn).GetFSInfo(ctx, req)
	if err != nil {
		logger.Error("GetFSInfo failed,grpc func err :%v", err)
		return 1, nil
	}
	if ack.Ret != 0 {
		logger.Error("GetFSInfo failed,grpc func ret :%v", ack.Ret)
		return 1, nil
	}
	return 0, ack
}
// OpenFileSystem opens volume UUID and returns a client handle, or nil when
// the metanode leader cannot be resolved or dialed. It also starts a
// background watcher that re-resolves the leader every 500ms and re-dials
// when it changes, keeping cfs.Conn pointed at the current leader.
// NOTE(review): the ticker and its goroutine are never stopped, so every
// handle leaks one goroutine for the process lifetime — confirm callers
// only open a filesystem once.
func OpenFileSystem(UUID string) *CFS {
	leader, err := GetLeader(UUID)
	if err != nil {
		return nil
	}
	conn, err := DialMeta(UUID)
	if conn == nil || err != nil {
		return nil
	}
	cfs := CFS{VolID: UUID, Conn: conn, Leader: leader, DataConn: make(map[string]*grpc.ClientConn)}
	ticker := time.NewTicker(time.Millisecond * 500)
	go func() {
		for range ticker.C {
			leader, err := GetLeader(UUID)
			if err != nil {
				// Leader lost: tear down the connection so RPC paths
				// block in their cfs.Conn != nil wait loops.
				cfs.Leader = ""
				if cfs.Conn != nil {
					cfs.Conn.Close()
				}
				cfs.Conn = nil
				logger.Error("Leader Timer : Get leader failed ,volumeID : %s", UUID)
				continue
			}
			if leader != cfs.Leader {
				// Leader changed: dial the new one before swapping it in.
				conn, err := DialMeta(UUID)
				if conn == nil || err != nil {
					logger.Error("Leader Timer : DialMeta failed ,volumeID : %s", UUID)
					continue
				}
				cfs.Leader = leader
				if cfs.Conn != nil {
					cfs.Conn.Close()
				}
				cfs.Conn = conn
			}
		}
	}()
	return &cfs
}
// CreateDirDirect creates directory `name` under parent inode `pinode` on the
// metanode. Returns (metanode result code, new inode), or (-1, 0) when no
// leader connection becomes available or both RPC attempts fail.
func (cfs *CFS) CreateDirDirect(pinode uint64, name string) (int32, uint64) {
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfs.Conn == nil {
		return -1, 0
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pCreateDirDirectReq := &mp.CreateDirDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pCreateDirDirectAck, err := mc.CreateDirDirect(ctx, pCreateDirDirectReq)
	if err != nil {
		// Single retry: back off 1s, wait again for a (possibly re-dialed)
		// leader connection, then reissue the same request once.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
			continue
		}
		if cfs.Conn == nil {
			return -1, 0
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx, _ = context.WithTimeout(context.Background(), 5*time.Second)
		pCreateDirDirectAck, err = mc.CreateDirDirect(ctx, pCreateDirDirectReq)
		if err != nil {
			return -1, 0
		}
	}
	return pCreateDirDirectAck.Ret, pCreateDirDirectAck.Inode
}
// GetInodeInfoDirect looks up entry `name` under parent inode `pinode` and
// returns (metanode result code, inode number, inode info). Returns
// (-1, 0, nil) when no leader connection becomes available or both RPC
// attempts fail.
func (cfs *CFS) GetInodeInfoDirect(pinode uint64, name string) (int32, uint64, *mp.InodeInfo) {
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfs.Conn == nil {
		return -1, 0, nil
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pGetInodeInfoDirectReq := &mp.GetInodeInfoDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pGetInodeInfoDirectAck, err := mc.GetInodeInfoDirect(ctx, pGetInodeInfoDirectReq)
	if err != nil {
		// Single retry after a 1s back-off and a fresh wait for the leader.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
			continue
		}
		if cfs.Conn == nil {
			return -1, 0, nil
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		pGetInodeInfoDirectAck, err = mc.GetInodeInfoDirect(ctx, pGetInodeInfoDirectReq)
		if err != nil {
			return -1, 0, nil
		}
	}
	return pGetInodeInfoDirectAck.Ret, pGetInodeInfoDirectAck.Inode, pGetInodeInfoDirectAck.InodeInfo
}
// StatDirect stats entry `name` under parent inode `pinode` and returns
// (metanode result code, inode type flag, inode number). Returns
// (-1, false, 0) when no leader connection becomes available or both RPC
// attempts fail.
func (cfs *CFS) StatDirect(pinode uint64, name string) (int32, bool, uint64) {
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfs.Conn == nil {
		return -1, false, 0
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pStatDirectReq := &mp.StatDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pStatDirectAck, err := mc.StatDirect(ctx, pStatDirectReq)
	if err != nil {
		// Single retry after a 1s back-off and a fresh wait for the leader.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
			continue
		}
		if cfs.Conn == nil {
			return -1, false, 0
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		pStatDirectAck, err = mc.StatDirect(ctx, pStatDirectReq)
		if err != nil {
			return -1, false, 0
		}
	}
	return pStatDirectAck.Ret, pStatDirectAck.InodeType, pStatDirectAck.Inode
}
// ListDirect lists the directory entries under inode pinode. Returns the
// metanode result code and the entries, or (-1, nil) when no leader
// connection becomes available or the RPC fails.
func (cfs *CFS) ListDirect(pinode uint64) (int32, []*mp.DirentN) {
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for retry := 0; retry < 10 && cfs.Conn == nil; retry++ {
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1, nil
	}
	req := &mp.ListDirectReq{
		PInode: pinode,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 60*time.Second)
	ack, err := mp.NewMetaNodeClient(cfs.Conn).ListDirect(ctx, req)
	if err != nil {
		return -1, nil
	}
	return ack.Ret, ack.Dirents
}
// DeleteDirDirect recursively deletes directory `name` under parent inode
// `pinode`: it lists the directory, deletes every child (files via
// DeleteFileDirect, subdirectories recursively), then removes the directory
// entry itself. Returns 0 when the directory does not exist, -1 on RPC
// failure, otherwise the metanode result code.
func (cfs *CFS) DeleteDirDirect(pinode uint64, name string) int32 {
	ret, _, inode := cfs.StatDirect(pinode, name)
	if ret != 0 {
		// Already gone — treat as success.
		logger.Debug("DeleteDirDirect StatDirect Failed , no such dir")
		return 0
	}
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfs.Conn == nil {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pListDirectReq := &mp.ListDirectReq{
		PInode: inode,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pListDirectAck, err := mc.ListDirect(ctx, pListDirectReq)
	if err != nil {
		logger.Error("DeleteDirDirect ListDirect :%v\n", err)
		return -1
	}
	// Delete children first; abort on the first failure so the parent is
	// not removed while children remain.
	for _, v := range pListDirectAck.Dirents {
		if v.InodeType {
			ret := cfs.DeleteFileDirect(inode, v.Name)
			if ret != 0 {
				return ret
			}
		} else {
			ret := cfs.DeleteDirDirect(inode, v.Name)
			if ret != 0 {
				return ret
			}
		}
	}
	// Finally remove the (now empty) directory entry itself.
	pDeleteDirDirectReq := &mp.DeleteDirDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ = context.WithTimeout(context.Background(), 60*time.Second)
	pDeleteDirDirectAck, err := mc.DeleteDirDirect(ctx, pDeleteDirDirectReq)
	if err != nil {
		return -1
	}
	return pDeleteDirDirectAck.Ret
}
// RenameDirect moves the entry oldname under oldpinode to newname under
// newpinode on the metanode. Returns the metanode result code, or -1 when no
// leader connection becomes available or the RPC fails.
func (cfs *CFS) RenameDirect(oldpinode uint64, oldname string, newpinode uint64, newname string) int32 {
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for retry := 0; retry < 10 && cfs.Conn == nil; retry++ {
		time.Sleep(300 * time.Millisecond)
	}
	if cfs.Conn == nil {
		return -1
	}
	req := &mp.RenameDirectReq{
		OldPInode: oldpinode,
		OldName:   oldname,
		NewPInode: newpinode,
		NewName:   newname,
		VolID:     cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	ack, err := mp.NewMetaNodeClient(cfs.Conn).RenameDirect(ctx, req)
	if err != nil {
		return -1
	}
	return ack.Ret
}
// CreateFileDirect creates file `name` under parent inode `pinode` and
// returns a CFile handle with an empty write buffer. When O_EXCL is set and
// the name already exists it returns (17, nil); any metanode failure
// forwards the result code with a nil handle.
func (cfs *CFS) CreateFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
	if flags&os.O_EXCL != 0 {
		if ret, _, _ := cfs.StatDirect(pinode, name); ret == 0 {
			return 17, nil
		}
	}
	ret, inode := cfs.createFileDirect(pinode, name)
	if ret != 0 {
		return ret, nil
	}
	cfile := &CFile{
		OpenFlag:      flags,
		cfs:           cfs,
		FileSize:      0,
		ParentInodeID: pinode,
		Inode:         inode,
		Name:          name,
		ReaderMap:     make(map[fuse.HandleID]*ReaderInfo),
		wBuffer: wBuffer{
			buffer:   new(bytes.Buffer),
			freeSize: BufferSize,
		},
	}
	return 0, cfile
}
// OpenFileDirect opens file `name` under parent inode `pinode` and returns a
// CFile handle populated from the file's chunk list. For writable opens
// (O_WRONLY/O_RDWR) the write buffer is seeded from the last chunk so appends
// continue where the file left off; for read-only opens a fresh buffer is
// used. Returns the metanode result code with nil on failure.
func (cfs *CFS) OpenFileDirect(pinode uint64, name string, flags int) (int32, *CFile) {
	var ret int32
	var writer int32
	var tmpFileSize int64
	cfile := CFile{}
	if (flags&os.O_WRONLY) != 0 || (flags&os.O_RDWR) != 0 {
		chunkInfos := make([]*mp.ChunkInfoWithBG, 0)
		var inode uint64
		if ret, chunkInfos, inode = cfs.GetFileChunksDirect(pinode, name); ret != 0 {
			return ret, nil
		}
		if len(chunkInfos) > 0 {
			// File has data: size is the sum of chunk sizes, and the write
			// buffer resumes inside the last chunk.
			for i := range chunkInfos {
				tmpFileSize += int64(chunkInfos[i].ChunkSize)
			}
			lastChunk := chunkInfos[len(chunkInfos)-1]
			tmpBuffer := wBuffer{
				buffer:    new(bytes.Buffer),
				freeSize:  BufferSize - (lastChunk.ChunkSize % BufferSize),
				chunkInfo: lastChunk,
			}
			cfile = CFile{
				OpenFlag:      flags,
				cfs:           cfs,
				Writer:        writer,
				FileSize:      tmpFileSize,
				wBuffer:       tmpBuffer,
				ParentInodeID: pinode,
				Inode:         inode,
				Name:          name,
				chunks:        chunkInfos,
				ReaderMap:     make(map[fuse.HandleID]*ReaderInfo),
			}
		} else {
			// Empty file: fresh buffer, no chunk list.
			tmpBuffer := wBuffer{
				buffer:   new(bytes.Buffer),
				freeSize: BufferSize,
			}
			cfile = CFile{
				OpenFlag:      flags,
				cfs:           cfs,
				Writer:        writer,
				FileSize:      0,
				ParentInodeID: pinode,
				Inode:         inode,
				Name:          name,
				wBuffer:       tmpBuffer,
				ReaderMap:     make(map[fuse.HandleID]*ReaderInfo),
			}
		}
	} else {
		// Read-only open.
		chunkInfos := make([]*mp.ChunkInfoWithBG, 0)
		var inode uint64
		if ret, chunkInfos, inode = cfs.GetFileChunksDirect(pinode, name); ret != 0 {
			logger.Error("OpenFile failed , GetFileChunksDirect failed !")
			return ret, nil
		}
		for i := range chunkInfos {
			tmpFileSize += int64(chunkInfos[i].ChunkSize)
		}
		tmpBuffer := wBuffer{
			buffer:   new(bytes.Buffer),
			freeSize: BufferSize,
		}
		cfile = CFile{
			OpenFlag:      flags,
			cfs:           cfs,
			Writer:        writer,
			FileSize:      tmpFileSize,
			wBuffer:       tmpBuffer,
			ParentInodeID: pinode,
			Inode:         inode,
			Name:          name,
			chunks:        chunkInfos,
			ReaderMap:     make(map[fuse.HandleID]*ReaderInfo),
		}
	}
	return 0, &cfile
}
// UpdateOpenFileDirect refreshes cfile's write buffer from the file's current
// chunk list when opened writable; read-only opens are a no-op. Returns 0 on
// success, otherwise the metanode result code.
func (cfs *CFS) UpdateOpenFileDirect(pinode uint64, name string, cfile *CFile, flags int) int32 {
	if flags&(os.O_WRONLY|os.O_RDWR) == 0 {
		return 0
	}
	ret, chunkInfos, _ := cfs.GetFileChunksDirect(pinode, name)
	if ret != 0 {
		return ret
	}
	if len(chunkInfos) > 0 {
		// Resume the write buffer inside the last chunk.
		last := chunkInfos[len(chunkInfos)-1]
		cfile.wBuffer = wBuffer{
			buffer:    new(bytes.Buffer),
			freeSize:  BufferSize - (last.ChunkSize % BufferSize),
			chunkInfo: last,
		}
	}
	return 0
}
// createFileDirect issues the CreateFileDirect RPC for `name` under parent
// inode `pinode` and returns (result code, new inode). Known non-zero result
// codes (1, 2, 17) are passed through with inode 0; -1 means no leader
// connection became available or both RPC attempts failed.
func (cfs *CFS) createFileDirect(pinode uint64, name string) (int32, uint64) {
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfs.Conn == nil {
		return -1, 0
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pCreateFileDirectReq := &mp.CreateFileDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pCreateFileDirectAck, err := mc.CreateFileDirect(ctx, pCreateFileDirectReq)
	if err != nil || pCreateFileDirectAck.Ret != 0 {
		// Retry once after a 1s back-off; note the retry also fires on a
		// non-zero Ret, so the create may be reissued after a soft failure.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
			continue
		}
		if cfs.Conn == nil {
			return -1, 0
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		pCreateFileDirectAck, err = mc.CreateFileDirect(ctx, pCreateFileDirectReq)
		if err != nil {
			logger.Error("CreateFileDirect failed,grpc func failed :%v\n", err)
			return -1, 0
		}
	}
	// Map known metanode result codes through unchanged.
	if pCreateFileDirectAck.Ret == 1 {
		return 1, 0
	}
	if pCreateFileDirectAck.Ret == 2 {
		return 2, 0
	}
	if pCreateFileDirectAck.Ret == 17 {
		return 17, 0
	}
	return 0, pCreateFileDirectAck.Inode
}
// DeleteFileDirect deletes file `name` under parent inode `pinode`. It first
// best-effort deletes every chunk replica on the datanodes (failures are
// logged, never fatal — orphaned chunks are tolerated), then removes the
// metadata entry on the metanode with one retry. Returns the metanode result
// code, or -1 when no leader connection becomes available or both metadata
// attempts fail.
func (cfs *CFS) DeleteFileDirect(pinode uint64, name string) int32 {
	ret, chunkInfos, _ := cfs.GetFileChunksDirect(pinode, name)
	if ret == 0 && chunkInfos != nil {
		// Phase 1: best-effort removal of each chunk replica.
		for _, v1 := range chunkInfos {
			for _, v2 := range v1.BlockGroup.BlockInfos {
				addr := utils.InetNtoa(v2.DataNodeIP).String() + ":" + strconv.Itoa(int(v2.DataNodePort))
				conn, err := cfs.GetDataConn(addr)
				if err != nil || conn == nil {
					conn, err = DialData(addr)
					if err != nil || conn == nil {
						logger.Error("DeleteFile failed,Dial to datanode fail :%v\n", err)
						//return 0
					} else {
						cfs.SetDataConn(addr, conn)
					}
				}
				// NOTE(review): conn may still be nil here if both the cache
				// lookup and the dial failed; the client call below would
				// then error and take the retry path.
				dc := dp.NewDataNodeClient(conn)
				dpDeleteChunkReq := &dp.DeleteChunkReq{
					ChunkID: v1.ChunkID,
					BlockID: v2.BlockID,
				}
				ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
				_, err = dc.DeleteChunk(ctx, dpDeleteChunkReq)
				if err != nil {
					// Retry once via the (possibly refreshed) cached
					// connection; on a second failure drop it from the cache.
					time.Sleep(time.Second)
					conn, err = cfs.GetDataConn(addr)
					if err != nil || conn == nil {
						logger.Error("DeleteChunk failed,Dial to metanode fail :%v\n", err)
					} else {
						dc = dp.NewDataNodeClient(conn)
						ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
						_, err = dc.DeleteChunk(ctx, dpDeleteChunkReq)
						if err != nil {
							cfs.DelDataConn(addr)
							logger.Error("DeleteChunk failed,grpc func failed :%v\n", err)
						}
					}
				}
				//conn.Close()
			}
		}
	}
	// Phase 2: remove the metadata entry on the metanode.
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfs.Conn == nil {
		return -1
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	mpDeleteFileDirectReq := &mp.DeleteFileDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	mpDeleteFileDirectAck, err := mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
	if err != nil || mpDeleteFileDirectAck.Ret != 0 {
		// Single retry after a 1s back-off and a fresh wait for the leader.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
			continue
		}
		if cfs.Conn == nil {
			return -1
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		mpDeleteFileDirectAck, err = mc.DeleteFileDirect(ctx, mpDeleteFileDirectReq)
		if err != nil {
			logger.Error("DeleteFile failed,grpc func err :%v\n", err)
			return -1
		}
	}
	return mpDeleteFileDirectAck.Ret
}
// GetFileChunksDirect fetches the chunk list (with block-group placement) and
// inode for file `name` under parent inode `pinode`. Returns (-1, nil, 0)
// when no leader connection becomes available or both RPC attempts fail.
func (cfs *CFS) GetFileChunksDirect(pinode uint64, name string) (int32, []*mp.ChunkInfoWithBG, uint64) {
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish cfs.Conn.
	for i := 0; i < 10; i++ {
		if cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfs.Conn == nil {
		return -1, nil, 0
	}
	mc := mp.NewMetaNodeClient(cfs.Conn)
	pGetFileChunksDirectReq := &mp.GetFileChunksDirectReq{
		PInode: pinode,
		Name:   name,
		VolID:  cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pGetFileChunksDirectAck, err := mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
	if err != nil || pGetFileChunksDirectAck.Ret != 0 {
		// Single retry after a 1s back-off and a fresh wait for the leader.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
			continue
		}
		if cfs.Conn == nil {
			return -1, nil, 0
		}
		mc = mp.NewMetaNodeClient(cfs.Conn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		pGetFileChunksDirectAck, err = mc.GetFileChunksDirect(ctx, pGetFileChunksDirectReq)
		if err != nil {
			logger.Error("GetFileChunks failed,grpc func failed :%v\n", err)
			return -1, nil, 0
		}
	}
	return pGetFileChunksDirectAck.Ret, pGetFileChunksDirectAck.ChunkInfos, pGetFileChunksDirectAck.Inode
}
// wBuffer is the staging buffer for writes into the current chunk; Write
// fills it and push/Flush ship it to the datanodes.
type wBuffer struct {
	freeSize    int32               // bytes still available before the buffer must be flushed
	chunkInfo   *mp.ChunkInfoWithBG // chunk (and placement) this buffer writes into; nil before first allocation
	buffer      *bytes.Buffer       // buffered data not yet sent
	startOffset int64               // file offset of the first buffered byte
	endOffset   int64               // file offset one past the last buffered byte
}
// ReaderInfo is per-fuse-handle read state: the chunk currently buffered in
// memory and the channel streamread delivers chunk data on.
type ReaderInfo struct {
	LastOffset int64              // last read offset seen on this handle
	readBuf    []byte             // contents of the currently buffered chunk
	Ch         chan *bytes.Buffer // delivery channel filled by streamread
}
// CFile is an open-file handle: identity within the volume, the buffered
// write state, and per-fuse-handle read state.
type CFile struct {
	cfs           *CFS   // owning volume client
	ParentInodeID uint64 // inode of the containing directory
	Name          string
	Inode         uint64
	OpenFlag      int   // os.O_* flags the file was opened with
	FileSize      int64 // current logical size, including buffered-but-unflushed writes
	Status        int32 // 0 ok; non-zero disables Write/push/Flush
	// for write
	//WMutex sync.Mutex
	Writer int32
	//FirstW bool
	wBuffer     wBuffer        // staging buffer for the chunk being written
	wgWriteReps sync.WaitGroup // outstanding datanode write replies
	// for read
	//lastoffset int64
	RMutex sync.Mutex
	chunks []*mp.ChunkInfoWithBG // chunk list fetched at open time
	//readBuf []byte
	ReaderMap map[fuse.HandleID]*ReaderInfo // per-fuse-handle read state
}
// AllocateChunk asks the metanode for a new chunk (with block-group
// placement) for this file's volume. Returns (result code, chunk info), or
// (-1, nil) when no leader connection becomes available or both RPC attempts
// fail.
func (cfile *CFile) AllocateChunk() (int32, *mp.ChunkInfoWithBG) {
	// Wait up to ~3s (10 x 300ms) for the leader watcher to establish Conn.
	for i := 0; i < 10; i++ {
		if cfile.cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfile.cfs.Conn == nil {
		return -1, nil
	}
	mc := mp.NewMetaNodeClient(cfile.cfs.Conn)
	pAllocateChunkReq := &mp.AllocateChunkReq{
		VolID: cfile.cfs.VolID,
	}
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	pAllocateChunkAck, err := mc.AllocateChunk(ctx, pAllocateChunkReq)
	if err != nil || pAllocateChunkAck.Ret != 0 {
		// Single retry after a 1s back-off and a fresh wait for the leader.
		time.Sleep(time.Second)
		for i := 0; i < 10; i++ {
			if cfile.cfs.Conn != nil {
				break
			}
			time.Sleep(300 * time.Millisecond)
			continue
		}
		if cfile.cfs.Conn == nil {
			return -1, nil
		}
		mc = mp.NewMetaNodeClient(cfile.cfs.Conn)
		ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
		pAllocateChunkAck, err = mc.AllocateChunk(ctx, pAllocateChunkReq)
		if err != nil {
			logger.Error("AllocateChunk failed,grpc func failed :%v\n", err)
			return -1, nil
		}
	}
	return pAllocateChunkAck.Ret, pAllocateChunkAck.ChunkInfo
}
// generateRandomNumber returns count distinct pseudo-random integers from the
// half-open interval [start, end), in random order. It returns nil when the
// interval is inverted or too small to supply count distinct values, and an
// empty slice when count <= 0.
//
// Fix: the previous implementation re-drew on collisions (O(count*n) with
// unbounded retries); taking a prefix of a shuffled permutation gives the
// same contract in one pass.
func generateRandomNumber(start int, end int, count int) []int {
	if end < start || (end-start) < count {
		return nil
	}
	if count <= 0 {
		return make([]int, 0)
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	nums := make([]int, count)
	for i, v := range r.Perm(end - start)[:count] {
		nums[i] = v + start
	}
	return nums
}
// streamread streams [offset, offset+size) of chunk chunkidx from one of its
// replica datanodes and delivers the data on ch. Replicas are tried in a
// random order; on dial or stream failure the next replica is tried. If all
// replicas fail, an empty buffer is sent so the reader does not block.
// NOTE(review): generateRandomNumber(0, 3, 3) assumes exactly 3 replicas per
// chunk; fewer BlockInfos would index out of the idxs slice — confirm the
// replication factor is fixed at 3.
func (cfile *CFile) streamread(chunkidx int, ch chan *bytes.Buffer, offset int64, size int64) {
	var conn *grpc.ClientConn
	var err error
	var buffer *bytes.Buffer
	outflag := 0 // replicas that failed outright (dial/stream error)
	inflag := 0  // consecutive mid-stream Recv failures
	idxs := generateRandomNumber(0, 3, 3)
	for n := 0; n < len(cfile.chunks[chunkidx].BlockGroup.BlockInfos); n++ {
		i := idxs[n]
		buffer = new(bytes.Buffer)
		addr := utils.InetNtoa(cfile.chunks[chunkidx].BlockGroup.BlockInfos[i].DataNodeIP).String() + ":" + strconv.Itoa(int(cfile.chunks[chunkidx].BlockGroup.BlockInfos[i].DataNodePort))
		// Use a cached datanode connection if available, else dial and cache.
		conn, err = cfile.cfs.GetDataConn(addr)
		if err != nil || conn == nil {
			conn, err = DialData(addr)
			if err != nil || conn == nil {
				logger.Error("streamread failed,Dial to datanode fail :%v", err)
				outflag++
				continue
			} else {
				cfile.cfs.SetDataConn(addr, conn)
			}
		}
		dc := dp.NewDataNodeClient(conn)
		streamreadChunkReq := &dp.StreamReadChunkReq{
			ChunkID:  cfile.chunks[chunkidx].ChunkID,
			BlockID:  cfile.chunks[chunkidx].BlockGroup.BlockInfos[i].BlockID,
			Offset:   offset,
			Readsize: size,
		}
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		stream, err := dc.StreamReadChunk(ctx, streamreadChunkReq)
		if err != nil {
			// Stream setup failed: re-dial once and retry this replica
			// before falling through to the next.
			conn, err = DialData(addr)
			if err != nil || conn == nil {
				cfile.cfs.DelDataConn(addr)
				logger.Error("StreamReadChunk DialData error:%v, so retry other datanode!", err)
				outflag++
				continue
			} else {
				cfile.cfs.SetDataConn(addr, conn)
				dc = dp.NewDataNodeClient(conn)
				streamreadChunkReq := &dp.StreamReadChunkReq{
					ChunkID:  cfile.chunks[chunkidx].ChunkID,
					BlockID:  cfile.chunks[chunkidx].BlockGroup.BlockInfos[i].BlockID,
					Offset:   offset,
					Readsize: size,
				}
				ctx, _ = context.WithTimeout(context.Background(), 10*time.Second)
				stream, err = dc.StreamReadChunk(ctx, streamreadChunkReq)
				if err != nil {
					logger.Error("StreamReadChunk StreamReadChunk error:%v, so retry other datanode!", err)
					outflag++
					continue
				}
			}
		}
		// Drain the stream into buffer until EOF or a Recv error.
		for {
			ack, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				logger.Error("=== streamreadChunkReq Recv err:%v ===", err)
				inflag++
				outflag++
				break
			}
			if ack != nil {
				if len(ack.Databuf) == 0 {
					continue
				} else {
					buffer.Write(ack.Databuf)
					inflag = 0
				}
			} else {
				continue
			}
		}
		if inflag == 0 {
			// Clean read from this replica: deliver and stop.
			ch <- buffer
			break
		} else if inflag == 3 {
			// Three consecutive Recv failures: give up with an empty buffer.
			buffer = new(bytes.Buffer)
			buffer.Write([]byte{})
			logger.Error("Stream Read the chunk three copy Recv error")
			ch <- buffer
			break
		} else if inflag < 3 {
			logger.Error("Stream Read the chunk %v copy Recv error, so need retry other datanode!!!", inflag)
			continue
		}
	}
	if outflag >= 3 {
		// Every replica failed: deliver an empty buffer so the reader unblocks.
		buffer = new(bytes.Buffer)
		buffer.Write([]byte{})
		logger.Error("Stream Read the chunk three copy Datanode error")
		ch <- buffer
	}
}
// Read copies up to readsize bytes starting at offset into *data for the
// given fuse handle and returns the number of bytes appended, or -1 on error.
// Data still sitting in the write buffer is served from memory; otherwise the
// request is mapped onto the chunk list and whole chunks are fetched via
// streamread and cached per handle in ReaderMap.
func (cfile *CFile) Read(handleID fuse.HandleID, data *[]byte, offset int64, readsize int64) int64 {
	// Fast path: serve from the un-flushed write buffer when the requested
	// range starts inside it.
	cache := cfile.wBuffer
	n := cache.buffer.Len()
	if n != 0 && offset >= cache.startOffset {
		cfile.ReaderMap[handleID].readBuf = cache.buffer.Bytes()
		if offset+readsize < cache.endOffset {
			*data = append(*data, cfile.ReaderMap[handleID].readBuf[offset:offset+readsize]...)
			return readsize
		}
		*data = append(*data, cfile.ReaderMap[handleID].readBuf[offset:cache.endOffset]...)
		return cache.endOffset - offset
	}
	if cfile.chunks == nil || len(cfile.chunks) == 0 {
		logger.Error("Read File but Chunks not exist")
		return -1
	}
	// Clamp the request to the end of the file.
	if offset+readsize > cfile.FileSize {
		readsize = cfile.FileSize - offset
	}
	var length int64
	var freeOffset int64
	var freeSize int64
	var beginChunkNum int
	var endChunkNum int
	// Locate the chunk containing offset; curOffset becomes the offset
	// within that chunk.
	curOffset := offset
	for i, v := range cfile.chunks {
		freeOffset = curOffset - int64(v.ChunkSize)
		if freeOffset < 0 {
			beginChunkNum = i
			break
		} else {
			curOffset = freeOffset
		}
	}
	// Locate the chunk containing the last requested byte.
	curSize := offset + readsize
	for i, v := range cfile.chunks {
		freeSize = curSize - int64(v.ChunkSize)
		if freeSize <= 0 {
			endChunkNum = i
			break
		} else {
			curSize = freeSize
		}
	}
	var eachReadLen int64
	freesize := readsize // bytes still to deliver
	if endChunkNum < beginChunkNum {
		logger.Error("This Read data from beginchunk:%v lager than endchunk:%v", beginChunkNum, endChunkNum)
		return -1
	}
	if beginChunkNum > len(cfile.chunks) || endChunkNum+1 > len(cfile.chunks) || beginChunkNum > cap(cfile.chunks) || endChunkNum+1 > cap(cfile.chunks) {
		logger.Error("Read begin or end chunk num not right")
		return -1
	}
	// Walk the chunks [beginChunkNum, endChunkNum], fetching each whole
	// chunk into the handle's readBuf cache on demand.
	//for i, _ := range cfile.chunks[beginChunkNum : endChunkNum+1] {
	for i := 0; i < len(cfile.chunks[beginChunkNum:endChunkNum+1]); i++ {
		index := i + beginChunkNum
		if curOffset+freesize < int64(cfile.chunks[index].ChunkSize) {
			eachReadLen = freesize
		} else {
			eachReadLen = int64(cfile.chunks[index].ChunkSize) - curOffset
		}
		if len(cfile.ReaderMap[handleID].readBuf) == 0 {
			// Cache miss: stream the entire chunk from a datanode replica.
			buffer := new(bytes.Buffer)
			cfile.ReaderMap[handleID].Ch = make(chan *bytes.Buffer)
			go cfile.streamread(index, cfile.ReaderMap[handleID].Ch, 0, int64(cfile.chunks[index].ChunkSize))
			buffer = <-cfile.ReaderMap[handleID].Ch
			if buffer.Len() == 0 {
				logger.Error("Recv chunk:%v from datanode size:%v , but retsize is 0", index, cfile.chunks[index].ChunkSize)
				return -1
			}
			cfile.ReaderMap[handleID].readBuf = buffer.Next(buffer.Len())
			buffer.Reset()
			buffer = nil
			//logger.Debug("#### Read chunk:%v == bufferlen:%v == curoffset:%v == eachlen:%v ==offset:%v == readsize:%v ####", index, len(cfile.ReaderMap[handleID].readBuf), curOffset, eachReadLen, offset, readsize)
		}
		buflen := int64(len(cfile.ReaderMap[handleID].readBuf))
		bufcap := int64(cap(cfile.ReaderMap[handleID].readBuf))
		if curOffset > buflen || curOffset > bufcap {
			logger.Error("== Read chunk:%v from datanode (offset:%v -- needreadsize:%v) lager than exist (buflen:%v -- bufcap:%v)\n", index, curOffset, eachReadLen, buflen, bufcap)
			return -1
		}
		// Copy from the cached chunk, truncating if fewer bytes arrived
		// than the chunk size claimed.
		if curOffset+eachReadLen > buflen {
			eachReadLen = buflen - curOffset
			*data = append(*data, cfile.ReaderMap[handleID].readBuf[curOffset:curOffset+eachReadLen]...)
		} else {
			*data = append(*data, cfile.ReaderMap[handleID].readBuf[curOffset:curOffset+eachReadLen]...)
		}
		curOffset += eachReadLen
		// Chunk consumed: drop the cache so the next chunk is fetched.
		if curOffset == int64(len(cfile.ReaderMap[handleID].readBuf)) {
			curOffset = 0
			cfile.ReaderMap[handleID].readBuf = []byte{}
		}
		freesize = freesize - eachReadLen
		length += eachReadLen
	}
	return length
}
// Write appends length bytes of buf to the file through the write buffer,
// allocating a new chunk at each chunkSize boundary and pushing the buffer to
// the datanodes whenever it fills. Returns the number of bytes consumed, -1
// on allocation-exhausted (ENOSPC) or push failure, -2 on other errors or
// when the file handle is in an error state.
//
// Fix: the second parameter was named `len`, shadowing the builtin inside
// the whole function body; renamed to `length`. Logic is unchanged.
func (cfile *CFile) Write(buf []byte, length int32) int32 {
	if cfile.Status != 0 {
		logger.Error("cfile status error , Write func return -2 ")
		return -2
	}
	var w int32
	w = 0
	for w < length {
		// A file size on a chunk boundary means the current chunk is full
		// (or none exists yet): allocate the next one.
		if (cfile.FileSize % chunkSize) == 0 {
			logger.Debug("need a new chunk...")
			var ret int32
			ret, cfile.wBuffer.chunkInfo = cfile.AllocateChunk()
			if ret != 0 {
				if ret == 28 /*ENOSPC*/ {
					return -1
				}
				return -2
			}
		}
		// Start a fresh buffer after the previous one was pushed.
		if cfile.wBuffer.freeSize == 0 {
			cfile.wBuffer.buffer = new(bytes.Buffer)
			cfile.wBuffer.freeSize = BufferSize
		}
		if length-w < cfile.wBuffer.freeSize {
			// Remaining input fits in the buffer: stage it and stop.
			if length != w {
				cfile.wBuffer.buffer.Write(buf[w:length])
				cfile.wBuffer.freeSize = cfile.wBuffer.freeSize - (length - w)
				cfile.wBuffer.startOffset = cfile.FileSize
				cfile.FileSize = cfile.FileSize + int64(length-w)
				cfile.wBuffer.endOffset = cfile.FileSize
				cfile.wBuffer.chunkInfo.ChunkSize = cfile.wBuffer.chunkInfo.ChunkSize + int32(length-w)
				w = length
			}
			break
		} else {
			// Fill the buffer completely; it will be pushed below.
			cfile.wBuffer.buffer.Write(buf[w : w+cfile.wBuffer.freeSize])
			w = w + cfile.wBuffer.freeSize
			cfile.wBuffer.startOffset = cfile.FileSize
			cfile.FileSize = cfile.FileSize + int64(cfile.wBuffer.freeSize)
			cfile.wBuffer.endOffset = cfile.FileSize
			cfile.wBuffer.chunkInfo.ChunkSize = cfile.wBuffer.chunkInfo.ChunkSize + int32(cfile.wBuffer.freeSize)
			cfile.wBuffer.freeSize = 0
		}
		if cfile.wBuffer.freeSize == 0 {
			ret := cfile.push()
			if ret != 0 {
				return -1
			}
		}
	}
	return w
}
// push flushes the currently staged write buffer to the datanodes.
// It is a no-op when no chunk has been allocated yet.
func (cfile *CFile) push() int32 {
	if cfile.Status != 0 {
		logger.Error("cfile status error , push func return err ")
		return -1
	}
	if cfile.wBuffer.chunkInfo == nil {
		return 0
	}
	// Snapshot the buffer state so send operates on a stable copy.
	snapshot := cfile.wBuffer
	return cfile.send(&snapshot)
}
// Flush ...
// Flush forces any partially filled write buffer out to the datanodes.
// A buffer whose freeSize is already 0 was pushed by Write (a fully filled
// e.g. 64MB chunk), so it is skipped to avoid sending the same data twice.
func (cfile *CFile) Flush() int32 {
	if cfile.Status != 0 {
		logger.Error("cfile status error , Flush func return err ")
		return cfile.Status
	}
	if cfile.wBuffer.freeSize == 0 || cfile.wBuffer.chunkInfo == nil {
		return 0
	}
	snapshot := cfile.wBuffer
	cfile.wBuffer.freeSize = 0
	logger.Debug("Flush!!!!! len %v name %v pinode %v", cfile.wBuffer.buffer.Len(), cfile.Name, cfile.ParentInodeID)
	return cfile.send(&snapshot)
}
// writeChunk sends one WriteChunk RPC to a single datanode replica and, on
// success, atomically increments the shared copies counter. A nil conn (no
// usable connection) simply counts as a failed replica. The replica wait
// group is always released, even on early return or panic.
//
// Fixes: the original had an empty `if conn == nil {}` branch (now a guard
// clause), dropped the context's cancel func (leaking the timeout's timer),
// and released the WaitGroup only on the fall-through path.
func (cfile *CFile) writeChunk(addr string, conn *grpc.ClientConn, req *dp.WriteChunkReq, blkgrpid uint32, copies *uint64) {
	defer cfile.wgWriteReps.Add(-1)
	if conn == nil {
		return
	}
	dc := dp.NewDataNodeClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // release timeout resources promptly
	ret, err := dc.WriteChunk(ctx, req)
	if err != nil {
		logger.Error("WriteChunk func err %v", err)
		cfile.cfs.DelDataConn(addr)
		return
	}
	if ret.Ret != 0 {
		logger.Error("WriteChunk Ret err %v", ret.Ret)
		cfile.cfs.DelDataConn(addr)
		return
	}
	atomic.AddUint64(copies, 1)
}
// send writes one staged buffer to all replicas of its chunk's block group
// and then records the chunk with the metanode. Up to 10 attempts are made;
// each failed attempt (fewer than 3 confirmed copies) allocates a fresh
// chunk and retries. On unrecoverable failure cfile.Status is set to 1 and
// returned; 0 means the data is fully persisted and indexed.
func (cfile *CFile) send(v *wBuffer) int32 {
	sendLen := v.buffer.Len()
	if sendLen <= 0 {
		return 0
	}
	dataBuf := v.buffer.Next(v.buffer.Len())
	var chunkWriteFlag bool
	for cnt := 0; cnt < 10; cnt++ {
		var copies uint64
		// First pass: make sure a connection to every replica exists.
		for i := range v.chunkInfo.BlockGroup.BlockInfos {
			ip := utils.InetNtoa(v.chunkInfo.BlockGroup.BlockInfos[i].DataNodeIP).String()
			port := int(v.chunkInfo.BlockGroup.BlockInfos[i].DataNodePort)
			addr := ip + ":" + strconv.Itoa(port)
			conn, err := cfile.cfs.GetDataConn(addr)
			if err != nil || conn == nil {
				conn, err = DialData(addr)
				if err == nil && conn != nil {
					logger.Debug("new datanode conn !!!")
					cfile.cfs.SetDataConn(addr, conn)
				} else {
					logger.Error("new conn to %v failed", addr)
					goto fail
				}
			} else {
				//logger.Debug("reuse datanode conn !!!")
			}
		}
		// Second pass: fan the chunk out to every replica in parallel.
		for i := range v.chunkInfo.BlockGroup.BlockInfos {
			ip := utils.InetNtoa(v.chunkInfo.BlockGroup.BlockInfos[i].DataNodeIP).String()
			port := int(v.chunkInfo.BlockGroup.BlockInfos[i].DataNodePort)
			addr := ip + ":" + strconv.Itoa(port)
			conn, _ := cfile.cfs.GetDataConn(addr)
			blockID := v.chunkInfo.BlockGroup.BlockInfos[i].BlockID
			chunkID := v.chunkInfo.ChunkID
			pWriteChunkReq := &dp.WriteChunkReq{
				ChunkID: chunkID,
				BlockID: blockID,
				Databuf: dataBuf,
			}
			cfile.wgWriteReps.Add(1)
			go cfile.writeChunk(addr, conn, pWriteChunkReq, v.chunkInfo.BlockGroup.BlockGroupID, &copies)
		}
		cfile.wgWriteReps.Wait()
	fail:
		if copies < 3 {
			// Not all replicas confirmed: retry on a freshly allocated chunk.
			var ret int32
			ret, cfile.wBuffer.chunkInfo = cfile.AllocateChunk()
			if ret != 0 {
				// BUG FIX: check the allocation result before touching the
				// chunk info; the original dereferenced a possibly-nil
				// chunkInfo before testing ret.
				cfile.Status = 1
				return cfile.Status
			}
			cfile.wBuffer.chunkInfo.ChunkSize = int32(sendLen)
			v.chunkInfo = cfile.wBuffer.chunkInfo
			logger.Debug("write 3 copies failed ,choose new chunk! cnt=%v", cnt)
			continue
		} else {
			chunkWriteFlag = true
			break
		}
	}
	if !chunkWriteFlag {
		cfile.Status = 1
		return cfile.Status
	}
	// Record the chunk with the metanode so readers can find it.
	pSyncChunkReq := &mp.SyncChunkReq{
		ParentInodeID: cfile.ParentInodeID,
		Name:          cfile.Name,
		VolID:         cfile.cfs.VolID,
	}
	var tmpChunkInfo mp.ChunkInfo
	tmpChunkInfo.ChunkSize = v.chunkInfo.ChunkSize
	tmpChunkInfo.ChunkID = v.chunkInfo.ChunkID
	tmpChunkInfo.BlockGroupID = v.chunkInfo.BlockGroup.BlockGroupID
	pSyncChunkReq.ChunkInfo = &tmpChunkInfo
	wflag := false
	// Wait briefly for the metanode connection to come up.
	for i := 0; i < 10; i++ {
		if cfile.cfs.Conn != nil {
			break
		}
		time.Sleep(300 * time.Millisecond)
		continue
	}
	if cfile.cfs.Conn == nil {
		cfile.Status = 1
		return cfile.Status
	}
	mc := mp.NewMetaNodeClient(cfile.cfs.Conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	pSyncChunkAck, err := mc.SyncChunk(ctx, pSyncChunkReq)
	cancel() // FIX: release the timeout context instead of leaking it
	if err == nil && pSyncChunkAck.Ret == 0 {
		wflag = true
	} else {
		logger.Error("SyncChunk failed start to try ,name %v,inode %v,pinode %v", cfile.Name, cfile.Inode, cfile.ParentInodeID)
		for i := 0; i < 15; i++ {
			time.Sleep(time.Second)
			// FIX: inner wait loop uses j instead of shadowing the retry index i.
			for j := 0; j < 10; j++ {
				if cfile.cfs.Conn != nil {
					break
				}
				time.Sleep(300 * time.Millisecond)
				continue
			}
			if cfile.cfs.Conn == nil {
				cfile.Status = 1
				return cfile.Status
			}
			logger.Error("SyncChunk try %v times", i+1)
			mc := mp.NewMetaNodeClient(cfile.cfs.Conn)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			pSyncChunkAck, err := mc.SyncChunk(ctx, pSyncChunkReq)
			cancel() // FIX: cancel per attempt; a defer here would pile up in the loop
			if err == nil && pSyncChunkAck.Ret == 0 {
				wflag = true
				break
			} else {
				logger.Error("SyncChunk grpc func try %v times ,err %v", i+1, err)
				if pSyncChunkAck != nil {
					logger.Error("SyncChunk grpc func try %v times ,ret %v", i+1, pSyncChunkAck.Ret)
				}
			}
		}
	}
	if !wflag {
		cfile.Status = 1
		return cfile.Status
	}
	// Update the in-memory chunk list: extend the last chunk when this buffer
	// belongs to it, otherwise append a new entry.
	chunkNum := len(cfile.chunks)
	//v.chunkInfo.Status = tmpChunkInfo.Status
	if chunkNum == 0 {
		cfile.chunks = append(cfile.chunks, v.chunkInfo)
	} else {
		if cfile.chunks[chunkNum-1].ChunkID == v.chunkInfo.ChunkID {
			cfile.chunks[chunkNum-1].ChunkSize = v.chunkInfo.ChunkSize
			//cfile.chunks[chunkNum-1].Status = v.chunkInfo.Status
		} else {
			cfile.chunks = append(cfile.chunks, v.chunkInfo)
		}
	}
	return cfile.Status
}
// Sync ...
// Sync is currently a no-op: chunks are pushed to the datanodes by
// Write/Flush, so there is nothing additional to persist here.
func (cfile *CFile) Sync() int32 {
	return 0
}
// Close ...
// Close is currently a no-op; flags is accepted for interface compatibility
// and ignored. Presumably callers Flush before closing — TODO confirm.
func (cfile *CFile) Close(flags int) int32 {
	return 0
}
// ProcessLocalBuffer ...
// ProcessLocalBuffer writes one locally read buffer into the given CFS file.
// The Write return value is discarded here, so failures are not propagated;
// NOTE(review): confirm callers detect errors via Flush/Status instead.
func ProcessLocalBuffer(buffer []byte, cfile *CFile) {
	cfile.Write(buffer, int32(len(buffer)))
}
// ReadLocalAndWriteCFS ...
// ReadLocalAndWriteCFS streams a local file in bufSize slices, invoking
// hookfn on every slice read (including a short final one) until EOF.
// Returns nil on a clean EOF, otherwise the open/read error.
func ReadLocalAndWriteCFS(filePth string, bufSize int, hookfn func([]byte, *CFile), cfile *CFile) error {
	f, err := os.Open(filePth)
	if err != nil {
		return err
	}
	defer f.Close()
	reader := bufio.NewReader(f)
	chunk := make([]byte, bufSize)
	for {
		// Deliver whatever was read before inspecting the error, since a
		// Read may return data together with EOF.
		n, readErr := reader.Read(chunk)
		hookfn(chunk[:n], cfile)
		if readErr == nil {
			continue
		}
		if readErr == io.EOF {
			return nil
		}
		return readErr
	}
}
|
package main
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"github.com/gin-gonic/gin"
)
const VERSION = "0.1.0"

// Response is the generic JSON envelope returned by the command endpoints.
type Response struct {
	Success bool   `json:"success"`
	Message string `json:"message"`
}

// FileEntry describes one item found while browsing the media directory.
type FileEntry struct {
	Filename string `json:"filename"`
	IsDir    bool   `json:"directory"`
}

var (
	// Regular expression to match all supported video files.
	// BUG FIX: the dot is now escaped — the original bare "." matched any
	// character, so e.g. "movieavi" was accepted as a video file.
	Extensions = regexp.MustCompile(`\.(avi|mpg|mov|flv|wmv|asf|mpeg|m4v|divx|mp4|mkv)$`)
	// OMXPlayer control commands, these are piped via STDIN to omxplayer process
	Commands = map[string]string{
		"pause":             "p",            // Pause/continue playback
		"stop":              "q",            // Stop playback and exit
		"volume_up":         "+",            // Change volume by +3dB
		"volume_down":       "-",            // Change volume by -3dB
		"subtitles":         "s",            // Enable/disable subtitles
		"seek_back":         "\x1b\x5b\x44", // Seek -30 seconds
		"seek_back_fast":    "\x1b\x5b\x42", // Seek -600 seconds
		"seek_forward":      "\x1b\x5b\x43", // Seek +30 seconds
		"seek_forward_fast": "\x1b\x5b\x41", // Seek +600 seconds
	}
	// Path where all media files are stored
	MediaPath string
	// Path to omxplayer executable
	OmxPath string
	// Child process for spawning omxplayer
	Omx *exec.Cmd
	// Child process STDIN pipe to send commands
	OmxIn io.WriteCloser
	// Channel to pass along commands to the player routine
	Command chan string
)
// Returns true if specified file exists
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}
	return true
}
// Scan given path for all directories and matching video files.
// If nothing was found it will return an empty slice.
func scanPath(path string) []FileEntry {
	result := make([]FileEntry, 0)
	items, err := ioutil.ReadDir(path)
	if err != nil {
		return result
	}
	for _, item := range items {
		// Skip regular files that omxplayer cannot handle
		if !item.IsDir() && !omxCanPlay(item.Name()) {
			continue
		}
		result = append(result, FileEntry{
			Filename: item.Name(),
			IsDir:    item.IsDir(),
		})
	}
	return result
}
// Determine the full path to omxplayer executable. Returns error if not found.
func omxDetect() error {
	out, err := exec.Command("which", "omxplayer").Output()
	if err != nil {
		return err
	}
	// Remember the trimmed path in the global variable
	OmxPath = strings.TrimSpace(string(out))
	return nil
}
// Start command listener. Commands are coming in through a channel.
func omxListen() {
	Command = make(chan string)
	for command := range Command {
		// Ignore commands while no player instance is running
		if Omx == nil {
			continue
		}
		// Forward the control key to the player
		omxWrite(command)
		// "stop" additionally kills the player process
		if command == "stop" {
			Omx.Process.Kill()
		}
	}
}
// Start omxplayer playback for a given video file. Returns error if start fails.
//
// Runs synchronously until the player process exits (callers invoke it in a
// goroutine), then resets the global player state via omxCleanup. The child's
// stdin pipe is published in the global OmxIn so omxWrite can send control keys.
// NOTE(review): the deferred stdin.Close() fires only after Wait returns, so
// the pipe stays writable for the player's whole lifetime.
func omxPlay(file string) error {
	Omx = exec.Command(
		OmxPath,     // path to omxplayer executable
		"--refresh", // adjust framerate/resolution to video
		"--blank",   // set background to black
		"--adev",    // audio out device
		"hdmi",      // using hdmi for audio/video
		file,        // path to video file
	)
	// Grab child process STDIN
	stdin, err := Omx.StdinPipe()
	if err != nil {
		return err
	}
	defer stdin.Close()
	// Redirect output for debugging purposes
	Omx.Stdout = os.Stdout
	// Start omxplayer execution.
	// If successful, something will appear on HDMI display.
	err = Omx.Start()
	if err != nil {
		return err
	}
	// Make child's STDIN globally available
	OmxIn = stdin
	// Wait until child process is finished
	err = Omx.Wait()
	if err != nil {
		fmt.Fprintln(os.Stdout, "Process exited with error:", err)
	}
	omxCleanup()
	return nil
}
// Write a command string to the omxplayer process's STDIN
func omxWrite(command string) {
	if OmxIn == nil {
		return
	}
	io.WriteString(OmxIn, Commands[command])
}
// Terminate any running omxplayer processes. Fixes random hangs.
// Errors are deliberately ignored: the processes may simply not be running.
func omxKill() {
	for _, name := range []string{"omxplayer.bin", "omxplayer"} {
		exec.Command("killall", name).Output()
	}
}
// Reset internal state and stop any running processes
// Drops the global references first (so omxIsActive reports false),
// then force-kills any leftover player processes.
func omxCleanup() {
	Omx = nil
	OmxIn = nil
	omxKill()
}
// Check if player is currently active
// (true while a spawned omxplayer process handle exists).
func omxIsActive() bool {
	return Omx != nil
}
// Check if player can play the file, based on its extension.
// Returns the match result directly instead of the original redundant
// if-true/return-false chain; MatchString also avoids the needless
// string-to-[]byte conversion.
func omxCanPlay(path string) bool {
	return Extensions.MatchString(path)
}
// httpBrowse lists the contents of a directory under MediaPath as JSON.
//
// NOTE(review): "path" is concatenated unchecked, so "../" in the query can
// escape MediaPath (directory traversal) — confirm this service is
// trusted/LAN-only, otherwise sanitize the parameter.
func httpBrowse(c *gin.Context) {
	path := c.Request.FormValue("path")
	if path != "" {
		path = fmt.Sprintf("%s/%s", MediaPath, path)
	} else {
		path = MediaPath
	}
	c.JSON(200, scanPath(path))
}
// httpCommand validates a named player command and queues it for the
// listener goroutine.
func httpCommand(c *gin.Context) {
	name := c.Params.ByName("command")
	_, known := Commands[name]
	if !known {
		c.JSON(400, Response{false, "Invalid command"})
		return
	}
	fmt.Println("Received command:", name)
	// Forward to the player control loop
	Command <- name
	c.JSON(200, Response{true, "OK"})
}
// httpPlay starts playback of one media file (relative to MediaPath).
//
// NOTE(review): "file" is joined by string concatenation, so "../" in the
// query can escape MediaPath (path traversal) — confirm the service is
// trusted/LAN-only or sanitize the parameter.
// Playback runs in its own goroutine; errors after startup are not returned
// in this HTTP response.
func httpPlay(c *gin.Context) {
	if omxIsActive() {
		c.JSON(400, Response{false, "Player is already running"})
		return
	}
	file := c.Request.FormValue("file")
	if file == "" {
		c.JSON(400, Response{false, "File is required"})
		return
	}
	file = fmt.Sprintf("%s/%s", MediaPath, file)
	if !fileExists(file) {
		c.JSON(400, Response{false, "File does not exist"})
		return
	}
	if !omxCanPlay(file) {
		c.JSON(400, Response{false, "File cannot be played"})
		return
	}
	go omxPlay(file)
	c.JSON(200, Response{true, "OK"})
}
// httpStatus reports whether a playback process is currently running.
func httpStatus(c *gin.Context) {
	status := fmt.Sprintf(`{"running":%v}`, omxIsActive())
	c.String(200, status)
}
// httpIndex serves the embedded static/index.html page.
func httpIndex(c *gin.Context) {
	page, err := Asset("static/index.html")
	if err != nil {
		c.String(400, err.Error())
		return
	}
	c.Data(200, "text/html; charset=utf-8", page)
}
// terminate prints a message and exits the process with the given code.
func terminate(message string, code int) {
	fmt.Println(message)
	os.Exit(code)
}
// usage prints invocation help and exits successfully.
func usage() {
	terminate("Usage: omxremote path/to/media/dir", 0)
}
// Program entry point: validates arguments and environment, then starts the
// command listener goroutine and the HTTP remote-control server on :8080.
func main() {
	if len(os.Args) < 2 {
		usage()
	}
	// Media directory comes from the first argument, sans trailing slash
	MediaPath = strings.TrimRight(os.Args[1], "/")
	if !fileExists(MediaPath) {
		terminate(fmt.Sprintf("Directory does not exist: %s", MediaPath), 1)
	}
	// Refuse to start without omxplayer installed
	if omxDetect() != nil {
		terminate("omxplayer is not installed", 1)
	}
	// Kill any stale player instances
	omxCleanup()
	// Consume the command channel in the background
	go omxListen()
	gin.SetMode("release")
	r := gin.Default()
	r.GET("/", httpIndex)
	r.GET("/status", httpStatus)
	r.GET("/browse", httpBrowse)
	r.GET("/play", httpPlay)
	r.GET("/command/:command", httpCommand)
	r.Run(":8080")
}
Print omxremote version
package main
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"github.com/gin-gonic/gin"
)
const VERSION = "0.1.0"

// Response is the generic JSON envelope returned by the command endpoints.
type Response struct {
	Success bool   `json:"success"`
	Message string `json:"message"`
}

// FileEntry describes one item found while browsing the media directory.
type FileEntry struct {
	Filename string `json:"filename"`
	IsDir    bool   `json:"directory"`
}

var (
	// Regular expression to match all supported video files.
	// BUG FIX: the dot is now escaped — the original bare "." matched any
	// character, so e.g. "movieavi" was accepted as a video file.
	Extensions = regexp.MustCompile(`\.(avi|mpg|mov|flv|wmv|asf|mpeg|m4v|divx|mp4|mkv)$`)
	// OMXPlayer control commands, these are piped via STDIN to omxplayer process
	Commands = map[string]string{
		"pause":             "p",            // Pause/continue playback
		"stop":              "q",            // Stop playback and exit
		"volume_up":         "+",            // Change volume by +3dB
		"volume_down":       "-",            // Change volume by -3dB
		"subtitles":         "s",            // Enable/disable subtitles
		"seek_back":         "\x1b\x5b\x44", // Seek -30 seconds
		"seek_back_fast":    "\x1b\x5b\x42", // Seek -600 seconds
		"seek_forward":      "\x1b\x5b\x43", // Seek +30 seconds
		"seek_forward_fast": "\x1b\x5b\x41", // Seek +600 seconds
	}
	// Path where all media files are stored
	MediaPath string
	// Path to omxplayer executable
	OmxPath string
	// Child process for spawning omxplayer
	Omx *exec.Cmd
	// Child process STDIN pipe to send commands
	OmxIn io.WriteCloser
	// Channel to pass along commands to the player routine
	Command chan string
)
// Returns true if specified file exists
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}
	return true
}
// Scan given path for all directories and matching video files.
// If nothing was found it will return an empty slice.
func scanPath(path string) []FileEntry {
	result := make([]FileEntry, 0)
	items, err := ioutil.ReadDir(path)
	if err != nil {
		return result
	}
	for _, item := range items {
		// Skip regular files that omxplayer cannot handle
		if !item.IsDir() && !omxCanPlay(item.Name()) {
			continue
		}
		result = append(result, FileEntry{
			Filename: item.Name(),
			IsDir:    item.IsDir(),
		})
	}
	return result
}
// Determine the full path to omxplayer executable. Returns error if not found.
func omxDetect() error {
	out, err := exec.Command("which", "omxplayer").Output()
	if err != nil {
		return err
	}
	// Remember the trimmed path in the global variable
	OmxPath = strings.TrimSpace(string(out))
	return nil
}
// Start command listener. Commands are coming in through a channel.
func omxListen() {
	Command = make(chan string)
	for command := range Command {
		// Ignore commands while no player instance is running
		if Omx == nil {
			continue
		}
		// Forward the control key to the player
		omxWrite(command)
		// "stop" additionally kills the player process
		if command == "stop" {
			Omx.Process.Kill()
		}
	}
}
// Start omxplayer playback for a given video file. Returns error if start fails.
//
// Runs synchronously until the player process exits (callers invoke it in a
// goroutine), then resets the global player state via omxCleanup. The child's
// stdin pipe is published in the global OmxIn so omxWrite can send control keys.
// NOTE(review): the deferred stdin.Close() fires only after Wait returns, so
// the pipe stays writable for the player's whole lifetime.
func omxPlay(file string) error {
	Omx = exec.Command(
		OmxPath,     // path to omxplayer executable
		"--refresh", // adjust framerate/resolution to video
		"--blank",   // set background to black
		"--adev",    // audio out device
		"hdmi",      // using hdmi for audio/video
		file,        // path to video file
	)
	// Grab child process STDIN
	stdin, err := Omx.StdinPipe()
	if err != nil {
		return err
	}
	defer stdin.Close()
	// Redirect output for debugging purposes
	Omx.Stdout = os.Stdout
	// Start omxplayer execution.
	// If successful, something will appear on HDMI display.
	err = Omx.Start()
	if err != nil {
		return err
	}
	// Make child's STDIN globally available
	OmxIn = stdin
	// Wait until child process is finished
	err = Omx.Wait()
	if err != nil {
		fmt.Fprintln(os.Stdout, "Process exited with error:", err)
	}
	omxCleanup()
	return nil
}
// Write a command string to the omxplayer process's STDIN
func omxWrite(command string) {
	if OmxIn == nil {
		return
	}
	io.WriteString(OmxIn, Commands[command])
}
// Terminate any running omxplayer processes. Fixes random hangs.
// Errors are deliberately ignored: the processes may simply not be running.
func omxKill() {
	for _, name := range []string{"omxplayer.bin", "omxplayer"} {
		exec.Command("killall", name).Output()
	}
}
// Reset internal state and stop any running processes
// Drops the global references first (so omxIsActive reports false),
// then force-kills any leftover player processes.
func omxCleanup() {
	Omx = nil
	OmxIn = nil
	omxKill()
}
// Check if player is currently active
// (true while a spawned omxplayer process handle exists).
func omxIsActive() bool {
	return Omx != nil
}
// Check if player can play the file, based on its extension.
// Returns the match result directly instead of the original redundant
// if-true/return-false chain; MatchString also avoids the needless
// string-to-[]byte conversion.
func omxCanPlay(path string) bool {
	return Extensions.MatchString(path)
}
// httpBrowse lists the contents of a directory under MediaPath as JSON.
//
// NOTE(review): "path" is concatenated unchecked, so "../" in the query can
// escape MediaPath (directory traversal) — confirm this service is
// trusted/LAN-only, otherwise sanitize the parameter.
func httpBrowse(c *gin.Context) {
	path := c.Request.FormValue("path")
	if path != "" {
		path = fmt.Sprintf("%s/%s", MediaPath, path)
	} else {
		path = MediaPath
	}
	c.JSON(200, scanPath(path))
}
// httpCommand validates a named player command and queues it for the
// listener goroutine.
func httpCommand(c *gin.Context) {
	name := c.Params.ByName("command")
	_, known := Commands[name]
	if !known {
		c.JSON(400, Response{false, "Invalid command"})
		return
	}
	fmt.Println("Received command:", name)
	// Forward to the player control loop
	Command <- name
	c.JSON(200, Response{true, "OK"})
}
// httpPlay starts playback of one media file (relative to MediaPath).
//
// NOTE(review): "file" is joined by string concatenation, so "../" in the
// query can escape MediaPath (path traversal) — confirm the service is
// trusted/LAN-only or sanitize the parameter.
// Playback runs in its own goroutine; errors after startup are not returned
// in this HTTP response.
func httpPlay(c *gin.Context) {
	if omxIsActive() {
		c.JSON(400, Response{false, "Player is already running"})
		return
	}
	file := c.Request.FormValue("file")
	if file == "" {
		c.JSON(400, Response{false, "File is required"})
		return
	}
	file = fmt.Sprintf("%s/%s", MediaPath, file)
	if !fileExists(file) {
		c.JSON(400, Response{false, "File does not exist"})
		return
	}
	if !omxCanPlay(file) {
		c.JSON(400, Response{false, "File cannot be played"})
		return
	}
	go omxPlay(file)
	c.JSON(200, Response{true, "OK"})
}
// httpStatus reports whether a playback process is currently running.
func httpStatus(c *gin.Context) {
	status := fmt.Sprintf(`{"running":%v}`, omxIsActive())
	c.String(200, status)
}
// httpIndex serves the embedded static/index.html page.
func httpIndex(c *gin.Context) {
	page, err := Asset("static/index.html")
	if err != nil {
		c.String(400, err.Error())
		return
	}
	c.Data(200, "text/html; charset=utf-8", page)
}
// terminate prints a message and exits the process with the given code.
func terminate(message string, code int) {
	fmt.Println(message)
	os.Exit(code)
}
// usage prints invocation help and exits successfully.
func usage() {
	terminate("Usage: omxremote path/to/media/dir", 0)
}
// Program entry point: prints the version, validates arguments and
// environment, then starts the command listener goroutine and the HTTP
// remote-control server on :8080.
func main() {
	fmt.Printf("omxremote v%v\n", VERSION)
	if len(os.Args) < 2 {
		usage()
	}
	// Media directory comes from the first argument, sans trailing slash
	MediaPath = strings.TrimRight(os.Args[1], "/")
	if !fileExists(MediaPath) {
		terminate(fmt.Sprintf("Directory does not exist: %s", MediaPath), 1)
	}
	// Refuse to start without omxplayer installed
	if omxDetect() != nil {
		terminate("omxplayer is not installed", 1)
	}
	// Kill any stale player instances
	omxCleanup()
	// Consume the command channel in the background
	go omxListen()
	gin.SetMode("release")
	r := gin.Default()
	r.GET("/", httpIndex)
	r.GET("/status", httpStatus)
	r.GET("/browse", httpBrowse)
	r.GET("/play", httpPlay)
	r.GET("/command/:command", httpCommand)
	fmt.Println("Starting server on 0.0.0.0:8080")
	r.Run(":8080")
}
|
package main
import (
"github.com/jessevdk/go-operators/types"
"fmt"
"github.com/jessevdk/go-flags"
"go/ast"
"go/build"
"go/format"
"go/parser"
"go/token"
"os"
"path"
"strings"
)
// TypeCheck bundles everything produced while type-checking one package:
// where it lives on disk, which files were parsed and which *.op.go files
// need operator rewriting, plus the ASTs and the checked package object.
type TypeCheck struct {
	Path string
	ImportPath string
	Package *types.Package
	FileSet *token.FileSet
	ParseFiles []string
	ProcessFiles []string
	FilesToAst map[string]*ast.File
	Ast []*ast.File
}
// typechecks caches completed type checks keyed by import path, so each
// package is parsed and checked at most once.
var typechecks = make(map[string]*TypeCheck)
// packageLocations returns the package directories to operate on: the given
// arguments, or the current working directory when none were supplied.
func packageLocations(args []string) []string {
	if len(args) > 0 {
		return args
	}
	cwd, err := os.Getwd()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to obtain current working directory\n")
		os.Exit(1)
	}
	return []string{cwd}
}
func buildFiles(packageDir string) (parse []string, process []string, pkgname string) {
ctx := build.Default
ctx.BuildTags = []string{"operators"}
p, err := ctx.ImportDir(packageDir, 0)
if err != nil {
fmt.Fprintf(os.Stderr, "Error while importing build: %s\n", err)
os.Exit(1)
}
for _, f := range p.GoFiles {
parse = append(parse, path.Join(packageDir, f))
if strings.HasSuffix(f, ".op.go") {
process = append(process, f)
}
}
return parse, process, p.Name
}
// parseAST parses every file into one shared FileSet, returning the ASTs
// both as a slice (in input order) and keyed by filename. Any parse failure
// aborts the program.
func parseAST(files []string) (fs *token.FileSet, afs []*ast.File, afsmap map[string]*ast.File) {
	fs = token.NewFileSet()
	afs = make([]*ast.File, 0, len(files))
	afsmap = make(map[string]*ast.File, len(files))
	for _, name := range files {
		parsed, err := parser.ParseFile(fs, name, nil, 0)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error while parsing AST: %s\n", err)
			os.Exit(1)
		}
		afsmap[name] = parsed
		afs = append(afs, parsed)
	}
	return fs, afs, afsmap
}
// checkTypes builds, parses and type-checks the package at pkgpath, caching
// the result in typechecks under importpath.
func checkTypes(pkgpath string, importpath string) *TypeCheck {
	if cached, ok := typechecks[importpath]; ok {
		return cached
	}
	parse, process, pkgname := buildFiles(pkgpath)
	fs, afs, afsmap := parseAST(parse)
	conf := types.Config{Import: importSources}
	pp, err := conf.Check(pkgname, fs, afs, nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error while type checking: %s\n", err)
		os.Exit(1)
	}
	result := &TypeCheck{
		Path:         pkgpath,
		ImportPath:   importpath,
		Package:      pp,
		FileSet:      fs,
		ParseFiles:   parse,
		ProcessFiles: process,
		Ast:          afs,
		FilesToAst:   afsmap,
	}
	typechecks[importpath] = result
	return result
}
// resolvePackage locates importpath on disk — optionally as a local
// directory first, then under each $GOPATH entry — and type-checks it.
// Exits the program if the package cannot be found anywhere.
func resolvePackage(importpath string, tryLocal bool) *TypeCheck {
	if cached, ok := typechecks[importpath]; ok {
		return cached
	}
	if tryLocal {
		if _, err := os.Stat(importpath); err == nil {
			return checkTypes(importpath, importpath)
		}
	}
	for _, gopath := range strings.Split(os.Getenv("GOPATH"), ":") {
		candidate := path.Join(gopath, "src", importpath)
		if _, err := os.Stat(candidate); err == nil {
			return checkTypes(candidate, importpath)
		}
	}
	fmt.Fprintf(os.Stderr, "Could not find package %s.\n", importpath)
	os.Exit(1)
	return nil
}
// importSource satisfies an import by type-checking the package from source
// and registering it in the importer's map.
func importSource(imports map[string]*types.Package, path string) (pkg *types.Package, err error) {
	checked := resolvePackage(path, false)
	if checked == nil {
		return nil, fmt.Errorf("Could not locate import path %s", path)
	}
	imports[checked.ImportPath] = checked.Package
	return checked.Package, nil
}
// importSources resolves an import either from source (for packages being
// operator-processed) or from compiled gc export data, falling back to
// source when no export data is available. cgo packages are unsupported.
func importSources(imports map[string]*types.Package, path string) (pkg *types.Package, err error) {
	if operatorPackages[path] {
		return importSource(imports, path)
	}
	pkg, err = types.GcImport(imports, path)
	if err == nil {
		return pkg, nil
	}
	if path == "C" {
		return nil, fmt.Errorf("go-operators does not have support for packages that use cgo at the moment")
	}
	return importSource(imports, path)
}
// replacer swaps an overloaded-operator expression for the equivalent
// direct method call recv.Method(operand); all other nodes pass through.
func replacer(overloads map[ast.Expr]types.OverloadInfo, node ast.Node) ast.Node {
	expr, isExpr := node.(ast.Expr)
	if !isExpr {
		return node
	}
	info, overloaded := overloads[expr]
	if !overloaded {
		return node
	}
	callArgs := make([]ast.Expr, 0, 1)
	if info.Oper != nil {
		callArgs = append(callArgs, info.Oper)
	}
	// Build recv.Method(args...)
	return &ast.CallExpr{
		Fun: &ast.SelectorExpr{
			X:   info.Recv,
			Sel: ast.NewIdent(info.Func.Name()),
		},
		Args: callArgs,
	}
}
// replaceOperators rewrites every *.op.go file of the package, replacing
// operator overloads with method calls, and writes the result next to the
// source as <name>.go guarded by a "!operators" build constraint.
func replaceOperators(ct *TypeCheck) {
	overloads := ct.Package.Overloads()
	for _, f := range ct.ProcessFiles {
		af := ct.FilesToAst[path.Join(ct.Path, f)]
		af = replace(func(node ast.Node) ast.Node {
			return replacer(overloads, node)
		}, af).(*ast.File)
		suffix := ".op.go"
		outname := f[:len(f)-len(suffix)] + ".go"
		fn := path.Join(ct.Path, outname)
		of, err := os.Create(fn)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to create output file: %s\n", err)
			os.Exit(1)
		}
		if opts.Verbose {
			fmt.Println(fn)
		}
		// Write build constraint (Fprintln's extra \n keeps a blank line
		// between the constraint and the package clause)
		fmt.Fprintln(of, "// +build !operators\n")
		if err := format.Node(of, ct.FileSet, af); err != nil {
			of.Close()
			fmt.Fprintf(os.Stderr, "Failed to write code: %s\n", err)
			os.Exit(1)
		}
		// BUG FIX: close each file per iteration; the original used
		// defer-in-loop, holding every output file open (and unflushed to
		// the OS) until the function returned.
		of.Close()
	}
}
// opts holds the command-line options parsed by go-flags.
var opts struct {
	Verbose bool `short:"v" long:"verbose" description:"Enable verbose mode"`
}

// operatorPackages marks package paths whose operators are being rewritten;
// imports of these are resolved from source rather than gc export data.
var operatorPackages = make(map[string]bool)
// main parses flags, records which packages are operator-processed, then
// resolves and rewrites each requested package directory.
func main() {
	argParser := flags.NewParser(&opts, flags.Default)
	args, err := argParser.Parse()
	if err != nil {
		os.Exit(1)
	}
	dirs := packageLocations(args)
	// Register first so cross-imports among the targets resolve from source.
	for _, dir := range dirs {
		operatorPackages[dir] = true
	}
	for _, dir := range dirs {
		replaceOperators(resolvePackage(dir, true))
	}
}
go fmt
package main
import (
"fmt"
"github.com/jessevdk/go-flags"
"github.com/jessevdk/go-operators/types"
"go/ast"
"go/build"
"go/format"
"go/parser"
"go/token"
"os"
"path"
"strings"
)
// TypeCheck bundles everything produced while type-checking one package:
// where it lives on disk, which files were parsed and which *.op.go files
// need operator rewriting, plus the ASTs and the checked package object.
type TypeCheck struct {
	Path string
	ImportPath string
	Package *types.Package
	FileSet *token.FileSet
	ParseFiles []string
	ProcessFiles []string
	FilesToAst map[string]*ast.File
	Ast []*ast.File
}
// typechecks caches completed type checks keyed by import path, so each
// package is parsed and checked at most once.
var typechecks = make(map[string]*TypeCheck)
// packageLocations returns the package directories to operate on: the given
// arguments, or the current working directory when none were supplied.
func packageLocations(args []string) []string {
	if len(args) > 0 {
		return args
	}
	cwd, err := os.Getwd()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to obtain current working directory\n")
		os.Exit(1)
	}
	return []string{cwd}
}
func buildFiles(packageDir string) (parse []string, process []string, pkgname string) {
ctx := build.Default
ctx.BuildTags = []string{"operators"}
p, err := ctx.ImportDir(packageDir, 0)
if err != nil {
fmt.Fprintf(os.Stderr, "Error while importing build: %s\n", err)
os.Exit(1)
}
for _, f := range p.GoFiles {
parse = append(parse, path.Join(packageDir, f))
if strings.HasSuffix(f, ".op.go") {
process = append(process, f)
}
}
return parse, process, p.Name
}
// parseAST parses every file into one shared FileSet, returning the ASTs
// both as a slice (in input order) and keyed by filename. Any parse failure
// aborts the program.
func parseAST(files []string) (fs *token.FileSet, afs []*ast.File, afsmap map[string]*ast.File) {
	fs = token.NewFileSet()
	afs = make([]*ast.File, 0, len(files))
	afsmap = make(map[string]*ast.File, len(files))
	for _, name := range files {
		parsed, err := parser.ParseFile(fs, name, nil, 0)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error while parsing AST: %s\n", err)
			os.Exit(1)
		}
		afsmap[name] = parsed
		afs = append(afs, parsed)
	}
	return fs, afs, afsmap
}
// checkTypes builds, parses and type-checks the package at pkgpath, caching
// the result in typechecks under importpath.
func checkTypes(pkgpath string, importpath string) *TypeCheck {
	if cached, ok := typechecks[importpath]; ok {
		return cached
	}
	parse, process, pkgname := buildFiles(pkgpath)
	fs, afs, afsmap := parseAST(parse)
	conf := types.Config{Import: importSources}
	pp, err := conf.Check(pkgname, fs, afs, nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error while type checking: %s\n", err)
		os.Exit(1)
	}
	result := &TypeCheck{
		Path:         pkgpath,
		ImportPath:   importpath,
		Package:      pp,
		FileSet:      fs,
		ParseFiles:   parse,
		ProcessFiles: process,
		Ast:          afs,
		FilesToAst:   afsmap,
	}
	typechecks[importpath] = result
	return result
}
// resolvePackage locates importpath on disk — optionally as a local
// directory first, then under each $GOPATH entry — and type-checks it.
// Exits the program if the package cannot be found anywhere.
func resolvePackage(importpath string, tryLocal bool) *TypeCheck {
	if cached, ok := typechecks[importpath]; ok {
		return cached
	}
	if tryLocal {
		if _, err := os.Stat(importpath); err == nil {
			return checkTypes(importpath, importpath)
		}
	}
	for _, gopath := range strings.Split(os.Getenv("GOPATH"), ":") {
		candidate := path.Join(gopath, "src", importpath)
		if _, err := os.Stat(candidate); err == nil {
			return checkTypes(candidate, importpath)
		}
	}
	fmt.Fprintf(os.Stderr, "Could not find package %s.\n", importpath)
	os.Exit(1)
	return nil
}
// importSource satisfies an import by type-checking the package from source
// and registering it in the importer's map.
func importSource(imports map[string]*types.Package, path string) (pkg *types.Package, err error) {
	checked := resolvePackage(path, false)
	if checked == nil {
		return nil, fmt.Errorf("Could not locate import path %s", path)
	}
	imports[checked.ImportPath] = checked.Package
	return checked.Package, nil
}
// importSources resolves an import either from source (for packages being
// operator-processed) or from compiled gc export data, falling back to
// source when no export data is available. cgo packages are unsupported.
func importSources(imports map[string]*types.Package, path string) (pkg *types.Package, err error) {
	if operatorPackages[path] {
		return importSource(imports, path)
	}
	pkg, err = types.GcImport(imports, path)
	if err == nil {
		return pkg, nil
	}
	if path == "C" {
		return nil, fmt.Errorf("go-operators does not have support for packages that use cgo at the moment")
	}
	return importSource(imports, path)
}
// replacer swaps an overloaded-operator expression for the equivalent
// direct method call recv.Method(operand); all other nodes pass through.
func replacer(overloads map[ast.Expr]types.OverloadInfo, node ast.Node) ast.Node {
	expr, isExpr := node.(ast.Expr)
	if !isExpr {
		return node
	}
	info, overloaded := overloads[expr]
	if !overloaded {
		return node
	}
	callArgs := make([]ast.Expr, 0, 1)
	if info.Oper != nil {
		callArgs = append(callArgs, info.Oper)
	}
	// Build recv.Method(args...)
	return &ast.CallExpr{
		Fun: &ast.SelectorExpr{
			X:   info.Recv,
			Sel: ast.NewIdent(info.Func.Name()),
		},
		Args: callArgs,
	}
}
// replaceOperators rewrites every *.op.go file of the package, replacing
// operator overloads with method calls, and writes the result next to the
// source as <name>.go guarded by a "!operators" build constraint.
func replaceOperators(ct *TypeCheck) {
	overloads := ct.Package.Overloads()
	for _, f := range ct.ProcessFiles {
		af := ct.FilesToAst[path.Join(ct.Path, f)]
		af = replace(func(node ast.Node) ast.Node {
			return replacer(overloads, node)
		}, af).(*ast.File)
		suffix := ".op.go"
		outname := f[:len(f)-len(suffix)] + ".go"
		fn := path.Join(ct.Path, outname)
		of, err := os.Create(fn)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to create output file: %s\n", err)
			os.Exit(1)
		}
		if opts.Verbose {
			fmt.Println(fn)
		}
		// Write build constraint (Fprintln's extra \n keeps a blank line
		// between the constraint and the package clause)
		fmt.Fprintln(of, "// +build !operators\n")
		if err := format.Node(of, ct.FileSet, af); err != nil {
			of.Close()
			fmt.Fprintf(os.Stderr, "Failed to write code: %s\n", err)
			os.Exit(1)
		}
		// BUG FIX: close each file per iteration; the original used
		// defer-in-loop, holding every output file open (and unflushed to
		// the OS) until the function returned.
		of.Close()
	}
}
// opts holds the command-line options parsed by go-flags.
var opts struct {
	Verbose bool `short:"v" long:"verbose" description:"Enable verbose mode"`
}

// operatorPackages marks package paths whose operators are being rewritten;
// imports of these are resolved from source rather than gc export data.
var operatorPackages = make(map[string]bool)
// main parses flags, records which packages are operator-processed, then
// resolves and rewrites each requested package directory.
func main() {
	argParser := flags.NewParser(&opts, flags.Default)
	args, err := argParser.Parse()
	if err != nil {
		os.Exit(1)
	}
	dirs := packageLocations(args)
	// Register first so cross-imports among the targets resolve from source.
	for _, dir := range dirs {
		operatorPackages[dir] = true
	}
	for _, dir := range dirs {
		replaceOperators(resolvePackage(dir, true))
	}
}
|
package cmd
import (
"fmt"
"net/http"
"strings"
"github.com/freneticmonkey/migrate/go/config"
"github.com/freneticmonkey/migrate/go/management"
"github.com/freneticmonkey/migrate/go/util"
"github.com/freneticmonkey/migrate/go/yaml"
"github.com/urfave/cli"
)
// var conf config.Config

// configURL is the --config-url global flag value (remote config source).
var configURL string

// configFile is the --config-file global flag value (default "config.yml").
var configFile string
// GetGlobalFlags Configures the global flags used by all subcommands
func GetGlobalFlags() []cli.Flag {
	return []cli.Flag{
		cli.StringFlag{
			Name:  "config-url",
			Value: "",
			Usage: "URL for remote configuration. If supplied config-file is ignored.",
		},
		cli.StringFlag{
			Name:  "config-file",
			Value: "config.yml",
			Usage: "URL for remote configuration.",
		},
		cli.BoolFlag{
			Name:  "verbose",
			Usage: "Enable verbose logging output",
		},
	}
}
// parseGlobalFlags reads the global CLI flags into package state
// (configFile, configURL) and configures logging verbosity.
func parseGlobalFlags(ctx *cli.Context) {
	// FIX: verbose logging is now opt-in via --verbose; it previously
	// defaulted to on ("Verbose output for now").
	verbose := false
	configFile = ctx.GlobalString("config-file")
	if ctx.GlobalIsSet("config-file") {
		util.LogInfof("Detected config-file: %s", configFile)
	}
	if ctx.GlobalIsSet("config-url") {
		configURL = ctx.GlobalString("config-url")
		util.LogInfof("Detected config-url: %s", configURL)
	}
	if ctx.GlobalIsSet("verbose") {
		verbose = ctx.GlobalBool("verbose")
		util.LogInfof("Detected verbose: %t", verbose)
	}
	util.SetVerbose(verbose)
}
// configureManagement Read the command line parameters,
// load configuration and setup the management database
func configureManagement() (targetConfig config.Config, err error) {
	util.ConfigFileSystem()
	// Load, then apply, the configuration.
	if targetConfig, err = loadConfig(configURL, configFile); err != nil {
		return targetConfig, err
	}
	return targetConfig, setConfig(targetConfig)
}
// loadConfig Load a configuration from URL and fallback to filepath if URL is not supplied.
// If the URL fails to return a valid configuration an error is returned.
func loadConfig(configURL, configFile string) (targetConfig config.Config, err error) {
	var configSource string
	if strings.HasPrefix(configURL, "http") {
		// Remote configuration: download and parse the response body.
		var response *http.Response
		if response, err = http.Get(configURL); err == nil {
			defer response.Body.Close()
			var data []byte
			data, err = util.ReadAll(response.Body)
			if !util.ErrorCheckf(err, "Problem reading the response for the config-url request") {
				// Unmarshal the YAML config
				err = yaml.ReadData(data, &targetConfig)
				configSource = configURL
			}
		}
	} else {
		// Local configuration file.
		err = yaml.ReadFile(configFile, &targetConfig)
		configSource = configFile
	}
	if util.ErrorCheckf(err, "Configuration read failed for: %s", configSource) {
		return targetConfig, fmt.Errorf("Unable to read configuration from: [%s]", configSource)
	}
	util.LogInfo("Successfully read configuration from: " + configSource)
	return targetConfig, err
}
// setConfig applies targetConfig: it initialises the utility helpers
// and establishes access to the management database.
func setConfig(targetConfig config.Config) (err error) {
	// Initialise any utility configuration
	util.Config(targetConfig)

	// Configure access to the management DB.  Handle the error at this
	// layer only: the previous util.ErrorCheck call logged the error
	// and then the same error was wrapped and returned, reporting it
	// twice.
	err = management.Setup(targetConfig)
	if err != nil {
		return fmt.Errorf("Unable to configure management database. Error: %v", err)
	}
	return nil
}
Disabled verbose output by default
package cmd
import (
"fmt"
"net/http"
"strings"
"github.com/freneticmonkey/migrate/go/config"
"github.com/freneticmonkey/migrate/go/management"
"github.com/freneticmonkey/migrate/go/util"
"github.com/freneticmonkey/migrate/go/yaml"
"github.com/urfave/cli"
)
// var conf config.Config
var configURL string
var configFile string
// GetGlobalFlags Configures the global flags used by all subcommands
func GetGlobalFlags() (flags []cli.Flag) {
	flags = []cli.Flag{
		cli.StringFlag{
			Name:  "config-url",
			Value: "",
			Usage: "URL for remote configuration. If supplied config-file is ignored.",
		},
		cli.StringFlag{
			Name:  "config-file",
			Value: "config.yml",
			// The usage text was copy-pasted from config-url; this
			// flag is the local configuration file path, not a URL.
			Usage: "Path to the local configuration file.",
		},
		cli.BoolFlag{
			Name:  "verbose",
			Usage: "Enable verbose logging output",
		},
	}
	return flags
}
// parseGlobalFlags extracts the global CLI options (config-file,
// config-url, verbose) into the package-level configFile / configURL
// variables and configures logging verbosity.
func parseGlobalFlags(ctx *cli.Context) {
	// Verbose logging is opt-in; off unless --verbose is given.
	verbose := false
	configFile = ctx.GlobalString("config-file")
	if ctx.GlobalIsSet("config-file") {
		util.LogInfof("Detected config-file: %s", configFile)
	}
	if ctx.GlobalIsSet("config-url") {
		configURL = ctx.GlobalString("config-url")
		util.LogInfof("Detected config-url: %s", configURL)
	}
	if ctx.GlobalIsSet("verbose") {
		verbose = ctx.GlobalBool("verbose")
		util.LogInfof("Detected verbose: %t", verbose)
	}
	util.SetVerbose(verbose)
}
// configureManagement Read the command line parameters,
// load configuration and setup the management database.
// Returns the loaded configuration and any load/setup error.
func configureManagement() (targetConfig config.Config, err error) {
	util.ConfigFileSystem()
	// Load Configuration
	targetConfig, err = loadConfig(configURL, configFile)
	if err == nil {
		// Set Configuration (only applied when loading succeeded)
		err = setConfig(targetConfig)
	}
	return targetConfig, err
}
// loadConfig Load a configuration from URL and fallback to filepath if URL is not supplied.
// If the URL fails to return a valid configration an error is returned.
// loadConfig loads a configuration from configURL, falling back to
// configFile when no http(s) URL is supplied.  An error is returned
// when the chosen source cannot be read or is not valid YAML.
func loadConfig(configURL, configFile string) (targetConfig config.Config, err error) {
	var configSource string

	// If the ConfigURL is set and it's a http URL
	if strings.HasPrefix(configURL, "http") {
		// Record the source up front so that a failure of http.Get
		// itself is still reported against the URL.  Previously
		// configSource stayed empty on that path, producing the
		// misleading message "Unable to read configuration from: []".
		configSource = configURL

		var response *http.Response
		// Download the configuration
		response, err = http.Get(configURL)
		if err == nil {
			// Read the response body
			var data []byte
			defer response.Body.Close()
			data, err = util.ReadAll(response.Body)
			if !util.ErrorCheckf(err, "Problem reading the response for the config-url request") {
				// Unmarshal the YAML config
				err = yaml.ReadData(data, &targetConfig)
			}
		}
	} else {
		// Assume that it's a local file
		configSource = configFile
		err = yaml.ReadFile(configFile, &targetConfig)
	}

	if util.ErrorCheckf(err, "Configuration read failed for: %s", configSource) {
		return targetConfig, fmt.Errorf("Unable to read configuration from: [%s]", configSource)
	}
	util.LogInfo("Successfully read configuration from: " + configSource)

	return targetConfig, err
}
// setConfig applies targetConfig: it initialises the utility helpers
// and establishes access to the management database.
func setConfig(targetConfig config.Config) (err error) {
	// Apply the configuration to the utility helpers first.
	util.Config(targetConfig)

	// Then wire up the management database connection.
	if err = management.Setup(targetConfig); err != nil {
		return fmt.Errorf("Unable configure management database. Error: %v", err)
	}
	return nil
}
|
package main
import (
"fmt"
/*
"os"
"os/signal"
*/
"runtime"
"errors"
"time"
"encoding/hex"
"encoding/binary"
"database/sql"
"github.com/fuzxxl/nfc/2.0/nfc"
"github.com/fuzxxl/freefare/0.3/freefare"
_ "github.com/mattn/go-sqlite3"
"github.com/davecheney/gpio"
"./keydiversification"
"./helpers"
)
// heartbeat prints a liveness message every two seconds, forever.
// Intended to run as a goroutine (currently disabled in main).
func heartbeat() {
	for {
		time.Sleep(2 * time.Second)
		fmt.Println("Dunka-dunk")
	}
}
// pulse_gpio drives pin high for ms milliseconds and then low again.
// It blocks for the whole duration, so callers run it as a goroutine.
func pulse_gpio(pin gpio.Pin, ms int) {
	pin.Set()
	time.Sleep(time.Duration(ms) * time.Millisecond)
	pin.Clear()
}
// clear_and_close drives the pin low and releases it.  Used by the
// (currently commented-out) exit handler in main.
func clear_and_close(pin gpio.Pin) {
	pin.Clear()
	pin.Close()
}
// Use structs to pass data around so I can refactor
// AppInfo holds the static DESFire application parameters loaded from
// apps.yaml by init_appinfo.
type AppInfo struct {
	aid freefare.DESFireAid // DESFire application id
	aidbytes []byte // aid serialised to bytes (key-diversification input)
	sysid []byte // system id (key-diversification input)
	acl_read_base []byte // base key material for the diversified ACL read key
	acl_write_base []byte // base key material for the diversified ACL write key
	acl_file_id byte // file id of the 8-byte ACL file in the application
}
// KeyChain holds the key ids (from apps.yaml) and the key objects used
// to talk to a card.  uid_read_key is static; the two ACL keys are
// recomputed per card by recalculate_diversified_keys.
type KeyChain struct {
	uid_read_key_id byte
	acl_read_key_id byte
	acl_write_key_id byte
	uid_read_key *freefare.DESFireKey // static AES key used to read the real UID
	acl_read_key *freefare.DESFireKey // diversified per-card ACL read key
	acl_write_key *freefare.DESFireKey // diversified per-card ACL write key
}
// To pass multiple values over a channel
// TagResult is the outcome of check_tag as delivered by
// check_tag_channel: whether access is granted, plus any error.
type TagResult struct {
	is_valid bool
	err error
}
// Package-level state filled in by init_appinfo() before the mainloop
// starts; check_tag and its helpers read (and, for the diversified ACL
// keys, rewrite) these.
var (
	keychain = KeyChain{}
	appinfo = AppInfo{}
)
// init_appinfo loads keys.yaml and apps.yaml and fills in the global
// appinfo and keychain structs.  Any missing or malformed value panics:
// the reader cannot operate without a complete configuration.
func init_appinfo() {
	keymap, err := helpers.LoadYAMLFile("keys.yaml")
	if err != nil {
		panic(err)
	}
	appmap, err := helpers.LoadYAMLFile("apps.yaml")
	if err != nil {
		panic(err)
	}

	// Application-id
	appinfo.aid, err = helpers.String2aid(appmap["hacklab_acl"].(map[interface{}]interface{})["aid"].(string))
	if err != nil {
		panic(err)
	}
	// Needed for diversification
	appinfo.aidbytes = helpers.Aid2bytes(appinfo.aid)
	appinfo.sysid, err = hex.DecodeString(appmap["hacklab_acl"].(map[interface{}]interface{})["sysid"].(string))
	if err != nil {
		panic(err)
	}
	// File id of the 8-byte ACL data file inside the application.
	appinfo.acl_file_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_file_id"].(string))
	if err != nil {
		panic(err)
	}

	// Key id numbers from config
	keychain.uid_read_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["uid_read_key_id"].(string))
	if err != nil {
		panic(err)
	}
	keychain.acl_read_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_read_key_id"].(string))
	if err != nil {
		panic(err)
	}
	keychain.acl_write_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_write_key_id"].(string))
	if err != nil {
		panic(err)
	}

	// The static app key to read UID
	keychain.uid_read_key, err = helpers.String2aeskey(keymap["uid_read_key"].(string))
	if err != nil {
		panic(err)
	}

	// Bases for the diversified keys; the actual per-card keys are
	// derived later in recalculate_diversified_keys.
	appinfo.acl_read_base, err = hex.DecodeString(keymap["acl_read_key"].(string))
	if err != nil {
		panic(err)
	}
	appinfo.acl_write_base, err = hex.DecodeString(keymap["acl_write_key"].(string))
	if err != nil {
		panic(err)
	}
}
// recalculate_diversified_keys derives the per-card ACL read/write keys
// from the configured base keys, the application id bytes, the card's
// real UID and the system id, and installs them into the global
// keychain.
func recalculate_diversified_keys(realuid []byte) error {
	readBytes, err := keydiversification.AES128(appinfo.acl_read_base[:], appinfo.aidbytes[:], realuid[:], appinfo.sysid[:])
	if err != nil {
		return err
	}
	writeBytes, err := keydiversification.AES128(appinfo.acl_write_base[:], appinfo.aidbytes[:], realuid[:], appinfo.sysid[:])
	if err != nil {
		return err
	}
	keychain.acl_read_key = helpers.Bytes2aeskey(readBytes)
	keychain.acl_write_key = helpers.Bytes2aeskey(writeBytes)
	return nil
}
// update_acl_file re-authenticates with the diversified ACL write key
// and overwrites the card's ACL data file with *newdata.  A short
// write is reported as a warning but not treated as an error.
func update_acl_file(desfiretag *freefare.DESFireTag, newdata *[]byte) error {
	fmt.Print("Re-auth with ACL write key, ")
	if err := desfiretag.Authenticate(keychain.acl_write_key_id, *keychain.acl_write_key); err != nil {
		return err
	}
	fmt.Println("Done")

	fmt.Print("Overwriting ACL data file, ")
	byteswritten, err := desfiretag.WriteData(appinfo.acl_file_id, 0, *newdata)
	if err != nil {
		return err
	}
	if byteswritten < 8 {
		fmt.Printf("WARNING: WriteData wrote %d bytes, 8 expected\n", byteswritten)
	}
	fmt.Println("Done")
	return nil
}
// check_revoked looks realuid_str up in the `revoked` table.  When the
// key is revoked, the card's ACL file is zeroed out.  On any error the
// function returns revoked=true so that callers fail closed.
func check_revoked(desfiretag *freefare.DESFireTag, db *sql.DB, realuid_str string) (bool, error) {
	revoked_found := false
	stmt, err := db.Prepare("SELECT rowid FROM revoked where uid=?")
	if err != nil {
		return true, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(realuid_str)
	if err != nil {
		return true, err
	}
	defer rows.Close()
	for rows.Next() {
		revoked_found = true
		var rowid int64
		// Previously the Scan error was silently discarded; a scan
		// failure would have logged a bogus rowid.  Fail closed.
		if err := rows.Scan(&rowid); err != nil {
			return true, err
		}
		fmt.Printf("WARNING: Found REVOKED key %s on row %d\n", realuid_str, rowid)
		// TODO: Publish a ZMQ message or something

		// Null the ACL file on card
		nullaclbytes := make([]byte, 8)
		err := update_acl_file(desfiretag, &nullaclbytes)
		if err != nil {
			return revoked_found, err
		}
	}
	// Surface iteration errors (previously unchecked): a cursor error
	// must not be mistaken for "not revoked".
	if err := rows.Err(); err != nil {
		return true, err
	}
	return revoked_found, nil
}
// read_and_parse_acl_file re-authenticates with the diversified ACL
// read key, reads the 8-byte ACL file from the card and decodes it as
// an unsigned varint, returning the ACL bitmask.
func read_and_parse_acl_file(desfiretag *freefare.DESFireTag) (uint64, error) {
	fmt.Print("Re-auth with ACL read key, ")
	err := desfiretag.Authenticate(keychain.acl_read_key_id, *keychain.acl_read_key)
	if err != nil {
		return 0, err
	}
	fmt.Println("Done")

	aclbytes := make([]byte, 8)
	fmt.Print("Reading ACL data file, ")
	bytesread, err := desfiretag.ReadData(appinfo.acl_file_id, 0, aclbytes)
	if err != nil {
		return 0, err
	}
	if bytesread < 8 {
		fmt.Printf("WARNING: ReadData read %d bytes, 8 expected\n", bytesread)
	}
	acl, n := binary.Uvarint(aclbytes)
	if n <= 0 {
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
		return 0, fmt.Errorf("ERROR: binary.Uvarint returned %d, skipping tag", n)
	}
	fmt.Println("Done")
	return acl, nil
}
// get_db_acl returns the ACL bitmask stored in the `keys` table for
// realuid_str.  An error return means the UID is unknown or the query
// failed.  The desfiretag parameter is unused but retained so existing
// call sites keep compiling.
func get_db_acl(desfiretag *freefare.DESFireTag, db *sql.DB, realuid_str string) (uint64, error) {
	stmt, err := db.Prepare("SELECT rowid,acl FROM keys where uid=?")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(realuid_str)
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	for rows.Next() {
		var rowid int64
		var acl int64
		// The Scan error was previously ignored, which could have
		// returned a zero ACL as if it were valid data.
		if err := rows.Scan(&rowid, &acl); err != nil {
			return 0, err
		}
		return uint64(acl), nil
	}
	// Distinguish cursor errors from a genuinely unknown UID.
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return 0, errors.New("UID not found")
}
// check_tag_channel runs check_tag and delivers its single result over
// ch so the caller can impose a timeout with select; the channel is
// closed after the result is sent.
func check_tag_channel(desfiretag *freefare.DESFireTag, db *sql.DB, required_acl uint64, ch chan TagResult) {
	result, err := check_tag(desfiretag, db, required_acl)
	ch <- TagResult{result, err}
	close(ch)
}
// check_tag performs the full access decision for one DESFire tag:
// connect, select the application, authenticate, fetch the real UID,
// derive the per-card keys, reject revoked cards, compare the card's
// ACL file against the DB (rewriting the card when they differ) and
// finally test the ACL bitmask against required_acl.  Returns true
// when access is granted.  Failures that may be RF noise retry via the
// RETRY label (up to errlimit attempts); hard failures jump to FAIL,
// which disconnects before returning.
func check_tag(desfiretag *freefare.DESFireTag, db *sql.DB, required_acl uint64) (bool, error) {
	const errlimit = 3
	var err error = nil
	var realuid_str string
	var realuid []byte
	acl := uint64(0)
	db_acl := uint64(0)
	revoked_found := false
	errcnt := 0
	connected := false
	// TODO: Add a timeout for all of this, if not done in 1s or so we have a problem...
RETRY:
	// err is non-nil here only when a previous attempt jumped back.
	if err != nil {
		// TODO: Retry only on RF-errors
		errcnt++
		if errcnt > errlimit {
			fmt.Println(fmt.Sprintf("failed (%s), retry-limit exceeded (%d/%d), skipping tag", err, errcnt, errlimit))
			goto FAIL
		}
		fmt.Println(fmt.Sprintf("failed (%s), retrying (%d)", err, errcnt))
	}
	// Each (re)try starts from a disconnected state.
	if connected {
		_ = desfiretag.Disconnect()
	}
	// Connect to this tag
	fmt.Print(fmt.Sprintf("Connecting to %s, ", desfiretag.UID()))
	err = desfiretag.Connect()
	if err != nil {
		goto RETRY
	}
	fmt.Println("done")
	connected = true
	fmt.Print(fmt.Sprintf("Selecting application %d, ", appinfo.aid.Aid()))
	err = desfiretag.SelectApplication(appinfo.aid);
	if err != nil {
		goto RETRY
	}
	fmt.Println("Done")
	fmt.Print("Authenticating, ")
	err = desfiretag.Authenticate(keychain.uid_read_key_id,*keychain.uid_read_key)
	if err != nil {
		goto RETRY
	}
	fmt.Println("Done")
	// Get card real UID
	realuid_str, err = desfiretag.CardUID()
	if err != nil {
		// TODO: Retry only on RF-errors
		goto RETRY
	}
	realuid, err = hex.DecodeString(realuid_str)
	if err != nil {
		// Not an RF problem, so retrying cannot help.
		fmt.Println(fmt.Sprintf("ERROR: Failed to parse real UID (%s), skipping tag", err))
		goto FAIL
	}
	fmt.Println("Got real UID:", hex.EncodeToString(realuid));
	// Calculate the diversified keys
	err = recalculate_diversified_keys(realuid[:])
	if err != nil {
		fmt.Println(fmt.Sprintf("ERROR: Failed to get diversified ACL keys (%s), skipping tag", err))
		goto FAIL
	}
	// Check for revoked key; on error, fail closed by treating the
	// card as revoked.
	revoked_found, err = check_revoked(desfiretag, db, realuid_str)
	if err != nil {
		fmt.Println(fmt.Sprintf("check_revoked returned err (%s)", err))
		revoked_found = true
	}
	if revoked_found {
		goto FAIL
	}
	acl, err = read_and_parse_acl_file(desfiretag)
	if err != nil {
		goto RETRY
	}
	//fmt.Println("DEBUG: acl:", acl)
	// Get (possibly updated) ACL from DB, if returns error then UID is not known
	db_acl, err = get_db_acl(desfiretag, db, realuid_str)
	if err != nil {
		// No match
		fmt.Println(fmt.Sprintf("WARNING: key %s, not found in DB", realuid_str))
		// TODO: Should we null the ACL file just in case, because any key that is personalized but not either valid or revoked is in a weird limbo
		goto FAIL
	}
	// Check for ACL update: the DB is authoritative, so rewrite the
	// card when its stored ACL differs.
	if (acl != db_acl) {
		fmt.Println(fmt.Sprintf("NOTICE: card ACL (%x) does not match DB (%x), ", acl, db_acl))
		// Update the ACL file on card (re-encoded as unsigned varint)
		newaclbytes := make([]byte, 8)
		n := binary.PutUvarint(newaclbytes, db_acl)
		if (n < 0) {
			fmt.Println(fmt.Sprintf("binary.PutUvarint returned %d, skipping tag", n))
			goto FAIL
		}
		err := update_acl_file(desfiretag, &newaclbytes)
		if err != nil {
			goto RETRY
		}
	}
	// Now check the ACL match: grant requires at least one shared bit.
	if (db_acl & required_acl) == 0 {
		fmt.Println(fmt.Sprintf("NOTICE: Found valid key %s, but ACL (%x) not granted", realuid_str, required_acl))
		// TODO: Publish a ZMQ message or something
		goto FAIL
	}
	fmt.Println(fmt.Sprintf("SUCCESS: Access granted to %s with ACL (%x)", realuid_str, db_acl))
	return true, nil
FAIL:
	if connected {
		_ = desfiretag.Disconnect()
	}
	return false, err
}
// main wires everything together: loads configuration, opens the key
// database, the NFC reader and the GPIO outputs, then loops forever
// polling for tags.  Each DESFire tag is checked with a one-second
// timeout; on success the green LED and relay are pulsed, otherwise
// the red LED.
func main() {
	// TODO: configure this somewhere
	required_acl := uint64(1)

	init_appinfo()

	gpiomap, err := helpers.LoadYAMLFile("gpio.yaml")
	if err != nil {
		panic(err)
	}

	db, err := sql.Open("sqlite3", "./keys.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Open NFC device
	nfcd, err := nfc.Open("");
	if err != nil {
		panic(err);
	}
	defer nfcd.Close()

	// Start heartbeat goroutine
	//go heartbeat()

	// Get open GPIO pins for our outputs
	green_led, err := gpio.OpenPin(gpiomap["green_led"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		fmt.Printf("err opening green_led! %s\n", err)
		return
	}
	red_led, err := gpio.OpenPin(gpiomap["red_led"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		// NOTE(review): message says green_led but this is red_led — copy-paste.
		fmt.Printf("err opening green_led! %s\n", err)
		return
	}
	relay, err := gpio.OpenPin(gpiomap["relay"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		fmt.Printf("err opening relay! %s\n", err)
		return
	}

	// turn the leds off on exit
	/*
	exit_ch := make(chan os.Signal, 1)
	signal.Notify(exit_ch, os.Interrupt)
	signal.Notify(exit_ch, os.Kill)
	go func() {
	for _ = range exit_ch {
	fmt.Printf("\nClearing and unexporting the pins.\n")
	go clear_and_close(green_led)
	go clear_and_close(red_led)
	go clear_and_close(relay)
	os.Exit(0)
	}
	}()
	*/

	fmt.Println("Starting mainloop")
	// mainloop
	for {
		// Poll for tags (busy-wait with a 100ms sleep between polls)
		var tags []freefare.Tag
		for {
			tags, err = freefare.GetTags(nfcd);
			if err != nil {
				// TODO: Probably should not panic here
				panic(err)
			}
			if len(tags) > 0 {
				break
			}
			time.Sleep(100 * time.Millisecond)
			//fmt.Println("...polling")
		}

		valid_found := false
		for i := 0; i < len(tags); i++ {
			tag := tags[i]
			if (tag.Type() != freefare.DESFire) {
				fmt.Println(fmt.Sprintf("Non-DESFire tag %s skipped", tag.UID()))
				continue
			}
			desfiretag := tag.(freefare.DESFireTag)
			// Run the check in a goroutine so a stuck card cannot hang
			// the mainloop; buffered channel so the sender never blocks.
			ch := make(chan TagResult, 1)
			go check_tag_channel(&desfiretag, db, required_acl, ch)
			select {
			case res, ok := <-ch:
				if !ok {
					// Channel closed
				} else {
					if res.is_valid {
						valid_found = true
					}
				}
			case <-time.After(time.Second * 1):
				fmt.Println("WARNING: Timeout while checking tag")
				// TODO: Do we even need this, probably not...
				// _ = desfiretag.Disconnect()
			}
		}
		// Mark for GC
		tags = nil

		// Drive the outputs asynchronously so polling resumes at once.
		if !valid_found {
			fmt.Println("Access DENIED")
			go pulse_gpio(red_led, gpiomap["red_led"].(map[interface{}]interface{})["time"].(int))
		} else {
			go pulse_gpio(green_led, gpiomap["green_led"].(map[interface{}]interface{})["time"].(int))
			go pulse_gpio(relay, gpiomap["relay"].(map[interface{}]interface{})["time"].(int))
		}

		// Run GC at this time
		runtime.GC()
		// Wait a moment before continuing with fast polling
		time.Sleep(500 * time.Millisecond)
	}
}
We do not need the tag here.
package main
import (
"fmt"
/*
"os"
"os/signal"
*/
"runtime"
"errors"
"time"
"encoding/hex"
"encoding/binary"
"database/sql"
"github.com/fuzxxl/nfc/2.0/nfc"
"github.com/fuzxxl/freefare/0.3/freefare"
_ "github.com/mattn/go-sqlite3"
"github.com/davecheney/gpio"
"./keydiversification"
"./helpers"
)
func heartbeat() {
for {
time.Sleep(2000 * time.Millisecond)
fmt.Println("Dunka-dunk")
}
}
func pulse_gpio(pin gpio.Pin, ms int) {
pin.Set()
time.Sleep(time.Duration(ms) * time.Millisecond)
pin.Clear()
}
func clear_and_close(pin gpio.Pin) {
pin.Clear()
pin.Close()
}
// Use structs to pass data around so I can refactor
type AppInfo struct {
aid freefare.DESFireAid
aidbytes []byte
sysid []byte
acl_read_base []byte
acl_write_base []byte
acl_file_id byte
}
type KeyChain struct {
uid_read_key_id byte
acl_read_key_id byte
acl_write_key_id byte
uid_read_key *freefare.DESFireKey
acl_read_key *freefare.DESFireKey
acl_write_key *freefare.DESFireKey
}
// To pass multiple values over a channel
type TagResult struct {
is_valid bool
err error
}
var (
keychain = KeyChain{}
appinfo = AppInfo{}
)
func init_appinfo() {
keymap, err := helpers.LoadYAMLFile("keys.yaml")
if err != nil {
panic(err)
}
appmap, err := helpers.LoadYAMLFile("apps.yaml")
if err != nil {
panic(err)
}
// Application-id
appinfo.aid, err = helpers.String2aid(appmap["hacklab_acl"].(map[interface{}]interface{})["aid"].(string))
if err != nil {
panic(err)
}
// Needed for diversification
appinfo.aidbytes = helpers.Aid2bytes(appinfo.aid)
appinfo.sysid, err = hex.DecodeString(appmap["hacklab_acl"].(map[interface{}]interface{})["sysid"].(string))
if err != nil {
panic(err)
}
appinfo.acl_file_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_file_id"].(string))
if err != nil {
panic(err)
}
// Key id numbers from config
keychain.uid_read_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["uid_read_key_id"].(string))
if err != nil {
panic(err)
}
keychain.acl_read_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_read_key_id"].(string))
if err != nil {
panic(err)
}
keychain.acl_write_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_write_key_id"].(string))
if err != nil {
panic(err)
}
// The static app key to read UID
keychain.uid_read_key, err = helpers.String2aeskey(keymap["uid_read_key"].(string))
if err != nil {
panic(err)
}
// Bases for the diversified keys
appinfo.acl_read_base, err = hex.DecodeString(keymap["acl_read_key"].(string))
if err != nil {
panic(err)
}
appinfo.acl_write_base, err = hex.DecodeString(keymap["acl_write_key"].(string))
if err != nil {
panic(err)
}
}
func recalculate_diversified_keys(realuid []byte) error {
acl_read_bytes, err := keydiversification.AES128(appinfo.acl_read_base[:], appinfo.aidbytes[:], realuid[:], appinfo.sysid[:])
if err != nil {
return err
}
acl_write_bytes, err := keydiversification.AES128(appinfo.acl_write_base[:], appinfo.aidbytes[:], realuid[:], appinfo.sysid[:])
if err != nil {
return err
}
keychain.acl_read_key = helpers.Bytes2aeskey(acl_read_bytes)
keychain.acl_write_key = helpers.Bytes2aeskey(acl_write_bytes)
return nil
}
func update_acl_file(desfiretag *freefare.DESFireTag, newdata *[]byte) error {
fmt.Print("Re-auth with ACL write key, ")
err := desfiretag.Authenticate(keychain.acl_write_key_id,*keychain.acl_write_key)
if err != nil {
return err
}
fmt.Println("Done")
fmt.Print("Overwriting ACL data file, ")
byteswritten, err := desfiretag.WriteData(appinfo.acl_file_id, 0, *newdata)
if err != nil {
return err
}
if (byteswritten < 8) {
fmt.Println(fmt.Sprintf("WARNING: WriteData wrote %d bytes, 8 expected", byteswritten))
}
fmt.Println("Done")
return nil
}
// check_revoked looks realuid_str up in the `revoked` table.  When the
// key is revoked, the card's ACL file is zeroed out.  On any error the
// function returns revoked=true so that callers fail closed.
func check_revoked(desfiretag *freefare.DESFireTag, db *sql.DB, realuid_str string) (bool, error) {
	revoked_found := false
	stmt, err := db.Prepare("SELECT rowid FROM revoked where uid=?")
	if err != nil {
		return true, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(realuid_str)
	if err != nil {
		return true, err
	}
	defer rows.Close()
	for rows.Next() {
		revoked_found = true
		var rowid int64
		// Previously the Scan error was silently discarded; a scan
		// failure would have logged a bogus rowid.  Fail closed.
		if err := rows.Scan(&rowid); err != nil {
			return true, err
		}
		fmt.Printf("WARNING: Found REVOKED key %s on row %d\n", realuid_str, rowid)
		// TODO: Publish a ZMQ message or something

		// Null the ACL file on card
		nullaclbytes := make([]byte, 8)
		err := update_acl_file(desfiretag, &nullaclbytes)
		if err != nil {
			return revoked_found, err
		}
	}
	// Surface iteration errors (previously unchecked): a cursor error
	// must not be mistaken for "not revoked".
	if err := rows.Err(); err != nil {
		return true, err
	}
	return revoked_found, nil
}
// read_and_parse_acl_file re-authenticates with the diversified ACL
// read key, reads the 8-byte ACL file from the card and decodes it as
// an unsigned varint, returning the ACL bitmask.
func read_and_parse_acl_file(desfiretag *freefare.DESFireTag) (uint64, error) {
	fmt.Print("Re-auth with ACL read key, ")
	err := desfiretag.Authenticate(keychain.acl_read_key_id, *keychain.acl_read_key)
	if err != nil {
		return 0, err
	}
	fmt.Println("Done")

	aclbytes := make([]byte, 8)
	fmt.Print("Reading ACL data file, ")
	bytesread, err := desfiretag.ReadData(appinfo.acl_file_id, 0, aclbytes)
	if err != nil {
		return 0, err
	}
	if bytesread < 8 {
		fmt.Printf("WARNING: ReadData read %d bytes, 8 expected\n", bytesread)
	}
	acl, n := binary.Uvarint(aclbytes)
	if n <= 0 {
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
		return 0, fmt.Errorf("ERROR: binary.Uvarint returned %d, skipping tag", n)
	}
	fmt.Println("Done")
	return acl, nil
}
// get_db_acl returns the ACL bitmask stored in the `keys` table for
// realuid_str.  An error return means the UID is unknown or the query
// failed.
func get_db_acl(db *sql.DB, realuid_str string) (uint64, error) {
	stmt, err := db.Prepare("SELECT rowid,acl FROM keys where uid=?")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(realuid_str)
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	for rows.Next() {
		var rowid int64
		var acl int64
		// The Scan error was previously ignored, which could have
		// returned a zero ACL as if it were valid data.
		if err := rows.Scan(&rowid, &acl); err != nil {
			return 0, err
		}
		return uint64(acl), nil
	}
	// Distinguish cursor errors from a genuinely unknown UID.
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return 0, errors.New("UID not found")
}
func check_tag_channel(desfiretag *freefare.DESFireTag, db *sql.DB, required_acl uint64, ch chan TagResult) {
result, err := check_tag(desfiretag, db, required_acl)
ch <- TagResult{result, err}
close(ch)
}
func check_tag(desfiretag *freefare.DESFireTag, db *sql.DB, required_acl uint64) (bool, error) {
const errlimit = 3
var err error = nil
var realuid_str string
var realuid []byte
acl := uint64(0)
db_acl := uint64(0)
revoked_found := false
errcnt := 0
connected := false
// TODO: Add a timeout for all of this, if not done in 1s or so we have a problem...
RETRY:
if err != nil {
// TODO: Retry only on RF-errors
errcnt++
if errcnt > errlimit {
fmt.Println(fmt.Sprintf("failed (%s), retry-limit exceeded (%d/%d), skipping tag", err, errcnt, errlimit))
goto FAIL
}
fmt.Println(fmt.Sprintf("failed (%s), retrying (%d)", err, errcnt))
}
if connected {
_ = desfiretag.Disconnect()
}
// Connect to this tag
fmt.Print(fmt.Sprintf("Connecting to %s, ", desfiretag.UID()))
err = desfiretag.Connect()
if err != nil {
goto RETRY
}
fmt.Println("done")
connected = true
fmt.Print(fmt.Sprintf("Selecting application %d, ", appinfo.aid.Aid()))
err = desfiretag.SelectApplication(appinfo.aid);
if err != nil {
goto RETRY
}
fmt.Println("Done")
fmt.Print("Authenticating, ")
err = desfiretag.Authenticate(keychain.uid_read_key_id,*keychain.uid_read_key)
if err != nil {
goto RETRY
}
fmt.Println("Done")
// Get card real UID
realuid_str, err = desfiretag.CardUID()
if err != nil {
// TODO: Retry only on RF-errors
goto RETRY
}
realuid, err = hex.DecodeString(realuid_str)
if err != nil {
fmt.Println(fmt.Sprintf("ERROR: Failed to parse real UID (%s), skipping tag", err))
goto FAIL
}
fmt.Println("Got real UID:", hex.EncodeToString(realuid));
// Calculate the diversified keys
err = recalculate_diversified_keys(realuid[:])
if err != nil {
fmt.Println(fmt.Sprintf("ERROR: Failed to get diversified ACL keys (%s), skipping tag", err))
goto FAIL
}
// Check for revoked key
revoked_found, err = check_revoked(desfiretag, db, realuid_str)
if err != nil {
fmt.Println(fmt.Sprintf("check_revoked returned err (%s)", err))
revoked_found = true
}
if revoked_found {
goto FAIL
}
acl, err = read_and_parse_acl_file(desfiretag)
if err != nil {
goto RETRY
}
//fmt.Println("DEBUG: acl:", acl)
// Get (possibly updated) ACL from DB, if returns error then UID is not known
db_acl, err = get_db_acl(db, realuid_str)
if err != nil {
// No match
fmt.Println(fmt.Sprintf("WARNING: key %s, not found in DB", realuid_str))
// TODO: Should we null the ACL file just in case, because any key that is personalized but not either valid or revoked is in a weird limbo
goto FAIL
}
// Check for ACL update
if (acl != db_acl) {
fmt.Println(fmt.Sprintf("NOTICE: card ACL (%x) does not match DB (%x), ", acl, db_acl))
// Update the ACL file on card
newaclbytes := make([]byte, 8)
n := binary.PutUvarint(newaclbytes, db_acl)
if (n < 0) {
fmt.Println(fmt.Sprintf("binary.PutUvarint returned %d, skipping tag", n))
goto FAIL
}
err := update_acl_file(desfiretag, &newaclbytes)
if err != nil {
goto RETRY
}
}
// Now check the ACL match
if (db_acl & required_acl) == 0 {
fmt.Println(fmt.Sprintf("NOTICE: Found valid key %s, but ACL (%x) not granted", realuid_str, required_acl))
// TODO: Publish a ZMQ message or something
goto FAIL
}
fmt.Println(fmt.Sprintf("SUCCESS: Access granted to %s with ACL (%x)", realuid_str, db_acl))
return true, nil
FAIL:
if connected {
_ = desfiretag.Disconnect()
}
return false, err
}
func main() {
// TODO: configure this somewhere
required_acl := uint64(1)
init_appinfo()
gpiomap, err := helpers.LoadYAMLFile("gpio.yaml")
if err != nil {
panic(err)
}
db, err := sql.Open("sqlite3", "./keys.db")
if err != nil {
panic(err)
}
defer db.Close()
// Open NFC device
nfcd, err := nfc.Open("");
if err != nil {
panic(err);
}
defer nfcd.Close()
// Start heartbeat goroutine
//go heartbeat()
// Get open GPIO pins for our outputs
green_led, err := gpio.OpenPin(gpiomap["green_led"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
if err != nil {
fmt.Printf("err opening green_led! %s\n", err)
return
}
red_led, err := gpio.OpenPin(gpiomap["red_led"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
if err != nil {
fmt.Printf("err opening green_led! %s\n", err)
return
}
relay, err := gpio.OpenPin(gpiomap["relay"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
if err != nil {
fmt.Printf("err opening relay! %s\n", err)
return
}
// turn the leds off on exit
/*
exit_ch := make(chan os.Signal, 1)
signal.Notify(exit_ch, os.Interrupt)
signal.Notify(exit_ch, os.Kill)
go func() {
for _ = range exit_ch {
fmt.Printf("\nClearing and unexporting the pins.\n")
go clear_and_close(green_led)
go clear_and_close(red_led)
go clear_and_close(relay)
os.Exit(0)
}
}()
*/
fmt.Println("Starting mainloop")
// mainloop
for {
// Poll for tags
var tags []freefare.Tag
for {
tags, err = freefare.GetTags(nfcd);
if err != nil {
// TODO: Probably should not panic here
panic(err)
}
if len(tags) > 0 {
break
}
time.Sleep(100 * time.Millisecond)
//fmt.Println("...polling")
}
valid_found := false
for i := 0; i < len(tags); i++ {
tag := tags[i]
if (tag.Type() != freefare.DESFire) {
fmt.Println(fmt.Sprintf("Non-DESFire tag %s skipped", tag.UID()))
continue
}
desfiretag := tag.(freefare.DESFireTag)
ch := make(chan TagResult, 1)
go check_tag_channel(&desfiretag, db, required_acl, ch)
select {
case res, ok := <-ch:
if !ok {
// Channel closed
} else {
if res.is_valid {
valid_found = true
}
}
case <-time.After(time.Second * 1):
fmt.Println("WARNING: Timeout while checking tag")
// TODO: Do we even need this, probably not...
// _ = desfiretag.Disconnect()
}
}
// Mark for GC
tags = nil
if !valid_found {
fmt.Println("Access DENIED")
go pulse_gpio(red_led, gpiomap["red_led"].(map[interface{}]interface{})["time"].(int))
} else {
go pulse_gpio(green_led, gpiomap["green_led"].(map[interface{}]interface{})["time"].(int))
go pulse_gpio(relay, gpiomap["relay"].(map[interface{}]interface{})["time"].(int))
}
// Run GC at this time
runtime.GC()
// Wait a moment before continuing with fast polling
time.Sleep(500 * time.Millisecond)
}
}
|
package main
import (
"fmt"
/*
"os"
"os/signal"
*/
//"runtime"
"errors"
"time"
"encoding/hex"
"encoding/binary"
"database/sql"
"github.com/fuzxxl/nfc/2.0/nfc"
"github.com/fuzxxl/freefare/0.3/freefare"
_ "github.com/mattn/go-sqlite3"
"github.com/davecheney/gpio"
"./keydiversification"
"./helpers"
"github.com/davecheney/profile"
)
func heartbeat() {
for {
time.Sleep(2000 * time.Millisecond)
fmt.Println("Dunka-dunk")
}
}
func pulse_gpio(pin gpio.Pin, ms int) {
pin.Set()
time.Sleep(time.Duration(ms) * time.Millisecond)
pin.Clear()
}
func clear_and_close(pin gpio.Pin) {
pin.Clear()
pin.Close()
}
// Use structs to pass data around so I can refactor
type AppInfo struct {
aid freefare.DESFireAid
aidbytes []byte
sysid []byte
acl_read_base []byte
acl_write_base []byte
acl_file_id byte
}
type KeyChain struct {
uid_read_key_id byte
acl_read_key_id byte
acl_write_key_id byte
uid_read_key *freefare.DESFireKey
acl_read_key *freefare.DESFireKey
acl_write_key *freefare.DESFireKey
}
// To pass multiple values over a channel
type TagResult struct {
is_valid bool
err error
}
var (
keychain = KeyChain{}
appinfo = AppInfo{}
)
func init_appinfo() {
keymap, err := helpers.LoadYAMLFile("keys.yaml")
if err != nil {
panic(err)
}
appmap, err := helpers.LoadYAMLFile("apps.yaml")
if err != nil {
panic(err)
}
// Application-id
appinfo.aid, err = helpers.String2aid(appmap["hacklab_acl"].(map[interface{}]interface{})["aid"].(string))
if err != nil {
panic(err)
}
// Needed for diversification
appinfo.aidbytes = helpers.Aid2bytes(appinfo.aid)
appinfo.sysid, err = hex.DecodeString(appmap["hacklab_acl"].(map[interface{}]interface{})["sysid"].(string))
if err != nil {
panic(err)
}
appinfo.acl_file_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_file_id"].(string))
if err != nil {
panic(err)
}
// Key id numbers from config
keychain.uid_read_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["uid_read_key_id"].(string))
if err != nil {
panic(err)
}
keychain.acl_read_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_read_key_id"].(string))
if err != nil {
panic(err)
}
keychain.acl_write_key_id, err = helpers.String2byte(appmap["hacklab_acl"].(map[interface{}]interface{})["acl_write_key_id"].(string))
if err != nil {
panic(err)
}
// The static app key to read UID
keychain.uid_read_key, err = helpers.String2aeskey(keymap["uid_read_key"].(string))
if err != nil {
panic(err)
}
// Bases for the diversified keys
appinfo.acl_read_base, err = hex.DecodeString(keymap["acl_read_key"].(string))
if err != nil {
panic(err)
}
appinfo.acl_write_base, err = hex.DecodeString(keymap["acl_write_key"].(string))
if err != nil {
panic(err)
}
}
func recalculate_diversified_keys(realuid []byte) error {
acl_read_bytes, err := keydiversification.AES128(appinfo.acl_read_base[:], appinfo.aidbytes[:], realuid[:], appinfo.sysid[:])
if err != nil {
return err
}
acl_write_bytes, err := keydiversification.AES128(appinfo.acl_write_base[:], appinfo.aidbytes[:], realuid[:], appinfo.sysid[:])
if err != nil {
return err
}
keychain.acl_read_key = helpers.Bytes2aeskey(acl_read_bytes)
keychain.acl_write_key = helpers.Bytes2aeskey(acl_write_bytes)
return nil
}
func update_acl_file(desfiretag *freefare.DESFireTag, newdata *[]byte) error {
fmt.Print("Re-auth with ACL write key, ")
err := desfiretag.Authenticate(keychain.acl_write_key_id,*keychain.acl_write_key)
if err != nil {
return err
}
fmt.Println("Done")
fmt.Print("Overwriting ACL data file, ")
byteswritten, err := desfiretag.WriteData(appinfo.acl_file_id, 0, *newdata)
if err != nil {
return err
}
if (byteswritten < 8) {
fmt.Println(fmt.Sprintf("WARNING: WriteData wrote %d bytes, 8 expected", byteswritten))
}
fmt.Println("Done")
return nil
}
// check_revoked looks realuid_str up in the `revoked` table.  When the
// key is revoked, the card's ACL file is zeroed out.  On any error the
// function returns revoked=true so that callers fail closed.
func check_revoked(desfiretag *freefare.DESFireTag, db *sql.DB, realuid_str string) (bool, error) {
	revoked_found := false
	stmt, err := db.Prepare("SELECT rowid FROM revoked where uid=?")
	if err != nil {
		return true, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(realuid_str)
	if err != nil {
		return true, err
	}
	defer rows.Close()
	for rows.Next() {
		revoked_found = true
		var rowid int64
		// Previously the Scan error was silently discarded; a scan
		// failure would have logged a bogus rowid.  Fail closed.
		if err := rows.Scan(&rowid); err != nil {
			return true, err
		}
		fmt.Printf("WARNING: Found REVOKED key %s on row %d\n", realuid_str, rowid)
		// TODO: Publish a ZMQ message or something

		// Null the ACL file on card
		nullaclbytes := make([]byte, 8)
		err := update_acl_file(desfiretag, &nullaclbytes)
		if err != nil {
			return revoked_found, err
		}
	}
	// Surface iteration errors (previously unchecked): a cursor error
	// must not be mistaken for "not revoked".
	if err := rows.Err(); err != nil {
		return true, err
	}
	return revoked_found, nil
}
// read_and_parse_acl_file re-authenticates with the diversified ACL read key,
// reads the 8-byte ACL data file from the card and decodes it as an unsigned
// varint. Returns the decoded ACL bitmask, or an error if auth, read or
// decode fails. A short read is only warned about.
func read_and_parse_acl_file(desfiretag *freefare.DESFireTag) (uint64, error) {
	fmt.Print("Re-auth with ACL read key, ")
	err := desfiretag.Authenticate(keychain.acl_read_key_id, *keychain.acl_read_key)
	if err != nil {
		return 0, err
	}
	fmt.Println("Done")
	aclbytes := make([]byte, 8)
	fmt.Print("Reading ACL data file, ")
	bytesread, err := desfiretag.ReadData(appinfo.acl_file_id, 0, aclbytes)
	if err != nil {
		return 0, err
	}
	if bytesread < 8 {
		fmt.Println(fmt.Sprintf("WARNING: ReadData read %d bytes, 8 expected", bytesread))
	}
	acl, n := binary.Uvarint(aclbytes)
	if n <= 0 {
		// Fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)) (S1028).
		return 0, fmt.Errorf("ERROR: binary.Uvarint returned %d, skipping tag", n)
	}
	fmt.Println("Done")
	return acl, nil
}
// get_db_acl returns the authoritative ACL bitmask for realuid_str from the
// "keys" table. Returns an error when the UID is unknown or the query fails.
func get_db_acl(db *sql.DB, realuid_str string) (uint64, error) {
	stmt, err := db.Prepare("SELECT rowid,acl FROM keys where uid=?")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(realuid_str)
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	// Only the first matching row matters; UIDs are expected to be unique.
	for rows.Next() {
		var rowid int64
		var acl int64
		// Fix: the Scan error was previously ignored.
		if err := rows.Scan(&rowid, &acl); err != nil {
			return 0, err
		}
		return uint64(acl), nil
	}
	// Fix: surface iteration errors instead of misreporting "UID not found".
	if err := rows.Err(); err != nil {
		return 0, err
	}
	// Fix: errors.New(fmt.Sprintf(...)) with no format verbs → errors.New.
	return 0, errors.New("UID not found")
}
// check_tag_channel wraps check_tag for use in a goroutine: it delivers the
// outcome over ch and closes the channel when done.
func check_tag_channel(desfiretag *freefare.DESFireTag, db *sql.DB, required_acl uint64, ch chan TagResult) {
	defer close(ch)
	ok, err := check_tag(desfiretag, db, required_acl)
	ch <- TagResult{is_valid: ok, err: err}
}
// check_tag runs the full authorization sequence for one DESFire tag:
// connect, select our application, authenticate with the static UID-read key,
// read the card's real UID, derive the diversified ACL keys, reject revoked
// cards, read the ACL file, sync it with the DB value, and finally test the
// ACL against required_acl. Returns true only when access should be granted.
// Failures that look retryable jump back to RETRY (up to errlimit attempts);
// hard failures jump to FAIL, which disconnects the tag.
func check_tag(desfiretag *freefare.DESFireTag, db *sql.DB, required_acl uint64) (bool, error) {
	const errlimit = 3
	// Locals are declared up front because goto may not jump over declarations.
	var err error = nil
	var realuid_str string
	var realuid []byte
	acl := uint64(0)
	db_acl := uint64(0)
	revoked_found := false
	errcnt := 0
	connected := false
	// TODO: Add a timeout for all of this, if not done in 1s or so we have a problem...
RETRY:
	// Entered with err != nil after a retryable failure; the initial pass
	// (err == nil) falls straight through to the connect below.
	if err != nil {
		// TODO: Retry only on RF-errors
		errcnt++
		if errcnt > errlimit {
			fmt.Println(fmt.Sprintf("failed (%s), retry-limit exceeded (%d/%d), skipping tag", err, errcnt, errlimit))
			goto FAIL
		}
		fmt.Println(fmt.Sprintf("failed (%s), retrying (%d)", err, errcnt))
	}
	if connected {
		_ = desfiretag.Disconnect()
	}
	// Connect to this tag
	fmt.Print(fmt.Sprintf("Connecting to %s, ", desfiretag.UID()))
	err = desfiretag.Connect()
	if err != nil {
		goto RETRY
	}
	fmt.Println("done")
	connected = true
	fmt.Print(fmt.Sprintf("Selecting application %d, ", appinfo.aid.Aid()))
	err = desfiretag.SelectApplication(appinfo.aid)
	if err != nil {
		goto RETRY
	}
	fmt.Println("Done")
	fmt.Print("Authenticating, ")
	err = desfiretag.Authenticate(keychain.uid_read_key_id, *keychain.uid_read_key)
	if err != nil {
		goto RETRY
	}
	fmt.Println("Done")
	// Get card real UID
	realuid_str, err = desfiretag.CardUID()
	if err != nil {
		// TODO: Retry only on RF-errors
		goto RETRY
	}
	realuid, err = hex.DecodeString(realuid_str)
	if err != nil {
		fmt.Println(fmt.Sprintf("ERROR: Failed to parse real UID (%s), skipping tag", err))
		goto FAIL
	}
	fmt.Println("Got real UID:", hex.EncodeToString(realuid))
	// Calculate the diversified keys
	err = recalculate_diversified_keys(realuid[:])
	if err != nil {
		fmt.Println(fmt.Sprintf("ERROR: Failed to get diversified ACL keys (%s), skipping tag", err))
		goto FAIL
	}
	// Check for revoked key; a failed check is treated as revoked (fail safe).
	revoked_found, err = check_revoked(desfiretag, db, realuid_str)
	if err != nil {
		fmt.Println(fmt.Sprintf("check_revoked returned err (%s)", err))
		revoked_found = true
	}
	if revoked_found {
		goto FAIL
	}
	acl, err = read_and_parse_acl_file(desfiretag)
	if err != nil {
		goto RETRY
	}
	//fmt.Println("DEBUG: acl:", acl)
	// Get (possibly updated) ACL from DB, if returns error then UID is not known
	db_acl, err = get_db_acl(db, realuid_str)
	if err != nil {
		// No match
		fmt.Println(fmt.Sprintf("WARNING: key %s, not found in DB", realuid_str))
		// TODO: Should we null the ACL file just in case, because any key that is personalized but not either valid or revoked is in a weird limbo
		goto FAIL
	}
	// Check for ACL update
	if acl != db_acl {
		fmt.Println(fmt.Sprintf("NOTICE: card ACL (%x) does not match DB (%x), ", acl, db_acl))
		// Update the ACL file on card
		newaclbytes := make([]byte, 8)
		n := binary.PutUvarint(newaclbytes, db_acl)
		if n < 0 {
			fmt.Println(fmt.Sprintf("binary.PutUvarint returned %d, skipping tag", n))
			goto FAIL
		}
		// Fix: this previously used `err :=`, shadowing the outer err; a
		// persistent write failure then re-entered RETRY with err == nil and
		// never incremented errcnt — an infinite retry loop.
		err = update_acl_file(desfiretag, &newaclbytes)
		if err != nil {
			goto RETRY
		}
	}
	// Now check the ACL match
	if (db_acl & required_acl) == 0 {
		fmt.Println(fmt.Sprintf("NOTICE: Found valid key %s, but ACL (%x) not granted", realuid_str, required_acl))
		// TODO: Publish a ZMQ message or something
		goto FAIL
	}
	fmt.Println(fmt.Sprintf("SUCCESS: Access granted to %s with ACL (%x)", realuid_str, db_acl))
	return true, nil
FAIL:
	if connected {
		_ = desfiretag.Disconnect()
	}
	return false, err
}
// main wires everything together: profiling, YAML configuration, key
// material, the SQLite key database, the NFC reader and the GPIO outputs,
// then loops forever polling for tags and pulsing the LEDs / door relay
// according to the access decision.
func main() {
	cfg := profile.Config {
		MemProfile: true,
		NoShutdownHook: true, // do not hook SIGINT
	}
	// p.Stop() must be called before the program exits to
	// ensure profiling information is written to disk.
	p := profile.Start(&cfg)
	defer p.Stop()
	// TODO: configure this somewhere
	required_acl := uint64(1)
	init_appinfo()
	gpiomap, err := helpers.LoadYAMLFile("gpio.yaml")
	if err != nil {
		panic(err)
	}
	db, err := sql.Open("sqlite3", "./keys.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Open NFC device
	nfcd, err := nfc.Open("");
	if err != nil {
		panic(err);
	}
	defer nfcd.Close()
	// Start heartbeat goroutine
	//go heartbeat()
	// Get open GPIO pins for our outputs
	green_led, err := gpio.OpenPin(gpiomap["green_led"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		panic(err)
	}
	red_led, err := gpio.OpenPin(gpiomap["red_led"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		panic(err)
	}
	relay, err := gpio.OpenPin(gpiomap["relay"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		panic(err)
	}
	// turn the leds off on exit
	/*
	exit_ch := make(chan os.Signal, 1)
	signal.Notify(exit_ch, os.Interrupt)
	signal.Notify(exit_ch, os.Kill)
	go func() {
		for _ = range exit_ch {
			fmt.Printf("\nClearing and unexporting the pins.\n")
			go clear_and_close(green_led)
			go clear_and_close(red_led)
			go clear_and_close(relay)
			os.Exit(0)
		}
	}()
	*/
	fmt.Println("Starting mainloop")
	// mainloop
	for {
		// Poll for tags; busy-wait with a 100ms sleep until at least one shows up.
		var tags []freefare.Tag
		for {
			tags, err = freefare.GetTags(nfcd);
			if err != nil {
				// TODO: Probably should not panic here
				panic(err)
			}
			if len(tags) > 0 {
				break
			}
			time.Sleep(100 * time.Millisecond)
			//fmt.Println("...polling")
		}
		valid_found := false
		for i := 0; i < len(tags); i++ {
			tag := tags[i]
			if (tag.Type() != freefare.DESFire) {
				fmt.Println(fmt.Sprintf("Non-DESFire tag %s skipped", tag.UID()))
				continue
			}
			desfiretag := tag.(freefare.DESFireTag)
			// Run the check in a goroutine so a 1s deadline can be enforced;
			// the buffered channel lets the worker finish even after a timeout.
			ch := make(chan TagResult, 1)
			go check_tag_channel(&desfiretag, db, required_acl, ch)
			select {
			case res, ok := <-ch:
				if !ok {
					// Channel closed
				} else {
					if res.is_valid {
						valid_found = true
					}
				}
			case <-time.After(time.Second * 1):
				fmt.Println("WARNING: Timeout while checking tag")
				// TODO: Do we even need this, probably not...
				// _ = desfiretag.Disconnect()
			}
		}
		// Mark for GC
		tags = nil
		// Any single valid tag grants access for this polling round.
		if !valid_found {
			fmt.Println("Access DENIED")
			go pulse_gpio(red_led, gpiomap["red_led"].(map[interface{}]interface{})["time"].(int))
		} else {
			go pulse_gpio(green_led, gpiomap["green_led"].(map[interface{}]interface{})["time"].(int))
			go pulse_gpio(relay, gpiomap["relay"].(map[interface{}]interface{})["time"].(int))
		}
		// Run GC at this time
		//runtime.GC()
		// Wait a moment before continuing with fast polling
		time.Sleep(500 * time.Millisecond)
	}
}
Move the ACL overwrite into check_tag so we have one less pointer to pass around.
package main
import (
"fmt"
/*
"os"
"os/signal"
*/
//"runtime"
"errors"
"time"
"encoding/hex"
"encoding/binary"
"database/sql"
"github.com/fuzxxl/nfc/2.0/nfc"
"github.com/fuzxxl/freefare/0.3/freefare"
_ "github.com/mattn/go-sqlite3"
"github.com/davecheney/gpio"
"./keydiversification"
"./helpers"
"github.com/davecheney/profile"
)
// heartbeat prints a liveness message every two seconds, forever.
// Currently disabled (the go call in main is commented out).
func heartbeat() {
	for {
		time.Sleep(2 * time.Second)
		fmt.Println("Dunka-dunk")
	}
}
// pulse_gpio drives pin high for ms milliseconds and then low again.
func pulse_gpio(pin gpio.Pin, ms int) {
	pin.Set()
	defer pin.Clear()
	time.Sleep(time.Duration(ms) * time.Millisecond)
}
// clear_and_close drives the pin low and then releases the GPIO pin.
// Used from the (currently commented-out) shutdown handler in main.
func clear_and_close(pin gpio.Pin) {
	pin.Clear()
	pin.Close()
}
// Use structs to pass data around so I can refactor
// AppInfo bundles the static DESFire application parameters loaded from
// apps.yaml by init_appinfo.
type AppInfo struct {
	aid freefare.DESFireAid // application id on the card
	aidbytes []byte // aid as raw bytes, fed into key diversification
	sysid []byte // system id, fed into key diversification
	acl_read_base []byte // base key material for the diversified ACL read key
	acl_write_base []byte // base key material for the diversified ACL write key
	acl_file_id byte // id of the card data file holding the ACL bitmask
}
// KeyChain holds the key slot ids and key material used to talk to the card:
// one static key for reading the UID and two per-card diversified keys for
// the ACL file (filled in by recalculate_diversified_keys).
type KeyChain struct {
	uid_read_key_id byte
	acl_read_key_id byte
	acl_write_key_id byte
	uid_read_key *freefare.DESFireKey // static, from keys.yaml
	acl_read_key *freefare.DESFireKey // diversified per card
	acl_write_key *freefare.DESFireKey // diversified per card
}
// To pass multiple values over a channel
// TagResult carries the outcome of check_tag back to the main loop.
type TagResult struct {
	is_valid bool // true when the tag was granted access
	err error // error from check_tag, if any
}
// Global mutable state shared by the tag-checking helpers; populated by
// init_appinfo and mutated per card by recalculate_diversified_keys.
var (
	keychain = KeyChain{}
	appinfo = AppInfo{}
)
// init_appinfo loads keys.yaml and apps.yaml and populates the global
// appinfo and keychain structures. It panics on any missing or malformed
// configuration, since the program cannot operate without key material.
func init_appinfo() {
	keymap, err := helpers.LoadYAMLFile("keys.yaml")
	if err != nil {
		panic(err)
	}
	appmap, err := helpers.LoadYAMLFile("apps.yaml")
	if err != nil {
		panic(err)
	}
	// All settings live under the "hacklab_acl" entry; resolve the nested
	// map once instead of repeating the type assertion for every field.
	aclconf := appmap["hacklab_acl"].(map[interface{}]interface{})
	// Application-id
	appinfo.aid, err = helpers.String2aid(aclconf["aid"].(string))
	if err != nil {
		panic(err)
	}
	// Needed for diversification
	appinfo.aidbytes = helpers.Aid2bytes(appinfo.aid)
	appinfo.sysid, err = hex.DecodeString(aclconf["sysid"].(string))
	if err != nil {
		panic(err)
	}
	appinfo.acl_file_id, err = helpers.String2byte(aclconf["acl_file_id"].(string))
	if err != nil {
		panic(err)
	}
	// Key id numbers from config
	keychain.uid_read_key_id, err = helpers.String2byte(aclconf["uid_read_key_id"].(string))
	if err != nil {
		panic(err)
	}
	keychain.acl_read_key_id, err = helpers.String2byte(aclconf["acl_read_key_id"].(string))
	if err != nil {
		panic(err)
	}
	keychain.acl_write_key_id, err = helpers.String2byte(aclconf["acl_write_key_id"].(string))
	if err != nil {
		panic(err)
	}
	// The static app key to read UID
	keychain.uid_read_key, err = helpers.String2aeskey(keymap["uid_read_key"].(string))
	if err != nil {
		panic(err)
	}
	// Bases for the diversified keys
	appinfo.acl_read_base, err = hex.DecodeString(keymap["acl_read_key"].(string))
	if err != nil {
		panic(err)
	}
	appinfo.acl_write_base, err = hex.DecodeString(keymap["acl_write_key"].(string))
	if err != nil {
		panic(err)
	}
}
// recalculate_diversified_keys derives the per-card ACL read/write keys from
// the configured base keys, the application id bytes, the system id and the
// card's real UID, storing the results into the global keychain.
func recalculate_diversified_keys(realuid []byte) error {
	acl_read_bytes, err := keydiversification.AES128(appinfo.acl_read_base[:], appinfo.aidbytes[:], realuid[:], appinfo.sysid[:])
	if err != nil {
		return err
	}
	acl_write_bytes, err := keydiversification.AES128(appinfo.acl_write_base[:], appinfo.aidbytes[:], realuid[:], appinfo.sysid[:])
	if err != nil {
		return err
	}
	keychain.acl_read_key = helpers.Bytes2aeskey(acl_read_bytes)
	keychain.acl_write_key = helpers.Bytes2aeskey(acl_write_bytes)
	return nil
}
// update_acl_file re-authenticates with the diversified ACL write key and
// overwrites the card's ACL data file with *newdata. A short write is only
// warned about, not treated as an error.
func update_acl_file(desfiretag *freefare.DESFireTag, newdata *[]byte) error {
	fmt.Print("Re-auth with ACL write key, ")
	err := desfiretag.Authenticate(keychain.acl_write_key_id,*keychain.acl_write_key)
	if err != nil {
		return err
	}
	fmt.Println("Done")
	fmt.Print("Overwriting ACL data file, ")
	byteswritten, err := desfiretag.WriteData(appinfo.acl_file_id, 0, *newdata)
	if err != nil {
		return err
	}
	if (byteswritten < 8) {
		fmt.Println(fmt.Sprintf("WARNING: WriteData wrote %d bytes, 8 expected", byteswritten))
	}
	fmt.Println("Done")
	return nil
}
// check_revoked reports whether realuid_str appears in the "revoked" table.
// Nulling the card's ACL file is left to the caller (check_tag). Lookup
// failures report true so callers fail safe (deny access).
func check_revoked(db *sql.DB, realuid_str string) (bool, error) {
	revoked_found := false
	stmt, err := db.Prepare("SELECT rowid FROM revoked where uid=?")
	if err != nil {
		return true, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(realuid_str)
	if err != nil {
		return true, err
	}
	defer rows.Close()
	for rows.Next() {
		revoked_found = true
		var rowid int64
		// Fix: the Scan error was previously ignored; surface it and deny.
		if err := rows.Scan(&rowid); err != nil {
			return true, err
		}
		fmt.Println(fmt.Sprintf("WARNING: Found REVOKED key %s on row %d", realuid_str, rowid))
		// TODO: Publish a ZMQ message or something
	}
	// Fix: report any error that terminated row iteration early.
	if err := rows.Err(); err != nil {
		return true, err
	}
	return revoked_found, nil
}
// read_and_parse_acl_file re-authenticates with the diversified ACL read key,
// reads the 8-byte ACL data file from the card and decodes it as an unsigned
// varint. Returns the decoded ACL bitmask, or an error if auth, read or
// decode fails. A short read is only warned about.
func read_and_parse_acl_file(desfiretag *freefare.DESFireTag) (uint64, error) {
	fmt.Print("Re-auth with ACL read key, ")
	err := desfiretag.Authenticate(keychain.acl_read_key_id, *keychain.acl_read_key)
	if err != nil {
		return 0, err
	}
	fmt.Println("Done")
	aclbytes := make([]byte, 8)
	fmt.Print("Reading ACL data file, ")
	bytesread, err := desfiretag.ReadData(appinfo.acl_file_id, 0, aclbytes)
	if err != nil {
		return 0, err
	}
	if bytesread < 8 {
		fmt.Println(fmt.Sprintf("WARNING: ReadData read %d bytes, 8 expected", bytesread))
	}
	acl, n := binary.Uvarint(aclbytes)
	if n <= 0 {
		// Fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)) (S1028).
		return 0, fmt.Errorf("ERROR: binary.Uvarint returned %d, skipping tag", n)
	}
	fmt.Println("Done")
	return acl, nil
}
// get_db_acl returns the authoritative ACL bitmask for realuid_str from the
// "keys" table. Returns an error when the UID is unknown or the query fails.
func get_db_acl(db *sql.DB, realuid_str string) (uint64, error) {
	stmt, err := db.Prepare("SELECT rowid,acl FROM keys where uid=?")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(realuid_str)
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	// Only the first matching row matters; UIDs are expected to be unique.
	for rows.Next() {
		var rowid int64
		var acl int64
		// Fix: the Scan error was previously ignored.
		if err := rows.Scan(&rowid, &acl); err != nil {
			return 0, err
		}
		return uint64(acl), nil
	}
	// Fix: surface iteration errors instead of misreporting "UID not found".
	if err := rows.Err(); err != nil {
		return 0, err
	}
	// Fix: errors.New(fmt.Sprintf(...)) with no format verbs → errors.New.
	return 0, errors.New("UID not found")
}
// check_tag_channel wraps check_tag for use in a goroutine: it delivers the
// outcome over ch and closes the channel when done.
func check_tag_channel(desfiretag *freefare.DESFireTag, db *sql.DB, required_acl uint64, ch chan TagResult) {
	defer close(ch)
	ok, err := check_tag(desfiretag, db, required_acl)
	ch <- TagResult{is_valid: ok, err: err}
}
// check_tag runs the full authorization sequence for one DESFire tag:
// connect, select our application, authenticate with the static UID-read key,
// read the card's real UID, derive the diversified ACL keys, reject (and
// null) revoked cards, read the ACL file, sync it with the DB value, and
// finally test the ACL against required_acl. Returns true only when access
// should be granted. Failures that look retryable jump back to RETRY (up to
// errlimit attempts); hard failures jump to FAIL, which disconnects the tag.
func check_tag(desfiretag *freefare.DESFireTag, db *sql.DB, required_acl uint64) (bool, error) {
	const errlimit = 3
	// Locals are declared up front because goto may not jump over declarations.
	var err error = nil
	var realuid_str string
	var realuid []byte
	acl := uint64(0)
	db_acl := uint64(0)
	revoked_found := false
	errcnt := 0
	connected := false
	// TODO: Add a timeout for all of this, if not done in 1s or so we have a problem...
RETRY:
	// Entered with err != nil after a retryable failure; the initial pass
	// (err == nil) falls straight through to the connect below.
	if err != nil {
		// TODO: Retry only on RF-errors
		errcnt++
		if errcnt > errlimit {
			fmt.Println(fmt.Sprintf("failed (%s), retry-limit exceeded (%d/%d), skipping tag", err, errcnt, errlimit))
			goto FAIL
		}
		fmt.Println(fmt.Sprintf("failed (%s), retrying (%d)", err, errcnt))
	}
	if connected {
		_ = desfiretag.Disconnect()
	}
	// Connect to this tag
	fmt.Print(fmt.Sprintf("Connecting to %s, ", desfiretag.UID()))
	err = desfiretag.Connect()
	if err != nil {
		goto RETRY
	}
	fmt.Println("done")
	connected = true
	fmt.Print(fmt.Sprintf("Selecting application %d, ", appinfo.aid.Aid()))
	err = desfiretag.SelectApplication(appinfo.aid)
	if err != nil {
		goto RETRY
	}
	fmt.Println("Done")
	fmt.Print("Authenticating, ")
	err = desfiretag.Authenticate(keychain.uid_read_key_id, *keychain.uid_read_key)
	if err != nil {
		goto RETRY
	}
	fmt.Println("Done")
	// Get card real UID
	realuid_str, err = desfiretag.CardUID()
	if err != nil {
		// TODO: Retry only on RF-errors
		goto RETRY
	}
	realuid, err = hex.DecodeString(realuid_str)
	if err != nil {
		fmt.Println(fmt.Sprintf("ERROR: Failed to parse real UID (%s), skipping tag", err))
		goto FAIL
	}
	fmt.Println("Got real UID:", hex.EncodeToString(realuid))
	// Calculate the diversified keys
	err = recalculate_diversified_keys(realuid[:])
	if err != nil {
		fmt.Println(fmt.Sprintf("ERROR: Failed to get diversified ACL keys (%s), skipping tag", err))
		goto FAIL
	}
	// Check for revoked key; a failed check is treated as revoked (fail safe).
	revoked_found, err = check_revoked(db, realuid_str)
	if err != nil {
		fmt.Println(fmt.Sprintf("check_revoked returned err (%s)", err))
		revoked_found = true
	}
	if revoked_found {
		// Null the ACL file on card
		nullaclbytes := make([]byte, 8)
		// Just go to fail even if this write fails
		_ = update_acl_file(desfiretag, &nullaclbytes)
		goto FAIL
	}
	acl, err = read_and_parse_acl_file(desfiretag)
	if err != nil {
		goto RETRY
	}
	//fmt.Println("DEBUG: acl:", acl)
	// Get (possibly updated) ACL from DB, if returns error then UID is not known
	db_acl, err = get_db_acl(db, realuid_str)
	if err != nil {
		// No match
		fmt.Println(fmt.Sprintf("WARNING: key %s, not found in DB", realuid_str))
		// TODO: Should we null the ACL file just in case, because any key that is personalized but not either valid or revoked is in a weird limbo
		goto FAIL
	}
	// Check for ACL update
	if acl != db_acl {
		fmt.Println(fmt.Sprintf("NOTICE: card ACL (%x) does not match DB (%x), ", acl, db_acl))
		// Update the ACL file on card
		newaclbytes := make([]byte, 8)
		n := binary.PutUvarint(newaclbytes, db_acl)
		if n < 0 {
			fmt.Println(fmt.Sprintf("binary.PutUvarint returned %d, skipping tag", n))
			goto FAIL
		}
		// Fix: this previously used `err :=`, shadowing the outer err; a
		// persistent write failure then re-entered RETRY with err == nil and
		// never incremented errcnt — an infinite retry loop.
		err = update_acl_file(desfiretag, &newaclbytes)
		if err != nil {
			goto RETRY
		}
	}
	// Now check the ACL match
	if (db_acl & required_acl) == 0 {
		fmt.Println(fmt.Sprintf("NOTICE: Found valid key %s, but ACL (%x) not granted", realuid_str, required_acl))
		// TODO: Publish a ZMQ message or something
		goto FAIL
	}
	fmt.Println(fmt.Sprintf("SUCCESS: Access granted to %s with ACL (%x)", realuid_str, db_acl))
	return true, nil
FAIL:
	if connected {
		_ = desfiretag.Disconnect()
	}
	return false, err
}
// main wires everything together: profiling, YAML configuration, key
// material, the SQLite key database, the NFC reader and the GPIO outputs,
// then loops forever polling for tags and pulsing the LEDs / door relay
// according to the access decision.
func main() {
	cfg := profile.Config {
		MemProfile: true,
		NoShutdownHook: true, // do not hook SIGINT
	}
	// p.Stop() must be called before the program exits to
	// ensure profiling information is written to disk.
	p := profile.Start(&cfg)
	defer p.Stop()
	// TODO: configure this somewhere
	required_acl := uint64(1)
	init_appinfo()
	gpiomap, err := helpers.LoadYAMLFile("gpio.yaml")
	if err != nil {
		panic(err)
	}
	db, err := sql.Open("sqlite3", "./keys.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Open NFC device
	nfcd, err := nfc.Open("");
	if err != nil {
		panic(err);
	}
	defer nfcd.Close()
	// Start heartbeat goroutine
	//go heartbeat()
	// Get open GPIO pins for our outputs
	green_led, err := gpio.OpenPin(gpiomap["green_led"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		panic(err)
	}
	red_led, err := gpio.OpenPin(gpiomap["red_led"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		panic(err)
	}
	relay, err := gpio.OpenPin(gpiomap["relay"].(map[interface{}]interface{})["pin"].(int), gpio.ModeOutput)
	if err != nil {
		panic(err)
	}
	// turn the leds off on exit
	/*
	exit_ch := make(chan os.Signal, 1)
	signal.Notify(exit_ch, os.Interrupt)
	signal.Notify(exit_ch, os.Kill)
	go func() {
		for _ = range exit_ch {
			fmt.Printf("\nClearing and unexporting the pins.\n")
			go clear_and_close(green_led)
			go clear_and_close(red_led)
			go clear_and_close(relay)
			os.Exit(0)
		}
	}()
	*/
	fmt.Println("Starting mainloop")
	// mainloop
	for {
		// Poll for tags; busy-wait with a 100ms sleep until at least one shows up.
		var tags []freefare.Tag
		for {
			tags, err = freefare.GetTags(nfcd);
			if err != nil {
				// TODO: Probably should not panic here
				panic(err)
			}
			if len(tags) > 0 {
				break
			}
			time.Sleep(100 * time.Millisecond)
			//fmt.Println("...polling")
		}
		valid_found := false
		for i := 0; i < len(tags); i++ {
			tag := tags[i]
			if (tag.Type() != freefare.DESFire) {
				fmt.Println(fmt.Sprintf("Non-DESFire tag %s skipped", tag.UID()))
				continue
			}
			desfiretag := tag.(freefare.DESFireTag)
			// Run the check in a goroutine so a 1s deadline can be enforced;
			// the buffered channel lets the worker finish even after a timeout.
			ch := make(chan TagResult, 1)
			go check_tag_channel(&desfiretag, db, required_acl, ch)
			select {
			case res, ok := <-ch:
				if !ok {
					// Channel closed
				} else {
					if res.is_valid {
						valid_found = true
					}
				}
			case <-time.After(time.Second * 1):
				fmt.Println("WARNING: Timeout while checking tag")
				// TODO: Do we even need this, probably not...
				// _ = desfiretag.Disconnect()
			}
		}
		// Mark for GC
		tags = nil
		// Any single valid tag grants access for this polling round.
		if !valid_found {
			fmt.Println("Access DENIED")
			go pulse_gpio(red_led, gpiomap["red_led"].(map[interface{}]interface{})["time"].(int))
		} else {
			go pulse_gpio(green_led, gpiomap["green_led"].(map[interface{}]interface{})["time"].(int))
			go pulse_gpio(relay, gpiomap["relay"].(map[interface{}]interface{})["time"].(int))
		}
		// Run GC at this time
		//runtime.GC()
		// Wait a moment before continuing with fast polling
		time.Sleep(500 * time.Millisecond)
	}
}
|
package teams
import (
"fmt"
"sort"
"sync"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"github.com/keybase/client/go/engine"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
)
// statusList is the wire format of the team list endpoints: the member
// entries plus the standard app status trailer.
type statusList struct {
	Teams []keybase1.MemberInfo `json:"teams"`
	Status libkb.AppStatus `json:"status"`
}
// GetAppStatus returns a pointer to the embedded server status so the API
// decoder can populate and inspect it.
func (r *statusList) GetAppStatus() *libkb.AppStatus {
	return &r.Status
}
// getTeamsListFromServer fetches the raw membership list for uid (the current
// session's user when uid is empty). With all=true the endpoint also covers
// teammates' teams. A logged-in session is required.
func getTeamsListFromServer(ctx context.Context, g *libkb.GlobalContext, uid keybase1.UID, all bool) ([]keybase1.MemberInfo, error) {
	endpoint := "team/for_user"
	if all {
		endpoint = "team/teammates_for_user"
	}
	apiArg := libkb.NewAPIArg(endpoint)
	apiArg.NetContext = ctx
	apiArg.SessionType = libkb.APISessionTypeREQUIRED
	if uid.Exists() {
		apiArg.Args = libkb.HTTPArgs{"uid": libkb.S{Val: uid.String()}}
	}
	var list statusList
	if err := g.API.GetDecode(apiArg, &list); err != nil {
		return nil, err
	}
	return list.Teams, nil
}
// memberNeedAdmin reports whether the entry refers to the current user with
// an admin-or-above role, either directly or via an implicit membership.
func memberNeedAdmin(member keybase1.MemberInfo, meUID keybase1.UID) bool {
	if member.UserID != meUID {
		return false
	}
	if member.Role.IsAdminOrAbove() {
		return true
	}
	return member.Implicit != nil && member.Implicit.Role.IsAdminOrAbove()
}
// verifyMemberRoleInTeam checks that the role the server reported for userID
// matches what the team sigchain says. A reported role of NONE means implied
// membership with no sigchain role, so nothing is verified in that case.
func verifyMemberRoleInTeam(ctx context.Context, userID keybase1.UID, expectedRole keybase1.TeamRole, team *Team) error {
	if expectedRole == keybase1.TeamRole_NONE {
		return nil
	}
	uv, err := team.chain().GetLatestUVWithUID(userID)
	if err != nil {
		return err
	}
	actual, err := team.chain().GetUserRole(uv)
	if err != nil {
		return err
	}
	if actual != expectedRole {
		return fmt.Errorf("unexpected member role: expected %v but actual role is %v", expectedRole, actual)
	}
	return nil
}
// getTeamForMember tries to load member's team in a recent enough state to
// contain the member with the role the server reported. If the cached chain
// disagrees, it forces a repoll and verifies once more before giving up.
func getTeamForMember(ctx context.Context, g *libkb.GlobalContext, member keybase1.MemberInfo, needAdmin bool) (*Team, error) {
	load := func(forceRepoll bool) (*Team, error) {
		return Load(ctx, g, keybase1.LoadTeamArg{
			ID:          member.TeamID,
			NeedAdmin:   needAdmin,
			Public:      member.TeamID.IsPublic(),
			ForceRepoll: forceRepoll,
		})
	}
	team, err := load(false)
	if err != nil {
		return nil, err
	}
	if verifyMemberRoleInTeam(ctx, member.UserID, member.Role, team) == nil {
		return team, nil
	}
	// Cached state disagrees with the server; repoll and re-verify.
	team, err = load(true)
	if err != nil {
		return nil, err
	}
	if err := verifyMemberRoleInTeam(ctx, member.UserID, member.Role, team); err != nil {
		return nil, fmt.Errorf("server was wrong about role in team : %v", err)
	}
	return team, nil
}
// getUsernameAndFullName resolves uid to its normalized username (via the
// UPAK loader) and full name (via the engine package).
func getUsernameAndFullName(ctx context.Context, g *libkb.GlobalContext, uid keybase1.UID) (username libkb.NormalizedUsername, fullName string, err error) {
	if username, err = g.GetUPAKLoader().LookupUsername(ctx, uid); err != nil {
		return "", "", err
	}
	if fullName, err = engine.GetFullName(ctx, g, uid); err != nil {
		return "", "", err
	}
	return username, fullName, nil
}
// fillUsernames resolves the username and full name for every member entry
// in res via a single batched UID-mapper call.
func fillUsernames(ctx context.Context, g *libkb.GlobalContext, res *keybase1.AnnotatedTeamList) error {
	// Collect the distinct UIDs, remembering each one's index in userList.
	userSet := make(map[keybase1.UID]int)
	var userList []keybase1.UID
	for _, member := range res.Teams {
		if _, seen := userSet[member.UserID]; !seen {
			userSet[member.UserID] = len(userList)
			userList = append(userList, member.UserID)
		}
	}
	namePkgs, err := g.UIDMapper.MapUIDsToUsernamePackages(ctx, g, userList, 0, 0, true)
	if err != nil {
		return err
	}
	// Results come back in userList order, so userSet doubles as a
	// UID -> package-index lookup.
	for i := range res.Teams {
		member := &res.Teams[i]
		pkg := namePkgs[userSet[member.UserID]]
		member.Username = pkg.NormalizedUsername.String()
		if pkg.FullName != nil {
			member.FullName = string(pkg.FullName.FullName)
		}
	}
	return nil
}
// List info about teams
// If an error is encountered while loading some teams, the team is skipped and no error is returned.
// If an error occurs loading all the info, an error is returned.
func List(ctx context.Context, g *libkb.GlobalContext, arg keybase1.TeamListArg) (*keybase1.AnnotatedTeamList, error) {
	tracer := g.CTimeTracer(ctx, "TeamList")
	defer tracer.Finish()
	// Resolve the optional user assertion to a UID; otherwise query ourselves.
	var queryUID keybase1.UID
	if arg.UserAssertion != "" {
		res := g.Resolver.ResolveFullExpression(ctx, arg.UserAssertion)
		if res.GetError() != nil {
			return nil, res.GetError()
		}
		queryUID = res.GetUID()
	}
	meUID := g.ActiveDevice.UID()
	tracer.Stage("Server")
	teams, err := getTeamsListFromServer(ctx, g, queryUID, arg.All)
	if err != nil {
		return nil, err
	}
	if arg.UserAssertion == "" {
		queryUID = meUID
	}
	tracer.Stage("LookupOurUsername")
	// NOTE(review): this lookup uses context.Background() rather than ctx, so
	// it ignores caller cancellation — confirm whether that is intentional.
	queryUsername, queryFullName, err := getUsernameAndFullName(context.Background(), g, queryUID)
	if err != nil {
		return nil, err
	}
	// resLock guards res, which the per-team goroutines below append to.
	var resLock sync.Mutex
	res := &keybase1.AnnotatedTeamList{
		Teams: nil,
		AnnotatedActiveInvites: make(map[keybase1.TeamInviteID]keybase1.AnnotatedTeamInvite),
	}
	if len(teams) == 0 {
		return res, nil
	}
	tracer.Stage("Loads")
	// expectEmptyList stays true only if every server entry was skipped
	// (implicit teams); used at the bottom to distinguish "all loads failed"
	// from "there was nothing to load".
	expectEmptyList := true
	// Process all the teams in parallel. Limit to 15 in parallel so
	// we don't crush the server. errgroup collects errors and returns
	// the first non-nil. subctx is canceled when the group finishes.
	const parallelLimit int64 = 15
	sem := semaphore.NewWeighted(parallelLimit)
	group, subctx := errgroup.WithContext(ctx)
	for _, memberInfo := range teams {
		memberInfo := memberInfo // https://golang.org/doc/faq#closures_and_goroutines
		// Skip implicit teams unless --include-implicit-teams was passed from above.
		if memberInfo.IsImplicitTeam && !arg.IncludeImplicitTeams {
			g.Log.CDebugf(subctx, "| TeamList skipping implicit team: server-team:%v server-uid:%v", memberInfo.TeamID, memberInfo.UserID)
			continue
		}
		expectEmptyList = false
		group.Go(func() error {
			// Grab one of the parallelLimit slots
			err := sem.Acquire(subctx, 1)
			if err != nil {
				return err
			}
			defer sem.Release(1)
			g.Log.CDebugf(subctx, "| TeamList entry: server-team:%v server-uid:%v", memberInfo.TeamID, memberInfo.UserID)
			memberUID := memberInfo.UserID
			// Only our own username/full name are known up front; everyone
			// else's are filled in afterwards by fillUsernames.
			var username libkb.NormalizedUsername
			var fullName string
			if memberUID == queryUID {
				username, fullName = queryUsername, queryFullName
			}
			serverSaysNeedAdmin := memberNeedAdmin(memberInfo, meUID)
			team, err := getTeamForMember(subctx, g, memberInfo, serverSaysNeedAdmin)
			if err != nil {
				// Per the contract above: skip teams that fail to load.
				g.Log.CDebugf(subctx, "| Error in getTeamForMember %q: %v; skipping member", memberInfo.UserID, err)
				return nil
			}
			type AnnotatedTeamInviteMap map[keybase1.TeamInviteID]keybase1.AnnotatedTeamInvite
			var anMemberInfo *keybase1.AnnotatedMemberInfo
			var anInvites AnnotatedTeamInviteMap
			anMemberInfo = &keybase1.AnnotatedMemberInfo{
				TeamID: team.ID,
				FqName: team.Name().String(),
				UserID: memberInfo.UserID,
				Role: memberInfo.Role, // memberInfo.Role has been verified during getTeamForMember
				IsImplicitTeam: team.IsImplicit(),
				Implicit: memberInfo.Implicit, // This part is still server trust
				// Username and FullName for users that are not the current user
				// are blank initially and filled by fillUsernames.
				Username: username.String(),
				FullName: fullName,
			}
			if !arg.All {
				members, err := team.Members()
				if err == nil {
					anMemberInfo.MemberCount = len(members.AllUIDs())
				} else {
					g.Log.CDebugf(subctx, "| Failed to get Members() for team %q: %v", team.ID, err)
				}
			}
			anInvites = make(AnnotatedTeamInviteMap)
			if serverSaysNeedAdmin {
				anInvites, err = AnnotateInvites(subctx, g, team.chain().inner.ActiveInvites, team.Name().String())
				if err != nil {
					g.Log.CDebugf(subctx, "| Failed to AnnotateInvites for team %q: %v", team.ID, err)
					return nil
				}
			}
			// After this lock it is safe to write out results
			resLock.Lock()
			defer resLock.Unlock()
			res.Teams = append(res.Teams, *anMemberInfo)
			for teamInviteID, annotatedTeamInvite := range anInvites {
				res.AnnotatedActiveInvites[teamInviteID] = annotatedTeamInvite
			}
			return nil
		})
	}
	err = group.Wait()
	if arg.All && len(res.Teams) != 0 {
		tracer.Stage("FillUsernames")
		err := fillUsernames(ctx, g, res)
		if err != nil {
			return nil, err
		}
	}
	// Every candidate team failed to load: report it rather than silently
	// returning an empty list.
	if len(res.Teams) == 0 && !expectEmptyList {
		return res, fmt.Errorf("multiple errors while loading team list")
	}
	return res, err
}
// ListSubteamsRecursive loads parentTeamName (admin access is required) and
// returns the id and name of every transitive subteam beneath it.
func ListSubteamsRecursive(ctx context.Context, g *libkb.GlobalContext, parentTeamName string, forceRepoll bool) (res []keybase1.TeamIDAndName, err error) {
	parent, err := Load(ctx, g, keybase1.LoadTeamArg{
		Name:        parentTeamName,
		NeedAdmin:   true,
		ForceRepoll: forceRepoll,
	})
	if err != nil {
		return nil, err
	}
	subteams, err := parent.loadAllTransitiveSubteams(ctx, forceRepoll)
	if err != nil {
		return nil, err
	}
	for _, subteam := range subteams {
		res = append(res, keybase1.TeamIDAndName{
			Id:   subteam.ID,
			Name: subteam.Name(),
		})
	}
	return res, nil
}
// AnnotateInvites decorates raw team invites with the inviter's username,
// the team name, and — for "keybase" (puk-less user) invites — the invitee's
// current username. Keybase invites whose user version no longer matches the
// user's current eldest seqno (account reset) are omitted from the result.
func AnnotateInvites(ctx context.Context, g *libkb.GlobalContext, invites map[keybase1.TeamInviteID]keybase1.TeamInvite, teamName string) (map[keybase1.TeamInviteID]keybase1.AnnotatedTeamInvite, error) {
	annotatedInvites := make(map[keybase1.TeamInviteID]keybase1.AnnotatedTeamInvite, len(invites))
	upakLoader := g.GetUPAKLoader()
	for id, invite := range invites {
		username, err := upakLoader.LookupUsername(ctx, invite.Inviter.Uid)
		if err != nil {
			return annotatedInvites, err
		}
		name := invite.Name
		category, err := invite.Type.C()
		if err != nil {
			return nil, err
		}
		var uv keybase1.UserVersion
		if category == keybase1.TeamInviteCategory_KEYBASE {
			// "keybase" invites (i.e. pukless users) have user version for name
			var err error
			uv, err = invite.KeybaseUserVersion()
			if err != nil {
				return nil, err
			}
			// Fix: honor the caller's context instead of context.Background()
			// so cancellation propagates to the UPAK load.
			up, err := upakLoader.LoadUserPlusKeys(ctx, uv.Uid, "")
			if err != nil {
				return nil, err
			}
			// Drop invites that point at an older incarnation of the user.
			if uv.EldestSeqno != up.EldestSeqno {
				continue
			}
			name = keybase1.TeamInviteName(up.Username)
		}
		annotatedInvites[id] = keybase1.AnnotatedTeamInvite{
			Role:            invite.Role,
			Id:              invite.Id,
			Type:            invite.Type,
			Name:            name,
			Uv:              uv,
			Inviter:         invite.Inviter,
			InviterUsername: username.String(),
			TeamName:        teamName,
		}
	}
	return annotatedInvites, nil
}
// TeamTree returns the tree of subteams under the root team arg.Name, as far
// as the server reveals membership. Role info is trusted from the server;
// team names are validated by loading each team by id.
func TeamTree(ctx context.Context, g *libkb.GlobalContext, arg keybase1.TeamTreeArg) (res keybase1.TeamTreeResult, err error) {
	if !arg.Name.IsRootTeam() {
		return res, fmt.Errorf("cannot get tree of non-root team")
	}
	serverList, err := getTeamsListFromServer(ctx, g, "", false)
	if err != nil {
		return res, err
	}
	// Map from team name (string) to entry
	entryMap := make(map[string]keybase1.TeamTreeEntry)
	// The server might have omitted some teams, oh well.
	// Trusts the server for role.
	// Load the teams by ID to make sure they are valid and get the validated names.
	for _, info := range serverList {
		serverName, err := info.TeamName()
		if err != nil {
			return res, err
		}
		if !serverName.RootAncestorName().Eq(arg.Name) {
			// Skip those not in this tree.
			continue
		}
		team, err := Load(ctx, g, keybase1.LoadTeamArg{
			ID: info.TeamID,
			ForceRepoll: true,
		})
		if err != nil {
			return res, err
		}
		var admin bool // true if an admin or implicit admin
		if info.Role.IsAdminOrAbove() {
			admin = true
		}
		if info.Implicit != nil && info.Implicit.Role.IsAdminOrAbove() {
			admin = true
		}
		entryMap[team.Name().String()] = keybase1.TeamTreeEntry{
			Name: team.Name(),
			Admin: admin,
		}
	}
	// Add all parent names (recursively)
	// So that if only A.B.C is in the list, we add A.B and A as well.
	// Adding map entries while iterating is safe.
	// "If map entries are created during iteration, that entry may be produced during the iteration or may be skipped."
	for _, entry := range entryMap {
		name := entry.Name.DeepCopy()
		for name.Depth() > 0 {
			_, ok := entryMap[name.String()]
			if !ok {
				// Parents added only for tree structure; no admin claim.
				entryMap[name.String()] = keybase1.TeamTreeEntry{
					Name: name,
					Admin: false,
				}
			}
			name, err = name.Parent()
			if err != nil {
				break
			}
		}
	}
	for _, entry := range entryMap {
		res.Entries = append(res.Entries, entry)
	}
	if len(res.Entries) == 0 {
		return res, fmt.Errorf("team not found: %v", arg.Name)
	}
	// Order into a tree order. Which happens to be alphabetical ordering.
	// Example: [a, a.b, a.b.c, a.b.d, a.e.f, a.e.g]
	sort.Slice(res.Entries, func(i, j int) bool {
		return res.Entries[i].Name.String() < res.Entries[j].Name.String()
	})
	return res, nil
}
Team list requires login.
package teams
import (
"fmt"
"sort"
"sync"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"github.com/keybase/client/go/engine"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
)
// statusList is the wire format of the team list endpoints: the member
// entries plus the standard app status trailer.
type statusList struct {
	Teams []keybase1.MemberInfo `json:"teams"`
	Status libkb.AppStatus `json:"status"`
}
// GetAppStatus exposes the embedded server status so g.API.GetDecode can
// surface server-side errors for this response type.
func (r *statusList) GetAppStatus() *libkb.AppStatus {
	return &r.Status
}
// getTeamsListFromServer fetches the raw membership rows from the server.
// When all is true it asks for every teammate-visible team, otherwise only
// the teams the user belongs to. An empty uid means "the current user".
func getTeamsListFromServer(ctx context.Context, g *libkb.GlobalContext, uid keybase1.UID, all bool) ([]keybase1.MemberInfo, error) {
	endpoint := "team/for_user"
	if all {
		endpoint = "team/teammates_for_user"
	}
	apiArg := libkb.NewAPIArg(endpoint)
	if uid.Exists() {
		apiArg.Args = libkb.HTTPArgs{"uid": libkb.S{Val: uid.String()}}
	}
	apiArg.NetContext = ctx
	apiArg.SessionType = libkb.APISessionTypeREQUIRED
	var parsed statusList
	if err := g.API.GetDecode(apiArg, &parsed); err != nil {
		return nil, err
	}
	return parsed.Teams, nil
}
// memberNeedAdmin reports whether this row describes the current user
// holding an admin-or-above role, directly or via implicit adminship.
func memberNeedAdmin(member keybase1.MemberInfo, meUID keybase1.UID) bool {
	if member.UserID != meUID {
		return false
	}
	if member.Role.IsAdminOrAbove() {
		return true
	}
	return member.Implicit != nil && member.Implicit.Role.IsAdminOrAbove()
}
// verifyMemberRoleInTeam checks that the role given in MemberInfo matches
// what the team chain says. Nothing is checked when MemberInfo's role is
// NONE; in this context it means that the user has implied membership in
// the team and no role is given in the sigchain.
func verifyMemberRoleInTeam(ctx context.Context, userID keybase1.UID, expectedRole keybase1.TeamRole, team *Team) error {
	if expectedRole == keybase1.TeamRole_NONE {
		return nil
	}
	// Resolve the user's current user-version in this team's chain.
	memberUV, err := team.chain().GetLatestUVWithUID(userID)
	if err != nil {
		return err
	}
	role, err := team.chain().GetUserRole(memberUV)
	if err != nil {
		return err
	}
	if role != expectedRole {
		return fmt.Errorf("unexpected member role: expected %v but actual role is %v", expectedRole, role)
	}
	return nil
}
// getTeamForMember tries to load the team in a recent enough state to
// contain the member with the role claimed in MemberInfo. If the cached
// state disagrees, it reloads once with ForceRepoll before giving up.
func getTeamForMember(ctx context.Context, g *libkb.GlobalContext, member keybase1.MemberInfo, needAdmin bool) (*Team, error) {
	// Both load attempts differ only in the ForceRepoll flag.
	load := func(repoll bool) (*Team, error) {
		return Load(ctx, g, keybase1.LoadTeamArg{
			ID:          member.TeamID,
			NeedAdmin:   needAdmin,
			Public:      member.TeamID.IsPublic(),
			ForceRepoll: repoll,
		})
	}
	team, err := load(false)
	if err != nil {
		return nil, err
	}
	if verifyMemberRoleInTeam(ctx, member.UserID, member.Role, team) == nil {
		return team, nil
	}
	// Cached state disagreed with the server's claim; repoll and re-verify.
	team, err = load(true)
	if err != nil {
		return nil, err
	}
	if err = verifyMemberRoleInTeam(ctx, member.UserID, member.Role, team); err != nil {
		return nil, fmt.Errorf("server was wrong about role in team : %v", err)
	}
	return team, nil
}
// getUsernameAndFullName resolves a UID to its normalized username (via the
// UPAK loader) and its full name (via the profile engine).
func getUsernameAndFullName(ctx context.Context, g *libkb.GlobalContext, uid keybase1.UID) (username libkb.NormalizedUsername, fullName string, err error) {
	if username, err = g.GetUPAKLoader().LookupUsername(ctx, uid); err != nil {
		return "", "", err
	}
	if fullName, err = engine.GetFullName(ctx, g, uid); err != nil {
		return "", "", err
	}
	return username, fullName, nil
}
// fillUsernames annotates every member row in res with a username and, when
// available, a full name, resolving all distinct UIDs in one batched call.
func fillUsernames(ctx context.Context, g *libkb.GlobalContext, res *keybase1.AnnotatedTeamList) error {
	// Collect each distinct UID once, remembering its slot in uids so the
	// result packages can be looked up by index below.
	slot := map[keybase1.UID]int{}
	var uids []keybase1.UID
	for _, member := range res.Teams {
		if _, seen := slot[member.UserID]; !seen {
			slot[member.UserID] = len(uids)
			uids = append(uids, member.UserID)
		}
	}
	namePkgs, err := g.UIDMapper.MapUIDsToUsernamePackages(ctx, g, uids, 0, 0, true)
	if err != nil {
		return err
	}
	for i := range res.Teams {
		member := &res.Teams[i]
		pkg := namePkgs[slot[member.UserID]]
		member.Username = pkg.NormalizedUsername.String()
		if pkg.FullName != nil {
			member.FullName = string(pkg.FullName.FullName)
		}
	}
	return nil
}
// List info about teams
// If an error is encountered while loading some teams, the team is skipped and no error is returned.
// If an error occurs loading all the info, an error is returned.
func List(ctx context.Context, g *libkb.GlobalContext, arg keybase1.TeamListArg) (*keybase1.AnnotatedTeamList, error) {
	tracer := g.CTimeTracer(ctx, "TeamList")
	defer tracer.Finish()

	// Resolve the optional user assertion to a UID; defaults to self below.
	var queryUID keybase1.UID
	if arg.UserAssertion != "" {
		res := g.Resolver.ResolveFullExpression(ctx, arg.UserAssertion)
		if res.GetError() != nil {
			return nil, res.GetError()
		}
		queryUID = res.GetUID()
	}

	meUID := g.ActiveDevice.UID()
	if meUID.IsNil() {
		return nil, libkb.LoginRequiredError{}
	}

	tracer.Stage("Server")
	teams, err := getTeamsListFromServer(ctx, g, queryUID, arg.All)
	if err != nil {
		return nil, err
	}
	if arg.UserAssertion == "" {
		queryUID = meUID
	}

	tracer.Stage("LookupOurUsername")
	queryUsername, queryFullName, err := getUsernameAndFullName(context.Background(), g, queryUID)
	if err != nil {
		return nil, err
	}

	// resLock guards res, which is written from many goroutines below.
	var resLock sync.Mutex
	res := &keybase1.AnnotatedTeamList{
		Teams: nil,
		AnnotatedActiveInvites: make(map[keybase1.TeamInviteID]keybase1.AnnotatedTeamInvite),
	}

	if len(teams) == 0 {
		return res, nil
	}

	tracer.Stage("Loads")

	expectEmptyList := true

	// Process all the teams in parallel. Limit to 15 in parallel so
	// we don't crush the server. errgroup collects errors and returns
	// the first non-nil. subctx is canceled when the group finishes.
	const parallelLimit int64 = 15
	sem := semaphore.NewWeighted(parallelLimit)
	group, subctx := errgroup.WithContext(ctx)
	for _, memberInfo := range teams {
		memberInfo := memberInfo // https://golang.org/doc/faq#closures_and_goroutines

		// Skip implicit teams unless --include-implicit-teams was passed from above.
		if memberInfo.IsImplicitTeam && !arg.IncludeImplicitTeams {
			g.Log.CDebugf(subctx, "| TeamList skipping implicit team: server-team:%v server-uid:%v", memberInfo.TeamID, memberInfo.UserID)
			continue
		}

		expectEmptyList = false

		group.Go(func() error {
			// Grab one of the parallelLimit slots
			err := sem.Acquire(subctx, 1)
			if err != nil {
				return err
			}
			defer sem.Release(1)

			g.Log.CDebugf(subctx, "| TeamList entry: server-team:%v server-uid:%v", memberInfo.TeamID, memberInfo.UserID)

			memberUID := memberInfo.UserID
			var username libkb.NormalizedUsername
			var fullName string
			// Only the queried user's names are known up front; other
			// members are filled in later by fillUsernames.
			if memberUID == queryUID {
				username, fullName = queryUsername, queryFullName
			}

			serverSaysNeedAdmin := memberNeedAdmin(memberInfo, meUID)
			team, err := getTeamForMember(subctx, g, memberInfo, serverSaysNeedAdmin)
			if err != nil {
				// Per the contract above: a team that fails to load is
				// skipped, not fatal for the whole list.
				g.Log.CDebugf(subctx, "| Error in getTeamForMember %q: %v; skipping member", memberInfo.UserID, err)
				return nil
			}

			type AnnotatedTeamInviteMap map[keybase1.TeamInviteID]keybase1.AnnotatedTeamInvite
			var anMemberInfo *keybase1.AnnotatedMemberInfo
			var anInvites AnnotatedTeamInviteMap

			anMemberInfo = &keybase1.AnnotatedMemberInfo{
				TeamID:         team.ID,
				FqName:         team.Name().String(),
				UserID:         memberInfo.UserID,
				Role:           memberInfo.Role, // memberInfo.Role has been verified during getTeamForMember
				IsImplicitTeam: team.IsImplicit(),
				Implicit:       memberInfo.Implicit, // This part is still server trust
				// Username and FullName for users that are not the current user
				// are blank initially and filled by fillUsernames.
				Username: username.String(),
				FullName: fullName,
			}

			if !arg.All {
				members, err := team.Members()
				if err == nil {
					anMemberInfo.MemberCount = len(members.AllUIDs())
				} else {
					g.Log.CDebugf(subctx, "| Failed to get Members() for team %q: %v", team.ID, err)
				}
			}

			anInvites = make(AnnotatedTeamInviteMap)
			if serverSaysNeedAdmin {
				anInvites, err = AnnotateInvites(subctx, g, team.chain().inner.ActiveInvites, team.Name().String())
				if err != nil {
					g.Log.CDebugf(subctx, "| Failed to AnnotateInvites for team %q: %v", team.ID, err)
					return nil
				}
			}

			// After this lock it is safe to write out results
			resLock.Lock()
			defer resLock.Unlock()
			res.Teams = append(res.Teams, *anMemberInfo)
			for teamInviteID, annotatedTeamInvite := range anInvites {
				res.AnnotatedActiveInvites[teamInviteID] = annotatedTeamInvite
			}

			return nil
		})
	}

	err = group.Wait()

	if arg.All && len(res.Teams) != 0 {
		tracer.Stage("FillUsernames")
		err := fillUsernames(ctx, g, res)
		if err != nil {
			return nil, err
		}
	}

	// Every worker either produced a row or skipped; an empty result when
	// rows were expected means every load failed.
	if len(res.Teams) == 0 && !expectEmptyList {
		return res, fmt.Errorf("multiple errors while loading team list")
	}

	return res, err
}
// ListSubteamsRecursive returns the ID and name of every transitive subteam
// of parentTeamName. Loading the parent requires admin rights.
func ListSubteamsRecursive(ctx context.Context, g *libkb.GlobalContext, parentTeamName string, forceRepoll bool) (res []keybase1.TeamIDAndName, err error) {
	parent, err := Load(ctx, g, keybase1.LoadTeamArg{
		Name:        parentTeamName,
		NeedAdmin:   true,
		ForceRepoll: forceRepoll,
	})
	if err != nil {
		return nil, err
	}
	subteams, err := parent.loadAllTransitiveSubteams(ctx, forceRepoll)
	if err != nil {
		return nil, err
	}
	for _, subteam := range subteams {
		entry := keybase1.TeamIDAndName{
			Id:   subteam.ID,
			Name: subteam.Name(),
		}
		res = append(res, entry)
	}
	return res, nil
}
// AnnotateInvites decorates a team's active invites with the inviter's
// username and, for "keybase" (PUK-less user) invites, the invitee's
// username. Invites whose keybase user has a stale EldestSeqno are skipped.
func AnnotateInvites(ctx context.Context, g *libkb.GlobalContext, invites map[keybase1.TeamInviteID]keybase1.TeamInvite, teamName string) (map[keybase1.TeamInviteID]keybase1.AnnotatedTeamInvite, error) {
	annotatedInvites := make(map[keybase1.TeamInviteID]keybase1.AnnotatedTeamInvite, len(invites))
	upakLoader := g.GetUPAKLoader()
	for id, invite := range invites {
		username, err := upakLoader.LookupUsername(ctx, invite.Inviter.Uid)
		if err != nil {
			// NOTE(review): this path returns the partial map while the
			// others return nil; callers should only use it when err == nil.
			return annotatedInvites, err
		}

		name := invite.Name
		category, err := invite.Type.C()
		if err != nil {
			return nil, err
		}
		var uv keybase1.UserVersion
		if category == keybase1.TeamInviteCategory_KEYBASE {
			// "keybase" invites (i.e. pukless users) have user version for name
			var err error
			uv, err = invite.KeybaseUserVersion()
			if err != nil {
				return nil, err
			}
			up, err := upakLoader.LoadUserPlusKeys(context.Background(), uv.Uid, "")
			if err != nil {
				return nil, err
			}
			// The invitee's eldest seqno changed (e.g. account reset) since
			// the invite was created; drop the stale invite from the output.
			if uv.EldestSeqno != up.EldestSeqno {
				continue
			}
			name = keybase1.TeamInviteName(up.Username)
		}

		annotatedInvites[id] = keybase1.AnnotatedTeamInvite{
			Role:            invite.Role,
			Id:              invite.Id,
			Type:            invite.Type,
			Name:            name,
			Uv:              uv,
			Inviter:         invite.Inviter,
			InviterUsername: username.String(),
			TeamName:        teamName,
		}
	}
	return annotatedInvites, nil
}
// TeamTree returns one entry per team in the subtree rooted at arg.Name
// (which must be a root team), with Admin set when the caller is an admin
// or implicit admin. Ancestor names missing from the server's list are
// synthesized with Admin=false. Entries come back in alphabetical order.
func TeamTree(ctx context.Context, g *libkb.GlobalContext, arg keybase1.TeamTreeArg) (res keybase1.TeamTreeResult, err error) {
	if !arg.Name.IsRootTeam() {
		return res, fmt.Errorf("cannot get tree of non-root team")
	}

	serverList, err := getTeamsListFromServer(ctx, g, "", false)
	if err != nil {
		return res, err
	}

	// Map from team name (string) to entry
	entryMap := make(map[string]keybase1.TeamTreeEntry)

	// The server might have omitted some teams, oh well.
	// Trusts the server for role.
	// Load the teams by ID to make sure they are valid and get the validated names.
	for _, info := range serverList {
		serverName, err := info.TeamName()
		if err != nil {
			return res, err
		}
		if !serverName.RootAncestorName().Eq(arg.Name) {
			// Skip those not in this tree.
			continue
		}
		team, err := Load(ctx, g, keybase1.LoadTeamArg{
			ID:          info.TeamID,
			ForceRepoll: true,
		})
		if err != nil {
			return res, err
		}
		var admin bool // true if an admin or implicit admin
		if info.Role.IsAdminOrAbove() {
			admin = true
		}
		if info.Implicit != nil && info.Implicit.Role.IsAdminOrAbove() {
			admin = true
		}
		entryMap[team.Name().String()] = keybase1.TeamTreeEntry{
			Name:  team.Name(),
			Admin: admin,
		}
	}

	// Add all parent names (recursively)
	// So that if only A.B.C is in the list, we add A.B and A as well.
	// Adding map entries while iterating is safe.
	// "If map entries are created during iteration, that entry may be produced during the iteration or may be skipped."
	for _, entry := range entryMap {
		name := entry.Name.DeepCopy()
		for name.Depth() > 0 {
			_, ok := entryMap[name.String()]
			if !ok {
				// Synthesized ancestors are not known to be admin'd.
				entryMap[name.String()] = keybase1.TeamTreeEntry{
					Name:  name,
					Admin: false,
				}
			}
			// NOTE(review): this assigns the function's named return err;
			// a Parent() failure only breaks this inner loop and is later
			// overwritten by the explicit returns below.
			name, err = name.Parent()
			if err != nil {
				break
			}
		}
	}

	for _, entry := range entryMap {
		res.Entries = append(res.Entries, entry)
	}

	if len(res.Entries) == 0 {
		return res, fmt.Errorf("team not found: %v", arg.Name)
	}

	// Order into a tree order. Which happens to be alphabetical ordering.
	// Example: [a, a.b, a.b.c, a.b.d, a.e.f, a.e.g]
	sort.Slice(res.Entries, func(i, j int) bool {
		return res.Entries[i].Name.String() < res.Entries[j].Name.String()
	})

	return res, nil
}
|
package basic
// Instance of the current BASIC machine.
type Instance struct {
	// CurrentLine is the line number the machine is currently at.
	CurrentLine int
}
// Parse parses the current code.
// It is a stub for now and always reports success.
func (b *Instance) Parse() (err error) {
	return nil
}
add lex
package basic
import (
"fmt"
"io"
"strings"
"github.com/crgimenes/lex"
)
// TokenParsers is the ordered list of token parser functions handed to the
// lexer: identifiers, newlines, then a catch-all for unimplemented tokens.
var TokenParsers = []lex.TokenFunction{
	lex.Ident,
	lex.NewLine,
	lex.NotImplemented,
}
// Parse lexes a small hard-coded BASIC program and prints the resulting
// tokens. io.EOF from the lexer just means the whole input was consumed
// and is treated as success; any other lexer error is printed and returned.
func Parse() (err error) {
	code := `10 print "test"
20 goto 10`
	// Name the lexer "l": the original `var lex lex.Lexer` shadowed the
	// imported lex package for the rest of the function.
	var l lex.Lexer
	l.MaxParseID = len(TokenParsers)
	err = l.Run(strings.NewReader(code))
	if err != nil {
		if err != io.EOF {
			println(err.Error())
			return
		}
		// BUGFIX: previously io.EOF leaked out of the final return, making
		// a successful parse look like an error to callers.
		err = nil
	}
	for _, t := range l.Tokens {
		fmt.Printf("%v\t%q\n", t.Type, t.Literal)
	}
	return
}
|
package main
import "testing"
// TestGetBitsFromPacket checks that getBitsFromPacket extracts the expected
// byte from an all-ones packet for several bits-per-pixel widths.
func TestGetBitsFromPacket(t *testing.T) {
	var bytePos int
	var bitPos int
	tests := []struct {
		name   string
		packet []byte
		bpP    uint
		ret    uint8
	}{
		// The expected values only have the high-order bits set
		// (254=0b11111110, 252=0b11111100, ..., 128=0b10000000).
		{"24 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 24, 255},
		{"21 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 21, 254},
		{"18 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 18, 252},
		{"15 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 15, 248},
		{"12 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 12, 240},
		{"9 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 9, 224},
		{"6 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 6, 192},
		{"3 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 3, 128},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Reset position, as the stream of provided bits is limited
			bytePos = 0
			bitPos = 0
			res := getBitsFromPacket(tc.packet, &bytePos, &bitPos, tc.bpP)
			if res != tc.ret {
				t.Errorf("Input: %d Expected: %d \t Got %d", tc.packet, tc.ret, res)
			}
		})
	}
}
// TestCheckConfig exercises checkConfig with valid and invalid
// configurations and compares the produced error messages.
func TestCheckConfig(t *testing.T) {
	tests := []struct {
		name string
		cfg  configs
		err  string
	}{
		// Testing different output styles
		{name: "Two Bits per Pixel", cfg: configs{2, 0, 0, 0, TERMINAL}, err: "-bits 2 is not divisible by three or one"},
		{name: "One Bit per Pixel", cfg: configs{1, 0, 0, 0, TERMINAL}},
		{name: "27 Bits per Pixel", cfg: configs{27, 0, 0, 0, TERMINAL}, err: "-bits 27 must be smaller than 25"},
		{name: "Terminal only", cfg: configs{3, 0, 0, 0, TERMINAL}},
		{name: "Terminal and Timeslize", cfg: configs{3, 0, 0, 0, (TERMINAL | TIMESLIZES)}, err: "-timeslize and -terminal can't be combined"},
		{name: "Fixed Slize", cfg: configs{1, 0, 0, 0, 0}},
		{name: "Time Slize", cfg: configs{1, 0, 50, 0, 0}},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			res := checkConfig(tc.cfg)
			// NOTE(review): when tc.err is empty a non-nil res passes
			// silently, and a nil res with tc.err set would panic here.
			if tc.err != "" {
				if res.Error() != tc.err {
					t.Errorf("Expected: %v \t Got: %v", tc.err, res)
				}
			}
		})
	}
}
// TestCreatePixel checks that createPixel decodes the expected RGB color
// from a packet at a given byte/bit offset and bits-per-pixel setting.
func TestCreatePixel(t *testing.T) {
	tests := []struct {
		name   string
		packet []byte
		byteP  int
		bitP   int
		bpP    uint
		red    uint8
		green  uint8
		blue   uint8
	}{
		{"White", []byte{0xFF, 0xFF}, 0, 0, 1, 255, 255, 255},
		{"Black", []byte{0x00, 0x00}, 0, 0, 1, 0, 0, 0},
		{"Royal Blue", []byte{0x41, 0x69, 0xE1, 0x41, 0x69, 0xE1}, 0, 0, 24, 65, 105, 225},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			c := createPixel(tc.packet, &(tc.byteP), &(tc.bitP), tc.bpP)
			// RGBA() yields 16-bit channels; compare only the low 8 bits.
			r, g, b, _ := c.RGBA()
			if uint8(r) != tc.red || uint8(g) != tc.green || uint8(b) != tc.blue {
				t.Errorf("Expected: r%dg%db%d\t Got: r%dg%db%d", tc.red, tc.green, tc.blue, uint8(r), uint8(g), uint8(b))
			}
		})
	}
}
Adapt test to change in code
Signed-off-by: Lehner Florian <34c6fceca75e456f25e7e99531e2425c6c1de443@der-flo.net>
package main
import "testing"
// TestGetBitsFromPacket checks that getBitsFromPacket extracts the expected
// byte from an all-ones packet for several bits-per-pixel widths.
func TestGetBitsFromPacket(t *testing.T) {
	var bytePos int
	var bitPos int
	tests := []struct {
		name   string
		packet []byte
		bpP    uint
		ret    uint8
	}{
		// The expected values only have the high-order bits set
		// (254=0b11111110, 252=0b11111100, ..., 128=0b10000000).
		{"24 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 24, 255},
		{"21 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 21, 254},
		{"18 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 18, 252},
		{"15 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 15, 248},
		{"12 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 12, 240},
		{"9 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 9, 224},
		{"6 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 6, 192},
		{"3 Bits", []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 3, 128},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Reset position, as the stream of provided bits is limited
			bytePos = 0
			bitPos = 0
			res := getBitsFromPacket(tc.packet, &bytePos, &bitPos, tc.bpP)
			if res != tc.ret {
				t.Errorf("Input: %d Expected: %d \t Got %d", tc.packet, tc.ret, res)
			}
		})
	}
}
// TestCheckConfig exercises checkConfig (which now takes a *configs) with
// valid and invalid configurations and compares the produced error messages.
func TestCheckConfig(t *testing.T) {
	tests := []struct {
		name string
		cfg  configs
		err  string
	}{
		// Testing different output styles
		{name: "Two Bits per Pixel", cfg: configs{2, 0, 0, 0, TERMINAL}, err: "-bits 2 is not divisible by three or one"},
		{name: "One Bit per Pixel", cfg: configs{1, 0, 0, 0, TERMINAL}},
		{name: "27 Bits per Pixel", cfg: configs{27, 0, 0, 0, TERMINAL}, err: "-bits 27 must be smaller than 25"},
		{name: "Terminal only", cfg: configs{3, 0, 0, 0, TERMINAL}},
		{name: "Terminal and Timeslize", cfg: configs{3, 0, 0, 0, (TERMINAL | TIMESLIZES)}, err: "-timeslize and -terminal can't be combined"},
		{name: "Fixed Slize", cfg: configs{1, 0, 0, 0, 0}},
		{name: "Time Slize", cfg: configs{1, 0, 50, 0, 0}},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			res := checkConfig(&tc.cfg)
			// NOTE(review): when tc.err is empty a non-nil res passes
			// silently, and a nil res with tc.err set would panic here.
			if tc.err != "" {
				if res.Error() != tc.err {
					t.Errorf("Expected: %v \t Got: %v", tc.err, res)
				}
			}
		})
	}
}
// TestCreatePixel checks that createPixel decodes the expected RGB color
// from a packet at a given byte/bit offset and bits-per-pixel setting.
func TestCreatePixel(t *testing.T) {
	tests := []struct {
		name   string
		packet []byte
		byteP  int
		bitP   int
		bpP    uint
		red    uint8
		green  uint8
		blue   uint8
	}{
		{"White", []byte{0xFF, 0xFF}, 0, 0, 1, 255, 255, 255},
		{"Black", []byte{0x00, 0x00}, 0, 0, 1, 0, 0, 0},
		{"Royal Blue", []byte{0x41, 0x69, 0xE1, 0x41, 0x69, 0xE1}, 0, 0, 24, 65, 105, 225},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			c := createPixel(tc.packet, &(tc.byteP), &(tc.bitP), tc.bpP)
			// RGBA() yields 16-bit channels; compare only the low 8 bits.
			r, g, b, _ := c.RGBA()
			if uint8(r) != tc.red || uint8(g) != tc.green || uint8(b) != tc.blue {
				t.Errorf("Expected: r%dg%db%d\t Got: r%dg%db%d", tc.red, tc.green, tc.blue, uint8(r), uint8(g), uint8(b))
			}
		})
	}
}
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package godash
import (
"net/http"
"sort"
"time"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
// GitHub coordinates of the repository this dashboard tracks.
const project = "golang/go"
const projectOwner = "golang"
const projectRepo = "go"
// NewGitHubClient returns a GitHub API client that authenticates every
// request with authToken over the supplied base transport.
func NewGitHubClient(project, authToken string, transport http.RoundTripper) *github.Client {
	tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: authToken})
	httpClient := &http.Client{
		Transport: &oauth2.Transport{Source: tokenSource, Base: transport},
	}
	return github.NewClient(httpClient)
}
// getInt dereferences x, treating nil as zero.
func getInt(x *int) int {
	if x != nil {
		return *x
	}
	return 0
}
// getString dereferences x, treating nil as the empty string.
func getString(x *string) string {
	if x != nil {
		return *x
	}
	return ""
}
// getUserLogin returns the user's login name, or "" when the user or its
// Login field is absent.
func getUserLogin(x *github.User) string {
	if x != nil && x.Login != nil {
		return *x.Login
	}
	return ""
}
// getTime dereferences x into local time, treating nil as the zero time.
func getTime(x *time.Time) time.Time {
	if x == nil {
		var zero time.Time
		return zero
	}
	return x.Local()
}
// getMilestoneTitle returns the milestone's title, or "" when the milestone
// or its Title field is absent.
func getMilestoneTitle(x *github.Milestone) string {
	if x != nil && x.Title != nil {
		return *x.Title
	}
	return ""
}
// getLabelNames extracts the label names, sorted alphabetically.
func getLabelNames(x []github.Label) []string {
	var out []string
	for i := range x {
		out = append(out, getString(x[i].Name))
	}
	sort.Strings(out)
	return out
}
// issueToIssue converts a go-github issue into the dashboard's Issue type,
// flattening its pointer-heavy fields through the nil-safe getters above.
func issueToIssue(issue github.Issue) *Issue {
	return &Issue{
		Number:    getInt(issue.Number),
		Title:     getString(issue.Title),
		State:     getString(issue.State),
		Assignee:  getUserLogin(issue.Assignee),
		Closed:    getTime(issue.ClosedAt),
		Labels:    getLabelNames(issue.Labels),
		Milestone: getMilestoneTitle(issue.Milestone),
		Reporter:  getUserLogin(issue.User),
		Created:   getTime(issue.CreatedAt),
	}
}
// listIssues pages through all issues of the project repo matching opt,
// skipping pull requests, and returns them converted to *Issue.
// Issues received before an error are returned alongside that error.
func listIssues(client *github.Client, opt github.IssueListByRepoOptions) ([]*Issue, error) {
	var all []*Issue
	for page := 1; ; {
		// Copy opt so the caller's ListOptions are never mutated.
		xopt := opt
		xopt.ListOptions = github.ListOptions{
			Page:    page,
			PerPage: 100,
		}
		issues, resp, err := client.Issues.ListByRepo(projectOwner, projectRepo, &xopt)
		for _, issue := range issues {
			// PullRequestLinks is only set on pull requests; skip those.
			if issue.PullRequestLinks == nil {
				all = append(all, issueToIssue(issue))
			}
		}
		if err != nil {
			return all, err
		}
		// NOTE(review): presumably NextPage is 0 on the last page, which
		// makes this condition terminate the loop — confirm with go-github.
		if resp.NextPage < page {
			break
		}
		page = resp.NextPage
	}
	return all, nil
}
// searchIssues runs a GitHub search for open issues in the project repo
// matching query q and returns all pages converted to *Issue.
// Issues received before an error are returned alongside that error.
func searchIssues(client *github.Client, q string) ([]*Issue, error) {
	var all []*Issue
	for page := 1; ; {
		// TODO(rsc): Rethink excluding pull requests.
		x, resp, err := client.Search.Issues("type:issue state:open repo:"+project+" "+q, &github.SearchOptions{
			ListOptions: github.ListOptions{
				Page:    page,
				PerPage: 100,
			},
		})
		for _, issue := range x.Issues {
			all = append(all, issueToIssue(issue))
		}
		if err != nil {
			return all, err
		}
		// NOTE(review): presumably NextPage is 0 on the last page, which
		// makes this condition terminate the loop — confirm with go-github.
		if resp.NextPage < page {
			break
		}
		page = resp.NextPage
	}
	return all, nil
}
// getMilestones returns all milestones of the project repo that carry a
// title. Milestones gathered so far are returned alongside any error.
func getMilestones(client *github.Client) ([]*github.Milestone, error) {
	var all []*github.Milestone
	milestones, _, err := client.Issues.ListMilestones(projectOwner, projectRepo, nil)
	for i := range milestones {
		// Take the address of the slice element, not of a loop variable.
		m := &milestones[i]
		if m.Title != nil {
			all = append(all, m)
		}
	}
	return all, err
}
godash: adjust to breaking upstream change
Package github.com/google/go-github/github introduced some breaking
changes (changing Issue to *Issue and Milestone to *Milestone in some
return values).
Change-Id: I0eceb6247194fd2f27fe629dbdf70509e03ba86b
Reviewed-on: https://go-review.googlesource.com/25463
Reviewed-by: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package godash
import (
"net/http"
"sort"
"time"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
// GitHub coordinates of the repository this dashboard tracks.
const project = "golang/go"
const projectOwner = "golang"
const projectRepo = "go"
// NewGitHubClient returns a GitHub API client that authenticates every
// request with authToken over the supplied base transport.
func NewGitHubClient(project, authToken string, transport http.RoundTripper) *github.Client {
	tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: authToken})
	httpClient := &http.Client{
		Transport: &oauth2.Transport{Source: tokenSource, Base: transport},
	}
	return github.NewClient(httpClient)
}
// getInt dereferences x, treating nil as zero.
func getInt(x *int) int {
	if x != nil {
		return *x
	}
	return 0
}
// getString dereferences x, treating nil as the empty string.
func getString(x *string) string {
	if x != nil {
		return *x
	}
	return ""
}
// getUserLogin returns the user's login name, or "" when the user or its
// Login field is absent.
func getUserLogin(x *github.User) string {
	if x != nil && x.Login != nil {
		return *x.Login
	}
	return ""
}
// getTime dereferences x into local time, treating nil as the zero time.
func getTime(x *time.Time) time.Time {
	if x == nil {
		var zero time.Time
		return zero
	}
	return x.Local()
}
// getMilestoneTitle returns the milestone's title, or "" when the milestone
// or its Title field is absent.
func getMilestoneTitle(x *github.Milestone) string {
	if x != nil && x.Title != nil {
		return *x.Title
	}
	return ""
}
// getLabelNames extracts the label names, sorted alphabetically.
func getLabelNames(x []github.Label) []string {
	var out []string
	for i := range x {
		out = append(out, getString(x[i].Name))
	}
	sort.Strings(out)
	return out
}
// issueToIssue converts a go-github issue into the dashboard's Issue type,
// flattening its pointer-heavy fields through the nil-safe getters above.
func issueToIssue(issue *github.Issue) *Issue {
	return &Issue{
		Number:    getInt(issue.Number),
		Title:     getString(issue.Title),
		State:     getString(issue.State),
		Assignee:  getUserLogin(issue.Assignee),
		Closed:    getTime(issue.ClosedAt),
		Labels:    getLabelNames(issue.Labels),
		Milestone: getMilestoneTitle(issue.Milestone),
		Reporter:  getUserLogin(issue.User),
		Created:   getTime(issue.CreatedAt),
	}
}
// listIssues pages through all issues of the project repo matching opt,
// skipping pull requests, and returns them converted to *Issue.
// Issues received before an error are returned alongside that error.
func listIssues(client *github.Client, opt github.IssueListByRepoOptions) ([]*Issue, error) {
	var all []*Issue
	for page := 1; ; {
		// Copy opt so the caller's ListOptions are never mutated.
		xopt := opt
		xopt.ListOptions = github.ListOptions{
			Page:    page,
			PerPage: 100,
		}
		issues, resp, err := client.Issues.ListByRepo(projectOwner, projectRepo, &xopt)
		for _, issue := range issues {
			// PullRequestLinks is only set on pull requests; skip those.
			if issue.PullRequestLinks == nil {
				all = append(all, issueToIssue(issue))
			}
		}
		if err != nil {
			return all, err
		}
		// NOTE(review): presumably NextPage is 0 on the last page, which
		// makes this condition terminate the loop — confirm with go-github.
		if resp.NextPage < page {
			break
		}
		page = resp.NextPage
	}
	return all, nil
}
// searchIssues runs a GitHub search for open issues in the project repo
// matching query q and returns all pages converted to *Issue.
// Issues received before an error are returned alongside that error.
func searchIssues(client *github.Client, q string) ([]*Issue, error) {
	var all []*Issue
	for page := 1; ; {
		// TODO(rsc): Rethink excluding pull requests.
		x, resp, err := client.Search.Issues("type:issue state:open repo:"+project+" "+q, &github.SearchOptions{
			ListOptions: github.ListOptions{
				Page:    page,
				PerPage: 100,
			},
		})
		for _, issue := range x.Issues {
			// Taking the loop variable's address is safe here because
			// issueToIssue copies every field before the next iteration.
			all = append(all, issueToIssue(&issue))
		}
		if err != nil {
			return all, err
		}
		// NOTE(review): presumably NextPage is 0 on the last page, which
		// makes this condition terminate the loop — confirm with go-github.
		if resp.NextPage < page {
			break
		}
		page = resp.NextPage
	}
	return all, nil
}
// getMilestones returns all milestones of the project repo that carry a
// title. Milestones gathered so far are returned alongside any error.
func getMilestones(client *github.Client) ([]*github.Milestone, error) {
	var all []*github.Milestone
	milestones, _, err := client.Issues.ListMilestones(projectOwner, projectRepo, nil)
	for i := range milestones {
		// Elements are already pointers after the go-github API change.
		m := milestones[i]
		if m.Title != nil {
			all = append(all, m)
		}
	}
	return all, err
}
|
// Copyright 2012, The gohg Authors. All rights reserved.
// Use of this source code is governed by a BSD style license
// that can be found in the LICENSE.txt file.
// Package gohg is a Go client library for using the Mercurial dvcs
// via it's Command Server.
//
// For Mercurial see: http://mercurial.selenic.com/wiki.
//
// For the Hg Command Server see: http://mercurial.selenic.com/wiki/CommandServer.
package gohg_lib
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
// "path"
"path/filepath"
"strings"
)
// Type HgClient will act as an object (kind of) for working with the Hg CS
// from any program using this gohg client lib.
// It will in fact act as a stand-in for the regular 'hg' command.
// It will get a bunch of fields and methods to make working with it
// as go-like as possible. It might even get a few channels for communications.
type HgClient struct {
	hgserver *exec.Cmd // the running "hg serve --cmdserver pipe" process
	// The in and out pipe ends are to be considered from the point of view
	// of the Hg Command Server instance.
	pin          io.WriteCloser // the command server's stdin
	pout         io.ReadCloser  // the command server's stdout
	Connected    bool           // already connected to a Hg CS ?
	HgPath       string         // which hg is used ?
	Capabilities []string       // as per the hello message
	Encoding     string         // as per the hello message
	Repo         string         // the full path to the Hg repo
	HgVersion    string         // the version number only
	// config []string
}
// hgMsg is what we receive from the Hg CS
type hgMsg struct {
	Ch   string // channel identifier
	Ln   uint   // payload length in bytes
	Data string // the payload itself
}
// hgCmd is what we send to the Hg CS
type hgCmd struct {
	Cmd  string // the command name
	Ln   uint   // length of the argument data
	Args string // the argument data
}
// err is package-level scratch error state shared by most functions below.
// NOTE(review): shared mutable error state is not goroutine-safe and makes
// call ordering significant; per-function local errors would be safer.
var err error

// logfile would hold the path of the communication logfile (see the
// commented-out init below); currently unused.
var logfile string
// // init takes care of some householding, namely preparing a logfile where
// // all communication between this lib and the Hg CS can be logged.
// func init() {
// var exedir string
// exedir = path.Dir(os.Args[0])
// exedir, err = filepath.Abs(exedir)
// if err != nil {
// log.Fatal("Could not determine path for the gohg.log logfile.")
// }
// logfile = exedir + string(os.PathSeparator) + "gohg.log"
// } // init()
// NewHgClient creates a new instance of the client object for working with
// the Hg Command Server.
func NewHgClient() *HgClient {
	return &HgClient{}
}
// Connect establishes the connection with the Mercurial Command Server.
//
// Arguments:
//	hgexe
//		The command to run mercurial. Optional. The 'hg' command will be used
//		when not provided. This allows to run a specific version of Mercurial.
//	reponame
//		The folder of the Hg repository to work on. Optional.
//		When blank the folder where the program is run is used
//		(see function locateRepository()).
//	config
//		Configuration settings that will be added to the necessary
//		fixed settings (see composeHgConfig() for more).
//
// Returns an error if the connection could not be established properly.
func (hgcl *HgClient) Connect(hgexe string, reponame string, config []string) error {

	// for example:
	// hgcl.hgserver =
	// 	exec.Command("M:\\DEV\\hg-stable\\hg",  // the Hg command
	// 		"-R", "C:\\DEV\\go\\src\\golout\\", // the repo
	// 		"--config", "ui.interactive=True",  // mandatory settings
	// 		"--config", "extensions.color=!",   // more settings (for Windows)
	// 		"serve", "--cmdserver", "pipe")     // start the Command Server

	// Maybe accept a channel as an extra argument for sending the logging to ?
	// And if it's nil, log into a textfile in the folder of this lib.
	// Also do not override that logfile every launch.
	// Maybe even do this in the init() function ?

	// Refuse a second connection on the same client.
	if hgcl.hgserver != nil {
		return errors.New("Connect(): already running a Hg Command Server for " + hgcl.Repo)
	}

	if hgexe == "" {
		// Let the OS determine what Mercurial to run
		// for this machine/user combination.
		hgexe = "hg"
	}

	// The Hg Command Server needs a repository.
	hgcl.Repo, err = locateRepository(reponame)
	if err != nil {
		return err
	}
	if hgcl.Repo == "" {
		return errors.New("Connect(): could not find a Hg repository at: " + reponame)
	}

	// Maybe we can also offer the possibility of a config file?
	// f.i.: a file gohg.cfg in the same folder as the gohg.exe,
	// and a section per repo, and one "general" section.
	// Or maybe just a [gohg] section in one of the 'normal' Hg config files ?
	var hgconfig []string
	hgconfig = composeHgConfig(hgexe, hgcl.Repo, config)

	hgcl.hgserver = exec.Command(hgexe)
	hgcl.hgserver.Args = hgconfig
	hgcl.hgserver.Dir = hgcl.Repo

	// Wire up both pipe ends before starting the server process.
	hgcl.pout, err = hgcl.hgserver.StdoutPipe()
	if err != nil {
		return errors.New("Connect(): could not connect StdoutPipe: " + err.Error())
	}

	hgcl.pin, err = hgcl.hgserver.StdinPipe()
	if err != nil {
		// NOTE(review): log.Fatal terminates the whole process, unlike the
		// error returns used on the neighboring failure paths.
		log.Fatal("Connect(): could not connect StdinPipe: " + err.Error())
	}

	if err := hgcl.hgserver.Start(); err != nil {
		return errors.New("Connect(): could not start the Hg Command Server: " + err.Error())
	}

	err = readHelloMessage(hgcl)
	if err != nil {
		return err
	}

	err = validateCapabilities(hgcl)
	if err != nil {
		return err
	}

	hgcl.Connected = true
	hgcl.HgPath = hgexe

	err = getHgVersion(hgcl)
	if err != nil {
		log.Fatal("from HgVersion() : " + string(err.Error()))
	}

	return nil
} // Connect()
// Close ends the connection with the Mercurial Command Server.
//
// In fact it's closing the stdin of the Hg CS that closes the connection,
// as per the Hg CS documentation.
func (hgcl *HgClient) Close() error {
	if hgcl.hgserver == nil {
		log.Println("Close(): Trying to close a closed hgserver.")
		return nil
	}
	// Closing our write end (the server's stdin) tells it to exit.
	hgcl.pin.Close()
	hgcl.pout.Close()
	// Drop the server handle whether or not Wait fails.
	defer func() { hgcl.hgserver = nil }()
	err = hgcl.hgserver.Wait()
	if err != nil {
		return err
	}
	return nil
} // Close()
// locateRepository assures we have a Mercurial repository available,
// which is required for working with the Hg Command Server.
// Starting at reponame (made absolute), it walks up the directory tree
// looking for a ".hg" folder and returns the repo root, or "" if none.
func locateRepository(reponame string) (string, error) {
	repo := reponame
	sep := string(os.PathSeparator)
	// first make a correct path from repo
	repo, err = filepath.Abs(repo)
	if err != nil {
		return "", errors.New(err.Error() +
			"\ncould not determine absolute path for: " + repo)
	}
	repo = filepath.Clean(repo)
	// If we do not find a Hg repo in this dir, we search for one
	// up the path, in case we're deeper in it's working copy.
	for {
		_, err = os.Stat(repo + sep + ".hg")
		if err == nil {
			break
		}
		var file string
		// Split strips one trailing path element per iteration; an empty
		// file means we have reached the filesystem root.
		repo, file = filepath.Split(repo)
		if repo == "" || file == "" {
			break
		}
	}
	// NOTE(review): a stat failure is reported as ("", nil) — the error is
	// deliberately dropped and callers treat "" as "no repo found".
	if err != nil || repo == "" {
		return "", nil
	}
	return repo, nil
} // locateRepository()
// composeHgConfig builds the complete argument vector used to launch the Hg
// Command Server: the hg command itself, the working-dir/repo flags, the
// fixed config overrides, and the serve subcommand.
// The config parameter is accepted for future use and currently ignored.
func composeHgConfig(hgcmd string, repo string, config []string) []string {
	args := []string{
		hgcmd,
		"--cwd", repo,
		"-R", repo,
		// These settings are fixed requirements for the pipe protocol.
		"--config", "ui.interactive=False",
		"--config", "extensions.color=!",
		"serve", "--cmdserver", "pipe",
	}
	return args
} // composeHgConfig()
// readHelloMessage reads the special hello message sent by the Hg CS.
//
// It has a fixed format, and contains info about the possibilities
// of the Hg CS at hand. It's also a first proof of a working connection.
func readHelloMessage(hgcl *HgClient) error {
	// The header is 1 channel byte plus a 4-byte big-endian length.
	s := make([]byte, 5)
	_, err = hgcl.pout.Read(s)
	if err != io.EOF && err != nil {
		return err
	}
	if len(s) == 0 {
		return errors.New("no hello message data received from Hg Command Server")
	}
	// Mercurial < 1.9 has no command server and answers with usage text.
	const t1 = "hg se" // hg returned: "hg serve [OPTION]"
	if string(s[0:len(t1)]) == t1 {
		log.Fatal(errors.New(
			"need at least version 1.9 of Mercurial to use the Command Server\n" +
				"(type 'hg version' and 'which hg' to verify)"))
	}
	ch := string(s[0])
	if ch != "o" {
		return errors.New("received unexpected channel '" + ch +
			"' for hello message from Hg Command Server")
	}
	var ln uint32
	ln, err = calcDataLength(s[1:5])
	if err != nil {
		fmt.Println("readHelloMessage(): binary.Read failed:", err)
	}
	// ln is unsigned, so "invalid" can only mean zero.
	if ln == 0 {
		// BUGFIX: string(ln) converted the length to a Unicode code point
		// (always "\x00" here); format it as a decimal number instead.
		return errors.New("received invalid length '" + fmt.Sprint(ln) +
			"' for hello message from Hg Command Server")
	}
	hello := make([]byte, ln)
	_, err = hgcl.pout.Read(hello)
	if err != io.EOF && err != nil {
		return err
	}
	// The payload starts with the capabilities line, then the encoding.
	const t2 = "capabilities:"
	if string(hello[0:len(t2)]) != t2 {
		return errors.New("could not determine the capabilities of the Hg Command Server")
	}
	attr := strings.Split(string(hello), "\n")
	hgcl.Capabilities = strings.Fields(attr[0])[1:]
	hgcl.Encoding = strings.Split(attr[1], ": ")[1]
	return nil
} // readHelloMessage()
// validateCapabilities checks that the connected Hg Command Server
// supports the 'runcommand' capability, which this library depends on.
// It returns an error (instead of the original log.Fatal) so callers
// such as Connect can decide how to handle the failure.
func validateCapabilities(hgcl *HgClient) error {
	for _, c := range hgcl.Capabilities {
		if c == "runcommand" {
			return nil
		}
	}
	return errors.New("could not detect the 'runcommand' capability")
}
// getHgVersion stores the version of the connected Mercurial instance
// (queried with 'hg version -q') in the client's HgVersion field.
func getHgVersion(hgcl *HgClient) error {
	hgcl.HgVersion, err = hgcl.Version([]string{"-q"})
	return err
}
// readFromHg returns the channel and all the data read from it.
// Eventually it returns no (or empty) data but an error.
//
// Wire format: 1 channel byte + 4 big-endian length bytes, then the payload.
func readFromHg(hgcl *HgClient) (string, []byte, error) {
	var ch string
	// Read the complete 5-byte header. io.ReadFull prevents acting on a
	// partially filled header, and replaces the original "data == nil"
	// check, which could never be true after make().
	data := make([]byte, 5)
	if _, err := io.ReadFull(hgcl.pout, data); err != nil {
		return ch, data, errors.New("readFromHg(): no data read: " + err.Error())
	}
	ch = string(data[0])
	// get the uint that the Hg CS sent us as the length value
	ln, err := calcDataLength(data[1:5])
	if err != nil {
		return ch, data, errors.New("readFromHg(): binary.Read failed: " +
			err.Error())
	}
	// now read exactly ln bytes of payload
	data = make([]byte, ln)
	if _, err := io.ReadFull(hgcl.pout, data); err != nil && err != io.EOF {
		return ch, data, err
	}
	return ch, data, nil
} // readFromHg()
// sendToHg writes data to the Hg CS,
// returning an error if something went wrong.
//
// Wire format: "<command>\n" followed, when there are arguments, by a
// 4-byte big-endian argument length and the argument bytes themselves.
func sendToHg(hgcl *HgClient, cmd string, args []byte) error {
	cmd = strings.TrimRight(cmd, "\n") + "\n"
	lc := len(cmd)
	la := len(args)
	l := lc // in case cmd == "getencoding" f.i.
	if la > 0 {
		l = l + 4 + la
	}
	data := make([]byte, l)
	// the command itself
	copy(data[0:lc], cmd)
	if la > 0 {
		// The argument length, big-endian. PutUint32 replaces the original
		// binary.Write + io.ReadFull round trip through a scratch buffer.
		binary.BigEndian.PutUint32(data[lc:lc+4], uint32(la))
		// the command arguments
		copy(data[lc+4:lc+4+la], args)
	}
	// perform the actual send to the Hg CS
	i, err := hgcl.pin.Write(data)
	if err != nil {
		// The original only compared byte counts and then dereferenced a
		// possibly nil error; report the write error itself.
		return errors.New("sendToHg(): writing data failed: " + err.Error())
	}
	if i != len(data) {
		return fmt.Errorf("sendToHg(): short write: %d of %d bytes", i, len(data))
	}
	return nil
} // sendToHg()
// GetEncoding returns the servers encoding on the result channel.
// Currently only UTF8 is supported.
//
// It issues the special 'getencoding' command to the Hg CS; the reply
// arrives on the result ('r') channel (see runInHg).
func (hgcl *HgClient) GetEncoding() (string, error) {
	var encoding []byte
	// The return code (second result) is irrelevant for this command.
	encoding, _, err = runInHg(hgcl, "getencoding", []string{})
	return string(encoding), err
}
// run allows to run a Mercurial command in the Hg Command Server.
// You can only run 'hg' commands that are available in this library.
//
// It returns the raw command output, the command's return code, and any
// transport-level error (see runInHg for the protocol handling).
func (hgcl *HgClient) run(hgcmd []string) ([]byte, int32, error) {
	var data []byte
	var ret int32
	data, ret, err = runInHg(hgcl, "runcommand", hgcmd)
	return data, ret, err
}
// runInHg sends a command to the Hg CS (using sendToHg),
// and fetches the result (using readFromHg).
//
// It loops over the protocol channels until the result ('r') channel
// arrives, collecting output ('o') along the way. Errors are returned
// to the caller instead of the original log.Fatal calls.
func runInHg(hgcl *HgClient, command string, hgcmd []string) ([]byte, int32, error) {
	// Command arguments are joined with NUL bytes, as the Hg CS expects.
	args := []byte(strings.Join(hgcmd, string(0x0)))
	if err := sendToHg(hgcl, command, args); err != nil {
		return nil, 0, err
	}
	var buf bytes.Buffer
	var ret int32
	for {
		ch, data, err := readFromHg(hgcl)
		if err != nil {
			return nil, 0, errors.New("runInHg(): readFromHg failed: " + err.Error())
		}
		if ch == "" {
			// Guard separately from err: the original dereferenced a nil
			// error here when only the channel was missing.
			return nil, 0, errors.New("runInHg(): readFromHg returned no channel")
		}
		switch ch {
		case "d", "e", "I", "L":
			// Debug, error, input and line-input channels are ignored,
			// matching the original empty cases.
		case "o":
			buf.Write(data)
		case "r":
			// Result channel: 'getencoding' sends the value itself, while
			// 'runcommand' sends a 4-byte big-endian return code.
			if command == "getencoding" {
				buf.Write(data)
			} else {
				if len(data) < 4 {
					return nil, 0, errors.New("runInHg(): result channel sent fewer than 4 bytes")
				}
				ret, err = calcReturncode(data[0:4])
				if err != nil {
					return nil, 0, errors.New("runInHg(): binary.Read failed: " + err.Error())
				}
			}
			return buf.Bytes(), ret, nil
		default:
			return nil, 0, errors.New("runInHg(): unexpected channel '" + ch + "' detected")
		}
	}
} // runInHg()
// calcDataLength converts a 4-byte big-endian slice into an unsigned int.
func calcDataLength(s []byte) (uint32, error) {
	// Use a local error instead of mutating the shared package-level err.
	ln, err := calcIntFromBytes(s)
	return uint32(ln), err
}
// calcReturncode converts a 4-byte big-endian slice into a signed int.
func calcReturncode(s []byte) (int32, error) {
	// Use a local error instead of mutating the shared package-level err.
	rc, err := calcIntFromBytes(s)
	return rc, err
}
// calcIntFromBytes performs the real conversion: it interprets the first
// four bytes of s as a big-endian signed 32-bit integer.
func calcIntFromBytes(s []byte) (int32, error) {
	var result int32
	rdr := bytes.NewReader(s[0:4])
	e := binary.Read(rdr, binary.BigEndian, &result)
	return result, e
}
gohg: make HgClient.hgPath unchangeable by providing a getter method
// Copyright 2012, The gohg Authors. All rights reserved.
// Use of this source code is governed by a BSD style license
// that can be found in the LICENSE.txt file.
// Package gohg is a Go client library for using the Mercurial dvcs
// via it's Command Server.
//
// For Mercurial see: http://mercurial.selenic.com/wiki.
//
// For the Hg Command Server see: http://mercurial.selenic.com/wiki/CommandServer.
package gohg_lib
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
// "path"
"path/filepath"
"strings"
)
// Type HgClient will act as an object (kind of) for working with the Hg CS
// from any program using this gohg client lib.
// It will in fact act as a stand-in for the regular 'hg' command.
// It will get a bunch of fields and methods to make working with it
// as go-like as possible. It might even get a few channels for communications.
type HgClient struct {
	hgserver *exec.Cmd // the running 'hg serve --cmdserver pipe' process
	// The in and out pipe ends are to be considered from the point of view
	// of the Hg Command Server instance.
	pin          io.WriteCloser // connected to the Hg CS's stdin
	pout         io.ReadCloser  // connected to the Hg CS's stdout
	Connected    bool           // already connected to a Hg CS ?
	hgPath       string         // which hg is used ? (exposed via GetHgPath)
	Capabilities []string       // as per the hello message
	Encoding     string         // as per the hello message
	Repo         string         // the full path to the Hg repo
	HgVersion    string         // the version number only
	// config []string
}
// hgMsg is what we receive from the Hg CS
type hgMsg struct {
	Ch   string // the protocol channel the message arrived on
	Ln   uint   // the length of the payload
	Data string // the payload itself
}
// hgCmd is what we send to the Hg CS
type hgCmd struct {
	Cmd  string // the command name, e.g. "runcommand"
	Ln   uint   // the length of the argument data
	Args string // the NUL-joined command arguments
}
// err is a shared package-level error variable used throughout this file.
// NOTE(review): sharing one error variable across functions is not
// concurrency-safe; local error variables would be preferable.
var err error

// logfile would hold the path of a gohg.log file (see the commented-out
// init() below); currently unused.
var logfile string
// // init takes care of some householding, namely preparing a logfile where
// // all communication between this lib and the Hg CS can be logged.
// func init() {
// var exedir string
// exedir = path.Dir(os.Args[0])
// exedir, err = filepath.Abs(exedir)
// if err != nil {
// log.Fatal("Could not determine path for the gohg.log logfile.")
// }
// logfile = exedir + string(os.PathSeparator) + "gohg.log"
// } // init()
// NewHgClient creates a new instance of the client object for working with
// the Hg Command Server.
func NewHgClient() *HgClient {
	return new(HgClient)
}
// Connect establishes the connection with the Mercurial Command Server.
//
// Arguments:
//	hgexe
//		The command to run mercurial. Optional. The 'hg' command will be used
//		when not provided. This allows to run a specific version of Mercurial.
//	reponame
//		The folder of the Hg repository to work on. Optional.
//		When blanc the folder where the program is run is used
//		(see function locateRepository()).
//	config
//		Configuration settings that will be added to the necessary
//		fixed settings (see composeHgConfig() for more).
//
// Returns an error if the connection could not be established properly.
func (hgcl *HgClient) Connect(hgexe string, reponame string, config []string) error {
	if hgcl.hgserver != nil {
		return errors.New("Connect(): already running a Hg Command Server for " + hgcl.Repo)
	}
	if hgexe == "" {
		// Let the OS determine what Mercurial to run
		// for this machine/user combination.
		hgexe = "hg"
	}
	// The Hg Command Server needs a repository.
	hgcl.Repo, err = locateRepository(reponame)
	if err != nil {
		return err
	}
	if hgcl.Repo == "" {
		return errors.New("Connect(): could not find a Hg repository at: " + reponame)
	}
	// Compose the fixed command-server arguments and launch the server.
	hgconfig := composeHgConfig(hgexe, hgcl.Repo, config)
	hgcl.hgserver = exec.Command(hgexe)
	hgcl.hgserver.Args = hgconfig
	hgcl.hgserver.Dir = hgcl.Repo
	hgcl.pout, err = hgcl.hgserver.StdoutPipe()
	if err != nil {
		return errors.New("Connect(): could not connect StdoutPipe: " + err.Error())
	}
	hgcl.pin, err = hgcl.hgserver.StdinPipe()
	if err != nil {
		// Return the error like the StdoutPipe case above; the original
		// inconsistently used log.Fatal here, killing the whole process.
		return errors.New("Connect(): could not connect StdinPipe: " + err.Error())
	}
	if err := hgcl.hgserver.Start(); err != nil {
		return errors.New("Connect(): could not start the Hg Command Server: " + err.Error())
	}
	if err = readHelloMessage(hgcl); err != nil {
		return err
	}
	if err = validateCapabilities(hgcl); err != nil {
		return err
	}
	hgcl.Connected = true
	hgcl.hgPath = hgexe
	if err = getHgVersion(hgcl); err != nil {
		// Propagate instead of the original log.Fatal.
		return errors.New("Connect(): from HgVersion(): " + err.Error())
	}
	return nil
} // Connect()
// Close ends the connection with the Mercurial Command Server.
//
// In fact it's closing the stdin of the Hg CS that closes the connection,
// as per the Hg CS documentation.
func (hgcl *HgClient) Close() error {
	if hgcl.hgserver == nil {
		log.Println("Close(): Trying to close a closed hgserver.")
		return nil
	}
	// Clear the handle once the server has been waited for, whatever
	// Wait() returns.
	defer func() { hgcl.hgserver = nil }()
	hgcl.pin.Close()
	hgcl.pout.Close()
	err = hgcl.hgserver.Wait()
	return err
} // Close()
// locateRepository assures we have a Mercurial repository available,
// which is required for working with the Hg Command Server.
//
// It returns the first directory at or above reponame containing a '.hg'
// subdirectory. When no repository is found it returns ("", nil): absence
// is signalled by the empty path, not by an error (see caller Connect()).
func locateRepository(reponame string) (string, error) {
	repo := reponame
	sep := string(os.PathSeparator)
	// first make a correct path from repo
	repo, err = filepath.Abs(repo)
	if err != nil {
		return "", errors.New(err.Error() +
			"\ncould not determine absolute path for: " + repo)
	}
	repo = filepath.Clean(repo)
	// If we do not find a Hg repo in this dir, we search for one
	// up the path, in case we're deeper in it's working copy.
	for {
		_, err = os.Stat(repo + sep + ".hg")
		if err == nil {
			// found: repo is the root of the working copy
			break
		}
		var file string
		// chop off the last path element and retry one level up
		repo, file = filepath.Split(repo)
		if repo == "" || file == "" {
			// reached the filesystem root without finding '.hg'
			break
		}
	}
	if err != nil || repo == "" {
		// not found: err still holds the last os.Stat failure, but by
		// design "no repo" is reported via the empty path, not an error
		return "", nil
	}
	return repo, nil
} // locateRepository()
// composeHgConfig builds the argument list used to launch the Hg Command
// Server for the given repository. It concerns specific Hg settings.
// The config parameter is currently unused; only the fixed settings
// below are applied.
func composeHgConfig(hgcmd string, repo string, config []string) []string {
	args := []string{
		hgcmd,
		"--cwd", repo,
		"-R", repo,
		// These arguments are fixed: non-interactive mode and no color
		// codes in the output.
		"--config", "ui.interactive=False",
		"--config", "extensions.color=!",
		"serve", "--cmdserver", "pipe",
	}
	return args
} // composeHgConfig()
// readHelloMessage reads the special hello message sent by the Hg CS.
//
// It has a fixed format (a 5-byte channel/length header followed by the
// payload) and contains info about the capabilities of the Hg CS at hand.
// It's also a first proof of a working connection.
func readHelloMessage(hgcl *HgClient) error {
	s := make([]byte, 5)
	// io.ReadFull guarantees the whole header arrives; a bare Read may
	// legally return fewer than 5 bytes from a pipe.
	if _, err := io.ReadFull(hgcl.pout, s); err != nil {
		return errors.New("no hello message data received from Hg Command Server (" + err.Error() + ")")
	}
	const t1 = "hg se" // hg returned: "hg serve [OPTION]"
	if string(s[0:len(t1)]) == t1 {
		// Return instead of log.Fatal: a library should not kill the process.
		return errors.New(
			"need at least version 1.9 of Mercurial to use the Command Server\n" +
				"(type 'hg version' and 'which hg' to verify)")
	}
	ch := string(s[0])
	if ch != "o" {
		return errors.New("received unexpected channel '" + ch +
			"' for hello message from Hg Command Server")
	}
	ln, err := calcDataLength(s[1:5])
	if err != nil {
		return errors.New("readHelloMessage(): binary.Read failed: " + err.Error())
	}
	// ln is unsigned, so the original "ln <= 0" could only ever match zero,
	// and string(ln) produced a rune instead of decimal digits.
	if ln == 0 {
		return errors.New("received invalid length '" + fmt.Sprintf("%d", ln) +
			"' for hello message from Hg Command Server")
	}
	hello := make([]byte, ln)
	if _, err := io.ReadFull(hgcl.pout, hello); err != nil {
		return err
	}
	const t2 = "capabilities:"
	if len(hello) < len(t2) || string(hello[0:len(t2)]) != t2 {
		return errors.New("could not determine the capabilities of the Hg Command Server")
	}
	attr := strings.Split(string(hello), "\n")
	if len(attr) < 2 {
		return errors.New("incomplete hello message from Hg Command Server")
	}
	hgcl.Capabilities = strings.Fields(attr[0])[1:]
	hgcl.Encoding = strings.Split(attr[1], ": ")[1]
	return nil
} // readHelloMessage()
// validateCapabilities checks that the connected Hg Command Server
// supports the 'runcommand' capability, which this library depends on.
// It returns an error (instead of the original log.Fatal) so callers
// such as Connect can decide how to handle the failure.
func validateCapabilities(hgcl *HgClient) error {
	for _, c := range hgcl.Capabilities {
		if c == "runcommand" {
			return nil
		}
	}
	return errors.New("could not detect the 'runcommand' capability")
}
// getHgVersion stores the version of the connected Mercurial instance
// (queried with 'hg version -q') in the client's HgVersion field.
func getHgVersion(hgcl *HgClient) error {
	hgcl.HgVersion, err = hgcl.Version([]string{"-q"})
	return err
}
// readFromHg returns the channel and all the data read from it.
// Eventually it returns no (or empty) data but an error.
//
// Wire format: 1 channel byte + 4 big-endian length bytes, then the payload.
func readFromHg(hgcl *HgClient) (string, []byte, error) {
	var ch string
	// Read the complete 5-byte header. io.ReadFull prevents acting on a
	// partially filled header, and replaces the original "data == nil"
	// check, which could never be true after make().
	data := make([]byte, 5)
	if _, err := io.ReadFull(hgcl.pout, data); err != nil {
		return ch, data, errors.New("readFromHg(): no data read: " + err.Error())
	}
	ch = string(data[0])
	// get the uint that the Hg CS sent us as the length value
	ln, err := calcDataLength(data[1:5])
	if err != nil {
		return ch, data, errors.New("readFromHg(): binary.Read failed: " +
			err.Error())
	}
	// now read exactly ln bytes of payload
	data = make([]byte, ln)
	if _, err := io.ReadFull(hgcl.pout, data); err != nil && err != io.EOF {
		return ch, data, err
	}
	return ch, data, nil
} // readFromHg()
// sendToHg writes data to the Hg CS,
// returning an error if something went wrong.
//
// Wire format: "<command>\n" followed, when there are arguments, by a
// 4-byte big-endian argument length and the argument bytes themselves.
func sendToHg(hgcl *HgClient, cmd string, args []byte) error {
	cmd = strings.TrimRight(cmd, "\n") + "\n"
	lc := len(cmd)
	la := len(args)
	l := lc // in case cmd == "getencoding" f.i.
	if la > 0 {
		l = l + 4 + la
	}
	data := make([]byte, l)
	// the command itself
	copy(data[0:lc], cmd)
	if la > 0 {
		// The argument length, big-endian. PutUint32 replaces the original
		// binary.Write + io.ReadFull round trip through a scratch buffer.
		binary.BigEndian.PutUint32(data[lc:lc+4], uint32(la))
		// the command arguments
		copy(data[lc+4:lc+4+la], args)
	}
	// perform the actual send to the Hg CS
	i, err := hgcl.pin.Write(data)
	if err != nil {
		// The original only compared byte counts and then dereferenced a
		// possibly nil error; report the write error itself.
		return errors.New("sendToHg(): writing data failed: " + err.Error())
	}
	if i != len(data) {
		return fmt.Errorf("sendToHg(): short write: %d of %d bytes", i, len(data))
	}
	return nil
} // sendToHg()
// GetEncoding returns the servers encoding on the result channel.
// Currently only UTF8 is supported.
//
// It issues the special 'getencoding' command to the Hg CS; the reply
// arrives on the result ('r') channel (see runInHg).
func (hgcl *HgClient) GetEncoding() (string, error) {
	var encoding []byte
	// The return code (second result) is irrelevant for this command.
	encoding, _, err = runInHg(hgcl, "getencoding", []string{})
	return string(encoding), err
}
// run allows to run a Mercurial command in the Hg Command Server.
// You can only run 'hg' commands that are available in this library.
//
// It returns the raw command output, the command's return code, and any
// transport-level error (see runInHg for the protocol handling).
func (hgcl *HgClient) run(hgcmd []string) ([]byte, int32, error) {
	var data []byte
	var ret int32
	data, ret, err = runInHg(hgcl, "runcommand", hgcmd)
	return data, ret, err
}
// runInHg sends a command to the Hg CS (using sendToHg),
// and fetches the result (using readFromHg).
//
// It loops over the protocol channels until the result ('r') channel
// arrives, collecting output ('o') along the way. Errors are returned
// to the caller instead of the original log.Fatal calls.
func runInHg(hgcl *HgClient, command string, hgcmd []string) ([]byte, int32, error) {
	// Command arguments are joined with NUL bytes, as the Hg CS expects.
	args := []byte(strings.Join(hgcmd, string(0x0)))
	if err := sendToHg(hgcl, command, args); err != nil {
		return nil, 0, err
	}
	var buf bytes.Buffer
	var ret int32
	for {
		ch, data, err := readFromHg(hgcl)
		if err != nil {
			return nil, 0, errors.New("runInHg(): readFromHg failed: " + err.Error())
		}
		if ch == "" {
			// Guard separately from err: the original dereferenced a nil
			// error here when only the channel was missing.
			return nil, 0, errors.New("runInHg(): readFromHg returned no channel")
		}
		switch ch {
		case "d", "e", "I", "L":
			// Debug, error, input and line-input channels are ignored,
			// matching the original empty cases.
		case "o":
			buf.Write(data)
		case "r":
			// Result channel: 'getencoding' sends the value itself, while
			// 'runcommand' sends a 4-byte big-endian return code.
			if command == "getencoding" {
				buf.Write(data)
			} else {
				if len(data) < 4 {
					return nil, 0, errors.New("runInHg(): result channel sent fewer than 4 bytes")
				}
				ret, err = calcReturncode(data[0:4])
				if err != nil {
					return nil, 0, errors.New("runInHg(): binary.Read failed: " + err.Error())
				}
			}
			return buf.Bytes(), ret, nil
		default:
			return nil, 0, errors.New("runInHg(): unexpected channel '" + ch + "' detected")
		}
	}
} // runInHg()
// calcDataLength converts a 4-byte big-endian slice into an unsigned int.
func calcDataLength(s []byte) (uint32, error) {
	// Use a local error instead of mutating the shared package-level err.
	ln, err := calcIntFromBytes(s)
	return uint32(ln), err
}
// calcReturncode converts a 4-byte big-endian slice into a signed int.
func calcReturncode(s []byte) (int32, error) {
	// Use a local error instead of mutating the shared package-level err.
	rc, err := calcIntFromBytes(s)
	return rc, err
}
// calcIntFromBytes performs the real conversion: it interprets the first
// four bytes of s as a big-endian signed 32-bit integer.
func calcIntFromBytes(s []byte) (int32, error) {
	var result int32
	rdr := bytes.NewReader(s[0:4])
	e := binary.Read(rdr, binary.BigEndian, &result)
	return result, e
}
// GetHgPath returns the command used to start the Hg Command Server
// (the value passed as hgexe to Connect, or "hg" by default).
// NOTE(review): Go convention would name this HgPath() (no Get prefix),
// but the name is kept for backward compatibility.
func (hgcl *HgClient) GetHgPath() string {
	return hgcl.hgPath
}
|
package main
import "github.com/gin-gonic/gin"
// main starts a gin HTTP server exposing a single GET /ping endpoint
// that replies with a JSON {"message": "pong"} health-check response.
func main() {
	// gin.Default() attaches the Logger and Recovery middleware.
	r := gin.Default()
	r.GET("/ping", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "pong",
		})
	})
	r.Run() // listen and serve on 0.0.0.0:8080 (gin's default address)
}
Add preliminary left padding functionality
package main
import (
"github.com/gin-gonic/gin"
"strconv"
"strings"
)
// leftPad returns str prefixed with n repetitions of ch.
// A non-positive n returns str unchanged; the original passed the count
// straight to strings.Repeat, which panics on negative values, and named
// the parameter 'len', shadowing the builtin.
// Note: n is the number of repetitions added, not a target total width.
func leftPad(str string, ch string, n int) string {
	if n <= 0 {
		return str
	}
	return strings.Repeat(ch, n) + str
}
// main starts an HTTP service on :3000 that left-pads the 'str' query
// parameter with 'len' repetitions of 'ch'.
func main() {
	r := gin.Default()
	r.GET("/", func(c *gin.Context) {
		str := c.DefaultQuery("str", "")
		// Renamed from 'len': the original shadowed the builtin len().
		length := c.DefaultQuery("len", "0")
		ch := c.DefaultQuery("ch", " ")
		// Fall back to 0 repetitions when 'len' is not a valid integer.
		n, err := strconv.Atoi(length)
		if err != nil {
			n = 0
		}
		c.JSON(200, gin.H{
			"str": leftPad(str, ch, n),
		})
	})
	r.Run(":3000")
}
|
package main
import (
"github.com/coreos/go-etcd/etcd"
"github.com/miekg/dns"
"testing"
"strings"
)
var (
	// client talks to a local etcd instance; all tests in this file
	// require a running etcd server on 127.0.0.1:4001.
	client = etcd.NewClient([]string{"127.0.0.1:4001"})
	// resolver is the unit under test; each test sets its own etcdPrefix
	// so fixtures from different tests don't collide.
	resolver = &Resolver{etcd: client}
)
// TestEtcd verifies the local etcd cluster is reachable; every other
// test in this file depends on it.
func TestEtcd(t *testing.T) {
	// Enable debug logging
	log_debug = true
	if !client.SyncCluster() {
		// t.Fatal reports and stops in one call, replacing the original
		// t.Error + bare t.Fatal() pair.
		t.Fatal("Failed to sync etcd cluster")
	}
}
// TestGetFromStorageSingleKey checks that a single flat etcd key is
// returned as exactly one node with the stored value.
func TestGetFromStorageSingleKey(t *testing.T) {
	resolver.etcdPrefix = "TestGetFromStorageSingleKey/"
	client.Set("TestGetFromStorageSingleKey/net/disco/.A", "1.1.1.1", 0)
	nodes, err := resolver.GetFromStorage("net/disco/.A")
	if err != nil {
		// t.Fatal replaces the original t.Error + bare t.Fatal() pairs.
		t.Fatal("Error returned from etcd", err)
	}
	if len(nodes) != 1 {
		t.Fatal("Number of nodes should be 1: ", len(nodes))
	}
	node := nodes[0]
	if node.node.Value != "1.1.1.1" {
		t.Fatal("Node value should be 1.1.1.1: ", node)
	}
}
// TestGetFromStorageNestedKeys checks that nested etcd keys under one
// record path are flattened into an ordered node list.
func TestGetFromStorageNestedKeys(t *testing.T) {
	resolver.etcdPrefix = "TestGetFromStorageNestedKeys/"
	client.Set("TestGetFromStorageNestedKeys/net/disco/.A/0", "1.1.1.1", 0)
	client.Set("TestGetFromStorageNestedKeys/net/disco/.A/1", "1.1.1.2", 0)
	client.Set("TestGetFromStorageNestedKeys/net/disco/.A/2/0", "1.1.1.3", 0)
	nodes, err := resolver.GetFromStorage("net/disco/.A")
	if err != nil {
		t.Fatal("Error returned from etcd", err)
	}
	if len(nodes) != 3 {
		t.Fatal("Number of nodes should be 3: ", len(nodes))
	}
	// Table-driven check replaces the three copy-pasted verification blocks.
	expected := []string{"1.1.1.1", "1.1.1.2", "1.1.1.3"}
	for i, want := range expected {
		if nodes[i].node.Value != want {
			t.Fatalf("Node %d value should be %s: %v", i, want, nodes[i])
		}
	}
}
// TestNameToKeyConverter checks domain-name to etcd-key conversion, with
// and without a trailing dot, and with an extra key suffix.
func TestNameToKeyConverter(t *testing.T) {
	cases := []struct{ name, suffix, want string }{
		{"foo.net.", "", "/net/foo"},
		{"foo.net", "", "/net/foo"},
		{"foo.net.", "/.A", "/net/foo/.A"},
	}
	for _, c := range cases {
		if key := nameToKey(c.name, c.suffix); key != c.want {
			t.Error("Expected key " + c.want)
		}
	}
}
/**
* Test that the right authority is being returned for different types of DNS
* queries.
*/
// TestAuthorityRoot verifies that an A query for a zone that only holds a
// SOA record yields no answers and the SOA itself as the authority record.
func TestAuthorityRoot(t *testing.T) {
	resolver.etcdPrefix = "TestAuthorityRoot/"
	client.Set("TestAuthorityRoot/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)
	query := new(dns.Msg)
	query.SetQuestion("disco.net.", dns.TypeA)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs throughout.
	if len(answer.Answer) > 0 {
		t.Fatal("Expected zero answers")
	}
	if len(answer.Ns) != 1 {
		t.Fatal("Expected one authority record")
	}
	rr := answer.Ns[0].(*dns.SOA)
	header := rr.Header()
	// Verify the header is correct
	if header.Name != "disco.net." {
		t.Fatal("Expected record with name disco.net.: ", header.Name)
	}
	if header.Rrtype != dns.TypeSOA {
		t.Fatal("Expected record with type SOA:", header.Rrtype)
	}
	// Verify the record itself is correct
	if rr.Ns != "ns1.disco.net." {
		t.Fatal("Expected NS to be ns1.disco.net.: ", rr.Ns)
	}
	if rr.Mbox != "admin.disco.net." {
		t.Fatal("Expected MBOX to be admin.disco.net.: ", rr.Mbox)
	}
	if rr.Refresh != 3600 {
		t.Fatal("Expected REFRESH to be 3600: ", rr.Refresh)
	}
	if rr.Retry != 600 {
		t.Fatal("Expected RETRY to be 600: ", rr.Retry)
	}
	if rr.Expire != 86400 {
		t.Fatal("Expected EXPIRE to be 86400: ", rr.Expire)
	}
	if rr.Minttl != 10 {
		t.Fatal("Expected MINTTL to be 10: ", rr.Minttl)
	}
}
// TestAuthorityDomain verifies that an A query for a name directly under a
// zone holding only a SOA record returns that SOA as the authority.
func TestAuthorityDomain(t *testing.T) {
	resolver.etcdPrefix = "TestAuthorityDomain/"
	client.Set("TestAuthorityDomain/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)
	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeA)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs throughout.
	if len(answer.Answer) > 0 {
		t.Fatal("Expected zero answers")
	}
	if len(answer.Ns) != 1 {
		t.Fatal("Expected one authority record")
	}
	rr := answer.Ns[0].(*dns.SOA)
	header := rr.Header()
	// Verify the header is correct
	if header.Name != "disco.net." {
		t.Fatal("Expected record with name disco.net.: ", header.Name)
	}
	if header.Rrtype != dns.TypeSOA {
		t.Fatal("Expected record with type SOA:", header.Rrtype)
	}
	// Verify the record itself is correct
	if rr.Ns != "ns1.disco.net." {
		t.Fatal("Expected NS to be ns1.disco.net.: ", rr.Ns)
	}
	if rr.Mbox != "admin.disco.net." {
		t.Fatal("Expected MBOX to be admin.disco.net.: ", rr.Mbox)
	}
	if rr.Refresh != 3600 {
		t.Fatal("Expected REFRESH to be 3600: ", rr.Refresh)
	}
	if rr.Retry != 600 {
		t.Fatal("Expected RETRY to be 600: ", rr.Retry)
	}
	if rr.Expire != 86400 {
		t.Fatal("Expected EXPIRE to be 86400: ", rr.Expire)
	}
	if rr.Minttl != 10 {
		t.Fatal("Expected MINTTL to be 10: ", rr.Minttl)
	}
}
// TestAuthoritySubdomain verifies that the most specific enclosing zone's
// SOA (bar.disco.net., not disco.net.) is returned as authority.
func TestAuthoritySubdomain(t *testing.T) {
	resolver.etcdPrefix = "TestAuthoritySubdomain/"
	client.Set("TestAuthoritySubdomain/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)
	client.Set("TestAuthoritySubdomain/net/disco/bar/.SOA", "ns1.bar.disco.net.\tbar.disco.net.\t3600\t600\t86400\t10", 0)
	query := new(dns.Msg)
	query.SetQuestion("foo.bar.disco.net.", dns.TypeA)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs throughout.
	if len(answer.Answer) > 0 {
		t.Fatal("Expected zero answers")
	}
	if len(answer.Ns) != 1 {
		t.Fatal("Expected one authority record")
	}
	rr := answer.Ns[0].(*dns.SOA)
	header := rr.Header()
	// Verify the header is correct
	if header.Name != "bar.disco.net." {
		t.Fatal("Expected record with name bar.disco.net.: ", header.Name)
	}
	if header.Rrtype != dns.TypeSOA {
		t.Fatal("Expected record with type SOA:", header.Rrtype)
	}
	// Verify the record itself is correct. The failure messages now match
	// the values actually asserted (the originals mentioned the parent
	// zone's ns1.disco.net./admin.disco.net.).
	if rr.Ns != "ns1.bar.disco.net." {
		t.Fatal("Expected NS to be ns1.bar.disco.net.: ", rr.Ns)
	}
	if rr.Mbox != "bar.disco.net." {
		t.Fatal("Expected MBOX to be bar.disco.net.: ", rr.Mbox)
	}
	if rr.Refresh != 3600 {
		t.Fatal("Expected REFRESH to be 3600: ", rr.Refresh)
	}
	if rr.Retry != 600 {
		t.Fatal("Expected RETRY to be 600: ", rr.Retry)
	}
	if rr.Expire != 86400 {
		t.Fatal("Expected EXPIRE to be 86400: ", rr.Expire)
	}
	if rr.Minttl != 10 {
		t.Fatal("Expected MINTTL to be 10: ", rr.Minttl)
	}
}
/**
* Test different that types of DNS queries return the correct answers
**/
// TestAnswerQuestionA verifies a plain A lookup returns exactly the stored
// address and no authority records.
func TestAnswerQuestionA(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionA/"
	client.Set("TestAnswerQuestionA/net/disco/bar/.A", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionA/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)
	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeA)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs throughout.
	if len(answer.Answer) != 1 {
		t.Fatal("Expected one answer, got ", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}
	rr := answer.Answer[0].(*dns.A)
	header := rr.Header()
	// Verify the header is correct
	if header.Name != "bar.disco.net." {
		t.Fatal("Expected record with name bar.disco.net.: ", header.Name)
	}
	if header.Rrtype != dns.TypeA {
		t.Fatal("Expected record with type A:", header.Rrtype)
	}
	// Verify the record itself is correct
	if rr.A.String() != "1.2.3.4" {
		t.Fatal("Expected A record to be 1.2.3.4: ", rr.A)
	}
}
// TestAnswerQuestionAAAA verifies a plain AAAA lookup returns exactly the
// stored IPv6 address and no authority records.
func TestAnswerQuestionAAAA(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionAAAA/"
	client.Set("TestAnswerQuestionAAAA/net/disco/bar/.AAAA", "::1", 0)
	client.Set("TestAnswerQuestionAAAA/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)
	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeAAAA)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs throughout.
	if len(answer.Answer) != 1 {
		t.Fatal("Expected one answer, got ", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}
	rr := answer.Answer[0].(*dns.AAAA)
	header := rr.Header()
	// Verify the header is correct
	if header.Name != "bar.disco.net." {
		t.Fatal("Expected record with name bar.disco.net.: ", header.Name)
	}
	if header.Rrtype != dns.TypeAAAA {
		t.Fatal("Expected record with type AAAA:", header.Rrtype)
	}
	// Verify the record itself is correct
	if rr.AAAA.String() != "::1" {
		t.Fatal("Expected AAAA record to be ::1: ", rr.AAAA)
	}
}
// TestAnswerQuestionANY verifies an ANY query aggregates all record types
// stored for the name (one TXT plus two A records).
func TestAnswerQuestionANY(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionANY/"
	client.Set("TestAnswerQuestionANY/net/disco/bar/.TXT", "google.com.", 0)
	client.Set("TestAnswerQuestionANY/net/disco/bar/.A/0", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionANY/net/disco/bar/.A/1", "2.3.4.5", 0)
	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeANY)
	answer := resolver.Lookup(query)
	if len(answer.Answer) != 3 {
		// Message fixed: the original said "Expected one answer" while
		// asserting three. t.Fatal also replaces t.Error + bare t.Fatal().
		t.Fatal("Expected three answers, got ", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}
}
// TestAnswerQuestionUnsupportedType verifies that a query for an
// unsupported record type yields no answers and an NXDOMAIN rcode.
func TestAnswerQuestionUnsupportedType(t *testing.T) {
	// query for a type that we don't have support for (I tried to pick the most
	// obscure rr type that the dns library supports and that we're unlikely to
	// add support for)
	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeEUI64)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs.
	if len(answer.Answer) != 0 {
		t.Fatal("Expected no answers, got ", len(answer.Answer))
	}
	if answer.Rcode != dns.RcodeNameError {
		t.Fatal("Expected NXDOMAIN response code, got", dns.RcodeToString[answer.Rcode])
	}
	if len(answer.Ns) > 0 {
		// Original used a non-fatal t.Error here; kept that behavior.
		t.Error("Didn't expect any authority records")
	}
}
// TestAnswerQuestionWildcardCNAME verifies that a wildcard CNAME record
// matches a name under its subtree and reports the query name, not the
// wildcard, in the answer header.
func TestAnswerQuestionWildcardCNAME(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionCNAME/"
	client.Set("TestAnswerQuestionCNAME/net/disco/*/.CNAME", "baz.disco.net.", 0)
	client.Set("TestAnswerQuestionCNAME/net/disco/baz/.A", "1.2.3.4", 0)
	query := new(dns.Msg)
	query.SetQuestion("test.disco.net.", dns.TypeCNAME)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs throughout.
	if len(answer.Answer) != 1 {
		t.Fatal("Expected one answer, got ", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}
	rr := answer.Answer[0].(*dns.CNAME)
	header := rr.Header()
	// Verify the header is correct
	if header.Name != "test.disco.net." {
		t.Fatal("Expected record with name test.disco.net.: ", header.Name)
	}
	if header.Rrtype != dns.TypeCNAME {
		// Message fixed: the original said "type AAAA" in a CNAME check.
		t.Fatal("Expected record with type CNAME:", header.Rrtype)
	}
	// Verify the CNAME data is correct; log the target, not the rrtype.
	if rr.Target != "baz.disco.net." {
		t.Fatal("Expected CNAME target baz.disco.net.:", rr.Target)
	}
}
// TestAnswerQuestionCNAME verifies a direct CNAME lookup returns the
// stored target and no authority records.
func TestAnswerQuestionCNAME(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionCNAME/"
	client.Set("TestAnswerQuestionCNAME/net/disco/bar/.CNAME", "baz.disco.net.", 0)
	client.Set("TestAnswerQuestionCNAME/net/disco/baz/.A", "1.2.3.4", 0)
	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeCNAME)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs throughout.
	if len(answer.Answer) != 1 {
		t.Fatal("Expected one answer, got ", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}
	rr := answer.Answer[0].(*dns.CNAME)
	header := rr.Header()
	// Verify the header is correct
	if header.Name != "bar.disco.net." {
		t.Fatal("Expected record with name bar.disco.net.: ", header.Name)
	}
	if header.Rrtype != dns.TypeCNAME {
		t.Fatal("Expected record with type CNAME:", header.Rrtype)
	}
	// Verify the CNAME data is correct; log the target, not the rrtype
	// (the original printed header.Rrtype here).
	if rr.Target != "baz.disco.net." {
		t.Fatal("Expected CNAME target baz.disco.net.:", rr.Target)
	}
}
// TestAnswerQuestionWildcardAAAANoMatch verifies that a wildcard record
// below a name does not match the name itself.
func TestAnswerQuestionWildcardAAAANoMatch(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionWildcardANoMatch/"
	client.Set("TestAnswerQuestionWildcardANoMatch/net/disco/bar/*/.AAAA", "::1", 0)
	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeAAAA)
	answer := resolver.Lookup(query)
	if len(answer.Answer) > 0 {
		// t.Fatal replaces the original t.Error + bare t.Fatal() pair.
		t.Fatal("Didn't expect any answers, got ", len(answer.Answer))
	}
}
// TestAnswerQuestionWildcardAAAA verifies that a wildcard AAAA record
// matches a deeper name and answers with the query name in the header.
func TestAnswerQuestionWildcardAAAA(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionWildcardA/"
	client.Set("TestAnswerQuestionWildcardA/net/disco/bar/*/.AAAA", "::1", 0)
	query := new(dns.Msg)
	query.SetQuestion("baz.bar.disco.net.", dns.TypeAAAA)
	answer := resolver.Lookup(query)
	// t.Fatal replaces the original t.Error + bare t.Fatal() pairs throughout.
	if len(answer.Answer) != 1 {
		t.Fatal("Expected one answer, got ", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}
	rr := answer.Answer[0].(*dns.AAAA)
	header := rr.Header()
	// Verify the header is correct
	if header.Name != "baz.bar.disco.net." {
		t.Fatal("Expected record with name baz.bar.disco.net.: ", header.Name)
	}
	if header.Rrtype != dns.TypeAAAA {
		t.Fatal("Expected record with type AAAA:", header.Rrtype)
	}
	// Verify the record itself is correct
	if rr.AAAA.String() != "::1" {
		t.Fatal("Expected AAAA record to be ::1: ", rr.AAAA)
	}
}
// TestAnswerQuestionTTL verifies that a ".ttl" sibling key sets the TTL on a
// single-valued record.
func TestAnswerQuestionTTL(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTL/"
	client.Set("TestAnswerQuestionTTL/net/disco/bar/.A", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionTTL/net/disco/bar/.A.ttl", "300", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.A)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeA {
		t.Fatalf("Expected record with type A, got %d", header.Rrtype)
	}
	if header.Ttl != 300 {
		t.Fatalf("Expected TTL of 300 seconds, got %d", header.Ttl)
	}
	if rr.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rr.A)
	}
}
// TestAnswerQuestionTTLMultipleRecords verifies that per-entry ".ttl" keys are
// applied to the matching entries of a multi-valued record.
func TestAnswerQuestionTTLMultipleRecords(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTLMultipleRecords/"
	client.Set("TestAnswerQuestionTTLMultipleRecords/net/disco/bar/.A/0", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionTTLMultipleRecords/net/disco/bar/.A/0.ttl", "300", 0)
	client.Set("TestAnswerQuestionTTLMultipleRecords/net/disco/bar/.A/1", "8.8.8.8", 0)
	client.Set("TestAnswerQuestionTTLMultipleRecords/net/disco/bar/.A/1.ttl", "600", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeA)
	if len(records) != 2 {
		t.Fatalf("Expected two answers, got %d", len(records))
	}

	rrOne := records[0].(*dns.A)
	if rrOne.Header().Ttl != 300 {
		t.Fatalf("Expected TTL of 300 seconds, got %d", rrOne.Header().Ttl)
	}
	if rrOne.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rrOne.A)
	}

	rrTwo := records[1].(*dns.A)
	if rrTwo.Header().Ttl != 600 {
		// The second entry's expected TTL is 600, not 300.
		t.Fatalf("Expected TTL of 600 seconds, got %d", rrTwo.Header().Ttl)
	}
	if rrTwo.A.String() != "8.8.8.8" {
		t.Fatalf("Expected A record to be 8.8.8.8, got %s", rrTwo.A)
	}
}
// TestAnswerQuestionTTLInvalidFormat verifies that an unparseable ".ttl" value
// falls back to a TTL of 0 without discarding the record itself.
func TestAnswerQuestionTTLInvalidFormat(t *testing.T) {
	// Use a dedicated prefix: the original reused TestAnswerQuestionTTL's
	// prefix and silently overwrote that test's fixture data.
	resolver.etcdPrefix = "TestAnswerQuestionTTLInvalidFormat/"
	client.Set("TestAnswerQuestionTTLInvalidFormat/net/disco/bar/.A", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionTTLInvalidFormat/net/disco/bar/.A.ttl", "haha", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.A)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeA {
		t.Fatalf("Expected record with type A, got %d", header.Rrtype)
	}
	if header.Ttl != 0 {
		t.Fatalf("Expected TTL of 0 seconds, got %d", header.Ttl)
	}
	if rr.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rr.A)
	}
}
// TestAnswerQuestionTTLDanglingNode verifies that a ".ttl" key with no
// matching record value produces no answers.
func TestAnswerQuestionTTLDanglingNode(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTLDanglingNode/"
	client.Set("TestAnswerQuestionTTLDanglingNode/net/disco/bar/.TXT.ttl", "600", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeTXT)
	if len(records) != 0 {
		t.Fatalf("Expected no answer, got %d", len(records))
	}
}
// TestAnswerQuestionTTLDanglingDirNode verifies that a per-entry ".ttl" key
// inside a record directory with no matching value node produces no answers.
func TestAnswerQuestionTTLDanglingDirNode(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTLDanglingDirNode/"
	client.Set("TestAnswerQuestionTTLDanglingDirNode/net/disco/bar/.TXT/0.ttl", "600", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeTXT)
	if len(records) != 0 {
		t.Fatalf("Expected no answer, got %d", len(records))
	}
}
// TestAnswerQuestionTTLDanglingDirSibling verifies that a dangling ".ttl" key
// is skipped while its valid sibling entry is still returned with its TTL.
func TestAnswerQuestionTTLDanglingDirSibling(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTLDanglingDirSibling/"
	client.Set("TestAnswerQuestionTTLDanglingDirSibling/net/disco/bar/.TXT/0.ttl", "100", 0)
	client.Set("TestAnswerQuestionTTLDanglingDirSibling/net/disco/bar/.TXT/1", "foo bar", 0)
	client.Set("TestAnswerQuestionTTLDanglingDirSibling/net/disco/bar/.TXT/1.ttl", "600", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeTXT)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.TXT)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeTXT {
		t.Fatalf("Expected record with type TXT, got %d", header.Rrtype)
	}
	if header.Ttl != 600 {
		t.Fatalf("Expected TTL of 600 seconds, got %d", header.Ttl)
	}
	if strings.Join(rr.Txt, "\n") != "foo bar" {
		t.Fatalf("Expected txt record to be 'foo bar', got %v", rr.Txt)
	}
}
/**
 * Test conversion of names (i.e. etcd nodes) to single records of different
 * types.
 **/
// TestLookupAnswerForA verifies conversion of a ".A" node into a single A
// record with the expected name, type and address.
func TestLookupAnswerForA(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForA/"
	client.Set("TestLookupAnswerForA/net/disco/bar/.A", "1.2.3.4", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.A)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeA {
		t.Fatalf("Expected record with type A, got %d", header.Rrtype)
	}
	if rr.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rr.A)
	}
}
// TestLookupAnswerForAAAA verifies conversion of a ".AAAA" node into a single
// AAAA record with the expected name, type and address.
func TestLookupAnswerForAAAA(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForAAAA/"
	client.Set("TestLookupAnswerForAAAA/net/disco/bar/.AAAA", "::1", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeAAAA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.AAAA)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeAAAA {
		t.Fatalf("Expected record with type AAAA, got %d", header.Rrtype)
	}
	if rr.AAAA.String() != "::1" {
		t.Fatalf("Expected AAAA record to be ::1, got %s", rr.AAAA)
	}
}
// TestLookupAnswerForCNAME verifies conversion of a ".CNAME" node into a
// single CNAME record with the expected target.
func TestLookupAnswerForCNAME(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForCNAME/"
	client.Set("TestLookupAnswerForCNAME/net/disco/bar/.CNAME", "cname.google.com.", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeCNAME)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.CNAME)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeCNAME {
		t.Fatalf("Expected record with type CNAME, got %d", header.Rrtype)
	}
	if rr.Target != "cname.google.com." {
		t.Fatalf("Expected CNAME record to be cname.google.com., got %s", rr.Target)
	}
}
// TestLookupAnswerForNS verifies conversion of a ".NS" node into a single NS
// record with the expected nameserver.
func TestLookupAnswerForNS(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForNS/"
	client.Set("TestLookupAnswerForNS/net/disco/bar/.NS", "dns.google.com.", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeNS)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.NS)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeNS {
		t.Fatalf("Expected record with type NS, got %d", header.Rrtype)
	}
	if rr.Ns != "dns.google.com." {
		t.Fatalf("Expected NS record to be dns.google.com., got %s", rr.Ns)
	}
}
// TestLookupAnswerForSOA verifies parsing of a tab-separated SOA value into
// its NS, MBOX, REFRESH, RETRY, EXPIRE and MINTTL fields.
func TestLookupAnswerForSOA(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForSOA/"
	client.Set("TestLookupAnswerForSOA/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)

	records, _ := resolver.LookupAnswersForType("disco.net.", dns.TypeSOA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.SOA)
	header := rr.Header()
	if header.Name != "disco.net." {
		t.Fatalf("Expected record with name disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeSOA {
		t.Fatalf("Expected record with type SOA, got %d", header.Rrtype)
	}

	// Verify the record itself is correct
	if rr.Ns != "ns1.disco.net." {
		t.Fatalf("Expected NS to be ns1.disco.net., got %s", rr.Ns)
	}
	if rr.Mbox != "admin.disco.net." {
		t.Fatalf("Expected MBOX to be admin.disco.net., got %s", rr.Mbox)
	}
	if rr.Refresh != 3600 {
		t.Fatalf("Expected REFRESH to be 3600, got %d", rr.Refresh)
	}
	if rr.Retry != 600 {
		t.Fatalf("Expected RETRY to be 600, got %d", rr.Retry)
	}
	if rr.Expire != 86400 {
		t.Fatalf("Expected EXPIRE to be 86400, got %d", rr.Expire)
	}
	if rr.Minttl != 10 {
		t.Fatalf("Expected MINTTL to be 10, got %d", rr.Minttl)
	}
}
// TestLookupAnswerForPTR verifies that multiple PTR targets stored under one
// name are all returned, regardless of order.
func TestLookupAnswerForPTR(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForPTR/"
	client.Set("TestLookupAnswerForPTR/net/disco/alias/.PTR/target1", "target1.disco.net.", 0)
	client.Set("TestLookupAnswerForPTR/net/disco/alias/.PTR/target2", "target2.disco.net.", 0)

	records, _ := resolver.LookupAnswersForType("alias.disco.net.", dns.TypePTR)
	if len(records) != 2 {
		t.Fatalf("Expected two answers, got %d", len(records))
	}

	sawTarget1 := false
	sawTarget2 := false
	// We can't (and shouldn't try to) guarantee order, so check for all
	// expected records the long way
	for _, record := range records {
		rr := record.(*dns.PTR)
		if rr.Header().Rrtype != dns.TypePTR {
			t.Fatalf("Expected record with type PTR, got %d", rr.Header().Rrtype)
		}
		t.Log(rr)
		switch rr.Ptr {
		case "target1.disco.net.":
			sawTarget1 = true
		case "target2.disco.net.":
			sawTarget2 = true
		}
	}
	if !sawTarget1 || !sawTarget2 {
		t.Fatal("Didn't get back all expected PTR responses")
	}
}
// TestLookupAnswerForPTRInvalidDomain verifies that an unparseable PTR target
// ("...") produces an error and no records.
func TestLookupAnswerForPTRInvalidDomain(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForPTRInvalidDomain/"
	client.Set("TestLookupAnswerForPTRInvalidDomain/net/disco/bad-alias/.PTR", "...", 0)

	records, err := resolver.LookupAnswersForType("bad-alias.disco.net.", dns.TypePTR)
	if len(records) > 0 {
		t.Fatalf("Expected no answers, got %d", len(records))
	}
	if err == nil {
		t.Fatal("Expected error, didn't get one")
	}
}
// TestLookupAnswerForSRV verifies parsing of a tab-separated SRV value into
// priority, weight, port and a fully-qualified target.
func TestLookupAnswerForSRV(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForSRV/"
	client.Set("TestLookupAnswerForSRV/net/disco/_tcp/_http/.SRV",
		"100\t100\t80\tsome-webserver.disco.net",
		0)

	records, _ := resolver.LookupAnswersForType("_http._tcp.disco.net.", dns.TypeSRV)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.SRV)
	if rr.Priority != 100 {
		t.Errorf("Unexpected 'priority' value for SRV record: %d", rr.Priority)
	}
	if rr.Weight != 100 {
		t.Errorf("Unexpected 'weight' value for SRV record: %d", rr.Weight)
	}
	if rr.Port != 80 {
		t.Errorf("Unexpected 'port' value for SRV record: %d", rr.Port)
	}
	if rr.Target != "some-webserver.disco.net." {
		t.Errorf("Unexpected 'target' value for SRV record: %s", rr.Target)
	}
}
// TestLookupAnswerForSRVInvalidValues verifies that malformed SRV values
// (wrong delimiter, missing fields, out-of-range integers) yield an error and
// no records. Failure messages include the failing case name.
func TestLookupAnswerForSRVInvalidValues(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForSRVInvalidValues/"

	badValues := map[string]string{
		"wrong-delimiter":    "10 10 80 foo.disco.net",
		"not-enough-fields":  "0\t0",
		"neg-int-priority":   "-10\t10\t80\tfoo.disco.net",
		"neg-int-weight":     "10\t-10\t80\tfoo.disco.net",
		"neg-int-port":       "10\t10\t-80\tfoo.disco.net",
		"large-int-priority": "65536\t10\t80\tfoo.disco.net",
		"large-int-weight":   "10\t65536\t80\tfoo.disco.net",
		"large-int-port":     "10\t10\t65536\tfoo.disco.net"}

	for name, value := range badValues {
		client.Set("TestLookupAnswerForSRVInvalidValues/net/disco/"+name+"/.SRV", value, 0)
		records, err := resolver.LookupAnswersForType(name+".disco.net.", dns.TypeSRV)
		if len(records) > 0 {
			t.Fatalf("%s: expected no answers, got %d", name, len(records))
		}
		if err == nil {
			t.Fatalf("%s: expected error, didn't get one", name)
		}
	}
}
We already have tests for dns.TypeCNAME. Here we should also test that we get
a CNAME record back when we query for an A record and no A record exists for
the name, but a CNAME does.
package main
import (
"github.com/coreos/go-etcd/etcd"
"github.com/miekg/dns"
"testing"
"strings"
)
var (
	// Shared etcd client pointed at a local cluster; every test writes its
	// fixture keys through it.
	client = etcd.NewClient([]string{"127.0.0.1:4001"})
	// Resolver under test, backed by the same etcd client. Each test assigns
	// its own etcdPrefix to namespace its fixture data.
	resolver = &Resolver{etcd: client}
)
// TestEtcd checks connectivity to the local etcd cluster that the rest of the
// suite depends on, and turns on debug logging for the run.
func TestEtcd(t *testing.T) {
	// Enable debug logging
	log_debug = true

	if !client.SyncCluster() {
		t.Fatal("Failed to sync etcd cluster")
	}
}
// TestGetFromStorageSingleKey verifies that a single etcd key comes back as
// exactly one node carrying its stored value.
func TestGetFromStorageSingleKey(t *testing.T) {
	resolver.etcdPrefix = "TestGetFromStorageSingleKey/"
	client.Set("TestGetFromStorageSingleKey/net/disco/.A", "1.1.1.1", 0)

	nodes, err := resolver.GetFromStorage("net/disco/.A")
	if err != nil {
		t.Fatalf("Error returned from etcd: %s", err)
	}
	if len(nodes) != 1 {
		t.Fatalf("Number of nodes should be 1, got %d", len(nodes))
	}
	if nodes[0].node.Value != "1.1.1.1" {
		t.Fatalf("Node value should be 1.1.1.1, got %s", nodes[0].node.Value)
	}
}
// TestGetFromStorageNestedKeys verifies that GetFromStorage flattens nested
// etcd directories into a single ordered list of leaf nodes.
func TestGetFromStorageNestedKeys(t *testing.T) {
	resolver.etcdPrefix = "TestGetFromStorageNestedKeys/"
	client.Set("TestGetFromStorageNestedKeys/net/disco/.A/0", "1.1.1.1", 0)
	client.Set("TestGetFromStorageNestedKeys/net/disco/.A/1", "1.1.1.2", 0)
	client.Set("TestGetFromStorageNestedKeys/net/disco/.A/2/0", "1.1.1.3", 0)

	nodes, err := resolver.GetFromStorage("net/disco/.A")
	if err != nil {
		t.Fatalf("Error returned from etcd: %s", err)
	}
	if len(nodes) != 3 {
		t.Fatalf("Number of nodes should be 3, got %d", len(nodes))
	}

	expected := []string{"1.1.1.1", "1.1.1.2", "1.1.1.3"}
	for i, want := range expected {
		if nodes[i].node.Value != want {
			t.Fatalf("Node %d value should be %s, got %s", i, want, nodes[i].node.Value)
		}
	}
}
// TestNameToKeyConverter verifies conversion of DNS names (with and without a
// trailing dot) into etcd key paths, including an optional key suffix.
func TestNameToKeyConverter(t *testing.T) {
	key := nameToKey("foo.net.", "")
	if key != "/net/foo" {
		t.Errorf("Expected key /net/foo, got %s", key)
	}

	key = nameToKey("foo.net", "")
	if key != "/net/foo" {
		t.Errorf("Expected key /net/foo, got %s", key)
	}

	key = nameToKey("foo.net.", "/.A")
	if key != "/net/foo/.A" {
		t.Errorf("Expected key /net/foo/.A, got %s", key)
	}
}
/**
* Test that the right authority is being returned for different types of DNS
* queries.
*/
// TestAuthorityRoot verifies that a query at the apex of a zone with no
// matching records returns the zone's SOA in the authority section.
func TestAuthorityRoot(t *testing.T) {
	resolver.etcdPrefix = "TestAuthorityRoot/"
	client.Set("TestAuthorityRoot/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)

	query := new(dns.Msg)
	query.SetQuestion("disco.net.", dns.TypeA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) > 0 {
		t.Fatal("Expected zero answers")
	}
	if len(answer.Ns) != 1 {
		t.Fatal("Expected one authority record")
	}

	rr := answer.Ns[0].(*dns.SOA)
	header := rr.Header()

	// Verify the header is correct
	if header.Name != "disco.net." {
		t.Fatalf("Expected record with name disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeSOA {
		t.Fatalf("Expected record with type SOA, got %d", header.Rrtype)
	}

	// Verify the record itself is correct. NOTE(review): the serial field is
	// not asserted here — its value isn't fixed by the fixture data.
	if rr.Ns != "ns1.disco.net." {
		t.Fatalf("Expected NS to be ns1.disco.net., got %s", rr.Ns)
	}
	if rr.Mbox != "admin.disco.net." {
		t.Fatalf("Expected MBOX to be admin.disco.net., got %s", rr.Mbox)
	}
	if rr.Refresh != 3600 {
		t.Fatalf("Expected REFRESH to be 3600, got %d", rr.Refresh)
	}
	if rr.Retry != 600 {
		t.Fatalf("Expected RETRY to be 600, got %d", rr.Retry)
	}
	if rr.Expire != 86400 {
		t.Fatalf("Expected EXPIRE to be 86400, got %d", rr.Expire)
	}
	if rr.Minttl != 10 {
		t.Fatalf("Expected MINTTL to be 10, got %d", rr.Minttl)
	}
}
// TestAuthorityDomain verifies that a query for a name directly inside a zone
// with no matching records returns the zone's SOA in the authority section.
func TestAuthorityDomain(t *testing.T) {
	resolver.etcdPrefix = "TestAuthorityDomain/"
	client.Set("TestAuthorityDomain/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)

	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) > 0 {
		t.Fatal("Expected zero answers")
	}
	if len(answer.Ns) != 1 {
		t.Fatal("Expected one authority record")
	}

	rr := answer.Ns[0].(*dns.SOA)
	header := rr.Header()

	// Verify the header is correct
	if header.Name != "disco.net." {
		t.Fatalf("Expected record with name disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeSOA {
		t.Fatalf("Expected record with type SOA, got %d", header.Rrtype)
	}

	// Verify the record itself is correct
	if rr.Ns != "ns1.disco.net." {
		t.Fatalf("Expected NS to be ns1.disco.net., got %s", rr.Ns)
	}
	if rr.Mbox != "admin.disco.net." {
		t.Fatalf("Expected MBOX to be admin.disco.net., got %s", rr.Mbox)
	}
	if rr.Refresh != 3600 {
		t.Fatalf("Expected REFRESH to be 3600, got %d", rr.Refresh)
	}
	if rr.Retry != 600 {
		t.Fatalf("Expected RETRY to be 600, got %d", rr.Retry)
	}
	if rr.Expire != 86400 {
		t.Fatalf("Expected EXPIRE to be 86400, got %d", rr.Expire)
	}
	if rr.Minttl != 10 {
		t.Fatalf("Expected MINTTL to be 10, got %d", rr.Minttl)
	}
}
// TestAuthoritySubdomain verifies that when a parent zone and a subzone both
// have SOA records, the deepest matching SOA is returned as authority.
func TestAuthoritySubdomain(t *testing.T) {
	resolver.etcdPrefix = "TestAuthoritySubdomain/"
	client.Set("TestAuthoritySubdomain/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)
	client.Set("TestAuthoritySubdomain/net/disco/bar/.SOA", "ns1.bar.disco.net.\tbar.disco.net.\t3600\t600\t86400\t10", 0)

	query := new(dns.Msg)
	query.SetQuestion("foo.bar.disco.net.", dns.TypeA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) > 0 {
		t.Fatal("Expected zero answers")
	}
	if len(answer.Ns) != 1 {
		t.Fatal("Expected one authority record")
	}

	rr := answer.Ns[0].(*dns.SOA)
	header := rr.Header()

	// Verify the header is correct
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeSOA {
		t.Fatalf("Expected record with type SOA, got %d", header.Rrtype)
	}

	// Verify the record itself is correct (the subzone's SOA, not the parent's)
	if rr.Ns != "ns1.bar.disco.net." {
		t.Fatalf("Expected NS to be ns1.bar.disco.net., got %s", rr.Ns)
	}
	if rr.Mbox != "bar.disco.net." {
		t.Fatalf("Expected MBOX to be bar.disco.net., got %s", rr.Mbox)
	}
	if rr.Refresh != 3600 {
		t.Fatalf("Expected REFRESH to be 3600, got %d", rr.Refresh)
	}
	if rr.Retry != 600 {
		t.Fatalf("Expected RETRY to be 600, got %d", rr.Retry)
	}
	if rr.Expire != 86400 {
		t.Fatalf("Expected EXPIRE to be 86400, got %d", rr.Expire)
	}
	if rr.Minttl != 10 {
		t.Fatalf("Expected MINTTL to be 10, got %d", rr.Minttl)
	}
}
/**
 * Test that different types of DNS queries return the correct answers
 **/
// TestAnswerQuestionA verifies that an A query for a name with an A record
// returns that record and no authority section.
func TestAnswerQuestionA(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionA/"
	client.Set("TestAnswerQuestionA/net/disco/bar/.A", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionA/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)

	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) != 1 {
		t.Fatalf("Expected one answer, got %d", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}

	rr := answer.Answer[0].(*dns.A)
	header := rr.Header()

	// Verify the header is correct
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeA {
		t.Fatalf("Expected record with type A, got %d", header.Rrtype)
	}

	// Verify the record itself is correct
	if rr.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rr.A)
	}
}
// TestAnswerQuestionAAAA verifies that an AAAA query for a name with an AAAA
// record returns that record and no authority section.
func TestAnswerQuestionAAAA(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionAAAA/"
	client.Set("TestAnswerQuestionAAAA/net/disco/bar/.AAAA", "::1", 0)
	client.Set("TestAnswerQuestionAAAA/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)

	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeAAAA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) != 1 {
		t.Fatalf("Expected one answer, got %d", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}

	rr := answer.Answer[0].(*dns.AAAA)
	header := rr.Header()

	// Verify the header is correct
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeAAAA {
		t.Fatalf("Expected record with type AAAA, got %d", header.Rrtype)
	}

	// Verify the record itself is correct
	if rr.AAAA.String() != "::1" {
		t.Fatalf("Expected AAAA record to be ::1, got %s", rr.AAAA)
	}
}
// TestAnswerQuestionANY verifies that an ANY query returns the records of
// every type stored for the name (one TXT plus two A records here).
func TestAnswerQuestionANY(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionANY/"
	client.Set("TestAnswerQuestionANY/net/disco/bar/.TXT", "google.com.", 0)
	client.Set("TestAnswerQuestionANY/net/disco/bar/.A/0", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionANY/net/disco/bar/.A/1", "2.3.4.5", 0)

	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeANY)

	answer := resolver.Lookup(query)
	if len(answer.Answer) != 3 {
		// The original message claimed "one answer"; three are expected.
		t.Fatalf("Expected three answers, got %d", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}
}
// TestAnswerQuestionUnsupportedType verifies that a query for an unsupported
// record type yields no answers and an NXDOMAIN response code.
func TestAnswerQuestionUnsupportedType(t *testing.T) {
	// query for a type that we don't have support for (I tried to pick the most
	// obscure rr type that the dns library supports and that we're unlikely to
	// add support for)
	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeEUI64)

	answer := resolver.Lookup(query)
	if len(answer.Answer) != 0 {
		t.Fatalf("Expected no answers, got %d", len(answer.Answer))
	}
	if answer.Rcode != dns.RcodeNameError {
		t.Fatalf("Expected NXDOMAIN response code, got %s", dns.RcodeToString[answer.Rcode])
	}
	if len(answer.Ns) > 0 {
		t.Error("Didn't expect any authority records")
	}
}
// TestAnswerQuestionWildcardCNAME verifies that an A query for a name covered
// only by a wildcard CNAME returns that CNAME record.
func TestAnswerQuestionWildcardCNAME(t *testing.T) {
	// Use a dedicated prefix: the original shared TestAnswerQuestionCNAME's
	// prefix, so the two tests could interfere with each other's fixtures.
	resolver.etcdPrefix = "TestAnswerQuestionWildcardCNAME/"
	client.Set("TestAnswerQuestionWildcardCNAME/net/disco/*/.CNAME", "baz.disco.net.", 0)
	client.Set("TestAnswerQuestionWildcardCNAME/net/disco/baz/.A", "1.2.3.4", 0)

	query := new(dns.Msg)
	query.SetQuestion("test.disco.net.", dns.TypeA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) != 1 {
		t.Fatalf("Expected one answer, got %d", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}

	rr := answer.Answer[0].(*dns.CNAME)
	header := rr.Header()

	// Verify the header is correct
	if header.Name != "test.disco.net." {
		t.Fatalf("Expected record with name test.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeCNAME {
		t.Fatalf("Expected record with type CNAME, got %d", header.Rrtype)
	}

	// Verify the CNAME data is correct
	if rr.Target != "baz.disco.net." {
		t.Fatalf("Expected CNAME target baz.disco.net., got %s", rr.Target)
	}
}
// TestAnswerQuestionCNAME verifies that an A query for a name that only has a
// CNAME record returns the CNAME record.
func TestAnswerQuestionCNAME(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionCNAME/"
	client.Set("TestAnswerQuestionCNAME/net/disco/bar/.CNAME", "baz.disco.net.", 0)
	client.Set("TestAnswerQuestionCNAME/net/disco/baz/.A", "1.2.3.4", 0)

	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) != 1 {
		t.Fatalf("Expected one answer, got %d", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}

	rr := answer.Answer[0].(*dns.CNAME)
	header := rr.Header()

	// Verify the header is correct
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeCNAME {
		t.Fatalf("Expected record with type CNAME, got %d", header.Rrtype)
	}

	// Verify the CNAME data is correct (log the target, not the rrtype)
	if rr.Target != "baz.disco.net." {
		t.Fatalf("Expected CNAME target baz.disco.net., got %s", rr.Target)
	}
}
// TestAnswerQuestionWildcardAAAANoMatch verifies that a wildcard AAAA record
// stored below a name does not answer a query for that name itself.
func TestAnswerQuestionWildcardAAAANoMatch(t *testing.T) {
	// Prefix matches the test name so fixtures can't collide with other tests.
	resolver.etcdPrefix = "TestAnswerQuestionWildcardAAAANoMatch/"
	client.Set("TestAnswerQuestionWildcardAAAANoMatch/net/disco/bar/*/.AAAA", "::1", 0)

	query := new(dns.Msg)
	query.SetQuestion("bar.disco.net.", dns.TypeAAAA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) > 0 {
		t.Fatalf("Didn't expect any answers, got %d", len(answer.Answer))
	}
}
// TestAnswerQuestionWildcardAAAA verifies that a wildcard AAAA record answers
// a query for a name one label below the wildcard's parent.
func TestAnswerQuestionWildcardAAAA(t *testing.T) {
	// Prefix matches the test name so fixtures can't collide with other tests.
	resolver.etcdPrefix = "TestAnswerQuestionWildcardAAAA/"
	client.Set("TestAnswerQuestionWildcardAAAA/net/disco/bar/*/.AAAA", "::1", 0)

	query := new(dns.Msg)
	query.SetQuestion("baz.bar.disco.net.", dns.TypeAAAA)

	answer := resolver.Lookup(query)
	if len(answer.Answer) != 1 {
		t.Fatalf("Expected one answer, got %d", len(answer.Answer))
	}
	if len(answer.Ns) > 0 {
		t.Fatal("Didn't expect any authority records")
	}

	rr := answer.Answer[0].(*dns.AAAA)
	header := rr.Header()

	// Verify the header is correct
	if header.Name != "baz.bar.disco.net." {
		t.Fatalf("Expected record with name baz.bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeAAAA {
		t.Fatalf("Expected record with type AAAA, got %d", header.Rrtype)
	}

	// Verify the record itself is correct
	if rr.AAAA.String() != "::1" {
		t.Fatalf("Expected AAAA record to be ::1, got %s", rr.AAAA)
	}
}
// TestAnswerQuestionTTL verifies that a ".ttl" sibling key sets the TTL on a
// single-valued record.
func TestAnswerQuestionTTL(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTL/"
	client.Set("TestAnswerQuestionTTL/net/disco/bar/.A", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionTTL/net/disco/bar/.A.ttl", "300", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.A)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeA {
		t.Fatalf("Expected record with type A, got %d", header.Rrtype)
	}
	if header.Ttl != 300 {
		t.Fatalf("Expected TTL of 300 seconds, got %d", header.Ttl)
	}
	if rr.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rr.A)
	}
}
// TestAnswerQuestionTTLMultipleRecords verifies that per-entry ".ttl" keys are
// applied to the matching entries of a multi-valued record.
func TestAnswerQuestionTTLMultipleRecords(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTLMultipleRecords/"
	client.Set("TestAnswerQuestionTTLMultipleRecords/net/disco/bar/.A/0", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionTTLMultipleRecords/net/disco/bar/.A/0.ttl", "300", 0)
	client.Set("TestAnswerQuestionTTLMultipleRecords/net/disco/bar/.A/1", "8.8.8.8", 0)
	client.Set("TestAnswerQuestionTTLMultipleRecords/net/disco/bar/.A/1.ttl", "600", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeA)
	if len(records) != 2 {
		t.Fatalf("Expected two answers, got %d", len(records))
	}

	rrOne := records[0].(*dns.A)
	if rrOne.Header().Ttl != 300 {
		t.Fatalf("Expected TTL of 300 seconds, got %d", rrOne.Header().Ttl)
	}
	if rrOne.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rrOne.A)
	}

	rrTwo := records[1].(*dns.A)
	if rrTwo.Header().Ttl != 600 {
		// The second entry's expected TTL is 600, not 300.
		t.Fatalf("Expected TTL of 600 seconds, got %d", rrTwo.Header().Ttl)
	}
	if rrTwo.A.String() != "8.8.8.8" {
		t.Fatalf("Expected A record to be 8.8.8.8, got %s", rrTwo.A)
	}
}
// TestAnswerQuestionTTLInvalidFormat verifies that an unparseable ".ttl" value
// falls back to a TTL of 0 without discarding the record itself.
func TestAnswerQuestionTTLInvalidFormat(t *testing.T) {
	// Use a dedicated prefix: the original reused TestAnswerQuestionTTL's
	// prefix and silently overwrote that test's fixture data.
	resolver.etcdPrefix = "TestAnswerQuestionTTLInvalidFormat/"
	client.Set("TestAnswerQuestionTTLInvalidFormat/net/disco/bar/.A", "1.2.3.4", 0)
	client.Set("TestAnswerQuestionTTLInvalidFormat/net/disco/bar/.A.ttl", "haha", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.A)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeA {
		t.Fatalf("Expected record with type A, got %d", header.Rrtype)
	}
	if header.Ttl != 0 {
		t.Fatalf("Expected TTL of 0 seconds, got %d", header.Ttl)
	}
	if rr.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rr.A)
	}
}
// TestAnswerQuestionTTLDanglingNode verifies that a ".ttl" key with no
// matching record value produces no answers.
func TestAnswerQuestionTTLDanglingNode(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTLDanglingNode/"
	client.Set("TestAnswerQuestionTTLDanglingNode/net/disco/bar/.TXT.ttl", "600", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeTXT)
	if len(records) != 0 {
		t.Fatalf("Expected no answer, got %d", len(records))
	}
}
// TestAnswerQuestionTTLDanglingDirNode verifies that a per-entry ".ttl" key
// inside a record directory with no matching value node produces no answers.
func TestAnswerQuestionTTLDanglingDirNode(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTLDanglingDirNode/"
	client.Set("TestAnswerQuestionTTLDanglingDirNode/net/disco/bar/.TXT/0.ttl", "600", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeTXT)
	if len(records) != 0 {
		t.Fatalf("Expected no answer, got %d", len(records))
	}
}
// TestAnswerQuestionTTLDanglingDirSibling verifies that a dangling ".ttl" key
// is skipped while its valid sibling entry is still returned with its TTL.
func TestAnswerQuestionTTLDanglingDirSibling(t *testing.T) {
	resolver.etcdPrefix = "TestAnswerQuestionTTLDanglingDirSibling/"
	client.Set("TestAnswerQuestionTTLDanglingDirSibling/net/disco/bar/.TXT/0.ttl", "100", 0)
	client.Set("TestAnswerQuestionTTLDanglingDirSibling/net/disco/bar/.TXT/1", "foo bar", 0)
	client.Set("TestAnswerQuestionTTLDanglingDirSibling/net/disco/bar/.TXT/1.ttl", "600", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeTXT)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.TXT)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeTXT {
		t.Fatalf("Expected record with type TXT, got %d", header.Rrtype)
	}
	if header.Ttl != 600 {
		t.Fatalf("Expected TTL of 600 seconds, got %d", header.Ttl)
	}
	if strings.Join(rr.Txt, "\n") != "foo bar" {
		t.Fatalf("Expected txt record to be 'foo bar', got %v", rr.Txt)
	}
}
/**
 * Test conversion of names (i.e. etcd nodes) to single records of different
 * types.
 **/
// TestLookupAnswerForA verifies conversion of a ".A" node into a single A
// record with the expected name, type and address.
func TestLookupAnswerForA(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForA/"
	client.Set("TestLookupAnswerForA/net/disco/bar/.A", "1.2.3.4", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.A)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeA {
		t.Fatalf("Expected record with type A, got %d", header.Rrtype)
	}
	if rr.A.String() != "1.2.3.4" {
		t.Fatalf("Expected A record to be 1.2.3.4, got %s", rr.A)
	}
}
// TestLookupAnswerForAAAA verifies conversion of a ".AAAA" node into a single
// AAAA record with the expected name, type and address.
func TestLookupAnswerForAAAA(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForAAAA/"
	client.Set("TestLookupAnswerForAAAA/net/disco/bar/.AAAA", "::1", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeAAAA)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.AAAA)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeAAAA {
		t.Fatalf("Expected record with type AAAA, got %d", header.Rrtype)
	}
	if rr.AAAA.String() != "::1" {
		t.Fatalf("Expected AAAA record to be ::1, got %s", rr.AAAA)
	}
}
// TestLookupAnswerForCNAME verifies conversion of a ".CNAME" node into a
// single CNAME record with the expected target.
func TestLookupAnswerForCNAME(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForCNAME/"
	client.Set("TestLookupAnswerForCNAME/net/disco/bar/.CNAME", "cname.google.com.", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeCNAME)
	if len(records) != 1 {
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.CNAME)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net., got %s", header.Name)
	}
	if header.Rrtype != dns.TypeCNAME {
		t.Fatalf("Expected record with type CNAME, got %d", header.Rrtype)
	}
	if rr.Target != "cname.google.com." {
		t.Fatalf("Expected CNAME record to be cname.google.com., got %s", rr.Target)
	}
}
// TestLookupAnswerForNS verifies that a single NS record is returned with the
// expected name, type and nameserver.
func TestLookupAnswerForNS(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForNS/"
	client.Set("TestLookupAnswerForNS/net/disco/bar/.NS", "dns.google.com.", 0)

	records, _ := resolver.LookupAnswersForType("bar.disco.net.", dns.TypeNS)
	if len(records) != 1 {
		// Fatalf replaces the redundant Error + bare Fatal() pair.
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.NS)
	header := rr.Header()
	if header.Name != "bar.disco.net." {
		t.Fatalf("Expected record with name bar.disco.net.: %s", header.Name)
	}
	if header.Rrtype != dns.TypeNS {
		t.Fatalf("Expected record with type NS: %d", header.Rrtype)
	}
	if rr.Ns != "dns.google.com." {
		t.Fatalf("Expected NS record to be dns.google.com.: %s", rr.Ns)
	}
}
// TestLookupAnswerForSOA verifies that a SOA record round-trips through etcd
// with all of its tab-separated fields intact.
func TestLookupAnswerForSOA(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForSOA/"
	client.Set("TestLookupAnswerForSOA/net/disco/.SOA", "ns1.disco.net.\tadmin.disco.net.\t3600\t600\t86400\t10", 0)

	records, _ := resolver.LookupAnswersForType("disco.net.", dns.TypeSOA)
	if len(records) != 1 {
		// Fatalf replaces the redundant Error + bare Fatal() pair.
		t.Fatalf("Expected one answer, got %d", len(records))
	}

	rr := records[0].(*dns.SOA)
	header := rr.Header()
	if header.Name != "disco.net." {
		t.Fatalf("Expected record with name disco.net.: %s", header.Name)
	}
	if header.Rrtype != dns.TypeSOA {
		t.Fatalf("Expected record with type SOA: %d", header.Rrtype)
	}

	// Verify the record itself is correct
	if rr.Ns != "ns1.disco.net." {
		t.Fatalf("Expected NS to be ns1.disco.net.: %s", rr.Ns)
	}
	if rr.Mbox != "admin.disco.net." {
		t.Fatalf("Expected MBOX to be admin.disco.net.: %s", rr.Mbox)
	}
	if rr.Refresh != 3600 {
		t.Fatalf("Expected REFRESH to be 3600: %d", rr.Refresh)
	}
	if rr.Retry != 600 {
		t.Fatalf("Expected RETRY to be 600: %d", rr.Retry)
	}
	if rr.Expire != 86400 {
		t.Fatalf("Expected EXPIRE to be 86400: %d", rr.Expire)
	}
	if rr.Minttl != 10 {
		t.Fatalf("Expected MINTTL to be 10: %d", rr.Minttl)
	}
}
// TestLookupAnswerForPTR verifies that multiple PTR targets under one name are
// all returned; the order of answers is not guaranteed.
func TestLookupAnswerForPTR(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForPTR/"
	client.Set("TestLookupAnswerForPTR/net/disco/alias/.PTR/target1", "target1.disco.net.", 0)
	client.Set("TestLookupAnswerForPTR/net/disco/alias/.PTR/target2", "target2.disco.net.", 0)

	records, _ := resolver.LookupAnswersForType("alias.disco.net.", dns.TypePTR)
	if len(records) != 2 {
		t.Fatalf("Expected two answers, got %d", len(records))
	}

	// MixedCaps instead of the underscore names seen_1/seen_2 (Go naming).
	seenTarget1 := false
	seenTarget2 := false
	// We can't (and shouldn't try to) guarantee order, so check for all
	// expected records the long way
	for _, record := range records {
		rr := record.(*dns.PTR)
		header := rr.Header()
		if header.Rrtype != dns.TypePTR {
			t.Fatalf("Expected record with type PTR: %d", header.Rrtype)
		}
		t.Log(rr)
		if rr.Ptr == "target1.disco.net." {
			seenTarget1 = true
		}
		if rr.Ptr == "target2.disco.net." {
			seenTarget2 = true
		}
	}

	if !seenTarget1 || !seenTarget2 {
		t.Fatal("Didn't get back all expected PTR responses")
	}
}
// TestLookupAnswerForPTRInvalidDomain verifies that an unparsable PTR value
// produces an error and no answers.
func TestLookupAnswerForPTRInvalidDomain(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForPTRInvalidDomain/"
	client.Set("TestLookupAnswerForPTRInvalidDomain/net/disco/bad-alias/.PTR", "...", 0)

	records, err := resolver.LookupAnswersForType("bad-alias.disco.net.", dns.TypePTR)
	if len(records) > 0 {
		// Fatalf replaces the redundant Error + bare Fatal() pair.
		t.Fatalf("Expected no answers, got %d", len(records))
	}
	if err == nil {
		t.Fatal("Expected error, didn't get one")
	}
}
// TestLookupAnswerForSRV verifies the four tab-separated SRV fields are parsed
// into priority, weight, port and target.
func TestLookupAnswerForSRV(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForSRV/"
	client.Set("TestLookupAnswerForSRV/net/disco/_tcp/_http/.SRV",
		"100\t100\t80\tsome-webserver.disco.net",
		0)

	answers, _ := resolver.LookupAnswersForType("_http._tcp.disco.net.", dns.TypeSRV)
	if len(answers) != 1 {
		t.Error("Expected one answer, got ", len(answers))
		t.Fatal()
	}

	srv := answers[0].(*dns.SRV)
	if srv.Priority != 100 {
		t.Error("Unexpected 'priority' value for SRV record:", srv.Priority)
	}
	if srv.Weight != 100 {
		t.Error("Unexpected 'weight' value for SRV record:", srv.Weight)
	}
	if srv.Port != 80 {
		t.Error("Unexpected 'port' value for SRV record:", srv.Port)
	}
	if srv.Target != "some-webserver.disco.net." {
		t.Error("Unexpected 'target' value for SRV record:", srv.Target)
	}
}
// TestLookupAnswerForSRVInvalidValues verifies that malformed SRV values
// (wrong delimiter, too few fields, out-of-range integers) each yield an error
// and no answers.
func TestLookupAnswerForSRVInvalidValues(t *testing.T) {
	resolver.etcdPrefix = "TestLookupAnswerForSRVInvalidValues/"
	// MixedCaps name and gofmt-clean literal (was bad_vals_map).
	badValues := map[string]string{
		"wrong-delimiter":    "10 10 80 foo.disco.net",
		"not-enough-fields":  "0\t0",
		"neg-int-priority":   "-10\t10\t80\tfoo.disco.net",
		"neg-int-weight":     "10\t-10\t80\tfoo.disco.net",
		"neg-int-port":       "10\t10\t-80\tfoo.disco.net",
		"large-int-priority": "65536\t10\t80\tfoo.disco.net",
		"large-int-weight":   "10\t65536\t80\tfoo.disco.net",
		"large-int-port":     "10\t10\t65536\tfoo.disco.net",
	}

	for name, value := range badValues {
		client.Set("TestLookupAnswerForSRVInvalidValues/net/disco/"+name+"/.SRV", value, 0)

		records, err := resolver.LookupAnswersForType(name+".disco.net.", dns.TypeSRV)
		if len(records) > 0 {
			t.Fatalf("Expected no answers, got %d", len(records))
		}
		if err == nil {
			t.Fatal("Expected error, didn't get one")
		}
	}
}
|
package resource
import (
"errors"
"fmt"
"github.com/jinzhu/gorm"
"github.com/qor/qor"
"github.com/qor/roles"
"github.com/qor/qor/utils"
)
// findOneHandler fetches a single record for res into result, identified
// either by the context's resource ID or by the primary-key meta value. A
// truthy "_destroy" meta value deletes the record instead of loading it.
func (res *Resource) findOneHandler(result interface{}, metaValues *MetaValues, context *qor.Context) error {
	if !res.HasPermission(roles.Read, context) {
		return roles.ErrPermissionDenied
	}

	var (
		scope        = context.GetDB().NewScope(res.Value)
		primaryField = res.PrimaryField()
		primaryKey   string
	)
	if metaValues == nil {
		primaryKey = context.ResourceID
	} else if primaryField == nil {
		// BUG FIX: resources without a primary field used to dereference a
		// nil primaryField below; there is nothing to look up, not an error.
		return nil
	} else if id := metaValues.Get(primaryField.Name); id != nil {
		primaryKey = utils.ToString(id.Value)
	}

	if primaryKey == "" {
		return errors.New("failed to find")
	}

	if metaValues != nil {
		if destroy := metaValues.Get("_destroy"); destroy != nil {
			// Any value other than "0" requests deletion (subject to permission).
			if fmt.Sprint(destroy.Value) != "0" && res.HasPermission(roles.Delete, context) {
				context.GetDB().Delete(result, fmt.Sprintf("%v = ?", scope.Quote(primaryField.DBName)), primaryKey)
				return ErrProcessorSkipLeft
			}
		}
	}
	return context.GetDB().First(result, fmt.Sprintf("%v = ?", scope.Quote(primaryField.DBName)), primaryKey).Error
}
// findManyHandler loads every record of the resource into result, newest
// first (descending primary key).
func (res *Resource) findManyHandler(result interface{}, context *qor.Context) error {
	if !res.HasPermission(roles.Read, context) {
		return roles.ErrPermissionDenied
	}
	db := context.GetDB().Set("gorm:order_by_primary_key", "DESC")
	return db.Find(result).Error
}
// saveHandler persists result. Creating a fresh record (zero primary key)
// needs Create permission; anything else needs Update permission.
func (res *Resource) saveHandler(result interface{}, context *qor.Context) error {
	isNew := context.GetDB().NewScope(result).PrimaryKeyZero()
	if (isNew && res.HasPermission(roles.Create, context)) || res.HasPermission(roles.Update, context) {
		return context.GetDB().Save(result).Error
	}
	return roles.ErrPermissionDenied
}
// deleteHandler removes the record addressed by context.ResourceID, returning
// gorm.RecordNotFound when no such record exists.
func (res *Resource) deleteHandler(result interface{}, context *qor.Context) error {
	if !res.HasPermission(roles.Delete, context) {
		return roles.ErrPermissionDenied
	}
	scope := context.GetDB().NewScope(res.Value)
	condition := fmt.Sprintf("%v = ?", scope.Quote(res.PrimaryDBName()))
	if context.GetDB().First(result, condition, context.ResourceID).RecordNotFound() {
		return gorm.RecordNotFound
	}
	return context.GetDB().Delete(result).Error
}
// CallFindOne runs the resource's configured FindOneHandler.
func (res *Resource) CallFindOne(result interface{}, metaValues *MetaValues, context *qor.Context) error {
	return res.FindOneHandler(result, metaValues, context)
}

// CallFindMany runs the resource's configured FindManyHandler.
func (res *Resource) CallFindMany(result interface{}, context *qor.Context) error {
	return res.FindManyHandler(result, context)
}

// CallSave runs the resource's configured SaveHandler.
func (res *Resource) CallSave(result interface{}, context *qor.Context) error {
	return res.SaveHandler(result, context)
}

// CallDelete runs the resource's configured DeleteHandler.
func (res *Resource) CallDelete(result interface{}, context *qor.Context) error {
	return res.DeleteHandler(result, context)
}
Don't raise an error when there is no primary field
package resource
import (
"errors"
"fmt"
"github.com/jinzhu/gorm"
"github.com/qor/qor"
"github.com/qor/qor/utils"
"github.com/qor/roles"
)
// findOneHandler fetches a single record for res into result. The record is
// identified by the context's resource ID, or by the primary-key value
// carried in metaValues; a truthy "_destroy" meta value deletes the record
// instead of loading it.
func (res *Resource) findOneHandler(result interface{}, metaValues *MetaValues, context *qor.Context) error {
	if res.HasPermission(roles.Read, context) {
		var (
			scope        = context.GetDB().NewScope(res.Value)
			primaryField = res.PrimaryField()
			primaryKey   string
		)
		if metaValues == nil {
			primaryKey = context.ResourceID
		} else if primaryField == nil {
			// No primary field to match on: nothing to look up, not an error.
			return nil
		} else if id := metaValues.Get(primaryField.Name); id != nil {
			primaryKey = utils.ToString(id.Value)
		}
		if primaryKey != "" {
			if metaValues != nil {
				if destroy := metaValues.Get("_destroy"); destroy != nil {
					// Any value other than "0" requests deletion (subject to permission).
					if fmt.Sprint(destroy.Value) != "0" && res.HasPermission(roles.Delete, context) {
						context.GetDB().Delete(result, fmt.Sprintf("%v = ?", scope.Quote(primaryField.DBName)), primaryKey)
						return ErrProcessorSkipLeft
					}
				}
			}
			// NOTE(review): on the ResourceID path primaryField may still be
			// nil here — confirm every resource reaching this handler has one.
			return context.GetDB().First(result, fmt.Sprintf("%v = ?", scope.Quote(primaryField.DBName)), primaryKey).Error
		}
		return errors.New("failed to find")
	} else {
		return roles.ErrPermissionDenied
	}
}
// findManyHandler loads every record of the resource into result, newest
// first (descending primary key).
func (res *Resource) findManyHandler(result interface{}, context *qor.Context) error {
	if !res.HasPermission(roles.Read, context) {
		return roles.ErrPermissionDenied
	}
	ordered := context.GetDB().Set("gorm:order_by_primary_key", "DESC")
	return ordered.Find(result).Error
}
// saveHandler persists result. Creating a fresh record (zero primary key)
// needs Create permission; anything else needs Update permission.
func (res *Resource) saveHandler(result interface{}, context *qor.Context) error {
	creating := context.GetDB().NewScope(result).PrimaryKeyZero()
	allowed := (creating && res.HasPermission(roles.Create, context)) ||
		res.HasPermission(roles.Update, context)
	if !allowed {
		return roles.ErrPermissionDenied
	}
	return context.GetDB().Save(result).Error
}
// deleteHandler removes the record addressed by context.ResourceID, returning
// gorm.RecordNotFound when no such record exists.
func (res *Resource) deleteHandler(result interface{}, context *qor.Context) error {
	if !res.HasPermission(roles.Delete, context) {
		return roles.ErrPermissionDenied
	}
	scope := context.GetDB().NewScope(res.Value)
	query := fmt.Sprintf("%v = ?", scope.Quote(res.PrimaryDBName()))
	if context.GetDB().First(result, query, context.ResourceID).RecordNotFound() {
		return gorm.RecordNotFound
	}
	return context.GetDB().Delete(result).Error
}
// CallFindOne runs the resource's configured FindOneHandler.
func (res *Resource) CallFindOne(result interface{}, metaValues *MetaValues, context *qor.Context) error {
	return res.FindOneHandler(result, metaValues, context)
}

// CallFindMany runs the resource's configured FindManyHandler.
func (res *Resource) CallFindMany(result interface{}, context *qor.Context) error {
	return res.FindManyHandler(result, context)
}

// CallSave runs the resource's configured SaveHandler.
func (res *Resource) CallSave(result interface{}, context *qor.Context) error {
	return res.SaveHandler(result, context)
}

// CallDelete runs the resource's configured DeleteHandler.
func (res *Resource) CallDelete(result interface{}, context *qor.Context) error {
	return res.DeleteHandler(result, context)
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"go/build"
"go/format"
"io"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"unicode/utf8"
"github.com/valyala/fasttemplate"
)
// boxFilename is the name of the generated Go source file holding embedded boxes.
const boxFilename = "rice-box.go"

// lowerhex is a nibble-to-hex lookup table used when emitting escape sequences.
const lowerhex = "0123456789abcdef"
// writeBoxesGo generates the contents of the rice-box.go file for pkg and
// writes it to out. It finds every rice.FindBox call in the package, walks
// each referenced directory, renders Go source via tmplEmbeddedBox with
// per-file placeholders, gofmt-formats it, and finally streams each file's
// contents through the placeholders as an escaped Go string literal.
func writeBoxesGo(pkg *build.Package, out io.Writer) error {
	boxMap := findBoxes(pkg)
	// notify user when no calls to rice.FindBox are made (is this an error and therefore os.Exit(1) ?
	if len(boxMap) == 0 {
		fmt.Println("no calls to rice.FindBox() found")
		return nil
	}
	verbosef("\n")
	var boxes []*boxDataType
	for boxname := range boxMap {
		// find path and filename for this box
		boxPath := filepath.Join(pkg.Dir, boxname)
		// Check to see if the path for the box is a symbolic link. If so, simply
		// box what the symbolic link points to. Note: the filepath.Walk function
		// will NOT follow any nested symbolic links. This only handles the case
		// where the root of the box is a symbolic link.
		symPath, serr := os.Readlink(boxPath)
		if serr == nil {
			boxPath = symPath
		}
		// verbose info
		verbosef("embedding box '%s' to '%s'\n", boxname, boxFilename)
		// read box metadata
		boxInfo, ierr := os.Stat(boxPath)
		if ierr != nil {
			return fmt.Errorf("Error: unable to access box at %s\n", boxPath)
		}
		// create box datastructure (used by template)
		box := &boxDataType{
			BoxName: boxname,
			UnixNow: boxInfo.ModTime().Unix(),
			Files:   make([]*fileDataType, 0),
			Dirs:    make(map[string]*dirDataType),
		}
		if !boxInfo.IsDir() {
			return fmt.Errorf("Error: Box %s must point to a directory but points to %s instead\n",
				boxname, boxPath)
		}
		// fill box datastructure with file data
		err := filepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return fmt.Errorf("error walking box: %s\n", err)
			}
			// Box-relative, slash-separated name for this entry.
			filename := strings.TrimPrefix(path, boxPath)
			filename = strings.Replace(filename, "\\", "/", -1)
			filename = strings.TrimPrefix(filename, "/")
			if info.IsDir() {
				dirData := &dirDataType{
					Identifier: "dir" + nextIdentifier(),
					FileName:   filename,
					ModTime:    info.ModTime().Unix(),
					ChildFiles: make([]*fileDataType, 0),
					ChildDirs:  make([]*dirDataType, 0),
				}
				verbosef("\tincludes dir: '%s'\n", dirData.FileName)
				box.Dirs[dirData.FileName] = dirData
				// add tree entry (skip for root, it'll create a recursion)
				if dirData.FileName != "" {
					pathParts := strings.Split(dirData.FileName, "/")
					parentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], "/")]
					parentDir.ChildDirs = append(parentDir.ChildDirs, dirData)
				}
			} else {
				fileData := &fileDataType{
					Identifier: "file" + nextIdentifier(),
					FileName:   filename,
					ModTime:    info.ModTime().Unix(),
				}
				verbosef("\tincludes file: '%s'\n", fileData.FileName)
				/*
					fileData.Content, err = ioutil.ReadFile(path)
					if err != nil {
						return fmt.Errorf("error reading file content while walking box: %s\n", err)
					}
				*/
				// Placeholder for fasttemplate; the real contents are streamed
				// in after formatting so gofmt never sees the huge literal.
				fileData.Content = []byte("{%" + path + "%}")
				box.Files = append(box.Files, fileData)
				// add tree entry
				pathParts := strings.Split(fileData.FileName, "/")
				parentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], "/")]
				if parentDir == nil {
					return fmt.Errorf("Error: parent of %s is not within the box\n", path)
				}
				parentDir.ChildFiles = append(parentDir.ChildFiles, fileData)
			}
			return nil
		})
		if err != nil {
			return err
		}
		boxes = append(boxes, box)
	}
	embedSourceUnformated := bytes.NewBuffer(make([]byte, 0))
	// execute template to buffer
	err := tmplEmbeddedBox.Execute(
		embedSourceUnformated,
		embedFileDataType{pkg.Name, boxes},
	)
	if err != nil {
		return fmt.Errorf("error writing embedded box to file (template execute): %s\n", err)
	}
	// format the source code
	embedSource, err := format.Source(embedSourceUnformated.Bytes())
	if err != nil {
		return fmt.Errorf("error formatting embedSource: %s\n", err)
	}
	// write source to file
	// inject file contents
	ft, err := fasttemplate.NewTemplate(string(embedSource), "{%", "%}")
	if err != nil {
		return fmt.Errorf("error writing embedSource to file (fasttemplate compile): %s\n", err)
	}
	bufWriter := bufio.NewWriterSize(out, 100*1024)
	const bufSize = 100 * 1024
	var buffer [bufSize]byte
	_, err = ft.ExecuteFunc(bufWriter, func(w io.Writer, tag string) (int, error) {
		// The tag is the file path recorded above; unquote it to undo the
		// escaping introduced by the Go-source round trip.
		fileName, err := strconv.Unquote("\"" + tag + "\"")
		if err != nil {
			return 0, err
		}
		f, err := os.Open(fileName)
		if err != nil {
			return 0, err
		}
		n := 0
		// Streaming equivalent of strconv.Quote: read the file in chunks,
		// keeping up to utf8.UTFMax leftover bytes between reads so runes
		// split across chunk boundaries are decoded correctly.
		var processed = bufSize
		var dataLen = 0
		for {
			if processed+utf8.UTFMax > bufSize {
				// need to read more
				leftover := bufSize - processed
				if leftover > 0 {
					copy(buffer[:leftover], buffer[processed:])
				}
				read, peekErr := f.Read(buffer[leftover:])
				if peekErr != nil && peekErr != io.EOF {
					err = peekErr
					break
				}
				dataLen = leftover + read
				processed = 0
			}
			if dataLen-processed == 0 {
				break
			}
			maxRune := processed + utf8.UTFMax
			if maxRune > dataLen {
				maxRune = dataLen
			}
			data := buffer[processed:maxRune]
			var discard, n2 int
			r, width := utf8.DecodeRune(data)
			if width == 1 && r == utf8.RuneError {
				// Invalid UTF-8 byte: emit a \xNN escape.
				w.Write([]byte{'\\', 'x', lowerhex[data[0]>>4], lowerhex[data[0]&0xF]})
				n2 = 4
				discard = 1
			} else {
				discard = width
				if r == rune('"') || r == '\\' { // always backslashed
					w.Write([]byte{'\\', byte(r)})
					n2 = 2
				} else if strconv.IsPrint(r) {
					w.Write(data[:width])
					n2 = width
				} else {
					// Non-printable rune: named escape, \xNN, \uNNNN or \UNNNNNNNN.
					switch r {
					case '\a':
						w.Write([]byte{'\\', 'a'})
						n2 = 2
					case '\b':
						w.Write([]byte{'\\', 'b'})
						n2 = 2
					case '\f':
						w.Write([]byte{'\\', 'f'})
						n2 = 2
					case '\n':
						w.Write([]byte{'\\', 'n'})
						n2 = 2
					case '\r':
						w.Write([]byte{'\\', 'r'})
						n2 = 2
					case '\t':
						w.Write([]byte{'\\', 't'})
						n2 = 2
					case '\v':
						w.Write([]byte{'\\', 'v'})
						n2 = 2
					default:
						switch {
						case r < ' ':
							w.Write([]byte{'\\', 'x', lowerhex[data[0]>>4], lowerhex[data[0]&0xF]})
							n2 = 4
						case r > utf8.MaxRune:
							r = 0xFFFD
							fallthrough
						case r < 0x10000:
							w.Write([]byte{'\\', 'u'})
							n2 = 2
							for s := 12; s >= 0; s -= 4 {
								w.Write([]byte{lowerhex[r>>uint(s)&0xF]})
								n2++
							}
						default:
							w.Write([]byte{'\\', 'U'})
							n2 = 2
							for s := 28; s >= 0; s -= 4 {
								w.Write([]byte{lowerhex[r>>uint(s)&0xF]})
								n2++
							}
						}
					}
				}
			}
			processed += discard
			n += n2
		}
		f.Close()
		return int(n), err
	})
	if err != nil {
		return fmt.Errorf("error writing embedSource to file: %s\n", err)
	}
	err = bufWriter.Flush()
	if err != nil {
		return fmt.Errorf("error writing embedSource to file: %s\n", err)
	}
	return nil
}
// operationEmbedGo generates the rice-box.go file for the given package,
// exiting the process on any failure.
func operationEmbedGo(pkg *build.Package) {
	// create go file for box
	f, err := os.Create(filepath.Join(pkg.Dir, boxFilename))
	if err != nil {
		log.Printf("error creating embedded box file: %s\n", err)
		os.Exit(1)
	}
	defer f.Close()

	if err := writeBoxesGo(pkg, f); err != nil {
		log.Printf("error creating embedded box file: %s\n", err)
		os.Exit(1)
	}
}
Move streaming strconv.Quote into separate package
package main
import (
"bufio"
"bytes"
"fmt"
"go/build"
"go/format"
"io"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/nkovacs/streamquote"
"github.com/valyala/fasttemplate"
)
// boxFilename is the name of the generated Go source file holding embedded boxes.
const boxFilename = "rice-box.go"
// writeBoxesGo generates the contents of the rice-box.go file for pkg and
// writes it to out. It finds every rice.FindBox call in the package, walks
// each referenced directory, renders Go source via tmplEmbeddedBox with
// per-file placeholders, gofmt-formats it, and finally streams each file's
// contents through the placeholders using streamquote.
func writeBoxesGo(pkg *build.Package, out io.Writer) error {
	boxMap := findBoxes(pkg)
	// notify user when no calls to rice.FindBox are made (is this an error and therefore os.Exit(1) ?
	if len(boxMap) == 0 {
		fmt.Println("no calls to rice.FindBox() found")
		return nil
	}
	verbosef("\n")
	var boxes []*boxDataType
	for boxname := range boxMap {
		// find path and filename for this box
		boxPath := filepath.Join(pkg.Dir, boxname)
		// Check to see if the path for the box is a symbolic link. If so, simply
		// box what the symbolic link points to. Note: the filepath.Walk function
		// will NOT follow any nested symbolic links. This only handles the case
		// where the root of the box is a symbolic link.
		symPath, serr := os.Readlink(boxPath)
		if serr == nil {
			boxPath = symPath
		}
		// verbose info
		verbosef("embedding box '%s' to '%s'\n", boxname, boxFilename)
		// read box metadata
		boxInfo, ierr := os.Stat(boxPath)
		if ierr != nil {
			return fmt.Errorf("Error: unable to access box at %s\n", boxPath)
		}
		// create box datastructure (used by template)
		box := &boxDataType{
			BoxName: boxname,
			UnixNow: boxInfo.ModTime().Unix(),
			Files:   make([]*fileDataType, 0),
			Dirs:    make(map[string]*dirDataType),
		}
		if !boxInfo.IsDir() {
			return fmt.Errorf("Error: Box %s must point to a directory but points to %s instead\n",
				boxname, boxPath)
		}
		// fill box datastructure with file data
		err := filepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return fmt.Errorf("error walking box: %s\n", err)
			}
			// Box-relative, slash-separated name for this entry.
			filename := strings.TrimPrefix(path, boxPath)
			filename = strings.Replace(filename, "\\", "/", -1)
			filename = strings.TrimPrefix(filename, "/")
			if info.IsDir() {
				dirData := &dirDataType{
					Identifier: "dir" + nextIdentifier(),
					FileName:   filename,
					ModTime:    info.ModTime().Unix(),
					ChildFiles: make([]*fileDataType, 0),
					ChildDirs:  make([]*dirDataType, 0),
				}
				verbosef("\tincludes dir: '%s'\n", dirData.FileName)
				box.Dirs[dirData.FileName] = dirData
				// add tree entry (skip for root, it'll create a recursion)
				if dirData.FileName != "" {
					pathParts := strings.Split(dirData.FileName, "/")
					parentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], "/")]
					parentDir.ChildDirs = append(parentDir.ChildDirs, dirData)
				}
			} else {
				fileData := &fileDataType{
					Identifier: "file" + nextIdentifier(),
					FileName:   filename,
					ModTime:    info.ModTime().Unix(),
				}
				verbosef("\tincludes file: '%s'\n", fileData.FileName)
				// Instead of injecting content, inject placeholder for fasttemplate.
				// This allows us to stream the content into the final file,
				// and it also avoids running gofmt on a very large source code.
				fileData.Content = []byte("{%" + path + "%}")
				box.Files = append(box.Files, fileData)
				// add tree entry
				pathParts := strings.Split(fileData.FileName, "/")
				parentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], "/")]
				if parentDir == nil {
					return fmt.Errorf("Error: parent of %s is not within the box\n", path)
				}
				parentDir.ChildFiles = append(parentDir.ChildFiles, fileData)
			}
			return nil
		})
		if err != nil {
			return err
		}
		boxes = append(boxes, box)
	}
	embedSourceUnformated := bytes.NewBuffer(make([]byte, 0))
	// execute template to buffer
	err := tmplEmbeddedBox.Execute(
		embedSourceUnformated,
		embedFileDataType{pkg.Name, boxes},
	)
	if err != nil {
		return fmt.Errorf("error writing embedded box to file (template execute): %s\n", err)
	}
	// format the source code
	embedSource, err := format.Source(embedSourceUnformated.Bytes())
	if err != nil {
		return fmt.Errorf("error formatting embedSource: %s\n", err)
	}
	// write source to file
	// inject file contents
	ft, err := fasttemplate.NewTemplate(string(embedSource), "{%", "%}")
	if err != nil {
		return fmt.Errorf("error writing embedSource to file (fasttemplate compile): %s\n", err)
	}
	bufWriter := bufio.NewWriterSize(out, 100*1024)
	converter := streamquote.New()
	_, err = ft.ExecuteFunc(bufWriter, func(w io.Writer, tag string) (int, error) {
		// The tag is the file path recorded above; unquote it to undo the
		// escaping introduced by the Go-source round trip.
		fileName, err := strconv.Unquote("\"" + tag + "\"")
		if err != nil {
			return 0, err
		}
		f, err := os.Open(fileName)
		if err != nil {
			return 0, err
		}
		// Stream the file through the quoting converter directly into the output.
		n, err := converter.Convert(f, w)
		f.Close()
		return n, err
	})
	if err != nil {
		return fmt.Errorf("error writing embedSource to file: %s\n", err)
	}
	err = bufWriter.Flush()
	if err != nil {
		return fmt.Errorf("error writing embedSource to file: %s\n", err)
	}
	return nil
}
// operationEmbedGo generates the rice-box.go file for the given package,
// exiting the process on any failure.
func operationEmbedGo(pkg *build.Package) {
	// create go file for box
	target := filepath.Join(pkg.Dir, boxFilename)
	boxFile, err := os.Create(target)
	if err != nil {
		log.Printf("error creating embedded box file: %s\n", err)
		os.Exit(1)
	}
	defer boxFile.Close()

	if err = writeBoxesGo(pkg, boxFile); err != nil {
		log.Printf("error creating embedded box file: %s\n", err)
		os.Exit(1)
	}
}
|
package yuicompressor
import (
"os"
"testing"
)
// fixture_css returns a small stylesheet used as minification input in tests.
func fixture_css() string {
	css := `div.warning {
display: none;
}
div.error {
background: red;
color: white;
}
@media screen and (max-device-width: 640px) {
body { font-size: 90%; }
}`
	return css
}
// fixture_js returns a small script used as minification input in tests.
func fixture_js() string {
	js := `// here's a comment
var Foo = { "a": 1 };
Foo["bar"] = (function(baz) {
/* here's a
multiline comment */
if (false) {
doSomething();
} else {
for (var index = 0; index < baz.length; index++) {
doSomething(baz[index]);
}
}
})("hello");`
	return js
}
// fixture_error_js returns a snippet using the reserved word "class" as an
// unquoted object key, which the compressor is expected to reject.
func fixture_error_js() string {
	const src = "var x = {class: 'name'};"
	return src
}
// TestUseJarPath checks that a custom jar path appears in the built command.
func TestUseJarPath(t *testing.T) {
	compressor := New()
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	want := "/usr/bin/java -jar ./yuicompressor-2.4.8.jar"
	if compressor.Command() != want {
		t.Error("Impossible to set a new jar_path: " + compressor.Command())
	}
}
// TestUseJavaPath checks that a custom java binary path appears in the command.
func TestUseJavaPath(t *testing.T) {
	compressor := New()
	compressor.UseJavaPath("/var/test/path/java")
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	want := "/var/test/path/java -jar ./yuicompressor-2.4.8.jar"
	if compressor.Command() != want {
		t.Error("Impossible to set a new java_path: " + compressor.Command())
	}
}
// TestUseJvmOptions checks that JVM options are inserted between the java
// binary and the -jar flag.
func TestUseJvmOptions(t *testing.T) {
	compressor := New()
	compressor.UseJavaPath("/usr/bin/java")
	compressor.UseJvmOptions("-Xms64M -Xmx64M")
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	want := "/usr/bin/java -Xms64M -Xmx64M -jar ./yuicompressor-2.4.8.jar"
	if compressor.Command() != want {
		t.Error("Impossible to set jvm opts: " + compressor.Command())
	}
}
// TestValidity ensures a stylesheet containing an embedded data URI minifies
// without error.
func TestValidity(t *testing.T) {
	dataURICSS := `div {
background: white url(\'data:image/png;base64,iVBORw0KGgoAAAANSUhEU
gAAABAAAAAQAQMAAAAlPW0iAAAABlBMVEUAAAD///+l2Z/dAAAAM0lEQVR4nGP4/5/h
/1+G/58ZDrAz3D/McH8yw83NDDeNGe4Ug9C9zwz3gVLMDA/A6P9/AFGGFyjOXZtQAAA
AAElFTkSuQmCC\') no-repeat scroll left top;}`
	compressor := New()
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	if _, err := compressor.MinifyCssString(dataURICSS); err != nil {
		t.Error(err)
		t.Fail()
	}
}
// TestMinifyCss checks CSS minification from a string input.
func TestMinifyCss(t *testing.T) {
	compressor := New()
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	output, err := compressor.MinifyCssString(fixture_css())
	if err != nil {
		t.Error(err)
	}
	want := "div.warning{display:none}div.error{background:red;color:white}@media screen and (max-device-width:640px){body{font-size:90%}}"
	if output != want {
		t.Error("The CSS should be compressed and it's not.")
	}
}
// TestMinifyCssReader checks CSS minification from an io.Reader source.
func TestMinifyCssReader(t *testing.T) {
	yc := New()
	yc.UseJarPath("./yuicompressor-2.4.8.jar")
	fd, err := os.Open("assets_test/test1.css")
	if err != nil {
		// The original ignored the Open error, so a missing fixture surfaced
		// as a confusing failure from MinifyCssReader instead.
		t.Fatal(err)
	}
	defer fd.Close()
	output, err := yc.MinifyCssReader(fd)
	if err != nil {
		t.Error(err)
	}
	if output != "div.warning{display:none}div.error{background:red;color:white}@media screen and (max-device-width:640px){body{font-size:90%}}" {
		// Fixed copy-pasted message: this test exercises CSS, not JS.
		t.Error("The CSS should be compressed with a stream and it's not.")
	}
}
// TestMinifyCssFile checks CSS minification straight from a file path.
func TestMinifyCssFile(t *testing.T) {
	yc := New()
	yc.UseJarPath("./yuicompressor-2.4.8.jar")
	output, err := yc.MinifyCssFile("assets_test/test1.css")
	if err != nil {
		t.Error(err)
	}
	if output != "div.warning{display:none}div.error{background:red;color:white}@media screen and (max-device-width:640px){body{font-size:90%}}" {
		// Fixed copy-pasted message: this is the CSS file API, not a JS stream.
		t.Error("The CSS should be compressed and it's not.")
	}
}
// TestMinifyJs checks JS minification from a string input.
func TestMinifyJs(t *testing.T) {
	compressor := New()
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	output, err := compressor.MinifyJsString(fixture_js())
	if err != nil {
		t.Error(err)
	}
	want := "var Foo={a:1};Foo.bar=(function(baz){if(false){doSomething()}else{for(var index=0;index<baz.length;index++){doSomething(baz[index])}}})(\"hello\");"
	if output != want {
		t.Error("The JS should be compressed and it's not.")
	}
}
// TestMinifyJsReader checks JS minification from an io.Reader source.
func TestMinifyJsReader(t *testing.T) {
	yc := New()
	yc.UseJarPath("./yuicompressor-2.4.8.jar")
	fd, err := os.Open("assets_test/test1.js")
	if err != nil {
		// The original ignored the Open error, so a missing fixture surfaced
		// as a confusing failure from MinifyJsReader instead.
		t.Fatal(err)
	}
	defer fd.Close()
	output, err := yc.MinifyJsReader(fd)
	if err != nil {
		t.Error(err)
	}
	if output != "var Foo={a:1};Foo.bar=(function(baz){if(false){doSomething()}else{for(var index=0;index<baz.length;index++){doSomething(baz[index])}}})(\"hello\");" {
		t.Error("The JS should be compressed with a stream and it's not.")
	}
}
// TestMinifyJsFile checks JS minification straight from a file path.
func TestMinifyJsFile(t *testing.T) {
	yc := New()
	yc.UseJarPath("./yuicompressor-2.4.8.jar")
	output, err := yc.MinifyJsFile("assets_test/test1.js")
	if err != nil {
		t.Error(err)
	}
	if output != "var Foo={a:1};Foo.bar=(function(baz){if(false){doSomething()}else{for(var index=0;index<baz.length;index++){doSomething(baz[index])}}})(\"hello\");" {
		// Fixed copy-pasted message: this is the file API, not a stream.
		t.Error("The JS should be compressed and it's not.")
	}
}
Refactor strings in tests
package yuicompressor
import (
"os"
"testing"
)
// fixture_css is a small stylesheet used as minification input in tests.
const fixture_css = (
`div.warning {
display: none;
}
div.error {
background: red;
color: white;
}
@media screen and (max-device-width: 640px) {
body { font-size: 90%; }
}`)
// fixture_js is a small script used as minification input in tests.
const fixture_js = (
`// here's a comment
var Foo = { "a": 1 };
Foo["bar"] = (function(baz) {
/* here's a
multiline comment */
if (false) {
doSomething();
} else {
for (var index = 0; index < baz.length; index++) {
doSomething(baz[index]);
}
}
})("hello");`)
// TestUseJarPath checks that a custom jar path appears in the built command.
func TestUseJarPath(t *testing.T) {
	compressor := New()
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	want := "/usr/bin/java -jar ./yuicompressor-2.4.8.jar"
	if compressor.Command() != want {
		t.Error("Impossible to set a new jar_path: " + compressor.Command())
	}
}
// TestUseJavaPath checks that a custom java binary path appears in the command.
func TestUseJavaPath(t *testing.T) {
	compressor := New()
	compressor.UseJavaPath("/var/test/path/java")
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	want := "/var/test/path/java -jar ./yuicompressor-2.4.8.jar"
	if compressor.Command() != want {
		t.Error("Impossible to set a new java_path: " + compressor.Command())
	}
}
// TestUseJvmOptions checks that JVM options are inserted between the java
// binary and the -jar flag.
func TestUseJvmOptions(t *testing.T) {
	compressor := New()
	compressor.UseJavaPath("/usr/bin/java")
	compressor.UseJvmOptions("-Xms64M -Xmx64M")
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	want := "/usr/bin/java -Xms64M -Xmx64M -jar ./yuicompressor-2.4.8.jar"
	if compressor.Command() != want {
		t.Error("Impossible to set jvm opts: " + compressor.Command())
	}
}
// TestValidity ensures a stylesheet containing an embedded data URI minifies
// without error.
func TestValidity(t *testing.T) {
	dataURICSS := `div {
background: white url(\'data:image/png;base64,iVBORw0KGgoAAAANSUhEU
gAAABAAAAAQAQMAAAAlPW0iAAAABlBMVEUAAAD///+l2Z/dAAAAM0lEQVR4nGP4/5/h
/1+G/58ZDrAz3D/McH8yw83NDDeNGe4Ug9C9zwz3gVLMDA/A6P9/AFGGFyjOXZtQAAA
AAElFTkSuQmCC\') no-repeat scroll left top;}`
	compressor := New()
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	if _, err := compressor.MinifyCssString(dataURICSS); err != nil {
		t.Error(err)
		t.Fail()
	}
}
// TestMinifyCss checks CSS minification from a string input.
func TestMinifyCss(t *testing.T) {
	compressor := New()
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	output, err := compressor.MinifyCssString(fixture_css)
	if err != nil {
		t.Error(err)
	}
	want := "div.warning{display:none}div.error{background:red;color:white}@media screen and (max-device-width:640px){body{font-size:90%}}"
	if output != want {
		t.Error("The CSS should be compressed and it's not.")
	}
}
// TestMinifyCssReader checks CSS minification from an io.Reader source.
func TestMinifyCssReader(t *testing.T) {
	yc := New()
	yc.UseJarPath("./yuicompressor-2.4.8.jar")
	fd, err := os.Open("assets_test/test1.css")
	if err != nil {
		// The original ignored the Open error, so a missing fixture surfaced
		// as a confusing failure from MinifyCssReader instead.
		t.Fatal(err)
	}
	defer fd.Close()
	output, err := yc.MinifyCssReader(fd)
	if err != nil {
		t.Error(err)
	}
	if output != "div.warning{display:none}div.error{background:red;color:white}@media screen and (max-device-width:640px){body{font-size:90%}}" {
		// Fixed copy-pasted message: this test exercises CSS, not JS.
		t.Error("The CSS should be compressed with a stream and it's not.")
	}
}
// TestMinifyCssFile checks CSS minification straight from a file path.
func TestMinifyCssFile(t *testing.T) {
	yc := New()
	yc.UseJarPath("./yuicompressor-2.4.8.jar")
	output, err := yc.MinifyCssFile("assets_test/test1.css")
	if err != nil {
		t.Error(err)
	}
	if output != "div.warning{display:none}div.error{background:red;color:white}@media screen and (max-device-width:640px){body{font-size:90%}}" {
		// Fixed copy-pasted message: this is the CSS file API, not a JS stream.
		t.Error("The CSS should be compressed and it's not.")
	}
}
// TestMinifyJs checks JS minification from a string input.
func TestMinifyJs(t *testing.T) {
	compressor := New()
	compressor.UseJarPath("./yuicompressor-2.4.8.jar")
	output, err := compressor.MinifyJsString(fixture_js)
	if err != nil {
		t.Error(err)
	}
	want := "var Foo={a:1};Foo.bar=(function(baz){if(false){doSomething()}else{for(var index=0;index<baz.length;index++){doSomething(baz[index])}}})(\"hello\");"
	if output != want {
		t.Error("The JS should be compressed and it's not.")
	}
}
// TestMinifyJsReader checks JS minification from an io.Reader source.
func TestMinifyJsReader(t *testing.T) {
	yc := New()
	yc.UseJarPath("./yuicompressor-2.4.8.jar")
	fd, err := os.Open("assets_test/test1.js")
	if err != nil {
		// The original ignored the Open error, so a missing fixture surfaced
		// as a confusing failure from MinifyJsReader instead.
		t.Fatal(err)
	}
	defer fd.Close()
	output, err := yc.MinifyJsReader(fd)
	if err != nil {
		t.Error(err)
	}
	if output != "var Foo={a:1};Foo.bar=(function(baz){if(false){doSomething()}else{for(var index=0;index<baz.length;index++){doSomething(baz[index])}}})(\"hello\");" {
		t.Error("The JS should be compressed with a stream and it's not.")
	}
}
func TestMinifyJsFile(t *testing.T) {
yc := New()
yc.UseJarPath("./yuicompressor-2.4.8.jar")
output, err := yc.MinifyJsFile("assets_test/test1.js")
if err != nil {
t.Error(err)
}
if output != "var Foo={a:1};Foo.bar=(function(baz){if(false){doSomething()}else{for(var index=0;index<baz.length;index++){doSomething(baz[index])}}})(\"hello\");" {
t.Error("The JS should be compressed with a stream and it's not.")
}
} |
package main
import (
"fmt"
"log"
"os"
)
const (
	// v is the client version string shown by the version command.
	v = "0.1"
)
// main parses the first CLI argument as a subcommand and dispatches to the
// matching handler, defaulting to help.
func main() {
	command := ""
	args := os.Args[1:] // remove app path from args

	// We expect a subcommand and a set of files in args
	if len(args) > 0 {
		// BUG FIX: the subcommand is the first remaining argument (args[0]);
		// args[1] skipped the command and panicked with exactly one argument.
		command = args[0]
		args = args[1:]
	}

	var err error
	switch command {
	case "encrypt", "e":
		err = encrypt(args)
	case "decrypt", "d":
		err = decrypt(args)
	case "identity", "i":
		err = identity(args)
	case "version", "v":
		version(args)
	case "help", "h":
		help(args)
	default:
		help(args)
	}
	if err != nil {
		log.Fatalf("Sorry, an error occurred:\n\t%s", err)
	}
}
// version prints the client version banner. The error return exists only to
// match the other command handlers; it is always nil.
func version(args []string) error {
	fmt.Fprintf(os.Stdout, "\n\t-----\n\tSend to client - version:%s\n\t-----\n", v)
	return nil
}
// help prints the version banner followed by one usage line per subcommand.
func help(args []string) error {
	version(args)
	for _, line := range []string{
		"\tsendto version - display version\n",
		"\tsendto [username] [files] - encrypt files for a given user\n",
		"\tsendto decrypt [file] - decrypt a file\n",
		"\tsendto identity [name] - sets default sender identity\n\n",
	} {
		fmt.Print(line)
	}
	return nil
}
// decrypt is a placeholder; decryption is not implemented in this client.
func decrypt(args []string) error {
	const msg = "Sorry, this client does not yet support decrypt"
	log.Print(msg)
	return nil
}
// encrypt is a placeholder; encryption is not implemented in this client.
func encrypt(args []string) error {
	const msg = "Sorry, this client does not yet support encryption"
	log.Print(msg)
	return nil
}
// identity is a placeholder; setting the sender identity is not implemented.
func identity(args []string) error {
	const msg = "Sorry, this client does not yet support setting identity"
	log.Print(msg)
	return nil
}
Add commands
package main
import (
"fmt"
"log"
"os"
)
const (
	// v is the client version string shown by the version command.
	v = "0.1"
)
// main parses the command line: either a known subcommand, or a recipient
// username followed by the files to send.
func main() {
	var command string
	args := os.Args[1:] // remove app path from args

	// We expect either a username or a subcommand and then a set of files in args
	if len(args) > 0 {
		command, args = args[0], args[1:]
	}

	var err error
	switch command {
	case "encrypt", "e":
		err = encrypt(args)
	case "decrypt", "d":
		err = decrypt(args)
	case "identity", "i":
		err = identity(args)
	case "version", "v":
		version()
	case "help", "h":
		help()
	default:
		// Default action is to send to (if we have a username and files)
		if len(args) == 0 {
			help()
		} else {
			err = sendTo(command, args)
		}
	}

	if err != nil {
		log.Fatalf("Sorry, an error occurred:\n\t%s", err)
	}
}
// version prints the client version banner to stdout.
func version() {
	const banner = "\n\t-----\n\tSend to client - version:%s\n\t-----\n"
	fmt.Printf(banner, v)
}
// usage returns the one-line usage string shown by help and in error
// messages.
func usage() string {
	// The string is constant, so the former fmt.Sprintf with no verbs was
	// redundant (staticcheck S1039).
	return "\tUsage: sendto kennygrant [files] - send files to the username kennygrant\n"
}
// help prints the version banner, the usage line, and a summary of the
// available commands.
func help() {
	version()
	fmt.Print(usage())
	fmt.Print("\t-----\n")
	fmt.Print("\tCommands:\n")
	fmt.Print("\tsendto version - display version\n")
	fmt.Print("\tsendto [username] [files] - encrypt files for a given user\n")
	fmt.Print("\tsendto encrypt [file] - encrypt a file\n")
	// fmt.Print("\tsendto decrypt [file] - decrypt a file\n")
	fmt.Print("\tsendto identity [name] - sets default sender identity\n\n")
}
// decrypt would decrypt the specified files with the user's private key.
// TODO: decryption needs access to private keys; not implemented yet, so
// this only logs a notice and reports success.
func decrypt(args []string) error {
	const notice = "Sorry, this client does not yet support decrypt"
	log.Print(notice)
	return nil
}
// encrypt would encrypt the specified paths. Not implemented yet: logs a
// notice and reports success.
func encrypt(args []string) error {
	const notice = "Sorry, this client does not yet support encryption"
	log.Print(notice)
	return nil
}
// sendTo encrypts and sends the files listed in args to recipient.
// It returns an error when no files were given.
func sendTo(recipient string, args []string) error {
	// BUG FIX: validate before announcing the transfer, so a call with no
	// files no longer prints a misleading "Sending 0 files" banner.
	if len(args) < 1 {
		return fmt.Errorf("Not enough arguments - %s", usage())
	}
	fmt.Printf("\nSending %d files to %s...\n\n", len(args), recipient)
	return nil
}
// identity would set the default sender identity. Not implemented yet:
// logs a notice and reports success.
func identity(args []string) error {
	const notice = "Sorry, this client does not yet support setting identity"
	log.Print(notice)
	return nil
}
|
package rocker
import (
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"github.com/cloudcredo/cloudrocker/buildpack"
"github.com/cloudcredo/cloudrocker/config"
"github.com/cloudcredo/cloudrocker/docker"
"github.com/cloudcredo/cloudrocker/godocker"
"github.com/cloudcredo/cloudrocker/stager"
"github.com/cloudcredo/cloudrocker/utils"
)
type Rocker struct {
Stdout *io.PipeReader
directories *config.Directories
}
func NewRocker() *Rocker {
return &Rocker{
directories: config.NewDirectories(utils.CloudrockerHome()),
}
}
func DockerVersion(writer io.Writer) {
client := godocker.GetNewClient()
godocker.PrintVersion(client, writer)
}
func (f *Rocker) ImportRootfsImage(writer io.Writer) {
client := godocker.GetNewClient()
godocker.ImportRootfsImage(client, writer, utils.GetRootfsUrl())
f.BuildBaseImage(writer)
}
func (f *Rocker) BuildBaseImage(writer io.Writer) {
createHostDirectories(f.directories)
containerConfig := config.NewBaseContainerConfig(f.directories.BaseConfig())
client := godocker.GetNewClient()
godocker.BuildBaseImage(client, writer, containerConfig)
}
func StopContainer(writer io.Writer, name string) {
client := godocker.GetNewClient()
godocker.StopContainer(client, writer, name)
}
func DeleteContainer(writer io.Writer, name string) {
client := godocker.GetNewClient()
godocker.DeleteContainer(client, writer, name)
}
func (f *Rocker) AddBuildpack(writer io.Writer, url string, buildpackDirOptional ...string) {
buildpackDir := f.directories.Buildpacks()
if len(buildpackDirOptional) > 0 {
buildpackDir = buildpackDirOptional[0]
}
buildpack.Add(writer, url, abs(buildpackDir))
}
func (f *Rocker) DeleteBuildpack(writer io.Writer, bpack string, buildpackDirOptional ...string) {
buildpackDir := f.directories.Buildpacks()
if len(buildpackDirOptional) > 0 {
buildpackDir = buildpackDirOptional[0]
}
buildpack.Delete(writer, bpack, abs(buildpackDir))
}
func (f *Rocker) ListBuildpacks(writer io.Writer, buildpackDirOptional ...string) {
buildpackDir := f.directories.Buildpacks()
if len(buildpackDirOptional) > 0 {
buildpackDir = buildpackDirOptional[0]
}
buildpack.List(writer, abs(buildpackDir))
}
func (f *Rocker) RunStager(writer io.Writer) error {
prepareStagingFilesystem(f.directories)
prepareStagingApp(f.directories.App(), f.directories.Staging())
containerConfig := config.NewStageContainerConfig(f.directories)
client := godocker.GetNewClient()
godocker.RunStagingContainer(client, writer, containerConfig)
DeleteContainer(writer, containerConfig.ContainerName)
return stager.ValidateStagedApp(f.directories)
}
func (f *Rocker) StageApp(writer io.Writer, buildpackDirOptional ...string) error {
buildpackDir := f.directories.ContainerBuildpacks()
if len(buildpackDirOptional) > 0 {
buildpackDir = buildpackDirOptional[0]
}
buildpackRunner := stager.NewBuildpackRunner(abs(buildpackDir))
err := stager.RunBuildpack(writer, buildpackRunner)
return err
}
// RunRuntime starts the application runtime container from the prepared
// droplet, first stopping and deleting any runtime container that already
// exists under the same name.
// NOTE(review): this method still uses the legacy docker CLI wrapper
// (docker.GetNewClient) while sibling methods use godocker — confirm
// whether that is intentional.
func (f *Rocker) RunRuntime(writer io.Writer) {
	prepareRuntimeFilesystem(f.directories)
	containerConfig := config.NewRuntimeContainerConfig(f.directories.Droplet())
	cli, Stdout, stdoutpipe := docker.GetNewClient()
	// A non-empty ID means a container with this name is already present.
	if docker.GetContainerId(cli, Stdout, stdoutpipe, containerConfig.ContainerName) != "" {
		fmt.Println("Deleting running runtime container...")
		f.StopRuntime(writer)
	}
	// A second client is created for the actual run — presumably the first
	// one's stream was consumed by the ID lookup; verify against docker pkg.
	cli, Stdout, stdoutpipe = docker.GetNewClient()
	docker.RunConfiguredContainer(cli, Stdout, stdoutpipe, writer, containerConfig)
	fmt.Fprintln(writer, "Connect to your running application at http://localhost:8080/")
}
func (f *Rocker) StopRuntime(writer io.Writer) {
StopContainer(writer, "cloudrocker-runtime")
DeleteContainer(writer, "cloudrocker-runtime")
}
func (f *Rocker) BuildRuntimeImage(writer io.Writer, destImageTagOptional ...string) {
prepareRuntimeFilesystem(f.directories)
containerConfig := config.NewRuntimeContainerConfig(f.directories.Droplet(), destImageTagOptional...)
cli, Stdout, stdoutpipe := docker.GetNewClient()
docker.BuildRuntimeImage(cli, Stdout, stdoutpipe, writer, containerConfig)
}
// cloudRockerfileLocation returns the path of the CloudRockerfile in the
// current working directory. It exits the process if the working directory
// cannot be determined.
func cloudRockerfileLocation() (location string) {
	pwd, err := os.Getwd()
	if err != nil {
		log.Fatalf(" %s", err)
	}
	// filepath.Join builds an OS-correct path, unlike manual "/" concatenation.
	location = filepath.Join(pwd, "CloudRockerfile")
	return
}
// prepareStagingFilesystem readies the host directories for staging: it
// purges and recreates the app directories, verifies that at least one
// buildpack is installed, and copies the rocker binary into the rocker
// directory. Any failure terminates the process.
func prepareStagingFilesystem(directories *config.Directories) {
	if err := CreateAndCleanAppDirs(directories); err != nil {
		log.Fatalf(" %s", err)
	}
	if err := buildpack.AtLeastOneBuildpackIn(directories.Buildpacks()); err != nil {
		log.Fatalf(" %s", err)
	}
	if err := utils.CopyRockerBinaryToDir(directories.Rocker()); err != nil {
		log.Fatalf(" %s", err)
	}
}
// prepareStagingApp copies the application source from appDir into the
// staging directory (dotfiles included — see copyDir).
func prepareStagingApp(appDir string, stagingDir string) {
	copyDir(appDir, stagingDir)
}
// copyDir copies the entire contents of src (including dotfiles, enabled
// via bash's dotglob) into dest using `cp -ra`, preserving attributes.
// Any failure terminates the process.
// NOTE(review): src and dest are interpolated into a shell command without
// quoting — paths containing spaces or shell metacharacters will break or
// be interpreted by bash. Confirm callers only pass controlled paths.
func copyDir(src string, dest string) {
	src = src + "/*"
	command := "shopt -s dotglob && cp -ra " + src + " " + dest
	if err := exec.Command("bash", "-c", command).Run(); err != nil {
		log.Fatalf("error copying from %s to %s : %s", src, dest, err)
	}
}
// prepareRuntimeFilesystem unpacks the staged droplet tarball into the
// droplet directory and installs the launcher run script into its app
// subdirectory. Any failure terminates the process.
func prepareRuntimeFilesystem(directories *config.Directories) {
	tarPath, err := exec.LookPath("tar")
	if err != nil {
		log.Fatalf(" %s", err)
	}
	// Extract <tmp>/droplet (a gzipped tar) into the droplet directory.
	err = exec.Command(tarPath, "-xzf", directories.Tmp()+"/droplet", "-C", directories.Droplet()).Run()
	if err != nil {
		log.Fatalf(" %s", err)
	}
	if err := utils.AddLauncherRunScript(directories.Droplet() + "/app"); err != nil {
		log.Fatalf(" %s", err)
	}
}
// abs resolves relative into an absolute path, terminating the process if
// resolution fails.
func abs(relative string) string {
	result, failure := filepath.Abs(relative)
	if failure != nil {
		log.Fatalf(" %s", failure)
	}
	return result
}
// CreateAndCleanAppDirs purges the host directories that must start empty,
// then recreates the full host directory tree, returning any creation error.
func CreateAndCleanAppDirs(directories *config.Directories) error {
	purgeHostDirectories(directories)
	// The former `if err := ...; err != nil { return err }; return nil`
	// collapses to a direct return of the helper's error.
	return createHostDirectories(directories)
}
// purgeHostDirectories deletes every host directory that must start empty
// and clears the tmp directory except for its "cache" entry.
// NOTE(review): the error from cleanTmpDirExceptCache is discarded here —
// confirm whether failures should be surfaced to the caller.
func purgeHostDirectories(directories *config.Directories) {
	for _, dir := range directories.HostDirectoriesToClean() {
		os.RemoveAll(dir)
	}
	cleanTmpDirExceptCache(directories.Tmp())
}
func cleanTmpDirExceptCache(tmpDirName string) error {
tmpDir, err := os.Open(tmpDirName)
tmpDirContents, err := tmpDir.Readdirnames(0)
for _, file := range tmpDirContents {
if file != "cache" {
os.RemoveAll(tmpDirName + "/" + file)
}
}
return err
}
// createHostDirectories ensures every required host directory exists,
// creating missing ones (and parents) with 0755 permissions. It stops at
// the first failure and returns that error.
func createHostDirectories(directories *config.Directories) error {
	var creationErr error
	for _, path := range directories.HostDirectories() {
		if creationErr = os.MkdirAll(path, 0755); creationErr != nil {
			break
		}
	}
	return creationErr
}
Use go docker client to rock build.
[#104829264]
Signed-off-by: Josh Hill <c028c213ed5efcf30c3f4fc7361dbde0c893c5b7@cloudcredo.com>
package rocker
import (
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"github.com/cloudcredo/cloudrocker/buildpack"
"github.com/cloudcredo/cloudrocker/config"
"github.com/cloudcredo/cloudrocker/docker"
"github.com/cloudcredo/cloudrocker/godocker"
"github.com/cloudcredo/cloudrocker/stager"
"github.com/cloudcredo/cloudrocker/utils"
)
type Rocker struct {
Stdout *io.PipeReader
directories *config.Directories
}
func NewRocker() *Rocker {
return &Rocker{
directories: config.NewDirectories(utils.CloudrockerHome()),
}
}
func DockerVersion(writer io.Writer) {
client := godocker.GetNewClient()
godocker.PrintVersion(client, writer)
}
func (f *Rocker) ImportRootfsImage(writer io.Writer) {
client := godocker.GetNewClient()
godocker.ImportRootfsImage(client, writer, utils.GetRootfsUrl())
f.BuildBaseImage(writer)
}
func (f *Rocker) BuildBaseImage(writer io.Writer) {
createHostDirectories(f.directories)
containerConfig := config.NewBaseContainerConfig(f.directories.BaseConfig())
client := godocker.GetNewClient()
godocker.BuildBaseImage(client, writer, containerConfig)
}
func StopContainer(writer io.Writer, name string) {
client := godocker.GetNewClient()
godocker.StopContainer(client, writer, name)
}
func DeleteContainer(writer io.Writer, name string) {
client := godocker.GetNewClient()
godocker.DeleteContainer(client, writer, name)
}
func (f *Rocker) AddBuildpack(writer io.Writer, url string, buildpackDirOptional ...string) {
buildpackDir := f.directories.Buildpacks()
if len(buildpackDirOptional) > 0 {
buildpackDir = buildpackDirOptional[0]
}
buildpack.Add(writer, url, abs(buildpackDir))
}
func (f *Rocker) DeleteBuildpack(writer io.Writer, bpack string, buildpackDirOptional ...string) {
buildpackDir := f.directories.Buildpacks()
if len(buildpackDirOptional) > 0 {
buildpackDir = buildpackDirOptional[0]
}
buildpack.Delete(writer, bpack, abs(buildpackDir))
}
func (f *Rocker) ListBuildpacks(writer io.Writer, buildpackDirOptional ...string) {
buildpackDir := f.directories.Buildpacks()
if len(buildpackDirOptional) > 0 {
buildpackDir = buildpackDirOptional[0]
}
buildpack.List(writer, abs(buildpackDir))
}
func (f *Rocker) RunStager(writer io.Writer) error {
prepareStagingFilesystem(f.directories)
prepareStagingApp(f.directories.App(), f.directories.Staging())
containerConfig := config.NewStageContainerConfig(f.directories)
client := godocker.GetNewClient()
godocker.RunStagingContainer(client, writer, containerConfig)
DeleteContainer(writer, containerConfig.ContainerName)
return stager.ValidateStagedApp(f.directories)
}
func (f *Rocker) StageApp(writer io.Writer, buildpackDirOptional ...string) error {
buildpackDir := f.directories.ContainerBuildpacks()
if len(buildpackDirOptional) > 0 {
buildpackDir = buildpackDirOptional[0]
}
buildpackRunner := stager.NewBuildpackRunner(abs(buildpackDir))
err := stager.RunBuildpack(writer, buildpackRunner)
return err
}
func (f *Rocker) RunRuntime(writer io.Writer) {
prepareRuntimeFilesystem(f.directories)
containerConfig := config.NewRuntimeContainerConfig(f.directories.Droplet())
cli, Stdout, stdoutpipe := docker.GetNewClient()
if docker.GetContainerId(cli, Stdout, stdoutpipe, containerConfig.ContainerName) != "" {
fmt.Println("Deleting running runtime container...")
f.StopRuntime(writer)
}
cli, Stdout, stdoutpipe = docker.GetNewClient()
docker.RunConfiguredContainer(cli, Stdout, stdoutpipe, writer, containerConfig)
fmt.Fprintln(writer, "Connect to your running application at http://localhost:8080/")
}
func (f *Rocker) StopRuntime(writer io.Writer) {
StopContainer(writer, "cloudrocker-runtime")
DeleteContainer(writer, "cloudrocker-runtime")
}
func (f *Rocker) BuildRuntimeImage(writer io.Writer, destImageTagOptional ...string) {
prepareRuntimeFilesystem(f.directories)
containerConfig := config.NewRuntimeContainerConfig(f.directories.Droplet(), destImageTagOptional...)
client := godocker.GetNewClient()
godocker.BuildRuntimeImage(client, writer, containerConfig)
}
// cloudRockerfileLocation returns the path of the CloudRockerfile in the
// current working directory. It exits the process if the working directory
// cannot be determined.
func cloudRockerfileLocation() (location string) {
	pwd, err := os.Getwd()
	if err != nil {
		log.Fatalf(" %s", err)
	}
	// filepath.Join builds an OS-correct path, unlike manual "/" concatenation.
	location = filepath.Join(pwd, "CloudRockerfile")
	return
}
func prepareStagingFilesystem(directories *config.Directories) {
if err := CreateAndCleanAppDirs(directories); err != nil {
log.Fatalf(" %s", err)
}
if err := buildpack.AtLeastOneBuildpackIn(directories.Buildpacks()); err != nil {
log.Fatalf(" %s", err)
}
if err := utils.CopyRockerBinaryToDir(directories.Rocker()); err != nil {
log.Fatalf(" %s", err)
}
}
func prepareStagingApp(appDir string, stagingDir string) {
copyDir(appDir, stagingDir)
}
// copyDir copies the entire contents of src (including dotfiles, enabled
// via bash's dotglob) into dest using `cp -ra`, preserving attributes.
// Any failure terminates the process.
// NOTE(review): src and dest are interpolated into a shell command without
// quoting — paths containing spaces or shell metacharacters will break or
// be interpreted by bash. Confirm callers only pass controlled paths.
func copyDir(src string, dest string) {
	src = src + "/*"
	command := "shopt -s dotglob && cp -ra " + src + " " + dest
	if err := exec.Command("bash", "-c", command).Run(); err != nil {
		log.Fatalf("error copying from %s to %s : %s", src, dest, err)
	}
}
func prepareRuntimeFilesystem(directories *config.Directories) {
tarPath, err := exec.LookPath("tar")
if err != nil {
log.Fatalf(" %s", err)
}
err = exec.Command(tarPath, "-xzf", directories.Tmp()+"/droplet", "-C", directories.Droplet()).Run()
if err != nil {
log.Fatalf(" %s", err)
}
if err := utils.AddLauncherRunScript(directories.Droplet() + "/app"); err != nil {
log.Fatalf(" %s", err)
}
}
// abs resolves relative into an absolute path, terminating the process if
// resolution fails.
func abs(relative string) string {
	result, failure := filepath.Abs(relative)
	if failure != nil {
		log.Fatalf(" %s", failure)
	}
	return result
}
// CreateAndCleanAppDirs purges the host directories that must start empty,
// then recreates the full host directory tree, returning any creation error.
func CreateAndCleanAppDirs(directories *config.Directories) error {
	purgeHostDirectories(directories)
	// The former `if err := ...; err != nil { return err }; return nil`
	// collapses to a direct return of the helper's error.
	return createHostDirectories(directories)
}
func purgeHostDirectories(directories *config.Directories) {
for _, dir := range directories.HostDirectoriesToClean() {
os.RemoveAll(dir)
}
cleanTmpDirExceptCache(directories.Tmp())
}
func cleanTmpDirExceptCache(tmpDirName string) error {
tmpDir, err := os.Open(tmpDirName)
tmpDirContents, err := tmpDir.Readdirnames(0)
for _, file := range tmpDirContents {
if file != "cache" {
os.RemoveAll(tmpDirName + "/" + file)
}
}
return err
}
func createHostDirectories(directories *config.Directories) error {
for _, dir := range directories.HostDirectories() {
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
}
return nil
}
|
// Copyright 2018 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package client // import "miniflux.app/client"
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/url"
"strconv"
)
// Client holds API procedure calls.
type Client struct {
request *request
}
// New returns a new Miniflux client.
//
// Credentials may be given either as (username, password) or as a single
// API key; with no credentials the client is unauthenticated.
func New(endpoint string, credentials ...string) *Client {
	switch len(credentials) {
	case 2:
		return &Client{request: &request{endpoint: endpoint, username: credentials[0], password: credentials[1]}}
	case 1:
		return &Client{request: &request{endpoint: endpoint, apiKey: credentials[0]}}
	default:
		// BUG FIX: New(endpoint) with no credentials used to panic with an
		// index-out-of-range on credentials[0].
		return &Client{request: &request{endpoint: endpoint}}
	}
}
// Me returns the logged user information.
func (c *Client) Me() (*User, error) {
body, err := c.request.Get("/v1/me")
if err != nil {
return nil, err
}
defer body.Close()
var user *User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&user); err != nil {
return nil, fmt.Errorf("miniflux: json error (%v)", err)
}
return user, nil
}
// Users returns all users.
func (c *Client) Users() (Users, error) {
body, err := c.request.Get("/v1/users")
if err != nil {
return nil, err
}
defer body.Close()
var users Users
decoder := json.NewDecoder(body)
if err := decoder.Decode(&users); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return users, nil
}
// UserByID returns a single user.
func (c *Client) UserByID(userID int64) (*User, error) {
body, err := c.request.Get(fmt.Sprintf("/v1/users/%d", userID))
if err != nil {
return nil, err
}
defer body.Close()
var user User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&user); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return &user, nil
}
// UserByUsername returns a single user.
func (c *Client) UserByUsername(username string) (*User, error) {
body, err := c.request.Get(fmt.Sprintf("/v1/users/%s", username))
if err != nil {
return nil, err
}
defer body.Close()
var user User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&user); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return &user, nil
}
// CreateUser creates a new user in the system.
func (c *Client) CreateUser(username, password string, isAdmin bool) (*User, error) {
body, err := c.request.Post("/v1/users", &User{Username: username, Password: password, IsAdmin: isAdmin})
if err != nil {
return nil, err
}
defer body.Close()
var user *User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&user); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return user, nil
}
// UpdateUser updates a user in the system.
func (c *Client) UpdateUser(userID int64, userChanges *UserModification) (*User, error) {
body, err := c.request.Put(fmt.Sprintf("/v1/users/%d", userID), userChanges)
if err != nil {
return nil, err
}
defer body.Close()
var u *User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&u); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return u, nil
}
// DeleteUser removes a user from the system.
func (c *Client) DeleteUser(userID int64) error {
body, err := c.request.Delete(fmt.Sprintf("/v1/users/%d", userID))
if err != nil {
return err
}
body.Close()
return nil
}
// Discover try to find subscriptions from a website.
func (c *Client) Discover(url string) (Subscriptions, error) {
body, err := c.request.Post("/v1/discover", map[string]string{"url": url})
if err != nil {
return nil, err
}
defer body.Close()
var subscriptions Subscriptions
decoder := json.NewDecoder(body)
if err := decoder.Decode(&subscriptions); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return subscriptions, nil
}
// Categories gets the list of categories.
func (c *Client) Categories() (Categories, error) {
body, err := c.request.Get("/v1/categories")
if err != nil {
return nil, err
}
defer body.Close()
var categories Categories
decoder := json.NewDecoder(body)
if err := decoder.Decode(&categories); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return categories, nil
}
// CreateCategory creates a new category.
func (c *Client) CreateCategory(title string) (*Category, error) {
body, err := c.request.Post("/v1/categories", map[string]interface{}{
"title": title,
})
if err != nil {
return nil, err
}
defer body.Close()
var category *Category
decoder := json.NewDecoder(body)
if err := decoder.Decode(&category); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return category, nil
}
// UpdateCategory updates a category.
func (c *Client) UpdateCategory(categoryID int64, title string) (*Category, error) {
body, err := c.request.Put(fmt.Sprintf("/v1/categories/%d", categoryID), map[string]interface{}{
"title": title,
})
if err != nil {
return nil, err
}
defer body.Close()
var category *Category
decoder := json.NewDecoder(body)
if err := decoder.Decode(&category); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return category, nil
}
// DeleteCategory removes a category.
func (c *Client) DeleteCategory(categoryID int64) error {
	body, err := c.request.Delete(fmt.Sprintf("/v1/categories/%d", categoryID))
	if err != nil {
		return err
	}
	// Close directly (no defer) for consistency with the other Delete
	// helpers (DeleteUser, DeleteFeed); the function returns immediately.
	body.Close()
	return nil
}
// Feeds gets all feeds.
func (c *Client) Feeds() (Feeds, error) {
body, err := c.request.Get("/v1/feeds")
if err != nil {
return nil, err
}
defer body.Close()
var feeds Feeds
decoder := json.NewDecoder(body)
if err := decoder.Decode(&feeds); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return feeds, nil
}
// Export creates OPML file.
func (c *Client) Export() ([]byte, error) {
body, err := c.request.Get("/v1/export")
if err != nil {
return nil, err
}
defer body.Close()
opml, err := ioutil.ReadAll(body)
if err != nil {
return nil, err
}
return opml, nil
}
// Import imports an OPML file.
func (c *Client) Import(f io.ReadCloser) error {
_, err := c.request.PostFile("/v1/import", f)
return err
}
// Feed gets a feed.
func (c *Client) Feed(feedID int64) (*Feed, error) {
body, err := c.request.Get(fmt.Sprintf("/v1/feeds/%d", feedID))
if err != nil {
return nil, err
}
defer body.Close()
var feed *Feed
decoder := json.NewDecoder(body)
if err := decoder.Decode(&feed); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return feed, nil
}
// CreateFeed creates a new feed.
func (c *Client) CreateFeed(url string, categoryID int64) (int64, error) {
body, err := c.request.Post("/v1/feeds", map[string]interface{}{
"feed_url": url,
"category_id": categoryID,
})
if err != nil {
return 0, err
}
defer body.Close()
type result struct {
FeedID int64 `json:"feed_id"`
}
var r result
decoder := json.NewDecoder(body)
if err := decoder.Decode(&r); err != nil {
return 0, fmt.Errorf("miniflux: response error (%v)", err)
}
return r.FeedID, nil
}
// UpdateFeed updates a feed.
func (c *Client) UpdateFeed(feedID int64, feedChanges *FeedModification) (*Feed, error) {
body, err := c.request.Put(fmt.Sprintf("/v1/feeds/%d", feedID), feedChanges)
if err != nil {
return nil, err
}
defer body.Close()
var f *Feed
decoder := json.NewDecoder(body)
if err := decoder.Decode(&f); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return f, nil
}
// RefreshFeed refresh a feed.
func (c *Client) RefreshFeed(feedID int64) error {
body, err := c.request.Put(fmt.Sprintf("/v1/feeds/%d/refresh", feedID), nil)
if err != nil {
return err
}
body.Close()
return nil
}
// DeleteFeed removes a feed.
func (c *Client) DeleteFeed(feedID int64) error {
body, err := c.request.Delete(fmt.Sprintf("/v1/feeds/%d", feedID))
if err != nil {
return err
}
body.Close()
return nil
}
// FeedIcon gets a feed icon.
func (c *Client) FeedIcon(feedID int64) (*FeedIcon, error) {
body, err := c.request.Get(fmt.Sprintf("/v1/feeds/%d/icon", feedID))
if err != nil {
return nil, err
}
defer body.Close()
var feedIcon *FeedIcon
decoder := json.NewDecoder(body)
if err := decoder.Decode(&feedIcon); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return feedIcon, nil
}
// FeedEntry gets a single feed entry.
func (c *Client) FeedEntry(feedID, entryID int64) (*Entry, error) {
body, err := c.request.Get(fmt.Sprintf("/v1/feeds/%d/entries/%d", feedID, entryID))
if err != nil {
return nil, err
}
defer body.Close()
var entry *Entry
decoder := json.NewDecoder(body)
if err := decoder.Decode(&entry); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return entry, nil
}
// Entry gets a single entry.
func (c *Client) Entry(entryID int64) (*Entry, error) {
body, err := c.request.Get(fmt.Sprintf("/v1/entries/%d", entryID))
if err != nil {
return nil, err
}
defer body.Close()
var entry *Entry
decoder := json.NewDecoder(body)
if err := decoder.Decode(&entry); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return entry, nil
}
// Entries fetch entries.
func (c *Client) Entries(filter *Filter) (*EntryResultSet, error) {
path := buildFilterQueryString("/v1/entries", filter)
body, err := c.request.Get(path)
if err != nil {
return nil, err
}
defer body.Close()
var result EntryResultSet
decoder := json.NewDecoder(body)
if err := decoder.Decode(&result); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return &result, nil
}
// FeedEntries fetch feed entries.
func (c *Client) FeedEntries(feedID int64, filter *Filter) (*EntryResultSet, error) {
path := buildFilterQueryString(fmt.Sprintf("/v1/feeds/%d/entries", feedID), filter)
body, err := c.request.Get(path)
if err != nil {
return nil, err
}
defer body.Close()
var result EntryResultSet
decoder := json.NewDecoder(body)
if err := decoder.Decode(&result); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return &result, nil
}
// UpdateEntries updates the status of a list of entries.
func (c *Client) UpdateEntries(entryIDs []int64, status string) error {
type payload struct {
EntryIDs []int64 `json:"entry_ids"`
Status string `json:"status"`
}
body, err := c.request.Put("/v1/entries", &payload{EntryIDs: entryIDs, Status: status})
if err != nil {
return err
}
body.Close()
return nil
}
// ToggleBookmark toggles entry bookmark value.
func (c *Client) ToggleBookmark(entryID int64) error {
body, err := c.request.Put(fmt.Sprintf("/v1/entries/%d/bookmark", entryID), nil)
if err != nil {
return err
}
body.Close()
return nil
}
// buildFilterQueryString appends the non-default fields of filter to path
// as URL query parameters. A nil filter returns path unchanged.
func buildFilterQueryString(path string, filter *Filter) string {
	if filter == nil {
		return path
	}
	values := url.Values{}
	// Helper for string fields: only non-empty values are sent.
	setStr := func(key, val string) {
		if val != "" {
			values.Set(key, val)
		}
	}
	// Helper for int64 fields: only positive values are sent.
	setInt := func(key string, val int64) {
		if val > 0 {
			values.Set(key, strconv.FormatInt(val, 10))
		}
	}
	setStr("status", filter.Status)
	setStr("direction", filter.Direction)
	setStr("order", filter.Order)
	if filter.Limit >= 0 {
		values.Set("limit", strconv.Itoa(filter.Limit))
	}
	if filter.Offset >= 0 {
		values.Set("offset", strconv.Itoa(filter.Offset))
	}
	setInt("after", filter.After)
	setInt("after_entry_id", filter.AfterEntryID)
	setInt("before", filter.Before)
	setInt("before_entry_id", filter.BeforeEntryID)
	if filter.Starred {
		values.Set("starred", "1")
	}
	setStr("search", filter.Search)
	return path + "?" + values.Encode()
}
Add API Client function to refresh all feeds
// Copyright 2018 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package client // import "miniflux.app/client"
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/url"
"strconv"
)
// Client holds API procedure calls.
type Client struct {
request *request
}
// New returns a new Miniflux client.
//
// Credentials may be given either as (username, password) or as a single
// API key; with no credentials the client is unauthenticated.
func New(endpoint string, credentials ...string) *Client {
	switch len(credentials) {
	case 2:
		return &Client{request: &request{endpoint: endpoint, username: credentials[0], password: credentials[1]}}
	case 1:
		return &Client{request: &request{endpoint: endpoint, apiKey: credentials[0]}}
	default:
		// BUG FIX: New(endpoint) with no credentials used to panic with an
		// index-out-of-range on credentials[0].
		return &Client{request: &request{endpoint: endpoint}}
	}
}
// Me returns the logged user information.
func (c *Client) Me() (*User, error) {
body, err := c.request.Get("/v1/me")
if err != nil {
return nil, err
}
defer body.Close()
var user *User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&user); err != nil {
return nil, fmt.Errorf("miniflux: json error (%v)", err)
}
return user, nil
}
// Users returns all users.
func (c *Client) Users() (Users, error) {
body, err := c.request.Get("/v1/users")
if err != nil {
return nil, err
}
defer body.Close()
var users Users
decoder := json.NewDecoder(body)
if err := decoder.Decode(&users); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return users, nil
}
// UserByID returns a single user.
func (c *Client) UserByID(userID int64) (*User, error) {
body, err := c.request.Get(fmt.Sprintf("/v1/users/%d", userID))
if err != nil {
return nil, err
}
defer body.Close()
var user User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&user); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return &user, nil
}
// UserByUsername returns a single user.
func (c *Client) UserByUsername(username string) (*User, error) {
body, err := c.request.Get(fmt.Sprintf("/v1/users/%s", username))
if err != nil {
return nil, err
}
defer body.Close()
var user User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&user); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return &user, nil
}
// CreateUser creates a new user in the system.
func (c *Client) CreateUser(username, password string, isAdmin bool) (*User, error) {
body, err := c.request.Post("/v1/users", &User{Username: username, Password: password, IsAdmin: isAdmin})
if err != nil {
return nil, err
}
defer body.Close()
var user *User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&user); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return user, nil
}
// UpdateUser updates a user in the system.
func (c *Client) UpdateUser(userID int64, userChanges *UserModification) (*User, error) {
body, err := c.request.Put(fmt.Sprintf("/v1/users/%d", userID), userChanges)
if err != nil {
return nil, err
}
defer body.Close()
var u *User
decoder := json.NewDecoder(body)
if err := decoder.Decode(&u); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return u, nil
}
// DeleteUser removes a user from the system.
func (c *Client) DeleteUser(userID int64) error {
body, err := c.request.Delete(fmt.Sprintf("/v1/users/%d", userID))
if err != nil {
return err
}
body.Close()
return nil
}
// Discover try to find subscriptions from a website.
func (c *Client) Discover(url string) (Subscriptions, error) {
body, err := c.request.Post("/v1/discover", map[string]string{"url": url})
if err != nil {
return nil, err
}
defer body.Close()
var subscriptions Subscriptions
decoder := json.NewDecoder(body)
if err := decoder.Decode(&subscriptions); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return subscriptions, nil
}
// Categories gets the list of categories.
func (c *Client) Categories() (Categories, error) {
body, err := c.request.Get("/v1/categories")
if err != nil {
return nil, err
}
defer body.Close()
var categories Categories
decoder := json.NewDecoder(body)
if err := decoder.Decode(&categories); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return categories, nil
}
// CreateCategory creates a new category.
func (c *Client) CreateCategory(title string) (*Category, error) {
body, err := c.request.Post("/v1/categories", map[string]interface{}{
"title": title,
})
if err != nil {
return nil, err
}
defer body.Close()
var category *Category
decoder := json.NewDecoder(body)
if err := decoder.Decode(&category); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return category, nil
}
// UpdateCategory updates a category.
func (c *Client) UpdateCategory(categoryID int64, title string) (*Category, error) {
body, err := c.request.Put(fmt.Sprintf("/v1/categories/%d", categoryID), map[string]interface{}{
"title": title,
})
if err != nil {
return nil, err
}
defer body.Close()
var category *Category
decoder := json.NewDecoder(body)
if err := decoder.Decode(&category); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return category, nil
}
// DeleteCategory removes a category.
func (c *Client) DeleteCategory(categoryID int64) error {
body, err := c.request.Delete(fmt.Sprintf("/v1/categories/%d", categoryID))
if err != nil {
return err
}
defer body.Close()
return nil
}
// Feeds gets all feeds.
func (c *Client) Feeds() (Feeds, error) {
body, err := c.request.Get("/v1/feeds")
if err != nil {
return nil, err
}
defer body.Close()
var feeds Feeds
decoder := json.NewDecoder(body)
if err := decoder.Decode(&feeds); err != nil {
return nil, fmt.Errorf("miniflux: response error (%v)", err)
}
return feeds, nil
}
// Export creates OPML file.
func (c *Client) Export() ([]byte, error) {
body, err := c.request.Get("/v1/export")
if err != nil {
return nil, err
}
defer body.Close()
opml, err := ioutil.ReadAll(body)
if err != nil {
return nil, err
}
return opml, nil
}
// Import imports an OPML file.
func (c *Client) Import(f io.ReadCloser) error {
_, err := c.request.PostFile("/v1/import", f)
return err
}
// Feed gets a single feed by its identifier.
func (c *Client) Feed(feedID int64) (*Feed, error) {
	endpoint := fmt.Sprintf("/v1/feeds/%d", feedID)
	body, err := c.request.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer body.Close()

	var feed *Feed
	if err := json.NewDecoder(body).Decode(&feed); err != nil {
		return nil, fmt.Errorf("miniflux: response error (%v)", err)
	}
	return feed, nil
}
// CreateFeed subscribes to a new feed in the given category and returns the
// feed identifier assigned by the server.
func (c *Client) CreateFeed(url string, categoryID int64) (int64, error) {
	payload := map[string]interface{}{
		"feed_url":    url,
		"category_id": categoryID,
	}
	body, err := c.request.Post("/v1/feeds", payload)
	if err != nil {
		return 0, err
	}
	defer body.Close()

	var r struct {
		FeedID int64 `json:"feed_id"`
	}
	if err := json.NewDecoder(body).Decode(&r); err != nil {
		return 0, fmt.Errorf("miniflux: response error (%v)", err)
	}
	return r.FeedID, nil
}
// UpdateFeed applies feedChanges to the given feed and returns the updated feed.
func (c *Client) UpdateFeed(feedID int64, feedChanges *FeedModification) (*Feed, error) {
	endpoint := fmt.Sprintf("/v1/feeds/%d", feedID)
	body, err := c.request.Put(endpoint, feedChanges)
	if err != nil {
		return nil, err
	}
	defer body.Close()

	var updated *Feed
	if err := json.NewDecoder(body).Decode(&updated); err != nil {
		return nil, fmt.Errorf("miniflux: response error (%v)", err)
	}
	return updated, nil
}
// RefreshAllFeeds asks the server to refresh every feed. The response body
// carries no useful payload and is closed immediately.
func (c *Client) RefreshAllFeeds() error {
	// The endpoint is constant: the previous fmt.Sprintf call had no
	// formatting verbs (flagged by go vet), so use the literal directly.
	body, err := c.request.Put("/v1/feeds/refresh", nil)
	if err != nil {
		return err
	}
	body.Close()
	return nil
}
// RefreshFeed triggers a refresh of a single feed. The response body carries
// no useful payload.
func (c *Client) RefreshFeed(feedID int64) error {
	endpoint := fmt.Sprintf("/v1/feeds/%d/refresh", feedID)
	body, err := c.request.Put(endpoint, nil)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// DeleteFeed removes a feed subscription.
func (c *Client) DeleteFeed(feedID int64) error {
	endpoint := fmt.Sprintf("/v1/feeds/%d", feedID)
	body, err := c.request.Delete(endpoint)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// FeedIcon fetches the icon associated with a feed.
func (c *Client) FeedIcon(feedID int64) (*FeedIcon, error) {
	endpoint := fmt.Sprintf("/v1/feeds/%d/icon", feedID)
	body, err := c.request.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer body.Close()

	var icon *FeedIcon
	if err := json.NewDecoder(body).Decode(&icon); err != nil {
		return nil, fmt.Errorf("miniflux: response error (%v)", err)
	}
	return icon, nil
}
// FeedEntry gets a single entry scoped to the given feed.
func (c *Client) FeedEntry(feedID, entryID int64) (*Entry, error) {
	endpoint := fmt.Sprintf("/v1/feeds/%d/entries/%d", feedID, entryID)
	body, err := c.request.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer body.Close()

	var entry *Entry
	if err := json.NewDecoder(body).Decode(&entry); err != nil {
		return nil, fmt.Errorf("miniflux: response error (%v)", err)
	}
	return entry, nil
}
// Entry gets a single entry by its identifier.
func (c *Client) Entry(entryID int64) (*Entry, error) {
	endpoint := fmt.Sprintf("/v1/entries/%d", entryID)
	body, err := c.request.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer body.Close()

	var entry *Entry
	if err := json.NewDecoder(body).Decode(&entry); err != nil {
		return nil, fmt.Errorf("miniflux: response error (%v)", err)
	}
	return entry, nil
}
// Entries fetches the entries matching the given filter.
func (c *Client) Entries(filter *Filter) (*EntryResultSet, error) {
	body, err := c.request.Get(buildFilterQueryString("/v1/entries", filter))
	if err != nil {
		return nil, err
	}
	defer body.Close()

	result := new(EntryResultSet)
	if err := json.NewDecoder(body).Decode(result); err != nil {
		return nil, fmt.Errorf("miniflux: response error (%v)", err)
	}
	return result, nil
}
// FeedEntries fetches the entries of one feed matching the given filter.
func (c *Client) FeedEntries(feedID int64, filter *Filter) (*EntryResultSet, error) {
	endpoint := buildFilterQueryString(fmt.Sprintf("/v1/feeds/%d/entries", feedID), filter)
	body, err := c.request.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer body.Close()

	result := new(EntryResultSet)
	if err := json.NewDecoder(body).Decode(result); err != nil {
		return nil, fmt.Errorf("miniflux: response error (%v)", err)
	}
	return result, nil
}
// UpdateEntries sets the status of every entry in entryIDs in one call.
func (c *Client) UpdateEntries(entryIDs []int64, status string) error {
	payload := struct {
		EntryIDs []int64 `json:"entry_ids"`
		Status   string  `json:"status"`
	}{EntryIDs: entryIDs, Status: status}

	body, err := c.request.Put("/v1/entries", &payload)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// ToggleBookmark flips the bookmark (starred) state of an entry.
func (c *Client) ToggleBookmark(entryID int64) error {
	endpoint := fmt.Sprintf("/v1/entries/%d/bookmark", entryID)
	body, err := c.request.Put(endpoint, nil)
	if err != nil {
		return err
	}
	defer body.Close()
	return nil
}
// buildFilterQueryString appends filter's non-default fields to path as URL
// query parameters. A nil filter returns path unchanged; a non-nil filter
// always appends a "?" (even when no parameter is set), matching the
// server's tolerant parsing.
func buildFilterQueryString(path string, filter *Filter) string {
	if filter == nil {
		return path
	}
	values := url.Values{}
	if filter.Status != "" {
		values.Set("status", filter.Status)
	}
	if filter.Direction != "" {
		values.Set("direction", filter.Direction)
	}
	if filter.Order != "" {
		values.Set("order", filter.Order)
	}
	// Limit/Offset use >= 0 so that an explicit zero is still transmitted.
	if filter.Limit >= 0 {
		values.Set("limit", strconv.Itoa(filter.Limit))
	}
	if filter.Offset >= 0 {
		values.Set("offset", strconv.Itoa(filter.Offset))
	}
	if filter.After > 0 {
		values.Set("after", strconv.FormatInt(filter.After, 10))
	}
	if filter.AfterEntryID > 0 {
		values.Set("after_entry_id", strconv.FormatInt(filter.AfterEntryID, 10))
	}
	if filter.Before > 0 {
		values.Set("before", strconv.FormatInt(filter.Before, 10))
	}
	if filter.BeforeEntryID > 0 {
		values.Set("before_entry_id", strconv.FormatInt(filter.BeforeEntryID, 10))
	}
	if filter.Starred {
		values.Set("starred", "1")
	}
	if filter.Search != "" {
		values.Set("search", filter.Search)
	}
	return path + "?" + values.Encode()
}
|
/*
Package client implements the class of a network client which can interact with a mix network.
*/
package client
import (
	"crypto/elliptic"
	"crypto/rand"
	"errors"
	"math"
	"math/big"
	"net"
	"time"

	"anonymous-messaging/clientCore"
	"anonymous-messaging/config"
	"anonymous-messaging/helpers"
	"anonymous-messaging/logging"
	"anonymous-messaging/networker"

	"github.com/protobuf/proto"
)
var (
	// logLocal is the package-wide logger used by the client.
	logLocal = logging.PackageLogger()
	// Switches enabling the loop and drop cover-traffic streams that are
	// started once the provider registration completes.
	loopCoverTrafficEnabled = true
	dropCoverTrafficEnabled = true
)
const (
	// the parameter of the exponential distribution which defines the rate of sending by client
	// the desiredRateParameter is the reciprocal of the expected value of the exponential distribution
	desiredRateParameter = 0.2
	// loopRate and dropRate are the exponential-rate parameters used between
	// consecutive loop / drop cover packets.
	loopRate = 0.1
	dropRate = 0.1
	// the rate at which clients are querying the provider for received packets. fetchRate value is the
	// parameter of an exponential distribution, and is the reciprocal of the expected value of the exp. distribution
	fetchRate = 0.01
	// Wire flags identifying the packet type.
	assignFlag = "\xA2"
	commFlag = "\xc6"
	// NOTE(review): tokenFlag is the literal three-character string "xa9" —
	// unlike its siblings it lacks the \x escape. This looks like a typo, but
	// changing it alters the wire protocol; confirm against the provider's
	// flag constant before "fixing" it.
	tokenFlag = "xa9"
	pullFlag = "\xff"
)
// Client is the interface of a mix-network client. The embedded interfaces
// cover the generic network send/listen behaviour; the extra methods cover
// the client lifecycle and message submission.
type Client interface {
	networker.NetworkClient
	networker.NetworkServer
	// Start boots the client: PKI read, listener, provider registration.
	Start() error
	// SendMessage sends message to recipient through the mix network.
	SendMessage(message string, recipient config.ClientConfig) error
	// ReadInNetworkFromPKI loads mix and client data from the PKI database.
	ReadInNetworkFromPKI(pkiName string) error
}
// client is the concrete mix-network client. Cryptographic packet operations
// (EncodeMessage, GetPublicKey, Provider, Network) come from the embedded
// CryptoClient.
type client struct {
	id string
	host string
	port string
	// listener accepts incoming TCP connections from the network.
	listener *net.TCPListener
	// pkiDir is the path to the PKI database.
	pkiDir string
	// config is this client's public configuration, as registered in the PKI.
	config config.ClientConfig
	// token authenticates pull requests to the provider; set on registration.
	token []byte
	// outQueue carries encoded packets awaiting dispatch to the provider.
	outQueue chan []byte
	// registrationDone is signalled once the provider delivers a token.
	registrationDone chan bool
	*clientCore.CryptoClient
}
// Start boots the client: it opens the listening socket, loads the network
// information from the PKI database, and keeps re-sending the registration
// packet to the provider (once a minute) until handleConnection signals on
// registrationDone that a token arrived. Returns an error when any startup
// step fails.
func (c *client) Start() error {
	// The error from resolveAddressAndStartListening was previously
	// discarded, letting the client "start" without a working listener.
	if err := c.resolveAddressAndStartListening(); err != nil {
		logLocal.WithError(err).Error("Error during starting the listening server")
		return err
	}
	c.outQueue = make(chan []byte)
	c.registrationDone = make(chan bool)
	err := c.ReadInNetworkFromPKI(c.pkiDir)
	if err != nil {
		logLocal.WithError(err).Error("Error during reading in network PKI")
		return err
	}
	// Retry registration until a token is received.
	go func() {
		for {
			select {
			case <-c.registrationDone:
				return
			default:
				err = c.sendRegisterMessageToProvider()
				if err != nil {
					logLocal.WithError(err).Error("Error during registration to provider", err)
				}
				time.Sleep(60 * time.Second)
			}
		}
	}()
	c.startListenerInNewRoutine()
	return nil
}
// resolveAddressAndStartListening resolves the client's host/port pair and
// opens the TCP listener on it.
func (c *client) resolveAddressAndStartListening() error {
	addr, err := helpers.ResolveTCPAddress(c.host, c.port)
	if err != nil {
		return err
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}
	c.listener = listener
	return nil
}
// SendMessage encodes the given message for recipient and queues the
// resulting packet for dispatch to the provider.
func (c *client) SendMessage(message string, recipient config.ClientConfig) error {
	packet, err := c.encodeMessage(message, recipient)
	if err != nil {
		logLocal.WithError(err).Error("Error in sending message - encode message returned error")
		return err
	}
	c.outQueue <- packet
	return nil
}
// encodeMessage wraps message in a sphinx packet addressed to recipient and
// prefixes it with the communication flag.
func (c *client) encodeMessage(message string, recipient config.ClientConfig) ([]byte, error) {
	sphinxPacket, err := c.EncodeMessage(message, recipient)
	if err != nil {
		logLocal.WithError(err).Error("Error in sending message - create sphinx packet returned an error")
		return nil, err
	}
	wrapped, err := config.WrapWithFlag(commFlag, sphinxPacket)
	if err != nil {
		logLocal.WithError(err).Error("Error in sending message - wrap with flag returned an error")
		return nil, err
	}
	return wrapped, nil
}
// send dials host:port over TCP, writes packet and closes the connection.
// Dial or write failures are returned to the caller.
func (c *client) send(packet []byte, host string, port string) error {
	address := host + ":" + port
	conn, err := net.Dial("tcp", address)
	if err != nil {
		logLocal.WithError(err).Error("Error in send - dial returned an error")
		return err
	}
	defer conn.Close()

	_, err = conn.Write(packet)
	return err
}
// startListenerInNewRoutine runs the accept loop in a background goroutine
// and then blocks the calling goroutine indefinitely.
// NOTE(review): despite its name this function never returns — `finish` is a
// channel nothing ever sends on, so `<-finish` blocks forever (and the
// deferred listener.Close therefore never runs). Confirm callers rely on
// this blocking behaviour before restructuring.
func (c *client) startListenerInNewRoutine() {
	defer c.listener.Close()
	finish := make(chan bool)
	go func() {
		logLocal.Infof("Listening on address %s", c.host+":"+c.port)
		c.listenForIncomingConnections()
	}()
	<-finish
}
// listenForIncomingConnections accepts connections in an endless loop and
// hands each one to handleConnection in its own goroutine. Accept errors are
// logged but do not stop the loop.
func (c *client) listenForIncomingConnections() {
	for {
		conn, err := c.listener.Accept()
		if err != nil {
			logLocal.WithError(err).Error(err)
			continue
		}
		go c.handleConnection(conn)
	}
}
// handleConnection reads one packet from conn and dispatches on its flag:
// a token packet completes registration and switches on the traffic
// controllers; a comm packet is processed as an incoming message. Errors
// abort handling of this connection only and are logged.
func (c *client) handleConnection(conn net.Conn) {
	defer conn.Close()
	buff := make([]byte, 1024)
	reqLen, err := conn.Read(buff)
	if err != nil {
		// A misbehaving peer must not bring the whole client down: log and
		// drop the connection instead of panicking (previous behaviour).
		logLocal.WithError(err).Error("Error while reading incoming connection")
		return
	}
	var packet config.GeneralPacket
	if err := proto.Unmarshal(buff[:reqLen], &packet); err != nil {
		// Previously execution continued with a zero-valued packet; stop here.
		logLocal.WithError(err).Error("Error in unmarshal incoming packet")
		return
	}
	switch packet.Flag {
	case tokenFlag:
		c.registerToken(packet.Data)
		go func() {
			if err := c.controlOutQueue(); err != nil {
				logLocal.WithError(err).Panic("Error in the controller of the outgoing packets queue. Possible security threat.")
			}
		}()
		if loopCoverTrafficEnabled {
			c.turnOnLoopCoverTraffic()
		}
		if dropCoverTrafficEnabled {
			c.turnOnDropCoverTraffic()
		}
		go c.controlMessagingFetching()
	case commFlag:
		if _, err := c.processPacket(packet.Data); err != nil {
			logLocal.WithError(err).Error("Error in processing received packet")
		}
		logLocal.Info("Received new message")
	default:
		logLocal.Info("Packet flag not recognised. Packet dropped.")
	}
}
// registerToken stores the authentication token received from the provider
// and signals Start's registration loop to stop re-sending registrations.
// NOTE(review): the send on registrationDone blocks until the loop in Start
// receives. If a second token packet ever arrives after registration has
// completed, this send would block its connection-handling goroutine
// forever — confirm the provider sends the token exactly once.
func (c *client) registerToken(token []byte) {
	c.token = token
	logLocal.Infof(" Registered token %s", c.token)
	c.registrationDone <- true
}
// processPacket handles a received sphinx packet. It currently performs no
// decoding and returns the payload unchanged.
func (c *client) processPacket(packet []byte) ([]byte, error) {
	logLocal.Info(" Processing packet")
	return packet, nil
}
// sendRegisterMessageToProvider sends an assignment packet carrying the
// client's public configuration to its provider.
func (c *client) sendRegisterMessageToProvider() error {
	logLocal.Info("Sending request to provider to register")
	confBytes, err := proto.Marshal(&c.config)
	if err != nil {
		logLocal.WithError(err).Error("Error in register provider - marshal of provider config returned an error")
		return err
	}
	pktBytes, err := config.WrapWithFlag(assignFlag, confBytes)
	if err != nil {
		logLocal.WithError(err).Error("Error in register provider - wrap with flag returned an error")
		return err
	}
	if err := c.send(pktBytes, c.Provider.Host, c.Provider.Port); err != nil {
		logLocal.WithError(err).Error("Error in register provider - send registration packet returned an error")
		return err
	}
	return nil
}
// getMessagesFromProvider asks the provider for the content of this client's
// inbox by sending a pull request carrying the client id and the
// authentication token obtained during registration.
func (c *client) getMessagesFromProvider() error {
	pullRqs := config.PullRequest{ClientId: c.id, Token: c.token}
	pullRqsBytes, err := proto.Marshal(&pullRqs)
	if err != nil {
		// Log messages previously said "register provider" — a copy/paste
		// slip that made pull failures hard to trace.
		logLocal.WithError(err).Error("Error in get messages - marshal of pull request returned an error")
		return err
	}
	pktBytes, err := config.WrapWithFlag(pullFlag, pullRqsBytes)
	if err != nil {
		logLocal.WithError(err).Error("Error in get messages - wrap with flag returned an error")
		return err
	}
	if err := c.send(pktBytes, c.Provider.Host, c.Provider.Port); err != nil {
		return err
	}
	return nil
}
// controlOutQueue dispatches packets to the provider at intervals drawn from
// an exponential distribution (rate desiredRateParameter). When the queue is
// empty a drop cover message is sent instead, so the observable output rate
// stays constant.
func (c *client) controlOutQueue() error {
	logLocal.Info("Queue controller started")
	for {
		select {
		case realPacket := <-c.outQueue:
			c.send(realPacket, c.Provider.Host, c.Provider.Port)
			logLocal.Info("Real packet was sent")
		default:
			dummyPacket, err := c.createDropCoverMessage()
			if err != nil {
				return err
			}
			c.send(dummyPacket, c.Provider.Host, c.Provider.Port)
			logLocal.Info("OutQueue empty. Dummy packet sent.")
		}
		delaySec, err := helpers.RandomExponential(desiredRateParameter)
		if err != nil {
			return err
		}
		time.Sleep(time.Duration(int64(delaySec*math.Pow10(9))) * time.Nanosecond)
	}
}
// controlMessagingFetching polls the provider for queued messages, waiting an
// exponentially distributed time (rate fetchRate) between pulls. Pull
// failures are best-effort and retried on the next iteration.
func (c *client) controlMessagingFetching() {
	for {
		c.getMessagesFromProvider()
		logLocal.Info("Sent request to provider to fetch messages")
		delaySec, err := helpers.RandomExponential(fetchRate)
		if err != nil {
			logLocal.Error("Error in ControlMessagingFetching - generating random exp. value failed")
		}
		time.Sleep(time.Duration(int64(delaySec*math.Pow10(9))) * time.Nanosecond)
	}
}
// createDropCoverMessage builds a sphinx packet carrying a dummy payload,
// addressed to a randomly chosen client, wrapped with the comm flag.
func (c *client) createDropCoverMessage() ([]byte, error) {
	randomRecipient, err := c.getRandomRecipient(c.Network.Clients)
	if err != nil {
		return nil, err
	}
	sphinxPacket, err := c.EncodeMessage("DummyPayloadMessage", randomRecipient)
	if err != nil {
		return nil, err
	}
	return config.WrapWithFlag(commFlag, sphinxPacket)
}
// getRandomRecipient picks a uniformly random client from slice using a
// cryptographically secure source and returns its public configuration.
// An empty slice yields an error rather than a panic.
func (c *client) getRandomRecipient(slice []config.ClientConfig) (config.ClientConfig, error) {
	// Guard the empty list: rand.Int panics when its max argument is <= 0.
	if len(slice) == 0 {
		return config.ClientConfig{}, errors.New("cannot pick a recipient from an empty client list")
	}
	randIdx, err := rand.Int(rand.Reader, big.NewInt(int64(len(slice))))
	if err != nil {
		return config.ClientConfig{}, err
	}
	return slice[randIdx.Int64()], nil
}
// createLoopCoverMessage builds a sphinx packet addressed back to this very
// client (a loop message), wrapped with the comm flag.
func (c *client) createLoopCoverMessage() ([]byte, error) {
	sphinxPacket, err := c.EncodeMessage("LoopCoverMessage", c.config)
	if err != nil {
		return nil, err
	}
	return config.WrapWithFlag(commFlag, sphinxPacket)
}
// runLoopCoverTrafficStream continuously emits loop cover packets to the
// provider, pausing an exponentially distributed time (rate loopRate)
// between sends. It only returns on error.
func (c *client) runLoopCoverTrafficStream() error {
	logLocal.Info("Stream of loop cover traffic started")
	for {
		packet, err := c.createLoopCoverMessage()
		if err != nil {
			return err
		}
		c.send(packet, c.Provider.Host, c.Provider.Port)
		logLocal.Info("Loop message sent")
		delaySec, err := helpers.RandomExponential(loopRate)
		if err != nil {
			return err
		}
		time.Sleep(time.Duration(int64(delaySec*math.Pow10(9))) * time.Nanosecond)
	}
}
// runDropCoverTrafficStream continuously emits drop cover packets addressed
// to random clients, pausing an exponentially distributed time (rate
// dropRate) between sends. It only returns on error.
func (c *client) runDropCoverTrafficStream() error {
	logLocal.Info("Stream of drop cover traffic started")
	for {
		packet, err := c.createDropCoverMessage()
		if err != nil {
			return err
		}
		c.send(packet, c.Provider.Host, c.Provider.Port)
		logLocal.Info("Drop packet sent")
		delaySec, err := helpers.RandomExponential(dropRate)
		if err != nil {
			return err
		}
		time.Sleep(time.Duration(int64(delaySec*math.Pow10(9))) * time.Nanosecond)
	}
}
// turnOnLoopCoverTraffic launches the loop cover traffic stream in the
// background; a stream failure escalates to a panic.
func (c *client) turnOnLoopCoverTraffic() {
	go func() {
		if err := c.runLoopCoverTrafficStream(); err != nil {
			logLocal.WithError(err).Panic("Error in the controller of the loop cover traffic. Possible security threat.")
		}
	}()
}
// turnOnDropCoverTraffic launches the drop cover traffic stream in the
// background; a stream failure escalates to a panic.
func (c *client) turnOnDropCoverTraffic() {
	go func() {
		if err := c.runDropCoverTrafficStream(); err != nil {
			logLocal.WithError(err).Panic("Error in the controller of the drop cover traffic. Possible security threat.")
		}
	}()
}
// ReadInNetworkFromPKI loads the public information about active mixes and
// clients from the PKI database into the client's local network view.
// Returns an error if either lookup fails.
func (c *client) ReadInNetworkFromPKI(pkiName string) error {
	logLocal.Infof("Reading network information from the PKI: %s", pkiName)
	mixes, err := helpers.GetMixesPKI(pkiName)
	if err != nil {
		logLocal.WithError(err).Error("Error while reading mixes from PKI")
		return err
	}
	c.Network.Mixes = mixes

	clients, err := helpers.GetClientPKI(pkiName)
	if err != nil {
		logLocal.WithError(err).Error("Error while reading clients from PKI")
		return err
	}
	c.Network.Clients = clients

	logLocal.Info("Network information uploaded")
	return nil
}
// NewClient constructs a client with the given identity and key pair,
// registers its public configuration in the PKI database and returns it.
func NewClient(id, host, port string, pubKey []byte, prvKey []byte, pkiDir string, provider config.MixConfig) (*client, error) {
	core := clientCore.NewCryptoClient(pubKey, prvKey, elliptic.P224(), provider, clientCore.NetworkPKI{})
	c := client{id: id, host: host, port: port, CryptoClient: core, pkiDir: pkiDir}
	c.config = config.ClientConfig{Id: c.id, Host: c.host, Port: c.port, PubKey: c.GetPublicKey(), Provider: &c.Provider}

	configBytes, err := proto.Marshal(&c.config)
	if err != nil {
		return nil, err
	}
	if err := helpers.AddToDatabase(pkiDir, "Pki", c.id, "Client", configBytes); err != nil {
		return nil, err
	}
	return &c, nil
}
// NewTestClient builds a client with the crypto core wired up but without
// touching the PKI database or starting any networking — for tests only.
func NewTestClient(id, host, port string, pubKey []byte, prvKey []byte, pkiDir string, provider config.MixConfig) (*client, error) {
	core := clientCore.NewCryptoClient(pubKey, prvKey, elliptic.P224(), provider, clientCore.NetworkPKI{})
	c := client{id: id, host: host, port: port, CryptoClient: core, pkiDir: pkiDir}
	c.config = config.ClientConfig{Id: c.id, Host: c.host, Port: c.port, PubKey: c.GetPublicKey(), Provider: &c.Provider}
	return &c, nil
}
Refactor: extract a single helper function that generates the random delay in the client package.
/*
Package client implements the class of a network client which can interact with a mix network.
*/
package client
import (
"anonymous-messaging/clientCore"
"anonymous-messaging/config"
"anonymous-messaging/helpers"
"anonymous-messaging/logging"
"anonymous-messaging/networker"
"github.com/protobuf/proto"
"crypto/elliptic"
"crypto/rand"
"math"
"math/big"
"net"
"time"
)
var (
	// logLocal is the package-wide logger used by the client.
	logLocal = logging.PackageLogger()
	// Switches enabling the loop and drop cover-traffic streams that are
	// started once the provider registration completes.
	loopCoverTrafficEnabled = true
	dropCoverTrafficEnabled = true
)
const (
	// the parameter of the exponential distribution which defines the rate of sending by client
	// the desiredRateParameter is the reciprocal of the expected value of the exponential distribution
	desiredRateParameter = 0.2
	// loopRate and dropRate are the exponential-rate parameters used between
	// consecutive loop / drop cover packets.
	loopRate = 0.1
	dropRate = 0.1
	// the rate at which clients are querying the provider for received packets. fetchRate value is the
	// parameter of an exponential distribution, and is the reciprocal of the expected value of the exp. distribution
	fetchRate = 0.01
	// Wire flags identifying the packet type.
	assignFlag = "\xA2"
	commFlag = "\xc6"
	// NOTE(review): tokenFlag is the literal three-character string "xa9" —
	// unlike its siblings it lacks the \x escape. This looks like a typo, but
	// changing it alters the wire protocol; confirm against the provider's
	// flag constant before "fixing" it.
	tokenFlag = "xa9"
	pullFlag = "\xff"
)
// Client is the interface of a mix-network client. The embedded interfaces
// cover the generic network send/listen behaviour; the extra methods cover
// the client lifecycle and message submission.
type Client interface {
	networker.NetworkClient
	networker.NetworkServer
	// Start boots the client: PKI read, listener, provider registration.
	Start() error
	// SendMessage sends message to recipient through the mix network.
	SendMessage(message string, recipient config.ClientConfig) error
	// ReadInNetworkFromPKI loads mix and client data from the PKI database.
	ReadInNetworkFromPKI(pkiName string) error
}
// client is the concrete mix-network client. Cryptographic packet operations
// (EncodeMessage, GetPublicKey, Provider, Network) come from the embedded
// CryptoClient.
type client struct {
	id string
	host string
	port string
	// listener accepts incoming TCP connections from the network.
	listener *net.TCPListener
	// pkiDir is the path to the PKI database.
	pkiDir string
	// config is this client's public configuration, as registered in the PKI.
	config config.ClientConfig
	// token authenticates pull requests to the provider; set on registration.
	token []byte
	// outQueue carries encoded packets awaiting dispatch to the provider.
	outQueue chan []byte
	// registrationDone is signalled once the provider delivers a token.
	registrationDone chan bool
	*clientCore.CryptoClient
}
// Start boots the client: it opens the listening socket, loads the network
// information from the PKI database, and keeps re-sending the registration
// packet to the provider (once a minute) until handleConnection signals on
// registrationDone that a token arrived. Returns an error when any startup
// step fails.
func (c *client) Start() error {
	// The error from resolveAddressAndStartListening was previously
	// discarded, letting the client "start" without a working listener.
	if err := c.resolveAddressAndStartListening(); err != nil {
		logLocal.WithError(err).Error("Error during starting the listening server")
		return err
	}
	c.outQueue = make(chan []byte)
	c.registrationDone = make(chan bool)
	err := c.ReadInNetworkFromPKI(c.pkiDir)
	if err != nil {
		logLocal.WithError(err).Error("Error during reading in network PKI")
		return err
	}
	// Retry registration until a token is received.
	go func() {
		for {
			select {
			case <-c.registrationDone:
				return
			default:
				err = c.sendRegisterMessageToProvider()
				if err != nil {
					logLocal.WithError(err).Error("Error during registration to provider", err)
				}
				time.Sleep(60 * time.Second)
			}
		}
	}()
	c.startListenerInNewRoutine()
	return nil
}
// resolveAddressAndStartListening resolves the client's host/port pair and
// opens the TCP listener on it.
func (c *client) resolveAddressAndStartListening() error {
	addr, err := helpers.ResolveTCPAddress(c.host, c.port)
	if err != nil {
		return err
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}
	c.listener = listener
	return nil
}
// SendMessage encodes the given message for recipient and queues the
// resulting packet for dispatch to the provider.
func (c *client) SendMessage(message string, recipient config.ClientConfig) error {
	packet, err := c.encodeMessage(message, recipient)
	if err != nil {
		logLocal.WithError(err).Error("Error in sending message - encode message returned error")
		return err
	}
	c.outQueue <- packet
	return nil
}
// encodeMessage wraps message in a sphinx packet addressed to recipient and
// prefixes it with the communication flag.
func (c *client) encodeMessage(message string, recipient config.ClientConfig) ([]byte, error) {
	sphinxPacket, err := c.EncodeMessage(message, recipient)
	if err != nil {
		logLocal.WithError(err).Error("Error in sending message - create sphinx packet returned an error")
		return nil, err
	}
	wrapped, err := config.WrapWithFlag(commFlag, sphinxPacket)
	if err != nil {
		logLocal.WithError(err).Error("Error in sending message - wrap with flag returned an error")
		return nil, err
	}
	return wrapped, nil
}
// send dials host:port over TCP, writes packet and closes the connection.
// Dial or write failures are returned to the caller.
func (c *client) send(packet []byte, host string, port string) error {
	address := host + ":" + port
	conn, err := net.Dial("tcp", address)
	if err != nil {
		logLocal.WithError(err).Error("Error in send - dial returned an error")
		return err
	}
	defer conn.Close()

	_, err = conn.Write(packet)
	return err
}
// startListenerInNewRoutine runs the accept loop in a background goroutine
// and then blocks the calling goroutine indefinitely.
// NOTE(review): despite its name this function never returns — `finish` is a
// channel nothing ever sends on, so `<-finish` blocks forever (and the
// deferred listener.Close therefore never runs). Confirm callers rely on
// this blocking behaviour before restructuring.
func (c *client) startListenerInNewRoutine() {
	defer c.listener.Close()
	finish := make(chan bool)
	go func() {
		logLocal.Infof("Listening on address %s", c.host+":"+c.port)
		c.listenForIncomingConnections()
	}()
	<-finish
}
// listenForIncomingConnections accepts connections in an endless loop and
// hands each one to handleConnection in its own goroutine. Accept errors are
// logged but do not stop the loop.
func (c *client) listenForIncomingConnections() {
	for {
		conn, err := c.listener.Accept()
		if err != nil {
			logLocal.WithError(err).Error(err)
			continue
		}
		go c.handleConnection(conn)
	}
}
// handleConnection reads one packet from conn and dispatches on its flag:
// a token packet completes registration and switches on the traffic
// controllers; a comm packet is processed as an incoming message. Errors
// abort handling of this connection only and are logged.
func (c *client) handleConnection(conn net.Conn) {
	defer conn.Close()
	buff := make([]byte, 1024)
	reqLen, err := conn.Read(buff)
	if err != nil {
		// A misbehaving peer must not bring the whole client down: log and
		// drop the connection instead of panicking (previous behaviour).
		logLocal.WithError(err).Error("Error while reading incoming connection")
		return
	}
	var packet config.GeneralPacket
	if err := proto.Unmarshal(buff[:reqLen], &packet); err != nil {
		// Previously execution continued with a zero-valued packet; stop here.
		logLocal.WithError(err).Error("Error in unmarshal incoming packet")
		return
	}
	switch packet.Flag {
	case tokenFlag:
		c.registerToken(packet.Data)
		go func() {
			if err := c.controlOutQueue(); err != nil {
				logLocal.WithError(err).Panic("Error in the controller of the outgoing packets queue. Possible security threat.")
			}
		}()
		if loopCoverTrafficEnabled {
			c.turnOnLoopCoverTraffic()
		}
		if dropCoverTrafficEnabled {
			c.turnOnDropCoverTraffic()
		}
		go c.controlMessagingFetching()
	case commFlag:
		if _, err := c.processPacket(packet.Data); err != nil {
			logLocal.WithError(err).Error("Error in processing received packet")
		}
		logLocal.Info("Received new message")
	default:
		logLocal.Info("Packet flag not recognised. Packet dropped.")
	}
}
// registerToken stores the authentication token received from the provider
// and signals Start's registration loop to stop re-sending registrations.
// NOTE(review): the send on registrationDone blocks until the loop in Start
// receives. If a second token packet ever arrives after registration has
// completed, this send would block its connection-handling goroutine
// forever — confirm the provider sends the token exactly once.
func (c *client) registerToken(token []byte) {
	c.token = token
	logLocal.Infof(" Registered token %s", c.token)
	c.registrationDone <- true
}
// processPacket handles a received sphinx packet. It currently performs no
// decoding and returns the payload unchanged.
func (c *client) processPacket(packet []byte) ([]byte, error) {
	logLocal.Info(" Processing packet")
	return packet, nil
}
// sendRegisterMessageToProvider sends an assignment packet carrying the
// client's public configuration to its provider.
func (c *client) sendRegisterMessageToProvider() error {
	logLocal.Info("Sending request to provider to register")
	confBytes, err := proto.Marshal(&c.config)
	if err != nil {
		logLocal.WithError(err).Error("Error in register provider - marshal of provider config returned an error")
		return err
	}
	pktBytes, err := config.WrapWithFlag(assignFlag, confBytes)
	if err != nil {
		logLocal.WithError(err).Error("Error in register provider - wrap with flag returned an error")
		return err
	}
	if err := c.send(pktBytes, c.Provider.Host, c.Provider.Port); err != nil {
		logLocal.WithError(err).Error("Error in register provider - send registration packet returned an error")
		return err
	}
	return nil
}
// getMessagesFromProvider asks the provider for the content of this client's
// inbox by sending a pull request carrying the client id and the
// authentication token obtained during registration.
func (c *client) getMessagesFromProvider() error {
	pullRqs := config.PullRequest{ClientId: c.id, Token: c.token}
	pullRqsBytes, err := proto.Marshal(&pullRqs)
	if err != nil {
		// Log messages previously said "register provider" — a copy/paste
		// slip that made pull failures hard to trace.
		logLocal.WithError(err).Error("Error in get messages - marshal of pull request returned an error")
		return err
	}
	pktBytes, err := config.WrapWithFlag(pullFlag, pullRqsBytes)
	if err != nil {
		logLocal.WithError(err).Error("Error in get messages - wrap with flag returned an error")
		return err
	}
	if err := c.send(pktBytes, c.Provider.Host, c.Provider.Port); err != nil {
		return err
	}
	return nil
}
// controlOutQueue dispatches packets to the provider at intervals drawn from
// an exponential distribution (rate desiredRateParameter). When the queue is
// empty a drop cover message is sent instead, so the observable output rate
// stays constant.
func (c *client) controlOutQueue() error {
	logLocal.Info("Queue controller started")
	for {
		select {
		case realPacket := <-c.outQueue:
			c.send(realPacket, c.Provider.Host, c.Provider.Port)
			logLocal.Info("Real packet was sent")
		default:
			dummyPacket, err := c.createDropCoverMessage()
			if err != nil {
				return err
			}
			c.send(dummyPacket, c.Provider.Host, c.Provider.Port)
			logLocal.Info("OutQueue empty. Dummy packet sent.")
		}
		if err := delayBeforeContinute(desiredRateParameter); err != nil {
			return err
		}
	}
}
// controlMessagingFetching periodically at random sends a query to the provider
// to fetch received messages
func (c *client) controlMessagingFetching() {
	for {
		// Best-effort: a failed pull is simply retried on the next iteration.
		c.getMessagesFromProvider()
		logLocal.Info("Sent request to provider to fetch messages")
		err := delayBeforeContinute(fetchRate)
		if err != nil {
			// NOTE(review): if sampling keeps failing, this loop no longer
			// sleeps at all and will spin at full speed — confirm whether a
			// fallback delay should be added here.
			logLocal.Error("Error in ControlMessagingFetching - generating random exp. value failed")
		}
	}
}
// createDropCoverMessage builds a sphinx packet carrying a dummy payload,
// addressed to a randomly chosen client, wrapped with the comm flag.
func (c *client) createDropCoverMessage() ([]byte, error) {
	randomRecipient, err := c.getRandomRecipient(c.Network.Clients)
	if err != nil {
		return nil, err
	}
	sphinxPacket, err := c.EncodeMessage("DummyPayloadMessage", randomRecipient)
	if err != nil {
		return nil, err
	}
	return config.WrapWithFlag(commFlag, sphinxPacket)
}
// getRandomRecipient picks a random client from the list of all available clients (stored by the client).
// getRandomRecipient returns the selected client public configuration and an error
// NOTE(review): an empty slice makes rand.Int panic ("argument to Int is
// <= 0", since big.NewInt(0) is not a valid max) — confirm callers never
// pass an empty client list, or add a length guard.
func (c *client) getRandomRecipient(slice []config.ClientConfig) (config.ClientConfig, error) {
	randIdx, err := rand.Int(rand.Reader, big.NewInt(int64(len(slice))))
	if err != nil {
		return config.ClientConfig{}, err
	}
	return slice[randIdx.Int64()], nil
}
// createLoopCoverMessage builds a sphinx packet addressed back to this very
// client (a loop message), wrapped with the comm flag.
func (c *client) createLoopCoverMessage() ([]byte, error) {
	sphinxPacket, err := c.EncodeMessage("LoopCoverMessage", c.config)
	if err != nil {
		return nil, err
	}
	return config.WrapWithFlag(commFlag, sphinxPacket)
}
// runLoopCoverTrafficStream continuously emits loop cover packets to the
// provider, pausing an exponentially distributed time (rate loopRate)
// between sends. It only returns on error.
func (c *client) runLoopCoverTrafficStream() error {
	logLocal.Info("Stream of loop cover traffic started")
	for {
		packet, err := c.createLoopCoverMessage()
		if err != nil {
			return err
		}
		c.send(packet, c.Provider.Host, c.Provider.Port)
		logLocal.Info("Loop message sent")
		if err := delayBeforeContinute(loopRate); err != nil {
			return err
		}
	}
}
// runDropCoverTrafficStream manages the stream of drop cover traffic.
// In each iteration it creates a fresh drop cover message destined to a
// randomly selected user in the network, sends it to the provider, and
// waits a random time before the next send. It only returns on error;
// it never terminates normally.
func (c *client) runDropCoverTrafficStream() error {
	logLocal.Info("Stream of drop cover traffic started")
	for {
		dropPacket, err := c.createDropCoverMessage()
		if err != nil {
			return err
		}
		c.send(dropPacket, c.Provider.Host, c.Provider.Port)
		logLocal.Info("Drop packet sent")
		if err = delayBeforeContinute(dropRate); err != nil {
			return err
		}
	}
}
// delayBeforeContinute blocks the caller for a random, exponentially
// distributed number of seconds with rate rateParam. The sampled (float)
// seconds value is scaled to nanoseconds via math.Pow10(9) before sleeping.
// Returns an error only when sampling the exponential value fails.
// NOTE(review): the name carries a typo ("Continute"); it is kept because
// sibling functions in this file call it by this name.
func delayBeforeContinute(rateParam float64) error {
	delaySec, err := helpers.RandomExponential(rateParam)
	if err != nil {
		return err
	}
	time.Sleep(time.Duration(int64(delaySec*math.Pow10(9))) * time.Nanosecond)
	return nil
}
// turnOnLoopCoverTraffic launches the loop cover traffic stream in a
// background goroutine; a stream failure is treated as fatal.
func (c *client) turnOnLoopCoverTraffic() {
	go func() {
		if err := c.runLoopCoverTrafficStream(); err != nil {
			logLocal.WithError(err).Panic("Error in the controller of the loop cover traffic. Possible security threat.")
		}
	}()
}
// turnOnDropCoverTraffic launches the drop cover traffic stream in a
// background goroutine; a stream failure is treated as fatal.
func (c *client) turnOnDropCoverTraffic() {
	go func() {
		if err := c.runDropCoverTrafficStream(); err != nil {
			logLocal.WithError(err).Panic("Error in the controller of the drop cover traffic. Possible security threat.")
		}
	}()
}
// ReadInNetworkFromPKI loads the public information about active mixes and
// clients from the PKI database identified by pkiName and caches it on the
// client. On a fetch failure the cached data is left untouched and the
// error is returned.
func (c *client) ReadInNetworkFromPKI(pkiName string) error {
	logLocal.Infof("Reading network information from the PKI: %s", pkiName)

	mixList, err := helpers.GetMixesPKI(pkiName)
	if err != nil {
		logLocal.WithError(err).Error("Error while reading mixes from PKI")
		return err
	}
	c.Network.Mixes = mixList

	clientList, err := helpers.GetClientPKI(pkiName)
	if err != nil {
		logLocal.WithError(err).Error("Error while reading clients from PKI")
		return err
	}
	c.Network.Clients = clientList

	logLocal.Info("Network information uploaded")
	return nil
}
// NewClient constructs a new client object: it builds the crypto core
// (curve P-224) for the given key pair and provider, stores the client's
// public configuration, and registers that configuration in the PKI
// database under pkiDir (table "Pki", record type "Client").
// Returns the client, or an error from marshalling or the database write.
func NewClient(id, host, port string, pubKey []byte, prvKey []byte, pkiDir string, provider config.MixConfig) (*client, error) {
	core := clientCore.NewCryptoClient(pubKey, prvKey, elliptic.P224(), provider, clientCore.NetworkPKI{})
	c := client{id: id, host: host, port: port, CryptoClient: core, pkiDir: pkiDir}
	c.config = config.ClientConfig{Id: c.id, Host: c.host, Port: c.port, PubKey: c.GetPublicKey(), Provider: &c.Provider}
	configBytes, err := proto.Marshal(&c.config)
	if err != nil {
		return nil, err
	}
	err = helpers.AddToDatabase(pkiDir, "Pki", c.id, "Client", configBytes)
	if err != nil {
		return nil, err
	}
	return &c, nil
}
// NewTestClient constructs a client object for testing. It contains the
// crypto core and the top-level client state, but performs no networking,
// starts no listener, and — unlike NewClient — does not register the
// configuration in the PKI database.
func NewTestClient(id, host, port string, pubKey []byte, prvKey []byte, pkiDir string, provider config.MixConfig) (*client, error) {
	core := clientCore.NewCryptoClient(pubKey, prvKey, elliptic.P224(), provider, clientCore.NetworkPKI{})
	c := client{id: id, host: host, port: port, CryptoClient: core, pkiDir: pkiDir}
	c.config = config.ClientConfig{Id: c.id, Host: c.host, Port: c.port, PubKey: c.GetPublicKey(), Provider: &c.Provider}
	return &c, nil
}
|
package client
import (
"crypto/tls"
"io"
"net"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/eclipse/paho.mqtt.golang/packets"
"github.com/iotalking/mqtt-broker/config"
"github.com/iotalking/mqtt-broker/safe-runtine"
"github.com/iotalking/mqtt-broker/session"
_ "github.com/iotalking/mqtt-broker/store/mem-provider"
)
var mgrOnce sync.Once
// Client is an MQTT client: it owns the broker session plus the connection
// parameters and the MQTT CONNECT options (credentials, will, keepalive).
type Client struct {
	sessionMgr session.SessionMgr // manages session lifecycle with the broker
	session *session.Session // active session; set by Connect
	proto string // protocol passed to Connect: mqtt, mqtts, ws, wss
	addr string // broker address as passed to Connect
	user string // username parsed from the address
	password []byte // password parsed from the address
	clientId string // MQTT client identifier
	WillTopic string // topic of the will message
	WillMessage []byte // payload of the will message
	WillQos byte // QoS of the will message
	WillRetain bool // retain flag of the will message
	Keepalive uint16 // keepalive interval in seconds (0 = use session default)
	mainRuntine *runtine.SafeRuntine
}
// NewClient returns a Client with the given client id that uses mgr to
// manage its broker session.
func NewClient(id string, mgr session.SessionMgr) *Client {
	return &Client{
		sessionMgr: mgr,
		clientId:   id,
	}
}
// Connect dials the broker at addr using the given protocol and sends the
// MQTT CONNECT packet, returning the send token.
//
// proto is one of: mqtt (plain TCP), mqtts (TCP+TLS), ws (websocket),
// wss (websocket+TLS); unrecognized values fall back to plain TCP.
// addr format: [username][:password]@ip[:port]
func (this *Client) Connect(proto, addr string) (token session.Token, err error) {
	this.proto = proto
	this.addr = addr
	// Split off the optional username/password prefix of the address.
	tmps := strings.Split(addr, "@")
	if len(tmps) > 1 {
		addr = tmps[1]
		// The prefix carries a username and, optionally, a password segment.
		tmps := strings.Split(tmps[0], ":")
		this.user = tmps[0]
		if len(tmps) > 1 {
			this.password = []byte(tmps[1])
		}
	}
	var c io.ReadWriteCloser
	switch proto {
	case "mqtt":
		c, err = this.newTcpConn(addr)
	case "mqtts":
		c, err = this.newTcpTlsConn(addr)
	default:
		c, err = this.newTcpConn(addr)
	}
	if err == nil {
		this.session = session.NewSession(this.sessionMgr, c, false)
		this.session.SetClientId(this.clientId)
		this.sessionMgr.HandleConnection(this.session)
		connectMsg := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
		connectMsg.ProtocolName = "MQTT"
		connectMsg.ProtocolVersion = 4
		connectMsg.UsernameFlag = true
		connectMsg.Username = this.user
		connectMsg.PasswordFlag = true
		connectMsg.Password = this.password
		connectMsg.ClientIdentifier = this.clientId
		if this.Keepalive == 0 {
			// The session keepalive is in nanoseconds while the CONNECT
			// keepalive field is in seconds, so convert before truncating.
			connectMsg.Keepalive = uint16(this.session.GetKeepalive() / int64(time.Second))
		} else {
			connectMsg.Keepalive = this.Keepalive
		}
		token, err = this.session.Send(connectMsg)
	} else {
		log.Error("connect error:", err)
		defer func() {
			recover()
		}()
	}
	return
}
// newTcpConn opens a plain TCP connection to addr with a dial timeout.
// NOTE(review): config.ConnectTimeout is passed through time.Duration(...),
// which interprets a raw integer as nanoseconds unless it is already a
// time.Duration — confirm the intended unit.
func (this *Client) newTcpConn(addr string) (c io.ReadWriteCloser, err error) {
	c, err = net.DialTimeout("tcp", addr, time.Duration(config.ConnectTimeout))
	return
}
// newTcpTlsConn opens a TLS connection to addr.
// NOTE(review): InsecureSkipVerify is enabled, so the server certificate is
// NOT validated; this is vulnerable to man-in-the-middle attacks and should
// be made configurable.
func (this *Client) newTcpTlsConn(addr string) (c io.ReadWriteCloser, err error) {
	var config tls.Config
	config.InsecureSkipVerify = true
	c, err = tls.Dial("tcp", addr, &config)
	return
}
// Disconnect sends an MQTT DISCONNECT packet and waits for the send to
// complete. It is a no-op when there is no open session.
func (this *Client) Disconnect() (err error) {
	if this.session == nil || this.session.IsClosed() {
		return
	}
	msg := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
	var token session.Token
	if token, err = this.session.Send(msg); err == nil {
		token.Wait()
		log.Debug("disconnect token.Wait return")
	}
	return
}
// Publish sends a PUBLISH packet carrying body to the given topic with the
// requested QoS and retain flag, returning the send token.
func (this *Client) Publish(topic string, body []byte, qos byte, retain bool) (token session.Token, err error) {
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.TopicName = topic
	pub.Payload = body
	pub.Qos = qos
	pub.Retain = retain
	return this.session.Publish(pub)
}
// Subcribe subscribes to one or more topics with a single SUBSCRIBE packet.
// submap maps each subscription topic filter (key) to its requested QoS
// (value).
// NOTE(review): the exported name is missing an "s" (should be Subscribe);
// kept as-is because renaming would break callers.
func (this *Client) Subcribe(submap map[string]byte) (token session.Token, err error) {
	subMsg := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
	for sub, qos := range submap {
		subMsg.Topics = append(subMsg.Topics, sub)
		subMsg.Qoss = append(subMsg.Qoss, qos)
	}
	token, err = this.session.Send(subMsg)
	return
}
// Unsubcribe unsubscribes from the given topics with a single UNSUBSCRIBE
// packet.
// NOTE(review): the exported name is missing an "s" (should be
// Unsubscribe); kept as-is because renaming would break callers.
func (this *Client) Unsubcribe(subs ...string) (token session.Token, err error) {
	unsubMsg := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
	unsubMsg.Topics = subs
	token, err = this.session.Send(unsubMsg)
	return
}
// SetOnMessage registers cb to be invoked for every PUBLISH received on the
// session, passing the topic, payload and QoS. A nil cb is ignored.
func (this *Client) SetOnMessage(cb func(topic string, body []byte, qos byte)) {
	this.session.SetOnMessage(func(p *packets.PublishPacket) {
		if cb == nil {
			return
		}
		cb(p.TopicName, p.Payload, p.Qos)
	})
}
// SetOnDisconnected registers cb to be invoked when the session disconnects.
func (this *Client) SetOnDisconnected(cb func()) {
	this.session.SetOnDisconnected(cb)
}
// SetKeepalive sets the CONNECT keepalive interval, in seconds.
func (this *Client) SetKeepalive(keepalive uint16) {
	this.Keepalive = keepalive
}
// GetKeepalive returns the configured keepalive interval, in seconds.
func (this *Client) GetKeepalive() uint16 {
	return this.Keepalive
}
// GetID returns the MQTT client identifier.
func (this *Client) GetID() string {
	return this.clientId
}
Note: the client's Keepalive field is expressed in seconds, while the session's keepalive is in nanoseconds.
package client
import (
"crypto/tls"
"io"
"net"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/eclipse/paho.mqtt.golang/packets"
"github.com/iotalking/mqtt-broker/config"
"github.com/iotalking/mqtt-broker/safe-runtine"
"github.com/iotalking/mqtt-broker/session"
_ "github.com/iotalking/mqtt-broker/store/mem-provider"
)
var mgrOnce sync.Once
// Client is an MQTT client: it owns the broker session plus the connection
// parameters and the MQTT CONNECT options (credentials, will, keepalive).
type Client struct {
	sessionMgr session.SessionMgr // manages session lifecycle with the broker
	session *session.Session // active session; set by Connect
	proto string // protocol passed to Connect: mqtt, mqtts, ws, wss
	addr string // broker address as passed to Connect
	user string // username parsed from the address
	password []byte // password parsed from the address
	clientId string // MQTT client identifier
	WillTopic string // topic of the will message
	WillMessage []byte // payload of the will message
	WillQos byte // QoS of the will message
	WillRetain bool // retain flag of the will message
	Keepalive uint16 // keepalive interval in seconds (0 = derive from session default)
	mainRuntine *runtine.SafeRuntine
}
func NewClient(id string, mgr session.SessionMgr) *Client {
c := &Client{
sessionMgr: mgr,
clientId: id,
}
return c
}
// Connect dials the broker at addr using the given protocol and sends the
// MQTT CONNECT packet, returning the send token.
//
// proto is one of: mqtt (plain TCP), mqtts (TCP+TLS), ws (websocket),
// wss (websocket+TLS); unrecognized values fall back to plain TCP.
// addr format: [username][:password]@ip[:port]
func (this *Client) Connect(proto, addr string) (token session.Token, err error) {
	this.proto = proto
	this.addr = addr
	// Split off the optional username/password prefix of the address.
	tmps := strings.Split(addr, "@")
	if len(tmps) > 1 {
		addr = tmps[1]
		// The prefix carries a username and, optionally, a password segment.
		tmps := strings.Split(tmps[0], ":")
		this.user = tmps[0]
		if len(tmps) > 1 {
			this.password = []byte(tmps[1])
		}
	}
	var c io.ReadWriteCloser
	switch proto {
	case "mqtt":
		c, err = this.newTcpConn(addr)
	case "mqtts":
		c, err = this.newTcpTlsConn(addr)
	default:
		c, err = this.newTcpConn(addr)
	}
	if err == nil {
		this.session = session.NewSession(this.sessionMgr, c, false)
		this.session.SetClientId(this.clientId)
		this.sessionMgr.HandleConnection(this.session)
		connectMsg := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
		connectMsg.ProtocolName = "MQTT"
		connectMsg.ProtocolVersion = 4
		connectMsg.UsernameFlag = true
		connectMsg.Username = this.user
		connectMsg.PasswordFlag = true
		connectMsg.Password = this.password
		connectMsg.ClientIdentifier = this.clientId
		if this.Keepalive == 0 {
			// The session keepalive is in nanoseconds; CONNECT wants seconds.
			connectMsg.Keepalive = uint16(this.session.GetKeepalive() / int64(time.Second))
		} else {
			connectMsg.Keepalive = this.Keepalive
		}
		token, err = this.session.Send(connectMsg)
	} else {
		log.Error("connect error:", err)
		// NOTE(review): this deferred recover runs only at function exit and
		// nothing below can panic — it appears to be dead code.
		defer func() {
			recover()
		}()
	}
	return
}
func (this *Client) newTcpConn(addr string) (c io.ReadWriteCloser, err error) {
c, err = net.DialTimeout("tcp", addr, time.Duration(config.ConnectTimeout))
return
}
// newTcpTlsConn opens a TLS connection to addr.
// NOTE(review): InsecureSkipVerify is enabled, so the server certificate is
// NOT validated; this is vulnerable to man-in-the-middle attacks and should
// be made configurable.
func (this *Client) newTcpTlsConn(addr string) (c io.ReadWriteCloser, err error) {
	var config tls.Config
	config.InsecureSkipVerify = true
	c, err = tls.Dial("tcp", addr, &config)
	return
}
func (this *Client) Disconnect() (err error) {
if this.session == nil || this.session.IsClosed() {
return
}
disconnectMsg := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
token, err := this.session.Send(disconnectMsg)
if err == nil {
token.Wait()
log.Debug("disconnect token.Wait return")
}
return
}
//发布消息
func (this *Client) Publish(topic string, body []byte, qos byte, retain bool) (token session.Token, err error) {
msg := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
msg.TopicName = topic
msg.Payload = body
msg.Qos = qos
msg.Retain = retain
token, err = this.session.Publish(msg)
return
}
//订阅主题,可以一次订阅多条
//submap
//key:subscription
//value:qos
func (this *Client) Subcribe(submap map[string]byte) (token session.Token, err error) {
subMsg := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
for sub, qos := range submap {
subMsg.Topics = append(subMsg.Topics, sub)
subMsg.Qoss = append(subMsg.Qoss, qos)
}
token, err = this.session.Send(subMsg)
return
}
//不订阅主题
func (this *Client) Unsubcribe(subs ...string) (token session.Token, err error) {
unsubMsg := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
unsubMsg.Topics = subs
token, err = this.session.Send(unsubMsg)
return
}
func (this *Client) SetOnMessage(cb func(topic string, body []byte, qos byte)) {
this.session.SetOnMessage(func(msg *packets.PublishPacket) {
if cb != nil {
cb(msg.TopicName, msg.Payload, msg.Qos)
}
})
}
func (this *Client) SetOnDisconnected(cb func()) {
this.session.SetOnDisconnected(cb)
}
func (this *Client) SetKeepalive(keepalive uint16) {
this.Keepalive = keepalive
}
func (this *Client) GetKeepalive() uint16 {
return this.Keepalive
}
func (this *Client) GetID() string {
return this.clientId
}
|
package client
import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"net"
	"net/url"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/client/connhelper"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/grpchijack"
	"github.com/moby/buildkit/util/appdefaults"
	"github.com/moby/buildkit/util/grpcerrors"
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/pkg/errors"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)
type Client struct {
conn *grpc.ClientConn
}
type ClientOpt interface{}
// New returns a new buildkit client. Address can be empty for the system-default address.
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
	gopts := []grpc.DialOption{}
	needDialer := true
	needWithInsecure := true
	var unary []grpc.UnaryClientInterceptor
	var stream []grpc.StreamClientInterceptor
	for _, o := range opts {
		if _, ok := o.(*withFailFast); ok {
			gopts = append(gopts, grpc.FailOnNonTempDialError(true))
		}
		if credInfo, ok := o.(*withCredentials); ok {
			opt, err := loadCredentials(credInfo)
			if err != nil {
				return nil, err
			}
			gopts = append(gopts, opt)
			needWithInsecure = false
		}
		if wt, ok := o.(*withTracer); ok {
			unary = append(unary, otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads()))
			stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))
		}
		if wd, ok := o.(*withDialer); ok {
			gopts = append(gopts, grpc.WithContextDialer(wd.dialer))
			needDialer = false
		}
	}
	if needDialer {
		dialFn, err := resolveDialer(address)
		if err != nil {
			return nil, err
		}
		gopts = append(gopts, grpc.WithContextDialer(dialFn))
	}
	if needWithInsecure {
		gopts = append(gopts, grpc.WithInsecure())
	}
	if address == "" {
		address = appdefaults.Address
	}
	// grpc-go resolves targets with its own naming scheme
	// (https://github.com/grpc/grpc/blob/master/doc/naming.md) and would
	// otherwise set an RFC non-compliant :authority header equal to the raw
	// address string (e.g. tcp://127.0.0.1:1234), so set the proper
	// authority explicitly from the host part of the address.
	addressURL, err := url.Parse(address)
	if err != nil {
		return nil, err
	}
	gopts = append(gopts, grpc.WithAuthority(addressURL.Host))
	unary = append(unary, grpcerrors.UnaryClientInterceptor)
	stream = append(stream, grpcerrors.StreamClientInterceptor)
	if len(unary) == 1 {
		gopts = append(gopts, grpc.WithUnaryInterceptor(unary[0]))
	} else if len(unary) > 1 {
		gopts = append(gopts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...)))
	}
	if len(stream) == 1 {
		gopts = append(gopts, grpc.WithStreamInterceptor(stream[0]))
	} else if len(stream) > 1 {
		gopts = append(gopts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...)))
	}
	conn, err := grpc.DialContext(ctx, address, gopts...)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
	}
	c := &Client{
		conn: conn,
	}
	return c, nil
}
// controlClient wraps the underlying gRPC connection in a buildkit
// control-API client.
func (c *Client) controlClient() controlapi.ControlClient {
	return controlapi.NewControlClient(c.conn)
}
// Dialer returns a session dialer that hijacks the control client's gRPC
// stream.
func (c *Client) Dialer() session.Dialer {
	return grpchijack.Dialer(c.controlClient())
}
// Close closes the client's underlying gRPC connection.
func (c *Client) Close() error {
	return c.conn.Close()
}
type withFailFast struct{}
func WithFailFast() ClientOpt {
return &withFailFast{}
}
type withDialer struct {
dialer func(context.Context, string) (net.Conn, error)
}
func WithContextDialer(df func(context.Context, string) (net.Conn, error)) ClientOpt {
return &withDialer{dialer: df}
}
type withCredentials struct {
ServerName string
CACert string
Cert string
Key string
}
// WithCredentials configures the TLS parameters of the client.
// Arguments:
// * serverName: specifies the name of the target server
// * ca: specifies the filepath of the CA certificate to use for verification
// * cert: specifies the filepath of the client certificate
// * key: specifies the filepath of the client key
func WithCredentials(serverName, ca, cert, key string) ClientOpt {
return &withCredentials{serverName, ca, cert, key}
}
// loadCredentials builds a gRPC transport-credentials DialOption from the
// TLS file paths in opts. The CA certificate is always required; the client
// certificate/key pair is optional but must be provided together.
func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
	ca, err := ioutil.ReadFile(opts.CACert)
	if err != nil {
		return nil, errors.Wrap(err, "could not read ca certificate")
	}
	certPool := x509.NewCertPool()
	if ok := certPool.AppendCertsFromPEM(ca); !ok {
		return nil, errors.New("failed to append ca certs")
	}
	cfg := &tls.Config{
		ServerName: opts.ServerName,
		RootCAs:    certPool,
	}
	// we will produce an error if the user forgot about either cert or key if at least one is specified
	if opts.Cert != "" || opts.Key != "" {
		cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key)
		if err != nil {
			return nil, errors.Wrap(err, "could not read certificate/key")
		}
		cfg.Certificates = []tls.Certificate{cert}
		// NOTE(review): BuildNameToCertificate is deprecated; modern
		// crypto/tls selects the client certificate automatically.
		cfg.BuildNameToCertificate()
	}
	return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
}
func WithTracer(t opentracing.Tracer) ClientOpt {
return &withTracer{t}
}
type withTracer struct {
tracer opentracing.Tracer
}
// resolveDialer returns a context dialer for address. When a connection
// helper is registered for the address's scheme, its dialer is used;
// otherwise the package's basic dialer is returned.
func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) {
	ch, err := connhelper.GetConnectionHelper(address)
	if err != nil {
		return nil, err
	}
	if ch != nil {
		return ch.ContextDialer, nil
	}
	// basic dialer
	return dialer, nil
}
Set the host part of the address as the :authority pseudo-header.
grpc-go uses a slightly different naming scheme (https://github.com/grpc/grpc/blob/master/doc/naming.md).
This would end up setting an RFC non-compliant :authority header to the address string (e.g. tcp://127.0.0.1:1234).
So, this commit changes it to set the right authority header via the WithAuthority DialOption.
Signed-off-by: Shingo Omura <aac6d626b49dc33e0911a17dc8d98f53df72571b@gmail.com>
package client
import (
"context"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"net/url"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/connhelper"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/util/appdefaults"
"github.com/moby/buildkit/util/grpcerrors"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type Client struct {
conn *grpc.ClientConn
}
type ClientOpt interface{}
// New returns a new buildkit client. Address can be empty for the system-default address.
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
gopts := []grpc.DialOption{}
needDialer := true
needWithInsecure := true
var unary []grpc.UnaryClientInterceptor
var stream []grpc.StreamClientInterceptor
for _, o := range opts {
if _, ok := o.(*withFailFast); ok {
gopts = append(gopts, grpc.FailOnNonTempDialError(true))
}
if credInfo, ok := o.(*withCredentials); ok {
opt, err := loadCredentials(credInfo)
if err != nil {
return nil, err
}
gopts = append(gopts, opt)
needWithInsecure = false
}
if wt, ok := o.(*withTracer); ok {
unary = append(unary, otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads()))
stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))
}
if wd, ok := o.(*withDialer); ok {
gopts = append(gopts, grpc.WithContextDialer(wd.dialer))
needDialer = false
}
}
if needDialer {
dialFn, err := resolveDialer(address)
if err != nil {
return nil, err
}
gopts = append(gopts, grpc.WithContextDialer(dialFn))
}
if needWithInsecure {
gopts = append(gopts, grpc.WithInsecure())
}
if address == "" {
address = appdefaults.Address
}
// grpc-go uses a slightly different naming scheme: https://github.com/grpc/grpc/blob/master/doc/naming.md
// This will end up setting rfc non-complient :authority header to address string (e.g. tcp://127.0.0.1:1234).
// So, here sets right authority header via WithAuthority DialOption.
addressURL, err := url.Parse(address)
if err != nil {
return nil, err
}
gopts = append(gopts, grpc.WithAuthority(addressURL.Host))
unary = append(unary, grpcerrors.UnaryClientInterceptor)
stream = append(stream, grpcerrors.StreamClientInterceptor)
if len(unary) == 1 {
gopts = append(gopts, grpc.WithUnaryInterceptor(unary[0]))
} else if len(unary) > 1 {
gopts = append(gopts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...)))
}
if len(stream) == 1 {
gopts = append(gopts, grpc.WithStreamInterceptor(stream[0]))
} else if len(stream) > 1 {
gopts = append(gopts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...)))
}
conn, err := grpc.DialContext(ctx, address, gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
}
c := &Client{
conn: conn,
}
return c, nil
}
func (c *Client) controlClient() controlapi.ControlClient {
return controlapi.NewControlClient(c.conn)
}
func (c *Client) Dialer() session.Dialer {
return grpchijack.Dialer(c.controlClient())
}
func (c *Client) Close() error {
return c.conn.Close()
}
type withFailFast struct{}
func WithFailFast() ClientOpt {
return &withFailFast{}
}
type withDialer struct {
dialer func(context.Context, string) (net.Conn, error)
}
func WithContextDialer(df func(context.Context, string) (net.Conn, error)) ClientOpt {
return &withDialer{dialer: df}
}
type withCredentials struct {
ServerName string
CACert string
Cert string
Key string
}
// WithCredentials configures the TLS parameters of the client.
// Arguments:
// * serverName: specifies the name of the target server
// * ca: specifies the filepath of the CA certificate to use for verification
// * cert: specifies the filepath of the client certificate
// * key: specifies the filepath of the client key
func WithCredentials(serverName, ca, cert, key string) ClientOpt {
return &withCredentials{serverName, ca, cert, key}
}
// loadCredentials builds a gRPC transport-credentials DialOption from the
// TLS file paths in opts. The CA certificate is always required; the client
// certificate/key pair is optional but must be provided together.
func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
	ca, err := ioutil.ReadFile(opts.CACert)
	if err != nil {
		return nil, errors.Wrap(err, "could not read ca certificate")
	}
	certPool := x509.NewCertPool()
	if ok := certPool.AppendCertsFromPEM(ca); !ok {
		return nil, errors.New("failed to append ca certs")
	}
	cfg := &tls.Config{
		ServerName: opts.ServerName,
		RootCAs:    certPool,
	}
	// we will produce an error if the user forgot about either cert or key if at least one is specified
	if opts.Cert != "" || opts.Key != "" {
		cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key)
		if err != nil {
			return nil, errors.Wrap(err, "could not read certificate/key")
		}
		cfg.Certificates = []tls.Certificate{cert}
		// NOTE(review): BuildNameToCertificate is deprecated; modern
		// crypto/tls selects the client certificate automatically.
		cfg.BuildNameToCertificate()
	}
	return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
}
func WithTracer(t opentracing.Tracer) ClientOpt {
return &withTracer{t}
}
type withTracer struct {
tracer opentracing.Tracer
}
func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) {
ch, err := connhelper.GetConnectionHelper(address)
if err != nil {
return nil, err
}
if ch != nil {
return ch.ContextDialer, nil
}
// basic dialer
return dialer, nil
}
|
// Package client is an IRC client library.
package client
import (
"bufio"
"crypto/tls"
"fmt"
"log"
"net"
"strings"
"time"
"github.com/horgh/irc"
)
// Conn holds an IRC connection together with the identity used to register
// (nick, realname, ident) and the target server coordinates.
type Conn struct {
	// conn: The connection if we are actively connected; nil otherwise.
	conn net.Conn
	// rw: Buffered read/write handle to the connection.
	rw *bufio.ReadWriter
	// nick is the desired nickname.
	nick string
	// name is the realname to use.
	name string
	// ident is the ident to use.
	ident string
	// host is the IP/hostname of the IRC server to connect to.
	host string
	// port is the port of the host of the IRC server to connect to.
	port int
	// tls toggles whether we connect with TLS/SSL or not.
	tls bool
	// Config holds the parsed config file data.
	//
	// TODO(horgh): This doesn't really seem to belong here.
	Config map[string]string
}
// timeoutConnect is how long we wait for connection attempts to time out.
const timeoutConnect = 30 * time.Second
// timeoutTime is how long we wait on network I/O by default.
const timeoutTime = 5 * time.Minute
// Hooks are functions to call for each message. Packages can take actions
// this way.
var Hooks []func(*Conn, irc.Message)
// New creates a client configured to connect to host:port with the given
// identity; tls selects a TLS connection. No connection is made yet.
func New(nick, name, ident, host string, port int, tls bool) *Conn {
	c := &Conn{
		nick:  nick,
		name:  name,
		ident: ident,
		host:  host,
		port:  port,
		tls:   tls,
	}
	return c
}
// Close cleans up the client by closing any active connection.
func (c *Conn) Close() error {
	if c.conn == nil {
		return nil
	}
	err := c.conn.Close()
	c.conn = nil
	return err
}
// Connect attempts to connect to the configured server, with TLS when the
// client was created with tls enabled, and then performs registration
// (NICK/USER) via greet. Certificate verification is skipped for TLS since
// IRC servers typically lack valid certs.
func (c *Conn) Connect() error {
	if c.tls {
		dialer := &net.Dialer{Timeout: timeoutConnect}
		conn, err := tls.DialWithDialer(dialer, "tcp",
			fmt.Sprintf("%s:%d", c.host, c.port),
			&tls.Config{
				// Typically IRC servers won't have valid certs.
				InsecureSkipVerify: true,
			})
		if err != nil {
			return err
		}
		c.conn = conn
		c.rw = bufio.NewReadWriter(bufio.NewReader(c.conn), bufio.NewWriter(c.conn))
		return c.greet()
	}
	conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", c.host, c.port),
		timeoutConnect)
	if err != nil {
		return err
	}
	c.conn = conn
	c.rw = bufio.NewReadWriter(bufio.NewReader(c.conn), bufio.NewWriter(c.conn))
	return c.greet()
}
// ReadMessage reads one line from the connection and parses it as an IRC
// message. A message that parses but was truncated is returned without
// error.
func (c Conn) ReadMessage() (irc.Message, error) {
	line, err := c.read()
	if err != nil {
		return irc.Message{}, err
	}
	msg, perr := irc.ParseMessage(line)
	if perr != nil && perr != irc.ErrTruncated {
		return irc.Message{}, fmt.Errorf("unable to parse message: %s: %s", line,
			perr)
	}
	return msg, nil
}
// read reads a single line from the connection, applying the default I/O
// deadline first, and logs it with the trailing CRLF stripped. The returned
// line still includes its line terminator.
func (c Conn) read() (string, error) {
	if err := c.conn.SetDeadline(time.Now().Add(timeoutTime)); err != nil {
		return "", fmt.Errorf("unable to set deadline: %s", err)
	}
	line, err := c.rw.ReadString('\n')
	if err != nil {
		return "", err
	}
	log.Printf("Read: %s", strings.TrimRight(line, "\r\n"))
	return line, nil
}
// WriteMessage encodes m and writes it to the connection. Encoding that
// merely truncates the message is not treated as an error.
func (c Conn) WriteMessage(m irc.Message) error {
	encoded, err := m.Encode()
	if err != nil && err != irc.ErrTruncated {
		return fmt.Errorf("unable to encode message: %s", err)
	}
	return c.write(encoded)
}
// write sends s over the connection: it applies the default I/O deadline,
// verifies the write was complete, flushes the buffered writer, and logs
// the sent line with its CRLF stripped.
func (c Conn) write(s string) error {
	if err := c.conn.SetDeadline(time.Now().Add(timeoutTime)); err != nil {
		return fmt.Errorf("unable to set deadline: %s", err)
	}
	n, err := c.rw.WriteString(s)
	if err != nil {
		return err
	}
	if n != len(s) {
		return fmt.Errorf("short write")
	}
	if err := c.rw.Flush(); err != nil {
		return fmt.Errorf("flush error: %s", err)
	}
	log.Printf("Sent: %s", strings.TrimRight(s, "\r\n"))
	return nil
}
// greet runs connection initiation (NICK, USER) and then reads messages,
// firing hooks for each, until it sees registration succeed.
//
// Currently it will wait until it times out reading a message before
// reporting failure.
func (c *Conn) greet() error {
	if err := c.Register(); err != nil {
		return err
	}
	for {
		msg, err := c.ReadMessage()
		if err != nil {
			return err
		}
		c.hooks(msg)
		// RPL_WELCOME tells us we've registered.
		//
		// Note RPL_WELCOME is not defined in RFC 1459. It is in RFC 2812. The best
		// way I can tell from RFC 1459 that we've completed registration is by
		// looking for RPL_LUSERCLIENT which apparently must be sent (section 8.5).
		if msg.Command == irc.ReplyWelcome {
			return nil
		}
	}
}
// Loop enters a loop reading from the server.
//
// We maintain the IRC connection: PINGs are answered with PONGs, and an
// ERROR message (the acknowledgement after a QUIT) closes the connection.
//
// Hook events will fire for every message read.
func (c *Conn) Loop() error {
	for {
		if !c.IsConnected() {
			// Reconnect once and return its result to the caller.
			return c.Connect()
		}
		msg, err := c.ReadMessage()
		if err != nil {
			return err
		}
		if msg.Command == "PING" {
			if err := c.Pong(msg); err != nil {
				return err
			}
		}
		if msg.Command == "ERROR" {
			// Error terminates the connection. We get it as an acknowledgement after
			// sending a QUIT.
			return c.Close()
		}
		c.hooks(msg)
	}
}
// hooks invokes every registered package hook with the given message.
func (c *Conn) hooks(message irc.Message) {
	for _, h := range Hooks {
		h(c, message)
	}
}
// IsConnected reports whether the client currently has an open connection.
func (c *Conn) IsConnected() bool {
	return c.conn != nil
}
// Register sends the client's registration/greeting: NICK followed by USER.
func (c *Conn) Register() error {
	if err := c.Nick(); err != nil {
		return err
	}
	return c.User()
}
// Nick sends the NICK command with the client's desired nickname.
func (c *Conn) Nick() error {
	err := c.WriteMessage(irc.Message{
		Command: "NICK",
		Params:  []string{c.nick},
	})
	if err != nil {
		return fmt.Errorf("failed to send NICK: %s", err)
	}
	return nil
}
// User sends the USER command carrying the ident and realname.
func (c *Conn) User() error {
	if err := c.WriteMessage(irc.Message{
		Command: "USER",
		Params:  []string{c.ident, "0", "*", c.name},
	}); err != nil {
		return fmt.Errorf("failed to send USER: %s", err)
	}
	return nil
}
// Pong sends a PONG echoing the first parameter of the given PING message.
// NOTE(review): this indexes ping.Params[0] without a length check and will
// panic on a PING with no parameters — confirm upstream guarantees one.
func (c *Conn) Pong(ping irc.Message) error {
	return c.WriteMessage(irc.Message{
		Command: "PONG",
		Params: []string{ping.Params[0]},
	})
}
// Join joins the named channel.
func (c *Conn) Join(name string) error {
	msg := irc.Message{Command: "JOIN", Params: []string{name}}
	return c.WriteMessage(msg)
}
// Message sends a PRIVMSG to target.
//
// If the message is too long for a single line, then it will be split over
// several lines. A write failure aborts the send and is returned to the
// caller (a partial message may already have gone out).
func (c *Conn) Message(target string, message string) error {
	// 512 is the maximum IRC protocol length.
	// However, user and host takes up some of that. Let's cut down a bit.
	// This is arbitrary.
	maxMessage := 412
	// Number of overhead bytes of protocol framing per line.
	overhead := len("PRIVMSG ") + len(" :") + len("\r\n")
	for i := 0; i < len(message); i += maxMessage - overhead {
		endIndex := i + maxMessage - overhead
		if endIndex > len(message) {
			endIndex = len(message)
		}
		piece := message[i:endIndex]
		if err := c.WriteMessage(irc.Message{
			Command: "PRIVMSG",
			Params: []string{target, piece},
		}); err != nil {
			// Propagate the failure instead of silently dropping it.
			return err
		}
	}
	return nil
}
// Quit sends a QUIT with the given message.
//
// We track when we send this as we expect an ERROR message in response.
func (c *Conn) Quit(message string) error {
	return c.WriteMessage(irc.Message{
		Command: "QUIT",
		Params:  []string{message},
	})
}
// Oper sends an OPER command with the given operator name and password.
func (c *Conn) Oper(name string, password string) error {
	msg := irc.Message{Command: "OPER", Params: []string{name, password}}
	return c.WriteMessage(msg)
}
// UserMode sends a MODE command setting modes on the given nick.
func (c *Conn) UserMode(nick string, modes string) error {
	msg := irc.Message{Command: "MODE", Params: []string{nick, modes}}
	return c.WriteMessage(msg)
}
Rename Conn to Client
// Package client is an IRC client library.
package client
import (
"bufio"
"crypto/tls"
"fmt"
"log"
"net"
"strings"
"time"
"github.com/horgh/irc"
)
// Client holds an IRC client connection together with the identity used to
// register (nick, realname, ident) and the target server coordinates.
type Client struct {
	// conn: The connection if we are actively connected; nil otherwise.
	conn net.Conn
	// rw: Buffered read/write handle to the connection.
	rw *bufio.ReadWriter
	// nick is the desired nickname.
	nick string
	// name is the realname to use.
	name string
	// ident is the ident to use.
	ident string
	// host is the IP/hostname of the IRC server to connect to.
	host string
	// port is the port of the host of the IRC server to connect to.
	port int
	// tls toggles whether we connect with TLS/SSL or not.
	tls bool
	// Config holds the parsed config file data.
	//
	// TODO(horgh): This doesn't really seem to belong here.
	Config map[string]string
}
// timeoutConnect is how long we wait for connection attempts to time out.
const timeoutConnect = 30 * time.Second
// timeoutTime is how long we wait on network I/O by default.
const timeoutTime = 5 * time.Minute
// Hooks are functions to call for each message. Packages can take actions
// this way.
var Hooks []func(*Client, irc.Message)
// New creates a client that will connect to host:port with the given
// identity. Call Connect to actually establish the connection.
func New(nick, name, ident, host string, port int, tls bool) *Client {
	c := &Client{
		nick:  nick,
		name:  name,
		ident: ident,
		host:  host,
		port:  port,
		tls:   tls,
	}
	return c
}

// Close cleans up the client. It closes the connection, if any, and marks the
// client as disconnected.
func (c *Client) Close() error {
	if c.conn == nil {
		return nil
	}
	err := c.conn.Close()
	c.conn = nil
	return err
}
// Connect attempts to connect to a server.
//
// On success the connection and buffered reader/writer are stored on the
// client, and registration (NICK/USER) runs via greet.
func (c *Client) Connect() error {
	addr := fmt.Sprintf("%s:%d", c.host, c.port)

	// Dial with or without TLS; everything after the dial is identical, so
	// the post-dial setup is shared rather than duplicated per branch.
	var conn net.Conn
	var err error
	if c.tls {
		dialer := &net.Dialer{Timeout: timeoutConnect}
		conn, err = tls.DialWithDialer(dialer, "tcp", addr,
			&tls.Config{
				// Typically IRC servers won't have valid certs.
				InsecureSkipVerify: true,
			})
	} else {
		conn, err = net.DialTimeout("tcp", addr, timeoutConnect)
	}
	if err != nil {
		return err
	}

	c.conn = conn
	c.rw = bufio.NewReadWriter(bufio.NewReader(c.conn), bufio.NewWriter(c.conn))

	return c.greet()
}
// ReadMessage reads a line from the connection and parses it as an IRC message.
//
// A message that parsed but was truncated (irc.ErrTruncated) is returned
// without an error.
//
// Receiver is a pointer for consistency with the rest of the type's methods
// and to avoid copying the Client on every call.
func (c *Client) ReadMessage() (irc.Message, error) {
	buf, err := c.read()
	if err != nil {
		return irc.Message{}, err
	}

	m, err := irc.ParseMessage(buf)
	if err != nil && err != irc.ErrTruncated {
		return irc.Message{}, fmt.Errorf("unable to parse message: %s: %s", buf,
			err)
	}

	return m, nil
}

// read reads a line from the connection, applying the default I/O deadline.
func (c *Client) read() (string, error) {
	if err := c.conn.SetDeadline(time.Now().Add(timeoutTime)); err != nil {
		return "", fmt.Errorf("unable to set deadline: %s", err)
	}

	line, err := c.rw.ReadString('\n')
	if err != nil {
		return "", err
	}

	log.Printf("Read: %s", strings.TrimRight(line, "\r\n"))
	return line, nil
}

// WriteMessage writes an IRC message to the connection. A message that
// encoded but was truncated (irc.ErrTruncated) is still sent.
func (c *Client) WriteMessage(m irc.Message) error {
	buf, err := m.Encode()
	if err != nil && err != irc.ErrTruncated {
		return fmt.Errorf("unable to encode message: %s", err)
	}

	return c.write(buf)
}

// write writes a string to the connection and flushes the buffered writer,
// applying the default I/O deadline.
func (c *Client) write(s string) error {
	if err := c.conn.SetDeadline(time.Now().Add(timeoutTime)); err != nil {
		return fmt.Errorf("unable to set deadline: %s", err)
	}

	sz, err := c.rw.WriteString(s)
	if err != nil {
		return err
	}

	if sz != len(s) {
		return fmt.Errorf("short write")
	}

	if err := c.rw.Flush(); err != nil {
		return fmt.Errorf("flush error: %s", err)
	}

	log.Printf("Sent: %s", strings.TrimRight(s, "\r\n"))
	return nil
}
// greet runs connection initiation (NICK, USER) and then reads messages until
// it sees it worked.
//
// Currently it will wait until it times out reading a message before reporting
// failure.
func (c *Client) greet() error {
	if err := c.Register(); err != nil {
		return err
	}

	// Consume messages until registration is confirmed. Hooks fire for every
	// message read here, just as they do in Loop.
	for {
		msg, err := c.ReadMessage()
		if err != nil {
			return err
		}

		c.hooks(msg)

		// RPL_WELCOME tells us we've registered.
		//
		// Note RPL_WELCOME is not defined in RFC 1459. It is in RFC 2812. The best
		// way I can tell from RFC 1459 that we've completed registration is by
		// looking for RPL_LUSERCLIENT which apparently must be sent (section 8.5).
		if msg.Command == irc.ReplyWelcome {
			return nil
		}
	}
}
// Loop enters a loop reading from the server.
//
// We maintain the IRC connection.
//
// Hook events will fire.
func (c *Client) Loop() error {
	for {
		// NOTE(review): when not connected this returns after a single
		// Connect attempt (nil on success) rather than continuing the loop,
		// so the caller must invoke Loop again to resume reading. Confirm
		// this is the intended contract given the "maintain the connection"
		// doc above.
		if !c.IsConnected() {
			return c.Connect()
		}

		msg, err := c.ReadMessage()
		if err != nil {
			return err
		}

		// Answer server keep-alives so the server doesn't drop us.
		if msg.Command == "PING" {
			if err := c.Pong(msg); err != nil {
				return err
			}
		}

		if msg.Command == "ERROR" {
			// Error terminates the connection. We get it as an acknowledgement after
			// sending a QUIT.
			return c.Close()
		}

		c.hooks(msg)
	}
}
// hooks invokes every registered package hook with the given message.
func (c *Client) hooks(message irc.Message) {
	for i := range Hooks {
		Hooks[i](c, message)
	}
}

// IsConnected reports whether the client currently holds a connection.
func (c *Client) IsConnected() bool {
	return c.conn != nil
}

// Register sends the client's registration/greeting. This consists of NICK and
// USER.
func (c *Client) Register() error {
	if err := c.Nick(); err != nil {
		return err
	}
	return c.User()
}
// Nick sends the NICK command with the client's configured nickname.
func (c *Client) Nick() error {
	m := irc.Message{
		Command: "NICK",
		Params:  []string{c.nick},
	}
	if err := c.WriteMessage(m); err != nil {
		return fmt.Errorf("failed to send NICK: %s", err)
	}
	return nil
}
// User sends the USER command with the client's ident and realname.
func (c *Client) User() error {
	if err := c.WriteMessage(irc.Message{
		Command: "USER",
		Params:  []string{c.ident, "0", "*", c.name},
	}); err != nil {
		// Bug fix: this error previously misreported itself as NICK.
		return fmt.Errorf("failed to send USER: %s", err)
	}
	return nil
}
// Pong sends a PONG in response to the given PING message.
//
// A PING that arrived without parameters is answered with a parameterless
// PONG rather than panicking on the missing index.
func (c *Client) Pong(ping irc.Message) error {
	var params []string
	if len(ping.Params) > 0 {
		params = []string{ping.Params[0]}
	}
	return c.WriteMessage(irc.Message{
		Command: "PONG",
		Params:  params,
	})
}
// Join joins the named channel.
func (c *Client) Join(name string) error {
	m := irc.Message{
		Command: "JOIN",
		Params:  []string{name},
	}
	return c.WriteMessage(m)
}
// Message sends a message.
//
// If the message is too long for a single line, then it will be split over
// several lines.
func (c *Client) Message(target string, message string) error {
	// 512 is the maximum IRC protocol length.
	// However, user and host takes up some of that. Let's cut down a bit.
	// This is arbitrary.
	maxMessage := 412

	// Number of overhead bytes.
	overhead := len("PRIVMSG ") + len(" :") + len("\r\n")

	chunk := maxMessage - overhead
	for i := 0; i < len(message); i += chunk {
		endIndex := i + chunk
		if endIndex > len(message) {
			endIndex = len(message)
		}

		// NOTE(review): slicing by byte offset can split a multi-byte UTF-8
		// sequence across two lines — confirm this is acceptable.
		piece := message[i:endIndex]

		if err := c.WriteMessage(irc.Message{
			Command: "PRIVMSG",
			Params:  []string{target, piece},
		}); err != nil {
			// Bug fix: this previously returned nil, silently swallowing the
			// write error and reporting success to the caller.
			return err
		}
	}
	return nil
}
// Quit sends a QUIT command to the server. The server acknowledges a QUIT
// with an ERROR message.
func (c *Client) Quit(message string) error {
	return c.WriteMessage(irc.Message{
		Command: "QUIT",
		Params:  []string{message},
	})
}

// Oper sends an OPER command to authenticate as an IRC operator.
func (c *Client) Oper(name string, password string) error {
	m := irc.Message{
		Command: "OPER",
		Params:  []string{name, password},
	}
	return c.WriteMessage(m)
}

// UserMode sends a MODE command to change the given nick's user modes.
func (c *Client) UserMode(nick string, modes string) error {
	m := irc.Message{
		Command: "MODE",
		Params:  []string{nick, modes},
	}
	return c.WriteMessage(m)
}
|
package client
import (
"errors"
"fmt"
"io/ioutil"
"net"
"net/rpc"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/armon/go-metrics"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/allocrunner"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
arstate "github.com/hashicorp/nomad/client/allocrunner/state"
"github.com/hashicorp/nomad/client/allocwatcher"
"github.com/hashicorp/nomad/client/config"
consulApi "github.com/hashicorp/nomad/client/consul"
"github.com/hashicorp/nomad/client/devicemanager"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/client/pluginmanager"
"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
"github.com/hashicorp/nomad/client/servers"
"github.com/hashicorp/nomad/client/state"
"github.com/hashicorp/nomad/client/stats"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/vaultclient"
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/pool"
hstats "github.com/hashicorp/nomad/helper/stats"
"github.com/hashicorp/nomad/helper/tlsutil"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/structs"
nconfig "github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/plugins/device"
"github.com/hashicorp/nomad/plugins/drivers"
vaultapi "github.com/hashicorp/vault/api"
"github.com/shirou/gopsutil/host"
)
const (
	// clientRPCCache controls how long we keep an idle connection
	// open to a server
	clientRPCCache = 5 * time.Minute

	// clientMaxStreams controls how many idle streams we keep
	// open to a server
	clientMaxStreams = 2

	// datacenterQueryLimit searches through up to this many adjacent
	// datacenters looking for the Nomad server service.
	datacenterQueryLimit = 9

	// registerRetryIntv is the minimum interval on which we retry
	// registration. We pick a value between this and 2x this.
	registerRetryIntv = 15 * time.Second

	// getAllocRetryIntv is the minimum interval on which we retry
	// to fetch allocations. We pick a value between this and 2x this.
	getAllocRetryIntv = 30 * time.Second

	// devModeRetryIntv is the retry interval used for development
	devModeRetryIntv = time.Second

	// stateSnapshotIntv is how often the client snapshots state
	stateSnapshotIntv = 60 * time.Second

	// initialHeartbeatStagger is used to stagger the interval between
	// starting and the initial heartbeat. After the initial heartbeat,
	// we switch to using the TTL specified by the servers.
	initialHeartbeatStagger = 10 * time.Second

	// nodeUpdateRetryIntv is how often the client checks for updates to the
	// node attributes or meta map.
	nodeUpdateRetryIntv = 5 * time.Second

	// allocSyncIntv is the batching period of allocation updates before they
	// are synced with the server.
	allocSyncIntv = 200 * time.Millisecond

	// allocSyncRetryIntv is the interval on which we retry updating
	// the status of the allocation
	allocSyncRetryIntv = 5 * time.Second
)
var (
	// batchFirstFingerprintsProcessingGrace is the grace period allowed for
	// batch fingerprint processing: the batch timeout (defined elsewhere in
	// this package) plus slack, after which NewClient proceeds with whatever
	// fingerprints have been applied so far.
	batchFirstFingerprintsProcessingGrace = batchFirstFingerprintsTimeout + 5*time.Second
)
// ClientStatsReporter exposes all the APIs related to resource usage of a Nomad
// Client
type ClientStatsReporter interface {
	// GetAllocStats returns the AllocStatsReporter for the passed allocation.
	// If it does not exist an error is returned.
	GetAllocStats(allocID string) (interfaces.AllocStatsReporter, error)

	// LatestHostStats returns the latest resource usage stats for the host
	LatestHostStats() *stats.HostStats
}
// AllocRunner is the interface implemented by the core alloc runner.
//
// TODO Create via factory to allow testing Client with mock AllocRunners.
type AllocRunner interface {
	Alloc() *structs.Allocation
	AllocState() *arstate.State

	// Destroy and Shutdown stop the runner; DestroyCh/ShutdownCh (below)
	// signal when the corresponding operation has completed.
	Destroy()
	Shutdown()
	GetAllocDir() *allocdir.AllocDir
	IsDestroyed() bool
	IsMigrating() bool
	IsWaiting() bool
	Listener() *cstructs.AllocListener
	Restore() error
	Run()
	StatsReporter() interfaces.AllocStatsReporter
	Update(*structs.Allocation)
	WaitCh() <-chan struct{}
	DestroyCh() <-chan struct{}
	ShutdownCh() <-chan struct{}
	Signal(taskName, signal string) error
	GetTaskEventHandler(taskName string) drivermanager.EventHandler

	// RestartTask restarts a single named task; RestartAll restarts every
	// task in the allocation.
	RestartTask(taskName string, taskEvent *structs.TaskEvent) error
	RestartAll(taskEvent *structs.TaskEvent) error

	GetTaskExecHandler(taskName string) drivermanager.TaskExecHandler
	GetTaskDriverCapabilities(taskName string) (*drivers.Capabilities, error)
}
// Client is used to implement the client interaction with Nomad. Clients
// are expected to register as a schedulable node to the servers, and to
// run allocations as determined by the servers.
type Client struct {
	config *config.Config
	start  time.Time

	// stateDB is used to efficiently store client state.
	stateDB state.StateDB

	// configCopy is a copy that should be passed to alloc-runners.
	configCopy *config.Config
	configLock sync.RWMutex

	logger    hclog.Logger
	rpcLogger hclog.Logger

	connPool *pool.ConnPool

	// tlsWrap is used to wrap outbound connections using TLS. It should be
	// accessed using the lock.
	tlsWrap     tlsutil.RegionWrapper
	tlsWrapLock sync.RWMutex

	// servers is the list of nomad servers
	servers *servers.Manager

	// heartbeat related times for tracking how often to heartbeat
	lastHeartbeat   time.Time
	heartbeatTTL    time.Duration
	haveHeartbeated bool
	heartbeatLock   sync.Mutex

	// triggerDiscoveryCh triggers Consul discovery; see triggerDiscovery
	triggerDiscoveryCh chan struct{}

	// triggerNodeUpdate triggers the client to mark the Node as changed and
	// update it.
	triggerNodeUpdate chan struct{}

	// triggerEmitNodeEvent sends an event and triggers the client to update the
	// server for the node event
	triggerEmitNodeEvent chan *structs.NodeEvent

	// rpcRetryCh is closed when there is an event such as server discovery or a
	// successful RPC occurring such that a retry should happen. Access
	// should only occur via the getter method
	rpcRetryCh   chan struct{}
	rpcRetryLock sync.Mutex

	// allocs maps alloc IDs to their AllocRunner. This map includes all
	// AllocRunners - running and GC'd - until the server GCs them.
	allocs    map[string]AllocRunner
	allocLock sync.RWMutex

	// invalidAllocs is a map that tracks allocations that failed because
	// the client couldn't initialize alloc or task runners for it. This can
	// happen due to driver errors
	invalidAllocs map[string]struct{}

	// allocUpdates stores allocations that need to be synced to the server.
	allocUpdates chan *structs.Allocation

	// consulService is Nomad's custom Consul client for managing services
	// and checks.
	consulService consulApi.ConsulServiceAPI

	// consulCatalog is the subset of Consul's Catalog API Nomad uses.
	consulCatalog consul.CatalogAPI

	// hostStatsCollector collects host resource usage stats
	hostStatsCollector *stats.HostStatsCollector

	// shutdown is true when the Client has been shutdown. Must hold
	// shutdownLock to access.
	shutdown bool

	// shutdownCh is closed to signal the Client is shutting down.
	shutdownCh chan struct{}

	shutdownLock sync.Mutex

	// shutdownGroup are goroutines that exit when shutdownCh is closed.
	// Shutdown() blocks on Wait() after closing shutdownCh.
	shutdownGroup group

	// vaultClient is used to interact with Vault for token and secret renewals
	vaultClient vaultclient.VaultClient

	// garbageCollector is used to garbage collect terminal allocations present
	// in the node automatically
	garbageCollector *AllocGarbageCollector

	// clientACLResolver holds the ACL resolution state
	clientACLResolver

	// rpcServer is used to serve RPCs by the local agent.
	rpcServer     *rpc.Server
	endpoints     rpcEndpoints
	streamingRpcs *structs.StreamingRpcRegistry

	// pluginManagers is the set of PluginManagers registered by the client
	pluginManagers *pluginmanager.PluginGroup

	// devicemanager is responsible for managing device plugins.
	devicemanager devicemanager.Manager

	// drivermanager is responsible for managing driver plugins
	drivermanager drivermanager.Manager

	// baseLabels are used when emitting tagged metrics. All client metrics will
	// have these tags, and optionally more.
	baseLabels []metrics.Label

	// batchNodeUpdates is used to batch initial updates to the node
	batchNodeUpdates *batchNodeUpdates

	// fpInitialized chan is closed when the first batch of fingerprints are
	// applied to the node and the server is updated
	fpInitialized chan struct{}

	// serversContactedCh is closed when GetClientAllocs and runAllocs have
	// successfully run once.
	serversContactedCh   chan struct{}
	serversContactedOnce sync.Once
}
var (
	// noServersErr is returned by the RPC method when the client has no
	// configured servers. This is used to trigger Consul discovery if
	// enabled.
	//
	// NOTE(review): idiomatic Go would name a sentinel error errNoServers;
	// renaming would touch callers elsewhere, so it is left as-is.
	noServersErr = errors.New("no servers")
)
// NewClient is used to create a new client from the given configuration.
//
// It wires up TLS, state storage, Consul integration, fingerprinting, plugin
// (driver/device) managers, garbage collection and Vault, restores any
// persisted allocation state, and starts the long-running goroutines that
// heartbeat and sync with the servers. The returned client is fully started.
func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulService consulApi.ConsulServiceAPI) (*Client, error) {
	// Create the tls wrapper
	var tlsWrap tlsutil.RegionWrapper
	if cfg.TLSConfig.EnableRPC {
		tw, err := tlsutil.NewTLSConfiguration(cfg.TLSConfig, true, true)
		if err != nil {
			return nil, err
		}
		tlsWrap, err = tw.OutgoingTLSWrapper()
		if err != nil {
			return nil, err
		}
	}

	if cfg.StateDBFactory == nil {
		cfg.StateDBFactory = state.GetStateDBFactory(cfg.DevMode)
	}

	// Create the logger
	logger := cfg.Logger.ResetNamed("client")

	// Create the client
	c := &Client{
		config:               cfg,
		consulCatalog:        consulCatalog,
		consulService:        consulService,
		start:                time.Now(),
		connPool:             pool.NewPool(logger, clientRPCCache, clientMaxStreams, tlsWrap),
		tlsWrap:              tlsWrap,
		streamingRpcs:        structs.NewStreamingRpcRegistry(),
		logger:               logger,
		rpcLogger:            logger.Named("rpc"),
		allocs:               make(map[string]AllocRunner),
		allocUpdates:         make(chan *structs.Allocation, 64),
		shutdownCh:           make(chan struct{}),
		triggerDiscoveryCh:   make(chan struct{}),
		triggerNodeUpdate:    make(chan struct{}, 8),
		triggerEmitNodeEvent: make(chan *structs.NodeEvent, 8),
		fpInitialized:        make(chan struct{}),
		invalidAllocs:        make(map[string]struct{}),
		serversContactedCh:   make(chan struct{}),
		serversContactedOnce: sync.Once{},
	}

	c.batchNodeUpdates = newBatchNodeUpdates(
		c.updateNodeFromDriver,
		c.updateNodeFromDevices,
	)

	// Initialize the server manager
	c.servers = servers.New(c.logger, c.shutdownCh, c)

	// Start server manager rebalancing go routine
	go c.servers.Start()

	// Initialize the client (state/alloc directories and state database)
	if err := c.init(); err != nil {
		return nil, fmt.Errorf("failed to initialize client: %v", err)
	}

	// Setup the clients RPC server
	c.setupClientRpc()

	// Initialize the ACL state
	if err := c.clientACLResolver.init(); err != nil {
		return nil, fmt.Errorf("failed to initialize ACL state: %v", err)
	}

	// Setup the node
	if err := c.setupNode(); err != nil {
		return nil, fmt.Errorf("node setup failed: %v", err)
	}

	// Store the config copy before restoring state but after it has been
	// initialized.
	c.configLock.Lock()
	c.configCopy = c.config.Copy()
	c.configLock.Unlock()

	fingerprintManager := NewFingerprintManager(
		c.configCopy.PluginSingletonLoader, c.GetConfig, c.configCopy.Node,
		c.shutdownCh, c.updateNodeFromFingerprint, c.logger)

	c.pluginManagers = pluginmanager.New(c.logger)

	// Fingerprint the node and scan for drivers
	if err := fingerprintManager.Run(); err != nil {
		return nil, fmt.Errorf("fingerprinting failed: %v", err)
	}

	// Build the white/blacklists of drivers.
	allowlistDrivers := cfg.ReadStringListToMap("driver.whitelist")
	blocklistDrivers := cfg.ReadStringListToMap("driver.blacklist")

	// Setup the driver manager
	driverConfig := &drivermanager.Config{
		Logger:              c.logger,
		Loader:              c.configCopy.PluginSingletonLoader,
		PluginConfig:        c.configCopy.NomadPluginConfig(),
		Updater:             c.batchNodeUpdates.updateNodeFromDriver,
		EventHandlerFactory: c.GetTaskEventHandler,
		State:               c.stateDB,
		AllowedDrivers:      allowlistDrivers,
		BlockedDrivers:      blocklistDrivers,
	}
	drvManager := drivermanager.New(driverConfig)
	c.drivermanager = drvManager
	c.pluginManagers.RegisterAndRun(drvManager)

	// Setup the device manager
	devConfig := &devicemanager.Config{
		Logger:        c.logger,
		Loader:        c.configCopy.PluginSingletonLoader,
		PluginConfig:  c.configCopy.NomadPluginConfig(),
		Updater:       c.batchNodeUpdates.updateNodeFromDevices,
		StatsInterval: c.configCopy.StatsCollectionInterval,
		State:         c.stateDB,
	}
	devManager := devicemanager.New(devConfig)
	c.devicemanager = devManager
	c.pluginManagers.RegisterAndRun(devManager)

	// Batching of initial fingerprints is done to reduce the number of node
	// updates sent to the server on startup.
	go c.batchFirstFingerprints()

	// Add the stats collector
	statsCollector := stats.NewHostStatsCollector(c.logger, c.config.AllocDir, c.devicemanager.AllStats)
	c.hostStatsCollector = statsCollector

	// Add the garbage collector
	gcConfig := &GCConfig{
		MaxAllocs:           cfg.GCMaxAllocs,
		DiskUsageThreshold:  cfg.GCDiskUsageThreshold,
		InodeUsageThreshold: cfg.GCInodeUsageThreshold,
		Interval:            cfg.GCInterval,
		ParallelDestroys:    cfg.GCParallelDestroys,
		ReservedDiskMB:      cfg.Node.Reserved.DiskMB,
	}
	c.garbageCollector = NewAllocGarbageCollector(c.logger, statsCollector, c, gcConfig)
	go c.garbageCollector.Run()

	// Set the preconfigured list of static servers
	c.configLock.RLock()
	if len(c.configCopy.Servers) > 0 {
		if _, err := c.setServersImpl(c.configCopy.Servers, true); err != nil {
			logger.Warn("none of the configured servers are valid", "error", err)
		}
	}
	c.configLock.RUnlock()

	// Setup Consul discovery if enabled
	if c.configCopy.ConsulConfig.ClientAutoJoin != nil && *c.configCopy.ConsulConfig.ClientAutoJoin {
		c.shutdownGroup.Go(c.consulDiscovery)
		if c.servers.NumServers() == 0 {
			// No configured servers; trigger discovery manually
			c.triggerDiscoveryCh <- struct{}{}
		}
	}

	// Setup the vault client for token and secret renewals
	if err := c.setupVaultClient(); err != nil {
		return nil, fmt.Errorf("failed to setup vault client: %v", err)
	}

	// wait until drivers are healthy before restoring or registering with servers
	select {
	case <-c.Ready():
	case <-time.After(batchFirstFingerprintsProcessingGrace):
		logger.Warn("batch fingerprint operation timed out; proceeding to register with fingerprinted plugins so far")
	}

	// Register and then start heartbeating to the servers.
	c.shutdownGroup.Go(c.registerAndHeartbeat)

	// Restore the state
	if err := c.restoreState(); err != nil {
		logger.Error("failed to restore state", "error", err)
		logger.Error("Nomad is unable to start due to corrupt state. "+
			"The safest way to proceed is to manually stop running task processes "+
			"and remove Nomad's state and alloc directories before "+
			"restarting. Lost allocations will be rescheduled.",
			"state_dir", c.config.StateDir, "alloc_dir", c.config.AllocDir)
		logger.Error("Corrupt state is often caused by a bug. Please " +
			"report as much information as possible to " +
			"https://github.com/hashicorp/nomad/issues")
		return nil, fmt.Errorf("failed to restore state")
	}

	// Begin periodic snapshotting of state.
	c.shutdownGroup.Go(c.periodicSnapshot)

	// Begin syncing allocations to the server
	c.shutdownGroup.Go(c.allocSync)

	// Start the client! Don't use the shutdownGroup as run handles
	// shutdowns manually to prevent updates from being applied during
	// shutdown.
	go c.run()

	// Start collecting stats
	c.shutdownGroup.Go(c.emitStats)

	c.logger.Info("started client", "node_id", c.NodeID())
	return c, nil
}
// Ready returns a channel that is closed once the client is fully
// initialized (the first batch of fingerprints has been applied).
func (c *Client) Ready() <-chan struct{} {
	return c.fpInitialized
}
// init is used to initialize the client and perform any setup
// needed before we begin starting its various components.
//
// It ensures the state and alloc directories exist (creating temp dirs when
// unconfigured) and opens and upgrades the state database.
func (c *Client) init() error {
	// Ensure the state dir exists if we have one
	if c.config.StateDir != "" {
		if err := os.MkdirAll(c.config.StateDir, 0700); err != nil {
			return fmt.Errorf("failed creating state dir: %s", err)
		}
	} else {
		// Otherwise make a temp directory to use.
		p, err := tempDirFor("StateDir")
		if err != nil {
			return err
		}
		c.config.StateDir = p
	}
	c.logger.Info("using state directory", "state_dir", c.config.StateDir)

	// Open the state database
	db, err := c.config.StateDBFactory(c.logger, c.config.StateDir)
	if err != nil {
		return fmt.Errorf("failed to open state database: %v", err)
	}

	// Upgrade the state database
	if err := db.Upgrade(); err != nil {
		// Upgrade only returns an error on critical persistence
		// failures in which an operator should intervene before the
		// node is accessible. Upgrade drops and logs corrupt state it
		// encounters, so failing to start the agent should be extremely
		// rare.
		return fmt.Errorf("failed to upgrade state database: %v", err)
	}
	c.stateDB = db

	// Ensure the alloc dir exists if we have one
	if c.config.AllocDir != "" {
		if err := os.MkdirAll(c.config.AllocDir, 0711); err != nil {
			return fmt.Errorf("failed creating alloc dir: %s", err)
		}
	} else {
		// Otherwise make a temp directory to use.
		p, err := tempDirFor("AllocDir")
		if err != nil {
			return err
		}

		// Change the permissions to have the execute bit
		if err := os.Chmod(p, 0711); err != nil {
			return fmt.Errorf("failed to change directory permissions for the AllocDir: %v", err)
		}

		c.config.AllocDir = p
	}
	c.logger.Info("using alloc directory", "alloc_dir", c.config.AllocDir)

	return nil
}

// tempDirFor creates a temporary directory to stand in for the named config
// directory (e.g. "StateDir") and resolves any symlinks in its path. The
// label only appears in error messages, which match the wording previously
// duplicated inline for each directory.
func tempDirFor(label string) (string, error) {
	p, err := ioutil.TempDir("", "NomadClient")
	if err != nil {
		return "", fmt.Errorf("failed creating temporary directory for the %s: %v", label, err)
	}
	p, err = filepath.EvalSymlinks(p)
	if err != nil {
		return "", fmt.Errorf("failed to find temporary directory for the %s: %v", label, err)
	}
	return p, nil
}
// reloadTLSConnections allows a client to reload its TLS configuration on the
// fly
func (c *Client) reloadTLSConnections(newConfig *nconfig.TLSConfig) error {
	// Build the new outgoing wrapper first; on any error the client keeps
	// its existing TLS state untouched. A nil/disabled config leaves
	// tlsWrap nil, i.e. plaintext outbound connections.
	var tlsWrap tlsutil.RegionWrapper
	if newConfig != nil && newConfig.EnableRPC {
		tw, err := tlsutil.NewTLSConfiguration(newConfig, true, true)
		if err != nil {
			return err
		}
		twWrap, err := tw.OutgoingTLSWrapper()
		if err != nil {
			return err
		}
		tlsWrap = twWrap
	}

	// Store the new tls wrapper.
	c.tlsWrapLock.Lock()
	c.tlsWrap = tlsWrap
	c.tlsWrapLock.Unlock()

	// Keep the client configuration up to date as we use configuration values to
	// decide on what type of connections to accept
	c.configLock.Lock()
	c.config.TLSConfig = newConfig
	c.configLock.Unlock()

	// Re-wrap pooled connections with the new TLS settings.
	c.connPool.ReloadTLS(tlsWrap)

	return nil
}
// Reload allows a client to reload parts of its configuration on the fly.
// Currently only TLS settings are reloaded, and only when they changed.
func (c *Client) Reload(newConfig *config.Config) error {
	shouldReloadTLS, err := tlsutil.ShouldReloadRPCConnections(c.config.TLSConfig, newConfig.TLSConfig)
	if err != nil {
		c.logger.Error("error parsing TLS configuration", "error", err)
		return err
	}

	if !shouldReloadTLS {
		return nil
	}
	return c.reloadTLSConnections(newConfig.TLSConfig)
}

// Leave is used to prepare the client to leave the cluster
func (c *Client) Leave() error {
	// TODO
	return nil
}
// GetConfig returns the config of the client
func (c *Client) GetConfig() *config.Config {
	// Read-only access: a read lock suffices and allows concurrent readers,
	// matching Node(), which guards the same configCopy with RLock. The
	// previous exclusive Lock serialized all readers needlessly.
	c.configLock.RLock()
	defer c.configLock.RUnlock()
	return c.configCopy
}
// Datacenter returns the datacenter the client node belongs to.
func (c *Client) Datacenter() string {
	return c.config.Node.Datacenter
}

// Region returns the region the client is configured for.
func (c *Client) Region() string {
	return c.config.Region
}

// NodeID returns this client node's ID.
func (c *Client) NodeID() string {
	return c.config.Node.ID
}

// secretNodeID returns this client node's secret ID.
func (c *Client) secretNodeID() string {
	return c.config.Node.SecretID
}

// RPCMajorVersion returns the structs.ApiMajorVersion supported by the
// client.
func (c *Client) RPCMajorVersion() int {
	return structs.ApiMajorVersion
}

// RPCMinorVersion returns the structs.ApiMinorVersion supported by the
// client.
func (c *Client) RPCMinorVersion() int {
	return structs.ApiMinorVersion
}
// Shutdown is used to tear down the client
//
// Teardown order matters: alloc runners are stopped before shutdownCh is
// closed, the connection pool is closed to unblock watchers, and state is
// saved last, once all goroutines have exited.
func (c *Client) Shutdown() error {
	c.shutdownLock.Lock()
	defer c.shutdownLock.Unlock()

	// Idempotent: a second Shutdown is a no-op.
	if c.shutdown {
		c.logger.Info("already shutdown")
		return nil
	}
	c.logger.Info("shutting down")

	// Stop renewing tokens and secrets
	if c.vaultClient != nil {
		c.vaultClient.Stop()
	}

	// Stop Garbage collector
	c.garbageCollector.Stop()

	arGroup := group{}
	if c.config.DevMode {
		// In DevMode destroy all the running allocations.
		for _, ar := range c.getAllocRunners() {
			ar.Destroy()
			arGroup.AddCh(ar.DestroyCh())
		}
	} else {
		// In normal mode call shutdown
		for _, ar := range c.getAllocRunners() {
			ar.Shutdown()
			arGroup.AddCh(ar.ShutdownCh())
		}
	}
	arGroup.Wait()

	// Shutdown the plugin managers
	c.pluginManagers.Shutdown()

	c.shutdown = true
	close(c.shutdownCh)

	// Must close connection pool to unblock alloc watcher
	c.connPool.Shutdown()

	// Wait for goroutines to stop
	c.shutdownGroup.Wait()

	// One final save state
	// NOTE(review): saveState's error is discarded here — confirm that is
	// intentional during teardown.
	c.saveState()
	return c.stateDB.Close()
}
// Stats is used to return statistics for debugging and insight
// for various sub-systems
func (c *Client) Stats() map[string]map[string]string {
	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()

	clientStats := map[string]string{
		"node_id":         c.NodeID(),
		"known_servers":   strings.Join(c.GetServers(), ","),
		"num_allocations": strconv.Itoa(c.NumAllocs()),
		"last_heartbeat":  fmt.Sprintf("%v", time.Since(c.lastHeartbeat)),
		"heartbeat_ttl":   fmt.Sprintf("%v", c.heartbeatTTL),
	}

	return map[string]map[string]string{
		"client":  clientStats,
		"runtime": hstats.RuntimeStats(),
	}
}
// SignalAllocation sends a signal to the tasks within an allocation.
// If the provided task is empty, then every allocation will be signalled.
// If a task is provided, then only an exactly matching task will be signalled.
func (c *Client) SignalAllocation(allocID, task, signal string) error {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return err
	}
	return runner.Signal(task, signal)
}

// CollectAllocation garbage collects a single allocation on a node. Returns
// true if alloc was found and garbage collected; otherwise false.
func (c *Client) CollectAllocation(allocID string) bool {
	return c.garbageCollector.Collect(allocID)
}

// CollectAllAllocs garbage collects all allocations on a node in the terminal
// state
func (c *Client) CollectAllAllocs() {
	c.garbageCollector.CollectAll()
}
// RestartAllocation sends a user-requested restart to an allocation. When
// taskName is non-empty only that task is restarted; otherwise every task in
// the allocation is restarted.
func (c *Client) RestartAllocation(allocID, taskName string) error {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return err
	}

	event := structs.NewTaskEvent(structs.TaskRestartSignal).
		SetRestartReason("User requested restart")

	if taskName == "" {
		return runner.RestartAll(event)
	}
	return runner.RestartTask(taskName, event)
}

// Node returns the locally registered node
func (c *Client) Node() *structs.Node {
	c.configLock.RLock()
	defer c.configLock.RUnlock()
	return c.configCopy.Node
}
// getAllocRunner looks up the runner for the given allocation ID, returning
// an unknown-allocation error when it is not tracked by this client.
func (c *Client) getAllocRunner(allocID string) (AllocRunner, error) {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	if runner, ok := c.allocs[allocID]; ok {
		return runner, nil
	}
	return nil, structs.NewErrUnknownAllocation(allocID)
}
// StatsReporter exposes the various APIs related resource usage of a Nomad
// client
func (c *Client) StatsReporter() ClientStatsReporter {
	return c
}

// GetAllocStats returns the AllocStatsReporter for the given allocation, or
// an error when the allocation is unknown to this client.
func (c *Client) GetAllocStats(allocID string) (interfaces.AllocStatsReporter, error) {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return nil, err
	}
	return runner.StatsReporter(), nil
}

// LatestHostStats returns the most recent resource usage stats for the host.
func (c *Client) LatestHostStats() *stats.HostStats {
	return c.hostStatsCollector.Stats()
}

// LatestDeviceResourceStats returns the most recent stats for the given set
// of allocated device resources.
func (c *Client) LatestDeviceResourceStats(devices []*structs.AllocatedDeviceResource) []*device.DeviceGroupStats {
	return c.computeAllocatedDeviceGroupStats(devices, c.LatestHostStats().DeviceStats)
}
// computeAllocatedDeviceGroupStats filters hostDeviceGroupStats down to the
// device groups and instance IDs present in devices (the allocated device
// resources). Instances missing from the host stats are logged and skipped.
func (c *Client) computeAllocatedDeviceGroupStats(devices []*structs.AllocatedDeviceResource, hostDeviceGroupStats []*device.DeviceGroupStats) []*device.DeviceGroupStats {
	// basic optimization for the usual case
	if len(devices) == 0 || len(hostDeviceGroupStats) == 0 {
		return nil
	}

	// Build an index of allocated devices.
	// (A running total of device IDs used to be accumulated here but was
	// never read; the dead computation has been removed.)
	adIdx := make(map[structs.DeviceIdTuple][]string, len(devices))
	for _, ds := range devices {
		adIdx[*ds.ID()] = ds.DeviceIDs
	}

	// Collect allocated device stats from host stats
	result := make([]*device.DeviceGroupStats, 0, len(adIdx))
	for _, dg := range hostDeviceGroupStats {
		k := structs.DeviceIdTuple{
			Vendor: dg.Vendor,
			Type:   dg.Type,
			Name:   dg.Name,
		}

		allocatedDeviceIDs, ok := adIdx[k]
		if !ok {
			continue
		}

		rdgStats := &device.DeviceGroupStats{
			Vendor:        dg.Vendor,
			Type:          dg.Type,
			Name:          dg.Name,
			InstanceStats: map[string]*device.DeviceStats{},
		}

		for _, adID := range allocatedDeviceIDs {
			deviceStats, ok := dg.InstanceStats[adID]
			if !ok || deviceStats == nil {
				c.logger.Warn("device not found in stats", "device_id", adID, "device_group_id", k)
				continue
			}
			rdgStats.InstanceStats[adID] = deviceStats
		}

		result = append(result, rdgStats)
	}

	return result
}
// ValidateMigrateToken verifies that a token is for a specific client and
// allocation, and has been created by a trusted party that has privileged
// knowledge of the client's secret identifier
func (c *Client) ValidateMigrateToken(allocID, migrateToken string) bool {
	// With ACLs disabled every token is accepted.
	if !c.config.ACLEnabled {
		return true
	}
	return structs.CompareMigrateToken(allocID, c.secretNodeID(), migrateToken)
}

// GetAllocFS returns the AllocFS interface for the alloc dir of an allocation
func (c *Client) GetAllocFS(allocID string) (allocdir.AllocDirFS, error) {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return nil, err
	}
	return runner.GetAllocDir(), nil
}

// GetAllocState returns a copy of an allocation's state on this client. It
// returns either an AllocState or an unknown allocation error.
func (c *Client) GetAllocState(allocID string) (*arstate.State, error) {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return nil, err
	}
	return runner.AllocState(), nil
}
// GetServers returns the list of nomad servers this client is aware of,
// rendered as sorted address strings.
func (c *Client) GetServers() []string {
	endpoints := c.servers.GetServers()
	res := make([]string, 0, len(endpoints))
	for _, e := range endpoints {
		res = append(res, e.String())
	}
	sort.Strings(res)
	return res
}
// SetServers sets a new list of nomad servers to connect to. As long as one
// server is resolvable no error is returned.
func (c *Client) SetServers(in []string) (int, error) {
	// Non-forced: servers that fail resolution or ping are dropped, and an
	// error is returned only when no endpoint survives (see setServersImpl).
	return c.setServersImpl(in, false)
}
// setServersImpl sets a new list of nomad servers to connect to. If force is
// set, we add the server to the internal serverlist even if the server could not
// be pinged. An error is returned if no endpoints were valid when non-forcing.
//
// Force should be used when setting the servers from the initial configuration
// since the server may be starting up in parallel and initial pings may fail.
func (c *Client) setServersImpl(in []string, force bool) (int, error) {
	// mu guards both endpoints and merr, which are appended to concurrently
	// by the resolver goroutines below.
	var mu sync.Mutex
	var wg sync.WaitGroup
	var merr multierror.Error

	endpoints := make([]*servers.Server, 0, len(in))
	wg.Add(len(in))

	// Resolve and ping each candidate server concurrently.
	for _, s := range in {
		go func(srv string) {
			defer wg.Done()
			addr, err := resolveServer(srv)
			if err != nil {
				mu.Lock()
				c.logger.Debug("ignoring server due to resolution error", "error", err, "server", srv)
				merr.Errors = append(merr.Errors, err)
				mu.Unlock()
				return
			}

			// Try to ping to check if it is a real server
			if err := c.Ping(addr); err != nil {
				mu.Lock()
				merr.Errors = append(merr.Errors, fmt.Errorf("Server at address %s failed ping: %v", addr, err))
				mu.Unlock()

				// If we are forcing the setting of the servers, inject it to
				// the serverlist even if we can't ping immediately.
				if !force {
					return
				}
			}

			mu.Lock()
			endpoints = append(endpoints, &servers.Server{Addr: addr})
			mu.Unlock()
		}(s)
	}

	wg.Wait()

	// Only return errors if no servers are valid
	if len(endpoints) == 0 {
		if len(merr.Errors) > 0 {
			return 0, merr.ErrorOrNil()
		}
		return 0, noServersErr
	}

	c.servers.SetServers(endpoints)
	return len(endpoints), nil
}
// restoreState is used to restore our state from the data dir
// If there are errors restoring a specific allocation it is marked
// as failed whenever possible.
func (c *Client) restoreState() error {
	// Dev mode is ephemeral; nothing was persisted, so there is nothing
	// to restore.
	if c.config.DevMode {
		return nil
	}

	//XXX REMOVED! make a note in backward compat / upgrading doc
	// COMPAT: Remove in 0.7.0
	// 0.6.0 transitioned from individual state files to a single bolt-db.
	// The upgrade path is to:
	// Check if old state exists
	//   If so, restore from that and delete old state
	// Restore using state database

	// Restore allocations
	allocs, allocErrs, err := c.stateDB.GetAllAllocations()
	if err != nil {
		return err
	}

	// Per-allocation load errors are logged but are not fatal for the
	// rest of the restore.
	for allocID, err := range allocErrs {
		c.logger.Error("error restoring alloc", "error", err, "alloc_id", allocID)
		//TODO Cleanup
		// Try to clean up alloc dir
		// Remove boltdb entries?
		// Send to server with clientstatus=failed
	}

	// Load each alloc back
	for _, alloc := range allocs {
		//XXX On Restore we give up on watching previous allocs because
		//    we need the local AllocRunners initialized first. We could
		//    add a second loop to initialize just the alloc watcher.
		prevAllocWatcher := allocwatcher.NoopPrevAlloc{}
		prevAllocMigrator := allocwatcher.NoopPrevAlloc{}

		// Build the runner config under the read lock so we capture a
		// consistent configCopy.
		c.configLock.RLock()
		arConf := &allocrunner.Config{
			Alloc:               alloc,
			Logger:              c.logger,
			ClientConfig:        c.configCopy,
			StateDB:             c.stateDB,
			StateUpdater:        c,
			DeviceStatsReporter: c,
			Consul:              c.consulService,
			Vault:               c.vaultClient,
			PrevAllocWatcher:    prevAllocWatcher,
			PrevAllocMigrator:   prevAllocMigrator,
			DeviceManager:       c.devicemanager,
			DriverManager:       c.drivermanager,
			ServersContactedCh:  c.serversContactedCh,
		}
		c.configLock.RUnlock()

		ar, err := allocrunner.NewAllocRunner(arConf)
		if err != nil {
			c.logger.Error("error running alloc", "error", err, "alloc_id", alloc.ID)
			c.handleInvalidAllocs(alloc, err)
			continue
		}

		// Restore state
		if err := ar.Restore(); err != nil {
			c.logger.Error("error restoring alloc", "error", err, "alloc_id", alloc.ID)
			// Override the status of the alloc to failed
			ar.SetClientStatus(structs.AllocClientStatusFailed)
			// Destroy the alloc runner since this is a failed restore
			ar.Destroy()
			continue
		}

		//XXX is this locking necessary?
		c.allocLock.Lock()
		c.allocs[alloc.ID] = ar
		c.allocLock.Unlock()
	}

	// All allocs restored successfully, run them!
	c.allocLock.Lock()
	for _, ar := range c.allocs {
		go ar.Run()
	}
	c.allocLock.Unlock()

	return nil
}
// handleInvalidAllocs records an alloc whose runner could not be created and
// queues a failed-status update so the server can react.
func (c *Client) handleInvalidAllocs(alloc *structs.Allocation, err error) {
	// Remember the ID so subsequent server updates for this alloc can be
	// ignored (see watchAllocations).
	c.invalidAllocs[alloc.ID] = struct{}{}

	// Mark alloc as failed so server can handle this
	failed := makeFailedAlloc(alloc, err)
	select {
	case c.allocUpdates <- failed:
	case <-c.shutdownCh:
		// Shutting down; drop the update.
	}
}
// saveState is used to snapshot our state into the data dir.
// All alloc runners are persisted concurrently; errors are collected
// and returned as a single multierror.
func (c *Client) saveState() error {
	var wg sync.WaitGroup
	// l guards mErr, which the goroutines below append to.
	var l sync.Mutex
	var mErr multierror.Error
	runners := c.getAllocRunners()
	wg.Add(len(runners))

	for id, ar := range runners {
		go func(id string, ar AllocRunner) {
			err := c.stateDB.PutAllocation(ar.Alloc())
			if err != nil {
				c.logger.Error("error saving alloc state", "error", err, "alloc_id", id)
				l.Lock()
				// Append mutates mErr in place; the returned value
				// is intentionally unused.
				multierror.Append(&mErr, err)
				l.Unlock()
			}
			wg.Done()
		}(id, ar)
	}

	wg.Wait()
	return mErr.ErrorOrNil()
}
// getAllocRunners returns a snapshot of the current set of alloc runners.
// The returned map is a copy, so callers may iterate it without holding
// allocLock.
func (c *Client) getAllocRunners() map[string]AllocRunner {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	snapshot := make(map[string]AllocRunner, len(c.allocs))
	for id, runner := range c.allocs {
		snapshot[id] = runner
	}
	return snapshot
}
// NumAllocs returns the number of un-GC'd allocs this client has. Used to
// fulfill the AllocCounter interface for the GC.
func (c *Client) NumAllocs() int {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	count := 0
	for _, runner := range c.allocs {
		if !runner.IsDestroyed() {
			count++
		}
	}
	return count
}
// nodeID restores, or generates if necessary, a unique node ID and SecretID.
// The node ID is, if available, a persistent unique ID. The secret ID is a
// high-entropy random UUID.
func (c *Client) nodeID() (id, secret string, err error) {
	var hostID string
	hostInfo, err := host.Info()
	if !c.config.NoHostUUID && err == nil {
		// Prefer a stable, anonymized ID derived from the host UUID.
		if hashed, ok := helper.HashUUID(hostInfo.HostID); ok {
			hostID = hashed
		}
	}

	if hostID == "" {
		// Generate a random hostID if no constant ID is available on
		// this platform.
		hostID = uuid.Generate()
	}

	// Do not persist in dev mode
	if c.config.DevMode {
		return hostID, uuid.Generate(), nil
	}

	// Attempt to read existing ID
	idPath := filepath.Join(c.config.StateDir, "client-id")
	idBuf, err := ioutil.ReadFile(idPath)
	if err != nil && !os.IsNotExist(err) {
		return "", "", err
	}

	// Attempt to read existing secret ID
	secretPath := filepath.Join(c.config.StateDir, "secret-id")
	secretBuf, err := ioutil.ReadFile(secretPath)
	if err != nil && !os.IsNotExist(err) {
		return "", "", err
	}

	// Use existing ID if any
	if len(idBuf) != 0 {
		id = strings.ToLower(string(idBuf))
	} else {
		id = hostID

		// Persist the ID. 0600 is used instead of the previous 0700:
		// these are plain data files (the secret must stay owner-only
		// readable) and the execute bit was meaningless.
		if err := ioutil.WriteFile(idPath, []byte(id), 0600); err != nil {
			return "", "", err
		}
	}

	// Use the existing secret if any, otherwise generate and persist one.
	if len(secretBuf) != 0 {
		secret = string(secretBuf)
	} else {
		// Generate new ID
		secret = uuid.Generate()

		// Persist the ID
		if err := ioutil.WriteFile(secretPath, []byte(secret), 0600); err != nil {
			return "", "", err
		}
	}

	return id, secret, nil
}
// setupNode is used to setup the initial node, filling in defaults for any
// field the configuration left unset. It mutates c.config.Node in place.
func (c *Client) setupNode() error {
	node := c.config.Node
	if node == nil {
		node = &structs.Node{}
		c.config.Node = node
	}

	// Generate an ID and secret for the node
	id, secretID, err := c.nodeID()
	if err != nil {
		return fmt.Errorf("node ID setup failed: %v", err)
	}

	node.ID = id
	node.SecretID = secretID

	// Initialize nil maps/structs so later code can use them without
	// nil checks.
	if node.Attributes == nil {
		node.Attributes = make(map[string]string)
	}
	if node.Links == nil {
		node.Links = make(map[string]string)
	}
	if node.Drivers == nil {
		node.Drivers = make(map[string]*structs.DriverInfo)
	}
	if node.Meta == nil {
		node.Meta = make(map[string]string)
	}
	if node.NodeResources == nil {
		node.NodeResources = &structs.NodeResources{}
	}
	if node.ReservedResources == nil {
		node.ReservedResources = &structs.NodeReservedResources{}
	}
	if node.Resources == nil {
		node.Resources = &structs.Resources{}
	}
	if node.Reserved == nil {
		node.Reserved = &structs.Resources{}
	}
	if node.Datacenter == "" {
		node.Datacenter = "dc1"
	}
	// Name falls back first to the hostname, then to the node ID.
	if node.Name == "" {
		node.Name, _ = os.Hostname()
	}
	if node.Name == "" {
		node.Name = node.ID
	}
	node.Status = structs.NodeStatusInit
	return nil
}
// updateNodeFromFingerprint updates the node with the result of
// fingerprinting the node from the diff that was created.
// It holds configLock for the duration and returns the (possibly refreshed)
// node copy.
func (c *Client) updateNodeFromFingerprint(response *fingerprint.FingerprintResponse) *structs.Node {
	c.configLock.Lock()
	defer c.configLock.Unlock()

	nodeHasChanged := false

	// Apply attribute changes; an empty new value deletes the attribute.
	for name, newVal := range response.Attributes {
		oldVal := c.config.Node.Attributes[name]
		if oldVal == newVal {
			continue
		}

		nodeHasChanged = true
		if newVal == "" {
			delete(c.config.Node.Attributes, name)
		} else {
			c.config.Node.Attributes[name] = newVal
		}
	}

	// update node links and resources from the diff created from
	// fingerprinting
	for name, newVal := range response.Links {
		oldVal := c.config.Node.Links[name]
		if oldVal == newVal {
			continue
		}

		nodeHasChanged = true
		if newVal == "" {
			delete(c.config.Node.Links, name)
		} else {
			c.config.Node.Links[name] = newVal
		}
	}

	// COMPAT(0.10): Remove in 0.10
	// update the response networks with the config
	// if we still have node changes, merge them
	if response.Resources != nil {
		response.Resources.Networks = updateNetworks(
			c.config.Node.Resources.Networks,
			response.Resources.Networks,
			c.config)
		if !c.config.Node.Resources.Equals(response.Resources) {
			c.config.Node.Resources.Merge(response.Resources)
			nodeHasChanged = true
		}
	}

	// update the response networks with the config
	// if we still have node changes, merge them
	if response.NodeResources != nil {
		response.NodeResources.Networks = updateNetworks(
			c.config.Node.NodeResources.Networks,
			response.NodeResources.Networks,
			c.config)
		if !c.config.Node.NodeResources.Equals(response.NodeResources) {
			c.config.Node.NodeResources.Merge(response.NodeResources)
			nodeHasChanged = true
		}
	}

	// Refresh the config copy and trigger a node update only when
	// something actually changed.
	if nodeHasChanged {
		c.updateNodeLocked()
	}

	return c.configCopy.Node
}
// updateNetworks preserves manually configured network options, but
// applies fingerprint updates
func updateNetworks(ns structs.Networks, up structs.Networks, c *config.Config) structs.Networks {
	if c.NetworkInterface == "" {
		ns = up
	} else {
		// A specific device is configured: keep only fingerprinted
		// entries for that device.
		var matched []*structs.NetworkResource
		for _, n := range up {
			if n.Device == c.NetworkInterface {
				matched = append(matched, n)
			}
		}

		// Fall back to the configured interfaces when the fingerprint
		// produced nothing for the chosen device.
		if len(matched) != 0 {
			ns = matched
		}
	}

	// Apply the configured speed override to every resulting interface.
	if c.NetworkSpeed != 0 {
		for _, n := range ns {
			n.MBits = c.NetworkSpeed
		}
	}

	return ns
}
// retryIntv calculates a retry interval value given the base.
// In dev mode a short fixed interval is used; otherwise a random stagger
// is added to the base to avoid synchronized retries across clients.
func (c *Client) retryIntv(base time.Duration) time.Duration {
	if c.config.DevMode {
		return devModeRetryIntv
	}
	return base + lib.RandomStagger(base)
}
// registerAndHeartbeat is a long lived goroutine used to register the client
// and then start heartbeating to the server.
func (c *Client) registerAndHeartbeat() {
	// Register the node
	c.retryRegisterNode()

	// Start watching changes for node changes
	go c.watchNodeUpdates()

	// Start watching for emitting node events
	go c.watchNodeEvents()

	// Setup the heartbeat timer, for the initial registration
	// we want to do this quickly. We want to do it extra quickly
	// in development mode.
	var heartbeat <-chan time.Time
	if c.config.DevMode {
		heartbeat = time.After(0)
	} else {
		heartbeat = time.After(lib.RandomStagger(initialHeartbeatStagger))
	}

	for {
		// Wake on RPC recovery, on the heartbeat timer, or exit on shutdown.
		select {
		case <-c.rpcRetryWatcher():
		case <-heartbeat:
		case <-c.shutdownCh:
			return
		}

		if err := c.updateNodeStatus(); err != nil {
			// The servers have changed such that this node has not been
			// registered before
			if strings.Contains(err.Error(), "node not found") {
				// Re-register the node
				c.logger.Info("re-registering node")
				c.retryRegisterNode()
				heartbeat = time.After(lib.RandomStagger(initialHeartbeatStagger))
			} else {
				intv := c.getHeartbeatRetryIntv(err)
				c.logger.Error("error heartbeating. retrying", "error", err, "period", intv)
				heartbeat = time.After(intv)

				// If heartbeating fails, trigger Consul discovery
				c.triggerDiscovery()
			}
		} else {
			// Success: arm the next heartbeat using the TTL the server
			// handed back (stored by updateNodeStatus).
			c.heartbeatLock.Lock()
			heartbeat = time.After(c.heartbeatTTL)
			c.heartbeatLock.Unlock()
		}
	}
}
// getHeartbeatRetryIntv is used to retrieve the time to wait before attempting
// another heartbeat.
func (c *Client) getHeartbeatRetryIntv(err error) time.Duration {
	if c.config.DevMode {
		return devModeRetryIntv
	}

	// Collect the useful heartbeat info
	c.heartbeatLock.Lock()
	haveHeartbeated := c.haveHeartbeated
	last := c.lastHeartbeat
	ttl := c.heartbeatTTL
	c.heartbeatLock.Unlock()

	// If we haven't even successfully heartbeated once or there is no leader
	// treat it as a registration. In the case that there is a leadership loss,
	// we will have our heartbeat timer reset to a much larger threshold, so
	// do not put unnecessary pressure on the new leader.
	if !haveHeartbeated || err == structs.ErrNoLeader {
		return c.retryIntv(registerRetryIntv)
	}

	// Determine how much time we have left to heartbeat
	left := last.Add(ttl).Sub(time.Now())

	// Logic for retrying is:
	// * Do not retry faster than once a second
	// * Do not retry less that once every 30 seconds
	// * If we have missed the heartbeat by more than 30 seconds, start to use
	//   the absolute time since we do not want to retry indefinitely
	switch {
	case left < -30*time.Second:
		// Make left the absolute value so we delay and jitter properly.
		left *= -1
	case left < 0:
		return time.Second + lib.RandomStagger(time.Second)
	default:
		// Still within the TTL window; fall through to stagger below.
	}

	// Jitter within the remaining window, clamped to [1s, 30s].
	stagger := lib.RandomStagger(left)
	switch {
	case stagger < time.Second:
		return time.Second + lib.RandomStagger(time.Second)
	case stagger > 30*time.Second:
		return 25*time.Second + lib.RandomStagger(5*time.Second)
	default:
		return stagger
	}
}
// periodicSnapshot is a long lived goroutine used to periodically snapshot the
// state of the client
func (c *Client) periodicSnapshot() {
	// Create a snapshot timer. time.After is re-armed after each save so
	// the interval is measured from the end of one save to the start of
	// the next.
	snapshot := time.After(stateSnapshotIntv)

	for {
		select {
		case <-snapshot:
			snapshot = time.After(stateSnapshotIntv)
			if err := c.saveState(); err != nil {
				c.logger.Error("error saving state", "error", err)
			}

		case <-c.shutdownCh:
			return
		}
	}
}
// run is a long lived goroutine used to run the client. Shutdown() stops it first
func (c *Client) run() {
	// Watch for changes in allocations
	allocUpdates := make(chan *allocUpdates, 8)
	go c.watchAllocations(allocUpdates)

	for {
		select {
		case update := <-allocUpdates:
			// Don't apply updates while shutting down.
			c.shutdownLock.Lock()
			if c.shutdown {
				c.shutdownLock.Unlock()
				return
			}

			// Apply updates inside lock to prevent a concurrent
			// shutdown.
			c.runAllocs(update)
			c.shutdownLock.Unlock()

		case <-c.shutdownCh:
			return
		}
	}
}
// submitNodeEvents is used to submit a client-side node event. Examples of
// these kinds of events include when a driver moves from healthy to unhealthy
// (and vice versa)
func (c *Client) submitNodeEvents(events []*structs.NodeEvent) error {
	req := structs.EmitNodeEventsRequest{
		NodeEvents: map[string][]*structs.NodeEvent{
			c.NodeID(): events,
		},
		WriteRequest: structs.WriteRequest{Region: c.Region()},
	}

	var resp structs.EmitNodeEventsResponse
	if err := c.RPC("Node.EmitEvents", &req, &resp); err != nil {
		return fmt.Errorf("Emitting node events failed: %v", err)
	}

	return nil
}
// watchNodeEvents is a handler which receives node events and on a interval
// and submits them in batch format to the server
func (c *Client) watchNodeEvents() {
	// batchEvents stores events that have yet to be published
	var batchEvents []*structs.NodeEvent

	timer := stoppedTimer()
	defer timer.Stop()

	for {
		select {
		case event := <-c.triggerEmitNodeEvent:
			// NOTE(review): with <=, the batch can briefly hold
			// MaxRetainedNodeEvents+1 events before the drop branch
			// kicks in — confirm whether < was intended.
			if l := len(batchEvents); l <= structs.MaxRetainedNodeEvents {
				batchEvents = append(batchEvents, event)
			} else {
				// Drop the oldest event
				c.logger.Warn("dropping node event", "node_event", batchEvents[0])
				batchEvents = append(batchEvents[1:], event)
			}
			// (Re)arm the flush timer after every received event.
			timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
		case <-timer.C:
			if err := c.submitNodeEvents(batchEvents); err != nil {
				c.logger.Error("error submitting node events", "error", err)
				timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
			} else {
				// Reset the events since we successfully sent them.
				batchEvents = []*structs.NodeEvent{}
			}
		case <-c.shutdownCh:
			return
		}
	}
}
// triggerNodeEvent triggers a emit node event
func (c *Client) triggerNodeEvent(nodeEvent *structs.NodeEvent) {
	// Non-blocking send to the event watcher.
	// NOTE(review): when the default branch fires the event is not
	// delivered — confirm the channel buffering makes this acceptable.
	select {
	case c.triggerEmitNodeEvent <- nodeEvent:
		// handed off to the watcher goroutine
	default:
		// watcher busy; event not queued
	}
}
// retryRegisterNode is used to register the node or update the registration and
// retry in case of failure.
func (c *Client) retryRegisterNode() {
	for {
		if err := c.registerNode(); err == nil {
			// Registered!
			return
		} else if err == noServersErr {
			c.logger.Debug("registration waiting on servers")
			c.triggerDiscovery()
		} else {
			c.logger.Error("error registering", "error", err)
		}

		// Back off, but wake early on RPC recovery or shutdown.
		select {
		case <-c.rpcRetryWatcher():
		case <-time.After(c.retryIntv(registerRetryIntv)):
		case <-c.shutdownCh:
			return
		}
	}
}
// registerNode is used to register the node or update the registration
func (c *Client) registerNode() error {
	node := c.Node()
	req := structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: c.Region()},
	}
	var resp structs.NodeUpdateResponse
	if err := c.RPC("Node.Register", &req, &resp); err != nil {
		return err
	}

	// Update the node status to ready after we register.
	c.configLock.Lock()
	node.Status = structs.NodeStatusReady
	c.config.Node.Status = structs.NodeStatusReady
	c.configLock.Unlock()

	c.logger.Info("node registration complete")
	if len(resp.EvalIDs) != 0 {
		c.logger.Debug("evaluations triggered by node registration", "num_evals", len(resp.EvalIDs))
	}

	// Record the heartbeat TTL the server assigned so the heartbeat loop
	// knows when the next beat is due.
	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()
	c.lastHeartbeat = time.Now()
	c.heartbeatTTL = resp.HeartbeatTTL
	return nil
}
// updateNodeStatus is used to heartbeat and update the status of the node.
// It also refreshes the known server set from the heartbeat response and
// triggers Consul discovery when no leader is reported.
func (c *Client) updateNodeStatus() error {
	start := time.Now()
	req := structs.NodeUpdateStatusRequest{
		NodeID:       c.NodeID(),
		Status:       structs.NodeStatusReady,
		WriteRequest: structs.WriteRequest{Region: c.Region()},
	}
	var resp structs.NodeUpdateResponse
	if err := c.RPC("Node.UpdateStatus", &req, &resp); err != nil {
		c.triggerDiscovery()
		return fmt.Errorf("failed to update status: %v", err)
	}
	end := time.Now()

	if len(resp.EvalIDs) != 0 {
		c.logger.Debug("evaluations triggered by node update", "num_evals", len(resp.EvalIDs))
	}

	// Update the last heartbeat and the new TTL, capturing the old values
	c.heartbeatLock.Lock()
	last := c.lastHeartbeat
	oldTTL := c.heartbeatTTL
	haveHeartbeated := c.haveHeartbeated
	c.lastHeartbeat = time.Now()
	c.heartbeatTTL = resp.HeartbeatTTL
	c.haveHeartbeated = true
	c.heartbeatLock.Unlock()
	c.logger.Trace("next heartbeat", "period", resp.HeartbeatTTL)

	if resp.Index != 0 {
		c.logger.Debug("state updated", "node_status", req.Status)

		// We have potentially missed our TTL log how delayed we were
		if haveHeartbeated {
			c.logger.Warn("missed heartbeat",
				"req_latency", end.Sub(start), "heartbeat_ttl", oldTTL, "since_last_heartbeat", time.Since(last))
		}
	}

	// Update the number of nodes in the cluster so we can adjust our server
	// rebalance rate.
	c.servers.SetNumNodes(resp.NumNodes)

	// Convert []*NodeServerInfo to []*servers.Server
	nomadServers := make([]*servers.Server, 0, len(resp.Servers))
	for _, s := range resp.Servers {
		addr, err := resolveServer(s.RPCAdvertiseAddr)
		if err != nil {
			c.logger.Warn("ignoring invalid server", "error", err, "server", s.RPCAdvertiseAddr)
			continue
		}
		e := &servers.Server{DC: s.Datacenter, Addr: addr}
		nomadServers = append(nomadServers, e)
	}
	if len(nomadServers) == 0 {
		return fmt.Errorf("heartbeat response returned no valid servers")
	}
	c.servers.SetServers(nomadServers)

	// Begin polling Consul if there is no Nomad leader.  We could be
	// heartbeating to a Nomad server that is in the minority of a
	// partition of the Nomad server quorum, but this Nomad Agent still
	// has connectivity to the existing majority of Nomad Servers, but
	// only if it queries Consul.
	if resp.LeaderRPCAddr == "" {
		c.triggerDiscovery()
	}

	return nil
}
// AllocStateUpdated asynchronously updates the server with the current state
// of an allocations and its tasks.
func (c *Client) AllocStateUpdated(alloc *structs.Allocation) {
	if alloc.Terminated() {
		// Terminated, mark for GC if we're still tracking this alloc
		// runner. If it's not being tracked that means the server has
		// already GC'd it (see removeAlloc).
		ar, err := c.getAllocRunner(alloc.ID)

		if err == nil {
			c.garbageCollector.MarkForCollection(alloc.ID, ar)

			// Trigger a GC in case we're over thresholds and just
			// waiting for eligible allocs.
			c.garbageCollector.Trigger()
		}
	}

	// Strip all the information that can be reconstructed at the server.  Only
	// send the fields that are updatable by the client.
	stripped := new(structs.Allocation)
	stripped.ID = alloc.ID
	stripped.NodeID = c.NodeID()
	stripped.TaskStates = alloc.TaskStates
	stripped.ClientStatus = alloc.ClientStatus
	stripped.ClientDescription = alloc.ClientDescription
	stripped.DeploymentStatus = alloc.DeploymentStatus

	// Queue the update for the allocSync batcher; drop it on shutdown.
	select {
	case c.allocUpdates <- stripped:
	case <-c.shutdownCh:
	}
}
// allocSync is a long lived function that batches allocation updates to the
// server.
func (c *Client) allocSync() {
	// staggered tracks whether the ticker is currently on the backoff
	// interval so it can be restored after a successful send.
	staggered := false
	syncTicker := time.NewTicker(allocSyncIntv)
	updates := make(map[string]*structs.Allocation)
	for {
		select {
		case <-c.shutdownCh:
			syncTicker.Stop()
			return
		case alloc := <-c.allocUpdates:
			// Batch the allocation updates until the timer triggers.
			// Later updates for the same alloc overwrite earlier ones.
			updates[alloc.ID] = alloc
		case <-syncTicker.C:
			// Fast path if there are no updates
			if len(updates) == 0 {
				continue
			}

			sync := make([]*structs.Allocation, 0, len(updates))
			for _, alloc := range updates {
				sync = append(sync, alloc)
			}

			// Send to server.
			args := structs.AllocUpdateRequest{
				Alloc:        sync,
				WriteRequest: structs.WriteRequest{Region: c.Region()},
			}

			var resp structs.GenericResponse
			if err := c.RPC("Node.UpdateAlloc", &args, &resp); err != nil {
				// Keep the batch (it will be retried) and back off.
				c.logger.Error("error updating allocations", "error", err)
				syncTicker.Stop()
				syncTicker = time.NewTicker(c.retryIntv(allocSyncRetryIntv))
				staggered = true
			} else {
				updates = make(map[string]*structs.Allocation)
				// Restore the normal interval after a backoff.
				if staggered {
					syncTicker.Stop()
					syncTicker = time.NewTicker(allocSyncIntv)
					staggered = false
				}
			}
		}
	}
}
// allocUpdates holds the results of receiving updated allocations from the
// servers. It is produced by watchAllocations and consumed by runAllocs.
type allocUpdates struct {
	// pulled is the set of allocations that were downloaded from the servers.
	pulled map[string]*structs.Allocation

	// filtered is the set of allocations that were not pulled because their
	// AllocModifyIndex didn't change.
	filtered map[string]struct{}

	// migrateTokens are a list of tokens necessary for when clients pull data
	// from authorized volumes
	migrateTokens map[string]string
}
// watchAllocations is used to scan for updates to allocations. It runs a
// blocking query loop against the servers and pushes batched results onto
// the updates channel until shutdown.
func (c *Client) watchAllocations(updates chan *allocUpdates) {
	// The request and response for getting the map of allocations that should
	// be running on the Node to their AllocModifyIndex which is incremented
	// when the allocation is updated by the servers.
	req := structs.NodeSpecificRequest{
		NodeID:   c.NodeID(),
		SecretID: c.secretNodeID(),
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}
	var resp structs.NodeClientAllocsResponse

	// The request and response for pulling down the set of allocations that are
	// new, or updated server side.
	allocsReq := structs.AllocsGetRequest{
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}
	var allocsResp structs.AllocsGetResponse

OUTER:
	for {
		// Get the allocation modify index map, blocking for updates. We will
		// use this to determine exactly what allocations need to be downloaded
		// in full.
		resp = structs.NodeClientAllocsResponse{}
		err := c.RPC("Node.GetClientAllocs", &req, &resp)
		if err != nil {
			// Shutdown often causes EOF errors, so check for shutdown first
			select {
			case <-c.shutdownCh:
				return
			default:
			}

			// COMPAT: Remove in 0.6. This is to allow the case in which the
			// servers are not fully upgraded before the clients register. This
			// can cause the SecretID to be lost
			if strings.Contains(err.Error(), "node secret ID does not match") {
				c.logger.Debug("secret mismatch; re-registering node", "error", err)
				c.retryRegisterNode()
			} else if err != noServersErr {
				c.logger.Error("error querying node allocations", "error", err)
			}

			// Back off, waking early on RPC recovery or shutdown.
			retry := c.retryIntv(getAllocRetryIntv)
			select {
			case <-c.rpcRetryWatcher():
				continue
			case <-time.After(retry):
				continue
			case <-c.shutdownCh:
				return
			}
		}

		// Check for shutdown
		select {
		case <-c.shutdownCh:
			return
		default:
		}

		// Filter all allocations whose AllocModifyIndex was not incremented.
		// These are the allocations who have either not been updated, or whose
		// updates are a result of the client sending an update for the alloc.
		// This lets us reduce the network traffic to the server as we don't
		// need to pull all the allocations.
		var pull []string
		filtered := make(map[string]struct{})
		var pullIndex uint64
		for allocID, modifyIndex := range resp.Allocs {
			// Pull the allocation if we don't have an alloc runner for the
			// allocation or if the alloc runner requires an updated allocation.
			//XXX Part of Client alloc index tracking exp
			c.allocLock.RLock()
			currentAR, ok := c.allocs[allocID]
			c.allocLock.RUnlock()

			// Ignore alloc updates for allocs that are invalid because of initialization errors
			_, isInvalid := c.invalidAllocs[allocID]

			if (!ok || modifyIndex > currentAR.Alloc().AllocModifyIndex) && !isInvalid {
				// Only pull allocs that are required. Filtered
				// allocs might be at a higher index, so ignore
				// it.
				if modifyIndex > pullIndex {
					pullIndex = modifyIndex
				}
				pull = append(pull, allocID)
			} else {
				filtered[allocID] = struct{}{}
			}
		}

		// Pull the allocations that passed filtering.
		allocsResp.Allocs = nil
		var pulledAllocs map[string]*structs.Allocation
		if len(pull) != 0 {
			// Pull the allocations that need to be updated.
			allocsReq.AllocIDs = pull
			allocsReq.MinQueryIndex = pullIndex - 1
			allocsResp = structs.AllocsGetResponse{}
			if err := c.RPC("Alloc.GetAllocs", &allocsReq, &allocsResp); err != nil {
				c.logger.Error("error querying updated allocations", "error", err)
				retry := c.retryIntv(getAllocRetryIntv)
				select {
				case <-c.rpcRetryWatcher():
					continue
				case <-time.After(retry):
					continue
				case <-c.shutdownCh:
					return
				}
			}

			// Ensure that we received all the allocations we wanted
			pulledAllocs = make(map[string]*structs.Allocation, len(allocsResp.Allocs))
			for _, alloc := range allocsResp.Allocs {
				pulledAllocs[alloc.ID] = alloc
			}

			for _, desiredID := range pull {
				if _, ok := pulledAllocs[desiredID]; !ok {
					// We didn't get everything we wanted. Do not update the
					// MinQueryIndex, sleep and then retry.
					wait := c.retryIntv(2 * time.Second)
					select {
					case <-time.After(wait):
						// Wait for the server we contact to receive the
						// allocations
						continue OUTER
					case <-c.shutdownCh:
						return
					}
				}
			}

			// Check for shutdown
			select {
			case <-c.shutdownCh:
				return
			default:
			}
		}

		c.logger.Debug("updated allocations", "index", resp.Index,
			"total", len(resp.Allocs), "pulled", len(allocsResp.Allocs), "filtered", len(filtered))

		// Update the query index.
		if resp.Index > req.MinQueryIndex {
			req.MinQueryIndex = resp.Index
		}

		// Push the updates.
		update := &allocUpdates{
			filtered:      filtered,
			pulled:        pulledAllocs,
			migrateTokens: resp.MigrateTokens,
		}
		select {
		case updates <- update:
		case <-c.shutdownCh:
			return
		}
	}
}
// updateNodeLocked updates the Node copy and triggers the client to send the
// updated Node to the server. This should be done while the caller holds the
// configLock lock.
func (c *Client) updateNodeLocked() {
	// Update the config copy.
	node := c.config.Node.Copy()
	c.configCopy.Node = node

	// Non-blocking wake of the node-update watcher; if it is already
	// pending the signal is redundant and may be dropped.
	select {
	case c.triggerNodeUpdate <- struct{}{}:
		// Node update goroutine was released to execute
	default:
		// Node update goroutine was already running
	}
}
// watchNodeUpdates blocks until it is edge triggered. Once triggered,
// it will update the client node copy and re-register the node.
func (c *Client) watchNodeUpdates() {
	// hasChanged debounces triggers: multiple triggers within the timer
	// window collapse into a single re-registration.
	var hasChanged bool

	timer := stoppedTimer()
	defer timer.Stop()

	for {
		select {
		case <-timer.C:
			c.logger.Debug("state changed, updating node and re-registering")
			c.retryRegisterNode()
			hasChanged = false
		case <-c.triggerNodeUpdate:
			if hasChanged {
				continue
			}
			hasChanged = true
			timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
		case <-c.shutdownCh:
			return
		}
	}
}
// runAllocs is invoked when we get an updated set of allocations.
// It diffs against the currently tracked allocs and removes, updates,
// and adds runners accordingly.
func (c *Client) runAllocs(update *allocUpdates) {
	// Get the existing allocs
	c.allocLock.RLock()
	existing := make(map[string]uint64, len(c.allocs))
	for id, ar := range c.allocs {
		existing[id] = ar.Alloc().AllocModifyIndex
	}
	c.allocLock.RUnlock()

	// Diff the existing and updated allocations
	diff := diffAllocs(existing, update)
	c.logger.Debug("allocation updates", "added", len(diff.added), "removed", len(diff.removed),
		"updated", len(diff.updated), "ignored", len(diff.ignore))

	errs := 0

	// Remove the old allocations
	for _, remove := range diff.removed {
		c.removeAlloc(remove)
	}

	// Update the existing allocations
	for _, update := range diff.updated {
		c.logger.Trace("updating alloc", "alloc_id", update.ID, "index", update.AllocModifyIndex)
		c.updateAlloc(update)
	}

	// Make room for new allocations before running
	if err := c.garbageCollector.MakeRoomFor(diff.added); err != nil {
		c.logger.Error("error making room for new allocations", "error", err)
		errs++
	}

	// Start the new allocations
	for _, add := range diff.added {
		migrateToken := update.migrateTokens[add.ID]
		if err := c.addAlloc(add, migrateToken); err != nil {
			c.logger.Error("error adding alloc", "error", err, "alloc_id", add.ID)
			errs++
			// We mark the alloc as failed and send an update to the server
			// We track the fact that creating an allocrunner failed so that we don't send updates again
			if add.ClientStatus != structs.AllocClientStatusFailed {
				c.handleInvalidAllocs(add, err)
			}
		}
	}

	// Mark servers as having been contacted so blocked tasks that failed
	// to restore can now restart.
	c.serversContactedOnce.Do(func() {
		close(c.serversContactedCh)
	})

	// Trigger the GC once more now that new allocs are started that could
	// have caused thresholds to be exceeded
	c.garbageCollector.Trigger()
	c.logger.Debug("allocation updates applied", "added", len(diff.added), "removed", len(diff.removed),
		"updated", len(diff.updated), "ignored", len(diff.ignore), "errors", errs)
}
// makeFailedAlloc creates a stripped down version of the allocation passed in
// with its status set to failed and other fields needed for the server to be
// able to examine deployment and task states
func makeFailedAlloc(add *structs.Allocation, err error) *structs.Allocation {
	failed := &structs.Allocation{
		ID:                add.ID,
		NodeID:            add.NodeID,
		ClientStatus:      structs.AllocClientStatusFailed,
		ClientDescription: fmt.Sprintf("Unable to add allocation due to error: %v", err),
	}

	// Reuse the original task states when present.
	if add.TaskStates != nil {
		failed.TaskStates = add.TaskStates
	} else {
		failed.TaskStates = make(map[string]*structs.TaskState)
	}

	now := time.Now()

	if add.DeploymentStatus.HasHealth() {
		// Never change deployment health once it has been set
		failed.DeploymentStatus = add.DeploymentStatus.Copy()
	} else {
		failed.DeploymentStatus = &structs.AllocDeploymentStatus{
			Healthy:   helper.BoolToPtr(false),
			Timestamp: now,
		}
	}

	// Stamp a finish time on every task in the group so the server sees
	// them as terminal.
	tg := add.Job.LookupTaskGroup(add.TaskGroup)
	if tg == nil {
		return failed
	}
	for _, task := range tg.Tasks {
		ts, ok := failed.TaskStates[task.Name]
		if !ok {
			ts = &structs.TaskState{}
			failed.TaskStates[task.Name] = ts
		}
		if ts.FinishedAt.IsZero() {
			ts.FinishedAt = now
		}
	}

	return failed
}
// removeAlloc is invoked when we should remove an allocation because it has
// been removed by the server.
func (c *Client) removeAlloc(allocID string) {
	c.allocLock.Lock()
	defer c.allocLock.Unlock()

	ar, ok := c.allocs[allocID]
	if !ok {
		// invalidAllocs is read and written from multiple goroutines, so
		// it must be accessed under its own lock (fix: previously this map
		// was touched with no synchronization beyond allocLock, which other
		// writers of invalidAllocs do not hold).
		c.invalidAllocsLock.Lock()
		if _, ok := c.invalidAllocs[allocID]; ok {
			// Removing from invalid allocs map if present
			delete(c.invalidAllocs, allocID)
		} else {
			// Alloc is unknown, log a warning.
			c.logger.Warn("cannot remove nonexistent alloc", "alloc_id", allocID, "error", "alloc not found")
		}
		c.invalidAllocsLock.Unlock()
		return
	}

	// Stop tracking alloc runner as it's been GC'd by the server
	delete(c.allocs, allocID)

	// Ensure the GC has a reference and then collect. Collecting through the GC
	// applies rate limiting
	c.garbageCollector.MarkForCollection(allocID, ar)

	// GC immediately since the server has GC'd it
	go c.garbageCollector.Collect(allocID)
}
// updateAlloc is invoked when we should update an allocation
func (c *Client) updateAlloc(update *structs.Allocation) {
	runner, err := c.getAllocRunner(update.ID)
	if err != nil {
		c.logger.Warn("cannot update nonexistent alloc", "alloc_id", update.ID)
		return
	}

	// Persist the updated alloc locally before handing it to the runner. A
	// persistence failure is logged but does not block the in-memory update.
	if err := c.stateDB.PutAllocation(update); err != nil {
		c.logger.Error("error persisting updated alloc locally", "error", err, "alloc_id", update.ID)
	}

	runner.Update(update)
}
// addAlloc is invoked when we should add an allocation. It persists the alloc,
// builds an alloc runner for it (wiring up previous/preempted alloc watchers),
// registers the runner in c.allocs, and starts it in its own goroutine.
// migrateToken authorizes data migration from a previous allocation.
func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error {
	c.allocLock.Lock()
	defer c.allocLock.Unlock()

	// Check if we already have an alloc runner
	if _, ok := c.allocs[alloc.ID]; ok {
		c.logger.Debug("dropping duplicate add allocation request", "alloc_id", alloc.ID)
		return nil
	}

	// Initialize local copy of alloc before creating the alloc runner so
	// we can't end up with an alloc runner that does not have an alloc.
	if err := c.stateDB.PutAllocation(alloc); err != nil {
		return err
	}

	// Collect any preempted allocations to pass into the previous alloc watcher
	var preemptedAllocs map[string]allocwatcher.AllocRunnerMeta
	if len(alloc.PreemptedAllocations) > 0 {
		preemptedAllocs = make(map[string]allocwatcher.AllocRunnerMeta)
		for _, palloc := range alloc.PreemptedAllocations {
			// Note: a missing runner yields a nil map value here; presumably
			// the watcher tolerates that — TODO confirm.
			preemptedAllocs[palloc] = c.allocs[palloc]
		}
	}

	// Since only the Client has access to other AllocRunners and the RPC
	// client, create the previous allocation watcher here.
	watcherConfig := allocwatcher.Config{
		Alloc:            alloc,
		PreviousRunner:   c.allocs[alloc.PreviousAllocation],
		PreemptedRunners: preemptedAllocs,
		RPC:              c,
		Config:           c.configCopy,
		MigrateToken:     migrateToken,
		Logger:           c.logger,
	}
	prevAllocWatcher, prevAllocMigrator := allocwatcher.NewAllocWatcher(watcherConfig)

	// Copy the config since the node can be swapped out as it is being updated.
	// The long term fix is to pass in the config and node separately and then
	// we don't have to do a copy.
	c.configLock.RLock()
	arConf := &allocrunner.Config{
		Alloc:               alloc,
		Logger:              c.logger,
		ClientConfig:        c.configCopy,
		StateDB:             c.stateDB,
		Consul:              c.consulService,
		Vault:               c.vaultClient,
		StateUpdater:        c,
		DeviceStatsReporter: c,
		PrevAllocWatcher:    prevAllocWatcher,
		PrevAllocMigrator:   prevAllocMigrator,
		DeviceManager:       c.devicemanager,
		DriverManager:       c.drivermanager,
	}
	c.configLock.RUnlock()

	ar, err := allocrunner.NewAllocRunner(arConf)
	if err != nil {
		return err
	}

	// Store the alloc runner.
	c.allocs[alloc.ID] = ar

	go ar.Run()
	return nil
}
// setupVaultClient creates an object to periodically renew tokens and secrets
// with vault.
func (c *Client) setupVaultClient() error {
	vc, err := vaultclient.NewVaultClient(c.config.VaultConfig, c.logger, c.deriveToken)
	c.vaultClient = vc
	if err != nil {
		return err
	}

	// Guard against a nil client being returned without an error.
	if c.vaultClient == nil {
		c.logger.Error("failed to create vault client")
		return fmt.Errorf("failed to create vault client")
	}

	// Start renewing tokens and secrets
	c.vaultClient.Start()

	return nil
}
// deriveToken takes in an allocation and a set of tasks and derives vault
// tokens for each of the tasks, unwraps all of them using the supplied vault
// client and returns a map of unwrapped tokens, indexed by the task name.
// Recoverable errors (per structs.NewRecoverableError) indicate the caller
// may retry.
func (c *Client) deriveToken(alloc *structs.Allocation, taskNames []string, vclient *vaultapi.Client) (map[string]string, error) {
	vlogger := c.logger.Named("vault")

	if alloc == nil {
		return nil, fmt.Errorf("nil allocation")
	}
	// len() of a nil slice is 0, so the explicit nil check is unnecessary.
	if len(taskNames) == 0 {
		return nil, fmt.Errorf("missing task names")
	}

	group := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
	if group == nil {
		return nil, fmt.Errorf("group name in allocation is not present in job")
	}

	verifiedTasks := []string{}
	// Check if the given task names actually exist in the allocation
	for _, taskName := range taskNames {
		found := false
		for _, task := range group.Tasks {
			if task.Name == taskName {
				found = true
				break
			}
		}
		if !found {
			vlogger.Error("task not found in the allocation", "task_name", taskName)
			return nil, fmt.Errorf("task %q not found in the allocation", taskName)
		}
		verifiedTasks = append(verifiedTasks, taskName)
	}

	// DeriveVaultToken of nomad server can take in a set of tasks and
	// creates tokens for all the tasks.
	req := &structs.DeriveVaultTokenRequest{
		NodeID:   c.NodeID(),
		SecretID: c.secretNodeID(),
		AllocID:  alloc.ID,
		Tasks:    verifiedTasks,
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: false,
		},
	}

	// Derive the tokens
	var resp structs.DeriveVaultTokenResponse
	if err := c.RPC("Node.DeriveVaultToken", &req, &resp); err != nil {
		vlogger.Error("error making derive token RPC", "error", err)
		return nil, fmt.Errorf("DeriveVaultToken RPC failed: %v", err)
	}
	if resp.Error != nil {
		vlogger.Error("error deriving vault tokens", "error", resp.Error)
		return nil, structs.NewWrappedServerError(resp.Error)
	}
	if resp.Tasks == nil {
		vlogger.Error("error derivng vault token", "error", "invalid response")
		return nil, fmt.Errorf("failed to derive vault tokens: invalid response")
	}

	unwrappedTokens := make(map[string]string)

	// Retrieve the wrapped tokens from the response and unwrap it
	for _, taskName := range verifiedTasks {
		// Get the wrapped token
		wrappedToken, ok := resp.Tasks[taskName]
		if !ok {
			vlogger.Error("wrapped token missing for task", "task_name", taskName)
			return nil, fmt.Errorf("wrapped token missing for task %q", taskName)
		}

		// Unwrap the vault token
		unwrapResp, err := vclient.Logical().Unwrap(wrappedToken)
		if err != nil {
			if structs.VaultUnrecoverableError.MatchString(err.Error()) {
				return nil, err
			}

			// The error is recoverable
			return nil, structs.NewRecoverableError(
				fmt.Errorf("failed to unwrap the token for task %q: %v", taskName, err), true)
		}

		// Validate the response
		var validationErr error
		if unwrapResp == nil {
			validationErr = fmt.Errorf("Vault returned nil secret when unwrapping")
		} else if unwrapResp.Auth == nil {
			validationErr = fmt.Errorf("Vault returned unwrap secret with nil Auth. Secret warnings: %v", unwrapResp.Warnings)
		} else if unwrapResp.Auth.ClientToken == "" {
			validationErr = fmt.Errorf("Vault returned unwrap secret with empty Auth.ClientToken. Secret warnings: %v", unwrapResp.Warnings)
		}
		if validationErr != nil {
			// Fix: previously this logged "err", which is always nil here
			// (the Unwrap call above succeeded); log the actual validation
			// failure instead.
			vlogger.Warn("error unwrapping token", "error", validationErr)
			return nil, structs.NewRecoverableError(validationErr, true)
		}

		// Append the unwrapped token to the return value
		unwrappedTokens[taskName] = unwrapResp.Auth.ClientToken
	}

	return unwrappedTokens, nil
}
// triggerDiscovery causes a Consul discovery to begin (if one hasn't already)
func (c *Client) triggerDiscovery() {
	// Non-blocking send: safe to call from any goroutine at any rate; at most
	// one discovery request is ever queued.
	select {
	case c.triggerDiscoveryCh <- struct{}{}:
		// Discovery goroutine was released to execute
	default:
		// Discovery goroutine was already running
	}
}
// consulDiscovery waits for the signal to attempt server discovery via Consul.
// It's intended to be started in a goroutine. See triggerDiscovery() for
// causing consul discovery from other code locations.
func (c *Client) consulDiscovery() {
	for {
		select {
		case <-c.shutdownCh:
			return
		case <-c.triggerDiscoveryCh:
			// A discovery failure is logged; the next trigger retries.
			if err := c.consulDiscoveryImpl(); err != nil {
				c.logger.Error("error discovering nomad servers", "error", err)
			}
		}
	}
}
// consulDiscoveryImpl queries Consul for services advertising the Nomad
// server service, asks each candidate server for its peers via the
// Status.Peers RPC, and stores the first non-empty peer set found. Returns
// an error when no servers could be discovered in any queried datacenter.
func (c *Client) consulDiscoveryImpl() error {
	consulLogger := c.logger.Named("consul")

	dcs, err := c.consulCatalog.Datacenters()
	if err != nil {
		return fmt.Errorf("client.consul: unable to query Consul datacenters: %v", err)
	}
	if len(dcs) > 2 {
		// Query the local DC first, then shuffle the
		// remaining DCs. Future heartbeats will cause Nomad
		// Clients to fixate on their local datacenter so
		// it's okay to talk with remote DCs. If no
		// Nomad servers are available within
		// datacenterQueryLimit, the next heartbeat will pick
		// a new set of servers so it's okay.
		shuffleStrings(dcs[1:])
		dcs = dcs[0:lib.MinInt(len(dcs), datacenterQueryLimit)]
	}

	// Query for servers in this client's region only
	region := c.Region()
	rpcargs := structs.GenericRequest{
		QueryOptions: structs.QueryOptions{
			Region: region,
		},
	}

	serviceName := c.configCopy.ConsulConfig.ServerServiceName
	var mErr multierror.Error
	var nomadServers servers.Servers
	consulLogger.Debug("bootstrap contacting Consul DCs", "consul_dcs", dcs)
DISCOLOOP:
	for _, dc := range dcs {
		consulOpts := &consulapi.QueryOptions{
			AllowStale: true,
			Datacenter: dc,
			Near:       "_agent",
			WaitTime:   consul.DefaultQueryWaitDuration,
		}
		consulServices, _, err := c.consulCatalog.Service(serviceName, consul.ServiceTagRPC, consulOpts)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("unable to query service %+q from Consul datacenter %+q: %v", serviceName, dc, err))
			continue
		}

		for _, s := range consulServices {
			port := strconv.Itoa(s.ServicePort)
			addrstr := s.ServiceAddress
			if addrstr == "" {
				addrstr = s.Address
			}
			addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(addrstr, port))
			if err != nil {
				mErr.Errors = append(mErr.Errors, err)
				continue
			}
			var peers []string
			if err := c.connPool.RPC(region, addr, c.RPCMajorVersion(), "Status.Peers", rpcargs, &peers); err != nil {
				mErr.Errors = append(mErr.Errors, err)
				continue
			}

			// Successfully received the Server peers list of the correct
			// region
			for _, p := range peers {
				addr, err := net.ResolveTCPAddr("tcp", p)
				if err != nil {
					mErr.Errors = append(mErr.Errors, err)
					// Fix: skip unresolvable peers instead of falling
					// through and registering a server with a nil address.
					continue
				}
				srv := &servers.Server{Addr: addr}
				nomadServers = append(nomadServers, srv)
			}
			if len(nomadServers) > 0 {
				break DISCOLOOP
			}
		}
	}
	if len(nomadServers) == 0 {
		if len(mErr.Errors) > 0 {
			return mErr.ErrorOrNil()
		}
		return fmt.Errorf("no Nomad Servers advertising service %q in Consul datacenters: %+q", serviceName, dcs)
	}

	consulLogger.Info("discovered following servers", "servers", nomadServers)

	// Fire the retry trigger if we have updated the set of servers.
	if c.servers.SetServers(nomadServers) {
		// Start rebalancing
		c.servers.RebalanceServers()

		// Notify waiting rpc calls. If a goroutine just failed an RPC call and
		// isn't receiving on this chan yet they'll still retry eventually.
		// This is a shortcircuit for the longer retry intervals.
		c.fireRpcRetryWatcher()
	}

	return nil
}
// emitStats collects host resource usage stats periodically and publishes
// them to the configured metrics sinks. It runs until shutdownCh closes and
// is intended to be started in its own goroutine.
func (c *Client) emitStats() {
	// Determining NodeClass to be emitted
	var emittedNodeClass string
	if emittedNodeClass = c.Node().NodeClass; emittedNodeClass == "" {
		emittedNodeClass = "none"
	}

	// Assign labels directly before emitting stats so the information expected
	// is ready
	c.baseLabels = []metrics.Label{
		{Name: "node_id", Value: c.NodeID()},
		{Name: "datacenter", Value: c.Datacenter()},
		{Name: "node_class", Value: emittedNodeClass},
	}

	// Start collecting host stats right away and then keep collecting every
	// collection interval (a zero-duration timer fires immediately).
	next := time.NewTimer(0)
	defer next.Stop()
	for {
		select {
		case <-next.C:
			err := c.hostStatsCollector.Collect()
			// Re-arm the timer before checking the error so collection keeps
			// retrying on the regular interval even after a failure.
			next.Reset(c.config.StatsCollectionInterval)
			if err != nil {
				c.logger.Warn("error fetching host resource usage stats", "error", err)
				continue
			}

			// Publish Node metrics if operator has opted in
			if c.config.PublishNodeMetrics {
				c.emitHostStats()
			}

			c.emitClientMetrics()
		case <-c.shutdownCh:
			return
		}
	}
}
// setGaugeForMemoryStats proxies metrics for memory specific statistics
func (c *Client) setGaugeForMemoryStats(nodeID string, hStats *stats.HostStats) {
	mem := hStats.Memory
	gauges := []struct {
		name  string
		value float32
	}{
		{"total", float32(mem.Total)},
		{"available", float32(mem.Available)},
		{"used", float32(mem.Used)},
		{"free", float32(mem.Free)},
	}

	if !c.config.DisableTaggedMetrics {
		for _, g := range gauges {
			metrics.SetGaugeWithLabels([]string{"client", "host", "memory", g.name}, g.value, c.baseLabels)
		}
	}

	if c.config.BackwardsCompatibleMetrics {
		for _, g := range gauges {
			metrics.SetGauge([]string{"client", "host", "memory", nodeID, g.name}, g.value)
		}
	}
}
// setGaugeForCPUStats proxies metrics for CPU specific statistics
func (c *Client) setGaugeForCPUStats(nodeID string, hStats *stats.HostStats) {
	for _, cpuStat := range hStats.CPU {
		gauges := []struct {
			name  string
			value float32
		}{
			{"total", float32(cpuStat.Total)},
			{"user", float32(cpuStat.User)},
			{"idle", float32(cpuStat.Idle)},
			{"system", float32(cpuStat.System)},
		}

		if !c.config.DisableTaggedMetrics {
			labels := append(c.baseLabels, metrics.Label{
				Name:  "cpu",
				Value: cpuStat.CPU,
			})
			for _, g := range gauges {
				metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", g.name}, g.value, labels)
			}
		}

		if c.config.BackwardsCompatibleMetrics {
			for _, g := range gauges {
				metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpuStat.CPU, g.name}, g.value)
			}
		}
	}
}
// setGaugeForDiskStats proxies metrics for disk specific statistics
func (c *Client) setGaugeForDiskStats(nodeID string, hStats *stats.HostStats) {
	for _, disk := range hStats.DiskStats {
		gauges := []struct {
			name  string
			value float32
		}{
			{"size", float32(disk.Size)},
			{"used", float32(disk.Used)},
			{"available", float32(disk.Available)},
			{"used_percent", float32(disk.UsedPercent)},
			{"inodes_percent", float32(disk.InodesUsedPercent)},
		}

		if !c.config.DisableTaggedMetrics {
			labels := append(c.baseLabels, metrics.Label{
				Name:  "disk",
				Value: disk.Device,
			})
			for _, g := range gauges {
				metrics.SetGaugeWithLabels([]string{"client", "host", "disk", g.name}, g.value, labels)
			}
		}

		if c.config.BackwardsCompatibleMetrics {
			for _, g := range gauges {
				metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, g.name}, g.value)
			}
		}
	}
}
// setGaugeForAllocationStats proxies metrics for allocation specific
// statistics: allocated and unallocated memory, disk, CPU, and per-device
// network bandwidth on this node.
func (c *Client) setGaugeForAllocationStats(nodeID string) {
	c.configLock.RLock()
	node := c.configCopy.Node
	c.configLock.RUnlock()
	total := node.NodeResources
	res := node.ReservedResources
	allocated := c.getAllocatedResources(node)

	// Emit allocated
	if !c.config.DisableTaggedMetrics {
		metrics.SetGaugeWithLabels([]string{"client", "allocated", "memory"}, float32(allocated.Flattened.Memory.MemoryMB), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "allocated", "disk"}, float32(allocated.Shared.DiskMB), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "allocated", "cpu"}, float32(allocated.Flattened.Cpu.CpuShares), c.baseLabels)
	}

	if c.config.BackwardsCompatibleMetrics {
		metrics.SetGauge([]string{"client", "allocated", "memory", nodeID}, float32(allocated.Flattened.Memory.MemoryMB))
		metrics.SetGauge([]string{"client", "allocated", "disk", nodeID}, float32(allocated.Shared.DiskMB))
		metrics.SetGauge([]string{"client", "allocated", "cpu", nodeID}, float32(allocated.Flattened.Cpu.CpuShares))
	}

	// Per-device allocated network bandwidth.
	for _, n := range allocated.Flattened.Networks {
		if !c.config.DisableTaggedMetrics {
			labels := append(c.baseLabels, metrics.Label{
				Name:  "device",
				Value: n.Device,
			})
			metrics.SetGaugeWithLabels([]string{"client", "allocated", "network"}, float32(n.MBits), labels)
		}

		if c.config.BackwardsCompatibleMetrics {
			metrics.SetGauge([]string{"client", "allocated", "network", n.Device, nodeID}, float32(n.MBits))
		}
	}

	// Emit unallocated = total - reserved - allocated.
	unallocatedMem := total.Memory.MemoryMB - res.Memory.MemoryMB - allocated.Flattened.Memory.MemoryMB
	unallocatedDisk := total.Disk.DiskMB - res.Disk.DiskMB - allocated.Shared.DiskMB
	unallocatedCpu := total.Cpu.CpuShares - res.Cpu.CpuShares - allocated.Flattened.Cpu.CpuShares

	if !c.config.DisableTaggedMetrics {
		metrics.SetGaugeWithLabels([]string{"client", "unallocated", "memory"}, float32(unallocatedMem), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "unallocated", "disk"}, float32(unallocatedDisk), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "unallocated", "cpu"}, float32(unallocatedCpu), c.baseLabels)
	}

	if c.config.BackwardsCompatibleMetrics {
		metrics.SetGauge([]string{"client", "unallocated", "memory", nodeID}, float32(unallocatedMem))
		metrics.SetGauge([]string{"client", "unallocated", "disk", nodeID}, float32(unallocatedDisk))
		metrics.SetGauge([]string{"client", "unallocated", "cpu", nodeID}, float32(unallocatedCpu))
	}

	// Per-device unallocated network bandwidth: host total minus whatever is
	// allocated on the matching device (NetIndex returns -1 when no
	// allocation uses the device).
	totalComparable := total.Comparable()
	for _, n := range totalComparable.Flattened.Networks {
		// Determined the used resources
		var usedMbits int
		totalIdx := allocated.Flattened.Networks.NetIndex(n)
		if totalIdx != -1 {
			usedMbits = allocated.Flattened.Networks[totalIdx].MBits
		}

		unallocatedMbits := n.MBits - usedMbits

		if !c.config.DisableTaggedMetrics {
			labels := append(c.baseLabels, metrics.Label{
				Name:  "device",
				Value: n.Device,
			})
			metrics.SetGaugeWithLabels([]string{"client", "unallocated", "network"}, float32(unallocatedMbits), labels)
		}

		if c.config.BackwardsCompatibleMetrics {
			metrics.SetGauge([]string{"client", "unallocated", "network", n.Device, nodeID}, float32(unallocatedMbits))
		}
	}
}
// No labels are required so we emit with only a key/value syntax
func (c *Client) setGaugeForUptime(hStats *stats.HostStats) {
	up := float32(hStats.Uptime)
	if !c.config.DisableTaggedMetrics {
		metrics.SetGaugeWithLabels([]string{"client", "uptime"}, up, c.baseLabels)
	}
	if c.config.BackwardsCompatibleMetrics {
		metrics.SetGauge([]string{"client", "uptime"}, up)
	}
}
// emitHostStats pushes host resource usage stats to remote metrics collection sinks
func (c *Client) emitHostStats() {
	id := c.NodeID()
	latest := c.hostStatsCollector.Stats()

	c.setGaugeForMemoryStats(id, latest)
	c.setGaugeForUptime(latest)
	c.setGaugeForCPUStats(id, latest)
	c.setGaugeForDiskStats(id, latest)
}
// emitClientMetrics emits lower volume client metrics
func (c *Client) emitClientMetrics() {
	nodeID := c.NodeID()

	c.setGaugeForAllocationStats(nodeID)

	// Tally alloc runners by their current client-side state.
	var blocked, migrating, pending, running, terminal int
	for _, ar := range c.getAllocRunners() {
		switch ar.AllocState().ClientStatus {
		case structs.AllocClientStatusPending:
			if ar.IsWaiting() {
				blocked++
			} else if ar.IsMigrating() {
				migrating++
			} else {
				pending++
			}
		case structs.AllocClientStatusRunning:
			running++
		case structs.AllocClientStatusComplete, structs.AllocClientStatusFailed:
			terminal++
		}
	}

	counts := []struct {
		name  string
		value float32
	}{
		{"migrating", float32(migrating)},
		{"blocked", float32(blocked)},
		{"pending", float32(pending)},
		{"running", float32(running)},
		{"terminal", float32(terminal)},
	}

	if !c.config.DisableTaggedMetrics {
		for _, ct := range counts {
			metrics.SetGaugeWithLabels([]string{"client", "allocations", ct.name}, ct.value, c.baseLabels)
		}
	}

	if c.config.BackwardsCompatibleMetrics {
		for _, ct := range counts {
			metrics.SetGauge([]string{"client", "allocations", ct.name, nodeID}, ct.value)
		}
	}
}
// getAllocatedResources sums the resources of all non-terminal allocations
// tracked by the client. Network bandwidth is attributed back to host devices
// by matching each allocation's IP against the host interfaces' CIDRs.
func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.ComparableResources {
	// Unfortunately the allocs only have IP so we need to match them to the
	// device.
	// Fix: size the map from NodeResources.Networks — the slice actually
	// iterated below — instead of the legacy Resources.Networks field, which
	// may be unset.
	cidrToDevice := make(map[*net.IPNet]string, len(selfNode.NodeResources.Networks))
	for _, n := range selfNode.NodeResources.Networks {
		_, ipnet, err := net.ParseCIDR(n.CIDR)
		if err != nil {
			continue
		}
		cidrToDevice[ipnet] = n.Device
	}

	// Sum the allocated resources
	var allocated structs.ComparableResources
	allocatedDeviceMbits := make(map[string]int)

	// countNetwork attributes mbits to the host device whose CIDR contains
	// the given IP (shared by both resource layouts below).
	countNetwork := func(ipStr string, mbits int) {
		ip := net.ParseIP(ipStr)
		for cidr, dev := range cidrToDevice {
			if cidr.Contains(ip) {
				allocatedDeviceMbits[dev] += mbits
				break
			}
		}
	}

	for _, ar := range c.getAllocRunners() {
		alloc := ar.Alloc()
		if alloc.ServerTerminalStatus() || ar.AllocState().ClientTerminalStatus() {
			continue
		}

		// Add the resources
		// COMPAT(0.11): Just use the allocated resources
		allocated.Add(alloc.ComparableResources())

		// Add the used network
		if alloc.AllocatedResources != nil {
			for _, tr := range alloc.AllocatedResources.Tasks {
				for _, allocatedNetwork := range tr.Networks {
					countNetwork(allocatedNetwork.IP, allocatedNetwork.MBits)
				}
			}
		} else if alloc.Resources != nil {
			for _, allocatedNetwork := range alloc.Resources.Networks {
				countNetwork(allocatedNetwork.IP, allocatedNetwork.MBits)
			}
		}
	}

	// Clear the networks
	allocated.Flattened.Networks = nil
	for dev, speed := range allocatedDeviceMbits {
		// Named "nw" rather than "net" so the local doesn't shadow the net
		// package used above.
		nw := &structs.NetworkResource{
			Device: dev,
			MBits:  speed,
		}
		allocated.Flattened.Networks = append(allocated.Flattened.Networks, nw)
	}

	return &allocated
}
// GetTaskEventHandler returns an event handler for the given allocID and task name
func (c *Client) GetTaskEventHandler(allocID, taskName string) drivermanager.EventHandler {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	ar, ok := c.allocs[allocID]
	if !ok {
		// Unknown allocation: no handler available.
		return nil
	}
	return ar.GetTaskEventHandler(taskName)
}
// group wraps a func() in a goroutine and provides a way to block until it
// exits. Inspired by https://godoc.org/golang.org/x/sync/errgroup
type group struct {
	// wg counts every goroutine started via Go (and AddCh) so Wait can
	// block until all of them have exited.
	wg sync.WaitGroup
}
// Go starts f in a goroutine and must be called before Wait.
func (g *group) Go(fn func()) {
	g.wg.Add(1)
	go func() {
		// Done is deferred so the WaitGroup is decremented even if fn panics.
		defer g.wg.Done()
		fn()
	}()
}
// AddCh starts a goroutine that blocks until ch yields (typically by being
// closed), tying the channel's lifetime into Wait.
// Fix: receiver renamed from "c" to "g" for consistency with the other group
// methods (staticcheck ST1016).
func (g *group) AddCh(ch <-chan struct{}) {
	g.Go(func() {
		<-ch
	})
}
// Wait for all goroutines to exit. Must be called after all calls to Go
// complete; calling Go concurrently with Wait races on the WaitGroup counter.
func (g *group) Wait() {
	g.wg.Wait()
}
client: synchronize client.invalidAllocs access

invalidAllocs may be accessed and manipulated from different goroutines,
so access to it must be protected by a lock.
package client
import (
"errors"
"fmt"
"io/ioutil"
"net"
"net/rpc"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/armon/go-metrics"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/allocrunner"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
arstate "github.com/hashicorp/nomad/client/allocrunner/state"
"github.com/hashicorp/nomad/client/allocwatcher"
"github.com/hashicorp/nomad/client/config"
consulApi "github.com/hashicorp/nomad/client/consul"
"github.com/hashicorp/nomad/client/devicemanager"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/client/pluginmanager"
"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
"github.com/hashicorp/nomad/client/servers"
"github.com/hashicorp/nomad/client/state"
"github.com/hashicorp/nomad/client/stats"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/vaultclient"
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/pool"
hstats "github.com/hashicorp/nomad/helper/stats"
"github.com/hashicorp/nomad/helper/tlsutil"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/structs"
nconfig "github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/plugins/device"
"github.com/hashicorp/nomad/plugins/drivers"
vaultapi "github.com/hashicorp/vault/api"
"github.com/shirou/gopsutil/host"
)
// Timing and connection-limit constants governing client behavior.
const (
	// clientRPCCache controls how long we keep an idle connection
	// open to a server
	clientRPCCache = 5 * time.Minute

	// clientMaxStreams controls how many idle streams we keep
	// open to a server
	clientMaxStreams = 2

	// datacenterQueryLimit searches through up to this many adjacent
	// datacenters looking for the Nomad server service.
	datacenterQueryLimit = 9

	// registerRetryIntv is minimum interval on which we retry
	// registration. We pick a value between this and 2x this.
	registerRetryIntv = 15 * time.Second

	// getAllocRetryIntv is minimum interval on which we retry
	// to fetch allocations. We pick a value between this and 2x this.
	getAllocRetryIntv = 30 * time.Second

	// devModeRetryIntv is the retry interval used for development
	devModeRetryIntv = time.Second

	// stateSnapshotIntv is how often the client snapshots state
	stateSnapshotIntv = 60 * time.Second

	// initialHeartbeatStagger is used to stagger the interval between
	// starting and the initial heartbeat. After the initial heartbeat,
	// we switch to using the TTL specified by the servers.
	initialHeartbeatStagger = 10 * time.Second

	// nodeUpdateRetryIntv is how often the client checks for updates to the
	// node attributes or meta map.
	nodeUpdateRetryIntv = 5 * time.Second

	// allocSyncIntv is the batching period of allocation updates before they
	// are synced with the server.
	allocSyncIntv = 200 * time.Millisecond

	// allocSyncRetryIntv is the interval on which we retry updating
	// the status of the allocation
	allocSyncRetryIntv = 5 * time.Second
)
var (
	// batchFirstFingerprintsProcessingGrace is the grace period allowed for
	// batch fingerprint processing: batchFirstFingerprintsTimeout plus a 5s
	// buffer.
	batchFirstFingerprintsProcessingGrace = batchFirstFingerprintsTimeout + 5*time.Second
)
// ClientStatsReporter exposes all the APIs related to resource usage of a Nomad
// Client
type ClientStatsReporter interface {
	// GetAllocStats returns the AllocStatsReporter for the passed allocation.
	// If it does not exist an error is reported.
	GetAllocStats(allocID string) (interfaces.AllocStatsReporter, error)

	// LatestHostStats returns the latest resource usage stats for the host
	LatestHostStats() *stats.HostStats
}
// AllocRunner is the interface implemented by the core alloc runner.
//TODO Create via factory to allow testing Client with mock AllocRunners.
type AllocRunner interface {
	// Alloc and state accessors.
	Alloc() *structs.Allocation
	AllocState() *arstate.State

	// Lifecycle operations.
	Destroy()
	Shutdown()
	GetAllocDir() *allocdir.AllocDir
	IsDestroyed() bool
	IsMigrating() bool
	IsWaiting() bool
	Listener() *cstructs.AllocListener
	Restore() error
	Run()
	StatsReporter() interfaces.AllocStatsReporter
	Update(*structs.Allocation)

	// Channels closed when the runner finishes, is destroyed, or shuts down.
	WaitCh() <-chan struct{}
	DestroyCh() <-chan struct{}
	ShutdownCh() <-chan struct{}

	// Task-level operations.
	Signal(taskName, signal string) error
	GetTaskEventHandler(taskName string) drivermanager.EventHandler
	RestartTask(taskName string, taskEvent *structs.TaskEvent) error
	RestartAll(taskEvent *structs.TaskEvent) error

	GetTaskExecHandler(taskName string) drivermanager.TaskExecHandler
	GetTaskDriverCapabilities(taskName string) (*drivers.Capabilities, error)
}
// Client is used to implement the client interaction with Nomad. Clients
// are expected to register as a schedulable node to the servers, and to
// run allocations as determined by the servers.
type Client struct {
	config *config.Config
	// start is the time the client was created.
	start time.Time

	// stateDB is used to efficiently store client state.
	stateDB state.StateDB

	// configCopy is a copy that should be passed to alloc-runners.
	configCopy *config.Config
	configLock sync.RWMutex

	logger    hclog.Logger
	rpcLogger hclog.Logger

	connPool *pool.ConnPool

	// tlsWrap is used to wrap outbound connections using TLS. It should be
	// accessed using the lock.
	tlsWrap     tlsutil.RegionWrapper
	tlsWrapLock sync.RWMutex

	// servers is the list of nomad servers
	servers *servers.Manager

	// heartbeat related times for tracking how often to heartbeat
	lastHeartbeat   time.Time
	heartbeatTTL    time.Duration
	haveHeartbeated bool
	// heartbeatLock guards the heartbeat fields above.
	heartbeatLock sync.Mutex

	// triggerDiscoveryCh triggers Consul discovery; see triggerDiscovery
	triggerDiscoveryCh chan struct{}

	// triggerNodeUpdate triggers the client to mark the Node as changed and
	// update it.
	triggerNodeUpdate chan struct{}

	// triggerEmitNodeEvent sends an event and triggers the client to update the
	// server for the node event
	triggerEmitNodeEvent chan *structs.NodeEvent

	// rpcRetryCh is closed when there an event such as server discovery or a
	// successful RPC occurring happens such that a retry should happen. Access
	// should only occur via the getter method
	rpcRetryCh   chan struct{}
	rpcRetryLock sync.Mutex

	// allocs maps alloc IDs to their AllocRunner. This map includes all
	// AllocRunners - running and GC'd - until the server GCs them.
	allocs    map[string]AllocRunner
	allocLock sync.RWMutex

	// invalidAllocs is a map that tracks allocations that failed because
	// the client couldn't initialize alloc or task runners for it. This can
	// happen due to driver errors
	invalidAllocs     map[string]struct{}
	// invalidAllocsLock guards invalidAllocs, which is touched from multiple
	// goroutines.
	invalidAllocsLock sync.Mutex

	// allocUpdates stores allocations that need to be synced to the server.
	allocUpdates chan *structs.Allocation

	// consulService is Nomad's custom Consul client for managing services
	// and checks.
	consulService consulApi.ConsulServiceAPI

	// consulCatalog is the subset of Consul's Catalog API Nomad uses.
	consulCatalog consul.CatalogAPI

	// HostStatsCollector collects host resource usage stats
	hostStatsCollector *stats.HostStatsCollector

	// shutdown is true when the Client has been shutdown. Must hold
	// shutdownLock to access.
	shutdown bool

	// shutdownCh is closed to signal the Client is shutting down.
	shutdownCh chan struct{}

	shutdownLock sync.Mutex

	// shutdownGroup are goroutines that exit when shutdownCh is closed.
	// Shutdown() blocks on Wait() after closing shutdownCh.
	shutdownGroup group

	// vaultClient is used to interact with Vault for token and secret renewals
	vaultClient vaultclient.VaultClient

	// garbageCollector is used to garbage collect terminal allocations present
	// in the node automatically
	garbageCollector *AllocGarbageCollector

	// clientACLResolver holds the ACL resolution state
	clientACLResolver

	// rpcServer is used to serve RPCs by the local agent.
	rpcServer     *rpc.Server
	endpoints     rpcEndpoints
	streamingRpcs *structs.StreamingRpcRegistry

	// pluginManagers is the set of PluginManagers registered by the client
	pluginManagers *pluginmanager.PluginGroup

	// devicemanger is responsible for managing device plugins.
	devicemanager devicemanager.Manager

	// drivermanager is responsible for managing driver plugins
	drivermanager drivermanager.Manager

	// baseLabels are used when emitting tagged metrics. All client metrics will
	// have these tags, and optionally more.
	baseLabels []metrics.Label

	// batchNodeUpdates is used to batch initial updates to the node
	batchNodeUpdates *batchNodeUpdates

	// fpInitialized chan is closed when the first batch of fingerprints are
	// applied to the node and the server is updated
	fpInitialized chan struct{}

	// serversContactedCh is closed when GetClientAllocs and runAllocs have
	// successfully run once.
	serversContactedCh   chan struct{}
	serversContactedOnce sync.Once
}
var (
	// noServersErr is returned by the RPC method when the client has no
	// configured servers. This is used to trigger Consul discovery if
	// enabled. Callers compare against this sentinel to distinguish
	// "no servers known" from transport errors.
	noServersErr = errors.New("no servers")
)
// NewClient is used to create a new client from the given configuration.
// It wires up all client subsystems (TLS, RPC, fingerprinting, plugin
// managers, stats/GC, Vault, server discovery), restores persisted
// allocations, and starts the long-running goroutines before returning.
func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulService consulApi.ConsulServiceAPI) (*Client, error) {
	// Create the tls wrapper; left nil when RPC TLS is disabled.
	var tlsWrap tlsutil.RegionWrapper
	if cfg.TLSConfig.EnableRPC {
		tw, err := tlsutil.NewTLSConfiguration(cfg.TLSConfig, true, true)
		if err != nil {
			return nil, err
		}
		tlsWrap, err = tw.OutgoingTLSWrapper()
		if err != nil {
			return nil, err
		}
	}

	if cfg.StateDBFactory == nil {
		cfg.StateDBFactory = state.GetStateDBFactory(cfg.DevMode)
	}

	// Create the logger
	logger := cfg.Logger.ResetNamed("client")

	// Create the client
	c := &Client{
		config:               cfg,
		consulCatalog:        consulCatalog,
		consulService:        consulService,
		start:                time.Now(),
		connPool:             pool.NewPool(logger, clientRPCCache, clientMaxStreams, tlsWrap),
		tlsWrap:              tlsWrap,
		streamingRpcs:        structs.NewStreamingRpcRegistry(),
		logger:               logger,
		rpcLogger:            logger.Named("rpc"),
		allocs:               make(map[string]AllocRunner),
		allocUpdates:         make(chan *structs.Allocation, 64),
		shutdownCh:           make(chan struct{}),
		triggerDiscoveryCh:   make(chan struct{}),
		triggerNodeUpdate:    make(chan struct{}, 8),
		triggerEmitNodeEvent: make(chan *structs.NodeEvent, 8),
		fpInitialized:        make(chan struct{}),
		invalidAllocs:        make(map[string]struct{}),
		serversContactedCh:   make(chan struct{}),
		serversContactedOnce: sync.Once{},
	}

	// Route driver/device node updates through the batching layer so
	// startup fingerprints are coalesced into fewer server updates.
	c.batchNodeUpdates = newBatchNodeUpdates(
		c.updateNodeFromDriver,
		c.updateNodeFromDevices,
	)

	// Initialize the server manager
	c.servers = servers.New(c.logger, c.shutdownCh, c)

	// Start server manager rebalancing go routine
	go c.servers.Start()

	// Initialize the client: state/alloc dirs and the state database.
	if err := c.init(); err != nil {
		return nil, fmt.Errorf("failed to initialize client: %v", err)
	}

	// Setup the clients RPC server
	c.setupClientRpc()

	// Initialize the ACL state
	if err := c.clientACLResolver.init(); err != nil {
		return nil, fmt.Errorf("failed to initialize ACL state: %v", err)
	}

	// Setup the node
	if err := c.setupNode(); err != nil {
		return nil, fmt.Errorf("node setup failed: %v", err)
	}

	// Store the config copy before restoring state but after it has been
	// initialized.
	c.configLock.Lock()
	c.configCopy = c.config.Copy()
	c.configLock.Unlock()

	fingerprintManager := NewFingerprintManager(
		c.configCopy.PluginSingletonLoader, c.GetConfig, c.configCopy.Node,
		c.shutdownCh, c.updateNodeFromFingerprint, c.logger)

	c.pluginManagers = pluginmanager.New(c.logger)

	// Fingerprint the node and scan for drivers
	if err := fingerprintManager.Run(); err != nil {
		return nil, fmt.Errorf("fingerprinting failed: %v", err)
	}

	// Build the white/blacklists of drivers.
	allowlistDrivers := cfg.ReadStringListToMap("driver.whitelist")
	blocklistDrivers := cfg.ReadStringListToMap("driver.blacklist")

	// Setup the driver manager
	driverConfig := &drivermanager.Config{
		Logger:              c.logger,
		Loader:              c.configCopy.PluginSingletonLoader,
		PluginConfig:        c.configCopy.NomadPluginConfig(),
		Updater:             c.batchNodeUpdates.updateNodeFromDriver,
		EventHandlerFactory: c.GetTaskEventHandler,
		State:               c.stateDB,
		AllowedDrivers:      allowlistDrivers,
		BlockedDrivers:      blocklistDrivers,
	}
	drvManager := drivermanager.New(driverConfig)
	c.drivermanager = drvManager
	c.pluginManagers.RegisterAndRun(drvManager)

	// Setup the device manager
	devConfig := &devicemanager.Config{
		Logger:        c.logger,
		Loader:        c.configCopy.PluginSingletonLoader,
		PluginConfig:  c.configCopy.NomadPluginConfig(),
		Updater:       c.batchNodeUpdates.updateNodeFromDevices,
		StatsInterval: c.configCopy.StatsCollectionInterval,
		State:         c.stateDB,
	}
	devManager := devicemanager.New(devConfig)
	c.devicemanager = devManager
	c.pluginManagers.RegisterAndRun(devManager)

	// Batching of initial fingerprints is done to reduce the number of node
	// updates sent to the server on startup.
	go c.batchFirstFingerprints()

	// Add the stats collector
	statsCollector := stats.NewHostStatsCollector(c.logger, c.config.AllocDir, c.devicemanager.AllStats)
	c.hostStatsCollector = statsCollector

	// Add the garbage collector
	gcConfig := &GCConfig{
		MaxAllocs:           cfg.GCMaxAllocs,
		DiskUsageThreshold:  cfg.GCDiskUsageThreshold,
		InodeUsageThreshold: cfg.GCInodeUsageThreshold,
		Interval:            cfg.GCInterval,
		ParallelDestroys:    cfg.GCParallelDestroys,
		ReservedDiskMB:      cfg.Node.Reserved.DiskMB,
	}
	c.garbageCollector = NewAllocGarbageCollector(c.logger, statsCollector, c, gcConfig)
	go c.garbageCollector.Run()

	// Set the preconfigured list of static servers. force=true because the
	// servers may still be starting and initial pings are allowed to fail.
	c.configLock.RLock()
	if len(c.configCopy.Servers) > 0 {
		if _, err := c.setServersImpl(c.configCopy.Servers, true); err != nil {
			logger.Warn("none of the configured servers are valid", "error", err)
		}
	}
	c.configLock.RUnlock()

	// Setup Consul discovery if enabled
	if c.configCopy.ConsulConfig.ClientAutoJoin != nil && *c.configCopy.ConsulConfig.ClientAutoJoin {
		c.shutdownGroup.Go(c.consulDiscovery)
		if c.servers.NumServers() == 0 {
			// No configured servers; trigger discovery manually
			c.triggerDiscoveryCh <- struct{}{}
		}
	}

	// Setup the vault client for token and secret renewals
	if err := c.setupVaultClient(); err != nil {
		return nil, fmt.Errorf("failed to setup vault client: %v", err)
	}

	// wait until drivers are healthy before restoring or registering with servers
	select {
	case <-c.Ready():
	case <-time.After(batchFirstFingerprintsProcessingGrace):
		logger.Warn("batch fingerprint operation timed out; proceeding to register with fingerprinted plugins so far")
	}

	// Register and then start heartbeating to the servers.
	c.shutdownGroup.Go(c.registerAndHeartbeat)

	// Restore the state
	if err := c.restoreState(); err != nil {
		logger.Error("failed to restore state", "error", err)
		logger.Error("Nomad is unable to start due to corrupt state. "+
			"The safest way to proceed is to manually stop running task processes "+
			"and remove Nomad's state and alloc directories before "+
			"restarting. Lost allocations will be rescheduled.",
			"state_dir", c.config.StateDir, "alloc_dir", c.config.AllocDir)
		logger.Error("Corrupt state is often caused by a bug. Please " +
			"report as much information as possible to " +
			"https://github.com/hashicorp/nomad/issues")
		return nil, fmt.Errorf("failed to restore state")
	}

	// Begin periodic snapshotting of state.
	c.shutdownGroup.Go(c.periodicSnapshot)

	// Begin syncing allocations to the server
	c.shutdownGroup.Go(c.allocSync)

	// Start the client! Don't use the shutdownGroup as run handles
	// shutdowns manually to prevent updates from being applied during
	// shutdown.
	go c.run()

	// Start collecting stats
	c.shutdownGroup.Go(c.emitStats)

	c.logger.Info("started client", "node_id", c.NodeID())
	return c, nil
}
// Ready returns a channel that is closed once the first batch of
// fingerprints has been applied to the node and the client is initialized.
func (c *Client) Ready() <-chan struct{} {
	return c.fpInitialized
}
// init performs pre-start setup for the client: it resolves (or creates)
// the state and alloc directories, and opens and upgrades the state
// database. Directories default to fresh temp dirs when unconfigured.
func (c *Client) init() error {
	// Resolve the state directory, creating a temporary one when none is
	// configured.
	if c.config.StateDir == "" {
		tmp, err := ioutil.TempDir("", "NomadClient")
		if err != nil {
			return fmt.Errorf("failed creating temporary directory for the StateDir: %v", err)
		}
		tmp, err = filepath.EvalSymlinks(tmp)
		if err != nil {
			return fmt.Errorf("failed to find temporary directory for the StateDir: %v", err)
		}
		c.config.StateDir = tmp
	} else if err := os.MkdirAll(c.config.StateDir, 0700); err != nil {
		return fmt.Errorf("failed creating state dir: %s", err)
	}
	c.logger.Info("using state directory", "state_dir", c.config.StateDir)

	// Open the state database.
	db, err := c.config.StateDBFactory(c.logger, c.config.StateDir)
	if err != nil {
		return fmt.Errorf("failed to open state database: %v", err)
	}

	// Upgrade the state database. Upgrade only returns an error on
	// critical persistence failures requiring operator intervention;
	// corrupt state is dropped and logged instead.
	if err := db.Upgrade(); err != nil {
		return fmt.Errorf("failed to upgrade state database: %v", err)
	}
	c.stateDB = db

	// Resolve the alloc directory the same way.
	if c.config.AllocDir == "" {
		tmp, err := ioutil.TempDir("", "NomadClient")
		if err != nil {
			return fmt.Errorf("failed creating temporary directory for the AllocDir: %v", err)
		}
		tmp, err = filepath.EvalSymlinks(tmp)
		if err != nil {
			return fmt.Errorf("failed to find temporary directory for the AllocDir: %v", err)
		}
		// Change the permissions to have the execute bit so the dir
		// can be traversed.
		if err := os.Chmod(tmp, 0711); err != nil {
			return fmt.Errorf("failed to change directory permissions for the AllocDir: %v", err)
		}
		c.config.AllocDir = tmp
	} else if err := os.MkdirAll(c.config.AllocDir, 0711); err != nil {
		return fmt.Errorf("failed creating alloc dir: %s", err)
	}
	c.logger.Info("using alloc directory", "alloc_dir", c.config.AllocDir)
	return nil
}
// reloadTLSConnections allows a client to reload its TLS configuration on
// the fly.
func (c *Client) reloadTLSConnections(newConfig *nconfig.TLSConfig) error {
	// Build a fresh outgoing wrapper when RPC TLS is enabled; a nil
	// wrapper means TLS is disabled for new connections.
	var wrapper tlsutil.RegionWrapper
	if newConfig != nil && newConfig.EnableRPC {
		conf, err := tlsutil.NewTLSConfiguration(newConfig, true, true)
		if err != nil {
			return err
		}
		w, err := conf.OutgoingTLSWrapper()
		if err != nil {
			return err
		}
		wrapper = w
	}

	// Swap in the new tls wrapper.
	c.tlsWrapLock.Lock()
	c.tlsWrap = wrapper
	c.tlsWrapLock.Unlock()

	// Keep the client configuration up to date: configuration values
	// decide what type of connections to accept.
	c.configLock.Lock()
	c.config.TLSConfig = newConfig
	c.configLock.Unlock()

	c.connPool.ReloadTLS(wrapper)
	return nil
}
// Reload allows a client to reload its configuration on the fly. Currently
// only the TLS configuration is considered.
func (c *Client) Reload(newConfig *config.Config) error {
	shouldReload, err := tlsutil.ShouldReloadRPCConnections(c.config.TLSConfig, newConfig.TLSConfig)
	switch {
	case err != nil:
		c.logger.Error("error parsing TLS configuration", "error", err)
		return err
	case shouldReload:
		return c.reloadTLSConnections(newConfig.TLSConfig)
	default:
		return nil
	}
}
// Leave is used to prepare the client to leave the cluster.
func (c *Client) Leave() error {
	// TODO: implement graceful leave.
	return nil
}
// GetConfig returns the current configuration copy of the client. A read
// lock suffices here: configCopy is only replaced under the write lock,
// never mutated in place, and the other read-side accessors (e.g. Node)
// already use RLock on the same configLock.
func (c *Client) GetConfig() *config.Config {
	c.configLock.RLock()
	defer c.configLock.RUnlock()
	return c.configCopy
}
// Datacenter returns the datacenter the client node belongs to.
func (c *Client) Datacenter() string {
	return c.config.Node.Datacenter
}
// Region returns the region the client is configured for.
func (c *Client) Region() string {
	return c.config.Region
}
// NodeID returns this client's node ID.
func (c *Client) NodeID() string {
	return c.config.Node.ID
}
// secretNodeID returns this client's secret node identifier.
func (c *Client) secretNodeID() string {
	return c.config.Node.SecretID
}
// RPCMajorVersion reports the structs.ApiMajorVersion this client supports.
func (c *Client) RPCMajorVersion() int {
	return structs.ApiMajorVersion
}
// RPCMinorVersion reports the structs.ApiMinorVersion this client supports.
func (c *Client) RPCMinorVersion() int {
	return structs.ApiMinorVersion
}
// Shutdown is used to tear down the client. It stops Vault token renewal
// and the garbage collector, destroys (dev mode) or shuts down (normal
// mode) every alloc runner, then closes shutdownCh, the connection pool,
// and finally the state database. Idempotent: a second call returns nil.
func (c *Client) Shutdown() error {
	c.shutdownLock.Lock()
	defer c.shutdownLock.Unlock()

	if c.shutdown {
		c.logger.Info("already shutdown")
		return nil
	}
	c.logger.Info("shutting down")

	// Stop renewing tokens and secrets
	if c.vaultClient != nil {
		c.vaultClient.Stop()
	}

	// Stop Garbage collector
	c.garbageCollector.Stop()

	arGroup := group{}
	if c.config.DevMode {
		// In DevMode destroy all the running allocations.
		for _, ar := range c.getAllocRunners() {
			ar.Destroy()
			arGroup.AddCh(ar.DestroyCh())
		}
	} else {
		// In normal mode call shutdown
		for _, ar := range c.getAllocRunners() {
			ar.Shutdown()
			arGroup.AddCh(ar.ShutdownCh())
		}
	}
	// Block until every runner has finished destroying/shutting down.
	arGroup.Wait()

	// Shutdown the plugin managers
	c.pluginManagers.Shutdown()

	c.shutdown = true
	close(c.shutdownCh)

	// Must close connection pool to unblock alloc watcher
	c.connPool.Shutdown()

	// Wait for goroutines to stop
	c.shutdownGroup.Wait()

	// One final save state
	c.saveState()
	return c.stateDB.Close()
}
// Stats is used to return statistics for debugging and insight for various
// sub-systems.
func (c *Client) Stats() map[string]map[string]string {
	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()

	clientStats := map[string]string{
		"node_id":         c.NodeID(),
		"known_servers":   strings.Join(c.GetServers(), ","),
		"num_allocations": strconv.Itoa(c.NumAllocs()),
		"last_heartbeat":  fmt.Sprintf("%v", time.Since(c.lastHeartbeat)),
		"heartbeat_ttl":   fmt.Sprintf("%v", c.heartbeatTTL),
	}
	return map[string]map[string]string{
		"client":  clientStats,
		"runtime": hstats.RuntimeStats(),
	}
}
// SignalAllocation sends a signal to the tasks within an allocation.
// If the provided task is empty, then every task will be signalled.
// If a task is provided, then only an exactly matching task is signalled.
func (c *Client) SignalAllocation(allocID, task, signal string) error {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return err
	}
	return runner.Signal(task, signal)
}
// CollectAllocation garbage collects a single allocation on a node,
// reporting whether the alloc was found and collected.
func (c *Client) CollectAllocation(allocID string) bool {
	return c.garbageCollector.Collect(allocID)
}
// CollectAllAllocs garbage collects every terminal-state allocation on
// this node.
func (c *Client) CollectAllAllocs() {
	c.garbageCollector.CollectAll()
}
// RestartAllocation restarts a single task when taskName is given, or every
// task of the allocation otherwise, recording a user-requested restart event.
func (c *Client) RestartAllocation(allocID, taskName string) error {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return err
	}

	event := structs.NewTaskEvent(structs.TaskRestartSignal).
		SetRestartReason("User requested restart")

	if taskName == "" {
		return runner.RestartAll(event)
	}
	return runner.RestartTask(taskName, event)
}
// Node returns the locally registered node.
func (c *Client) Node() *structs.Node {
	c.configLock.RLock()
	node := c.configCopy.Node
	c.configLock.RUnlock()
	return node
}
// getAllocRunner returns the runner tracking allocID, or an
// unknown-allocation error when this client does not track it.
func (c *Client) getAllocRunner(allocID string) (AllocRunner, error) {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	if runner, ok := c.allocs[allocID]; ok {
		return runner, nil
	}
	return nil, structs.NewErrUnknownAllocation(allocID)
}
// StatsReporter exposes the client itself as the resource-usage reporting
// API of a Nomad client.
func (c *Client) StatsReporter() ClientStatsReporter {
	return c
}
// GetAllocStats returns the stats reporter for the given allocation, or an
// unknown-allocation error when the alloc is not tracked here.
func (c *Client) GetAllocStats(allocID string) (interfaces.AllocStatsReporter, error) {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return nil, err
	}
	return runner.StatsReporter(), nil
}
// LatestHostStats returns the most recently collected host resource usage
// stats for this Nomad client.
func (c *Client) LatestHostStats() *stats.HostStats {
	return c.hostStatsCollector.Stats()
}
// LatestDeviceResourceStats returns the latest stats for the given
// allocated devices, derived from the most recent host stats.
func (c *Client) LatestDeviceResourceStats(devices []*structs.AllocatedDeviceResource) []*device.DeviceGroupStats {
	hostDeviceStats := c.LatestHostStats().DeviceStats
	return c.computeAllocatedDeviceGroupStats(devices, hostDeviceStats)
}
// computeAllocatedDeviceGroupStats filters hostDeviceGroupStats down to the
// instance stats of the allocated devices. Device groups with no allocated
// instances are omitted; allocated instance IDs missing from the host stats
// are logged and skipped. Returns nil when either input is empty.
//
// Fix: the original accumulated an unused `total` counter; removed as dead
// code.
func (c *Client) computeAllocatedDeviceGroupStats(devices []*structs.AllocatedDeviceResource, hostDeviceGroupStats []*device.DeviceGroupStats) []*device.DeviceGroupStats {
	// basic optimization for the usual case
	if len(devices) == 0 || len(hostDeviceGroupStats) == 0 {
		return nil
	}

	// Build an index of allocated device IDs keyed by device group tuple.
	adIdx := map[structs.DeviceIdTuple][]string{}
	for _, ds := range devices {
		adIdx[*ds.ID()] = ds.DeviceIDs
	}

	// Collect allocated device stats from host stats
	result := make([]*device.DeviceGroupStats, 0, len(adIdx))
	for _, dg := range hostDeviceGroupStats {
		k := structs.DeviceIdTuple{
			Vendor: dg.Vendor,
			Type:   dg.Type,
			Name:   dg.Name,
		}

		allocatedDeviceIDs, ok := adIdx[k]
		if !ok {
			continue
		}

		rdgStats := &device.DeviceGroupStats{
			Vendor:        dg.Vendor,
			Type:          dg.Type,
			Name:          dg.Name,
			InstanceStats: map[string]*device.DeviceStats{},
		}

		for _, adID := range allocatedDeviceIDs {
			deviceStats, ok := dg.InstanceStats[adID]
			if !ok || deviceStats == nil {
				c.logger.Warn("device not found in stats", "device_id", adID, "device_group_id", k)
				continue
			}
			rdgStats.InstanceStats[adID] = deviceStats
		}

		result = append(result, rdgStats)
	}

	return result
}
// ValidateMigrateToken verifies that a token is for a specific client and
// allocation, and has been created by a trusted party that has privileged
// knowledge of the client's secret identifier. Always true with ACLs off.
func (c *Client) ValidateMigrateToken(allocID, migrateToken string) bool {
	if c.config.ACLEnabled {
		return structs.CompareMigrateToken(allocID, c.secretNodeID(), migrateToken)
	}
	return true
}
// GetAllocFS returns the AllocFS interface for the alloc dir of an
// allocation, or an unknown-allocation error.
func (c *Client) GetAllocFS(allocID string) (allocdir.AllocDirFS, error) {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return nil, err
	}
	return runner.GetAllocDir(), nil
}
// GetAllocState returns a copy of an allocation's state on this client, or
// an unknown-allocation error.
func (c *Client) GetAllocState(allocID string) (*arstate.State, error) {
	runner, err := c.getAllocRunner(allocID)
	if err != nil {
		return nil, err
	}
	return runner.AllocState(), nil
}
// GetServers returns a sorted list of the nomad servers this client is
// aware of.
func (c *Client) GetServers() []string {
	known := c.servers.GetServers()
	res := make([]string, 0, len(known))
	for _, endpoint := range known {
		res = append(res, endpoint.String())
	}
	sort.Strings(res)
	return res
}
// SetServers installs a new list of nomad servers to connect to. As long
// as one server resolves and responds, no error is returned.
func (c *Client) SetServers(in []string) (int, error) {
	return c.setServersImpl(in, false)
}
// setServersImpl sets a new list of nomad servers to connect to. If force is
// set, we add the server to the internal serverlist even if the server could not
// be pinged. An error is returned if no endpoints were valid when non-forcing.
//
// Force should be used when setting the servers from the initial configuration
// since the server may be starting up in parallel and initial pings may fail.
func (c *Client) setServersImpl(in []string, force bool) (int, error) {
	var mu sync.Mutex
	var wg sync.WaitGroup
	var merr multierror.Error

	endpoints := make([]*servers.Server, 0, len(in))
	wg.Add(len(in))

	// Resolve and ping every candidate concurrently; mu guards both the
	// shared error list and the endpoints slice.
	for _, s := range in {
		go func(srv string) {
			defer wg.Done()
			addr, err := resolveServer(srv)
			if err != nil {
				mu.Lock()
				c.logger.Debug("ignoring server due to resolution error", "error", err, "server", srv)
				merr.Errors = append(merr.Errors, err)
				mu.Unlock()
				return
			}

			// Try to ping to check if it is a real server
			if err := c.Ping(addr); err != nil {
				mu.Lock()
				merr.Errors = append(merr.Errors, fmt.Errorf("Server at address %s failed ping: %v", addr, err))
				mu.Unlock()

				// If we are forcing the setting of the servers, inject it to
				// the serverlist even if we can't ping immediately.
				if !force {
					return
				}
			}

			mu.Lock()
			endpoints = append(endpoints, &servers.Server{Addr: addr})
			mu.Unlock()
		}(s)
	}

	wg.Wait()

	// Only return errors if no servers are valid
	if len(endpoints) == 0 {
		if len(merr.Errors) > 0 {
			return 0, merr.ErrorOrNil()
		}
		return 0, noServersErr
	}

	c.servers.SetServers(endpoints)
	return len(endpoints), nil
}
// restoreState is used to restore our state from the data dir.
// If there are errors restoring a specific allocation it is marked
// as failed whenever possible. In dev mode nothing is restored.
func (c *Client) restoreState() error {
	if c.config.DevMode {
		return nil
	}

	//XXX REMOVED! make a note in backward compat / upgrading doc
	// COMPAT: Remove in 0.7.0
	// 0.6.0 transitioned from individual state files to a single bolt-db.
	// The upgrade path is to:
	// Check if old state exists
	// If so, restore from that and delete old state
	// Restore using state database

	// Restore allocations
	allocs, allocErrs, err := c.stateDB.GetAllAllocations()
	if err != nil {
		return err
	}

	// Allocations that could not even be read back are logged and dropped.
	for allocID, err := range allocErrs {
		c.logger.Error("error restoring alloc", "error", err, "alloc_id", allocID)
		//TODO Cleanup
		// Try to clean up alloc dir
		// Remove boltdb entries?
		// Send to server with clientstatus=failed
	}

	// Load each alloc back
	for _, alloc := range allocs {

		//XXX On Restore we give up on watching previous allocs because
		// we need the local AllocRunners initialized first. We could
		// add a second loop to initialize just the alloc watcher.
		prevAllocWatcher := allocwatcher.NoopPrevAlloc{}
		prevAllocMigrator := allocwatcher.NoopPrevAlloc{}

		c.configLock.RLock()
		arConf := &allocrunner.Config{
			Alloc:               alloc,
			Logger:              c.logger,
			ClientConfig:        c.configCopy,
			StateDB:             c.stateDB,
			StateUpdater:        c,
			DeviceStatsReporter: c,
			Consul:              c.consulService,
			Vault:               c.vaultClient,
			PrevAllocWatcher:    prevAllocWatcher,
			PrevAllocMigrator:   prevAllocMigrator,
			DeviceManager:       c.devicemanager,
			DriverManager:       c.drivermanager,
			ServersContactedCh:  c.serversContactedCh,
		}
		c.configLock.RUnlock()

		// A runner that cannot be constructed is reported to the server
		// as failed via handleInvalidAllocs.
		ar, err := allocrunner.NewAllocRunner(arConf)
		if err != nil {
			c.logger.Error("error running alloc", "error", err, "alloc_id", alloc.ID)
			c.handleInvalidAllocs(alloc, err)
			continue
		}

		// Restore state
		if err := ar.Restore(); err != nil {
			c.logger.Error("error restoring alloc", "error", err, "alloc_id", alloc.ID)
			// Override the status of the alloc to failed
			ar.SetClientStatus(structs.AllocClientStatusFailed)
			// Destroy the alloc runner since this is a failed restore
			ar.Destroy()
			continue
		}

		//XXX is this locking necessary?
		c.allocLock.Lock()
		c.allocs[alloc.ID] = ar
		c.allocLock.Unlock()
	}

	// All allocs restored successfully, run them!
	c.allocLock.Lock()
	for _, ar := range c.allocs {
		go ar.Run()
	}
	c.allocLock.Unlock()
	return nil
}
// handleInvalidAllocs records an allocation the client could not build a
// runner for, and queues a failed-status update so the server can react.
func (c *Client) handleInvalidAllocs(alloc *structs.Allocation, err error) {
	c.invalidAllocsLock.Lock()
	c.invalidAllocs[alloc.ID] = struct{}{}
	c.invalidAllocsLock.Unlock()

	// Mark the alloc as failed for the server; abort instead of blocking
	// if the client is shutting down.
	select {
	case c.allocUpdates <- makeFailedAlloc(alloc, err):
	case <-c.shutdownCh:
	}
}
// saveState snapshots every alloc runner's allocation into the state
// database concurrently, returning any accumulated errors.
func (c *Client) saveState() error {
	var (
		wg   sync.WaitGroup
		errL sync.Mutex
		mErr multierror.Error
	)

	for id, runner := range c.getAllocRunners() {
		wg.Add(1)
		go func(id string, runner AllocRunner) {
			defer wg.Done()
			if err := c.stateDB.PutAllocation(runner.Alloc()); err != nil {
				c.logger.Error("error saving alloc state", "error", err, "alloc_id", id)
				errL.Lock()
				multierror.Append(&mErr, err)
				errL.Unlock()
			}
		}(id, runner)
	}

	wg.Wait()
	return mErr.ErrorOrNil()
}
// getAllocRunners returns a point-in-time copy of the alloc runner map.
func (c *Client) getAllocRunners() map[string]AllocRunner {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	snapshot := make(map[string]AllocRunner, len(c.allocs))
	for id, runner := range c.allocs {
		snapshot[id] = runner
	}
	return snapshot
}
// NumAllocs returns the number of un-GC'd allocs this client has. Used to
// fulfill the AllocCounter interface for the GC.
func (c *Client) NumAllocs() int {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()

	count := 0
	for _, runner := range c.allocs {
		if !runner.IsDestroyed() {
			count++
		}
	}
	return count
}
// nodeID restores, or generates if necessary, a unique node ID and SecretID.
// The node ID is, if available, a persistent unique ID. The secret ID is a
// high-entropy random UUID. Outside dev mode both are persisted under the
// state directory so they survive restarts.
func (c *Client) nodeID() (id, secret string, err error) {
	var hostID string
	hostInfo, err := host.Info()
	if !c.config.NoHostUUID && err == nil {
		// Prefer a hashed host UUID so the node ID is stable for this
		// machine across reinstalls.
		if hashed, ok := helper.HashUUID(hostInfo.HostID); ok {
			hostID = hashed
		}
	}

	if hostID == "" {
		// Generate a random hostID if no constant ID is available on
		// this platform.
		hostID = uuid.Generate()
	}

	// Do not persist in dev mode
	if c.config.DevMode {
		return hostID, uuid.Generate(), nil
	}

	// Attempt to read existing ID
	idPath := filepath.Join(c.config.StateDir, "client-id")
	idBuf, err := ioutil.ReadFile(idPath)
	if err != nil && !os.IsNotExist(err) {
		return "", "", err
	}

	// Attempt to read existing secret ID
	secretPath := filepath.Join(c.config.StateDir, "secret-id")
	secretBuf, err := ioutil.ReadFile(secretPath)
	if err != nil && !os.IsNotExist(err) {
		return "", "", err
	}

	// Use existing ID if any
	if len(idBuf) != 0 {
		id = strings.ToLower(string(idBuf))
	} else {
		id = hostID

		// Persist the ID
		// NOTE(review): 0700 on a regular file sets the execute bit;
		// 0600 would be conventional — confirm before changing.
		if err := ioutil.WriteFile(idPath, []byte(id), 0700); err != nil {
			return "", "", err
		}
	}

	if len(secretBuf) != 0 {
		secret = string(secretBuf)
	} else {
		// Generate new ID
		secret = uuid.Generate()

		// Persist the ID
		if err := ioutil.WriteFile(secretPath, []byte(secret), 0700); err != nil {
			return "", "", err
		}
	}

	return id, secret, nil
}
// setupNode initializes c.config.Node: it assigns the node and secret IDs
// and fills every nil collection/struct field with a usable default.
func (c *Client) setupNode() error {
	if c.config.Node == nil {
		c.config.Node = &structs.Node{}
	}
	node := c.config.Node

	// Generate (or restore) the node's ID and secret.
	id, secretID, err := c.nodeID()
	if err != nil {
		return fmt.Errorf("node ID setup failed: %v", err)
	}
	node.ID = id
	node.SecretID = secretID

	// Default the map-valued fields.
	if node.Attributes == nil {
		node.Attributes = make(map[string]string)
	}
	if node.Links == nil {
		node.Links = make(map[string]string)
	}
	if node.Drivers == nil {
		node.Drivers = make(map[string]*structs.DriverInfo)
	}
	if node.Meta == nil {
		node.Meta = make(map[string]string)
	}

	// Default the resource structs.
	if node.NodeResources == nil {
		node.NodeResources = &structs.NodeResources{}
	}
	if node.ReservedResources == nil {
		node.ReservedResources = &structs.NodeReservedResources{}
	}
	if node.Resources == nil {
		node.Resources = &structs.Resources{}
	}
	if node.Reserved == nil {
		node.Reserved = &structs.Resources{}
	}

	// Default datacenter and name; the name falls back to the hostname and
	// finally to the node ID.
	if node.Datacenter == "" {
		node.Datacenter = "dc1"
	}
	if node.Name == "" {
		node.Name, _ = os.Hostname()
	}
	if node.Name == "" {
		node.Name = node.ID
	}
	node.Status = structs.NodeStatusInit
	return nil
}
// updateNodeFromFingerprint updates the node with the result of
// fingerprinting the node from the diff that was created. Empty attribute
// or link values delete the entry; resource diffs are merged. Returns the
// (possibly refreshed) node copy.
func (c *Client) updateNodeFromFingerprint(response *fingerprint.FingerprintResponse) *structs.Node {
	c.configLock.Lock()
	defer c.configLock.Unlock()

	nodeHasChanged := false

	// Apply attribute diffs; "" means delete.
	for name, newVal := range response.Attributes {
		oldVal := c.config.Node.Attributes[name]
		if oldVal == newVal {
			continue
		}

		nodeHasChanged = true
		if newVal == "" {
			delete(c.config.Node.Attributes, name)
		} else {
			c.config.Node.Attributes[name] = newVal
		}
	}

	// update node links and resources from the diff created from
	// fingerprinting
	for name, newVal := range response.Links {
		oldVal := c.config.Node.Links[name]
		if oldVal == newVal {
			continue
		}

		nodeHasChanged = true
		if newVal == "" {
			delete(c.config.Node.Links, name)
		} else {
			c.config.Node.Links[name] = newVal
		}
	}

	// COMPAT(0.10): Remove in 0.10
	// update the response networks with the config
	// if we still have node changes, merge them
	if response.Resources != nil {
		response.Resources.Networks = updateNetworks(
			c.config.Node.Resources.Networks,
			response.Resources.Networks,
			c.config)
		if !c.config.Node.Resources.Equals(response.Resources) {
			c.config.Node.Resources.Merge(response.Resources)
			nodeHasChanged = true
		}
	}

	// update the response networks with the config
	// if we still have node changes, merge them
	if response.NodeResources != nil {
		response.NodeResources.Networks = updateNetworks(
			c.config.Node.NodeResources.Networks,
			response.NodeResources.Networks,
			c.config)
		if !c.config.Node.NodeResources.Equals(response.NodeResources) {
			c.config.Node.NodeResources.Merge(response.NodeResources)
			nodeHasChanged = true
		}
	}

	// Refresh the node copy handed out to readers only when something
	// actually changed.
	if nodeHasChanged {
		c.updateNodeLocked()
	}

	return c.configCopy.Node
}
// updateNetworks preserves manually configured network options, but
// applies fingerprint updates.
func updateNetworks(ns structs.Networks, up structs.Networks, c *config.Config) structs.Networks {
	if c.NetworkInterface != "" {
		// A specific device is configured: keep only fingerprinted
		// entries for that device; if none match, fall back to the
		// configured interfaces in ns.
		matched := []*structs.NetworkResource{}
		for _, n := range up {
			if n.Device == c.NetworkInterface {
				matched = append(matched, n)
			}
		}
		if len(matched) > 0 {
			ns = matched
		}
	} else {
		ns = up
	}

	// Apply a configured NetworkSpeed override to every interface.
	if c.NetworkSpeed != 0 {
		for _, n := range ns {
			n.MBits = c.NetworkSpeed
		}
	}
	return ns
}
// retryIntv returns base plus a random stagger, collapsing to the short
// dev-mode interval when running in development mode.
func (c *Client) retryIntv(base time.Duration) time.Duration {
	if c.config.DevMode {
		return devModeRetryIntv
	}
	return base + lib.RandomStagger(base)
}
// registerAndHeartbeat is a long lived goroutine used to register the client
// and then start heartbeating to the server. It re-registers when the
// servers report the node as unknown, and triggers Consul discovery when
// heartbeats fail. Exits when shutdownCh closes.
func (c *Client) registerAndHeartbeat() {
	// Register the node
	c.retryRegisterNode()

	// Start watching changes for node changes
	go c.watchNodeUpdates()

	// Start watching for emitting node events
	go c.watchNodeEvents()

	// Setup the heartbeat timer, for the initial registration
	// we want to do this quickly. We want to do it extra quickly
	// in development mode.
	var heartbeat <-chan time.Time
	if c.config.DevMode {
		heartbeat = time.After(0)
	} else {
		heartbeat = time.After(lib.RandomStagger(initialHeartbeatStagger))
	}

	for {
		// Wake up on RPC retry, on the heartbeat timer, or on shutdown.
		select {
		case <-c.rpcRetryWatcher():
		case <-heartbeat:
		case <-c.shutdownCh:
			return
		}

		if err := c.updateNodeStatus(); err != nil {
			// The servers have changed such that this node has not been
			// registered before
			if strings.Contains(err.Error(), "node not found") {
				// Re-register the node
				c.logger.Info("re-registering node")
				c.retryRegisterNode()
				heartbeat = time.After(lib.RandomStagger(initialHeartbeatStagger))
			} else {
				intv := c.getHeartbeatRetryIntv(err)
				c.logger.Error("error heartbeating. retrying", "error", err, "period", intv)
				heartbeat = time.After(intv)

				// If heartbeating fails, trigger Consul discovery
				c.triggerDiscovery()
			}
		} else {
			// Successful heartbeat: schedule the next one at the
			// TTL the server handed back.
			c.heartbeatLock.Lock()
			heartbeat = time.After(c.heartbeatTTL)
			c.heartbeatLock.Unlock()
		}
	}
}
// getHeartbeatRetryIntv is used to retrieve the time to wait before
// attempting another heartbeat. Treats a never-heartbeated node or a
// leaderless cluster as a fresh registration; otherwise it staggers the
// retry inside the remaining TTL window, clamped to [1s, 30s].
func (c *Client) getHeartbeatRetryIntv(err error) time.Duration {
	if c.config.DevMode {
		return devModeRetryIntv
	}

	// Collect the useful heartbeat info
	c.heartbeatLock.Lock()
	haveHeartbeated := c.haveHeartbeated
	last := c.lastHeartbeat
	ttl := c.heartbeatTTL
	c.heartbeatLock.Unlock()

	// If we haven't even successfully heartbeated once or there is no leader
	// treat it as a registration. In the case that there is a leadership loss,
	// we will have our heartbeat timer reset to a much larger threshold, so
	// do not put unnecessary pressure on the new leader.
	if !haveHeartbeated || err == structs.ErrNoLeader {
		return c.retryIntv(registerRetryIntv)
	}

	// Determine how much time we have left to heartbeat.
	// (idiom fix: time.Until(t) over t.Sub(time.Now()) — staticcheck S1024)
	left := time.Until(last.Add(ttl))

	// Logic for retrying is:
	// * Do not retry faster than once a second
	// * Do not retry less that once every 30 seconds
	// * If we have missed the heartbeat by more than 30 seconds, start to use
	// the absolute time since we do not want to retry indefinitely
	switch {
	case left < -30*time.Second:
		// Make left the absolute value so we delay and jitter properly.
		left *= -1
	case left < 0:
		return time.Second + lib.RandomStagger(time.Second)
	default:
	}

	stagger := lib.RandomStagger(left)
	switch {
	case stagger < time.Second:
		return time.Second + lib.RandomStagger(time.Second)
	case stagger > 30*time.Second:
		return 25*time.Second + lib.RandomStagger(5*time.Second)
	default:
		return stagger
	}
}
// periodicSnapshot is a long lived goroutine that persists client state on
// a fixed interval until the client shuts down.
func (c *Client) periodicSnapshot() {
	timer := time.After(stateSnapshotIntv)
	for {
		select {
		case <-timer:
			// Re-arm before saving so the cadence is interval-based.
			timer = time.After(stateSnapshotIntv)
			if err := c.saveState(); err != nil {
				c.logger.Error("error saving state", "error", err)
			}
		case <-c.shutdownCh:
			return
		}
	}
}
// run is a long lived goroutine used to run the client. Shutdown() stops it first
func (c *Client) run() {
	// Watch for changes in allocations
	updatesCh := make(chan *allocUpdates, 8)
	go c.watchAllocations(updatesCh)

	for {
		select {
		case <-c.shutdownCh:
			return
		case update := <-updatesCh:
			// Don't apply updates while shutting down.
			c.shutdownLock.Lock()
			if c.shutdown {
				c.shutdownLock.Unlock()
				return
			}

			// Apply updates inside lock to prevent a concurrent
			// shutdown.
			c.runAllocs(update)
			c.shutdownLock.Unlock()
		}
	}
}
// submitNodeEvents is used to submit a client-side node event. Examples of
// these kinds of events include when a driver moves from healthy to unhealthy
// (and vice versa)
func (c *Client) submitNodeEvents(events []*structs.NodeEvent) error {
	// Events are keyed by this client's node ID in the request payload.
	req := structs.EmitNodeEventsRequest{
		NodeEvents: map[string][]*structs.NodeEvent{
			c.NodeID(): events,
		},
		WriteRequest: structs.WriteRequest{Region: c.Region()},
	}

	var resp structs.EmitNodeEventsResponse
	if err := c.RPC("Node.EmitEvents", &req, &resp); err != nil {
		return fmt.Errorf("Emitting node events failed: %v", err)
	}
	return nil
}
// watchNodeEvents is a handler which receives node events and on a interval
// and submits them in batch format to the server
func (c *Client) watchNodeEvents() {
	// batchEvents stores events that have yet to be published
	var batchEvents []*structs.NodeEvent

	// The timer is presumably stopped until the first Reset (see
	// stoppedTimer), so nothing is submitted before an event arrives.
	timer := stoppedTimer()
	defer timer.Stop()

	for {
		select {
		case event := <-c.triggerEmitNodeEvent:
			// NOTE(review): with <=, the batch can reach
			// MaxRetainedNodeEvents+1 entries before the oldest is
			// dropped — confirm whether this off-by-one is intended.
			if l := len(batchEvents); l <= structs.MaxRetainedNodeEvents {
				batchEvents = append(batchEvents, event)
			} else {
				// Drop the oldest event
				c.logger.Warn("dropping node event", "node_event", batchEvents[0])
				batchEvents = append(batchEvents[1:], event)
			}
			// Delay submission so consecutive events coalesce into one RPC.
			timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
		case <-timer.C:
			if err := c.submitNodeEvents(batchEvents); err != nil {
				// Keep the batch and retry after another interval.
				c.logger.Error("error submitting node events", "error", err)
				timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
			} else {
				// Reset the events since we successfully sent them.
				batchEvents = []*structs.NodeEvent{}
			}
		case <-c.shutdownCh:
			return
		}
	}
}
// triggerNodeEvent triggers a emit node event
func (c *Client) triggerNodeEvent(nodeEvent *structs.NodeEvent) {
	// Non-blocking send; watchNodeEvents drains this channel.
	select {
	case c.triggerEmitNodeEvent <- nodeEvent:
		// emit node event goroutine was released to execute
	default:
		// emit node event goroutine was already running
	}
}
// retryRegisterNode is used to register the node or update the registration and
// retry in case of failure.
func (c *Client) retryRegisterNode() {
	for {
		switch err := c.registerNode(); {
		case err == nil:
			// Registered!
			return
		case err == noServersErr:
			// No known servers yet; kick off Consul discovery.
			c.logger.Debug("registration waiting on servers")
			c.triggerDiscovery()
		default:
			c.logger.Error("error registering", "error", err)
		}

		// Wait for a retry trigger, the backoff interval, or shutdown.
		select {
		case <-c.rpcRetryWatcher():
		case <-time.After(c.retryIntv(registerRetryIntv)):
		case <-c.shutdownCh:
			return
		}
	}
}
// registerNode is used to register the node or update the registration
func (c *Client) registerNode() error {
	node := c.Node()
	req := structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: c.Region()},
	}

	var resp structs.NodeUpdateResponse
	if err := c.RPC("Node.Register", &req, &resp); err != nil {
		return err
	}

	// Update the node status to ready after we register.
	c.configLock.Lock()
	node.Status = structs.NodeStatusReady
	c.config.Node.Status = structs.NodeStatusReady
	c.configLock.Unlock()

	c.logger.Info("node registration complete")
	if n := len(resp.EvalIDs); n != 0 {
		c.logger.Debug("evaluations triggered by node registration", "num_evals", n)
	}

	// Seed heartbeat bookkeeping so the heartbeat loop measures from now.
	c.heartbeatLock.Lock()
	defer c.heartbeatLock.Unlock()
	c.lastHeartbeat = time.Now()
	c.heartbeatTTL = resp.HeartbeatTTL
	return nil
}
// updateNodeStatus is used to heartbeat and update the status of the node
func (c *Client) updateNodeStatus() error {
	start := time.Now()
	req := structs.NodeUpdateStatusRequest{
		NodeID:       c.NodeID(),
		Status:       structs.NodeStatusReady,
		WriteRequest: structs.WriteRequest{Region: c.Region()},
	}
	var resp structs.NodeUpdateResponse
	if err := c.RPC("Node.UpdateStatus", &req, &resp); err != nil {
		// Heartbeat failed; kick off Consul-based server discovery in case
		// we lost our servers.
		c.triggerDiscovery()
		return fmt.Errorf("failed to update status: %v", err)
	}
	end := time.Now()
	if len(resp.EvalIDs) != 0 {
		c.logger.Debug("evaluations triggered by node update", "num_evals", len(resp.EvalIDs))
	}

	// Update the last heartbeat and the new TTL, capturing the old values
	c.heartbeatLock.Lock()
	last := c.lastHeartbeat
	oldTTL := c.heartbeatTTL
	haveHeartbeated := c.haveHeartbeated
	c.lastHeartbeat = time.Now()
	c.heartbeatTTL = resp.HeartbeatTTL
	c.haveHeartbeated = true
	c.heartbeatLock.Unlock()
	c.logger.Trace("next heartbeat", "period", resp.HeartbeatTTL)

	// NOTE(review): a non-zero resp.Index appears to mean the server applied
	// a state change for this node (i.e. it had been marked down) — confirm
	// against the server-side Node.UpdateStatus handler.
	if resp.Index != 0 {
		c.logger.Debug("state updated", "node_status", req.Status)

		// We have potentially missed our TTL log how delayed we were
		if haveHeartbeated {
			c.logger.Warn("missed heartbeat",
				"req_latency", end.Sub(start), "heartbeat_ttl", oldTTL, "since_last_heartbeat", time.Since(last))
		}
	}

	// Update the number of nodes in the cluster so we can adjust our server
	// rebalance rate.
	c.servers.SetNumNodes(resp.NumNodes)

	// Convert []*NodeServerInfo to []*servers.Server, skipping any entry
	// whose advertised RPC address fails to resolve.
	nomadServers := make([]*servers.Server, 0, len(resp.Servers))
	for _, s := range resp.Servers {
		addr, err := resolveServer(s.RPCAdvertiseAddr)
		if err != nil {
			c.logger.Warn("ignoring invalid server", "error", err, "server", s.RPCAdvertiseAddr)
			continue
		}
		e := &servers.Server{DC: s.Datacenter, Addr: addr}
		nomadServers = append(nomadServers, e)
	}
	if len(nomadServers) == 0 {
		return fmt.Errorf("heartbeat response returned no valid servers")
	}
	c.servers.SetServers(nomadServers)

	// Begin polling Consul if there is no Nomad leader.  We could be
	// heartbeating to a Nomad server that is in the minority of a
	// partition of the Nomad server quorum, but this Nomad Agent still
	// has connectivity to the existing majority of Nomad Servers, but
	// only if it queries Consul.
	if resp.LeaderRPCAddr == "" {
		c.triggerDiscovery()
	}

	return nil
}
// AllocStateUpdated asynchronously updates the server with the current state
// of an allocations and its tasks.
func (c *Client) AllocStateUpdated(alloc *structs.Allocation) {
	if alloc.Terminated() {
		// Terminated, mark for GC if we're still tracking this alloc
		// runner. If it's not being tracked that means the server has
		// already GC'd it (see removeAlloc).
		if ar, err := c.getAllocRunner(alloc.ID); err == nil {
			c.garbageCollector.MarkForCollection(alloc.ID, ar)

			// Trigger a GC in case we're over thresholds and just
			// waiting for eligible allocs.
			c.garbageCollector.Trigger()
		}
	}

	// Strip all the information that can be reconstructed at the server. Only
	// send the fields that are updatable by the client.
	stripped := &structs.Allocation{
		ID:                alloc.ID,
		NodeID:            c.NodeID(),
		TaskStates:        alloc.TaskStates,
		ClientStatus:      alloc.ClientStatus,
		ClientDescription: alloc.ClientDescription,
		DeploymentStatus:  alloc.DeploymentStatus,
	}

	select {
	case c.allocUpdates <- stripped:
	case <-c.shutdownCh:
	}
}
// allocSync is a long lived function that batches allocation updates to the
// server.
func (c *Client) allocSync() {
	// staggered tracks whether the ticker currently runs on the backoff
	// (retry) interval instead of the normal sync interval.
	staggered := false
	syncTicker := time.NewTicker(allocSyncIntv)
	// updates coalesces per-alloc updates between ticks: only the most
	// recent update for each alloc ID is retained.
	updates := make(map[string]*structs.Allocation)
	for {
		select {
		case <-c.shutdownCh:
			syncTicker.Stop()
			return
		case alloc := <-c.allocUpdates:
			// Batch the allocation updates until the timer triggers.
			updates[alloc.ID] = alloc
		case <-syncTicker.C:
			// Fast path if there are no updates
			if len(updates) == 0 {
				continue
			}

			sync := make([]*structs.Allocation, 0, len(updates))
			for _, alloc := range updates {
				sync = append(sync, alloc)
			}

			// Send to server.
			args := structs.AllocUpdateRequest{
				Alloc:        sync,
				WriteRequest: structs.WriteRequest{Region: c.Region()},
			}

			var resp structs.GenericResponse
			if err := c.RPC("Node.UpdateAlloc", &args, &resp); err != nil {
				// Keep the updates map intact so the same batch is retried
				// on the next (staggered) tick.
				c.logger.Error("error updating allocations", "error", err)
				syncTicker.Stop()
				syncTicker = time.NewTicker(c.retryIntv(allocSyncRetryIntv))
				staggered = true
			} else {
				updates = make(map[string]*structs.Allocation)
				// Restore the normal interval after a successful retry.
				if staggered {
					syncTicker.Stop()
					syncTicker = time.NewTicker(allocSyncIntv)
					staggered = false
				}
			}
		}
	}
}
// allocUpdates holds the results of receiving updated allocations from the
// servers.
type allocUpdates struct {
	// pulled is the set of allocations that were downloaded from the servers,
	// keyed by alloc ID.
	pulled map[string]*structs.Allocation

	// filtered is the set of allocations (by alloc ID) that were not pulled
	// because their AllocModifyIndex didn't change.
	filtered map[string]struct{}

	// migrateTokens are a list of tokens necessary for when clients pull data
	// from authorized volumes, keyed by alloc ID.
	migrateTokens map[string]string
}
// watchAllocations is used to scan for updates to allocations
func (c *Client) watchAllocations(updates chan *allocUpdates) {
	// The request and response for getting the map of allocations that should
	// be running on the Node to their AllocModifyIndex which is incremented
	// when the allocation is updated by the servers.
	req := structs.NodeSpecificRequest{
		NodeID:   c.NodeID(),
		SecretID: c.secretNodeID(),
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}
	var resp structs.NodeClientAllocsResponse

	// The request and response for pulling down the set of allocations that are
	// new, or updated server side.
	allocsReq := structs.AllocsGetRequest{
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: true,
		},
	}
	var allocsResp structs.AllocsGetResponse

OUTER:
	for {
		// Get the allocation modify index map, blocking for updates. We will
		// use this to determine exactly what allocations need to be downloaded
		// in full.
		resp = structs.NodeClientAllocsResponse{}
		err := c.RPC("Node.GetClientAllocs", &req, &resp)
		if err != nil {
			// Shutdown often causes EOF errors, so check for shutdown first
			select {
			case <-c.shutdownCh:
				return
			default:
			}

			// COMPAT: Remove in 0.6. This is to allow the case in which the
			// servers are not fully upgraded before the clients register. This
			// can cause the SecretID to be lost
			if strings.Contains(err.Error(), "node secret ID does not match") {
				c.logger.Debug("secret mismatch; re-registering node", "error", err)
				c.retryRegisterNode()
			} else if err != noServersErr {
				c.logger.Error("error querying node allocations", "error", err)
			}
			// Back off before retrying; a fired rpcRetryWatcher short-circuits
			// the wait.
			retry := c.retryIntv(getAllocRetryIntv)
			select {
			case <-c.rpcRetryWatcher():
				continue
			case <-time.After(retry):
				continue
			case <-c.shutdownCh:
				return
			}
		}

		// Check for shutdown
		select {
		case <-c.shutdownCh:
			return
		default:
		}

		// Filter all allocations whose AllocModifyIndex was not incremented.
		// These are the allocations who have either not been updated, or whose
		// updates are a result of the client sending an update for the alloc.
		// This lets us reduce the network traffic to the server as we don't
		// need to pull all the allocations.
		var pull []string
		filtered := make(map[string]struct{})
		var pullIndex uint64
		for allocID, modifyIndex := range resp.Allocs {
			// Pull the allocation if we don't have an alloc runner for the
			// allocation or if the alloc runner requires an updated allocation.
			//XXX Part of Client alloc index tracking exp
			c.allocLock.RLock()
			currentAR, ok := c.allocs[allocID]
			c.allocLock.RUnlock()

			// Ignore alloc updates for allocs that are invalid because of initialization errors
			c.invalidAllocsLock.Lock()
			_, isInvalid := c.invalidAllocs[allocID]
			c.invalidAllocsLock.Unlock()

			if (!ok || modifyIndex > currentAR.Alloc().AllocModifyIndex) && !isInvalid {
				// Only pull allocs that are required. Filtered
				// allocs might be at a higher index, so ignore
				// it.
				if modifyIndex > pullIndex {
					pullIndex = modifyIndex
				}
				pull = append(pull, allocID)
			} else {
				filtered[allocID] = struct{}{}
			}
		}

		// Pull the allocations that passed filtering.
		allocsResp.Allocs = nil
		var pulledAllocs map[string]*structs.Allocation
		if len(pull) != 0 {
			// Pull the allocations that need to be updated.
			allocsReq.AllocIDs = pull
			// MinQueryIndex is one below the highest index we saw so the
			// blocking query returns immediately with those allocs.
			allocsReq.MinQueryIndex = pullIndex - 1
			allocsResp = structs.AllocsGetResponse{}
			if err := c.RPC("Alloc.GetAllocs", &allocsReq, &allocsResp); err != nil {
				c.logger.Error("error querying updated allocations", "error", err)
				retry := c.retryIntv(getAllocRetryIntv)
				select {
				case <-c.rpcRetryWatcher():
					continue
				case <-time.After(retry):
					continue
				case <-c.shutdownCh:
					return
				}
			}

			// Ensure that we received all the allocations we wanted
			pulledAllocs = make(map[string]*structs.Allocation, len(allocsResp.Allocs))
			for _, alloc := range allocsResp.Allocs {
				pulledAllocs[alloc.ID] = alloc
			}

			for _, desiredID := range pull {
				if _, ok := pulledAllocs[desiredID]; !ok {
					// We didn't get everything we wanted. Do not update the
					// MinQueryIndex, sleep and then retry.
					wait := c.retryIntv(2 * time.Second)
					select {
					case <-time.After(wait):
						// Wait for the server we contact to receive the
						// allocations
						continue OUTER
					case <-c.shutdownCh:
						return
					}
				}
			}

			// Check for shutdown
			select {
			case <-c.shutdownCh:
				return
			default:
			}
		}

		c.logger.Debug("updated allocations", "index", resp.Index,
			"total", len(resp.Allocs), "pulled", len(allocsResp.Allocs), "filtered", len(filtered))

		// Update the query index so the next blocking query only returns on
		// newer state.
		if resp.Index > req.MinQueryIndex {
			req.MinQueryIndex = resp.Index
		}

		// Push the updates.
		update := &allocUpdates{
			filtered:      filtered,
			pulled:        pulledAllocs,
			migrateTokens: resp.MigrateTokens,
		}

		select {
		case updates <- update:
		case <-c.shutdownCh:
			return
		}
	}
}
// updateNodeLocked updates the Node copy and triggers the client to send the
// updated Node to the server. This should be done while the caller holds the
// configLock lock.
func (c *Client) updateNodeLocked() {
	// Refresh the node held in the config copy.
	c.configCopy.Node = c.config.Node.Copy()

	// Non-blocking edge trigger for the node-update watcher.
	select {
	case c.triggerNodeUpdate <- struct{}{}:
		// Node update goroutine was released to execute
	default:
		// Node update goroutine was already running
	}
}
// watchNodeUpdates blocks until it is edge triggered. Once triggered,
// it will update the client node copy and re-register the node.
func (c *Client) watchNodeUpdates() {
	// pending is true while a re-registration is scheduled on the timer.
	var pending bool
	timer := stoppedTimer()
	defer timer.Stop()

	for {
		select {
		case <-c.triggerNodeUpdate:
			// Coalesce triggers: if a re-register is already scheduled,
			// don't push the timer out again.
			if pending {
				continue
			}
			pending = true
			timer.Reset(c.retryIntv(nodeUpdateRetryIntv))
		case <-timer.C:
			c.logger.Debug("state changed, updating node and re-registering")
			c.retryRegisterNode()
			pending = false
		case <-c.shutdownCh:
			return
		}
	}
}
// runAllocs is invoked when we get an updated set of allocations
func (c *Client) runAllocs(update *allocUpdates) {
	// Get the existing allocs
	c.allocLock.RLock()
	existing := make(map[string]uint64, len(c.allocs))
	for id, ar := range c.allocs {
		existing[id] = ar.Alloc().AllocModifyIndex
	}
	c.allocLock.RUnlock()

	// Diff the existing and updated allocations
	diff := diffAllocs(existing, update)
	c.logger.Debug("allocation updates", "added", len(diff.added), "removed", len(diff.removed),
		"updated", len(diff.updated), "ignored", len(diff.ignore))

	// errs counts non-fatal failures for the summary log below.
	errs := 0

	// Remove the old allocations
	for _, remove := range diff.removed {
		c.removeAlloc(remove)
	}

	// Update the existing allocations
	for _, update := range diff.updated {
		c.logger.Trace("updating alloc", "alloc_id", update.ID, "index", update.AllocModifyIndex)
		c.updateAlloc(update)
	}

	// Make room for new allocations before running
	if err := c.garbageCollector.MakeRoomFor(diff.added); err != nil {
		c.logger.Error("error making room for new allocations", "error", err)
		errs++
	}

	// Start the new allocations
	for _, add := range diff.added {
		migrateToken := update.migrateTokens[add.ID]
		if err := c.addAlloc(add, migrateToken); err != nil {
			c.logger.Error("error adding alloc", "error", err, "alloc_id", add.ID)
			errs++
			// We mark the alloc as failed and send an update to the server
			// We track the fact that creating an allocrunner failed so that we don't send updates again
			if add.ClientStatus != structs.AllocClientStatusFailed {
				c.handleInvalidAllocs(add, err)
			}
		}
	}

	// Mark servers as having been contacted so blocked tasks that failed
	// to restore can now restart.
	c.serversContactedOnce.Do(func() {
		close(c.serversContactedCh)
	})

	// Trigger the GC once more now that new allocs are started that could
	// have caused thresholds to be exceeded
	c.garbageCollector.Trigger()
	c.logger.Debug("allocation updates applied", "added", len(diff.added), "removed", len(diff.removed),
		"updated", len(diff.updated), "ignored", len(diff.ignore), "errors", errs)
}
// makeFailedAlloc creates a stripped down version of the allocation passed in
// with its status set to failed and other fields needed for the server to be
// able to examine deployment and task states
func makeFailedAlloc(add *structs.Allocation, err error) *structs.Allocation {
	stripped := &structs.Allocation{
		ID:                add.ID,
		NodeID:            add.NodeID,
		ClientStatus:      structs.AllocClientStatusFailed,
		ClientDescription: fmt.Sprintf("Unable to add allocation due to error: %v", err),
	}

	// Reuse the original task states when present, otherwise start empty.
	if add.TaskStates != nil {
		stripped.TaskStates = add.TaskStates
	} else {
		stripped.TaskStates = make(map[string]*structs.TaskState)
	}

	failTime := time.Now()
	if add.DeploymentStatus.HasHealth() {
		// Never change deployment health once it has been set
		stripped.DeploymentStatus = add.DeploymentStatus.Copy()
	} else {
		stripped.DeploymentStatus = &structs.AllocDeploymentStatus{
			Healthy:   helper.BoolToPtr(false),
			Timestamp: failTime,
		}
	}

	// Ensure every task in the group has a state carrying a finish time.
	taskGroup := add.Job.LookupTaskGroup(add.TaskGroup)
	if taskGroup == nil {
		return stripped
	}
	for _, task := range taskGroup.Tasks {
		ts, ok := stripped.TaskStates[task.Name]
		if !ok {
			ts = &structs.TaskState{}
			stripped.TaskStates[task.Name] = ts
		}
		if ts.FinishedAt.IsZero() {
			ts.FinishedAt = failTime
		}
	}
	return stripped
}
// removeAlloc is invoked when we should remove an allocation because it has
// been removed by the server.
func (c *Client) removeAlloc(allocID string) {
	c.allocLock.Lock()
	defer c.allocLock.Unlock()

	ar, tracked := c.allocs[allocID]
	if !tracked {
		// No live runner; the alloc may still be recorded as invalid.
		c.invalidAllocsLock.Lock()
		if _, ok := c.invalidAllocs[allocID]; ok {
			// Removing from invalid allocs map if present
			delete(c.invalidAllocs, allocID)
		} else {
			// Alloc is unknown, log a warning.
			c.logger.Warn("cannot remove nonexistent alloc", "alloc_id", allocID, "error", "alloc not found")
		}
		c.invalidAllocsLock.Unlock()
		return
	}

	// Stop tracking alloc runner as it's been GC'd by the server
	delete(c.allocs, allocID)

	// Ensure the GC has a reference and then collect. Collecting through the GC
	// applies rate limiting
	c.garbageCollector.MarkForCollection(allocID, ar)

	// GC immediately since the server has GC'd it
	go c.garbageCollector.Collect(allocID)
}
// updateAlloc is invoked when we should update an allocation
func (c *Client) updateAlloc(update *structs.Allocation) {
	runner, err := c.getAllocRunner(update.ID)
	if err != nil {
		c.logger.Warn("cannot update nonexistent alloc", "alloc_id", update.ID)
		return
	}

	// Persist the updated alloc locally before handing it to the runner.
	if err := c.stateDB.PutAllocation(update); err != nil {
		c.logger.Error("error persisting updated alloc locally", "error", err, "alloc_id", update.ID)
	}

	// Update alloc runner
	runner.Update(update)
}
// addAlloc is invoked when we should add an allocation
func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error {
	c.allocLock.Lock()
	defer c.allocLock.Unlock()

	// Check if we already have an alloc runner
	if _, ok := c.allocs[alloc.ID]; ok {
		c.logger.Debug("dropping duplicate add allocation request", "alloc_id", alloc.ID)
		return nil
	}

	// Initialize local copy of alloc before creating the alloc runner so
	// we can't end up with an alloc runner that does not have an alloc.
	if err := c.stateDB.PutAllocation(alloc); err != nil {
		return err
	}

	// Collect any preempted allocations to pass into the previous alloc watcher
	var preemptedAllocs map[string]allocwatcher.AllocRunnerMeta
	if len(alloc.PreemptedAllocations) > 0 {
		preemptedAllocs = make(map[string]allocwatcher.AllocRunnerMeta)
		for _, palloc := range alloc.PreemptedAllocations {
			// Safe to read c.allocs here: allocLock is held for the whole
			// function.
			preemptedAllocs[palloc] = c.allocs[palloc]
		}
	}

	// Since only the Client has access to other AllocRunners and the RPC
	// client, create the previous allocation watcher here.
	watcherConfig := allocwatcher.Config{
		Alloc:            alloc,
		PreviousRunner:   c.allocs[alloc.PreviousAllocation],
		PreemptedRunners: preemptedAllocs,
		RPC:              c,
		Config:           c.configCopy,
		MigrateToken:     migrateToken,
		Logger:           c.logger,
	}
	prevAllocWatcher, prevAllocMigrator := allocwatcher.NewAllocWatcher(watcherConfig)

	// Copy the config since the node can be swapped out as it is being updated.
	// The long term fix is to pass in the config and node separately and then
	// we don't have to do a copy.
	c.configLock.RLock()
	arConf := &allocrunner.Config{
		Alloc:               alloc,
		Logger:              c.logger,
		ClientConfig:        c.configCopy,
		StateDB:             c.stateDB,
		Consul:              c.consulService,
		Vault:               c.vaultClient,
		StateUpdater:        c,
		DeviceStatsReporter: c,
		PrevAllocWatcher:    prevAllocWatcher,
		PrevAllocMigrator:   prevAllocMigrator,
		DeviceManager:       c.devicemanager,
		DriverManager:       c.drivermanager,
	}
	c.configLock.RUnlock()

	ar, err := allocrunner.NewAllocRunner(arConf)
	if err != nil {
		return err
	}

	// Store the alloc runner.
	c.allocs[alloc.ID] = ar

	// Run the alloc runner in its own goroutine; its lifetime is managed by
	// the runner itself and the client's shutdown path.
	go ar.Run()
	return nil
}
// setupVaultClient creates an object to periodically renew tokens and secrets
// with vault.
func (c *Client) setupVaultClient() error {
	var err error
	if c.vaultClient, err = vaultclient.NewVaultClient(c.config.VaultConfig, c.logger, c.deriveToken); err != nil {
		return err
	}

	// Defensive nil check: the constructor should never return (nil, nil).
	if c.vaultClient == nil {
		c.logger.Error("failed to create vault client")
		return fmt.Errorf("failed to create vault client")
	}

	// Start renewing tokens and secrets
	c.vaultClient.Start()

	return nil
}
// deriveToken takes in an allocation and a set of tasks and derives vault
// tokens for each of the tasks, unwraps all of them using the supplied vault
// client and returns a map of unwrapped tokens, indexed by the task name.
//
// It returns an error if the allocation is nil, no task names are given, any
// named task is not part of the allocation's task group, the server RPC
// fails, or any token cannot be unwrapped/validated.
func (c *Client) deriveToken(alloc *structs.Allocation, taskNames []string, vclient *vaultapi.Client) (map[string]string, error) {
	vlogger := c.logger.Named("vault")

	if alloc == nil {
		return nil, fmt.Errorf("nil allocation")
	}
	// len(nil slice) == 0, so the explicit nil check was redundant (S1009).
	if len(taskNames) == 0 {
		return nil, fmt.Errorf("missing task names")
	}

	group := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
	if group == nil {
		return nil, fmt.Errorf("group name in allocation is not present in job")
	}

	verifiedTasks := []string{}
	// Check if the given task names actually exist in the allocation
	for _, taskName := range taskNames {
		found := false
		for _, task := range group.Tasks {
			if task.Name == taskName {
				found = true
				break
			}
		}
		if !found {
			vlogger.Error("task not found in the allocation", "task_name", taskName)
			return nil, fmt.Errorf("task %q not found in the allocation", taskName)
		}
		verifiedTasks = append(verifiedTasks, taskName)
	}

	// DeriveVaultToken of nomad server can take in a set of tasks and
	// creates tokens for all the tasks.
	req := &structs.DeriveVaultTokenRequest{
		NodeID:   c.NodeID(),
		SecretID: c.secretNodeID(),
		AllocID:  alloc.ID,
		Tasks:    verifiedTasks,
		QueryOptions: structs.QueryOptions{
			Region:     c.Region(),
			AllowStale: false,
		},
	}

	// Derive the tokens
	var resp structs.DeriveVaultTokenResponse
	if err := c.RPC("Node.DeriveVaultToken", &req, &resp); err != nil {
		vlogger.Error("error making derive token RPC", "error", err)
		return nil, fmt.Errorf("DeriveVaultToken RPC failed: %v", err)
	}
	if resp.Error != nil {
		vlogger.Error("error deriving vault tokens", "error", resp.Error)
		return nil, structs.NewWrappedServerError(resp.Error)
	}
	if resp.Tasks == nil {
		// Fixed typo in the log message ("derivng" -> "deriving").
		vlogger.Error("error deriving vault token", "error", "invalid response")
		return nil, fmt.Errorf("failed to derive vault tokens: invalid response")
	}

	unwrappedTokens := make(map[string]string)

	// Retrieve the wrapped tokens from the response and unwrap it
	for _, taskName := range verifiedTasks {
		// Get the wrapped token
		wrappedToken, ok := resp.Tasks[taskName]
		if !ok {
			vlogger.Error("wrapped token missing for task", "task_name", taskName)
			return nil, fmt.Errorf("wrapped token missing for task %q", taskName)
		}

		// Unwrap the vault token
		unwrapResp, err := vclient.Logical().Unwrap(wrappedToken)
		if err != nil {
			if structs.VaultUnrecoverableError.MatchString(err.Error()) {
				return nil, err
			}

			// The error is recoverable
			return nil, structs.NewRecoverableError(
				fmt.Errorf("failed to unwrap the token for task %q: %v", taskName, err), true)
		}

		// Validate the response
		var validationErr error
		if unwrapResp == nil {
			validationErr = fmt.Errorf("Vault returned nil secret when unwrapping")
		} else if unwrapResp.Auth == nil {
			validationErr = fmt.Errorf("Vault returned unwrap secret with nil Auth. Secret warnings: %v", unwrapResp.Warnings)
		} else if unwrapResp.Auth.ClientToken == "" {
			validationErr = fmt.Errorf("Vault returned unwrap secret with empty Auth.ClientToken. Secret warnings: %v", unwrapResp.Warnings)
		}
		if validationErr != nil {
			// Bug fix: previously this logged err, which is guaranteed nil
			// here (Unwrap succeeded), hiding the actual validation failure.
			vlogger.Warn("error unwrapping token", "error", validationErr)
			return nil, structs.NewRecoverableError(validationErr, true)
		}

		// Append the unwrapped token to the return value
		unwrappedTokens[taskName] = unwrapResp.Auth.ClientToken
	}

	return unwrappedTokens, nil
}
// triggerDiscovery causes a Consul discovery to begin (if one hasn't already)
func (c *Client) triggerDiscovery() {
	// Non-blocking send; consulDiscovery drains this channel.
	select {
	case c.triggerDiscoveryCh <- struct{}{}:
		// Discovery goroutine was released to execute
	default:
		// Discovery goroutine was already running
	}
}
// consulDiscovery waits for the signal to attempt server discovery via Consul.
// It's intended to be started in a goroutine. See triggerDiscovery() for
// causing consul discovery from other code locations.
func (c *Client) consulDiscovery() {
	for {
		select {
		case <-c.shutdownCh:
			return
		case <-c.triggerDiscoveryCh:
			// Discovery failures are logged and retried on the next trigger.
			if err := c.consulDiscoveryImpl(); err != nil {
				c.logger.Error("error discovering nomad servers", "error", err)
			}
		}
	}
}
// consulDiscoveryImpl performs one Consul-based discovery pass for Nomad
// servers in this client's region. It returns an error only when no servers
// could be discovered.
func (c *Client) consulDiscoveryImpl() error {
	consulLogger := c.logger.Named("consul")

	dcs, err := c.consulCatalog.Datacenters()
	if err != nil {
		return fmt.Errorf("client.consul: unable to query Consul datacenters: %v", err)
	}
	if len(dcs) > 2 {
		// Query the local DC first, then shuffle the
		// remaining DCs. Future heartbeats will cause Nomad
		// Clients to fixate on their local datacenter so
		// it's okay to talk with remote DCs. If the no
		// Nomad servers are available within
		// datacenterQueryLimit, the next heartbeat will pick
		// a new set of servers so it's okay.
		shuffleStrings(dcs[1:])
		dcs = dcs[0:lib.MinInt(len(dcs), datacenterQueryLimit)]
	}

	// Query for servers in this client's region only
	region := c.Region()
	rpcargs := structs.GenericRequest{
		QueryOptions: structs.QueryOptions{
			Region: region,
		},
	}

	serviceName := c.configCopy.ConsulConfig.ServerServiceName
	var mErr multierror.Error
	var nomadServers servers.Servers
	consulLogger.Debug("bootstrap contacting Consul DCs", "consul_dcs", dcs)
DISCOLOOP:
	for _, dc := range dcs {
		consulOpts := &consulapi.QueryOptions{
			AllowStale: true,
			Datacenter: dc,
			Near:       "_agent",
			WaitTime:   consul.DefaultQueryWaitDuration,
		}
		consulServices, _, err := c.consulCatalog.Service(serviceName, consul.ServiceTagRPC, consulOpts)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("unable to query service %+q from Consul datacenter %+q: %v", serviceName, dc, err))
			continue
		}

		for _, s := range consulServices {
			port := strconv.Itoa(s.ServicePort)
			addrstr := s.ServiceAddress
			if addrstr == "" {
				addrstr = s.Address
			}
			addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(addrstr, port))
			if err != nil {
				mErr.Errors = append(mErr.Errors, err)
				continue
			}
			var peers []string
			if err := c.connPool.RPC(region, addr, c.RPCMajorVersion(), "Status.Peers", rpcargs, &peers); err != nil {
				mErr.Errors = append(mErr.Errors, err)
				continue
			}

			// Successfully received the Server peers list of the correct
			// region
			for _, p := range peers {
				addr, err := net.ResolveTCPAddr("tcp", p)
				if err != nil {
					mErr.Errors = append(mErr.Errors, err)
					// Bug fix: previously a server with a nil Addr was
					// appended even when peer resolution failed.
					continue
				}
				srv := &servers.Server{Addr: addr}
				nomadServers = append(nomadServers, srv)
			}

			if len(nomadServers) > 0 {
				break DISCOLOOP
			}
		}
	}
	if len(nomadServers) == 0 {
		if len(mErr.Errors) > 0 {
			return mErr.ErrorOrNil()
		}
		return fmt.Errorf("no Nomad Servers advertising service %q in Consul datacenters: %+q", serviceName, dcs)
	}

	consulLogger.Info("discovered following servers", "servers", nomadServers)

	// Fire the retry trigger if we have updated the set of servers.
	if c.servers.SetServers(nomadServers) {
		// Start rebalancing
		c.servers.RebalanceServers()

		// Notify waiting rpc calls. If a goroutine just failed an RPC call and
		// isn't receiving on this chan yet they'll still retry eventually.
		// This is a shortcircuit for the longer retry intervals.
		c.fireRpcRetryWatcher()
	}
	return nil
}
// emitStats collects host resource usage stats periodically
func (c *Client) emitStats() {
	// Determining NodeClass to be emitted
	emittedNodeClass := c.Node().NodeClass
	if emittedNodeClass == "" {
		emittedNodeClass = "none"
	}

	// Assign labels directly before emitting stats so the information expected
	// is ready
	c.baseLabels = []metrics.Label{
		{Name: "node_id", Value: c.NodeID()},
		{Name: "datacenter", Value: c.Datacenter()},
		{Name: "node_class", Value: emittedNodeClass},
	}

	// Start collecting host stats right away and then keep collecting every
	// collection interval
	next := time.NewTimer(0)
	defer next.Stop()
	for {
		select {
		case <-c.shutdownCh:
			return
		case <-next.C:
			err := c.hostStatsCollector.Collect()
			next.Reset(c.config.StatsCollectionInterval)
			if err != nil {
				c.logger.Warn("error fetching host resource usage stats", "error", err)
				continue
			}

			// Publish Node metrics if operator has opted in
			if c.config.PublishNodeMetrics {
				c.emitHostStats()
			}

			c.emitClientMetrics()
		}
	}
}
// setGaugeForMemoryStats proxies metrics for memory specific statistics
func (c *Client) setGaugeForMemoryStats(nodeID string, hStats *stats.HostStats) {
	// Emit each memory statistic under the tagged and/or the legacy
	// node-scoped key space, depending on configuration.
	memStats := []struct {
		name  string
		value float32
	}{
		{"total", float32(hStats.Memory.Total)},
		{"available", float32(hStats.Memory.Available)},
		{"used", float32(hStats.Memory.Used)},
		{"free", float32(hStats.Memory.Free)},
	}

	if !c.config.DisableTaggedMetrics {
		for _, m := range memStats {
			metrics.SetGaugeWithLabels([]string{"client", "host", "memory", m.name}, m.value, c.baseLabels)
		}
	}

	if c.config.BackwardsCompatibleMetrics {
		for _, m := range memStats {
			metrics.SetGauge([]string{"client", "host", "memory", nodeID, m.name}, m.value)
		}
	}
}
// setGaugeForCPUStats proxies metrics for CPU specific statistics
func (c *Client) setGaugeForCPUStats(nodeID string, hStats *stats.HostStats) {
	// One set of gauges is emitted per CPU core found in the host stats.
	for _, cpu := range hStats.CPU {
		if !c.config.DisableTaggedMetrics {
			// NOTE(review): appending to the shared c.baseLabels is only
			// safe because the base slice is built as a 3-element literal
			// (len == cap), so append always allocates a fresh backing
			// array — confirm this invariant holds if baseLabels changes.
			labels := append(c.baseLabels, metrics.Label{
				Name:  "cpu",
				Value: cpu.CPU,
			})

			metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "total"}, float32(cpu.Total), labels)
			metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "user"}, float32(cpu.User), labels)
			metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "idle"}, float32(cpu.Idle), labels)
			metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "system"}, float32(cpu.System), labels)
		}

		// Legacy, node-ID-scoped metric names for backwards compatibility.
		if c.config.BackwardsCompatibleMetrics {
			metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpu.CPU, "total"}, float32(cpu.Total))
			metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpu.CPU, "user"}, float32(cpu.User))
			metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpu.CPU, "idle"}, float32(cpu.Idle))
			metrics.SetGauge([]string{"client", "host", "cpu", nodeID, cpu.CPU, "system"}, float32(cpu.System))
		}
	}
}
// setGaugeForDiskStats proxies metrics for disk specific statistics
func (c *Client) setGaugeForDiskStats(nodeID string, hStats *stats.HostStats) {
	// One set of gauges is emitted per disk device found in the host stats.
	for _, disk := range hStats.DiskStats {
		if !c.config.DisableTaggedMetrics {
			// NOTE(review): appending to the shared c.baseLabels is only
			// safe because the base slice is built as a 3-element literal
			// (len == cap), so append always allocates a fresh backing
			// array — confirm this invariant holds if baseLabels changes.
			labels := append(c.baseLabels, metrics.Label{
				Name:  "disk",
				Value: disk.Device,
			})

			metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "size"}, float32(disk.Size), labels)
			metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "used"}, float32(disk.Used), labels)
			metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "available"}, float32(disk.Available), labels)
			metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "used_percent"}, float32(disk.UsedPercent), labels)
			metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "inodes_percent"}, float32(disk.InodesUsedPercent), labels)
		}

		// Legacy, node-ID-scoped metric names for backwards compatibility.
		if c.config.BackwardsCompatibleMetrics {
			metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "size"}, float32(disk.Size))
			metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "used"}, float32(disk.Used))
			metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "available"}, float32(disk.Available))
			metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "used_percent"}, float32(disk.UsedPercent))
			metrics.SetGauge([]string{"client", "host", "disk", nodeID, disk.Device, "inodes_percent"}, float32(disk.InodesUsedPercent))
		}
	}
}
// setGaugeForAllocationStats proxies metrics for allocation specific statistics
func (c *Client) setGaugeForAllocationStats(nodeID string) {
	c.configLock.RLock()
	node := c.configCopy.Node
	c.configLock.RUnlock()
	total := node.NodeResources
	res := node.ReservedResources
	allocated := c.getAllocatedResources(node)

	// Emit allocated
	if !c.config.DisableTaggedMetrics {
		metrics.SetGaugeWithLabels([]string{"client", "allocated", "memory"}, float32(allocated.Flattened.Memory.MemoryMB), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "allocated", "disk"}, float32(allocated.Shared.DiskMB), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "allocated", "cpu"}, float32(allocated.Flattened.Cpu.CpuShares), c.baseLabels)
	}
	if c.config.BackwardsCompatibleMetrics {
		metrics.SetGauge([]string{"client", "allocated", "memory", nodeID}, float32(allocated.Flattened.Memory.MemoryMB))
		metrics.SetGauge([]string{"client", "allocated", "disk", nodeID}, float32(allocated.Shared.DiskMB))
		metrics.SetGauge([]string{"client", "allocated", "cpu", nodeID}, float32(allocated.Flattened.Cpu.CpuShares))
	}

	for _, n := range allocated.Flattened.Networks {
		if !c.config.DisableTaggedMetrics {
			// Copy base labels before appending; appending directly to
			// c.baseLabels may reuse its backing array so iterations could
			// overwrite each other's "device" label.
			labels := make([]metrics.Label, len(c.baseLabels), len(c.baseLabels)+1)
			copy(labels, c.baseLabels)
			labels = append(labels, metrics.Label{
				Name:  "device",
				Value: n.Device,
			})
			metrics.SetGaugeWithLabels([]string{"client", "allocated", "network"}, float32(n.MBits), labels)
		}
		if c.config.BackwardsCompatibleMetrics {
			metrics.SetGauge([]string{"client", "allocated", "network", n.Device, nodeID}, float32(n.MBits))
		}
	}

	// Emit unallocated
	unallocatedMem := total.Memory.MemoryMB - res.Memory.MemoryMB - allocated.Flattened.Memory.MemoryMB
	unallocatedDisk := total.Disk.DiskMB - res.Disk.DiskMB - allocated.Shared.DiskMB
	unallocatedCpu := total.Cpu.CpuShares - res.Cpu.CpuShares - allocated.Flattened.Cpu.CpuShares

	if !c.config.DisableTaggedMetrics {
		metrics.SetGaugeWithLabels([]string{"client", "unallocated", "memory"}, float32(unallocatedMem), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "unallocated", "disk"}, float32(unallocatedDisk), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "unallocated", "cpu"}, float32(unallocatedCpu), c.baseLabels)
	}
	if c.config.BackwardsCompatibleMetrics {
		metrics.SetGauge([]string{"client", "unallocated", "memory", nodeID}, float32(unallocatedMem))
		metrics.SetGauge([]string{"client", "unallocated", "disk", nodeID}, float32(unallocatedDisk))
		metrics.SetGauge([]string{"client", "unallocated", "cpu", nodeID}, float32(unallocatedCpu))
	}

	totalComparable := total.Comparable()
	for _, n := range totalComparable.Flattened.Networks {
		// Determined the used resources
		var usedMbits int
		totalIdx := allocated.Flattened.Networks.NetIndex(n)
		if totalIdx != -1 {
			usedMbits = allocated.Flattened.Networks[totalIdx].MBits
		}
		unallocatedMbits := n.MBits - usedMbits

		if !c.config.DisableTaggedMetrics {
			// Same slice-aliasing precaution as above.
			labels := make([]metrics.Label, len(c.baseLabels), len(c.baseLabels)+1)
			copy(labels, c.baseLabels)
			labels = append(labels, metrics.Label{
				Name:  "device",
				Value: n.Device,
			})
			metrics.SetGaugeWithLabels([]string{"client", "unallocated", "network"}, float32(unallocatedMbits), labels)
		}
		if c.config.BackwardsCompatibleMetrics {
			metrics.SetGauge([]string{"client", "unallocated", "network", n.Device, nodeID}, float32(unallocatedMbits))
		}
	}
}
// setGaugeForUptime emits the host uptime gauge. No labels are required,
// so the metric is emitted with only key/value syntax.
func (c *Client) setGaugeForUptime(hStats *stats.HostStats) {
	uptime := float32(hStats.Uptime)
	if !c.config.DisableTaggedMetrics {
		metrics.SetGaugeWithLabels([]string{"client", "uptime"}, uptime, c.baseLabels)
	}
	if c.config.BackwardsCompatibleMetrics {
		metrics.SetGauge([]string{"client", "uptime"}, uptime)
	}
}
// emitHostStats pushes host resource usage stats to remote metrics collection sinks
func (c *Client) emitHostStats() {
	id := c.NodeID()
	snapshot := c.hostStatsCollector.Stats()

	// Fan the single snapshot out to the per-resource gauge emitters.
	c.setGaugeForMemoryStats(id, snapshot)
	c.setGaugeForUptime(snapshot)
	c.setGaugeForCPUStats(id, snapshot)
	c.setGaugeForDiskStats(id, snapshot)
}
// emitClientMetrics emits lower volume client metrics
func (c *Client) emitClientMetrics() {
	id := c.NodeID()

	c.setGaugeForAllocationStats(id)

	// Tally allocations by client-side state. Pending allocations are
	// further split into blocked (waiting), migrating, and plain pending.
	var nBlocked, nMigrating, nPending, nRunning, nTerminal int
	for _, runner := range c.getAllocRunners() {
		switch runner.AllocState().ClientStatus {
		case structs.AllocClientStatusPending:
			switch {
			case runner.IsWaiting():
				nBlocked++
			case runner.IsMigrating():
				nMigrating++
			default:
				nPending++
			}
		case structs.AllocClientStatusRunning:
			nRunning++
		case structs.AllocClientStatusComplete, structs.AllocClientStatusFailed:
			nTerminal++
		}
	}

	if !c.config.DisableTaggedMetrics {
		metrics.SetGaugeWithLabels([]string{"client", "allocations", "migrating"}, float32(nMigrating), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "allocations", "blocked"}, float32(nBlocked), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "allocations", "pending"}, float32(nPending), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "allocations", "running"}, float32(nRunning), c.baseLabels)
		metrics.SetGaugeWithLabels([]string{"client", "allocations", "terminal"}, float32(nTerminal), c.baseLabels)
	}
	if c.config.BackwardsCompatibleMetrics {
		metrics.SetGauge([]string{"client", "allocations", "migrating", id}, float32(nMigrating))
		metrics.SetGauge([]string{"client", "allocations", "blocked", id}, float32(nBlocked))
		metrics.SetGauge([]string{"client", "allocations", "pending", id}, float32(nPending))
		metrics.SetGauge([]string{"client", "allocations", "running", id}, float32(nRunning))
		metrics.SetGauge([]string{"client", "allocations", "terminal", id}, float32(nTerminal))
	}
}
// getAllocatedResources sums the resources committed to non-terminal
// allocations on this node. Network bandwidth is attributed to a device by
// matching each allocation's IP against the node's network CIDRs.
func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.ComparableResources {
	// Unfortunately the allocs only have IP so we need to match them to the
	// device
	// Fix: size the map from NodeResources.Networks — the slice actually
	// ranged below — rather than the legacy Resources.Networks field.
	cidrToDevice := make(map[*net.IPNet]string, len(selfNode.NodeResources.Networks))
	for _, n := range selfNode.NodeResources.Networks {
		_, ipnet, err := net.ParseCIDR(n.CIDR)
		if err != nil {
			// Skip networks with malformed CIDRs; they cannot be matched.
			continue
		}
		cidrToDevice[ipnet] = n.Device
	}

	// Sum the allocated resources
	var allocated structs.ComparableResources
	allocatedDeviceMbits := make(map[string]int)
	for _, ar := range c.getAllocRunners() {
		alloc := ar.Alloc()
		if alloc.ServerTerminalStatus() || ar.AllocState().ClientTerminalStatus() {
			continue
		}

		// Add the resources
		// COMPAT(0.11): Just use the allocated resources
		allocated.Add(alloc.ComparableResources())

		// Add the used network
		if alloc.AllocatedResources != nil {
			for _, tr := range alloc.AllocatedResources.Tasks {
				for _, allocatedNetwork := range tr.Networks {
					for cidr, dev := range cidrToDevice {
						ip := net.ParseIP(allocatedNetwork.IP)
						if cidr.Contains(ip) {
							allocatedDeviceMbits[dev] += allocatedNetwork.MBits
							break
						}
					}
				}
			}
		} else if alloc.Resources != nil {
			// COMPAT path for allocations carrying the legacy Resources field.
			for _, allocatedNetwork := range alloc.Resources.Networks {
				for cidr, dev := range cidrToDevice {
					ip := net.ParseIP(allocatedNetwork.IP)
					if cidr.Contains(ip) {
						allocatedDeviceMbits[dev] += allocatedNetwork.MBits
						break
					}
				}
			}
		}
	}

	// Clear the networks
	allocated.Flattened.Networks = nil
	for dev, speed := range allocatedDeviceMbits {
		// Renamed from "net" to avoid shadowing the net package used above.
		netRes := &structs.NetworkResource{
			Device: dev,
			MBits:  speed,
		}
		allocated.Flattened.Networks = append(allocated.Flattened.Networks, netRes)
	}
	return &allocated
}
// GetTaskEventHandler returns an event handler for the given allocID and task name
func (c *Client) GetTaskEventHandler(allocID, taskName string) drivermanager.EventHandler {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()
	// Unknown allocations yield a nil handler.
	ar, ok := c.allocs[allocID]
	if !ok {
		return nil
	}
	return ar.GetTaskEventHandler(taskName)
}
// group wraps a func() in a goroutine and provides a way to block until it
// exits. Inspired by https://godoc.org/golang.org/x/sync/errgroup
type group struct {
	// wg counts every goroutine started via Go; Wait blocks on it.
	wg sync.WaitGroup
}
// Go starts f in a goroutine and must be called before Wait.
func (g *group) Go(f func()) {
	g.wg.Add(1)
	go func() {
		// Done is deferred so the group is released even if f panics.
		defer g.wg.Done()
		f()
	}()
}
// AddCh starts a goroutine, tracked by the group, that blocks until ch is
// closed (or a value is received on it).
// Consistency fix: receiver renamed from "c" to "g" to match every other
// method on group (Go, Wait).
func (g *group) AddCh(ch <-chan struct{}) {
	g.Go(func() {
		<-ch
	})
}
// Wait for all goroutines to exit. Must be called after all calls to Go
// complete.
func (g *group) Wait() {
	g.wg.Wait()
}
|
// Copyright 2016 The Upspin Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package client implements a simple client service talking to services
// running anywhere (GCP, InProcess, etc).
package client
import (
"upspin.io/access"
"upspin.io/bind"
"upspin.io/client/common/file"
"upspin.io/errors"
"upspin.io/key/usercache"
"upspin.io/pack"
"upspin.io/path"
"upspin.io/upspin"
_ "upspin.io/pack/plain" // Plain packer used when encoding an Access file.
)
// Client implements upspin.Client.
type Client struct {
	context upspin.Context   // user configuration: identity, endpoints, packing
	user    upspin.KeyServer // NOTE(review): appears unused in this file — confirm against full package
}

var _ upspin.Client = (*Client)(nil) // compile-time interface conformance check

var (
	zeroLoc upspin.Location // zero Location returned alongside errors
)
// New creates a Client. The client finds the servers according to the given Context.
func New(context upspin.Context) upspin.Client {
	// Wrap the context's key server with the shared user cache.
	c := &Client{context: usercache.Global(context)}
	return c
}
// Put implements upspin.Client. It packs data with the appropriate packer,
// shares read access as dictated by the relevant Access file, stores the
// ciphertext on the user's store server, and records the directory entry.
// It returns the location of the stored ciphertext.
func (c *Client) Put(name upspin.PathName, data []byte) (upspin.Location, error) {
	dir, err := c.DirServer(name)
	if err != nil {
		return zeroLoc, err
	}
	// Validate the path; the parse result itself is not needed here.
	_, err = path.Parse(name)
	if err != nil {
		return zeroLoc, err
	}
	var packer upspin.Packer
	if access.IsAccessFile(name) || access.IsGroupFile(name) {
		// Access and Group files are always plain-packed so they stay readable.
		packer = pack.Lookup(upspin.PlainPack)
	} else {
		// Encrypt data according to the preferred packer
		// TODO: Do a Lookup in the parent directory to find the overriding packer.
		packer = pack.Lookup(c.context.Packing())
		if packer == nil {
			// Bug fix: Packing is a method and must be called; previously the
			// method value itself was passed to %d.
			return zeroLoc, errors.Errorf("unrecognized Packing %d for %q", c.context.Packing(), name)
		}
	}
	de := &upspin.DirEntry{
		Name: name,
		Metadata: upspin.Metadata{
			Time:     upspin.Now(),
			Sequence: 0, // Don't care for now.
			Size:     uint64(len(data)),
			Writer:   c.context.UserName(),
		},
	}

	var cipher []byte
	// Get a buffer big enough for this data
	cipherLen := packer.PackLen(c.context, data, de)
	if cipherLen < 0 {
		return zeroLoc, errors.Errorf("PackLen failed for %q", name)
	}
	cipher = make([]byte, cipherLen)
	n, err := packer.Pack(c.context, cipher, data, de)
	if err != nil {
		return zeroLoc, err
	}
	cipher = cipher[:n]

	// Add other readers from the access file.
	if err := c.addReaders(de, name, packer); err != nil {
		return zeroLoc, err
	}

	// Store contents.
	store, err := bind.StoreServer(c.context, c.context.StoreEndpoint())
	if err != nil {
		return zeroLoc, err
	}
	ref, err := store.Put(cipher)
	if err != nil {
		return zeroLoc, err
	}
	de.Location = upspin.Location{
		Endpoint:  c.context.StoreEndpoint(),
		Reference: ref,
	}

	// Record directory entry.
	err = dir.Put(de)
	return de.Location, err
}
// addReaders augments the entry's Packdata so that every user granted read
// access by the governing Access file can decrypt the data. It is a no-op
// for packers other than "ee".
func (c *Client) addReaders(de *upspin.DirEntry, name upspin.PathName, packer upspin.Packer) error {
	if packer.String() != "ee" {
		return nil
	}
	directory, err := bind.DirServer(c.context, c.context.DirEndpoint())
	if err != nil {
		return err
	}
	// Add other readers to Packdata.
	// We do this before "Store contents", so an error return wastes little.
	accessName, err := directory.WhichAccess(name)
	if err != nil {
		return err
	}
	var readers []upspin.UserName
	if accessName != "" {
		accessData, err := c.Get(accessName)
		if err != nil {
			return err
		}
		acc, err := access.Parse(accessName, accessData)
		if err != nil {
			return err
		}
		readers, err = acc.Users(access.Read, c.Get)
		if err != nil {
			// Bug fix: this error was previously dropped, so a failure to
			// resolve the reader list looked like an empty list.
			return err
		}
	}
	// Collect the public keys of self plus every resolvable reader.
	readersPublicKey := make([]upspin.PublicKey, len(readers)+1)
	readersPublicKey[0] = c.context.Factotum().PublicKey()
	n := 1
	for _, r := range readers {
		u, err := c.context.KeyServer().Lookup(r)
		if err != nil || len(u.PublicKey) == 0 {
			// TODO warn that we can't process one of the readers?
			continue
		}
		if u.PublicKey != readersPublicKey[0] { // don't duplicate self
			// TODO(ehg) maybe should check for other duplicates?
			readersPublicKey[n] = u.PublicKey
			n++
		}
	}
	readersPublicKey = readersPublicKey[:n]
	packdata := make([]*[]byte, 1)
	packdata[0] = &de.Metadata.Packdata
	packer.Share(c.context, readersPublicKey, packdata)
	return nil
}
// MakeDirectory implements upspin.Client.
func (c *Client) MakeDirectory(dirName upspin.PathName) (upspin.Location, error) {
	// Resolve the directory server responsible for dirName, then delegate.
	dir, err := c.DirServer(dirName)
	if err != nil {
		return zeroLoc, err
	}
	loc, err := dir.MakeDirectory(dirName)
	return loc, err
}
// Get implements upspin.Client. It looks up name, fetches the referenced
// ciphertext from a store server — following indirection locations as
// needed — unpacks it and returns the cleartext.
func (c *Client) Get(name upspin.PathName) ([]byte, error) {
	dir, err := c.DirServer(name)
	if err != nil {
		return nil, err
	}
	entry, err := dir.Lookup(name)
	if err != nil {
		return nil, err
	}

	// firstError remembers the first error we saw. If we fail completely we return it.
	var firstError error
	// isError reports whether err is non-nil and remembers it if it is.
	isError := func(err error) bool {
		if err == nil {
			return false
		}
		if firstError == nil {
			firstError = err
		}
		return true
	}

	// where is the list of locations to examine. It is updated in the loop.
	where := []upspin.Location{entry.Location}
	for i := 0; i < len(where); i++ { // Not range loop - where changes as we run.
		loc := where[i]
		store, err := bind.StoreServer(c.context, loc.Endpoint)
		if isError(err) {
			continue
		}
		cipher, locs, err := store.Get(loc.Reference)
		if isError(err) {
			continue // locs guaranteed to be nil.
		}
		if locs == nil && err == nil {
			// Encrypted data was found. Need to unpack it.
			// TODO(p,edpin): change when GCP makes the indirected reference
			// have the correct packing info.
			packer := pack.Lookup(entry.Metadata.Packing())
			if packer == nil {
				return nil, errors.Errorf("client: unrecognized Packing %d for %q", entry.Metadata.Packing(), name)
			}
			clearLen := packer.UnpackLen(c.context, cipher, entry)
			if clearLen < 0 {
				return nil, errors.Errorf("client: UnpackLen failed for %q", name)
			}
			cleartext := make([]byte, clearLen)
			n, err := packer.Unpack(c.context, cleartext, cipher, entry)
			if err != nil {
				return nil, err // Showstopper.
			}
			// Trim to the actual unpacked length.
			return cleartext[:n], nil
		}
		// Add new locs to the list. Skip ones already there - they've been processed. TODO: n^2.
	outer:
		for _, newLoc := range locs {
			for _, oldLoc := range where {
				if oldLoc == newLoc {
					continue outer
				}
			}
			where = append(where, newLoc)
		}
	}
	// TODO: custom error types.
	if firstError != nil {
		return nil, firstError
	}
	return nil, errors.Errorf("client: %q not found on any store server", name)
}
// Glob implements upspin.Client.
func (c *Client) Glob(pattern string) ([]*upspin.DirEntry, error) {
	// The pattern's root determines which directory server to query.
	dir, err := c.DirServer(upspin.PathName(pattern))
	if err != nil {
		return nil, err
	}
	entries, err := dir.Glob(pattern)
	return entries, err
}
// Create implements upspin.Client.
func (c *Client) Create(name upspin.PathName) (upspin.File, error) {
	// TODO: Make sure directory exists?
	f := file.Writable(c, name)
	return f, nil
}
// Open implements upspin.Client.
func (c *Client) Open(name upspin.PathName) (upspin.File, error) {
	// Fetch the full contents eagerly; the returned File reads from memory.
	data, err := c.Get(name)
	if err != nil {
		return nil, err
	}
	f := file.Readable(c, name, data)
	return f, nil
}
// DirServer implements upspin.Client. It returns a directory server able to
// answer for name: the user's own endpoint first (when the name belongs to
// this user), then any endpoints registered for the name's owner.
func (c *Client) DirServer(name upspin.PathName) (upspin.DirServer, error) {
	parsed, err := path.Parse(name)
	if err != nil {
		return nil, err
	}
	var endpoints []upspin.Endpoint
	if parsed.User() == c.context.UserName() {
		endpoints = append(endpoints, c.context.DirEndpoint())
	}
	// Lookup errors are deliberately ignored (shadowed err): the local
	// endpoint, if present, may still suffice.
	if u, err := c.context.KeyServer().Lookup(parsed.User()); err == nil {
		endpoints = append(endpoints, u.Dirs...)
	}
	var dir upspin.DirServer
	for _, e := range endpoints {
		// Try each endpoint in order; err retains the most recent failure.
		dir, err = bind.DirServer(c.context, e)
		if dir != nil {
			return dir, nil
		}
	}
	if err == nil {
		// No endpoints were found at all.
		err = errors.Errorf("client: no endpoint for user %q", parsed.User())
	}
	return nil, err
}
// Link implements upspin.Link. This behaves more like copy-on-write than a
// Unix-style link: as soon as either of the two files is written, they diverge.
func (c *Client) Link(oldName, newName upspin.PathName) (*upspin.DirEntry, error) {
	const rename = false
	return c.linkOrRename(oldName, newName, rename)
}
// Rename implements upspin.Rename. Performed by linking to the new name and deleting the old one.
func (c *Client) Rename(oldName, newName upspin.PathName) error {
	const rename = true
	_, err := c.linkOrRename(oldName, newName, rename)
	return err
}
// linkOrRename creates newName referring to the same data as oldName; when
// rename is true it also deletes oldName afterwards. Directories cannot be
// linked or renamed. The reader keys are rewrapped when the entry moves to
// a different directory.
func (c *Client) linkOrRename(oldName, newName upspin.PathName, rename bool) (*upspin.DirEntry, error) {
	newParsed, err := path.Parse(newName)
	if err != nil {
		return nil, err
	}
	oldParsed, err := path.Parse(oldName)
	if err != nil {
		return nil, err
	}

	oldDir, err := c.DirServer(oldName)
	if err != nil {
		return nil, err
	}
	entry, err := oldDir.Lookup(oldName)
	if err != nil {
		return nil, err
	}
	if entry.IsDir() {
		return nil, errors.Errorf("cannot link or rename directories")
	}

	packer := pack.Lookup(entry.Metadata.Packing())
	if packer == nil {
		// Bug fix: Packing is a method; call it so %d reports the numeric
		// packing value instead of formatting a method value.
		return nil, errors.Errorf("unrecognized Packing %d for %q", c.context.Packing(), oldName)
	}
	if access.IsAccessFile(newName) || access.IsGroupFile(newName) {
		if entry.Metadata.Packing() != upspin.PlainPack {
			return nil, errors.Errorf("can only link plain packed files to access or group files")
		}
	}

	// Rewrap reader keys only if changing directory.
	if !oldParsed.Drop(1).Equal(newParsed.Drop(1)) {
		if err := c.addReaders(entry, newName, packer); err != nil {
			return nil, err
		}
	}

	// Get the destination upspin.DirServer.
	newDir := oldDir
	if oldParsed.User() != newParsed.User() {
		newDir, err = c.DirServer(newName)
		if err != nil {
			return nil, err
		}
	}

	// Update the directory entry with the new name and sequence.
	// If we are linking, the new file must not exist.
	// TODO: Should it also not exist on a rename?
	if rename {
		entry.Metadata.Sequence = upspin.SeqIgnore
	} else {
		entry.Metadata.Sequence = upspin.SeqNotExist
	}
	if err := packer.Name(c.context, entry, newName); err != nil {
		return nil, err
	}

	// Record directory entry.
	if err := newDir.Put(entry); err != nil {
		return nil, err
	}

	if rename {
		// Remove original entry.
		if err := oldDir.Delete(oldName); err != nil {
			return entry, err
		}
	}
	return entry, nil
}
client: update to new Packer interface and DirEntry struct
Change-Id: I8243e633a23a164690588e73208453c4836f867b
Reviewed-on: https://upspin-review.googlesource.com/2743
Reviewed-by: Andrew Gerrand <395a7d33bec8475c9b83b7d440f141bcbd994aa5@google.com>
// Copyright 2016 The Upspin Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package client implements a simple client service talking to services
// running anywhere (GCP, InProcess, etc).
package client
import (
"upspin.io/access"
"upspin.io/bind"
"upspin.io/client/common/file"
"upspin.io/errors"
"upspin.io/key/usercache"
"upspin.io/pack"
"upspin.io/path"
"upspin.io/upspin"
_ "upspin.io/pack/plain" // Plain packer used when encoding an Access file.
)
// Client implements upspin.Client.
type Client struct {
	context upspin.Context   // user configuration: identity, endpoints, packing
	user    upspin.KeyServer // NOTE(review): appears unused in this file — confirm against full package
}

var _ upspin.Client = (*Client)(nil) // compile-time interface conformance check

var (
	zeroLoc upspin.Location // zero Location returned alongside errors
)

// maxBlockSize is the largest amount of cleartext packed into a single
// stored block; longer Puts are split into multiple blocks.
const maxBlockSize = 1024 * 1024
// New creates a Client. The client finds the servers according to the given Context.
func New(context upspin.Context) upspin.Client {
	// Wrap the context's key server with the shared user cache.
	c := &Client{context: usercache.Global(context)}
	return c
}
// Put implements upspin.Client. It packs data into blocks of at most
// maxBlockSize bytes, stores each block's ciphertext on the user's store
// server, shares read access per the relevant Access file, and records the
// directory entry. It returns the location of the first stored block.
func (c *Client) Put(name upspin.PathName, data []byte) (upspin.Location, error) {
	const op = "Put"
	dir, err := c.DirServer(name)
	if err != nil {
		return zeroLoc, errors.E(op, err)
	}
	// Validate the path; the parse result itself is not needed here.
	_, err = path.Parse(name)
	if err != nil {
		return zeroLoc, errors.E(op, err)
	}
	var packer upspin.Packer
	if access.IsAccessFile(name) || access.IsGroupFile(name) {
		// Access and Group files are always plain-packed so they stay readable.
		packer = pack.Lookup(upspin.PlainPack)
	} else {
		// Encrypt data according to the preferred packer
		// TODO: Do a Lookup in the parent directory to find the overriding packer.
		packer = pack.Lookup(c.context.Packing())
		if packer == nil {
			return zeroLoc, errors.E(op, name, errors.Errorf("unrecognized Packing %d", c.context.Packing()))
		}
	}
	de := &upspin.DirEntry{
		Name:     name,
		Packing:  packer.Packing(),
		Time:     upspin.Now(),
		Sequence: 0, // Don't care for now.
		Writer:   c.context.UserName(),
	}

	// Start the I/O.
	// NOTE(review): the next two error returns are not wrapped with
	// errors.E(op, ...) unlike the rest of this function — confirm whether
	// that is intentional.
	store, err := bind.StoreServer(c.context, c.context.StoreEndpoint())
	if err != nil {
		return zeroLoc, err
	}
	bp, err := packer.Pack(c.context, de)
	if err != nil {
		return zeroLoc, err
	}
	// Pack and store the data one maxBlockSize-sized chunk at a time.
	for len(data) > 0 {
		n := len(data)
		if n > maxBlockSize {
			n = maxBlockSize
		}
		cipher, err := bp.Pack(data[:n])
		if err != nil {
			return zeroLoc, errors.E(op, err)
		}
		data = data[n:]
		ref, err := store.Put(cipher)
		if err != nil {
			return zeroLoc, errors.E(op, err)
		}
		bp.SetLocation(
			upspin.Location{
				Endpoint:  c.context.StoreEndpoint(),
				Reference: ref,
			},
		)
	}
	// Close finalizes the entry's block list and packdata.
	err = bp.Close()
	if err != nil {
		return zeroLoc, errors.E(op, err)
	}

	// Add other readers from the access file.
	if err := c.addReaders(de, name, packer); err != nil {
		return zeroLoc, errors.E(op, err)
	}

	// Record directory entry.
	err = dir.Put(de)
	if err != nil {
		return zeroLoc, errors.E(op, err)
	}
	// TODO: What to do if Blocks has zero length? (Empty Put produces no
	// blocks; we return the zero Location with a nil error.)
	if len(de.Blocks) == 0 {
		return zeroLoc, nil
	}
	return de.Blocks[0].Location, nil
}
// addReaders augments the entry's Packdata so that every user granted read
// access by the governing Access file can decrypt the data. It is a no-op
// for packers other than "ee".
func (c *Client) addReaders(de *upspin.DirEntry, name upspin.PathName, packer upspin.Packer) error {
	if packer.String() != "ee" {
		return nil
	}
	directory, err := bind.DirServer(c.context, c.context.DirEndpoint())
	if err != nil {
		return err
	}
	// Add other readers to Packdata.
	// We do this before "Store contents", so an error return wastes little.
	accessName, err := directory.WhichAccess(name)
	if err != nil {
		return err
	}
	var readers []upspin.UserName
	if accessName != "" {
		accessData, err := c.Get(accessName)
		if err != nil {
			return err
		}
		acc, err := access.Parse(accessName, accessData)
		if err != nil {
			return err
		}
		readers, err = acc.Users(access.Read, c.Get)
		if err != nil {
			// Bug fix: this error was previously dropped, so a failure to
			// resolve the reader list looked like an empty list.
			return err
		}
	}
	// Collect the public keys of self plus every resolvable reader.
	readersPublicKey := make([]upspin.PublicKey, len(readers)+1)
	readersPublicKey[0] = c.context.Factotum().PublicKey()
	n := 1
	for _, r := range readers {
		u, err := c.context.KeyServer().Lookup(r)
		if err != nil || len(u.PublicKey) == 0 {
			// TODO warn that we can't process one of the readers?
			continue
		}
		if u.PublicKey != readersPublicKey[0] { // don't duplicate self
			// TODO(ehg) maybe should check for other duplicates?
			readersPublicKey[n] = u.PublicKey
			n++
		}
	}
	readersPublicKey = readersPublicKey[:n]
	packdata := make([]*[]byte, 1)
	packdata[0] = &de.Packdata
	packer.Share(c.context, readersPublicKey, packdata)
	return nil
}
// MakeDirectory implements upspin.Client.
func (c *Client) MakeDirectory(dirName upspin.PathName) (upspin.Location, error) {
	// Resolve the directory server responsible for dirName, then delegate.
	dir, err := c.DirServer(dirName)
	if err != nil {
		return zeroLoc, err
	}
	loc, err := dir.MakeDirectory(dirName)
	return loc, err
}
// Get implements upspin.Client. It looks up name, fetches and unpacks every
// data block listed in the entry — following indirection locations per
// block as needed — and returns the concatenated cleartext.
func (c *Client) Get(name upspin.PathName) ([]byte, error) {
	const op = "Get"
	dir, err := c.DirServer(name)
	if err != nil {
		return nil, errors.E(op, err)
	}
	entry, err := dir.Lookup(name)
	if err != nil {
		return nil, errors.E(op, err)
	}

	// firstError remembers the first error we saw. If we fail completely we return it.
	var firstError error
	// isError reports whether err is non-nil and remembers it if it is.
	isError := func(err error) bool {
		if err == nil {
			return false
		}
		if firstError == nil {
			firstError = err
		}
		return true
	}

	var data []byte
	packer := pack.Lookup(entry.Packing)
	if packer == nil {
		return nil, errors.E(op, name, errors.Errorf("unrecognized Packing %d", entry.Packing))
	}
	bu, err := packer.Unpack(c.context, entry)
	if err != nil {
		return nil, errors.E(op, name, err) // Showstopper.
	}
Blocks:
	for b := 0; ; b++ {
		block, ok := bu.NextBlock()
		if !ok {
			break // EOF
		}
		// Get the data for this block.
		// where is the list of locations to examine. It is updated in the loop.
		where := []upspin.Location{block.Location}
		for i := 0; i < len(where); i++ { // Not range loop - where changes as we run.
			loc := where[i]
			store, err := bind.StoreServer(c.context, loc.Endpoint)
			if isError(err) {
				continue
			}
			cipher, locs, err := store.Get(loc.Reference)
			if isError(err) {
				continue // locs guaranteed to be nil.
			}
			if locs == nil && err == nil {
				// Found the data. Unpack it.
				clear, err := bu.Unpack(cipher)
				if err != nil {
					return nil, errors.E(op, name, err) // Showstopper.
				}
				data = append(data, clear...) // TODO: Could avoid a copy if only one block.
				continue Blocks
			}
			// Add new locs to the list. Skip ones already there - they've been processed. TODO: n^2.
		outer:
			for _, newLoc := range locs {
				for _, oldLoc := range where {
					if oldLoc == newLoc {
						continue outer
					}
				}
				where = append(where, newLoc)
			}
		}
		// If we arrive here, we have failed to find a block.
		// TODO: custom error types.
		if firstError != nil {
			return nil, errors.E(op, name, firstError)
		}
		return nil, errors.Errorf("client: data for block %d in %q not found on any store server", b, name)
	}
	return data, nil
}
// Glob implements upspin.Client.
func (c *Client) Glob(pattern string) ([]*upspin.DirEntry, error) {
	// The pattern's root determines which directory server to query.
	dir, err := c.DirServer(upspin.PathName(pattern))
	if err != nil {
		return nil, err
	}
	entries, err := dir.Glob(pattern)
	return entries, err
}
// Create implements upspin.Client.
func (c *Client) Create(name upspin.PathName) (upspin.File, error) {
	// TODO: Make sure directory exists?
	f := file.Writable(c, name)
	return f, nil
}
// Open implements upspin.Client.
func (c *Client) Open(name upspin.PathName) (upspin.File, error) {
	// Fetch the full contents eagerly; the returned File reads from memory.
	data, err := c.Get(name)
	if err != nil {
		return nil, err
	}
	f := file.Readable(c, name, data)
	return f, nil
}
// DirServer implements upspin.Client. It returns a directory server able to
// answer for name: the user's own endpoint first (when the name belongs to
// this user), then any endpoints registered for the name's owner.
func (c *Client) DirServer(name upspin.PathName) (upspin.DirServer, error) {
	parsed, err := path.Parse(name)
	if err != nil {
		return nil, err
	}
	var endpoints []upspin.Endpoint
	if parsed.User() == c.context.UserName() {
		endpoints = append(endpoints, c.context.DirEndpoint())
	}
	// Lookup errors are deliberately ignored (shadowed err): the local
	// endpoint, if present, may still suffice.
	if u, err := c.context.KeyServer().Lookup(parsed.User()); err == nil {
		endpoints = append(endpoints, u.Dirs...)
	}
	var dir upspin.DirServer
	for _, e := range endpoints {
		// Try each endpoint in order; err retains the most recent failure.
		dir, err = bind.DirServer(c.context, e)
		if dir != nil {
			return dir, nil
		}
	}
	if err == nil {
		// No endpoints were found at all.
		err = errors.Errorf("client: no endpoint for user %q", parsed.User())
	}
	return nil, err
}
// Link implements upspin.Link. This behaves more like copy-on-write than a
// Unix-style link: as soon as either of the two files is written, they diverge.
func (c *Client) Link(oldName, newName upspin.PathName) (*upspin.DirEntry, error) {
	const rename = false
	return c.linkOrRename(oldName, newName, rename)
}
// Rename implements upspin.Rename. Performed by linking to the new name and deleting the old one.
func (c *Client) Rename(oldName, newName upspin.PathName) error {
	const rename = true
	_, err := c.linkOrRename(oldName, newName, rename)
	return err
}
// linkOrRename creates newName referring to the same data as oldName; when
// rename is true it also deletes oldName afterwards. Directories cannot be
// linked or renamed. The reader keys are rewrapped when the entry moves to
// a different directory.
func (c *Client) linkOrRename(oldName, newName upspin.PathName, rename bool) (*upspin.DirEntry, error) {
	newParsed, err := path.Parse(newName)
	if err != nil {
		return nil, err
	}
	oldParsed, err := path.Parse(oldName)
	if err != nil {
		return nil, err
	}

	oldDir, err := c.DirServer(oldName)
	if err != nil {
		return nil, err
	}
	entry, err := oldDir.Lookup(oldName)
	if err != nil {
		return nil, err
	}
	if entry.IsDir() {
		return nil, errors.Errorf("cannot link or rename directories")
	}

	packer := pack.Lookup(entry.Packing)
	if packer == nil {
		// Fix: report the entry's packing, which is what the lookup above
		// actually used; the context's packing may legitimately differ.
		return nil, errors.Errorf("unrecognized Packing %d for %q", entry.Packing, oldName)
	}
	if access.IsAccessFile(newName) || access.IsGroupFile(newName) {
		if entry.Packing != upspin.PlainPack {
			return nil, errors.Errorf("can only link plain packed files to access or group files")
		}
	}

	// Rewrap reader keys only if changing directory.
	if !oldParsed.Drop(1).Equal(newParsed.Drop(1)) {
		if err := c.addReaders(entry, newName, packer); err != nil {
			return nil, err
		}
	}

	// Get the destination upspin.DirServer.
	newDir := oldDir
	if oldParsed.User() != newParsed.User() {
		newDir, err = c.DirServer(newName)
		if err != nil {
			return nil, err
		}
	}

	// Update the directory entry with the new name and sequence.
	// If we are linking, the new file must not exist.
	// TODO: Should it also not exist on a rename?
	if rename {
		entry.Sequence = upspin.SeqIgnore
	} else {
		entry.Sequence = upspin.SeqNotExist
	}
	if err := packer.Name(c.context, entry, newName); err != nil {
		return nil, err
	}

	// Record directory entry.
	if err := newDir.Put(entry); err != nil {
		return nil, err
	}

	if rename {
		// Remove original entry.
		if err := oldDir.Delete(oldName); err != nil {
			return entry, err
		}
	}
	return entry, nil
}
|
package vectors
import "github.com/hAWKdv/go-gravity/vectors/utils"
// MoverDefMass keeps the default mass of the Mover
const MoverDefMass = 1

// Mover describes a basic moveable object/particle
type Mover struct {
	Obj           interface{} // arbitrary payload being moved (may be nil)
	acceleration  *Vector     // sum of applied forces / mass; cleared on every Update
	velocity      *Vector     // current velocity, integrated from acceleration
	location      *Vector     // current position
	container     *Vector     // bounding box used by BounceOff; nil means unbounded
	mass          float64     // divisor for applied forces; defaults to MoverDefMass
	limit         float64     // max velocity magnitude; 0 disables limiting
	aAcceleration float64     // angular acceleration — not used in this file; confirm elsewhere
	aVelocity     float64     // angular velocity — not used in this file; confirm elsewhere
	angle         float64     // orientation angle — not used in this file; confirm elsewhere
}
// NewMover creates an object of type Mover (constructor)
func NewMover(obj interface{}, location *Vector, container *Vector) *Mover {
	mover := &Mover{}
	if obj != nil {
		mover.Obj = obj
	}

	// Start at rest with the default mass and no velocity limit.
	mover.acceleration = NewVector(0, 0)
	mover.velocity = NewVector(0, 0)
	mover.mass = MoverDefMass
	mover.limit = 0
	mover.container = container

	// A nil location defaults to the origin.
	mover.location = location
	if mover.location == nil {
		mover.location = NewVector(0, 0)
	}

	return mover
}
// SetMass assigns the mass argument to the object's mass
func (m *Mover) SetMass(mass float64) {
	// Masses below 1 are silently ignored, matching the original contract.
	if mass >= 1 {
		m.mass = mass
	}
}
// SetLimit puts a velocity limit when accelerating.
// A non-positive value disables limiting (see Update).
func (m *Mover) SetLimit(limit float64) {
	m.limit = limit
}
// GetVelocity returns the current velocity vector of the Mover.
// The returned pointer aliases internal state; mutations affect the Mover.
func (m *Mover) GetVelocity() *Vector {
	return m.velocity
}
// ApplyForce adds the force vector the object's acceleration vector
func (m *Mover) ApplyForce(force *Vector) {
	if force == nil {
		return
	}
	// Newton's 2nd law: Acceleration = Sum of all forces / Mass
	// Work on a copy so the caller's vector is left untouched.
	contribution := force.Copy()
	contribution.Divide(m.mass)
	m.acceleration.Add(contribution)
}
// Update modifies the object's location depending on the applied forces;
// Should be called on every rendering iteration
func (m *Mover) Update() {
	// Integrate acceleration into velocity (kept for physical correctness).
	m.velocity.Add(m.acceleration)

	// Cap the speed when a limit has been configured.
	if m.limit > 0 {
		m.velocity.Limit(m.limit)
	}

	// Integrate velocity into position, then zero the acceleration so the
	// next frame starts from a clean force accumulator.
	m.location.Add(m.velocity)
	m.acceleration.Multiply(0)
}
// PixelLoc returns the rounded values of location's X and Y which are ready for rendering
func (m *Mover) PixelLoc() (int, int) {
	return utils.Round(m.location.X), utils.Round(m.location.Y)
}
// BounceOff keeps the mover within its container (bounces off) when it
// reaches an edge: the position is clamped to the edge and the velocity
// component perpendicular to that edge is reversed. A nil container means
// the mover is unbounded.
func (m *Mover) BounceOff() {
	if m.container == nil {
		return
	}
	// Horizontal bounds.
	if m.location.X > m.container.X {
		m.location.X = m.container.X
		m.velocity.X *= -1
	} else if m.location.X < 0 {
		m.velocity.X *= -1
		m.location.X = 0
	}
	// Vertical bounds. Bug fix: the original only handled the bottom edge,
	// so a mover could escape through the top (Y < 0); add the top boundary.
	if m.location.Y > m.container.Y {
		m.velocity.Y *= -1
		m.location.Y = m.container.Y
	} else if m.location.Y < 0 {
		m.velocity.Y *= -1
		m.location.Y = 0
	}
}
Improve Mover.BounceOff()
Add a top boundary.
package vectors
import "github.com/hAWKdv/go-gravity/vectors/utils"
// MoverDefMass keeps the default mass of the Mover
const MoverDefMass = 1

// Mover describes a basic moveable object/particle
type Mover struct {
	Obj           interface{} // arbitrary payload being moved (may be nil)
	acceleration  *Vector     // sum of applied forces / mass; cleared on every Update
	velocity      *Vector     // current velocity, integrated from acceleration
	location      *Vector     // current position
	container     *Vector     // bounding box used by BounceOff; nil means unbounded
	mass          float64     // divisor for applied forces; defaults to MoverDefMass
	limit         float64     // max velocity magnitude; 0 disables limiting
	aAcceleration float64     // angular acceleration — not used in this file; confirm elsewhere
	aVelocity     float64     // angular velocity — not used in this file; confirm elsewhere
	angle         float64     // orientation angle — not used in this file; confirm elsewhere
}
// NewMover creates an object of type Mover (constructor).
// A nil location defaults to the origin; container may be nil (no bounds).
func NewMover(obj interface{}, location *Vector, container *Vector) *Mover {
	mover := &Mover{}
	if obj != nil {
		mover.Obj = obj
	}
	mover.acceleration = NewVector(0, 0)
	mover.velocity = NewVector(0, 0)
	mover.mass = MoverDefMass
	mover.limit = 0
	mover.container = container
	mover.location = location
	if mover.location == nil {
		mover.location = NewVector(0, 0)
	}
	return mover
}
// SetMass assigns the mass argument to the object's mass.
// Values below 1 are rejected and leave the current mass unchanged.
func (m *Mover) SetMass(mass float64) {
	if mass >= 1 {
		m.mass = mass
	}
}
// SetLimit puts a velocity limit when accelerating.
// A value of 0 (the constructor default) disables limiting — see Update.
func (m *Mover) SetLimit(limit float64) {
	m.limit = limit
}
// GetVelocity returns the current velocity vector of the Mover.
// Note: this is the internal vector, not a copy — mutating it affects the Mover.
func (m *Mover) GetVelocity() *Vector {
	return m.velocity
}
// ApplyForce adds the force vector to the object's acceleration vector,
// following Newton's 2nd law: acceleration = sum of all forces / mass.
// A nil force is silently ignored.
func (m *Mover) ApplyForce(force *Vector) {
	if force == nil {
		return
	}
	// Work on a copy so the caller's vector is left untouched.
	scaled := force.Copy()
	scaled.Divide(m.mass)
	m.acceleration.Add(scaled)
}
// Update modifies the object's location depending on the applied forces;
// Should be called on every rendering iteration.
func (m *Mover) Update() {
	// Integrate acceleration into velocity.
	// We keep the velocity only for correctness based on physics laws.
	m.velocity.Add(m.acceleration)
	// Apply velocity limit, if there is any (0 means unlimited).
	if m.limit > 0 {
		m.velocity.Limit(m.limit)
	}
	// Integrate velocity into position.
	m.location.Add(m.velocity)
	// Clear the acceleration so forces must be re-applied each frame.
	m.acceleration.Multiply(0)
}
// PixelLoc returns the rounded values of location's X and Y which are ready for rendering.
func (m *Mover) PixelLoc() (int, int) {
	x := utils.Round(m.location.X)
	y := utils.Round(m.location.Y)
	return x, y
}
// BounceOff keeps the mover within its container (bounces off) when it reaches
// an edge: the position is clamped back to the edge and the corresponding
// velocity component is inverted.
func (m *Mover) BounceOff() {
	if m.container == nil {
		// No container configured — nothing to bounce off.
		return
	}
	// Horizontal edges (right, then left).
	switch {
	case m.location.X > m.container.X:
		m.location.X = m.container.X
		m.velocity.X = -m.velocity.X
	case m.location.X < 0:
		m.location.X = 0
		m.velocity.X = -m.velocity.X
	}
	// Vertical edges (bottom, then top).
	switch {
	case m.location.Y > m.container.Y:
		m.location.Y = m.container.Y
		m.velocity.Y = -m.velocity.Y
	case m.location.Y < 0:
		m.location.Y = 0
		m.velocity.Y = -m.velocity.Y
	}
}
|
/*
Copyright 2019 The Kubeflow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package notebook
import (
"context"
"fmt"
"strings"
v1alpha1 "github.com/kubeflow/kubeflow/components/notebook-controller/pkg/apis/notebook/v1alpha1"
"github.com/kubeflow/kubeflow/components/notebook-controller/pkg/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.Log.WithName("controller")
const DefaultContainerPort = 8888
const DefaultServingPort = 80
// The default fsGroup of PodSecurityContext.
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podsecuritycontext-v1-core
const DefaultFSGroup = int64(100)
// Add creates a new Notebook Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	// Delegate to add with a freshly constructed reconciler.
	return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler wired with the manager's
// (cached) client and scheme.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileNotebook{Client: mgr.GetClient(), scheme: mgr.GetScheme()}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler.
// It watches Notebooks plus the StatefulSets, Services, Istio VirtualServices
// owned by them, and the underlying pods (matched via the "notebook-name" label).
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("notebook-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to Notebook
	err = c.Watch(&source.Kind{Type: &v1alpha1.Notebook{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	// Watch owned StatefulSets so drift triggers a reconcile of the owner Notebook.
	err = c.Watch(&source.Kind{Type: &appsv1.StatefulSet{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &v1alpha1.Notebook{},
	})
	if err != nil {
		return err
	}
	// Watch owned Services.
	err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &v1alpha1.Notebook{},
	})
	if err != nil {
		return err
	}
	// Watch for changes to Notebook virtualservices.
	virtualService := &unstructured.Unstructured{}
	virtualService.SetAPIVersion("networking.istio.io/v1alpha3")
	virtualService.SetKind("VirtualService")
	err = c.Watch(&source.Kind{Type: virtualService}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &v1alpha1.Notebook{},
	})
	if err != nil {
		return err
	}
	// Watch underlying pod.
	// mapFn defines the mapping from object in event to reconcile request:
	// the pod's "notebook-name" label identifies the owning Notebook.
	mapFn := handler.ToRequestsFunc(
		func(a handler.MapObject) []reconcile.Request {
			return []reconcile.Request{
				{NamespacedName: types.NamespacedName{
					Name:      a.Meta.GetLabels()["notebook-name"],
					Namespace: a.Meta.GetNamespace(),
				}},
			}
		})
	// Only react to pods that actually carry the "notebook-name" label.
	p := predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			if _, ok := e.MetaOld.GetLabels()["notebook-name"]; !ok {
				return false
			}
			return e.ObjectOld != e.ObjectNew
		},
		CreateFunc: func(e event.CreateEvent) bool {
			if _, ok := e.Meta.GetLabels()["notebook-name"]; !ok {
				return false
			}
			return true
		},
	}
	err = c.Watch(
		&source.Kind{Type: &corev1.Pod{}},
		&handler.EnqueueRequestsFromMapFunc{
			ToRequests: mapFn,
		},
		p)
	if err != nil {
		return err
	}
	return nil
}
// Compile-time assertion that ReconcileNotebook implements reconcile.Reconciler.
var _ reconcile.Reconciler = &ReconcileNotebook{}

// ReconcileNotebook reconciles a Notebook object.
type ReconcileNotebook struct {
	// Client reads and writes Kubernetes objects.
	client.Client
	// scheme is used by SetControllerReference when stamping owner refs.
	scheme *runtime.Scheme
}
// Reconcile reads that state of the cluster for a Notebook object and makes changes based on the state read
// and what is in the Notebook.Spec.
// Flow: StatefulSet -> Service -> Istio VirtualService -> status conditions/replicas/pod state.
// Automatically generate RBAC rules to allow the Controller to read and write StatefulSet
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=services/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=kubeflow.org,resources=notebooks,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=kubeflow.org,resources=notebooks/status,verbs=get;update;patch
func (r *ReconcileNotebook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	// Fetch the Notebook instance
	instance := &v1alpha1.Notebook{}
	err := r.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Object not found, return. Created objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}
	// Reconcile StatefulSet
	ss := generateStatefulSet(instance)
	if err := controllerutil.SetControllerReference(instance, ss, r.scheme); err != nil {
		return reconcile.Result{}, err
	}
	// Check if the StatefulSet already exists
	foundStateful := &appsv1.StatefulSet{}
	justCreated := false
	err = r.Get(context.TODO(), types.NamespacedName{Name: ss.Name, Namespace: ss.Namespace}, foundStateful)
	if err != nil && errors.IsNotFound(err) {
		log.Info("Creating StatefulSet", "namespace", ss.Namespace, "name", ss.Name)
		err = r.Create(context.TODO(), ss)
		justCreated = true
		if err != nil {
			return reconcile.Result{}, err
		}
	} else if err != nil {
		return reconcile.Result{}, err
	}
	// Update the foundStateful object and write the result back if there are any changes
	if !justCreated && util.CopyStatefulSetFields(ss, foundStateful) {
		log.Info("Updating StatefulSet", "namespace", ss.Namespace, "name", ss.Name)
		err = r.Update(context.TODO(), foundStateful)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Reconcile service
	service := generateService(instance)
	if err := controllerutil.SetControllerReference(instance, service, r.scheme); err != nil {
		return reconcile.Result{}, err
	}
	// Check if the Service already exists
	foundService := &corev1.Service{}
	justCreated = false
	err = r.Get(context.TODO(), types.NamespacedName{Name: service.Name, Namespace: service.Namespace}, foundService)
	if err != nil && errors.IsNotFound(err) {
		log.Info("Creating Service", "namespace", service.Namespace, "name", service.Name)
		err = r.Create(context.TODO(), service)
		justCreated = true
		if err != nil {
			return reconcile.Result{}, err
		}
	} else if err != nil {
		return reconcile.Result{}, err
	}
	// Update the foundService object and write the result back if there are any changes
	if !justCreated && util.CopyServiceFields(service, foundService) {
		log.Info("Updating Service\n", "namespace", service.Namespace, "name", service.Name)
		err = r.Update(context.TODO(), foundService)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Reconcile virtual service
	virtualService, err := generateVirtualService(instance)
	// FIX: this error was previously dropped — the subsequent `if err := ...`
	// shadowed err, so a failure to build the VirtualService spec went unnoticed.
	if err != nil {
		return reconcile.Result{}, err
	}
	if err := controllerutil.SetControllerReference(instance, virtualService, r.scheme); err != nil {
		return reconcile.Result{}, err
	}
	// Check if the virtual service already exists.
	foundVirtual := &unstructured.Unstructured{}
	justCreated = false
	foundVirtual.SetAPIVersion("networking.istio.io/v1alpha3")
	foundVirtual.SetKind("VirtualService")
	err = r.Get(context.TODO(), types.NamespacedName{Name: virtualServiceName(instance.Name,
		instance.Namespace), Namespace: instance.Namespace}, foundVirtual)
	if err != nil && errors.IsNotFound(err) {
		log.Info("Creating virtual service", "namespace", instance.Namespace, "name",
			virtualServiceName(instance.Name, instance.Namespace))
		err = r.Create(context.TODO(), virtualService)
		justCreated = true
		if err != nil {
			return reconcile.Result{}, err
		}
	} else if err != nil {
		return reconcile.Result{}, err
	}
	if !justCreated && util.CopyVirtualService(virtualService, foundVirtual) {
		log.Info("Updating virtual service", "namespace", instance.Namespace, "name",
			virtualServiceName(instance.Name, instance.Namespace))
		err = r.Update(context.TODO(), foundVirtual)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Update the status if previous condition is not "Ready"
	oldConditions := instance.Status.Conditions
	if len(oldConditions) == 0 || oldConditions[0].Type != "Ready" {
		newCondition := v1alpha1.NotebookCondition{
			Type: "Ready",
		}
		instance.Status.Conditions = append([]v1alpha1.NotebookCondition{newCondition}, oldConditions...)
		// Using context.Background as: https://book.kubebuilder.io/basics/status_subresource.html
		err = r.Status().Update(context.Background(), instance)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Update the readyReplicas if the status is changed
	if foundStateful.Status.ReadyReplicas != instance.Status.ReadyReplicas {
		log.Info("Updating Status", "namespace", instance.Namespace, "name", instance.Name)
		instance.Status.ReadyReplicas = foundStateful.Status.ReadyReplicas
		err = r.Status().Update(context.Background(), instance)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Check the pod status
	pod := &corev1.Pod{}
	err = r.Get(context.TODO(), types.NamespacedName{Name: ss.Name + "-0", Namespace: ss.Namespace}, pod)
	if err != nil && errors.IsNotFound(err) {
		// This should be reconciled by the StatefulSet
		log.Info("Pod not found...")
	} else if err != nil {
		return reconcile.Result{}, err
	} else {
		// Got the pod; mirror the first container's state into the Notebook status.
		if len(pod.Status.ContainerStatuses) > 0 &&
			pod.Status.ContainerStatuses[0].State != instance.Status.ContainerState {
			log.Info("Updating container state: ", "namespace", instance.Namespace, "name", instance.Name)
			instance.Status.ContainerState = pod.Status.ContainerStatuses[0].State
			err = r.Status().Update(context.Background(), instance)
			if err != nil {
				return reconcile.Result{}, err
			}
		}
	}
	return reconcile.Result{}, nil
}
// generateStatefulSet builds the desired StatefulSet for the given Notebook:
// a pod template copied from instance.Spec.Template.Spec, labeled with
// "statefulset" (for the Service selector) and "notebook-name" (for the pod watch).
func generateStatefulSet(instance *v1alpha1.Notebook) *appsv1.StatefulSet {
	ss := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      instance.Name,
			Namespace: instance.Namespace,
		},
		Spec: appsv1.StatefulSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"statefulset": instance.Name,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
					"statefulset":   instance.Name,
					"notebook-name": instance.Name,
				}},
				Spec: instance.Spec.Template.Spec,
			},
		},
	}
	// copy all of the Notebook labels to the pod including poddefault related labels
	l := &ss.Spec.Template.ObjectMeta.Labels
	for k, v := range instance.ObjectMeta.Labels {
		(*l)[k] = v
	}
	// NOTE(review): assumes the Notebook spec always has at least one container;
	// an empty Containers slice would panic here — confirm upstream validation.
	podSpec := &ss.Spec.Template.Spec
	container := &podSpec.Containers[0]
	if container.WorkingDir == "" {
		container.WorkingDir = "/home/jovyan"
	}
	if container.Ports == nil {
		container.Ports = []corev1.ContainerPort{
			corev1.ContainerPort{
				ContainerPort: DefaultContainerPort,
				Name:          "notebook-port",
				Protocol:      "TCP",
			},
		}
	}
	// NB_PREFIX carries the URL prefix the notebook is served under.
	container.Env = append(container.Env, corev1.EnvVar{
		Name:  "NB_PREFIX",
		Value: "/notebook/" + instance.Namespace + "/" + instance.Name,
	})
	// Default the pod fsGroup (see DefaultFSGroup) when none is specified.
	if podSpec.SecurityContext == nil {
		fsGroup := DefaultFSGroup
		podSpec.SecurityContext = &corev1.PodSecurityContext{
			FSGroup: &fsGroup,
		}
	}
	return ss
}
// generateService builds the ClusterIP Service exposing the notebook pod on
// DefaultServingPort, targeting the pod's first container port (falling back to
// DefaultContainerPort), with an Ambassador mapping annotation for the
// /notebook/<namespace>/<name> prefix.
func generateService(instance *v1alpha1.Notebook) *corev1.Service {
	// Define the desired Service object
	port := DefaultContainerPort
	containerPorts := instance.Spec.Template.Spec.Containers[0].Ports
	// FIX: check the length, not just nil — a non-nil but empty Ports slice
	// previously panicked on the index below; now it falls back to the default.
	if len(containerPorts) > 0 {
		port = int(containerPorts[0].ContainerPort)
	}
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      instance.Name,
			Namespace: instance.Namespace,
			Annotations: map[string]string{
				"getambassador.io/config": strings.Join(
					[]string{
						"---",
						"apiVersion: ambassador/v0",
						"kind: Mapping",
						"name: notebook_" + instance.Namespace + "_" + instance.Name + "_mapping",
						"prefix: /notebook/" + instance.Namespace + "/" + instance.Name,
						"rewrite: /notebook/" + instance.Namespace + "/" + instance.Name,
						"timeout_ms: 300000",
						"service: " + instance.Name + "." + instance.Namespace,
						"use_websocket: true",
					}, "\n"),
			},
		},
		Spec: corev1.ServiceSpec{
			Type:     "ClusterIP",
			Selector: map[string]string{"statefulset": instance.Name},
			Ports: []corev1.ServicePort{
				corev1.ServicePort{
					// Make port name follow Istio pattern so it can be managed by istio rbac
					Name:       "http-" + instance.Name,
					Port:       DefaultServingPort,
					TargetPort: intstr.FromInt(port),
					Protocol:   "TCP",
				},
			},
		},
	}
	return svc
}
// virtualServiceName returns the canonical VirtualService name for a Notebook:
// "notebook-<namespace>-<name>".
func virtualServiceName(kfName string, namespace string) string {
	const nameFormat = "notebook-%s-%s"
	return fmt.Sprintf(nameFormat, namespace, kfName)
}
// generateVirtualService builds an Istio VirtualService (as unstructured) that
// routes /notebook/<namespace>/<name> via the kubeflow/kubeflow-gateway to the
// notebook's Service on DefaultServingPort.
func generateVirtualService(instance *v1alpha1.Notebook) (*unstructured.Unstructured, error) {
	name := instance.Name
	namespace := instance.Namespace
	prefix := fmt.Sprintf("/notebook/%s/%s", namespace, name)
	rewrite := fmt.Sprintf("/notebook/%s/%s", namespace, name)
	// TODO(gabrielwen): Make clusterDomain an option.
	service := fmt.Sprintf("%s.%s.svc.cluster.local", name, namespace)
	vsvc := &unstructured.Unstructured{}
	vsvc.SetAPIVersion("networking.istio.io/v1alpha3")
	vsvc.SetKind("VirtualService")
	vsvc.SetName(virtualServiceName(name, namespace))
	vsvc.SetNamespace(namespace)
	if err := unstructured.SetNestedStringSlice(vsvc.Object, []string{"*"}, "spec", "hosts"); err != nil {
		return nil, fmt.Errorf("Set .spec.hosts error: %v", err)
	}
	if err := unstructured.SetNestedStringSlice(vsvc.Object, []string{"kubeflow/kubeflow-gateway"},
		"spec", "gateways"); err != nil {
		return nil, fmt.Errorf("Set .spec.gateways error: %v", err)
	}
	// Single HTTP route: match on the prefix, rewrite to the same path, and
	// forward to the Service with a 300s timeout.
	http := []interface{}{
		map[string]interface{}{
			"match": []interface{}{
				map[string]interface{}{
					"uri": map[string]interface{}{
						"prefix": prefix,
					},
				},
			},
			"rewrite": map[string]interface{}{
				"uri": rewrite,
			},
			"route": []interface{}{
				map[string]interface{}{
					"destination": map[string]interface{}{
						"host": service,
						"port": map[string]interface{}{
							"number": int64(DefaultServingPort),
						},
					},
				},
			},
			"timeout": "300s",
		},
	}
	if err := unstructured.SetNestedSlice(vsvc.Object, http, "spec", "http"); err != nil {
		return nil, fmt.Errorf("Set .spec.http error: %v", err)
	}
	return vsvc, nil
}
Update notebook_controller to gate Istio VirtualService reconciliation behind the
USE_ISTIO environment variable (extracted into reconcileVirtualService).
/*
Copyright 2019 The Kubeflow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package notebook
import (
"context"
"fmt"
"os"
"strings"
v1alpha1 "github.com/kubeflow/kubeflow/components/notebook-controller/pkg/apis/notebook/v1alpha1"
"github.com/kubeflow/kubeflow/components/notebook-controller/pkg/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.Log.WithName("controller")
const DefaultContainerPort = 8888
const DefaultServingPort = 80
// The default fsGroup of PodSecurityContext.
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podsecuritycontext-v1-core
const DefaultFSGroup = int64(100)
// Add creates a new Notebook Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	// Delegate to add with a freshly constructed reconciler.
	return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler wired with the manager's
// (cached) client and scheme.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileNotebook{Client: mgr.GetClient(), scheme: mgr.GetScheme()}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler.
// It watches Notebooks plus the StatefulSets, Services, Istio VirtualServices
// owned by them, and the underlying pods (matched via the "notebook-name" label).
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("notebook-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to Notebook
	err = c.Watch(&source.Kind{Type: &v1alpha1.Notebook{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	// Watch owned StatefulSets so drift triggers a reconcile of the owner Notebook.
	err = c.Watch(&source.Kind{Type: &appsv1.StatefulSet{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &v1alpha1.Notebook{},
	})
	if err != nil {
		return err
	}
	// Watch owned Services.
	err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &v1alpha1.Notebook{},
	})
	if err != nil {
		return err
	}
	// Watch for changes to Notebook virtualservices.
	virtualService := &unstructured.Unstructured{}
	virtualService.SetAPIVersion("networking.istio.io/v1alpha3")
	virtualService.SetKind("VirtualService")
	err = c.Watch(&source.Kind{Type: virtualService}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &v1alpha1.Notebook{},
	})
	if err != nil {
		return err
	}
	// Watch underlying pod.
	// mapFn defines the mapping from object in event to reconcile request:
	// the pod's "notebook-name" label identifies the owning Notebook.
	mapFn := handler.ToRequestsFunc(
		func(a handler.MapObject) []reconcile.Request {
			return []reconcile.Request{
				{NamespacedName: types.NamespacedName{
					Name:      a.Meta.GetLabels()["notebook-name"],
					Namespace: a.Meta.GetNamespace(),
				}},
			}
		})
	// Only react to pods that actually carry the "notebook-name" label.
	p := predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			if _, ok := e.MetaOld.GetLabels()["notebook-name"]; !ok {
				return false
			}
			return e.ObjectOld != e.ObjectNew
		},
		CreateFunc: func(e event.CreateEvent) bool {
			if _, ok := e.Meta.GetLabels()["notebook-name"]; !ok {
				return false
			}
			return true
		},
	}
	err = c.Watch(
		&source.Kind{Type: &corev1.Pod{}},
		&handler.EnqueueRequestsFromMapFunc{
			ToRequests: mapFn,
		},
		p)
	if err != nil {
		return err
	}
	return nil
}
// Compile-time assertion that ReconcileNotebook implements reconcile.Reconciler.
var _ reconcile.Reconciler = &ReconcileNotebook{}

// ReconcileNotebook reconciles a Notebook object.
type ReconcileNotebook struct {
	// Client reads and writes Kubernetes objects.
	client.Client
	// scheme is used by SetControllerReference when stamping owner refs.
	scheme *runtime.Scheme
}
// Reconcile reads that state of the cluster for a Notebook object and makes changes based on the state read
// and what is in the Notebook.Spec.
// Flow: StatefulSet -> Service -> (if USE_ISTIO=true) Istio VirtualService ->
// status conditions/replicas/pod state.
// Automatically generate RBAC rules to allow the Controller to read and write StatefulSet
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=services/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=kubeflow.org,resources=notebooks,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=kubeflow.org,resources=notebooks/status,verbs=get;update;patch
func (r *ReconcileNotebook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	// Fetch the Notebook instance
	instance := &v1alpha1.Notebook{}
	err := r.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Object not found, return. Created objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}
	// Reconcile StatefulSet
	ss := generateStatefulSet(instance)
	if err := controllerutil.SetControllerReference(instance, ss, r.scheme); err != nil {
		return reconcile.Result{}, err
	}
	// Check if the StatefulSet already exists
	foundStateful := &appsv1.StatefulSet{}
	justCreated := false
	err = r.Get(context.TODO(), types.NamespacedName{Name: ss.Name, Namespace: ss.Namespace}, foundStateful)
	if err != nil && errors.IsNotFound(err) {
		log.Info("Creating StatefulSet", "namespace", ss.Namespace, "name", ss.Name)
		err = r.Create(context.TODO(), ss)
		justCreated = true
		if err != nil {
			return reconcile.Result{}, err
		}
	} else if err != nil {
		return reconcile.Result{}, err
	}
	// Update the foundStateful object and write the result back if there are any changes
	if !justCreated && util.CopyStatefulSetFields(ss, foundStateful) {
		log.Info("Updating StatefulSet", "namespace", ss.Namespace, "name", ss.Name)
		err = r.Update(context.TODO(), foundStateful)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Reconcile service
	service := generateService(instance)
	if err := controllerutil.SetControllerReference(instance, service, r.scheme); err != nil {
		return reconcile.Result{}, err
	}
	// Check if the Service already exists
	foundService := &corev1.Service{}
	justCreated = false
	err = r.Get(context.TODO(), types.NamespacedName{Name: service.Name, Namespace: service.Namespace}, foundService)
	if err != nil && errors.IsNotFound(err) {
		log.Info("Creating Service", "namespace", service.Namespace, "name", service.Name)
		err = r.Create(context.TODO(), service)
		justCreated = true
		if err != nil {
			return reconcile.Result{}, err
		}
	} else if err != nil {
		return reconcile.Result{}, err
	}
	// Update the foundService object and write the result back if there are any changes
	if !justCreated && util.CopyServiceFields(service, foundService) {
		log.Info("Updating Service\n", "namespace", service.Namespace, "name", service.Name)
		err = r.Update(context.TODO(), foundService)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Reconcile virtual service if we use ISTIO.
	if os.Getenv("USE_ISTIO") == "true" {
		err = r.reconcileVirtualService(instance)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Update the status if previous condition is not "Ready"
	oldConditions := instance.Status.Conditions
	if len(oldConditions) == 0 || oldConditions[0].Type != "Ready" {
		newCondition := v1alpha1.NotebookCondition{
			Type: "Ready",
		}
		instance.Status.Conditions = append([]v1alpha1.NotebookCondition{newCondition}, oldConditions...)
		// Using context.Background as: https://book.kubebuilder.io/basics/status_subresource.html
		err = r.Status().Update(context.Background(), instance)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Update the readyReplicas if the status is changed
	if foundStateful.Status.ReadyReplicas != instance.Status.ReadyReplicas {
		log.Info("Updating Status", "namespace", instance.Namespace, "name", instance.Name)
		instance.Status.ReadyReplicas = foundStateful.Status.ReadyReplicas
		err = r.Status().Update(context.Background(), instance)
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	// Check the pod status
	pod := &corev1.Pod{}
	err = r.Get(context.TODO(), types.NamespacedName{Name: ss.Name + "-0", Namespace: ss.Namespace}, pod)
	if err != nil && errors.IsNotFound(err) {
		// This should be reconcile by the StatefulSet
		log.Info("Pod not found...")
	} else if err != nil {
		return reconcile.Result{}, err
	} else {
		// Got the pod; mirror the first container's state into the Notebook status.
		if len(pod.Status.ContainerStatuses) > 0 &&
			pod.Status.ContainerStatuses[0].State != instance.Status.ContainerState {
			log.Info("Updating container state: ", "namespace", instance.Namespace, "name", instance.Name)
			instance.Status.ContainerState = pod.Status.ContainerStatuses[0].State
			err = r.Status().Update(context.Background(), instance)
			if err != nil {
				return reconcile.Result{}, err
			}
		}
	}
	return reconcile.Result{}, nil
}
// generateStatefulSet builds the desired StatefulSet for the given Notebook:
// a pod template copied from instance.Spec.Template.Spec, labeled with
// "statefulset" (for the Service selector) and "notebook-name" (for the pod watch).
func generateStatefulSet(instance *v1alpha1.Notebook) *appsv1.StatefulSet {
	ss := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      instance.Name,
			Namespace: instance.Namespace,
		},
		Spec: appsv1.StatefulSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"statefulset": instance.Name,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
					"statefulset":   instance.Name,
					"notebook-name": instance.Name,
				}},
				Spec: instance.Spec.Template.Spec,
			},
		},
	}
	// copy all of the Notebook labels to the pod including poddefault related labels
	l := &ss.Spec.Template.ObjectMeta.Labels
	for k, v := range instance.ObjectMeta.Labels {
		(*l)[k] = v
	}
	// NOTE(review): assumes the Notebook spec always has at least one container;
	// an empty Containers slice would panic here — confirm upstream validation.
	podSpec := &ss.Spec.Template.Spec
	container := &podSpec.Containers[0]
	if container.WorkingDir == "" {
		container.WorkingDir = "/home/jovyan"
	}
	if container.Ports == nil {
		container.Ports = []corev1.ContainerPort{
			corev1.ContainerPort{
				ContainerPort: DefaultContainerPort,
				Name:          "notebook-port",
				Protocol:      "TCP",
			},
		}
	}
	// NB_PREFIX carries the URL prefix the notebook is served under.
	container.Env = append(container.Env, corev1.EnvVar{
		Name:  "NB_PREFIX",
		Value: "/notebook/" + instance.Namespace + "/" + instance.Name,
	})
	// Default the pod fsGroup (see DefaultFSGroup) when none is specified.
	if podSpec.SecurityContext == nil {
		fsGroup := DefaultFSGroup
		podSpec.SecurityContext = &corev1.PodSecurityContext{
			FSGroup: &fsGroup,
		}
	}
	return ss
}
// generateService builds the ClusterIP Service exposing the notebook pod on
// DefaultServingPort, targeting the pod's first container port (falling back to
// DefaultContainerPort), with an Ambassador mapping annotation for the
// /notebook/<namespace>/<name> prefix.
func generateService(instance *v1alpha1.Notebook) *corev1.Service {
	// Define the desired Service object
	port := DefaultContainerPort
	containerPorts := instance.Spec.Template.Spec.Containers[0].Ports
	// FIX: check the length, not just nil — a non-nil but empty Ports slice
	// previously panicked on the index below; now it falls back to the default.
	if len(containerPorts) > 0 {
		port = int(containerPorts[0].ContainerPort)
	}
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      instance.Name,
			Namespace: instance.Namespace,
			Annotations: map[string]string{
				"getambassador.io/config": strings.Join(
					[]string{
						"---",
						"apiVersion: ambassador/v0",
						"kind: Mapping",
						"name: notebook_" + instance.Namespace + "_" + instance.Name + "_mapping",
						"prefix: /notebook/" + instance.Namespace + "/" + instance.Name,
						"rewrite: /notebook/" + instance.Namespace + "/" + instance.Name,
						"timeout_ms: 300000",
						"service: " + instance.Name + "." + instance.Namespace,
						"use_websocket: true",
					}, "\n"),
			},
		},
		Spec: corev1.ServiceSpec{
			Type:     "ClusterIP",
			Selector: map[string]string{"statefulset": instance.Name},
			Ports: []corev1.ServicePort{
				corev1.ServicePort{
					// Make port name follow Istio pattern so it can be managed by istio rbac
					Name:       "http-" + instance.Name,
					Port:       DefaultServingPort,
					TargetPort: intstr.FromInt(port),
					Protocol:   "TCP",
				},
			},
		},
	}
	return svc
}
// virtualServiceName returns the canonical VirtualService name for a Notebook:
// "notebook-<namespace>-<name>".
func virtualServiceName(kfName string, namespace string) string {
	const nameFormat = "notebook-%s-%s"
	return fmt.Sprintf(nameFormat, namespace, kfName)
}
// generateVirtualService builds an Istio VirtualService (as unstructured) that
// routes /notebook/<namespace>/<name> via the kubeflow/kubeflow-gateway to the
// notebook's Service on DefaultServingPort.
func generateVirtualService(instance *v1alpha1.Notebook) (*unstructured.Unstructured, error) {
	name := instance.Name
	namespace := instance.Namespace
	prefix := fmt.Sprintf("/notebook/%s/%s", namespace, name)
	rewrite := fmt.Sprintf("/notebook/%s/%s", namespace, name)
	// TODO(gabrielwen): Make clusterDomain an option.
	service := fmt.Sprintf("%s.%s.svc.cluster.local", name, namespace)
	vsvc := &unstructured.Unstructured{}
	vsvc.SetAPIVersion("networking.istio.io/v1alpha3")
	vsvc.SetKind("VirtualService")
	vsvc.SetName(virtualServiceName(name, namespace))
	vsvc.SetNamespace(namespace)
	if err := unstructured.SetNestedStringSlice(vsvc.Object, []string{"*"}, "spec", "hosts"); err != nil {
		return nil, fmt.Errorf("Set .spec.hosts error: %v", err)
	}
	if err := unstructured.SetNestedStringSlice(vsvc.Object, []string{"kubeflow/kubeflow-gateway"},
		"spec", "gateways"); err != nil {
		return nil, fmt.Errorf("Set .spec.gateways error: %v", err)
	}
	// Single HTTP route: match on the prefix, rewrite to the same path, and
	// forward to the Service with a 300s timeout.
	http := []interface{}{
		map[string]interface{}{
			"match": []interface{}{
				map[string]interface{}{
					"uri": map[string]interface{}{
						"prefix": prefix,
					},
				},
			},
			"rewrite": map[string]interface{}{
				"uri": rewrite,
			},
			"route": []interface{}{
				map[string]interface{}{
					"destination": map[string]interface{}{
						"host": service,
						"port": map[string]interface{}{
							"number": int64(DefaultServingPort),
						},
					},
				},
			},
			"timeout": "300s",
		},
	}
	if err := unstructured.SetNestedSlice(vsvc.Object, http, "spec", "http"); err != nil {
		return nil, fmt.Errorf("Set .spec.http error: %v", err)
	}
	return vsvc, nil
}
// reconcileVirtualService ensures the Istio VirtualService for the Notebook
// exists and matches the desired spec: it creates the resource when missing and
// updates it when util.CopyVirtualService reports a difference.
func (r *ReconcileNotebook) reconcileVirtualService(instance *v1alpha1.Notebook) error {
	virtualService, err := generateVirtualService(instance)
	// FIX: this error was previously dropped — the subsequent `if err := ...`
	// shadowed err, so a failure to build the VirtualService spec went unnoticed.
	if err != nil {
		return err
	}
	if err := controllerutil.SetControllerReference(instance, virtualService, r.scheme); err != nil {
		return err
	}
	// Check if the virtual service already exists.
	foundVirtual := &unstructured.Unstructured{}
	justCreated := false
	foundVirtual.SetAPIVersion("networking.istio.io/v1alpha3")
	foundVirtual.SetKind("VirtualService")
	err = r.Get(context.TODO(), types.NamespacedName{Name: virtualServiceName(instance.Name,
		instance.Namespace), Namespace: instance.Namespace}, foundVirtual)
	if err != nil && errors.IsNotFound(err) {
		log.Info("Creating virtual service", "namespace", instance.Namespace, "name",
			virtualServiceName(instance.Name, instance.Namespace))
		err = r.Create(context.TODO(), virtualService)
		justCreated = true
		if err != nil {
			return err
		}
	} else if err != nil {
		return err
	}
	// Write back the found object when the desired spec differs.
	if !justCreated && util.CopyVirtualService(virtualService, foundVirtual) {
		log.Info("Updating virtual service", "namespace", instance.Namespace, "name",
			virtualServiceName(instance.Name, instance.Namespace))
		err = r.Update(context.TODO(), foundVirtual)
		if err != nil {
			return err
		}
	}
	return nil
}
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmath
import (
"math"
"testing"
)
var vc26 = []complex128{
(4.97901192488367350108546816 + 7.73887247457810456552351752i),
(7.73887247457810456552351752 - 0.27688005719200159404635997i),
(-0.27688005719200159404635997 - 5.01060361827107492160848778i),
(-5.01060361827107492160848778 + 9.63629370719841737980004837i),
(9.63629370719841737980004837 + 2.92637723924396464525443662i),
(2.92637723924396464525443662 + 5.22908343145930665230025625i),
(5.22908343145930665230025625 + 2.72793991043601025126008608i),
(2.72793991043601025126008608 + 1.82530809168085506044576505i),
(1.82530809168085506044576505 - 8.68592476857560136238589621i),
(-8.68592476857560136238589621 + 4.97901192488367350108546816i),
}
var vc = []complex128{
(4.9790119248836735e+00 + 7.7388724745781045e+00i),
(7.7388724745781045e+00 - 2.7688005719200159e-01i),
(-2.7688005719200159e-01 - 5.0106036182710749e+00i),
(-5.0106036182710749e+00 + 9.6362937071984173e+00i),
(9.6362937071984173e+00 + 2.9263772392439646e+00i),
(2.9263772392439646e+00 + 5.2290834314593066e+00i),
(5.2290834314593066e+00 + 2.7279399104360102e+00i),
(2.7279399104360102e+00 + 1.8253080916808550e+00i),
(1.8253080916808550e+00 - 8.6859247685756013e+00i),
(-8.6859247685756013e+00 + 4.9790119248836735e+00i),
}
// The expected results below were computed by the high precision calculators
// at http://keisan.casio.com/. More exact input values (array vc[], above)
// were obtained by printing them with "%.26f". The answers were calculated
// to 26 digits (by using the "Digit number" drop-down control of each
// calculator).
var abs = []float64{
9.2022120669932650313380972e+00,
7.7438239742296106616261394e+00,
5.0182478202557746902556648e+00,
1.0861137372799545160704002e+01,
1.0070841084922199607011905e+01,
5.9922447613166942183705192e+00,
5.8978784056736762299945176e+00,
3.2822866700678709020367184e+00,
8.8756430028990417290744307e+00,
1.0011785496777731986390856e+01,
}
var acos = []complex128{
(1.0017679804707456328694569 - 2.9138232718554953784519807i),
(0.03606427612041407369636057 + 2.7358584434576260925091256i),
(1.6249365462333796703711823 + 2.3159537454335901187730929i),
(2.0485650849650740120660391 - 3.0795576791204117911123886i),
(0.29621132089073067282488147 - 3.0007392508200622519398814i),
(1.0664555914934156601503632 - 2.4872865024796011364747111i),
(0.48681307452231387690013905 - 2.463655912283054555225301i),
(0.6116977071277574248407752 - 1.8734458851737055262693056i),
(1.3649311280370181331184214 + 2.8793528632328795424123832i),
(2.6189310485682988308904501 - 2.9956543302898767795858704i),
}
var acosh = []complex128{
(2.9138232718554953784519807 + 1.0017679804707456328694569i),
(2.7358584434576260925091256 - 0.03606427612041407369636057i),
(2.3159537454335901187730929 - 1.6249365462333796703711823i),
(3.0795576791204117911123886 + 2.0485650849650740120660391i),
(3.0007392508200622519398814 + 0.29621132089073067282488147i),
(2.4872865024796011364747111 + 1.0664555914934156601503632i),
(2.463655912283054555225301 + 0.48681307452231387690013905i),
(1.8734458851737055262693056 + 0.6116977071277574248407752i),
(2.8793528632328795424123832 - 1.3649311280370181331184214i),
(2.9956543302898767795858704 + 2.6189310485682988308904501i),
}
var asin = []complex128{
(0.56902834632415098636186476 + 2.9138232718554953784519807i),
(1.5347320506744825455349611 - 2.7358584434576260925091256i),
(-0.054140219438483051139860579 - 2.3159537454335901187730929i),
(-0.47776875817017739283471738 + 3.0795576791204117911123886i),
(1.2745850059041659464064402 + 3.0007392508200622519398814i),
(0.50434073530148095908095852 + 2.4872865024796011364747111i),
(1.0839832522725827423311826 + 2.463655912283054555225301i),
(0.9590986196671391943905465 + 1.8734458851737055262693056i),
(0.20586519875787848611290031 - 2.8793528632328795424123832i),
(-1.0481347217734022116591284 + 2.9956543302898767795858704i),
}
var asinh = []complex128{
(2.9113760469415295679342185 + 0.99639459545704326759805893i),
(2.7441755423994259061579029 - 0.035468308789000500601119392i),
(-2.2962136462520690506126678 - 1.5144663565690151885726707i),
(-3.0771233459295725965402455 + 1.0895577967194013849422294i),
(3.0048366100923647417557027 + 0.29346979169819220036454168i),
(2.4800059370795363157364643 + 1.0545868606049165710424232i),
(2.4718773838309585611141821 + 0.47502344364250803363708842i),
(1.8910743588080159144378396 + 0.56882925572563602341139174i),
(2.8735426423367341878069406 - 1.362376149648891420997548i),
(-2.9981750586172477217567878 + 0.5183571985225367505624207i),
}
var atan = []complex128{
(1.5115747079332741358607654 + 0.091324403603954494382276776i),
(1.4424504323482602560806727 - 0.0045416132642803911503770933i),
(-1.5593488703630532674484026 - 0.20163295409248362456446431i),
(-1.5280619472445889867794105 + 0.081721556230672003746956324i),
(1.4759909163240799678221039 + 0.028602969320691644358773586i),
(1.4877353772046548932715555 + 0.14566877153207281663773599i),
(1.4206983927779191889826 + 0.076830486127880702249439993i),
(1.3162236060498933364869556 + 0.16031313000467530644933363i),
(1.5473450684303703578810093 - 0.11064907507939082484935782i),
(-1.4841462340185253987375812 + 0.049341850305024399493142411i),
}
var atanh = []complex128{
(0.058375027938968509064640438 + 1.4793488495105334458167782i),
(0.12977343497790381229915667 - 1.5661009410463561327262499i),
(-0.010576456067347252072200088 - 1.3743698658402284549750563i),
(-0.042218595678688358882784918 + 1.4891433968166405606692604i),
(0.095218997991316722061828397 + 1.5416884098777110330499698i),
(0.079965459366890323857556487 + 1.4252510353873192700350435i),
(0.15051245471980726221708301 + 1.4907432533016303804884461i),
(0.25082072933993987714470373 + 1.392057665392187516442986i),
(0.022896108815797135846276662 - 1.4609224989282864208963021i),
(-0.08665624101841876130537396 + 1.5207902036935093480142159i),
}
var conj = []complex128{
(4.9790119248836735e+00 - 7.7388724745781045e+00i),
(7.7388724745781045e+00 + 2.7688005719200159e-01i),
(-2.7688005719200159e-01 + 5.0106036182710749e+00i),
(-5.0106036182710749e+00 - 9.6362937071984173e+00i),
(9.6362937071984173e+00 - 2.9263772392439646e+00i),
(2.9263772392439646e+00 - 5.2290834314593066e+00i),
(5.2290834314593066e+00 - 2.7279399104360102e+00i),
(2.7279399104360102e+00 - 1.8253080916808550e+00i),
(1.8253080916808550e+00 + 8.6859247685756013e+00i),
(-8.6859247685756013e+00 - 4.9790119248836735e+00i),
}
var cos = []complex128{
(3.024540920601483938336569e+02 + 1.1073797572517071650045357e+03i),
(1.192858682649064973252758e-01 + 2.7857554122333065540970207e-01i),
(7.2144394304528306603857962e+01 - 2.0500129667076044169954205e+01i),
(2.24921952538403984190541e+03 - 7.317363745602773587049329e+03i),
(-9.148222970032421760015498e+00 + 1.953124661113563541862227e+00i),
(-9.116081175857732248227078e+01 - 1.992669213569952232487371e+01i),
(3.795639179042704640002918e+00 + 6.623513350981458399309662e+00i),
(-2.9144840732498869560679084e+00 - 1.214620271628002917638748e+00i),
(-7.45123482501299743872481e+02 + 2.8641692314488080814066734e+03i),
(-5.371977967039319076416747e+01 + 4.893348341339375830564624e+01i),
}
var cosh = []complex128{
(8.34638383523018249366948e+00 + 7.2181057886425846415112064e+01i),
(1.10421967379919366952251e+03 - 3.1379638689277575379469861e+02i),
(3.051485206773701584738512e-01 - 2.6805384730105297848044485e-01i),
(-7.33294728684187933370938e+01 + 1.574445942284918251038144e+01i),
(-7.478643293945957535757355e+03 + 1.6348382209913353929473321e+03i),
(4.622316522966235701630926e+00 - 8.088695185566375256093098e+00i),
(-8.544333183278877406197712e+01 + 3.7505836120128166455231717e+01i),
(-1.934457815021493925115198e+00 + 7.3725859611767228178358673e+00i),
(-2.352958770061749348353548e+00 - 2.034982010440878358915409e+00i),
(7.79756457532134748165069e+02 + 2.8549350716819176560377717e+03i),
}
var exp = []complex128{
(1.669197736864670815125146e+01 + 1.4436895109507663689174096e+02i),
(2.2084389286252583447276212e+03 - 6.2759289284909211238261917e+02i),
(2.227538273122775173434327e-01 + 7.2468284028334191250470034e-01i),
(-6.5182985958153548997881627e-03 - 1.39965837915193860879044e-03i),
(-1.4957286524084015746110777e+04 + 3.269676455931135688988042e+03i),
(9.218158701983105935659273e+00 - 1.6223985291084956009304582e+01i),
(-1.7088175716853040841444505e+02 + 7.501382609870410713795546e+01i),
(-3.852461315830959613132505e+00 + 1.4808420423156073221970892e+01i),
(-4.586775503301407379786695e+00 - 4.178501081246873415144744e+00i),
(4.451337963005453491095747e-05 - 1.62977574205442915935263e-04i),
}
var log = []complex128{
(2.2194438972179194425697051e+00 + 9.9909115046919291062461269e-01i),
(2.0468956191154167256337289e+00 - 3.5762575021856971295156489e-02i),
(1.6130808329853860438751244e+00 - 1.6259990074019058442232221e+00i),
(2.3851910394823008710032651e+00 + 2.0502936359659111755031062e+00i),
(2.3096442270679923004800651e+00 + 2.9483213155446756211881774e-01i),
(1.7904660933974656106951860e+00 + 1.0605860367252556281902109e+00i),
(1.7745926939841751666177512e+00 + 4.8084556083358307819310911e-01i),
(1.1885403350045342425648780e+00 + 5.8969634164776659423195222e-01i),
(2.1833107837679082586772505e+00 - 1.3636647724582455028314573e+00i),
(2.3037629487273259170991671e+00 + 2.6210913895386013290915234e+00i),
}
var log10 = []complex128{
(9.6389223745559042474184943e-01 + 4.338997735671419492599631e-01i),
(8.8895547241376579493490892e-01 - 1.5531488990643548254864806e-02i),
(7.0055210462945412305244578e-01 - 7.0616239649481243222248404e-01i),
(1.0358753067322445311676952e+00 + 8.9043121238134980156490909e-01i),
(1.003065742975330237172029e+00 + 1.2804396782187887479857811e-01i),
(7.7758954439739162532085157e-01 + 4.6060666333341810869055108e-01i),
(7.7069581462315327037689152e-01 + 2.0882857371769952195512475e-01i),
(5.1617650901191156135137239e-01 + 2.5610186717615977620363299e-01i),
(9.4819982567026639742663212e-01 - 5.9223208584446952284914289e-01i),
(1.0005115362454417135973429e+00 + 1.1383255270407412817250921e+00i),
}
type ff struct {
r, theta float64
}
var polar = []ff{
{9.2022120669932650313380972e+00, 9.9909115046919291062461269e-01},
{7.7438239742296106616261394e+00, -3.5762575021856971295156489e-02},
{5.0182478202557746902556648e+00, -1.6259990074019058442232221e+00},
{1.0861137372799545160704002e+01, 2.0502936359659111755031062e+00},
{1.0070841084922199607011905e+01, 2.9483213155446756211881774e-01},
{5.9922447613166942183705192e+00, 1.0605860367252556281902109e+00},
{5.8978784056736762299945176e+00, 4.8084556083358307819310911e-01},
{3.2822866700678709020367184e+00, 5.8969634164776659423195222e-01},
{8.8756430028990417290744307e+00, -1.3636647724582455028314573e+00},
{1.0011785496777731986390856e+01, 2.6210913895386013290915234e+00},
}
var pow = []complex128{
(-2.499956739197529585028819e+00 + 1.759751724335650228957144e+00i),
(7.357094338218116311191939e+04 - 5.089973412479151648145882e+04i),
(1.320777296067768517259592e+01 - 3.165621914333901498921986e+01i),
(-3.123287828297300934072149e-07 - 1.9849567521490553032502223E-7i),
(8.0622651468477229614813e+04 - 7.80028727944573092944363e+04i),
(-1.0268824572103165858577141e+00 - 4.716844738244989776610672e-01i),
(-4.35953819012244175753187e+01 + 2.2036445974645306917648585e+02i),
(8.3556092283250594950239e-01 - 1.2261571947167240272593282e+01i),
(1.582292972120769306069625e+03 + 1.273564263524278244782512e+04i),
(6.592208301642122149025369e-08 + 2.584887236651661903526389e-08i),
}
var sin = []complex128{
(-1.1073801774240233539648544e+03 + 3.024539773002502192425231e+02i),
(1.0317037521400759359744682e+00 - 3.2208979799929570242818e-02i),
(-2.0501952097271429804261058e+01 - 7.2137981348240798841800967e+01i),
(7.3173638080346338642193078e+03 + 2.249219506193664342566248e+03i),
(-1.964375633631808177565226e+00 - 9.0958264713870404464159683e+00i),
(1.992783647158514838337674e+01 - 9.11555769410191350416942e+01i),
(-6.680335650741921444300349e+00 + 3.763353833142432513086117e+00i),
(1.2794028166657459148245993e+00 - 2.7669092099795781155109602e+00i),
(2.8641693949535259594188879e+03 + 7.451234399649871202841615e+02i),
(-4.893811726244659135553033e+01 - 5.371469305562194635957655e+01i),
}
var sinh = []complex128{
(8.34559353341652565758198e+00 + 7.2187893208650790476628899e+01i),
(1.1042192548260646752051112e+03 - 3.1379650595631635858792056e+02i),
(-8.239469336509264113041849e-02 + 9.9273668758439489098514519e-01i),
(7.332295456982297798219401e+01 - 1.574585908122833444899023e+01i),
(-7.4786432301380582103534216e+03 + 1.63483823493980029604071e+03i),
(4.595842179016870234028347e+00 - 8.135290105518580753211484e+00i),
(-8.543842533574163435246793e+01 + 3.750798997857594068272375e+01i),
(-1.918003500809465688017307e+00 + 7.4358344619793504041350251e+00i),
(-2.233816733239658031433147e+00 - 2.143519070805995056229335e+00i),
(-7.797564130187551181105341e+02 - 2.8549352346594918614806877e+03i),
}
var sqrt = []complex128{
(2.6628203086086130543813948e+00 + 1.4531345674282185229796902e+00i),
(2.7823278427251986247149295e+00 - 4.9756907317005224529115567e-02i),
(1.5397025302089642757361015e+00 - 1.6271336573016637535695727e+00i),
(1.7103411581506875260277898e+00 + 2.8170677122737589676157029e+00i),
(3.1390392472953103383607947e+00 + 4.6612625849858653248980849e-01i),
(2.1117080764822417640789287e+00 + 1.2381170223514273234967850e+00i),
(2.3587032281672256703926939e+00 + 5.7827111903257349935720172e-01i),
(1.7335262588873410476661577e+00 + 5.2647258220721269141550382e-01i),
(2.3131094974708716531499282e+00 - 1.8775429304303785570775490e+00i),
(8.1420535745048086240947359e-01 + 3.0575897587277248522656113e+00i),
}
var tan = []complex128{
(-1.928757919086441129134525e-07 + 1.0000003267499169073251826e+00i),
(1.242412685364183792138948e+00 - 3.17149693883133370106696e+00i),
(-4.6745126251587795225571826e-05 - 9.9992439225263959286114298e-01i),
(4.792363401193648192887116e-09 + 1.0000000070589333451557723e+00i),
(2.345740824080089140287315e-03 + 9.947733046570988661022763e-01i),
(-2.396030789494815566088809e-05 + 9.9994781345418591429826779e-01i),
(-7.370204836644931340905303e-03 + 1.0043553413417138987717748e+00i),
(-3.691803847992048527007457e-02 + 9.6475071993469548066328894e-01i),
(-2.781955256713729368401878e-08 - 1.000000049848910609006646e+00i),
(9.4281590064030478879791249e-05 + 9.9999119340863718183758545e-01i),
}
var tanh = []complex128{
(1.0000921981225144748819918e+00 + 2.160986245871518020231507e-05i),
(9.9999967727531993209562591e-01 - 1.9953763222959658873657676e-07i),
(-1.765485739548037260789686e+00 + 1.7024216325552852445168471e+00i),
(-9.999189442732736452807108e-01 + 3.64906070494473701938098e-05i),
(9.9999999224622333738729767e-01 - 3.560088949517914774813046e-09i),
(1.0029324933367326862499343e+00 - 4.948790309797102353137528e-03i),
(9.9996113064788012488693567e-01 - 4.226995742097032481451259e-05i),
(1.0074784189316340029873945e+00 - 4.194050814891697808029407e-03i),
(9.9385534229718327109131502e-01 + 5.144217985914355502713437e-02i),
(-1.0000000491604982429364892e+00 - 2.901873195374433112227349e-08i),
}
// special cases
var vcAbsSC = []complex128{
NaN(),
}
var absSC = []float64{
math.NaN(),
}
var vcAcosSC = []complex128{
NaN(),
}
var acosSC = []complex128{
NaN(),
}
var vcAcoshSC = []complex128{
NaN(),
}
var acoshSC = []complex128{
NaN(),
}
var vcAsinSC = []complex128{
NaN(),
}
var asinSC = []complex128{
NaN(),
}
var vcAsinhSC = []complex128{
NaN(),
}
var asinhSC = []complex128{
NaN(),
}
var vcAtanSC = []complex128{
NaN(),
}
var atanSC = []complex128{
NaN(),
}
var vcAtanhSC = []complex128{
NaN(),
}
var atanhSC = []complex128{
NaN(),
}
var vcConjSC = []complex128{
NaN(),
}
var conjSC = []complex128{
NaN(),
}
var vcCosSC = []complex128{
NaN(),
}
var cosSC = []complex128{
NaN(),
}
var vcCoshSC = []complex128{
NaN(),
}
var coshSC = []complex128{
NaN(),
}
var vcExpSC = []complex128{
NaN(),
}
var expSC = []complex128{
NaN(),
}
// vcIsNaNSC lists special-case inputs for IsNaN: every combination of
// ±Inf, NaN, and 0 across the real and imaginary parts.
var vcIsNaNSC = []complex128{
	cmplx(math.Inf(-1), math.Inf(-1)),
	cmplx(math.Inf(-1), math.NaN()),
	cmplx(math.NaN(), math.Inf(-1)),
	cmplx(0, math.NaN()),
	cmplx(math.NaN(), 0),
	cmplx(math.Inf(1), math.Inf(1)),
	cmplx(math.Inf(1), math.NaN()),
	cmplx(math.NaN(), math.Inf(1)),
	cmplx(math.NaN(), math.NaN()),
}

// isNaNSC holds the expected IsNaN result for each entry of vcIsNaNSC.
// Per the table, a value with an infinite component is not reported as NaN
// even when the other component is NaN; only entries whose non-NaN
// component is finite (or both components are NaN) are true.
var isNaNSC = []bool{
	false,
	false,
	false,
	true,
	true,
	false,
	false,
	false,
	true,
}
var vcLogSC = []complex128{
NaN(),
}
var logSC = []complex128{
NaN(),
}
var vcLog10SC = []complex128{
NaN(),
}
var log10SC = []complex128{
NaN(),
}
var vcPolarSC = []complex128{
NaN(),
}
var polarSC = []ff{
{math.NaN(), math.NaN()},
}
var vcPowSC = [][2]complex128{
{NaN(), NaN()},
}
var powSC = []complex128{
NaN(),
}
var vcSinSC = []complex128{
NaN(),
}
var sinSC = []complex128{
NaN(),
}
var vcSinhSC = []complex128{
NaN(),
}
var sinhSC = []complex128{
NaN(),
}
var vcSqrtSC = []complex128{
NaN(),
}
var sqrtSC = []complex128{
NaN(),
}
var vcTanSC = []complex128{
NaN(),
}
var tanSC = []complex128{
NaN(),
}
var vcTanhSC = []complex128{
NaN(),
}
var tanhSC = []complex128{
NaN(),
}
// functions borrowed from pkg/math/all_test.go
// tolerance reports whether a and b differ by less than e. The tolerance e
// is interpreted relative to a whenever a is nonzero, and as an absolute
// bound when a is zero.
func tolerance(a, b, e float64) bool {
	if a != 0 {
		e = math.Abs(e * a)
	}
	return math.Abs(a-b) < e
}
// soclose reports whether a and b agree to within the caller-chosen tolerance e.
func soclose(a, b, e float64) bool { return tolerance(a, b, e) }
// veryclose reports whether a and b agree to within 4e-16, i.e. roughly
// full float64 precision.
func veryclose(a, b float64) bool { return tolerance(a, b, 4e-16) }
// alike reports whether a and b are equivalent as test results: both NaN,
// or numerically equal with the same sign bit (so +0 and -0 differ).
func alike(a, b float64) bool {
	if math.IsNaN(a) && math.IsNaN(b) {
		return true
	}
	if a == b {
		return math.Signbit(a) == math.Signbit(b)
	}
	return false
}
// cTolerance reports whether complex values a and b differ by less than e,
// with e scaled by |a| when a is nonzero (relative error) and used as an
// absolute bound otherwise.
func cTolerance(a, b complex128, e float64) bool {
	if a != 0 {
		e = e * Abs(a)
		if e < 0 {
			e = -e
		}
	}
	return Abs(a-b) < e
}
// cSoclose reports whether complex a and b agree to within tolerance e.
func cSoclose(a, b complex128, e float64) bool { return cTolerance(a, b, e) }
// cVeryclose reports whether complex a and b agree to within 4e-16,
// i.e. roughly full float64 precision.
func cVeryclose(a, b complex128) bool { return cTolerance(a, b, 4e-16) }
// cAlike reports whether complex values a and b are equivalent as test
// results: both NaN (per IsNaN), or equal with matching sign bits in both
// the real and imaginary components.
func cAlike(a, b complex128) bool {
	if IsNaN(a) && IsNaN(b) {
		return true
	}
	if a != b {
		return false
	}
	return math.Signbit(real(a)) == math.Signbit(real(b)) &&
		math.Signbit(imag(a)) == math.Signbit(imag(b))
}
// TestAbs checks Abs against the precomputed table abs and the
// special-case table vcAbsSC/absSC.
func TestAbs(t *testing.T) {
	for i, v := range vc {
		if f := Abs(v); !veryclose(abs[i], f) {
			t.Errorf("Abs(%g) = %g, want %g", v, f, abs[i])
		}
	}
	for i, v := range vcAbsSC {
		if f := Abs(v); !alike(absSC[i], f) {
			t.Errorf("Abs(%g) = %g, want %g", v, f, absSC[i])
		}
	}
}
func TestAcos(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Acos(vc[i]); !cSoclose(acos[i], f, 1e-14) {
t.Errorf("Acos(%g) = %g, want %g", vc[i], f, acos[i])
}
}
for i := 0; i < len(vcAcosSC); i++ {
if f := Acos(vcAcosSC[i]); !cAlike(acosSC[i], f) {
t.Errorf("Acos(%g) = %g, want %g", vcAcosSC[i], f, acosSC[i])
}
}
}
func TestAcosh(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Acosh(vc[i]); !cSoclose(acosh[i], f, 1e-14) {
t.Errorf("Acosh(%g) = %g, want %g", vc[i], f, acosh[i])
}
}
for i := 0; i < len(vcAcoshSC); i++ {
if f := Acosh(vcAcoshSC[i]); !cAlike(acoshSC[i], f) {
t.Errorf("Acosh(%g) = %g, want %g", vcAcoshSC[i], f, acoshSC[i])
}
}
}
func TestAsin(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Asin(vc[i]); !cSoclose(asin[i], f, 1e-14) {
t.Errorf("Asin(%g) = %g, want %g", vc[i], f, asin[i])
}
}
for i := 0; i < len(vcAsinSC); i++ {
if f := Asin(vcAsinSC[i]); !cAlike(asinSC[i], f) {
t.Errorf("Asin(%g) = %g, want %g", vcAsinSC[i], f, asinSC[i])
}
}
}
func TestAsinh(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Asinh(vc[i]); !cSoclose(asinh[i], f, 4e-15) {
t.Errorf("Asinh(%g) = %g, want %g", vc[i], f, asinh[i])
}
}
for i := 0; i < len(vcAsinhSC); i++ {
if f := Asinh(vcAsinhSC[i]); !cAlike(asinhSC[i], f) {
t.Errorf("Asinh(%g) = %g, want %g", vcAsinhSC[i], f, asinhSC[i])
}
}
}
func TestAtan(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Atan(vc[i]); !cVeryclose(atan[i], f) {
t.Errorf("Atan(%g) = %g, want %g", vc[i], f, atan[i])
}
}
for i := 0; i < len(vcAtanSC); i++ {
if f := Atan(vcAtanSC[i]); !cAlike(atanSC[i], f) {
t.Errorf("Atan(%g) = %g, want %g", vcAtanSC[i], f, atanSC[i])
}
}
}
func TestAtanh(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Atanh(vc[i]); !cVeryclose(atanh[i], f) {
t.Errorf("Atanh(%g) = %g, want %g", vc[i], f, atanh[i])
}
}
for i := 0; i < len(vcAtanhSC); i++ {
if f := Atanh(vcAtanhSC[i]); !cAlike(atanhSC[i], f) {
t.Errorf("Atanh(%g) = %g, want %g", vcAtanhSC[i], f, atanhSC[i])
}
}
}
func TestConj(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Conj(vc[i]); !cVeryclose(conj[i], f) {
t.Errorf("Conj(%g) = %g, want %g", vc[i], f, conj[i])
}
}
for i := 0; i < len(vcConjSC); i++ {
if f := Conj(vcConjSC[i]); !cAlike(conjSC[i], f) {
t.Errorf("Conj(%g) = %g, want %g", vcConjSC[i], f, conjSC[i])
}
}
}
func TestCos(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Cos(vc[i]); !cSoclose(cos[i], f, 3e-15) {
t.Errorf("Cos(%g) = %g, want %g", vc[i], f, cos[i])
}
}
for i := 0; i < len(vcCosSC); i++ {
if f := Cos(vcCosSC[i]); !cAlike(cosSC[i], f) {
t.Errorf("Cos(%g) = %g, want %g", vcCosSC[i], f, cosSC[i])
}
}
}
func TestCosh(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Cosh(vc[i]); !cSoclose(cosh[i], f, 2e-15) {
t.Errorf("Cosh(%g) = %g, want %g", vc[i], f, cosh[i])
}
}
for i := 0; i < len(vcCoshSC); i++ {
if f := Cosh(vcCoshSC[i]); !cAlike(coshSC[i], f) {
t.Errorf("Cosh(%g) = %g, want %g", vcCoshSC[i], f, coshSC[i])
}
}
}
func TestExp(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Exp(vc[i]); !cSoclose(exp[i], f, 1e-15) {
t.Errorf("Exp(%g) = %g, want %g", vc[i], f, exp[i])
}
}
for i := 0; i < len(vcExpSC); i++ {
if f := Exp(vcExpSC[i]); !cAlike(expSC[i], f) {
t.Errorf("Exp(%g) = %g, want %g", vcExpSC[i], f, expSC[i])
}
}
}
// TestIsNaN checks IsNaN against the special-case table vcIsNaNSC/isNaNSC.
func TestIsNaN(t *testing.T) {
	for i := 0; i < len(vcIsNaNSC); i++ {
		// Bug fix: f and isNaNSC[i] are bool, so they must be printed
		// with %v — %g is a floating-point verb and does not print booleans.
		if f := IsNaN(vcIsNaNSC[i]); isNaNSC[i] != f {
			t.Errorf("IsNaN(%g) = %v, want %v", vcIsNaNSC[i], f, isNaNSC[i])
		}
	}
}
func TestLog(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Log(vc[i]); !cVeryclose(log[i], f) {
t.Errorf("Log(%g) = %g, want %g", vc[i], f, log[i])
}
}
for i := 0; i < len(vcLogSC); i++ {
if f := Log(vcLogSC[i]); !cAlike(logSC[i], f) {
t.Errorf("Log(%g) = %g, want %g", vcLogSC[i], f, logSC[i])
}
}
}
func TestLog10(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Log10(vc[i]); !cVeryclose(log10[i], f) {
t.Errorf("Log10(%g) = %g, want %g", vc[i], f, log10[i])
}
}
for i := 0; i < len(vcLog10SC); i++ {
if f := Log10(vcLog10SC[i]); !cAlike(log10SC[i], f) {
t.Errorf("Log10(%g) = %g, want %g", vcLog10SC[i], f, log10SC[i])
}
}
}
// TestPolar checks Polar against the precomputed (r, theta) table polar
// and the special-case table vcPolarSC/polarSC.
func TestPolar(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		// NOTE(review): the && means the test only fails when BOTH r and
		// theta are off; a single wrong component slips through. Presumably
		// || was intended — confirm against upstream before changing.
		if r, theta := Polar(vc[i]); !veryclose(polar[i].r, r) && !veryclose(polar[i].theta, theta) {
			t.Errorf("Polar(%g) = %g, %g want %g, %g", vc[i], r, theta, polar[i].r, polar[i].theta)
		}
	}
	for i := 0; i < len(vcPolarSC); i++ {
		// Same &&-vs-|| concern applies to the special-case loop.
		if r, theta := Polar(vcPolarSC[i]); !alike(polarSC[i].r, r) && !alike(polarSC[i].theta, theta) {
			t.Errorf("Polar(%g) = %g, %g, want %g, %g", vcPolarSC[i], r, theta, polarSC[i].r, polarSC[i].theta)
		}
	}
}
// TestPow checks Pow against the precomputed table pow (base 3+3i raised to
// each vc value) and the special-case table vcPowSC/powSC.
func TestPow(t *testing.T) {
	var a = cmplx(float64(3), float64(3))
	for i := 0; i < len(vc); i++ {
		if f := Pow(a, vc[i]); !cSoclose(pow[i], f, 4e-15) {
			t.Errorf("Pow(%g, %g) = %g, want %g", a, vc[i], f, pow[i])
		}
	}
	for i := 0; i < len(vcPowSC); i++ {
		// Bug fix: each vcPowSC entry is a (base, exponent) pair; the
		// exponent is element [1], but element [0] was previously passed
		// (and logged) for both arguments.
		if f := Pow(vcPowSC[i][0], vcPowSC[i][1]); !cAlike(powSC[i], f) {
			t.Errorf("Pow(%g, %g) = %g, want %g", vcPowSC[i][0], vcPowSC[i][1], f, powSC[i])
		}
	}
}
func TestRect(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Rect(polar[i].r, polar[i].theta); !cVeryclose(vc[i], f) {
t.Errorf("Rect(%g, %g) = %g want %g", polar[i].r, polar[i].theta, f, vc[i])
}
}
for i := 0; i < len(vcPolarSC); i++ {
if f := Rect(polarSC[i].r, polarSC[i].theta); !cAlike(vcPolarSC[i], f) {
t.Errorf("Rect(%g, %g) = %g, want %g", polarSC[i].r, polarSC[i].theta, f, vcPolarSC[i])
}
}
}
func TestSin(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Sin(vc[i]); !cSoclose(sin[i], f, 2e-15) {
t.Errorf("Sin(%g) = %g, want %g", vc[i], f, sin[i])
}
}
for i := 0; i < len(vcSinSC); i++ {
if f := Sin(vcSinSC[i]); !cAlike(sinSC[i], f) {
t.Errorf("Sin(%g) = %g, want %g", vcSinSC[i], f, sinSC[i])
}
}
}
func TestSinh(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Sinh(vc[i]); !cSoclose(sinh[i], f, 2e-15) {
t.Errorf("Sinh(%g) = %g, want %g", vc[i], f, sinh[i])
}
}
for i := 0; i < len(vcSinhSC); i++ {
if f := Sinh(vcSinhSC[i]); !cAlike(sinhSC[i], f) {
t.Errorf("Sinh(%g) = %g, want %g", vcSinhSC[i], f, sinhSC[i])
}
}
}
func TestSqrt(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Sqrt(vc[i]); !cVeryclose(sqrt[i], f) {
t.Errorf("Sqrt(%g) = %g, want %g", vc[i], f, sqrt[i])
}
}
for i := 0; i < len(vcSqrtSC); i++ {
if f := Sqrt(vcSqrtSC[i]); !cAlike(sqrtSC[i], f) {
t.Errorf("Sqrt(%g) = %g, want %g", vcSqrtSC[i], f, sqrtSC[i])
}
}
}
func TestTan(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Tan(vc[i]); !cSoclose(tan[i], f, 3e-15) {
t.Errorf("Tan(%g) = %g, want %g", vc[i], f, tan[i])
}
}
for i := 0; i < len(vcTanSC); i++ {
if f := Tan(vcTanSC[i]); !cAlike(tanSC[i], f) {
t.Errorf("Tan(%g) = %g, want %g", vcTanSC[i], f, tanSC[i])
}
}
}
func TestTanh(t *testing.T) {
for i := 0; i < len(vc); i++ {
if f := Tanh(vc[i]); !cSoclose(tanh[i], f, 2e-15) {
t.Errorf("Tanh(%g) = %g, want %g", vc[i], f, tanh[i])
}
}
for i := 0; i < len(vcTanhSC); i++ {
if f := Tanh(vcTanhSC[i]); !cAlike(tanhSC[i], f) {
t.Errorf("Tanh(%g) = %g, want %g", vcTanhSC[i], f, tanhSC[i])
}
}
}
func BenchmarkAbs(b *testing.B) {
for i := 0; i < b.N; i++ {
Abs(cmplx(2.5, 3.5))
}
}
func BenchmarkAcos(b *testing.B) {
for i := 0; i < b.N; i++ {
Acos(cmplx(2.5, 3.5))
}
}
func BenchmarkAcosh(b *testing.B) {
for i := 0; i < b.N; i++ {
Acosh(cmplx(2.5, 3.5))
}
}
func BenchmarkAsin(b *testing.B) {
for i := 0; i < b.N; i++ {
Asin(cmplx(2.5, 3.5))
}
}
func BenchmarkAsinh(b *testing.B) {
for i := 0; i < b.N; i++ {
Asinh(cmplx(2.5, 3.5))
}
}
func BenchmarkAtan(b *testing.B) {
for i := 0; i < b.N; i++ {
Atan(cmplx(2.5, 3.5))
}
}
func BenchmarkAtanh(b *testing.B) {
for i := 0; i < b.N; i++ {
Atanh(cmplx(2.5, 3.5))
}
}
func BenchmarkConj(b *testing.B) {
for i := 0; i < b.N; i++ {
Conj(cmplx(2.5, 3.5))
}
}
func BenchmarkCos(b *testing.B) {
for i := 0; i < b.N; i++ {
Cos(cmplx(2.5, 3.5))
}
}
func BenchmarkCosh(b *testing.B) {
for i := 0; i < b.N; i++ {
Cosh(cmplx(2.5, 3.5))
}
}
func BenchmarkExp(b *testing.B) {
for i := 0; i < b.N; i++ {
Exp(cmplx(2.5, 3.5))
}
}
func BenchmarkLog(b *testing.B) {
for i := 0; i < b.N; i++ {
Log(cmplx(2.5, 3.5))
}
}
func BenchmarkLog10(b *testing.B) {
for i := 0; i < b.N; i++ {
Log10(cmplx(2.5, 3.5))
}
}
func BenchmarkPhase(b *testing.B) {
for i := 0; i < b.N; i++ {
Phase(cmplx(2.5, 3.5))
}
}
func BenchmarkPolar(b *testing.B) {
for i := 0; i < b.N; i++ {
Polar(cmplx(2.5, 3.5))
}
}
func BenchmarkPow(b *testing.B) {
for i := 0; i < b.N; i++ {
Pow(cmplx(2.5, 3.5), cmplx(2.5, 3.5))
}
}
func BenchmarkRect(b *testing.B) {
for i := 0; i < b.N; i++ {
Rect(2.5, 1.5)
}
}
func BenchmarkSin(b *testing.B) {
for i := 0; i < b.N; i++ {
Sin(cmplx(2.5, 3.5))
}
}
func BenchmarkSinh(b *testing.B) {
for i := 0; i < b.N; i++ {
Sinh(cmplx(2.5, 3.5))
}
}
func BenchmarkSqrt(b *testing.B) {
for i := 0; i < b.N; i++ {
Sqrt(cmplx(2.5, 3.5))
}
}
func BenchmarkTan(b *testing.B) {
for i := 0; i < b.N; i++ {
Tan(cmplx(2.5, 3.5))
}
}
func BenchmarkTanh(b *testing.B) {
for i := 0; i < b.N; i++ {
Tanh(cmplx(2.5, 3.5))
}
}
cmath test: fix format (%g does not print booleans)
R=ken
CC=golang-dev
http://codereview.appspot.com/4003041
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmath
import (
"math"
"testing"
)
var vc26 = []complex128{
(4.97901192488367350108546816 + 7.73887247457810456552351752i),
(7.73887247457810456552351752 - 0.27688005719200159404635997i),
(-0.27688005719200159404635997 - 5.01060361827107492160848778i),
(-5.01060361827107492160848778 + 9.63629370719841737980004837i),
(9.63629370719841737980004837 + 2.92637723924396464525443662i),
(2.92637723924396464525443662 + 5.22908343145930665230025625i),
(5.22908343145930665230025625 + 2.72793991043601025126008608i),
(2.72793991043601025126008608 + 1.82530809168085506044576505i),
(1.82530809168085506044576505 - 8.68592476857560136238589621i),
(-8.68592476857560136238589621 + 4.97901192488367350108546816i),
}
var vc = []complex128{
(4.9790119248836735e+00 + 7.7388724745781045e+00i),
(7.7388724745781045e+00 - 2.7688005719200159e-01i),
(-2.7688005719200159e-01 - 5.0106036182710749e+00i),
(-5.0106036182710749e+00 + 9.6362937071984173e+00i),
(9.6362937071984173e+00 + 2.9263772392439646e+00i),
(2.9263772392439646e+00 + 5.2290834314593066e+00i),
(5.2290834314593066e+00 + 2.7279399104360102e+00i),
(2.7279399104360102e+00 + 1.8253080916808550e+00i),
(1.8253080916808550e+00 - 8.6859247685756013e+00i),
(-8.6859247685756013e+00 + 4.9790119248836735e+00i),
}
// The expected results below were computed by the high precision calculators
// at http://keisan.casio.com/. More exact input values (array vc[], above)
// were obtained by printing them with "%.26f". The answers were calculated
// to 26 digits (by using the "Digit number" drop-down control of each
// calculator).
var abs = []float64{
9.2022120669932650313380972e+00,
7.7438239742296106616261394e+00,
5.0182478202557746902556648e+00,
1.0861137372799545160704002e+01,
1.0070841084922199607011905e+01,
5.9922447613166942183705192e+00,
5.8978784056736762299945176e+00,
3.2822866700678709020367184e+00,
8.8756430028990417290744307e+00,
1.0011785496777731986390856e+01,
}
var acos = []complex128{
(1.0017679804707456328694569 - 2.9138232718554953784519807i),
(0.03606427612041407369636057 + 2.7358584434576260925091256i),
(1.6249365462333796703711823 + 2.3159537454335901187730929i),
(2.0485650849650740120660391 - 3.0795576791204117911123886i),
(0.29621132089073067282488147 - 3.0007392508200622519398814i),
(1.0664555914934156601503632 - 2.4872865024796011364747111i),
(0.48681307452231387690013905 - 2.463655912283054555225301i),
(0.6116977071277574248407752 - 1.8734458851737055262693056i),
(1.3649311280370181331184214 + 2.8793528632328795424123832i),
(2.6189310485682988308904501 - 2.9956543302898767795858704i),
}
// Expected outputs for the functions under test, indexed in step with the
// shared input vector vc (defined above). The constants are written to full
// float64 precision — presumably precomputed with an independent
// high-precision tool; TODO confirm provenance.
//
// Expected values of Acosh at vc[i].
var acosh = []complex128{
	(2.9138232718554953784519807 + 1.0017679804707456328694569i),
	(2.7358584434576260925091256 - 0.03606427612041407369636057i),
	(2.3159537454335901187730929 - 1.6249365462333796703711823i),
	(3.0795576791204117911123886 + 2.0485650849650740120660391i),
	(3.0007392508200622519398814 + 0.29621132089073067282488147i),
	(2.4872865024796011364747111 + 1.0664555914934156601503632i),
	(2.463655912283054555225301 + 0.48681307452231387690013905i),
	(1.8734458851737055262693056 + 0.6116977071277574248407752i),
	(2.8793528632328795424123832 - 1.3649311280370181331184214i),
	(2.9956543302898767795858704 + 2.6189310485682988308904501i),
}
// Expected values of Asin at vc[i].
var asin = []complex128{
	(0.56902834632415098636186476 + 2.9138232718554953784519807i),
	(1.5347320506744825455349611 - 2.7358584434576260925091256i),
	(-0.054140219438483051139860579 - 2.3159537454335901187730929i),
	(-0.47776875817017739283471738 + 3.0795576791204117911123886i),
	(1.2745850059041659464064402 + 3.0007392508200622519398814i),
	(0.50434073530148095908095852 + 2.4872865024796011364747111i),
	(1.0839832522725827423311826 + 2.463655912283054555225301i),
	(0.9590986196671391943905465 + 1.8734458851737055262693056i),
	(0.20586519875787848611290031 - 2.8793528632328795424123832i),
	(-1.0481347217734022116591284 + 2.9956543302898767795858704i),
}
// Expected values of Asinh at vc[i].
var asinh = []complex128{
	(2.9113760469415295679342185 + 0.99639459545704326759805893i),
	(2.7441755423994259061579029 - 0.035468308789000500601119392i),
	(-2.2962136462520690506126678 - 1.5144663565690151885726707i),
	(-3.0771233459295725965402455 + 1.0895577967194013849422294i),
	(3.0048366100923647417557027 + 0.29346979169819220036454168i),
	(2.4800059370795363157364643 + 1.0545868606049165710424232i),
	(2.4718773838309585611141821 + 0.47502344364250803363708842i),
	(1.8910743588080159144378396 + 0.56882925572563602341139174i),
	(2.8735426423367341878069406 - 1.362376149648891420997548i),
	(-2.9981750586172477217567878 + 0.5183571985225367505624207i),
}
// Expected values of Atan at vc[i].
var atan = []complex128{
	(1.5115747079332741358607654 + 0.091324403603954494382276776i),
	(1.4424504323482602560806727 - 0.0045416132642803911503770933i),
	(-1.5593488703630532674484026 - 0.20163295409248362456446431i),
	(-1.5280619472445889867794105 + 0.081721556230672003746956324i),
	(1.4759909163240799678221039 + 0.028602969320691644358773586i),
	(1.4877353772046548932715555 + 0.14566877153207281663773599i),
	(1.4206983927779191889826 + 0.076830486127880702249439993i),
	(1.3162236060498933364869556 + 0.16031313000467530644933363i),
	(1.5473450684303703578810093 - 0.11064907507939082484935782i),
	(-1.4841462340185253987375812 + 0.049341850305024399493142411i),
}
// Expected values of Atanh at vc[i].
var atanh = []complex128{
	(0.058375027938968509064640438 + 1.4793488495105334458167782i),
	(0.12977343497790381229915667 - 1.5661009410463561327262499i),
	(-0.010576456067347252072200088 - 1.3743698658402284549750563i),
	(-0.042218595678688358882784918 + 1.4891433968166405606692604i),
	(0.095218997991316722061828397 + 1.5416884098777110330499698i),
	(0.079965459366890323857556487 + 1.4252510353873192700350435i),
	(0.15051245471980726221708301 + 1.4907432533016303804884461i),
	(0.25082072933993987714470373 + 1.392057665392187516442986i),
	(0.022896108815797135846276662 - 1.4609224989282864208963021i),
	(-0.08665624101841876130537396 + 1.5207902036935093480142159i),
}
// Expected values of Conj at vc[i].
var conj = []complex128{
	(4.9790119248836735e+00 - 7.7388724745781045e+00i),
	(7.7388724745781045e+00 + 2.7688005719200159e-01i),
	(-2.7688005719200159e-01 + 5.0106036182710749e+00i),
	(-5.0106036182710749e+00 - 9.6362937071984173e+00i),
	(9.6362937071984173e+00 - 2.9263772392439646e+00i),
	(2.9263772392439646e+00 - 5.2290834314593066e+00i),
	(5.2290834314593066e+00 - 2.7279399104360102e+00i),
	(2.7279399104360102e+00 - 1.8253080916808550e+00i),
	(1.8253080916808550e+00 + 8.6859247685756013e+00i),
	(-8.6859247685756013e+00 - 4.9790119248836735e+00i),
}
// Expected values of Cos at vc[i].
var cos = []complex128{
	(3.024540920601483938336569e+02 + 1.1073797572517071650045357e+03i),
	(1.192858682649064973252758e-01 + 2.7857554122333065540970207e-01i),
	(7.2144394304528306603857962e+01 - 2.0500129667076044169954205e+01i),
	(2.24921952538403984190541e+03 - 7.317363745602773587049329e+03i),
	(-9.148222970032421760015498e+00 + 1.953124661113563541862227e+00i),
	(-9.116081175857732248227078e+01 - 1.992669213569952232487371e+01i),
	(3.795639179042704640002918e+00 + 6.623513350981458399309662e+00i),
	(-2.9144840732498869560679084e+00 - 1.214620271628002917638748e+00i),
	(-7.45123482501299743872481e+02 + 2.8641692314488080814066734e+03i),
	(-5.371977967039319076416747e+01 + 4.893348341339375830564624e+01i),
}
// Expected values of Cosh at vc[i].
var cosh = []complex128{
	(8.34638383523018249366948e+00 + 7.2181057886425846415112064e+01i),
	(1.10421967379919366952251e+03 - 3.1379638689277575379469861e+02i),
	(3.051485206773701584738512e-01 - 2.6805384730105297848044485e-01i),
	(-7.33294728684187933370938e+01 + 1.574445942284918251038144e+01i),
	(-7.478643293945957535757355e+03 + 1.6348382209913353929473321e+03i),
	(4.622316522966235701630926e+00 - 8.088695185566375256093098e+00i),
	(-8.544333183278877406197712e+01 + 3.7505836120128166455231717e+01i),
	(-1.934457815021493925115198e+00 + 7.3725859611767228178358673e+00i),
	(-2.352958770061749348353548e+00 - 2.034982010440878358915409e+00i),
	(7.79756457532134748165069e+02 + 2.8549350716819176560377717e+03i),
}
// Expected values of Exp at vc[i].
var exp = []complex128{
	(1.669197736864670815125146e+01 + 1.4436895109507663689174096e+02i),
	(2.2084389286252583447276212e+03 - 6.2759289284909211238261917e+02i),
	(2.227538273122775173434327e-01 + 7.2468284028334191250470034e-01i),
	(-6.5182985958153548997881627e-03 - 1.39965837915193860879044e-03i),
	(-1.4957286524084015746110777e+04 + 3.269676455931135688988042e+03i),
	(9.218158701983105935659273e+00 - 1.6223985291084956009304582e+01i),
	(-1.7088175716853040841444505e+02 + 7.501382609870410713795546e+01i),
	(-3.852461315830959613132505e+00 + 1.4808420423156073221970892e+01i),
	(-4.586775503301407379786695e+00 - 4.178501081246873415144744e+00i),
	(4.451337963005453491095747e-05 - 1.62977574205442915935263e-04i),
}
// Expected values of Log at vc[i].
var log = []complex128{
	(2.2194438972179194425697051e+00 + 9.9909115046919291062461269e-01i),
	(2.0468956191154167256337289e+00 - 3.5762575021856971295156489e-02i),
	(1.6130808329853860438751244e+00 - 1.6259990074019058442232221e+00i),
	(2.3851910394823008710032651e+00 + 2.0502936359659111755031062e+00i),
	(2.3096442270679923004800651e+00 + 2.9483213155446756211881774e-01i),
	(1.7904660933974656106951860e+00 + 1.0605860367252556281902109e+00i),
	(1.7745926939841751666177512e+00 + 4.8084556083358307819310911e-01i),
	(1.1885403350045342425648780e+00 + 5.8969634164776659423195222e-01i),
	(2.1833107837679082586772505e+00 - 1.3636647724582455028314573e+00i),
	(2.3037629487273259170991671e+00 + 2.6210913895386013290915234e+00i),
}
// Expected values of Log10 at vc[i].
var log10 = []complex128{
	(9.6389223745559042474184943e-01 + 4.338997735671419492599631e-01i),
	(8.8895547241376579493490892e-01 - 1.5531488990643548254864806e-02i),
	(7.0055210462945412305244578e-01 - 7.0616239649481243222248404e-01i),
	(1.0358753067322445311676952e+00 + 8.9043121238134980156490909e-01i),
	(1.003065742975330237172029e+00 + 1.2804396782187887479857811e-01i),
	(7.7758954439739162532085157e-01 + 4.6060666333341810869055108e-01i),
	(7.7069581462315327037689152e-01 + 2.0882857371769952195512475e-01i),
	(5.1617650901191156135137239e-01 + 2.5610186717615977620363299e-01i),
	(9.4819982567026639742663212e-01 - 5.9223208584446952284914289e-01i),
	(1.0005115362454417135973429e+00 + 1.1383255270407412817250921e+00i),
}
// ff is a polar-coordinate pair (radius r, angle theta) used by the
// Polar and Rect tests.
type ff struct {
	r, theta float64
}
// Expected (r, theta) results of Polar at vc[i]; also the inputs for Rect.
var polar = []ff{
	{9.2022120669932650313380972e+00, 9.9909115046919291062461269e-01},
	{7.7438239742296106616261394e+00, -3.5762575021856971295156489e-02},
	{5.0182478202557746902556648e+00, -1.6259990074019058442232221e+00},
	{1.0861137372799545160704002e+01, 2.0502936359659111755031062e+00},
	{1.0070841084922199607011905e+01, 2.9483213155446756211881774e-01},
	{5.9922447613166942183705192e+00, 1.0605860367252556281902109e+00},
	{5.8978784056736762299945176e+00, 4.8084556083358307819310911e-01},
	{3.2822866700678709020367184e+00, 5.8969634164776659423195222e-01},
	{8.8756430028990417290744307e+00, -1.3636647724582455028314573e+00},
	{1.0011785496777731986390856e+01, 2.6210913895386013290915234e+00},
}
// Expected values of Pow(3+3i, vc[i]).
var pow = []complex128{
	(-2.499956739197529585028819e+00 + 1.759751724335650228957144e+00i),
	(7.357094338218116311191939e+04 - 5.089973412479151648145882e+04i),
	(1.320777296067768517259592e+01 - 3.165621914333901498921986e+01i),
	(-3.123287828297300934072149e-07 - 1.9849567521490553032502223E-7i),
	(8.0622651468477229614813e+04 - 7.80028727944573092944363e+04i),
	(-1.0268824572103165858577141e+00 - 4.716844738244989776610672e-01i),
	(-4.35953819012244175753187e+01 + 2.2036445974645306917648585e+02i),
	(8.3556092283250594950239e-01 - 1.2261571947167240272593282e+01i),
	(1.582292972120769306069625e+03 + 1.273564263524278244782512e+04i),
	(6.592208301642122149025369e-08 + 2.584887236651661903526389e-08i),
}
// Expected values of Sin at vc[i].
var sin = []complex128{
	(-1.1073801774240233539648544e+03 + 3.024539773002502192425231e+02i),
	(1.0317037521400759359744682e+00 - 3.2208979799929570242818e-02i),
	(-2.0501952097271429804261058e+01 - 7.2137981348240798841800967e+01i),
	(7.3173638080346338642193078e+03 + 2.249219506193664342566248e+03i),
	(-1.964375633631808177565226e+00 - 9.0958264713870404464159683e+00i),
	(1.992783647158514838337674e+01 - 9.11555769410191350416942e+01i),
	(-6.680335650741921444300349e+00 + 3.763353833142432513086117e+00i),
	(1.2794028166657459148245993e+00 - 2.7669092099795781155109602e+00i),
	(2.8641693949535259594188879e+03 + 7.451234399649871202841615e+02i),
	(-4.893811726244659135553033e+01 - 5.371469305562194635957655e+01i),
}
// Expected values of Sinh at vc[i].
var sinh = []complex128{
	(8.34559353341652565758198e+00 + 7.2187893208650790476628899e+01i),
	(1.1042192548260646752051112e+03 - 3.1379650595631635858792056e+02i),
	(-8.239469336509264113041849e-02 + 9.9273668758439489098514519e-01i),
	(7.332295456982297798219401e+01 - 1.574585908122833444899023e+01i),
	(-7.4786432301380582103534216e+03 + 1.63483823493980029604071e+03i),
	(4.595842179016870234028347e+00 - 8.135290105518580753211484e+00i),
	(-8.543842533574163435246793e+01 + 3.750798997857594068272375e+01i),
	(-1.918003500809465688017307e+00 + 7.4358344619793504041350251e+00i),
	(-2.233816733239658031433147e+00 - 2.143519070805995056229335e+00i),
	(-7.797564130187551181105341e+02 - 2.8549352346594918614806877e+03i),
}
// Expected values of Sqrt at vc[i].
var sqrt = []complex128{
	(2.6628203086086130543813948e+00 + 1.4531345674282185229796902e+00i),
	(2.7823278427251986247149295e+00 - 4.9756907317005224529115567e-02i),
	(1.5397025302089642757361015e+00 - 1.6271336573016637535695727e+00i),
	(1.7103411581506875260277898e+00 + 2.8170677122737589676157029e+00i),
	(3.1390392472953103383607947e+00 + 4.6612625849858653248980849e-01i),
	(2.1117080764822417640789287e+00 + 1.2381170223514273234967850e+00i),
	(2.3587032281672256703926939e+00 + 5.7827111903257349935720172e-01i),
	(1.7335262588873410476661577e+00 + 5.2647258220721269141550382e-01i),
	(2.3131094974708716531499282e+00 - 1.8775429304303785570775490e+00i),
	(8.1420535745048086240947359e-01 + 3.0575897587277248522656113e+00i),
}
// Expected values of Tan at vc[i].
var tan = []complex128{
	(-1.928757919086441129134525e-07 + 1.0000003267499169073251826e+00i),
	(1.242412685364183792138948e+00 - 3.17149693883133370106696e+00i),
	(-4.6745126251587795225571826e-05 - 9.9992439225263959286114298e-01i),
	(4.792363401193648192887116e-09 + 1.0000000070589333451557723e+00i),
	(2.345740824080089140287315e-03 + 9.947733046570988661022763e-01i),
	(-2.396030789494815566088809e-05 + 9.9994781345418591429826779e-01i),
	(-7.370204836644931340905303e-03 + 1.0043553413417138987717748e+00i),
	(-3.691803847992048527007457e-02 + 9.6475071993469548066328894e-01i),
	(-2.781955256713729368401878e-08 - 1.000000049848910609006646e+00i),
	(9.4281590064030478879791249e-05 + 9.9999119340863718183758545e-01i),
}
// Expected values of Tanh at vc[i].
var tanh = []complex128{
	(1.0000921981225144748819918e+00 + 2.160986245871518020231507e-05i),
	(9.9999967727531993209562591e-01 - 1.9953763222959658873657676e-07i),
	(-1.765485739548037260789686e+00 + 1.7024216325552852445168471e+00i),
	(-9.999189442732736452807108e-01 + 3.64906070494473701938098e-05i),
	(9.9999999224622333738729767e-01 - 3.560088949517914774813046e-09i),
	(1.0029324933367326862499343e+00 - 4.948790309797102353137528e-03i),
	(9.9996113064788012488693567e-01 - 4.226995742097032481451259e-05i),
	(1.0074784189316340029873945e+00 - 4.194050814891697808029407e-03i),
	(9.9385534229718327109131502e-01 + 5.144217985914355502713437e-02i),
	(-1.0000000491604982429364892e+00 - 2.901873195374433112227349e-08i),
}
// Special cases.
//
// Each vcXxxSC slice holds special-case inputs for function Xxx (currently
// just NaN), and the matching xxxSC slice holds the expected outputs at the
// same index.
var vcAbsSC = []complex128{
	NaN(),
}
var absSC = []float64{
	math.NaN(),
}
var vcAcosSC = []complex128{
	NaN(),
}
var acosSC = []complex128{
	NaN(),
}
var vcAcoshSC = []complex128{
	NaN(),
}
var acoshSC = []complex128{
	NaN(),
}
var vcAsinSC = []complex128{
	NaN(),
}
var asinSC = []complex128{
	NaN(),
}
var vcAsinhSC = []complex128{
	NaN(),
}
var asinhSC = []complex128{
	NaN(),
}
var vcAtanSC = []complex128{
	NaN(),
}
var atanSC = []complex128{
	NaN(),
}
var vcAtanhSC = []complex128{
	NaN(),
}
var atanhSC = []complex128{
	NaN(),
}
var vcConjSC = []complex128{
	NaN(),
}
var conjSC = []complex128{
	NaN(),
}
var vcCosSC = []complex128{
	NaN(),
}
var cosSC = []complex128{
	NaN(),
}
var vcCoshSC = []complex128{
	NaN(),
}
var coshSC = []complex128{
	NaN(),
}
var vcExpSC = []complex128{
	NaN(),
}
var expSC = []complex128{
	NaN(),
}
// IsNaN inputs cover every combination of infinite, NaN, and finite parts;
// isNaNSC below gives the expected result for each (infinities are not NaN).
var vcIsNaNSC = []complex128{
	cmplx(math.Inf(-1), math.Inf(-1)),
	cmplx(math.Inf(-1), math.NaN()),
	cmplx(math.NaN(), math.Inf(-1)),
	cmplx(0, math.NaN()),
	cmplx(math.NaN(), 0),
	cmplx(math.Inf(1), math.Inf(1)),
	cmplx(math.Inf(1), math.NaN()),
	cmplx(math.NaN(), math.Inf(1)),
	cmplx(math.NaN(), math.NaN()),
}
var isNaNSC = []bool{
	false,
	false,
	false,
	true,
	true,
	false,
	false,
	false,
	true,
}
var vcLogSC = []complex128{
	NaN(),
}
var logSC = []complex128{
	NaN(),
}
var vcLog10SC = []complex128{
	NaN(),
}
var log10SC = []complex128{
	NaN(),
}
var vcPolarSC = []complex128{
	NaN(),
}
var polarSC = []ff{
	{math.NaN(), math.NaN()},
}
// Pow special cases take (base, exponent) pairs.
var vcPowSC = [][2]complex128{
	{NaN(), NaN()},
}
var powSC = []complex128{
	NaN(),
}
var vcSinSC = []complex128{
	NaN(),
}
var sinSC = []complex128{
	NaN(),
}
var vcSinhSC = []complex128{
	NaN(),
}
var sinhSC = []complex128{
	NaN(),
}
var vcSqrtSC = []complex128{
	NaN(),
}
var sqrtSC = []complex128{
	NaN(),
}
var vcTanSC = []complex128{
	NaN(),
}
var tanSC = []complex128{
	NaN(),
}
var vcTanhSC = []complex128{
	NaN(),
}
var tanhSC = []complex128{
	NaN(),
}
// functions borrowed from pkg/math/all_test.go
// tolerance reports whether a and b are within e of each other.
// When a is nonzero, e is treated as a relative bound and is scaled by |a|.
func tolerance(a, b, e float64) bool {
	diff := b - a
	if diff < 0 {
		diff = -diff
	}
	// Scale the bound by |a| so nonzero expectations are compared relatively.
	if a != 0 {
		e *= a
		if e < 0 {
			e = -e
		}
	}
	return diff < e
}
// soclose reports whether a and b agree to within the caller-chosen bound e.
func soclose(a, b, e float64) bool {
	return tolerance(a, b, e)
}
// veryclose reports whether a and b agree to within a 4e-16 relative bound
// (roughly a couple of ulps for float64).
func veryclose(a, b float64) bool {
	return tolerance(a, b, 4e-16)
}
// alike reports whether a and b are equivalent for test purposes:
// both NaN, or numerically equal with matching sign bits (so that
// +0 and -0 are distinguished).
func alike(a, b float64) bool {
	if math.IsNaN(a) && math.IsNaN(b) {
		return true
	}
	if a != b {
		return false
	}
	return math.Signbit(a) == math.Signbit(b)
}
// cTolerance reports whether complex values a and b differ by less than e.
// When a is nonzero, e is scaled by |a| (relative comparison).
func cTolerance(a, b complex128, e float64) bool {
	diff := Abs(a - b)
	if a != 0 {
		e *= Abs(a)
		if e < 0 {
			e = -e
		}
	}
	return diff < e
}
// cSoclose reports whether complex a and b agree to within the bound e.
func cSoclose(a, b complex128, e float64) bool {
	return cTolerance(a, b, e)
}
// cVeryclose reports whether complex a and b agree to within a 4e-16
// relative bound.
func cVeryclose(a, b complex128) bool {
	return cTolerance(a, b, 4e-16)
}
// cAlike reports whether complex a and b are equivalent for test purposes:
// both NaN, or equal with matching sign bits in both the real and
// imaginary components (distinguishing signed zeros).
func cAlike(a, b complex128) bool {
	if IsNaN(a) && IsNaN(b) {
		return true
	}
	if a != b {
		return false
	}
	if math.Signbit(real(a)) != math.Signbit(real(b)) {
		return false
	}
	return math.Signbit(imag(a)) == math.Signbit(imag(b))
}
// Each TestXxx below runs the package function over the shared inputs vc,
// comparing against the precomputed table above, then over its special-case
// inputs, comparing with alike/cAlike (exact including NaN and signed zero).
func TestAbs(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Abs(vc[i]); !veryclose(abs[i], f) {
			t.Errorf("Abs(%g) = %g, want %g", vc[i], f, abs[i])
		}
	}
	for i := 0; i < len(vcAbsSC); i++ {
		if f := Abs(vcAbsSC[i]); !alike(absSC[i], f) {
			t.Errorf("Abs(%g) = %g, want %g", vcAbsSC[i], f, absSC[i])
		}
	}
}
func TestAcos(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Acos(vc[i]); !cSoclose(acos[i], f, 1e-14) {
			t.Errorf("Acos(%g) = %g, want %g", vc[i], f, acos[i])
		}
	}
	for i := 0; i < len(vcAcosSC); i++ {
		if f := Acos(vcAcosSC[i]); !cAlike(acosSC[i], f) {
			t.Errorf("Acos(%g) = %g, want %g", vcAcosSC[i], f, acosSC[i])
		}
	}
}
func TestAcosh(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Acosh(vc[i]); !cSoclose(acosh[i], f, 1e-14) {
			t.Errorf("Acosh(%g) = %g, want %g", vc[i], f, acosh[i])
		}
	}
	for i := 0; i < len(vcAcoshSC); i++ {
		if f := Acosh(vcAcoshSC[i]); !cAlike(acoshSC[i], f) {
			t.Errorf("Acosh(%g) = %g, want %g", vcAcoshSC[i], f, acoshSC[i])
		}
	}
}
func TestAsin(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Asin(vc[i]); !cSoclose(asin[i], f, 1e-14) {
			t.Errorf("Asin(%g) = %g, want %g", vc[i], f, asin[i])
		}
	}
	for i := 0; i < len(vcAsinSC); i++ {
		if f := Asin(vcAsinSC[i]); !cAlike(asinSC[i], f) {
			t.Errorf("Asin(%g) = %g, want %g", vcAsinSC[i], f, asinSC[i])
		}
	}
}
func TestAsinh(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Asinh(vc[i]); !cSoclose(asinh[i], f, 4e-15) {
			t.Errorf("Asinh(%g) = %g, want %g", vc[i], f, asinh[i])
		}
	}
	for i := 0; i < len(vcAsinhSC); i++ {
		if f := Asinh(vcAsinhSC[i]); !cAlike(asinhSC[i], f) {
			t.Errorf("Asinh(%g) = %g, want %g", vcAsinhSC[i], f, asinhSC[i])
		}
	}
}
func TestAtan(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Atan(vc[i]); !cVeryclose(atan[i], f) {
			t.Errorf("Atan(%g) = %g, want %g", vc[i], f, atan[i])
		}
	}
	for i := 0; i < len(vcAtanSC); i++ {
		if f := Atan(vcAtanSC[i]); !cAlike(atanSC[i], f) {
			t.Errorf("Atan(%g) = %g, want %g", vcAtanSC[i], f, atanSC[i])
		}
	}
}
func TestAtanh(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Atanh(vc[i]); !cVeryclose(atanh[i], f) {
			t.Errorf("Atanh(%g) = %g, want %g", vc[i], f, atanh[i])
		}
	}
	for i := 0; i < len(vcAtanhSC); i++ {
		if f := Atanh(vcAtanhSC[i]); !cAlike(atanhSC[i], f) {
			t.Errorf("Atanh(%g) = %g, want %g", vcAtanhSC[i], f, atanhSC[i])
		}
	}
}
func TestConj(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Conj(vc[i]); !cVeryclose(conj[i], f) {
			t.Errorf("Conj(%g) = %g, want %g", vc[i], f, conj[i])
		}
	}
	for i := 0; i < len(vcConjSC); i++ {
		if f := Conj(vcConjSC[i]); !cAlike(conjSC[i], f) {
			t.Errorf("Conj(%g) = %g, want %g", vcConjSC[i], f, conjSC[i])
		}
	}
}
func TestCos(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Cos(vc[i]); !cSoclose(cos[i], f, 3e-15) {
			t.Errorf("Cos(%g) = %g, want %g", vc[i], f, cos[i])
		}
	}
	for i := 0; i < len(vcCosSC); i++ {
		if f := Cos(vcCosSC[i]); !cAlike(cosSC[i], f) {
			t.Errorf("Cos(%g) = %g, want %g", vcCosSC[i], f, cosSC[i])
		}
	}
}
func TestCosh(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Cosh(vc[i]); !cSoclose(cosh[i], f, 2e-15) {
			t.Errorf("Cosh(%g) = %g, want %g", vc[i], f, cosh[i])
		}
	}
	for i := 0; i < len(vcCoshSC); i++ {
		if f := Cosh(vcCoshSC[i]); !cAlike(coshSC[i], f) {
			t.Errorf("Cosh(%g) = %g, want %g", vcCoshSC[i], f, coshSC[i])
		}
	}
}
func TestExp(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Exp(vc[i]); !cSoclose(exp[i], f, 1e-15) {
			t.Errorf("Exp(%g) = %g, want %g", vc[i], f, exp[i])
		}
	}
	for i := 0; i < len(vcExpSC); i++ {
		if f := Exp(vcExpSC[i]); !cAlike(expSC[i], f) {
			t.Errorf("Exp(%g) = %g, want %g", vcExpSC[i], f, expSC[i])
		}
	}
}
// TestIsNaN has no table-driven section over vc; it checks only the
// special-case inputs, which enumerate Inf/NaN/finite combinations.
func TestIsNaN(t *testing.T) {
	for i := 0; i < len(vcIsNaNSC); i++ {
		if f := IsNaN(vcIsNaNSC[i]); isNaNSC[i] != f {
			t.Errorf("IsNaN(%v) = %v, want %v", vcIsNaNSC[i], f, isNaNSC[i])
		}
	}
}
func TestLog(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Log(vc[i]); !cVeryclose(log[i], f) {
			t.Errorf("Log(%g) = %g, want %g", vc[i], f, log[i])
		}
	}
	for i := 0; i < len(vcLogSC); i++ {
		if f := Log(vcLogSC[i]); !cAlike(logSC[i], f) {
			t.Errorf("Log(%g) = %g, want %g", vcLogSC[i], f, logSC[i])
		}
	}
}
func TestLog10(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Log10(vc[i]); !cVeryclose(log10[i], f) {
			t.Errorf("Log10(%g) = %g, want %g", vc[i], f, log10[i])
		}
	}
	for i := 0; i < len(vcLog10SC); i++ {
		if f := Log10(vcLog10SC[i]); !cAlike(log10SC[i], f) {
			t.Errorf("Log10(%g) = %g, want %g", vcLog10SC[i], f, log10SC[i])
		}
	}
}
// TestPolar checks Polar against the precomputed (r, theta) table for the
// shared inputs vc, then against the special-case inputs.
//
// Both components must match: a mismatch in EITHER the radius or the angle
// is a failure. (The original used &&, which only reported an error when
// both components were wrong, letting single-component regressions pass.)
func TestPolar(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if r, theta := Polar(vc[i]); !veryclose(polar[i].r, r) || !veryclose(polar[i].theta, theta) {
			t.Errorf("Polar(%g) = %g, %g want %g, %g", vc[i], r, theta, polar[i].r, polar[i].theta)
		}
	}
	for i := 0; i < len(vcPolarSC); i++ {
		if r, theta := Polar(vcPolarSC[i]); !alike(polarSC[i].r, r) || !alike(polarSC[i].theta, theta) {
			t.Errorf("Polar(%g) = %g, %g, want %g, %g", vcPolarSC[i], r, theta, polarSC[i].r, polarSC[i].theta)
		}
	}
}
// TestPow checks Pow(3+3i, vc[i]) against the precomputed table, then checks
// the special-case (base, exponent) pairs.
func TestPow(t *testing.T) {
	var a = cmplx(float64(3), float64(3))
	for i := 0; i < len(vc); i++ {
		if f := Pow(a, vc[i]); !cSoclose(pow[i], f, 4e-15) {
			t.Errorf("Pow(%g, %g) = %g, want %g", a, vc[i], f, pow[i])
		}
	}
	for i := 0; i < len(vcPowSC); i++ {
		// Use both elements of the pair: [0] is the base, [1] the exponent.
		// The original passed vcPowSC[i][0] twice, so the exponent column
		// was never exercised.
		if f := Pow(vcPowSC[i][0], vcPowSC[i][1]); !cAlike(powSC[i], f) {
			t.Errorf("Pow(%g, %g) = %g, want %g", vcPowSC[i][0], vcPowSC[i][1], f, powSC[i])
		}
	}
}
// TestRect inverts the Polar table: rebuilding each vc[i] from its (r, theta)
// pair must reproduce the original complex value. The remaining TestXxx
// functions follow the standard table-plus-special-cases pattern.
func TestRect(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Rect(polar[i].r, polar[i].theta); !cVeryclose(vc[i], f) {
			t.Errorf("Rect(%g, %g) = %g want %g", polar[i].r, polar[i].theta, f, vc[i])
		}
	}
	for i := 0; i < len(vcPolarSC); i++ {
		if f := Rect(polarSC[i].r, polarSC[i].theta); !cAlike(vcPolarSC[i], f) {
			t.Errorf("Rect(%g, %g) = %g, want %g", polarSC[i].r, polarSC[i].theta, f, vcPolarSC[i])
		}
	}
}
func TestSin(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Sin(vc[i]); !cSoclose(sin[i], f, 2e-15) {
			t.Errorf("Sin(%g) = %g, want %g", vc[i], f, sin[i])
		}
	}
	for i := 0; i < len(vcSinSC); i++ {
		if f := Sin(vcSinSC[i]); !cAlike(sinSC[i], f) {
			t.Errorf("Sin(%g) = %g, want %g", vcSinSC[i], f, sinSC[i])
		}
	}
}
func TestSinh(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Sinh(vc[i]); !cSoclose(sinh[i], f, 2e-15) {
			t.Errorf("Sinh(%g) = %g, want %g", vc[i], f, sinh[i])
		}
	}
	for i := 0; i < len(vcSinhSC); i++ {
		if f := Sinh(vcSinhSC[i]); !cAlike(sinhSC[i], f) {
			t.Errorf("Sinh(%g) = %g, want %g", vcSinhSC[i], f, sinhSC[i])
		}
	}
}
func TestSqrt(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Sqrt(vc[i]); !cVeryclose(sqrt[i], f) {
			t.Errorf("Sqrt(%g) = %g, want %g", vc[i], f, sqrt[i])
		}
	}
	for i := 0; i < len(vcSqrtSC); i++ {
		if f := Sqrt(vcSqrtSC[i]); !cAlike(sqrtSC[i], f) {
			t.Errorf("Sqrt(%g) = %g, want %g", vcSqrtSC[i], f, sqrtSC[i])
		}
	}
}
func TestTan(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Tan(vc[i]); !cSoclose(tan[i], f, 3e-15) {
			t.Errorf("Tan(%g) = %g, want %g", vc[i], f, tan[i])
		}
	}
	for i := 0; i < len(vcTanSC); i++ {
		if f := Tan(vcTanSC[i]); !cAlike(tanSC[i], f) {
			t.Errorf("Tan(%g) = %g, want %g", vcTanSC[i], f, tanSC[i])
		}
	}
}
func TestTanh(t *testing.T) {
	for i := 0; i < len(vc); i++ {
		if f := Tanh(vc[i]); !cSoclose(tanh[i], f, 2e-15) {
			t.Errorf("Tanh(%g) = %g, want %g", vc[i], f, tanh[i])
		}
	}
	for i := 0; i < len(vcTanhSC); i++ {
		if f := Tanh(vcTanhSC[i]); !cAlike(tanhSC[i], f) {
			t.Errorf("Tanh(%g) = %g, want %g", vcTanhSC[i], f, tanhSC[i])
		}
	}
}
// Benchmarks exercise each package function on a fixed representative input
// (2.5+3.5i; Rect takes the polar pair 2.5, 1.5).
func BenchmarkAbs(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Abs(cmplx(2.5, 3.5))
	}
}
func BenchmarkAcos(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Acos(cmplx(2.5, 3.5))
	}
}
func BenchmarkAcosh(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Acosh(cmplx(2.5, 3.5))
	}
}
func BenchmarkAsin(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Asin(cmplx(2.5, 3.5))
	}
}
func BenchmarkAsinh(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Asinh(cmplx(2.5, 3.5))
	}
}
func BenchmarkAtan(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Atan(cmplx(2.5, 3.5))
	}
}
func BenchmarkAtanh(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Atanh(cmplx(2.5, 3.5))
	}
}
func BenchmarkConj(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Conj(cmplx(2.5, 3.5))
	}
}
func BenchmarkCos(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Cos(cmplx(2.5, 3.5))
	}
}
func BenchmarkCosh(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Cosh(cmplx(2.5, 3.5))
	}
}
func BenchmarkExp(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Exp(cmplx(2.5, 3.5))
	}
}
func BenchmarkLog(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Log(cmplx(2.5, 3.5))
	}
}
func BenchmarkLog10(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Log10(cmplx(2.5, 3.5))
	}
}
func BenchmarkPhase(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Phase(cmplx(2.5, 3.5))
	}
}
func BenchmarkPolar(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Polar(cmplx(2.5, 3.5))
	}
}
func BenchmarkPow(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Pow(cmplx(2.5, 3.5), cmplx(2.5, 3.5))
	}
}
func BenchmarkRect(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Rect(2.5, 1.5)
	}
}
func BenchmarkSin(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Sin(cmplx(2.5, 3.5))
	}
}
func BenchmarkSinh(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Sinh(cmplx(2.5, 3.5))
	}
}
func BenchmarkSqrt(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Sqrt(cmplx(2.5, 3.5))
	}
}
func BenchmarkTan(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Tan(cmplx(2.5, 3.5))
	}
}
func BenchmarkTanh(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Tanh(cmplx(2.5, 3.5))
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package x509 parses X.509-encoded keys and certificates.
package x509
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha1"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"io"
"math/big"
"time"
)
// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
// in RFC 3280.
type pkixPublicKey struct {
	// Algo identifies the key's algorithm (e.g. rsaEncryption).
	Algo pkix.AlgorithmIdentifier
	// BitString carries the DER encoding of the key material itself.
	BitString asn1.BitString
}
// ParsePKIXPublicKey parses a DER encoded public key. These values are
// typically found in PEM blocks with "BEGIN PUBLIC KEY".
//
// It unmarshals the SubjectPublicKeyInfo wrapper, resolves the algorithm
// OID, and hands the inner key bits to parsePublicKey.
func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
	var info publicKeyInfo
	if _, err = asn1.Unmarshal(derBytes, &info); err != nil {
		return nil, err
	}
	algo := getPublicKeyAlgorithmFromOID(info.Algorithm.Algorithm)
	if algo == UnknownPublicKeyAlgorithm {
		return nil, errors.New("ParsePKIXPublicKey: unknown public key algorithm")
	}
	return parsePublicKey(algo, &info)
}
// MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format.
//
// Only *rsa.PublicKey is currently supported; any other type yields an
// error. Marshal failures from encoding/asn1 are now propagated instead of
// being silently discarded.
func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
	var pubBytes []byte
	switch pub := pub.(type) {
	case *rsa.PublicKey:
		var err error
		pubBytes, err = asn1.Marshal(rsaPublicKey{
			N: pub.N,
			E: pub.E,
		})
		if err != nil {
			return nil, err
		}
	default:
		return nil, errors.New("MarshalPKIXPublicKey: unknown public key type")
	}
	// Named pkixKey (not "pkix") to avoid shadowing the imported pkix package.
	pkixKey := pkixPublicKey{
		Algo: pkix.AlgorithmIdentifier{
			Algorithm: []int{1, 2, 840, 113549, 1, 1, 1},
			// This is a NULL parameters value which is technically
			// superfluous, but most other code includes it and, by
			// doing this, we match their public key hashes.
			Parameters: asn1.RawValue{
				Tag: 5,
			},
		},
		BitString: asn1.BitString{
			Bytes:     pubBytes,
			BitLength: 8 * len(pubBytes),
		},
	}
	ret, err := asn1.Marshal(pkixKey)
	if err != nil {
		return nil, err
	}
	return ret, nil
}
// These structures reflect the ASN.1 structure of X.509 certificates.
//
// certificate is the outermost SEQUENCE: the to-be-signed body plus the
// signature algorithm and the signature value. Raw captures the complete
// DER bytes of the certificate via asn1.RawContent.
type certificate struct {
	Raw asn1.RawContent
	TBSCertificate tbsCertificate
	SignatureAlgorithm pkix.AlgorithmIdentifier
	SignatureValue asn1.BitString
}
// tbsCertificate is the "to be signed" portion of an X.509 certificate
// (RFC 5280, 4.1). Raw captures its exact DER bytes, which the signature
// is computed over.
type tbsCertificate struct {
	Raw asn1.RawContent
	// NOTE(review): X.509 versions are zero-based on the wire (0 = v1), so
	// `default:1` looks suspicious — confirm against RFC 5280 before
	// relying on it for certificates that omit the version field.
	Version int `asn1:"optional,explicit,default:1,tag:0"`
	SerialNumber *big.Int
	SignatureAlgorithm pkix.AlgorithmIdentifier
	Issuer asn1.RawValue
	Validity validity
	Subject asn1.RawValue
	PublicKey publicKeyInfo
	UniqueId asn1.BitString `asn1:"optional,tag:1"`
	SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"`
	Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"`
}
// dsaAlgorithmParameters mirrors the DSA domain parameters (Dss-Parms).
type dsaAlgorithmParameters struct {
	P, Q, G *big.Int
}
// dsaSignature is the ASN.1 SEQUENCE holding the two DSA signature values.
type dsaSignature struct {
	R, S *big.Int
}
// ecdsaSignature has the same wire shape as a DSA signature.
type ecdsaSignature dsaSignature
// validity is the certificate validity interval (RFC 5280, 4.1.2.5).
type validity struct {
	NotBefore, NotAfter time.Time
}
// publicKeyInfo mirrors SubjectPublicKeyInfo: an algorithm identifier plus
// the encoded key bits. Raw captures the exact DER bytes of the structure.
type publicKeyInfo struct {
	Raw asn1.RawContent
	Algorithm pkix.AlgorithmIdentifier
	PublicKey asn1.BitString
}
// RFC 5280, 4.2.1.1
// authKeyId models the AuthorityKeyIdentifier extension; only the key
// identifier field (context tag 0) is decoded here.
type authKeyId struct {
	Id []byte `asn1:"optional,tag:0"`
}
// SignatureAlgorithm identifies the algorithm used to sign a certificate.
type SignatureAlgorithm int
const (
	UnknownSignatureAlgorithm SignatureAlgorithm = iota
	MD2WithRSA
	MD5WithRSA
	SHA1WithRSA
	SHA256WithRSA
	SHA384WithRSA
	SHA512WithRSA
	DSAWithSHA1
	DSAWithSHA256
	ECDSAWithSHA1
	ECDSAWithSHA256
	ECDSAWithSHA384
	ECDSAWithSHA512
)
// PublicKeyAlgorithm identifies the algorithm of a certificate's public key.
type PublicKeyAlgorithm int
const (
	UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
	RSA
	DSA
	ECDSA
)
// OIDs for signature algorithms
//
// pkcs-1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
//
//
// RFC 3279 2.2.1 RSA Signature Algorithms
//
// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
//
// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
//
// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
//
// dsaWithSha1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
//
// RFC 3279 2.2.3 ECDSA Signature Algorithm
//
// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-x962(10045)
// signatures(4) ecdsa-with-SHA1(1)}
//
//
// RFC 4055 5 PKCS #1 Version 1.5
//
// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
//
// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
//
// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
//
//
// RFC 5758 3.1 DSA Signature Algorithms
//
// dsaWithSha256 OBJECT IDENTIFIER ::= {
// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
// csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
//
// RFC 5758 3.2 ECDSA Signature Algorithm
//
// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 }
//
// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 }
//
// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
// OIDs for the supported signature algorithms, per the RFC references in the
// comment block above.
var (
	oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
	oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
	oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
	oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
	oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
	oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
	oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
	oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2}
	oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
	oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
	oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
	oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
)
// getSignatureAlgorithmFromOID maps a signature-algorithm OID to the
// corresponding SignatureAlgorithm constant, returning
// UnknownSignatureAlgorithm when the OID is not recognized.
func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) SignatureAlgorithm {
	known := []struct {
		oid  asn1.ObjectIdentifier
		algo SignatureAlgorithm
	}{
		{oidSignatureMD2WithRSA, MD2WithRSA},
		{oidSignatureMD5WithRSA, MD5WithRSA},
		{oidSignatureSHA1WithRSA, SHA1WithRSA},
		{oidSignatureSHA256WithRSA, SHA256WithRSA},
		{oidSignatureSHA384WithRSA, SHA384WithRSA},
		{oidSignatureSHA512WithRSA, SHA512WithRSA},
		{oidSignatureDSAWithSHA1, DSAWithSHA1},
		{oidSignatureDSAWithSHA256, DSAWithSHA256},
		{oidSignatureECDSAWithSHA1, ECDSAWithSHA1},
		{oidSignatureECDSAWithSHA256, ECDSAWithSHA256},
		{oidSignatureECDSAWithSHA384, ECDSAWithSHA384},
		{oidSignatureECDSAWithSHA512, ECDSAWithSHA512},
	}
	for _, k := range known {
		if oid.Equal(k.oid) {
			return k.algo
		}
	}
	return UnknownSignatureAlgorithm
}
// RFC 3279, 2.3 Public Key Algorithms
//
// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
// rsadsi(113549) pkcs(1) 1 }
//
// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 }
//
// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
// x9-57(10040) x9cm(4) 1 }
//
// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
//
// id-ecPublicKey OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
// OIDs identifying the supported public key algorithms (see the RFC
// references in the comment block above).
var (
	oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
	oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
	oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
)
// getPublicKeyAlgorithmFromOID maps a public-key-algorithm OID to the
// corresponding PublicKeyAlgorithm constant, returning
// UnknownPublicKeyAlgorithm when the OID is not recognized.
func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
	if oid.Equal(oidPublicKeyRSA) {
		return RSA
	}
	if oid.Equal(oidPublicKeyDSA) {
		return DSA
	}
	if oid.Equal(oidPublicKeyECDSA) {
		return ECDSA
	}
	return UnknownPublicKeyAlgorithm
}
// RFC 5480, 2.1.1.1. Named Curve
//
// secp224r1 OBJECT IDENTIFIER ::= {
// iso(1) identified-organization(3) certicom(132) curve(0) 33 }
//
// secp256r1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
// prime(1) 7 }
//
// secp384r1 OBJECT IDENTIFIER ::= {
// iso(1) identified-organization(3) certicom(132) curve(0) 34 }
//
// secp521r1 OBJECT IDENTIFIER ::= {
// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
//
// NB: secp256r1 is equivalent to prime256v1
// OIDs for the supported NIST named curves (RFC 5480, 2.1.1.1; see the
// comment block above).
var (
	oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
	oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
	oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
	oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
)
// namedCurveFromOID returns the elliptic.Curve identified by oid, or nil
// when the OID does not name one of the supported NIST curves.
func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
	if oid.Equal(oidNamedCurveP224) {
		return elliptic.P224()
	}
	if oid.Equal(oidNamedCurveP256) {
		return elliptic.P256()
	}
	if oid.Equal(oidNamedCurveP384) {
		return elliptic.P384()
	}
	if oid.Equal(oidNamedCurveP521) {
		return elliptic.P521()
	}
	return nil
}
// oidFromNamedCurve is the inverse of namedCurveFromOID: it returns the OID
// for a supported curve and reports whether the curve was recognized.
func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
	if curve == elliptic.P224() {
		return oidNamedCurveP224, true
	}
	if curve == elliptic.P256() {
		return oidNamedCurveP256, true
	}
	if curve == elliptic.P384() {
		return oidNamedCurveP384, true
	}
	if curve == elliptic.P521() {
		return oidNamedCurveP521, true
	}
	return nil, false
}
// KeyUsage represents the set of actions that are valid for a given key. It's
// a bitmap of the KeyUsage* constants.
type KeyUsage int
// Bit positions mirror the KeyUsage BIT STRING of RFC 5280, 4.2.1.3.
const (
	KeyUsageDigitalSignature KeyUsage = 1 << iota
	KeyUsageContentCommitment
	KeyUsageKeyEncipherment
	KeyUsageDataEncipherment
	KeyUsageKeyAgreement
	KeyUsageCertSign
	KeyUsageCRLSign
	KeyUsageEncipherOnly
	KeyUsageDecipherOnly
)
// RFC 5280, 4.2.1.12  Extended Key Usage
//
// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 }
//
// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
//
// id-kp-serverAuth             OBJECT IDENTIFIER ::= { id-kp 1 }
// id-kp-clientAuth             OBJECT IDENTIFIER ::= { id-kp 2 }
// id-kp-codeSigning            OBJECT IDENTIFIER ::= { id-kp 3 }
// id-kp-emailProtection        OBJECT IDENTIFIER ::= { id-kp 4 }
// id-kp-timeStamping           OBJECT IDENTIFIER ::= { id-kp 8 }
// id-kp-OCSPSigning            OBJECT IDENTIFIER ::= { id-kp 9 }

// KeyPurposeId OIDs recognised by extKeyUsageFromOID / oidFromExtKeyUsage.
var (
	oidExtKeyUsageAny             = asn1.ObjectIdentifier{2, 5, 29, 37, 0}
	oidExtKeyUsageServerAuth      = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
	oidExtKeyUsageClientAuth      = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2}
	oidExtKeyUsageCodeSigning     = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3}
	oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4}
	oidExtKeyUsageIPSECEndSystem  = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5}
	oidExtKeyUsageIPSECTunnel     = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6}
	oidExtKeyUsageIPSECUser       = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7}
	oidExtKeyUsageTimeStamping    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8}
	oidExtKeyUsageOCSPSigning     = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9}
)
// ExtKeyUsage represents an extended set of actions that are valid for a given key.
// Each of the ExtKeyUsage* constants define a unique action.
type ExtKeyUsage int

// The constant order must stay in sync with extKeyUsageOIDs below.
const (
	ExtKeyUsageAny ExtKeyUsage = iota
	ExtKeyUsageServerAuth
	ExtKeyUsageClientAuth
	ExtKeyUsageCodeSigning
	ExtKeyUsageEmailProtection
	ExtKeyUsageIPSECEndSystem
	ExtKeyUsageIPSECTunnel
	ExtKeyUsageIPSECUser
	ExtKeyUsageTimeStamping
	ExtKeyUsageOCSPSigning
)
// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID.
// It is scanned linearly in both directions by extKeyUsageFromOID and
// oidFromExtKeyUsage.
var extKeyUsageOIDs = []struct {
	extKeyUsage ExtKeyUsage
	oid         asn1.ObjectIdentifier
}{
	{ExtKeyUsageAny, oidExtKeyUsageAny},
	{ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth},
	{ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth},
	{ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning},
	{ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection},
	{ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem},
	{ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel},
	{ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser},
	{ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping},
	{ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning},
}
// extKeyUsageFromOID looks up the ExtKeyUsage constant for a KeyPurposeId
// OID; ok is false when the OID is not listed in extKeyUsageOIDs.
func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) {
	for i := range extKeyUsageOIDs {
		if oid.Equal(extKeyUsageOIDs[i].oid) {
			return extKeyUsageOIDs[i].extKeyUsage, true
		}
	}
	return 0, false
}
// oidFromExtKeyUsage is the inverse of extKeyUsageFromOID; ok is false when
// eku has no registered OID.
func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) {
	for i := range extKeyUsageOIDs {
		if extKeyUsageOIDs[i].extKeyUsage == eku {
			oid, ok = extKeyUsageOIDs[i].oid, true
			return
		}
	}
	return nil, false
}
// A Certificate represents an X.509 certificate.
type Certificate struct {
	Raw                     []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
	RawTBSCertificate       []byte // Certificate part of raw ASN.1 DER content.
	RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
	RawSubject              []byte // DER encoded Subject
	RawIssuer               []byte // DER encoded Issuer

	Signature          []byte
	SignatureAlgorithm SignatureAlgorithm

	PublicKeyAlgorithm PublicKeyAlgorithm
	// PublicKey holds *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey,
	// as produced by parsePublicKey.
	PublicKey interface{}

	Version             int
	SerialNumber        *big.Int
	Issuer              pkix.Name
	Subject             pkix.Name
	NotBefore, NotAfter time.Time // Validity bounds.
	KeyUsage            KeyUsage

	ExtKeyUsage        []ExtKeyUsage           // Sequence of extended key usages.
	UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.

	BasicConstraintsValid bool // if true then the next two fields are valid.
	IsCA                  bool
	MaxPathLen            int

	SubjectKeyId   []byte
	AuthorityKeyId []byte

	// Subject Alternate Name values
	DNSNames       []string
	EmailAddresses []string

	// Name constraints
	PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical.
	PermittedDNSDomains         []string

	PolicyIdentifiers []asn1.ObjectIdentifier
}
// ErrUnsupportedAlgorithm results from attempting to perform an operation that
// involves algorithms that are not currently implemented.
var ErrUnsupportedAlgorithm = errors.New("crypto/x509: cannot verify signature: algorithm unimplemented")
// ConstraintViolationError results when a requested usage is not permitted by
// a certificate. For example: checking a signature when the public key isn't a
// certificate signing key.
type ConstraintViolationError struct{}

// Error implements the error interface.
func (ConstraintViolationError) Error() string {
	return "crypto/x509: invalid signature: parent certificate cannot sign this kind of certificate"
}
// Equal reports whether c and other hold byte-for-byte identical DER
// encodings (it compares the complete raw certificates, not parsed fields).
func (c *Certificate) Equal(other *Certificate) bool {
	return bytes.Equal(c.Raw, other.Raw)
}
// Entrust have a broken root certificate (CN=Entrust.net Certification
// Authority (2048)) which isn't marked as a CA certificate and is thus invalid
// according to PKIX.
// We recognise this certificate by its SubjectPublicKeyInfo and exempt it
// from the Basic Constraints requirement.
// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869
//
// TODO(agl): remove this hack once their reissued root is sufficiently
// widespread.

// entrustBrokenSPKI is the DER-encoded SubjectPublicKeyInfo of that root;
// CheckSignatureFrom compares against it byte-for-byte.
var entrustBrokenSPKI = []byte{
	0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09,
	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
	0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00,
	0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01,
	0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05,
	0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3,
	0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff,
	0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10,
	0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff,
	0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50,
	0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8,
	0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6,
	0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04,
	0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c,
	0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65,
	0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38,
	0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda,
	0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9,
	0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7,
	0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37,
	0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde,
	0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6,
	0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c,
	0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a,
	0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5,
	0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2,
	0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc,
	0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4,
	0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b,
	0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e,
	0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48,
	0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05,
	0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09,
	0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2,
	0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d,
	0xa6, 0xc2, 0xcc, 0x66, 0xff, 0xa5, 0x66, 0x68,
	0x55, 0x02, 0x03, 0x01, 0x00, 0x01,
}
// CheckSignatureFrom verifies that the signature on c is a valid signature
// from parent.
func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err error) {
	// RFC 5280, 4.2.1.9:
	// "If the basic constraints extension is not present in a version 3
	// certificate, or the extension is present but the cA boolean is not
	// asserted, then the certified public key MUST NOT be used to verify
	// certificate signatures."
	// (except for Entrust, see comment above entrustBrokenSPKI)
	if (parent.Version == 3 && !parent.BasicConstraintsValid ||
		parent.BasicConstraintsValid && !parent.IsCA) &&
		!bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) {
		return ConstraintViolationError{}
	}

	// A zero KeyUsage means the extension was absent, in which case no
	// usage restriction applies.
	if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 {
		return ConstraintViolationError{}
	}

	if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
		return ErrUnsupportedAlgorithm
	}

	// TODO(agl): don't ignore the path length constraint.

	return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature)
}
// CheckSignature verifies that signature is a valid signature over signed from
// c's public key.
func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) (err error) {
	var hashType crypto.Hash

	// Select the hash that was applied to signed before signing.
	switch algo {
	case SHA1WithRSA, DSAWithSHA1, ECDSAWithSHA1:
		hashType = crypto.SHA1
	case SHA256WithRSA, DSAWithSHA256, ECDSAWithSHA256:
		hashType = crypto.SHA256
	case SHA384WithRSA, ECDSAWithSHA384:
		hashType = crypto.SHA384
	case SHA512WithRSA, ECDSAWithSHA512:
		hashType = crypto.SHA512
	default:
		return ErrUnsupportedAlgorithm
	}

	if !hashType.Available() {
		return ErrUnsupportedAlgorithm
	}
	h := hashType.New()

	h.Write(signed)
	digest := h.Sum(nil)

	// Dispatch on the concrete public key type; DSA and ECDSA signatures
	// arrive as an ASN.1 SEQUENCE of two INTEGERs.
	switch pub := c.PublicKey.(type) {
	case *rsa.PublicKey:
		return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
	case *dsa.PublicKey:
		dsaSig := new(dsaSignature)
		if _, err := asn1.Unmarshal(signature, dsaSig); err != nil {
			return err
		}
		if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
			return errors.New("DSA signature contained zero or negative values")
		}
		if !dsa.Verify(pub, digest, dsaSig.R, dsaSig.S) {
			return errors.New("DSA verification failure")
		}
		return
	case *ecdsa.PublicKey:
		ecdsaSig := new(ecdsaSignature)
		if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
			return err
		}
		if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
			return errors.New("crypto/x509: ECDSA signature contained zero or negative values")
		}
		if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
			return errors.New("crypto/x509: ECDSA verification failure")
		}
		return
	}
	return ErrUnsupportedAlgorithm
}
// CheckCRLSignature checks that the signature in crl is from c.
func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) (err error) {
	sigAlgo := getSignatureAlgorithmFromOID(crl.SignatureAlgorithm.Algorithm)
	signed := crl.TBSCertList.Raw
	sig := crl.SignatureValue.RightAlign()
	return c.CheckSignature(sigAlgo, signed, sig)
}
// UnhandledCriticalExtension is returned when a certificate contains an
// extension that is marked critical but not understood by this package.
type UnhandledCriticalExtension struct{}

// Error implements the error interface.
func (h UnhandledCriticalExtension) Error() string {
	return "unhandled critical extension"
}
// basicConstraints mirrors the BasicConstraints extension (RFC 5280, 4.2.1.9).
type basicConstraints struct {
	IsCA       bool `asn1:"optional"`
	MaxPathLen int  `asn1:"optional,default:-1"`
}

// RFC 5280 4.2.1.4
type policyInformation struct {
	Policy asn1.ObjectIdentifier
	// policyQualifiers omitted
}

// RFC 5280, 4.2.1.10
type nameConstraints struct {
	Permitted []generalSubtree `asn1:"optional,tag:0"`
	Excluded  []generalSubtree `asn1:"optional,tag:1"`
}

// generalSubtree carries only the dNSName choice (tag 2) of a GeneralName;
// other name forms are ignored on decode.
type generalSubtree struct {
	Name string `asn1:"tag:2,optional,ia5"`
}
// parsePublicKey decodes the BIT STRING payload of a SubjectPublicKeyInfo
// into a concrete key type: *rsa.PublicKey, *dsa.PublicKey or
// *ecdsa.PublicKey. For an algorithm this package does not know it returns
// (nil, nil) so callers can still use the rest of the certificate.
func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
	asn1Data := keyData.PublicKey.RightAlign()
	switch algo {
	case RSA:
		p := new(rsaPublicKey)
		_, err := asn1.Unmarshal(asn1Data, p)
		if err != nil {
			return nil, err
		}
		// Reject clearly invalid RSA keys here so that users get a
		// descriptive error instead of a confusing failure later at
		// signature-verification time (issue 4728: some software emits
		// negative moduli).
		if p.N.Sign() <= 0 {
			return nil, errors.New("x509: RSA modulus is not a positive number")
		}
		if p.E <= 0 {
			return nil, errors.New("x509: RSA public exponent is not a positive number")
		}
		pub := &rsa.PublicKey{
			E: p.E,
			N: p.N,
		}
		return pub, nil
	case DSA:
		var p *big.Int
		_, err := asn1.Unmarshal(asn1Data, &p)
		if err != nil {
			return nil, err
		}
		// DSA domain parameters live in the AlgorithmIdentifier, not in
		// the key BIT STRING.
		paramsData := keyData.Algorithm.Parameters.FullBytes
		params := new(dsaAlgorithmParameters)
		_, err = asn1.Unmarshal(paramsData, params)
		if err != nil {
			return nil, err
		}
		if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 {
			return nil, errors.New("x509: zero or negative DSA parameter")
		}
		pub := &dsa.PublicKey{
			Parameters: dsa.Parameters{
				P: params.P,
				Q: params.Q,
				G: params.G,
			},
			Y: p,
		}
		return pub, nil
	case ECDSA:
		paramsData := keyData.Algorithm.Parameters.FullBytes
		namedCurveOID := new(asn1.ObjectIdentifier)
		_, err := asn1.Unmarshal(paramsData, namedCurveOID)
		if err != nil {
			return nil, err
		}
		namedCurve := namedCurveFromOID(*namedCurveOID)
		if namedCurve == nil {
			return nil, errors.New("crypto/x509: unsupported elliptic curve")
		}
		x, y := elliptic.Unmarshal(namedCurve, asn1Data)
		if x == nil {
			return nil, errors.New("crypto/x509: failed to unmarshal elliptic curve point")
		}
		pub := &ecdsa.PublicKey{
			Curve: namedCurve,
			X:     x,
			Y:     y,
		}
		return pub, nil
	default:
		return nil, nil
	}
}
// parseCertificate converts a raw, ASN.1-decoded certificate structure into
// the exported Certificate type: raw byte sections, public key, serial,
// names, validity and every extension this package understands. An
// unrecognised extension that is marked critical yields an
// UnhandledCriticalExtension error (with the partially filled certificate
// still returned).
func parseCertificate(in *certificate) (*Certificate, error) {
	out := new(Certificate)
	out.Raw = in.Raw
	out.RawTBSCertificate = in.TBSCertificate.Raw
	out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw
	out.RawSubject = in.TBSCertificate.Subject.FullBytes
	out.RawIssuer = in.TBSCertificate.Issuer.FullBytes

	out.Signature = in.SignatureValue.RightAlign()
	out.SignatureAlgorithm =
		getSignatureAlgorithmFromOID(in.TBSCertificate.SignatureAlgorithm.Algorithm)

	out.PublicKeyAlgorithm =
		getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
	var err error
	out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
	if err != nil {
		return nil, err
	}

	if in.TBSCertificate.SerialNumber.Sign() < 0 {
		return nil, errors.New("negative serial number")
	}

	// The wire-format version is zero-based (v1 == 0).
	out.Version = in.TBSCertificate.Version + 1
	out.SerialNumber = in.TBSCertificate.SerialNumber

	var issuer, subject pkix.RDNSequence
	if _, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil {
		return nil, err
	}
	if _, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil {
		return nil, err
	}

	out.Issuer.FillFromRDNSequence(&issuer)
	out.Subject.FillFromRDNSequence(&subject)

	out.NotBefore = in.TBSCertificate.Validity.NotBefore
	out.NotAfter = in.TBSCertificate.Validity.NotAfter

	for _, e := range in.TBSCertificate.Extensions {
		// All handled extensions live under the id-ce arc (2.5.29.x).
		// A case that decodes successfully executes `continue`; anything
		// that falls out of the switch reaches the critical check below.
		if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 {
			switch e.Id[3] {
			case 15:
				// RFC 5280, 4.2.1.3
				var usageBits asn1.BitString
				_, err := asn1.Unmarshal(e.Value, &usageBits)

				if err == nil {
					var usage int
					for i := 0; i < 9; i++ {
						if usageBits.At(i) != 0 {
							usage |= 1 << uint(i)
						}
					}
					out.KeyUsage = KeyUsage(usage)
					continue
				}
			case 19:
				// RFC 5280, 4.2.1.9
				var constraints basicConstraints
				_, err := asn1.Unmarshal(e.Value, &constraints)

				if err == nil {
					out.BasicConstraintsValid = true
					out.IsCA = constraints.IsCA
					out.MaxPathLen = constraints.MaxPathLen
					continue
				}
			case 17:
				// RFC 5280, 4.2.1.6

				// SubjectAltName ::= GeneralNames
				//
				// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
				//
				// GeneralName ::= CHOICE {
				//      otherName                       [0]     OtherName,
				//      rfc822Name                      [1]     IA5String,
				//      dNSName                         [2]     IA5String,
				//      x400Address                     [3]     ORAddress,
				//      directoryName                   [4]     Name,
				//      ediPartyName                    [5]     EDIPartyName,
				//      uniformResourceIdentifier       [6]     IA5String,
				//      iPAddress                       [7]     OCTET STRING,
				//      registeredID                    [8]     OBJECT IDENTIFIER }
				var seq asn1.RawValue
				_, err := asn1.Unmarshal(e.Value, &seq)
				if err != nil {
					return nil, err
				}
				if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 {
					return nil, asn1.StructuralError{Msg: "bad SAN sequence"}
				}

				parsedName := false

				rest := seq.Bytes
				for len(rest) > 0 {
					var v asn1.RawValue
					rest, err = asn1.Unmarshal(rest, &v)
					if err != nil {
						return nil, err
					}
					// Only rfc822Name and dNSName choices are kept.
					switch v.Tag {
					case 1:
						out.EmailAddresses = append(out.EmailAddresses, string(v.Bytes))
						parsedName = true
					case 2:
						out.DNSNames = append(out.DNSNames, string(v.Bytes))
						parsedName = true
					}
				}

				if parsedName {
					continue
				}
				// If we didn't parse any of the names then we
				// fall through to the critical check below.
			case 30:
				// RFC 5280, 4.2.1.10

				// NameConstraints ::= SEQUENCE {
				//      permittedSubtrees       [0]     GeneralSubtrees OPTIONAL,
				//      excludedSubtrees        [1]     GeneralSubtrees OPTIONAL }
				//
				// GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
				//
				// GeneralSubtree ::= SEQUENCE {
				//      base                    GeneralName,
				//      minimum         [0]     BaseDistance DEFAULT 0,
				//      maximum         [1]     BaseDistance OPTIONAL }
				//
				// BaseDistance ::= INTEGER (0..MAX)

				var constraints nameConstraints
				_, err := asn1.Unmarshal(e.Value, &constraints)
				if err != nil {
					return nil, err
				}

				// Excluded subtrees are unsupported; that is only fatal
				// when the extension is critical.
				if len(constraints.Excluded) > 0 && e.Critical {
					return out, UnhandledCriticalExtension{}
				}

				for _, subtree := range constraints.Permitted {
					if len(subtree.Name) == 0 {
						if e.Critical {
							return out, UnhandledCriticalExtension{}
						}
						continue
					}
					out.PermittedDNSDomains = append(out.PermittedDNSDomains, subtree.Name)
				}
				continue

			case 35:
				// RFC 5280, 4.2.1.1
				var a authKeyId
				_, err = asn1.Unmarshal(e.Value, &a)
				if err != nil {
					return nil, err
				}
				out.AuthorityKeyId = a.Id
				continue

			case 37:
				// RFC 5280, 4.2.1.12.  Extended Key Usage

				// id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 }
				//
				// ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
				//
				// KeyPurposeId ::= OBJECT IDENTIFIER

				var keyUsage []asn1.ObjectIdentifier
				_, err = asn1.Unmarshal(e.Value, &keyUsage)
				if err != nil {
					return nil, err
				}

				for _, u := range keyUsage {
					if extKeyUsage, ok := extKeyUsageFromOID(u); ok {
						out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage)
					} else {
						out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u)
					}
				}

				continue

			case 14:
				// RFC 5280, 4.2.1.2
				var keyid []byte
				_, err = asn1.Unmarshal(e.Value, &keyid)
				if err != nil {
					return nil, err
				}
				out.SubjectKeyId = keyid
				continue

			case 32:
				// RFC 5280 4.2.1.4: Certificate Policies
				var policies []policyInformation
				if _, err = asn1.Unmarshal(e.Value, &policies); err != nil {
					return nil, err
				}
				out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies))
				for i, policy := range policies {
					out.PolicyIdentifiers[i] = policy.Policy
				}
			}
		}

		if e.Critical {
			return out, UnhandledCriticalExtension{}
		}
	}

	return out, nil
}
// ParseCertificate parses a single certificate from the given ASN.1 DER data.
func ParseCertificate(asn1Data []byte) (*Certificate, error) {
	cert := new(certificate)
	rest, err := asn1.Unmarshal(asn1Data, cert)
	if err != nil {
		return nil, err
	}
	// A single certificate must consume the entire input.
	if len(rest) > 0 {
		return nil, asn1.SyntaxError{Msg: "trailing data"}
	}
	return parseCertificate(cert)
}
// ParseCertificates parses one or more certificates from the given ASN.1 DER
// data. The certificates must be concatenated with no intermediate padding.
func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
	// First pass: split the DER stream into raw certificate structures so
	// that any ASN.1 framing error is reported before semantic parsing.
	var raw []*certificate
	rest := asn1Data
	for len(rest) > 0 {
		c := new(certificate)
		var err error
		if rest, err = asn1.Unmarshal(rest, c); err != nil {
			return nil, err
		}
		raw = append(raw, c)
	}

	// Second pass: convert each raw structure into a Certificate.
	parsed := make([]*Certificate, len(raw))
	for i := range raw {
		c, err := parseCertificate(raw[i])
		if err != nil {
			return nil, err
		}
		parsed[i] = c
	}
	return parsed, nil
}
// reverseBitsInAByte returns in with its bit order mirrored (MSB becomes
// LSB and vice versa), as required when encoding ASN.1 BIT STRING key-usage
// flags.
func reverseBitsInAByte(in byte) byte {
	var out byte
	for i := 0; i < 8; i++ {
		out <<= 1
		out |= in & 1
		in >>= 1
	}
	return out
}
// OIDs of the id-ce (2.5.29) extensions that buildExtensions can emit.
var (
	oidExtensionSubjectKeyId        = []int{2, 5, 29, 14}
	oidExtensionKeyUsage            = []int{2, 5, 29, 15}
	oidExtensionExtendedKeyUsage    = []int{2, 5, 29, 37}
	oidExtensionAuthorityKeyId      = []int{2, 5, 29, 35}
	oidExtensionBasicConstraints    = []int{2, 5, 29, 19}
	oidExtensionSubjectAltName      = []int{2, 5, 29, 17}
	oidExtensionCertificatePolicies = []int{2, 5, 29, 32}
	oidExtensionNameConstraints     = []int{2, 5, 29, 30}
)
// buildExtensions encodes the extension-related fields of template into a
// slice of pkix.Extension ready for inclusion in a TBSCertificate. The slice
// is pre-sized to the maximum possible number of extensions and trimmed to
// the number actually produced (tracked by n).
func buildExtensions(template *Certificate) (ret []pkix.Extension, err error) {
	ret = make([]pkix.Extension, 8 /* maximum number of elements. */)
	n := 0

	if template.KeyUsage != 0 {
		ret[n].Id = oidExtensionKeyUsage
		ret[n].Critical = true

		var a [2]byte
		// The DER BIT STRING stores key-usage bits in reverse order.
		a[0] = reverseBitsInAByte(byte(template.KeyUsage))
		a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8))

		l := 1
		if a[1] != 0 {
			l = 2
		}

		ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: a[0:l], BitLength: l * 8})
		if err != nil {
			return
		}
		n++
	}

	if len(template.ExtKeyUsage) > 0 ||
		len(template.UnknownExtKeyUsage) > 0 {
		ret[n].Id = oidExtensionExtendedKeyUsage

		var oids []asn1.ObjectIdentifier
		for _, u := range template.ExtKeyUsage {
			if oid, ok := oidFromExtKeyUsage(u); ok {
				oids = append(oids, oid)
			} else {
				// An ExtKeyUsage outside the known constants is a
				// programming error in the caller.
				panic("internal error")
			}
		}

		oids = append(oids, template.UnknownExtKeyUsage...)

		ret[n].Value, err = asn1.Marshal(oids)
		if err != nil {
			return
		}
		n++
	}

	if template.BasicConstraintsValid {
		ret[n].Id = oidExtensionBasicConstraints
		ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, template.MaxPathLen})
		ret[n].Critical = true
		if err != nil {
			return
		}
		n++
	}

	if len(template.SubjectKeyId) > 0 {
		ret[n].Id = oidExtensionSubjectKeyId
		ret[n].Value, err = asn1.Marshal(template.SubjectKeyId)
		if err != nil {
			return
		}
		n++
	}

	if len(template.AuthorityKeyId) > 0 {
		ret[n].Id = oidExtensionAuthorityKeyId
		ret[n].Value, err = asn1.Marshal(authKeyId{template.AuthorityKeyId})
		if err != nil {
			return
		}
		n++
	}

	if len(template.DNSNames) > 0 {
		ret[n].Id = oidExtensionSubjectAltName
		// dNSName is the [2] IA5String choice of GeneralName.
		rawValues := make([]asn1.RawValue, len(template.DNSNames))
		for i, name := range template.DNSNames {
			rawValues[i] = asn1.RawValue{Tag: 2, Class: 2, Bytes: []byte(name)}
		}
		ret[n].Value, err = asn1.Marshal(rawValues)
		if err != nil {
			return
		}
		n++
	}

	if len(template.PolicyIdentifiers) > 0 {
		ret[n].Id = oidExtensionCertificatePolicies
		policies := make([]policyInformation, len(template.PolicyIdentifiers))
		for i, policy := range template.PolicyIdentifiers {
			policies[i].Policy = policy
		}
		ret[n].Value, err = asn1.Marshal(policies)
		if err != nil {
			return
		}
		n++
	}

	if len(template.PermittedDNSDomains) > 0 {
		ret[n].Id = oidExtensionNameConstraints
		ret[n].Critical = template.PermittedDNSDomainsCritical

		var out nameConstraints
		out.Permitted = make([]generalSubtree, len(template.PermittedDNSDomains))
		for i, permitted := range template.PermittedDNSDomains {
			out.Permitted[i] = generalSubtree{Name: permitted}
		}
		ret[n].Value, err = asn1.Marshal(out)
		if err != nil {
			return
		}
		n++
	}

	// Adding another extension here? Remember to update the maximum number
	// of elements in the make() at the top of the function.

	return ret[0:n], nil
}
// subjectBytes returns the DER encoding of cert's subject, preferring the
// raw bytes captured at parse time and falling back to re-encoding the
// parsed name.
func subjectBytes(cert *Certificate) ([]byte, error) {
	if raw := cert.RawSubject; len(raw) > 0 {
		return raw, nil
	}
	return asn1.Marshal(cert.Subject.ToRDNSequence())
}
// CreateCertificate creates a new certificate based on a template. The
// following members of template are used: SerialNumber, Subject, NotBefore,
// NotAfter, KeyUsage, ExtKeyUsage, UnknownExtKeyUsage, BasicConstraintsValid,
// IsCA, MaxPathLen, SubjectKeyId, DNSNames, PermittedDNSDomainsCritical,
// PermittedDNSDomains.
//
// The certificate is signed by parent. If parent is equal to template then the
// certificate is self-signed. The parameter pub is the public key of the
// signee and priv is the private key of the signer.
//
// The returned slice is the certificate in DER encoding.
//
// The only supported key types are RSA and ECDSA (*rsa.PublicKey or
// *ecdsa.PublicKey for pub, *rsa.PrivateKey or *ecdsa.PrivateKey for priv).
func CreateCertificate(rand io.Reader, template, parent *Certificate, pub interface{}, priv interface{}) (cert []byte, err error) {
	// Encode the subject public key and its AlgorithmIdentifier.
	var publicKeyBytes []byte
	var publicKeyAlgorithm pkix.AlgorithmIdentifier

	switch pub := pub.(type) {
	case *rsa.PublicKey:
		publicKeyBytes, err = asn1.Marshal(rsaPublicKey{
			N: pub.N,
			E: pub.E,
		})
		publicKeyAlgorithm.Algorithm = oidPublicKeyRSA
	case *ecdsa.PublicKey:
		oid, ok := oidFromNamedCurve(pub.Curve)
		if !ok {
			return nil, errors.New("x509: unknown elliptic curve")
		}
		publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA
		var paramBytes []byte
		paramBytes, err = asn1.Marshal(oid)
		if err != nil {
			return
		}
		publicKeyAlgorithm.Parameters.FullBytes = paramBytes
		publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
	default:
		return nil, errors.New("x509: only RSA and ECDSA public keys supported")
	}

	// Pick the signature algorithm and hash from the signer's key type.
	var signatureAlgorithm pkix.AlgorithmIdentifier
	var hashFunc crypto.Hash

	switch priv := priv.(type) {
	case *rsa.PrivateKey:
		signatureAlgorithm.Algorithm = oidSignatureSHA1WithRSA
		hashFunc = crypto.SHA1
	case *ecdsa.PrivateKey:
		switch priv.Curve {
		case elliptic.P224(), elliptic.P256():
			hashFunc = crypto.SHA256
			signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA256
		case elliptic.P384():
			hashFunc = crypto.SHA384
			signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA384
		case elliptic.P521():
			hashFunc = crypto.SHA512
			signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA512
		default:
			return nil, errors.New("x509: unknown elliptic curve")
		}
	default:
		return nil, errors.New("x509: only RSA and ECDSA private keys supported")
	}

	// Catches a deferred error from the RSA public-key marshal above.
	if err != nil {
		return
	}

	if len(parent.SubjectKeyId) > 0 {
		template.AuthorityKeyId = parent.SubjectKeyId
	}

	extensions, err := buildExtensions(template)
	if err != nil {
		return
	}

	asn1Issuer, err := subjectBytes(parent)
	if err != nil {
		return
	}

	asn1Subject, err := subjectBytes(template)
	if err != nil {
		return
	}

	encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes}
	// Version is zero-based on the wire: 2 means an X.509v3 certificate.
	c := tbsCertificate{
		Version:            2,
		SerialNumber:       template.SerialNumber,
		SignatureAlgorithm: signatureAlgorithm,
		Issuer:             asn1.RawValue{FullBytes: asn1Issuer},
		Validity:           validity{template.NotBefore.UTC(), template.NotAfter.UTC()},
		Subject:            asn1.RawValue{FullBytes: asn1Subject},
		PublicKey:          publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey},
		Extensions:         extensions,
	}

	tbsCertContents, err := asn1.Marshal(c)
	if err != nil {
		return
	}

	c.Raw = tbsCertContents

	// Sign the DER encoding of the TBSCertificate.
	h := hashFunc.New()
	h.Write(tbsCertContents)
	digest := h.Sum(nil)

	var signature []byte

	switch priv := priv.(type) {
	case *rsa.PrivateKey:
		signature, err = rsa.SignPKCS1v15(rand, priv, hashFunc, digest)
	case *ecdsa.PrivateKey:
		var r, s *big.Int
		if r, s, err = ecdsa.Sign(rand, priv, digest); err == nil {
			signature, err = asn1.Marshal(ecdsaSignature{r, s})
		}
	default:
		// Unreachable: the earlier type switch already rejected other types.
		panic("internal error")
	}

	if err != nil {
		return
	}

	cert, err = asn1.Marshal(certificate{
		nil,
		c,
		signatureAlgorithm,
		asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
	})
	return
}
// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
// CRL.
var pemCRLPrefix = []byte("-----BEGIN X509 CRL")

// pemType is the type of a PEM encoded CRL.
var pemType = "X509 CRL"
// ParseCRL parses a CRL from the given bytes. It's often the case that PEM
// encoded CRLs will appear where they should be DER encoded, so this function
// will transparently handle PEM encoding as long as there isn't any leading
// garbage.
func ParseCRL(crlBytes []byte) (certList *pkix.CertificateList, err error) {
	der := crlBytes
	if bytes.HasPrefix(der, pemCRLPrefix) {
		// Strip the PEM armour; on any decoding mismatch fall back to
		// treating the input as DER, as before.
		if block, _ := pem.Decode(der); block != nil && block.Type == pemType {
			der = block.Bytes
		}
	}
	return ParseDERCRL(der)
}
// ParseDERCRL parses a DER encoded CRL from the given bytes.
func ParseDERCRL(derBytes []byte) (certList *pkix.CertificateList, err error) {
	parsed := new(pkix.CertificateList)
	if _, err := asn1.Unmarshal(derBytes, parsed); err != nil {
		return nil, err
	}
	return parsed, nil
}
// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
// contains the given list of revoked certificates.
//
// The only supported key type is RSA (*rsa.PrivateKey for priv); the
// signature algorithm is fixed to SHA1-with-RSA.
func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
	rsaPriv, ok := priv.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("x509: non-RSA private keys not supported")
	}
	tbsCertList := pkix.TBSCertificateList{
		Version: 2,
		Signature: pkix.AlgorithmIdentifier{
			Algorithm: oidSignatureSHA1WithRSA,
		},
		Issuer:              c.Subject.ToRDNSequence(),
		ThisUpdate:          now.UTC(),
		NextUpdate:          expiry.UTC(),
		RevokedCertificates: revokedCerts,
	}

	tbsCertListContents, err := asn1.Marshal(tbsCertList)
	if err != nil {
		return
	}

	// Sign the DER encoding of the TBSCertList.
	h := sha1.New()
	h.Write(tbsCertListContents)
	digest := h.Sum(nil)

	signature, err := rsa.SignPKCS1v15(rand, rsaPriv, crypto.SHA1, digest)
	if err != nil {
		return
	}

	return asn1.Marshal(pkix.CertificateList{
		TBSCertList: tbsCertList,
		SignatureAlgorithm: pkix.AlgorithmIdentifier{
			Algorithm: oidSignatureSHA1WithRSA,
		},
		SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
	})
}
crypto/x509: test for negative RSA parameters.
Someone found software that generates negative numbers for the RSA
modulus in an X.509 certificate. Our error messages were very poor in
this case so this change improves that.
Update issue 4728
Return more helpful errors when RSA parameters are negative or zero.
R=golang-dev, rsc
CC=golang-dev
https://codereview.appspot.com/7228072
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package x509 parses X.509-encoded keys and certificates.
package x509
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha1"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"io"
"math/big"
"time"
)
// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
// in RFC 3280.
type pkixPublicKey struct {
	Algo      pkix.AlgorithmIdentifier
	BitString asn1.BitString
}
// ParsePKIXPublicKey parses a DER encoded public key. These values are
// typically found in PEM blocks with "BEGIN PUBLIC KEY".
func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
	var pki publicKeyInfo
	if _, err = asn1.Unmarshal(derBytes, &pki); err != nil {
		return nil, err
	}
	algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm)
	if algo != UnknownPublicKeyAlgorithm {
		return parsePublicKey(algo, &pki)
	}
	return nil, errors.New("ParsePKIXPublicKey: unknown public key algorithm")
}
// MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format.
//
// Only *rsa.PublicKey is currently supported.
func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
	var pubBytes []byte

	switch pub := pub.(type) {
	case *rsa.PublicKey:
		// Previously this Marshal error was silently discarded; propagate
		// it so callers never receive a truncated encoding.
		var err error
		pubBytes, err = asn1.Marshal(rsaPublicKey{
			N: pub.N,
			E: pub.E,
		})
		if err != nil {
			return nil, err
		}
	default:
		return nil, errors.New("MarshalPKIXPublicKey: unknown public key type")
	}

	// Named pkixPub rather than pkix to avoid shadowing the imported
	// crypto/x509/pkix package.
	pkixPub := pkixPublicKey{
		Algo: pkix.AlgorithmIdentifier{
			Algorithm: []int{1, 2, 840, 113549, 1, 1, 1},
			// This is a NULL parameters value which is technically
			// superfluous, but most other code includes it and, by
			// doing this, we match their public key hashes.
			Parameters: asn1.RawValue{
				Tag: 5,
			},
		},
		BitString: asn1.BitString{
			Bytes:     pubBytes,
			BitLength: 8 * len(pubBytes),
		},
	}

	ret, err := asn1.Marshal(pkixPub)
	if err != nil {
		return nil, err
	}
	return ret, nil
}
// These structures reflect the ASN.1 structure of X.509 certificates.:
type certificate struct {
	Raw                asn1.RawContent
	TBSCertificate     tbsCertificate
	SignatureAlgorithm pkix.AlgorithmIdentifier
	SignatureValue     asn1.BitString
}
// tbsCertificate mirrors the to-be-signed portion of an X.509 certificate
// (RFC 5280, 4.1). Raw captures the exact DER bytes so signatures can be
// verified without re-encoding.
type tbsCertificate struct {
	Raw                asn1.RawContent
	Version            int `asn1:"optional,explicit,default:1,tag:0"`
	SerialNumber       *big.Int
	SignatureAlgorithm pkix.AlgorithmIdentifier
	Issuer             asn1.RawValue
	Validity           validity
	Subject            asn1.RawValue
	PublicKey          publicKeyInfo
	UniqueId           asn1.BitString   `asn1:"optional,tag:1"`
	SubjectUniqueId    asn1.BitString   `asn1:"optional,tag:2"`
	Extensions         []pkix.Extension `asn1:"optional,explicit,tag:3"`
}
// dsaAlgorithmParameters holds the DSA domain parameters carried in the
// AlgorithmIdentifier of a DSA SubjectPublicKeyInfo.
type dsaAlgorithmParameters struct {
	P, Q, G *big.Int
}

// dsaSignature is the ASN.1 SEQUENCE form of a DSA signature.
type dsaSignature struct {
	R, S *big.Int
}

// ecdsaSignature shares the same wire shape as a DSA signature.
type ecdsaSignature dsaSignature

type validity struct {
	NotBefore, NotAfter time.Time
}

// publicKeyInfo mirrors SubjectPublicKeyInfo; Raw keeps the exact DER bytes.
type publicKeyInfo struct {
	Raw       asn1.RawContent
	Algorithm pkix.AlgorithmIdentifier
	PublicKey asn1.BitString
}

// RFC 5280, 4.2.1.1
type authKeyId struct {
	Id []byte `asn1:"optional,tag:0"`
}
// SignatureAlgorithm identifies the algorithm used to sign a certificate or
// CRL; see getSignatureAlgorithmFromOID for the OID mapping.
type SignatureAlgorithm int

const (
	UnknownSignatureAlgorithm SignatureAlgorithm = iota
	MD2WithRSA
	MD5WithRSA
	SHA1WithRSA
	SHA256WithRSA
	SHA384WithRSA
	SHA512WithRSA
	DSAWithSHA1
	DSAWithSHA256
	ECDSAWithSHA1
	ECDSAWithSHA256
	ECDSAWithSHA384
	ECDSAWithSHA512
)
// PublicKeyAlgorithm identifies the type of a certificate's public key.
type PublicKeyAlgorithm int

const (
	UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
	RSA
	DSA
	ECDSA
)
// OIDs for signature algorithms
//
// pkcs-1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
//
//
// RFC 3279 2.2.1 RSA Signature Algorithms
//
// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
//
// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
//
// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
//
// dsaWithSha1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
//
// RFC 3279 2.2.3 ECDSA Signature Algorithm
//
// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-x962(10045)
// signatures(4) ecdsa-with-SHA1(1)}
//
//
// RFC 4055 5 PKCS #1 Version 1.5
//
// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
//
// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
//
// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
//
//
// RFC 5758 3.1 DSA Signature Algorithms
//
// dsaWithSha256 OBJECT IDENTIFIER ::= {
// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
// csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
//
// RFC 5758 3.2 ECDSA Signature Algorithm
//
// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 }
//
// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 }
//
// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
// OID values for the SignatureAlgorithm constants above; sources are the
// RFC references in the preceding comment block.
var (
	oidSignatureMD2WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
	oidSignatureMD5WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
	oidSignatureSHA1WithRSA     = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
	oidSignatureSHA256WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
	oidSignatureSHA384WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
	oidSignatureSHA512WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
	oidSignatureDSAWithSHA1     = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
	oidSignatureDSAWithSHA256   = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2}
	oidSignatureECDSAWithSHA1   = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
	oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
	oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
	oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
)
// getSignatureAlgorithmFromOID maps a signature-algorithm OID to the
// corresponding SignatureAlgorithm constant, returning
// UnknownSignatureAlgorithm when the OID is not recognised.
func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) SignatureAlgorithm {
	known := []struct {
		oid  asn1.ObjectIdentifier
		algo SignatureAlgorithm
	}{
		{oidSignatureMD2WithRSA, MD2WithRSA},
		{oidSignatureMD5WithRSA, MD5WithRSA},
		{oidSignatureSHA1WithRSA, SHA1WithRSA},
		{oidSignatureSHA256WithRSA, SHA256WithRSA},
		{oidSignatureSHA384WithRSA, SHA384WithRSA},
		{oidSignatureSHA512WithRSA, SHA512WithRSA},
		{oidSignatureDSAWithSHA1, DSAWithSHA1},
		{oidSignatureDSAWithSHA256, DSAWithSHA256},
		{oidSignatureECDSAWithSHA1, ECDSAWithSHA1},
		{oidSignatureECDSAWithSHA256, ECDSAWithSHA256},
		{oidSignatureECDSAWithSHA384, ECDSAWithSHA384},
		{oidSignatureECDSAWithSHA512, ECDSAWithSHA512},
	}
	for _, entry := range known {
		if oid.Equal(entry.oid) {
			return entry.algo
		}
	}
	return UnknownSignatureAlgorithm
}
// RFC 3279, 2.3 Public Key Algorithms
//
// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
// rsadsi(113549) pkcs(1) 1 }
//
// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 }
//
// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
// x9-57(10040) x9cm(4) 1 }
//
// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
//
// id-ecPublicKey OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
// OID values for the supported public-key algorithms; sources are the RFC
// references in the preceding comment block.
var (
	oidPublicKeyRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
	oidPublicKeyDSA   = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
	oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
)
// getPublicKeyAlgorithmFromOID maps a public-key algorithm OID to its
// PublicKeyAlgorithm constant; unknown OIDs yield
// UnknownPublicKeyAlgorithm.
func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
	if oid.Equal(oidPublicKeyRSA) {
		return RSA
	}
	if oid.Equal(oidPublicKeyDSA) {
		return DSA
	}
	if oid.Equal(oidPublicKeyECDSA) {
		return ECDSA
	}
	return UnknownPublicKeyAlgorithm
}
// RFC 5480, 2.1.1.1. Named Curve
//
// secp224r1 OBJECT IDENTIFIER ::= {
// iso(1) identified-organization(3) certicom(132) curve(0) 33 }
//
// secp256r1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
// prime(1) 7 }
//
// secp384r1 OBJECT IDENTIFIER ::= {
// iso(1) identified-organization(3) certicom(132) curve(0) 34 }
//
// secp521r1 OBJECT IDENTIFIER ::= {
// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
//
// NB: secp256r1 is equivalent to prime256v1
// OID values for the supported named elliptic curves (RFC 5480, 2.1.1.1).
var (
	oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
	oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
	oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
	oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
)
// namedCurveFromOID returns the elliptic.Curve identified by the given
// named-curve OID, or nil for an unsupported curve.
func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
	if oid.Equal(oidNamedCurveP224) {
		return elliptic.P224()
	}
	if oid.Equal(oidNamedCurveP256) {
		return elliptic.P256()
	}
	if oid.Equal(oidNamedCurveP384) {
		return elliptic.P384()
	}
	if oid.Equal(oidNamedCurveP521) {
		return elliptic.P521()
	}
	return nil
}
// oidFromNamedCurve is the inverse of namedCurveFromOID: it reports the OID
// for a supported curve; ok is false for an unknown curve.
func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
	if curve == elliptic.P224() {
		return oidNamedCurveP224, true
	}
	if curve == elliptic.P256() {
		return oidNamedCurveP256, true
	}
	if curve == elliptic.P384() {
		return oidNamedCurveP384, true
	}
	if curve == elliptic.P521() {
		return oidNamedCurveP521, true
	}
	return nil, false
}
// KeyUsage represents the set of actions that are valid for a given key. It's
// a bitmap of the KeyUsage* constants.
type KeyUsage int

// Bit positions follow the KeyUsage BIT STRING of RFC 5280, 4.2.1.3.
const (
	KeyUsageDigitalSignature KeyUsage = 1 << iota
	KeyUsageContentCommitment
	KeyUsageKeyEncipherment
	KeyUsageDataEncipherment
	KeyUsageKeyAgreement
	KeyUsageCertSign
	KeyUsageCRLSign
	KeyUsageEncipherOnly
	KeyUsageDecipherOnly
)
// RFC 5280, 4.2.1.12 Extended Key Usage
//
// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 }
//
// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
//
// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 }
// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 }
// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 }
// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
// OID values for the extended key usages; see the id-kp arc listed in the
// preceding comment (RFC 5280, 4.2.1.12).
var (
	oidExtKeyUsageAny             = asn1.ObjectIdentifier{2, 5, 29, 37, 0}
	oidExtKeyUsageServerAuth      = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
	oidExtKeyUsageClientAuth      = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2}
	oidExtKeyUsageCodeSigning     = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3}
	oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4}
	oidExtKeyUsageIPSECEndSystem  = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5}
	oidExtKeyUsageIPSECTunnel     = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6}
	oidExtKeyUsageIPSECUser       = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7}
	oidExtKeyUsageTimeStamping    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8}
	oidExtKeyUsageOCSPSigning     = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9}
)
// ExtKeyUsage represents an extended set of actions that are valid for a given key.
// Each of the ExtKeyUsage* constants define a unique action.
type ExtKeyUsage int

const (
	ExtKeyUsageAny ExtKeyUsage = iota
	ExtKeyUsageServerAuth
	ExtKeyUsageClientAuth
	ExtKeyUsageCodeSigning
	ExtKeyUsageEmailProtection
	ExtKeyUsageIPSECEndSystem
	ExtKeyUsageIPSECTunnel
	ExtKeyUsageIPSECUser
	ExtKeyUsageTimeStamping
	ExtKeyUsageOCSPSigning
)
// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID.
// It is scanned linearly by extKeyUsageFromOID and oidFromExtKeyUsage.
var extKeyUsageOIDs = []struct {
	extKeyUsage ExtKeyUsage
	oid         asn1.ObjectIdentifier
}{
	{ExtKeyUsageAny, oidExtKeyUsageAny},
	{ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth},
	{ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth},
	{ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning},
	{ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection},
	{ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem},
	{ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel},
	{ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser},
	{ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping},
	{ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning},
}
// extKeyUsageFromOID translates an extended-key-usage OID into the matching
// ExtKeyUsage constant; ok reports whether the OID was known.
func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) {
	for i := range extKeyUsageOIDs {
		if oid.Equal(extKeyUsageOIDs[i].oid) {
			eku, ok = extKeyUsageOIDs[i].extKeyUsage, true
			return
		}
	}
	return
}
// oidFromExtKeyUsage performs the reverse mapping of extKeyUsageFromOID.
func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) {
	for i := range extKeyUsageOIDs {
		if extKeyUsageOIDs[i].extKeyUsage == eku {
			oid, ok = extKeyUsageOIDs[i].oid, true
			return
		}
	}
	return
}
// A Certificate represents an X.509 certificate.
type Certificate struct {
	Raw                     []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
	RawTBSCertificate       []byte // Certificate part of raw ASN.1 DER content.
	RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
	RawSubject              []byte // DER encoded Subject
	RawIssuer               []byte // DER encoded Issuer

	Signature          []byte             // Raw signature bits from the certificate.
	SignatureAlgorithm SignatureAlgorithm // Algorithm that produced Signature.

	PublicKeyAlgorithm PublicKeyAlgorithm
	PublicKey          interface{} // Parsed key: *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey (see parsePublicKey).

	Version             int // 1-based X.509 version (the DER field is 0-based).
	SerialNumber        *big.Int
	Issuer              pkix.Name
	Subject             pkix.Name
	NotBefore, NotAfter time.Time // Validity bounds.
	KeyUsage            KeyUsage

	ExtKeyUsage        []ExtKeyUsage           // Sequence of extended key usages.
	UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.

	BasicConstraintsValid bool // if true then the next two fields are valid.
	IsCA                  bool
	MaxPathLen            int

	SubjectKeyId   []byte
	AuthorityKeyId []byte

	// Subject Alternate Name values
	DNSNames       []string
	EmailAddresses []string

	// Name constraints
	PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical.
	PermittedDNSDomains         []string

	PolicyIdentifiers []asn1.ObjectIdentifier
}
// ErrUnsupportedAlgorithm results from attempting to perform an operation that
// involves algorithms that are not currently implemented.
var ErrUnsupportedAlgorithm = errors.New("crypto/x509: cannot verify signature: algorithm unimplemented")

// ConstraintViolationError results when a requested usage is not permitted by
// a certificate. For example: checking a signature when the public key isn't a
// certificate signing key.
type ConstraintViolationError struct{}

// Error implements the error interface.
func (ConstraintViolationError) Error() string {
	return "crypto/x509: invalid signature: parent certificate cannot sign this kind of certificate"
}

// Equal reports whether c and other contain byte-for-byte identical DER.
func (c *Certificate) Equal(other *Certificate) bool {
	return bytes.Equal(c.Raw, other.Raw)
}
// Entrust have a broken root certificate (CN=Entrust.net Certification
// Authority (2048)) which isn't marked as a CA certificate and is thus invalid
// according to PKIX.
// We recognise this certificate by its SubjectPublicKeyInfo and exempt it
// from the Basic Constraints requirement.
// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869
//
// TODO(agl): remove this hack once their reissued root is sufficiently
// widespread.
//
// entrustBrokenSPKI is the DER-encoded SubjectPublicKeyInfo of that root;
// CheckSignatureFrom compares against it byte-for-byte.
var entrustBrokenSPKI = []byte{
	0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09,
	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
	0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00,
	0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01,
	0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05,
	0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3,
	0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff,
	0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10,
	0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff,
	0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50,
	0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8,
	0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6,
	0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04,
	0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c,
	0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65,
	0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38,
	0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda,
	0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9,
	0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7,
	0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37,
	0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde,
	0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6,
	0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c,
	0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a,
	0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5,
	0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2,
	0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc,
	0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4,
	0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b,
	0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e,
	0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48,
	0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05,
	0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09,
	0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2,
	0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d,
	0xa6, 0xc2, 0xcc, 0x66, 0xff, 0xa5, 0x66, 0x68,
	0x55, 0x02, 0x03, 0x01, 0x00, 0x01,
}
// CheckSignatureFrom verifies that the signature on c is a valid signature
// from parent.
func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err error) {
	// RFC 5280, 4.2.1.9:
	// "If the basic constraints extension is not present in a version 3
	// certificate, or the extension is present but the cA boolean is not
	// asserted, then the certified public key MUST NOT be used to verify
	// certificate signatures."
	// (except for Entrust, see comment above entrustBrokenSPKI)
	if (parent.Version == 3 && !parent.BasicConstraintsValid ||
		parent.BasicConstraintsValid && !parent.IsCA) &&
		!bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) {
		return ConstraintViolationError{}
	}
	// If the parent declares key usages at all, certificate signing must be
	// among them.
	if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 {
		return ConstraintViolationError{}
	}
	if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
		return ErrUnsupportedAlgorithm
	}
	// TODO(agl): don't ignore the path length constraint.
	return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature)
}
// CheckSignature verifies that signature is a valid signature over signed from
// c's public key.
func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) (err error) {
	// Map the signature algorithm to the hash function it uses; unsupported
	// algorithms (MD2/MD5 included) are rejected.
	var hashType crypto.Hash
	switch algo {
	case SHA1WithRSA, DSAWithSHA1, ECDSAWithSHA1:
		hashType = crypto.SHA1
	case SHA256WithRSA, DSAWithSHA256, ECDSAWithSHA256:
		hashType = crypto.SHA256
	case SHA384WithRSA, ECDSAWithSHA384:
		hashType = crypto.SHA384
	case SHA512WithRSA, ECDSAWithSHA512:
		hashType = crypto.SHA512
	default:
		return ErrUnsupportedAlgorithm
	}
	// The hash implementation must have been linked into the binary.
	if !hashType.Available() {
		return ErrUnsupportedAlgorithm
	}
	h := hashType.New()
	h.Write(signed)
	digest := h.Sum(nil)
	// Dispatch on the concrete public-key type produced by parsePublicKey.
	switch pub := c.PublicKey.(type) {
	case *rsa.PublicKey:
		return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
	case *dsa.PublicKey:
		dsaSig := new(dsaSignature)
		if _, err := asn1.Unmarshal(signature, dsaSig); err != nil {
			return err
		}
		// Reject degenerate (r, s) values before verification.
		if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
			return errors.New("DSA signature contained zero or negative values")
		}
		if !dsa.Verify(pub, digest, dsaSig.R, dsaSig.S) {
			return errors.New("DSA verification failure")
		}
		return
	case *ecdsa.PublicKey:
		ecdsaSig := new(ecdsaSignature)
		if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
			return err
		}
		// Reject degenerate (r, s) values before verification.
		if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
			return errors.New("crypto/x509: ECDSA signature contained zero or negative values")
		}
		if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
			return errors.New("crypto/x509: ECDSA verification failure")
		}
		return
	}
	return ErrUnsupportedAlgorithm
}
// CheckCRLSignature checks that the signature in crl is from c.
func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) (err error) {
	algorithm := getSignatureAlgorithmFromOID(crl.SignatureAlgorithm.Algorithm)
	err = c.CheckSignature(algorithm, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
	return
}
// UnhandledCriticalExtension is returned by parseCertificate when a
// certificate contains an extension marked critical that this package does
// not understand.
type UnhandledCriticalExtension struct{}

// Error implements the error interface.
func (h UnhandledCriticalExtension) Error() string {
	return "unhandled critical extension"
}

// basicConstraints is the BasicConstraints extension payload
// (RFC 5280, 4.2.1.9); MaxPathLen defaults to -1 when absent.
type basicConstraints struct {
	IsCA       bool `asn1:"optional"`
	MaxPathLen int  `asn1:"optional,default:-1"`
}

// RFC 5280 4.2.1.4
// policyInformation carries a single certificate-policy OID.
type policyInformation struct {
	Policy asn1.ObjectIdentifier
	// policyQualifiers omitted
}

// RFC 5280, 4.2.1.10
// nameConstraints is the NameConstraints extension payload.
type nameConstraints struct {
	Permitted []generalSubtree `asn1:"optional,tag:0"`
	Excluded  []generalSubtree `asn1:"optional,tag:1"`
}

// generalSubtree decodes only the dNSName choice of a GeneralSubtree base.
type generalSubtree struct {
	Name string `asn1:"tag:2,optional,ia5"`
}
// parsePublicKey decodes the SubjectPublicKeyInfo key material for the given
// algorithm, returning *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey.
// An unrecognised algorithm yields (nil, nil) rather than an error so that
// the rest of the certificate can still be parsed.
//
// Fix: the historical trailing panic("unreachable") after the switch has
// been removed — every case (including default) returns, so the switch is a
// terminating statement and the panic was dead code that `go vet` flags.
func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
	asn1Data := keyData.PublicKey.RightAlign()
	switch algo {
	case RSA:
		p := new(rsaPublicKey)
		_, err := asn1.Unmarshal(asn1Data, p)
		if err != nil {
			return nil, err
		}
		// Reject obviously invalid key material up front.
		if p.N.Sign() <= 0 {
			return nil, errors.New("x509: RSA modulus is not a positive number")
		}
		if p.E <= 0 {
			return nil, errors.New("x509: RSA public exponent is not a positive number")
		}
		pub := &rsa.PublicKey{
			E: p.E,
			N: p.N,
		}
		return pub, nil
	case DSA:
		var p *big.Int
		_, err := asn1.Unmarshal(asn1Data, &p)
		if err != nil {
			return nil, err
		}
		// The DSA domain parameters travel in the AlgorithmIdentifier's
		// parameters field, separate from the key value itself.
		paramsData := keyData.Algorithm.Parameters.FullBytes
		params := new(dsaAlgorithmParameters)
		_, err = asn1.Unmarshal(paramsData, params)
		if err != nil {
			return nil, err
		}
		if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 {
			return nil, errors.New("zero or negative DSA parameter")
		}
		pub := &dsa.PublicKey{
			Parameters: dsa.Parameters{
				P: params.P,
				Q: params.Q,
				G: params.G,
			},
			Y: p,
		}
		return pub, nil
	case ECDSA:
		// The named curve is identified by an OID in the parameters field;
		// the key bits are an uncompressed point on that curve.
		paramsData := keyData.Algorithm.Parameters.FullBytes
		namedCurveOID := new(asn1.ObjectIdentifier)
		_, err := asn1.Unmarshal(paramsData, namedCurveOID)
		if err != nil {
			return nil, err
		}
		namedCurve := namedCurveFromOID(*namedCurveOID)
		if namedCurve == nil {
			return nil, errors.New("crypto/x509: unsupported elliptic curve")
		}
		x, y := elliptic.Unmarshal(namedCurve, asn1Data)
		if x == nil {
			return nil, errors.New("crypto/x509: failed to unmarshal elliptic curve point")
		}
		pub := &ecdsa.PublicKey{
			Curve: namedCurve,
			X:     x,
			Y:     y,
		}
		return pub, nil
	default:
		return nil, nil
	}
}
// parseCertificate converts an ASN.1-decoded certificate structure into the
// exported Certificate type: it records the raw DER components, parses the
// public key, names and validity, then walks the v3 extensions it
// understands. Unhandled extensions marked critical produce an
// UnhandledCriticalExtension error (with the partially-filled certificate
// also returned).
func parseCertificate(in *certificate) (*Certificate, error) {
	out := new(Certificate)
	out.Raw = in.Raw
	out.RawTBSCertificate = in.TBSCertificate.Raw
	out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw
	out.RawSubject = in.TBSCertificate.Subject.FullBytes
	out.RawIssuer = in.TBSCertificate.Issuer.FullBytes
	out.Signature = in.SignatureValue.RightAlign()
	out.SignatureAlgorithm =
		getSignatureAlgorithmFromOID(in.TBSCertificate.SignatureAlgorithm.Algorithm)
	out.PublicKeyAlgorithm =
		getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
	var err error
	out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
	if err != nil {
		return nil, err
	}
	if in.TBSCertificate.SerialNumber.Sign() < 0 {
		return nil, errors.New("negative serial number")
	}
	// The DER version field is zero-based; the exported one is 1/2/3.
	out.Version = in.TBSCertificate.Version + 1
	out.SerialNumber = in.TBSCertificate.SerialNumber
	var issuer, subject pkix.RDNSequence
	if _, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil {
		return nil, err
	}
	if _, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil {
		return nil, err
	}
	out.Issuer.FillFromRDNSequence(&issuer)
	out.Subject.FillFromRDNSequence(&subject)
	out.NotBefore = in.TBSCertificate.Validity.NotBefore
	out.NotAfter = in.TBSCertificate.Validity.NotAfter
	for _, e := range in.TBSCertificate.Extensions {
		// Only id-ce (2.5.29.x) extensions are interpreted. Handled cases
		// `continue`; anything that falls through is subject to the
		// critical-extension check at the bottom of the loop.
		if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 {
			switch e.Id[3] {
			case 15:
				// RFC 5280, 4.2.1.3
				var usageBits asn1.BitString
				_, err := asn1.Unmarshal(e.Value, &usageBits)
				if err == nil {
					var usage int
					for i := 0; i < 9; i++ {
						if usageBits.At(i) != 0 {
							usage |= 1 << uint(i)
						}
					}
					out.KeyUsage = KeyUsage(usage)
					continue
				}
			case 19:
				// RFC 5280, 4.2.1.9
				var constraints basicConstraints
				_, err := asn1.Unmarshal(e.Value, &constraints)
				if err == nil {
					out.BasicConstraintsValid = true
					out.IsCA = constraints.IsCA
					out.MaxPathLen = constraints.MaxPathLen
					continue
				}
			case 17:
				// RFC 5280, 4.2.1.6
				// SubjectAltName ::= GeneralNames
				//
				// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
				//
				// GeneralName ::= CHOICE {
				//      otherName                       [0]     OtherName,
				//      rfc822Name                      [1]     IA5String,
				//      dNSName                         [2]     IA5String,
				//      x400Address                     [3]     ORAddress,
				//      directoryName                   [4]     Name,
				//      ediPartyName                    [5]     EDIPartyName,
				//      uniformResourceIdentifier       [6]     IA5String,
				//      iPAddress                       [7]     OCTET STRING,
				//      registeredID                    [8]     OBJECT IDENTIFIER }
				var seq asn1.RawValue
				_, err := asn1.Unmarshal(e.Value, &seq)
				if err != nil {
					return nil, err
				}
				if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 {
					return nil, asn1.StructuralError{Msg: "bad SAN sequence"}
				}
				parsedName := false
				rest := seq.Bytes
				for len(rest) > 0 {
					var v asn1.RawValue
					rest, err = asn1.Unmarshal(rest, &v)
					if err != nil {
						return nil, err
					}
					// Only rfc822Name (1) and dNSName (2) choices are kept.
					switch v.Tag {
					case 1:
						out.EmailAddresses = append(out.EmailAddresses, string(v.Bytes))
						parsedName = true
					case 2:
						out.DNSNames = append(out.DNSNames, string(v.Bytes))
						parsedName = true
					}
				}
				if parsedName {
					continue
				}
				// If we didn't parse any of the names then we
				// fall through to the critical check below.
			case 30:
				// RFC 5280, 4.2.1.10
				// NameConstraints ::= SEQUENCE {
				//      permittedSubtrees       [0]     GeneralSubtrees OPTIONAL,
				//      excludedSubtrees        [1]     GeneralSubtrees OPTIONAL }
				//
				// GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
				//
				// GeneralSubtree ::= SEQUENCE {
				//      base                    GeneralName,
				//      minimum         [0]     BaseDistance DEFAULT 0,
				//      maximum         [1]     BaseDistance OPTIONAL }
				//
				// BaseDistance ::= INTEGER (0..MAX)
				var constraints nameConstraints
				_, err := asn1.Unmarshal(e.Value, &constraints)
				if err != nil {
					return nil, err
				}
				// Excluded subtrees are not supported; a critical extension
				// containing them must be rejected.
				if len(constraints.Excluded) > 0 && e.Critical {
					return out, UnhandledCriticalExtension{}
				}
				for _, subtree := range constraints.Permitted {
					if len(subtree.Name) == 0 {
						if e.Critical {
							return out, UnhandledCriticalExtension{}
						}
						continue
					}
					out.PermittedDNSDomains = append(out.PermittedDNSDomains, subtree.Name)
				}
				continue
			case 35:
				// RFC 5280, 4.2.1.1
				var a authKeyId
				_, err = asn1.Unmarshal(e.Value, &a)
				if err != nil {
					return nil, err
				}
				out.AuthorityKeyId = a.Id
				continue
			case 37:
				// RFC 5280, 4.2.1.12.  Extended Key Usage
				// id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 }
				//
				// ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
				//
				// KeyPurposeId ::= OBJECT IDENTIFIER
				var keyUsage []asn1.ObjectIdentifier
				_, err = asn1.Unmarshal(e.Value, &keyUsage)
				if err != nil {
					return nil, err
				}
				for _, u := range keyUsage {
					if extKeyUsage, ok := extKeyUsageFromOID(u); ok {
						out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage)
					} else {
						out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u)
					}
				}
				continue
			case 14:
				// RFC 5280, 4.2.1.2
				var keyid []byte
				_, err = asn1.Unmarshal(e.Value, &keyid)
				if err != nil {
					return nil, err
				}
				out.SubjectKeyId = keyid
				continue
			case 32:
				// RFC 5280 4.2.1.4: Certificate Policies
				var policies []policyInformation
				if _, err = asn1.Unmarshal(e.Value, &policies); err != nil {
					return nil, err
				}
				out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies))
				for i, policy := range policies {
					out.PolicyIdentifiers[i] = policy.Policy
				}
			}
		}
		if e.Critical {
			return out, UnhandledCriticalExtension{}
		}
	}
	return out, nil
}
// ParseCertificate parses a single certificate from the given ASN.1 DER data.
// Any bytes remaining after the certificate structure are treated as an
// error.
func ParseCertificate(asn1Data []byte) (*Certificate, error) {
	cert := new(certificate)
	rest, err := asn1.Unmarshal(asn1Data, cert)
	if err != nil {
		return nil, err
	}
	if len(rest) != 0 {
		return nil, asn1.SyntaxError{Msg: "trailing data"}
	}
	return parseCertificate(cert)
}
// ParseCertificates parses one or more certificates from the given ASN.1 DER
// data. The certificates must be concatenated with no intermediate padding.
func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
	// First pass: split the concatenated DER into raw certificate structures.
	var raw []*certificate
	rest := asn1Data
	for len(rest) > 0 {
		c := new(certificate)
		var err error
		rest, err = asn1.Unmarshal(rest, c)
		if err != nil {
			return nil, err
		}
		raw = append(raw, c)
	}
	// Second pass: interpret each raw structure.
	parsed := make([]*Certificate, len(raw))
	for i, rc := range raw {
		c, err := parseCertificate(rc)
		if err != nil {
			return nil, err
		}
		parsed[i] = c
	}
	return parsed, nil
}
// reverseBitsInAByte mirrors the bit order of in: bit 0 swaps with bit 7,
// bit 1 with bit 6, and so on. Used when encoding ASN.1 BIT STRINGs, whose
// first bit lives in the most significant position of the first byte.
func reverseBitsInAByte(in byte) byte {
	var out byte
	for i := 0; i < 8; i++ {
		out <<= 1
		out |= in & 1
		in >>= 1
	}
	return out
}
// OID components (all under the id-ce arc, 2.5.29) for the extensions
// emitted by buildExtensions and recognised by parseCertificate.
var (
	oidExtensionSubjectKeyId        = []int{2, 5, 29, 14}
	oidExtensionKeyUsage            = []int{2, 5, 29, 15}
	oidExtensionExtendedKeyUsage    = []int{2, 5, 29, 37}
	oidExtensionAuthorityKeyId      = []int{2, 5, 29, 35}
	oidExtensionBasicConstraints    = []int{2, 5, 29, 19}
	oidExtensionSubjectAltName      = []int{2, 5, 29, 17}
	oidExtensionCertificatePolicies = []int{2, 5, 29, 32}
	oidExtensionNameConstraints     = []int{2, 5, 29, 30}
)
// buildExtensions serialises the extension-related fields of template into a
// slice of pkix.Extension values ready for inclusion in a TBS certificate.
// Empty/zero template fields produce no extension; the returned slice is
// trimmed to the number actually emitted.
func buildExtensions(template *Certificate) (ret []pkix.Extension, err error) {
	ret = make([]pkix.Extension, 8 /* maximum number of elements. */)
	n := 0
	if template.KeyUsage != 0 {
		ret[n].Id = oidExtensionKeyUsage
		ret[n].Critical = true
		// ASN.1 BIT STRINGs put bit 0 in the high bit of the first byte,
		// so each usage byte must be bit-reversed before encoding.
		var a [2]byte
		a[0] = reverseBitsInAByte(byte(template.KeyUsage))
		a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8))
		l := 1
		if a[1] != 0 {
			l = 2
		}
		ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: a[0:l], BitLength: l * 8})
		if err != nil {
			return
		}
		n++
	}
	if len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0 {
		ret[n].Id = oidExtensionExtendedKeyUsage
		var oids []asn1.ObjectIdentifier
		for _, u := range template.ExtKeyUsage {
			if oid, ok := oidFromExtKeyUsage(u); ok {
				oids = append(oids, oid)
			} else {
				// Every ExtKeyUsage constant has an OID mapping; reaching
				// here indicates a caller passed an invalid constant.
				panic("internal error")
			}
		}
		oids = append(oids, template.UnknownExtKeyUsage...)
		ret[n].Value, err = asn1.Marshal(oids)
		if err != nil {
			return
		}
		n++
	}
	if template.BasicConstraintsValid {
		ret[n].Id = oidExtensionBasicConstraints
		ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, template.MaxPathLen})
		ret[n].Critical = true
		if err != nil {
			return
		}
		n++
	}
	if len(template.SubjectKeyId) > 0 {
		ret[n].Id = oidExtensionSubjectKeyId
		ret[n].Value, err = asn1.Marshal(template.SubjectKeyId)
		if err != nil {
			return
		}
		n++
	}
	if len(template.AuthorityKeyId) > 0 {
		ret[n].Id = oidExtensionAuthorityKeyId
		ret[n].Value, err = asn1.Marshal(authKeyId{template.AuthorityKeyId})
		if err != nil {
			return
		}
		n++
	}
	if len(template.DNSNames) > 0 {
		ret[n].Id = oidExtensionSubjectAltName
		// Each DNS name is encoded as the context-specific dNSName [2]
		// choice of GeneralName.
		rawValues := make([]asn1.RawValue, len(template.DNSNames))
		for i, name := range template.DNSNames {
			rawValues[i] = asn1.RawValue{Tag: 2, Class: 2, Bytes: []byte(name)}
		}
		ret[n].Value, err = asn1.Marshal(rawValues)
		if err != nil {
			return
		}
		n++
	}
	if len(template.PolicyIdentifiers) > 0 {
		ret[n].Id = oidExtensionCertificatePolicies
		policies := make([]policyInformation, len(template.PolicyIdentifiers))
		for i, policy := range template.PolicyIdentifiers {
			policies[i].Policy = policy
		}
		ret[n].Value, err = asn1.Marshal(policies)
		if err != nil {
			return
		}
		n++
	}
	if len(template.PermittedDNSDomains) > 0 {
		ret[n].Id = oidExtensionNameConstraints
		ret[n].Critical = template.PermittedDNSDomainsCritical
		var out nameConstraints
		out.Permitted = make([]generalSubtree, len(template.PermittedDNSDomains))
		for i, permitted := range template.PermittedDNSDomains {
			out.Permitted[i] = generalSubtree{Name: permitted}
		}
		ret[n].Value, err = asn1.Marshal(out)
		if err != nil {
			return
		}
		n++
	}
	// Adding another extension here? Remember to update the maximum number
	// of elements in the make() at the top of the function.
	return ret[0:n], nil
}
// subjectBytes returns the DER encoding of cert's subject, preferring the
// raw bytes captured at parse time over re-encoding the pkix.Name.
func subjectBytes(cert *Certificate) ([]byte, error) {
	if raw := cert.RawSubject; len(raw) > 0 {
		return raw, nil
	}
	return asn1.Marshal(cert.Subject.ToRDNSequence())
}
// CreateCertificate creates a new certificate based on a template. The
// following members of template are used: SerialNumber, Subject, NotBefore,
// NotAfter, KeyUsage, ExtKeyUsage, UnknownExtKeyUsage, BasicConstraintsValid,
// IsCA, MaxPathLen, SubjectKeyId, DNSNames, PermittedDNSDomainsCritical,
// PermittedDNSDomains.
//
// The certificate is signed by parent. If parent is equal to template then the
// certificate is self-signed. The parameter pub is the public key of the
// signee and priv is the private key of the signer.
//
// The returned slice is the certificate in DER encoding.
//
// The only supported key types are RSA and ECDSA (*rsa.PublicKey or
// *ecdsa.PublicKey for pub, *rsa.PrivateKey or *ecdsa.PublicKey for priv).
func CreateCertificate(rand io.Reader, template, parent *Certificate, pub interface{}, priv interface{}) (cert []byte, err error) {
	// Encode the subject public key and record its algorithm identifier.
	var publicKeyBytes []byte
	var publicKeyAlgorithm pkix.AlgorithmIdentifier
	switch pub := pub.(type) {
	case *rsa.PublicKey:
		publicKeyBytes, err = asn1.Marshal(rsaPublicKey{
			N: pub.N,
			E: pub.E,
		})
		publicKeyAlgorithm.Algorithm = oidPublicKeyRSA
	case *ecdsa.PublicKey:
		oid, ok := oidFromNamedCurve(pub.Curve)
		if !ok {
			return nil, errors.New("x509: unknown elliptic curve")
		}
		publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA
		var paramBytes []byte
		paramBytes, err = asn1.Marshal(oid)
		if err != nil {
			return
		}
		publicKeyAlgorithm.Parameters.FullBytes = paramBytes
		publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
	default:
		return nil, errors.New("x509: only RSA and ECDSA public keys supported")
	}
	// Choose the signature algorithm and hash from the signer's key type.
	var signatureAlgorithm pkix.AlgorithmIdentifier
	var hashFunc crypto.Hash
	switch priv := priv.(type) {
	case *rsa.PrivateKey:
		signatureAlgorithm.Algorithm = oidSignatureSHA1WithRSA
		hashFunc = crypto.SHA1
	case *ecdsa.PrivateKey:
		switch priv.Curve {
		case elliptic.P224(), elliptic.P256():
			hashFunc = crypto.SHA256
			signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA256
		case elliptic.P384():
			hashFunc = crypto.SHA384
			signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA384
		case elliptic.P521():
			hashFunc = crypto.SHA512
			signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA512
		default:
			return nil, errors.New("x509: unknown elliptic curve")
		}
	default:
		return nil, errors.New("x509: only RSA and ECDSA private keys supported")
	}
	// err may still carry a failure from the public-key Marshal above.
	if err != nil {
		return
	}
	// Link the child to its issuer via the authority key identifier.
	if len(parent.SubjectKeyId) > 0 {
		template.AuthorityKeyId = parent.SubjectKeyId
	}
	extensions, err := buildExtensions(template)
	if err != nil {
		return
	}
	asn1Issuer, err := subjectBytes(parent)
	if err != nil {
		return
	}
	asn1Subject, err := subjectBytes(template)
	if err != nil {
		return
	}
	encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes}
	c := tbsCertificate{
		Version:            2, // DER version 2 means X.509 v3.
		SerialNumber:       template.SerialNumber,
		SignatureAlgorithm: signatureAlgorithm,
		Issuer:             asn1.RawValue{FullBytes: asn1Issuer},
		Validity:           validity{template.NotBefore.UTC(), template.NotAfter.UTC()},
		Subject:            asn1.RawValue{FullBytes: asn1Subject},
		PublicKey:          publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey},
		Extensions:         extensions,
	}
	tbsCertContents, err := asn1.Marshal(c)
	if err != nil {
		return
	}
	c.Raw = tbsCertContents
	// Sign the hash of the DER-encoded TBS certificate.
	h := hashFunc.New()
	h.Write(tbsCertContents)
	digest := h.Sum(nil)
	var signature []byte
	switch priv := priv.(type) {
	case *rsa.PrivateKey:
		signature, err = rsa.SignPKCS1v15(rand, priv, hashFunc, digest)
	case *ecdsa.PrivateKey:
		var r, s *big.Int
		if r, s, err = ecdsa.Sign(rand, priv, digest); err == nil {
			signature, err = asn1.Marshal(ecdsaSignature{r, s})
		}
	default:
		// The key type was validated above, so this cannot happen.
		panic("internal error")
	}
	if err != nil {
		return
	}
	cert, err = asn1.Marshal(certificate{
		nil,
		c,
		signatureAlgorithm,
		asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
	})
	return
}
// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
// CRL; ParseCRL uses it to decide whether to PEM-decode before DER parsing.
var pemCRLPrefix = []byte("-----BEGIN X509 CRL")

// pemType is the type of a PEM encoded CRL.
var pemType = "X509 CRL"
// ParseCRL parses a CRL from the given bytes. It's often the case that PEM
// encoded CRLs will appear where they should be DER encoded, so this function
// will transparently handle PEM encoding as long as there isn't any leading
// garbage.
func ParseCRL(crlBytes []byte) (certList *pkix.CertificateList, err error) {
	if !bytes.HasPrefix(crlBytes, pemCRLPrefix) {
		// No PEM armour: treat the input as raw DER.
		return ParseDERCRL(crlBytes)
	}
	if block, _ := pem.Decode(crlBytes); block != nil && block.Type == pemType {
		crlBytes = block.Bytes
	}
	return ParseDERCRL(crlBytes)
}
// ParseDERCRL parses a DER encoded CRL from the given bytes.
func ParseDERCRL(derBytes []byte) (certList *pkix.CertificateList, err error) {
	list := new(pkix.CertificateList)
	if _, err = asn1.Unmarshal(derBytes, list); err != nil {
		return nil, err
	}
	return list, nil
}
// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
// contains the given list of revoked certificates.
//
// The only supported key type is RSA (*rsa.PrivateKey for priv).
func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
	rsaPriv, ok := priv.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("x509: non-RSA private keys not supported")
	}
	tbsCertList := pkix.TBSCertificateList{
		Version: 2, // DER version 2 means v3.
		Signature: pkix.AlgorithmIdentifier{
			Algorithm: oidSignatureSHA1WithRSA,
		},
		Issuer:              c.Subject.ToRDNSequence(),
		ThisUpdate:          now.UTC(),
		NextUpdate:          expiry.UTC(),
		RevokedCertificates: revokedCerts,
	}
	tbsCertListContents, err := asn1.Marshal(tbsCertList)
	if err != nil {
		return
	}
	// Sign the SHA-1 digest of the DER-encoded TBS list with PKCS#1 v1.5.
	h := sha1.New()
	h.Write(tbsCertListContents)
	digest := h.Sum(nil)
	signature, err := rsa.SignPKCS1v15(rand, rsaPriv, crypto.SHA1, digest)
	if err != nil {
		return
	}
	return asn1.Marshal(pkix.CertificateList{
		TBSCertList: tbsCertList,
		SignatureAlgorithm: pkix.AlgorithmIdentifier{
			Algorithm: oidSignatureSHA1WithRSA,
		},
		SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
	})
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sql provides a generic interface around SQL (or SQL-like)
// databases.
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"io"
"sync"
)
// drivers maps a registered driver name to its implementation. It is
// written by Register and read by Open.
// NOTE(review): access is unsynchronized; presumably all Register calls
// happen during init, before any Open — confirm.
var drivers = make(map[string]driver.Driver)
// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver driver.Driver) {
	if driver == nil {
		panic("sql: Register driver is nil")
	}
	_, exists := drivers[name]
	if exists {
		panic("sql: Register called twice for driver " + name)
	}
	drivers[name] = driver
}
// RawBytes is a byte slice that holds a reference to memory owned by
// the database itself. After a Scan into a RawBytes, the slice is only
// valid until the next call to Next, Scan, or Close.
// It lets callers avoid the defensive copy Rows.Scan makes for *[]byte.
type RawBytes []byte
// NullString represents a string that may be null.
// NullString implements the Scanner interface so
// it can be used as a scan destination:
//
//	var s NullString
//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
//	...
//	if s.Valid {
//	   // use s.String
//	} else {
//	   // NULL value
//	}
type NullString struct {
	String string
	Valid  bool // Valid is true if String is not NULL
}

// Scan implements the Scanner interface.
func (ns *NullString) Scan(value interface{}) error {
	if value == nil {
		// Database NULL: reset the value and mark it invalid.
		ns.String = ""
		ns.Valid = false
		return nil
	}
	ns.Valid = true
	return convertAssign(&ns.String, value)
}

// Value implements the driver Valuer interface.
func (ns NullString) Value() (driver.Value, error) {
	if ns.Valid {
		return ns.String, nil
	}
	return nil, nil
}
// NullInt64 represents an int64 that may be null.
// NullInt64 implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL
}

// Scan implements the Scanner interface.
func (n *NullInt64) Scan(value interface{}) error {
	if value == nil {
		// Database NULL: reset the value and mark it invalid.
		n.Int64 = 0
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Int64, value)
}

// Value implements the driver Valuer interface.
func (n NullInt64) Value() (driver.Value, error) {
	if n.Valid {
		return n.Int64, nil
	}
	return nil, nil
}
// NullFloat64 represents a float64 that may be null.
// NullFloat64 implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL
}

// Scan implements the Scanner interface.
func (n *NullFloat64) Scan(value interface{}) error {
	if value == nil {
		// Database NULL: reset the value and mark it invalid.
		n.Float64 = 0
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Float64, value)
}

// Value implements the driver Valuer interface.
func (n NullFloat64) Value() (driver.Value, error) {
	if n.Valid {
		return n.Float64, nil
	}
	return nil, nil
}
// NullBool represents a bool that may be null.
// NullBool implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL
}

// Scan implements the Scanner interface.
func (n *NullBool) Scan(value interface{}) error {
	if value == nil {
		// Database NULL: reset the value and mark it invalid.
		n.Bool = false
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Bool, value)
}

// Value implements the driver Valuer interface.
func (n NullBool) Value() (driver.Value, error) {
	if n.Valid {
		return n.Bool, nil
	}
	return nil, nil
}
// Scanner is an interface used by Scan.
// Destination types implement it to control their own conversion from
// driver values (see NullString for an example).
type Scanner interface {
	// Scan assigns a value from a database driver.
	//
	// The src value will be of one of the following restricted
	// set of types:
	//
	//    int64
	//    float64
	//    bool
	//    []byte
	//    string
	//    time.Time
	//    nil - for NULL values
	//
	// An error should be returned if the value can not be stored
	// without loss of information.
	Scan(src interface{}) error
}

// ErrNoRows is returned by Scan when QueryRow doesn't return a
// row. In such a case, QueryRow returns a placeholder *Row value that
// defers this error until a Scan.
var ErrNoRows = errors.New("sql: no rows in result set")
// DB is a database handle. It's safe for concurrent use by multiple
// goroutines.
//
// If the underlying database driver has the concept of a connection
// and per-connection session state, the sql package manages creating
// and freeing connections automatically, including maintaining a free
// pool of idle connections. If observing session state is required,
// either do not share a *DB between multiple concurrent goroutines or
// create and observe all state only within a transaction. Once
// DB.Begin is called, the returned Tx is bound to a single isolated
// connection. Once Tx.Commit or Tx.Rollback is called, that
// connection is returned to DB's idle connection pool.
type DB struct {
	driver driver.Driver // implementation selected by Open
	dsn    string        // data source name handed to driver.Open
	mu       sync.Mutex // protects freeConn and closed
	freeConn []driver.Conn
	closed   bool
}
// Open opens a database specified by its database driver name and a
// driver-specific data source name, usually consisting of at least a
// database name and connection information.
//
// Most users will open a database via a driver-specific connection
// helper function that returns a *DB.
func Open(driverName, dataSourceName string) (*DB, error) {
	driveri, ok := drivers[driverName]
	if !ok {
		return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
	}
	db := &DB{
		driver: driveri,
		dsn:    dataSourceName,
	}
	return db, nil
}
// Close closes the database, releasing any open resources.
// If several idle connections fail to close, the last failure is returned.
func (db *DB) Close() error {
	db.mu.Lock()
	defer db.mu.Unlock()
	var err error
	for _, c := range db.freeConn {
		if cerr := c.Close(); cerr != nil {
			err = cerr
		}
	}
	db.freeConn = nil
	db.closed = true
	return err
}
// maxIdleConns reports how many idle connections putConn may keep pooled.
func (db *DB) maxIdleConns() int {
	// TODO(bradfitz): ask driver, if supported, for its default preference
	// TODO(bradfitz): let users override?
	const defaultMaxIdleConns = 2
	return defaultMaxIdleConns
}
// conn returns a newly-opened or cached driver.Conn
func (db *DB) conn() (driver.Conn, error) {
	db.mu.Lock()
	if db.closed {
		db.mu.Unlock()
		return nil, errors.New("sql: database is closed")
	}
	// Pop the most recently returned idle connection, if any.
	if n := len(db.freeConn); n > 0 {
		conn := db.freeConn[n-1]
		db.freeConn = db.freeConn[:n-1]
		db.mu.Unlock()
		return conn, nil
	}
	// No idle connection: dial a fresh one outside the lock.
	db.mu.Unlock()
	return db.driver.Open(db.dsn)
}
// connIfFree removes wanted from the free pool and returns it if it is
// currently idle; ok reports whether it was found.
func (db *DB) connIfFree(wanted driver.Conn) (conn driver.Conn, ok bool) {
	db.mu.Lock()
	defer db.mu.Unlock()
	for i, c := range db.freeConn {
		if c != wanted {
			continue
		}
		// Found it: remove by swapping in the last element.
		last := len(db.freeConn) - 1
		db.freeConn[i] = db.freeConn[last]
		db.freeConn = db.freeConn[:last]
		return wanted, true
	}
	return nil, false
}
// putConnHook is a hook for testing.
var putConnHook func(*DB, driver.Conn)

// putConn adds a connection to the db's free pool.
// err is optionally the last error that occurred on this connection.
func (db *DB) putConn(c driver.Conn, err error) {
	if err == driver.ErrBadConn {
		// Don't reuse bad connections.
		// NOTE(review): the bad connection is dropped without an explicit
		// Close — presumably fine since the driver declared it broken;
		// confirm drivers don't rely on Close for cleanup.
		return
	}
	db.mu.Lock()
	if putConnHook != nil {
		putConnHook(db, c)
	}
	// Pool the connection if the database is still open and there is room.
	if n := len(db.freeConn); !db.closed && n < db.maxIdleConns() {
		db.freeConn = append(db.freeConn, c)
		db.mu.Unlock()
		return
	}
	// TODO: check to see if we need this Conn for any prepared
	// statements which are still active?
	db.mu.Unlock()
	c.Close()
}
// Prepare creates a prepared statement for later execution.
// Preparation is retried a bounded number of times when the driver
// reports a bad connection; any other outcome is final.
func (db *DB) Prepare(query string) (*Stmt, error) {
	var (
		stmt *Stmt
		err  error
	)
	for i := 0; i < 10; i++ {
		stmt, err = db.prepare(query)
		if err != driver.ErrBadConn {
			break
		}
	}
	return stmt, err
}
// prepare performs one attempt at preparing query on a pooled connection.
func (db *DB) prepare(query string) (stmt *Stmt, err error) {
	// TODO: check if db.driver supports an optional
	// driver.Preparer interface and call that instead, if so,
	// otherwise we make a prepared statement that's bound
	// to a connection, and to execute this prepared statement
	// we either need to use this connection (if it's free), else
	// get a new connection + re-prepare + execute on that one.
	ci, err := db.conn()
	if err != nil {
		return nil, err
	}
	// The deferred putConn reads the named return err, so the connection
	// is flagged appropriately if Prepare fails below.
	defer func() {
		db.putConn(ci, err)
	}()
	si, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	// Seed the statement's connection/statement list with this pair.
	stmt = &Stmt{
		db:    db,
		query: query,
		css:   []connStmt{{ci, si}},
	}
	return stmt, nil
}
// Exec executes a query without returning any rows.
// The attempt is retried a bounded number of times when the driver
// reports a bad connection.
func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
	var (
		res Result
		err error
	)
	for i := 0; i < 10; i++ {
		res, err = db.exec(query, args)
		if err != driver.ErrBadConn {
			break
		}
	}
	return res, err
}
// exec performs one attempt at executing query on a single connection,
// preferring the driver's Execer fast path when available.
func (db *DB) exec(query string, args []interface{}) (res Result, err error) {
	ci, err := db.conn()
	if err != nil {
		return nil, err
	}
	// The deferred putConn reads the named return err, so each explicit
	// `return nil, err` below reports the failure to the pool.
	defer func() {
		db.putConn(ci, err)
	}()
	// Fast path: the driver executes directly; driver.ErrSkip falls
	// through to the prepare/exec path below.
	if execer, ok := ci.(driver.Execer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			return nil, err
		}
		resi, err := execer.Exec(query, dargs)
		if err != driver.ErrSkip {
			if err != nil {
				return nil, err
			}
			return result{resi}, nil
		}
	}
	// Slow path: prepare, execute, and close a one-shot statement.
	sti, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer sti.Close()
	dargs, err := driverArgs(sti, args)
	if err != nil {
		return nil, err
	}
	resi, err := sti.Exec(dargs)
	if err != nil {
		return nil, err
	}
	return result{resi}, nil
}
// Query executes a query that returns rows, typically a SELECT.
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
	stmt, err := db.Prepare(query)
	if err != nil {
		return nil, err
	}
	rows, err := stmt.Query(args...)
	if err != nil {
		// The implicit statement is useless without rows; close it and
		// surface the query error.
		stmt.Close()
		return nil, err
	}
	// Tie the statement's lifetime to the rows: Rows.Close closes it too.
	rows.closeStmt = stmt
	return rows, nil
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (db *DB) QueryRow(query string, args ...interface{}) *Row {
	r := new(Row)
	r.rows, r.err = db.Query(query, args...)
	return r
}
// Begin starts a transaction. The isolation level is dependent on
// the driver. The attempt is retried a bounded number of times when
// the driver reports a bad connection.
func (db *DB) Begin() (*Tx, error) {
	var (
		tx  *Tx
		err error
	)
	for i := 0; i < 10; i++ {
		tx, err = db.begin()
		if err != driver.ErrBadConn {
			break
		}
	}
	return tx, err
}
// begin performs one attempt at starting a transaction on a pooled
// connection; on failure the connection is returned to the pool.
func (db *DB) begin() (tx *Tx, err error) {
	ci, err := db.conn()
	if err != nil {
		return nil, err
	}
	txi, err := ci.Begin()
	if err != nil {
		db.putConn(ci, err)
		return nil, err
	}
	tx = &Tx{db: db, ci: ci, txi: txi}
	return tx, nil
}
// Driver returns the database's underlying driver.
func (db *DB) Driver() driver.Driver {
	return db.driver
}

// Tx is an in-progress database transaction.
//
// A transaction must end with a call to Commit or Rollback.
//
// After a call to Commit or Rollback, all operations on the
// transaction fail with ErrTxDone.
type Tx struct {
	db *DB
	// ci is owned exclusively until Commit or Rollback, at which point
	// it's returned with putConn.
	ci  driver.Conn
	txi driver.Tx
	// cimu is held while somebody is using ci (between grabConn
	// and releaseConn)
	cimu sync.Mutex
	// done transitions from false to true exactly once, on Commit
	// or Rollback. once done, all operations fail with
	// ErrTxDone.
	done bool
}

// ErrTxDone is returned by any operation performed on a transaction
// that has already been committed or rolled back.
var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")
// close marks the transaction done and returns its connection to the
// pool. It must be called exactly once, by Commit or Rollback.
func (tx *Tx) close() {
	if tx.done {
		panic("double close") // internal error
	}
	tx.done = true
	tx.db.putConn(tx.ci, nil)
	tx.ci = nil
	tx.txi = nil
}

// grabConn returns the transaction's connection for exclusive use,
// holding cimu until releaseConn is called. It fails with ErrTxDone
// after Commit or Rollback.
func (tx *Tx) grabConn() (driver.Conn, error) {
	if tx.done {
		return nil, ErrTxDone
	}
	tx.cimu.Lock()
	return tx.ci, nil
}

// releaseConn ends the exclusive use acquired by grabConn.
func (tx *Tx) releaseConn() {
	tx.cimu.Unlock()
}

// Commit commits the transaction.
func (tx *Tx) Commit() error {
	if tx.done {
		return ErrTxDone
	}
	defer tx.close()
	return tx.txi.Commit()
}

// Rollback aborts the transaction.
func (tx *Tx) Rollback() error {
	if tx.done {
		return ErrTxDone
	}
	defer tx.close()
	return tx.txi.Rollback()
}
// Prepare creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and can no longer
// be used once the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see Tx.Stmt.
func (tx *Tx) Prepare(query string) (*Stmt, error) {
	// TODO(bradfitz): We could be more efficient here and either
	// provide a method to take an existing Stmt (created on
	// perhaps a different Conn), and re-create it on this Conn if
	// necessary. Or, better: keep a map in DB of query string to
	// Stmts, and have Stmt.Execute do the right thing and
	// re-prepare if the Conn in use doesn't have that prepared
	// statement. But we'll want to avoid caching the statement
	// in the case where we only call conn.Prepare implicitly
	// (such as in db.Exec or tx.Exec), but the caller package
	// can't be holding a reference to the returned statement.
	// Perhaps just looking at the reference count (by noting
	// Stmt.Close) would be enough. We might also want a finalizer
	// on Stmt to drop the reference count.
	ci, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	defer tx.releaseConn()
	si, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	// tx/txsi mark the statement as transaction-scoped: it always runs
	// on the transaction's single connection.
	stmt := &Stmt{
		db:    tx.db,
		tx:    tx,
		txsi:  si,
		query: query,
	}
	return stmt, nil
}
// Stmt returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//	updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//	...
//	tx, err := db.Begin()
//	...
//	res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
	// TODO(bradfitz): optimize this. Currently this re-prepares
	// each time.  This is fine for now to illustrate the API but
	// we should really cache already-prepared statements
	// per-Conn. See also the big comment in Tx.Prepare.
	if tx.db != stmt.db {
		return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
	}
	ci, err := tx.grabConn()
	if err != nil {
		return &Stmt{stickyErr: err}
	}
	defer tx.releaseConn()
	si, err := ci.Prepare(stmt.query)
	// Any prepare error is recorded as stickyErr and surfaced on first use.
	return &Stmt{
		db:        tx.db,
		tx:        tx,
		txsi:      si,
		query:     stmt.query,
		stickyErr: err,
	}
}
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
	ci, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	defer tx.releaseConn()
	// Fast path: let the driver execute directly if it implements
	// driver.Execer; driver.ErrSkip falls through to prepare/exec below.
	if execer, ok := ci.(driver.Execer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			return nil, err
		}
		resi, err := execer.Exec(query, dargs)
		if err == nil {
			return result{resi}, nil
		}
		if err != driver.ErrSkip {
			return nil, err
		}
	}
	// Slow path: prepare, execute, and close a one-shot statement on the
	// transaction's connection.
	sti, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer sti.Close()
	dargs, err := driverArgs(sti, args)
	if err != nil {
		return nil, err
	}
	resi, err := sti.Exec(dargs)
	if err != nil {
		return nil, err
	}
	return result{resi}, nil
}
// Query executes a query that returns rows, typically a SELECT.
func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
	if tx.done {
		return nil, ErrTxDone
	}
	stmt, err := tx.Prepare(query)
	if err != nil {
		return nil, err
	}
	rows, err := stmt.Query(args...)
	if err != nil {
		// The transaction-scoped statement is useless without rows.
		stmt.Close()
		return nil, err
	}
	// Rows.Close will also close the implicit statement.
	rows.closeStmt = stmt
	return rows, nil
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
	r := new(Row)
	r.rows, r.err = tx.Query(query, args...)
	return r
}
// connStmt is a prepared statement on a particular connection.
type connStmt struct {
	ci driver.Conn
	si driver.Stmt
}

// Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines.
type Stmt struct {
	// Immutable:
	db        *DB    // where we came from
	query     string // that created the Stmt
	stickyErr error  // if non-nil, this error is returned for all operations

	// If in a transaction, else both nil:
	tx   *Tx
	txsi driver.Stmt

	mu     sync.Mutex // protects the rest of the fields
	closed bool

	// css is a list of underlying driver statement interfaces
	// that are valid on particular connections. This is only
	// used if tx == nil and one is found that has idle
	// connections. If tx != nil, txsi is always used.
	css []connStmt
}
// Exec executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
func (s *Stmt) Exec(args ...interface{}) (Result, error) {
	_, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}
	defer releaseConn(nil)

	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want := si.NumInput(); want != -1 && len(args) != want {
		return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
	}

	// Convert the caller's arguments into driver values.
	dargs, err := driverArgs(si, args)
	if err != nil {
		return nil, err
	}

	resi, err := si.Exec(dargs)
	if err != nil {
		return nil, err
	}
	return result{resi}, nil
}
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
func (s *Stmt) connStmt() (ci driver.Conn, releaseConn func(error), si driver.Stmt, err error) {
	if err = s.stickyErr; err != nil {
		return
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		err = errors.New("sql: statement is closed")
		return
	}

	// In a transaction, we always use the connection that the
	// transaction was created on.
	if s.tx != nil {
		s.mu.Unlock()
		ci, err = s.tx.grabConn() // blocks, waiting for the connection.
		if err != nil {
			return
		}
		releaseConn = func(error) { s.tx.releaseConn() }
		return ci, releaseConn, s.txsi, nil
	}

	// Outside a transaction: look for an already-prepared statement on a
	// connection that is currently idle (connIfFree removes it from the
	// pool, giving us exclusive use).
	var cs connStmt
	match := false
	for _, v := range s.css {
		// TODO(bradfitz): lazily clean up entries in this
		// list with dead conns while enumerating
		if _, match = s.db.connIfFree(v.ci); match {
			cs = v
			break
		}
	}
	s.mu.Unlock()

	// Make a new conn if all are busy.
	// TODO(bradfitz): or wait for one? make configurable later?
	if !match {
		for i := 0; ; i++ {
			ci, err := s.db.conn()
			if err != nil {
				return nil, nil, nil, err
			}
			si, err := ci.Prepare(s.query)
			if err == driver.ErrBadConn && i < 10 {
				// NOTE(review): the bad connection is abandoned here
				// without Close before retrying — presumably acceptable
				// since the driver reported it broken; confirm.
				continue
			}
			if err != nil {
				return nil, nil, nil, err
			}
			// Remember the new connection/statement pair for reuse.
			s.mu.Lock()
			cs = connStmt{ci, si}
			s.css = append(s.css, cs)
			s.mu.Unlock()
			break
		}
	}

	conn := cs.ci
	releaseConn = func(err error) { s.db.putConn(conn, err) }
	return conn, releaseConn, cs.si, nil
}
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
	ci, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}

	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want := si.NumInput(); want != -1 && len(args) != want {
		err := fmt.Errorf("sql: statement expects %d inputs; got %d", want, len(args))
		// Bug fix: the connection acquired by connStmt must be released on
		// every exit path; previously this early return (and the one below)
		// leaked it from the free pool.
		releaseConn(err)
		return nil, err
	}

	dargs, err := driverArgs(si, args)
	if err != nil {
		releaseConn(err)
		return nil, err
	}

	rowsi, err := si.Query(dargs)
	if err != nil {
		releaseConn(err)
		return nil, err
	}
	// Note: ownership of ci passes to the *Rows, to be freed
	// with releaseConn when the Rows are closed.
	rows := &Rows{
		db:          s.db,
		ci:          ci,
		releaseConn: releaseConn,
		rowsi:       rowsi,
	}
	return rows, nil
}
// QueryRow executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned *Row, which is always non-nil.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
//
// Example usage:
//
//	var name string
//	err := nameByUseridStmt.QueryRow(id).Scan(&name)
func (s *Stmt) QueryRow(args ...interface{}) *Row {
	rows, err := s.Query(args...)
	if err == nil {
		return &Row{rows: rows}
	}
	return &Row{err: err}
}
// Close closes the statement.
func (s *Stmt) Close() error {
	if s.stickyErr != nil {
		// A construction-time error means there is nothing to release.
		return s.stickyErr
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return nil
	}
	s.closed = true

	if s.tx != nil {
		// Transaction-scoped: close the single underlying driver statement.
		s.txsi.Close()
	} else {
		// Close each per-connection statement whose connection is idle;
		// statements on busy connections are left alone.
		for _, v := range s.css {
			if ci, match := s.db.connIfFree(v.ci); match {
				v.si.Close()
				s.db.putConn(ci, nil)
			} else {
				// TODO(bradfitz): care that we can't close
				// this statement because the statement's
				// connection is in use?
			}
		}
	}
	return nil
}
// Rows is the result of a query. Its cursor starts before the first row
// of the result set. Use Next to advance through the rows:
//
//	rows, err := db.Query("SELECT ...")
//	...
//	for rows.Next() {
//	    var id int
//	    var name string
//	    err = rows.Scan(&id, &name)
//	    ...
//	}
//	err = rows.Err() // get any error encountered during iteration
//	...
type Rows struct {
	db          *DB
	ci          driver.Conn // owned; must call putconn when closed to release
	releaseConn func(error)
	rowsi       driver.Rows

	closed   bool
	lastcols []driver.Value // scan buffer reused by Next; read by Scan
	lasterr  error          // sticky; io.EOF means normal end of rows
	closeStmt *Stmt // if non-nil, statement to Close on close
}
// Next prepares the next result row for reading with the Scan method.
// It returns true on success, false if there is no next result row.
// Every call to Scan, even the first one, must be preceded by a call
// to Next.
func (rs *Rows) Next() bool {
	if rs.closed || rs.lasterr != nil {
		return false
	}
	if rs.lastcols == nil {
		// Lazily allocate the scan buffer, sized to the column count.
		rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns()))
	}
	rs.lasterr = rs.rowsi.Next(rs.lastcols)
	if rs.lasterr != nil {
		// Normal end of the result set closes the rows automatically.
		if rs.lasterr == io.EOF {
			rs.Close()
		}
		return false
	}
	return true
}
// Err returns the error, if any, that was encountered during iteration.
// A normal end of iteration (io.EOF) is not reported as an error.
func (rs *Rows) Err() error {
	if rs.lasterr != nil && rs.lasterr != io.EOF {
		return rs.lasterr
	}
	return nil
}
// Columns returns the column names.
// Columns returns an error if the rows are closed, or if the rows
// are from QueryRow and there was a deferred error.
func (rs *Rows) Columns() ([]string, error) {
	switch {
	case rs.closed:
		return nil, errors.New("sql: Rows are closed")
	case rs.rowsi == nil:
		return nil, errors.New("sql: no Rows available")
	}
	return rs.rowsi.Columns(), nil
}
// Scan copies the columns in the current row into the values pointed
// at by dest.
//
// If an argument has type *[]byte, Scan saves in that argument a copy
// of the corresponding data. The copy is owned by the caller and can
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
//
// If an argument has type *interface{}, Scan copies the value
// provided by the underlying driver without conversion. If the value
// is of type []byte, a copy is made and the caller owns the result.
func (rs *Rows) Scan(dest ...interface{}) error {
	if rs.closed {
		return errors.New("sql: Rows closed")
	}
	if rs.lasterr != nil {
		return rs.lasterr
	}
	if rs.lastcols == nil {
		return errors.New("sql: Scan called without calling Next")
	}
	if len(dest) != len(rs.lastcols) {
		return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
	}
	// Convert each driver value into its destination.
	for i, sv := range rs.lastcols {
		err := convertAssign(dest[i], sv)
		if err != nil {
			return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
		}
	}
	// Defensively clone every plain *[]byte destination, since the driver
	// may have handed back memory that is only valid until the next
	// Next/Scan/Close.
	for _, dp := range dest {
		b, ok := dp.(*[]byte)
		if !ok {
			continue
		}
		if *b == nil {
			// If the []byte is now nil (for a NULL value),
			// don't fall through to below which would
			// turn it into a non-nil 0-length byte slice
			continue
		}
		if _, ok = dp.(*RawBytes); ok {
			// NOTE(review): this check looks unreachable — a *RawBytes
			// fails the *[]byte type assertion above and already hits the
			// first `continue`. Confirm before removing.
			continue
		}
		clone := make([]byte, len(*b))
		copy(clone, *b)
		*b = clone
	}
	return nil
}
// Close closes the Rows, preventing further enumeration. If the
// end is encountered, the Rows are closed automatically. Close
// is idempotent.
func (rs *Rows) Close() error {
	if rs.closed {
		return nil
	}
	rs.closed = true
	err := rs.rowsi.Close()
	// Return the connection to the pool, reporting any close error.
	rs.releaseConn(err)
	if stmt := rs.closeStmt; stmt != nil {
		stmt.Close()
	}
	return err
}
// Row is the result of calling QueryRow to select a single row.
type Row struct {
	// One of these two will be non-nil:
	err  error // deferred error for easy chaining
	rows *Rows
}
// Scan copies the columns from the matched row into the values
// pointed at by dest. If more than one row matches the query,
// Scan uses the first row and discards the rest. If no row matches
// the query, Scan returns ErrNoRows.
func (r *Row) Scan(dest ...interface{}) error {
	if r.err != nil {
		return r.err
	}

	// The underlying Rows is closed before this function returns, so any
	// []byte the driver handed back may point into memory that is only
	// valid until the next Scan/Close. Rows.Scan defensively clones plain
	// *[]byte destinations, but a *RawBytes destination would expose raw
	// driver memory after close — reject it outright. (TODO upstream: let
	// drivers declare their returned slices stable so the copy can be
	// skipped.)
	for _, dp := range dest {
		if _, ok := dp.(*RawBytes); ok {
			return errors.New("sql: RawBytes isn't allowed on Row.Scan")
		}
	}

	defer r.rows.Close()
	if !r.rows.Next() {
		return ErrNoRows
	}
	return r.rows.Scan(dest...)
}
// A Result summarizes an executed SQL command.
type Result interface {
	// LastInsertId returns the integer generated by the database in
	// response to a command, typically from an auto-increment column.
	LastInsertId() (int64, error)
	// RowsAffected returns the number of rows affected by the command.
	RowsAffected() (int64, error)
}

// result adapts a driver.Result to the exported Result interface by
// embedding it.
type result struct {
	driver.Result
}
database/sql: check NumInput on Stmt.Exec
Fixes issue 3678.
R=golang-dev, bradfitz
CC=golang-dev
https://codereview.appspot.com/6460087
Committer: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sql provides a generic interface around SQL (or SQL-like)
// databases.
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"io"
"sync"
)
// NOTE(review): duplicate of the definitions earlier in this file — the
// file appears to concatenate two copies of database/sql. Deduplicate or
// keep both copies in sync.
var drivers = make(map[string]driver.Driver)

// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver driver.Driver) {
	if driver == nil {
		panic("sql: Register driver is nil")
	}
	if _, dup := drivers[name]; dup {
		panic("sql: Register called twice for driver " + name)
	}
	drivers[name] = driver
}
// NOTE(review): duplicate of the definitions earlier in this file — keep
// in sync or deduplicate.

// RawBytes is a byte slice that holds a reference to memory owned by
// the database itself. After a Scan into a RawBytes, the slice is only
// valid until the next call to Next, Scan, or Close.
type RawBytes []byte

// NullString represents a string that may be null.
// NullString implements the Scanner interface so
// it can be used as a scan destination:
//
//	var s NullString
//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
//	...
//	if s.Valid {
//	   // use s.String
//	} else {
//	   // NULL value
//	}
type NullString struct {
	String string
	Valid  bool // Valid is true if String is not NULL
}

// Scan implements the Scanner interface.
func (ns *NullString) Scan(value interface{}) error {
	if value == nil {
		ns.String, ns.Valid = "", false
		return nil
	}
	ns.Valid = true
	return convertAssign(&ns.String, value)
}

// Value implements the driver Valuer interface.
func (ns NullString) Value() (driver.Value, error) {
	if !ns.Valid {
		return nil, nil
	}
	return ns.String, nil
}
// NOTE(review): duplicates of the definitions earlier in this file — keep
// in sync or deduplicate.

// NullInt64 represents an int64 that may be null.
// NullInt64 implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL
}

// Scan implements the Scanner interface.
func (n *NullInt64) Scan(value interface{}) error {
	if value == nil {
		n.Int64, n.Valid = 0, false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Int64, value)
}

// Value implements the driver Valuer interface.
func (n NullInt64) Value() (driver.Value, error) {
	if !n.Valid {
		return nil, nil
	}
	return n.Int64, nil
}

// NullFloat64 represents a float64 that may be null.
// NullFloat64 implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL
}

// Scan implements the Scanner interface.
func (n *NullFloat64) Scan(value interface{}) error {
	if value == nil {
		n.Float64, n.Valid = 0, false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Float64, value)
}

// Value implements the driver Valuer interface.
func (n NullFloat64) Value() (driver.Value, error) {
	if !n.Valid {
		return nil, nil
	}
	return n.Float64, nil
}

// NullBool represents a bool that may be null.
// NullBool implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL
}

// Scan implements the Scanner interface.
func (n *NullBool) Scan(value interface{}) error {
	if value == nil {
		n.Bool, n.Valid = false, false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Bool, value)
}

// Value implements the driver Valuer interface.
func (n NullBool) Value() (driver.Value, error) {
	if !n.Valid {
		return nil, nil
	}
	return n.Bool, nil
}
// NOTE(review): duplicate of the definitions earlier in this file — keep
// in sync or deduplicate.

// Scanner is an interface used by Scan.
type Scanner interface {
	// Scan assigns a value from a database driver.
	//
	// The src value will be of one of the following restricted
	// set of types:
	//
	//    int64
	//    float64
	//    bool
	//    []byte
	//    string
	//    time.Time
	//    nil - for NULL values
	//
	// An error should be returned if the value can not be stored
	// without loss of information.
	Scan(src interface{}) error
}

// ErrNoRows is returned by Scan when QueryRow doesn't return a
// row. In such a case, QueryRow returns a placeholder *Row value that
// defers this error until a Scan.
var ErrNoRows = errors.New("sql: no rows in result set")
// NOTE(review): duplicate of the DB definition earlier in this file — keep
// in sync or deduplicate.

// DB is a database handle. It's safe for concurrent use by multiple
// goroutines.
//
// If the underlying database driver has the concept of a connection
// and per-connection session state, the sql package manages creating
// and freeing connections automatically, including maintaining a free
// pool of idle connections. If observing session state is required,
// either do not share a *DB between multiple concurrent goroutines or
// create and observe all state only within a transaction. Once
// DB.Begin is called, the returned Tx is bound to a single isolated
// connection. Once Tx.Commit or Tx.Rollback is called, that
// connection is returned to DB's idle connection pool.
type DB struct {
	driver driver.Driver
	dsn    string
	mu       sync.Mutex // protects freeConn and closed
	freeConn []driver.Conn
	closed   bool
}
// NOTE(review): duplicates of the definitions earlier in this file — keep
// in sync or deduplicate.

// Open opens a database specified by its database driver name and a
// driver-specific data source name, usually consisting of at least a
// database name and connection information.
//
// Most users will open a database via a driver-specific connection
// helper function that returns a *DB.
func Open(driverName, dataSourceName string) (*DB, error) {
	driver, ok := drivers[driverName]
	if !ok {
		return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
	}
	return &DB{driver: driver, dsn: dataSourceName}, nil
}

// Close closes the database, releasing any open resources.
func (db *DB) Close() error {
	db.mu.Lock()
	defer db.mu.Unlock()
	var err error
	// Close each idle connection, keeping the last failure (if any).
	for _, c := range db.freeConn {
		err1 := c.Close()
		if err1 != nil {
			err = err1
		}
	}
	db.freeConn = nil
	db.closed = true
	return err
}

// maxIdleConns reports how many idle connections putConn may keep pooled.
func (db *DB) maxIdleConns() int {
	const defaultMaxIdleConns = 2
	// TODO(bradfitz): ask driver, if supported, for its default preference
	// TODO(bradfitz): let users override?
	return defaultMaxIdleConns
}
// conn returns a newly-opened or cached driver.Conn
func (db *DB) conn() (driver.Conn, error) {
db.mu.Lock()
if db.closed {
db.mu.Unlock()
return nil, errors.New("sql: database is closed")
}
if n := len(db.freeConn); n > 0 {
conn := db.freeConn[n-1]
db.freeConn = db.freeConn[:n-1]
db.mu.Unlock()
return conn, nil
}
db.mu.Unlock()
return db.driver.Open(db.dsn)
}
// connIfFree removes wanted from the free pool and returns it, if it
// is currently idle. The pool is unordered, so the removed slot is
// backfilled with the last element.
func (db *DB) connIfFree(wanted driver.Conn) (conn driver.Conn, ok bool) {
	db.mu.Lock()
	defer db.mu.Unlock()
	for i, c := range db.freeConn {
		if c != wanted {
			continue
		}
		last := len(db.freeConn) - 1
		db.freeConn[i] = db.freeConn[last]
		db.freeConn = db.freeConn[:last]
		return wanted, true
	}
	return nil, false
}
// putConnHook is a hook for testing: when non-nil it is invoked (with
// db.mu held) each time a connection is offered back to the pool.
var putConnHook func(*DB, driver.Conn)
// putConn adds a connection to the db's free pool.
// err is optionally the last error that occurred on this connection.
// Bad connections (driver.ErrBadConn) are dropped; connections beyond
// maxIdleConns, or arriving after Close, are closed instead of pooled.
func (db *DB) putConn(c driver.Conn, err error) {
	if err == driver.ErrBadConn {
		// Don't reuse bad connections.
		return
	}
	db.mu.Lock()
	if putConnHook != nil {
		putConnHook(db, c)
	}
	if n := len(db.freeConn); !db.closed && n < db.maxIdleConns() {
		db.freeConn = append(db.freeConn, c)
		db.mu.Unlock()
		return
	}
	// TODO: check to see if we need this Conn for any prepared
	// statements which are still active?
	// Pool is full or DB closed: discard the connection (Close is
	// called outside the lock).
	db.mu.Unlock()
	c.Close()
}
// Prepare creates a prepared statement for later execution.
// It retries up to 10 times when the driver reports a bad connection,
// so a stale pooled connection does not surface to the caller.
func (db *DB) Prepare(query string) (*Stmt, error) {
	var (
		stmt *Stmt
		err  error
	)
	for attempt := 0; attempt < 10; attempt++ {
		stmt, err = db.prepare(query)
		if err != driver.ErrBadConn {
			break
		}
	}
	return stmt, err
}
// prepare makes a single attempt to prepare query on one connection.
// The connection is returned to the pool via the deferred putConn
// (which sees the named err, so bad connections are discarded), and
// the resulting *Stmt remembers the conn/stmt pair it was born on.
func (db *DB) prepare(query string) (stmt *Stmt, err error) {
	// TODO: check if db.driver supports an optional
	// driver.Preparer interface and call that instead, if so,
	// otherwise we make a prepared statement that's bound
	// to a connection, and to execute this prepared statement
	// we either need to use this connection (if it's free), else
	// get a new connection + re-prepare + execute on that one.
	ci, err := db.conn()
	if err != nil {
		return nil, err
	}
	defer func() {
		db.putConn(ci, err)
	}()
	si, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	stmt = &Stmt{
		db:    db,
		query: query,
		css:   []connStmt{{ci, si}},
	}
	return stmt, nil
}
// Exec executes a query without returning any rows.
// Like Prepare, it retries up to 10 times on driver.ErrBadConn so a
// stale pooled connection is transparently replaced.
func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
	var (
		res Result
		err error
	)
	for attempt := 0; attempt < 10; attempt++ {
		res, err = db.exec(query, args)
		if err != driver.ErrBadConn {
			break
		}
	}
	return res, err
}
// exec makes a single attempt to run query on one connection. It uses
// the driver's optional Execer fast path when implemented; a
// driver.ErrSkip result falls through to an implicit prepared
// statement. The deferred putConn observes the named return err (each
// return statement assigns it), so bad connections are not pooled.
func (db *DB) exec(query string, args []interface{}) (res Result, err error) {
	ci, err := db.conn()
	if err != nil {
		return nil, err
	}
	defer func() {
		db.putConn(ci, err)
	}()
	if execer, ok := ci.(driver.Execer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			return nil, err
		}
		resi, err := execer.Exec(query, dargs)
		if err != driver.ErrSkip {
			if err != nil {
				return nil, err
			}
			return result{resi}, nil
		}
		// driver.ErrSkip: Execer declined; use the prepare/exec path below.
	}
	sti, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer sti.Close()
	return resultFromStatement(sti, args...)
}
// Query executes a query that returns rows, typically a SELECT.
// The implicit statement is closed when the returned Rows are closed.
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
	s, err := db.Prepare(query)
	if err != nil {
		return nil, err
	}
	rs, qerr := s.Query(args...)
	if qerr != nil {
		s.Close()
		return nil, qerr
	}
	rs.closeStmt = s
	return rs, nil
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always return a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (db *DB) QueryRow(query string, args ...interface{}) *Row {
	r, err := db.Query(query, args...)
	return &Row{rows: r, err: err}
}
// Begin starts a transaction. The isolation level is dependent on
// the driver. Up to 10 retries are made when the driver reports a
// bad connection.
func (db *DB) Begin() (*Tx, error) {
	var (
		tx  *Tx
		err error
	)
	for attempt := 0; attempt < 10; attempt++ {
		tx, err = db.begin()
		if err != driver.ErrBadConn {
			break
		}
	}
	return tx, err
}
// begin makes a single attempt to start a transaction on one
// connection. On success the connection is owned exclusively by the
// returned Tx until Commit/Rollback; on failure it is returned to the
// pool with the error so bad connections are discarded.
func (db *DB) begin() (tx *Tx, err error) {
	ci, err := db.conn()
	if err != nil {
		return nil, err
	}
	txi, err := ci.Begin()
	if err != nil {
		db.putConn(ci, err)
		return nil, err
	}
	return &Tx{
		db:  db,
		ci:  ci,
		txi: txi,
	}, nil
}
// Driver returns the database's underlying driver.
func (db *DB) Driver() driver.Driver {
	d := db.driver
	return d
}
// Tx is an in-progress database transaction.
//
// A transaction must end with a call to Commit or Rollback.
//
// After a call to Commit or Rollback, all operations on the
// transaction fail with ErrTxDone.
type Tx struct {
	db *DB // pool the connection came from and returns to

	// ci is owned exclusively until Commit or Rollback, at which point
	// it's returned with putConn.
	ci  driver.Conn
	txi driver.Tx

	// cimu is held while somebody is using ci (between grabConn
	// and releaseConn)
	cimu sync.Mutex

	// done transitions from false to true exactly once, on Commit
	// or Rollback. once done, all operations fail with
	// ErrTxDone.
	done bool
}
// ErrTxDone is returned by any Tx operation performed after the
// transaction has been committed or rolled back.
var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")
// close marks the transaction done and returns its connection to the
// pool. Calling it twice is an internal error and panics.
func (tx *Tx) close() {
	if tx.done {
		panic("double close") // internal error
	}
	tx.done = true
	tx.db.putConn(tx.ci, nil)
	tx.ci = nil
	tx.txi = nil
}
// grabConn acquires exclusive use of the transaction's connection,
// blocking on cimu until any other user releases it via releaseConn.
func (tx *Tx) grabConn() (driver.Conn, error) {
	if tx.done {
		return nil, ErrTxDone
	}
	tx.cimu.Lock()
	return tx.ci, nil
}
// releaseConn gives up the exclusive use acquired by grabConn.
func (tx *Tx) releaseConn() {
	tx.cimu.Unlock()
}
// Commit commits the transaction. The deferred close runs after the
// driver commit, marking the Tx done and returning its connection to
// the pool.
func (tx *Tx) Commit() error {
	if tx.done {
		return ErrTxDone
	}
	defer tx.close()
	return tx.txi.Commit()
}
// Rollback aborts the transaction. As with Commit, the Tx is closed
// and its connection returned to the pool afterwards.
func (tx *Tx) Rollback() error {
	if tx.done {
		return ErrTxDone
	}
	defer tx.close()
	return tx.txi.Rollback()
}
// Prepare creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and can no longer
// be used once the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see Tx.Stmt.
func (tx *Tx) Prepare(query string) (*Stmt, error) {
	// TODO(bradfitz): We could be more efficient here and either
	// provide a method to take an existing Stmt (created on
	// perhaps a different Conn), and re-create it on this Conn if
	// necessary. Or, better: keep a map in DB of query string to
	// Stmts, and have Stmt.Execute do the right thing and
	// re-prepare if the Conn in use doesn't have that prepared
	// statement. But we'll want to avoid caching the statement
	// in the case where we only call conn.Prepare implicitly
	// (such as in db.Exec or tx.Exec), but the caller package
	// can't be holding a reference to the returned statement.
	// Perhaps just looking at the reference count (by noting
	// Stmt.Close) would be enough. We might also want a finalizer
	// on Stmt to drop the reference count.
	ci, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	defer tx.releaseConn()

	si, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}

	// tx is recorded on the Stmt so execution always routes through
	// the transaction's single connection (see Stmt.connStmt).
	stmt := &Stmt{
		db:    tx.db,
		tx:    tx,
		txsi:  si,
		query: query,
	}
	return stmt, nil
}
// Stmt returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//  updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//  ...
//  tx, err := db.Begin()
//  ...
//  res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
	// TODO(bradfitz): optimize this. Currently this re-prepares
	// each time. This is fine for now to illustrate the API but
	// we should really cache already-prepared statements
	// per-Conn. See also the big comment in Tx.Prepare.

	if tx.db != stmt.db {
		return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
	}
	ci, err := tx.grabConn()
	if err != nil {
		return &Stmt{stickyErr: err}
	}
	defer tx.releaseConn()
	// Errors (including a failed re-prepare) are deferred via
	// stickyErr rather than returned: every subsequent operation on
	// the returned Stmt reports them.
	si, err := ci.Prepare(stmt.query)
	return &Stmt{
		db:        tx.db,
		tx:        tx,
		txsi:      si,
		query:     stmt.query,
		stickyErr: err,
	}
}
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
// It uses the driver's optional Execer fast path when implemented;
// driver.ErrSkip falls through to an implicit prepared statement on
// the transaction's connection.
func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
	ci, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	defer tx.releaseConn()

	if execer, ok := ci.(driver.Execer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			return nil, err
		}
		resi, err := execer.Exec(query, dargs)
		if err == nil {
			return result{resi}, nil
		}
		if err != driver.ErrSkip {
			return nil, err
		}
		// driver.ErrSkip: Execer declined; prepare/exec below.
	}

	sti, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer sti.Close()

	return resultFromStatement(sti, args...)
}
// Query executes a query that returns rows, typically a SELECT.
// The implicit statement is closed when the returned Rows are closed.
func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
	if tx.done {
		return nil, ErrTxDone
	}
	s, err := tx.Prepare(query)
	if err != nil {
		return nil, err
	}
	rs, err := s.Query(args...)
	if err != nil {
		s.Close()
		return nil, err
	}
	rs.closeStmt = s
	return rs, err
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always return a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
	rs, err := tx.Query(query, args...)
	return &Row{rows: rs, err: err}
}
// connStmt is a prepared statement on a particular connection: a
// driver statement is only valid on the connection it was prepared on.
type connStmt struct {
	ci driver.Conn
	si driver.Stmt
}
// Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines.
type Stmt struct {
	// Immutable:
	db        *DB    // where we came from
	query     string // that created the Stmt
	stickyErr error  // if non-nil, this error is returned for all operations

	// If in a transaction, else both nil:
	tx   *Tx
	txsi driver.Stmt

	mu     sync.Mutex // protects the rest of the fields
	closed bool

	// css is a list of underlying driver statement interfaces
	// that are valid on particular connections. This is only
	// used if tx == nil and one is found that has idle
	// connections. If tx != nil, txsi is always used.
	css []connStmt
}
// Exec executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
func (s *Stmt) Exec(args ...interface{}) (Result, error) {
	_, release, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}
	defer release(nil)
	return resultFromStatement(si, args...)
}
// resultFromStatement executes si with args and wraps the driver
// result. A NumInput of -1 means the driver cannot count
// placeholders; the arity check is skipped and the driver reports
// any mismatch itself.
func resultFromStatement(si driver.Stmt, args ...interface{}) (Result, error) {
	if want := si.NumInput(); want != -1 && want != len(args) {
		return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
	}

	dargs, convErr := driverArgs(si, args)
	if convErr != nil {
		return nil, convErr
	}

	resi, execErr := si.Exec(dargs)
	if execErr != nil {
		return nil, execErr
	}
	return result{resi}, nil
}
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
// In a transaction it always routes to the transaction's single
// connection; otherwise it reuses a cached conn/stmt pair whose
// connection is idle, or prepares the query on a fresh connection
// (retrying up to 10 times on driver.ErrBadConn).
func (s *Stmt) connStmt() (ci driver.Conn, releaseConn func(error), si driver.Stmt, err error) {
	if err = s.stickyErr; err != nil {
		return
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		err = errors.New("sql: statement is closed")
		return
	}

	// In a transaction, we always use the connection that the
	// transaction was created on.
	if s.tx != nil {
		s.mu.Unlock()
		ci, err = s.tx.grabConn() // blocks, waiting for the connection.
		if err != nil {
			return
		}
		releaseConn = func(error) { s.tx.releaseConn() }
		return ci, releaseConn, s.txsi, nil
	}

	// Look for a cached statement whose connection is currently idle;
	// connIfFree atomically removes it from the free pool.
	var cs connStmt
	match := false
	for _, v := range s.css {
		// TODO(bradfitz): lazily clean up entries in this
		// list with dead conns while enumerating
		if _, match = s.db.connIfFree(v.ci); match {
			cs = v
			break
		}
	}
	s.mu.Unlock()

	// Make a new conn if all are busy.
	// TODO(bradfitz): or wait for one? make configurable later?
	if !match {
		for i := 0; ; i++ {
			ci, err := s.db.conn()
			if err != nil {
				return nil, nil, nil, err
			}
			si, err := ci.Prepare(s.query)
			if err == driver.ErrBadConn && i < 10 {
				continue
			}
			if err != nil {
				return nil, nil, nil, err
			}
			// Cache the new pair for future calls.
			s.mu.Lock()
			cs = connStmt{ci, si}
			s.css = append(s.css, cs)
			s.mu.Unlock()
			break
		}
	}

	conn := cs.ci
	releaseConn = func(err error) { s.db.putConn(conn, err) }
	return conn, releaseConn, cs.si, nil
}
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
	ci, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}

	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want := si.NumInput(); want != -1 && len(args) != want {
		// Fix: the original returned here without calling releaseConn,
		// leaking the connection acquired by connStmt.
		releaseConn(nil)
		return nil, fmt.Errorf("sql: statement expects %d inputs; got %d", want, len(args))
	}

	dargs, err := driverArgs(si, args)
	if err != nil {
		// Fix: likewise, release the connection on conversion failure.
		releaseConn(err)
		return nil, err
	}

	rowsi, err := si.Query(dargs)
	if err != nil {
		releaseConn(err)
		return nil, err
	}
	// Note: ownership of ci passes to the *Rows, to be freed
	// with releaseConn.
	rows := &Rows{
		db:          s.db,
		ci:          ci,
		releaseConn: releaseConn,
		rowsi:       rowsi,
	}
	return rows, nil
}
// QueryRow executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned *Row, which is always non-nil.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
//
// Example usage:
//
//  var name string
//  err := nameByUseridStmt.QueryRow(id).Scan(&name)
func (s *Stmt) QueryRow(args ...interface{}) *Row {
	rs, err := s.Query(args...)
	if err != nil {
		return &Row{err: err}
	}
	return &Row{rows: rs}
}
// Close closes the statement. It is a no-op on an already-closed
// statement. Transaction statements close only their single driver
// statement; pooled statements close each cached driver statement
// whose connection is currently idle (busy connections keep theirs).
func (s *Stmt) Close() error {
	if s.stickyErr != nil {
		return s.stickyErr
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return nil
	}
	s.closed = true

	if s.tx != nil {
		s.txsi.Close()
	} else {
		for _, v := range s.css {
			if ci, match := s.db.connIfFree(v.ci); match {
				v.si.Close()
				s.db.putConn(ci, nil)
			} else {
				// TODO(bradfitz): care that we can't close
				// this statement because the statement's
				// connection is in use?
			}
		}
	}
	return nil
}
// Rows is the result of a query. Its cursor starts before the first row
// of the result set. Use Next to advance through the rows:
//
//     rows, err := db.Query("SELECT ...")
//     ...
//     for rows.Next() {
//         var id int
//         var name string
//         err = rows.Scan(&id, &name)
//         ...
//     }
//     err = rows.Err() // get any error encountered during iteration
//     ...
type Rows struct {
	db          *DB
	ci          driver.Conn // owned; must call putconn when closed to release
	releaseConn func(error)
	rowsi       driver.Rows

	closed   bool
	lastcols []driver.Value // scratch row filled by rowsi.Next, read by Scan
	lasterr  error          // last error from rowsi.Next; io.EOF means normal end
	closeStmt *Stmt         // if non-nil, statement to Close on close
}
// Next prepares the next result row for reading with the Scan method.
// It returns true on success, false if there is no next result row.
// Every call to Scan, even the first one, must be preceded by a call
// to Next.
func (rs *Rows) Next() bool {
	if rs.closed || rs.lasterr != nil {
		return false
	}
	if rs.lastcols == nil {
		rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns()))
	}
	if rs.lasterr = rs.rowsi.Next(rs.lastcols); rs.lasterr != nil {
		// io.EOF is the normal end of the result set; close eagerly.
		if rs.lasterr == io.EOF {
			rs.Close()
		}
		return false
	}
	return true
}
// Err returns the error, if any, that was encountered during iteration.
// A normal end of iteration (io.EOF) is not reported as an error.
func (rs *Rows) Err() error {
	if err := rs.lasterr; err != io.EOF {
		return err
	}
	return nil
}
// Columns returns the column names.
// Columns returns an error if the rows are closed, or if the rows
// are from QueryRow and there was a deferred error.
func (rs *Rows) Columns() ([]string, error) {
	switch {
	case rs.closed:
		return nil, errors.New("sql: Rows are closed")
	case rs.rowsi == nil:
		return nil, errors.New("sql: no Rows available")
	}
	return rs.rowsi.Columns(), nil
}
// Scan copies the columns in the current row into the values pointed
// at by dest.
//
// If an argument has type *[]byte, Scan saves in that argument a copy
// of the corresponding data. The copy is owned by the caller and can
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
//
// If an argument has type *interface{}, Scan copies the value
// provided by the underlying driver without conversion. If the value
// is of type []byte, a copy is made and the caller owns the result.
func (rs *Rows) Scan(dest ...interface{}) error {
	if rs.closed {
		return errors.New("sql: Rows closed")
	}
	if rs.lasterr != nil {
		return rs.lasterr
	}
	if rs.lastcols == nil {
		return errors.New("sql: Scan called without calling Next")
	}
	if len(dest) != len(rs.lastcols) {
		return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
	}
	for i, sv := range rs.lastcols {
		err := convertAssign(dest[i], sv)
		if err != nil {
			return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
		}
	}
	// Defensively clone every plain *[]byte destination so the caller
	// owns memory independent of the driver's scratch buffers.
	for _, dp := range dest {
		b, ok := dp.(*[]byte)
		if !ok {
			// NOTE(review): *RawBytes does not satisfy *[]byte (distinct
			// pointer types), so RawBytes destinations are skipped here
			// and correctly not cloned.
			continue
		}
		if *b == nil {
			// If the []byte is now nil (for a NULL value),
			// don't fall through to below which would
			// turn it into a non-nil 0-length byte slice
			continue
		}
		// NOTE(review): this check is unreachable — dp already passed the
		// *[]byte assertion above, so it cannot also be *RawBytes.
		if _, ok = dp.(*RawBytes); ok {
			continue
		}
		clone := make([]byte, len(*b))
		copy(clone, *b)
		*b = clone
	}
	return nil
}
// Close closes the Rows, preventing further enumeration. If the
// end is encountered, the Rows are closed automatically. Close
// is idempotent.
func (rs *Rows) Close() error {
	if rs.closed {
		return nil
	}
	rs.closed = true
	err := rs.rowsi.Close()
	rs.releaseConn(err)
	if stmt := rs.closeStmt; stmt != nil {
		stmt.Close()
	}
	return err
}
// Row is the result of calling QueryRow to select a single row.
type Row struct {
	// One of these two will be non-nil:
	err  error // deferred error for easy chaining
	rows *Rows
}
// Scan copies the columns from the matched row into the values
// pointed at by dest. If more than one row matches the query,
// Scan uses the first row and discards the rest. If no row matches
// the query, Scan returns ErrNoRows.
func (r *Row) Scan(dest ...interface{}) error {
	if r.err != nil {
		return r.err
	}

	// TODO(bradfitz): for now we need to defensively clone all
	// []byte that the driver returned (not permitting
	// *RawBytes in Rows.Scan), since we're about to close
	// the Rows in our defer, when we return from this function.
	// the contract with the driver.Next(...) interface is that it
	// can return slices into read-only temporary memory that's
	// only valid until the next Scan/Close. But the TODO is that
	// for a lot of drivers, this copy will be unnecessary. We
	// should provide an optional interface for drivers to
	// implement to say, "don't worry, the []bytes that I return
	// from Next will not be modified again." (for instance, if
	// they were obtained from the network anyway) But for now we
	// don't care.
	for _, target := range dest {
		if _, isRaw := target.(*RawBytes); isRaw {
			return errors.New("sql: RawBytes isn't allowed on Row.Scan")
		}
	}

	defer r.rows.Close()
	if !r.rows.Next() {
		return ErrNoRows
	}
	return r.rows.Scan(dest...)
}
// A Result summarizes an executed SQL command.
type Result interface {
	// LastInsertId returns the integer generated by the database
	// in response to a command, typically from an auto-increment column.
	LastInsertId() (int64, error)
	// RowsAffected returns the number of rows affected by the command.
	RowsAffected() (int64, error)
}
// result adapts a driver.Result to the public Result interface by
// embedding (the method sets are identical).
type result struct {
	driver.Result
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A parser for Go source text. The input is a stream of lexical tokens
// provided via the Scanner interface. The output is an abstract syntax
// tree (AST) representing the Go source. The parser is invoked by calling
// Parse.
//
package parser
import (
"bytes";
"container/vector";
"fmt";
"go/ast";
"go/scanner";
"go/token";
"io";
"os";
"strings";
)
// Names to index the parser's commentIndex array.
const (
	leading = iota; // index of the leading comments entry
	trailing; // index of the trailing comments entry
)

// Initial value for parser.commentsIndex: -1 means "no comment group
// recorded" for either slot.
var noIndex = [2]int{-1, -1};
// The parser structure holds the parser's internal state.
// (NOTE(review): this is pre-Go-1 source — explicit semicolons and the
// old container/vector package — kept verbatim.)
type parser struct {
	scanner.ErrorVector;
	scanner scanner.Scanner;

	// Tracing/debugging
	mode uint; // parsing mode
	trace bool; // == (mode & Trace != 0)
	indent uint; // indentation used for tracing output

	// Comments
	comments vector.Vector; // list of collected, unassociated comment groups
	commentsIndex [2]int; // comments indexes of last leading/trailing comment group; or -1

	// Next token
	pos token.Position; // token position
	tok token.Token; // one token look-ahead
	lit []byte; // token literal

	// Non-syntactic parser control
	optSemi bool; // true if semicolon separator is optional in statement list
	exprLev int; // < 0: in control clause, >= 0: in expression
};
// noPos is used when there is no corresponding source position for a token
// (the zero token.Position).
var noPos token.Position;
// ----------------------------------------------------------------------------
// Parsing support

// printTrace prints one line of trace output: the current source
// position, dots proportional to the nesting depth, then the message.
func (p *parser) printTrace(a ...) {
	const dots =
		". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
		". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ";
	const n = uint(len(dots));
	fmt.Printf("%5d:%3d: ", p.pos.Line, p.pos.Column);
	i := 2*p.indent;
	// Emit whole repetitions of dots, then the remainder.
	for ; i > n; i -= n {
		fmt.Print(dots);
	}
	fmt.Print(dots[0 : i]);
	fmt.Println(a);
}
// trace begins a trace section: prints msg and an opening paren, then
// increases the indent. Used as: defer un(trace(p, "Name")).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(");
	p.indent++;
	return p;
}
// un (mnemonic for "untrace") closes a trace section opened by trace.
func un/*trace*/(p *parser) {
	p.indent--;
	p.printTrace(")");
}
// Advance to the next token.
// next0 performs the raw scanner advance (comments included) and, in
// trace mode, prints the token just consumed.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (p.pos.Line == 0) is not initialized (it
	// is token.ILLEGAL), so don't print it .
	if p.trace && p.pos.Line > 0 {
		s := p.tok.String();
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, string(p.lit));
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"");
		default:
			p.printTrace(s);
		}
	}

	p.pos, p.tok, p.lit = p.scanner.Scan();
	p.optSemi = false;
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.pos.Line;
	if p.lit[1] == '*' { // lit[1] == '*' distinguishes /*-style from //-style
		for _, b := range p.lit {
			if b == '\n' {
				endline++;
			}
		}
	}

	comment = &ast.Comment{p.pos, p.lit};
	p.next0();

	return comment, endline;
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return the line of which the last comment
// in the group ends. An empty line or non-comment token terminates
// a comment group.
//
func (p *parser) consumeCommentGroup() int {
	list := vector.New(0);
	endline := p.pos.Line;
	// Adjacent means: the next comment starts no later than one line
	// after the previous one ended.
	for p.tok == token.COMMENT && endline+1 >= p.pos.Line {
		var comment *ast.Comment;
		comment, endline = p.consumeComment();
		list.Push(comment);
	}

	// convert list
	group := make([]*ast.Comment, list.Len());
	for i := 0; i < list.Len(); i++ {
		group[i] = list.At(i).(*ast.Comment);
	}

	p.comments.Push(&ast.CommentGroup{group, endline});
	return endline;
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last leading
// and trailing comments.
//
// A leading comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A trailing comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Leading and trailing comments may be considered documentation
// that is stored in the AST. In that case they are removed from
// the parser's list of unassociated comments (via getComment).
//
func (p *parser) next() {
	p.commentsIndex = noIndex; // reset; any prior leading/trailing indices are stale
	line := p.pos.Line; // current line
	p.next0();

	if p.tok == token.COMMENT {
		if p.pos.Line == line {
			// The comment is on same line as previous token; it
			// cannot be a leading comment but may be a trailing
			// comment.
			endline := p.consumeCommentGroup();
			if p.pos.Line != endline {
				// The next token is on a different line, thus
				// the last comment group is a trailing comment.
				p.commentsIndex[trailing] = p.comments.Len() - 1;
			}
		}

		// consume successor comments, if any
		endline := -1;
		for p.tok == token.COMMENT {
			endline = p.consumeCommentGroup();
		}

		if endline >= 0 && endline+1 == p.pos.Line {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a leading comment.
			p.commentsIndex[leading] = p.comments.Len() - 1;
		}
	}
}
// Get leading/trailing comment group, if any.
// kind is one of the leading/trailing index constants. A returned
// group is removed from the unassociated list so it is consumed at
// most once.
func (p *parser) getComment(kind int) *ast.CommentGroup {
	i := p.commentsIndex[kind];
	if i >= 0 {
		// get comment and remove if from the list of unassociated comment groups
		c := p.comments.At(i).(*ast.CommentGroup);
		p.comments.Set(i, nil); // clear entry
		p.commentsIndex[kind] = -1; // comment was consumed
		return c;
	}
	return nil;
}
// errorExpected records an "expected X" error at pos, adding the
// offending token text when the error is at the current position.
func (p *parser) errorExpected(pos token.Position, msg string) {
	msg = "expected " + msg;
	if pos.Offset == p.pos.Offset {
		// the error happened at the current position;
		// make the error message more specific
		msg += ", found '" + p.tok.String() + "'";
		if p.tok.IsLiteral() {
			msg += " " + string(p.lit);
		}
	}
	p.Error(pos, msg);
}
// expect consumes the current token, reporting an error if it is not
// tok, and returns its position. It always advances so parsing makes
// progress even on errors.
func (p *parser) expect(tok token.Token) token.Position {
	pos := p.pos;
	if p.tok != tok {
		p.errorExpected(pos, "'" + tok.String() + "'");
	}
	p.next(); // make progress in any case
	return pos;
}
// ----------------------------------------------------------------------------
// Common productions

// Forward declarations for mutually recursive productions (pre-Go-1
// style body-less declarations; bodies appear later in the file).
func (p *parser) tryType() ast.Expr
func (p *parser) parseStringList(x *ast.StringLit) []*ast.StringLit
func (p *parser) parseExpression() ast.Expr
func (p *parser) parseStatement() ast.Stmt
func (p *parser) parseDeclaration(getSemi bool) (decl ast.Decl, gotSemi bool)
// parseIdent consumes and returns an identifier; on any other token it
// reports an error via expect and returns an empty placeholder ident.
func (p *parser) parseIdent() *ast.Ident {
	if p.tok == token.IDENT {
		x := &ast.Ident{p.pos, string(p.lit)};
		p.next();
		return x;
	}
	p.expect(token.IDENT); // use expect() error handling
	return &ast.Ident{p.pos, ""};
}
// parseIdentList parses a comma-separated identifier list. If x is
// non-nil it is used as the already-parsed first element.
func (p *parser) parseIdentList(x ast.Expr) []*ast.Ident {
	if p.trace {
		defer un(trace(p, "IdentList"));
	}

	list := vector.New(0);
	if x == nil {
		x = p.parseIdent();
	}
	list.Push(x);
	for p.tok == token.COMMA {
		p.next();
		list.Push(p.parseIdent());
	}

	// convert vector
	idents := make([]*ast.Ident, list.Len());
	for i := 0; i < list.Len(); i++ {
		idents[i] = list.At(i).(*ast.Ident);
	}

	return idents;
}
// parseExpressionList parses a comma-separated list of expressions
// (at least one).
func (p *parser) parseExpressionList() []ast.Expr {
	if p.trace {
		defer un(trace(p, "ExpressionList"));
	}

	list := vector.New(0);
	list.Push(p.parseExpression());
	for p.tok == token.COMMA {
		p.next();
		list.Push(p.parseExpression());
	}

	// convert list
	exprs := make([]ast.Expr, list.Len());
	for i := 0; i < list.Len(); i++ {
		exprs[i] = list.At(i).(ast.Expr);
	}

	return exprs;
}
// ----------------------------------------------------------------------------
// Types

// parseType parses a required type; if none is present it reports an
// error, advances, and returns a BadExpr placeholder.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"));
	}

	typ := p.tryType();

	if typ == nil {
		p.errorExpected(p.pos, "type");
		p.next(); // make progress
		return &ast.BadExpr{p.pos};
	}

	return typ;
}
// parseQualifiedIdent parses an identifier optionally qualified by a
// package selector (pkg.Name), returning a SelectorExpr in that case.
func (p *parser) parseQualifiedIdent() ast.Expr {
	if p.trace {
		defer un(trace(p, "QualifiedIdent"));
	}

	var x ast.Expr = p.parseIdent();
	if p.tok == token.PERIOD {
		// first identifier is a package identifier
		p.next();
		sel := p.parseIdent();
		x = &ast.SelectorExpr{x, sel};
	}
	return x;
}
// parseTypeName parses a type name, which is a (possibly qualified)
// identifier.
func (p *parser) parseTypeName() ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"));
	}

	return p.parseQualifiedIdent();
}
// parseArrayType parses an array or slice type. When ellipsisOk is
// set, "[...]T" is accepted (array literal context); an absent length
// yields a slice type.
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "ArrayType"));
	}

	lbrack := p.expect(token.LBRACK);
	var len ast.Expr;
	if ellipsisOk && p.tok == token.ELLIPSIS {
		len = &ast.Ellipsis{p.pos};
		p.next();
	} else if p.tok != token.RBRACK {
		len = p.parseExpression();
	}
	p.expect(token.RBRACK);
	elt := p.parseType();

	return &ast.ArrayType{lbrack, len, elt};
}
// makeIdentList converts a vector of expressions into a list of
// identifiers. Non-identifier entries are reported as errors and
// replaced by placeholder identifiers with an empty name.
func (p *parser) makeIdentList(list *vector.Vector) []*ast.Ident {
	idents := make([]*ast.Ident, list.Len());
	for i := 0; i < list.Len(); i++ {
		ident, isIdent := list.At(i).(*ast.Ident);
		if !isIdent {
			pos := list.At(i).(ast.Expr).Pos();
			p.errorExpected(pos, "identifier");
			// Fix: keep the placeholder in ident; the original stored it
			// in idents[i] and then unconditionally overwrote it with the
			// nil ident from the failed type assertion.
			ident = &ast.Ident{pos, ""};
		}
		idents[i] = ident;
	}
	return idents;
}
// parseFieldDecl parses one struct field declaration: either
// "IdentifierList Type [tag]" or an anonymous field "Type [tag]".
// Because identifiers look like type names, a list of types is parsed
// first and reinterpreted afterwards.
func (p *parser) parseFieldDecl() *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"));
	}

	doc := p.getComment(leading);

	// a list of identifiers looks like a list of type names
	list := vector.New(0);
	for {
		// TODO do not allow ()'s here
		list.Push(p.parseType());
		if p.tok == token.COMMA {
			p.next();
		} else {
			break;
		}
	}

	// if we had a list of identifiers, it must be followed by a type
	typ := p.tryType();

	// optional tag
	var tag []*ast.StringLit;
	if p.tok == token.STRING {
		tag = p.parseStringList(nil);
	}

	// analyze case
	var idents []*ast.Ident;
	if typ != nil {
		// IdentifierList Type
		idents = p.makeIdentList(list);
	} else {
		// Type (anonymous field)
		if list.Len() == 1 {
			// TODO check that this looks like a type
			typ = list.At(0).(ast.Expr);
		} else {
			p.errorExpected(p.pos, "anonymous field");
			typ = &ast.BadExpr{p.pos};
		}
	}

	return &ast.Field{doc, idents, typ, tag, nil};
}
// parseStructType parses "struct { FieldDecl; ... }". A missing body
// (no '{') leaves lbrace/rbrace/fields at their zero values.
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"));
	}

	pos := p.expect(token.STRUCT);
	var lbrace, rbrace token.Position;
	var fields []*ast.Field;
	if p.tok == token.LBRACE {
		lbrace = p.pos;
		p.next();

		list := vector.New(0);
		for p.tok != token.RBRACE && p.tok != token.EOF {
			f := p.parseFieldDecl();
			list.Push(f);
			if p.tok == token.SEMICOLON {
				p.next();
				f.Comment = p.getComment(trailing);
			} else {
				// no semicolon: last field before '}'
				f.Comment = p.getComment(trailing);
				break;
			}
		}

		rbrace = p.expect(token.RBRACE);
		p.optSemi = true;

		// convert vector
		fields = make([]*ast.Field, list.Len());
		for i := list.Len() - 1; i >= 0; i-- {
			fields[i] = list.At(i).(*ast.Field);
		}
	}

	return &ast.StructType{pos, lbrace, fields, rbrace};
}
// parsePointerType parses "*BaseType".
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"));
	}

	star := p.expect(token.MUL);
	base := p.parseType();

	return &ast.StarExpr{star, base};
}
// tryParameterType parses a parameter type, additionally accepting
// "..." when ellipsisOk is set (only valid as the final parameter).
// Returns nil if no type is present.
func (p *parser) tryParameterType(ellipsisOk bool) ast.Expr {
	if ellipsisOk && p.tok == token.ELLIPSIS {
		pos := p.pos;
		p.next();
		if p.tok != token.RPAREN {
			// "..." always must be at the very end of a parameter list
			p.Error(pos, "expected type, found '...'");
		}
		return &ast.Ellipsis{pos};
	}
	return p.tryType();
}
// parseParameterType is like tryParameterType but the type is
// required: a missing type is reported and replaced by a BadExpr.
func (p *parser) parseParameterType(ellipsisOk bool) ast.Expr {
	typ := p.tryParameterType(ellipsisOk);
	if typ == nil {
		p.errorExpected(p.pos, "type");
		p.next(); // make progress
		typ = &ast.BadExpr{p.pos};
	}
	return typ;
}
// parseParameterDecl parses one parameter group. Since identifiers
// look like type names, it returns the ambiguous comma-separated list
// plus the trailing type (nil if the list itself is the types).
func (p *parser) parseParameterDecl(ellipsisOk bool) (*vector.Vector, ast.Expr) {
	if p.trace {
		defer un(trace(p, "ParameterDecl"));
	}

	// a list of identifiers looks like a list of type names
	list := vector.New(0);
	for {
		// TODO do not allow ()'s here
		list.Push(p.parseParameterType(ellipsisOk));
		if p.tok == token.COMMA {
			p.next();
		} else {
			break;
		}
	}

	// if we had a list of identifiers, it must be followed by a type
	typ := p.tryParameterType(ellipsisOk);

	return list, typ;
}
// parseParameterList parses the comma-separated parameter groups of a
// signature, disambiguating between "ident list + type" groups and
// anonymous (type-only) parameters based on the first group.
func (p *parser) parseParameterList(ellipsisOk bool) []*ast.Field {
	if p.trace {
		defer un(trace(p, "ParameterList"));
	}

	list, typ := p.parseParameterDecl(ellipsisOk);
	if typ != nil {
		// IdentifierList Type
		idents := p.makeIdentList(list);
		list.Init(0); // reuse the vector for the converted fields
		list.Push(&ast.Field{nil, idents, typ, nil, nil});

		for p.tok == token.COMMA {
			p.next();
			idents := p.parseIdentList(nil);
			typ := p.parseParameterType(ellipsisOk);
			list.Push(&ast.Field{nil, idents, typ, nil, nil});
		}

	} else {
		// Type { "," Type } (anonymous parameters)
		// convert list of types into list of *Param
		for i := 0; i < list.Len(); i++ {
			list.Set(i, &ast.Field{Type: list.At(i).(ast.Expr)});
		}
	}

	// convert list
	params := make([]*ast.Field, list.Len());
	for i := 0; i < list.Len(); i++ {
		params[i] = list.At(i).(*ast.Field);
	}

	return params;
}
// parseParameters parses a parenthesized (possibly empty) parameter
// list.
func (p *parser) parseParameters(ellipsisOk bool) []*ast.Field {
	if p.trace {
		defer un(trace(p, "Parameters"));
	}

	var params []*ast.Field;
	p.expect(token.LPAREN);
	if p.tok != token.RPAREN {
		params = p.parseParameterList(ellipsisOk);
	}
	p.expect(token.RPAREN);

	return params;
}
// parseResult parses a function result: either a parenthesized list,
// a single bare type, or nothing. (The FUNC check avoids consuming a
// following function declaration as a result type.)
func (p *parser) parseResult() []*ast.Field {
	if p.trace {
		defer un(trace(p, "Result"));
	}

	var results []*ast.Field;
	if p.tok == token.LPAREN {
		results = p.parseParameters(false);
	} else if p.tok != token.FUNC {
		typ := p.tryType();
		if typ != nil {
			results = make([]*ast.Field, 1);
			results[0] = &ast.Field{Type: typ};
		}
	}

	return results;
}
func (p *parser) parseSignature() (params []*ast.Field, results []*ast.Field) {
if p.trace {
defer un(trace(p, "Signature"));
}
params = p.parseParameters(true);
results = p.parseResult();
return params, results;
}
func (p *parser) parseFuncType() *ast.FuncType {
if p.trace {
defer un(trace(p, "FuncType"));
}
pos := p.expect(token.FUNC);
params, results := p.parseSignature();
return &ast.FuncType{pos, params, results};
}
func (p *parser) parseMethodSpec() *ast.Field {
if p.trace {
defer un(trace(p, "MethodSpec"));
}
doc := p.getComment(leading);
var idents []*ast.Ident;
var typ ast.Expr;
x := p.parseQualifiedIdent();
if tmp, isIdent := x.(*ast.Ident); isIdent && (p.tok == token.COMMA || p.tok == token.LPAREN) {
// methods
idents = p.parseIdentList(x);
params, results := p.parseSignature();
typ = &ast.FuncType{noPos, params, results};
} else {
// embedded interface
typ = x;
}
return &ast.Field{doc, idents, typ, nil, nil};
}
func (p *parser) parseInterfaceType() *ast.InterfaceType {
if p.trace {
defer un(trace(p, "InterfaceType"));
}
pos := p.expect(token.INTERFACE);
var lbrace, rbrace token.Position;
var methods []*ast.Field;
if p.tok == token.LBRACE {
lbrace = p.pos;
p.next();
list := vector.New(0);
for p.tok == token.IDENT {
list.Push(p.parseMethodSpec());
if p.tok != token.RBRACE {
p.expect(token.SEMICOLON);
}
}
rbrace = p.expect(token.RBRACE);
p.optSemi = true;
// convert vector
methods = make([]*ast.Field, list.Len());
for i := list.Len() - 1; i >= 0; i-- {
methods[i] = list.At(i).(*ast.Field);
}
}
return &ast.InterfaceType{pos, lbrace, methods, rbrace};
}
func (p *parser) parseMapType() *ast.MapType {
if p.trace {
defer un(trace(p, "MapType"));
}
pos := p.expect(token.MAP);
p.expect(token.LBRACK);
key := p.parseType();
p.expect(token.RBRACK);
value := p.parseType();
return &ast.MapType{pos, key, value};
}
func (p *parser) parseChanType() *ast.ChanType {
if p.trace {
defer un(trace(p, "ChanType"));
}
pos := p.pos;
dir := ast.SEND | ast.RECV;
if p.tok == token.CHAN {
p.next();
if p.tok == token.ARROW {
p.next();
dir = ast.SEND;
}
} else {
p.expect(token.ARROW);
p.expect(token.CHAN);
dir = ast.RECV;
}
value := p.parseType();
return &ast.ChanType{pos, dir, value};
}
// tryRawType attempts to parse a type starting at the current token and
// returns nil - without reporting an error - if no type is present.
// ellipsisOk is forwarded to array type parsing so that [...]T forms
// are accepted where permitted.
func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
	switch p.tok {
	case token.IDENT: return p.parseTypeName();
	case token.LBRACK: return p.parseArrayType(ellipsisOk);
	case token.STRUCT: return p.parseStructType();
	case token.MUL: return p.parsePointerType();
	case token.FUNC: return p.parseFuncType();
	case token.INTERFACE: return p.parseInterfaceType();
	case token.MAP: return p.parseMapType();
	case token.CHAN, token.ARROW: return p.parseChanType();
	case token.LPAREN:
		// parenthesized type
		lparen := p.pos;
		p.next();
		typ := p.parseType();
		rparen := p.expect(token.RPAREN);
		return &ast.ParenExpr{lparen, typ, rparen};
	}
	// no type found
	return nil;
}
func (p *parser) tryType() ast.Expr {
return p.tryRawType(false);
}
// ----------------------------------------------------------------------------
// Blocks
// makeStmtList converts a vector of collected statements into an
// []ast.Stmt slice of the same length and order.
func makeStmtList(list *vector.Vector) []ast.Stmt {
	n := list.Len();
	stmts := make([]ast.Stmt, n);
	for i := n - 1; i >= 0; i-- {
		stmts[i] = list.At(i).(ast.Stmt);
	}
	return stmts;
}
// parseStatementList parses statements until a token that terminates
// the list (case, default, "}", or EOF). Statements are separated by
// semicolons; a statement that sets p.optSemi (e.g. one ending in "}")
// may omit the separator, which is tracked via expectSemi so the
// missing-semicolon error is reported at the start of the next
// statement rather than immediately.
func (p *parser) parseStatementList() []ast.Stmt {
	if p.trace {
		defer un(trace(p, "StatementList"));
	}
	list := vector.New(0);
	expectSemi := false;
	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		if expectSemi {
			p.expect(token.SEMICOLON);
			expectSemi = false;
		}
		list.Push(p.parseStatement());
		if p.tok == token.SEMICOLON {
			p.next();
		} else if p.optSemi {
			p.optSemi = false;	// "consume" optional semicolon
		} else {
			expectSemi = true;
		}
	}
	return makeStmtList(list);
}
func (p *parser) parseBlockStmt() *ast.BlockStmt {
if p.trace {
defer un(trace(p, "BlockStmt"));
}
lbrace := p.expect(token.LBRACE);
list := p.parseStatementList();
rbrace := p.expect(token.RBRACE);
p.optSemi = true;
return &ast.BlockStmt{lbrace, list, rbrace};
}
// ----------------------------------------------------------------------------
// Expressions
func (p *parser) parseStringList(x *ast.StringLit) []*ast.StringLit {
if p.trace {
defer un(trace(p, "StringList"));
}
list := vector.New(0);
if x != nil {
list.Push(x);
}
for p.tok == token.STRING {
list.Push(&ast.StringLit{p.pos, p.lit});
p.next();
}
// convert list
strings := make([]*ast.StringLit, list.Len());
for i := 0; i < list.Len(); i++ {
strings[i] = list.At(i).(*ast.StringLit);
}
return strings;
}
func (p *parser) parseFuncLit() ast.Expr {
if p.trace {
defer un(trace(p, "FuncLit"));
}
typ := p.parseFuncType();
p.exprLev++;
body := p.parseBlockStmt();
p.optSemi = false; // function body requires separating ";"
p.exprLev--;
return &ast.FuncLit{typ, body};
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
//
func (p *parser) parseOperand() ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"));
	}
	switch p.tok {
	case token.IDENT:
		return p.parseIdent();
	case token.INT:
		x := &ast.IntLit{p.pos, p.lit};
		p.next();
		return x;
	case token.FLOAT:
		x := &ast.FloatLit{p.pos, p.lit};
		p.next();
		return x;
	case token.CHAR:
		x := &ast.CharLit{p.pos, p.lit};
		p.next();
		return x;
	case token.STRING:
		x := &ast.StringLit{p.pos, p.lit};
		p.next();
		// adjacent string literals are collected into a StringList
		if p.tok == token.STRING {
			return &ast.StringList{p.parseStringList(x)};
		}
		return x;
	case token.LPAREN:
		// parenthesized expression
		lparen := p.pos;
		p.next();
		p.exprLev++;
		x := p.parseExpression();
		p.exprLev--;
		rparen := p.expect(token.RPAREN);
		return &ast.ParenExpr{lparen, x, rparen};
	case token.FUNC:
		return p.parseFuncLit();
	default:
		t := p.tryRawType(true);	// could be type for composite literal
		if t != nil {
			return t;
		}
	}
	// nothing matched: report, skip one token, return a placeholder
	p.errorExpected(p.pos, "operand");
	p.next();	// make progress
	return &ast.BadExpr{p.pos};
}
func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "SelectorOrTypeAssertion"));
}
p.expect(token.PERIOD);
if p.tok == token.IDENT {
// selector
sel := p.parseIdent();
return &ast.SelectorExpr{x, sel};
}
// type assertion
p.expect(token.LPAREN);
var typ ast.Expr;
if p.tok == token.TYPE {
// special case for type switch
typ = &ast.Ident{p.pos, "type"};
p.next();
} else {
typ = p.parseType();
}
p.expect(token.RPAREN);
return &ast.TypeAssertExpr{x, typ};
}
func (p *parser) parseIndex(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "Index"));
}
p.expect(token.LBRACK);
p.exprLev++;
begin := p.parseExpression();
var end ast.Expr;
if p.tok == token.COLON {
p.next();
end = p.parseExpression();
}
p.exprLev--;
p.expect(token.RBRACK);
return &ast.IndexExpr{x, begin, end};
}
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
if p.trace {
defer un(trace(p, "CallOrConversion"));
}
lparen := p.expect(token.LPAREN);
var args []ast.Expr;
if p.tok != token.RPAREN {
args = p.parseExpressionList();
}
rparen := p.expect(token.RPAREN);
return &ast.CallExpr{fun, lparen, args, rparen};
}
func (p *parser) parseElement() ast.Expr {
if p.trace {
defer un(trace(p, "Element"));
}
x := p.parseExpression();
if p.tok == token.COLON {
colon := p.pos;
p.next();
x = &ast.KeyValueExpr{x, colon, p.parseExpression()};
}
return x;
}
func (p *parser) parseElementList() []ast.Expr {
if p.trace {
defer un(trace(p, "ElementList"));
}
list := vector.New(0);
for p.tok != token.RBRACE && p.tok != token.EOF {
list.Push(p.parseElement());
if p.tok == token.COMMA {
p.next();
} else {
break;
}
}
// convert list
elts := make([]ast.Expr, list.Len());
for i := 0; i < list.Len(); i++ {
elts[i] = list.At(i).(ast.Expr);
}
return elts;
}
func (p *parser) parseCompositeLit(typ ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "CompositeLit"));
}
lbrace := p.expect(token.LBRACE);
var elts []ast.Expr;
if p.tok != token.RBRACE {
elts = p.parseElementList();
}
rbrace := p.expect(token.RBRACE);
return &ast.CompositeLit{typ, lbrace, elts, rbrace};
}
// TODO Consider different approach to checking syntax after parsing:
// Provide a arguments (set of flags) to parsing functions
// restricting what they are supposed to accept depending
// on context.
// checkExpr checks that x is an expression (and not a type).
// Valid expression nodes fall through their (empty) case; anything
// else is reported as an error and replaced by an ast.BadExpr.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	// TODO should provide predicate in AST nodes
	switch t := x.(type) {
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.IntLit:
	case *ast.FloatLit:
	case *ast.CharLit:
	case *ast.StringLit:
	case *ast.StringList:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.TypeAssertExpr:
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
		if t.Op == token.RANGE {
			// the range operator is only allowed at the top of a for statement
			p.errorExpected(x.Pos(), "expression");
			x = &ast.BadExpr{x.Pos()};
		}
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression");
		x = &ast.BadExpr{x.Pos()};
	}
	return x;
}
// isTypeName returns true iff x is type name.
func isTypeName(x ast.Expr) bool {
// TODO should provide predicate in AST nodes
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.ParenExpr: return isTypeName(t.X); // TODO should (TypeName) be illegal?
case *ast.SelectorExpr: return isTypeName(t.X);
default: return false; // all other nodes are not type names
}
return true;
}
// isCompositeLitType returns true iff x is a legal composite literal type.
func isCompositeLitType(x ast.Expr) bool {
// TODO should provide predicate in AST nodes
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.ParenExpr: return isCompositeLitType(t.X);
case *ast.SelectorExpr: return isTypeName(t.X);
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default: return false; // all other nodes are not legal composite literal types
}
return true;
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
// TODO should provide predicate in AST nodes
switch t := x.(type) {
case *ast.UnaryExpr:
if t.Op == token.RANGE {
// the range operator is only allowed at the top of a for statement
p.errorExpected(x.Pos(), "expression");
x = &ast.BadExpr{x.Pos()};
}
case *ast.ArrayType:
if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
p.Error(len.Pos(), "expected array length, found '...'");
x = &ast.BadExpr{x.Pos()};
}
}
// all other nodes are expressions or types
return x;
}
func (p *parser) parsePrimaryExpr() ast.Expr {
if p.trace {
defer un(trace(p, "PrimaryExpr"));
}
x := p.parseOperand();
L: for {
switch p.tok {
case token.PERIOD: x = p.parseSelectorOrTypeAssertion(p.checkExpr(x));
case token.LBRACK: x = p.parseIndex(p.checkExpr(x));
case token.LPAREN: x = p.parseCallOrConversion(p.checkExprOrType(x));
case token.LBRACE:
if isCompositeLitType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
x = p.parseCompositeLit(x);
} else {
break L;
}
default:
break L;
}
}
return p.checkExprOrType(x);
}
func (p *parser) parseUnaryExpr() ast.Expr {
if p.trace {
defer un(trace(p, "UnaryExpr"));
}
switch p.tok {
case token.ADD, token.SUB, token.NOT, token.XOR, token.ARROW, token.AND, token.RANGE:
pos, op := p.pos, p.tok;
p.next();
x := p.parseUnaryExpr();
return &ast.UnaryExpr{pos, op, p.checkExpr(x)};
case token.MUL:
// unary "*" expression or pointer type
pos := p.pos;
p.next();
x := p.parseUnaryExpr();
return &ast.StarExpr{pos, p.checkExprOrType(x)};
}
return p.parsePrimaryExpr();
}
// parseBinaryExpr parses a binary expression containing only operators
// of precedence >= prec1, using precedence climbing: operators at the
// same level are combined left-associatively, while higher-precedence
// subexpressions are handled by the recursive call with prec + 1.
func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"));
	}
	x := p.parseUnaryExpr();
	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
		for p.tok.Precedence() == prec {
			pos, op := p.pos, p.tok;
			p.next();
			y := p.parseBinaryExpr(prec + 1);
			x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)};
		}
	}
	return x;
}
func (p *parser) parseExpression() ast.Expr {
if p.trace {
defer un(trace(p, "Expression"));
}
return p.parseBinaryExpr(token.LowestPrec + 1);
}
// ----------------------------------------------------------------------------
// Statements
// parseSimpleStmt parses a simple statement starting with an expression
// list: a labeled statement (permitted only when labelOk is set), an
// assignment or short variable declaration, an increment/decrement, or
// a plain expression statement.
func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
	if p.trace {
		defer un(trace(p, "SimpleStmt"));
	}
	x := p.parseExpressionList();
	switch p.tok {
	case token.COLON:
		// labeled statement
		p.next();
		if labelOk && len(x) == 1 {
			if label, isIdent := x[0].(*ast.Ident); isIdent {
				return &ast.LabeledStmt{label, p.parseStatement()};
			}
		}
		p.Error(x[0].Pos(), "illegal label declaration");
		return &ast.BadStmt{x[0].Pos()};
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement
		pos, tok := p.pos, p.tok;
		p.next();
		y := p.parseExpressionList();
		// a mismatched arity is reported but parsing continues
		if len(x) > 1 && len(y) > 1 && len(x) != len(y) {
			p.Error(x[0].Pos(), "arity of lhs doesn't match rhs");
		}
		return &ast.AssignStmt{x, pos, tok, y};
	}
	if len(x) > 1 {
		p.Error(x[0].Pos(), "only one expression allowed");
		// continue with first expression
	}
	if p.tok == token.INC || p.tok == token.DEC {
		// increment or decrement
		s := &ast.IncDecStmt{x[0], p.tok};
		p.next();	// consume "++" or "--"
		return s;
	}
	// expression
	return &ast.ExprStmt{x[0]};
}
func (p *parser) parseCallExpr() *ast.CallExpr {
x := p.parseExpression();
if call, isCall := x.(*ast.CallExpr); isCall {
return call;
}
p.errorExpected(x.Pos(), "function/method call");
return nil;
}
func (p *parser) parseGoStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "GoStmt"));
}
pos := p.expect(token.GO);
call := p.parseCallExpr();
if call != nil {
return &ast.GoStmt{pos, call};
}
return &ast.BadStmt{pos};
}
func (p *parser) parseDeferStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "DeferStmt"));
}
pos := p.expect(token.DEFER);
call := p.parseCallExpr();
if call != nil {
return &ast.DeferStmt{pos, call};
}
return &ast.BadStmt{pos};
}
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
if p.trace {
defer un(trace(p, "ReturnStmt"));
}
pos := p.pos;
p.expect(token.RETURN);
var x []ast.Expr;
if p.tok != token.SEMICOLON && p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE {
x = p.parseExpressionList();
}
return &ast.ReturnStmt{pos, x};
}
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
if p.trace {
defer un(trace(p, "BranchStmt"));
}
s := &ast.BranchStmt{p.pos, tok, nil};
p.expect(tok);
if tok != token.FALLTHROUGH && p.tok == token.IDENT {
s.Label = p.parseIdent();
}
return s;
}
// isExpr reports whether s is an expression statement; a nil statement
// also counts as an expression (this is what parseSwitchStmt relies on
// to distinguish expression switches from type switches).
func (p *parser) isExpr(s ast.Stmt) bool {
	if s == nil {
		return true;
	}
	// the blank identifier avoids declaring an unused variable
	_, isExpr := s.(*ast.ExprStmt);
	return isExpr;
}
// makeExpr converts a simple statement into the expression it wraps.
// A nil statement yields nil; a non-expression statement is reported
// as an error and replaced by an ast.BadExpr.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	if s == nil {
		return nil;
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return p.checkExpr(es.X);
	}
	p.Error(s.Pos(), "expected condition, found simple statement");
	return &ast.BadExpr{s.Pos()};
}
// parseControlClause parses the header of an if/switch/for statement:
// up to an init statement (s1), a condition/expression statement (s2),
// and - for for statements only - a post statement (s3). Expression
// parsing happens with exprLev < 0, which marks "in control clause"
// (see the parser struct) so composite literals are not consumed here.
func (p *parser) parseControlClause(isForStmt bool) (s1, s2, s3 ast.Stmt) {
	if p.tok != token.LBRACE {
		prevLev := p.exprLev;
		p.exprLev = -1;
		if p.tok != token.SEMICOLON {
			s1 = p.parseSimpleStmt(false);
		}
		if p.tok == token.SEMICOLON {
			p.next();
			if p.tok != token.LBRACE && p.tok != token.SEMICOLON {
				s2 = p.parseSimpleStmt(false);
			}
			if isForStmt {
				// for statements have a 3rd section
				p.expect(token.SEMICOLON);
				if p.tok != token.LBRACE {
					s3 = p.parseSimpleStmt(false);
				}
			}
		} else {
			// no semicolon followed: the single statement parsed into s1
			// is really the condition, so shift it into the s2 slot
			s1, s2 = nil, s1;
		}
		p.exprLev = prevLev;
	}
	return s1, s2, s3;
}
func (p *parser) parseIfStmt() *ast.IfStmt {
if p.trace {
defer un(trace(p, "IfStmt"));
}
pos := p.expect(token.IF);
s1, s2, dummy := p.parseControlClause(false);
body := p.parseBlockStmt();
var else_ ast.Stmt;
if p.tok == token.ELSE {
p.next();
else_ = p.parseStatement();
}
return &ast.IfStmt{pos, s1, p.makeExpr(s2), body, else_};
}
func (p *parser) parseCaseClause() *ast.CaseClause {
if p.trace {
defer un(trace(p, "CaseClause"));
}
// SwitchCase
pos := p.pos;
var x []ast.Expr;
if p.tok == token.CASE {
p.next();
x = p.parseExpressionList();
} else {
p.expect(token.DEFAULT);
}
colon := p.expect(token.COLON);
body := p.parseStatementList();
return &ast.CaseClause{pos, x, colon, body};
}
func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause {
if p.trace {
defer un(trace(p, "TypeCaseClause"));
}
// TypeSwitchCase
pos := p.pos;
var typ ast.Expr;
if p.tok == token.CASE {
p.next();
typ = p.parseType();
} else {
p.expect(token.DEFAULT);
}
colon := p.expect(token.COLON);
body := p.parseStatementList();
return &ast.TypeCaseClause{pos, typ, colon, body};
}
func (p *parser) parseSwitchStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "SwitchStmt"));
}
pos := p.expect(token.SWITCH);
s1, s2, dummy := p.parseControlClause(false);
if p.isExpr(s2) {
// expression switch
lbrace := p.expect(token.LBRACE);
cases := vector.New(0);
for p.tok == token.CASE || p.tok == token.DEFAULT {
cases.Push(p.parseCaseClause());
}
rbrace := p.expect(token.RBRACE);
p.optSemi = true;
body := &ast.BlockStmt{lbrace, makeStmtList(cases), rbrace};
return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body};
}
// type switch
// TODO do all the checks!
lbrace := p.expect(token.LBRACE);
cases := vector.New(0);
for p.tok == token.CASE || p.tok == token.DEFAULT {
cases.Push(p.parseTypeCaseClause());
}
rbrace := p.expect(token.RBRACE);
p.optSemi = true;
body := &ast.BlockStmt{lbrace, makeStmtList(cases), rbrace};
return &ast.TypeSwitchStmt{pos, s1, s2, body};
}
func (p *parser) parseCommClause() *ast.CommClause {
if p.trace {
defer un(trace(p, "CommClause"));
}
// CommCase
pos := p.pos;
var tok token.Token;
var lhs, rhs ast.Expr;
if p.tok == token.CASE {
p.next();
if p.tok == token.ARROW {
// RecvExpr without assignment
rhs = p.parseExpression();
} else {
// SendExpr or RecvExpr
rhs = p.parseExpression();
if p.tok == token.ASSIGN || p.tok == token.DEFINE {
// RecvExpr with assignment
tok = p.tok;
p.next();
lhs = rhs;
if p.tok == token.ARROW {
rhs = p.parseExpression();
} else {
p.expect(token.ARROW); // use expect() error handling
}
}
// else SendExpr
}
} else {
p.expect(token.DEFAULT);
}
colon := p.expect(token.COLON);
body := p.parseStatementList();
return &ast.CommClause{pos, tok, lhs, rhs, colon, body};
}
func (p *parser) parseSelectStmt() *ast.SelectStmt {
if p.trace {
defer un(trace(p, "SelectStmt"));
}
pos := p.expect(token.SELECT);
lbrace := p.expect(token.LBRACE);
cases := vector.New(0);
for p.tok == token.CASE || p.tok == token.DEFAULT {
cases.Push(p.parseCommClause());
}
rbrace := p.expect(token.RBRACE);
p.optSemi = true;
body := &ast.BlockStmt{lbrace, makeStmtList(cases), rbrace};
return &ast.SelectStmt{pos, body};
}
// parseForStmt parses a for statement. After parsing the control clause
// and body, it decides between an ordinary for statement and one with a
// range clause: the latter is detected when the condition slot (s2) is
// an assignment ("=" or ":=") whose single rhs is a unary RANGE
// expression, and is converted into an ast.RangeStmt.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"));
	}
	pos := p.expect(token.FOR);
	s1, s2, s3 := p.parseControlClause(true);
	body := p.parseBlockStmt();
	if as, isAssign := s2.(*ast.AssignStmt); isAssign {
		// possibly a for statement with a range clause; check assignment operator
		if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
			p.errorExpected(as.TokPos, "'=' or ':='");
			return &ast.BadStmt{pos};
		}
		// check lhs
		var key, value ast.Expr;
		switch len(as.Lhs) {
		case 2:
			value = as.Lhs[1];
			fallthrough;
		case 1:
			key = as.Lhs[0];
		default:
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions");
			return &ast.BadStmt{pos};
		}
		// check rhs
		if len(as.Rhs) != 1 {
			p.errorExpected(as.Rhs[0].Pos(), "1 expressions");
			return &ast.BadStmt{pos};
		}
		if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
			// rhs is range expression; check lhs
			return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
		} else {
			p.errorExpected(s2.Pos(), "range clause");
			return &ast.BadStmt{pos};
		}
	} else {
		// regular for statement
		return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body};
	}
	// both branches above return; presumably this satisfies the
	// compiler's terminating-statement analysis - TODO confirm
	panic();	// unreachable
	return nil;
}
func (p *parser) parseStatement() ast.Stmt {
if p.trace {
defer un(trace(p, "Statement"));
}
switch p.tok {
case token.CONST, token.TYPE, token.VAR:
decl, _ := p.parseDeclaration(false); // do not consume trailing semicolon
return &ast.DeclStmt{decl};
case
// tokens that may start a top-level expression
token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
token.LBRACK, token.STRUCT, // composite type
token.MUL, token.AND, token.ARROW: // unary operators
return p.parseSimpleStmt(true);
case token.GO:
return p.parseGoStmt();
case token.DEFER:
return p.parseDeferStmt();
case token.RETURN:
return p.parseReturnStmt();
case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
return p.parseBranchStmt(p.tok);
case token.LBRACE:
return p.parseBlockStmt();
case token.IF:
return p.parseIfStmt();
case token.SWITCH:
return p.parseSwitchStmt();
case token.SELECT:
return p.parseSelectStmt();
case token.FOR:
return p.parseForStmt();
case token.SEMICOLON, token.RBRACE:
// don't consume the ";", it is the separator following the empty statement
return &ast.EmptyStmt{p.pos};
}
// no statement found
p.errorExpected(p.pos, "statement");
p.next(); // make progress
return &ast.BadStmt{p.pos};
}
// ----------------------------------------------------------------------------
// Declarations
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool)
// Consume semicolon if there is one and getSemi is set, and get any trailing comment.
// Return the comment if any and indicate if a semicolon was consumed.
//
func (p *parser) parseComment(getSemi bool) (comment *ast.CommentGroup, gotSemi bool) {
if getSemi && p.tok == token.SEMICOLON {
p.next();
gotSemi = true;
}
return p.getComment(trailing), gotSemi;
}
func parseImportSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) {
if p.trace {
defer un(trace(p, "ImportSpec"));
}
var ident *ast.Ident;
if p.tok == token.PERIOD {
ident = &ast.Ident{p.pos, "."};
p.next();
} else if p.tok == token.IDENT {
ident = p.parseIdent();
}
var path []*ast.StringLit;
if p.tok == token.STRING {
path = p.parseStringList(nil);
} else {
p.expect(token.STRING); // use expect() error handling
}
comment, gotSemi := p.parseComment(getSemi);
return &ast.ImportSpec{doc, ident, path, comment}, gotSemi;
}
func parseConstSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) {
if p.trace {
defer un(trace(p, "ConstSpec"));
}
idents := p.parseIdentList(nil);
typ := p.tryType();
var values []ast.Expr;
if typ != nil || p.tok == token.ASSIGN {
p.expect(token.ASSIGN);
values = p.parseExpressionList();
}
comment, gotSemi := p.parseComment(getSemi);
return &ast.ValueSpec{doc, idents, typ, values, comment}, gotSemi;
}
func parseTypeSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) {
if p.trace {
defer un(trace(p, "TypeSpec"));
}
ident := p.parseIdent();
typ := p.parseType();
comment, gotSemi := p.parseComment(getSemi);
return &ast.TypeSpec{doc, ident, typ, comment}, gotSemi;
}
func parseVarSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) {
if p.trace {
defer un(trace(p, "VarSpec"));
}
idents := p.parseIdentList(nil);
typ := p.tryType();
var values []ast.Expr;
if typ == nil || p.tok == token.ASSIGN {
p.expect(token.ASSIGN);
values = p.parseExpressionList();
}
comment, gotSemi := p.parseComment(getSemi);
return &ast.ValueSpec{doc, idents, typ, values, comment}, gotSemi;
}
// parseGenDecl parses a general declaration introduced by keyword
// (const, type, var, or import), using f to parse each individual spec.
// The declaration is either a single spec or a parenthesized,
// semicolon-separated list of specs. getSemi asks the parser to also
// consume a trailing semicolon; gotSemi reports whether it did.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction, getSemi bool) (decl *ast.GenDecl, gotSemi bool) {
	if p.trace {
		defer un(trace(p, keyword.String() + "Decl"));
	}
	doc := p.getComment(leading);
	pos := p.expect(keyword);
	var lparen, rparen token.Position;
	list := vector.New(0);
	if p.tok == token.LPAREN {
		// parenthesized list of specs
		lparen = p.pos;
		p.next();
		for p.tok != token.RPAREN && p.tok != token.EOF {
			doc := p.getComment(leading);
			spec, semi := f(p, doc, true);	// consume semicolon if any
			list.Push(spec);
			// a spec without trailing semicolon must be the last one
			if !semi {
				break;
			}
		}
		rparen = p.expect(token.RPAREN);
		if getSemi && p.tok == token.SEMICOLON {
			p.next();
			gotSemi = true;
		} else {
			p.optSemi = true;
		}
	} else {
		// single spec; the semicolon decision is delegated to f
		spec, semi := f(p, nil, getSemi);
		list.Push(spec);
		gotSemi = semi;
	}
	// convert vector
	specs := make([]ast.Spec, list.Len());
	for i := 0; i < list.Len(); i++ {
		specs[i] = list.At(i);
	}
	return &ast.GenDecl{doc, pos, keyword, lparen, specs, rparen}, gotSemi;
}
func (p *parser) parseReceiver() *ast.Field {
if p.trace {
defer un(trace(p, "Receiver"));
}
pos := p.pos;
par := p.parseParameters(false);
// must have exactly one receiver
if len(par) != 1 || len(par) == 1 && len(par[0].Names) > 1 {
p.errorExpected(pos, "exactly one receiver");
return &ast.Field{Type: &ast.BadExpr{noPos}};
}
recv := par[0];
// recv type must be TypeName or *TypeName
base := recv.Type;
if ptr, isPtr := base.(*ast.StarExpr); isPtr {
base = ptr.X;
}
if !isTypeName(base) {
p.errorExpected(base.Pos(), "type name");
}
return recv;
}
func (p *parser) parseFunctionDecl() *ast.FuncDecl {
if p.trace {
defer un(trace(p, "FunctionDecl"));
}
doc := p.getComment(leading);
pos := p.expect(token.FUNC);
var recv *ast.Field;
if p.tok == token.LPAREN {
recv = p.parseReceiver();
}
ident := p.parseIdent();
params, results := p.parseSignature();
var body *ast.BlockStmt;
if p.tok == token.LBRACE {
body = p.parseBlockStmt();
}
return &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body};
}
func (p *parser) parseDeclaration(getSemi bool) (decl ast.Decl, gotSemi bool) {
if p.trace {
defer un(trace(p, "Declaration"));
}
var f parseSpecFunction;
switch p.tok {
case token.CONST:
f = parseConstSpec;
case token.TYPE:
f = parseTypeSpec;
case token.VAR:
f = parseVarSpec;
case token.FUNC:
decl = p.parseFunctionDecl();
// Do not use parseComment here to consume a semicolon
// because we don't want to remove a trailing comment
// from the list of unassociated comments.
if getSemi && p.tok == token.SEMICOLON {
p.next();
gotSemi = true;
}
return decl, gotSemi;
default:
pos := p.pos;
p.errorExpected(pos, "declaration");
decl = &ast.BadDecl{pos};
gotSemi = getSemi && p.tok == token.SEMICOLON;
p.next(); // make progress in any case
return decl, gotSemi;
}
decl, gotSemi = p.parseGenDecl(p.tok, f, getSemi); // TODO 6g/spec issue
return;
}
// ----------------------------------------------------------------------------
// Packages
// The mode parameter to the Parse function is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality.
//
const (
PackageClauseOnly uint = 1 << iota; // parsing stops after package clause
ImportsOnly; // parsing stops after import declarations
ParseComments; // parse comments and add them to AST
Trace; // print a trace of parsed productions
)
func (p *parser) parsePackage() *ast.Program {
if p.trace {
defer un(trace(p, "Program"));
}
// package clause
comment := p.getComment(leading);
pos := p.expect(token.PACKAGE);
ident := p.parseIdent();
var decls []ast.Decl;
// Don't bother parsing the rest if we had errors already.
// Likely not a Go source file at all.
if p.ErrorCount() == 0 && p.mode & PackageClauseOnly == 0 {
// import decls
list := vector.New(0);
for p.tok == token.IMPORT {
decl, _ := p.parseGenDecl(token.IMPORT, parseImportSpec, true); // consume optional semicolon
list.Push(decl);
}
if p.mode & ImportsOnly == 0 {
// rest of package body
for p.tok != token.EOF {
decl, _ := p.parseDeclaration(true); // consume optional semicolon
list.Push(decl);
}
}
// convert declaration list
decls = make([]ast.Decl, list.Len());
for i := 0; i < list.Len(); i++ {
decls[i] = list.At(i).(ast.Decl);
}
}
// convert comments list
// 1) determine number of remaining comments
n := 0;
for i := 0; i < p.comments.Len(); i++ {
if p.comments.At(i) != nil {
n++;
}
}
// 2) convert the remaining comments
comments := make([]*ast.CommentGroup, n);
for i, j := 0, 0; i < p.comments.Len(); i++ {
if p.comments.At(i) != nil {
comments[j] = p.comments.At(i).(*ast.CommentGroup);
j++;
}
}
return &ast.Program{comment, pos, ident, decls, comments};
}
// ----------------------------------------------------------------------------
// Parser entry points.
// readSource converts the src argument into a []byte. Supported
// concrete types are string, []byte, *bytes.Buffer, and io.Reader;
// any other type, or a nil src, yields an "invalid source" error.
func readSource(src interface{}) ([]byte, os.Error) {
	if src != nil {
		switch s := src.(type) {
		case string:
			return strings.Bytes(s), nil;
		case []byte:
			return s, nil;
		case *bytes.Buffer:
			// is io.Reader, but src is already available in []byte form
			if s != nil {
				return s.Data(), nil;
			}
		case io.Reader:
			var buf bytes.Buffer;
			// io.Copy takes (dst, src): drain the reader into buf.
			// The original had the arguments reversed and kept an
			// unused byte count; the blank identifier discards it.
			_, err := io.Copy(&buf, s);
			if err != nil {
				return nil, err;
			}
			return buf.Data(), nil;
		}
	}
	return nil, os.ErrorString("invalid source");
}
// scannerMode returns the scanner mode bits given the parser's mode bits.
func scannerMode(mode uint) uint {
	var smode uint;	// default: no extra scanner flags
	if mode & ParseComments != 0 {
		smode |= scanner.ScanComments;
	}
	return smode;
}
func (p *parser) init(filename string, src interface{}, mode uint) os.Error {
data, err := readSource(src);
if err != nil {
return err;
}
// initialize parser state
p.ErrorVector.Init();
p.scanner.Init(filename, data, p, scannerMode(mode));
p.mode = mode;
p.trace = mode & Trace != 0; // for convenience (p.trace is used frequently)
p.comments.Init(0);
p.commentsIndex = noIndex;
p.next();
return nil;
}
// Parse parses a Go program.
//
// The program source src may be provided in a variety of formats. At the
// moment the following types are supported: string, []byte, and io.Reader.
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality.
//
// Parse returns a complete AST if no error occured. Otherwise, if the
// source couldn't be read, the returned program is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.BadX nodes
// representing the fragments of erroneous source code) and an ErrorList
// describing the syntax errors.
//
func Parse(filename string, src interface{}, mode uint) (*ast.Program, os.Error) {
var p parser;
if err := p.init(filename, src, mode); err != nil {
return nil, err;
}
prog := p.parsePackage();
return prog, p.GetError(scanner.NoMultiples);
}
// ParseStmts parses a list of Go statement.
func ParseStmts(filename string, src interface{}, mode uint) ([]ast.Stmt, os.Error) {
if mode & (PackageClauseOnly | ImportsOnly) != 0 {
return nil, nil;
}
var p parser;
if err := p.init(filename, src, mode); err != nil {
return nil, err;
}
stmts := p.parseStatementList();
return stmts, p.GetError(scanner.Sorted);
}
// ParseExpr parses a single Go expression.
func ParseExpr(filename string, src interface{}, mode uint) (ast.Expr, os.Error) {
if mode & (PackageClauseOnly | ImportsOnly) != 0 {
return nil, nil;
}
var p parser;
if err := p.init(filename, src, mode); err != nil {
return nil, err;
}
expr := p.parseExpression();
return expr, p.GetError(scanner.Sorted);
}
// Change description (Rietveld commit metadata, preserved from the
// original change list):
// - interface and comments cleanup
// R=rsc
// DELTA=33 (1 added, 3 deleted, 29 changed)
// OCL=31620
// CL=31642
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A parser for Go source text. The input is a stream of lexical tokens
// provided via the Scanner interface. The output is an abstract syntax
// tree (AST) representing the Go source. The parser is invoked by calling
// Parse.
//
package parser
import (
"bytes";
"container/vector";
"fmt";
"go/ast";
"go/scanner";
"go/token";
"io";
"os";
"strings";
)
// Names to index the parser's commentIndex array.
const (
leading = iota; // index of the leading comments entry
trailing; // index of the trailing comments entry
)
// Initial value for parser.commentsIndex.
var noIndex = [2]int{-1, -1};
// The parser structure holds the parser's internal state.
type parser struct {
scanner.ErrorVector;
scanner scanner.Scanner;
// Tracing/debugging
mode uint; // parsing mode
trace bool; // == (mode & Trace != 0)
indent uint; // indentation used for tracing output
// Comments
comments vector.Vector; // list of collected, unassociated comment groups
commentsIndex [2]int; // comments indexes of last leading/trailing comment group; or -1
// Next token
pos token.Position; // token position
tok token.Token; // one token look-ahead
lit []byte; // token literal
// Non-syntactic parser control
optSemi bool; // true if semicolon separator is optional in statement list
exprLev int; // < 0: in control clause, >= 0: in expression
};
// noPos is used when there is no corresponding source position for a token
var noPos token.Position;
// ----------------------------------------------------------------------------
// Parsing support
func (p *parser) printTrace(a ...) {
const dots =
". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ";
const n = uint(len(dots));
fmt.Printf("%5d:%3d: ", p.pos.Line, p.pos.Column);
i := 2*p.indent;
for ; i > n; i -= n {
fmt.Print(dots);
}
fmt.Print(dots[0 : i]);
fmt.Println(a);
}
func trace(p *parser, msg string) *parser {
p.printTrace(msg, "(");
p.indent++;
return p;
}
func un/*trace*/(p *parser) {
p.indent--;
p.printTrace(")");
}
// Advance to the next token.
func (p *parser) next0() {
// Because of one-token look-ahead, print the previous token
// when tracing as it provides a more readable output. The
// very first token (p.pos.Line == 0) is not initialized (it
// is token.ILLEGAL), so don't print it .
if p.trace && p.pos.Line > 0 {
s := p.tok.String();
switch {
case p.tok.IsLiteral():
p.printTrace(s, string(p.lit));
case p.tok.IsOperator(), p.tok.IsKeyword():
p.printTrace("\"" + s + "\"");
default:
p.printTrace(s);
}
}
p.pos, p.tok, p.lit = p.scanner.Scan();
p.optSemi = false;
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
// /*-style comments may end on a different line than where they start.
// Scan the comment for '\n' chars and adjust endline accordingly.
endline = p.pos.Line;
if p.lit[1] == '*' {
for _, b := range p.lit {
if b == '\n' {
endline++;
}
}
}
comment = &ast.Comment{p.pos, p.lit};
p.next0();
return comment, endline;
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return the line of which the last comment
// in the group ends. An empty line or non-comment token terminates
// a comment group.
//
func (p *parser) consumeCommentGroup() int {
	list := vector.New(0);
	endline := p.pos.Line;
	// Comments belong to the same group as long as each one starts no
	// later than the line immediately after the previous one ended
	// (endline+1 >= p.pos.Line); an empty line ends the group.
	for p.tok == token.COMMENT && endline+1 >= p.pos.Line {
		var comment *ast.Comment;
		comment, endline = p.consumeComment();
		list.Push(comment);
	}
	// convert list
	group := make([]*ast.Comment, list.Len());
	for i := 0; i < list.Len(); i++ {
		group[i] = list.At(i).(*ast.Comment);
	}
	// Record the group as (for now) unassociated; next()/getComment may
	// later attach it to a declaration as a leading or trailing comment.
	p.comments.Push(&ast.CommentGroup{group, endline});
	return endline;
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last leading
// and trailing comments.
//
// A leading comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A trailing comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Leading and trailing comments may be considered documentation
// that is stored in the AST. In that case they are removed from
// the parser's list of unassociated comments (via getComment).
//
func (p *parser) next() {
	p.commentsIndex = noIndex;	// forget comment indexes recorded for the previous token
	line := p.pos.Line;	// line of the token we are advancing past
	p.next0();
	if p.tok == token.COMMENT {
		if p.pos.Line == line {
			// The comment is on same line as previous token; it
			// cannot be a leading comment but may be a trailing
			// comment.
			endline := p.consumeCommentGroup();
			if p.pos.Line != endline {
				// The next token is on a different line, thus
				// the last comment group is a trailing comment.
				p.commentsIndex[trailing] = p.comments.Len() - 1;
			}
		}
		// consume successor comments, if any
		endline := -1;	// -1: no successor comment group was consumed
		for p.tok == token.COMMENT {
			endline = p.consumeCommentGroup();
		}
		if endline >= 0 && endline+1 == p.pos.Line {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a leading comment.
			p.commentsIndex[leading] = p.comments.Len() - 1;
		}
	}
}
// Get leading/trailing comment group, if any.
func (p *parser) getComment(kind int) *ast.CommentGroup {
i := p.commentsIndex[kind];
if i >= 0 {
// get comment and remove if from the list of unassociated comment groups
c := p.comments.At(i).(*ast.CommentGroup);
p.comments.Set(i, nil); // clear entry
p.commentsIndex[kind] = -1; // comment was consumed
return c;
}
return nil;
}
func (p *parser) errorExpected(pos token.Position, msg string) {
msg = "expected " + msg;
if pos.Offset == p.pos.Offset {
// the error happened at the current position;
// make the error message more specific
msg += ", found '" + p.tok.String() + "'";
if p.tok.IsLiteral() {
msg += " " + string(p.lit);
}
}
p.Error(pos, msg);
}
func (p *parser) expect(tok token.Token) token.Position {
pos := p.pos;
if p.tok != tok {
p.errorExpected(pos, "'" + tok.String() + "'");
}
p.next(); // make progress in any case
return pos;
}
// ----------------------------------------------------------------------------
// Common productions
func (p *parser) tryType() ast.Expr
func (p *parser) parseStringList(x *ast.StringLit) []*ast.StringLit
func (p *parser) parseExpression() ast.Expr
func (p *parser) parseStatement() ast.Stmt
func (p *parser) parseDeclaration(getSemi bool) (decl ast.Decl, gotSemi bool)
// parseIdent parses an identifier. If the current token is not an
// identifier, the error is reported via expect() and a placeholder
// *ast.Ident with an empty name is returned.
func (p *parser) parseIdent() *ast.Ident {
	if p.tok != token.IDENT {
		p.expect(token.IDENT);	// use expect() error handling
		return &ast.Ident{p.pos, ""};
	}
	ident := &ast.Ident{p.pos, string(p.lit)};
	p.next();
	return ident;
}
func (p *parser) parseIdentList(x ast.Expr) []*ast.Ident {
if p.trace {
defer un(trace(p, "IdentList"));
}
list := vector.New(0);
if x == nil {
x = p.parseIdent();
}
list.Push(x);
for p.tok == token.COMMA {
p.next();
list.Push(p.parseIdent());
}
// convert vector
idents := make([]*ast.Ident, list.Len());
for i := 0; i < list.Len(); i++ {
idents[i] = list.At(i).(*ast.Ident);
}
return idents;
}
func (p *parser) parseExpressionList() []ast.Expr {
if p.trace {
defer un(trace(p, "ExpressionList"));
}
list := vector.New(0);
list.Push(p.parseExpression());
for p.tok == token.COMMA {
p.next();
list.Push(p.parseExpression());
}
// convert list
exprs := make([]ast.Expr, list.Len());
for i := 0; i < list.Len(); i++ {
exprs[i] = list.At(i).(ast.Expr);
}
return exprs;
}
// ----------------------------------------------------------------------------
// Types
func (p *parser) parseType() ast.Expr {
if p.trace {
defer un(trace(p, "Type"));
}
typ := p.tryType();
if typ == nil {
p.errorExpected(p.pos, "type");
p.next(); // make progress
return &ast.BadExpr{p.pos};
}
return typ;
}
func (p *parser) parseQualifiedIdent() ast.Expr {
if p.trace {
defer un(trace(p, "QualifiedIdent"));
}
var x ast.Expr = p.parseIdent();
if p.tok == token.PERIOD {
// first identifier is a package identifier
p.next();
sel := p.parseIdent();
x = &ast.SelectorExpr{x, sel};
}
return x;
}
func (p *parser) parseTypeName() ast.Expr {
if p.trace {
defer un(trace(p, "TypeName"));
}
return p.parseQualifiedIdent();
}
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
if p.trace {
defer un(trace(p, "ArrayType"));
}
lbrack := p.expect(token.LBRACK);
var len ast.Expr;
if ellipsisOk && p.tok == token.ELLIPSIS {
len = &ast.Ellipsis{p.pos};
p.next();
} else if p.tok != token.RBRACK {
len = p.parseExpression();
}
p.expect(token.RBRACK);
elt := p.parseType();
return &ast.ArrayType{lbrack, len, elt};
}
func (p *parser) makeIdentList(list *vector.Vector) []*ast.Ident {
idents := make([]*ast.Ident, list.Len());
for i := 0; i < list.Len(); i++ {
ident, isIdent := list.At(i).(*ast.Ident);
if !isIdent {
pos := list.At(i).(ast.Expr).Pos();
p.errorExpected(pos, "identifier");
idents[i] = &ast.Ident{pos, ""};
}
idents[i] = ident;
}
return idents;
}
// parseFieldDecl parses one struct field declaration: either
// "IdentifierList Type [ Tag ]" or an anonymous field "Type [ Tag ]".
// Since a list of identifiers is indistinguishable from a list of type
// names while scanning, the decision is made after the fact: if a type
// follows the list, the list entries must be identifiers.
func (p *parser) parseFieldDecl() *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"));
	}
	doc := p.getComment(leading);
	// a list of identifiers looks like a list of type names
	list := vector.New(0);
	for {
		// TODO do not allow ()'s here
		list.Push(p.parseType());
		if p.tok == token.COMMA {
			p.next();
		} else {
			break;
		}
	}
	// if we had a list of identifiers, it must be followed by a type
	typ := p.tryType();
	// optional tag
	var tag []*ast.StringLit;
	if p.tok == token.STRING {
		tag = p.parseStringList(nil);
	}
	// analyze case
	var idents []*ast.Ident;
	if typ != nil {
		// IdentifierList Type
		idents = p.makeIdentList(list);
	} else {
		// Type (anonymous field)
		if list.Len() == 1 {
			// TODO check that this looks like a type
			typ = list.At(0).(ast.Expr);
		} else {
			// more than one entry but no trailing type: cannot be
			// a field declaration of either form
			p.errorExpected(p.pos, "anonymous field");
			typ = &ast.BadExpr{p.pos};
		}
	}
	return &ast.Field{doc, idents, typ, tag, nil};
}
// parseStructType parses a struct type "struct { FieldDecl ; ... }".
// The brace-enclosed field list is optional, so the bare keyword can
// be used as a type.
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"));
	}
	pos := p.expect(token.STRUCT);
	var lbrace, rbrace token.Position;
	var fields []*ast.Field;
	if p.tok == token.LBRACE {
		lbrace = p.pos;
		p.next();
		list := vector.New(0);
		for p.tok != token.RBRACE && p.tok != token.EOF {
			f := p.parseFieldDecl();
			list.Push(f);
			if p.tok == token.SEMICOLON {
				// getComment runs after next() so that a comment
				// following the semicolon has been collected
				p.next();
				f.Comment = p.getComment(trailing);
			} else {
				// last field: no semicolon before the closing brace
				f.Comment = p.getComment(trailing);
				break;
			}
		}
		rbrace = p.expect(token.RBRACE);
		p.optSemi = true;
		// convert vector
		fields = make([]*ast.Field, list.Len());
		for i := list.Len() - 1; i >= 0; i-- {
			fields[i] = list.At(i).(*ast.Field);
		}
	}
	return &ast.StructType{pos, lbrace, fields, rbrace};
}
// parsePointerType parses a pointer type "*BaseType".
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"));
	}
	pos := p.expect(token.MUL);	// position of the "*"
	return &ast.StarExpr{pos, p.parseType()};
}
func (p *parser) tryParameterType(ellipsisOk bool) ast.Expr {
if ellipsisOk && p.tok == token.ELLIPSIS {
pos := p.pos;
p.next();
if p.tok != token.RPAREN {
// "..." always must be at the very end of a parameter list
p.Error(pos, "expected type, found '...'");
}
return &ast.Ellipsis{pos};
}
return p.tryType();
}
func (p *parser) parseParameterType(ellipsisOk bool) ast.Expr {
typ := p.tryParameterType(ellipsisOk);
if typ == nil {
p.errorExpected(p.pos, "type");
p.next(); // make progress
typ = &ast.BadExpr{p.pos};
}
return typ;
}
func (p *parser) parseParameterDecl(ellipsisOk bool) (*vector.Vector, ast.Expr) {
if p.trace {
defer un(trace(p, "ParameterDecl"));
}
// a list of identifiers looks like a list of type names
list := vector.New(0);
for {
// TODO do not allow ()'s here
list.Push(p.parseParameterType(ellipsisOk));
if p.tok == token.COMMA {
p.next();
} else {
break;
}
}
// if we had a list of identifiers, it must be followed by a type
typ := p.tryParameterType(ellipsisOk);
return list, typ;
}
// parseParameterList parses a comma-separated list of parameter
// declarations. Two grammatical forms exist: named parameters
// ("IdentifierList Type" groups) and anonymous parameters (types
// only); which one applies is decided by whether the first group
// ends in a type.
func (p *parser) parseParameterList(ellipsisOk bool) []*ast.Field {
	if p.trace {
		defer un(trace(p, "ParameterList"));
	}
	list, typ := p.parseParameterDecl(ellipsisOk);
	if typ != nil {
		// IdentifierList Type
		idents := p.makeIdentList(list);
		list.Init(0);	// reuse the vector for the resulting fields
		list.Push(&ast.Field{nil, idents, typ, nil, nil});
		// remaining groups must all be of the named form
		for p.tok == token.COMMA {
			p.next();
			idents := p.parseIdentList(nil);
			typ := p.parseParameterType(ellipsisOk);
			list.Push(&ast.Field{nil, idents, typ, nil, nil});
		}
	} else {
		// Type { "," Type } (anonymous parameters)
		// convert list of types into list of *Param
		for i := 0; i < list.Len(); i++ {
			list.Set(i, &ast.Field{Type: list.At(i).(ast.Expr)});
		}
	}
	// convert list
	params := make([]*ast.Field, list.Len());
	for i := 0; i < list.Len(); i++ {
		params[i] = list.At(i).(*ast.Field);
	}
	return params;
}
func (p *parser) parseParameters(ellipsisOk bool) []*ast.Field {
if p.trace {
defer un(trace(p, "Parameters"));
}
var params []*ast.Field;
p.expect(token.LPAREN);
if p.tok != token.RPAREN {
params = p.parseParameterList(ellipsisOk);
}
p.expect(token.RPAREN);
return params;
}
func (p *parser) parseResult() []*ast.Field {
if p.trace {
defer un(trace(p, "Result"));
}
var results []*ast.Field;
if p.tok == token.LPAREN {
results = p.parseParameters(false);
} else if p.tok != token.FUNC {
typ := p.tryType();
if typ != nil {
results = make([]*ast.Field, 1);
results[0] = &ast.Field{Type: typ};
}
}
return results;
}
// parseSignature parses the parameter and result lists of a function
// signature (the part following the "func" keyword and optional name).
func (p *parser) parseSignature() (params []*ast.Field, results []*ast.Field) {
	if p.trace {
		defer un(trace(p, "Signature"));
	}
	params = p.parseParameters(true);	// "..." permitted in parameters
	return params, p.parseResult();
}
func (p *parser) parseFuncType() *ast.FuncType {
if p.trace {
defer un(trace(p, "FuncType"));
}
pos := p.expect(token.FUNC);
params, results := p.parseSignature();
return &ast.FuncType{pos, params, results};
}
// parseMethodSpec parses one entry of an interface type: either a
// method specification "IdentifierList Signature" or an embedded
// interface given by a (possibly qualified) type name.
func (p *parser) parseMethodSpec() *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"));
	}
	doc := p.getComment(leading);
	var idents []*ast.Ident;
	var typ ast.Expr;
	x := p.parseQualifiedIdent();
	// A method spec starts with a plain identifier followed by more
	// names ("," case) or a parameter list ("(" case); a qualified
	// name is an embedded interface. The blank identifier replaces
	// the previously declared-but-unused variable.
	if _, isIdent := x.(*ast.Ident); isIdent && (p.tok == token.COMMA || p.tok == token.LPAREN) {
		// methods
		idents = p.parseIdentList(x);
		params, results := p.parseSignature();
		typ = &ast.FuncType{noPos, params, results};
	} else {
		// embedded interface
		typ = x;
	}
	return &ast.Field{doc, idents, typ, nil, nil};
}
func (p *parser) parseInterfaceType() *ast.InterfaceType {
if p.trace {
defer un(trace(p, "InterfaceType"));
}
pos := p.expect(token.INTERFACE);
var lbrace, rbrace token.Position;
var methods []*ast.Field;
if p.tok == token.LBRACE {
lbrace = p.pos;
p.next();
list := vector.New(0);
for p.tok == token.IDENT {
list.Push(p.parseMethodSpec());
if p.tok != token.RBRACE {
p.expect(token.SEMICOLON);
}
}
rbrace = p.expect(token.RBRACE);
p.optSemi = true;
// convert vector
methods = make([]*ast.Field, list.Len());
for i := list.Len() - 1; i >= 0; i-- {
methods[i] = list.At(i).(*ast.Field);
}
}
return &ast.InterfaceType{pos, lbrace, methods, rbrace};
}
func (p *parser) parseMapType() *ast.MapType {
if p.trace {
defer un(trace(p, "MapType"));
}
pos := p.expect(token.MAP);
p.expect(token.LBRACK);
key := p.parseType();
p.expect(token.RBRACK);
value := p.parseType();
return &ast.MapType{pos, key, value};
}
func (p *parser) parseChanType() *ast.ChanType {
if p.trace {
defer un(trace(p, "ChanType"));
}
pos := p.pos;
dir := ast.SEND | ast.RECV;
if p.tok == token.CHAN {
p.next();
if p.tok == token.ARROW {
p.next();
dir = ast.SEND;
}
} else {
p.expect(token.ARROW);
p.expect(token.CHAN);
dir = ast.RECV;
}
value := p.parseType();
return &ast.ChanType{pos, dir, value};
}
func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
switch p.tok {
case token.IDENT: return p.parseTypeName();
case token.LBRACK: return p.parseArrayType(ellipsisOk);
case token.STRUCT: return p.parseStructType();
case token.MUL: return p.parsePointerType();
case token.FUNC: return p.parseFuncType();
case token.INTERFACE: return p.parseInterfaceType();
case token.MAP: return p.parseMapType();
case token.CHAN, token.ARROW: return p.parseChanType();
case token.LPAREN:
lparen := p.pos;
p.next();
typ := p.parseType();
rparen := p.expect(token.RPAREN);
return &ast.ParenExpr{lparen, typ, rparen};
}
// no type found
return nil;
}
func (p *parser) tryType() ast.Expr {
return p.tryRawType(false);
}
// ----------------------------------------------------------------------------
// Blocks
func makeStmtList(list *vector.Vector) []ast.Stmt {
stats := make([]ast.Stmt, list.Len());
for i := 0; i < list.Len(); i++ {
stats[i] = list.At(i).(ast.Stmt);
}
return stats;
}
// parseStatementList parses statements until a token that ends the
// list (case/default/"}"/EOF). Statements are separated by ";" except
// where the previous statement made the semicolon optional (optSemi,
// e.g. after a closing "}"); a missing mandatory separator is reported
// at the start of the next iteration via expect().
func (p *parser) parseStatementList() []ast.Stmt {
	if p.trace {
		defer un(trace(p, "StatementList"));
	}
	list := vector.New(0);
	expectSemi := false;
	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		if expectSemi {
			// previous statement had no separator and did not allow
			// an optional one - report the missing semicolon here
			p.expect(token.SEMICOLON);
			expectSemi = false;
		}
		list.Push(p.parseStatement());
		if p.tok == token.SEMICOLON {
			p.next();
		} else if p.optSemi {
			p.optSemi = false; // "consume" optional semicolon
		} else {
			expectSemi = true;
		}
	}
	return makeStmtList(list);
}
func (p *parser) parseBlockStmt() *ast.BlockStmt {
if p.trace {
defer un(trace(p, "BlockStmt"));
}
lbrace := p.expect(token.LBRACE);
list := p.parseStatementList();
rbrace := p.expect(token.RBRACE);
p.optSemi = true;
return &ast.BlockStmt{lbrace, list, rbrace};
}
// ----------------------------------------------------------------------------
// Expressions
func (p *parser) parseStringList(x *ast.StringLit) []*ast.StringLit {
if p.trace {
defer un(trace(p, "StringList"));
}
list := vector.New(0);
if x != nil {
list.Push(x);
}
for p.tok == token.STRING {
list.Push(&ast.StringLit{p.pos, p.lit});
p.next();
}
// convert list
strings := make([]*ast.StringLit, list.Len());
for i := 0; i < list.Len(); i++ {
strings[i] = list.At(i).(*ast.StringLit);
}
return strings;
}
func (p *parser) parseFuncLit() ast.Expr {
if p.trace {
defer un(trace(p, "FuncLit"));
}
typ := p.parseFuncType();
p.exprLev++;
body := p.parseBlockStmt();
p.optSemi = false; // function body requires separating ";"
p.exprLev--;
return &ast.FuncLit{typ, body};
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
//
func (p *parser) parseOperand() ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"));
	}
	switch p.tok {
	case token.IDENT:
		return p.parseIdent();
	case token.INT:
		x := &ast.IntLit{p.pos, p.lit};
		p.next();
		return x;
	case token.FLOAT:
		x := &ast.FloatLit{p.pos, p.lit};
		p.next();
		return x;
	case token.CHAR:
		x := &ast.CharLit{p.pos, p.lit};
		p.next();
		return x;
	case token.STRING:
		x := &ast.StringLit{p.pos, p.lit};
		p.next();
		if p.tok == token.STRING {
			// adjacent string literals are concatenated into a list
			return &ast.StringList{p.parseStringList(x)};
		}
		return x;
	case token.LPAREN:
		lparen := p.pos;
		p.next();
		// a parenthesized expression is never part of a control
		// clause header, so composite literals are allowed inside
		p.exprLev++;
		x := p.parseExpression();
		p.exprLev--;
		rparen := p.expect(token.RPAREN);
		return &ast.ParenExpr{lparen, x, rparen};
	case token.FUNC:
		return p.parseFuncLit();
	default:
		t := p.tryRawType(true); // could be type for composite literal
		if t != nil {
			return t;
		}
	}
	// neither an operand nor a type was found
	p.errorExpected(p.pos, "operand");
	p.next(); // make progress
	return &ast.BadExpr{p.pos};
}
func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "SelectorOrTypeAssertion"));
}
p.expect(token.PERIOD);
if p.tok == token.IDENT {
// selector
sel := p.parseIdent();
return &ast.SelectorExpr{x, sel};
}
// type assertion
p.expect(token.LPAREN);
var typ ast.Expr;
if p.tok == token.TYPE {
// special case for type switch
typ = &ast.Ident{p.pos, "type"};
p.next();
} else {
typ = p.parseType();
}
p.expect(token.RPAREN);
return &ast.TypeAssertExpr{x, typ};
}
func (p *parser) parseIndex(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "Index"));
}
p.expect(token.LBRACK);
p.exprLev++;
begin := p.parseExpression();
var end ast.Expr;
if p.tok == token.COLON {
p.next();
end = p.parseExpression();
}
p.exprLev--;
p.expect(token.RBRACK);
return &ast.IndexExpr{x, begin, end};
}
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
if p.trace {
defer un(trace(p, "CallOrConversion"));
}
lparen := p.expect(token.LPAREN);
var args []ast.Expr;
if p.tok != token.RPAREN {
args = p.parseExpressionList();
}
rparen := p.expect(token.RPAREN);
return &ast.CallExpr{fun, lparen, args, rparen};
}
func (p *parser) parseElement() ast.Expr {
if p.trace {
defer un(trace(p, "Element"));
}
x := p.parseExpression();
if p.tok == token.COLON {
colon := p.pos;
p.next();
x = &ast.KeyValueExpr{x, colon, p.parseExpression()};
}
return x;
}
func (p *parser) parseElementList() []ast.Expr {
if p.trace {
defer un(trace(p, "ElementList"));
}
list := vector.New(0);
for p.tok != token.RBRACE && p.tok != token.EOF {
list.Push(p.parseElement());
if p.tok == token.COMMA {
p.next();
} else {
break;
}
}
// convert list
elts := make([]ast.Expr, list.Len());
for i := 0; i < list.Len(); i++ {
elts[i] = list.At(i).(ast.Expr);
}
return elts;
}
func (p *parser) parseCompositeLit(typ ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "CompositeLit"));
}
lbrace := p.expect(token.LBRACE);
var elts []ast.Expr;
if p.tok != token.RBRACE {
elts = p.parseElementList();
}
rbrace := p.expect(token.RBRACE);
return &ast.CompositeLit{typ, lbrace, elts, rbrace};
}
// TODO Consider different approach to checking syntax after parsing:
// Provide a arguments (set of flags) to parsing functions
// restricting what they are supposed to accept depending
// on context.
// checkExpr checks that x is an expression (and not a type).
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	// TODO should provide predicate in AST nodes
	switch t := x.(type) {
	// the empty cases below are all legal expression nodes and
	// pass through unchanged
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.IntLit:
	case *ast.FloatLit:
	case *ast.CharLit:
	case *ast.StringLit:
	case *ast.StringList:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.TypeAssertExpr:
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
		if t.Op == token.RANGE {
			// the range operator is only allowed at the top of a for statement
			p.errorExpected(x.Pos(), "expression");
			x = &ast.BadExpr{x.Pos()};
		}
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression");
		x = &ast.BadExpr{x.Pos()};
	}
	return x;
}
// isTypeName returns true iff x is type name.
func isTypeName(x ast.Expr) bool {
// TODO should provide predicate in AST nodes
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.ParenExpr: return isTypeName(t.X); // TODO should (TypeName) be illegal?
case *ast.SelectorExpr: return isTypeName(t.X);
default: return false; // all other nodes are not type names
}
return true;
}
// isCompositeLitType returns true iff x is a legal composite literal type.
func isCompositeLitType(x ast.Expr) bool {
// TODO should provide predicate in AST nodes
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.ParenExpr: return isCompositeLitType(t.X);
case *ast.SelectorExpr: return isTypeName(t.X);
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default: return false; // all other nodes are not legal composite literal types
}
return true;
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
// TODO should provide predicate in AST nodes
switch t := x.(type) {
case *ast.UnaryExpr:
if t.Op == token.RANGE {
// the range operator is only allowed at the top of a for statement
p.errorExpected(x.Pos(), "expression");
x = &ast.BadExpr{x.Pos()};
}
case *ast.ArrayType:
if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
p.Error(len.Pos(), "expected array length, found '...'");
x = &ast.BadExpr{x.Pos()};
}
}
// all other nodes are expressions or types
return x;
}
// parsePrimaryExpr parses an operand followed by any number of
// selector/type-assertion, index/slice, call/conversion, and
// composite-literal suffixes.
func (p *parser) parsePrimaryExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"));
	}
	x := p.parseOperand();
	L: for {
		switch p.tok {
		case token.PERIOD: x = p.parseSelectorOrTypeAssertion(p.checkExpr(x));
		case token.LBRACK: x = p.parseIndex(p.checkExpr(x));
		case token.LPAREN: x = p.parseCallOrConversion(p.checkExprOrType(x));
		case token.LBRACE:
			// A "{" starts a composite literal only when x can be a
			// literal type and - inside a control clause header
			// (exprLev < 0) - only when x is unambiguously a literal
			// type and not a plain type name (which would conflict
			// with the statement body's opening brace).
			if isCompositeLitType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				x = p.parseCompositeLit(x);
			} else {
				break L;
			}
		default:
			break L;
		}
	}
	return p.checkExprOrType(x);
}
func (p *parser) parseUnaryExpr() ast.Expr {
if p.trace {
defer un(trace(p, "UnaryExpr"));
}
switch p.tok {
case token.ADD, token.SUB, token.NOT, token.XOR, token.ARROW, token.AND, token.RANGE:
pos, op := p.pos, p.tok;
p.next();
x := p.parseUnaryExpr();
return &ast.UnaryExpr{pos, op, p.checkExpr(x)};
case token.MUL:
// unary "*" expression or pointer type
pos := p.pos;
p.next();
x := p.parseUnaryExpr();
return &ast.StarExpr{pos, p.checkExprOrType(x)};
}
return p.parsePrimaryExpr();
}
// parseBinaryExpr parses a binary expression containing only operators
// of precedence >= prec1 (precedence climbing).
func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"));
	}
	x := p.parseUnaryExpr();
	// Walk precedence levels downward; the inner loop makes operators
	// of equal precedence left-associative, and the recursive call
	// with prec+1 lets tighter-binding operators group to the right.
	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
		for p.tok.Precedence() == prec {
			pos, op := p.pos, p.tok;
			p.next();
			y := p.parseBinaryExpr(prec + 1);
			x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)};
		}
	}
	return x;
}
func (p *parser) parseExpression() ast.Expr {
if p.trace {
defer un(trace(p, "Expression"));
}
return p.parseBinaryExpr(token.LowestPrec + 1);
}
// ----------------------------------------------------------------------------
// Statements
// parseSimpleStmt parses a simple statement: a labeled statement
// (only if labelOk is set), an assignment/short declaration, an
// increment/decrement, or a plain expression statement. It starts by
// parsing an expression list and then decides which form applies
// based on the following token.
func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
	if p.trace {
		defer un(trace(p, "SimpleStmt"));
	}
	x := p.parseExpressionList();
	switch p.tok {
	case token.COLON:
		// labeled statement
		p.next();
		if labelOk && len(x) == 1 {
			if label, isIdent := x[0].(*ast.Ident); isIdent {
				return &ast.LabeledStmt{label, p.parseStatement()};
			}
		}
		// label in a position where labels are not allowed, or the
		// label is not a single identifier
		p.Error(x[0].Pos(), "illegal label declaration");
		return &ast.BadStmt{x[0].Pos()};
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement
		pos, tok := p.pos, p.tok;
		p.next();
		y := p.parseExpressionList();
		// n:1 and 1:n assignments are legal; only n:m with n != m is not
		if len(x) > 1 && len(y) > 1 && len(x) != len(y) {
			p.Error(x[0].Pos(), "arity of lhs doesn't match rhs");
		}
		return &ast.AssignStmt{x, pos, tok, y};
	}
	if len(x) > 1 {
		p.Error(x[0].Pos(), "only one expression allowed");
		// continue with first expression
	}
	if p.tok == token.INC || p.tok == token.DEC {
		// increment or decrement
		s := &ast.IncDecStmt{x[0], p.tok};
		p.next(); // consume "++" or "--"
		return s;
	}
	// expression
	return &ast.ExprStmt{x[0]};
}
func (p *parser) parseCallExpr() *ast.CallExpr {
x := p.parseExpression();
if call, isCall := x.(*ast.CallExpr); isCall {
return call;
}
p.errorExpected(x.Pos(), "function/method call");
return nil;
}
func (p *parser) parseGoStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "GoStmt"));
}
pos := p.expect(token.GO);
call := p.parseCallExpr();
if call != nil {
return &ast.GoStmt{pos, call};
}
return &ast.BadStmt{pos};
}
func (p *parser) parseDeferStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "DeferStmt"));
}
pos := p.expect(token.DEFER);
call := p.parseCallExpr();
if call != nil {
return &ast.DeferStmt{pos, call};
}
return &ast.BadStmt{pos};
}
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
if p.trace {
defer un(trace(p, "ReturnStmt"));
}
pos := p.pos;
p.expect(token.RETURN);
var x []ast.Expr;
if p.tok != token.SEMICOLON && p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE {
x = p.parseExpressionList();
}
return &ast.ReturnStmt{pos, x};
}
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
if p.trace {
defer un(trace(p, "BranchStmt"));
}
s := &ast.BranchStmt{p.pos, tok, nil};
p.expect(tok);
if tok != token.FALLTHROUGH && p.tok == token.IDENT {
s.Label = p.parseIdent();
}
return s;
}
// isExpr reports whether s is an expression statement (or nil); used
// by parseSwitchStmt to distinguish expression switches from type
// switches. The blank identifier replaces the previously
// declared-but-unused variable, which Go rejects at compile time.
func (p *parser) isExpr(s ast.Stmt) bool {
	if s == nil {
		return true;
	}
	_, isExpr := s.(*ast.ExprStmt);
	return isExpr;
}
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
if s == nil {
return nil;
}
if es, isExpr := s.(*ast.ExprStmt); isExpr {
return p.checkExpr(es.X);
}
p.Error(s.Pos(), "expected condition, found simple statement");
return &ast.BadExpr{s.Pos()};
}
// parseControlClause parses the header of an if/switch/for statement:
// an optional init statement s1, a condition/guard statement s2, and -
// for "for" statements only - a post statement s3. Expressions are
// parsed with exprLev < 0 so that composite literals are not consumed
// (their "{" would be ambiguous with the statement body's brace;
// see parsePrimaryExpr).
func (p *parser) parseControlClause(isForStmt bool) (s1, s2, s3 ast.Stmt) {
	if p.tok != token.LBRACE {
		prevLev := p.exprLev;
		p.exprLev = -1;
		if p.tok != token.SEMICOLON {
			s1 = p.parseSimpleStmt(false);
		}
		if p.tok == token.SEMICOLON {
			p.next();
			if p.tok != token.LBRACE && p.tok != token.SEMICOLON {
				s2 = p.parseSimpleStmt(false);
			}
			if isForStmt {
				// for statements have a 3rd section
				p.expect(token.SEMICOLON);
				if p.tok != token.LBRACE {
					s3 = p.parseSimpleStmt(false);
				}
			}
		} else {
			// no semicolon followed: the single statement parsed is
			// the condition, not the init statement
			s1, s2 = nil, s1;
		}
		p.exprLev = prevLev;
	}
	return s1, s2, s3;
}
// parseIfStmt parses an if statement: optional init statement,
// condition, body, and optional else branch. The third control-clause
// result (the for-only post statement) is discarded via the blank
// identifier; the previous named variable was declared but never
// used, which Go rejects at compile time.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"));
	}
	pos := p.expect(token.IF);
	s1, s2, _ := p.parseControlClause(false);
	body := p.parseBlockStmt();
	var else_ ast.Stmt;
	if p.tok == token.ELSE {
		p.next();
		else_ = p.parseStatement();
	}
	return &ast.IfStmt{pos, s1, p.makeExpr(s2), body, else_};
}
func (p *parser) parseCaseClause() *ast.CaseClause {
if p.trace {
defer un(trace(p, "CaseClause"));
}
// SwitchCase
pos := p.pos;
var x []ast.Expr;
if p.tok == token.CASE {
p.next();
x = p.parseExpressionList();
} else {
p.expect(token.DEFAULT);
}
colon := p.expect(token.COLON);
body := p.parseStatementList();
return &ast.CaseClause{pos, x, colon, body};
}
func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause {
if p.trace {
defer un(trace(p, "TypeCaseClause"));
}
// TypeSwitchCase
pos := p.pos;
var typ ast.Expr;
if p.tok == token.CASE {
p.next();
typ = p.parseType();
} else {
p.expect(token.DEFAULT);
}
colon := p.expect(token.COLON);
body := p.parseStatementList();
return &ast.TypeCaseClause{pos, typ, colon, body};
}
func (p *parser) parseSwitchStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "SwitchStmt"));
}
pos := p.expect(token.SWITCH);
s1, s2, dummy := p.parseControlClause(false);
if p.isExpr(s2) {
// expression switch
lbrace := p.expect(token.LBRACE);
cases := vector.New(0);
for p.tok == token.CASE || p.tok == token.DEFAULT {
cases.Push(p.parseCaseClause());
}
rbrace := p.expect(token.RBRACE);
p.optSemi = true;
body := &ast.BlockStmt{lbrace, makeStmtList(cases), rbrace};
return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body};
}
// type switch
// TODO do all the checks!
lbrace := p.expect(token.LBRACE);
cases := vector.New(0);
for p.tok == token.CASE || p.tok == token.DEFAULT {
cases.Push(p.parseTypeCaseClause());
}
rbrace := p.expect(token.RBRACE);
p.optSemi = true;
body := &ast.BlockStmt{lbrace, makeStmtList(cases), rbrace};
return &ast.TypeSwitchStmt{pos, s1, s2, body};
}
// parseCommClause parses one case of a select statement: "case" with a
// send or receive communication, or "default". A receive may assign
// its result (lhs = <-ch or lhs := <-ch); the send/receive ambiguity
// is resolved by looking at the token after the first expression.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"));
	}
	// CommCase
	pos := p.pos;
	var tok token.Token;
	var lhs, rhs ast.Expr;
	if p.tok == token.CASE {
		p.next();
		if p.tok == token.ARROW {
			// RecvExpr without assignment
			rhs = p.parseExpression();
		} else {
			// SendExpr or RecvExpr
			rhs = p.parseExpression();
			if p.tok == token.ASSIGN || p.tok == token.DEFINE {
				// RecvExpr with assignment
				tok = p.tok;
				p.next();
				// the expression parsed so far was the left-hand side
				lhs = rhs;
				if p.tok == token.ARROW {
					rhs = p.parseExpression();
				} else {
					p.expect(token.ARROW); // use expect() error handling
				}
			}
			// else SendExpr
		}
	} else {
		p.expect(token.DEFAULT);
	}
	colon := p.expect(token.COLON);
	body := p.parseStatementList();
	return &ast.CommClause{pos, tok, lhs, rhs, colon, body};
}
func (p *parser) parseSelectStmt() *ast.SelectStmt {
if p.trace {
defer un(trace(p, "SelectStmt"));
}
pos := p.expect(token.SELECT);
lbrace := p.expect(token.LBRACE);
cases := vector.New(0);
for p.tok == token.CASE || p.tok == token.DEFAULT {
cases.Push(p.parseCommClause());
}
rbrace := p.expect(token.RBRACE);
p.optSemi = true;
body := &ast.BlockStmt{lbrace, makeStmtList(cases), rbrace};
return &ast.SelectStmt{pos, body};
}
// parseForStmt parses a for statement, including the range form.
// A range clause initially parses as an assignment whose right-hand
// side is a unary RANGE expression; it is detected after the fact and
// rewritten into an *ast.RangeStmt.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"));
	}
	pos := p.expect(token.FOR);
	s1, s2, s3 := p.parseControlClause(true);
	body := p.parseBlockStmt();
	if as, isAssign := s2.(*ast.AssignStmt); isAssign {
		// possibly a for statement with a range clause; check assignment operator
		if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
			p.errorExpected(as.TokPos, "'=' or ':='");
			return &ast.BadStmt{pos};
		}
		// check lhs: at most "key, value", at least "key"
		var key, value ast.Expr;
		switch len(as.Lhs) {
		case 2:
			value = as.Lhs[1];
			fallthrough;
		case 1:
			key = as.Lhs[0];
		default:
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions");
			return &ast.BadStmt{pos};
		}
		// check rhs
		if len(as.Rhs) != 1 {
			p.errorExpected(as.Rhs[0].Pos(), "1 expressions");
			return &ast.BadStmt{pos};
		}
		if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
			// rhs is range expression; check lhs
			return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
		} else {
			// an assignment in the condition slot that is not a
			// range clause is not a valid for statement
			p.errorExpected(s2.Pos(), "range clause");
			return &ast.BadStmt{pos};
		}
	} else {
		// regular for statement
		return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body};
	}
	panic(); // unreachable
	return nil;
}
// parseStatement parses a single statement, dispatching on the current
// token. On an unexpected token it reports an error, consumes the
// token to guarantee progress, and returns an *ast.BadStmt.
func (p *parser) parseStatement() ast.Stmt {
	if p.trace {
		defer un(trace(p, "Statement"));
	}
	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		decl, _ := p.parseDeclaration(false); // do not consume trailing semicolon
		return &ast.DeclStmt{decl};
	case
		// tokens that may start a top-level expression
		token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
		token.LBRACK, token.STRUCT, // composite type
		token.MUL, token.AND, token.ARROW: // unary operators
		return p.parseSimpleStmt(true);
	case token.GO:
		return p.parseGoStmt();
	case token.DEFER:
		return p.parseDeferStmt();
	case token.RETURN:
		return p.parseReturnStmt();
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		return p.parseBranchStmt(p.tok);
	case token.LBRACE:
		return p.parseBlockStmt();
	case token.IF:
		return p.parseIfStmt();
	case token.SWITCH:
		return p.parseSwitchStmt();
	case token.SELECT:
		return p.parseSelectStmt();
	case token.FOR:
		return p.parseForStmt();
	case token.SEMICOLON, token.RBRACE:
		// don't consume the ";", it is the separator following the empty statement
		return &ast.EmptyStmt{p.pos};
	}
	// no statement found
	p.errorExpected(p.pos, "statement");
	p.next(); // make progress
	return &ast.BadStmt{p.pos};
}
// ----------------------------------------------------------------------------
// Declarations
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool)
// parseComment consumes a semicolon if there is one and getSemi is set, and
// collects any trailing comment group. It returns the comment (if any) and
// indicates whether a semicolon was consumed, so callers can decide whether
// the spec/declaration was terminated.
// NOTE(review): `trailing` is presumably a package-level comment-position
// selector defined elsewhere in this file — not visible here.
func (p *parser) parseComment(getSemi bool) (comment *ast.CommentGroup, gotSemi bool) {
	if getSemi && p.tok == token.SEMICOLON {
		p.next();
		gotSemi = true;
	}
	return p.getComment(trailing), gotSemi;
}
// parseImportSpec parses one import spec: an optional "." (dot import) or
// local package name, followed by a required import path string. The doc
// comment is supplied by the caller; the trailing comment and optional
// semicolon are handled by parseComment.
func parseImportSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) {
	if p.trace {
		defer un(trace(p, "ImportSpec"));
	}
	var ident *ast.Ident;
	if p.tok == token.PERIOD {
		// dot import: names are merged into the importing file's scope
		ident = &ast.Ident{p.pos, "."};
		p.next();
	} else if p.tok == token.IDENT {
		// explicit local package name
		ident = p.parseIdent();
	}
	var path []*ast.StringLit;
	if p.tok == token.STRING {
		path = p.parseStringList(nil);
	} else {
		p.expect(token.STRING); // use expect() error handling
	}
	comment, gotSemi := p.parseComment(getSemi);
	return &ast.ImportSpec{doc, ident, path, comment}, gotSemi;
}
// parseConstSpec parses one const spec: IdentifierList [Type] ["=" ExprList].
// If a type was parsed, an initializer is mandatory — hence expect(ASSIGN)
// runs whenever typ != nil, using expect's error reporting when "=" is absent.
func parseConstSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) {
	if p.trace {
		defer un(trace(p, "ConstSpec"));
	}
	idents := p.parseIdentList(nil);
	typ := p.tryType();
	var values []ast.Expr;
	if typ != nil || p.tok == token.ASSIGN {
		p.expect(token.ASSIGN);
		values = p.parseExpressionList();
	}
	comment, gotSemi := p.parseComment(getSemi);
	return &ast.ValueSpec{doc, idents, typ, values, comment}, gotSemi;
}
// parseTypeSpec parses one type spec: an identifier followed by a type.
// Trailing comment and optional semicolon are handled by parseComment.
func parseTypeSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) {
	if p.trace {
		defer un(trace(p, "TypeSpec"));
	}
	ident := p.parseIdent();
	typ := p.parseType();
	comment, gotSemi := p.parseComment(getSemi);
	return &ast.TypeSpec{doc, ident, typ, comment}, gotSemi;
}
// parseVarSpec parses one var spec: IdentifierList [Type] ["=" ExprList].
// Unlike const, a var spec with no type REQUIRES an initializer — hence
// expect(ASSIGN) runs whenever typ == nil.
func parseVarSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) {
	if p.trace {
		defer un(trace(p, "VarSpec"));
	}
	idents := p.parseIdentList(nil);
	typ := p.tryType();
	var values []ast.Expr;
	if typ == nil || p.tok == token.ASSIGN {
		p.expect(token.ASSIGN);
		values = p.parseExpressionList();
	}
	comment, gotSemi := p.parseComment(getSemi);
	return &ast.ValueSpec{doc, idents, typ, values, comment}, gotSemi;
}
// parseGenDecl parses a general declaration (const/type/var/import):
// either a single spec or a parenthesized, semicolon-separated spec list.
// f parses one spec. getSemi controls whether a trailing semicolon may be
// consumed; gotSemi reports whether one was.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction, getSemi bool) (decl *ast.GenDecl, gotSemi bool) {
	if p.trace {
		defer un(trace(p, keyword.String() + "Decl"));
	}
	doc := p.getComment(leading);
	pos := p.expect(keyword);
	var lparen, rparen token.Position;
	list := vector.New(0);
	if p.tok == token.LPAREN {
		// parenthesized spec list
		lparen = p.pos;
		p.next();
		for p.tok != token.RPAREN && p.tok != token.EOF {
			doc := p.getComment(leading);
			spec, semi := f(p, doc, true); // consume semicolon if any
			list.Push(spec);
			if !semi {
				// no semicolon: this must be the last spec before ")"
				break;
			}
		}
		rparen = p.expect(token.RPAREN);
		if getSemi && p.tok == token.SEMICOLON {
			p.next();
			gotSemi = true;
		} else {
			// ")" makes a following semicolon optional
			p.optSemi = true;
		}
	} else {
		// single (unparenthesized) spec
		spec, semi := f(p, nil, getSemi);
		list.Push(spec);
		gotSemi = semi;
	}
	// convert vector
	specs := make([]ast.Spec, list.Len());
	for i := 0; i < list.Len(); i++ {
		specs[i] = list.At(i);
	}
	return &ast.GenDecl{doc, pos, keyword, lparen, specs, rparen}, gotSemi;
}
// parseReceiver parses a method receiver and enforces the language rules:
// exactly one receiver with at most one name, whose type is a type name T
// or a pointer *T to a type name. On violation a Field with a BadExpr type
// is returned after reporting the error.
func (p *parser) parseReceiver() *ast.Field {
	if p.trace {
		defer un(trace(p, "Receiver"));
	}
	pos := p.pos;
	par := p.parseParameters(false);
	// must have exactly one receiver
	if len(par) != 1 || len(par) == 1 && len(par[0].Names) > 1 {
		p.errorExpected(pos, "exactly one receiver");
		return &ast.Field{Type: &ast.BadExpr{noPos}};
	}
	recv := par[0];
	// recv type must be TypeName or *TypeName
	base := recv.Type;
	if ptr, isPtr := base.(*ast.StarExpr); isPtr {
		// strip the pointer indirection before the type-name check
		base = ptr.X;
	}
	if !isTypeName(base) {
		p.errorExpected(base.Pos(), "type name");
	}
	return recv;
}
// parseFunctionDecl parses a function or method declaration: "func", an
// optional parenthesized receiver, the name, the signature, and an optional
// body (a declaration without body has body == nil).
func (p *parser) parseFunctionDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"));
	}
	doc := p.getComment(leading);
	pos := p.expect(token.FUNC);
	var recv *ast.Field;
	if p.tok == token.LPAREN {
		// "(" after "func" starts a method receiver
		recv = p.parseReceiver();
	}
	ident := p.parseIdent();
	params, results := p.parseSignature();
	var body *ast.BlockStmt;
	if p.tok == token.LBRACE {
		body = p.parseBlockStmt();
	}
	return &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body};
}
// parseDeclaration parses any top-level declaration. const/type/var are
// delegated to parseGenDecl with the matching spec parser; func is handled
// directly. An unexpected token produces an *ast.BadDecl and consumes one
// token so the parser always makes progress.
func (p *parser) parseDeclaration(getSemi bool) (decl ast.Decl, gotSemi bool) {
	if p.trace {
		defer un(trace(p, "Declaration"));
	}
	var f parseSpecFunction;
	switch p.tok {
	case token.CONST:
		f = parseConstSpec;
	case token.TYPE:
		f = parseTypeSpec;
	case token.VAR:
		f = parseVarSpec;
	case token.FUNC:
		decl = p.parseFunctionDecl();
		// Do not use parseComment here to consume a semicolon
		// because we don't want to remove a trailing comment
		// from the list of unassociated comments.
		if getSemi && p.tok == token.SEMICOLON {
			p.next();
			gotSemi = true;
		}
		return decl, gotSemi;
	default:
		pos := p.pos;
		p.errorExpected(pos, "declaration");
		decl = &ast.BadDecl{pos};
		gotSemi = getSemi && p.tok == token.SEMICOLON;
		p.next(); // make progress in any case
		return decl, gotSemi;
	}
	decl, gotSemi = p.parseGenDecl(p.tok, f, getSemi); // TODO 6g/spec issue
	return;
}
// ----------------------------------------------------------------------------
// Packages
// The mode parameter to the Parse function is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality. The flags are independent bit values and may be
// combined with bitwise OR.
//
const (
	PackageClauseOnly uint = 1 << iota;	// parsing stops after package clause
	ImportsOnly;	// parsing stops after import declarations
	ParseComments;	// parse comments and add them to AST
	Trace;	// print a trace of parsed productions
)
// parsePackage parses a whole program: the package clause, the import
// declarations, and — unless restricted by p.mode (PackageClauseOnly /
// ImportsOnly) or earlier errors — the remaining top-level declarations.
// It finishes by collecting all comment groups that were not attached to
// any AST node into the Program's comment list.
func (p *parser) parsePackage() *ast.Program {
	if p.trace {
		defer un(trace(p, "Program"));
	}
	// package clause
	comment := p.getComment(leading);
	pos := p.expect(token.PACKAGE);
	ident := p.parseIdent();
	var decls []ast.Decl;
	// Don't bother parsing the rest if we had errors already.
	// Likely not a Go source file at all.
	if p.ErrorCount() == 0 && p.mode & PackageClauseOnly == 0 {
		// import decls
		list := vector.New(0);
		for p.tok == token.IMPORT {
			decl, _ := p.parseGenDecl(token.IMPORT, parseImportSpec, true); // consume optional semicolon
			list.Push(decl);
		}
		if p.mode & ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decl, _ := p.parseDeclaration(true); // consume optional semicolon
				list.Push(decl);
			}
		}
		// convert declaration list
		decls = make([]ast.Decl, list.Len());
		for i := 0; i < list.Len(); i++ {
			decls[i] = list.At(i).(ast.Decl);
		}
	}
	// convert comments list
	// 1) determine number of remaining comments (nil entries were consumed
	//    as doc comments elsewhere)
	n := 0;
	for i := 0; i < p.comments.Len(); i++ {
		if p.comments.At(i) != nil {
			n++;
		}
	}
	// 2) convert the remaining comments
	comments := make([]*ast.CommentGroup, n);
	for i, j := 0, 0; i < p.comments.Len(); i++ {
		if p.comments.At(i) != nil {
			comments[j] = p.comments.At(i).(*ast.CommentGroup);
			j++;
		}
	}
	return &ast.Program{comment, pos, ident, decls, comments};
}
// ----------------------------------------------------------------------------
// Parser entry points.
// readSource converts the src argument of the Parse entry points into a
// []byte. Supported dynamic types: string, []byte, *bytes.Buffer, and
// io.Reader; anything else (including a nil *bytes.Buffer) produces an
// "invalid source" error.
func readSource(src interface{}) ([]byte, os.Error) {
	if src != nil {
		switch s := src.(type) {
		case string:
			return strings.Bytes(s), nil;
		case []byte:
			return s, nil;
		case *bytes.Buffer:
			// is io.Reader, but src is already available in []byte form
			if s != nil {
				return s.Data(), nil;
			}
		case io.Reader:
			var buf bytes.Buffer;
			// NOTE(review): in current Go, io.Copy is Copy(dst, src) and
			// this call would copy nothing into buf. In the era of this
			// dialect (os.Error, strings.Bytes) io.Copy took (src, dst),
			// so the argument order may be correct — confirm against the
			// matching stdlib. Also, `n` appears unused below.
			n, err := io.Copy(s, &buf);
			if err != nil {
				return nil, err;
			}
			return buf.Data(), nil;
		}
	}
	return nil, os.ErrorString("invalid source");
}
// scannerMode returns the scanner mode bits given the parser's mode bits:
// comment scanning is enabled only when ParseComments is requested; all
// other parser flags have no scanner counterpart.
func scannerMode(mode uint) uint {
	if mode & ParseComments != 0 {
		return scanner.ScanComments;
	}
	return 0;
}
// init prepares the parser for a new source: it reads src into memory,
// resets the error vector, initializes the scanner (with the parser itself
// as error handler), records the mode flags, and advances to the first
// token. It returns an error only if the source could not be read.
func (p *parser) init(filename string, src interface{}, mode uint) os.Error {
	data, err := readSource(src);
	if err != nil {
		return err;
	}
	// initialize parser state
	p.ErrorVector.Init();
	p.scanner.Init(filename, data, p, scannerMode(mode));
	p.mode = mode;
	p.trace = mode & Trace != 0; // for convenience (p.trace is used frequently)
	p.comments.Init(0);
	p.commentsIndex = noIndex;
	p.next();	// prime p.tok/p.pos with the first token
	return nil;
}
// Parse parses a Go program.
//
// The filename is only used in AST position information and error messages
// and may be empty. The program source src may be provided in a variety of
// formats. At the moment the following types are supported: string, []byte,
// and io.Reader. The mode parameter controls the amount of source text parsed
// and other optional parser functionality.
//
// Parse returns a complete AST if no error occured. Otherwise, if the
// source couldn't be read, the returned program is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.BadX nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func Parse(filename string, src interface{}, mode uint) (*ast.Program, os.Error) {
	var p parser;
	if err := p.init(filename, src, mode); err != nil {
		// source could not be read; no AST is produced
		return nil, err;
	}
	prog := p.parsePackage(); // TODO 6g bug - function call order in expr lists
	// NOTE(review): NoMultiples presumably filters the error list to one
	// error per position — confirm against the scanner package.
	return prog, p.GetError(scanner.NoMultiples);
}
// ParseStmts parses a list of Go statements and returns the list of
// corresponding AST nodes. The filename and src arguments have the
// same interpretation as for Parse. If there is an error, the node
// list may be nil or contain partial ASTs.
//
func ParseStmts(filename string, src interface{}) ([]ast.Stmt, os.Error) {
	var p parser;
	if err := p.init(filename, src, 0); err != nil {
		// source could not be read; no statement list is produced
		return nil, err;
	}
	list := p.parseStatementList(); // TODO 6g bug - function call order in expr lists
	// return all accumulated errors sorted by position
	return list, p.GetError(scanner.Sorted);
}
// ParseExpr parses a single Go expression and returns the corresponding
// AST node. The filename and src arguments have the same interpretation
// as for Parse. If there is an error, the result expression may be nil
// or contain a partial AST.
//
func ParseExpr(filename string, src interface{}) (ast.Expr, os.Error) {
	var p parser;
	if err := p.init(filename, src, 0); err != nil {
		// source could not be read; no expression is produced
		return nil, err;
	}
	x := p.parseExpression(); // TODO 6g bug - function call order in expr lists
	// return all accumulated errors sorted by position
	return x, p.GetError(scanner.Sorted);
}
|
package bothandlers
import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"

	"github.com/djosephsen/hal"
)
// gifyout mirrors the JSON envelope returned by the Giphy "random" API:
// a Meta block (decoded but unused here) and a Data object whose fields
// hold the various image renditions and URLs. Field names must remain
// exported and spelled to match the JSON keys (encoding/json matches
// case-insensitively), so do not rename them.
type gifyout struct{
	Meta interface{}
	Data struct{
		Tags []string
		Caption string
		Username string
		Image_width string
		Image_frames string
		Image_mp4_url string
		Image_url string
		Image_original_url string
		Url string
		Id string
		Type string
		Image_height string
		Fixed_height_downsampled_url string
		Fixed_height_downsampled_width string
		Fixed_height_downsampled_height string
		Fixed_width_downsampled_url string
		Fixed_width_downsampled_width string
		Fixed_width_downsampled_height string
		Rating string
	}
}
// Gifme responds to "gif me <query>" by asking the Giphy random-gif API
// for a gif tagged with the query and sending its image URL back.
var Gifme = &hal.Handler{
	Method:  hal.RESPOND,
	Pattern: `gif me (.*)`,
	Run: func(res *hal.Response) error {
		// BUG FIX: escape the user-supplied term so multi-word searches
		// form a valid query string instead of a malformed URL.
		search := url.QueryEscape(res.Match[1])
		giphyURL := fmt.Sprintf("http://api.giphy.com/v1/gifs/random?rating=pg&api_key=dc6zaTOxFJmzC&tag=%s", search)
		hal.Logger.Debug(`url is`, giphyURL)
		g := new(gifyout)
		// BUG FIX: the error was ignored, so a network failure
		// dereferenced a nil *http.Response below.
		resp, err := http.Get(giphyURL)
		if err != nil {
			return err
		}
		// Close the body so the transport can reuse the connection.
		defer resp.Body.Close()
		if err := json.NewDecoder(resp.Body).Decode(g); err != nil {
			return err
		}
		return res.Send(g.Data.Image_url)
	},
}
// support multi-word searches
package bothandlers
import (
"github.com/djosephsen/hal"
"net/http"
"net/url"
"encoding/json"
"fmt"
)
// gifyout mirrors the JSON envelope returned by the Giphy "random" API:
// a Meta block (decoded but unused here) and a Data object whose fields
// hold the various image renditions and URLs. Field names must remain
// exported and spelled to match the JSON keys (encoding/json matches
// case-insensitively), so do not rename them.
type gifyout struct{
	Meta interface{}
	Data struct{
		Tags []string
		Caption string
		Username string
		Image_width string
		Image_frames string
		Image_mp4_url string
		Image_url string
		Image_original_url string
		Url string
		Id string
		Type string
		Image_height string
		Fixed_height_downsampled_url string
		Fixed_height_downsampled_width string
		Fixed_height_downsampled_height string
		Fixed_width_downsampled_url string
		Fixed_width_downsampled_width string
		Fixed_width_downsampled_height string
		Rating string
	}
}
// Gifme responds to "gif me <query>" by asking the Giphy random-gif API
// for a gif tagged with the query and sending its image URL back.
var Gifme = &hal.Handler{
	Method:  hal.RESPOND,
	Pattern: `gif me (.*)`,
	Run: func(res *hal.Response) error {
		// BUG FIX: escape only the user-supplied search term. The original
		// ran url.QueryEscape over the ENTIRE url after formatting, which
		// percent-encodes the scheme, slashes, "?" and "&" and produces an
		// invalid request.
		search := url.QueryEscape(res.Match[1])
		myurl := fmt.Sprintf("http://api.giphy.com/v1/gifs/random?rating=pg&api_key=dc6zaTOxFJmzC&tag=%s", search)
		hal.Logger.Debug(`myurl is`, myurl)
		g := new(gifyout)
		// BUG FIX: the error was ignored, so a network failure
		// dereferenced a nil *http.Response below.
		resp, err := http.Get(myurl)
		if err != nil {
			return err
		}
		// Close the body so the transport can reuse the connection.
		defer resp.Body.Close()
		if err := json.NewDecoder(resp.Body).Decode(g); err != nil {
			return err
		}
		return res.Send(g.Data.Image_url)
	},
}
|
// Copyright (c) 2018 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package psbt
import (
"bytes"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
)
// Test vectors from:
// https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#test-vectors
// createPsbtFromSignedTx is a test utility that builds a PSBT from an
// already-signed transaction so the tests can reconstruct, sign and
// extract it. It returns the unsigned PSBT packet, the per-input
// scriptSigs, and the per-input witnesses taken from the signed inputs.
func createPsbtFromSignedTx(serializedSignedTx []byte) (
	*Packet, [][]byte, []wire.TxWitness, error) {

	signedTx := wire.NewMsgTx(2)
	if err := signedTx.Deserialize(bytes.NewReader(serializedSignedTx)); err != nil {
		return nil, nil, nil, err
	}

	numIns := len(signedTx.TxIn)
	sigScripts := make([][]byte, 0, numIns)
	wits := make([]wire.TxWitness, 0, numIns)

	// Strip all signature data from a copy of the transaction while
	// recording that data, in input order, from the signed original.
	strippedTx := signedTx.Copy()
	for i := range strippedTx.TxIn {
		sigScripts = append(sigScripts, signedTx.TxIn[i].SignatureScript)
		strippedTx.TxIn[i].SignatureScript = nil
		wits = append(wits, signedTx.TxIn[i].Witness)
		strippedTx.TxIn[i].Witness = nil
	}

	// Outputs carry only (value, scriptPubKey) and need no amending;
	// strippedTx now holds the fully unsigned transaction.
	unsignedPsbt, err := NewFromUnsignedTx(strippedTx)
	if err != nil {
		return nil, nil, nil, err
	}
	return unsignedPsbt, sigScripts, wits, nil
}
// These are all valid PSBTs, hex-encoded; the map keys are the indices of
// the corresponding entries in the BIP-174 "valid" test-vector list
// (see the link above). Do not edit the hex — the tests depend on these
// exact serializations.
var validPsbtHex = map[int]string{
	0: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab300000000000000",
	1: "70736274ff0100a00200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40000000000feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac000000000001076a47304402204759661797c01b036b25928948686218347d89864b719e1f7fcf57d1e511658702205309eabf56aa4d8891ffd111fdf1336f3a29da866d7f8486d75546ceedaf93190121035cdc61fc7ba971c0b501a646a2a83b102cb43881217ca682dc86e2d73fa882920001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb82308000000",
	2: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000001030401000000000000",
	3: "70736274ff0100a00200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40000000000feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac00000000000100df0200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf6000000006a473044022070b2245123e6bf474d60c5b50c043d4c691a5d2435f09a34a7662a9dc251790a022001329ca9dacf280bdf30740ec0390422422c81cb45839457aeb76fc12edd95b3012102657d118d3357b8e0f4c2cd46db7b39f6d9c38d9a70abcb9b2de5dc8dbfe4ce31feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e13000001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb8230800220202ead596687ca806043edc3de116cdf29d5e9257c196cd055cf698c8d02bf24e9910b4a6ba670000008000000080020000800022020394f62be9df19952c5587768aeb7698061ad2c4a25c894f47d8c162b4d7213d0510b4a6ba6700000080010000800200008000",
	4: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
	5: "70736274ff01003f0200000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffff010000000000000000036a010000000000000a0f0102030405060708090f0102030405060708090a0b0c0d0e0f0000",
}
// These are all invalid PSBTs for the indicated
// reasons.
var invalidPsbtHex = map[int]string{
// wire format, not PSBT format
0: "0200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf6000000006a473044022070b2245123e6bf474d60c5b50c043d4c691a5d2435f09a34a7662a9dc251790a022001329ca9dacf280bdf30740ec0390422422c81cb45839457aeb76fc12edd95b3012102657d118d3357b8e0f4c2cd46db7b39f6d9c38d9a70abcb9b2de5dc8dbfe4ce31feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300",
// missing outputs
1: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000000",
// Filled in scriptSig in unsigned tx
2: "70736274ff0100fd0a010200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be4000000006a47304402204759661797c01b036b25928948686218347d89864b719e1f7fcf57d1e511658702205309eabf56aa4d8891ffd111fdf1336f3a29da866d7f8486d75546ceedaf93190121035cdc61fc7ba971c0b501a646a2a83b102cb43881217ca682dc86e2d73fa88292feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac00000000000001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb82308000000",
// No unsigned tx
3: "70736274ff000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000000",
// Duplicate keys in an input
4: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000001003f0200000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffff010000000000000000036a010000000000000000",
// Invalid global transaction typed key
5: "70736274ff020001550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid input witness utxo typed key
6: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac000000000002010020955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid pubkey length for input partial signature typed key
7: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87210203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd46304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid redeemscript typed key
8: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a01020400220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid witness script typed key
9: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d568102050047522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid bip32 typed key
10: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae210603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd10b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid non-witness utxo typed key
11: "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f0000000000020000bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
// Invalid final scriptsig typed key
12: "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000020700da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
// Invalid final script witness typed key
13: "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903020800da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
// Invalid pubkey in output BIP32 derivation paths typed key
14: "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00210203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca58710d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
// Invalid input sighash type typed key
15: "70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c0203000100000000010016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a65010125512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d2bd57f8a8751ae00",
// Invalid output redeemscript typed key
16: "70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c0002000016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a65010125512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d2bd57f8a8751ae00",
// Invalid output witnessScript typed key
17: "70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c00010016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a6521010025512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d2bd57f8a8751ae00",
// Additional cases outside the existing test vectors.
// Invalid duplicate PartialSig
18: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a01220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid duplicate BIP32 derivation (different derivs, same key)
19: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba670000008000000080050000800000",
}
// TestReadValidPsbtAndReserialize asserts that every serialization in
// validPsbtHex parses into a Psbt struct, and that re-serializing that
// struct reproduces the original bytes exactly (a full round trip).
func TestReadValidPsbtAndReserialize(t *testing.T) {
	for _, encoded := range validPsbtHex {
		rawPsbt, err := hex.DecodeString(encoded)
		if err != nil {
			t.Fatalf("Unable to decode hex: %v", err)
		}

		parsed, err := NewFromRawBytes(bytes.NewReader(rawPsbt), false)
		if err != nil {
			t.Fatalf("unable to parse psbt: %v", err)
		}
		t.Logf("Successfully parsed test, got transaction: %v",
			spew.Sdump(parsed.UnsignedTx))

		// Serialize the parsed struct again and require byte
		// equality with the input.
		var buf bytes.Buffer
		if err := parsed.Serialize(&buf); err != nil {
			t.Fatalf("Unable to serialize created Psbt: %v", err)
		}
		if !bytes.Equal(buf.Bytes(), rawPsbt) {
			t.Fatalf("Serialized PSBT didn't match: %v",
				hex.EncodeToString(buf.Bytes()))
		}
	}
}
// TestReadInvalidPsbt verifies that every serialization in invalidPsbtHex
// is rejected by the parser with a non-nil error.
func TestReadInvalidPsbt(t *testing.T) {
	for _, encoded := range invalidPsbtHex {
		rawPsbt, err := hex.DecodeString(encoded)
		if err != nil {
			t.Fatalf("Unable to decode hex: %v", err)
		}

		// Parsing must fail for each invalid vector.
		if _, err := NewFromRawBytes(
			bytes.NewReader(rawPsbt), false,
		); err != nil {
			t.Logf("Correctly got error: %v", err)
			continue
		}
		t.Fatalf("Incorrectly validated psbt: %v",
			hex.EncodeToString(rawPsbt))
	}
}
// TestSanityCheck verifies that the Updater refuses to work on PSBTs in
// an inconsistent state: an input that carries both witness and
// non-witness utxo data, and witness-only fields attached to an input
// that is non-witness.
func TestSanityCheck(t *testing.T) {
	// TODO(guggero): Remove when checks for segwit v1 are implemented.
	t.Skip("Skipping PSBT sanity checks for segwit v0.")

	// Test strategy:
	// 1. Create an invalid PSBT from a serialization
	// Then ensure that the sanity check fails.
	// 2. Create a valid PSBT from a serialization
	// Then create an updater, add a witness utxo to a non-witness
	// utxo.
	// Then ensure that the sanity check fails.
	// Then add a witnessScript field to a non-witness utxo.
	// Then ensure that the sanity check fails.

	// index 1 contains a psbt with two inputs, first non-witness,
	// second witness.
	psbtraw1, err := hex.DecodeString(validPsbtHex[1])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	psbt1, err := NewFromRawBytes(bytes.NewReader(psbtraw1), false)
	if err != nil {
		t.Fatalf("Unable to create Psbt struct: %v", err)
	}

	// Add a non-witness utxo field to input2 using raw insertion function,
	// so that it becomes invalid, then NewUpdater should fail.
	nonWitnessUtxoRaw, err := hex.DecodeString(
		CUTestHexData["NonWitnessUtxo"],
	)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	nonWitnessUtxo := wire.NewMsgTx(2)
	err = nonWitnessUtxo.Deserialize(bytes.NewReader(nonWitnessUtxoRaw))
	if err != nil {
		t.Fatalf("Unable to deserialize: %v", err)
	}
	psbt1.Inputs[1].NonWitnessUtxo = nonWitnessUtxo

	// The PSBT is now in an inconsistent state; Updater creation should
	// fail.
	if _, err := NewUpdater(psbt1); err == nil {
		t.Fatalf("Failed to identify invalid PSBT state ( " +
			"witness, non-witness fields)")
	}

	// Overwrite back with the correct psbt.
	psbtraw1, err = hex.DecodeString(validPsbtHex[1])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	psbt1, err = NewFromRawBytes(bytes.NewReader(psbtraw1), false)
	if err != nil {
		// This parse error was previously dropped (immediately
		// overwritten by the NewUpdater call below).
		t.Fatalf("Unable to create Psbt struct: %v", err)
	}
	updater, err := NewUpdater(psbt1)
	if err != nil {
		t.Fatalf("Unable to create Updater: %v", err)
	}

	// Create a fake non-witness utxo field to overlap with
	// the existing witness input at index 1.
	tx := wire.NewMsgTx(2)
	err = tx.Deserialize(bytes.NewReader(nonWitnessUtxoRaw))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	err = updater.AddInNonWitnessUtxo(tx, 1)
	if err == nil {
		t.Fatalf("Incorrectly accepted Psbt with conflicting witness " +
			"and non-witness utxo entries in the same input.")
	}

	// Now we try again; this time we try to add a witnessScript
	// key-value pair to an input which is non-witness, which should
	// also be rejected.
	psbt2, err := NewFromRawBytes(
		bytes.NewReader(psbtraw1), false,
	)
	if err != nil {
		t.Fatalf("Unable to create Psbt struct: %v", err)
	}
	updater2, err := NewUpdater(psbt2)
	if err != nil {
		t.Fatalf("Got error creating updater2: %v", err)
	}
	witnessScript, err := hex.DecodeString(
		CUTestHexData["Input2WitnessScript"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater2.AddInWitnessScript(witnessScript, 0)
	if err == nil {
		t.Fatalf("Incorrectly accepted adding witness script field " +
			"to non-witness utxo")
	}
}
// Data for creation and updating tests
// ===============================================================================

// CUTestHexData holds hex-encoded fixtures for the Creator/Updater tests:
// scriptPubKeys and txids for the transaction's inputs/outputs, raw utxo
// serializations, redeem/witness scripts, and the expected PSBT
// serialization after each updater step (COPsbtHex = freshly created PSBT;
// UOPsbtHex through UOPsbtHex4 = after adding utxos, scripts, BIP32
// derivations and sighash types, respectively — see TestPsbtCreator).
var CUTestHexData = map[string]string{
	"scriptPubkey1": "0014d85c2b71d0060b09c9886aeb815e50991dda124d",
	"scriptPubkey2": "001400aea9a2e5f0f876a588df5546e8742d1d87008f",
	"txid1":         "75ddabb27b8845f5247975c8a5ba7c6f336c4570708ebe230caf6db5217ae858",
	"txid2":         "1dea7cd05979072a3578cab271c02244ea8a090bbb46aa680a65ecd027048d83",
	// Expected serialization of the freshly created (unsigned, no
	// input/output metadata) PSBT.
	"COPsbtHex": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f000000000000000000",
	// Full serialized previous transaction attached as input 0's
	// non-witness utxo.
	"NonWitnessUtxo": "0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000",
	// Serialized TxOut (8-byte value || script length || pkScript);
	// TestPsbtCreator slices the pkScript from offset 9.
	"WitnessUtxo": "00c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887",
	// After adding witnessutxo and nonwitness utxo to inputs:
	"UOPsbtHex": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887000000",
	"Input1RedeemScript":  "5221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae",
	"Input2RedeemScript":  "00208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903",
	"Input2WitnessScript": "522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae",
	// After adding redeemscripts and witness scripts to inputs:
	"UOPsbtHex2": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae000000",
	// After adding bip32 derivations to inputs and outputs:
	"UOPsbtHex3": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	//After adding sighash types to inputs
	"UOPsbtHex4": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
}
// Just one example sanity check of B64 construction; after sighash appending
// above. UOPsbtB644 is the expected base64 encoding of the fully updated
// PSBT (the same state as CUTestHexData["UOPsbtHex4"], as checked at the
// end of TestPsbtCreator).
var CUTestB64Data = map[string]string{
	"UOPsbtB644": "cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAABAwQBAAAAAQRHUiEClYO/Oa4KYJdHrRma3dY0+mEIVZ1sXNObTCGD8auW4H8hAtq2H/SaFNtqfQKwzR+7ePxLGDErW05U2uTbovv+9TbXUq4iBgKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfxDZDGpPAAAAgAAAAIAAAACAIgYC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtcQ2QxqTwAAAIAAAACAAQAAgAABASAAwusLAAAAABepFLf1+vQOPUClpFmx2zU18rcvqSHohwEDBAEAAAABBCIAIIwjUxc3Q7WV37Sge3K6jkLjeX2nTof+fZ10l+OyAokDAQVHUiEDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwhAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zUq4iBgI63ZBPPW3PWd25BrDe4jUpt/+57VDl6GFRkmhgIh8OcxDZDGpPAAAAgAAAAIADAACAIgYDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwQ2QxqTwAAAIAAAACAAgAAgAAiAgOppMN/WZbTqiXbrGtXCvBlA5RJKUJGCzVHU+2e7KWHcRDZDGpPAAAAgAAAAIAEAACAACICAn9jmXV9Lv9VoTatAsaEsYOLZVbl8bazQoKpS2tQBRCWENkMak8AAACAAAAAgAUAAIAA",
}
// CUTestAmountData holds satoshi values used by TestPsbtCreator:
// amount1/amount2 fund the two transaction outputs, amount3 is the value
// of the witness utxo attached to input 1.
var CUTestAmountData = map[string]int64{
	"amount1": 149990000,
	"amount2": 100000000,
	"amount3": 200000000,
}
// CUTestIndexData holds input/output index constants for the PSBT tests
// (first and second position).
var CUTestIndexData = map[string]uint32{
	"index1": 0,
	"index2": 1,
}
// CUMasterKeyFingerPrint is the hex-encoded BIP32 master key fingerprint
// shared by all derivation-path fixtures below; the tests decode it and
// read it via binary.LittleEndian.Uint32.
var CUMasterKeyFingerPrint = "d90c6a4f"
// CUTestPathData holds BIP32 derivation paths; adding 0x80000000 marks a
// hardened index, so dpath1..dpath6 correspond to m/0'/0'/0' through
// m/0'/0'/5'.
var CUTestPathData = map[string][]uint32{
	"dpath1": {0 + 0x80000000, 0 + 0x80000000, 0 + 0x80000000},
	"dpath2": {0 + 0x80000000, 0 + 0x80000000, 1 + 0x80000000},
	"dpath3": {0 + 0x80000000, 0 + 0x80000000, 2 + 0x80000000},
	"dpath4": {0 + 0x80000000, 0 + 0x80000000, 3 + 0x80000000},
	"dpath5": {0 + 0x80000000, 0 + 0x80000000, 4 + 0x80000000},
	"dpath6": {0 + 0x80000000, 0 + 0x80000000, 5 + 0x80000000},
}
// CUTestPubkeyData holds hex-encoded 33-byte compressed public keys.
// In TestPsbtCreator, pub1/pub2 are attached to input 0, pub3/pub4 to
// input 1, and pub5/pub6 to outputs 0 and 1 respectively.
var CUTestPubkeyData = map[string]string{
	"pub1": "029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f",
	"pub2": "02dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d7",
	"pub3": "03089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc",
	"pub4": "023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73",
	"pub5": "03a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca58771",
	"pub6": "027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b50051096",
}
// ===============================================================================
// TestPsbtCreator exercises the Creator and Updater roles end to end: it
// builds an unsigned PSBT from raw inputs/outputs, then progressively
// attaches utxos, redeem/witness scripts, BIP32 derivations and sighash
// types, checking every intermediate serialization against the fixed
// vectors in CUTestHexData (and the base64 form against CUTestB64Data).
func TestPsbtCreator(t *testing.T) {
	spkOut1, err := hex.DecodeString(CUTestHexData["scriptPubkey1"])
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	spkOut2, err := hex.DecodeString(CUTestHexData["scriptPubkey2"])
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	out1 := wire.NewTxOut(CUTestAmountData["amount1"], spkOut1)
	out2 := wire.NewTxOut(CUTestAmountData["amount2"], spkOut2)
	outputs := []*wire.TxOut{out1, out2}

	hash1, err := chainhash.NewHashFromStr(CUTestHexData["txid1"])
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	prevOut1 := wire.NewOutPoint(hash1, uint32(0))
	hash2, err := chainhash.NewHashFromStr(CUTestHexData["txid2"])
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	prevOut2 := wire.NewOutPoint(hash2, uint32(1))
	inputs := []*wire.OutPoint{prevOut1, prevOut2}

	// Check creation fails with invalid sequences: only one nSequence
	// entry for two inputs.
	nSequences := []uint32{wire.MaxTxInSequenceNum}
	_, err = New(inputs, outputs, int32(3), uint32(0), nSequences)
	if err == nil {
		t.Fatalf("Did not error when creating transaction with " +
			"invalid nSequences")
	}
	nSequences = append(nSequences, wire.MaxTxInSequenceNum)

	// Check creation fails with invalid version (0).
	_, err = New(inputs, outputs, int32(0), uint32(0), nSequences)
	if err == nil {
		t.Fatalf("Did not error when creating transaction with " +
			"invalid version (0)")
	}

	// Use valid data to create. The error was previously unchecked here.
	cPsbt, err := New(inputs, outputs, int32(2), uint32(0), nSequences)
	if err != nil {
		t.Fatalf("Unable to create Psbt: %v", err)
	}
	var b bytes.Buffer
	err = cPsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize created Psbt: %v", err)
	}
	if CUTestHexData["COPsbtHex"] != hex.EncodeToString(b.Bytes()) {
		t.Fatalf("Failed to create expected psbt, instead got: %v",
			hex.EncodeToString(b.Bytes()))
	}

	// Now simulate passing the created PSBT to an Updater.
	updater, err := NewUpdater(cPsbt)
	if err != nil {
		t.Fatalf("Unable to create Updater object: %v", err)
	}
	tx := wire.NewMsgTx(2)
	nonWitnessUtxoHex, err := hex.DecodeString(
		CUTestHexData["NonWitnessUtxo"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = tx.Deserialize(bytes.NewReader(nonWitnessUtxoHex))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	witnessUtxoHex, err := hex.DecodeString(
		CUTestHexData["WitnessUtxo"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	// The WitnessUtxo vector is value (8 bytes) || script length (1
	// byte) || pkScript, so the script starts at offset 9.
	txout := wire.TxOut{Value: CUTestAmountData["amount3"],
		PkScript: witnessUtxoHex[9:]}
	err = updater.AddInNonWitnessUtxo(tx, 0)
	if err != nil {
		t.Fatalf("Unable to add NonWitness Utxo to inputs: %v", err)
	}
	err = updater.AddInWitnessUtxo(&txout, 1)
	if err != nil {
		t.Fatalf("Unable to add Witness Utxo to inputs: %v", err)
	}
	b.Reset()
	err = updater.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if CUTestHexData["UOPsbtHex"] != hex.EncodeToString(b.Bytes()) {
		t.Fatal("Failed to create valid updated PSBT after utxos")
	}

	// Attach redeem scripts to both inputs and a witness script to the
	// witness input.
	input1RedeemScript, err := hex.DecodeString(CUTestHexData["Input1RedeemScript"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater.AddInRedeemScript(input1RedeemScript, 0)
	if err != nil {
		t.Fatalf("Unable to add redeem script: %v", err)
	}
	input2RedeemScript, err := hex.DecodeString(CUTestHexData["Input2RedeemScript"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater.AddInRedeemScript(input2RedeemScript, 1)
	if err != nil {
		t.Fatalf("Unable to add redeem script: %v", err)
	}
	input2WitnessScript, err := hex.DecodeString(CUTestHexData["Input2WitnessScript"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater.AddInWitnessScript(input2WitnessScript, 1)
	if err != nil {
		t.Fatalf("Unable to add witness script: %v", err)
	}
	b.Reset()
	err = updater.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if CUTestHexData["UOPsbtHex2"] != hex.EncodeToString(b.Bytes()) {
		t.Fatal("Failed to create valid updated PSBT after redeem scripts")
	}

	// BIP32 derivations. Check the decode error BEFORE using the result;
	// previously the Uint32 read ran first and could panic on a failed
	// (short) decode.
	masterKey, err := hex.DecodeString(CUMasterKeyFingerPrint)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	masterKeyInt := binary.LittleEndian.Uint32(masterKey)
	input1Path1 := CUTestPathData["dpath1"]
	input1Path2 := CUTestPathData["dpath2"]
	input1Key1, err := hex.DecodeString(CUTestPubkeyData["pub1"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	input1Key2, err := hex.DecodeString(CUTestPubkeyData["pub2"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater.AddInBip32Derivation(masterKeyInt, input1Path1, input1Key1, 0)
	if err != nil {
		t.Fatal("Failed to add first key derivation for input 1")
	}
	err = updater.AddInBip32Derivation(masterKeyInt, input1Path2, input1Key2, 0)
	if err != nil {
		t.Fatal("Failed to add second key derivation for input 1")
	}
	input2Path1 := CUTestPathData["dpath3"]
	input2Path2 := CUTestPathData["dpath4"]
	input2Key1, err := hex.DecodeString(CUTestPubkeyData["pub3"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	input2Key2, err := hex.DecodeString(CUTestPubkeyData["pub4"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	// check invalid pubkeys are not accepted
	borkedInput2Key1 := append([]byte{0xff}, input2Key1...)
	err = updater.AddInBip32Derivation(masterKeyInt, input2Path1,
		borkedInput2Key1, 1)
	if err == nil {
		t.Fatalf("Expected invalid pubkey, got: %v", err)
	}
	err = updater.AddInBip32Derivation(masterKeyInt, input2Path1, input2Key1, 1)
	if err != nil {
		t.Fatal("Failed to add first key derivation for input 2")
	}
	err = updater.AddInBip32Derivation(masterKeyInt, input2Path2, input2Key2, 1)
	if err != nil {
		t.Fatal("Failed to add second key derivation for input 2")
	}
	output1Key1, err := hex.DecodeString(CUTestPubkeyData["pub5"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	output1Path := CUTestPathData["dpath5"]
	// check invalid pubkeys are not accepted
	borkedOutput1Key1 := append([]byte{0xab}, output1Key1[:13]...)
	err = updater.AddOutBip32Derivation(masterKeyInt, output1Path,
		borkedOutput1Key1, 0)
	if err == nil {
		t.Fatalf("Expected invalid pubkey, got: %v", err)
	}
	err = updater.AddOutBip32Derivation(masterKeyInt, output1Path, output1Key1, 0)
	if err != nil {
		t.Fatal("Failed to add key to first output")
	}
	output2Key1, err := hex.DecodeString(CUTestPubkeyData["pub6"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	output2Path := CUTestPathData["dpath6"]
	err = updater.AddOutBip32Derivation(masterKeyInt, output2Path, output2Key1, 1)
	if err != nil {
		t.Fatal("Failed to add key to second output")
	}
	b.Reset()
	err = updater.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if CUTestHexData["UOPsbtHex3"] != hex.EncodeToString(b.Bytes()) {
		t.Fatal("Failed to create valid updated PSBT after BIP32 derivations")
	}

	// Sighash types (SIGHASH_ALL = 1) on both inputs.
	err = updater.AddInSighashType(txscript.SigHashType(1), 0)
	if err != nil {
		t.Fatal("Failed to add sighash type to first input")
	}
	err = updater.AddInSighashType(txscript.SigHashType(1), 1)
	if err != nil {
		t.Fatal("Failed to add sighash type to second input")
	}
	b.Reset()
	err = updater.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if CUTestHexData["UOPsbtHex4"] != hex.EncodeToString(b.Bytes()) {
		t.Fatal("Failed to create valid updated PSBT after sighash types")
	}

	// Finally verify the base64 encoding of the fully updated PSBT.
	b644, err := updater.Upsbt.B64Encode()
	if err != nil {
		t.Fatalf("Unable to B64Encode updated Psbt: %v", err)
	}
	if b644 != CUTestB64Data["UOPsbtB644"] {
		t.Fatalf("Failed to base64 encode updated PSBT after sighash "+
			"types: %v", b644)
	}
}
// Signing test data taken from
// https://github.com/achow101/bitcoin/blob/020628e3a4e88e36647eaf92bac4b3552796ac6a/test/functional/data/rpc_psbt.json
//
// signer*Privkey* entries are WIF-encoded private keys, signer1PsbtB64 is a
// base64-encoded PSBT, and the remaining entries are hex-encoded PSBT
// serializations. The long hex values are split with string concatenation
// purely for line length; each value is a single contiguous hex string.
var signerPsbtData = map[string]string{
	"signer1Privkey1": "cP53pDbR5WtAD8dYAW9hhTjuvvTVaEiQBdrz9XPrgLBeRFiyCbQr",
	"signer1Privkey2": "cR6SXDoyfQrcp4piaiHE97Rsgta9mNhGTen9XeonVgwsh4iSgw6d",
	"signer1PsbtB64":  "cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAABBEdSIQKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfyEC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtdSriIGApWDvzmuCmCXR60Zmt3WNPphCFWdbFzTm0whg/GrluB/ENkMak8AAACAAAAAgAAAAIAiBgLath/0mhTban0CsM0fu3j8SxgxK1tOVNrk26L7/vU21xDZDGpPAAAAgAAAAIABAACAAQMEAQAAAAABASAAwusLAAAAABepFLf1+vQOPUClpFmx2zU18rcvqSHohwEEIgAgjCNTFzdDtZXftKB7crqOQuN5fadOh/59nXSX47ICiQMBBUdSIQMIncEMesbbVPkTKa9hczPbOIzq0MIx9yM3nRuZAwsC3CECOt2QTz1tz1nduQaw3uI1Kbf/ue1Q5ehhUZJoYCIfDnNSriIGAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zENkMak8AAACAAAAAgAMAAIAiBgMIncEMesbbVPkTKa9hczPbOIzq0MIx9yM3nRuZAwsC3BDZDGpPAAAAgAAAAIACAACAAQMEAQAAAAAiAgOppMN/WZbTqiXbrGtXCvBlA5RJKUJGCzVHU+2e7KWHcRDZDGpPAAAAgAAAAIAEAACAACICAn9jmXV9Lv9VoTatAsaEsYOLZVbl8bazQoKpS2tQBRCWENkMak8AAACAAAAAgAUAAIAA",
	"signer1Result":   "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000002202029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887220203089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f010103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b" +
		"02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	"signer2Privkey1": "cT7J9YpCwY3AVRFSjN6ukeEeWY6mhpbJPxRaDaP5QTdygQRxP9Au",
	"signer2Privkey2": "cNBc3SWUip9PPm1GjRoLEJT6T41iNzCYtD7qro84FMnM5zEqeJsE",
	"signer2Psbt":     "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f000000800000008001000080010304010000000001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f0000008000000080020000800103040100000000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	"signer2Result":   "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000220202dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d7483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8872202023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d2010103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b9903" +
		"0b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
}
// TestPsbtSigner checks that externally produced partial signatures can be
// attached to the correct inputs of a parsed PSBT via Updater.Sign, and that
// the resulting serialization matches the expected intermediate PSBT
// (signerPsbtData["signer1Result"]).
func TestPsbtSigner(t *testing.T) {
	psbt1, err := NewFromRawBytes(
		bytes.NewReader([]byte(signerPsbtData["signer1PsbtB64"])),
		true,
	)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}

	psbtUpdater1 := Updater{
		Upsbt: psbt1,
	}

	// Signature and pubkey for the first input. The decode errors were
	// previously ignored; check them so a bad fixture fails loudly.
	sig1, err := hex.DecodeString("3044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub1, err := hex.DecodeString("029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	res, err := psbtUpdater1.Sign(0, sig1, pub1, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Error from adding signatures: %v %v", err, res)
	}

	// Signature and pubkey for the second input.
	sig2, err := hex.DecodeString("3044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub2, err := hex.DecodeString("03089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	res, err = psbtUpdater1.Sign(1, sig2, pub2, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Error from adding signatures: %v %v", err, res)
	}

	// The serialized, half-signed PSBT must match the expected result.
	signer1Result, err := hex.DecodeString(signerPsbtData["signer1Result"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	var b bytes.Buffer
	err = psbtUpdater1.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if !bytes.Equal(b.Bytes(), signer1Result) {
		t.Fatalf("Failed to add signatures correctly")
	}
}
// Finalizer-extractor test data.
//
// finalizeb64 is a base64-encoded PSBT ready to be finalized; finalize is
// the same PSBT hex-encoded. resultb64/result are the expected finalized
// PSBT (base64/hex), network is the expected wire-serialized transaction,
// and twoOfThree is a separate 2-of-3 multisig PSBT fixture. The long hex
// values are split with string concatenation purely for line length.
var finalizerPsbtData = map[string]string{
	"finalizeb64": "cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAAiAgKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgf0cwRAIgdAGK1BgAl7hzMjwAFXILNoTMgSOJEEjn282bVa1nnJkCIHPTabdA4+tT3O+jOCPIBwUUylWn3ZVE8VfBZ5EyYRGMASICAtq2H/SaFNtqfQKwzR+7ePxLGDErW05U2uTbovv+9TbXSDBFAiEA9hA4swjcHahlo0hSdG8BV3KTQgjG0kRUOTzZm98iF3cCIAVuZ1pnWm0KArhbFOXikHTYolqbV2C+ooFvZhkQoAbqAQEDBAEAAAABBEdSIQKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfyEC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtdSriIGApWDvzmuCmCXR60Zmt3WNPphCFWdbFzTm0whg/GrluB/ENkMak8AAACAAAAAgAAAAIAiBgLath/0mhTban0CsM0fu3j8SxgxK1tOVNrk26L7/vU21xDZDGpPAAAAgAAAAIABAACAAAEBIADC6wsAAAAAF6kUt/X69A49QKWkWbHbNTXyty+pIeiHIgIDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtxHMEQCIGLrelVhB6fHP0WsSrWh3d9vcHX7EnWWmn84Pv/3hLyyAiAMBdu3Rw2/LwhVfdNWxzJcHtMJE+mWzThAlF2xIijaXwEiAgI63ZBPPW3PWd25BrDe4jUpt/+57VDl6GFRkmhgIh8Oc0cwRAIgZfRbpZmLWaJ//hp77QFq8fH5DVSzqo90UKpfVqJRA70CIH9yRwOtHtuWaAsoS1bU/8uI9/t1nqu+CKow8puFE4PSAQEDBAEAAAABBCIAIIwjUxc3Q7WV37Sge3K6jkLjeX2nTof+fZ10l+OyAokDAQVHUiEDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwhAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zUq4iBgI63ZBPPW3PWd25BrDe4jUpt/+57VDl6GFRkmhgIh8OcxDZDGpPAAAAgAAAAIADAACAIgYDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwQ2QxqTwAAAIAAAACAAgAAgAAiAgOppMN/WZbTqiXbrGtXCvBlA5RJKUJGCzVHU+2e7KWHcRDZDGpPAAAAgAAAAIAEAACAACICAn9jmXV9Lv9VoTatAsaEsYOLZVbl8bazQoKpS2tQBRCWENkMak8AAACAAAAAgAUAAIAA",
	"finalize": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000002202029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01220202dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d7483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887220203089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f012202023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe0" +
		"8aa30f29b851383d2010103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	"resultb64": "cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAABB9oARzBEAiB0AYrUGACXuHMyPAAVcgs2hMyBI4kQSOfbzZtVrWecmQIgc9Npt0Dj61Pc76M4I8gHBRTKVafdlUTxV8FnkTJhEYwBSDBFAiEA9hA4swjcHahlo0hSdG8BV3KTQgjG0kRUOTzZm98iF3cCIAVuZ1pnWm0KArhbFOXikHTYolqbV2C+ooFvZhkQoAbqAUdSIQKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfyEC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtdSrgABASAAwusLAAAAABepFLf1+vQOPUClpFmx2zU18rcvqSHohwEHIyIAIIwjUxc3Q7WV37Sge3K6jkLjeX2nTof+fZ10l+OyAokDAQjaBABHMEQCIGLrelVhB6fHP0WsSrWh3d9vcHX7EnWWmn84Pv/3hLyyAiAMBdu3Rw2/LwhVfdNWxzJcHtMJE+mWzThAlF2xIijaXwFHMEQCIGX0W6WZi1mif/4ae+0BavHx+Q1Us6qPdFCqX1aiUQO9AiB/ckcDrR7blmgLKEtW1P/LiPf7dZ6rvgiqMPKbhROD0gFHUiEDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwhAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zUq4AIgIDqaTDf1mW06ol26xrVwrwZQOUSSlCRgs1R1Ptnuylh3EQ2QxqTwAAAIAAAACABAAAgAAiAgJ/Y5l1fS7/VaE2rQLGhLGDi2VW5fG2s0KCqUtrUAUQlhDZDGpPAAAAgAAAAIAFAACAAA==",
	"result": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	"network": "0200000000010258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd7500000000da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752aeffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d01000000232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f000400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00000000",
	"twoOfThree": "70736274ff01005e01000000019a5fdb3c36f2168ea34a031857863c63bb776fd8a8a9149efd7341dfaf81c9970000000000ffffffff01e013a8040000000022002001c3a65ccfa5b39e31e6bafa504446200b9c88c58b4f21eb7e18412aff154e3f000000000001012bc817a80400000000220020114c9ab91ea00eb3e81a7aa4d0d8f1bc6bd8761f8f00dbccb38060dc2b9fdd5522020242ecd19afda551d58f496c17e3f51df4488089df4caafac3285ed3b9c590f6a847304402207c6ab50f421c59621323460aaf0f731a1b90ca76eddc635aed40e4d2fc86f97e02201b3f8fe931f1f94fde249e2b5b4dbfaff2f9df66dd97c6b518ffa746a4390bd1012202039f0acfe5a292aafc5331f18f6360a3cc53d645ebf0cc7f0509630b22b5d9f547473044022075329343e01033ebe5a22ea6eecf6361feca58752716bdc2260d7f449360a0810220299740ed32f694acc5f99d80c988bb270a030f63947f775382daf4669b272da0010103040100000001056952210242ecd19afda551d58f496c17e3f51df4488089df4caafac3285ed3b9c590f6a821035a654524d301dd0265c2370225a6837298b8ca2099085568cc61a8491287b63921039f0acfe5a292aafc5331f18f6360a3cc53d645ebf0cc7f0509630b22b5d9f54753ae22060242ecd19afda551d58f496c17e3f51df4488089df4caafac3285ed3b9c590f6a818d5f7375b2c000080000000800000008000000000010000002206035a654524d301dd0265c2370225a6837298b8ca2099085568cc61a8491287b63918e2314cf32c000080000000800000008000000000010000002206039f0acfe5a292aafc5331f18f6360a3cc53d645ebf0cc7f0509630b22b5d9f54718e524a1ce2c000080000000800000008000000000010000000000",
}
// TestFinalize2of3 checks that a 2-of-3 multisig PSBT carrying two partial
// signatures is not complete on parse, and becomes complete after
// MaybeFinalizeAll.
func TestFinalize2of3(t *testing.T) {
	b, err := hex.DecodeString(finalizerPsbtData["twoOfThree"])
	if err != nil {
		t.Fatalf("Error decoding hex: %v", err)
	}
	p, err := NewFromRawBytes(bytes.NewReader(b), false)
	// BUG FIX: the parse error was previously unchecked, so a parse
	// failure would panic on the p.IsComplete() call below instead of
	// failing the test cleanly.
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	if p.IsComplete() {
		t.Fatalf("Psbt is complete")
	}
	err = MaybeFinalizeAll(p)
	if err != nil {
		t.Fatalf("Error in MaybeFinalizeAll: %v", err)
	}
	if !p.IsComplete() {
		t.Fatalf("Psbt is not complete")
	}
}
// TestPsbtExtractor finalizes every input of a fully-signed PSBT, compares
// the finalized serialization against the expected result, then extracts the
// network-serialized transaction and compares it against the expected wire
// encoding.
func TestPsbtExtractor(t *testing.T) {
	rawToFinalize, err := base64.StdEncoding.DecodeString(
		finalizerPsbtData["finalizeb64"],
	)
	if err != nil {
		t.Fatalf("Error decoding b64: %v", err)
	}
	psbt1, err := NewFromRawBytes(
		bytes.NewReader(rawToFinalize), false,
	)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}

	// All inputs carry enough data for finalization to succeed.
	for i := range psbt1.Inputs {
		err = Finalize(psbt1, i)
		if err != nil {
			t.Fatalf("Error from finalizing PSBT: %v", err)
		}
	}

	finalizer1Result, err := base64.StdEncoding.DecodeString(
		finalizerPsbtData["resultb64"],
	)
	if err != nil {
		t.Fatalf("Unable to decode b64: %v", err)
	}
	finalToNetworkExpected, err := hex.DecodeString(finalizerPsbtData["network"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}

	// Extract the final wire transaction from the finalized PSBT.
	tx, err := Extract(psbt1)
	if err != nil {
		t.Fatalf("Failed to extract: %v", err)
	}
	var resultToNetwork bytes.Buffer
	if err := tx.Serialize(&resultToNetwork); err != nil {
		t.Fatalf("unable to serialize: %v", err)
	}

	var b bytes.Buffer
	err = psbt1.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if !bytes.Equal(b.Bytes(), finalizer1Result) {
		t.Fatalf("Failed to finalize transaction: expected %x, "+
			"got %x", finalizer1Result, b.Bytes())
	}
	// BUG FIX: the failure message previously printed the PSBT buffer
	// (b.Bytes()) rather than the network serialization being compared.
	if !bytes.Equal(finalToNetworkExpected, resultToNetwork.Bytes()) {
		t.Fatalf("Failed to network serialize transaction: %x",
			resultToNetwork.Bytes())
	}
}
// TestImportFromCore1 replays a PSBT workflow against data created manually
// with Bitcoin Core 0.17 regtest. The PSBT contains two inputs, one p2wkh
// and one p2pkh (non-witness). We take the Core-created PSBT as input, add
// the fields for each input separately, then finalize and extract, and
// compare with the network-serialized tx output from Core. Along the way it
// also checks that invalid pubkeys and mismatched prevOut data are rejected.
func TestImportFromCore1(t *testing.T) {
	imported := "cHNidP8BAJwCAAAAAjaoF6eKeGsPiDQxxqqhFDfHWjBtZzRqmaZmvyCVWZ5JAQAAAAD/////RhypNiFfnQSMNpo0SGsgIvDOyMQFAYEHZXD5jp4kCrUAAAAAAP////8CgCcSjAAAAAAXqRQFWy8ScSkkhlGMwfOnx15YwRzApofwX5MDAAAAABepFAt4TyLfGnL9QY6GLYHbpSQj+QclhwAAAAAAAAAAAA=="
	psbt1, err := NewFromRawBytes(bytes.NewReader([]byte(imported)), true)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}

	// Update with the first input's utxo (witness) and the second input's
	// utxo (non-witness), both taken from their funding transactions.
	fundingTxInput1Hex := "02000000014f2cbac7d7691fafca30313097d79be9e78aa6670752fcb1fc15508e77586efb000000004847304402201b5568d7cab977ae0892840b779d84e36d62e42fd93b95e648aaebeacd2577d602201d2ebda2b0cddfa0c1a71d3cbcb602e7c9c860a41ed8b4d18d40c92ccbe92aed01feffffff028c636f91000000001600147447b6d7e6193499565779c8eb5184fcfdfee6ef00879303000000001600149e88f2828a074ebf64af23c2168d1816258311d72d010000"
	fundingTxInput2Hex := "020000000001012f03f70c673d83d65da0e8d0db3867b3e7d7bfbd34fd6be65892042e57576eb00000000000feffffff028027128c000000001976a91485780899b61a5506f342bd67a2f635181f50c8b788acb8032c040000000017a914e2e3d32d42d6f043cab39708a6073301df5039db8702473044022047ae396fd8aba8f67482ad16e315fe680db585c1ac6422ffb18dacd9cf5bac350220321176fd6157ef51d9eae9230b0b5bd7dd29bb6247a879189e6aaa8091f3020201210368081f7ff37dfadbed407eba17b232f959e41e6ac78741192c805ebf80d487852f010000"
	fundingTxInput1Bytes, err := hex.DecodeString(fundingTxInput1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	txFund1 := wire.NewMsgTx(2)
	err = txFund1.Deserialize(bytes.NewReader(fundingTxInput1Bytes))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	// First input is witness, take correct output:
	txFund1Out := txFund1.TxOut[1]
	fundingTxInput2Bytes, err := hex.DecodeString(fundingTxInput2Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	txFund2 := wire.NewMsgTx(2)
	err = txFund2.Deserialize(bytes.NewReader(fundingTxInput2Bytes))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	psbtupdater1 := Updater{Upsbt: psbt1}
	// BUG FIX: the error return of AddInWitnessUtxo was previously ignored.
	err = psbtupdater1.AddInWitnessUtxo(txFund1Out, 0)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	err = psbtupdater1.AddInNonWitnessUtxo(txFund2, 1)
	if err != nil {
		t.Fatalf("Error inserting non-witness utxo: %v", err)
	}

	// Signing was done with Core; we manually insert the relevant input
	// entries here.
	sig1Hex := "304402200da03ac9890f5d724c42c83c2a62844c08425a274f1a5bca50dcde4126eb20dd02205278897b65cb8e390a0868c9582133c7157b2ad3e81c1c70d8fbd65f51a5658b01"
	sig1, err := hex.DecodeString(sig1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub1Hex := "024d6b24f372dd4551277c8df4ecc0655101e11c22894c8e05a3468409c865a72c"
	pub1, err := hex.DecodeString(pub1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}

	// Check that invalid pubkeys are not accepted.
	pubInvalid := append(pub1, 0x00)
	res, err := psbtupdater1.Sign(0, sig1, pubInvalid, nil, nil)
	if err == nil {
		t.Fatalf("Incorrectly accepted invalid pubkey: %v",
			pubInvalid)
	}
	res, err = psbtupdater1.Sign(0, sig1, pub1, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Error from adding signatures: %v %v", err, res)
	}
	sig2Hex := "3044022014eb9c4858f71c9f280bc68402aa742a5187f54c56c8eb07c902eb1eb5804e5502203d66656de8386b9b044346d5605f5ae2b200328fb30476f6ac993fc0dbb0455901"
	sig2, err := hex.DecodeString(sig2Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub2Hex := "03b4c79acdf4e7d978bef4019c421e4c6c67044ed49d27322dc90e808d8080e862"
	pub2, err := hex.DecodeString(pub2Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}

	// ===============================================================
	// Before adding the signature, we'll make a new PSBT with
	// modifications to the input data and check it fails sanity checks.
	// First an invalid tx:
	// BUG FIX: the parse error was previously discarded with _.
	psbtBorkedInput2, err := NewFromRawBytes(bytes.NewReader([]byte(imported)), true)
	if err != nil {
		t.Fatalf("Failed to parse PSBT for borked version: %v", err)
	}
	borkedUpdater, err := NewUpdater(psbtBorkedInput2)
	if err != nil {
		t.Fatalf("NewUpdater failed while trying to create borked "+
			"version: %v", err)
	}
	// BUG FIX: the error return of AddInWitnessUtxo was previously ignored.
	err = borkedUpdater.AddInWitnessUtxo(txFund1Out, 0)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	res, err = borkedUpdater.Sign(0, sig2, pub2, nil, nil)
	if err != ErrInvalidSignatureForInput {
		t.Fatalf("AddPartialSig succeeded, but should have failed "+
			"due to mismatch between pubkey and prevOut; err was: %v", err)
	}
	// Next, a valid tx serialization, but not the right one
	wrongTxBytes, err := hex.DecodeString("020000000001012d1d7b17356d0ad8232a5817d2d2fa5cd97d803c0ed03e013e97b65f4f1e5e7501000000171600147848cfb25bb163c7c63732615980a25eddbadc7bfeffffff022a8227630000000017a91472128ae6b6a1b74e499bedb5efb1cb09c9a6713287107240000000000017a91485f81cb970d854e2513ebf5c5b5d09e4509f4af3870247304402201c09aa8bcd18753ef01d8712a55eea5a0f69b6c4cc2944ac942264ff0662c91402201fc1390bf8b0023dd12ae78d7ec181124e106de57bc8f00812ae92bd024d3045012103ba077fc011aa59393bfe17cf491b3a02a9c4d39df122b2148322da0ec23508f459430800")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	wrongTx := wire.NewMsgTx(2)
	err = wrongTx.Deserialize(bytes.NewReader(wrongTxBytes))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	psbtBorkedInput2.Inputs[1] = *NewPsbtInput(wrongTx, nil)
	res, err = borkedUpdater.Sign(1, sig2, pub2, nil, nil)
	if err != ErrInvalidSignatureForInput {
		t.Fatalf("Error should have been invalid sig for input, was: %v", err)
	}
	// ======================================================

	res, err = psbtupdater1.Sign(1, sig2, pub2, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Failed to add signature to second input: %v %v", err, res)
	}

	// Neither input (p2pkh and p2wkh) require redeem script nor witness
	// script, so there are no more fields to add; we are ready to finalize.
	err = Finalize(psbt1, 0)
	if err != nil {
		t.Fatalf("Failed to finalize the first input, %v", err)
	}
	if psbt1.IsComplete() {
		t.Fatalf("PSBT was complete but has not been fully finalized")
	}
	err = Finalize(psbt1, 1)
	if err != nil {
		t.Fatalf("Failed to finalize second input, %v", err)
	}
	tx, err := Extract(psbt1)
	if err != nil {
		t.Fatalf("unable to extract tx: %v", err)
	}
	var networkSerializedTx bytes.Buffer
	if err := tx.Serialize(&networkSerializedTx); err != nil {
		t.Fatalf("unable to encode tx: %v", err)
	}

	// Compare the extracted wire serialization against Core's output.
	expectedTx := "0200000000010236a817a78a786b0f883431c6aaa11437c75a306d67346a99a666bf2095599e490100000000ffffffff461ca936215f9d048c369a34486b2022f0cec8c4050181076570f98e9e240ab5000000006a473044022014eb9c4858f71c9f280bc68402aa742a5187f54c56c8eb07c902eb1eb5804e5502203d66656de8386b9b044346d5605f5ae2b200328fb30476f6ac993fc0dbb04559012103b4c79acdf4e7d978bef4019c421e4c6c67044ed49d27322dc90e808d8080e862ffffffff028027128c0000000017a914055b2f1271292486518cc1f3a7c75e58c11cc0a687f05f93030000000017a9140b784f22df1a72fd418e862d81dba52423f90725870247304402200da03ac9890f5d724c42c83c2a62844c08425a274f1a5bca50dcde4126eb20dd02205278897b65cb8e390a0868c9582133c7157b2ad3e81c1c70d8fbd65f51a5658b0121024d6b24f372dd4551277c8df4ecc0655101e11c22894c8e05a3468409c865a72c0000000000"
	expectedTxBytes, err := hex.DecodeString(expectedTx)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	if !bytes.Equal(expectedTxBytes, networkSerializedTx.Bytes()) {
		t.Fatalf("The produced network transaction did not match the expected: %x \n %x \n",
			networkSerializedTx.Bytes(), expectedTxBytes)
	}
}
func TestImportFromCore2(t *testing.T) {
// This example #2 was created manually using Bitcoin Core 0.17 regtest.
// It contains two inputs, one p2sh-p2wkh and one fake utxo.
// The PSBT has been created with walletcreatepsbt and then partial-signed
// on the real input with walletprocessbst in Core.
// We first check that the updating here, using the Core created signature,
// redeem script and signature for the p2sh-p2wkh input, creates the
// same partial-signed intermediate transaction as Core did after
// walletprocesspsbt.
// We then attach a fake
// input of type p2sh-p2wsh, attach its witnessUtxo, redeemscript and
// witnessscript fields, and then finalize the whole transaction. Unlike
// the previous example, we cannot here compare with a Core produced
// network serialized final transaction, because of the fake input.
imported := "cHNidP8BAJsCAAAAAkxTQ+rig5QNnUS5nMc+Pccow4IcOJeQRcNNw+7p5ZA5AQAAAAD/////qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqoNAAAAAP////8CAIYOcAAAAAAWABQ1l7nn13RubTwqRQU2BnVV5WlXBWAxMbUAAAAAF6kUkiuXUjfWFgTp6nl/gf9+8zIWR6KHAAAAAAAAAAAA"
psbt1, err := NewFromRawBytes(bytes.NewReader([]byte(imported)), true)
if err != nil {
t.Fatalf("Failed to parse PSBT: %v", err)
}
// update with the first input's utxo, taken from its funding
// transaction
fundingTxInput1Hex := "02000000017b260536a3c17aee49c41a9b36fdf01a418e0c04df06fbabcb0d4f590b95d175000000006a473044022074a5a13159b6c12d77881c9501aa5c18616fb76c1809fc4d55f18a2e63159a6702200d1aa72be6056a41808898d24da93c0c0192cad65b7c2cc86e00b3e0fbbd57f601210212cc429d61fde565d0c2271a3e4fdb063cb49ae2257fa71460be753ceb56d175feffffff02bc060d8f0000000017a9140b56c31b5dc5a5a22c45a7850e707ad602d94a3087008352840000000017a9149f3679d67a9a486238764f618a93b82a7d999103879a000000"
fundingTxInput1Bytes, err := hex.DecodeString(fundingTxInput1Hex)
if err != nil {
t.Fatalf("Unable to decode hex: %v", err)
}
txFund1 := wire.NewMsgTx(2)
err = txFund1.Deserialize(bytes.NewReader(fundingTxInput1Bytes))
if err != nil {
t.Fatalf("Error deserializing transaction: %v", err)
}
// First input is witness, take correct output:
txFund1Out := txFund1.TxOut[1]
psbtupdater1 := Updater{Upsbt: psbt1}
psbtupdater1.AddInWitnessUtxo(txFund1Out, 0)
// This input is p2sh-p2wkh, so it requires a redeemscript but not
// a witness script. The redeemscript is the witness program.
redeemScript, err := hex.DecodeString("00147aed39420a8b7ab98a83791327ccb70819d1fbe2")
if err != nil {
t.Fatalf("Unable to decode hex: %v", err)
}
psbtupdater1.AddInRedeemScript(redeemScript, 0)
// Signing for the first input was done with Core; we manually insert the
// relevant input entries here.
sig1Hex := "30440220546d182d00e45ef659c329dce6197dc19e0abc795e2c9279873f5a887998b273022044143113fc3475d04fc8d5113e0bbcb42d80514a9f1a2247e9b2a7878e20d44901"
sig1, err := hex.DecodeString(sig1Hex)
if err != nil {
t.Fatalf("Unable to decode hex: %v", err)
}
pub1Hex := "02bb3ce35af26f4c826eab3e5fc263ef56871b26686a8a995599b7ee6576613104"
pub1, err := hex.DecodeString(pub1Hex)
if err != nil {
t.Fatalf("Unable to decode hex: %v", err)
}
res, err := psbtupdater1.Sign(0, sig1, pub1, nil, nil)
if err != nil || res != 0 {
t.Fatalf("Unable to add partial signature: %v %v", err, res)
}
// Since this input is now finalizable, we do so:
err = Finalize(psbt1, 0)
if err != nil {
t.Fatalf("Failed to finalize the first input: %v", err)
}
if psbt1.IsComplete() {
t.Fatalf("PSBT was complete but has not been fully finalized")
}
// Core also adds the OutRedeemScript field for the output it knows about.
// Note that usually we would not of course re-create, but rather start
// from the half-signed version; so this is needed only for a sanity check
// that we can recreate the half-signed.
output2RedeemScript, err := hex.DecodeString("0014e0846bd17848ab40ca1f56b655c6fa31667880cc")
if err != nil {
t.Fatalf("Unable to decode hex: %v", err)
}
psbtupdater1.AddOutRedeemScript(output2RedeemScript, 1)
// The main function of the test is to compare the thus-generated
// partially (not completely) signed transaction with that generated and
// encoded by Core.
expectedPsbtPartialB64 := "cHNidP8BAJsCAAAAAkxTQ+rig5QNnUS5nMc+Pccow4IcOJeQRcNNw+7p5ZA5AQAAAAD/////qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqoNAAAAAP////8CAIYOcAAAAAAWABQ1l7nn13RubTwqRQU2BnVV5WlXBWAxMbUAAAAAF6kUkiuXUjfWFgTp6nl/gf9+8zIWR6KHAAAAAAABASAAg1KEAAAAABepFJ82edZ6mkhiOHZPYYqTuCp9mZEDhwEHFxYAFHrtOUIKi3q5ioN5EyfMtwgZ0fviAQhrAkcwRAIgVG0YLQDkXvZZwync5hl9wZ4KvHleLJJ5hz9aiHmYsnMCIEQUMRP8NHXQT8jVET4LvLQtgFFKnxoiR+myp4eOINRJASECuzzjWvJvTIJuqz5fwmPvVocbJmhqiplVmbfuZXZhMQQAAAABABYAFOCEa9F4SKtAyh9WtlXG+jFmeIDMAA=="
generatedPsbtPartialB64, err := psbt1.B64Encode()
if err != nil {
t.Fatalf("Unable to B64Encode Psbt: %v", err)
}
if expectedPsbtPartialB64 != generatedPsbtPartialB64 {
t.Fatalf("Partial did not match expected: %v", generatedPsbtPartialB64)
}
// We now simulate adding the signing data for the second (fake) input,
// and check that we can finalize and extract. This input is p2sh-p2wsh.
// the second input is fake, we're going to make it witness type,
// so create a TxOut struct that fits
fakeTxOutSerialized, err := hex.DecodeString("00c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
fakevalSerialized := binary.LittleEndian.Uint64(fakeTxOutSerialized[:8])
fakeScriptPubKey := fakeTxOutSerialized[9:]
txFund2Out := wire.NewTxOut(int64(fakevalSerialized), fakeScriptPubKey)
psbt2, err := NewFromRawBytes(bytes.NewReader([]byte(expectedPsbtPartialB64)), true)
if err != nil {
t.Fatalf("Failed to load partial PSBT: %v", err)
}
psbtupdater2, err := NewUpdater(psbt2)
if err != nil {
t.Fatalf("Failed to create updater: %v", err)
}
psbtupdater2.AddInWitnessUtxo(txFund2Out, 1)
// Add redeemScript, which is the witnessscript/program:
redeemScript, err = hex.DecodeString("00208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
err = psbtupdater2.AddInRedeemScript(redeemScript, 1)
if err != nil {
t.Fatalf("Failed to add redeemscript to second input: %v", err)
}
// Add witnessScript, which here is multisig:
witnessScript, err := hex.DecodeString("522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
// To test multisig checks, add a nonsense version of the multisig script
witnessScriptNonsense, err := hex.DecodeString("52ffff")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
err = psbtupdater2.AddInWitnessScript(witnessScript, 1)
if err != nil {
t.Fatalf("Failed to add witnessscript to second input: %v", err)
}
// Construct the two partial signatures to be added
sig21, err := hex.DecodeString("3044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
pub21, err := hex.DecodeString("03089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
sig22, err := hex.DecodeString("3044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d201")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
pub22, err := hex.DecodeString("023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
res, err = psbtupdater2.Sign(1, sig21, pub21, nil, nil)
// Check that the finalization procedure fails here due to not
// meeting the multisig policy
success, err := MaybeFinalize(psbt2, 1)
if success {
t.Fatalf("Incorrectly succeeded in finalizing without sigs")
}
if err != ErrUnsupportedScriptType {
t.Fatalf("Got unexpected error type: %v", err)
}
res, err = psbtupdater2.Sign(1, sig22, pub22, nil, nil)
// Check that the finalization procedure also fails with a nonsense
// script
err = psbtupdater2.AddInWitnessScript(witnessScriptNonsense, 1)
if err != nil {
t.Fatalf("Failed to add witnessscript to second input: %v", err)
}
success, err = MaybeFinalize(psbt2, 1)
if success {
t.Fatalf("Incorrectly succeeded in finalizing with invalid msigscript")
}
if err != ErrUnsupportedScriptType {
t.Fatalf("Got unexpected error type: %v", err)
}
// Restore the correct witnessScript to complete correctly
err = psbtupdater2.AddInWitnessScript(witnessScript, 1)
if err != nil {
t.Fatalf("Failed to add witnessscript to second input: %v", err)
}
success, err = MaybeFinalize(psbt2, 1)
if !success {
if err != nil {
t.Fatalf("Failed to finalize second input: %v", err)
} else {
t.Fatalf("Input was not finalizable")
}
}
// Add a (fake) witnessOut descriptor field to one of the outputs,
// for coverage purposes (we aren't currently using this field)
psbtupdater2.AddOutWitnessScript([]byte{0xff, 0xff, 0xff}, 0)
// Sanity check; we should not have lost the additional output entry
// provided by Core initially
uoutput1 := psbtupdater2.Upsbt.Outputs[1]
if uoutput1.RedeemScript == nil {
t.Fatalf("PSBT should contain outredeemscript entry, but it does not.")
}
// Nor should we have lost our fake witnessscript output entry
uoutput2 := psbtupdater2.Upsbt.Outputs[0]
if uoutput2.WitnessScript == nil {
t.Fatalf("PSBT should contain outwitnessscript but it does not.")
}
var tx bytes.Buffer
networkSerializedTx, err := Extract(psbt2)
if err != nil {
t.Fatalf("unable to extract tx: %v", err)
}
if err := networkSerializedTx.Serialize(&tx); err != nil {
t.Fatalf("unable to encode tx: %v", err)
}
expectedSerializedTx, err := hex.DecodeString("020000000001024c5343eae283940d9d44b99cc73e3dc728c3821c38979045c34dc3eee9e5903901000000171600147aed39420a8b7ab98a83791327ccb70819d1fbe2ffffffffaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa0d000000232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903ffffffff0200860e70000000001600143597b9e7d7746e6d3c2a450536067555e5695705603131b50000000017a914922b975237d61604e9ea797f81ff7ef3321647a287024730440220546d182d00e45ef659c329dce6197dc19e0abc795e2c9279873f5a887998b273022044143113fc3475d04fc8d5113e0bbcb42d80514a9f1a2247e9b2a7878e20d449012102bb3ce35af26f4c826eab3e5fc263ef56871b26686a8a995599b7ee65766131040400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00000000")
if err != nil {
t.Fatalf("Failed to decode hex: %v", err)
}
if !bytes.Equal(expectedSerializedTx, tx.Bytes()) {
t.Fatalf("Failed to create correct network serialized "+
"transaction: expected %x, got %x",
expectedSerializedTx, tx.Bytes())
}
}
// TestMaybeFinalizeAll checks that a PSBT whose inputs are all p2wkh can
// only be finalized as a whole once every input carries a partial
// signature: MaybeFinalizeAll must fail with ErrNotFinalizable while an
// input is unsigned, succeed once all are signed, and leave the packet
// complete.
func TestMaybeFinalizeAll(t *testing.T) {
	// The following data is from a 3rd transaction from Core,
	// using 3 inputs, all p2wkh.
	imported := "cHNidP8BAKQCAAAAAzJyXH13IqBFvvZ7y1VSgUgkMvMoPgP5CfFNqsjQexKQAQAAAAD/////fMdLydu5bsoiHN9cFSaBL0Qnq2KLSKx0RA4b938CAgQAAAAAAP/////yKNgfsDAHr/zFz8R9k8EFI26allfg9DdE8Gzj6tGlegEAAAAA/////wHw9E0OAAAAABYAFDnPCRduiEWmmSc1j30SJ8k9u7PHAAAAAAAAAAAA"
	psbt1, err := NewFromRawBytes(bytes.NewReader([]byte(imported)), true)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	// update with the first input's utxo, taken from its funding
	// transaction
	fundingTxInput1, err := hex.DecodeString("020000000001017b260536a3c17aee49c41a9b36fdf01a418e0c04df06fbabcb0d4f590b95d1750100000017160014af82cd4409241b1de892726324bd780e3b5cd8aafeffffff02a85f9800000000001600149d21f8b306ddfd4dd035080689e88b4c3471e3cc801d2c0400000000160014d97ccd3dfb60820d7d33d862371ca5a73039bd560247304402201a1d2fdb5a7190b7fa59907769f0fc9c91fd3b34f6424acf5868a8ac21ec287102200a59b9d076ecf98c88f2196ed2be0aafff4966ead754041182fff5f92115a783012103604ffd31dc71db2e32c20f09eafe6353cd7515d3648aff829bb4879b553e30629a000000")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	fundingTxInput2, err := hex.DecodeString("020000000001019c27b886e420fcadb077706b0933efa8bb53e3a250c3ec45cfdba5e05e233f360100000000feffffff0200b4c404000000001600140853f50c7d2d5d2af326a75efdbc83b62551e89afce31c0d000000001600142d6936c082c35607ec3bdb334a932d928150b75802473044022000d962f5e5e6425f9de21da7ac65b4fd8af8f6bfbd33c7ba022827c73866b477022034c59935c1ea10b5ba335d93f55a200c2588ec6058b8c7aedd10d5cbc4654f99012102c30e9f0cd98f6a805464d6b8a326b5679b6c3262934341855ee0436eaedfd2869a000000")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	fundingTxInput3, err := hex.DecodeString("02000000012bf4331bb95df4eadb14f7a28db3fecdc5e87f08c29c2332b66338dd606699f60000000048473044022075ed43f508528da47673550a785702e9a93eca84a11faea91c4e9c66fcab3c9e022054a37610bd40b12263a5933188f062b718e007f290cecde2b6e41da3e1ebbddf01feffffff020c99a8240100000016001483bd916985726094d6d1c5b969722da580b5966a804a5d05000000001600140a2ee13a6696d75006af5e8a026ea49316087dae9a000000")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	psbtupdater1 := Updater{Upsbt: psbt1}
	// Deserialize each funding transaction and attach the spent output
	// as the WitnessUtxo of the corresponding PSBT input.
	tx := wire.NewMsgTx(2)
	err = tx.Deserialize(bytes.NewReader(fundingTxInput1))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	txFund1Out := tx.TxOut[1]
	psbtupdater1.AddInWitnessUtxo(txFund1Out, 0)
	tx = wire.NewMsgTx(2)
	err = tx.Deserialize(bytes.NewReader(fundingTxInput2))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	txFund2Out := tx.TxOut[0]
	psbtupdater1.AddInWitnessUtxo(txFund2Out, 1)
	tx = wire.NewMsgTx(2)
	err = tx.Deserialize(bytes.NewReader(fundingTxInput3))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	txFund3Out := tx.TxOut[1]
	psbtupdater1.AddInWitnessUtxo(txFund3Out, 2)
	// To be ready for finalization, we need to have partial signature
	// fields for each input. These are fixed, externally-produced test
	// vectors, so the hex decode errors are deliberately ignored.
	sig1, _ := hex.DecodeString("30440220027605ee8015970baf02a72652967a543e1b29a6882d799738ed1baee508822702203818a2f1b9770c46a473f47ad7ae90bcc129a5d047f00fae354c80197a7cf50601")
	pub1, _ := hex.DecodeString("03235fc1f9dc8bbf6fa3df35dfeb0dd486f2d488f139579885eb684510f004f6c1")
	sig2, _ := hex.DecodeString("304402206f5aea4621696610de48736b95a89b1d3a434a4e536d9aae65e039c477cf4c7202203b27a18b0f63be7d3bbf5be1bc2306a7ec8c2da12c2820ff07b73c7f3f1d4d7301")
	pub2, _ := hex.DecodeString("022011b496f0603a268b55a781c7be0c3849f605f09cb2e917ed44288b8144a752")
	sig3, _ := hex.DecodeString("3044022036dbc6f8f85a856e7803cbbcf0a97b7a74806fc592e92d7c06826f911610b98e0220111d43c4b20f756581791334d9c5cbb1a9c07558f28404cabf01c782897ad50501")
	pub3, _ := hex.DecodeString("0381772a80c69e275e20d7f014555b13031e9cacf1c54a44a67ab2bc7eba64f227")
	res, err := psbtupdater1.Sign(0, sig1, pub1, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Failed to add partial signature for input 0: %v %v", err, res)
	}
	res, err = psbtupdater1.Sign(1, sig2, pub2, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Failed to add partial signature for input 1: %v %v", err, res)
	}
	// Not ready for finalize all, check it fails:
	err = MaybeFinalizeAll(psbt1)
	if err != ErrNotFinalizable {
		t.Fatalf("Expected finalization failure, got: %v", err)
	}
	res, err = psbtupdater1.Sign(2, sig3, pub3, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Failed to add partial signature for input 2: %v %v", err, res)
	}
	// Since this input is now finalizable and is p2wkh only, we can do
	// all at once:
	err = MaybeFinalizeAll(psbt1)
	if err != nil {
		t.Fatalf("Failed to finalize PSBT: %v", err)
	}
	if !psbt1.IsComplete() {
		t.Fatalf("PSBT was finalized but not marked complete")
	}
}
// TestFromUnsigned verifies that building a PSBT from a deserialized
// unsigned transaction round-trips: the base64 encoding must match the
// one Core produces for the same transaction, and that reference
// encoding must itself parse back without error.
func TestFromUnsigned(t *testing.T) {
	rawTx, err := hex.DecodeString("00000000000101e165f072311e71825b47a4797221d7ae56d4b40b7707c540049aee43302448a40000000000feffffff0212f1126a0000000017a9143e836801b2b15aa193449d815c62d6c4b6227c898780778e060000000017a914ba4bdb0b07d67bc60f59c1f4fe54170565254974870000000000")
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	msgTx := wire.NewMsgTx(2)
	if err := msgTx.Deserialize(bytes.NewReader(rawTx)); err != nil {
		t.Fatalf("Error: %v", err)
	}
	packet, err := NewFromUnsignedTx(msgTx)
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	b64, err := packet.B64Encode()
	if err != nil {
		t.Fatalf("Unable to B64Encode Psbt: %v", err)
	}
	// Compare with output of Core:
	fromCoreB64 := "cHNidP8BAHMAAAAAAeFl8HIxHnGCW0ekeXIh165W1LQLdwfFQASa7kMwJEikAAAAAAD+////AhLxEmoAAAAAF6kUPoNoAbKxWqGTRJ2BXGLWxLYifImHgHeOBgAAAAAXqRS6S9sLB9Z7xg9ZwfT+VBcFZSVJdIcAAAAAAAAAAA=="
	if b64 != fromCoreB64 {
		t.Fatalf("Got incorrect b64: %v", b64)
	}
	// The Core-produced encoding must also be accepted by our parser.
	if _, err := NewFromRawBytes(bytes.NewReader([]byte(fromCoreB64)), true); err != nil {
		t.Fatalf("Error: %v", err)
	}
}
// TestNonWitnessToWitness signs three inputs (p2sh-p2wkh, p2wkh, legacy)
// that initially carry only a NonWitnessUtxo field, and checks that the
// finalized PSBT and extracted network-serialized transaction match the
// ones produced by Core for the same keys.
func TestNonWitnessToWitness(t *testing.T) {
	// We'll start with a PSBT produced by Core for which
	// the first input is signed and we'll provide the signatures for
	// the other three inputs; they are p2sh-p2wkh, p2wkh and legacy
	// respectively.
	// In each case we'll *first* attach the NonWitnessUtxo field,
	// and then call sign; in the first two but not the third case, the
	// NonWitnessUtxo will automatically be replaced with the WitnessUtxo.
	// Finally we'll check that the fully finalized PSBT produced matches
	// the one produced by Core for the same keys.
	psbt1B64 := "cHNidP8BAM4CAAAABHtBMXY+SX95xidmWJP67CTQ02FPUpbNhIxNplAdlvk+AQAAAAD/////G2mt4bX7+sVi1jdbuBa5Q/xsJdgzFCgdHHSZq3ewK6YAAAAAAP/////NrbZb7GzfAg4kOqFWAIbXabq4cAvtVGv+eecIIv1KggEAAAAA/////73s9ifprgErlaONH1rgpNs3l6+t+mz2XGTHsTVWCem/AQAAAAD/////AfAmclMAAAAAF6kUQwsEC5nzbdY5meON2ZQ2thmeFgOHAAAAAAABASAAZc0dAAAAABepFPAv3VTMu5+4WN+/HIji6kG9RpzKhwEHFxYAFLN3PqXSyIHWKqm4ah5m9erc/3OoAQhrAkcwRAIgH7kzGO2iskfCvX0dgkDuzfqJ7tAu7KUZOeykTkJ1SYkCIBv4QRZK1hLz45D0gs+Lz93OE4s37lkPVE+SlXZtazWEASEC3jaf19MMferBn0Bn5lxXJGOqoqmfSvnHclQvB5gJ3nEAAAAAAQAWABTB+Qcq6iqdSvvc6959kd7XHrhYFgA="
	// Funding transactions for inputs 1-3; fixed test vectors, so the
	// hex decode errors are deliberately ignored.
	nwutxo1ser, _ := hex.DecodeString("02000000017f7baa6b7377541c4aca372d2dce8e1098ba44aa8379b7ea87644ef27e08ec240000000048473044022072e3b94c33cb5128518cd3903cc0ca19e8c234ac6d462e01ae2bb1da7768ed7d0220167d7ad89f6e1bbb3b866ae6fc2f67b5e7d51eb4f33f7bfe3f4b2673856b815001feffffff0200c2eb0b0000000017a9142dd25c78db2e2e09376eab9cb342e1b03005abe487e4ab953e0000000017a914120b8ca3fb4c7f852e30d4e3714fb64027a0b4c38721020000")
	nwutxo2ser, _ := hex.DecodeString("0200000001f51b0bb5d945dd5532448a4d3fb88134d0bd90493813515f9c2ddb1fa15b9ba60000000048473044022047d83caf88d398245c006374bfa9f27ae968f5f51d640cacd5a214ed2cba397a02204519b26035496855f574a72b73bdcfa46d53995faf64c8f0ab394b628cc5383901feffffff020ccb9f3800000000160014e13544a3c718faa6c5ad7089a6660383c12b072700a3e11100000000160014a5439b477c116b79bd4c7c5131f3e58d54f27bb721020000")
	nwutxo3ser, _ := hex.DecodeString("0200000001eb452f0fc9a8c39edb79f7174763f3cb25dc56db455926e411719a115ef16509000000004847304402205aa80cc615eb4b3f6e89696db4eadd192581a6c46f5c09807d3d98ece1d77355022025007e58c1992a1e5d877ee324bfe0a65db26d29f80941cfa277ac3efbcad2a701feffffff02bce9a9320000000017a9141590e852ac66eb8798afeb2a5ed67c568a2d6561870084d717000000001976a914a57ea05eacf94900d5fb92bccd273cfdb90af36f88ac21020000")
	nwutxo1 := wire.NewMsgTx(2)
	err := nwutxo1.Deserialize(bytes.NewReader(nwutxo1ser))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	nwutxo2 := wire.NewMsgTx(2)
	err = nwutxo2.Deserialize(bytes.NewReader(nwutxo2ser))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	nwutxo3 := wire.NewMsgTx(2)
	err = nwutxo3.Deserialize(bytes.NewReader(nwutxo3ser))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	// import the PSBT
	psbt1, err := NewFromRawBytes(bytes.NewReader([]byte(psbt1B64)), true)
	if err != nil {
		t.Fatalf("Failed to create PSBT: %v", err)
	}
	// check that we recognize the finality of the first input
	if !isFinalized(psbt1, 0) {
		t.Fatalf("First input incorrectly read as not finalized.")
	}
	// Add NonWitnessUtxo fields for each of the other three inputs,
	// checking each updater call for failure.
	u := Updater{Upsbt: psbt1}
	if err := u.AddInNonWitnessUtxo(nwutxo1, 1); err != nil {
		t.Fatalf("Failed to add non-witness utxo for input 1: %v", err)
	}
	if err := u.AddInNonWitnessUtxo(nwutxo2, 2); err != nil {
		t.Fatalf("Failed to add non-witness utxo for input 2: %v", err)
	}
	if err := u.AddInNonWitnessUtxo(nwutxo3, 3); err != nil {
		t.Fatalf("Failed to add non-witness utxo for input 3: %v", err)
	}
	// Signatures for each of those inputs were created with Core:
	sig1, _ := hex.DecodeString("304402205676877e6162ce40a49ee5a74443cdc1e7915637c42da7b872c2ec2298fd371b02203c1d4a05b1e2a7a588d9ec9b8d4892d2cd59bebe0e777483477a0ec692ebbe6d01")
	pub1, _ := hex.DecodeString("02534f23cb88a048b649672967263bd7570312d5d31d066fa7b303970010a77b2b")
	redeemScript1, _ := hex.DecodeString("00142412be29368c0260cb841eecd9b59d7e01174aa1")
	sig2, _ := hex.DecodeString("3044022065d0a349709b8d8043cfd644cf6c196c1f601a22e1b3fdfbf8c0cc2a80fe2f1702207c87d36b666a8862e81ec5df288707f517d2f35ea1548feb82019de2c8de90f701")
	pub2, _ := hex.DecodeString("0257d88eaf1e79b72ea0a33ae89b57dae95ea68499bdc6770257e010ab899f0abb")
	sig3, _ := hex.DecodeString("30440220290abcaacbd759c4f989762a9ee3468a9231788aab8f50bf65955d8597d8dd3602204d7e394f4419dc5392c6edba6945837458dd750a030ac67a746231903a8eb7db01")
	pub3, _ := hex.DecodeString("0388025f50bb51c0469421ed13381f22f9d46a070ec2837e055c49c5876f0d0968")
	// Add the signatures and any scripts needed to the inputs
	res, err := u.Sign(1, sig1, pub1, redeemScript1, nil)
	if res != 0 || err != nil {
		t.Fatalf("Failed to sign at index %v res %v err %v", 1, res, err)
	}
	res, err = u.Sign(2, sig2, pub2, nil, nil)
	if res != 0 || err != nil {
		t.Fatalf("Failed to sign at index %v res %v err %v", 2, res, err)
	}
	res, err = u.Sign(3, sig3, pub3, nil, nil)
	if res != 0 || err != nil {
		t.Fatalf("Failed to sign at index %v res %v err %v", 3, res, err)
	}
	// Attempt to finalize the rest of the transaction
	_, err = MaybeFinalize(psbt1, 1)
	if err != nil {
		t.Fatalf("Failed to finalize input 1 %v", err)
	}
	_, err = MaybeFinalize(psbt1, 2)
	if err != nil {
		t.Fatalf("Failed to finalize input 2 %v", err)
	}
	_, err = MaybeFinalize(psbt1, 3)
	if err != nil {
		t.Fatalf("Failed to finalize input 3 %v", err)
	}
	// Finally we can check whether both the B64 encoding of the PSBT,
	// and the final network serialized signed transaction, that we generated
	// with Core using the 2 wallets, matches what this code produces:
	expectedFinalizedPsbt := "cHNidP8BAM4CAAAABHtBMXY+SX95xidmWJP67CTQ02FPUpbNhIxNplAdlvk+AQAAAAD/////G2mt4bX7+sVi1jdbuBa5Q/xsJdgzFCgdHHSZq3ewK6YAAAAAAP/////NrbZb7GzfAg4kOqFWAIbXabq4cAvtVGv+eecIIv1KggEAAAAA/////73s9ifprgErlaONH1rgpNs3l6+t+mz2XGTHsTVWCem/AQAAAAD/////AfAmclMAAAAAF6kUQwsEC5nzbdY5meON2ZQ2thmeFgOHAAAAAAABASAAZc0dAAAAABepFPAv3VTMu5+4WN+/HIji6kG9RpzKhwEHFxYAFLN3PqXSyIHWKqm4ah5m9erc/3OoAQhrAkcwRAIgH7kzGO2iskfCvX0dgkDuzfqJ7tAu7KUZOeykTkJ1SYkCIBv4QRZK1hLz45D0gs+Lz93OE4s37lkPVE+SlXZtazWEASEC3jaf19MMferBn0Bn5lxXJGOqoqmfSvnHclQvB5gJ3nEAAQEgAMLrCwAAAAAXqRQt0lx42y4uCTduq5yzQuGwMAWr5IcBBxcWABQkEr4pNowCYMuEHuzZtZ1+ARdKoQEIawJHMEQCIFZ2h35hYs5ApJ7lp0RDzcHnkVY3xC2nuHLC7CKY/TcbAiA8HUoFseKnpYjZ7JuNSJLSzVm+vg53dINHeg7Gkuu+bQEhAlNPI8uIoEi2SWcpZyY711cDEtXTHQZvp7MDlwAQp3srAAEBHwCj4REAAAAAFgAUpUObR3wRa3m9THxRMfPljVTye7cBCGsCRzBEAiBl0KNJcJuNgEPP1kTPbBlsH2AaIuGz/fv4wMwqgP4vFwIgfIfTa2ZqiGLoHsXfKIcH9RfS816hVI/rggGd4sjekPcBIQJX2I6vHnm3LqCjOuibV9rpXqaEmb3GdwJX4BCriZ8KuwABAL0CAAAAAetFLw/JqMOe23n3F0dj88sl3FbbRVkm5BFxmhFe8WUJAAAAAEhHMEQCIFqoDMYV60s/bolpbbTq3RklgabEb1wJgH09mOzh13NVAiAlAH5YwZkqHl2HfuMkv+CmXbJtKfgJQc+id6w++8rSpwH+////ArzpqTIAAAAAF6kUFZDoUqxm64eYr+sqXtZ8VootZWGHAITXFwAAAAAZdqkUpX6gXqz5SQDV+5K8zSc8/bkK82+IrCECAAABB2pHMEQCICkKvKrL11nE+Yl2Kp7jRoqSMXiKq49Qv2WVXYWX2N02AiBNfjlPRBncU5LG7bppRYN0WN11CgMKxnp0YjGQOo632wEhA4gCX1C7UcBGlCHtEzgfIvnUagcOwoN+BVxJxYdvDQloAAEAFgAUwfkHKuoqnUr73OvefZHe1x64WBYA"
	calculatedPsbt, err := u.Upsbt.B64Encode()
	if err != nil {
		t.Fatalf("Failed to base64 encode")
	}
	if expectedFinalizedPsbt != calculatedPsbt {
		t.Fatalf("Failed to generate correct PSBT")
	}
	expectedNetworkSer, _ := hex.DecodeString("020000000001047b4131763e497f79c627665893faec24d0d3614f5296cd848c4da6501d96f93e0100000017160014b3773ea5d2c881d62aa9b86a1e66f5eadcff73a8ffffffff1b69ade1b5fbfac562d6375bb816b943fc6c25d83314281d1c7499ab77b02ba600000000171600142412be29368c0260cb841eecd9b59d7e01174aa1ffffffffcdadb65bec6cdf020e243aa1560086d769bab8700bed546bfe79e70822fd4a820100000000ffffffffbdecf627e9ae012b95a38d1f5ae0a4db3797afadfa6cf65c64c7b1355609e9bf010000006a4730440220290abcaacbd759c4f989762a9ee3468a9231788aab8f50bf65955d8597d8dd3602204d7e394f4419dc5392c6edba6945837458dd750a030ac67a746231903a8eb7db01210388025f50bb51c0469421ed13381f22f9d46a070ec2837e055c49c5876f0d0968ffffffff01f02672530000000017a914430b040b99f36dd63999e38dd99436b6199e1603870247304402201fb93318eda2b247c2bd7d1d8240eecdfa89eed02eeca51939eca44e4275498902201bf841164ad612f3e390f482cf8bcfddce138b37ee590f544f9295766d6b3584012102de369fd7d30c7deac19f4067e65c572463aaa2a99f4af9c772542f079809de710247304402205676877e6162ce40a49ee5a74443cdc1e7915637c42da7b872c2ec2298fd371b02203c1d4a05b1e2a7a588d9ec9b8d4892d2cd59bebe0e777483477a0ec692ebbe6d012102534f23cb88a048b649672967263bd7570312d5d31d066fa7b303970010a77b2b02473044022065d0a349709b8d8043cfd644cf6c196c1f601a22e1b3fdfbf8c0cc2a80fe2f1702207c87d36b666a8862e81ec5df288707f517d2f35ea1548feb82019de2c8de90f701210257d88eaf1e79b72ea0a33ae89b57dae95ea68499bdc6770257e010ab899f0abb0000000000")
	tx, err := Extract(psbt1)
	if err != nil {
		t.Fatalf("Failed to extract: %v", err)
	}
	var b bytes.Buffer
	if err := tx.Serialize(&b); err != nil {
		t.Fatalf("unable to encode tx: %v", err)
	}
	if !bytes.Equal(expectedNetworkSer, b.Bytes()) {
		t.Fatalf("Expected serialized transaction was not produced: %x", b.Bytes())
	}
}
// TestEmptyInputSerialization tests the special serialization case for a wire
// transaction that has no inputs: such a PSBT must survive a round trip
// through Serialize and NewFromRawBytes.
func TestEmptyInputSerialization(t *testing.T) {
	// Build a brand-new, completely empty PSBT and serialize it. With no
	// inputs present, the wire package treats it as a non-witness tx.
	packet, err := New(nil, nil, 2, 0, nil)
	if err != nil {
		t.Fatalf("failed to create empty PSBT: %v", err)
	}
	var serialized bytes.Buffer
	if err := packet.Serialize(&serialized); err != nil {
		t.Fatalf("failed to serialize empty PSBT: %v", err)
	}
	// Read it back. Because there are no inputs, the wire package first
	// guesses the witness format, which fails for this transaction; the
	// parser's workaround retries with the non-witness format, so the
	// round trip must still succeed.
	decoded, err := NewFromRawBytes(&serialized, false)
	if err != nil {
		t.Fatalf("failed to deserialize empty PSBT: %v", err)
	}
	if len(decoded.UnsignedTx.TxIn) != 0 || len(decoded.UnsignedTx.TxOut) != 0 {
		t.Fatalf("deserialized transaction not empty")
	}
}
psbt: test full scenario of CVE-2020-14199 patched wallet
We add a test verifying that this library supports the full signing
scenario of a wallet that has been patched against the CVE-2020-14199
vulnerability.
// Copyright (c) 2018 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package psbt
import (
"bytes"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
)
// Test vectors from:
// https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#test-vectors
// createPsbtFromSignedTx is a utility function to create a PSBT from an
// already-signed transaction, so we can test reconstructing, signing and
// extracting it. Returned are: an unsigned transaction serialization, a list
// of scriptSigs, one per input, and a list of witnesses, one per input.
// createPsbtFromSignedTx is a utility function to create a PSBT from an
// already-signed transaction, so we can test reconstructing, signing and
// extracting it. Returned are: an unsigned transaction serialization, a list
// of scriptSigs, one per input, and a list of witnesses, one per input.
func createPsbtFromSignedTx(serializedSignedTx []byte) (
	*Packet, [][]byte, []wire.TxWitness, error) {

	signedTx := wire.NewMsgTx(2)
	if err := signedTx.Deserialize(bytes.NewReader(serializedSignedTx)); err != nil {
		return nil, nil, nil, err
	}
	numInputs := len(signedTx.TxIn)
	scriptSigs := make([][]byte, 0, numInputs)
	witnesses := make([]wire.TxWitness, 0, numInputs)

	// Work on a deep copy so the original signed inputs stay intact while
	// we collect their signing data and blank out the copy's inputs.
	strippedTx := signedTx.Copy()
	for idx, txIn := range strippedTx.TxIn {
		scriptSigs = append(scriptSigs, signedTx.TxIn[idx].SignatureScript)
		witnesses = append(witnesses, signedTx.TxIn[idx].Witness)
		txIn.SignatureScript = nil
		txIn.Witness = nil
	}

	// Outputs carry only (value, scriptPubkey), so nothing to strip there;
	// strippedTx is now the signed tx minus all signing data.
	unsignedPsbt, err := NewFromUnsignedTx(strippedTx)
	if err != nil {
		return nil, nil, nil, err
	}
	return unsignedPsbt, scriptSigs, witnesses, nil
}
// validPsbtHex maps a case index to a hex-encoded, BIP-174 serialized
// PSBT that is expected to parse without error.
// NOTE(review): these look like they correspond to the BIP-174 "valid"
// test vectors referenced in the comment above — confirm against the BIP
// before adding or reordering entries.
var validPsbtHex = map[int]string{
	0: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab300000000000000",
	1: "70736274ff0100a00200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40000000000feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac000000000001076a47304402204759661797c01b036b25928948686218347d89864b719e1f7fcf57d1e511658702205309eabf56aa4d8891ffd111fdf1336f3a29da866d7f8486d75546ceedaf93190121035cdc61fc7ba971c0b501a646a2a83b102cb43881217ca682dc86e2d73fa882920001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb82308000000",
	2: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000001030401000000000000",
	3: "70736274ff0100a00200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40000000000feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac00000000000100df0200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf6000000006a473044022070b2245123e6bf474d60c5b50c043d4c691a5d2435f09a34a7662a9dc251790a022001329ca9dacf280bdf30740ec0390422422c81cb45839457aeb76fc12edd95b3012102657d118d3357b8e0f4c2cd46db7b39f6d9c38d9a70abcb9b2de5dc8dbfe4ce31feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e13000001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb8230800220202ead596687ca806043edc3de116cdf29d5e9257c196cd055cf698c8d02bf24e9910b4a6ba670000008000000080020000800022020394f62be9df19952c5587768aeb7698061ad2c4a25c894f47d8c162b4d7213d0510b4a6ba6700000080010000800200008000",
	4: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
	5: "70736274ff01003f0200000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffff010000000000000000036a010000000000000a0f0102030405060708090f0102030405060708090a0b0c0d0e0f0000",
}
// invalidPsbtHex maps a test-vector index to a hex-encoded PSBT
// serialization that the parser must reject. The comment above each
// entry names the specific BIP-174 rule that the vector violates.
// Indices 0-17 come from the BIP-174 test vectors; 18 and 19 are
// additional cases (see comments below).
var invalidPsbtHex = map[int]string{
// wire format, not PSBT format
0: "0200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf6000000006a473044022070b2245123e6bf474d60c5b50c043d4c691a5d2435f09a34a7662a9dc251790a022001329ca9dacf280bdf30740ec0390422422c81cb45839457aeb76fc12edd95b3012102657d118d3357b8e0f4c2cd46db7b39f6d9c38d9a70abcb9b2de5dc8dbfe4ce31feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300",
// missing outputs
1: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000000",
// Filled in scriptSig in unsigned tx
2: "70736274ff0100fd0a010200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be4000000006a47304402204759661797c01b036b25928948686218347d89864b719e1f7fcf57d1e511658702205309eabf56aa4d8891ffd111fdf1336f3a29da866d7f8486d75546ceedaf93190121035cdc61fc7ba971c0b501a646a2a83b102cb43881217ca682dc86e2d73fa88292feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac00000000000001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb82308000000",
// No unsigned tx
3: "70736274ff000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000000",
// Duplicate keys in an input
4: "70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000001003f0200000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffff010000000000000000036a010000000000000000",
// Invalid global transaction typed key
5: "70736274ff020001550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid input witness utxo typed key
6: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac000000000002010020955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid pubkey length for input partial signature typed key
7: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87210203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd46304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid redeemscript typed key
8: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a01020400220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid witness script typed key
9: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d568102050047522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid bip32 typed key
10: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae210603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd10b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid non-witness utxo typed key
11: "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f0000000000020000bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
// Invalid final scriptsig typed key
12: "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000020700da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
// Invalid final script witness typed key
13: "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903020800da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
// Invalid pubkey in output BIP32 derivation paths typed key
14: "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00210203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca58710d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
// Invalid input sighash type typed key
15: "70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c0203000100000000010016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a65010125512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d2bd57f8a8751ae00",
// Invalid output redeemscript typed key
16: "70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c0002000016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a65010125512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d2bd57f8a8751ae00",
// Invalid output witnessScript typed key
17: "70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c00010016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a6521010025512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d2bd57f8a8751ae00",
// Additional cases outside the existing test vectors.
// Invalid duplicate PartialSig
18: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a01220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
// Invalid duplicate BIP32 derivation (different derivs, same key)
19: "70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba670000008000000080050000800000",
}
// TestReadValidPsbtAndReserialize checks the round-trip property of the
// parser: every valid PSBT serialization must parse into a Psbt struct,
// and serializing that struct must reproduce the original bytes exactly.
func TestReadValidPsbtAndReserialize(t *testing.T) {
	for _, encoded := range validPsbtHex {
		serialized, err := hex.DecodeString(encoded)
		if err != nil {
			t.Fatalf("Unable to decode hex: %v", err)
		}

		// Parse the raw serialization into a Psbt struct.
		packet, err := NewFromRawBytes(
			bytes.NewReader(serialized), false,
		)
		if err != nil {
			t.Fatalf("unable to parse psbt: %v", err)
		}

		t.Logf("Successfully parsed test, got transaction: %v",
			spew.Sdump(packet.UnsignedTx))

		// Re-serialize and require a byte-for-byte match with the
		// original input.
		var buf bytes.Buffer
		if err := packet.Serialize(&buf); err != nil {
			t.Fatalf("Unable to serialize created Psbt: %v", err)
		}

		if !bytes.Equal(buf.Bytes(), serialized) {
			t.Fatalf("Serialized PSBT didn't match: %v",
				hex.EncodeToString(buf.Bytes()))
		}
	}
}
// TestReadInvalidPsbt verifies that every known-invalid PSBT
// serialization in invalidPsbtHex is rejected by the parser.
func TestReadInvalidPsbt(t *testing.T) {
	for _, encoded := range invalidPsbtHex {
		serialized, decodeErr := hex.DecodeString(encoded)
		if decodeErr != nil {
			t.Fatalf("Unable to decode hex: %v", decodeErr)
		}

		// Parsing must fail for each of these vectors.
		reader := bytes.NewReader(serialized)
		if _, err := NewFromRawBytes(reader, false); err != nil {
			t.Logf("Correctly got error: %v", err)
			continue
		}

		t.Fatalf("Incorrectly validated psbt: %v",
			hex.EncodeToString(serialized))
	}
}
// TestSanityCheck exercises the sanity checks that guard Updater
// creation and field insertion: a PSBT input must never carry both a
// witness utxo and a non-witness utxo, and witness-only fields must not
// be attached to non-witness inputs.
//
// Fix: the original version discarded the error from the second
// NewFromRawBytes call (the result of the re-decode was passed straight
// to NewUpdater without checking err), which would have masked a parse
// failure. The error is now checked.
func TestSanityCheck(t *testing.T) {
	// TODO(guggero): Remove when checks for segwit v1 are implemented.
	t.Skip("Skipping PSBT sanity checks for segwit v0.")

	// Test strategy:
	// 1. Create an invalid PSBT from a serialization
	// Then ensure that the sanity check fails.
	// 2. Create a valid PSBT from a serialization
	// Then create an updater, add a witness utxo to a non-witness
	// utxo.
	// Then ensure that the sanity check fails.
	// Then add a witnessScript field to a non-witness utxo.
	// Then ensure that the sanity check fails.

	// index 1 contains a psbt with two inputs, first non-witness,
	// second witness.
	psbtraw1, err := hex.DecodeString(validPsbtHex[1])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	psbt1, err := NewFromRawBytes(bytes.NewReader(psbtraw1), false)
	if err != nil {
		t.Fatalf("Unable to create Psbt struct: %v", err)
	}

	// Add a non-witness utxo field to input2 using raw insertion function,
	// so that it becomes invalid, then NewUpdater should fail.
	nonWitnessUtxoRaw, err := hex.DecodeString(
		CUTestHexData["NonWitnessUtxo"],
	)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	nonWitnessUtxo := wire.NewMsgTx(2)
	err = nonWitnessUtxo.Deserialize(bytes.NewReader(nonWitnessUtxoRaw))
	if err != nil {
		t.Fatalf("Unable to deserialize: %v", err)
	}
	inputs1 := &psbt1.Inputs[1]
	inputs1.NonWitnessUtxo = nonWitnessUtxo

	// The PSBT is now in an inconsistent state; Updater creation should
	// fail.
	_, err = NewUpdater(psbt1)
	if err == nil {
		t.Fatalf("Failed to identify invalid PSBT state ( " +
			"witness, non-witness fields)")
	}

	// Overwrite back with the correct psbt.
	psbtraw1, err = hex.DecodeString(validPsbtHex[1])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	psbt1, err = NewFromRawBytes(bytes.NewReader(psbtraw1), false)
	if err != nil {
		// Previously this error was silently discarded; a parse
		// failure here must abort the test.
		t.Fatalf("Unable to create Psbt struct: %v", err)
	}
	updater, err := NewUpdater(psbt1)
	if err != nil {
		t.Fatalf("Unable to create Updater: %v", err)
	}

	// Create a fake non-witness utxo field to overlap with
	// the existing witness input at index 1.
	tx := wire.NewMsgTx(2)
	err = tx.Deserialize(bytes.NewReader(nonWitnessUtxoRaw))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	err = updater.AddInNonWitnessUtxo(tx, 1)
	if err == nil {
		t.Fatalf("Incorrectly accepted Psbt with conflicting witness " +
			"and non-witness utxo entries in the same input.")
	}

	// Now we try again; this time we try to add a witnessScript
	// key-value pair to an input which is non-witness, which should
	// also be rejected.
	psbt2, err := NewFromRawBytes(
		bytes.NewReader(psbtraw1), false,
	)
	if err != nil {
		t.Fatalf("Unable to create Psbt struct: %v", err)
	}
	updater2, err := NewUpdater(psbt2)
	if err != nil {
		t.Fatalf("Got error creating updater2: %v", err)
	}
	witnessScript, err := hex.DecodeString(
		CUTestHexData["Input2WitnessScript"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater2.AddInWitnessScript(witnessScript, 0)
	if err == nil {
		t.Fatalf("Incorrectly accepted adding witness script field " +
			"to non-witness utxo")
	}
}
// Data for creation and updating tests
// ===============================================================================

// CUTestHexData holds hex-encoded fixtures used by the creator/updater
// tests: output scripts and txids for building the unsigned transaction,
// serialized utxo fields, redeem/witness scripts, and full expected PSBT
// serializations captured after each updating step.
var CUTestHexData = map[string]string{
"scriptPubkey1": "0014d85c2b71d0060b09c9886aeb815e50991dda124d",
"scriptPubkey2": "001400aea9a2e5f0f876a588df5546e8742d1d87008f",
"txid1": "75ddabb27b8845f5247975c8a5ba7c6f336c4570708ebe230caf6db5217ae858",
"txid2": "1dea7cd05979072a3578cab271c02244ea8a090bbb46aa680a65ecd027048d83",
// Expected serialization of the freshly created (empty-fields) PSBT.
"COPsbtHex": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f000000000000000000",
// Serialized previous transaction used as a non-witness utxo field.
"NonWitnessUtxo": "0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000",
// Serialized TxOut (value + scriptPubKey) used as a witness utxo field.
"WitnessUtxo": "00c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887",
// After adding witnessutxo and nonwitness utxo to inputs:
"UOPsbtHex": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887000000",
"Input1RedeemScript": "5221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae",
"Input2RedeemScript": "00208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903",
"Input2WitnessScript": "522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae",
// After adding redeemscripts and witness scripts to inputs:
"UOPsbtHex2": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae000000",
// After adding bip32 derivations to inputs and outputs:
"UOPsbtHex3": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
//After adding sighash types to inputs
"UOPsbtHex4": "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
}
// Just one example sanity check of B64 construction; after sighash appending above
// CUTestB64Data holds the expected base64 encoding of the fully-updated
// PSBT (after sighash types have been appended); it is compared against
// the output of B64Encode() at the end of TestPsbtCreator.
var CUTestB64Data = map[string]string{
"UOPsbtB644": "cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAABAwQBAAAAAQRHUiEClYO/Oa4KYJdHrRma3dY0+mEIVZ1sXNObTCGD8auW4H8hAtq2H/SaFNtqfQKwzR+7ePxLGDErW05U2uTbovv+9TbXUq4iBgKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfxDZDGpPAAAAgAAAAIAAAACAIgYC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtcQ2QxqTwAAAIAAAACAAQAAgAABASAAwusLAAAAABepFLf1+vQOPUClpFmx2zU18rcvqSHohwEDBAEAAAABBCIAIIwjUxc3Q7WV37Sge3K6jkLjeX2nTof+fZ10l+OyAokDAQVHUiEDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwhAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zUq4iBgI63ZBPPW3PWd25BrDe4jUpt/+57VDl6GFRkmhgIh8OcxDZDGpPAAAAgAAAAIADAACAIgYDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwQ2QxqTwAAAIAAAACAAgAAgAAiAgOppMN/WZbTqiXbrGtXCvBlA5RJKUJGCzVHU+2e7KWHcRDZDGpPAAAAgAAAAIAEAACAACICAn9jmXV9Lv9VoTatAsaEsYOLZVbl8bazQoKpS2tQBRCWENkMak8AAACAAAAAgAUAAIAA",
}
// CUTestAmountData holds the satoshi amounts used by TestPsbtCreator:
// amount1/amount2 fund the two created outputs, and amount3 is the value
// of the witness UTXO attached to the second input.
var CUTestAmountData = map[string]int64{
"amount1": 149990000,
"amount2": 100000000,
"amount3": 200000000,
}
// CUTestIndexData holds output-index fixtures for the two previous
// outputs being spent. NOTE(review): the visible tests in this chunk use
// literal uint32(0)/uint32(1) instead of this map — confirm it is still
// referenced elsewhere before removing.
var CUTestIndexData = map[string]uint32{
"index1": 0,
"index2": 1,
}
// CUMasterKeyFingerPrint is the 4-byte BIP32 master key fingerprint, hex
// encoded; TestPsbtCreator decodes it and interprets the bytes with
// binary.LittleEndian.Uint32 before attaching derivation entries.
var CUMasterKeyFingerPrint = "d90c6a4f"
// CUTestPathData holds BIP32 derivation paths (all elements hardened via
// the 0x80000000 offset) used for the input and output Bip32Derivation
// fields in TestPsbtCreator: dpath1..dpath4 for the two inputs,
// dpath5/dpath6 for the two outputs.
var CUTestPathData = map[string][]uint32{
"dpath1": {0 + 0x80000000, 0 + 0x80000000, 0 + 0x80000000},
"dpath2": {0 + 0x80000000, 0 + 0x80000000, 1 + 0x80000000},
"dpath3": {0 + 0x80000000, 0 + 0x80000000, 2 + 0x80000000},
"dpath4": {0 + 0x80000000, 0 + 0x80000000, 3 + 0x80000000},
"dpath5": {0 + 0x80000000, 0 + 0x80000000, 4 + 0x80000000},
"dpath6": {0 + 0x80000000, 0 + 0x80000000, 5 + 0x80000000},
}
// CUTestPubkeyData holds hex-encoded compressed (33-byte, 02/03-prefixed)
// public keys paired with the paths in CUTestPathData when adding BIP32
// derivation entries in TestPsbtCreator.
var CUTestPubkeyData = map[string]string{
"pub1": "029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f",
"pub2": "02dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d7",
"pub3": "03089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc",
"pub4": "023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73",
"pub5": "03a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca58771",
"pub6": "027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b50051096",
}
// ===============================================================================
// TestPsbtCreator walks a PSBT through the Creator and Updater roles: it
// creates a 2-input/2-output PSBT, then attaches UTXOs, redeem/witness
// scripts, BIP32 derivations and sighash types, checking the serialized
// result against known-good vectors after each stage.
//
// Review fixes: the error from the valid New() call is now checked before
// use; the master-key fingerprint decode error is checked BEFORE calling
// binary.LittleEndian.Uint32 (which would panic on a short slice); the
// invalid-version failure message now names the version actually passed (0).
func TestPsbtCreator(t *testing.T) {
	spkOut1, err := hex.DecodeString(CUTestHexData["scriptPubkey1"])
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	spkOut2, err := hex.DecodeString(CUTestHexData["scriptPubkey2"])
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	out1 := wire.NewTxOut(CUTestAmountData["amount1"], spkOut1)
	out2 := wire.NewTxOut(CUTestAmountData["amount2"], spkOut2)
	outputs := []*wire.TxOut{out1, out2}
	hash1, err := chainhash.NewHashFromStr(CUTestHexData["txid1"])
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	prevOut1 := wire.NewOutPoint(hash1, uint32(0))
	hash2, err := chainhash.NewHashFromStr(CUTestHexData["txid2"])
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	prevOut2 := wire.NewOutPoint(hash2, uint32(1))
	inputs := []*wire.OutPoint{prevOut1, prevOut2}
	// Check creation fails with invalid sequences (one nSequence entry
	// for two inputs):
	nSequences := []uint32{wire.MaxTxInSequenceNum}
	_, err = New(inputs, outputs, int32(3), uint32(0), nSequences)
	if err == nil {
		t.Fatalf("Did not error when creating transaction with " +
			"invalid nSequences")
	}
	nSequences = append(nSequences, wire.MaxTxInSequenceNum)
	// Check creation fails with invalid version
	_, err = New(inputs, outputs, int32(0), uint32(0), nSequences)
	if err == nil {
		t.Fatalf("Did not error when creating transaction with " +
			"invalid version (0)")
	}
	// Use valid data to create:
	cPsbt, err := New(inputs, outputs, int32(2), uint32(0), nSequences)
	if err != nil {
		t.Fatalf("Unable to create Psbt: %v", err)
	}
	var b bytes.Buffer
	err = cPsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize created Psbt: %v", err)
	}
	if CUTestHexData["COPsbtHex"] != hex.EncodeToString(b.Bytes()) {
		t.Fatalf("Failed to create expected psbt, instead got: %v",
			hex.EncodeToString(b.Bytes()))
	}
	// Now simulate passing the created PSBT to an Updater
	updater, err := NewUpdater(cPsbt)
	if err != nil {
		t.Fatalf("Unable to create Updater object")
	}
	tx := wire.NewMsgTx(2)
	nonWitnessUtxoHex, err := hex.DecodeString(
		CUTestHexData["NonWitnessUtxo"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = tx.Deserialize(bytes.NewReader(nonWitnessUtxoHex))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	witnessUtxoHex, err := hex.DecodeString(
		CUTestHexData["WitnessUtxo"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	// Skip the leading bytes of the serialized txout so that only the
	// raw pkScript remains — presumably the 8-byte value plus the
	// script-length prefix; TODO confirm against the fixture layout.
	txout := wire.TxOut{Value: CUTestAmountData["amount3"],
		PkScript: witnessUtxoHex[9:]}
	err = updater.AddInNonWitnessUtxo(tx, 0)
	if err != nil {
		t.Fatalf("Unable to add NonWitness Utxo to inputs: %v", err)
	}
	err = updater.AddInWitnessUtxo(&txout, 1)
	if err != nil {
		t.Fatalf("Unable to add Witness Utxo to inputs: %v", err)
	}
	b.Reset()
	err = updater.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if CUTestHexData["UOPsbtHex"] != hex.EncodeToString(b.Bytes()) {
		t.Fatal("Failed to create valid updated PSBT after utxos")
	}
	input1RedeemScript, err := hex.DecodeString(CUTestHexData["Input1RedeemScript"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater.AddInRedeemScript(input1RedeemScript, 0)
	if err != nil {
		t.Fatalf("Unable to add redeem script: %v", err)
	}
	input2RedeemScript, err := hex.DecodeString(CUTestHexData["Input2RedeemScript"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater.AddInRedeemScript(input2RedeemScript, 1)
	if err != nil {
		t.Fatalf("Unable to add redeem script: %v", err)
	}
	input2WitnessScript, err := hex.DecodeString(CUTestHexData["Input2WitnessScript"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater.AddInWitnessScript(input2WitnessScript, 1)
	if err != nil {
		t.Fatalf("Unable to add witness script: %v", err)
	}
	b.Reset()
	err = updater.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if CUTestHexData["UOPsbtHex2"] != hex.EncodeToString(b.Bytes()) {
		t.Fatal("Failed to create valid updated PSBT after redeem scripts")
	}
	// Decode and validate the master key fingerprint BEFORE reading it;
	// Uint32 would panic on a short slice if the decode had failed.
	masterKey, err := hex.DecodeString(CUMasterKeyFingerPrint)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	masterKeyInt := binary.LittleEndian.Uint32(masterKey)
	input1Path1 := CUTestPathData["dpath1"]
	input1Path2 := CUTestPathData["dpath2"]
	input1Key1, err := hex.DecodeString(CUTestPubkeyData["pub1"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	input1Key2, err := hex.DecodeString(CUTestPubkeyData["pub2"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	err = updater.AddInBip32Derivation(masterKeyInt, input1Path1, input1Key1, 0)
	if err != nil {
		t.Fatal("Failed to add first key derivation for input 1")
	}
	err = updater.AddInBip32Derivation(masterKeyInt, input1Path2, input1Key2, 0)
	if err != nil {
		t.Fatal("Failed to add second key derivation for input 1")
	}
	input2Path1 := CUTestPathData["dpath3"]
	input2Path2 := CUTestPathData["dpath4"]
	input2Key1, err := hex.DecodeString(CUTestPubkeyData["pub3"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	input2Key2, err := hex.DecodeString(CUTestPubkeyData["pub4"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	// check invalid pubkeys are not accepted
	borkedInput2Key1 := append([]byte{0xff}, input2Key1...)
	err = updater.AddInBip32Derivation(masterKeyInt, input2Path1,
		borkedInput2Key1, 1)
	if err == nil {
		t.Fatalf("Expected invalid pubkey, got: %v", err)
	}
	err = updater.AddInBip32Derivation(masterKeyInt, input2Path1, input2Key1, 1)
	if err != nil {
		t.Fatal("Failed to add first key derivation for input 2")
	}
	err = updater.AddInBip32Derivation(masterKeyInt, input2Path2, input2Key2, 1)
	if err != nil {
		t.Fatal("Failed to add second key derivation for input 2")
	}
	output1Key1, err := hex.DecodeString(CUTestPubkeyData["pub5"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	output1Path := CUTestPathData["dpath5"]
	// check invalid pubkeys are not accepted
	borkedOutput1Key1 := append([]byte{0xab}, output1Key1[:13]...)
	err = updater.AddOutBip32Derivation(masterKeyInt, output1Path,
		borkedOutput1Key1, 0)
	if err == nil {
		t.Fatalf("Expected invalid pubkey, got: %v", err)
	}
	err = updater.AddOutBip32Derivation(masterKeyInt, output1Path, output1Key1, 0)
	if err != nil {
		t.Fatal("Failed to add key to first output")
	}
	output2Key1, err := hex.DecodeString(CUTestPubkeyData["pub6"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	output2Path := CUTestPathData["dpath6"]
	err = updater.AddOutBip32Derivation(masterKeyInt, output2Path, output2Key1, 1)
	if err != nil {
		t.Fatal("Failed to add key to second output")
	}
	b.Reset()
	err = updater.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if CUTestHexData["UOPsbtHex3"] != hex.EncodeToString(b.Bytes()) {
		t.Fatal("Failed to create valid updated PSBT after BIP32 derivations")
	}
	err = updater.AddInSighashType(txscript.SigHashType(1), 0)
	if err != nil {
		t.Fatal("Failed to add sighash type to first input")
	}
	err = updater.AddInSighashType(txscript.SigHashType(1), 1)
	if err != nil {
		t.Fatal("Failed to add sighash type to second input")
	}
	b.Reset()
	err = updater.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if CUTestHexData["UOPsbtHex4"] != hex.EncodeToString(b.Bytes()) {
		t.Fatal("Failed to create valid updated PSBT after sighash types")
	}
	b644, err := updater.Upsbt.B64Encode()
	if err != nil {
		t.Fatalf("Unable to B64Encode updated Psbt: %v", err)
	}
	if b644 != CUTestB64Data["UOPsbtB644"] {
		t.Fatalf("Failed to base64 encode updated PSBT after sighash "+
			"types: %v", b644)
	}
}
// Signing test data taken from
// https://github.com/achow101/bitcoin/blob/020628e3a4e88e36647eaf92bac4b3552796ac6a/test/functional/data/rpc_psbt.json
// signerPsbtData holds signer-role test vectors: WIF private keys, the
// input PSBTs (base64 / hex), and the expected hex serializations after
// the corresponding partial signatures have been attached.
//
// Review fix: the "signer1Result" and "signer2Result" literals were
// broken across two source lines (an invalid Go string literal); each
// has been rejoined into a single literal.
var signerPsbtData = map[string]string{
	"signer1Privkey1": "cP53pDbR5WtAD8dYAW9hhTjuvvTVaEiQBdrz9XPrgLBeRFiyCbQr",
	"signer1Privkey2": "cR6SXDoyfQrcp4piaiHE97Rsgta9mNhGTen9XeonVgwsh4iSgw6d",
	"signer1PsbtB64":  "cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAABBEdSIQKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfyEC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtdSriIGApWDvzmuCmCXR60Zmt3WNPphCFWdbFzTm0whg/GrluB/ENkMak8AAACAAAAAgAAAAIAiBgLath/0mhTban0CsM0fu3j8SxgxK1tOVNrk26L7/vU21xDZDGpPAAAAgAAAAIABAACAAQMEAQAAAAABASAAwusLAAAAABepFLf1+vQOPUClpFmx2zU18rcvqSHohwEEIgAgjCNTFzdDtZXftKB7crqOQuN5fadOh/59nXSX47ICiQMBBUdSIQMIncEMesbbVPkTKa9hczPbOIzq0MIx9yM3nRuZAwsC3CECOt2QTz1tz1nduQaw3uI1Kbf/ue1Q5ehhUZJoYCIfDnNSriIGAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zENkMak8AAACAAAAAgAMAAIAiBgMIncEMesbbVPkTKa9hczPbOIzq0MIx9yM3nRuZAwsC3BDZDGpPAAAAgAAAAIACAACAAQMEAQAAAAAiAgOppMN/WZbTqiXbrGtXCvBlA5RJKUJGCzVHU+2e7KWHcRDZDGpPAAAAgAAAAIAEAACAACICAn9jmXV9Lv9VoTatAsaEsYOLZVbl8bazQoKpS2tQBRCWENkMak8AAACAAAAAgAUAAIAA",
	"signer1Result":   "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000002202029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887220203089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f010103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	"signer2Privkey1": "cT7J9YpCwY3AVRFSjN6ukeEeWY6mhpbJPxRaDaP5QTdygQRxP9Au",
	"signer2Privkey2": "cNBc3SWUip9PPm1GjRoLEJT6T41iNzCYtD7qro84FMnM5zEqeJsE",
	"signer2Psbt":     "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f000000800000008001000080010304010000000001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f0000008000000080020000800103040100000000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	"signer2Result":   "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000220202dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d7483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8872202023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d2010103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
}
// TestPsbtSigner exercises the Signer role: two pre-computed signatures
// (with their public keys) are attached to the two inputs of the signer1
// PSBT fixture, and the serialized result is compared against the
// known-good vector from the Core test data.
//
// Review fix: the errors from the four hex.DecodeString calls were
// silently discarded; they are now checked.
func TestPsbtSigner(t *testing.T) {
	psbt1, err := NewFromRawBytes(
		bytes.NewReader([]byte(signerPsbtData["signer1PsbtB64"])),
		true,
	)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	psbtUpdater1 := Updater{
		Upsbt: psbt1,
	}
	sig1, err := hex.DecodeString("3044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub1, err := hex.DecodeString("029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	res, err := psbtUpdater1.Sign(0, sig1, pub1, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Error from adding signatures: %v %v", err, res)
	}
	sig2, err := hex.DecodeString("3044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub2, err := hex.DecodeString("03089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	res, err = psbtUpdater1.Sign(1, sig2, pub2, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Error from adding signatures: %v %v", err, res)
	}
	signer1Result, err := hex.DecodeString(signerPsbtData["signer1Result"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	var b bytes.Buffer
	err = psbtUpdater1.Upsbt.Serialize(&b)
	if err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if !bytes.Equal(b.Bytes(), signer1Result) {
		t.Fatalf("Failed to add signatures correctly")
	}
}
// Finalizer-extractor test
// finalizerPsbtData holds Finalizer/Extractor test vectors: the fully
// signed but unfinalized PSBT (base64 and hex), the expected finalized
// PSBT, the expected network-serialized transaction, and a 2-of-3
// multisig PSBT used by TestFinalize2of3.
//
// Review fix: the "finalize" hex literal was broken across two source
// lines (an invalid Go string literal); it has been rejoined into a
// single literal.
var finalizerPsbtData = map[string]string{
	"finalizeb64": "cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAAiAgKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgf0cwRAIgdAGK1BgAl7hzMjwAFXILNoTMgSOJEEjn282bVa1nnJkCIHPTabdA4+tT3O+jOCPIBwUUylWn3ZVE8VfBZ5EyYRGMASICAtq2H/SaFNtqfQKwzR+7ePxLGDErW05U2uTbovv+9TbXSDBFAiEA9hA4swjcHahlo0hSdG8BV3KTQgjG0kRUOTzZm98iF3cCIAVuZ1pnWm0KArhbFOXikHTYolqbV2C+ooFvZhkQoAbqAQEDBAEAAAABBEdSIQKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfyEC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtdSriIGApWDvzmuCmCXR60Zmt3WNPphCFWdbFzTm0whg/GrluB/ENkMak8AAACAAAAAgAAAAIAiBgLath/0mhTban0CsM0fu3j8SxgxK1tOVNrk26L7/vU21xDZDGpPAAAAgAAAAIABAACAAAEBIADC6wsAAAAAF6kUt/X69A49QKWkWbHbNTXyty+pIeiHIgIDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtxHMEQCIGLrelVhB6fHP0WsSrWh3d9vcHX7EnWWmn84Pv/3hLyyAiAMBdu3Rw2/LwhVfdNWxzJcHtMJE+mWzThAlF2xIijaXwEiAgI63ZBPPW3PWd25BrDe4jUpt/+57VDl6GFRkmhgIh8Oc0cwRAIgZfRbpZmLWaJ//hp77QFq8fH5DVSzqo90UKpfVqJRA70CIH9yRwOtHtuWaAsoS1bU/8uI9/t1nqu+CKow8puFE4PSAQEDBAEAAAABBCIAIIwjUxc3Q7WV37Sge3K6jkLjeX2nTof+fZ10l+OyAokDAQVHUiEDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwhAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zUq4iBgI63ZBPPW3PWd25BrDe4jUpt/+57VDl6GFRkmhgIh8OcxDZDGpPAAAAgAAAAIADAACAIgYDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwQ2QxqTwAAAIAAAACAAgAAgAAiAgOppMN/WZbTqiXbrGtXCvBlA5RJKUJGCzVHU+2e7KWHcRDZDGpPAAAAgAAAAIAEAACAACICAn9jmXV9Lv9VoTatAsaEsYOLZVbl8bazQoKpS2tQBRCWENkMak8AAACAAAAAgAUAAIAA",
	"finalize":    "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000002202029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01220202dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d7483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887220203089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f012202023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d2010103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	"resultb64":   "cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAABB9oARzBEAiB0AYrUGACXuHMyPAAVcgs2hMyBI4kQSOfbzZtVrWecmQIgc9Npt0Dj61Pc76M4I8gHBRTKVafdlUTxV8FnkTJhEYwBSDBFAiEA9hA4swjcHahlo0hSdG8BV3KTQgjG0kRUOTzZm98iF3cCIAVuZ1pnWm0KArhbFOXikHTYolqbV2C+ooFvZhkQoAbqAUdSIQKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfyEC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtdSrgABASAAwusLAAAAABepFLf1+vQOPUClpFmx2zU18rcvqSHohwEHIyIAIIwjUxc3Q7WV37Sge3K6jkLjeX2nTof+fZ10l+OyAokDAQjaBABHMEQCIGLrelVhB6fHP0WsSrWh3d9vcHX7EnWWmn84Pv/3hLyyAiAMBdu3Rw2/LwhVfdNWxzJcHtMJE+mWzThAlF2xIijaXwFHMEQCIGX0W6WZi1mif/4ae+0BavHx+Q1Us6qPdFCqX1aiUQO9AiB/ckcDrR7blmgLKEtW1P/LiPf7dZ6rvgiqMPKbhROD0gFHUiEDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwhAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zUq4AIgIDqaTDf1mW06ol26xrVwrwZQOUSSlCRgs1R1Ptnuylh3EQ2QxqTwAAAIAAAACABAAAgAAiAgJ/Y5l1fS7/VaE2rQLGhLGDi2VW5fG2s0KCqUtrUAUQlhDZDGpPAAAAgAAAAIAFAACAAA==",
	"result":      "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
	"network":     "0200000000010258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd7500000000da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752aeffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d01000000232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f000400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00000000",
	"twoOfThree":  "70736274ff01005e01000000019a5fdb3c36f2168ea34a031857863c63bb776fd8a8a9149efd7341dfaf81c9970000000000ffffffff01e013a8040000000022002001c3a65ccfa5b39e31e6bafa504446200b9c88c58b4f21eb7e18412aff154e3f000000000001012bc817a80400000000220020114c9ab91ea00eb3e81a7aa4d0d8f1bc6bd8761f8f00dbccb38060dc2b9fdd5522020242ecd19afda551d58f496c17e3f51df4488089df4caafac3285ed3b9c590f6a847304402207c6ab50f421c59621323460aaf0f731a1b90ca76eddc635aed40e4d2fc86f97e02201b3f8fe931f1f94fde249e2b5b4dbfaff2f9df66dd97c6b518ffa746a4390bd1012202039f0acfe5a292aafc5331f18f6360a3cc53d645ebf0cc7f0509630b22b5d9f547473044022075329343e01033ebe5a22ea6eecf6361feca58752716bdc2260d7f449360a0810220299740ed32f694acc5f99d80c988bb270a030f63947f775382daf4669b272da0010103040100000001056952210242ecd19afda551d58f496c17e3f51df4488089df4caafac3285ed3b9c590f6a821035a654524d301dd0265c2370225a6837298b8ca2099085568cc61a8491287b63921039f0acfe5a292aafc5331f18f6360a3cc53d645ebf0cc7f0509630b22b5d9f54753ae22060242ecd19afda551d58f496c17e3f51df4488089df4caafac3285ed3b9c590f6a818d5f7375b2c000080000000800000008000000000010000002206035a654524d301dd0265c2370225a6837298b8ca2099085568cc61a8491287b63918e2314cf32c000080000000800000008000000000010000002206039f0acfe5a292aafc5331f18f6360a3cc53d645ebf0cc7f0509630b22b5d9f54718e524a1ce2c000080000000800000008000000000010000000000",
}
// TestFinalize2of3 checks that a 2-of-3 multisig PSBT which already
// carries two partial signatures is not complete on parse, finalizes
// without error, and is complete afterwards.
//
// Review fix: the error returned by NewFromRawBytes was never checked,
// so a parse failure would dereference a nil Psbt and panic instead of
// failing the test cleanly.
func TestFinalize2of3(t *testing.T) {
	b, err := hex.DecodeString(finalizerPsbtData["twoOfThree"])
	if err != nil {
		t.Fatalf("Error decoding hex: %v", err)
	}
	p, err := NewFromRawBytes(bytes.NewReader(b), false)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	if p.IsComplete() {
		t.Fatalf("Psbt is complete")
	}
	err = MaybeFinalizeAll(p)
	if err != nil {
		t.Fatalf("Error in MaybeFinalizeAll: %v", err)
	}
	if !p.IsComplete() {
		t.Fatalf("Psbt is not complete")
	}
}
// TestPsbtExtractor exercises the Finalizer and Extractor roles: it
// parses the fully signed fixture, finalizes every input, extracts the
// network transaction, and compares both the finalized PSBT bytes and
// the network serialization against the known-good vectors.
func TestPsbtExtractor(t *testing.T) {
	unfinalized, err := base64.StdEncoding.DecodeString(
		finalizerPsbtData["finalizeb64"],
	)
	if err != nil {
		t.Fatalf("Error decoding b64: %v", err)
	}
	parsed, err := NewFromRawBytes(
		bytes.NewReader(unfinalized), false,
	)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	// Finalize each input in turn.
	for idx := range parsed.Inputs {
		if err = Finalize(parsed, idx); err != nil {
			t.Fatalf("Error from finalizing PSBT: %v", err)
		}
	}
	// Decode the expected finalized-PSBT and network-tx vectors.
	expectedFinal, err := base64.StdEncoding.DecodeString(
		finalizerPsbtData["resultb64"],
	)
	if err != nil {
		t.Fatalf("Unable to decode b64: %v", err)
	}
	expectedNetwork, err := hex.DecodeString(finalizerPsbtData["network"])
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	extractedTx, err := Extract(parsed)
	if err != nil {
		t.Fatalf("Failed to extract: %v", err)
	}
	var networkBuf bytes.Buffer
	if err := extractedTx.Serialize(&networkBuf); err != nil {
		t.Fatalf("unable to serialize: %v", err)
	}
	var psbtBuf bytes.Buffer
	if err = parsed.Serialize(&psbtBuf); err != nil {
		t.Fatalf("Unable to serialize updated Psbt: %v", err)
	}
	if !bytes.Equal(psbtBuf.Bytes(), expectedFinal) {
		t.Fatalf("Failed to finalize transaction: expected %x, "+
			"got %x", expectedFinal, psbtBuf.Bytes())
	}
	if !bytes.Equal(expectedNetwork, networkBuf.Bytes()) {
		t.Fatalf("Failed to network serialize transaction: %x", psbtBuf.Bytes())
	}
}
// TestImportFromCore1 replays a two-input PSBT workflow created with
// Bitcoin Core 0.17 regtest: it attaches the UTXO fields, adds the
// Core-produced signatures, finalizes and extracts, then compares the
// result with Core's network serialization. It also checks that an
// invalid pubkey and mismatched input data are rejected by Sign.
func TestImportFromCore1(t *testing.T) {
	// This example #1 was created manually using Bitcoin Core 0.17 regtest.
	// It contains two inputs, one p2wkh and one p2pkh (non-witness).
	// We take the created PSBT as input, then add the fields for each input
	// separately, then finalize and extract, and compare with the network
	// serialized tx output from Core.
	imported := "cHNidP8BAJwCAAAAAjaoF6eKeGsPiDQxxqqhFDfHWjBtZzRqmaZmvyCVWZ5JAQAAAAD/////RhypNiFfnQSMNpo0SGsgIvDOyMQFAYEHZXD5jp4kCrUAAAAAAP////8CgCcSjAAAAAAXqRQFWy8ScSkkhlGMwfOnx15YwRzApofwX5MDAAAAABepFAt4TyLfGnL9QY6GLYHbpSQj+QclhwAAAAAAAAAAAA=="
	psbt1, err := NewFromRawBytes(bytes.NewReader([]byte(imported)), true)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	// update with the first input's utxo (witness) and the second input's utxo
	// (non-witness)
	fundingTxInput1Hex := "02000000014f2cbac7d7691fafca30313097d79be9e78aa6670752fcb1fc15508e77586efb000000004847304402201b5568d7cab977ae0892840b779d84e36d62e42fd93b95e648aaebeacd2577d602201d2ebda2b0cddfa0c1a71d3cbcb602e7c9c860a41ed8b4d18d40c92ccbe92aed01feffffff028c636f91000000001600147447b6d7e6193499565779c8eb5184fcfdfee6ef00879303000000001600149e88f2828a074ebf64af23c2168d1816258311d72d010000"
	fundingTxInput2Hex := "020000000001012f03f70c673d83d65da0e8d0db3867b3e7d7bfbd34fd6be65892042e57576eb00000000000feffffff028027128c000000001976a91485780899b61a5506f342bd67a2f635181f50c8b788acb8032c040000000017a914e2e3d32d42d6f043cab39708a6073301df5039db8702473044022047ae396fd8aba8f67482ad16e315fe680db585c1ac6422ffb18dacd9cf5bac350220321176fd6157ef51d9eae9230b0b5bd7dd29bb6247a879189e6aaa8091f3020201210368081f7ff37dfadbed407eba17b232f959e41e6ac78741192c805ebf80d487852f010000"
	fundingTxInput1Bytes, err := hex.DecodeString(fundingTxInput1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	txFund1 := wire.NewMsgTx(2)
	err = txFund1.Deserialize(bytes.NewReader(fundingTxInput1Bytes))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	// First input is witness, take correct output:
	txFund1Out := txFund1.TxOut[1]
	fundingTxInput2Bytes, err := hex.DecodeString(fundingTxInput2Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	txFund2 := wire.NewMsgTx(2)
	err = txFund2.Deserialize(bytes.NewReader(fundingTxInput2Bytes))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	psbtupdater1 := Updater{Upsbt: psbt1}
	// Check the AddInWitnessUtxo error; it was previously dropped.
	err = psbtupdater1.AddInWitnessUtxo(txFund1Out, 0)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	err = psbtupdater1.AddInNonWitnessUtxo(txFund2, 1)
	if err != nil {
		t.Fatalf("Error inserting non-witness utxo: %v", err)
	}
	// Signing was done with Core; we manually insert the relevant input
	// entries here.
	sig1Hex := "304402200da03ac9890f5d724c42c83c2a62844c08425a274f1a5bca50dcde4126eb20dd02205278897b65cb8e390a0868c9582133c7157b2ad3e81c1c70d8fbd65f51a5658b01"
	sig1, err := hex.DecodeString(sig1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub1Hex := "024d6b24f372dd4551277c8df4ecc0655101e11c22894c8e05a3468409c865a72c"
	pub1, err := hex.DecodeString(pub1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	// Check that invalid pubkeys are not accepted.
	pubInvalid := append(pub1, 0x00)
	res, err := psbtupdater1.Sign(0, sig1, pubInvalid, nil, nil)
	if err == nil {
		t.Fatalf("Incorrectly accepted invalid pubkey: %v",
			pubInvalid)
	}
	res, err = psbtupdater1.Sign(0, sig1, pub1, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Error from adding signatures: %v %v", err, res)
	}
	sig2Hex := "3044022014eb9c4858f71c9f280bc68402aa742a5187f54c56c8eb07c902eb1eb5804e5502203d66656de8386b9b044346d5605f5ae2b200328fb30476f6ac993fc0dbb0455901"
	sig2, err := hex.DecodeString(sig2Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub2Hex := "03b4c79acdf4e7d978bef4019c421e4c6c67044ed49d27322dc90e808d8080e862"
	pub2, err := hex.DecodeString(pub2Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	// ===============================================================
	// Before adding the signature, we'll make a new PSBT with
	// modifications to the input data and check it fails sanity checks.
	// First an invalid tx:
	// Check the parse error here; it was previously discarded with _.
	psbtBorkedInput2, err := NewFromRawBytes(bytes.NewReader([]byte(imported)), true)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	borkedUpdater, err := NewUpdater(psbtBorkedInput2)
	if err != nil {
		t.Fatalf("NewUpdater failed while trying to create borked "+
			"version: %v", err)
	}
	// Check the AddInWitnessUtxo error; it was previously dropped.
	err = borkedUpdater.AddInWitnessUtxo(txFund1Out, 0)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	res, err = borkedUpdater.Sign(0, sig2, pub2, nil, nil)
	if err != ErrInvalidSignatureForInput {
		t.Fatalf("AddPartialSig succeeded, but should have failed "+
			"due to mismatch between pubkey and prevOut; err was: %v", err)
	}
	// Next, a valid tx serialization, but not the right one
	wrongTxBytes, err := hex.DecodeString("020000000001012d1d7b17356d0ad8232a5817d2d2fa5cd97d803c0ed03e013e97b65f4f1e5e7501000000171600147848cfb25bb163c7c63732615980a25eddbadc7bfeffffff022a8227630000000017a91472128ae6b6a1b74e499bedb5efb1cb09c9a6713287107240000000000017a91485f81cb970d854e2513ebf5c5b5d09e4509f4af3870247304402201c09aa8bcd18753ef01d8712a55eea5a0f69b6c4cc2944ac942264ff0662c91402201fc1390bf8b0023dd12ae78d7ec181124e106de57bc8f00812ae92bd024d3045012103ba077fc011aa59393bfe17cf491b3a02a9c4d39df122b2148322da0ec23508f459430800")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	wrongTx := wire.NewMsgTx(2)
	err = wrongTx.Deserialize(bytes.NewReader(wrongTxBytes))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	psbtBorkedInput2.Inputs[1] = *NewPsbtInput(wrongTx, nil)
	res, err = borkedUpdater.Sign(1, sig2, pub2, nil, nil)
	if err != ErrInvalidSignatureForInput {
		t.Fatalf("Error should have been invalid sig for input, was: %v", err)
	}
	// ======================================================
	res, err = psbtupdater1.Sign(1, sig2, pub2, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Failed to add signature to second input: %v %v", err, res)
	}
	// Neither input (p2pkh and p2wkh) require redeem script nor witness script,
	// so there are no more fields to add; we are ready to finalize.
	err = Finalize(psbt1, 0)
	if err != nil {
		t.Fatalf("Failed to finalize the first input, %v", err)
	}
	if psbt1.IsComplete() {
		t.Fatalf("PSBT was complete but has not been fully finalized")
	}
	err = Finalize(psbt1, 1)
	if err != nil {
		t.Fatalf("Failed to finalize second input, %v", err)
	}
	tx, err := Extract(psbt1)
	if err != nil {
		t.Fatalf("unable to extract tx: %v", err)
	}
	var networkSerializedTx bytes.Buffer
	if err := tx.Serialize(&networkSerializedTx); err != nil {
		t.Fatalf("unable to encode tx: %v", err)
	}
	expectedTx := "0200000000010236a817a78a786b0f883431c6aaa11437c75a306d67346a99a666bf2095599e490100000000ffffffff461ca936215f9d048c369a34486b2022f0cec8c4050181076570f98e9e240ab5000000006a473044022014eb9c4858f71c9f280bc68402aa742a5187f54c56c8eb07c902eb1eb5804e5502203d66656de8386b9b044346d5605f5ae2b200328fb30476f6ac993fc0dbb04559012103b4c79acdf4e7d978bef4019c421e4c6c67044ed49d27322dc90e808d8080e862ffffffff028027128c0000000017a914055b2f1271292486518cc1f3a7c75e58c11cc0a687f05f93030000000017a9140b784f22df1a72fd418e862d81dba52423f90725870247304402200da03ac9890f5d724c42c83c2a62844c08425a274f1a5bca50dcde4126eb20dd02205278897b65cb8e390a0868c9582133c7157b2ad3e81c1c70d8fbd65f51a5658b0121024d6b24f372dd4551277c8df4ecc0655101e11c22894c8e05a3468409c865a72c0000000000"
	expectedTxBytes, err := hex.DecodeString(expectedTx)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	if !bytes.Equal(expectedTxBytes, networkSerializedTx.Bytes()) {
		t.Fatalf("The produced network transaction did not match the expected: %x \n %x \n",
			networkSerializedTx.Bytes(), expectedTxBytes)
	}
}
// TestImportFromCore2 replays a PSBT workflow with one real p2sh-p2wkh
// input signed by Bitcoin Core 0.17 regtest and one fake p2sh-p2wsh
// multisig input constructed by hand. It checks the intermediate
// half-signed PSBT against Core's output, exercises the multisig
// finalization failure paths, and verifies the final network
// serialization of the extracted transaction.
func TestImportFromCore2(t *testing.T) {
	// This example #2 was created manually using Bitcoin Core 0.17 regtest.
	// It contains two inputs, one p2sh-p2wkh and one fake utxo.
	// The PSBT has been created with walletcreatepsbt and then partial-signed
	// on the real input with walletprocessbst in Core.
	// We first check that the updating here, using the Core created signature,
	// redeem script and signature for the p2sh-p2wkh input, creates the
	// same partial-signed intermediate transaction as Core did after
	// walletprocesspsbt.
	// We then attach a fake
	// input of type p2sh-p2wsh, attach its witnessUtxo, redeemscript and
	// witnessscript fields, and then finalize the whole transaction. Unlike
	// the previous example, we cannot here compare with a Core produced
	// network serialized final transaction, because of the fake input.
	imported := "cHNidP8BAJsCAAAAAkxTQ+rig5QNnUS5nMc+Pccow4IcOJeQRcNNw+7p5ZA5AQAAAAD/////qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqoNAAAAAP////8CAIYOcAAAAAAWABQ1l7nn13RubTwqRQU2BnVV5WlXBWAxMbUAAAAAF6kUkiuXUjfWFgTp6nl/gf9+8zIWR6KHAAAAAAAAAAAA"
	psbt1, err := NewFromRawBytes(bytes.NewReader([]byte(imported)), true)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	// update with the first input's utxo, taken from its funding
	// transaction
	fundingTxInput1Hex := "02000000017b260536a3c17aee49c41a9b36fdf01a418e0c04df06fbabcb0d4f590b95d175000000006a473044022074a5a13159b6c12d77881c9501aa5c18616fb76c1809fc4d55f18a2e63159a6702200d1aa72be6056a41808898d24da93c0c0192cad65b7c2cc86e00b3e0fbbd57f601210212cc429d61fde565d0c2271a3e4fdb063cb49ae2257fa71460be753ceb56d175feffffff02bc060d8f0000000017a9140b56c31b5dc5a5a22c45a7850e707ad602d94a3087008352840000000017a9149f3679d67a9a486238764f618a93b82a7d999103879a000000"
	fundingTxInput1Bytes, err := hex.DecodeString(fundingTxInput1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	txFund1 := wire.NewMsgTx(2)
	err = txFund1.Deserialize(bytes.NewReader(fundingTxInput1Bytes))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	// First input is witness, take correct output:
	txFund1Out := txFund1.TxOut[1]
	psbtupdater1 := Updater{Upsbt: psbt1}
	// Check the AddInWitnessUtxo error; it was previously dropped.
	err = psbtupdater1.AddInWitnessUtxo(txFund1Out, 0)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	// This input is p2sh-p2wkh, so it requires a redeemscript but not
	// a witness script. The redeemscript is the witness program.
	redeemScript, err := hex.DecodeString("00147aed39420a8b7ab98a83791327ccb70819d1fbe2")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	// Check the AddInRedeemScript error; it was previously dropped.
	err = psbtupdater1.AddInRedeemScript(redeemScript, 0)
	if err != nil {
		t.Fatalf("Error adding redeem script: %v", err)
	}
	// Signing for the first input was done with Core; we manually insert the
	// relevant input entries here.
	sig1Hex := "30440220546d182d00e45ef659c329dce6197dc19e0abc795e2c9279873f5a887998b273022044143113fc3475d04fc8d5113e0bbcb42d80514a9f1a2247e9b2a7878e20d44901"
	sig1, err := hex.DecodeString(sig1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	pub1Hex := "02bb3ce35af26f4c826eab3e5fc263ef56871b26686a8a995599b7ee6576613104"
	pub1, err := hex.DecodeString(pub1Hex)
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	res, err := psbtupdater1.Sign(0, sig1, pub1, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Unable to add partial signature: %v %v", err, res)
	}
	// Since this input is now finalizable, we do so:
	err = Finalize(psbt1, 0)
	if err != nil {
		t.Fatalf("Failed to finalize the first input: %v", err)
	}
	if psbt1.IsComplete() {
		t.Fatalf("PSBT was complete but has not been fully finalized")
	}
	// Core also adds the OutRedeemScript field for the output it knows about.
	// Note that usually we would not of course re-create, but rather start
	// from the half-signed version; so this is needed only for a sanity check
	// that we can recreate the half-signed.
	output2RedeemScript, err := hex.DecodeString("0014e0846bd17848ab40ca1f56b655c6fa31667880cc")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	// Check the AddOutRedeemScript error; it was previously dropped.
	err = psbtupdater1.AddOutRedeemScript(output2RedeemScript, 1)
	if err != nil {
		t.Fatalf("Error adding output redeem script: %v", err)
	}
	// The main function of the test is to compare the thus-generated
	// partially (not completely) signed transaction with that generated and
	// encoded by Core.
	expectedPsbtPartialB64 := "cHNidP8BAJsCAAAAAkxTQ+rig5QNnUS5nMc+Pccow4IcOJeQRcNNw+7p5ZA5AQAAAAD/////qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqoNAAAAAP////8CAIYOcAAAAAAWABQ1l7nn13RubTwqRQU2BnVV5WlXBWAxMbUAAAAAF6kUkiuXUjfWFgTp6nl/gf9+8zIWR6KHAAAAAAABASAAg1KEAAAAABepFJ82edZ6mkhiOHZPYYqTuCp9mZEDhwEHFxYAFHrtOUIKi3q5ioN5EyfMtwgZ0fviAQhrAkcwRAIgVG0YLQDkXvZZwync5hl9wZ4KvHleLJJ5hz9aiHmYsnMCIEQUMRP8NHXQT8jVET4LvLQtgFFKnxoiR+myp4eOINRJASECuzzjWvJvTIJuqz5fwmPvVocbJmhqiplVmbfuZXZhMQQAAAABABYAFOCEa9F4SKtAyh9WtlXG+jFmeIDMAA=="
	generatedPsbtPartialB64, err := psbt1.B64Encode()
	if err != nil {
		t.Fatalf("Unable to B64Encode Psbt: %v", err)
	}
	if expectedPsbtPartialB64 != generatedPsbtPartialB64 {
		t.Fatalf("Partial did not match expected: %v", generatedPsbtPartialB64)
	}
	// We now simulate adding the signing data for the second (fake) input,
	// and check that we can finalize and extract. This input is p2sh-p2wsh.
	// the second input is fake, we're going to make it witness type,
	// so create a TxOut struct that fits
	fakeTxOutSerialized, err := hex.DecodeString("00c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e887")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	fakevalSerialized := binary.LittleEndian.Uint64(fakeTxOutSerialized[:8])
	fakeScriptPubKey := fakeTxOutSerialized[9:]
	txFund2Out := wire.NewTxOut(int64(fakevalSerialized), fakeScriptPubKey)
	psbt2, err := NewFromRawBytes(bytes.NewReader([]byte(expectedPsbtPartialB64)), true)
	if err != nil {
		t.Fatalf("Failed to load partial PSBT: %v", err)
	}
	psbtupdater2, err := NewUpdater(psbt2)
	if err != nil {
		t.Fatalf("Failed to create updater: %v", err)
	}
	// Check the AddInWitnessUtxo error; it was previously dropped.
	err = psbtupdater2.AddInWitnessUtxo(txFund2Out, 1)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	// Add redeemScript, which is the witnessscript/program:
	redeemScript, err = hex.DecodeString("00208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	err = psbtupdater2.AddInRedeemScript(redeemScript, 1)
	if err != nil {
		t.Fatalf("Failed to add redeemscript to second input: %v", err)
	}
	// Add witnessScript, which here is multisig:
	witnessScript, err := hex.DecodeString("522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	// To test multisig checks, add a nonsense version of the multisig script
	witnessScriptNonsense, err := hex.DecodeString("52ffff")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	err = psbtupdater2.AddInWitnessScript(witnessScript, 1)
	if err != nil {
		t.Fatalf("Failed to add witnessscript to second input: %v", err)
	}
	// Construct the two partial signatures to be added
	sig21, err := hex.DecodeString("3044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	pub21, err := hex.DecodeString("03089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	sig22, err := hex.DecodeString("3044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d201")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	pub22, err := hex.DecodeString("023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	// Check the Sign result; it was previously unchecked.
	res, err = psbtupdater2.Sign(1, sig21, pub21, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Unable to add partial signature: %v %v", err, res)
	}
	// Check that the finalization procedure fails here due to not
	// meeting the multisig policy
	success, err := MaybeFinalize(psbt2, 1)
	if success {
		t.Fatalf("Incorrectly succeeded in finalizing without sigs")
	}
	if err != ErrUnsupportedScriptType {
		t.Fatalf("Got unexpected error type: %v", err)
	}
	// Check the Sign result; it was previously unchecked.
	res, err = psbtupdater2.Sign(1, sig22, pub22, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Unable to add partial signature: %v %v", err, res)
	}
	// Check that the finalization procedure also fails with a nonsense
	// script
	err = psbtupdater2.AddInWitnessScript(witnessScriptNonsense, 1)
	if err != nil {
		t.Fatalf("Failed to add witnessscript to second input: %v", err)
	}
	success, err = MaybeFinalize(psbt2, 1)
	if success {
		t.Fatalf("Incorrectly succeeded in finalizing with invalid msigscript")
	}
	if err != ErrUnsupportedScriptType {
		t.Fatalf("Got unexpected error type: %v", err)
	}
	// Restore the correct witnessScript to complete correctly
	err = psbtupdater2.AddInWitnessScript(witnessScript, 1)
	if err != nil {
		t.Fatalf("Failed to add witnessscript to second input: %v", err)
	}
	success, err = MaybeFinalize(psbt2, 1)
	if !success {
		if err != nil {
			t.Fatalf("Failed to finalize second input: %v", err)
		} else {
			t.Fatalf("Input was not finalizable")
		}
	}
	// Add a (fake) witnessOut descriptor field to one of the outputs,
	// for coverage purposes (we aren't currently using this field)
	// Check the AddOutWitnessScript error; it was previously dropped.
	err = psbtupdater2.AddOutWitnessScript([]byte{0xff, 0xff, 0xff}, 0)
	if err != nil {
		t.Fatalf("Error adding output witness script: %v", err)
	}
	// Sanity check; we should not have lost the additional output entry
	// provided by Core initially
	uoutput1 := psbtupdater2.Upsbt.Outputs[1]
	if uoutput1.RedeemScript == nil {
		t.Fatalf("PSBT should contain outredeemscript entry, but it does not.")
	}
	// Nor should we have lost our fake witnessscript output entry
	uoutput2 := psbtupdater2.Upsbt.Outputs[0]
	if uoutput2.WitnessScript == nil {
		t.Fatalf("PSBT should contain outwitnessscript but it does not.")
	}
	var tx bytes.Buffer
	networkSerializedTx, err := Extract(psbt2)
	if err != nil {
		t.Fatalf("unable to extract tx: %v", err)
	}
	if err := networkSerializedTx.Serialize(&tx); err != nil {
		t.Fatalf("unable to encode tx: %v", err)
	}
	expectedSerializedTx, err := hex.DecodeString("020000000001024c5343eae283940d9d44b99cc73e3dc728c3821c38979045c34dc3eee9e5903901000000171600147aed39420a8b7ab98a83791327ccb70819d1fbe2ffffffffaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa0d000000232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903ffffffff0200860e70000000001600143597b9e7d7746e6d3c2a450536067555e5695705603131b50000000017a914922b975237d61604e9ea797f81ff7ef3321647a287024730440220546d182d00e45ef659c329dce6197dc19e0abc795e2c9279873f5a887998b273022044143113fc3475d04fc8d5113e0bbcb42d80514a9f1a2247e9b2a7878e20d449012102bb3ce35af26f4c826eab3e5fc263ef56871b26686a8a995599b7ee65766131040400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00000000")
	if err != nil {
		t.Fatalf("Failed to decode hex: %v", err)
	}
	if !bytes.Equal(expectedSerializedTx, tx.Bytes()) {
		t.Fatalf("Failed to create correct network serialized "+
			"transaction: expected %x, got %x",
			expectedSerializedTx, tx.Bytes())
	}
}
// TestMaybeFinalizeAll signs a three-input (all p2wkh) PSBT from Core,
// checks that MaybeFinalizeAll fails while a signature is missing, and
// succeeds once all inputs carry partial signatures.
func TestMaybeFinalizeAll(t *testing.T) {
	// The following data is from a 3rd transaction from Core,
	// using 3 inputs, all p2wkh.
	imported := "cHNidP8BAKQCAAAAAzJyXH13IqBFvvZ7y1VSgUgkMvMoPgP5CfFNqsjQexKQAQAAAAD/////fMdLydu5bsoiHN9cFSaBL0Qnq2KLSKx0RA4b938CAgQAAAAAAP/////yKNgfsDAHr/zFz8R9k8EFI26allfg9DdE8Gzj6tGlegEAAAAA/////wHw9E0OAAAAABYAFDnPCRduiEWmmSc1j30SJ8k9u7PHAAAAAAAAAAAA"
	psbt1, err := NewFromRawBytes(bytes.NewReader([]byte(imported)), true)
	if err != nil {
		t.Fatalf("Failed to parse PSBT: %v", err)
	}
	// update with the first input's utxo, taken from its funding
	// transaction
	fundingTxInput1, err := hex.DecodeString("020000000001017b260536a3c17aee49c41a9b36fdf01a418e0c04df06fbabcb0d4f590b95d1750100000017160014af82cd4409241b1de892726324bd780e3b5cd8aafeffffff02a85f9800000000001600149d21f8b306ddfd4dd035080689e88b4c3471e3cc801d2c0400000000160014d97ccd3dfb60820d7d33d862371ca5a73039bd560247304402201a1d2fdb5a7190b7fa59907769f0fc9c91fd3b34f6424acf5868a8ac21ec287102200a59b9d076ecf98c88f2196ed2be0aafff4966ead754041182fff5f92115a783012103604ffd31dc71db2e32c20f09eafe6353cd7515d3648aff829bb4879b553e30629a000000")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	fundingTxInput2, err := hex.DecodeString("020000000001019c27b886e420fcadb077706b0933efa8bb53e3a250c3ec45cfdba5e05e233f360100000000feffffff0200b4c404000000001600140853f50c7d2d5d2af326a75efdbc83b62551e89afce31c0d000000001600142d6936c082c35607ec3bdb334a932d928150b75802473044022000d962f5e5e6425f9de21da7ac65b4fd8af8f6bfbd33c7ba022827c73866b477022034c59935c1ea10b5ba335d93f55a200c2588ec6058b8c7aedd10d5cbc4654f99012102c30e9f0cd98f6a805464d6b8a326b5679b6c3262934341855ee0436eaedfd2869a000000")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	fundingTxInput3, err := hex.DecodeString("02000000012bf4331bb95df4eadb14f7a28db3fecdc5e87f08c29c2332b66338dd606699f60000000048473044022075ed43f508528da47673550a785702e9a93eca84a11faea91c4e9c66fcab3c9e022054a37610bd40b12263a5933188f062b718e007f290cecde2b6e41da3e1ebbddf01feffffff020c99a8240100000016001483bd916985726094d6d1c5b969722da580b5966a804a5d05000000001600140a2ee13a6696d75006af5e8a026ea49316087dae9a000000")
	if err != nil {
		t.Fatalf("Unable to decode hex: %v", err)
	}
	psbtupdater1 := Updater{Upsbt: psbt1}
	tx := wire.NewMsgTx(2)
	err = tx.Deserialize(bytes.NewReader(fundingTxInput1))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	txFund1Out := tx.TxOut[1]
	// Check the AddInWitnessUtxo errors; they were previously dropped.
	err = psbtupdater1.AddInWitnessUtxo(txFund1Out, 0)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	tx = wire.NewMsgTx(2)
	err = tx.Deserialize(bytes.NewReader(fundingTxInput2))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	txFund2Out := tx.TxOut[0]
	err = psbtupdater1.AddInWitnessUtxo(txFund2Out, 1)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	tx = wire.NewMsgTx(2)
	err = tx.Deserialize(bytes.NewReader(fundingTxInput3))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	txFund3Out := tx.TxOut[1]
	err = psbtupdater1.AddInWitnessUtxo(txFund3Out, 2)
	if err != nil {
		t.Fatalf("Error inserting witness utxo: %v", err)
	}
	// To be ready for finalization, we need to have partial signature
	// fields for each input
	sig1, _ := hex.DecodeString("30440220027605ee8015970baf02a72652967a543e1b29a6882d799738ed1baee508822702203818a2f1b9770c46a473f47ad7ae90bcc129a5d047f00fae354c80197a7cf50601")
	pub1, _ := hex.DecodeString("03235fc1f9dc8bbf6fa3df35dfeb0dd486f2d488f139579885eb684510f004f6c1")
	sig2, _ := hex.DecodeString("304402206f5aea4621696610de48736b95a89b1d3a434a4e536d9aae65e039c477cf4c7202203b27a18b0f63be7d3bbf5be1bc2306a7ec8c2da12c2820ff07b73c7f3f1d4d7301")
	pub2, _ := hex.DecodeString("022011b496f0603a268b55a781c7be0c3849f605f09cb2e917ed44288b8144a752")
	sig3, _ := hex.DecodeString("3044022036dbc6f8f85a856e7803cbbcf0a97b7a74806fc592e92d7c06826f911610b98e0220111d43c4b20f756581791334d9c5cbb1a9c07558f28404cabf01c782897ad50501")
	pub3, _ := hex.DecodeString("0381772a80c69e275e20d7f014555b13031e9cacf1c54a44a67ab2bc7eba64f227")
	res, err := psbtupdater1.Sign(0, sig1, pub1, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Failed to add partial signature for input 0: %v %v", err, res)
	}
	res, err = psbtupdater1.Sign(1, sig2, pub2, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Failed to add partial signature for input 1: %v %v", err, res)
	}
	// Not ready for finalize all, check it fails:
	err = MaybeFinalizeAll(psbt1)
	if err != ErrNotFinalizable {
		t.Fatalf("Expected finalization failure, got: %v", err)
	}
	// Check the Sign result; it was previously unchecked.
	res, err = psbtupdater1.Sign(2, sig3, pub3, nil, nil)
	if err != nil || res != 0 {
		t.Fatalf("Failed to add partial signature for input 2: %v %v", err, res)
	}
	// Since this input is now finalizable and is p2wkh only, we can do
	// all at once:
	err = MaybeFinalizeAll(psbt1)
	if err != nil {
		t.Fatalf("Failed to finalize PSBT: %v", err)
	}
	if !psbt1.IsComplete() {
		t.Fatalf("PSBT was finalized but not marked complete")
	}
}
// TestFromUnsigned builds a PSBT from a raw unsigned transaction and
// verifies that the base64 encoding matches Bitcoin Core's output and
// can be parsed back again.
func TestFromUnsigned(t *testing.T) {
	rawTx, err := hex.DecodeString("00000000000101e165f072311e71825b47a4797221d7ae56d4b40b7707c540049aee43302448a40000000000feffffff0212f1126a0000000017a9143e836801b2b15aa193449d815c62d6c4b6227c898780778e060000000017a914ba4bdb0b07d67bc60f59c1f4fe54170565254974870000000000")
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	unsignedTx := wire.NewMsgTx(2)
	if err := unsignedTx.Deserialize(bytes.NewReader(rawTx)); err != nil {
		t.Fatalf("Error: %v", err)
	}
	packet, err := NewFromUnsignedTx(unsignedTx)
	if err != nil {
		t.Fatalf("Error: %v", err)
	}
	got, err := packet.B64Encode()
	if err != nil {
		t.Fatalf("Unable to B64Encode Psbt: %v", err)
	}
	// Compare with output of Core:
	fromCoreB64 := "cHNidP8BAHMAAAAAAeFl8HIxHnGCW0ekeXIh165W1LQLdwfFQASa7kMwJEikAAAAAAD+////AhLxEmoAAAAAF6kUPoNoAbKxWqGTRJ2BXGLWxLYifImHgHeOBgAAAAAXqRS6S9sLB9Z7xg9ZwfT+VBcFZSVJdIcAAAAAAAAAAA=="
	if got != fromCoreB64 {
		t.Fatalf("Got incorrect b64: %v", got)
	}
	// The Core-produced encoding must also round-trip through our parser.
	if _, err := NewFromRawBytes(bytes.NewReader([]byte(fromCoreB64)), true); err != nil {
		t.Fatalf("Error: %v", err)
	}
}
// TestNonWitnessToWitness checks that attaching a NonWitnessUtxo to a
// segwit input is automatically converted to a WitnessUtxo during
// signing, while a true legacy input keeps its NonWitnessUtxo, and that
// the fully finalized PSBT and extracted transaction match Core's.
func TestNonWitnessToWitness(t *testing.T) {
	// We'll start with a PSBT produced by Core for which
	// the first input is signed and we'll provided the signatures for
	// the other three inputs; they are p2sh-p2wkh, p2wkh and legacy
	// respectively.
	// In each case we'll *first* attach the NonWitnessUtxo field,
	// and then call sign; in the first two but not the third case, the
	// NonWitnessUtxo will automatically be replaced with the WitnessUtxo.
	// Finally we'll check that the fully finalized PSBT produced matches
	// the one produced by Core for the same keys.
	psbt1B64 := "cHNidP8BAM4CAAAABHtBMXY+SX95xidmWJP67CTQ02FPUpbNhIxNplAdlvk+AQAAAAD/////G2mt4bX7+sVi1jdbuBa5Q/xsJdgzFCgdHHSZq3ewK6YAAAAAAP/////NrbZb7GzfAg4kOqFWAIbXabq4cAvtVGv+eecIIv1KggEAAAAA/////73s9ifprgErlaONH1rgpNs3l6+t+mz2XGTHsTVWCem/AQAAAAD/////AfAmclMAAAAAF6kUQwsEC5nzbdY5meON2ZQ2thmeFgOHAAAAAAABASAAZc0dAAAAABepFPAv3VTMu5+4WN+/HIji6kG9RpzKhwEHFxYAFLN3PqXSyIHWKqm4ah5m9erc/3OoAQhrAkcwRAIgH7kzGO2iskfCvX0dgkDuzfqJ7tAu7KUZOeykTkJ1SYkCIBv4QRZK1hLz45D0gs+Lz93OE4s37lkPVE+SlXZtazWEASEC3jaf19MMferBn0Bn5lxXJGOqoqmfSvnHclQvB5gJ3nEAAAAAAQAWABTB+Qcq6iqdSvvc6959kd7XHrhYFgA="
	nwutxo1ser, _ := hex.DecodeString("02000000017f7baa6b7377541c4aca372d2dce8e1098ba44aa8379b7ea87644ef27e08ec240000000048473044022072e3b94c33cb5128518cd3903cc0ca19e8c234ac6d462e01ae2bb1da7768ed7d0220167d7ad89f6e1bbb3b866ae6fc2f67b5e7d51eb4f33f7bfe3f4b2673856b815001feffffff0200c2eb0b0000000017a9142dd25c78db2e2e09376eab9cb342e1b03005abe487e4ab953e0000000017a914120b8ca3fb4c7f852e30d4e3714fb64027a0b4c38721020000")
	nwutxo2ser, _ := hex.DecodeString("0200000001f51b0bb5d945dd5532448a4d3fb88134d0bd90493813515f9c2ddb1fa15b9ba60000000048473044022047d83caf88d398245c006374bfa9f27ae968f5f51d640cacd5a214ed2cba397a02204519b26035496855f574a72b73bdcfa46d53995faf64c8f0ab394b628cc5383901feffffff020ccb9f3800000000160014e13544a3c718faa6c5ad7089a6660383c12b072700a3e11100000000160014a5439b477c116b79bd4c7c5131f3e58d54f27bb721020000")
	nwutxo3ser, _ := hex.DecodeString("0200000001eb452f0fc9a8c39edb79f7174763f3cb25dc56db455926e411719a115ef16509000000004847304402205aa80cc615eb4b3f6e89696db4eadd192581a6c46f5c09807d3d98ece1d77355022025007e58c1992a1e5d877ee324bfe0a65db26d29f80941cfa277ac3efbcad2a701feffffff02bce9a9320000000017a9141590e852ac66eb8798afeb2a5ed67c568a2d6561870084d717000000001976a914a57ea05eacf94900d5fb92bccd273cfdb90af36f88ac21020000")
	nwutxo1 := wire.NewMsgTx(2)
	err := nwutxo1.Deserialize(bytes.NewReader(nwutxo1ser))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	nwutxo2 := wire.NewMsgTx(2)
	err = nwutxo2.Deserialize(bytes.NewReader(nwutxo2ser))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	nwutxo3 := wire.NewMsgTx(2)
	err = nwutxo3.Deserialize(bytes.NewReader(nwutxo3ser))
	if err != nil {
		t.Fatalf("Error deserializing transaction: %v", err)
	}
	// import the PSBT
	psbt1, err := NewFromRawBytes(bytes.NewReader([]byte(psbt1B64)), true)
	if err != nil {
		t.Fatalf("Failed to create PSBT: %v", err)
	}
	// check that we recognize the finality of the first input
	if !isFinalized(psbt1, 0) {
		t.Fatalf("First input incorrectly read as not finalized.")
	}
	// Add NonWitnessUtxo fields for each of the other three inputs.
	// Check the AddInNonWitnessUtxo errors; they were previously dropped.
	u := Updater{Upsbt: psbt1}
	if err := u.AddInNonWitnessUtxo(nwutxo1, 1); err != nil {
		t.Fatalf("Error inserting non-witness utxo: %v", err)
	}
	if err := u.AddInNonWitnessUtxo(nwutxo2, 2); err != nil {
		t.Fatalf("Error inserting non-witness utxo: %v", err)
	}
	if err := u.AddInNonWitnessUtxo(nwutxo3, 3); err != nil {
		t.Fatalf("Error inserting non-witness utxo: %v", err)
	}
	// Signatures for each of those inputs were created with Core:
	sig1, _ := hex.DecodeString("304402205676877e6162ce40a49ee5a74443cdc1e7915637c42da7b872c2ec2298fd371b02203c1d4a05b1e2a7a588d9ec9b8d4892d2cd59bebe0e777483477a0ec692ebbe6d01")
	pub1, _ := hex.DecodeString("02534f23cb88a048b649672967263bd7570312d5d31d066fa7b303970010a77b2b")
	redeemScript1, _ := hex.DecodeString("00142412be29368c0260cb841eecd9b59d7e01174aa1")
	sig2, _ := hex.DecodeString("3044022065d0a349709b8d8043cfd644cf6c196c1f601a22e1b3fdfbf8c0cc2a80fe2f1702207c87d36b666a8862e81ec5df288707f517d2f35ea1548feb82019de2c8de90f701")
	pub2, _ := hex.DecodeString("0257d88eaf1e79b72ea0a33ae89b57dae95ea68499bdc6770257e010ab899f0abb")
	sig3, _ := hex.DecodeString("30440220290abcaacbd759c4f989762a9ee3468a9231788aab8f50bf65955d8597d8dd3602204d7e394f4419dc5392c6edba6945837458dd750a030ac67a746231903a8eb7db01")
	pub3, _ := hex.DecodeString("0388025f50bb51c0469421ed13381f22f9d46a070ec2837e055c49c5876f0d0968")
	// Add the signatures and any scripts needed to the inputs
	res, err := u.Sign(1, sig1, pub1, redeemScript1, nil)
	if res != 0 || err != nil {
		t.Fatalf("Failed to sign at index %v res %v err %v", 1, res, err)
	}
	res, err = u.Sign(2, sig2, pub2, nil, nil)
	if res != 0 || err != nil {
		t.Fatalf("Failed to sign at index %v res %v err %v", 2, res, err)
	}
	res, err = u.Sign(3, sig3, pub3, nil, nil)
	if res != 0 || err != nil {
		t.Fatalf("Failed to sign at index %v res %v err %v", 3, res, err)
	}
	// Attempt to finalize the rest of the transaction
	_, err = MaybeFinalize(psbt1, 1)
	if err != nil {
		t.Fatalf("Failed to finalize input 1 %v", err)
	}
	_, err = MaybeFinalize(psbt1, 2)
	if err != nil {
		t.Fatalf("Failed to finalize input 2 %v", err)
	}
	_, err = MaybeFinalize(psbt1, 3)
	if err != nil {
		t.Fatalf("Failed to finalize input 3 %v", err)
	}
	// Finally we can check whether both the B64 encoding of the PSBT,
	// and the final network serialized signed transaction, that we generated
	// with Core using the 2 wallets, matches what this code produces:
	expectedFinalizedPsbt := "cHNidP8BAM4CAAAABHtBMXY+SX95xidmWJP67CTQ02FPUpbNhIxNplAdlvk+AQAAAAD/////G2mt4bX7+sVi1jdbuBa5Q/xsJdgzFCgdHHSZq3ewK6YAAAAAAP/////NrbZb7GzfAg4kOqFWAIbXabq4cAvtVGv+eecIIv1KggEAAAAA/////73s9ifprgErlaONH1rgpNs3l6+t+mz2XGTHsTVWCem/AQAAAAD/////AfAmclMAAAAAF6kUQwsEC5nzbdY5meON2ZQ2thmeFgOHAAAAAAABASAAZc0dAAAAABepFPAv3VTMu5+4WN+/HIji6kG9RpzKhwEHFxYAFLN3PqXSyIHWKqm4ah5m9erc/3OoAQhrAkcwRAIgH7kzGO2iskfCvX0dgkDuzfqJ7tAu7KUZOeykTkJ1SYkCIBv4QRZK1hLz45D0gs+Lz93OE4s37lkPVE+SlXZtazWEASEC3jaf19MMferBn0Bn5lxXJGOqoqmfSvnHclQvB5gJ3nEAAQEgAMLrCwAAAAAXqRQt0lx42y4uCTduq5yzQuGwMAWr5IcBBxcWABQkEr4pNowCYMuEHuzZtZ1+ARdKoQEIawJHMEQCIFZ2h35hYs5ApJ7lp0RDzcHnkVY3xC2nuHLC7CKY/TcbAiA8HUoFseKnpYjZ7JuNSJLSzVm+vg53dINHeg7Gkuu+bQEhAlNPI8uIoEi2SWcpZyY711cDEtXTHQZvp7MDlwAQp3srAAEBHwCj4REAAAAAFgAUpUObR3wRa3m9THxRMfPljVTye7cBCGsCRzBEAiBl0KNJcJuNgEPP1kTPbBlsH2AaIuGz/fv4wMwqgP4vFwIgfIfTa2ZqiGLoHsXfKIcH9RfS816hVI/rggGd4sjekPcBIQJX2I6vHnm3LqCjOuibV9rpXqaEmb3GdwJX4BCriZ8KuwABAL0CAAAAAetFLw/JqMOe23n3F0dj88sl3FbbRVkm5BFxmhFe8WUJAAAAAEhHMEQCIFqoDMYV60s/bolpbbTq3RklgabEb1wJgH09mOzh13NVAiAlAH5YwZkqHl2HfuMkv+CmXbJtKfgJQc+id6w++8rSpwH+////ArzpqTIAAAAAF6kUFZDoUqxm64eYr+sqXtZ8VootZWGHAITXFwAAAAAZdqkUpX6gXqz5SQDV+5K8zSc8/bkK82+IrCECAAABB2pHMEQCICkKvKrL11nE+Yl2Kp7jRoqSMXiKq49Qv2WVXYWX2N02AiBNfjlPRBncU5LG7bppRYN0WN11CgMKxnp0YjGQOo632wEhA4gCX1C7UcBGlCHtEzgfIvnUagcOwoN+BVxJxYdvDQloAAEAFgAUwfkHKuoqnUr73OvefZHe1x64WBYA"
	calculatedPsbt, err := u.Upsbt.B64Encode()
	if err != nil {
		t.Fatalf("Failed to base64 encode")
	}
	if expectedFinalizedPsbt != calculatedPsbt {
		t.Fatalf("Failed to generate correct PSBT")
	}
	expectedNetworkSer, _ := hex.DecodeString("020000000001047b4131763e497f79c627665893faec24d0d3614f5296cd848c4da6501d96f93e0100000017160014b3773ea5d2c881d62aa9b86a1e66f5eadcff73a8ffffffff1b69ade1b5fbfac562d6375bb816b943fc6c25d83314281d1c7499ab77b02ba600000000171600142412be29368c0260cb841eecd9b59d7e01174aa1ffffffffcdadb65bec6cdf020e243aa1560086d769bab8700bed546bfe79e70822fd4a820100000000ffffffffbdecf627e9ae012b95a38d1f5ae0a4db3797afadfa6cf65c64c7b1355609e9bf010000006a4730440220290abcaacbd759c4f989762a9ee3468a9231788aab8f50bf65955d8597d8dd3602204d7e394f4419dc5392c6edba6945837458dd750a030ac67a746231903a8eb7db01210388025f50bb51c0469421ed13381f22f9d46a070ec2837e055c49c5876f0d0968ffffffff01f02672530000000017a914430b040b99f36dd63999e38dd99436b6199e1603870247304402201fb93318eda2b247c2bd7d1d8240eecdfa89eed02eeca51939eca44e4275498902201bf841164ad612f3e390f482cf8bcfddce138b37ee590f544f9295766d6b3584012102de369fd7d30c7deac19f4067e65c572463aaa2a99f4af9c772542f079809de710247304402205676877e6162ce40a49ee5a74443cdc1e7915637c42da7b872c2ec2298fd371b02203c1d4a05b1e2a7a588d9ec9b8d4892d2cd59bebe0e777483477a0ec692ebbe6d012102534f23cb88a048b649672967263bd7570312d5d31d066fa7b303970010a77b2b02473044022065d0a349709b8d8043cfd644cf6c196c1f601a22e1b3fdfbf8c0cc2a80fe2f1702207c87d36b666a8862e81ec5df288707f517d2f35ea1548feb82019de2c8de90f701210257d88eaf1e79b72ea0a33ae89b57dae95ea68499bdc6770257e010ab899f0abb0000000000")
	tx, err := Extract(psbt1)
	if err != nil {
		t.Fatalf("Failed to extract: %v", err)
	}
	var b bytes.Buffer
	if err := tx.Serialize(&b); err != nil {
		t.Fatalf("unable to encode tx: %v", err)
	}
	if !bytes.Equal(expectedNetworkSer, b.Bytes()) {
		t.Fatalf("Expected serialized transaction was not produced: %x", b.Bytes())
	}
}
// TestEmptyInputSerialization tests the special serialization case for a wire
// transaction that has no inputs.
func TestEmptyInputSerialization(t *testing.T) {
	// An empty packet serializes as a non-witness transaction, since
	// with zero inputs there is no witness data to carry.
	emptyPacket, err := New(nil, nil, 2, 0, nil)
	if err != nil {
		t.Fatalf("failed to create empty PSBT: %v", err)
	}

	var serialized bytes.Buffer
	if err := emptyPacket.Serialize(&serialized); err != nil {
		t.Fatalf("failed to serialize empty PSBT: %v", err)
	}

	// Round-trip the bytes. The wire package first assumes the witness
	// encoding because of the zero-input special case; that first attempt
	// fails, and a workaround retries with the non-witness format.
	roundTripped, err := NewFromRawBytes(&serialized, false)
	if err != nil {
		t.Fatalf("failed to deserialize empty PSBT: %v", err)
	}

	if len(roundTripped.UnsignedTx.TxIn) > 0 ||
		len(roundTripped.UnsignedTx.TxOut) > 0 {

		t.Fatalf("deserialized transaction not empty")
	}
}
// TestWitnessForNonWitnessUtxo makes sure that a packet that only has a non-
// witness UTXO set can still be signed correctly by adding witness data. This
// is to make sure that PSBTs following the CVE-2020-14199 bugfix are not
// rejected. See https://github.com/bitcoin/bitcoin/pull/19215.
func TestWitnessForNonWitnessUtxo(t *testing.T) {
	// Our witness UTXO is index 1 of this raw transaction from the test
	// vectors.
	prevTxRaw, _ := hex.DecodeString("0200000000010158e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd7501000000171600145f275f436b09a8cc9a2eb2a2f528485c68a56323feffffff02d8231f1b0100000017a914aed962d6654f9a2b36608eb9d64d2b260db4f1118700c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88702483045022100a22edcc6e5bc511af4cc4ae0de0fcd75c7e04d8c1c3a8aa9d820ed4b967384ec02200642963597b9b1bc22c75e9f3e117284a962188bf5e8a74c895089046a20ad770121035509a48eb623e10aace8bfd0212fdb8a8e5af3c94b0b133b95e114cab89e4f7965000000")
	prevTx := wire.NewMsgTx(2)
	err := prevTx.Deserialize(bytes.NewReader(prevTxRaw))
	if err != nil {
		t.Fatalf("failed to deserialize previous TX: %v", err)
	}

	// First create a packet that contains one input and one output.
	outPkScript, _ := hex.DecodeString(CUTestHexData["scriptPubkey1"])
	packet := &Packet{
		UnsignedTx: &wire.MsgTx{
			Version:  2,
			LockTime: 0,
			TxIn: []*wire.TxIn{{
				PreviousOutPoint: wire.OutPoint{
					Hash:  prevTx.TxHash(),
					Index: 1,
				},
			}},
			TxOut: []*wire.TxOut{{
				PkScript: outPkScript,
				Value:    1.9 * btcutil.SatoshiPerBitcoin,
			}},
		},
		Inputs:  []PInput{{}},
		Outputs: []POutput{{}},
	}

	// Create an updater for the packet. This also performs a sanity check.
	updater, err := NewUpdater(packet)
	if err != nil {
		t.Fatalf("failed to sanity check raw packet: %v", err)
	}

	// Now add our witness UTXO to the input. But because hardware wallets
	// that are patched against CVE-2020-14199 require the full non-witness
	// UTXO to be set for all inputs, we do what Core does and add the full
	// transaction in the NonWitnessUtxo instead of just the outpoint in
	// WitnessUtxo.
	err = updater.AddInNonWitnessUtxo(prevTx, 0)
	if err != nil {
		t.Fatalf("failed to update non-witness UTXO: %v", err)
	}

	// Then add the redeem scripts and witness scripts.
	redeemScript, _ := hex.DecodeString(CUTestHexData["Input2RedeemScript"])
	err = updater.AddInRedeemScript(redeemScript, 0)
	if err != nil {
		t.Fatalf("failed to update redeem script: %v", err)
	}
	witnessScript, _ := hex.DecodeString(CUTestHexData["Input2WitnessScript"])
	err = updater.AddInWitnessScript(witnessScript, 0)
	if err != nil {
		// Fixed: this error previously said "redeem script", copied
		// from the branch above, which made failures misleading.
		t.Fatalf("failed to update witness script: %v", err)
	}

	// Add the first of the two partial signatures.
	sig1, _ := hex.DecodeString("3044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01")
	pub1, _ := hex.DecodeString("03089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc")
	res, err := updater.Sign(0, sig1, pub1, nil, nil)
	if err != nil {
		t.Fatalf("failed to sign with pubkey 1: %v", err)
	}
	if res != SignSuccesful {
		t.Fatalf("signing was not successful, got result %v", res)
	}

	// Check that the finalization procedure fails here due to not
	// meeting the multisig policy
	success, err := MaybeFinalize(packet, 0)
	if success {
		t.Fatalf("Incorrectly succeeded in finalizing without sigs")
	}
	if err != ErrUnsupportedScriptType {
		t.Fatalf("Got unexpected error type: %v", err)
	}

	// Add the second partial signature.
	sig2, _ := hex.DecodeString("3044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d201")
	pub2, _ := hex.DecodeString("023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73")
	res, err = updater.Sign(0, sig2, pub2, nil, nil)
	if err != nil {
		t.Fatalf("failed to sign with pubkey 2: %v", err)
	}
	if res != SignSuccesful {
		t.Fatalf("signing was not successful, got result %v", res)
	}

	// Finally make sure we can finalize the packet and extract the raw TX.
	err = MaybeFinalizeAll(packet)
	if err != nil {
		t.Fatalf("error finalizing PSBT: %v", err)
	}
	_, err = Extract(packet)
	if err != nil {
		t.Fatalf("unable to extract funding TX: %v", err)
	}
}
|
package goble
import (
"bytes"
"encoding/binary"
"fmt"
"log"
"time"
"github.com/dim13/goble/xpc"
)
// "github.com/dim13/goble/xpc"
//
// BLE support
//
//go:generate stringer -type State
// State mirrors the CoreBluetooth manager state reported by blued in
// stateChange events; String() is provided by `go generate` (stringer).
type State int

const (
	unknown State = iota
	resetting
	unsupported
	unauthorized
	poweredOff
	poweredOn
)
// Property is a bit set of GATT characteristic property flags.
type Property int

const (
	Broadcast Property = 1 << iota
	Read
	WriteWithoutResponse
	Write
	Notify
	Indicate
	AuthenticatedSignedWrites
	ExtendedProperties
)

// Readable reports whether the Read flag is set.
func (p Property) Readable() bool {
	return p&Read != 0
}

// String renders the set flags as space-terminated words, in
// declaration order.
// NOTE(review): "authenticateSignedWrites" (missing "d") matches the
// original output and is kept for compatibility.
func (p Property) String() (result string) {
	flags := []struct {
		bit  Property
		word string
	}{
		{Broadcast, "broadcast "},
		{Read, "read "},
		{WriteWithoutResponse, "writeWithoutResponse "},
		{Write, "write "},
		{Notify, "notify "},
		{Indicate, "indicate "},
		{AuthenticatedSignedWrites, "authenticateSignedWrites "},
		{ExtendedProperties, "extendedProperties "},
	}
	for _, f := range flags {
		if p&f.bit != 0 {
			result += f.word
		}
	}
	return
}
// ServiceData is one entry of advertised service data: a service UUID
// paired with its raw payload bytes.
type ServiceData struct {
	Uuid string
	Data []byte
}

// CharacteristicDescriptor is a discovered GATT descriptor and its
// attribute handle.
type CharacteristicDescriptor struct {
	Uuid   string
	Handle int
}

// ServiceCharacteristic is a discovered GATT characteristic.
// Descriptors is keyed both by descriptor UUID (string) and by handle
// (int), hence the interface{} key type.
type ServiceCharacteristic struct {
	Uuid        string
	Name        string
	Type        string
	Properties  Property
	Descriptors map[interface{}]*CharacteristicDescriptor
	Handle      int
	ValueHandle int
}

// ServiceHandle is a discovered GATT service with its handle range.
// Characteristics is keyed by UUID (string) and by handle / value
// handle (int).
type ServiceHandle struct {
	Uuid            string
	Name            string
	Type            string
	Characteristics map[interface{}]*ServiceCharacteristic
	startHandle     int
	endHandle       int
}

// Advertisement holds the fields parsed from a discovered peripheral's
// advertising data.
type Advertisement struct {
	LocalName        string
	TxPowerLevel     int
	ManufacturerData []byte
	ServiceData      []ServiceData
	ServiceUuids     []string
}

// Peripheral is a remote device seen while scanning; it is updated as
// further discovery events arrive. Services is keyed by UUID (string)
// and by start handle (int).
type Peripheral struct {
	Uuid          xpc.UUID
	Address       string
	AddressType   string
	Connectable   bool
	Advertisement Advertisement
	Rssi          int
	Services      map[interface{}]*ServiceHandle
}

// GATT Descriptor (local; used when publishing services via SetServices).
type Descriptor struct {
	uuid  xpc.UUID
	value []byte
}

// GATT Characteristic (local; used when publishing services via SetServices).
type Characteristic struct {
	uuid        xpc.UUID
	properties  Property
	secure      Property
	descriptors []Descriptor
	value       []byte
}

// GATT Service (local; used when publishing services via SetServices).
type Service struct {
	uuid            xpc.UUID
	characteristics []Characteristic
}
// BLE wraps the XPC connection to the blued daemon and tracks
// discovered peripherals; events are delivered through the embedded
// Emitter.
type BLE struct {
	Emitter
	conn        xpc.XPC
	verbose     bool
	peripherals map[string]*Peripheral // keyed by device UUID string

	attributes             xpc.Array
	lastServiceAttributeId int
	allowDuplicates        bool
}
// New creates a BLE instance and opens its XPC connection to the
// com.apple.blued service.
func New() *BLE {
	ble := new(BLE)
	ble.peripherals = make(map[string]*Peripheral)
	ble.Emitter.Init()
	ble.conn = xpc.XpcConnect("com.apple.blued", ble)
	return ble
}
// SetVerbose toggles debug logging for both the BLE layer and its
// event emitter.
func (ble *BLE) SetVerbose(v bool) {
	ble.Emitter.SetVerbose(v)
	ble.verbose = v
}
// events: CoreBluetooth message ids received from blued.
const (
	stateChange             = 6
	advertisingStart        = 16
	advertisingStop         = 17
	discover                = 37
	connect                 = 38
	disconnect              = 40
	mtuChange               = 53
	rssiUpdate              = 54
	serviceDiscover         = 55
	characteristicsDiscover = 63
	descriptorsDiscover     = 75
	read                    = 70
)
// HandleXpcEvent processes BLE events and asynchronous errors arriving
// from blued (implements XpcEventHandler). Each CoreBluetooth message
// id is translated into a named Event emitted on the embedded Emitter.
func (ble *BLE) HandleXpcEvent(event xpc.Dict, err error) {
	if err != nil {
		log.Println("error:", err)
		// An error may still be accompanied by a usable event.
		if event == nil {
			return
		}
	}

	id := event.MustGetInt("kCBMsgId")
	args := event.MustGetDict("kCBMsgArgs")

	if ble.verbose {
		log.Printf("event: %v %#v\n", id, args)
	}

	switch id {
	case stateChange:
		state := args.MustGetInt("kCBMsgArgState")
		ble.Emit(Event{
			Name:  "stateChange",
			State: State(state).String(),
		})

	case advertisingStart:
		result := args.MustGetInt("kCBMsgArgResult")
		if result != 0 {
			log.Printf("event: error in advertisingStart %v\n", result)
		} else {
			ble.Emit(Event{
				Name: "advertisingStart",
			})
		}

	case advertisingStop:
		result := args.MustGetInt("kCBMsgArgResult")
		if result != 0 {
			log.Printf("event: error in advertisingStop %v\n", result)
		} else {
			ble.Emit(Event{
				Name: "advertisingStop",
			})
		}

	case discover:
		// Parse the advertisement payload and add/update the peripheral
		// in the cache; only emit when new or when duplicates are allowed.
		advdata := args.MustGetDict("kCBMsgArgAdvertisementData")
		if len(advdata) == 0 {
			//log.Println("event: discover with no advertisement data")
			break
		}

		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		advertisement := Advertisement{
			LocalName:        advdata.GetString("kCBAdvDataLocalName", args.GetString("kCBMsgArgName", "")),
			TxPowerLevel:     advdata.GetInt("kCBAdvDataTxPowerLevel", 0),
			ManufacturerData: advdata.GetBytes("kCBAdvDataManufacturerData", nil),
			ServiceData:      []ServiceData{},
			ServiceUuids:     []string{},
		}

		connectable := advdata.GetInt("kCBAdvDataIsConnectable", 0) > 0
		rssi := args.GetInt("kCBMsgArgRssi", 0)

		if uuids, ok := advdata["kCBAdvDataServiceUUIDs"]; ok {
			for _, uuid := range uuids.(xpc.Array) {
				advertisement.ServiceUuids = append(advertisement.ServiceUuids, fmt.Sprintf("%x", uuid))
			}
		}

		if data, ok := advdata["kCBAdvDataServiceData"]; ok {
			// Service data arrives as a flat array of (uuid, data) pairs.
			sdata := data.(xpc.Array)

			for i := 0; i < len(sdata); i += 2 {
				sd := ServiceData{
					Uuid: fmt.Sprintf("%x", sdata[i+0].([]byte)),
					Data: sdata[i+1].([]byte),
				}

				advertisement.ServiceData = append(advertisement.ServiceData, sd)
			}
		}

		pid := deviceUuid.String()
		p := ble.peripherals[pid]
		emit := ble.allowDuplicates || p == nil

		if p == nil {
			// add new peripheral
			p = &Peripheral{
				Uuid:          deviceUuid,
				Connectable:   connectable,
				Advertisement: advertisement,
				Rssi:          rssi,
				Services:      map[interface{}]*ServiceHandle{},
			}

			ble.peripherals[pid] = p
		} else {
			// update peripheral
			p.Advertisement = advertisement
			p.Rssi = rssi
		}

		if emit {
			ble.Emit(Event{
				Name:       "discover",
				DeviceUUID: deviceUuid,
				Peripheral: *p,
			})
		}

	case connect:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		ble.Emit(Event{
			Name:       "connect",
			DeviceUUID: deviceUuid,
		})

	case disconnect:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		ble.Emit(Event{
			Name:       "disconnect",
			DeviceUUID: deviceUuid,
		})

	case mtuChange:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		mtu := args.MustGetInt("kCBMsgArgATTMTU")

		// bleno here converts the deviceUuid to an address
		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			ble.Emit(Event{
				Name:       "mtuChange",
				DeviceUUID: deviceUuid,
				Peripheral: *p,
				Mtu:        mtu,
			})
		}

	case rssiUpdate:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		rssi := args.MustGetInt("kCBMsgArgData")

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			p.Rssi = rssi

			ble.Emit(Event{
				Name:       "rssiUpdate",
				DeviceUUID: deviceUuid,
				Peripheral: *p,
			})
		}

	case serviceDiscover:
		// Build the service map, indexing each service by both its UUID
		// and its start handle so later events can find it either way.
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		servicesUuids := []string{}
		servicesHandles := map[interface{}]*ServiceHandle{}

		if dservices, ok := args["kCBMsgArgServices"]; ok {
			for _, s := range dservices.(xpc.Array) {
				service := s.(xpc.Dict)
				serviceHandle := ServiceHandle{
					Uuid:            service.MustGetHexBytes("kCBMsgArgUUID"),
					startHandle:     service.MustGetInt("kCBMsgArgServiceStartHandle"),
					endHandle:       service.MustGetInt("kCBMsgArgServiceEndHandle"),
					Characteristics: map[interface{}]*ServiceCharacteristic{},
				}

				if nameType, ok := knownServices[serviceHandle.Uuid]; ok {
					serviceHandle.Name = nameType.Name
					serviceHandle.Type = nameType.Type
				}

				servicesHandles[serviceHandle.Uuid] = &serviceHandle
				servicesHandles[serviceHandle.startHandle] = &serviceHandle

				servicesUuids = append(servicesUuids, serviceHandle.Uuid)
			}
		}

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			p.Services = servicesHandles

			ble.Emit(Event{
				Name:       "servicesDiscover",
				DeviceUUID: deviceUuid,
				Peripheral: *p,
			})
		}

	case characteristicsDiscover:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		serviceStartHandle := args.MustGetInt("kCBMsgArgServiceStartHandle")

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			service := p.Services[serviceStartHandle]
			//result := args.MustGetInt("kCBMsgArgResult")

			for _, c := range args.MustGetArray("kCBMsgArgCharacteristics") {
				cDict := c.(xpc.Dict)

				characteristic := ServiceCharacteristic{
					Uuid:        cDict.MustGetHexBytes("kCBMsgArgUUID"),
					Handle:      cDict.MustGetInt("kCBMsgArgCharacteristicHandle"),
					ValueHandle: cDict.MustGetInt("kCBMsgArgCharacteristicValueHandle"),
					Descriptors: map[interface{}]*CharacteristicDescriptor{},
				}

				if nameType, ok := knownCharacteristics[characteristic.Uuid]; ok {
					characteristic.Name = nameType.Name
					characteristic.Type = nameType.Type
				}

				properties := cDict.MustGetInt("kCBMsgArgCharacteristicProperties")
				characteristic.Properties = Property(properties)

				if service != nil {
					// Index by UUID, handle and value handle.
					service.Characteristics[characteristic.Uuid] = &characteristic
					service.Characteristics[characteristic.Handle] = &characteristic
					service.Characteristics[characteristic.ValueHandle] = &characteristic
				}
			}

			if service != nil {
				ble.Emit(Event{
					Name:        "characteristicsDiscover",
					DeviceUUID:  deviceUuid,
					ServiceUuid: service.Uuid,
					Peripheral:  *p,
				})
			} else {
				log.Println("no service", serviceStartHandle)
			}
		} else {
			log.Println("no peripheral", deviceUuid)
		}

	case descriptorsDiscover:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		characteristicsHandle := args.MustGetInt("kCBMsgArgCharacteristicHandle")
		//result := args.MustGetInt("kCBMsgArgResult")

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			// Linear scan: the event only carries the characteristic
			// handle, not the owning service.
			for _, s := range p.Services {
				if c, ok := s.Characteristics[characteristicsHandle]; ok {
					for _, d := range args.MustGetArray("kCBMsgArgDescriptors") {
						dDict := d.(xpc.Dict)

						descriptor := CharacteristicDescriptor{
							Uuid:   dDict.MustGetHexBytes("kCBMsgArgUUID"),
							Handle: dDict.MustGetInt("kCBMsgArgDescriptorHandle"),
						}

						c.Descriptors[descriptor.Uuid] = &descriptor
						c.Descriptors[descriptor.Handle] = &descriptor
					}

					ble.Emit(Event{
						Name:               "descriptorsDiscover",
						DeviceUUID:         deviceUuid,
						ServiceUuid:        s.Uuid,
						CharacteristicUuid: c.Uuid,
						Peripheral:         *p,
					})

					break
				}
			}
		} else {
			log.Println("no peripheral", deviceUuid)
		}

	case read:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		characteristicsHandle := args.MustGetInt("kCBMsgArgCharacteristicHandle")
		//result := args.MustGetInt("kCBMsgArgResult")
		isNotification := args.GetInt("kCBMsgArgIsNotification", 0) != 0
		data := args.MustGetBytes("kCBMsgArgData")

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			for _, s := range p.Services {
				if c, ok := s.Characteristics[characteristicsHandle]; ok {
					ble.Emit(Event{
						Name:               "read",
						DeviceUUID:         deviceUuid,
						ServiceUuid:        s.Uuid,
						CharacteristicUuid: c.Uuid,
						Peripheral:         *p,
						Data:               data,
						IsNotification:     isNotification,
					})

					break
				}
			}
		}
	}
}
// sendCBMsg packs id and args into a CoreBluetooth message and sends
// it over the XPC connection to blued.
func (ble *BLE) sendCBMsg(id int, args xpc.Dict) {
	msg := xpc.Dict{"kCBMsgId": id, "kCBMsgArgs": args}

	if ble.verbose {
		log.Printf("sendCBMsg %#v\n", msg)
	}

	ble.conn.Send(msg, ble.verbose)
}
// CoreBluetooth request message ids sent to blued.
const (
	initMsg                    = 1
	startAdvertisingMsg        = 8
	stopAdvertisingMsg         = 9
	startScanningMsg           = 29
	stopScanningMsg            = 30
	connectMsg                 = 31
	disconnectMsg              = 32
	updateRssiMsg              = 43
	discoverServicesMsg        = 44
	discoverCharacteristicsMsg = 61
	discoverDescriptorsMsg     = 69
	readMsg                    = 64
	removeServicesMsg          = 12
	setServicesMsg             = 10
)
// Init registers this client with blued using a unique per-process name.
func (ble *BLE) Init() {
	args := xpc.Dict{
		"kCBMsgArgName":    fmt.Sprintf("goble-%v", time.Now().Unix()),
		"kCBMsgArgOptions": xpc.Dict{"kCBInitOptionShowPowerAlert": 0},
		"kCBMsgArgType":    0,
	}
	ble.sendCBMsg(initMsg, args)
}
// StartAdvertising begins advertising the given local name and service
// UUIDs.
func (ble *BLE) StartAdvertising(name string, serviceUuids []xpc.UUID) {
	uuids := make([][]byte, 0, len(serviceUuids))
	for _, uuid := range serviceUuids {
		uuids = append(uuids, []byte(uuid[:]))
	}
	ble.sendCBMsg(startAdvertisingMsg, xpc.Dict{
		"kCBAdvDataLocalName":    name,
		"kCBAdvDataServiceUUIDs": uuids,
	})
}
// StartAdvertisingIBeaconData advertises the raw iBeacon payload,
// choosing the message key by OS release (Darwin "14." = Yosemite).
func (ble *BLE) StartAdvertisingIBeaconData(data []byte) {
	var utsname xpc.Utsname
	xpc.Uname(&utsname)

	if utsname.Release < "14." {
		// Pre-Yosemite accepts the beacon payload directly.
		ble.sendCBMsg(startAdvertisingMsg, xpc.Dict{
			"kCBAdvDataAppleBeaconKey": data,
		})
		return
	}

	// Yosemite and later expect full Apple manufacturer data:
	// total length, 0xFF, company id 0x004C (LE), type 0x02, payload length.
	l := len(data)
	buf := bytes.NewBuffer([]byte{byte(l + 5), 0xFF, 0x4C, 0x00, 0x02, byte(l)})
	buf.Write(data)

	ble.sendCBMsg(startAdvertisingMsg, xpc.Dict{
		"kCBAdvDataAppleMfgData": buf.Bytes(),
	})
}
// StartAdvertisingIBeacon advertises as an iBeacon with the given
// proximity UUID, major/minor pair and measured power, serialized
// big-endian as the beacon payload.
func (ble *BLE) StartAdvertisingIBeacon(uuid xpc.UUID, major, minor uint16, measuredPower int8) {
	var payload bytes.Buffer
	for _, field := range []interface{}{uuid[:], major, minor, measuredPower} {
		binary.Write(&payload, binary.BigEndian, field)
	}
	ble.StartAdvertisingIBeaconData(payload.Bytes())
}
// StopAdvertising stops any in-progress advertising.
func (ble *BLE) StopAdvertising() {
	ble.sendCBMsg(stopAdvertisingMsg, nil)
}
// StartScanning begins scanning for peripherals advertising any of the
// given service UUIDs; allowDuplicates controls whether repeat
// advertisements re-emit "discover" events.
func (ble *BLE) StartScanning(serviceUuids []xpc.UUID, allowDuplicates bool) {
	uuids := make([]string, 0, len(serviceUuids))
	for _, uuid := range serviceUuids {
		uuids = append(uuids, uuid.String())
	}

	options := xpc.Dict{}
	if allowDuplicates {
		options["kCBScanOptionAllowDuplicates"] = 1
	}

	ble.allowDuplicates = allowDuplicates
	ble.sendCBMsg(startScanningMsg, xpc.Dict{
		"kCBMsgArgUUIDs":   uuids,
		"kCBMsgArgOptions": options,
	})
}
// StopScanning stops an in-progress scan.
func (ble *BLE) StopScanning() {
	ble.sendCBMsg(stopScanningMsg, nil)
}
// Connect asks blued to connect to a previously discovered peripheral,
// requesting notification on disconnection.
func (ble *BLE) Connect(deviceUuid xpc.UUID) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	ble.sendCBMsg(connectMsg, xpc.Dict{
		"kCBMsgArgOptions":    xpc.Dict{"kCBConnectOptionNotifyOnDisconnection": 1},
		"kCBMsgArgDeviceUUID": p.Uuid,
	})
}
// Disconnect asks blued to drop the connection to a known peripheral.
func (ble *BLE) Disconnect(deviceUuid xpc.UUID) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	ble.sendCBMsg(disconnectMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID": p.Uuid,
	})
}
// UpdateRssi requests a fresh RSSI reading for a known peripheral; the
// result arrives as an "rssiUpdate" event.
func (ble *BLE) UpdateRssi(deviceUuid xpc.UUID) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	ble.sendCBMsg(updateRssiMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID": p.Uuid,
	})
}
// DiscoverServices requests GATT service discovery on a known
// peripheral, optionally filtered to the given service UUIDs.
func (ble *BLE) DiscoverServices(deviceUuid xpc.UUID, uuids []xpc.UUID) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}

	sUuids := make([]string, 0, len(uuids))
	for _, uuid := range uuids {
		sUuids = append(sUuids, uuid.String()) // uuids may be a list of []byte (2 bytes)
	}

	ble.sendCBMsg(discoverServicesMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID": p.Uuid,
		"kCBMsgArgUUIDs":      sUuids,
	})
}
// DiscoverCharacterstics (sic — name kept for API compatibility)
// requests characteristic discovery within one service of a known
// peripheral.
func (ble *BLE) DiscoverCharacterstics(deviceUuid xpc.UUID, serviceUuid string, characteristicUuids []string) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}

	// characteristicUuids may be a list of []byte (2 bytes); copy as-is.
	cUuids := make([]string, len(characteristicUuids))
	copy(cUuids, characteristicUuids)

	ble.sendCBMsg(discoverCharacteristicsMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID":         p.Uuid,
		"kCBMsgArgServiceStartHandle": p.Services[serviceUuid].startHandle,
		"kCBMsgArgServiceEndHandle":   p.Services[serviceUuid].endHandle,
		"kCBMsgArgUUIDs":              cUuids,
	})
}
// DiscoverDescriptors requests descriptor discovery for one
// characteristic of a known peripheral.
// NOTE(review): assumes serviceUuid/characteristicUuid are present in
// the peripheral's maps; unknown ids would nil-deref — confirm callers.
func (ble *BLE) DiscoverDescriptors(deviceUuid xpc.UUID, serviceUuid, characteristicUuid string) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	c := p.Services[serviceUuid].Characteristics[characteristicUuid]
	ble.sendCBMsg(discoverDescriptorsMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID":                p.Uuid,
		"kCBMsgArgCharacteristicHandle":      c.Handle,
		"kCBMsgArgCharacteristicValueHandle": c.ValueHandle,
	})
}
// Read requests the value of one characteristic of a known peripheral;
// the value arrives as a "read" event.
func (ble *BLE) Read(deviceUuid xpc.UUID, serviceUuid, characteristicUuid string) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	c := p.Services[serviceUuid].Characteristics[characteristicUuid]
	ble.sendCBMsg(readMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID":                p.Uuid,
		"kCBMsgArgCharacteristicHandle":      c.Handle,
		"kCBMsgArgCharacteristicValueHandle": c.ValueHandle,
	})
}
// RemoveServices unpublishes all local GATT services.
func (ble *BLE) RemoveServices() {
	ble.sendCBMsg(removeServicesMsg, nil)
}
// SetServices replaces the local GATT database: it removes all
// currently published services, then registers each given service with
// blued (one setServicesMsg per service), assigning sequential
// attribute ids to services and characteristics.
func (ble *BLE) SetServices(services []Service) {
	ble.RemoveServices()

	// Attribute id 0 is reserved; real attributes start at 1.
	ble.attributes = xpc.Array{nil}
	attributeId := 1

	for _, service := range services {
		arg := xpc.Dict{
			"kCBMsgArgAttributeID":     attributeId,
			"kCBMsgArgAttributeIDs":    []int{},
			"kCBMsgArgCharacteristics": nil,
			"kCBMsgArgType":            1, // 1 => primary, 0 => excluded
			"kCBMsgArgUUID":            service.uuid.String(),
		}

		ble.attributes = append(ble.attributes, service)
		ble.lastServiceAttributeId = attributeId
		attributeId += 1

		characteristics := xpc.Array{}

		for _, characteristic := range service.characteristics {
			// Translate Property flags into CoreBluetooth property and
			// permission bitmasks; secure flags select the encrypted
			// permission variant.
			properties := 0
			permissions := 0

			if Read&characteristic.properties != 0 {
				properties |= 0x02
				if Read&characteristic.secure != 0 {
					permissions |= 0x04
				} else {
					permissions |= 0x01
				}
			}

			if WriteWithoutResponse&characteristic.properties != 0 {
				properties |= 0x04
				if WriteWithoutResponse&characteristic.secure != 0 {
					permissions |= 0x08
				} else {
					permissions |= 0x02
				}
			}

			if Write&characteristic.properties != 0 {
				properties |= 0x08
				// Bug fix: this previously tested
				// WriteWithoutResponse&characteristic.secure (copied
				// from the branch above), so a secure Write-only
				// characteristic got the unencrypted permission.
				if Write&characteristic.secure != 0 {
					permissions |= 0x08
				} else {
					permissions |= 0x02
				}
			}

			if Notify&characteristic.properties != 0 {
				if Notify&characteristic.secure != 0 {
					properties |= 0x100
				} else {
					properties |= 0x10
				}
			}

			if Indicate&characteristic.properties != 0 {
				if Indicate&characteristic.secure != 0 {
					properties |= 0x200
				} else {
					properties |= 0x20
				}
			}

			descriptors := xpc.Array{}
			for _, descriptor := range characteristic.descriptors {
				descriptors = append(descriptors, xpc.Dict{"kCBMsgArgData": descriptor.value, "kCBMsgArgUUID": descriptor.uuid.String()})
			}

			characteristicArg := xpc.Dict{
				"kCBMsgArgAttributeID":              attributeId,
				"kCBMsgArgAttributePermissions":     permissions,
				"kCBMsgArgCharacteristicProperties": properties,
				"kCBMsgArgData":                     characteristic.value,
				"kCBMsgArgDescriptors":              descriptors,
				"kCBMsgArgUUID":                     characteristic.uuid.String(),
			}

			ble.attributes = append(ble.attributes, characteristic)
			characteristics = append(characteristics, characteristicArg)
			attributeId += 1
		}

		arg["kCBMsgArgCharacteristics"] = characteristics
		ble.sendCBMsg(setServicesMsg, arg) // publish this service
	}
}
Better names for events
package goble
import (
"bytes"
"encoding/binary"
"fmt"
"log"
"time"
"github.com/dim13/goble/xpc"
)
// "github.com/dim13/goble/xpc"
//
// BLE support
//
//go:generate stringer -type State
// State mirrors the CoreBluetooth manager state reported by blued in
// stateChange events; String() is provided by `go generate` (stringer).
type State int

const (
	unknown State = iota
	resetting
	unsupported
	unauthorized
	poweredOff
	poweredOn
)
// Property is a bit set of GATT characteristic property flags.
type Property int

const (
	Broadcast Property = 1 << iota
	Read
	WriteWithoutResponse
	Write
	Notify
	Indicate
	AuthenticatedSignedWrites
	ExtendedProperties
)

// Readable reports whether the Read flag is set.
func (p Property) Readable() bool {
	return (p & Read) != 0
}

// String renders the set flags as space-terminated words, in
// declaration order.
// NOTE(review): "authenticateSignedWrites" drops the "d"; kept as-is
// since consumers may match on the exact string.
func (p Property) String() (result string) {
	if (p & Broadcast) != 0 {
		result += "broadcast "
	}
	if (p & Read) != 0 {
		result += "read "
	}
	if (p & WriteWithoutResponse) != 0 {
		result += "writeWithoutResponse "
	}
	if (p & Write) != 0 {
		result += "write "
	}
	if (p & Notify) != 0 {
		result += "notify "
	}
	if (p & Indicate) != 0 {
		result += "indicate "
	}
	if (p & AuthenticatedSignedWrites) != 0 {
		result += "authenticateSignedWrites "
	}
	if (p & ExtendedProperties) != 0 {
		result += "extendedProperties "
	}
	return
}
// ServiceData is one entry of advertised service data: a service UUID
// paired with its raw payload bytes.
type ServiceData struct {
	Uuid string
	Data []byte
}

// CharacteristicDescriptor is a discovered GATT descriptor and its
// attribute handle.
type CharacteristicDescriptor struct {
	Uuid   string
	Handle int
}

// ServiceCharacteristic is a discovered GATT characteristic.
// Descriptors is keyed both by descriptor UUID (string) and by handle
// (int), hence the interface{} key type.
type ServiceCharacteristic struct {
	Uuid        string
	Name        string
	Type        string
	Properties  Property
	Descriptors map[interface{}]*CharacteristicDescriptor
	Handle      int
	ValueHandle int
}

// ServiceHandle is a discovered GATT service with its handle range.
// Characteristics is keyed by UUID (string) and by handle / value
// handle (int).
type ServiceHandle struct {
	Uuid            string
	Name            string
	Type            string
	Characteristics map[interface{}]*ServiceCharacteristic
	startHandle     int
	endHandle       int
}

// Advertisement holds the fields parsed from a discovered peripheral's
// advertising data.
type Advertisement struct {
	LocalName        string
	TxPowerLevel     int
	ManufacturerData []byte
	ServiceData      []ServiceData
	ServiceUuids     []string
}

// Peripheral is a remote device seen while scanning; it is updated as
// further discovery events arrive. Services is keyed by UUID (string)
// and by start handle (int).
type Peripheral struct {
	Uuid          xpc.UUID
	Address       string
	AddressType   string
	Connectable   bool
	Advertisement Advertisement
	Rssi          int
	Services      map[interface{}]*ServiceHandle
}

// GATT Descriptor (local; used when publishing services via SetServices).
type Descriptor struct {
	uuid  xpc.UUID
	value []byte
}

// GATT Characteristic (local; used when publishing services via SetServices).
type Characteristic struct {
	uuid        xpc.UUID
	properties  Property
	secure      Property
	descriptors []Descriptor
	value       []byte
}

// GATT Service (local; used when publishing services via SetServices).
type Service struct {
	uuid            xpc.UUID
	characteristics []Characteristic
}
// BLE wraps the XPC connection to the blued daemon and tracks
// discovered peripherals; events are delivered through the embedded
// Emitter.
type BLE struct {
	Emitter
	conn        xpc.XPC
	verbose     bool
	peripherals map[string]*Peripheral // keyed by device UUID string

	attributes             xpc.Array
	lastServiceAttributeId int
	allowDuplicates        bool
}
// New creates a BLE instance and opens its XPC connection to the
// com.apple.blued service.
func New() *BLE {
	ble := &BLE{peripherals: map[string]*Peripheral{}, Emitter: Emitter{}}
	ble.Emitter.Init()
	ble.conn = xpc.XpcConnect("com.apple.blued", ble)
	return ble
}
// SetVerbose toggles debug logging for both the BLE layer and its
// event emitter.
func (ble *BLE) SetVerbose(v bool) {
	ble.verbose = v
	ble.Emitter.SetVerbose(v)
}
// events: CoreBluetooth message ids received from blued.
const (
	stateChangeEvt             = 6
	advertisingStartEvt        = 16
	advertisingStopEvt         = 17
	discoverEvt                = 37
	connectEvt                 = 38
	disconnectEvt              = 40
	mtuChangeEvt               = 53
	rssiUpdateEvt              = 54
	serviceDiscoverEvt         = 55
	characteristicsDiscoverEvt = 63
	descriptorDiscoverEvt      = 75
	readEvt                    = 70
)
// HandleXpcEvent processes BLE events and asynchronous errors arriving
// from blued (implements XpcEventHandler). Each CoreBluetooth message
// id is translated into a named Event emitted on the embedded Emitter.
func (ble *BLE) HandleXpcEvent(event xpc.Dict, err error) {
	if err != nil {
		log.Println("error:", err)
		// An error may still be accompanied by a usable event.
		if event == nil {
			return
		}
	}

	id := event.MustGetInt("kCBMsgId")
	args := event.MustGetDict("kCBMsgArgs")

	if ble.verbose {
		log.Printf("event: %v %#v\n", id, args)
	}

	switch id {
	case stateChangeEvt:
		state := args.MustGetInt("kCBMsgArgState")
		ble.Emit(Event{
			Name:  "stateChange",
			State: State(state).String(),
		})

	case advertisingStartEvt:
		result := args.MustGetInt("kCBMsgArgResult")
		if result != 0 {
			log.Printf("event: error in advertisingStart %v\n", result)
		} else {
			ble.Emit(Event{
				Name: "advertisingStart",
			})
		}

	case advertisingStopEvt:
		result := args.MustGetInt("kCBMsgArgResult")
		if result != 0 {
			log.Printf("event: error in advertisingStop %v\n", result)
		} else {
			ble.Emit(Event{
				Name: "advertisingStop",
			})
		}

	case discoverEvt:
		// Parse the advertisement payload and add/update the peripheral
		// in the cache; only emit when new or when duplicates are allowed.
		advdata := args.MustGetDict("kCBMsgArgAdvertisementData")
		if len(advdata) == 0 {
			//log.Println("event: discover with no advertisement data")
			break
		}

		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		advertisement := Advertisement{
			LocalName:        advdata.GetString("kCBAdvDataLocalName", args.GetString("kCBMsgArgName", "")),
			TxPowerLevel:     advdata.GetInt("kCBAdvDataTxPowerLevel", 0),
			ManufacturerData: advdata.GetBytes("kCBAdvDataManufacturerData", nil),
			ServiceData:      []ServiceData{},
			ServiceUuids:     []string{},
		}

		connectable := advdata.GetInt("kCBAdvDataIsConnectable", 0) > 0
		rssi := args.GetInt("kCBMsgArgRssi", 0)

		if uuids, ok := advdata["kCBAdvDataServiceUUIDs"]; ok {
			for _, uuid := range uuids.(xpc.Array) {
				advertisement.ServiceUuids = append(advertisement.ServiceUuids, fmt.Sprintf("%x", uuid))
			}
		}

		if data, ok := advdata["kCBAdvDataServiceData"]; ok {
			// Service data arrives as a flat array of (uuid, data) pairs.
			sdata := data.(xpc.Array)

			for i := 0; i < len(sdata); i += 2 {
				sd := ServiceData{
					Uuid: fmt.Sprintf("%x", sdata[i+0].([]byte)),
					Data: sdata[i+1].([]byte),
				}

				advertisement.ServiceData = append(advertisement.ServiceData, sd)
			}
		}

		pid := deviceUuid.String()
		p := ble.peripherals[pid]
		emit := ble.allowDuplicates || p == nil

		if p == nil {
			// add new peripheral
			p = &Peripheral{
				Uuid:          deviceUuid,
				Connectable:   connectable,
				Advertisement: advertisement,
				Rssi:          rssi,
				Services:      map[interface{}]*ServiceHandle{},
			}

			ble.peripherals[pid] = p
		} else {
			// update peripheral
			p.Advertisement = advertisement
			p.Rssi = rssi
		}

		if emit {
			ble.Emit(Event{
				Name:       "discover",
				DeviceUUID: deviceUuid,
				Peripheral: *p,
			})
		}

	case connectEvt:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		ble.Emit(Event{
			Name:       "connect",
			DeviceUUID: deviceUuid,
		})

	case disconnectEvt:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		ble.Emit(Event{
			Name:       "disconnect",
			DeviceUUID: deviceUuid,
		})

	case mtuChangeEvt:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		mtu := args.MustGetInt("kCBMsgArgATTMTU")

		// bleno here converts the deviceUuid to an address
		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			ble.Emit(Event{
				Name:       "mtuChange",
				DeviceUUID: deviceUuid,
				Peripheral: *p,
				Mtu:        mtu,
			})
		}

	case rssiUpdateEvt:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		rssi := args.MustGetInt("kCBMsgArgData")

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			p.Rssi = rssi

			ble.Emit(Event{
				Name:       "rssiUpdate",
				DeviceUUID: deviceUuid,
				Peripheral: *p,
			})
		}

	case serviceDiscoverEvt:
		// Build the service map, indexing each service by both its UUID
		// and its start handle so later events can find it either way.
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		servicesUuids := []string{}
		servicesHandles := map[interface{}]*ServiceHandle{}

		if dservices, ok := args["kCBMsgArgServices"]; ok {
			for _, s := range dservices.(xpc.Array) {
				service := s.(xpc.Dict)
				serviceHandle := ServiceHandle{
					Uuid:            service.MustGetHexBytes("kCBMsgArgUUID"),
					startHandle:     service.MustGetInt("kCBMsgArgServiceStartHandle"),
					endHandle:       service.MustGetInt("kCBMsgArgServiceEndHandle"),
					Characteristics: map[interface{}]*ServiceCharacteristic{},
				}

				if nameType, ok := knownServices[serviceHandle.Uuid]; ok {
					serviceHandle.Name = nameType.Name
					serviceHandle.Type = nameType.Type
				}

				servicesHandles[serviceHandle.Uuid] = &serviceHandle
				servicesHandles[serviceHandle.startHandle] = &serviceHandle

				servicesUuids = append(servicesUuids, serviceHandle.Uuid)
			}
		}

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			p.Services = servicesHandles

			ble.Emit(Event{
				Name:       "servicesDiscover",
				DeviceUUID: deviceUuid,
				Peripheral: *p,
			})
		}

	case characteristicsDiscoverEvt:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		serviceStartHandle := args.MustGetInt("kCBMsgArgServiceStartHandle")

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			service := p.Services[serviceStartHandle]
			//result := args.MustGetInt("kCBMsgArgResult")

			for _, c := range args.MustGetArray("kCBMsgArgCharacteristics") {
				cDict := c.(xpc.Dict)

				characteristic := ServiceCharacteristic{
					Uuid:        cDict.MustGetHexBytes("kCBMsgArgUUID"),
					Handle:      cDict.MustGetInt("kCBMsgArgCharacteristicHandle"),
					ValueHandle: cDict.MustGetInt("kCBMsgArgCharacteristicValueHandle"),
					Descriptors: map[interface{}]*CharacteristicDescriptor{},
				}

				if nameType, ok := knownCharacteristics[characteristic.Uuid]; ok {
					characteristic.Name = nameType.Name
					characteristic.Type = nameType.Type
				}

				properties := cDict.MustGetInt("kCBMsgArgCharacteristicProperties")
				characteristic.Properties = Property(properties)

				if service != nil {
					// Index by UUID, handle and value handle.
					service.Characteristics[characteristic.Uuid] = &characteristic
					service.Characteristics[characteristic.Handle] = &characteristic
					service.Characteristics[characteristic.ValueHandle] = &characteristic
				}
			}

			if service != nil {
				ble.Emit(Event{
					Name:        "characteristicsDiscover",
					DeviceUUID:  deviceUuid,
					ServiceUuid: service.Uuid,
					Peripheral:  *p,
				})
			} else {
				log.Println("no service", serviceStartHandle)
			}
		} else {
			log.Println("no peripheral", deviceUuid)
		}

	case descriptorDiscoverEvt:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		characteristicsHandle := args.MustGetInt("kCBMsgArgCharacteristicHandle")
		//result := args.MustGetInt("kCBMsgArgResult")

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			// Linear scan: the event only carries the characteristic
			// handle, not the owning service.
			for _, s := range p.Services {
				if c, ok := s.Characteristics[characteristicsHandle]; ok {
					for _, d := range args.MustGetArray("kCBMsgArgDescriptors") {
						dDict := d.(xpc.Dict)

						descriptor := CharacteristicDescriptor{
							Uuid:   dDict.MustGetHexBytes("kCBMsgArgUUID"),
							Handle: dDict.MustGetInt("kCBMsgArgDescriptorHandle"),
						}

						c.Descriptors[descriptor.Uuid] = &descriptor
						c.Descriptors[descriptor.Handle] = &descriptor
					}

					ble.Emit(Event{
						Name:               "descriptorsDiscover",
						DeviceUUID:         deviceUuid,
						ServiceUuid:        s.Uuid,
						CharacteristicUuid: c.Uuid,
						Peripheral:         *p,
					})

					break
				}
			}
		} else {
			log.Println("no peripheral", deviceUuid)
		}

	case readEvt:
		deviceUuid := args.MustGetUUID("kCBMsgArgDeviceUUID")
		characteristicsHandle := args.MustGetInt("kCBMsgArgCharacteristicHandle")
		//result := args.MustGetInt("kCBMsgArgResult")
		isNotification := args.GetInt("kCBMsgArgIsNotification", 0) != 0
		data := args.MustGetBytes("kCBMsgArgData")

		if p, ok := ble.peripherals[deviceUuid.String()]; ok {
			for _, s := range p.Services {
				if c, ok := s.Characteristics[characteristicsHandle]; ok {
					ble.Emit(Event{
						Name:               "read",
						DeviceUUID:         deviceUuid,
						ServiceUuid:        s.Uuid,
						CharacteristicUuid: c.Uuid,
						Peripheral:         *p,
						Data:               data,
						IsNotification:     isNotification,
					})

					break
				}
			}
		}
	}
}
// sendCBMsg packs the given message id and arguments into the shape the
// blued daemon expects and sends it over the XPC connection, logging the
// whole message first when verbose mode is on.
func (ble *BLE) sendCBMsg(id int, args xpc.Dict) {
	msg := xpc.Dict{"kCBMsgId": id, "kCBMsgArgs": args}
	if ble.verbose {
		log.Printf("sendCBMsg %#v\n", msg)
	}
	ble.conn.Send(msg, ble.verbose)
}
// XPC message ids understood by blued. The values follow Apple's private
// kCBMsgId protocol (see sendCBMsg), which is why the numbering is not
// contiguous — e.g. readMsg (64) sits between the two discover ids.
const (
	initMsg                    = 1
	startAdvertisingMsg        = 8
	stopAdvertisingMsg         = 9
	startScanningMsg           = 29
	stopScanningMsg            = 30
	connectMsg                 = 31
	disconnectMsg              = 32
	updateRssiMsg              = 43
	discoverServicesMsg        = 44
	discoverCharacteristicsMsg = 61
	discoverDescriptorsMsg     = 69
	readMsg                    = 64
	removeServicesMsg          = 12
	setServicesMsg             = 10
)
// Init registers this client with blued. The client name is made unique
// with a timestamp; the power alert is suppressed.
// NOTE(review): the meaning of kCBMsgArgType 0 is not visible here —
// presumably it selects the manager role; confirm against the protocol.
func (ble *BLE) Init() {
	ble.sendCBMsg(initMsg, xpc.Dict{
		"kCBMsgArgName":    fmt.Sprintf("goble-%v", time.Now().Unix()),
		"kCBMsgArgOptions": xpc.Dict{"kCBInitOptionShowPowerAlert": 0},
		"kCBMsgArgType":    0,
	})
}
// StartAdvertising begins advertising the given local name together with
// the supplied service UUIDs (sent as raw byte slices).
func (ble *BLE) StartAdvertising(name string, serviceUuids []xpc.UUID) {
	uuids := make([][]byte, 0, len(serviceUuids))
	for _, uuid := range serviceUuids {
		uuids = append(uuids, []byte(uuid[:]))
	}
	ble.sendCBMsg(startAdvertisingMsg, xpc.Dict{
		"kCBAdvDataLocalName":    name,
		"kCBAdvDataServiceUUIDs": uuids,
	})
}
// StartAdvertisingIBeaconData advertises raw iBeacon payload data.
// On Darwin release >= 14 (OS X Yosemite) the payload must be wrapped in
// a raw Apple manufacturer-data record; older releases accept the beacon
// key directly.
// NOTE(review): the release check is a lexicographic string compare —
// it would misbehave for hypothetical releases >= "100.".
func (ble *BLE) StartAdvertisingIBeaconData(data []byte) {
	var utsname xpc.Utsname
	xpc.Uname(&utsname)
	if utsname.Release >= "14." {
		// Record layout: total length (payload+5), AD type 0xFF (mfg
		// data), Apple company id 0x004C little-endian, beacon type
		// 0x02, payload length, then the payload itself.
		l := len(data)
		buf := bytes.NewBuffer([]byte{byte(l + 5), 0xFF, 0x4C, 0x00, 0x02, byte(l)})
		buf.Write(data)
		ble.sendCBMsg(startAdvertisingMsg, xpc.Dict{
			"kCBAdvDataAppleMfgData": buf.Bytes(),
		})
	} else {
		ble.sendCBMsg(startAdvertisingMsg, xpc.Dict{
			"kCBAdvDataAppleBeaconKey": data,
		})
	}
}
// StartAdvertisingIBeacon advertises as an iBeacon with the given
// proximity UUID, major/minor ids and calibrated signal strength
// (measuredPower). Fields are packed big-endian; binary.Write into a
// bytes.Buffer cannot fail for these fixed-size values, so the errors
// are ignored.
func (ble *BLE) StartAdvertisingIBeacon(uuid xpc.UUID, major, minor uint16, measuredPower int8) {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, uuid[:])
	binary.Write(&buf, binary.BigEndian, major)
	binary.Write(&buf, binary.BigEndian, minor)
	binary.Write(&buf, binary.BigEndian, measuredPower)
	ble.StartAdvertisingIBeaconData(buf.Bytes())
}
// StopAdvertising stops any ongoing advertisement; the message carries
// no arguments.
func (ble *BLE) StopAdvertising() {
	ble.sendCBMsg(stopAdvertisingMsg, nil)
}
// StartScanning begins scanning for peripherals that advertise any of
// the given service UUIDs. When allowDuplicates is set, repeated
// discovery events for the same peripheral are delivered.
func (ble *BLE) StartScanning(serviceUuids []xpc.UUID, allowDuplicates bool) {
	uuids := make([]string, 0, len(serviceUuids))
	for _, uuid := range serviceUuids {
		uuids = append(uuids, uuid.String())
	}
	var opts xpc.Dict
	if allowDuplicates {
		opts = xpc.Dict{"kCBScanOptionAllowDuplicates": 1}
	} else {
		opts = xpc.Dict{}
	}
	ble.allowDuplicates = allowDuplicates
	ble.sendCBMsg(startScanningMsg, xpc.Dict{
		"kCBMsgArgUUIDs":   uuids,
		"kCBMsgArgOptions": opts,
	})
}
// StopScanning stops an ongoing scan; the message carries no arguments.
func (ble *BLE) StopScanning() {
	ble.sendCBMsg(stopScanningMsg, nil)
}
// Connect initiates a connection to the previously discovered peripheral
// with the given UUID, requesting a notification on disconnection.
// Unknown peripherals are logged and ignored.
func (ble *BLE) Connect(deviceUuid xpc.UUID) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	ble.sendCBMsg(connectMsg, xpc.Dict{
		"kCBMsgArgOptions":    xpc.Dict{"kCBConnectOptionNotifyOnDisconnection": 1},
		"kCBMsgArgDeviceUUID": p.Uuid,
	})
}
// Disconnect drops the connection to the peripheral with the given UUID.
// Unknown peripherals are logged and ignored.
func (ble *BLE) Disconnect(deviceUuid xpc.UUID) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	ble.sendCBMsg(disconnectMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID": p.Uuid,
	})
}
// UpdateRssi requests a fresh RSSI reading for the peripheral with the
// given UUID. Unknown peripherals are logged and ignored.
func (ble *BLE) UpdateRssi(deviceUuid xpc.UUID) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	ble.sendCBMsg(updateRssiMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID": p.Uuid,
	})
}
// DiscoverServices asks the peripheral identified by deviceUuid to
// discover the given service UUIDs (stringified before sending).
// Unknown peripherals are logged and ignored.
func (ble *BLE) DiscoverServices(deviceUuid xpc.UUID, uuids []xpc.UUID) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	sUuids := make([]string, 0, len(uuids))
	for _, uuid := range uuids {
		// uuids may be a list of []byte (2 bytes)
		sUuids = append(sUuids, uuid.String())
	}
	ble.sendCBMsg(discoverServicesMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID": p.Uuid,
		"kCBMsgArgUUIDs":      sUuids,
	})
}
// DiscoverCharacterstics asks the peripheral to discover the given
// characteristic UUIDs within the service identified by serviceUuid.
// (The exported name keeps its historical spelling for compatibility.)
func (ble *BLE) DiscoverCharacterstics(deviceUuid xpc.UUID, serviceUuid string, characteristicUuids []string) {
	sUuid := deviceUuid.String()
	p, ok := ble.peripherals[sUuid]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	// Guard the service lookup: the original indexed p.Services twice
	// without checking, and dereferenced a nil entry for an unknown
	// service. Logging mirrors the event handler's "no service" style.
	s, ok := p.Services[serviceUuid]
	if !ok || s == nil {
		log.Println("no service", serviceUuid)
		return
	}
	// characteristicUuids may be a list of []byte (2 bytes); copy them
	// through as-is (replaces the element-by-element loop).
	cUuids := make([]string, len(characteristicUuids))
	copy(cUuids, characteristicUuids)
	ble.sendCBMsg(discoverCharacteristicsMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID":         p.Uuid,
		"kCBMsgArgServiceStartHandle": s.startHandle,
		"kCBMsgArgServiceEndHandle":   s.endHandle,
		"kCBMsgArgUUIDs":              cUuids,
	})
}
// DiscoverDescriptors asks the peripheral to discover the descriptors of
// the given characteristic within the given service.
func (ble *BLE) DiscoverDescriptors(deviceUuid xpc.UUID, serviceUuid, characteristicUuid string) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	// Guard the nested lookups: the original dereferenced both map
	// results unconditionally and panicked on an unknown service or
	// characteristic.
	s, ok := p.Services[serviceUuid]
	if !ok || s == nil {
		log.Println("no service", serviceUuid)
		return
	}
	c, ok := s.Characteristics[characteristicUuid]
	if !ok || c == nil {
		log.Println("no characteristic", characteristicUuid)
		return
	}
	ble.sendCBMsg(discoverDescriptorsMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID":                p.Uuid,
		"kCBMsgArgCharacteristicHandle":      c.Handle,
		"kCBMsgArgCharacteristicValueHandle": c.ValueHandle,
	})
}
// Read issues a read request for the given characteristic of the given
// service on the peripheral identified by deviceUuid. The result arrives
// asynchronously via the "read" event.
func (ble *BLE) Read(deviceUuid xpc.UUID, serviceUuid, characteristicUuid string) {
	p, ok := ble.peripherals[deviceUuid.String()]
	if !ok {
		log.Println("no peripheral", deviceUuid)
		return
	}
	// Guard the nested lookups: the original dereferenced both map
	// results unconditionally and panicked on an unknown service or
	// characteristic.
	s, ok := p.Services[serviceUuid]
	if !ok || s == nil {
		log.Println("no service", serviceUuid)
		return
	}
	c, ok := s.Characteristics[characteristicUuid]
	if !ok || c == nil {
		log.Println("no characteristic", characteristicUuid)
		return
	}
	ble.sendCBMsg(readMsg, xpc.Dict{
		"kCBMsgArgDeviceUUID":                p.Uuid,
		"kCBMsgArgCharacteristicHandle":      c.Handle,
		"kCBMsgArgCharacteristicValueHandle": c.ValueHandle,
	})
}
// RemoveServices removes all services previously exported by this
// peripheral; the message carries no arguments.
func (ble *BLE) RemoveServices() {
	ble.sendCBMsg(removeServicesMsg, nil)
}
// SetServices replaces the services exported by this peripheral: all
// existing services are removed, then each given service and its
// characteristics/descriptors are registered with blued, one
// setServices message per service. Attribute ids are assigned
// sequentially starting at 1 (slot 0 of ble.attributes is reserved).
func (ble *BLE) SetServices(services []Service) {
	ble.RemoveServices()
	ble.attributes = xpc.Array{nil}
	attributeId := 1
	for _, service := range services {
		arg := xpc.Dict{
			"kCBMsgArgAttributeID":     attributeId,
			"kCBMsgArgAttributeIDs":    []int{},
			"kCBMsgArgCharacteristics": nil,
			"kCBMsgArgType":            1, // 1 => primary, 0 => excluded
			"kCBMsgArgUUID":            service.uuid.String(),
		}
		ble.attributes = append(ble.attributes, service)
		ble.lastServiceAttributeId = attributeId
		attributeId += 1
		characteristics := xpc.Array{}
		for _, characteristic := range service.characteristics {
			// Translate the portable property/secure flag sets into the
			// ATT property bits and attribute permission bits blued expects.
			properties := 0
			permissions := 0
			if Read&characteristic.properties != 0 {
				properties |= 0x02
				if Read&characteristic.secure != 0 {
					permissions |= 0x04
				} else {
					permissions |= 0x01
				}
			}
			if WriteWithoutResponse&characteristic.properties != 0 {
				properties |= 0x04
				if WriteWithoutResponse&characteristic.secure != 0 {
					permissions |= 0x08
				} else {
					permissions |= 0x02
				}
			}
			if Write&characteristic.properties != 0 {
				properties |= 0x08
				// Fixed: the secure check here used the
				// WriteWithoutResponse mask, so a secure Write-only
				// characteristic was granted the unsecured permission.
				if Write&characteristic.secure != 0 {
					permissions |= 0x08
				} else {
					permissions |= 0x02
				}
			}
			if Notify&characteristic.properties != 0 {
				if Notify&characteristic.secure != 0 {
					properties |= 0x100
				} else {
					properties |= 0x10
				}
			}
			if Indicate&characteristic.properties != 0 {
				if Indicate&characteristic.secure != 0 {
					properties |= 0x200
				} else {
					properties |= 0x20
				}
			}
			descriptors := xpc.Array{}
			for _, descriptor := range characteristic.descriptors {
				descriptors = append(descriptors, xpc.Dict{"kCBMsgArgData": descriptor.value, "kCBMsgArgUUID": descriptor.uuid.String()})
			}
			characteristicArg := xpc.Dict{
				"kCBMsgArgAttributeID":              attributeId,
				"kCBMsgArgAttributePermissions":     permissions,
				"kCBMsgArgCharacteristicProperties": properties,
				"kCBMsgArgData":                     characteristic.value,
				"kCBMsgArgDescriptors":              descriptors,
				"kCBMsgArgUUID":                     characteristic.uuid.String(),
			}
			ble.attributes = append(ble.attributes, characteristic)
			characteristics = append(characteristics, characteristicArg)
			attributeId += 1
		}
		arg["kCBMsgArgCharacteristics"] = characteristics
		// one setServices message is sent per registered service
		ble.sendCBMsg(setServicesMsg, arg)
	}
}
|
package gps
import (
"errors"
"go/build"
"go/parser"
"go/token"
"path/filepath"
"sort"
"strconv"
"strings"
"unicode"
)
var (
	// gorootSrc is the GOROOT source directory (presumably used elsewhere
	// to recognize standard-library packages — not referenced in this chunk).
	gorootSrc = filepath.Join(build.Default.GOROOT, "src")
	// ignoreTags lists build tags whose files fillPackage skips entirely.
	ignoreTags = []string{} //[]string{"appengine", "ignore"} //TODO: appengine is a special case for now: https://github.com/tools/godep/issues/353
)
// fullPackageInDir imports the package rooted at dir: GOROOT packages
// are re-imported fully via go/build, everything else is completed with
// fillPackage.
// NOTE(review): no caching happens here despite the original comment;
// also the error from the first ImportDir is overwritten on both
// branches — confirm callers tolerate a partially filled *build.Package.
func fullPackageInDir(dir string) (pkg *build.Package, err error) {
	pkg, err = build.ImportDir(dir, build.FindOnly)
	if pkg.Goroot {
		pkg, err = build.ImportDir(pkg.Dir, 0)
	} else {
		err = fillPackage(pkg)
	}
	return pkg, err
}
// fillPackage fills p with its name, Go file lists and (test) imports by
// parsing the *.go files in p.Dir. Assumes p.Dir is set at a minimum;
// GOROOT packages are left untouched. Files carrying a tag from
// ignoreTags are recorded in IgnoredGoFiles.
func fillPackage(p *build.Package) error {
	if p.Goroot {
		return nil
	}
	if p.SrcRoot == "" {
		for _, base := range build.Default.SrcDirs() {
			if strings.HasPrefix(p.Dir, base) {
				p.SrcRoot = base
			}
		}
	}
	if p.SrcRoot == "" {
		return errors.New("Unable to find SrcRoot for package " + p.ImportPath)
	}
	if p.Root == "" {
		p.Root = filepath.Dir(p.SrcRoot)
	}
	var buildMatch = "+build "
	var buildFieldSplit = func(r rune) bool {
		return unicode.IsSpace(r) || r == ','
	}
	//debugln("Filling package:", p.ImportPath, "from", p.Dir)
	gofiles, err := filepath.Glob(filepath.Join(p.Dir, "*.go"))
	if err != nil {
		//debugln("Error globbing", err)
		return err
	}
	if len(gofiles) == 0 {
		return &build.NoGoError{Dir: p.Dir}
	}
	var testImports []string
	var imports []string
NextFile:
	for _, file := range gofiles {
		//debugln(file)
		pf, err := parser.ParseFile(token.NewFileSet(), file, nil, parser.ImportsOnly|parser.ParseComments)
		if err != nil {
			return err
		}
		testFile := strings.HasSuffix(file, "_test.go")
		fname := filepath.Base(file)
		for _, c := range pf.Comments {
			// Fixed: build constraints are only honored before the
			// package clause, so comments after it must not cause the
			// file to be ignored.
			if c.Pos() > pf.Package {
				continue
			}
			ct := c.Text()
			if i := strings.Index(ct, buildMatch); i != -1 {
				for _, t := range strings.FieldsFunc(ct[i+len(buildMatch):], buildFieldSplit) {
					for _, tag := range ignoreTags {
						if t == tag {
							p.IgnoredGoFiles = append(p.IgnoredGoFiles, fname)
							continue NextFile
						}
					}
					//TODO: Needed in GPS?
					/* if versionMatch.MatchString(t) && !isSameOrNewer(t, majorGoVersion) {
						debugln("Adding", fname, "to ignored list because of version tag", t)
						p.IgnoredGoFiles = append(p.IgnoredGoFiles, fname)
						continue NextFile
					}
					if versionNegativeMatch.MatchString(t) && isSameOrNewer(t[1:], majorGoVersion) {
						debugln("Adding", fname, "to ignored list because of version tag", t)
						p.IgnoredGoFiles = append(p.IgnoredGoFiles, fname)
						continue NextFile
					} */
				}
			}
		}
		if testFile {
			p.TestGoFiles = append(p.TestGoFiles, fname)
			if p.Name == "" {
				// external test packages are named "pkg_test"
				p.Name = strings.Split(pf.Name.Name, "_")[0]
			}
		} else {
			if p.Name == "" {
				p.Name = pf.Name.Name
			}
			p.GoFiles = append(p.GoFiles, fname)
		}
		for _, is := range pf.Imports {
			name, err := strconv.Unquote(is.Path.Value)
			if err != nil {
				return err // can't happen?
			}
			if testFile {
				testImports = append(testImports, name)
			} else {
				imports = append(imports, name)
			}
		}
	}
	imports = uniq(imports)
	testImports = uniq(testImports)
	p.Imports = imports
	p.TestImports = testImports
	return nil
}
// uniq sorts a in place (when not already sorted) and removes adjacent
// duplicates, returning the deduplicated prefix of the same backing
// slice. A nil input yields an empty, non-nil slice.
func uniq(a []string) []string {
	if a == nil {
		return make([]string, 0)
	}
	if !sort.StringsAreSorted(a) {
		sort.Strings(a)
	}
	// Keep the first element unconditionally, then compare each element
	// with the last kept one. (The previous zero-value sentinel silently
	// dropped a leading empty string.)
	i := 0
	for j, t := range a {
		if j == 0 || t != a[i-1] {
			a[i] = t
			i++
		}
	}
	return a[:i]
}
Skip build-constraint comments that appear after the package declaration, since Go only honors them before it
package gps
import (
"errors"
"go/build"
"go/parser"
"go/token"
"path/filepath"
"sort"
"strconv"
"strings"
"unicode"
)
var (
	// gorootSrc is the GOROOT source directory (presumably used elsewhere
	// to recognize standard-library packages — not referenced in this chunk).
	gorootSrc = filepath.Join(build.Default.GOROOT, "src")
	// ignoreTags lists build tags whose files fillPackage skips entirely.
	ignoreTags = []string{} //[]string{"appengine", "ignore"} //TODO: appengine is a special case for now: https://github.com/tools/godep/issues/353
)
// fullPackageInDir imports the package rooted at dir: GOROOT packages
// are re-imported fully via go/build, everything else is completed with
// fillPackage.
// NOTE(review): no caching happens here despite the original comment;
// also the error from the first ImportDir is overwritten on both
// branches — confirm callers tolerate a partially filled *build.Package.
func fullPackageInDir(dir string) (pkg *build.Package, err error) {
	pkg, err = build.ImportDir(dir, build.FindOnly)
	if pkg.Goroot {
		pkg, err = build.ImportDir(pkg.Dir, 0)
	} else {
		err = fillPackage(pkg)
	}
	return pkg, err
}
// fillPackage fills p with its name, Go file lists and (test) imports by
// parsing the *.go files in p.Dir. Assumes p.Dir is set at a minimum;
// GOROOT packages are left untouched. Files carrying a tag from
// ignoreTags are recorded in IgnoredGoFiles.
func fillPackage(p *build.Package) error {
	if p.Goroot {
		return nil
	}
	if p.SrcRoot == "" {
		for _, base := range build.Default.SrcDirs() {
			if strings.HasPrefix(p.Dir, base) {
				p.SrcRoot = base
			}
		}
	}
	if p.SrcRoot == "" {
		return errors.New("Unable to find SrcRoot for package " + p.ImportPath)
	}
	if p.Root == "" {
		p.Root = filepath.Dir(p.SrcRoot)
	}
	var buildMatch = "+build "
	var buildFieldSplit = func(r rune) bool {
		return unicode.IsSpace(r) || r == ','
	}
	//debugln("Filling package:", p.ImportPath, "from", p.Dir)
	gofiles, err := filepath.Glob(filepath.Join(p.Dir, "*.go"))
	if err != nil {
		//debugln("Error globbing", err)
		return err
	}
	if len(gofiles) == 0 {
		return &build.NoGoError{Dir: p.Dir}
	}
	var testImports []string
	var imports []string
NextFile:
	for _, file := range gofiles {
		//debugln(file)
		pf, err := parser.ParseFile(token.NewFileSet(), file, nil, parser.ImportsOnly|parser.ParseComments)
		if err != nil {
			return err
		}
		testFile := strings.HasSuffix(file, "_test.go")
		fname := filepath.Base(file)
		for _, c := range pf.Comments {
			// Build constraints are only honored before the package
			// clause, so later comments must not ignore the file.
			if c.Pos() > pf.Package { // +build must come before package
				continue
			}
			ct := c.Text()
			if i := strings.Index(ct, buildMatch); i != -1 {
				for _, t := range strings.FieldsFunc(ct[i+len(buildMatch):], buildFieldSplit) {
					for _, tag := range ignoreTags {
						if t == tag {
							p.IgnoredGoFiles = append(p.IgnoredGoFiles, fname)
							continue NextFile
						}
					}
					//TODO: Needed in GPS?
					/* if versionMatch.MatchString(t) && !isSameOrNewer(t, majorGoVersion) {
						debugln("Adding", fname, "to ignored list because of version tag", t)
						p.IgnoredGoFiles = append(p.IgnoredGoFiles, fname)
						continue NextFile
					}
					if versionNegativeMatch.MatchString(t) && isSameOrNewer(t[1:], majorGoVersion) {
						debugln("Adding", fname, "to ignored list because of version tag", t)
						p.IgnoredGoFiles = append(p.IgnoredGoFiles, fname)
						continue NextFile
					} */
				}
			}
		}
		if testFile {
			p.TestGoFiles = append(p.TestGoFiles, fname)
			if p.Name == "" {
				// external test packages are named "pkg_test"
				p.Name = strings.Split(pf.Name.Name, "_")[0]
			}
		} else {
			if p.Name == "" {
				p.Name = pf.Name.Name
			}
			p.GoFiles = append(p.GoFiles, fname)
		}
		for _, is := range pf.Imports {
			name, err := strconv.Unquote(is.Path.Value)
			if err != nil {
				return err // can't happen?
			}
			if testFile {
				testImports = append(testImports, name)
			} else {
				imports = append(imports, name)
			}
		}
	}
	imports = uniq(imports)
	testImports = uniq(testImports)
	p.Imports = imports
	p.TestImports = testImports
	return nil
}
// uniq sorts a in place (when not already sorted) and removes adjacent
// duplicates, returning the deduplicated prefix of the same backing
// slice. A nil input yields an empty, non-nil slice.
func uniq(a []string) []string {
	if a == nil {
		return make([]string, 0)
	}
	if !sort.StringsAreSorted(a) {
		sort.Strings(a)
	}
	// Keep the first element unconditionally, then compare each element
	// with the last kept one. (The previous zero-value sentinel silently
	// dropped a leading empty string.)
	i := 0
	for j, t := range a {
		if j == 0 || t != a[i-1] {
			a[i] = t
			i++
		}
	}
	return a[:i]
}
|
package godbg
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"regexp"
"runtime/debug"
"strings"
)
// http://stackoverflow.com/a/23554672/6309 https://vividcortex.com/blog/2013/12/03/go-idiom-package-and-object/
// you design a type with methods as usual, and then you also place matching functions at the package level itself.
// These functions simply delegate to a default instance of the type that’s a private package-level variable, created in an init() function.
// Pdbg allows to print debug message with indent and function name added
type Pdbg struct {
	bout *bytes.Buffer // backing buffer for out messages when buffering is on
	berr *bytes.Buffer // backing buffer for err messages when buffering is on
	sout *bufio.Writer // buffered writer over bout; nil means os.Stdout
	serr *bufio.Writer // buffered writer over berr; nil means os.Stderr
	breaks   []string  // stack-trace substrings that end Pdbgf's scan
	excludes []string  // caller substrings that suppress a Pdbgf message
}
// Out returns the writer for normal messages of the global pdbg
// instance. By default, os.Stdout.
func Out() io.Writer {
	return pdbg.Out()
}

// Out returns the writer for normal messages of a given pdbg instance:
// the buffered writer installed by SetBuffers, else os.Stdout.
func (pdbg *Pdbg) Out() io.Writer {
	if pdbg.sout == nil {
		return os.Stdout
	}
	return pdbg.sout
}
// Err returns the writer for error messages of the global pdbg
// instance. By default, os.Stderr.
func Err() io.Writer {
	return pdbg.Err()
}

// Err returns the writer for error messages of a given pdbg instance:
// the buffered writer installed by SetBuffers, else os.Stderr.
func (pdbg *Pdbg) Err() io.Writer {
	if pdbg.serr == nil {
		return os.Stderr
	}
	return pdbg.serr
}
// pdbg is the package-level default instance the top-level functions
// delegate to.
var pdbg = NewPdbg()

// Option configures a Pdbg at construction time.
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type Option func(*Pdbg)
// SetBuffers replaces stdout and stderr with in-memory byte buffers
// (wrapped in bufio.Writers) so output can later be inspected through
// OutString/ErrString. Its signature matches Option, so it can be passed
// to NewPdbg. If apdbg is nil, the global pdbg instance is modified.
func SetBuffers(apdbg *Pdbg) {
	if apdbg == nil {
		apdbg = pdbg
	}
	apdbg.bout = bytes.NewBuffer(nil)
	apdbg.sout = bufio.NewWriter(apdbg.bout)
	apdbg.berr = bytes.NewBuffer(nil)
	apdbg.serr = bufio.NewWriter(apdbg.berr)
}
// SetExcludes sets the exclusion substrings on a pdbg instance; any
// Pdbgf call whose first caller frame matches one of them returns ""
// without printing.
func (apdbg *Pdbg) SetExcludes(excludes []string) {
	apdbg.excludes = excludes
}

// OptExcludes is an Option applying SetExcludes at Pdbg creation time.
func OptExcludes(excludes []string) Option {
	return func(apdbg *Pdbg) {
		apdbg.SetExcludes(excludes)
	}
}
// NewPdbg creates a Pdbg instance and applies the given options to it.
// A "smartystreets" break is always appended so stack scanning stops at
// the test framework's frames.
func NewPdbg(options ...Option) *Pdbg {
	res := &Pdbg{}
	for _, opt := range options {
		opt(res)
	}
	res.breaks = append(res.breaks, "smartystreets")
	return res
}
// ResetIOs resets the out and err buffers of the global pdbg instance.
func ResetIOs() {
	pdbg.ResetIOs()
}

// ResetIOs resets the out and err buffers (a no-op when the default
// stdout/stderr are in use).
// NOTE(review): assumes serr is set whenever sout is — true when both
// came from SetBuffers; a partially buffered Pdbg would panic here.
func (pdbg *Pdbg) ResetIOs() {
	if pdbg.sout != nil {
		pdbg.bout = bytes.NewBuffer(nil)
		pdbg.sout.Reset(pdbg.bout)
		pdbg.berr = bytes.NewBuffer(nil)
		pdbg.serr.Reset(pdbg.berr)
	}
}
// OutString flushes and returns the buffered out messages of the global
// pdbg instance; "" when output goes directly to os.Stdout.
func OutString() string {
	return pdbg.OutString()
}

// OutString flushes and returns the buffered out messages of a given
// pdbg instance; "" when output goes directly to os.Stdout.
func (pdbg *Pdbg) OutString() string {
	if pdbg.sout == nil {
		return ""
	}
	pdbg.sout.Flush()
	return pdbg.bout.String()
}
// ErrString flushes and returns the buffered error messages of the
// global pdbg instance; "" when output goes directly to os.Stderr.
func ErrString() string {
	return pdbg.ErrString()
}

// ErrString flushes and returns the buffered error messages of a given
// pdbg instance; "" when output goes directly to os.Stderr.
func (pdbg *Pdbg) ErrString() string {
	if pdbg.serr == nil {
		return ""
	}
	pdbg.serr.Flush()
	return pdbg.berr.String()
}
// For instance: github.com/VonC/godbg/_test/_obj_test/gogdb.go:174 (0x44711b)
// MustCompile panics at init on an invalid pattern instead of silently
// discarding the error and leaving a nil regexp behind (the previous
// `, _ =` form would have panicked later, at first use, far from the cause).
var rxDbgLine = regexp.MustCompile(`^.*\.go:(\d+)\s`)
var rxDbgFnct = regexp.MustCompile(`^\s+(?:.*?\(([^\)]+)\))?\.?([^:]+)`)
// pdbgInc consumes the next line from the stack-trace scanner (expected
// to name the calling function), extracts "receiver.function" from it
// and returns that joined with dbgLine as "fnct:line".
// NOTE(review): if the line does not match rxDbgFnct, mf is nil and the
// mf[2] index below panics — the commented-out guard suggests this was
// a known, deliberately removed case.
func pdbgInc(scanner *bufio.Scanner, dbgLine string) string {
	scanner.Scan()
	line := scanner.Text()
	mf := rxDbgFnct.FindSubmatchIndex([]byte(line))
	// fmt.Printf("lineF '%v', mf '%+v'\n", line, mf)
	/*if len(mf) == 0 {
		return ""
	}*/
	dbgFnct := ""
	if mf[2] > -1 {
		// group 1: the parenthesized receiver/package qualifier
		dbgFnct = line[mf[2]:mf[3]]
	}
	if dbgFnct != "" {
		dbgFnct = dbgFnct + "."
	}
	// group 2: the function name proper
	dbgFnct = dbgFnct + line[mf[4]:mf[5]]
	return dbgFnct + ":" + dbgLine
}
// pdbgExcluded reports whether dbg contains one of the configured
// exclusion substrings, in which case the message is suppressed.
func (pdbg *Pdbg) pdbgExcluded(dbg string) bool {
	for _, excl := range pdbg.excludes {
		if strings.Contains(dbg, excl) {
			return true
		}
	}
	return false
}

// pdbgBreak reports whether dbg contains one of the configured break
// substrings, which terminates the stack scan.
func (pdbg *Pdbg) pdbgBreak(dbg string) bool {
	for _, brk := range pdbg.breaks {
		if strings.Contains(dbg, brk) {
			return true
		}
	}
	return false
}
// Pdbgf prints a debug message, with indent and caller prefix, through
// the global pdbg instance; it returns the computed caller prefix.
func Pdbgf(format string, args ...interface{}) string {
	return pdbg.Pdbgf(format, args...)
}
// Pdbgf formats a message, prefixes it with caller information parsed
// from the current stack trace, indents it by call depth and writes it
// to pdbg.Err(). It returns the bare caller prefix, or "" when the
// first user frame matches an exclude.
func (pdbg *Pdbg) Pdbgf(format string, args ...interface{}) string {
	msg := fmt.Sprintf(format+"\n", args...)
	msg = strings.TrimSpace(msg)
	bstack := bytes.NewBuffer(debug.Stack())
	// fmt.Printf("%+v\n", bstack)
	scanner := bufio.NewScanner(bstack)
	pmsg := ""
	depth := 0
	for scanner.Scan() {
		line := scanner.Text()
		// fmt.Printf("xx LINE '%+v'\n", line)
		// Frames from the test-object build mark where user code starts.
		if strings.Contains(line, "/_obj_test/") {
			depth = 1
			continue
		}
		if pdbg.pdbgBreak(line) {
			break
		}
		m := rxDbgLine.FindSubmatchIndex([]byte(line))
		//fmt.Printf("'%s' (%s) => '%+v'\n", line, rxDbgLine.String(), m)
		/*if len(m) == 0 {
			continue
		}*/
		// Only the first few user frames contribute to the prefix:
		// depth 1 becomes "[caller]", deeper ones append " (caller)".
		if depth > 0 && depth < 4 {
			dbg := pdbgInc(scanner, line[m[2]:m[3]])
			if dbg == "" {
				continue
			}
			if depth == 1 {
				if pdbg.pdbgExcluded(dbg) {
					return ""
				}
				pmsg = "[" + dbg + "]"
			} else {
				pmsg = pmsg + " (" + dbg + ")"
			}
		}
		depth = depth + 1
	}
	// Indent proportionally to how deep the caller sits in the stack.
	spaces := ""
	if depth >= 2 {
		spaces = strings.Repeat(" ", depth-2)
	}
	// fmt.Printf("spaces '%s', depth '%d'\n", spaces, depth)
	res := pmsg
	pmsg = spaces + pmsg
	msg = pmsg + "\n" + spaces + " " + msg + "\n"
	// fmt.Printf("MSG '%v'\n", msg)
	fmt.Fprint(pdbg.Err(), fmt.Sprint(msg))
	return res
}
Pdbgf: remove an untestable case (comment out the empty-dbg guard)
package godbg
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"regexp"
"runtime/debug"
"strings"
)
// http://stackoverflow.com/a/23554672/6309 https://vividcortex.com/blog/2013/12/03/go-idiom-package-and-object/
// you design a type with methods as usual, and then you also place matching functions at the package level itself.
// These functions simply delegate to a default instance of the type that’s a private package-level variable, created in an init() function.
// Pdbg allows to print debug message with indent and function name added
type Pdbg struct {
	bout *bytes.Buffer // backing buffer for out messages when buffering is on
	berr *bytes.Buffer // backing buffer for err messages when buffering is on
	sout *bufio.Writer // buffered writer over bout; nil means os.Stdout
	serr *bufio.Writer // buffered writer over berr; nil means os.Stderr
	breaks   []string  // stack-trace substrings that end Pdbgf's scan
	excludes []string  // caller substrings that suppress a Pdbgf message
}
// Out returns the writer for normal messages of the global pdbg
// instance. By default, os.Stdout.
func Out() io.Writer {
	return pdbg.Out()
}

// Out returns the writer for normal messages of a given pdbg instance:
// the buffered writer installed by SetBuffers, else os.Stdout.
func (pdbg *Pdbg) Out() io.Writer {
	if pdbg.sout == nil {
		return os.Stdout
	}
	return pdbg.sout
}
// Err returns the writer for error messages of the global pdbg
// instance. By default, os.Stderr.
func Err() io.Writer {
	return pdbg.Err()
}

// Err returns the writer for error messages of a given pdbg instance:
// the buffered writer installed by SetBuffers, else os.Stderr.
func (pdbg *Pdbg) Err() io.Writer {
	if pdbg.serr == nil {
		return os.Stderr
	}
	return pdbg.serr
}
// pdbg is the package-level default instance the top-level functions
// delegate to.
var pdbg = NewPdbg()

// Option configures a Pdbg at construction time.
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type Option func(*Pdbg)
// SetBuffers replaces stdout and stderr with in-memory byte buffers
// (wrapped in bufio.Writers) so output can later be inspected through
// OutString/ErrString. Its signature matches Option, so it can be passed
// to NewPdbg. If apdbg is nil, the global pdbg instance is modified.
func SetBuffers(apdbg *Pdbg) {
	if apdbg == nil {
		apdbg = pdbg
	}
	apdbg.bout = bytes.NewBuffer(nil)
	apdbg.sout = bufio.NewWriter(apdbg.bout)
	apdbg.berr = bytes.NewBuffer(nil)
	apdbg.serr = bufio.NewWriter(apdbg.berr)
}
// SetExcludes sets the exclusion substrings on a pdbg instance; any
// Pdbgf call whose first caller frame matches one of them returns ""
// without printing.
func (apdbg *Pdbg) SetExcludes(excludes []string) {
	apdbg.excludes = excludes
}

// OptExcludes is an Option applying SetExcludes at Pdbg creation time.
func OptExcludes(excludes []string) Option {
	return func(apdbg *Pdbg) {
		apdbg.SetExcludes(excludes)
	}
}
// NewPdbg creates a Pdbg instance and applies the given options to it.
// A "smartystreets" break is always appended so stack scanning stops at
// the test framework's frames.
func NewPdbg(options ...Option) *Pdbg {
	res := &Pdbg{}
	for _, opt := range options {
		opt(res)
	}
	res.breaks = append(res.breaks, "smartystreets")
	return res
}
// ResetIOs resets the out and err buffers of the global pdbg instance.
func ResetIOs() {
	pdbg.ResetIOs()
}

// ResetIOs resets the out and err buffers (a no-op when the default
// stdout/stderr are in use).
// NOTE(review): assumes serr is set whenever sout is — true when both
// came from SetBuffers; a partially buffered Pdbg would panic here.
func (pdbg *Pdbg) ResetIOs() {
	if pdbg.sout != nil {
		pdbg.bout = bytes.NewBuffer(nil)
		pdbg.sout.Reset(pdbg.bout)
		pdbg.berr = bytes.NewBuffer(nil)
		pdbg.serr.Reset(pdbg.berr)
	}
}
// OutString flushes and returns the buffered out messages of the global
// pdbg instance; "" when output goes directly to os.Stdout.
func OutString() string {
	return pdbg.OutString()
}

// OutString flushes and returns the buffered out messages of a given
// pdbg instance; "" when output goes directly to os.Stdout.
func (pdbg *Pdbg) OutString() string {
	if pdbg.sout == nil {
		return ""
	}
	pdbg.sout.Flush()
	return pdbg.bout.String()
}
// ErrString flushes and returns the buffered error messages of the
// global pdbg instance; "" when output goes directly to os.Stderr.
func ErrString() string {
	return pdbg.ErrString()
}

// ErrString flushes and returns the buffered error messages of a given
// pdbg instance; "" when output goes directly to os.Stderr.
func (pdbg *Pdbg) ErrString() string {
	if pdbg.serr == nil {
		return ""
	}
	pdbg.serr.Flush()
	return pdbg.berr.String()
}
// For instance: github.com/VonC/godbg/_test/_obj_test/gogdb.go:174 (0x44711b)
// MustCompile panics at init on an invalid pattern instead of silently
// discarding the error and leaving a nil regexp behind (the previous
// `, _ =` form would have panicked later, at first use, far from the cause).
var rxDbgLine = regexp.MustCompile(`^.*\.go:(\d+)\s`)
var rxDbgFnct = regexp.MustCompile(`^\s+(?:.*?\(([^\)]+)\))?\.?([^:]+)`)
// pdbgInc consumes the next line from the stack-trace scanner (expected
// to name the calling function), extracts "receiver.function" from it
// and returns that joined with dbgLine as "fnct:line".
// NOTE(review): if the line does not match rxDbgFnct, mf is nil and the
// mf[2] index below panics — the commented-out guard suggests this was
// a known, deliberately removed case.
func pdbgInc(scanner *bufio.Scanner, dbgLine string) string {
	scanner.Scan()
	line := scanner.Text()
	mf := rxDbgFnct.FindSubmatchIndex([]byte(line))
	// fmt.Printf("lineF '%v', mf '%+v'\n", line, mf)
	/*if len(mf) == 0 {
		return ""
	}*/
	dbgFnct := ""
	if mf[2] > -1 {
		// group 1: the parenthesized receiver/package qualifier
		dbgFnct = line[mf[2]:mf[3]]
	}
	if dbgFnct != "" {
		dbgFnct = dbgFnct + "."
	}
	// group 2: the function name proper
	dbgFnct = dbgFnct + line[mf[4]:mf[5]]
	return dbgFnct + ":" + dbgLine
}
// pdbgExcluded reports whether dbg contains one of the configured
// exclusion substrings, in which case the message is suppressed.
func (pdbg *Pdbg) pdbgExcluded(dbg string) bool {
	for _, excl := range pdbg.excludes {
		if strings.Contains(dbg, excl) {
			return true
		}
	}
	return false
}

// pdbgBreak reports whether dbg contains one of the configured break
// substrings, which terminates the stack scan.
func (pdbg *Pdbg) pdbgBreak(dbg string) bool {
	for _, brk := range pdbg.breaks {
		if strings.Contains(dbg, brk) {
			return true
		}
	}
	return false
}
// Pdbgf prints a debug message, with indent and caller prefix, through
// the global pdbg instance; it returns the computed caller prefix.
func Pdbgf(format string, args ...interface{}) string {
	return pdbg.Pdbgf(format, args...)
}
// Pdbgf formats a message, prefixes it with caller information parsed
// from the current stack trace, indents it by call depth and writes it
// to pdbg.Err(). It returns the bare caller prefix, or "" when the
// first user frame matches an exclude. (The empty-dbg skip is now
// commented out: pdbgInc's result is used unconditionally.)
func (pdbg *Pdbg) Pdbgf(format string, args ...interface{}) string {
	msg := fmt.Sprintf(format+"\n", args...)
	msg = strings.TrimSpace(msg)
	bstack := bytes.NewBuffer(debug.Stack())
	// fmt.Printf("%+v\n", bstack)
	scanner := bufio.NewScanner(bstack)
	pmsg := ""
	depth := 0
	for scanner.Scan() {
		line := scanner.Text()
		// fmt.Printf("xx LINE '%+v'\n", line)
		// Frames from the test-object build mark where user code starts.
		if strings.Contains(line, "/_obj_test/") {
			depth = 1
			continue
		}
		if pdbg.pdbgBreak(line) {
			break
		}
		m := rxDbgLine.FindSubmatchIndex([]byte(line))
		//fmt.Printf("'%s' (%s) => '%+v'\n", line, rxDbgLine.String(), m)
		/*if len(m) == 0 {
			continue
		}*/
		// Only the first few user frames contribute to the prefix:
		// depth 1 becomes "[caller]", deeper ones append " (caller)".
		if depth > 0 && depth < 4 {
			dbg := pdbgInc(scanner, line[m[2]:m[3]])
			/*if dbg == "" {
				continue
			}*/
			if depth == 1 {
				if pdbg.pdbgExcluded(dbg) {
					return ""
				}
				pmsg = "[" + dbg + "]"
			} else {
				pmsg = pmsg + " (" + dbg + ")"
			}
		}
		depth = depth + 1
	}
	// Indent proportionally to how deep the caller sits in the stack.
	spaces := ""
	if depth >= 2 {
		spaces = strings.Repeat(" ", depth-2)
	}
	// fmt.Printf("spaces '%s', depth '%d'\n", spaces, depth)
	res := pmsg
	pmsg = spaces + pmsg
	msg = pmsg + "\n" + spaces + " " + msg + "\n"
	// fmt.Printf("MSG '%v'\n", msg)
	fmt.Fprint(pdbg.Err(), fmt.Sprint(msg))
	return res
}
|
//Package gogtm enables access to gt.m database
package gogtm
/*
#cgo CFLAGS: -I/opt/fis-gtm/6.3-000A
#cgo LDFLAGS: -L/opt/fis-gtm/6.3-000A -lgtmshr
#include <gtmxc_types.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#define maxstr 1048576
#ifndef NULL
#define NULL ((void *) 0)
#endif
#define CALLGTM(xyz) status = xyz ; \
if (0 != status ) { \
gtm_zstatus( msg, 2048 ); \
snprintf(errmsg, 2048, "Failure of %s with error: %s\n", #xyz, msg); \
return (int) status; \
}
int cip_init(char *errmsg, int maxmsglen) {
gtm_char_t msg[maxmsglen], err[maxmsglen];
gtm_string_t gtminit_str;
ci_name_descriptor gtminit;
gtm_status_t status;
gtminit_str.address = "gtminit";
gtminit_str.length = sizeof("gtminit")-1;
gtminit.rtn_name=gtminit_str;
gtminit.handle = NULL;
errmsg[0] = '\0';
err[0] = '\0';
CALLGTM (gtm_cip( >minit, &err));
if (0 != strlen( err )){
snprintf(errmsg, maxmsglen, "cip_init error: [%s]\n", err);
return 100;
}
return 0;
} // end of cip_init
int cip_set(char *s_global, char *s_value, char *errmsg, int maxmsglen) {
gtm_char_t err[maxmsglen], msg[maxmsglen];
gtm_string_t gtmset_str, p_value;
ci_name_descriptor gtmset;
gtm_status_t status;
gtmset_str.address = "gtmset";
gtmset_str.length = sizeof("gtmset")-1;
gtmset.rtn_name=gtmset_str;
gtmset.handle = NULL;
err[0] = '\0';
p_value.address = ( gtm_char_t *) s_value; p_value.length = strlen(s_value);
CALLGTM( gtm_cip( >mset, s_global, &p_value, &err));
if (0 != strlen( err )){
snprintf(errmsg, maxmsglen, "cip_set error: [%s]\n", err);
fprintf( stderr, "error set: %s", err);
return 100;
}
return 0;
} // end of cip_set
int cip_get(char *s_global, char *s_opt, char *s_ret, char *errmsg, int maxmsglen, int maxretlen) {
gtm_char_t err[maxmsglen], msg[maxmsglen];
gtm_string_t gtmget_str, p_opt;
ci_name_descriptor gtmget;
gtm_status_t status;
gtmget_str.address = "gtmget";
gtmget_str.length = sizeof("gtmget")-1;
gtmget.rtn_name=gtmget_str;
gtmget.handle = NULL;
err[0] = '\0';
p_opt.address = ( gtm_char_t *) s_opt; p_opt.length = strlen(s_opt);
CALLGTM( gtm_cip( >mget, s_global, &p_opt, s_ret, errmsg));
if (0 != strlen( errmsg )){
snprintf(errmsg, maxmsglen, "cip_get error: [%s]\n", err);
fprintf( stderr, "error set: %s", err);
return 100;
}
return 0;
} // end of cip_get
int cip_kill(char *s_global, char *errmsg, int maxmsglen) {
gtm_char_t err[maxmsglen], msg[maxmsglen];
gtm_string_t gtmkill_str;
ci_name_descriptor gtmkill;
gtm_status_t status;
gtmkill_str.address = "gtmkill";
gtmkill_str.length = sizeof("gtmkill")-1;
gtmkill.rtn_name=gtmkill_str;
gtmkill.handle = NULL;
err[0] = '\0';
CALLGTM( gtm_cip( >mkill, s_global, &err));
if (0 != strlen( err )){
snprintf(errmsg, maxmsglen, "cip_kill error: [%s]\n", err);
fprintf( stderr, "error set: %s", err);
return 100;
}
return 0;
} // end of cip_kill
int cip_zkill(char *s_global, char *errmsg, int maxmsglen) {
gtm_char_t err[maxmsglen], msg[maxmsglen];
gtm_string_t gtmzkill_str;
ci_name_descriptor gtmzkill;
gtm_status_t status;
gtmzkill_str.address = "gtmzkill";
gtmzkill_str.length = sizeof("gtmzkill")-1;
gtmzkill.rtn_name=gtmzkill_str;
gtmzkill.handle = NULL;
err[0] = '\0';
CALLGTM( gtm_cip( >mzkill, s_global, &err));
if (0 != strlen( err )){
snprintf(errmsg, maxmsglen, "cip_zkill error: [%s]\n", err);
fprintf( stderr, "error set: %s", err);
return 100;
}
return 0;
} // end of cip_zkill
int cip_xecute(char *s_global, char *errmsg, int maxmsglen) {
gtm_char_t err[maxmsglen], msg[maxmsglen];
gtm_string_t gtmxecute_str;
ci_name_descriptor gtmxecute;
gtm_status_t status;
gtmxecute_str.address = "gtmxecute";
gtmxecute_str.length = sizeof("gtmxecute")-1;
gtmxecute.rtn_name=gtmxecute_str;
gtmxecute.handle = NULL;
err[0] = '\0';
CALLGTM( gtm_cip( >mxecute, s_global, &err));
if (0 != strlen( err )){
snprintf(errmsg, maxmsglen, "cip_xecute error: [%s]\n", err);
fprintf( stderr, "error set: %s", err);
return 100;
}
return 0;
} // end of cip_xecute
*/
import "C"
import (
	"errors"
	"fmt"
	"unsafe"
)
//maxmsglen is the maximum length of an error message coming back from gt.m
//(matches the 2048-byte buffers in the C call-in shims above)
const maxmsglen = 2048

//maxretlen is the maximum length of a value retrieved from gt.m (1 MiB,
//matching maxstr in the C preamble)
const maxretlen = 1048576
//Set saves value to global in gt.m db.
//Sample usage: gogtm.Set("^test","value")
//It returns an error when the glvn is empty or when the call-in fails.
func Set(global string, val string) error {
	if len(global) < 1 {
		return errors.New("Set failed - you must provide glvn")
	}
	_global := C.CString(global)
	_val := C.CString(val)
	defer C.free(unsafe.Pointer(_global))
	defer C.free(unsafe.Pointer(_val))
	errmsg := make([]byte, maxmsglen)
	result := C.cip_set(_global, _val, (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)))
	if result != 0 {
		// string(result) would have decoded the status as a Unicode code
		// point; format it as a number, and trim errmsg at its NUL
		// terminator instead of embedding the whole zero-padded buffer.
		n := 0
		for n < len(errmsg) && errmsg[n] != 0 {
			n++
		}
		return fmt.Errorf("Set failed: %d. Error message: %s", int(result), errmsg[:n])
	}
	return nil
}
// Get retrieves the value stored at global (opt carries optional arguments
// for the M-side gtmget routine) from the gt.m database.
func Get(global string, opt string) (string, error) {
	if len(global) < 1 {
		return "", errors.New("Get failed - you must provide glvn")
	}
	_global := C.CString(global)
	_opt := C.CString(opt)
	_ret := make([]byte, maxretlen)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_global))
	defer C.free(unsafe.Pointer(_opt))
	// Removed dead code: a C.malloc(maxmsglen) was allocated and freed
	// without ever being used.
	result := C.cip_get(_global, _opt, (*C.char)(unsafe.Pointer(&_ret[0])), (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)), maxretlen)
	if result != 0 {
		// string(result)/string(errmsg) bugs fixed as in Set.
		return "", errors.New("Get failed. Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	// Bug fix: string(_ret) returned the whole 1 MiB buffer including trailing
	// NUL padding; trim at the first NUL instead (assumes gt.m NUL-terminates
	// the value — TODO confirm for values that may contain NUL bytes).
	return C.GoString((*C.char)(unsafe.Pointer(&_ret[0]))), nil
} //end of Get
// Start initializes the gt.m runtime and the call-in interface; it must be
// called once before any other gogtm function.
func Start() error {
	if result := C.gtm_init(); result != 0 {
		// string(result) would render the status code as a rune, not a number.
		return errors.New("gtm_init failed with nonzero status")
	}
	// Bug fix: C.CString("") allocates a single byte, but cip_init may write
	// up to maxmsglen bytes into it (heap buffer overflow). Allocate a
	// full-size buffer instead; cip_init NUL-terminates it immediately.
	errmsg := (*C.char)(C.malloc(maxmsglen))
	defer C.free(unsafe.Pointer(errmsg))
	result := C.cip_init(errmsg, maxmsglen)
	if result != 0 {
		return errors.New("CIP Init failed. Error MSG: " + C.GoString(errmsg))
	}
	return nil
} // end of Start
// Stop shuts down the gt.m runtime; call once when finished with the database.
func Stop() error {
	if result := C.gtm_exit(); result != 0 {
		// Bug fix: string(result) rendered the status code as a rune.
		return errors.New("gtm_exit failed with nonzero status")
	}
	return nil
} // end of Stop
//Kill deletes global variable and its descendant nodes.
func Kill(global string) error {
	if len(global) < 1 {
		return errors.New("Kill failed - you must provide [glvn | (glvn[,...]) | *lname | *lvn ]")
	}
	_global := C.CString(global)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_global))
	result := C.cip_kill(_global, (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)))
	if result != 0 {
		// Bug fixes: string(result) rendered the status code as a rune and
		// string(errmsg) embedded trailing NUL bytes.
		return errors.New("Kill failed. Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	return nil
} // end of Kill
//ZKill deletes the global variable node itself, leaving descendant nodes intact.
func ZKill(global string) error {
	if len(global) < 1 {
		return errors.New("ZKill failed - you must provide glvn")
	}
	_global := C.CString(global)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_global))
	result := C.cip_zkill(_global, (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)))
	if result != 0 {
		// Bug fixes: string(result) rendered the status code as a rune and
		// string(errmsg) embedded trailing NUL bytes.
		return errors.New("ZKill failed. Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	return nil
} // end of ZKill
//Xecute runs the given M code.
func Xecute(code string) error {
	if len(code) < 1 {
		return errors.New("Xecute failed - you must provide some code")
	}
	_code := C.CString(code)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_code))
	result := C.cip_xecute(_code, (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)))
	if result != 0 {
		// Bug fixes: string(result) rendered the status code as a rune and
		// string(errmsg) embedded trailing NUL bytes.
		return errors.New("Xecute failed. Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	return nil
} // end of Xecute
Add the Order function.
//Package gogtm enables access to gt.m database
package gogtm
/*
#cgo CFLAGS: -I/opt/fis-gtm/6.3-000A
#cgo LDFLAGS: -L/opt/fis-gtm/6.3-000A -lgtmshr
#include <gtmxc_types.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#define maxstr 1048576
#ifndef NULL
#define NULL ((void *) 0)
#endif
#define CALLGTM(xyz) status = xyz ; \
if (0 != status ) { \
gtm_zstatus( msg, 2048 ); \
snprintf(errmsg, 2048, "Failure of %s with error: %s\n", #xyz, msg); \
return (int) status; \
}
// cip_init: invokes the M routine "gtminit" once to initialize the call-in
// environment. On a gt.m error it copies the error text into errmsg and
// returns 100; a nonzero gtm_cip status is returned via CALLGTM.
int cip_init(char *errmsg, int maxmsglen) {
	gtm_char_t msg[maxmsglen], err[maxmsglen];
	gtm_string_t gtminit_str;
	ci_name_descriptor gtminit;
	gtm_status_t status;
	gtminit_str.address = "gtminit";
	gtminit_str.length = sizeof("gtminit")-1;
	gtminit.rtn_name=gtminit_str;
	gtminit.handle = NULL;
	errmsg[0] = '\0';
	err[0] = '\0';
	// Fix: "&gtminit" had been mangled to ">minit" (an HTML unescape turned
	// "&gt" into ">"); gtm_cip needs the address of the descriptor.
	CALLGTM (gtm_cip( &gtminit, &err));
	if (0 != strlen( err )){
		snprintf(errmsg, maxmsglen, "cip_init error: [%s]\n", err);
		return 100;
	}
	return 0;
} // end of cip_init
// cip_set: invokes the M routine "gtmset" to SET the node named by s_global
// to s_value. On a gt.m error it copies the error text into errmsg and
// returns 100.
int cip_set(char *s_global, char *s_value, char *errmsg, int maxmsglen) {
	gtm_char_t err[maxmsglen], msg[maxmsglen];
	gtm_string_t gtmset_str, p_value;
	ci_name_descriptor gtmset;
	gtm_status_t status;
	gtmset_str.address = "gtmset";
	gtmset_str.length = sizeof("gtmset")-1;
	gtmset.rtn_name=gtmset_str;
	gtmset.handle = NULL;
	err[0] = '\0';
	// Pass the value as a counted gtm_string_t (length without the NUL).
	p_value.address = ( gtm_char_t *) s_value; p_value.length = strlen(s_value);
	// Fix: "&gtmset" had been mangled to ">mset" ("&gt" -> ">").
	CALLGTM( gtm_cip( &gtmset, s_global, &p_value, &err));
	if (0 != strlen( err )){
		snprintf(errmsg, maxmsglen, "cip_set error: [%s]\n", err);
		fprintf( stderr, "error set: %s", err);
		return 100;
	}
	return 0;
} // end of cip_set
// cip_get: invokes the M routine "gtmget" to read the node named by s_global
// (s_opt carries optional arguments) into s_ret. On a gt.m error it copies
// the error text into errmsg and returns 100.
int cip_get(char *s_global, char *s_opt, char *s_ret, char *errmsg, int maxmsglen, int maxretlen) {
	gtm_char_t err[maxmsglen], msg[maxmsglen];
	gtm_string_t gtmget_str, p_opt;
	ci_name_descriptor gtmget;
	gtm_status_t status;
	gtmget_str.address = "gtmget";
	gtmget_str.length = sizeof("gtmget")-1;
	gtmget.rtn_name=gtmget_str;
	gtmget.handle = NULL;
	err[0] = '\0';
	p_opt.address = ( gtm_char_t *) s_opt; p_opt.length = strlen(s_opt);
	// Fix: "&gtmget" had been mangled to ">mget" ("&gt" -> ">").
	CALLGTM( gtm_cip( &gtmget, s_global, &p_opt, s_ret, &err));
	if (0 != strlen( err )){
		snprintf(errmsg, maxmsglen, "cip_get error: [%s]\n", err);
		fprintf( stderr, "error set: %s", err);
		return 100;
	}
	return 0;
} // end of cip_get
// cip_kill: invokes the M routine "gtmkill" to KILL (delete) the node named
// by s_global and its descendants. On a gt.m error it copies the error text
// into errmsg and returns 100.
int cip_kill(char *s_global, char *errmsg, int maxmsglen) {
	gtm_char_t err[maxmsglen], msg[maxmsglen];
	gtm_string_t gtmkill_str;
	ci_name_descriptor gtmkill;
	gtm_status_t status;
	gtmkill_str.address = "gtmkill";
	gtmkill_str.length = sizeof("gtmkill")-1;
	gtmkill.rtn_name=gtmkill_str;
	gtmkill.handle = NULL;
	err[0] = '\0';
	// Fix: "&gtmkill" had been mangled to ">mkill" ("&gt" -> ">").
	CALLGTM( gtm_cip( &gtmkill, s_global, &err));
	if (0 != strlen( err )){
		snprintf(errmsg, maxmsglen, "cip_kill error: [%s]\n", err);
		fprintf( stderr, "error set: %s", err);
		return 100;
	}
	return 0;
} // end of cip_kill
// cip_zkill: invokes the M routine "gtmzkill" (ZKILL) to delete only the node
// named by s_global, leaving descendants intact. On a gt.m error it copies
// the error text into errmsg and returns 100.
int cip_zkill(char *s_global, char *errmsg, int maxmsglen) {
	gtm_char_t err[maxmsglen], msg[maxmsglen];
	gtm_string_t gtmzkill_str;
	ci_name_descriptor gtmzkill;
	gtm_status_t status;
	gtmzkill_str.address = "gtmzkill";
	gtmzkill_str.length = sizeof("gtmzkill")-1;
	gtmzkill.rtn_name=gtmzkill_str;
	gtmzkill.handle = NULL;
	err[0] = '\0';
	// Fix: "&gtmzkill" had been mangled to ">mzkill" ("&gt" -> ">").
	CALLGTM( gtm_cip( &gtmzkill, s_global, &err));
	if (0 != strlen( err )){
		snprintf(errmsg, maxmsglen, "cip_zkill error: [%s]\n", err);
		fprintf( stderr, "error set: %s", err);
		return 100;
	}
	return 0;
} // end of cip_zkill
// cip_xecute: invokes the M routine "gtmxecute" to XECUTE the M code passed
// in s_global. On a gt.m error it copies the error text into errmsg and
// returns 100.
int cip_xecute(char *s_global, char *errmsg, int maxmsglen) {
	gtm_char_t err[maxmsglen], msg[maxmsglen];
	gtm_string_t gtmxecute_str;
	ci_name_descriptor gtmxecute;
	gtm_status_t status;
	gtmxecute_str.address = "gtmxecute";
	gtmxecute_str.length = sizeof("gtmxecute")-1;
	gtmxecute.rtn_name=gtmxecute_str;
	gtmxecute.handle = NULL;
	err[0] = '\0';
	// Fix: "&gtmxecute" had been mangled to ">mxecute" ("&gt" -> ">").
	CALLGTM( gtm_cip( &gtmxecute, s_global, &err));
	if (0 != strlen( err )){
		snprintf(errmsg, maxmsglen, "cip_xecute error: [%s]\n", err);
		fprintf( stderr, "error set: %s", err);
		return 100;
	}
	return 0;
} // end of cip_xecute
// cip_order: invokes the M routine "gtmorder" ($ORDER) to write the next
// subscript after s_global into s_ret. On a gt.m error it copies the error
// text into errmsg and returns 100.
int cip_order(char *s_global, char *s_ret, char *errmsg, int maxmsglen, int maxretlen) {
	gtm_char_t err[maxmsglen], msg[maxmsglen];
	gtm_string_t gtmorder_str;
	ci_name_descriptor gtmorder;
	gtm_status_t status;
	gtmorder_str.address = "gtmorder";
	gtmorder_str.length = sizeof("gtmorder")-1;
	gtmorder.rtn_name=gtmorder_str;
	gtmorder.handle = NULL;
	err[0] = '\0';
	// Fix: "&gtmorder" had been mangled to ">morder" ("&gt" -> ">").
	// Also removed the unused gtm_string_t p_opt declaration.
	CALLGTM( gtm_cip( &gtmorder, s_global, s_ret, &err));
	if (0 != strlen( err )){
		snprintf(errmsg, maxmsglen, "cip_order error: [%s]\n", err);
		fprintf( stderr, "error set: %s", err);
		return 100;
	}
	return 0;
} // end of cip_order
*/
import "C"
import (
	"errors"
	"strconv"
	"unsafe"
	// "fmt"
)
//maxmsglen maximum length of message from gt.m
const maxmsglen = 2048
//maxretlen maximum length of value retrieved from gt.m
const maxretlen = 1048576
//Set saves value to global in gt.m db.
//Sample usage: gogtm.Set("^test","value")
func Set(global string, val string) error {
	if len(global) < 1 {
		return errors.New("Set failed - you must provide glvn")
	}
	_global := C.CString(global)
	_val := C.CString(val)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_global))
	defer C.free(unsafe.Pointer(_val))
	result := C.cip_set(_global, _val, (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)))
	if result != 0 {
		// Bug fixes: string(result) rendered the status code as a rune (use
		// strconv.Itoa) and string(errmsg) embedded trailing NUL bytes
		// (C.GoString stops at the first NUL).
		return errors.New("Set failed: " + strconv.Itoa(int(result)) + ". Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	return nil
} // end of Set
// Get retrieves the value stored at global (opt carries optional arguments
// for the M-side gtmget routine) from the gt.m database.
func Get(global string, opt string) (string, error) {
	if len(global) < 1 {
		return "", errors.New("Get failed - you must provide glvn")
	}
	_global := C.CString(global)
	_opt := C.CString(opt)
	_ret := make([]byte, maxretlen)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_global))
	defer C.free(unsafe.Pointer(_opt))
	// Removed dead code: a C.malloc(maxmsglen) was allocated and freed
	// without ever being used.
	result := C.cip_get(_global, _opt, (*C.char)(unsafe.Pointer(&_ret[0])), (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)), maxretlen)
	if result != 0 {
		return "", errors.New("Get failed: " + strconv.Itoa(int(result)) + ". Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	// Bug fix: string(_ret) returned the whole 1 MiB buffer including trailing
	// NUL padding; trim at the first NUL instead (assumes gt.m NUL-terminates
	// the value — TODO confirm for values that may contain NUL bytes).
	return C.GoString((*C.char)(unsafe.Pointer(&_ret[0]))), nil
} //end of Get
// Start initializes the gt.m runtime and the call-in interface; it must be
// called once before any other gogtm function.
func Start() error {
	if result := C.gtm_init(); result != 0 {
		// strconv.Itoa: string(result) would render the code as a rune.
		return errors.New("gtm_init failed: " + strconv.Itoa(int(result)))
	}
	// Bug fix: C.CString("") allocates a single byte, but cip_init may write
	// up to maxmsglen bytes into it (heap buffer overflow). Allocate a
	// full-size buffer instead; cip_init NUL-terminates it immediately.
	errmsg := (*C.char)(C.malloc(maxmsglen))
	defer C.free(unsafe.Pointer(errmsg))
	result := C.cip_init(errmsg, maxmsglen)
	if result != 0 {
		return errors.New("CIP Init failed: " + strconv.Itoa(int(result)) + ". Error MSG: " + C.GoString(errmsg))
	}
	return nil
} // end of Start
// Stop shuts down the gt.m runtime; call once when finished with the database.
func Stop() error {
	if result := C.gtm_exit(); result != 0 {
		// strconv.Itoa: string(result) would render the code as a rune.
		return errors.New("gtm_exit failed: " + strconv.Itoa(int(result)))
	}
	return nil
} // end of Stop
//Kill deletes global variable and its descendant nodes.
func Kill(global string) error {
	if len(global) < 1 {
		return errors.New("Kill failed - you must provide [glvn | (glvn[,...]) | *lname | *lvn ]")
	}
	_global := C.CString(global)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_global))
	result := C.cip_kill(_global, (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)))
	if result != 0 {
		// Bug fixes: rune-conversion of result and NUL padding of errmsg.
		return errors.New("Kill failed: " + strconv.Itoa(int(result)) + ". Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	return nil
} // end of Kill
//ZKill deletes the global variable node itself, leaving descendant nodes intact.
func ZKill(global string) error {
	if len(global) < 1 {
		return errors.New("ZKill failed - you must provide glvn")
	}
	_global := C.CString(global)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_global))
	result := C.cip_zkill(_global, (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)))
	if result != 0 {
		// Bug fixes: rune-conversion of result and NUL padding of errmsg.
		return errors.New("ZKill failed: " + strconv.Itoa(int(result)) + ". Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	return nil
} // end of ZKill
//Xecute runs the given M code.
func Xecute(code string) error {
	if len(code) < 1 {
		return errors.New("Xecute failed - you must provide some code")
	}
	_code := C.CString(code)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_code))
	result := C.cip_xecute(_code, (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)))
	if result != 0 {
		// Bug fixes: rune-conversion of result and NUL padding of errmsg.
		return errors.New("Xecute failed: " + strconv.Itoa(int(result)) + ". Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	return nil
} // end of Xecute
// Order returns the next subscript following the given glvn ($ORDER in M),
// or "" when there are no more subscripts.
func Order(global string) (string, error) {
	if len(global) < 1 {
		// Bug fix: error messages wrongly said "Get failed".
		return "", errors.New("Order failed - you must provide glvn")
	}
	_global := C.CString(global)
	_ret := make([]byte, maxretlen)
	errmsg := make([]byte, maxmsglen)
	defer C.free(unsafe.Pointer(_global))
	// Removed dead code: an unused C.malloc(maxmsglen).
	result := C.cip_order(_global, (*C.char)(unsafe.Pointer(&_ret[0])), (*C.char)(unsafe.Pointer(&errmsg[0])), C.int(len(errmsg)), maxretlen)
	if result != 0 {
		return "", errors.New("Order failed: " + strconv.Itoa(int(result)) + ". Error message: " + C.GoString((*C.char)(unsafe.Pointer(&errmsg[0]))))
	}
	// Trim at the first NUL: string(_ret) would return the whole 1 MiB buffer.
	return C.GoString((*C.char)(unsafe.Pointer(&_ret[0]))), nil
} //end of Order
|
package gohex
import (
"bufio"
"encoding/hex"
"sort"
"strings"
)
// Constants definitions of IntelHex record types
const (
DATA_RECORD byte = 0 // record carrying data bytes
EOF_RECORD byte = 1 // end-of-file marker record
ADDRESS_RECORD byte = 4 // extended linear address record
START_RECORD byte = 5 // start linear address record
)
// Structure with binary data segment fields
type DataSegment struct {
Address int // starting address of the segment
Data []byte // raw bytes of the segment
}
// Helper type for data segments sorting operations
type sortByAddress []*DataSegment
func (segs sortByAddress) Len() int { return len(segs) }
func (segs sortByAddress) Swap(i, j int) { segs[i], segs[j] = segs[j], segs[i] }
func (segs sortByAddress) Less(i, j int) bool { return segs[i].Address < segs[j].Address }
// Memory holds the parser state and the parsed image of one IntelHex document.
type Memory struct {
dataSegments []*DataSegment // parsed segments (sorted only by GetDataSegments)
startAddress int // start linear address; valid only when startFlag is true
extendedAddress int // current extended linear address offset (record type 4)
eofFlag bool // set once an EOF record has been parsed
startFlag bool // set once a start-address record has been parsed
lineNum int // current input line, used in parse error reports
}
// NewMemory allocates a Memory reset to a clean parsing state.
func NewMemory() *Memory {
m := new(Memory)
m.Clear()
return m
}
// Method to retrieve start address from IntelHex data
// (ok is false when no start-address record was parsed).
func (m *Memory) GetStartAddress() (adr int, ok bool) {
if m.startFlag {
return m.startAddress, true
}
return 0, false
}
// Method to retrieve data segments address from IntelHex data
// NOTE: sorts the internal slice in place and returns it without copying.
func (m *Memory) GetDataSegments() []*DataSegment {
segs := m.dataSegments
sort.Sort(sortByAddress(segs))
return segs
}
// Clear resets all parser state and drops any previously parsed segments.
func (m *Memory) Clear() {
m.startAddress = 0
m.extendedAddress = 0
m.lineNum = 0
m.dataSegments = []*DataSegment{}
m.startFlag = false
m.eofFlag = false
}
// AddBinary inserts bytes at address adr, coalescing with an existing segment
// when the new block is exactly adjacent (before or after); returns an error
// if the new block overlaps any existing segment.
func (m *Memory) AddBinary(adr int, bytes []byte) error {
for _, s := range m.dataSegments {
if ((adr >= s.Address) && (adr < s.Address+len(s.Data))) ||
((adr < s.Address) && (adr+len(bytes) > s.Address)) {
return newParseError(DATA_ERROR, "data segments overlap", m.lineNum)
}
if adr == s.Address+len(s.Data) { // new block starts right after this segment
s.Data = append(s.Data, bytes...)
return nil
}
if adr+len(bytes) == s.Address { // new block ends right before this segment
s.Address = adr
// NOTE(review): append(bytes, s.Data...) may write into the caller's
// backing array when it has spare capacity — confirm callers never
// reuse the slice they pass in.
s.Data = append(bytes, s.Data...)
return nil
}
}
m.dataSegments = append(m.dataSegments, &DataSegment{Address: adr, Data: bytes})
return nil
}
// parseIntelHexRecord validates one hex-decoded IntelHex record (layout:
// byte count, address hi, address lo, record type, payload..., checksum)
// and dispatches on its record type.
func (m *Memory) parseIntelHexRecord(bytes []byte) error {
	if len(bytes) < 5 {
		// Typo fix in the error message: "enought" -> "enough".
		return newParseError(DATA_ERROR, "not enough data bytes", m.lineNum)
	}
	err := checkSum(bytes)
	if err != nil {
		return newParseError(CHECKSUM_ERROR, err.Error(), m.lineNum)
	}
	err = checkRecordSize(bytes)
	if err != nil {
		return newParseError(DATA_ERROR, err.Error(), m.lineNum)
	}
	switch record_type := bytes[3]; record_type {
	case DATA_RECORD:
		adr, data := getDataLine(bytes)
		adr += m.extendedAddress // apply the current extended linear address
		err = m.AddBinary(adr, data)
		if err != nil {
			return err
		}
	case EOF_RECORD:
		err = checkEOF(bytes)
		if err != nil {
			return newParseError(RECORD_ERROR, err.Error(), m.lineNum)
		}
		m.eofFlag = true
	case ADDRESS_RECORD:
		m.extendedAddress, err = getExtendedAddress(bytes)
		if err != nil {
			return newParseError(RECORD_ERROR, err.Error(), m.lineNum)
		}
	case START_RECORD:
		if m.startFlag == true {
			return newParseError(DATA_ERROR, "multiple start address lines", m.lineNum)
		}
		m.startAddress, err = getStartAddress(bytes)
		if err != nil {
			return newParseError(RECORD_ERROR, err.Error(), m.lineNum)
		}
		m.startFlag = true
	}
	// NOTE(review): unknown record types (e.g. 2 and 3) are silently ignored —
	// confirm this is intentional.
	return nil
}
// parseIntelHexLine decodes one ASCII line of an IntelHex document (":"
// followed by hex digits) and hands the raw record to parseIntelHexRecord.
// Empty lines are ignored.
func (m *Memory) parseIntelHexLine(line string) error {
	if line == "" {
		return nil
	}
	if !strings.HasPrefix(line, ":") {
		return newParseError(SYNTAX_ERROR, "no colon char on the first line character", m.lineNum)
	}
	raw, err := hex.DecodeString(line[1:])
	if err != nil {
		return newParseError(SYNTAX_ERROR, err.Error(), m.lineNum)
	}
	return m.parseIntelHexRecord(raw)
}
// ParseIntelHex parses a complete IntelHex document from a string, replacing
// any previously parsed content (Clear is called first).
func (m *Memory) ParseIntelHex(str string) error {
scanner := bufio.NewScanner(strings.NewReader(str))
m.Clear()
for scanner.Scan() {
m.lineNum++ // 1-based line numbers for error reports
line := scanner.Text()
err := m.parseIntelHexLine(line)
if err != nil {
return err
}
}
if err := scanner.Err(); err != nil {
return newParseError(SYNTAX_ERROR, err.Error(), m.lineNum)
}
if m.eofFlag == false { // a valid document must end with an EOF record
return newParseError(DATA_ERROR, "no end of file line", m.lineNum)
}
return nil
}
// DumpIntelHex is a stub: it currently writes nothing and always returns nil.
func (m *Memory) DumpIntelHex() error {
return nil
}
Add more documentation.
package gohex
import (
"bufio"
"encoding/hex"
"sort"
"strings"
)
// Constants definitions of IntelHex record types
const (
DATA_RECORD byte = 0 // Record with data bytes
EOF_RECORD byte = 1 // Record with end of file indicator
ADDRESS_RECORD byte = 4 // Record with extended linear address
START_RECORD byte = 5 // Record with start linear address
)
// Structure with binary data segment fields
type DataSegment struct {
Address int // Starting address of data segment
Data []byte // Data segment bytes
}
// Helper type for data segments sorting operations
type sortByAddress []*DataSegment
func (segs sortByAddress) Len() int { return len(segs) }
func (segs sortByAddress) Swap(i, j int) { segs[i], segs[j] = segs[j], segs[i] }
func (segs sortByAddress) Less(i, j int) bool { return segs[i].Address < segs[j].Address }
// Memory holds the parser state and the parsed image of one IntelHex document.
type Memory struct {
dataSegments []*DataSegment // parsed segments (sorted only by GetDataSegments)
startAddress int // start linear address; valid only when startFlag is true
extendedAddress int // current extended linear address offset (record type 4)
eofFlag bool // set once an EOF record has been parsed
startFlag bool // set once a start-address record has been parsed
lineNum int // current input line, used in parse error reports
}
// NewMemory allocates a Memory reset to a clean parsing state.
func NewMemory() *Memory {
m := new(Memory)
m.Clear()
return m
}
// Method to retrieve start address from IntelHex data
// (ok is false when no start-address record was parsed).
func (m *Memory) GetStartAddress() (adr int, ok bool) {
if m.startFlag {
return m.startAddress, true
}
return 0, false
}
// Method to retrieve data segments address from IntelHex data
// NOTE: sorts the internal slice in place and returns it without copying.
func (m *Memory) GetDataSegments() []*DataSegment {
segs := m.dataSegments
sort.Sort(sortByAddress(segs))
return segs
}
// Clear resets all parser state and drops any previously parsed segments.
func (m *Memory) Clear() {
m.startAddress = 0
m.extendedAddress = 0
m.lineNum = 0
m.dataSegments = []*DataSegment{}
m.startFlag = false
m.eofFlag = false
}
// AddBinary inserts bytes at address adr, coalescing with an existing segment
// when the new block is exactly adjacent (before or after); returns an error
// if the new block overlaps any existing segment.
func (m *Memory) AddBinary(adr int, bytes []byte) error {
for _, s := range m.dataSegments {
if ((adr >= s.Address) && (adr < s.Address+len(s.Data))) ||
((adr < s.Address) && (adr+len(bytes) > s.Address)) {
return newParseError(DATA_ERROR, "data segments overlap", m.lineNum)
}
if adr == s.Address+len(s.Data) { // new block starts right after this segment
s.Data = append(s.Data, bytes...)
return nil
}
if adr+len(bytes) == s.Address { // new block ends right before this segment
s.Address = adr
// NOTE(review): append(bytes, s.Data...) may write into the caller's
// backing array when it has spare capacity — confirm callers never
// reuse the slice they pass in.
s.Data = append(bytes, s.Data...)
return nil
}
}
m.dataSegments = append(m.dataSegments, &DataSegment{Address: adr, Data: bytes})
return nil
}
// parseIntelHexRecord validates one hex-decoded IntelHex record (layout:
// byte count, address hi, address lo, record type, payload..., checksum)
// and dispatches on its record type.
func (m *Memory) parseIntelHexRecord(bytes []byte) error {
	if len(bytes) < 5 {
		// Typo fix in the error message: "enought" -> "enough".
		return newParseError(DATA_ERROR, "not enough data bytes", m.lineNum)
	}
	err := checkSum(bytes)
	if err != nil {
		return newParseError(CHECKSUM_ERROR, err.Error(), m.lineNum)
	}
	err = checkRecordSize(bytes)
	if err != nil {
		return newParseError(DATA_ERROR, err.Error(), m.lineNum)
	}
	switch record_type := bytes[3]; record_type {
	case DATA_RECORD:
		adr, data := getDataLine(bytes)
		adr += m.extendedAddress // apply the current extended linear address
		err = m.AddBinary(adr, data)
		if err != nil {
			return err
		}
	case EOF_RECORD:
		err = checkEOF(bytes)
		if err != nil {
			return newParseError(RECORD_ERROR, err.Error(), m.lineNum)
		}
		m.eofFlag = true
	case ADDRESS_RECORD:
		m.extendedAddress, err = getExtendedAddress(bytes)
		if err != nil {
			return newParseError(RECORD_ERROR, err.Error(), m.lineNum)
		}
	case START_RECORD:
		if m.startFlag == true {
			return newParseError(DATA_ERROR, "multiple start address lines", m.lineNum)
		}
		m.startAddress, err = getStartAddress(bytes)
		if err != nil {
			return newParseError(RECORD_ERROR, err.Error(), m.lineNum)
		}
		m.startFlag = true
	}
	// NOTE(review): unknown record types (e.g. 2 and 3) are silently ignored —
	// confirm this is intentional.
	return nil
}
// parseIntelHexLine decodes one ASCII line of an IntelHex document (":"
// followed by hex digits) and hands the raw record to parseIntelHexRecord.
// Empty lines are ignored.
func (m *Memory) parseIntelHexLine(line string) error {
	if line == "" {
		return nil
	}
	if !strings.HasPrefix(line, ":") {
		return newParseError(SYNTAX_ERROR, "no colon char on the first line character", m.lineNum)
	}
	raw, err := hex.DecodeString(line[1:])
	if err != nil {
		return newParseError(SYNTAX_ERROR, err.Error(), m.lineNum)
	}
	return m.parseIntelHexRecord(raw)
}
// ParseIntelHex parses a complete IntelHex document from a string, replacing
// any previously parsed content (Clear is called first).
func (m *Memory) ParseIntelHex(str string) error {
scanner := bufio.NewScanner(strings.NewReader(str))
m.Clear()
for scanner.Scan() {
m.lineNum++ // 1-based line numbers for error reports
line := scanner.Text()
err := m.parseIntelHexLine(line)
if err != nil {
return err
}
}
if err := scanner.Err(); err != nil {
return newParseError(SYNTAX_ERROR, err.Error(), m.lineNum)
}
if m.eofFlag == false { // a valid document must end with an EOF record
return newParseError(DATA_ERROR, "no end of file line", m.lineNum)
}
return nil
}
// DumpIntelHex is a stub: it currently writes nothing and always returns nil.
func (m *Memory) DumpIntelHex() error {
return nil
}
|
package pigae
import (
"fmt"
"os"
"sort"
"strings"
"github.com/Deleplace/programming-idioms/pig"
)
//
// Language names exist in 3 forms : nice, standard, lowercase
// Ex : "C++", "Cpp", "cpp"
//
// mainStreamLangs holds the standard-form names of the most popular languages.
var mainStreamLangs = [...]string{"C", "Cpp", "Csharp", "Go", "Java", "JS", "Obj-C", "PHP", "Python", "Ruby"}
// Return alpha codes for each language (no encoding problems).
// See printNiceLang to display them more fancy.
func mainStreamLanguages() []string {
return mainStreamLangs[:]
}
// moreLangs holds the standard-form names of the less common languages.
var moreLangs = [...]string{"Ada", "Caml", "Clojure", "Cobol", "D", "Dart", "Elixir", "Erlang", "Fortran", "Haskell", "Lua", "Lisp", "Pascal", "Perl", "Prolog", "Rust", "Scala", "Scheme", "VB"}
// moreLanguages returns the secondary languages.
func moreLanguages() []string {
// These do *not* include the mainStreamLanguages()
return moreLangs[:]
}
// synonymLangs maps alternative spellings to their standard-form name.
var synonymLangs = map[string]string{
"Javascript": "JS",
"Objective C": "Obj-C",
"Visual Basic": "VB",
}
// allLangs caches the sorted list of all standard-form language names;
// allNiceLangs caches the nice form of each, in the same order.
// NOTE: lazy initialization is not goroutine-safe — assumed to be warmed up
// from a single goroutine.
var allLangs []string
var allNiceLangs []string

// allLanguages lazily builds and returns the sorted list of all languages
// (mainstream + secondary).
func allLanguages() []string {
	if allLangs != nil {
		return allLangs
	}
	mainstream := mainStreamLanguages()
	extra := moreLanguages()
	combined := make([]string, 0, len(mainstream)+len(extra))
	combined = append(combined, mainstream...)
	combined = append(combined, extra...)
	sort.Strings(combined)
	allLangs = combined
	allNiceLangs = make([]string, len(allLangs))
	for i, lg := range allLangs {
		allNiceLangs[i] = printNiceLang(lg)
	}
	return allLangs
}
// autocompletions is a map[string][]string
// from every lowercase fragment to the nice names of the matching languages.
var autocompletions = precomputeAutocompletions()
// languageAutoComplete returns the nice names of the languages whose name (or
// synonym) contains fragment, case-insensitively.
func languageAutoComplete(fragment string) []string {
fragment = strings.ToLower(fragment)
// Dynamic search (slow)
// options := []string{}
// for _, lg := range allLanguages() {
// if strings.Contains(strings.ToLower(lg), fragment) || strings.Contains(strings.ToLower(printNiceLang(lg)), fragment) {
// options = append(options, printNiceLang(lg))
// }
// }
// return options
// Precomputed search (fast)
return autocompletions[fragment]
}
// printNiceLang converts a standard-form language name to its display ("nice")
// form, e.g. "Cpp" -> "C++". Names without a special nice form are returned
// unchanged.
func printNiceLang(lang string) string {
	key := strings.TrimSpace(strings.ToLower(lang))
	if key == "cpp" {
		return "C++"
	}
	if key == "csharp" {
		return "C#"
	}
	return lang
}

// printNiceLangs maps printNiceLang over a list of language names.
func printNiceLangs(langs []string) []string {
	result := make([]string, 0, len(langs))
	for _, name := range langs {
		result = append(result, printNiceLang(name))
	}
	return result
}
// printShortLang converts a language name to the short form used in compact
// listings (a space marks a soft-break point, e.g. "Co bol"); names without a
// short form are returned unchanged.
func printShortLang(lang string) string {
	shortNames := map[string]string{
		"clojure": "Clj",
		"cobol":   "Co bol",
		"cpp":     "C++",
		"csharp":  "C#",
		"erlang":  "Er lang",
		"elixir":  "Eli xir",
		"fortran": "For tran",
		"haskell": "Has kell",
		"obj-c":   "Obj C",
		"pascal":  "Pas",
		"python":  "Py",
		"scheme":  "scm",
		"prolog":  "Pro log",
	}
	if short, ok := shortNames[strings.TrimSpace(strings.ToLower(lang))]; ok {
		return short
	}
	return lang
}
// indexByLowerCase builds a lookup table from the lowercase form of each
// language name to its original spelling.
func indexByLowerCase(langs []string) map[string]string {
	index := make(map[string]string, len(langs))
	for _, name := range langs {
		index[strings.ToLower(name)] = name
	}
	return index
}
// langLowerCaseIndex maps the lowercase form of every known language to its
// standard form.
var langLowerCaseIndex = indexByLowerCase(allLanguages())
// normLang normalizes a user-supplied spelling to the standard form, e.g.
// "c++" -> "Cpp". Unknown names yield "" (the map zero value).
func normLang(lang string) string {
lg := strings.TrimSpace(strings.ToLower(lang))
switch lg {
case "c++":
return "Cpp"
case "c#":
return "Csharp"
case "javascript":
return "JS"
case "golang":
return "Go"
case "objective c":
return "Obj-C"
default:
return langLowerCaseIndex[lg]
}
}
// precomputeAutocompletions indexes every language under every lowercase
// substring of its standard and nice names (and of its synonyms), so that
// languageAutoComplete is a single map lookup.
func precomputeAutocompletions() map[string][]string {
m := make(map[string][]string, 100)
// Crazy shadowing of variable "lg" is allowed in go...
for _, trueLg := range allLanguages() {
niceLg := printNiceLang(trueLg)
for _, lg := range []string{trueLg, niceLg} {
lg := strings.ToLower(lg)
fragments := substrings(lg)
for _, frag := range fragments {
if !pig.StringSliceContains(m[frag], niceLg) { // avoid duplicate entries
m[frag] = append(m[frag], niceLg)
}
}
}
}
// Also index synonyms ("Javascript", ...) under the canonical nice name.
for lg, trueLg := range synonymLangs {
niceLg := printNiceLang(trueLg)
lg := strings.ToLower(lg)
fragments := substrings(lg)
for _, frag := range fragments {
if !pig.StringSliceContains(m[frag], niceLg) {
m[frag] = append(m[frag], niceLg)
}
}
}
// NOTE(review): looks like leftover debug output — consider removing.
fmt.Fprintf(os.Stderr, "---\n")
return m
}
// substrings returns every distinct contiguous substring of s (including s
// itself), in order of increasing start index then increasing length.
// This works well for language names with only 1-byte chars, not for any string.
func substrings(s string) []string {
	L := len(s)
	seen := make(map[string]bool, L*L)
	// Bug fix: make([]string, L*L) pre-filled the result with L*L empty
	// strings, which then polluted the autocompletion index under the "" key;
	// allocate capacity only.
	fragments := make([]string, 0, L*L)
	for i := 0; i < L; i++ {
		for j := i + 1; j <= L; j++ {
			frag := s[i:j]
			if seen[frag] {
				continue
			}
			seen[frag] = true
			fragments = append(fragments, frag)
		}
	}
	return fragments
}
Fix a sorting problem on the CheatSheets list page.
package pigae
import (
"fmt"
"os"
"sort"
"strings"
"github.com/Deleplace/programming-idioms/pig"
)
//
// Language names exist in 3 forms : nice, standard, lowercase
// Ex : "C++", "Cpp", "cpp"
//
// mainStreamLangs holds the standard-form names of the most popular languages.
var mainStreamLangs = [...]string{"C", "Cpp", "Csharp", "Go", "Java", "JS", "Obj-C", "PHP", "Python", "Ruby"}
// Return alpha codes for each language (no encoding problems).
// See printNiceLang to display them more fancy.
func mainStreamLanguages() []string {
return mainStreamLangs[:]
}
// moreLangs holds the standard-form names of the less common languages.
var moreLangs = [...]string{"Ada", "Caml", "Clojure", "Cobol", "D", "Dart", "Elixir", "Erlang", "Fortran", "Haskell", "Lua", "Lisp", "Pascal", "Perl", "Prolog", "Rust", "Scala", "Scheme", "VB"}
// moreLanguages returns the secondary languages.
func moreLanguages() []string {
// These do *not* include the mainStreamLanguages()
return moreLangs[:]
}
// synonymLangs maps alternative spellings to their standard-form name.
var synonymLangs = map[string]string{
"Javascript": "JS",
"Objective C": "Obj-C",
"Visual Basic": "VB",
}
// allLangs caches the sorted list of all standard-form language names;
// allNiceLangs caches the nice form of each, in the same order.
// NOTE: lazy initialization is not goroutine-safe — assumed to be warmed up
// from a single goroutine.
var allLangs []string
var allNiceLangs []string
// allLanguages lazily builds and returns the sorted list of all languages.
// The explicit copy (rather than append on mainStreamLanguages()) guarantees
// the package-level name arrays are never aliased by the sorted slice.
func allLanguages() []string {
if allLangs == nil {
mainstream := mainStreamLanguages()
more := moreLanguages()
allLangs = make([]string, len(mainstream)+len(more))
copy(allLangs, mainstream)
copy(allLangs[len(mainstream):], more)
sort.Strings(allLangs)
allNiceLangs = make([]string, len(allLangs))
for i, lg := range allLangs {
allNiceLangs[i] = printNiceLang(lg)
}
}
return allLangs
}
// autocompletions is a map[string][]string
// from every lowercase fragment to the nice names of the matching languages.
var autocompletions = precomputeAutocompletions()
// languageAutoComplete returns the nice names of the languages whose name (or
// synonym) contains fragment, case-insensitively.
func languageAutoComplete(fragment string) []string {
fragment = strings.ToLower(fragment)
// Dynamic search (slow)
// options := []string{}
// for _, lg := range allLanguages() {
// if strings.Contains(strings.ToLower(lg), fragment) || strings.Contains(strings.ToLower(printNiceLang(lg)), fragment) {
// options = append(options, printNiceLang(lg))
// }
// }
// return options
// Precomputed search (fast)
return autocompletions[fragment]
}
// printNiceLang converts a standard-form language name to its display ("nice")
// form, e.g. "Cpp" -> "C++". Names without a special nice form are returned
// unchanged.
func printNiceLang(lang string) string {
	key := strings.TrimSpace(strings.ToLower(lang))
	if key == "cpp" {
		return "C++"
	}
	if key == "csharp" {
		return "C#"
	}
	return lang
}

// printNiceLangs maps printNiceLang over a list of language names.
func printNiceLangs(langs []string) []string {
	result := make([]string, 0, len(langs))
	for _, name := range langs {
		result = append(result, printNiceLang(name))
	}
	return result
}
// printShortLang converts a language name to the short form used in compact
// listings (a space marks a soft-break point, e.g. "Co bol"); names without a
// short form are returned unchanged.
func printShortLang(lang string) string {
	shortNames := map[string]string{
		"clojure": "Clj",
		"cobol":   "Co bol",
		"cpp":     "C++",
		"csharp":  "C#",
		"erlang":  "Er lang",
		"elixir":  "Eli xir",
		"fortran": "For tran",
		"haskell": "Has kell",
		"obj-c":   "Obj C",
		"pascal":  "Pas",
		"python":  "Py",
		"scheme":  "scm",
		"prolog":  "Pro log",
	}
	if short, ok := shortNames[strings.TrimSpace(strings.ToLower(lang))]; ok {
		return short
	}
	return lang
}
// indexByLowerCase builds a lookup table from the lowercase form of each
// language name to its original spelling.
func indexByLowerCase(langs []string) map[string]string {
	index := make(map[string]string, len(langs))
	for _, name := range langs {
		index[strings.ToLower(name)] = name
	}
	return index
}
// langLowerCaseIndex maps the lowercase form of every known language to its
// standard form.
var langLowerCaseIndex = indexByLowerCase(allLanguages())
// normLang normalizes a user-supplied spelling to the standard form, e.g.
// "c++" -> "Cpp". Unknown names yield "" (the map zero value).
func normLang(lang string) string {
lg := strings.TrimSpace(strings.ToLower(lang))
switch lg {
case "c++":
return "Cpp"
case "c#":
return "Csharp"
case "javascript":
return "JS"
case "golang":
return "Go"
case "objective c":
return "Obj-C"
default:
return langLowerCaseIndex[lg]
}
}
// precomputeAutocompletions indexes every language under every lowercase
// substring of its standard and nice names (and of its synonyms), so that
// languageAutoComplete is a single map lookup.
func precomputeAutocompletions() map[string][]string {
m := make(map[string][]string, 100)
// Crazy shadowing of variable "lg" is allowed in go...
for _, trueLg := range allLanguages() {
niceLg := printNiceLang(trueLg)
for _, lg := range []string{trueLg, niceLg} {
lg := strings.ToLower(lg)
fragments := substrings(lg)
for _, frag := range fragments {
if !pig.StringSliceContains(m[frag], niceLg) { // avoid duplicate entries
m[frag] = append(m[frag], niceLg)
}
}
}
}
// Also index synonyms ("Javascript", ...) under the canonical nice name.
for lg, trueLg := range synonymLangs {
niceLg := printNiceLang(trueLg)
lg := strings.ToLower(lg)
fragments := substrings(lg)
for _, frag := range fragments {
if !pig.StringSliceContains(m[frag], niceLg) {
m[frag] = append(m[frag], niceLg)
}
}
}
// NOTE(review): looks like leftover debug output — consider removing.
fmt.Fprintf(os.Stderr, "---\n")
return m
}
// substrings returns every distinct contiguous substring of s (including s
// itself), in order of increasing start index then increasing length.
// This works well for language names with only 1-byte chars, not for any string.
func substrings(s string) []string {
	L := len(s)
	seen := make(map[string]bool, L*L)
	// Bug fix: make([]string, L*L) pre-filled the result with L*L empty
	// strings, which then polluted the autocompletion index under the "" key;
	// allocate capacity only.
	fragments := make([]string, 0, L*L)
	for i := 0; i < L; i++ {
		for j := i + 1; j <= L; j++ {
			frag := s[i:j]
			if seen[frag] {
				continue
			}
			seen[frag] = true
			fragments = append(fragments, frag)
		}
	}
	return fragments
}
|
package gopdf
import (
"bytes"
"errors"
ioutil "io/ioutil"
"log"
"os"
//"container/list"
"fmt"
"strconv"
"strings"
)
// GoPdf builds a PDF document incrementally: objects accumulate in pdfObjs
// and are serialized by GetBytesPdfReturnErr.
type GoPdf struct {
//page margins
leftMargin float64
topMargin float64
//all PDF objects of the document, in output order
pdfObjs []IObj
config Config
/*---indexes of important objects, cached to avoid searching pdfObjs in a loop---*/
//index of the pages object
indexOfPagesObj int
//index of the first page object
indexOfFirstPageObj int
//current position/state (cursor, font, image cache)
Curr Current
//indexes of the encoding objects of registered fonts
indexEncodingObjFonts []int
//index of the current content object (reset to -1 by AddPage)
indexOfContent int
//index of the procset object, of which there should be only one
indexOfProcSet int
//IsUnderline bool
}
/*---public---*/
//SetLineWidth sets the stroke line width for subsequent drawing operations.
func (me *GoPdf) SetLineWidth(width float64) {
me.getContent().AppendStreamSetLineWidth(width)
}
//Line draws a straight line from (x1,y1) to (x2,y2).
func (me *GoPdf) Line(x1 float64, y1 float64, x2 float64, y2 float64) {
me.getContent().AppendStreamLine(x1, y1, x2, y2)
}
//Br breaks to a new line: moves Y down by h and X back to the left margin.
func (me *GoPdf) Br(h float64) {
me.Curr.Y += h
me.Curr.X = me.leftMargin
}
//SetLeftMargin sets the left page margin used by Br.
func (me *GoPdf) SetLeftMargin(margin float64) {
me.leftMargin = margin
}
//SetTopMargin sets the top page margin.
func (me *GoPdf) SetTopMargin(margin float64) {
me.topMargin = margin
}
//SetX sets the current horizontal position.
func (me *GoPdf) SetX(x float64) {
me.Curr.X = x
}
//GetX returns the current horizontal position.
func (me *GoPdf) GetX() float64 {
return me.Curr.X
}
//SetY sets the current vertical position.
func (me *GoPdf) SetY(y float64) {
me.Curr.Y = y
}
//GetY returns the current vertical position.
func (me *GoPdf) GetY() float64 {
return me.Curr.Y
}
// Image draws the image at picPath at position (x,y). When rect is nil the
// image's natural size is used. Images already drawn once are reused from the
// cache instead of being embedded again.
func (me *GoPdf) Image(picPath string, x float64, y float64, rect *Rect) {
//check whether this image path was embedded before
cacheImageIndex := -1
for _, imgcache := range me.Curr.ImgCaches {
if picPath == imgcache.Path {
cacheImageIndex = imgcache.Index
break
}
}
//create img object
imgobj := new(ImageObj)
imgobj.Init(func() *GoPdf {
return me
})
imgobj.SetImagePath(picPath)
if rect == nil {
rect = imgobj.GetRect()
}
if cacheImageIndex == -1 { //new image
index := me.addObj(imgobj)
// NOTE(review): when indexOfProcSet == -1 a new image is added to pdfObjs
// but never drawn or registered — confirm this is intentional.
if me.indexOfProcSet != -1 {
//register the image with the procset and draw it
procset := me.pdfObjs[me.indexOfProcSet].(*ProcSetObj)
me.getContent().AppendStreamImage(me.Curr.CountOfImg, x, y, rect)
procset.RealteXobjs = append(procset.RealteXobjs, RealteXobject{IndexOfObj: index})
//remember the image so later calls can reuse it
var imgcache ImageCache
imgcache.Index = me.Curr.CountOfImg
imgcache.Path = picPath
me.Curr.ImgCaches = append(me.Curr.ImgCaches, imgcache)
me.Curr.CountOfImg++
}
} else { //same img: just draw the cached XObject again
me.getContent().AppendStreamImage(cacheImageIndex, x, y, rect)
}
}
//AddPage appends a new page to the document and makes it the current page.
func (me *GoPdf) AddPage() {
page := new(PageObj)
page.Init(func() *GoPdf {
return me
})
page.ResourcesRelate = strconv.Itoa(me.indexOfProcSet+1) + " 0 R" // PDF object numbers are 1-based
index := me.addObj(page)
if me.indexOfFirstPageObj == -1 {
me.indexOfFirstPageObj = index
}
me.Curr.IndexOfPageObj = index
//reset per-page state: a fresh content object will be created on demand
me.indexOfContent = -1
me.resetCurrXY()
}
//Start initializes the document with the given config; it must be called
//before any page or drawing operation.
func (me *GoPdf) Start(config Config) {
me.config = config
me.init()
//create the basic objects: catalog, pages and procset
catalog := new(CatalogObj)
catalog.Init(func() *GoPdf {
return me
})
pages := new(PagesObj)
pages.Init(func() *GoPdf {
return me
})
me.addObj(catalog)
me.indexOfPagesObj = me.addObj(pages)
//indexOfProcSet
procset := new(ProcSetObj)
procset.Init(func() *GoPdf {
return me
})
me.indexOfProcSet = me.addObj(procset)
}
//SetFont selects the current font by family name, style (e.g. "U" for
//underline) and size. It first searches fonts registered through encoding
//objects, then subset (TTF) fonts; returns an error when the family is
//unknown.
func (me *GoPdf) SetFont(family string, style string, size int) error {
found := false
i := 0
max := len(me.indexEncodingObjFonts)
for i < max {
ifont := me.pdfObjs[me.indexEncodingObjFonts[i]].(*EncodingObj).GetFont()
if ifont.GetFamily() == family {
me.Curr.Font_Size = size
me.Curr.Font_Style = style
// NOTE(review): assumes the FontObj sits exactly 4 slots after its
// EncodingObj in pdfObjs — confirm against the font registration order.
me.Curr.Font_FontCount = me.pdfObjs[me.indexEncodingObjFonts[i]+4].(*FontObj).CountOfFont
me.Curr.Font_Type = CURRENT_FONT_TYPE_IFONT
me.Curr.Font_IFont = ifont
me.Curr.Font_ISubset = nil
found = true
break
}
i++
}
if !found { //find SubsetFont
i = 0
max = len(me.pdfObjs)
for i < max {
if me.pdfObjs[i].GetType() == "SubsetFont" {
obj := me.pdfObjs[i]
sub, ok := obj.(*SubsetFontObj)
if ok {
if sub.GetFamily() == family {
me.Curr.Font_Size = size
me.Curr.Font_Style = style
me.Curr.Font_FontCount = sub.CountOfFont
me.Curr.Font_Type = CURRENT_FONT_TYPE_SUBSET
me.Curr.Font_IFont = nil
me.Curr.Font_ISubset = sub
found = true
break
}
}
}
i++
}
}
if !found {
return errors.New("not found font family")
}
return nil
}
// WritePdf builds the document and writes it to pdfPath with mode 0644.
// The original discarded ioutil.WriteFile's return value; a failed
// write (permissions, full disk) is now logged instead of silently
// producing no file. The signature is unchanged for compatibility.
func (me *GoPdf) WritePdf(pdfPath string) {
	if err := ioutil.WriteFile(pdfPath, me.GetBytesPdf(), 0644); err != nil {
		log.Printf("gopdf: writing %s: %v", pdfPath, err)
	}
}
// GetBytesPdfReturnErr serializes the whole document and returns the
// PDF bytes, or the first Build error from any object. It records the
// byte offset of every object so xref can emit the cross-reference
// table required by the PDF format.
func (me *GoPdf) GetBytesPdfReturnErr() ([]byte, error) {
	me.prepare()
	buff := new(bytes.Buffer)
	i := 0
	max := len(me.pdfObjs)
	buff.WriteString("%PDF-1.7\n\n")
	// linelens[i] = byte offset where object i starts, needed for xref
	linelens := make([]int, max)
	for i < max {
		linelens[i] = buff.Len()
		pdfObj := me.pdfObjs[i]
		err := pdfObj.Build()
		if err != nil {
			return nil, err
		}
		// object numbers are 1-based in the PDF body
		buff.WriteString(strconv.Itoa(i+1) + " 0 obj\n")
		buffbyte := pdfObj.GetObjBuff().Bytes()
		buff.Write(buffbyte)
		buff.WriteString("endobj\n\n")
		i++
	}
	me.xref(linelens, buff, &i)
	return buff.Bytes(), nil
}
// GetBytesPdf returns the generated PDF bytes. It terminates the
// program via log.Fatalf when building the document fails; use
// GetBytesPdfReturnErr to handle the error yourself.
func (me *GoPdf) GetBytesPdf() []byte {
	pdfBytes, buildErr := me.GetBytesPdfReturnErr()
	if buildErr != nil {
		log.Fatalf("%s", buildErr.Error())
	}
	return pdfBytes
}
// Cell draws text at the current position inside rectangle, advancing
// the cursor. When the current font style contains "U" (set via
// SetFont) an underline is appended from the pre-draw position to the
// post-draw position.
// Note that this has no effect on Rect.H pdf (now). Fix later :-)
func (me *GoPdf) Cell(rectangle *Rect, text string) {
	//undelineOffset := ContentObj_CalTextHeight(me.Curr.Font_Size) + 1
	startX := me.Curr.X
	startY := me.Curr.Y
	// subset fonts must be told which glyphs are used so they can be subsetted
	if me.Curr.Font_Type == CURRENT_FONT_TYPE_IFONT {
		me.getContent().AppendStream(rectangle, text)
	} else if me.Curr.Font_Type == CURRENT_FONT_TYPE_SUBSET {
		me.Curr.Font_ISubset.AddChars(text)
		me.getContent().AppendStreamSubsetFont(rectangle, text)
	}
	endX := me.Curr.X
	endY := me.Curr.Y
	//underline
	if strings.Contains(strings.ToUpper(me.Curr.Font_Style), "U") {
		//me.Line(x1,y1+undelineOffset,x2,y2+undelineOffset)
		me.getContent().AppendUnderline(startX, startY, endX, endY, text)
	}
}
// AddTTFFont registers the TTF file at ttfpath under the given family
// name as a subset font. It builds the chain of objects a subset font
// needs (ToUnicode map, font-file dictionary, font descriptor, CID
// font, and finally the subset font itself) and wires their object
// indexes together. Returns the os.Stat error when the file does not
// exist, or any error from parsing the TTF.
func (me *GoPdf) AddTTFFont(family string, ttfpath string) error {
	if _, err := os.Stat(ttfpath); os.IsNotExist(err) {
		return err
	}
	subsetFont := new(SubsetFontObj)
	subsetFont.Init(func() *GoPdf {
		return me
	})
	subsetFont.SetFamily(family)
	err := subsetFont.SetTTFByPath(ttfpath)
	if err != nil {
		return err
	}
	unicodemap := new(UnicodeMap)
	unicodemap.Init(func() *GoPdf {
		return me
	})
	unicodemap.SetPtrToSubsetFontObj(subsetFont)
	unicodeindex := me.addObj(unicodemap)
	pdfdic := new(PdfDictionaryObj)
	pdfdic.Init(func() *GoPdf {
		return me
	})
	pdfdic.SetPtrToSubsetFontObj(subsetFont)
	pdfdicindex := me.addObj(pdfdic)
	subfontdesc := new(SubfontDescriptorObj)
	subfontdesc.Init(func() *GoPdf {
		return me
	})
	subfontdesc.SetPtrToSubsetFontObj(subsetFont)
	subfontdesc.SetIndexObjPdfDictionary(pdfdicindex)
	subfontdescindex := me.addObj(subfontdesc)
	cidfont := new(CIDFontObj)
	cidfont.Init(func() *GoPdf {
		return me
	})
	cidfont.SetPtrToSubsetFontObj(subsetFont)
	cidfont.SetIndexObjSubfontDescriptor(subfontdescindex)
	cidindex := me.addObj(cidfont)
	subsetFont.SetIndexObjCIDFont(cidindex)
	subsetFont.SetIndexObjUnicodeMap(unicodeindex)
	index := me.addObj(subsetFont) // the subset font is added last
	// register the family with the ProcSet so SetFont can find it,
	// unless the family is already registered
	if me.indexOfProcSet != -1 {
		procset := me.pdfObjs[me.indexOfProcSet].(*ProcSetObj)
		if !procset.Realtes.IsContainsFamily(family) {
			procset.Realtes = append(procset.Realtes, RelateFont{Family: family, IndexOfObj: index, CountOfFont: me.Curr.CountOfFont})
			subsetFont.CountOfFont = me.Curr.CountOfFont
			me.Curr.CountOfFont++
		}
	}
	return nil
}
// AddFont registers an embedded font under family, using the IFont
// metrics and the compressed font program at zfontpath. It appends
// five objects in a fixed order — Encoding, widths, FontDescriptor,
// EmbedFont, FontObj — and SetFont/prepare depend on those relative
// offsets (+1..+4 from the EncodingObj), so the order must not change.
func (me *GoPdf) AddFont(family string, ifont IFont, zfontpath string) {
	encoding := new(EncodingObj)
	ifont.Init()
	ifont.SetFamily(family)
	encoding.SetFont(ifont)
	// the EncodingObj index is the anchor for the +1..+4 offsets below
	me.indexEncodingObjFonts = append(me.indexEncodingObjFonts, me.addObj(encoding))
	fontWidth := new(BasicObj)
	fontWidth.Init(func() *GoPdf {
		return me
	})
	fontWidth.Data = "[" + FontConvertHelper_Cw2Str(ifont.GetCw()) + "]\n"
	me.addObj(fontWidth) //1
	fontDesc := new(FontDescriptorObj)
	fontDesc.Init(func() *GoPdf {
		return me
	})
	fontDesc.SetFont(ifont)
	me.addObj(fontDesc) //2
	embedfont := new(EmbedFontObj)
	embedfont.Init(func() *GoPdf {
		return me
	})
	embedfont.SetFont(ifont, zfontpath)
	index := me.addObj(embedfont) //3
	// object references are 1-based, hence index+1
	fontDesc.SetFontFileObjRelate(strconv.Itoa(index+1) + " 0 R")
	//start add font obj
	font := new(FontObj)
	font.Init(func() *GoPdf {
		return me
	})
	font.Family = family
	font.Font = ifont
	index = me.addObj(font) //4
	// register with the ProcSet so SetFont can find it, unless the
	// family is already registered
	if me.indexOfProcSet != -1 {
		procset := me.pdfObjs[me.indexOfProcSet].(*ProcSetObj)
		if !procset.Realtes.IsContainsFamily(family) {
			procset.Realtes = append(procset.Realtes, RelateFont{Family: family, IndexOfObj: index, CountOfFont: me.Curr.CountOfFont})
			font.CountOfFont = me.Curr.CountOfFont
			me.Curr.CountOfFont++
		}
	}
	//end add font obj
}
/*---private---*/

// init resets all document state to defaults: 10-unit margins, the
// cursor at the top-left margin, zeroed counters, and sentinel (-1)
// object indexes meaning "not created yet".
func (me *GoPdf) init() {
	// default margins
	me.leftMargin = 10.0
	me.topMargin = 10.0
	// reset current drawing state
	me.resetCurrXY()
	me.Curr.IndexOfPageObj = -1
	me.Curr.CountOfFont = 0
	me.Curr.CountOfL = 0
	me.Curr.CountOfImg = 0 //img
	me.Curr.ImgCaches = *new([]ImageCache)
	// -1 marks "no such object yet"
	me.indexOfPagesObj = -1
	me.indexOfFirstPageObj = -1
	me.indexOfContent = -1
	//No underline
	//me.IsUnderline = false
}
// resetCurrXY moves the current drawing position back to the
// top-left margin corner.
func (me *GoPdf) resetCurrXY() {
	me.Curr.X, me.Curr.Y = me.leftMargin, me.topMargin
}
// prepare wires cross-references between objects before serialization:
// every Page is listed in the Pages tree's Kids, every Content stream
// is attached to the page that precedes it, and every embedded FontObj
// gets the object indexes of its Encoding/widths/FontDescriptor
// (which AddFont placed at fixed offsets after the EncodingObj).
func (me *GoPdf) prepare() {
	if me.indexOfPagesObj != -1 {
		indexCurrPage := -1
		var pagesObj *PagesObj
		pagesObj = me.pdfObjs[me.indexOfPagesObj].(*PagesObj)
		i := 0 //me.indexOfFirstPageObj
		max := len(me.pdfObjs)
		for i < max {
			objtype := me.pdfObjs[i].GetType()
			//fmt.Printf(" objtype = %s , %d \n", objtype , i)
			if objtype == "Page" {
				// object references are 1-based, hence i+1
				pagesObj.Kids = fmt.Sprintf("%s %d 0 R ", pagesObj.Kids, i+1)
				pagesObj.PageCount++
				indexCurrPage = i
			} else if objtype == "Content" {
				// a Content stream belongs to the most recent Page seen
				if indexCurrPage != -1 {
					me.pdfObjs[indexCurrPage].(*PageObj).Contents = fmt.Sprintf("%s %d 0 R ", me.pdfObjs[indexCurrPage].(*PageObj).Contents, i+1)
				}
			} else if objtype == "Font" {
				tmpfont := me.pdfObjs[i].(*FontObj)
				j := 0
				jmax := len(me.indexEncodingObjFonts)
				for j < jmax {
					tmpencoding := me.pdfObjs[me.indexEncodingObjFonts[j]].(*EncodingObj).GetFont()
					if tmpfont.Family == tmpencoding.GetFamily() { // fill in the embedded font's object references
						tmpfont.IsEmbedFont = true
						// offsets fixed by AddFont's append order
						tmpfont.SetIndexObjEncoding(me.indexEncodingObjFonts[j] + 1)
						tmpfont.SetIndexObjWidth(me.indexEncodingObjFonts[j] + 2)
						tmpfont.SetIndexObjFontDescriptor(me.indexEncodingObjFonts[j] + 3)
						break
					}
					j++
				}
			}
			i++
		}
	}
}
// xref appends the PDF cross-reference table and trailer to buff.
// linelens holds the byte offset at which each object starts
// (collected in GetBytesPdfReturnErr); i is the object count and is
// incremented to account for the reserved free entry 0.
func (me *GoPdf) xref(linelens []int, buff *bytes.Buffer, i *int) {
	buff.WriteString("xref\n")
	buff.WriteString("0 " + strconv.Itoa((*i)+1) + "\n")
	// entry 0 is the mandatory free-list head
	buff.WriteString("0000000000 65535 f\n")
	j := 0
	max := len(linelens)
	for j < max {
		linelen := linelens[j]
		buff.WriteString(me.formatXrefline(linelen) + " 00000 n\n")
		j++
	}
	buff.WriteString("trailer\n")
	buff.WriteString("<<\n")
	buff.WriteString("/Size " + strconv.Itoa(max+1) + "\n")
	buff.WriteString("/Root 1 0 R\n")
	buff.WriteString(">>\n")
	(*i)++
}
// formatXrefline zero-pads a byte offset to the 10 digits the PDF
// xref table format requires. Uses fmt's zero-padding verb instead of
// the original hand-rolled prepend loop; offsets wider than 10 digits
// are passed through unmodified, exactly as before.
func (me *GoPdf) formatXrefline(n int) string {
	return fmt.Sprintf("%010d", n)
}
// addObj appends iobj to the document's object list and returns its
// zero-based index (object numbers in the emitted PDF are index+1).
func (me *GoPdf) addObj(iobj IObj) int {
	me.pdfObjs = append(me.pdfObjs, iobj)
	return len(me.pdfObjs) - 1
}
// getContent returns the active content stream for the current page,
// creating and registering a new one when none exists yet
// (indexOfContent is reset to -1 by AddPage).
func (me *GoPdf) getContent() *ContentObj {
	if me.indexOfContent <= -1 {
		fresh := new(ContentObj)
		fresh.Init(func() *GoPdf {
			return me
		})
		me.indexOfContent = me.addObj(fresh)
		return fresh
	}
	return me.pdfObjs[me.indexOfContent].(*ContentObj)
}
add comment
package gopdf
import (
"bytes"
"errors"
ioutil "io/ioutil"
"log"
"os"
//"container/list"
"fmt"
"strconv"
"strings"
)
// GoPdf is the top-level PDF document builder: it owns the object
// list, page margins, configuration, and the current drawing state.
type GoPdf struct {
	//page Margin
	leftMargin float64
	topMargin float64
	// every PDF object in emission order; an object's number is its index+1
	pdfObjs []IObj
	config Config
	/*--- indexes of important objects, cached to avoid rescanning pdfObjs ---*/
	// index of the Pages tree object
	indexOfPagesObj int
	// index of the first Page object
	indexOfFirstPageObj int
	// current drawing state (cursor, font, counters)
	Curr Current
	indexEncodingObjFonts []int
	indexOfContent int
	// index of the ProcSet object, of which there should be exactly one
	indexOfProcSet int
	//IsUnderline bool
}
/*---public---*/

// SetLineWidth sets the stroke width used by subsequent drawing
// commands on the current page.
func (me *GoPdf) SetLineWidth(width float64) {
	content := me.getContent()
	content.AppendStreamSetLineWidth(width)
}
// Line draws a straight line from (x1, y1) to (x2, y2) on the
// current page.
func (me *GoPdf) Line(x1 float64, y1 float64, x2 float64, y2 float64) {
	content := me.getContent()
	content.AppendStreamLine(x1, y1, x2, y2)
}
// Br starts a new line: the cursor returns to the left margin and
// moves down by h.
func (me *GoPdf) Br(h float64) {
	me.Curr.X = me.leftMargin
	me.Curr.Y = me.Curr.Y + h
}
// SetLeftMargin sets the left page margin, used as the X position
// after Br and resetCurrXY.
func (me *GoPdf) SetLeftMargin(margin float64) {
	me.leftMargin = margin
}
// SetTopMargin sets the top page margin, used as the Y position after
// resetCurrXY.
func (me *GoPdf) SetTopMargin(margin float64) {
	me.topMargin = margin
}
// SetX sets the current horizontal drawing position.
func (me *GoPdf) SetX(x float64) {
	me.Curr.X = x
}
// GetX returns the current horizontal drawing position.
func (me *GoPdf) GetX() float64 {
	return me.Curr.X
}
// SetY sets the current vertical drawing position.
func (me *GoPdf) SetY(y float64) {
	me.Curr.Y = y
}
// GetY returns the current vertical drawing position.
func (me *GoPdf) GetY() float64 {
	return me.Curr.Y
}
// Image draws the picture at picPath at position (x, y). When rect is
// nil the image's own dimensions are used. Images are cached by path,
// so drawing the same file again reuses the already-embedded XObject
// instead of adding a second copy.
func (me *GoPdf) Image(picPath string, x float64, y float64, rect *Rect) {
	// look for a previously embedded copy of this file
	cacheImageIndex := -1
	for _, imgcache := range me.Curr.ImgCaches {
		if picPath == imgcache.Path {
			cacheImageIndex = imgcache.Index
			break
		}
	}
	//create img object
	imgobj := new(ImageObj)
	imgobj.Init(func() *GoPdf {
		return me
	})
	imgobj.SetImagePath(picPath)
	if rect == nil {
		rect = imgobj.GetRect()
	}
	if cacheImageIndex == -1 { //new image
		index := me.addObj(imgobj)
		if me.indexOfProcSet != -1 {
			// register the image XObject with the ProcSet and draw it
			procset := me.pdfObjs[me.indexOfProcSet].(*ProcSetObj)
			me.getContent().AppendStreamImage(me.Curr.CountOfImg, x, y, rect)
			procset.RealteXobjs = append(procset.RealteXobjs, RealteXobject{IndexOfObj: index})
			// remember this image so later calls can reuse it
			var imgcache ImageCache
			imgcache.Index = me.Curr.CountOfImg
			imgcache.Path = picPath
			me.Curr.ImgCaches = append(me.Curr.ImgCaches, imgcache)
			me.Curr.CountOfImg++
		}
	} else { //same img
		me.getContent().AppendStreamImage(cacheImageIndex, x, y, rect)
	}
}
// AddPage appends a new page object and makes it the current page.
// The page references the shared ProcSet resources object, and the
// per-page content/cursor state is reset so drawing starts fresh.
func (me *GoPdf) AddPage() {
	page := new(PageObj)
	page.Init(func() *GoPdf {
		return me
	})
	// object numbers in the PDF are 1-based, hence the +1
	page.ResourcesRelate = strconv.Itoa(me.indexOfProcSet+1) + " 0 R"
	index := me.addObj(page)
	if me.indexOfFirstPageObj == -1 {
		me.indexOfFirstPageObj = index
	}
	me.Curr.IndexOfPageObj = index
	// reset: force a new content object for this page and move the
	// cursor back to the margins
	me.indexOfContent = -1
	me.resetCurrXY()
}
// Start initializes the document with the given configuration and
// creates the three base objects every PDF needs here: the Catalog,
// the Pages tree, and the ProcSet resource dictionary. Call this
// before AddPage/AddFont/etc.
func (me *GoPdf) Start(config Config) {
	me.config = config
	me.init()
	// create the basic objects
	catalog := new(CatalogObj)
	catalog.Init(func() *GoPdf {
		return me
	})
	pages := new(PagesObj)
	pages.Init(func() *GoPdf {
		return me
	})
	me.addObj(catalog)
	me.indexOfPagesObj = me.addObj(pages)
	// remember where the (single) ProcSet lives so pages can reference it
	procset := new(ProcSetObj)
	procset.Init(func() *GoPdf {
		return me
	})
	me.indexOfProcSet = me.addObj(procset)
}
// SetFont selects the current font by family name, style ("" or "U"
// for underline — see Cell) and point size. Embedded fonts registered
// via AddFont are searched first, then TTF subset fonts registered via
// AddTTFFont. Returns an error when the family was never registered.
func (me *GoPdf) SetFont(family string, style string, size int) error {
	found := false
	i := 0
	max := len(me.indexEncodingObjFonts)
	for i < max {
		ifont := me.pdfObjs[me.indexEncodingObjFonts[i]].(*EncodingObj).GetFont()
		if ifont.GetFamily() == family {
			me.Curr.Font_Size = size
			me.Curr.Font_Style = style
			// AddFont appends the FontObj 4 objects after its EncodingObj,
			// which is what the +4 offset relies on
			me.Curr.Font_FontCount = me.pdfObjs[me.indexEncodingObjFonts[i]+4].(*FontObj).CountOfFont
			me.Curr.Font_Type = CURRENT_FONT_TYPE_IFONT
			me.Curr.Font_IFont = ifont
			me.Curr.Font_ISubset = nil
			found = true
			break
		}
		i++
	}
	if !found { //find SubsetFont
		// linear scan over all objects for a SubsetFont with this family
		i = 0
		max = len(me.pdfObjs)
		for i < max {
			if me.pdfObjs[i].GetType() == "SubsetFont" {
				obj := me.pdfObjs[i]
				sub, ok := obj.(*SubsetFontObj)
				if ok {
					if sub.GetFamily() == family {
						me.Curr.Font_Size = size
						me.Curr.Font_Style = style
						me.Curr.Font_FontCount = sub.CountOfFont
						me.Curr.Font_Type = CURRENT_FONT_TYPE_SUBSET
						me.Curr.Font_IFont = nil
						me.Curr.Font_ISubset = sub
						found = true
						break
					}
				}
			}
			i++
		}
	}
	if !found {
		return errors.New("not found font family")
	}
	return nil
}
// WritePdf builds the document and writes it to pdfPath with mode 0644.
// The original discarded ioutil.WriteFile's return value; a failed
// write (permissions, full disk) is now logged instead of silently
// producing no file. The signature is unchanged for compatibility.
func (me *GoPdf) WritePdf(pdfPath string) {
	if err := ioutil.WriteFile(pdfPath, me.GetBytesPdf(), 0644); err != nil {
		log.Printf("gopdf: writing %s: %v", pdfPath, err)
	}
}
// GetBytesPdfReturnErr serializes the whole document and returns the
// PDF bytes, or the first Build error from any object. It records the
// byte offset of every object so xref can emit the cross-reference
// table required by the PDF format.
func (me *GoPdf) GetBytesPdfReturnErr() ([]byte, error) {
	me.prepare()
	buff := new(bytes.Buffer)
	i := 0
	max := len(me.pdfObjs)
	buff.WriteString("%PDF-1.7\n\n")
	// linelens[i] = byte offset where object i starts, needed for xref
	linelens := make([]int, max)
	for i < max {
		linelens[i] = buff.Len()
		pdfObj := me.pdfObjs[i]
		err := pdfObj.Build()
		if err != nil {
			return nil, err
		}
		// object numbers are 1-based in the PDF body
		buff.WriteString(strconv.Itoa(i+1) + " 0 obj\n")
		buffbyte := pdfObj.GetObjBuff().Bytes()
		buff.Write(buffbyte)
		buff.WriteString("endobj\n\n")
		i++
	}
	me.xref(linelens, buff, &i)
	return buff.Bytes(), nil
}
// GetBytesPdf returns the generated PDF bytes. It terminates the
// program via log.Fatalf when building the document fails; use
// GetBytesPdfReturnErr to handle the error yourself.
func (me *GoPdf) GetBytesPdf() []byte {
	pdfBytes, buildErr := me.GetBytesPdfReturnErr()
	if buildErr != nil {
		log.Fatalf("%s", buildErr.Error())
	}
	return pdfBytes
}
// Cell draws text at the current position inside rectangle, advancing
// the cursor. When the current font style contains "U" (set via
// SetFont) an underline is appended from the pre-draw position to the
// post-draw position.
// Note that this has no effect on Rect.H pdf (now). Fix later :-)
func (me *GoPdf) Cell(rectangle *Rect, text string) {
	//undelineOffset := ContentObj_CalTextHeight(me.Curr.Font_Size) + 1
	startX := me.Curr.X
	startY := me.Curr.Y
	// subset fonts must be told which glyphs are used so they can be subsetted
	if me.Curr.Font_Type == CURRENT_FONT_TYPE_IFONT {
		me.getContent().AppendStream(rectangle, text)
	} else if me.Curr.Font_Type == CURRENT_FONT_TYPE_SUBSET {
		me.Curr.Font_ISubset.AddChars(text)
		me.getContent().AppendStreamSubsetFont(rectangle, text)
	}
	endX := me.Curr.X
	endY := me.Curr.Y
	//underline
	if strings.Contains(strings.ToUpper(me.Curr.Font_Style), "U") {
		//me.Line(x1,y1+undelineOffset,x2,y2+undelineOffset)
		me.getContent().AppendUnderline(startX, startY, endX, endY, text)
	}
}
// AddTTFFont registers the TTF file at ttfpath under the given family
// name as a subset font. It builds the chain of objects a subset font
// needs (ToUnicode map, font-file dictionary, font descriptor, CID
// font, and finally the subset font itself) and wires their object
// indexes together. Returns the os.Stat error when the file does not
// exist, or any error from parsing the TTF.
func (me *GoPdf) AddTTFFont(family string, ttfpath string) error {
	if _, err := os.Stat(ttfpath); os.IsNotExist(err) {
		return err
	}
	subsetFont := new(SubsetFontObj)
	subsetFont.Init(func() *GoPdf {
		return me
	})
	subsetFont.SetFamily(family)
	err := subsetFont.SetTTFByPath(ttfpath)
	if err != nil {
		return err
	}
	unicodemap := new(UnicodeMap)
	unicodemap.Init(func() *GoPdf {
		return me
	})
	unicodemap.SetPtrToSubsetFontObj(subsetFont)
	unicodeindex := me.addObj(unicodemap)
	pdfdic := new(PdfDictionaryObj)
	pdfdic.Init(func() *GoPdf {
		return me
	})
	pdfdic.SetPtrToSubsetFontObj(subsetFont)
	pdfdicindex := me.addObj(pdfdic)
	subfontdesc := new(SubfontDescriptorObj)
	subfontdesc.Init(func() *GoPdf {
		return me
	})
	subfontdesc.SetPtrToSubsetFontObj(subsetFont)
	subfontdesc.SetIndexObjPdfDictionary(pdfdicindex)
	subfontdescindex := me.addObj(subfontdesc)
	cidfont := new(CIDFontObj)
	cidfont.Init(func() *GoPdf {
		return me
	})
	cidfont.SetPtrToSubsetFontObj(subsetFont)
	cidfont.SetIndexObjSubfontDescriptor(subfontdescindex)
	cidindex := me.addObj(cidfont)
	subsetFont.SetIndexObjCIDFont(cidindex)
	subsetFont.SetIndexObjUnicodeMap(unicodeindex)
	index := me.addObj(subsetFont) // the subset font is added last
	// register the family with the ProcSet so SetFont can find it,
	// unless the family is already registered
	if me.indexOfProcSet != -1 {
		procset := me.pdfObjs[me.indexOfProcSet].(*ProcSetObj)
		if !procset.Realtes.IsContainsFamily(family) {
			procset.Realtes = append(procset.Realtes, RelateFont{Family: family, IndexOfObj: index, CountOfFont: me.Curr.CountOfFont})
			subsetFont.CountOfFont = me.Curr.CountOfFont
			me.Curr.CountOfFont++
		}
	}
	return nil
}
// AddFont registers an embedded font under family, using the IFont
// metrics and the compressed font program at zfontpath. It appends
// five objects in a fixed order — Encoding, widths, FontDescriptor,
// EmbedFont, FontObj — and SetFont/prepare depend on those relative
// offsets (+1..+4 from the EncodingObj), so the order must not change.
func (me *GoPdf) AddFont(family string, ifont IFont, zfontpath string) {
	encoding := new(EncodingObj)
	ifont.Init()
	ifont.SetFamily(family)
	encoding.SetFont(ifont)
	// the EncodingObj index is the anchor for the +1..+4 offsets below
	me.indexEncodingObjFonts = append(me.indexEncodingObjFonts, me.addObj(encoding))
	fontWidth := new(BasicObj)
	fontWidth.Init(func() *GoPdf {
		return me
	})
	fontWidth.Data = "[" + FontConvertHelper_Cw2Str(ifont.GetCw()) + "]\n"
	me.addObj(fontWidth) //1
	fontDesc := new(FontDescriptorObj)
	fontDesc.Init(func() *GoPdf {
		return me
	})
	fontDesc.SetFont(ifont)
	me.addObj(fontDesc) //2
	embedfont := new(EmbedFontObj)
	embedfont.Init(func() *GoPdf {
		return me
	})
	embedfont.SetFont(ifont, zfontpath)
	index := me.addObj(embedfont) //3
	// object references are 1-based, hence index+1
	fontDesc.SetFontFileObjRelate(strconv.Itoa(index+1) + " 0 R")
	//start add font obj
	font := new(FontObj)
	font.Init(func() *GoPdf {
		return me
	})
	font.Family = family
	font.Font = ifont
	index = me.addObj(font) //4
	// register with the ProcSet so SetFont can find it, unless the
	// family is already registered
	if me.indexOfProcSet != -1 {
		procset := me.pdfObjs[me.indexOfProcSet].(*ProcSetObj)
		if !procset.Realtes.IsContainsFamily(family) {
			procset.Realtes = append(procset.Realtes, RelateFont{Family: family, IndexOfObj: index, CountOfFont: me.Curr.CountOfFont})
			font.CountOfFont = me.Curr.CountOfFont
			me.Curr.CountOfFont++
		}
	}
	//end add font obj
}
/*---private---*/

// init resets all document state to defaults: 10-unit margins, the
// cursor at the top-left margin, zeroed counters, and sentinel (-1)
// object indexes meaning "not created yet".
func (me *GoPdf) init() {
	// default margins
	me.leftMargin = 10.0
	me.topMargin = 10.0
	// reset current drawing state
	me.resetCurrXY()
	me.Curr.IndexOfPageObj = -1
	me.Curr.CountOfFont = 0
	me.Curr.CountOfL = 0
	me.Curr.CountOfImg = 0 //img
	me.Curr.ImgCaches = *new([]ImageCache)
	// -1 marks "no such object yet"
	me.indexOfPagesObj = -1
	me.indexOfFirstPageObj = -1
	me.indexOfContent = -1
	//No underline
	//me.IsUnderline = false
}
// resetCurrXY moves the current drawing position back to the
// top-left margin corner.
func (me *GoPdf) resetCurrXY() {
	me.Curr.X, me.Curr.Y = me.leftMargin, me.topMargin
}
// prepare wires cross-references between objects before serialization:
// every Page is listed in the Pages tree's Kids, every Content stream
// is attached to the page that precedes it, and every embedded FontObj
// gets the object indexes of its Encoding/widths/FontDescriptor
// (which AddFont placed at fixed offsets after the EncodingObj).
func (me *GoPdf) prepare() {
	if me.indexOfPagesObj != -1 {
		indexCurrPage := -1
		var pagesObj *PagesObj
		pagesObj = me.pdfObjs[me.indexOfPagesObj].(*PagesObj)
		i := 0 //me.indexOfFirstPageObj
		max := len(me.pdfObjs)
		for i < max {
			objtype := me.pdfObjs[i].GetType()
			//fmt.Printf(" objtype = %s , %d \n", objtype , i)
			if objtype == "Page" {
				// object references are 1-based, hence i+1
				pagesObj.Kids = fmt.Sprintf("%s %d 0 R ", pagesObj.Kids, i+1)
				pagesObj.PageCount++
				indexCurrPage = i
			} else if objtype == "Content" {
				// a Content stream belongs to the most recent Page seen
				if indexCurrPage != -1 {
					me.pdfObjs[indexCurrPage].(*PageObj).Contents = fmt.Sprintf("%s %d 0 R ", me.pdfObjs[indexCurrPage].(*PageObj).Contents, i+1)
				}
			} else if objtype == "Font" {
				tmpfont := me.pdfObjs[i].(*FontObj)
				j := 0
				jmax := len(me.indexEncodingObjFonts)
				for j < jmax {
					tmpencoding := me.pdfObjs[me.indexEncodingObjFonts[j]].(*EncodingObj).GetFont()
					if tmpfont.Family == tmpencoding.GetFamily() { // fill in the embedded font's object references
						tmpfont.IsEmbedFont = true
						// offsets fixed by AddFont's append order
						tmpfont.SetIndexObjEncoding(me.indexEncodingObjFonts[j] + 1)
						tmpfont.SetIndexObjWidth(me.indexEncodingObjFonts[j] + 2)
						tmpfont.SetIndexObjFontDescriptor(me.indexEncodingObjFonts[j] + 3)
						break
					}
					j++
				}
			}
			i++
		}
	}
}
// xref appends the PDF cross-reference table and trailer to buff.
// linelens holds the byte offset at which each object starts
// (collected in GetBytesPdfReturnErr); i is the object count and is
// incremented to account for the reserved free entry 0.
func (me *GoPdf) xref(linelens []int, buff *bytes.Buffer, i *int) {
	buff.WriteString("xref\n")
	buff.WriteString("0 " + strconv.Itoa((*i)+1) + "\n")
	// entry 0 is the mandatory free-list head
	buff.WriteString("0000000000 65535 f\n")
	j := 0
	max := len(linelens)
	for j < max {
		linelen := linelens[j]
		buff.WriteString(me.formatXrefline(linelen) + " 00000 n\n")
		j++
	}
	buff.WriteString("trailer\n")
	buff.WriteString("<<\n")
	buff.WriteString("/Size " + strconv.Itoa(max+1) + "\n")
	buff.WriteString("/Root 1 0 R\n")
	buff.WriteString(">>\n")
	(*i)++
}
// formatXrefline zero-pads a byte offset to the 10 digits the PDF
// xref table format requires. Uses fmt's zero-padding verb instead of
// the original hand-rolled prepend loop; offsets wider than 10 digits
// are passed through unmodified, exactly as before.
func (me *GoPdf) formatXrefline(n int) string {
	return fmt.Sprintf("%010d", n)
}
// addObj appends iobj to the document's object list and returns its
// zero-based index (object numbers in the emitted PDF are index+1).
func (me *GoPdf) addObj(iobj IObj) int {
	me.pdfObjs = append(me.pdfObjs, iobj)
	return len(me.pdfObjs) - 1
}
// getContent returns the active content stream for the current page,
// creating and registering a new one when none exists yet
// (indexOfContent is reset to -1 by AddPage).
func (me *GoPdf) getContent() *ContentObj {
	if me.indexOfContent <= -1 {
		fresh := new(ContentObj)
		fresh.Init(func() *GoPdf {
			return me
		})
		me.indexOfContent = me.addObj(fresh)
		return fresh
	}
	return me.pdfObjs[me.indexOfContent].(*ContentObj)
}
|
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package status
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/utils/clock"
"istio.io/istio/pilot/pkg/xds"
"istio.io/istio/pkg/config"
"istio.io/pkg/ledger"
)
func NewIstioContext(stop <-chan struct{}) context.Context {
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stop
cancel()
}()
return ctx
}
// inProgressEntry tracks one resource whose configuration is still
// being distributed to dataplanes.
type inProgressEntry struct {
	// the resource, including resourceVersion, we are currently tracking
	Resource
	// the number of reports we have written with this resource at 100%
	completedIterations int
}
// Reporter is the status follower: it records which dataplane
// connection has acked which config nonce, and periodically publishes
// a DistributionReport to a per-pod ConfigMap for the status leader.
type Reporter struct {
	mu sync.RWMutex // guards status, reverseStatus, dirty, inProgressResources
	// map from connection id to latest nonce
	status map[string]string
	// map from nonce to connection ids for which it is current
	// using map[string]struct to approximate a hashset
	reverseStatus map[string]map[string]struct{}
	// set whenever status/reverseStatus change; not read in this file — TODO confirm use elsewhere
	dirty bool
	inProgressResources map[string]*inProgressEntry
	client v1.ConfigMapInterface // ConfigMap client for the reporter's namespace
	cm *corev1.ConfigMap // the ConfigMap the report is written into
	UpdateInterval time.Duration // report period; defaulted to 500ms in Init
	PodName string
	clock clock.Clock // injectable for tests; defaulted in Init
	ledger ledger.Ledger
	distributionEventQueue chan distributionEvent
	controller *DistributionController
}
// Reporter must satisfy the xds distribution status cache interface.
var _ xds.DistributionStatusCache = &Reporter{}

// labelKey marks distribution-report ConfigMaps; dataField is the key
// under which the serialized report is stored.
const labelKey = "internal.istio.io/distribution-report"
const dataField = "distribution-report"
// Init starts all the read only features of the reporter, used for nonce generation
// and responding to istioctl wait.
func (r *Reporter) Init(ledger ledger.Ledger) {
	r.ledger = ledger
	if r.clock == nil {
		// allow tests to inject a fake clock; fall back to wall time
		r.clock = clock.RealClock{}
	}
	// default UpdateInterval
	if r.UpdateInterval == 0 {
		r.UpdateInterval = 500 * time.Millisecond
	}
	// large buffer so RegisterEvent can stay non-blocking under load
	r.distributionEventQueue = make(chan distributionEvent, 100_000)
	r.status = make(map[string]string)
	r.reverseStatus = make(map[string]map[string]struct{})
	r.inProgressResources = make(map[string]*inProgressEntry)
	go r.readFromEventQueue()
}
// Starts the reporter, which watches dataplane ack's and resource changes so that it can update status leader
// with distribution information. It creates the per-pod report ConfigMap
// (owned by this pod so it is garbage-collected with it), then writes a
// report every UpdateInterval until stop closes, at which point it
// deletes the ConfigMap and shuts down the event queue.
func (r *Reporter) Start(clientSet kubernetes.Interface, namespace string, podname string, stop <-chan struct{}) {
	scope.Info("Starting status follower controller")
	r.client = clientSet.CoreV1().ConfigMaps(namespace)
	r.cm = &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: r.PodName + "-distribution",
			Labels: map[string]string{labelKey: "true"},
		},
		Data: make(map[string]string),
	}
	t := r.clock.Tick(r.UpdateInterval)
	ctx := NewIstioContext(stop)
	x, err := clientSet.CoreV1().Pods(namespace).Get(ctx, podname, metav1.GetOptions{})
	if err != nil {
		scope.Errorf("can't identify pod context: %s", err)
	} else {
		// owning the ConfigMap by the pod lets Kubernetes clean it up
		// automatically if the pod dies without running the delete below
		r.cm.OwnerReferences = []metav1.OwnerReference{
			*metav1.NewControllerRef(x, schema.GroupVersionKind{
				Version: "v1",
				Kind: "Pod",
			}),
		}
	}
	go func() {
		for {
			select {
			case <-ctx.Done():
				if r.cm != nil {
					// TODO: is the use of a cancelled context here a problem? Maybe set a short timeout context?
					if err := r.client.Delete(context.Background(), r.cm.Name, metav1.DeleteOptions{}); err != nil {
						scope.Errorf("failed to properly clean up distribution report: %v", err)
					}
				}
				// closing the queue terminates readFromEventQueue
				close(r.distributionEventQueue)
				return
			case <-t:
				// TODO, check if report is necessary? May already be handled by client
				r.writeReport(ctx)
			}
		}
	}()
}
// build a distribution report to send to status leader. For each
// in-flight resource it counts, per config nonce, how many dataplanes
// are on a version whose ledger entry matches the resource's
// generation. Also returns the resources that have reached every
// known dataplane so the caller can stop tracking them.
func (r *Reporter) buildReport() (DistributionReport, []Resource) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	var finishedResources []Resource
	out := DistributionReport{
		Reporter: r.PodName,
		DataPlaneCount: len(r.status),
		InProgressResources: map[string]int{},
	}
	// for every resource in flight
	for _, ipr := range r.inProgressResources {
		res := ipr.Resource
		key := res.String()
		// for every version (nonce) of the config currently in play
		for nonce, dataplanes := range r.reverseStatus {
			// check to see if this version of the config contains this version of the resource
			// it might be more optimal to provide for a full dump of the config at a certain version?
			dpVersion, err := r.ledger.GetPreviousValue(nonce, res.ToModelKey())
			if err == nil && dpVersion == res.Generation {
				if _, ok := out.InProgressResources[key]; !ok {
					out.InProgressResources[key] = len(dataplanes)
				} else {
					out.InProgressResources[key] += len(dataplanes)
				}
			} else if err != nil {
				scope.Errorf("Encountered error retrieving version %s of key %s from Store: %v", nonce, key, err)
				continue
			} else if nonce == r.ledger.RootHash() {
				scope.Warnf("Cache appears to be missing latest version of %s", key)
			}
			if out.InProgressResources[key] >= out.DataPlaneCount {
				// if this resource is done reconciling, let's not worry about it anymore
				finishedResources = append(finishedResources, res)
				// deleting it here doesn't work because we have a read lock and are inside an iterator.
				// TODO: this will leak when a resource never reaches 100% before it is replaced.
				// TODO: do deletes propagate through this thing?
			}
		}
	}
	return out, finishedResources
}
// For efficiency, we don't want to be checking on resources that have already reached 100% distribution.
// When this happens, we remove them from our watch list.
// A resource stays in the list until it has been reported complete for
// roughly one minute's worth of update intervals, then it is dropped.
func (r *Reporter) removeCompletedResource(completedResources []Resource) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var toDelete []Resource
	for _, item := range completedResources {
		entry, ok := r.inProgressResources[item.ToModelKey()]
		if !ok {
			// cache miss: the entry was already removed (e.g. the resource
			// was replaced between buildReport and this call). The original
			// dereferenced a nil map value here and would panic.
			continue
		}
		total := entry.completedIterations + 1
		if int64(total) > (time.Minute.Milliseconds() / r.UpdateInterval.Milliseconds()) {
			// remove from inProgressResources
			// TODO: cleanup completedResources
			toDelete = append(toDelete, item)
		} else {
			entry.completedIterations = total
		}
	}
	for _, resource := range toDelete {
		delete(r.inProgressResources, resource.ToModelKey())
	}
}
// This function must be called every time a resource change is detected by pilot. This allows us to lookup
// only the resources we expect to be in flight, not the ones that have already distributed.
// The resource is written to the ledger and (re)inserted into the
// in-flight map with a fresh completedIterations counter.
func (r *Reporter) AddInProgressResource(res config.Config) {
	tryLedgerPut(r.ledger, res)
	myRes := ResourceFromModelConfig(res)
	if myRes == nil {
		scope.Errorf("Unable to locate schema for %v, will not update status.", res)
		return
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.inProgressResources[myRes.ToModelKey()] = &inProgressEntry{
		Resource: *myRes,
		completedIterations: 0,
	}
}
// DeleteInProgressResource stops tracking a deleted resource: it is
// removed from the ledger, the controller (when wired) is notified,
// and the entry is dropped from the in-flight map.
// NOTE(review): insertion keys by myRes.ToModelKey() (AddInProgressResource)
// while deletion uses res.Key() — confirm these produce the same key.
func (r *Reporter) DeleteInProgressResource(res config.Config) {
	tryLedgerDelete(r.ledger, res)
	if r.controller != nil {
		r.controller.configDeleted(res)
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.inProgressResources, res.Key())
}
// generate a distribution report and write it to a ConfigMap for the leader to read.
// Resources that finished distributing are pruned asynchronously so
// the write is not delayed by lock acquisition.
func (r *Reporter) writeReport(ctx context.Context) {
	report, finishedResources := r.buildReport()
	go r.removeCompletedResource(finishedResources)
	// write to kubernetes here.
	reportbytes, err := yaml.Marshal(report)
	if err != nil {
		scope.Errorf("Error serializing Distribution Report: %v", err)
		return
	}
	r.cm.Data[dataField] = string(reportbytes)
	// TODO: short circuit this write in the leader
	_, err = CreateOrUpdateConfigMap(ctx, r.cm, r.client)
	if err != nil {
		scope.Errorf("Error writing Distribution Report: %v", err)
	}
}
// this is lifted with few modifications from kubeadm's apiclient
// CreateOrUpdateConfigMap creates cm, falling back to an update when
// it already exists. Both calls now honor the caller's ctx — the
// original passed context.TODO() to Update, ignoring cancellation and
// deadlines on the provided context.
func CreateOrUpdateConfigMap(ctx context.Context, cm *corev1.ConfigMap, client v1.ConfigMapInterface) (res *corev1.ConfigMap, err error) {
	if res, err = client.Create(ctx, cm, metav1.CreateOptions{}); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			scope.Errorf("%v", err)
			return nil, errors.Wrap(err, "unable to create ConfigMap")
		}
		if res, err = client.Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
			return nil, errors.Wrap(err, "unable to update ConfigMap")
		}
	}
	return res, nil
}
// distributionEvent records a single dataplane ack: which connection
// acked which nonce for which xDS distribution type.
type distributionEvent struct {
	conID string
	distributionType xds.EventType
	nonce string
}
// QueryLastNonce returns the most recent nonce acked by connection
// conID for the given distribution type, or "" when none is recorded.
func (r *Reporter) QueryLastNonce(conID string, distributionType xds.EventType) (noncePrefix string) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.status[conID+distributionType]
}
// Register that a dataplane has acknowledged a new version of the config.
// Theoretically, we could use the ads connections themselves to harvest this data,
// but the mutex there is pretty hot, and it seems best to trade memory for time.
// The event is enqueued without blocking; when the queue is full the
// event is dropped and an error is logged.
func (r *Reporter) RegisterEvent(conID string, distributionType xds.EventType, nonce string) {
	// Skip unsupported event types. This ensures we do not leak memory for types
	// which may not be handled properly. For example, a type not in AllEventTypes
	// will not be properly unregistered.
	if _, f := xds.AllEventTypes[distributionType]; !f {
		return
	}
	d := distributionEvent{nonce: nonce, distributionType: distributionType, conID: conID}
	select {
	case r.distributionEventQueue <- d:
		return
	default:
		scope.Errorf("Distribution Event Queue overwhelmed, status will be invalid.")
	}
}
// readFromEventQueue drains the distribution event queue until the
// channel is closed (by Start's shutdown path), applying each ack to
// the reporter's status maps.
func (r *Reporter) readFromEventQueue() {
	for {
		ev, open := <-r.distributionEventQueue
		if !open {
			return
		}
		// TODO might need to batch this to prevent lock contention
		r.processEvent(ev.conID, ev.distributionType, ev.nonce)
	}
}
// processEvent records that conID acked nonce for distributionType,
// updating both the forward (connection -> version) and reverse
// (version -> connections) maps.
func (r *Reporter) processEvent(conID string, distributionType xds.EventType, nonce string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.dirty = true
	key := conID + distributionType // TODO: delimit?
	r.deleteKeyFromReverseMap(key)
	// Truncate the nonce to its version prefix. The original compared
	// len(nonce) against a magic 12 but sliced with xds.VersionLen; using
	// VersionLen for both keeps them in sync and avoids a potential
	// out-of-range slice if the two values ever differ.
	version := nonce
	if len(nonce) > xds.VersionLen {
		version = nonce[:xds.VersionLen]
	}
	// touch
	r.status[key] = version
	if _, ok := r.reverseStatus[version]; !ok {
		r.reverseStatus[version] = make(map[string]struct{})
	}
	r.reverseStatus[version][key] = struct{}{}
}
// This is a helper function for keeping our reverseStatus map in step with status.
// It removes key from the connection set of the nonce it was last
// associated with, pruning the nonce entry once its set is empty.
// must have write lock before calling.
func (r *Reporter) deleteKeyFromReverseMap(key string) {
	old, tracked := r.status[key]
	if !tracked {
		return
	}
	keys, present := r.reverseStatus[old]
	if !present {
		return
	}
	delete(keys, key)
	if len(keys) == 0 {
		delete(r.reverseStatus, old)
	}
}
// When a dataplane disconnects, we should no longer count it, nor expect it to ack config.
// Removes the connection's entry for each given xDS type from both
// the forward and reverse maps.
func (r *Reporter) RegisterDisconnect(conID string, types []xds.EventType) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.dirty = true
	for _, xdsType := range types {
		key := conID + xdsType // TODO: delimit?
		r.deleteKeyFromReverseMap(key)
		delete(r.status, key)
	}
}
// SetController wires the distribution controller so that config
// deletions can be forwarded to it (see DeleteInProgressResource).
func (r *Reporter) SetController(controller *DistributionController) {
	r.controller = controller
}
remove Reporter's *dirty* field (#29637)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package status
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/utils/clock"
"istio.io/istio/pilot/pkg/xds"
"istio.io/istio/pkg/config"
"istio.io/pkg/ledger"
)
func NewIstioContext(stop <-chan struct{}) context.Context {
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stop
cancel()
}()
return ctx
}
// inProgressEntry tracks one resource whose configuration is still
// being distributed to dataplanes.
type inProgressEntry struct {
	// the resource, including resourceVersion, we are currently tracking
	Resource
	// the number of reports we have written with this resource at 100%
	completedIterations int
}
// Reporter is the status follower: it records which dataplane
// connection has acked which config nonce, and periodically publishes
// a DistributionReport to a per-pod ConfigMap for the status leader.
type Reporter struct {
	mu sync.RWMutex // guards status, reverseStatus, inProgressResources
	// map from connection id to latest nonce
	status map[string]string
	// map from nonce to connection ids for which it is current
	// using map[string]struct to approximate a hashset
	reverseStatus map[string]map[string]struct{}
	inProgressResources map[string]*inProgressEntry
	client v1.ConfigMapInterface // ConfigMap client for the reporter's namespace
	cm *corev1.ConfigMap // the ConfigMap the report is written into
	UpdateInterval time.Duration // report period; defaulted to 500ms in Init
	PodName string
	clock clock.Clock // injectable for tests; defaulted in Init
	ledger ledger.Ledger
	distributionEventQueue chan distributionEvent
	controller *DistributionController
}
// Reporter must satisfy the xds distribution status cache interface.
var _ xds.DistributionStatusCache = &Reporter{}

// labelKey marks distribution-report ConfigMaps; dataField is the key
// under which the serialized report is stored.
const labelKey = "internal.istio.io/distribution-report"
const dataField = "distribution-report"
// Init starts all the read only features of the reporter, used for nonce generation
// and responding to istioctl wait.
func (r *Reporter) Init(ledger ledger.Ledger) {
	r.ledger = ledger
	if r.clock == nil {
		// allow tests to inject a fake clock; fall back to wall time
		r.clock = clock.RealClock{}
	}
	// default UpdateInterval
	if r.UpdateInterval == 0 {
		r.UpdateInterval = 500 * time.Millisecond
	}
	// large buffer so RegisterEvent can stay non-blocking under load
	r.distributionEventQueue = make(chan distributionEvent, 100_000)
	r.status = make(map[string]string)
	r.reverseStatus = make(map[string]map[string]struct{})
	r.inProgressResources = make(map[string]*inProgressEntry)
	go r.readFromEventQueue()
}
// Starts the reporter, which watches dataplane ack's and resource changes so that it can update status leader
// with distribution information. It creates the per-pod report ConfigMap
// (owned by this pod so it is garbage-collected with it), then writes a
// report every UpdateInterval until stop closes, at which point it
// deletes the ConfigMap and shuts down the event queue.
func (r *Reporter) Start(clientSet kubernetes.Interface, namespace string, podname string, stop <-chan struct{}) {
	scope.Info("Starting status follower controller")
	r.client = clientSet.CoreV1().ConfigMaps(namespace)
	r.cm = &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: r.PodName + "-distribution",
			Labels: map[string]string{labelKey: "true"},
		},
		Data: make(map[string]string),
	}
	t := r.clock.Tick(r.UpdateInterval)
	ctx := NewIstioContext(stop)
	x, err := clientSet.CoreV1().Pods(namespace).Get(ctx, podname, metav1.GetOptions{})
	if err != nil {
		scope.Errorf("can't identify pod context: %s", err)
	} else {
		// owning the ConfigMap by the pod lets Kubernetes clean it up
		// automatically if the pod dies without running the delete below
		r.cm.OwnerReferences = []metav1.OwnerReference{
			*metav1.NewControllerRef(x, schema.GroupVersionKind{
				Version: "v1",
				Kind: "Pod",
			}),
		}
	}
	go func() {
		for {
			select {
			case <-ctx.Done():
				if r.cm != nil {
					// TODO: is the use of a cancelled context here a problem? Maybe set a short timeout context?
					if err := r.client.Delete(context.Background(), r.cm.Name, metav1.DeleteOptions{}); err != nil {
						scope.Errorf("failed to properly clean up distribution report: %v", err)
					}
				}
				// closing the queue terminates readFromEventQueue
				close(r.distributionEventQueue)
				return
			case <-t:
				// TODO, check if report is necessary? May already be handled by client
				r.writeReport(ctx)
			}
		}
	}()
}
// build a distribution report to send to status leader
// Returns the report plus the list of resources that appear fully
// distributed, so the caller can stop tracking them.
func (r *Reporter) buildReport() (DistributionReport, []Resource) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	var finishedResources []Resource
	out := DistributionReport{
		Reporter:            r.PodName,
		DataPlaneCount:      len(r.status),
		InProgressResources: map[string]int{},
	}
	// for every resource in flight
	for _, ipr := range r.inProgressResources {
		res := ipr.Resource
		key := res.String()
		// for every version (nonce) of the config currently in play
		for nonce, dataplanes := range r.reverseStatus {
			// check to see if this version of the config contains this version of the resource
			// it might be more optimal to provide for a full dump of the config at a certain version?
			dpVersion, err := r.ledger.GetPreviousValue(nonce, res.ToModelKey())
			if err == nil && dpVersion == res.Generation {
				// All dataplanes on this nonce have the tracked generation;
				// count them toward this resource's distribution.
				if _, ok := out.InProgressResources[key]; !ok {
					out.InProgressResources[key] = len(dataplanes)
				} else {
					out.InProgressResources[key] += len(dataplanes)
				}
			} else if err != nil {
				scope.Errorf("Encountered error retrieving version %s of key %s from Store: %v", nonce, key, err)
				continue
			} else if nonce == r.ledger.RootHash() {
				scope.Warnf("Cache appears to be missing latest version of %s", key)
			}
			if out.InProgressResources[key] >= out.DataPlaneCount {
				// if this resource is done reconciling, let's not worry about it anymore
				finishedResources = append(finishedResources, res)
				// deleting it here doesn't work because we have a read lock and are inside an iterator.
				// TODO: this will leak when a resource never reaches 100% before it is replaced.
				// TODO: do deletes propagate through this thing?
			}
		}
	}
	return out, finishedResources
}
// For efficiency, we don't want to be checking on resources that have already reached 100% distribution.
// When this happens, we remove them from our watch list.
//
// A resource is only dropped once it has stayed at 100% for a full minute's
// worth of report iterations, to tolerate transient flapping.
func (r *Reporter) removeCompletedResource(completedResources []Resource) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var toDelete []Resource
	for _, item := range completedResources {
		// Fix for the old "TODO: handle cache miss": the entry may already
		// have been removed (or was never tracked); the previous code
		// dereferenced a nil map value here and panicked.
		entry, ok := r.inProgressResources[item.ToModelKey()]
		if !ok || entry == nil {
			continue
		}
		total := entry.completedIterations + 1
		if int64(total) > (time.Minute.Milliseconds() / r.UpdateInterval.Milliseconds()) {
			// remove from inProgressResources
			toDelete = append(toDelete, item)
		} else {
			entry.completedIterations = total
		}
	}
	for _, resource := range toDelete {
		delete(r.inProgressResources, resource.ToModelKey())
	}
}
// This function must be called every time a resource change is detected by pilot. This allows us to lookup
// only the resources we expect to be in flight, not the ones that have already distributed
func (r *Reporter) AddInProgressResource(res config.Config) {
	tryLedgerPut(r.ledger, res)
	myRes := ResourceFromModelConfig(res)
	if myRes == nil {
		scope.Errorf("Unable to locate schema for %v, will not update status.", res)
		return
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	// (Re)start tracking with a fresh completed-iteration count.
	r.inProgressResources[myRes.ToModelKey()] = &inProgressEntry{
		Resource:            *myRes,
		completedIterations: 0,
	}
}

// DeleteInProgressResource stops tracking a removed config resource and
// notifies the distribution controller, when one is attached.
func (r *Reporter) DeleteInProgressResource(res config.Config) {
	tryLedgerDelete(r.ledger, res)
	if r.controller != nil {
		r.controller.configDeleted(res)
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.inProgressResources, res.Key())
}
// generate a distribution report and write it to a ConfigMap for the leader to read.
func (r *Reporter) writeReport(ctx context.Context) {
	report, finishedResources := r.buildReport()
	// Prune fully-distributed resources off the reporting path.
	go r.removeCompletedResource(finishedResources)
	// write to kubernetes here.
	reportbytes, err := yaml.Marshal(report)
	if err != nil {
		scope.Errorf("Error serializing Distribution Report: %v", err)
		return
	}
	r.cm.Data[dataField] = string(reportbytes)
	// TODO: short circuit this write in the leader
	_, err = CreateOrUpdateConfigMap(ctx, r.cm, r.client)
	if err != nil {
		scope.Errorf("Error writing Distribution Report: %v", err)
	}
}
// CreateOrUpdateConfigMap creates cm, falling back to an update when it
// already exists. Any other creation error is wrapped and returned.
// this is lifted with few modifications from kubeadm's apiclient
func CreateOrUpdateConfigMap(ctx context.Context, cm *corev1.ConfigMap, client v1.ConfigMapInterface) (res *corev1.ConfigMap, err error) {
	if res, err = client.Create(ctx, cm, metav1.CreateOptions{}); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			scope.Errorf("%v", err)
			return nil, errors.Wrap(err, "unable to create ConfigMap")
		}
		// Use the caller's ctx (not context.TODO()) so the fallback update is
		// canceled along with the rest of the operation.
		if res, err = client.Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
			return nil, errors.Wrap(err, "unable to update ConfigMap")
		}
	}
	return res, nil
}
// distributionEvent records a single dataplane ack: connection conID has
// acked nonce for the given distribution (event) type.
type distributionEvent struct {
	conID            string
	distributionType xds.EventType
	nonce            string
}

// QueryLastNonce returns the most recently acked nonce prefix for a
// connection/type pair, or the empty string when none has been recorded.
func (r *Reporter) QueryLastNonce(conID string, distributionType xds.EventType) (noncePrefix string) {
	key := conID + distributionType
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.status[key]
}
// Register that a dataplane has acknowledged a new version of the config.
// Theoretically, we could use the ads connections themselves to harvest this data,
// but the mutex there is pretty hot, and it seems best to trade memory for time.
func (r *Reporter) RegisterEvent(conID string, distributionType xds.EventType, nonce string) {
	// Skip unsupported event types. This ensures we do not leak memory for types
	// which may not be handled properly. For example, a type not in AllEventTypes
	// will not be properly unregistered.
	if _, f := xds.AllEventTypes[distributionType]; !f {
		return
	}
	d := distributionEvent{nonce: nonce, distributionType: distributionType, conID: conID}
	// Non-blocking send: dropping an ack is preferable to stalling the caller
	// when the queue is full.
	select {
	case r.distributionEventQueue <- d:
		return
	default:
		scope.Errorf("Distribution Event Queue overwhelmed, status will be invalid.")
	}
}

// readFromEventQueue drains acks from the queue into the status maps.
// It runs on its own goroutine (started by Init) and exits when Start's
// shutdown path closes the channel.
func (r *Reporter) readFromEventQueue() {
	for ev := range r.distributionEventQueue {
		// TODO might need to batch this to prevent lock contention
		r.processEvent(ev.conID, ev.distributionType, ev.nonce)
	}
}
// processEvent folds one dataplane ack into the forward (status) and reverse
// (reverseStatus) indexes, first unlinking the key from whatever nonce it was
// previously registered under.
func (r *Reporter) processEvent(conID string, distributionType xds.EventType, nonce string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	key := conID + distributionType // TODO: delimit?
	r.deleteKeyFromReverseMap(key)
	var version string
	// Nonces carry a version prefix; truncate to it. Use xds.VersionLen for
	// both the bound and the slice (the old code compared against a magic 12,
	// which could silently drift from xds.VersionLen).
	if len(nonce) > xds.VersionLen {
		version = nonce[:xds.VersionLen]
	} else {
		version = nonce
	}
	// touch
	r.status[key] = version
	if _, ok := r.reverseStatus[version]; !ok {
		r.reverseStatus[version] = make(map[string]struct{})
	}
	r.reverseStatus[version][key] = struct{}{}
}
// deleteKeyFromReverseMap keeps reverseStatus in step with status: it removes
// key from the nonce set it is currently registered under and drops the set
// once it becomes empty. Callers must hold the write lock.
func (r *Reporter) deleteKeyFromReverseMap(key string) {
	old, registered := r.status[key]
	if !registered {
		return
	}
	keys, present := r.reverseStatus[old]
	if !present {
		return
	}
	delete(keys, key)
	if len(keys) == 0 {
		delete(r.reverseStatus, old)
	}
}
// When a dataplane disconnects, we should no longer count it, nor expect it to ack config.
func (r *Reporter) RegisterDisconnect(conID string, types []xds.EventType) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, xdsType := range types {
		key := conID + xdsType // TODO: delimit?
		r.deleteKeyFromReverseMap(key)
		delete(r.status, key)
	}
}

// SetController attaches the distribution controller that should be notified
// of config deletions (see DeleteInProgressResource).
func (r *Reporter) SetController(controller *DistributionController) {
	r.controller = controller
}
|
/*
* Copyright (c) 2017 AlexRuzin (stan.ruzin@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package govfs
// TODO
// create() can either create a folder or a file.
// When a folder/file is created, make all subdirectories in the map as well
// https://golang.org/src/encoding/gob/example_test.go
/* TEST5
* Supports:
* [+] UTF=8 file names <- not yet
* [+] 2^128 files
* [+] o(1) seek/write time for metadata
* [+] There can be two files with the same name, but only if one is a directory
*/
import (
"os"
"crypto/md5"
"encoding/hex"
"encoding/gob"
"compress/gzip"
"bytes"
"sync"
"strings"
"github.com/AlexRuzin/cryptog"
"io"
"io/ioutil"
"github.com/AlexRuzin/util"
)
/*
 * Configurable constants
 */
const MAX_FILENAME_LENGTH int = 256
const FS_SIGNATURE string = "govfs_header" /* Cannot exceed 64 */
const STREAM_PAD_LEN int = 0 /* Length of the pad between two serialized RawFile structs */
const REMOVE_FS_HEADER bool = false /* Removes the header at the beginning of the serialized file - leave false */
const IRP_BASE int = 2 /* Start the IRP controller ID count from n */

// I/O request packet (IRP) operation codes consumed by the IO controller
// goroutine (see StartIOController).
const (
	IRP_PURGE  int = IRP_BASE + iota /* Flush the entire database and all files */
	IRP_DELETE int = IRP_BASE + iota /* Delete a file/folder */
	IRP_WRITE  int = IRP_BASE + iota /* Write data to a file */
	IRP_CREATE int = IRP_BASE + iota /* Create a new file or folder */
)

// FlagVal is the bitmask type used for file attributes and database options.
type FlagVal int

// File attribute and database option bit flags.
const (
	FLAG_FILE           FlagVal = 1 << iota
	FLAG_DIRECTORY      /* The target file is a directory */
	FLAG_COMPRESS       /* Compression on the fs serialized output */
	FLAG_ENCRYPT        /* Encryption on the fs serialized output */
	FLAG_DB_LOAD        /* Loads the database */
	FLAG_DB_CREATE      /* Creates the database */
	FLAG_COMPRESS_FILES /* Compresses files in the FS stream */
)

// FSHeader is the in-memory root of the virtual filesystem.
type FSHeader struct {
	filename    string                // backing file on disk used by UnmountDB/writeFsStream
	key         [16]byte              // NOTE(review): appears unused here; the crypto key comes from getFsKey() -- confirm
	meta        map[string]*govfsFile // file table keyed by s(name) (salted MD5 of the path)
	t_size      uint                  /* Total size of all files */
	io_in       chan *govfsIoBlock    // request channel consumed by the IO controller
	create_sync sync.Mutex            // serializes Create() calls
	flags       FlagVal               /* Generic flags as passed in by CreateDatabase() */
}

// govfsFile is the in-memory header for a single virtual file or directory.
type govfsFile struct {
	filename string
	flags    FlagVal /* FLAG_FILE, FLAG_DIRECTORY */
	datasum  string  // salted MD5 of data, maintained by writeInternal
	data     []byte
	lock     sync.Mutex // guards data during IRP_WRITE
}

// govfsIoBlock is an I/O request packet (IRP) exchanged with the IO controller.
type govfsIoBlock struct {
	file      *govfsFile
	name      string
	data      []byte
	status    error // operation result; nil on success
	operation int   /* 2 == purge, 3 == delete, 4 == write */
	flags     FlagVal
	io_out    chan *govfsIoBlock // response channel for this request
}

/*
 * Header which indicates the beginning of the raw filesystem file, written
 * to the disk.
 */
type rawStreamHeader struct {
	Signature string /* Uppercase so that it's "exported" i.e. visibile to the encoder */
	FileCount uint
}

/*
 * The meta header for each raw file
 * (govfsFile is the virtual, in-memory file header)
 */
type RawFile /* Export required for gob serializer */ struct {
	RawSum      string
	Flags       FlagVal
	Name        string
	UnzippedLen int
}
/*
 * Creates or loads a filesystem database file. If the filename is nil, then create a new database
 * otherwise try to load an existing fs database file.
 *
 * Flags: FLAG_ENCRYPT, FLAG_COMPRESS
 */
func CreateDatabase(name string, flags FlagVal) (*FSHeader, error) {
	var header *FSHeader
	if (flags & FLAG_DB_LOAD) > 0 {
		/* Check if the file exists */
		if _, err := os.Stat(name); !os.IsNotExist(err) {
			// Decrypt/decompress the raw stream, then rebuild the file table.
			raw, err := readFsStream(name, flags)
			if raw == nil || err != nil {
				return nil, err
			}
			header, err = loadHeader(raw, name)
			if header == nil || err != nil {
				return nil, err
			}
		}
	}
	if (flags & FLAG_DB_CREATE) > 0 {
		/* Either the raw fs does not exist, or it is invalid -- create new */
		header = &FSHeader{
			filename: name,
			meta:     make(map[string]*govfsFile),
		}
		/* Generate the standard "/" file */
		header.meta[s("/")] = new(govfsFile)
		header.meta[s("/")].filename = "/"
		header.t_size = 0
	}
	if header == nil {
		// Neither load nor create produced a usable header (e.g. FLAG_DB_LOAD
		// was set but the backing file does not exist).
		return nil, util.RetErrStr("Invalid header. Failed to generate database header")
	}
	header.flags = flags
	return header, nil
}
// StartIOController launches the goroutine that owns all mutating filesystem
// operations. Requests arrive as govfsIoBlock IRPs on io_in; each response is
// sent back on the request's io_out channel. IRP_PURGE terminates the
// controller.
func (f *FSHeader) StartIOController() error {
	var header *FSHeader = f
	/* i/o channel processor. Performs i/o to the filesystem */
	header.io_in = make(chan *govfsIoBlock)
	go func(f *FSHeader) {
		for {
			var ioh = <-header.io_in
			switch ioh.operation {
			case IRP_PURGE:
				/* PURGE */
				// Terminates the controller; no response IRP is sent back.
				ioh.status = util.RetErrStr("Purge command issued")
				close(header.io_in)
				return
			case IRP_DELETE:
				/* DELETE */
				// FIXME/ADDME
				ioh.status = util.RetErrStr("IRP_DELETE generic error")
				if ioh.file.filename == "/" { /* Cannot delete the root file */
					ioh.status = util.RetErrStr("IRP_DELETE: Tried to delete the root file")
					ioh.io_out <- ioh
				} else {
					if i := f.check(ioh.name); i != nil {
						delete(f.meta, s(ioh.name))
						f.meta[s(ioh.name)] = nil
						ioh.status = nil
					}
					ioh.io_out <- ioh
				}
			case IRP_WRITE:
				/* WRITE */
				// NOTE(review): when the target does not exist no response is
				// sent, which would block the requester; Write() checks
				// existence beforehand, so confirm before relying on this path.
				if i := f.check(ioh.name); i != nil {
					ioh.file.lock.Lock()
					if f.writeInternal(i, ioh.data) == len(ioh.data) {
						ioh.status = nil
						ioh.file.lock.Unlock()
						ioh.io_out <- ioh
					} else {
						ioh.status = util.RetErrStr("IRP_WRITE: Failed to write to filesystem")
						ioh.file.lock.Unlock()
						ioh.io_out <- ioh
					}
				}
			case IRP_CREATE:
				f.meta[s(ioh.name)] = new(govfsFile)
				ioh.file = f.meta[s(ioh.name)]
				ioh.file.filename = ioh.name
				// A trailing "/" marks a directory; anything else is a file.
				if string(ioh.name[len(ioh.name)-1:]) == "/" {
					ioh.file.flags |= FLAG_DIRECTORY
				} else {
					ioh.file.flags |= FLAG_FILE
				}
				/* Recursively create all subdirectory files */
				sub_strings := strings.Split(ioh.name, "/")
				sub_array := make([]string, len(sub_strings)-2)
				copy(sub_array, sub_strings[1:len(sub_strings)-1]) /* We do not need the first/last file */
				var tmp string = ""
				for e := range sub_array {
					tmp += "/" + sub_array[e]
					/* Create a subdirectory header */
					func(sub_directory string, f *FSHeader) {
						if f := f.check(sub_directory); f != nil {
							return /* There can exist two files with the same name,
							   as long as one is a directory and the other is a file */
						}
						f.meta[s(tmp)] = new(govfsFile)
						f.meta[s(tmp)].filename = sub_directory + "/" /* Explicit directory name */
						f.meta[s(tmp)].flags |= FLAG_DIRECTORY
					}(tmp, f)
				}
				ioh.status = nil
				ioh.io_out <- ioh
			}
		}
	}(header)
	return nil
}
// check returns the govfsFile registered under name, or nil when no such
// entry exists in the metadata table.
func (f *FSHeader) check(name string) *govfsFile {
	hash := s(name)
	if entry, ok := f.meta[hash]; ok && entry != nil {
		return entry
	}
	return nil
}
// generateIRP builds an I/O request packet for the given operation. For
// IRP_DELETE and IRP_WRITE the target file must already exist; nil is
// returned otherwise. The caller sends the IRP on io_in and awaits the
// response on the IRP's io_out channel.
func (f *FSHeader) generateIRP(name string, data []byte, irp_type int) *govfsIoBlock {
	switch irp_type {
	case IRP_DELETE:
		/* DELETE */
		var file_header = f.check(name)
		if file_header == nil {
			return nil /* ERROR -- deleting non-existant file */
		}
		irp := &govfsIoBlock{
			file:      file_header,
			name:      name,
			io_out:    make(chan *govfsIoBlock),
			operation: IRP_DELETE,
		}
		return irp
	case IRP_WRITE:
		/* WRITE */
		var file_header = f.check(name)
		if file_header == nil {
			return nil
		}
		// The payload is copied so the caller may reuse its buffer.
		irp := &govfsIoBlock{
			file:      file_header,
			name:      name,
			data:      make([]byte, len(data)),
			io_out:    make(chan *govfsIoBlock),
			operation: IRP_WRITE, /* write IRP request */
		}
		copy(irp.data, data)
		return irp
	case IRP_CREATE:
		/* CREATE IRP */
		irp := &govfsIoBlock{
			name:      name,
			operation: IRP_CREATE,
			io_out:    make(chan *govfsIoBlock),
		}
		return irp
	}
	return nil
}
// Create registers a new file (or directory, when name ends in "/") via the
// IO controller and returns its header. StartIOController must already be
// running, otherwise the send on io_in blocks forever.
func (f *FSHeader) Create(name string) (*govfsFile, error) {
	if file := f.check(name); file != nil {
		return nil, util.RetErrStr("create: File already exists")
	}
	if len(name) > MAX_FILENAME_LENGTH {
		return nil, util.RetErrStr("create: File name is too long")
	}
	// Serialize creations so concurrent Create calls cannot interleave.
	f.create_sync.Lock()
	var irp *govfsIoBlock = f.generateIRP(name, nil, IRP_CREATE)
	f.io_in <- irp
	output_irp := <-irp.io_out
	f.create_sync.Unlock()
	if output_irp.file == nil {
		return nil, output_irp.status
	}
	close(output_irp.io_out)
	return output_irp.file, nil
}
/*
 * Reader interface
 */
// Reader provides io.Reader-style sequential access to one virtual file.
type Reader struct {
	Name   string     // file name as registered in the filesystem
	File   *govfsFile // resolved file header
	Hdr    *FSHeader  // owning filesystem
	Offset int        // current read position, in bytes
}
// NewReader constructs a Reader positioned at offset 0 for the named file,
// or returns an error when the file is not present in the filesystem.
func (f *FSHeader) NewReader(name string) (*Reader, error) {
	target := f.check(name)
	if target == nil {
		return nil, util.RetErrStr("File not found")
	}
	return &Reader{
		Name:   name,
		File:   target,
		Hdr:    f,
		Offset: 0,
	}, nil
}
// Len returns the current length, in bytes, of the backing file's contents.
func (f *Reader) Len() (int) {
	return len(f.File.data)
}
// Read copies up to len(r) bytes of the file into r, advancing Offset so
// successive calls walk the file; io.EOF is returned alongside the final
// bytes. An uninitialized Reader or an empty file reads as (0, nil).
//
// Fixes the previous implementation, which ignored Offset (so partial reads
// could never resume), copied the wrong region (data[:len(data)-len(r)-1])
// and reported a byte count unrelated to what was stored in r.
func (f *Reader) Read(r []byte) (int, error) {
	if f.Name == "" || f.File == nil || len(f.File.data) < 1 {
		return 0, nil
	}
	data, err := f.Hdr.Read(f.Name)
	if err != nil || len(data) == 0 {
		return 0, err
	}
	if f.Offset >= len(data) {
		return 0, io.EOF
	}
	n := copy(r, data[f.Offset:])
	f.Offset += n
	if f.Offset >= len(data) {
		/* Everything remaining fit, so return EOF with the final bytes */
		return n, io.EOF
	}
	return n, nil
}
// Read returns a copy of the named file's contents. Directories cannot be
// read, and a nonexistent file is an error.
func (f *FSHeader) Read(name string) ([]byte, error) {
	target := f.check(name)
	if target == nil {
		return nil, util.RetErrStr("read: File does not exist")
	}
	if (target.flags & FLAG_DIRECTORY) > 0 {
		return nil, util.RetErrStr("read: Cannot read a directory")
	}
	duplicate := make([]byte, len(target.data))
	copy(duplicate, target.data)
	return duplicate, nil
}
// Delete removes the named file via an IRP_DELETE request to the IO
// controller. The root "/" cannot be deleted.
func (f *FSHeader) Delete(name string) error {
	irp := f.generateIRP(name, nil, IRP_DELETE)
	if irp == nil {
		return util.RetErrStr("delete: File does not exist") /* ERROR -- File does not exist */
	}
	f.io_in <- irp
	var output_irp = <-irp.io_out
	defer close(irp.io_out)
	return output_irp.status
}
/*
 * Writer interface
 */
// Writer provides io.Writer-style access to one virtual file.
type Writer struct {
	Name string     // file name as registered in the filesystem
	File *govfsFile // resolved file header
	Hdr  *FSHeader  // owning filesystem
}
// NewWriter constructs a Writer for the named file, or returns an error when
// the file is not present in the filesystem.
func (f *FSHeader) NewWriter(name string) (*Writer, error) {
	target := f.check(name)
	if target == nil {
		return nil, util.RetErrStr("File not found")
	}
	return &Writer{
		Name: name,
		File: target,
		Hdr:  f,
	}, nil
}
// Write replaces the backing file's contents with p via FSHeader.Write.
// NOTE(review): on success this returns io.EOF rather than nil, which
// deviates from the io.Writer contract (mirrors Reader.Read here); audit
// callers before changing.
func (f *Writer) Write(p []byte) (int, error) {
	if len(p) < 1 {
		return 0, util.RetErrStr("Invalid write stream length")
	}
	if err := f.Hdr.Write(f.Name, p); err != nil {
		return 0, err
	}
	return len(p), io.EOF
}
// Write replaces the named file's contents with d via an IRP_WRITE request
// to the IO controller. The file must already exist (see Create).
func (f *FSHeader) Write(name string, d []byte) error {
	if i := f.check(name); i == nil {
		return util.RetErrStr("write: Cannot write to nonexistent file")
	}
	irp := f.generateIRP(name, d, IRP_WRITE)
	if irp == nil {
		return util.RetErrStr("write: Failed to generate IRP_WRITE") /* FAILURE */
	}
	/*
	 * Send the write request IRP and receive the response
	 * IRP indicating the write status of the request
	 */
	f.io_in <- irp
	var output_irp = <-irp.io_out
	defer close(irp.io_out)
	return output_irp.status
}
// writeInternal replaces the contents of d with data, keeps the filesystem's
// running byte total (t_size) in sync, and refreshes the file checksum.
// It returns the number of bytes stored; a zero-length write is a no-op.
func (f *FSHeader) writeInternal(d *govfsFile, data []byte) int {
	if len(data) == 0 {
		return 0
	}
	newLen, oldLen := uint(len(data)), uint(len(d.data))
	if newLen >= oldLen {
		f.t_size += newLen - oldLen
	} else {
		f.t_size -= oldLen - newLen
	}
	d.data = make([]byte, len(data))
	copy(d.data, data)
	d.datasum = s(string(data))
	return len(d.data)
}
// UnmountDB serializes the whole in-memory filesystem to the backing file.
// Each file's RawFile metadata and (optionally gzip'd) contents are
// gob-encoded concurrently and concatenated behind a rawStreamHeader, then
// the stream is compressed/encrypted per f.flags by writeFsStream.
//
// Flags: FLAG_COMPRESS_FILES compresses each file's data individually.
func (f *FSHeader) UnmountDB(flags FlagVal /* FLAG_COMPRESS_FILES */) error {
	type comp_data struct {
		file *govfsFile
		raw  RawFile
	}
	commit_ch := make(chan bytes.Buffer)
	for k := range f.meta {
		var channel_header comp_data
		channel_header.file = f.meta[k]
		channel_header.raw = RawFile{
			Flags:       f.meta[k].flags,
			RawSum:      f.meta[k].datasum,
			Name:        f.meta[k].filename,
			UnzippedLen: 0,
		}
		go func(d *comp_data) {
			// The root "/" is implicit and never serialized.
			if d.file.filename == "/" {
				return
			}
			var data_stream []byte
			if (d.file.flags&FLAG_FILE) > 0 && len(d.file.data) > 0 {
				d.raw.UnzippedLen = len(d.file.data)
				if (flags & FLAG_COMPRESS_FILES) > 0 {
					d.raw.Flags |= FLAG_COMPRESS_FILES
					var zip_buf = bytes.NewBuffer(nil)
					gzip_writer := gzip.NewWriter(zip_buf)
					gzip_writer.Write(d.file.data)
					// BUG FIX: Close() must run before the compressed bytes
					// are consumed -- it flushes the final deflate block and
					// the gzip footer. The old code read zip_buf first,
					// producing a truncated stream.
					gzip_writer.Close()
					data_stream = make([]byte, zip_buf.Len())
					copy(data_stream, zip_buf.Bytes())
				} else {
					data_stream = make([]byte, d.raw.UnzippedLen)
					copy(data_stream, d.file.data)
				}
			}
			// gob-encode the metadata record, then append the raw payload.
			var output = bytes.Buffer{}
			enc := gob.NewEncoder(&output)
			enc.Encode(d.raw)
			if len(data_stream) > 0 {
				output.Write(data_stream)
			}
			commit_ch <- output
		}(&channel_header)
	}
	/* Do not count "/" as a file, since it is not sent in channel */
	total_files := f.GetFileCount() - 1
	/*
	 * Generate the primary filesystem header and write it to the fs_stream
	 */
	hdr := rawStreamHeader{
		Signature: FS_SIGNATURE, /* This signature may be modified in the configuration -- FIXME */
		FileCount: total_files}
	/* Serializer for fs_header */
	var stream *bytes.Buffer
	if REMOVE_FS_HEADER != true {
		stream = func(hdr rawStreamHeader) *bytes.Buffer {
			b := new(bytes.Buffer)
			e := gob.NewEncoder(b)
			if err := e.Encode(hdr); err != nil {
				return nil /* Failure in encoding the fs_header structure -- Should not happen */
			}
			return b
		}(hdr)
	} else {
		stream = new(bytes.Buffer)
	}
	/* serialized RawFile metadata includes the gzip'd file data, if necessary */
	for total_files != 0 {
		var meta_raw = <-commit_ch
		stream.Write(meta_raw.Bytes())
		total_files -= 1
	}
	close(commit_ch)
	/* Compress, encrypt, and write stream */
	written, err := f.writeFsStream(f.filename, stream, f.flags)
	if err != nil || int(written) == 0 {
		return util.RetErrStr("Failure in writing raw fs stream")
	}
	return err
}
// loadHeader rebuilds an FSHeader from a decrypted/decompressed raw stream
// (see readFsStream): it validates the stream signature (unless
// REMOVE_FS_HEADER), then decodes each RawFile record, inflating per-file
// gzip payloads and verifying checksums as it goes.
func loadHeader(data []byte, filename string) (*FSHeader, error) {
	ptr := bytes.NewBuffer(data) /* raw file stream */
	if REMOVE_FS_HEADER != true {
		header, err := func(p *bytes.Buffer) (*rawStreamHeader, error) {
			output := new(rawStreamHeader)
			d := gob.NewDecoder(p)
			if err := d.Decode(output); err != nil {
				return nil, err
			}
			return output, nil
		}(ptr)
		if err != nil || header == nil || header.Signature != FS_SIGNATURE {
			return nil, err
		}
	}
	output := &FSHeader{
		filename: filename,
		meta:     make(map[string]*govfsFile),
	}
	output.meta[s("/")] = new(govfsFile)
	output.meta[s("/")].filename = "/"
	/* Enumerate files */
	for {
		if ptr.Len() == 0 {
			break
		}
		file_hdr, err := func(p *bytes.Buffer) (*RawFile, error) {
			output := &RawFile{}
			d := gob.NewDecoder(p)
			err := d.Decode(output)
			if err != nil && err != io.EOF {
				return nil, err
			}
			// Rewind any inter-record padding consumed by the decoder.
			for i := STREAM_PAD_LEN; i != 0; i -= 1 {
				p.UnreadByte()
			}
			return output, nil
		}(ptr)
		if err != nil {
			return nil, err
		}
		output.meta[s(file_hdr.Name)] = &govfsFile{
			filename: file_hdr.Name,
			flags:    file_hdr.Flags,
			data:     nil,
			datasum:  "",
		}
		if file_hdr.UnzippedLen > 0 {
			output.meta[s(file_hdr.Name)].datasum = file_hdr.RawSum
			// NOTE(review): UnzippedLen bytes are consumed from the stream
			// even for compressed payloads, whose on-disk length generally
			// differs -- confirm against the writer's record layout.
			var raw_file_data = make([]byte, file_hdr.UnzippedLen)
			ptr.Read(raw_file_data)
			if (file_hdr.Flags & FLAG_COMPRESS_FILES) > 0 {
				var data_ptr *[]byte = &output.meta[s(file_hdr.Name)].data
				*data_ptr = make([]byte, file_hdr.UnzippedLen)
				zipped := bytes.NewBuffer(raw_file_data)
				gzipd, err := gzip.NewReader(zipped)
				if err != nil {
					// BUG FIX: do not Close a nil reader when construction fails.
					return nil, err
				}
				// BUG FIX: the reader was previously Close()d *before* Read,
				// so decompression could never succeed. Read fully, then close.
				decompressed_len, err := io.ReadFull(gzipd, *data_ptr)
				gzipd.Close()
				if decompressed_len != file_hdr.UnzippedLen || err != nil {
					return nil, err
				}
				output.t_size += uint(decompressed_len)
			} else {
				output.meta[s(file_hdr.Name)].data = make([]byte, file_hdr.UnzippedLen)
				copy(output.meta[s(file_hdr.Name)].data, raw_file_data)
				output.t_size += uint(file_hdr.UnzippedLen)
			}
			/* Verifiy sums */
			if sum := s(string(output.meta[s(file_hdr.Name)].data)); sum != output.meta[s(file_hdr.Name)].datasum {
				return nil, util.RetErrStr("Invalid file sum")
			}
		}
	}
	return output, nil
}
/*
 * Generate the key used to encrypt/decrypt the raw fs table. The key is
 * composed of the MD5 sum of the hostname concatenated with FS_SIGNATURE.
 */
func getFsKey() []byte {
	hostname, _ := os.Hostname()
	digest := md5.Sum([]byte(hostname + FS_SIGNATURE))
	return append([]byte(nil), digest[:]...)
}
/*
 * Decrypts the raw fs stream from a filename, decompresses it, and returns a vector composed of the
 * serialized fs table. Since no FSHeader exists yet, this method will not be apart of that
 * structure, as per design choice
 */
func readFsStream(name string, flags FlagVal) ([]byte, error) {
	if _, err := os.Stat(name); os.IsNotExist(err) {
		return nil, err
	}
	raw_file, err := ioutil.ReadFile(name)
	if err != nil {
		return nil, err
	}
	var plaintext []byte
	if (flags & FLAG_ENCRYPT) > 0 {
		/* The crypto key is composed of the MD5 of the hostname + the FS_SIGNATURE */
		key := getFsKey()
		plaintext, err = cryptog.RC4_Decrypt(raw_file, &key)
		if err != nil {
			return nil, err
		}
	} else {
		plaintext = make([]byte, len(raw_file))
		copy(plaintext, raw_file)
	}
	var decompressed []byte
	if (flags & FLAG_COMPRESS) > 0 {
		// BUG FIX: the old code called b.Read(plaintext) on an empty
		// bytes.Buffer, which reads *from* the buffer rather than loading
		// plaintext into it, so gzip always saw an empty stream. Wrap the
		// plaintext in a reader instead, and check the gzip error before
		// deferring Close (previously a nil reader could be closed).
		reader, err := gzip.NewReader(bytes.NewReader(plaintext))
		if err != nil {
			return nil, err
		}
		defer reader.Close()
		decompressed, err = ioutil.ReadAll(reader)
		if err != nil {
			return nil, err
		}
	} else {
		decompressed = make([]byte, len(plaintext))
		copy(decompressed, plaintext)
	}
	return decompressed, nil
}
/*
 * Takes in the serialized fs table, compresses it, encrypts it and writes it to the disk
 */
func (f *FSHeader) writeFsStream(name string, data *bytes.Buffer, flags FlagVal) (uint, error) {
	var compressed = new(bytes.Buffer)
	if (flags & FLAG_COMPRESS) > 0 {
		w := gzip.NewWriter(compressed)
		w.Write(data.Bytes())
		w.Close()
	} else {
		compressed.Write(data.Bytes())
	}
	var ciphertext []byte
	if (flags & FLAG_ENCRYPT) > 0 {
		/* The crypto key will be the MD5 of the hostname string + the FS_SIGNATURE string */
		key := getFsKey()
		/* Perform RC4 encryption */
		var err error
		// BUG FIX: encrypt the *compressed* stream, not the raw input.
		// Previously FLAG_COMPRESS|FLAG_ENCRYPT encrypted uncompressed data
		// while readFsStream decrypts and then decompresses, so the
		// round-trip could never succeed.
		ciphertext, err = cryptog.RC4_Encrypt(compressed.Bytes(), &key)
		if err != nil {
			return 0, err
		}
	} else {
		ciphertext = make([]byte, compressed.Len())
		copy(ciphertext, compressed.Bytes())
	}
	// Remove any stale copy before recreating. (The old os.IsExist(err) test
	// never fired, because Stat returns a nil error for an existing file.)
	if _, err := os.Stat(name); err == nil {
		os.Remove(name)
	}
	file, err := os.Create(name)
	if err != nil {
		return 0, err
	}
	defer file.Close()
	written, err := file.Write(ciphertext)
	if err != nil {
		return uint(written), err
	}
	return uint(written), nil
}
// GetFileCount reports how many entries (files and directories, including
// the implicit root "/") currently exist in the metadata table.
func (f *FSHeader) GetFileCount() uint {
	return uint(len(f.meta))
}
// GetFileSize returns the size in bytes of the named file, or an error when
// no such file exists.
func (f *FSHeader) GetFileSize(name string) (uint, error) {
	target := f.check(name)
	if target == nil {
		return 0, util.RetErrStr("GetFileSize: File does not exist")
	}
	return uint(len(target.data)), nil
}
// GetTotalFilesizes returns the running total, in bytes, of all file
// contents (maintained by writeInternal and loadHeader).
func (f *FSHeader) GetTotalFilesizes() uint {
	return f.t_size
}
// GetFileList returns a human-readable listing of every entry in the
// filesystem, each prefixed with "(DIR) " or "(FILE) ". Order follows map
// iteration and is therefore unspecified.
func (f *FSHeader) GetFileList() []string {
	var listing []string
	for _, entry := range f.meta {
		prefix := "(FILE) "
		if (entry.flags & FLAG_DIRECTORY) > 0 {
			prefix = "(DIR) "
		}
		listing = append(listing, prefix+entry.filename)
	}
	return listing
}
/* Returns an md5sum of a string */
// s hashes name salted with the constant suffix "gofs_magic" and returns the
// hex-encoded MD5 digest; it is the canonical key into FSHeader.meta.
func s(name string) string {
	seeded := []byte(name + "gofs_magic")
	digest := md5.Sum(seeded)
	return hex.EncodeToString(digest[:])
}
/* EOF */
All IRP_n operations now use the FlagVal datatype instead of the primitive int type.
/*
* Copyright (c) 2017 AlexRuzin (stan.ruzin@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package govfs
// TODO
// create() can either create a folder or a file.
// When a folder/file is created, make all subdirectories in the map as well
// https://golang.org/src/encoding/gob/example_test.go
/* TEST5
* Supports:
* [+] UTF=8 file names <- not yet
* [+] 2^128 files
* [+] o(1) seek/write time for metadata
* [+] There can be two files with the same name, but only if one is a directory
*/
import (
"os"
"crypto/md5"
"encoding/hex"
"encoding/gob"
"compress/gzip"
"bytes"
"sync"
"strings"
"github.com/AlexRuzin/cryptog"
"io"
"io/ioutil"
"github.com/AlexRuzin/util"
)
/*
 * Configurable constants
 */
const MAX_FILENAME_LENGTH int = 256
const FS_SIGNATURE string = "govfs_header" /* Cannot exceed 64 */
const STREAM_PAD_LEN int = 0 /* Length of the pad between two serialized RawFile structs */
const REMOVE_FS_HEADER bool = false /* Removes the header at the beginning of the serialized file - leave false */

// FlagVal is the bitmask/enum type shared by the file attribute flags,
// database options, and the IRP operation codes below.
type FlagVal int

const IRP_BASE FlagVal = 2 /* Start the IRP controller ID count from n */

// I/O request packet (IRP) operation codes consumed by the IO controller
// goroutine (see StartIOController).
const (
	IRP_PURGE FlagVal = IRP_BASE + iota /* Flush the entire database and all files */
	IRP_DELETE                          /* Delete a file/folder */
	IRP_WRITE                           /* Write data to a file */
	IRP_CREATE                          /* Create a new file or folder */
)

// File attribute and database option bit flags.
const (
	FLAG_FILE           FlagVal = 1 << iota
	FLAG_DIRECTORY      /* The target file is a directory */
	FLAG_COMPRESS       /* Compression on the fs serialized output */
	FLAG_ENCRYPT        /* Encryption on the fs serialized output */
	FLAG_DB_LOAD        /* Loads the database */
	FLAG_DB_CREATE      /* Creates the database */
	FLAG_COMPRESS_FILES /* Compresses files in the FS stream */
)

// FSHeader is the in-memory root of the virtual filesystem.
type FSHeader struct {
	filename    string                // backing file on disk used by UnmountDB/writeFsStream
	key         [16]byte              // NOTE(review): appears unused here; the crypto key comes from getFsKey() -- confirm
	meta        map[string]*govfsFile // file table keyed by s(name) (salted MD5 of the path)
	t_size      uint                  /* Total size of all files */
	io_in       chan *govfsIoBlock    // request channel consumed by the IO controller
	create_sync sync.Mutex            // serializes Create() calls
	flags       FlagVal               /* Generic flags as passed in by CreateDatabase() */
}

// govfsFile is the in-memory header for a single virtual file or directory.
type govfsFile struct {
	filename string
	flags    FlagVal /* FLAG_FILE, FLAG_DIRECTORY */
	datasum  string  // salted MD5 of data, maintained by writeInternal
	data     []byte
	lock     sync.Mutex // guards data during IRP_WRITE
}

// govfsIoBlock is an I/O request packet (IRP) exchanged with the IO controller.
type govfsIoBlock struct {
	file      *govfsFile
	name      string
	data      []byte
	status    error   // operation result; nil on success
	operation FlagVal /* 2 == purge, 3 == delete, 4 == write */
	flags     FlagVal
	io_out    chan *govfsIoBlock // response channel for this request
}

/*
 * Header which indicates the beginning of the raw filesystem file, written
 * to the disk.
 */
type rawStreamHeader struct {
	Signature string /* Uppercase so that it's "exported" i.e. visibile to the encoder */
	FileCount uint
}

/*
 * The meta header for each raw file
 * (govfsFile is the virtual, in-memory file header)
 */
type RawFile /* Export required for gob serializer */ struct {
	RawSum      string
	Flags       FlagVal
	Name        string
	UnzippedLen int
}
/*
 * Creates or loads a filesystem database file. If the filename is nil, then create a new database
 * otherwise try to load an existing fs database file.
 *
 * Flags: FLAG_ENCRYPT, FLAG_COMPRESS
 */
func CreateDatabase(name string, flags FlagVal) (*FSHeader, error) {
	var header *FSHeader
	if (flags & FLAG_DB_LOAD) > 0 {
		/* Check if the file exists */
		if _, err := os.Stat(name); !os.IsNotExist(err) {
			// Decrypt/decompress the raw stream, then rebuild the file table.
			raw, err := readFsStream(name, flags)
			if raw == nil || err != nil {
				return nil, err
			}
			header, err = loadHeader(raw, name)
			if header == nil || err != nil {
				return nil, err
			}
		}
	}
	if (flags & FLAG_DB_CREATE) > 0 {
		/* Either the raw fs does not exist, or it is invalid -- create new */
		header = &FSHeader{
			filename: name,
			meta:     make(map[string]*govfsFile),
		}
		/* Generate the standard "/" file */
		header.meta[s("/")] = new(govfsFile)
		header.meta[s("/")].filename = "/"
		header.t_size = 0
	}
	if header == nil {
		// Neither load nor create produced a usable header (e.g. FLAG_DB_LOAD
		// was set but the backing file does not exist).
		return nil, util.RetErrStr("Invalid header. Failed to generate database header")
	}
	header.flags = flags
	return header, nil
}
// StartIOController launches the goroutine that owns all mutating filesystem
// operations. Requests arrive as govfsIoBlock IRPs on io_in; each response is
// sent back on the request's io_out channel. IRP_PURGE terminates the
// controller.
func (f *FSHeader) StartIOController() error {
	var header *FSHeader = f
	/* i/o channel processor. Performs i/o to the filesystem */
	header.io_in = make(chan *govfsIoBlock)
	go func(f *FSHeader) {
		for {
			var ioh = <-header.io_in
			switch ioh.operation {
			case IRP_PURGE:
				/* PURGE */
				// Terminates the controller; no response IRP is sent back.
				ioh.status = util.RetErrStr("Purge command issued")
				close(header.io_in)
				return
			case IRP_DELETE:
				/* DELETE */
				// FIXME/ADDME
				ioh.status = util.RetErrStr("IRP_DELETE generic error")
				if ioh.file.filename == "/" { /* Cannot delete the root file */
					ioh.status = util.RetErrStr("IRP_DELETE: Tried to delete the root file")
					ioh.io_out <- ioh
				} else {
					if i := f.check(ioh.name); i != nil {
						delete(f.meta, s(ioh.name))
						f.meta[s(ioh.name)] = nil
						ioh.status = nil
					}
					ioh.io_out <- ioh
				}
			case IRP_WRITE:
				/* WRITE */
				// NOTE(review): when the target does not exist no response is
				// sent, which would block the requester; Write() checks
				// existence beforehand, so confirm before relying on this path.
				if i := f.check(ioh.name); i != nil {
					ioh.file.lock.Lock()
					if f.writeInternal(i, ioh.data) == len(ioh.data) {
						ioh.status = nil
						ioh.file.lock.Unlock()
						ioh.io_out <- ioh
					} else {
						ioh.status = util.RetErrStr("IRP_WRITE: Failed to write to filesystem")
						ioh.file.lock.Unlock()
						ioh.io_out <- ioh
					}
				}
			case IRP_CREATE:
				f.meta[s(ioh.name)] = new(govfsFile)
				ioh.file = f.meta[s(ioh.name)]
				ioh.file.filename = ioh.name
				// A trailing "/" marks a directory; anything else is a file.
				if string(ioh.name[len(ioh.name)-1:]) == "/" {
					ioh.file.flags |= FLAG_DIRECTORY
				} else {
					ioh.file.flags |= FLAG_FILE
				}
				/* Recursively create all subdirectory files */
				sub_strings := strings.Split(ioh.name, "/")
				sub_array := make([]string, len(sub_strings)-2)
				copy(sub_array, sub_strings[1:len(sub_strings)-1]) /* We do not need the first/last file */
				var tmp string = ""
				for e := range sub_array {
					tmp += "/" + sub_array[e]
					/* Create a subdirectory header */
					func(sub_directory string, f *FSHeader) {
						if f := f.check(sub_directory); f != nil {
							return /* There can exist two files with the same name,
							   as long as one is a directory and the other is a file */
						}
						f.meta[s(tmp)] = new(govfsFile)
						f.meta[s(tmp)].filename = sub_directory + "/" /* Explicit directory name */
						f.meta[s(tmp)].flags |= FLAG_DIRECTORY
					}(tmp, f)
				}
				ioh.status = nil
				ioh.io_out <- ioh
			}
		}
	}(header)
	return nil
}
// check looks up the in-memory header for `name` (keyed by its hashed
// form) and returns it, or nil when no such entry exists.
func (f *FSHeader) check(name string) *govfsFile {
	file, ok := f.meta[s(name)]
	if !ok {
		return nil
	}
	return file
}
/*
 * generateIRP builds an I/O request packet for the controller goroutine.
 * IRP_DELETE and IRP_WRITE require the target file to already exist;
 * IRP_CREATE does not. Returns nil when the request cannot be built
 * (missing file, or an unknown request type).
 */
func (f *FSHeader) generateIRP(name string, data []byte, irp_type FlagVal) *govfsIoBlock {
	switch irp_type {
	case IRP_DELETE:
		/* DELETE -- target must exist */
		hdr := f.check(name)
		if hdr == nil {
			return nil /* ERROR -- deleting non-existant file */
		}
		return &govfsIoBlock{
			file:      hdr,
			name:      name,
			io_out:    make(chan *govfsIoBlock),
			operation: IRP_DELETE,
		}
	case IRP_WRITE:
		/* WRITE -- target must exist; the payload is copied so the caller
		 * may reuse its buffer immediately */
		hdr := f.check(name)
		if hdr == nil {
			return nil
		}
		payload := make([]byte, len(data))
		copy(payload, data)
		return &govfsIoBlock{
			file:      hdr,
			name:      name,
			data:      payload,
			io_out:    make(chan *govfsIoBlock),
			operation: IRP_WRITE, /* write IRP request */
		}
	case IRP_CREATE:
		/* CREATE IRP -- the controller allocates the file header itself */
		return &govfsIoBlock{
			name:      name,
			operation: IRP_CREATE,
			io_out:    make(chan *govfsIoBlock),
		}
	}
	return nil /* Unknown IRP type */
}
// Create registers a new file (or directory, when `name` ends in "/") by
// dispatching an IRP_CREATE request to the I/O controller goroutine and
// waiting for its reply. Returns the freshly created file header, or an
// error when the name is already taken or exceeds MAX_FILENAME_LENGTH.
// create_sync serializes concurrent Create calls around the handshake.
func (f *FSHeader) Create(name string) (*govfsFile, error) {
	if file := f.check(name); file != nil {
		return nil, util.RetErrStr("create: File already exists")
	}
	if len(name) > MAX_FILENAME_LENGTH {
		return nil, util.RetErrStr("create: File name is too long")
	}
	f.create_sync.Lock()
	var irp *govfsIoBlock = f.generateIRP(name, nil, IRP_CREATE)
	f.io_in <- irp
	output_irp := <-irp.io_out
	f.create_sync.Unlock()
	if output_irp.file == nil {
		// NOTE(review): irp.io_out is not closed on this failure path --
		// confirm whether that is intentional (channels are GC'd anyway).
		return nil, output_irp.status
	}
	close(output_irp.io_out)
	return output_irp.file, nil
}
/*
 * Reader interface
 */

// Reader provides io.Reader-style sequential access over a single
// virtual file inside an FSHeader.
type Reader struct {
	Name   string     // Plaintext file name used for lookups
	File   *govfsFile // Cached header of the file being read
	Hdr    *FSHeader  // Owning filesystem
	Offset int        // Running count of bytes handed out so far
}
// NewReader constructs a Reader positioned at the start of file `name`.
// Returns an error when the file is not present in the filesystem.
func (f *FSHeader) NewReader(name string) (*Reader, error) {
	file := f.check(name)
	if file == nil {
		return nil, util.RetErrStr("File not found")
	}
	return &Reader{
		Name:   name,
		File:   file,
		Hdr:    f,
		Offset: 0,
	}, nil
}
// Len returns the current size in bytes of the underlying file's data.
func (f *Reader) Len() (int) {
	return len(f.File.data)
}
// Read implements io.Reader for a virtual file. It serves successive
// chunks of the file data starting at f.Offset, advances the offset by
// the number of bytes copied, and reports io.EOF once the end of the
// data has been reached. The contents are re-fetched from the owning
// FSHeader on each call via Hdr.Read, so a concurrent write is observed
// on the next Read.
//
// Bug fix: the previous implementation never consulted f.Offset when
// copying (so every call re-read the start of the file), copied the
// wrong slice (data[:len(data)-len(r)-1]) and returned that same
// off-by-one count instead of the number of bytes actually copied.
func (f *Reader) Read(r []byte) (int, error) {
	if f.Name == "" || f.File == nil || len(f.File.data) < 1 {
		return 0, nil
	}
	data, err := f.Hdr.Read(f.Name)
	if err != nil || len(data) == 0 {
		return 0, err
	}
	// Everything was consumed by earlier calls.
	if f.Offset >= len(data) {
		return 0, io.EOF
	}
	n := copy(r, data[f.Offset:])
	f.Offset += n
	if f.Offset >= len(data) {
		// Final chunk: return the bytes together with EOF, as permitted
		// by the io.Reader contract.
		return n, io.EOF
	}
	return n, nil
}
/*
 * Read returns a private copy of the contents of file `name`.
 * A missing file is an error, and directories cannot be read.
 */
func (f *FSHeader) Read(name string) ([]byte, error) {
	hdr := f.check(name)
	switch {
	case hdr == nil:
		return nil, util.RetErrStr("read: File does not exist")
	case (hdr.flags & FLAG_DIRECTORY) > 0:
		return nil, util.RetErrStr("read: Cannot read a directory")
	}
	out := make([]byte, len(hdr.data))
	copy(out, hdr.data)
	return out, nil
}
// Delete removes the named file by sending an IRP_DELETE request to the
// I/O controller goroutine and returning the status it reports.
// Deleting a nonexistent file, or the root "/", is an error.
func (f *FSHeader) Delete(name string) error {
	irp := f.generateIRP(name, nil, IRP_DELETE)
	if irp == nil {
		return util.RetErrStr("delete: File does not exist") /* ERROR -- File does not exist */
	}
	f.io_in <- irp
	var output_irp = <-irp.io_out
	defer close(irp.io_out)
	return output_irp.status
}
/*
 * Writer interface
 */

// Writer provides io.Writer-style access over a single virtual file;
// each Write replaces the file's entire contents.
type Writer struct {
	Name string     // Plaintext file name used for lookups
	File *govfsFile // Cached header of the file being written
	Hdr  *FSHeader  // Owning filesystem
}
// NewWriter constructs a Writer over the existing file `name`.
// Returns an error when the file is not present in the filesystem.
func (f *FSHeader) NewWriter(name string) (*Writer, error) {
	file := f.check(name)
	if file == nil {
		return nil, util.RetErrStr("File not found")
	}
	return &Writer{
		Name: name,
		File: file,
		Hdr:  f,
	}, nil
}
// Write implements io.Writer for a virtual file. The whole buffer p is
// forwarded to FSHeader.Write, which replaces the file's contents.
// An empty buffer is rejected.
//
// Bug fix: the previous implementation returned io.EOF on success, which
// violates the io.Writer contract (any non-nil error makes callers such
// as io.Copy abort the transfer). A successful write now returns nil.
func (f *Writer) Write(p []byte) (int, error) {
	if len(p) < 1 {
		return 0, util.RetErrStr("Invalid write stream length")
	}
	if err := f.Hdr.Write(f.Name, p); err != nil {
		return 0, err
	}
	return len(p), nil
}
// Write replaces the contents of the named file with d by dispatching an
// IRP_WRITE request to the I/O controller goroutine and waiting for the
// response IRP that carries the final status.
//
// NOTE(review): the existence check below races with the controller; if
// the file disappears before the IRP is serviced, the controller sends
// no response and this call blocks on io_out -- confirm.
func (f *FSHeader) Write(name string, d []byte) error {
	if i := f.check(name); i == nil {
		return util.RetErrStr("write: Cannot write to nonexistent file")
	}
	irp := f.generateIRP(name, d, IRP_WRITE)
	if irp == nil {
		return util.RetErrStr("write: Failed to generate IRP_WRITE") /* FAILURE */
	}
	/*
	 * Send the write request IRP and receive the response
	 * IRP indicating the write status of the request
	 */
	f.io_in <- irp
	var output_irp = <-irp.io_out
	defer close(irp.io_out)
	return output_irp.status
}
/*
 * writeInternal replaces the file's backing data with a copy of `data`,
 * keeping the filesystem-wide byte counter t_size in sync, and refreshes
 * the file checksum. Returns the number of bytes stored; a zero-length
 * write is a no-op that leaves the file untouched.
 */
func (f *FSHeader) writeInternal(d *govfsFile, data []byte) int {
	n := len(data)
	if n == 0 {
		return 0
	}
	/* Adjust the global size counter by the delta between old and new */
	old := uint(len(d.data))
	if uint(n) >= old {
		f.t_size += uint(n) - old
	} else {
		f.t_size -= old - uint(n)
	}
	buf := make([]byte, n)
	copy(buf, data)
	d.data = buf
	d.datasum = s(string(data))
	return len(d.data)
}
// UnmountDB serializes the in-memory filesystem to its backing file.
// Every entry except the root "/" is gob-encoded as a RawFile record --
// optionally gzip-compressed -- by its own goroutine; the records are
// gathered over commit_ch and the combined stream is handed to
// writeFsStream for optional compression/encryption and disk I/O.
func (f *FSHeader) UnmountDB(flags FlagVal /* FLAG_COMPRESS_FILES */) error {
	// comp_data pairs a live file header with the serializable RawFile
	// record derived from it.
	type comp_data struct {
		file *govfsFile
		raw  RawFile
	}
	commit_ch := make(chan bytes.Buffer)
	for k := range f.meta {
		var channel_header comp_data
		channel_header.file = f.meta[k]
		channel_header.raw = RawFile{
			Flags:       f.meta[k].flags,
			RawSum:      f.meta[k].datasum,
			Name:        f.meta[k].filename,
			UnzippedLen: 0,
		}
		go func(d *comp_data) {
			if d.file.filename == "/" {
				// The root entry is implicit and never serialized.
				return
			}
			var data_stream []byte
			if (d.file.flags&FLAG_FILE) > 0 && len(d.file.data) > 0 {
				d.raw.UnzippedLen = len(d.file.data)
				if (flags & FLAG_COMPRESS_FILES) > 0 {
					d.raw.Flags |= FLAG_COMPRESS_FILES
					var zip_buf = bytes.NewBuffer(nil)
					gzip_writer := gzip.NewWriter(zip_buf)
					gzip_writer.Write(d.file.data)
					// NOTE(review): the gzip stream is read out of zip_buf
					// BEFORE gzip_writer.Close() flushes it, so the copied
					// stream may be truncated; Close (or at least Flush)
					// should precede the read. Verify against loadHeader.
					gzipped := bytes.Buffer{}
					gzipped.ReadFrom(zip_buf)
					data_stream = make([]byte, gzipped.Len())
					copy(data_stream, gzipped.Bytes())
					gzip_writer.Close()
				} else {
					data_stream = make([]byte, d.raw.UnzippedLen)
					copy(data_stream, d.file.data)
				}
			}
			var output = bytes.Buffer{}
			enc := gob.NewEncoder(&output)
			enc.Encode(d.raw)
			if len(data_stream) > 0 {
				output.Write(data_stream)
			}
			commit_ch <- output
		}(&channel_header)
	}
	/* Do not count "/" as a file, since it is not sent in channel */
	total_files := f.GetFileCount() - 1
	/*
	 * Generate the primary filesystem header and write it to the fs_stream
	 */
	hdr := rawStreamHeader{
		Signature: FS_SIGNATURE, /* This signature may be modified in the configuration -- FIXME */
		FileCount: total_files}
	/* Serializer for fs_header */
	var stream *bytes.Buffer
	if REMOVE_FS_HEADER != true {
		stream = func(hdr rawStreamHeader) *bytes.Buffer {
			b := new(bytes.Buffer)
			e := gob.NewEncoder(b)
			if err := e.Encode(hdr); err != nil {
				return nil /* Failure in encoding the fs_header structure -- Should not happen */
			}
			return b
		}(hdr)
	} else {
		stream = new(bytes.Buffer)
	}
	/* serialized RawFile metadata includes the gzip'd file data, if necessary */
	// Records arrive in goroutine-completion order; each record is
	// self-describing, so ordering does not matter to loadHeader.
	for total_files != 0 {
		var meta_raw = <-commit_ch
		stream.Write(meta_raw.Bytes())
		total_files -= 1
	}
	close(commit_ch)
	/* Compress, encrypt, and write stream */
	written, err := f.writeFsStream(f.filename, stream, f.flags)
	if err != nil || int(written) == 0 {
		return util.RetErrStr("Failure in writing raw fs stream")
	}
	return err
}
// loadHeader parses a decrypted/decompressed raw fs stream (as produced
// by UnmountDB) back into an in-memory FSHeader. The optional stream
// header is validated against FS_SIGNATURE, then RawFile records are
// decoded one after another until the buffer is exhausted.
func loadHeader(data []byte, filename string) (*FSHeader, error) {
	ptr := bytes.NewBuffer(data) /* raw file stream */
	if REMOVE_FS_HEADER != true {
		header, err := func(p *bytes.Buffer) (*rawStreamHeader, error) {
			output := new(rawStreamHeader)
			d := gob.NewDecoder(p)
			if err := d.Decode(output); err != nil {
				return nil, err
			}
			return output, nil
		}(ptr)
		if err != nil || header == nil || header.Signature != FS_SIGNATURE {
			// NOTE(review): on a signature mismatch err is nil, so this
			// returns (nil, nil) -- callers must check both results.
			return nil, err
		}
	}
	output := &FSHeader{
		filename: filename,
		meta:     make(map[string]*govfsFile),
	}
	/* The root "/" entry is implicit and always present */
	output.meta[s("/")] = new(govfsFile)
	output.meta[s("/")].filename = "/"
	/* Enumerate files */
	for {
		if ptr.Len() == 0 {
			break
		}
		file_hdr, err := func(p *bytes.Buffer) (*RawFile, error) {
			output := &RawFile{}
			d := gob.NewDecoder(p)
			err := d.Decode(output)
			if err != nil && err != io.EOF {
				return nil, err
			}
			// NOTE(review): bytes.Buffer.UnreadByte can only back up one
			// byte immediately after a read, so this loop cannot rewind
			// STREAM_PAD_LEN bytes as apparently intended -- confirm
			// against the writer's padding scheme.
			for i := STREAM_PAD_LEN; i != 0; i -= 1 {
				p.UnreadByte()
			}
			return output, nil
		}(ptr)
		if err != nil {
			return nil, err
		}
		output.meta[s(file_hdr.Name)] = &govfsFile{
			filename: file_hdr.Name,
			flags:    file_hdr.Flags,
			data:     nil,
			datasum:  "",
		}
		//output.meta[s(file_hdr.Name)].data = make([]byte, decompressed_len)
		if file_hdr.UnzippedLen > 0 {
			output.meta[s(file_hdr.Name)].datasum = file_hdr.RawSum
			var raw_file_data = make([]byte, file_hdr.UnzippedLen)
			ptr.Read(raw_file_data)
			if (file_hdr.Flags & FLAG_COMPRESS_FILES) > 0 {
				var data_ptr *[]byte = &output.meta[s(file_hdr.Name)].data
				*data_ptr = make([]byte, file_hdr.UnzippedLen)
				zipped := bytes.NewBuffer(raw_file_data)
				gzipd, err := gzip.NewReader(zipped)
				if err != nil {
					// NOTE(review): gzipd is nil when NewReader fails, so
					// this Close call would panic -- verify.
					gzipd.Close()
					return nil, err
				}
				// NOTE(review): the reader is closed BEFORE the Read below,
				// so decompression runs against a closed stream; the Close
				// should follow the Read. Verify.
				gzipd.Close()
				decompressed_len, err := gzipd.Read(*data_ptr)
				if decompressed_len != file_hdr.UnzippedLen || err != nil {
					return nil, err
				}
				output.t_size += uint(decompressed_len)
			} else {
				output.meta[s(file_hdr.Name)].data = make([]byte, file_hdr.UnzippedLen)
				copy(output.meta[s(file_hdr.Name)].data, raw_file_data)
				output.t_size += uint(file_hdr.UnzippedLen)
			}
			/* Verify sums */
			if sum := s(string(output.meta[s(file_hdr.Name)].data)); sum != output.meta[s(file_hdr.Name)].datasum {
				return nil, util.RetErrStr("Invalid file sum")
			}
		}
	}
	return output, nil
}
/*
 * getFsKey derives the key used to encrypt/decrypt the raw fs table:
 * the MD5 sum of the local hostname concatenated with FS_SIGNATURE.
 */
func getFsKey() []byte {
	host, _ := os.Hostname()
	digest := md5.Sum([]byte(host + FS_SIGNATURE))
	key := make([]byte, len(digest))
	copy(key, digest[:])
	return key
}
/*
 * Decrypts the raw fs stream from a filename, decompresses it, and returns a
 * vector composed of the serialized fs table. Since no FSHeader exists yet,
 * this function is not attached to that structure, as per design choice.
 *
 * Bug fixes versus the previous version:
 *  - the plaintext was fed to gzip via b.Read(plaintext), which reads FROM
 *    an empty bytes.Buffer (leaving it empty) instead of writing into it,
 *    so decompression always saw an empty stream;
 *  - the gzip.NewReader error was silently overwritten and a nil reader
 *    was deferred-Closed, panicking on malformed input.
 */
func readFsStream(name string, flags FlagVal) ([]byte, error) {
	if _, err := os.Stat(name); os.IsNotExist(err) {
		return nil, err
	}
	raw_file, err := ioutil.ReadFile(name)
	if err != nil {
		return nil, err
	}
	var plaintext []byte
	if (flags & FLAG_ENCRYPT) > 0 {
		/* The crypto key is composed of the MD5 of the hostname + the FS_SIGNATURE */
		key := getFsKey()
		plaintext, err = cryptog.RC4_Decrypt(raw_file, &key)
		if err != nil {
			return nil, err
		}
	} else {
		plaintext = make([]byte, len(raw_file))
		copy(plaintext, raw_file)
	}
	var decompressed []byte
	if (flags & FLAG_COMPRESS) > 0 {
		/* Feed the decrypted bytes INTO the gzip reader */
		reader, err := gzip.NewReader(bytes.NewReader(plaintext))
		if err != nil {
			return nil, err
		}
		defer reader.Close()
		decompressed, err = ioutil.ReadAll(reader)
		if err != nil {
			return nil, err
		}
	} else {
		decompressed = make([]byte, len(plaintext))
		copy(decompressed, plaintext)
	}
	return decompressed, nil
}
/*
 * Takes in the serialized fs table, compresses it, encrypts it and writes
 * it to the disk. Returns the number of bytes written.
 *
 * Bug fix: the previous version encrypted data.Bytes() -- the RAW input --
 * instead of compressed.Bytes(), silently discarding the compression step
 * whenever FLAG_ENCRYPT was set (and making the on-disk format disagree
 * with readFsStream, which decrypts first and then decompresses).
 */
func (f *FSHeader) writeFsStream(name string, data *bytes.Buffer, flags FlagVal) (uint, error) {
	/* Optionally gzip the serialized table */
	var compressed = new(bytes.Buffer)
	if (flags & FLAG_COMPRESS) > 0 {
		w := gzip.NewWriter(compressed)
		w.Write(data.Bytes())
		w.Close()
	} else {
		compressed.Write(data.Bytes())
	}
	var ciphertext []byte
	if (flags & FLAG_ENCRYPT) > 0 {
		/* The crypto key will be the MD5 of the hostname string + the FS_SIGNATURE string */
		key := getFsKey()
		/* Perform RC4 encryption over the (possibly compressed) stream */
		var err error
		ciphertext, err = cryptog.RC4_Encrypt(compressed.Bytes(), &key)
		if err != nil {
			return 0, err
		}
	} else {
		ciphertext = make([]byte, compressed.Len())
		copy(ciphertext, compressed.Bytes())
	}
	/* Remove a stale database file, if any. (The old code used
	 * os.IsExist(err) on a nil Stat error, which is never true.) */
	if _, err := os.Stat(name); err == nil {
		os.Remove(name)
	}
	file, err := os.Create(name)
	if err != nil {
		return 0, err
	}
	defer file.Close()
	written, err := file.Write(ciphertext)
	if err != nil {
		return uint(written), err
	}
	return uint(written), nil
}
// GetFileCount returns the number of entries in the metadata table,
// including the implicit root "/" entry (callers such as UnmountDB
// subtract it).
//
// Idiom fix: a map's size is available in O(1) via len(); the previous
// implementation iterated the whole map just to count its keys.
func (f *FSHeader) GetFileCount() uint {
	return uint(len(f.meta))
}
// GetFileSize reports the number of data bytes currently stored for the
// named file; a missing file is an error.
func (f *FSHeader) GetFileSize(name string) (uint, error) {
	if file := f.check(name); file != nil {
		return uint(len(file.data)), nil
	}
	return 0, util.RetErrStr("GetFileSize: File does not exist")
}
// GetTotalFilesizes returns the sum of all file data sizes in bytes, as
// maintained incrementally by writeInternal and loadHeader.
func (f *FSHeader) GetTotalFilesizes() uint {
	return f.t_size
}
// GetFileList returns a human-readable listing of every entry in the
// metadata table, tagged "(DIR)" or "(FILE)" according to its flags.
// Order is unspecified (map iteration order).
func (f *FSHeader) GetFileList() []string {
	var listing []string
	for _, file := range f.meta {
		tag := "(FILE) "
		if (file.flags & FLAG_DIRECTORY) > 0 {
			tag = "(DIR) "
		}
		listing = append(listing, tag+file.filename)
	}
	return listing
}
/* s returns the hex-encoded MD5 sum of `name` salted with a fixed magic
 * suffix; it is used as the canonical key into the metadata table. */
func s(name string) string {
	seeded := []byte(name + "gofs_magic")
	digest := md5.Sum(seeded)
	return hex.EncodeToString(digest[:])
}
/* EOF */
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine
package build
// TODO(adg): test authentication
import (
"appengine"
"appengine/datastore"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"time"
)
// init registers the end-to-end test driver on the default mux.
func init() {
	http.HandleFunc("/buildtest", testHandler)
}
// testEntityKinds lists every datastore kind the test suite creates and
// that nukeEntities must wipe between runs.
var testEntityKinds = []string{
	"Package",
	"Commit",
	"Result",
	"Log",
}

// Fixture packages: the main Go repo (empty path) plus one subrepo.
const testPkg = "code.google.com/p/go.test"

var testPackage = &Package{Name: "Test", Kind: "subrepo", Path: testPkg}

var testPackages = []*Package{
	{Name: "Go", Path: ""},
	testPackage,
}
// tCommitTime is the timestamp of the next fixture commit, starting one
// week in the past and advanced by one hour per tCommit call.
var tCommitTime = time.Now().Add(-time.Hour * 24 * 7)

// tCommit returns a Commit fixture for the given hashes and package path.
// Each call advances tCommitTime so successive commits carry strictly
// increasing timestamps.
func tCommit(hash, parentHash, path string) *Commit {
	// Bug fix: time.Time is immutable and Add returns a new value. The
	// old code discarded the result ("tCommitTime.Add(time.Hour)"), so
	// every commit silently shared the same time despite the comment.
	tCommitTime = tCommitTime.Add(time.Hour) // each commit should have a different time
	return &Commit{
		PackagePath: path,
		Hash:        hash,
		ParentHash:  parentHash,
		Time:        tCommitTime,
		User:        "adg",
		Desc:        "change description " + hash,
	}
}
// testRequests drives the dashboard handlers end-to-end: each entry is a
// POST to `path` (with optional query `vals` and JSON body `req`) whose
// decoded response must match `res`. Order matters -- later /todo
// expectations depend on earlier /commit and /result submissions.
var testRequests = []struct {
	path string
	vals url.Values
	req  interface{}
	res  interface{}
}{
	// Packages
	{"/packages?kind=subrepo", nil, nil, []*Package{testPackage}},
	// Go repo
	{"/commit", nil, tCommit("0001", "0000", ""), nil},
	{"/commit", nil, tCommit("0002", "0001", ""), nil},
	{"/commit", nil, tCommit("0003", "0002", ""), nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0001", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0002", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	// multiple builders
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0003", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0002"}}},
	// branches
	{"/commit", nil, tCommit("0004", "0003", ""), nil},
	{"/commit", nil, tCommit("0005", "0002", ""), nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0005", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0004"}}},
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0004", OK: false}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	// logs
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
	{"/log/a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", nil, nil, "test"},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
	// repeat failure (shouldn't re-send mail)
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
	// non-Go repos
	{"/commit", nil, tCommit("1001", "1000", testPkg), nil},
	{"/commit", nil, tCommit("1002", "1001", testPkg), nil},
	{"/commit", nil, tCommit("1003", "1002", testPkg), nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
	{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1003", GoHash: "0001", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1002"}}},
	{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1002", GoHash: "0001", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1001"}}},
	{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0001", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0002"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
	// re-build Go revision for stale subrepos
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
	{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0005", OK: false, Log: "boo"}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
}
// testHandler runs the dashboard's end-to-end tests. It only operates
// under the dev_appserver: it wipes the test entity kinds, seeds the
// fixture packages, then replays testRequests against the default mux,
// comparing each decoded response against the expected value.
func testHandler(w http.ResponseWriter, r *http.Request) {
	if !appengine.IsDevAppServer() {
		fmt.Fprint(w, "These tests must be run under the dev_appserver.")
		return
	}
	c := appengine.NewContext(r)
	if err := nukeEntities(c, testEntityKinds); err != nil {
		logErr(w, r, err)
		return
	}
	// "nukeonly" lets a caller reset the datastore without running tests.
	if r.FormValue("nukeonly") != "" {
		fmt.Fprint(w, "OK")
		return
	}
	for _, p := range testPackages {
		if _, err := datastore.Put(c, p.Key(c), p); err != nil {
			logErr(w, r, err)
			return
		}
	}
	for i, t := range testRequests {
		c.Infof("running test %d %s", i, t.path)
		errorf := func(format string, args ...interface{}) {
			fmt.Fprintf(w, "%d %s: ", i, t.path)
			fmt.Fprintf(w, format, args...)
			fmt.Fprintln(w)
		}
		var body io.ReadWriter
		if t.req != nil {
			body = new(bytes.Buffer)
			json.NewEncoder(body).Encode(t.req)
		}
		url := "http://" + domain + t.path
		if t.vals != nil {
			url += "?" + t.vals.Encode()
		}
		req, err := http.NewRequest("POST", url, body)
		if err != nil {
			logErr(w, r, err)
			return
		}
		if t.req != nil {
			req.Method = "POST"
		}
		req.Header = r.Header
		rec := httptest.NewRecorder()
		// Make the request
		http.DefaultServeMux.ServeHTTP(rec, req)
		if rec.Code != 0 && rec.Code != 200 {
			errorf(rec.Body.String())
			return
		}
		resp := new(dashResponse)
		// If we're expecting a *Todo value,
		// prime the Response field with a Todo and a Commit inside it.
		if _, ok := t.res.(*Todo); ok {
			resp.Response = &Todo{Data: &Commit{}}
		}
		if strings.HasPrefix(t.path, "/log/") {
			// Log bodies are served raw, not JSON-wrapped.
			resp.Response = rec.Body.String()
		} else {
			err := json.NewDecoder(rec.Body).Decode(resp)
			if err != nil {
				errorf("decoding response: %v", err)
				return
			}
		}
		if e, ok := t.res.(string); ok {
			g, ok := resp.Response.(string)
			if !ok {
				errorf("Response not string: %T", resp.Response)
				return
			}
			if g != e {
				errorf("response mismatch: got %q want %q", g, e)
				return
			}
		}
		if e, ok := t.res.(*Todo); ok {
			g, ok := resp.Response.(*Todo)
			if !ok {
				errorf("Response not *Todo: %T", resp.Response)
				return
			}
			if e.Data == nil && g.Data != nil {
				errorf("Response.Data should be nil, got: %v", g.Data)
				return
			}
			if g.Data == nil {
				errorf("Response.Data is nil, want: %v", e.Data)
				return
			}
			gd, ok := g.Data.(*Commit)
			if !ok {
				errorf("Response.Data not *Commit: %T", g.Data)
				return
			}
			if eh := e.Data.(*Commit).Hash; eh != gd.Hash {
				errorf("hashes don't match: got %q, want %q", gd.Hash, eh)
				return
			}
		}
		if t.res == nil && resp.Response != nil {
			errorf("response mismatch: got %q expected <nil>",
				resp.Response)
			return
		}
	}
	fmt.Fprint(w, "PASS\nYou should see only one mail notification (for 0003/linux-386) in the dev_appserver logs.")
}
// nukeEntities deletes every entity of the given kinds from the
// datastore. It refuses to run outside the dev_appserver so production
// data can never be wiped by the test driver.
func nukeEntities(c appengine.Context, kinds []string) error {
	if !appengine.IsDevAppServer() {
		return errors.New("can't nuke production data")
	}
	var keys []*datastore.Key
	for _, kind := range kinds {
		// Keys-only query: only identities are needed to delete.
		it := datastore.NewQuery(kind).KeysOnly().Run(c)
		for {
			key, err := it.Next(nil)
			if err == datastore.Done {
				break
			}
			if err != nil {
				return err
			}
			keys = append(keys, key)
		}
	}
	return datastore.DeleteMulti(c, keys)
}
go.tools/dashboard/app: fix tests and add TODO to really fix them
This change is a really nasty hack to preserve the magic header
across requests. The nasty hack will go away once we refactor these
tests to use the new "appengine/aetest" package instead.
R=golang-dev, dvyukov
CC=golang-dev
https://codereview.appspot.com/39230043
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine
package build
// TODO(adg): test authentication
// TODO(adg): refactor to use appengine/aetest instead
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"time"
"appengine"
"appengine/datastore"
)
// init registers the end-to-end test driver on the default mux.
func init() {
	http.HandleFunc("/buildtest", testHandler)
}
// testEntityKinds lists every datastore kind the test suite creates and
// that nukeEntities must wipe between runs.
var testEntityKinds = []string{
	"Package",
	"Commit",
	"Result",
	"Log",
}

// Fixture packages: the main Go repo (empty path) plus one subrepo.
const testPkg = "code.google.com/p/go.test"

var testPackage = &Package{Name: "Test", Kind: "subrepo", Path: testPkg}

var testPackages = []*Package{
	{Name: "Go", Path: ""},
	testPackage,
}
// tCommitTime is the timestamp of the next fixture commit, starting one
// week in the past and advanced by one hour per tCommit call.
var tCommitTime = time.Now().Add(-time.Hour * 24 * 7)

// tCommit returns a Commit fixture for the given hashes and package path.
// Each call advances tCommitTime so successive commits carry strictly
// increasing timestamps.
func tCommit(hash, parentHash, path string) *Commit {
	// Bug fix: time.Time is immutable and Add returns a new value. The
	// old code discarded the result ("tCommitTime.Add(time.Hour)"), so
	// every commit silently shared the same time despite the comment.
	tCommitTime = tCommitTime.Add(time.Hour) // each commit should have a different time
	return &Commit{
		PackagePath: path,
		Hash:        hash,
		ParentHash:  parentHash,
		Time:        tCommitTime,
		User:        "adg",
		Desc:        "change description " + hash,
	}
}
// testRequests drives the dashboard handlers end-to-end: each entry is a
// POST to `path` (with optional query `vals` and JSON body `req`) whose
// decoded response must match `res`. Order matters -- later /todo
// expectations depend on earlier /commit and /result submissions.
var testRequests = []struct {
	path string
	vals url.Values
	req  interface{}
	res  interface{}
}{
	// Packages
	{"/packages?kind=subrepo", nil, nil, []*Package{testPackage}},
	// Go repo
	{"/commit", nil, tCommit("0001", "0000", ""), nil},
	{"/commit", nil, tCommit("0002", "0001", ""), nil},
	{"/commit", nil, tCommit("0003", "0002", ""), nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0001", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0002", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	// multiple builders
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0003", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0002"}}},
	// branches
	{"/commit", nil, tCommit("0004", "0003", ""), nil},
	{"/commit", nil, tCommit("0005", "0002", ""), nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0005", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0004"}}},
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0004", OK: false}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
	// logs
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
	{"/log/a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", nil, nil, "test"},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
	// repeat failure (shouldn't re-send mail)
	{"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
	// non-Go repos
	{"/commit", nil, tCommit("1001", "1000", testPkg), nil},
	{"/commit", nil, tCommit("1002", "1001", testPkg), nil},
	{"/commit", nil, tCommit("1003", "1002", testPkg), nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
	{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1003", GoHash: "0001", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1002"}}},
	{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1002", GoHash: "0001", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1001"}}},
	{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0001", OK: true}, nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, nil},
	{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0002"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
	// re-build Go revision for stale subrepos
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
	{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0005", OK: false, Log: "boo"}, nil},
	{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
}
// testHandler runs the dashboard's end-to-end tests under the
// dev_appserver: it wipes the test entity kinds, seeds the fixture
// packages, then replays testRequests against the default mux and
// compares each decoded response against the expected value. Because
// App Engine resolves Contexts by request pointer, each synthetic
// request is temporarily copied over *r (see the HACK comments).
func testHandler(w http.ResponseWriter, r *http.Request) {
	if !appengine.IsDevAppServer() {
		fmt.Fprint(w, "These tests must be run under the dev_appserver.")
		return
	}
	c := appengine.NewContext(r)
	if err := nukeEntities(c, testEntityKinds); err != nil {
		logErr(w, r, err)
		return
	}
	// "nukeonly" lets a caller reset the datastore without running tests.
	if r.FormValue("nukeonly") != "" {
		fmt.Fprint(w, "OK")
		return
	}
	for _, p := range testPackages {
		if _, err := datastore.Put(c, p.Key(c), p); err != nil {
			logErr(w, r, err)
			return
		}
	}
	origReq := *r
	defer func() {
		// HACK: We need to clobber the original request (see below)
		// so make sure we fix it before exiting the handler.
		*r = origReq
	}()
	for i, t := range testRequests {
		c.Infof("running test %d %s", i, t.path)
		errorf := func(format string, args ...interface{}) {
			fmt.Fprintf(w, "%d %s: ", i, t.path)
			fmt.Fprintf(w, format, args...)
			fmt.Fprintln(w)
		}
		var body io.ReadWriter
		if t.req != nil {
			body = new(bytes.Buffer)
			json.NewEncoder(body).Encode(t.req)
		}
		url := "http://" + domain + t.path
		if t.vals != nil {
			url += "?" + t.vals.Encode()
		}
		req, err := http.NewRequest("POST", url, body)
		if err != nil {
			logErr(w, r, err)
			return
		}
		if t.req != nil {
			req.Method = "POST"
		}
		// Carry the magic App Engine headers from the original request.
		req.Header = origReq.Header
		rec := httptest.NewRecorder()
		// Make the request
		*r = *req // HACK: App Engine uses the request pointer
		// as a map key to resolve Contexts.
		http.DefaultServeMux.ServeHTTP(rec, r)
		if rec.Code != 0 && rec.Code != 200 {
			errorf(rec.Body.String())
			return
		}
		resp := new(dashResponse)
		// If we're expecting a *Todo value,
		// prime the Response field with a Todo and a Commit inside it.
		if _, ok := t.res.(*Todo); ok {
			resp.Response = &Todo{Data: &Commit{}}
		}
		if strings.HasPrefix(t.path, "/log/") {
			// Log bodies are served raw, not JSON-wrapped.
			resp.Response = rec.Body.String()
		} else {
			err := json.NewDecoder(rec.Body).Decode(resp)
			if err != nil {
				errorf("decoding response: %v", err)
				return
			}
		}
		if e, ok := t.res.(string); ok {
			g, ok := resp.Response.(string)
			if !ok {
				errorf("Response not string: %T", resp.Response)
				return
			}
			if g != e {
				errorf("response mismatch: got %q want %q", g, e)
				return
			}
		}
		if e, ok := t.res.(*Todo); ok {
			g, ok := resp.Response.(*Todo)
			if !ok {
				errorf("Response not *Todo: %T", resp.Response)
				return
			}
			if e.Data == nil && g.Data != nil {
				errorf("Response.Data should be nil, got: %v", g.Data)
				return
			}
			if g.Data == nil {
				errorf("Response.Data is nil, want: %v", e.Data)
				return
			}
			gd, ok := g.Data.(*Commit)
			if !ok {
				errorf("Response.Data not *Commit: %T", g.Data)
				return
			}
			if eh := e.Data.(*Commit).Hash; eh != gd.Hash {
				errorf("hashes don't match: got %q, want %q", gd.Hash, eh)
				return
			}
		}
		if t.res == nil && resp.Response != nil {
			errorf("response mismatch: got %q expected <nil>",
				resp.Response)
			return
		}
	}
	fmt.Fprint(w, "PASS\nYou should see only one mail notification (for 0003/linux-386) in the dev_appserver logs.")
}
// nukeEntities deletes every entity of the given kinds from the
// datastore. It refuses to run outside the dev_appserver so production
// data can never be wiped by the test driver.
func nukeEntities(c appengine.Context, kinds []string) error {
	if !appengine.IsDevAppServer() {
		return errors.New("can't nuke production data")
	}
	var keys []*datastore.Key
	for _, kind := range kinds {
		// Keys-only query: only identities are needed to delete.
		q := datastore.NewQuery(kind).KeysOnly()
		for t := q.Run(c); ; {
			k, err := t.Next(nil)
			if err == datastore.Done {
				break
			}
			if err != nil {
				return err
			}
			keys = append(keys, k)
		}
	}
	return datastore.DeleteMulti(c, keys)
}
|
// SPDX-License-Identifier: MIT
// Package result 提供对自定义错误代码的支持
package result
import "github.com/issue9/validation"
type (
	// Fields is the list of per-field error messages.
	//
	// Its underlying type is map[string][]string.
	Fields = validation.Messages

	// BuildFunc is the function type used to build a Result instance.
	BuildFunc func(status, code int, message string) Result

	// Result is the interface implemented by custom error-code objects.
	//
	// Users may, according to their own needs, present a custom error code
	// and error-message format when an error occurs. The object is still
	// encoded for output by the relevant facilities in content; it only
	// has to implement the Result interface.
	//
	// For example, an error payload similar to:
	//  {
	//      'message': 'error message',
	//      'code': 4000001,
	//      'detail':[
	//          {'field': 'username': 'message': '已经存在相同用户名'},
	//          {'field': 'username': 'message': '已经存在相同用户名'},
	//      ]
	//  }
	Result interface {
		// Add appends detailed error information.
		//
		// The same key may be associated with multiple val values.
		Add(key string, val ...string)

		// Set stores detailed error information.
		//
		// An existing entry with the same key is overwritten.
		Set(key string, val ...string)

		// HasFields reports whether detailed error information exists.
		//
		// It should return true once content has been added via Add.
		HasFields() bool

		// Status is the HTTP status code.
		//
		// This value is ultimately returned to the user as the HTTP status.
		Status() int
	}
)
doc(result): 修正文字错误
// SPDX-License-Identifier: MIT

// Package result provides support for application-defined error codes.
package result

import "github.com/issue9/validation"

type (
	// Fields is the list of per-field error messages.
	//
	// Its underlying type is map[string][]string.
	Fields = validation.Messages

	// BuildFunc is the function type used to build a Result instance.
	BuildFunc func(status, code int, message string) Result

	// Result is the interface implemented by custom error-code objects.
	//
	// Typically this carries the detailed feedback for a 400 response to
	// client-submitted data. Users may present their own error code and
	// error-detail format. The object relies on the related facilities in
	// the content package for encoded output; it only needs to implement
	// the Result interface.
	//
	// For example, an error payload could look like:
	//  {
	//      'message': 'error message',
	//      'code': 4000001,
	//      'detail':[
	//          {'field': 'username': 'message': '已经存在相同用户名'},
	//          {'field': 'username': 'message': '已经存在相同用户名'},
	//      ]
	//  }
	Result interface {
		// Add appends detailed error messages.
		//
		// The same key may be associated with multiple val values.
		Add(key string, val ...string)

		// Set replaces the detailed error messages.
		//
		// Existing values stored under the same key are overwritten.
		Set(key string, val ...string)

		// HasFields reports whether detailed error messages exist.
		//
		// It should return true once content has been added via Add.
		HasFields() bool

		// Status returns the HTTP status code.
		//
		// This value is eventually sent to the user as the HTTP status.
		Status() int
	}
)
|
package main
import (
"encoding/json"
"html/template"
"net/http"
"github.com/gorilla/mux"
)
// getTorrentData carries the template data for the torrent detail page.
type getTorrentData struct {
	pageData         // shared page fields (Title is set below)
	Torrent  torrent // the torrent being displayed
}

// torrentTemplate renders page_torrent.html inside the shared layout.
// template.Must panics at startup if the files fail to parse.
var torrentTemplate = template.Must(template.New("template").Funcs(templateFunctions).ParseFiles("templates/layout.html", "templates/page_torrent.html"))
// getTorrent renders the detail page for a single torrent.
//
// The torrent id is taken from the {id} route variable, looked up in
// Elasticsearch and, when found, decoded and handed to the page template.
func (a *app) getTorrent(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)

	// Restrict the lookup to the "torrent" document type; without it the
	// get request is not pinned to the type the documents are indexed under.
	res, err := a.es.Get().Index(a.config.esIndex).Type("torrent").Id(vars["id"]).Do()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if !res.Found {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}

	// Decode the raw JSON source into our torrent struct.
	var t torrent
	if err := json.Unmarshal(*res.Source, &t); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Renamed from "pageData" to avoid shadowing the pageData type.
	data := getTorrentData{
		pageData: pageData{
			Title: t.Name,
		},
		Torrent: t,
	}
	if err := torrentTemplate.ExecuteTemplate(w, "layout", data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
use correct type for es
package main
import (
"encoding/json"
"html/template"
"net/http"
"github.com/gorilla/mux"
)
// getTorrentData carries the template data for the torrent detail page.
type getTorrentData struct {
	pageData         // shared page fields (Title is set below)
	Torrent  torrent // the torrent being displayed
}

// torrentTemplate renders page_torrent.html inside the shared layout.
// template.Must panics at startup if the files fail to parse.
var torrentTemplate = template.Must(template.New("template").Funcs(templateFunctions).ParseFiles("templates/layout.html", "templates/page_torrent.html"))
// getTorrent renders the detail page for a single torrent, identified by
// the {id} route variable and fetched from Elasticsearch.
func (a *app) getTorrent(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// Look the document up by id, restricted to the "torrent" type.
	res, err := a.es.Get().Index(a.config.esIndex).Type("torrent").Id(vars["id"]).Do()
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	if !res.Found {
		http.Error(w, "not found", 404)
		return
	}
	// Decode the raw JSON source into our torrent struct.
	var t torrent
	if err := json.Unmarshal(*res.Source, &t); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	pageData := getTorrentData{
		pageData: pageData{
			Title: t.Name,
		},
		Torrent: t,
	}
	// Render inside the shared layout; on template failure report a 500.
	if err := torrentTemplate.ExecuteTemplate(w, "layout", pageData); err != nil {
		http.Error(w, err.Error(), 500)
	}
}
|
package facebook
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/gorilla/feeds"
scraper "github.com/mono0x/my-scraper/lib"
"github.com/pkg/errors"
)
const (
	// serviceURL is the public Facebook site; baseURL is the Graph API host.
	serviceURL = "https://www.facebook.com/"
	baseURL    = "https://graph.facebook.com"
)

// posts mirrors the JSON shape of the Graph API posts endpoint response.
type posts struct {
	// https://developers.facebook.com/docs/graph-api/reference/v2.6/post
	Data []struct {
		Id          string `json:"id"`
		CreatedTime string `json:"created_time"`
		From        struct {
			Id   string `json:"id"`
			Name string `json:"name"`
		} `json:"from"`
		Link    string `json:"link"`
		Message string `json:"message"`
		Picture string `json:"picture"`
	} `json:"data"`
}

// source scrapes a Facebook user's posts through the Graph API.
type source struct {
	httpClient  *http.Client
	accessToken string
	userID      string
	baseURL     string // for testing
}
var _ scraper.Source = (*source)(nil)
// NewSource builds a post source for the given user, authenticated with
// accessToken and performing its requests through c.
func NewSource(c *http.Client, accessToken string, userID string) *source {
	s := &source{
		httpClient:  c,
		accessToken: accessToken,
		userID:      userID,
	}
	s.baseURL = baseURL // production endpoint; overridable in tests
	return s
}
var (
photosURLRe = regexp.MustCompile(`^` + regexp.QuoteMeta(serviceURL) + `[^/]+/photos/`)
messageReplacer = strings.NewReplacer("\n", "<br />")
)
// Scrape fetches the user's latest posts and converts them into a feed.
func (s *source) Scrape() (*feeds.Feed, error) {
	p, err := s.fetch()
	if err != nil {
		return nil, err
	}
	return s.render(p)
}
// fetch retrieves the user's posts from the Graph API (v2.6) and decodes
// the JSON response body.
func (s *source) fetch() (*posts, error) {
	values := &url.Values{}
	values.Set("access_token", s.accessToken)
	values.Set("fields", "created_time,from,link,message,picture")

	resp, err := s.httpClient.Get(s.baseURL + "/v2.6/" + s.userID + "/posts?" + values.Encode())
	if err != nil {
		return nil, errors.WithStack(err)
	}
	defer resp.Body.Close()

	// A non-2xx response would otherwise decode into an empty posts
	// object and surface later as a misleading "no posts found" error.
	if resp.StatusCode != http.StatusOK {
		return nil, errors.Errorf("unexpected status: %s", resp.Status)
	}

	var posts posts
	if err := json.NewDecoder(resp.Body).Decode(&posts); err != nil {
		// Wrap for a stack trace, consistent with the other error paths.
		return nil, errors.WithStack(err)
	}
	return &posts, nil
}
// render converts the fetched posts into a feeds.Feed.
//
// The first line of a post's message becomes the item title and the full
// message (newlines turned into <br />) the description. It fails when
// there are no posts, because the feed title is taken from the author of
// the first post.
func (s *source) render(posts *posts) (*feeds.Feed, error) {
	if len(posts.Data) == 0 {
		return nil, errors.New("no posts found")
	}
	items := make([]*feeds.Item, 0, len(posts.Data))
	for _, post := range posts.Data {
		// Graph API timestamps look like 2006-01-02T15:04:05-0700.
		created, err := time.Parse("2006-01-02T15:04:05-0700", post.CreatedTime)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		var title, description string
		if index := strings.Index(post.Message, "\n"); index >= 0 {
			title = post.Message[0:index]
			description = messageReplacer.Replace(post.Message)
		} else {
			title = post.Message
			description = post.Message
		}
		if post.Picture != "" {
			description += fmt.Sprintf(`<br /><img src="%s" />`, post.Picture)
		}
		// Photo permalinks are rewritten to the canonical /posts/ URL,
		// using the second half of the "<page>_<post>" id.
		var link string
		if photosURLRe.MatchString(post.Link) {
			if parts := strings.SplitN(post.Id, "_", 2); len(parts) == 2 {
				link = serviceURL + s.userID + "/posts/" + parts[1] + "/"
			} else {
				link = post.Link
			}
		} else {
			link = post.Link
		}
		items = append(items, &feeds.Item{
			Id:          post.Id,
			Author:      &feeds.Author{Name: post.From.Name},
			Title:       title,
			Description: description,
			Created:     created,
			Link:        &feeds.Link{Href: link},
		})
	}
	feed := &feeds.Feed{
		Title: posts.Data[0].From.Name,
		Link:  &feeds.Link{Href: serviceURL + s.userID},
		Items: items,
	}
	return feed, nil
}
Upgrade Facebook Graph API
package facebook
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/gorilla/feeds"
scraper "github.com/mono0x/my-scraper/lib"
"github.com/pkg/errors"
)
const (
serviceURL = "https://www.facebook.com/"
baseURL = "https://graph.facebook.com"
)
type posts struct {
// https://developers.facebook.com/docs/graph-api/reference/v3.2/post
Data []struct {
Id string `json:"id"`
CreatedTime string `json:"created_time"`
From struct {
Id string `json:"id"`
Name string `json:"name"`
} `json:"from"`
Link string `json:"link"`
Message string `json:"message"`
Picture string `json:"picture"`
} `json:"data"`
}
type source struct {
httpClient *http.Client
accessToken string
userID string
baseURL string // for testing
}
var _ scraper.Source = (*source)(nil)
func NewSource(c *http.Client, accessToken string, userID string) *source {
return &source{
httpClient: c,
accessToken: accessToken,
userID: userID,
baseURL: baseURL,
}
}
var (
photosURLRe = regexp.MustCompile(`^` + regexp.QuoteMeta(serviceURL) + `[^/]+/photos/`)
messageReplacer = strings.NewReplacer("\n", "<br />")
)
func (s *source) Scrape() (*feeds.Feed, error) {
posts, err := s.fetch()
if err != nil {
return nil, err
}
return s.render(posts)
}
// fetch retrieves the user's posts from the Graph API (v3.2) and decodes
// the JSON response body.
func (s *source) fetch() (*posts, error) {
	values := &url.Values{}
	values.Set("access_token", s.accessToken)
	values.Set("fields", "created_time,from,link,message,picture")

	resp, err := s.httpClient.Get(s.baseURL + "/v3.2/" + s.userID + "/posts?" + values.Encode())
	if err != nil {
		return nil, errors.WithStack(err)
	}
	defer resp.Body.Close()

	// A non-2xx response would otherwise decode into an empty posts
	// object and surface later as a misleading "no posts found" error.
	if resp.StatusCode != http.StatusOK {
		return nil, errors.Errorf("unexpected status: %s", resp.Status)
	}

	var posts posts
	if err := json.NewDecoder(resp.Body).Decode(&posts); err != nil {
		// Wrap for a stack trace, consistent with the other error paths.
		return nil, errors.WithStack(err)
	}
	return &posts, nil
}
// render converts the fetched posts into a feeds.Feed.
//
// The first line of a post's message becomes the item title and the full
// message (newlines turned into <br />) the description. It fails when
// there are no posts, because the feed title is taken from the author of
// the first post.
func (s *source) render(posts *posts) (*feeds.Feed, error) {
	if len(posts.Data) == 0 {
		return nil, errors.New("no posts found")
	}
	items := make([]*feeds.Item, 0, len(posts.Data))
	for _, post := range posts.Data {
		// Graph API timestamps look like 2006-01-02T15:04:05-0700.
		created, err := time.Parse("2006-01-02T15:04:05-0700", post.CreatedTime)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		var title, description string
		if index := strings.Index(post.Message, "\n"); index >= 0 {
			title = post.Message[0:index]
			description = messageReplacer.Replace(post.Message)
		} else {
			title = post.Message
			description = post.Message
		}
		if post.Picture != "" {
			description += fmt.Sprintf(`<br /><img src="%s" />`, post.Picture)
		}
		// Photo permalinks are rewritten to the canonical /posts/ URL,
		// using the second half of the "<page>_<post>" id.
		var link string
		if photosURLRe.MatchString(post.Link) {
			if parts := strings.SplitN(post.Id, "_", 2); len(parts) == 2 {
				link = serviceURL + s.userID + "/posts/" + parts[1] + "/"
			} else {
				link = post.Link
			}
		} else {
			link = post.Link
		}
		items = append(items, &feeds.Item{
			Id:          post.Id,
			Author:      &feeds.Author{Name: post.From.Name},
			Title:       title,
			Description: description,
			Created:     created,
			Link:        &feeds.Link{Href: link},
		})
	}
	feed := &feeds.Feed{
		Title: posts.Data[0].From.Name,
		Link:  &feeds.Link{Href: serviceURL + s.userID},
		Items: items,
	}
	return feed, nil
}
|
package router
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
log "github.com/Sirupsen/logrus"
"github.com/oursky/ourd/oderr"
)
// pipeline encapsulates a transformation which a request will come throught
// from preprocessors to the actual handler. (and postprocessor later)
type pipeline struct {
Action string
Preprocessors []Processor
Handler
}
// Router to dispatch HTTP request to respective handler
type Router struct {
actions map[string]pipeline
}
// Processor specifies the function signature for a Preprocessor
type Processor func(*Payload, *Response) int
// NewRouter creates an empty Router with no registered actions.
func NewRouter() *Router {
	return &Router{actions: map[string]pipeline{}}
}
// Map registers a handler (and its preprocessors, run in order before the
// handler) under the given action name.
func (r *Router) Map(action string, handler Handler, preprocessors ...Processor) {
	r.actions[action] = pipeline{
		Action:        action,
		Preprocessors: preprocessors,
		Handler:       handler,
	}
}
// ServeHTTP decodes the incoming JSON request into a Payload, dispatches
// it to the handler registered for its route action, and writes the JSON
// response — running the pipeline's preprocessors first.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	var (
		httpStatus    = http.StatusOK
		resp          Response
		handler       Handler
		preprocessors []Processor
		payload       *Payload
	)
	resp.writer = w
	// The deferred writer emits the response exactly once, unless a
	// handler already wrote directly (resp.written). An error combined
	// with a 2xx status is coerced to 400 so clients never receive a
	// "successful" error.
	defer func() {
		if !resp.written {
			resp.Header().Set("Content-Type", "application/json")
			if resp.Err != nil && httpStatus >= 200 && httpStatus <= 299 {
				resp.writer.WriteHeader(http.StatusBadRequest)
			} else {
				resp.writer.WriteHeader(httpStatus)
			}
			if err := resp.WriteEntity(resp); err != nil {
				panic(err)
			}
		}
	}()
	var err error
	payload, err = newPayloadForJSONHandler(req)
	if err != nil {
		httpStatus = http.StatusBadRequest
		resp.Err = oderr.NewRequestJSONInvalidErr(err)
		return
	}
	handler, preprocessors = r.matchJSONHandler(payload)
	if handler == nil {
		httpStatus = http.StatusNotFound
		resp.Err = oderr.NewRequestInvalidErr(errors.New("route unmatched"))
	} else {
		for _, p := range preprocessors {
			httpStatus = p(payload, &resp)
			if resp.Err != nil {
				// Preprocessor reported an error without adjusting
				// the status; fall back to 500.
				if httpStatus == 200 {
					httpStatus = 500
				}
				// Normalize foreign errors into the oderr error type.
				if _, ok := resp.Err.(oderr.Error); !ok {
					resp.Err = oderr.NewUnknownErr(resp.Err)
				}
				return
			}
		}
		handler(payload, &resp)
	}
}
// matchJSONHandler looks up the pipeline registered for the payload's
// route action. It returns nil, nil when the action is unknown.
func (r *Router) matchJSONHandler(p *Payload) (Handler, []Processor) {
	pipe, ok := r.actions[p.RouteAction()]
	if !ok {
		return nil, nil
	}
	return pipe.Handler, pipe.Preprocessors
}
// newPayloadForJSONHandler builds a Payload from the request's JSON body.
// An absent or empty body yields an empty data map rather than an error.
func newPayloadForJSONHandler(req *http.Request) (*Payload, error) {
	body := req.Body
	if body == nil {
		body = ioutil.NopCloser(bytes.NewReader(nil))
	}

	data := map[string]interface{}{}
	if err := json.NewDecoder(body).Decode(&data); err != nil && err != io.EOF {
		return nil, err
	}

	return &Payload{
		Data: data,
		Meta: map[string]interface{}{},
	}, nil
}
// CheckAuth validates the payload's AccessToken and returns 401 when it
// is not the hard-coded development token "validToken".
//
// NOTE(review): the original comment claims this is a no-op for "auth:"
// actions and that it attaches DB/RequestID to the response, but neither
// behavior appears in this body — confirm against the callers.
func CheckAuth(payload *Payload, response *Response) (status int, err error) {
	log.Println("CheckAuth")
	token := payload.AccessToken()
	if token == "validToken" {
		log.Println("CheckAuth -> validToken, ", token)
		return http.StatusOK, nil
	}
	log.Println("CheckAuth -> inValidToken, ", token)
	return http.StatusUnauthorized, errors.New("Unauthorized request")
}
router: Router now also receives api key and access token from HTTP header
package router
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
log "github.com/Sirupsen/logrus"
"github.com/oursky/ourd/oderr"
)
// pipeline encapsulates a transformation which a request will come throught
// from preprocessors to the actual handler. (and postprocessor later)
type pipeline struct {
Action string
Preprocessors []Processor
Handler
}
// Router to dispatch HTTP request to respective handler
type Router struct {
actions map[string]pipeline
}
// Processor specifies the function signature for a Preprocessor
type Processor func(*Payload, *Response) int
// NewRouter is factory for Router
func NewRouter() *Router {
return &Router{
map[string]pipeline{},
}
}
// Map to register action to handle mapping
func (r *Router) Map(action string, handler Handler, preprocessors ...Processor) {
r.actions[action] = pipeline{
Action: action,
Preprocessors: preprocessors,
Handler: handler,
}
}
// ServeHTTP decodes the incoming JSON request into a Payload, dispatches
// it to the handler registered for its route action, and writes the JSON
// response — running the pipeline's preprocessors first.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	var (
		httpStatus    = http.StatusOK
		resp          Response
		handler       Handler
		preprocessors []Processor
		payload       *Payload
	)
	resp.writer = w
	// The deferred writer emits the response exactly once, unless a
	// handler already wrote directly (resp.written). An error combined
	// with a 2xx status is coerced to 400 so clients never receive a
	// "successful" error.
	defer func() {
		if !resp.written {
			resp.Header().Set("Content-Type", "application/json")
			if resp.Err != nil && httpStatus >= 200 && httpStatus <= 299 {
				resp.writer.WriteHeader(http.StatusBadRequest)
			} else {
				resp.writer.WriteHeader(httpStatus)
			}
			if err := resp.WriteEntity(resp); err != nil {
				panic(err)
			}
		}
	}()
	var err error
	payload, err = newPayloadForJSONHandler(req)
	if err != nil {
		httpStatus = http.StatusBadRequest
		resp.Err = oderr.NewRequestJSONInvalidErr(err)
		return
	}
	handler, preprocessors = r.matchJSONHandler(payload)
	if handler == nil {
		httpStatus = http.StatusNotFound
		resp.Err = oderr.NewRequestInvalidErr(errors.New("route unmatched"))
	} else {
		for _, p := range preprocessors {
			httpStatus = p(payload, &resp)
			if resp.Err != nil {
				// Preprocessor reported an error without adjusting
				// the status; fall back to 500.
				if httpStatus == 200 {
					httpStatus = 500
				}
				// Normalize foreign errors into the oderr error type.
				if _, ok := resp.Err.(oderr.Error); !ok {
					resp.Err = oderr.NewUnknownErr(resp.Err)
				}
				return
			}
		}
		handler(payload, &resp)
	}
}
// matchJSONHandler looks up the pipeline registered for the payload's
// route action. It returns nil, nil when the action is unknown.
func (r *Router) matchJSONHandler(p *Payload) (Handler, []Processor) {
	pipe, ok := r.actions[p.RouteAction()]
	if !ok {
		return nil, nil
	}
	return pipe.Handler, pipe.Preprocessors
}
// newPayloadForJSONHandler builds a Payload from the request's JSON body
// and overlays the api key and access token taken from the
// X-Ourd-Api-Key and X-Ourd-Access-Token headers, when present.
func newPayloadForJSONHandler(req *http.Request) (*Payload, error) {
	body := req.Body
	if body == nil {
		body = ioutil.NopCloser(bytes.NewReader(nil))
	}

	data := map[string]interface{}{}
	if err := json.NewDecoder(body).Decode(&data); err != nil && err != io.EOF {
		return nil, err
	}

	payload := &Payload{
		Data: data,
		Meta: map[string]interface{}{},
	}

	// Header values are written after decoding, so they take precedence
	// over anything supplied in the body.
	if key := req.Header.Get("X-Ourd-Api-Key"); key != "" {
		payload.Data["api_key"] = key
	}
	if token := req.Header.Get("X-Ourd-Access-Token"); token != "" {
		payload.Data["access_token"] = token
	}
	return payload, nil
}
// CheckAuth validates the payload's AccessToken and returns 401 when it
// is not the hard-coded development token "validToken".
//
// NOTE(review): the original comment claims this is a no-op for "auth:"
// actions and that it attaches DB/RequestID to the response, but neither
// behavior appears in this body — confirm against the callers.
func CheckAuth(payload *Payload, response *Response) (status int, err error) {
	log.Println("CheckAuth")
	token := payload.AccessToken()
	if token == "validToken" {
		log.Println("CheckAuth -> validToken, ", token)
		return http.StatusOK, nil
	}
	log.Println("CheckAuth -> inValidToken, ", token)
	return http.StatusUnauthorized, errors.New("Unauthorized request")
}
|
package dalton
import "time"
// Audience codes for announcements.
const (
	AnnouncementGrade_All              = 0  // everyone
	AnnouncementGrade_MiddleSchool     = 14 // 4th grade through 8th grade
	AnnouncementGrade_HighSchool       = 15 // 9th grade through 12th grade
	AnnouncementGrade_MiddleSchool_456 = 16 // 4th, 5th, and 6th grade
	AnnouncementGrade_MiddleSchool_78  = 17 // 7th, and 8th grade
	AnnouncementGrade_Faculty          = 18 // faculty member
)

// Blackbaud school-level ids.
const (
	BlackbaudLevel_MiddleSchool = 167
	BlackbaudLevel_HighSchool   = 166
)

// Blackbaud persona ids.
const (
	BlackbaudPersona_Student = 2
	BlackbaudPersona_Faculty = 3
)

// An AssemblyType describes what happens for assembly on a given week.
type AssemblyType int

// The possible assembly-slot activities.
const (
	AssemblyType_Assembly AssemblyType = iota
	AssemblyType_LongHouse
	AssemblyType_Lab
)
// these change every year
var (
	// the grade that someone in the class of 2019 is in for this year
	// used to calculate other people's grade
	Grade_ClassOf2019 = 13

	// Key dates of the school year. Parse errors are deliberately
	// discarded: the literals are fixed and known to be valid.
	Day_SchoolStart, _   = time.Parse("2006-01-02", "2019-09-09")
	Day_ExamRelief, _    = time.Parse("2006-01-02", "2020-01-24")
	Day_SeniorLastDay, _ = time.Parse("2006-01-02", "2020-04-02")
	Day_SchoolEnd, _     = time.Parse("2006-01-02", "2020-06-11")

	// AssemblyTypeList maps a date (YYYY-MM-DD) to that week's assembly
	// activity. Dates not listed have no entry (first semester only here).
	AssemblyTypeList = map[string]AssemblyType{
		"2019-09-12": AssemblyType_Assembly,
		"2019-09-19": AssemblyType_Assembly,
		"2019-09-26": AssemblyType_Lab,
		"2019-10-03": AssemblyType_Assembly,
		"2019-10-10": AssemblyType_Lab,
		"2019-10-17": AssemblyType_Assembly,
		"2019-10-24": AssemblyType_LongHouse,
		"2019-10-31": AssemblyType_Lab,
		"2019-11-07": AssemblyType_Lab,
		"2019-11-14": AssemblyType_Assembly,
		"2019-11-21": AssemblyType_Assembly,
		"2019-12-05": AssemblyType_Assembly,
		"2019-12-12": AssemblyType_LongHouse,
		"2019-12-19": AssemblyType_Assembly,
		"2020-01-09": AssemblyType_Assembly,
	}
)
add second semester assemblies for dalton schedules
package dalton
import "time"
const (
AnnouncementGrade_All = 0 // everyone
AnnouncementGrade_MiddleSchool = 14 // 4th grade through 8th grade
AnnouncementGrade_HighSchool = 15 // 9th grade through 12th grade
AnnouncementGrade_MiddleSchool_456 = 16 // 4th, 5th, and 6th grade
AnnouncementGrade_MiddleSchool_78 = 17 // 7th, and 8th grade
AnnouncementGrade_Faculty = 18 // faculty member
)
const (
BlackbaudLevel_MiddleSchool = 167
BlackbaudLevel_HighSchool = 166
)
const (
BlackbaudPersona_Student = 2
BlackbaudPersona_Faculty = 3
)
// An AssemblyType describes what happens for assembly on a given week.
type AssemblyType int
const (
AssemblyType_Assembly AssemblyType = iota
AssemblyType_LongHouse
AssemblyType_Lab
)
// these change every year
var (
	// the grade that someone in the class of 2019 is in for this year
	// used to calculate other people's grade
	Grade_ClassOf2019 = 13

	// Key dates of the school year. Parse errors are deliberately
	// discarded: the literals are fixed and known to be valid.
	Day_SchoolStart, _   = time.Parse("2006-01-02", "2019-09-09")
	Day_ExamRelief, _    = time.Parse("2006-01-02", "2020-01-24")
	Day_SeniorLastDay, _ = time.Parse("2006-01-02", "2020-04-02")
	Day_SchoolEnd, _     = time.Parse("2006-01-02", "2020-06-11")

	// AssemblyTypeList maps a date (YYYY-MM-DD) to that week's assembly
	// activity, covering both semesters.
	AssemblyTypeList = map[string]AssemblyType{
		"2019-09-12": AssemblyType_Assembly,
		"2019-09-19": AssemblyType_Assembly,
		"2019-09-26": AssemblyType_Lab,
		"2019-10-03": AssemblyType_Assembly,
		"2019-10-10": AssemblyType_Lab,
		"2019-10-17": AssemblyType_Assembly,
		"2019-10-24": AssemblyType_LongHouse,
		"2019-10-31": AssemblyType_Lab,
		"2019-11-07": AssemblyType_Lab,
		"2019-11-14": AssemblyType_Assembly,
		"2019-11-21": AssemblyType_Assembly,
		"2019-12-05": AssemblyType_Assembly,
		"2019-12-12": AssemblyType_LongHouse,
		"2019-12-19": AssemblyType_Assembly,
		"2020-01-09": AssemblyType_Assembly,
		"2020-01-30": AssemblyType_Assembly,
		"2020-02-06": AssemblyType_Lab,
		"2020-02-13": AssemblyType_Assembly,
		"2020-02-20": AssemblyType_Lab,
		"2020-02-27": AssemblyType_Assembly,
		"2020-03-05": AssemblyType_Lab,
		"2020-03-12": AssemblyType_Assembly,
		"2020-04-02": AssemblyType_LongHouse,
		"2020-04-16": AssemblyType_Assembly,
		"2020-04-23": AssemblyType_Lab,
		"2020-04-30": AssemblyType_Assembly,
		"2020-05-07": AssemblyType_Assembly,
		"2020-05-14": AssemblyType_Assembly,
		"2020-05-21": AssemblyType_Assembly,
		"2020-05-28": AssemblyType_Lab,
	}
)
|
// Copyright 2012-2014, Rolf Veen and contributors.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ogdl
import (
"bytes"
"reflect"
"strconv"
"strings"
)
// Graph is a node with outgoing pointers to other Graph objects.
// It is implemented as a named list.
type Graph struct {
	This interface{} // node content; nil marks a transparent node
	Out  []*Graph    // ordered subnodes (outgoing edges)
}
// New returns a pointer to Graph, which will be either empty or contain
// the (optional) object given. Extra arguments beyond the first are
// ignored.
func New(n ...interface{}) *Graph {
	if len(n) > 0 {
		return &Graph{This: n[0]}
	}
	return &Graph{}
}
// IsNil reports whether this node has no content.
func (g *Graph) IsNil() bool {
	// Idiomatic form of the former if/else pair: the comparison itself
	// is the result.
	return g.This == nil
}
// Len returns the number of subnodes (outgoing edges, out degree) of this
// node, or -1 for a nil receiver.
func (g *Graph) Len() int {
	if g == nil {
		return -1
	}
	return len(g.Out)
}
// ThisType returns the name of the native type contained in the current node.
//
// NOTE(review): reflect.TypeOf returns nil when This is nil, so calling
// String() on it would panic — confirm callers only use this on nodes
// with content.
func (g *Graph) ThisType() string {
	return reflect.TypeOf(g.This).String()
}
// thisKind returns the reflect.Kind name of the value contained in the
// current node.
func (g *Graph) thisKind() string {
	return reflect.ValueOf(g.This).Kind().String()
}
// Depth returns the depth of the graph if it is a tree, or -1 if it is
// deeper than 100 levels (taken as evidence of a cycle).
//
// The previous implementation only checked the computed depth after the
// recursion had returned, so a genuinely cyclic graph recursed without
// bound; the level cap now stops the descent itself.
func (g *Graph) Depth() int {
	return g.depth(0)
}

// depth implements Depth, cutting recursion off at 100 levels so that a
// cyclic graph cannot overflow the stack.
func (g *Graph) depth(level int) int {
	if level > 100 {
		return -1
	}
	if g.Len() == 0 {
		return 0
	}
	max := 0
	for _, n := range g.Out {
		d := n.depth(level + 1)
		if d == -1 {
			return -1
		}
		if d > max {
			max = d
		}
	}
	return max + 1
}
// Equals returns true if the given graph and the receiver graph are equal:
// same This content and recursively equal subnodes in the same order.
//
// NOTE(review): comparing interface values with != panics when the dynamic
// type is not comparable (slices, maps, funcs) — confirm stored content.
func (g *Graph) Equals(c *Graph) bool {
	if c.This != g.This {
		return false
	}
	if g.Len() != c.Len() {
		return false
	}
	for i := 0; i < g.Len(); i++ {
		if g.Out[i].Equals(c.Out[i]) == false {
			return false
		}
	}
	return true
}
// Add adds a subnode to the current node.
//
// An eventual nil root will not be bypassed.
//
// If n is itself a non-nil *Graph: a graph with nil content is treated as
// transparent and its subnodes are appended directly; otherwise the graph
// node is appended as-is. In both cases that node is returned. Any other
// value is wrapped in a new Graph node, which is returned.
func (g *Graph) Add(n interface{}) *Graph {
	if node, ok := n.(*Graph); ok && node != nil {
		if node.IsNil() {
			// Transparent node: splice its children in instead.
			for _, node2 := range node.Out {
				g.Out = append(g.Out, node2)
			}
		} else {
			g.Out = append(g.Out, node)
		}
		return node
	}
	gg := Graph{n, nil}
	g.Out = append(g.Out, &gg)
	return &gg
}
// AddNodes appends the subnodes of g2 to the current node's subnodes and
// returns the receiver. A nil g2 is a no-op.
func (g *Graph) AddNodes(g2 *Graph) *Graph {
	if g2 != nil {
		g.Out = append(g.Out, g2.Out...)
	}
	return g
}
// addEqualNodes adds subnodes of the given Graph to the current node,
// if their content equals the given key. Optionally recurse into subnodes
// of the receiver graph.
func (g *Graph) addEqualNodes(g2 *Graph, key string, recurse bool) *Graph {
	if g2 == nil {
		return g
	}
	for _, n := range g2.Out {
		if _string(n.This) == key {
			g.AddNodes(n)
		}
		if recurse {
			g.addEqualNodes(n, key, true)
		}
	}
	return g
}
// Copy adds a copy of the graph given to the current graph, node by node.
//
// Warning (from the Go faq): Copying an interface value makes a copy of the
// thing stored in the interface value. If the interface value holds a struct,
// copying the interface value makes a copy of the struct. If the interface
// value holds a pointer, copying the interface value makes a copy of the
// pointer, but not the data it points to.
func (g *Graph) Copy(c *Graph) {
	for _, n := range c.Out {
		nn := g.Add(n.This)
		nn.Copy(n)
	}
}
// Node returns the first subnode whose string value is equal to the given
// string, or nil when there is no match (or the receiver is nil).
func (g *Graph) Node(s string) *Graph {
	if g == nil {
		return nil
	}
	for _, n := range g.Out {
		if _string(n.This) == s {
			return n
		}
	}
	return nil
}
// GetAt returns a subnode by index, or nil if the index is out of range
// or the receiver is nil.
func (g *Graph) GetAt(i int) *Graph {
	// Guard the nil receiver too, consistent with Len, Node and Get;
	// len(g.Out) on a nil *Graph would panic.
	if g == nil || i < 0 || i >= len(g.Out) {
		return nil
	}
	return g.Out[i]
}
// Get recurses a Graph following a given path and returns the result.
//
// This function returns a *Graph in any condition. When there is nothing
// to return, a nil Graph is returned. This behavior is designed so that
// the next function in a chain never gets an invalid receiver, avoiding
// null pointer errors.
//
// OGDL Path:
//   elements are separated by '.' or [] or {}
//   index := [N]
//   selector := {N}
//   tokens can be quoted
func (g *Graph) Get(s string) *Graph {
	if g == nil {
		return (*Graph)(nil)
	}
	// Compile the textual path and walk it.
	if r := g.get(NewPath(s)); r != nil {
		return r
	}
	return (*Graph)(nil)
}
// get walks the compiled path, starting at the receiver, and returns the
// node(s) it designates, or nil when the path does not resolve.
func (g *Graph) get(path *Graph) *Graph {
	if g == nil || path == nil {
		return nil
	}
	// iknow is true while the last path element was a plain named token;
	// results reached via [] or {} (iknow false) are re-wrapped below.
	iknow := true
	node := g
	// nodePrev = Upper level of current node, used in {}
	var nodePrev *Graph
	// elemPrev = previous path element, used in {}
	var elemPrev string
	for _, elem := range path.Out {
		p := elem.ThisString()
		iknow = false
		switch p {
		case TypeIndex:
			// [N]: positional access into the current node's subnodes.
			if elem.Len() == 0 {
				return nil
			}
			i, err := strconv.Atoi(elem.Out[0].ThisString())
			if err != nil {
				return nil
			}
			nodePrev = node
			node = node.GetAt(i)
			if node == nil {
				return nil
			}
			elemPrev = node.ThisString()
		case TypeSelector:
			// {} or {N}: select among the siblings sharing the name of
			// the previous path element.
			if nodePrev == nil || nodePrev.Len() == 0 || len(elemPrev) == 0 {
				return nil
			}
			r := New()
			if elem.Len() == 0 {
				// This case is {}, meaning that we must return
				// all ocurrences of the token just before (elemPrev).
				r.addEqualNodes(nodePrev, elemPrev, false)
				if r.Len() == 0 {
					return nil
				}
				node = r
			} else {
				i, err := strconv.Atoi(elem.Out[0].ThisString())
				if err != nil || i < 0 {
					return nil
				}
				// {0} must still be handled: add it to r
				i++
				// of all the nodes with name elemPrev, select the ith.
				for _, nn := range nodePrev.Out {
					if nn.ThisString() == elemPrev {
						i--
						if i == 0 {
							r.AddNodes(nn)
							node = r
							break
						}
					}
				}
				if i > 0 {
					return nil
				}
			}
		case "_len":
			// Pseudo-element: yield the subnode count of the current node.
			nn := New()
			nn.Add(node.Len())
			return nn
		default:
			// Plain token: descend into the first subnode with that name.
			iknow = true
			nodePrev = node
			elemPrev = p
			node = node.Node(p)
		}
		if node == nil {
			break
		}
	}
	if node == nil {
		return nil
	}
	// Results reached via [] or {} that carry content are wrapped in a
	// fresh root, so the caller always receives a container node.
	if node.This != nil && !iknow {
		node2 := New()
		node2.Add(node)
		node = node2
	}
	return node
}
// Delete removes all subnodes whose content equals n.
//
// NOTE(review): the == comparison on interface values panics for
// non-comparable dynamic types (slices, maps) — confirm stored content.
func (g *Graph) Delete(n interface{}) {
	for i := 0; i < g.Len(); i++ {
		if g.Out[i].This == n {
			g.Out = append(g.Out[:i], g.Out[i+1:]...)
			// Re-examine index i, which now holds the next element.
			i--
		}
	}
}
// DeleteAt removes the subnode at index i; out-of-range indices are
// silently ignored.
func (g *Graph) DeleteAt(i int) {
	if i >= 0 && i < g.Len() {
		g.Out = append(g.Out[:i], g.Out[i+1:]...)
	}
}
// Set sets the first occurrence of the given path to the value given.
//
// TODO: Support indexes
func (g *Graph) Set(s string, val interface{}) *Graph {
	if g == nil {
		return nil
	}
	// Compile the textual path; bail out if it cannot be parsed.
	p := NewPath(s)
	if p == nil {
		return nil
	}
	return g.set(p, val)
}
// TODO: Clean this code:

// set walks the path as far as existing nodes allow, creates the missing
// remainder, and finally replaces the destination's subnodes with val.
func (g *Graph) set(path *Graph, val interface{}) *Graph {
	node := g
	i := 0
	var prev *Graph
	// First pass: follow nodes that already exist along the path.
	for ; i < len(path.Out); i++ {
		prev = node
		elem := path.Out[i]
		if elem.ThisString() == TypeIndex {
			// [N]: grow the subnode slice as needed, then set slot N.
			// NOTE(review): this i shadows the loop index; harmless here
			// because the branch returns immediately.
			i := elem.Int64()
			if len(node.Out) <= int(i) {
				o := make([]*Graph, i+1)
				for j, n := range node.Out {
					o[j] = n
				}
				node.Out = o
			}
			node.Out[i] = New(val)
			return node.Out[i]
		}
		node = node.Node(elem.ThisString())
		if node == nil {
			break
		}
	}
	// Second pass: path ran out of existing nodes; create the rest.
	if node == nil {
		node = prev
		for ; i < len(path.Out); i++ {
			elem := path.Out[i]
			if elem.ThisString() == TypeIndex {
				i := elem.Int64()
				if len(node.Out) <= int(i) {
					o := make([]*Graph, i+1)
					for j, n := range node.Out {
						o[j] = n
					}
					node.Out = o
				}
				node.Out[i] = New(val)
				return node.Out[i]
			}
			node = node.Add(elem.This)
		}
	}
	// Replace whatever was below the destination with the new value.
	node.Out = nil
	return node.Add(val)
}
// Text is the OGDL text emitter. It converts a Graph into OGDL text.
//
// Strings are quoted if they contain spaces, newlines or special
// characters. Null elements are not printed, and act as transparent nodes.
//
// BUG():Handle comments correctly.
// BUG(): 2 times almost the same code:
func (g *Graph) Text() string {
	if g == nil {
		return ""
	}
	buffer := &bytes.Buffer{}
	// Do not print the 'root' node
	for _, node := range g.Out {
		node._text(0, buffer, false)
	}
	// remove trailing \n
	s := buffer.String()
	if len(s) == 0 {
		return ""
	}
	if s[len(s)-1] == '\n' {
		s = s[0 : len(s)-1]
	}
	// unquote
	// NOTE(review): assumes a leading quote implies a matching trailing
	// quote; a bare leading '"' would lose its final character — confirm
	// the emitter always balances quotes.
	if s[0] == '"' {
		s = s[1 : len(s)-1]
		// But then also replace \"
		s = strings.Replace(s, "\\\"", "\"", -1)
	}
	return s
}
// Show prints the Graph as text including this (the top) node, unlike
// Text which skips the root.
func (g *Graph) Show() string {
	if g == nil {
		return ""
	}
	buffer := &bytes.Buffer{}
	g._text(0, buffer, true)
	// remove trailing \n
	s := buffer.String()
	if len(s) == 0 {
		return ""
	}
	if s[len(s)-1] == '\n' {
		s = s[0 : len(s)-1]
	}
	// unquote
	// NOTE(review): same leading-quote assumption as in Text — confirm
	// the emitter always balances quotes.
	if s[0] == '"' {
		s = s[1 : len(s)-1]
		// But then also replace \"
		s = strings.Replace(s, "\\\"", "\"", -1)
	}
	return s
}
// _text is the private, lower level, implementation of Text().
// It takes two parameters, the level and a buffer to which the
// result is printed. When show is true, empty content is printed as "_"
// instead of being treated as transparent.
func (g *Graph) _text(n int, buffer *bytes.Buffer, show bool) {
	// Build the indentation prefix for this level.
	sp := ""
	for i := 0; i < n; i++ {
		sp += " "
	}
	/*
		When printing strings with newlines, there are two possibilities:
		block or quoted. Block is cleaner, but limited to leaf nodes. If the node
		is not leaf (it has subnodes), then we are forced to print a multiline
		quoted string.

		If the string has no newlines but spaces or special characters, then the
		same rule applies: quote those nodes that are non-leaf, print a block
		otherways.

		[!] Cannot print blocks at level 0? Or can we?
	*/
	s := "_"
	if g != nil {
		s = _string(g.This)
	}
	if strings.IndexAny(s, "\n\r \t'\",()") != -1 {
		// print quoted, but not at level 0
		// Do not convert " to \" below if level==0 !
		if n > 0 {
			buffer.WriteString(sp[:len(sp)-1])
			buffer.WriteByte('"')
		}
		// Copy byte by byte: drop CRs, re-indent after LFs, and escape
		// any unescaped double quote (cp tracks the previous byte).
		var c, cp byte
		cp = 0
		for i := 0; i < len(s); i++ {
			c = s[i] // byte, not rune
			if c == 13 {
				continue // ignore CR's
			} else if c == 10 {
				buffer.WriteByte('\n')
				buffer.WriteString(sp)
			} else if c == '"' && n > 0 {
				if cp != '\\' {
					buffer.WriteString("\\\"")
				}
			} else {
				buffer.WriteByte(c)
			}
			cp = c
		}
		if n > 0 {
			buffer.WriteString("\"")
		}
		buffer.WriteString("\n")
	} else {
		if len(s) == 0 && !show {
			// Transparent node: children print one level shallower.
			n--
		} else {
			if len(s) == 0 && show {
				s = "_"
			}
			buffer.WriteString(sp)
			buffer.WriteString(s)
			buffer.WriteByte('\n')
		}
	}
	// Recurse into subnodes.
	if g != nil {
		for i := 0; i < len(g.Out); i++ {
			node := g.Out[i]
			node._text(n+1, buffer, show)
		}
	}
}
// Substitute traverses the graph, replacing the content of every node
// whose content equals s with v.
func (g *Graph) Substitute(s string, v interface{}) {
	for _, node := range g.Out {
		if _string(node.This) == s {
			node.This = v
		}
		node.Substitute(s, v)
	}
}
bug in Delete*, added Create, some nil checks
// Copyright 2012-2014, Rolf Veen and contributors.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ogdl
import (
"bytes"
"reflect"
"strconv"
"strings"
)
// Graph is a node with outgoing pointers to other Graph objects.
// It is implemented as a named list.
type Graph struct {
This interface{}
Out []*Graph
}
// New returns a pointer to Graph, which will be either empty or contain the
// (optional) object given.
func New(n ...interface{}) *Graph {
if len(n) == 0 {
return &Graph{}
}
return &Graph{n[0], nil}
}
// IsNil reports whether this node has no content.
func (g *Graph) IsNil() bool {
	// Idiomatic form of the former if/else pair: the comparison itself
	// is the result.
	return g.This == nil
}
// Len returns the number of subnodes (outgoing edges, out degree) of this node.
func (g *Graph) Len() int {
if g == nil {
return -1
}
return len(g.Out)
}
// Type returns the name of the native type contained in the current node.
func (g *Graph) ThisType() string {
return reflect.TypeOf(g.This).String()
}
// Kind returns the name of the native type contained in the current node.
func (g *Graph) thisKind() string {
return reflect.ValueOf(g.This).Kind().String()
}
// Depth returns the depth of the graph if it is a tree, or -1 if it is
// deeper than 100 levels (taken as evidence of a cycle).
//
// The previous implementation only checked the computed depth after the
// recursion had returned, so a genuinely cyclic graph recursed without
// bound; the level cap now stops the descent itself.
func (g *Graph) Depth() int {
	return g.depth(0)
}

// depth implements Depth, cutting recursion off at 100 levels so that a
// cyclic graph cannot overflow the stack.
func (g *Graph) depth(level int) int {
	if level > 100 {
		return -1
	}
	if g.Len() == 0 {
		return 0
	}
	max := 0
	for _, n := range g.Out {
		d := n.depth(level + 1)
		if d == -1 {
			return -1
		}
		if d > max {
			max = d
		}
	}
	return max + 1
}
// Equals returns true if the given graph and the receiver graph are equal.
func (g *Graph) Equals(c *Graph) bool {
if c.This != g.This {
return false
}
if g.Len() != c.Len() {
return false
}
for i := 0; i < g.Len(); i++ {
if g.Out[i].Equals(c.Out[i]) == false {
return false
}
}
return true
}
// Add adds a subnode to the current node.
//
// An eventual nil root will not be bypassed.
func (g *Graph) Add(n interface{}) *Graph {
if node, ok := n.(*Graph); ok && node != nil {
if node.IsNil() {
for _, node2 := range node.Out {
g.Out = append(g.Out, node2)
}
} else {
g.Out = append(g.Out, node)
}
return node
}
gg := Graph{n, nil}
g.Out = append(g.Out, &gg)
return &gg
}
// AddNodes appends the subnodes of g2 to the current node's subnodes
// and returns the receiver. A nil g2 is a no-op.
func (g *Graph) AddNodes(g2 *Graph) *Graph {
	if g2 == nil {
		return g
	}
	g.Out = append(g.Out, g2.Out...)
	return g
}
// addEqualNodes appends the subnodes of every child of g2 whose string
// content equals key. When recurse is true, children of g2 are searched
// recursively as well. Returns the receiver.
func (g *Graph) addEqualNodes(g2 *Graph, key string, recurse bool) *Graph {
	if g2 == nil {
		return g
	}
	for _, child := range g2.Out {
		if _string(child.This) == key {
			g.AddNodes(child)
		}
		if recurse {
			g.addEqualNodes(child, key, true)
		}
	}
	return g
}
// Copy adds a deep structural copy of graph c to the current graph.
//
// Warning (from the Go faq): copying an interface value copies the
// thing stored in it — a struct is duplicated, but a pointer is copied
// as a pointer, so pointed-to data remains shared.
func (g *Graph) Copy(c *Graph) {
	for _, src := range c.Out {
		g.Add(src.This).Copy(src)
	}
}
// Node returns the first subnode whose string value equals s, or nil
// when there is no match (or the receiver is nil).
func (g *Graph) Node(s string) *Graph {
	if g == nil {
		return nil
	}
	for _, child := range g.Out {
		if _string(child.This) == s {
			return child
		}
	}
	return nil
}
// Create returns the first subnode whose string value equals s, with
// its subnodes deleted. If no such node exists, it is created and
// returned.
//
// Fix: drop the redundant else-after-return (idiomatic early return).
func (g *Graph) Create(s string) *Graph {
	n := g.Node(s)
	if n == nil {
		return g.Add(s)
	}
	n.Clear()
	return n
}
// GetAt returns the i-th subnode, or nil when i is out of range.
func (g *Graph) GetAt(i int) *Graph {
	if i < 0 || i >= len(g.Out) {
		return nil
	}
	return g.Out[i]
}
// Get recurses a Graph following a given path and returns the result.
//
// This function returns a *Graph in any condition. When there is
// nothing to return, a (typed) nil Graph is returned, so the next
// function in a chain never gets an invalid receiver, avoiding null
// pointer errors.
//
// OGDL Path:
//   elements are separated by '.' or [] or {}
//   index := [N]
//   selector := {N}
//   tokens can be quoted
func (g *Graph) Get(s string) *Graph {
	if g == nil {
		return nil
	}
	// Parse the textual path and evaluate it against this graph; get
	// already yields nil when the path does not resolve.
	return g.get(NewPath(s))
}
func (g *Graph) get(path *Graph) *Graph {
if g == nil || path == nil {
return nil
}
iknow := true
node := g
// nodePrev = Upper level of current node, used in {}
var nodePrev *Graph
// elemPrev = previous path element, used in {}
var elemPrev string
for _, elem := range path.Out {
p := elem.ThisString()
iknow = false
switch p {
case TypeIndex:
if elem.Len() == 0 {
return nil
}
i, err := strconv.Atoi(elem.Out[0].ThisString())
if err != nil {
return nil
}
nodePrev = node
node = node.GetAt(i)
if node == nil {
return nil
}
elemPrev = node.ThisString()
case TypeSelector:
if nodePrev == nil || nodePrev.Len() == 0 || len(elemPrev) == 0 {
return nil
}
r := New()
if elem.Len() == 0 {
// This case is {}, meaning that we must return
// all ocurrences of the token just before (elemPrev).
r.addEqualNodes(nodePrev, elemPrev, false)
if r.Len() == 0 {
return nil
}
node = r
} else {
i, err := strconv.Atoi(elem.Out[0].ThisString())
if err != nil || i < 0 {
return nil
}
// {0} must still be handled: add it to r
i++
// of all the nodes with name elemPrev, select the ith.
for _, nn := range nodePrev.Out {
if nn.ThisString() == elemPrev {
i--
if i == 0 {
r.AddNodes(nn)
node = r
break
}
}
}
if i > 0 {
return nil
}
}
case "_len":
nn := New()
nn.Add(node.Len())
return nn
default:
iknow = true
nodePrev = node
elemPrev = p
node = node.Node(p)
}
if node == nil {
break
}
}
if node == nil {
return nil
}
if node.This != nil && !iknow {
node2 := New()
node2.Add(node)
node = node2
}
return node
}
// Delete removes all subnodes with the given content. A nil receiver
// is a no-op. Contents are compared with the interface == operator.
func (g *Graph) Delete(n interface{}) {
if g == nil {
return
}
for i := 0; i < g.Len(); i++ {
if g.Out[i].This == n {
// Splice the element out of Out; the last-element case avoids
// appending an empty tail.
if i < (g.Len() - 1) {
g.Out = append(g.Out[:i], g.Out[i+1:]...)
} else {
g.Out = g.Out[:i]
}
// Step back so the slot that just received the next element is
// re-examined on the following iteration.
i--
}
}
}
// Clear removes all subnodes of this node. A nil receiver is a no-op.
func (g *Graph) Clear() {
	if g != nil {
		g.Out = nil
	}
}
// DeleteAt removes the i-th subnode; out-of-range indexes are ignored.
func (g *Graph) DeleteAt(i int) {
	if i < 0 || i >= g.Len() {
		return
	}
	if i == g.Len()-1 {
		// Removing the last element: just truncate.
		g.Out = g.Out[:i]
		return
	}
	g.Out = append(g.Out[:i], g.Out[i+1:]...)
}
// Set sets the first occurrence of the given path to the value given.
//
// TODO: Support indexes
func (g *Graph) Set(s string, val interface{}) *Graph {
	if g == nil {
		return nil
	}
	// Parse the textual path; an unparsable path yields nothing to set.
	if path := NewPath(s); path != nil {
		return g.set(path, val)
	}
	return nil
}
// TODO: Clean this code:
// set walks the parsed path, reusing existing nodes where they match,
// creates any remaining tail under the last match, and finally replaces
// the target node's subnodes with a single node holding val.
//
// NOTE(review): inside both loops `i := elem.Int64()` intentionally
// shadows the outer loop index i — that branch always returns, so the
// outer counter is never resumed. Confirm before refactoring.
func (g *Graph) set(path *Graph, val interface{}) *Graph {
node := g
i := 0
// prev remembers the deepest node that matched, so creation can resume
// from there when the path walks off the existing graph.
var prev *Graph
for ; i < len(path.Out); i++ {
prev = node
elem := path.Out[i]
if elem.ThisString() == TypeIndex {
i := elem.Int64()
if len(node.Out) <= int(i) {
// Grow Out to hold slot i (intermediate slots stay nil).
o := make([]*Graph, i+1)
for j, n := range node.Out {
o[j] = n
}
node.Out = o
}
node.Out[i] = New(val)
return node.Out[i]
}
node = node.Node(elem.ThisString())
if node == nil {
break
}
}
if node == nil {
// The path ran off the existing graph: create the remaining
// elements starting from the last node that matched.
node = prev
for ; i < len(path.Out); i++ {
elem := path.Out[i]
if elem.ThisString() == TypeIndex {
i := elem.Int64()
if len(node.Out) <= int(i) {
o := make([]*Graph, i+1)
for j, n := range node.Out {
o[j] = n
}
node.Out = o
}
node.Out[i] = New(val)
return node.Out[i]
}
node = node.Add(elem.This)
}
}
// Replace whatever was under the target with the new value.
node.Out = nil
return node.Add(val)
}
// Text is the OGDL text emitter. It converts a Graph into OGDL text.
//
// Strings are quoted if they contain spaces, newlines or special
// characters. Null elements are not printed, and act as transparent nodes.
//
// BUG():Handle comments correctly.
// BUG(): 2 times almost the same code:
func (g *Graph) Text() string {
if g == nil {
return ""
}
buffer := &bytes.Buffer{}
// Do not print the 'root' node
for _, node := range g.Out {
node._text(0, buffer, false)
}
// remove trailing \n
s := buffer.String()
if len(s) == 0 {
return ""
}
if s[len(s)-1] == '\n' {
s = s[0 : len(s)-1]
}
// unquote: if the whole result is one quoted scalar, strip the outer
// quotes and unescape embedded \" sequences.
if s[0] == '"' {
s = s[1 : len(s)-1]
// But then also replace \"
s = strings.Replace(s, "\\\"", "\"", -1)
}
return s
}
// Show prints the Graph as text including this (the top) node — unlike
// Text, which skips the root. The same trailing-newline trimming and
// unquoting as Text is applied to the result.
func (g *Graph) Show() string {
if g == nil {
return ""
}
buffer := &bytes.Buffer{}
g._text(0, buffer, true)
// remove trailing \n
s := buffer.String()
if len(s) == 0 {
return ""
}
if s[len(s)-1] == '\n' {
s = s[0 : len(s)-1]
}
// unquote: strip outer quotes of a single quoted scalar and unescape
// embedded \" sequences.
if s[0] == '"' {
s = s[1 : len(s)-1]
// But then also replace \"
s = strings.Replace(s, "\\\"", "\"", -1)
}
return s
}
// _text is the private, lower level, implementation of Text().
// It takes two parameters, the level and a buffer to which the
// result is printed. When show is true (Show), empty nodes are printed
// as "_"; when false (Text), empty nodes are transparent and their
// children print one level up.
func (g *Graph) _text(n int, buffer *bytes.Buffer, show bool) {
// Indentation prefix for this level.
sp := ""
for i := 0; i < n; i++ {
sp += " "
}
/*
When printing strings with newlines, there are two possibilities:
block or quoted. Block is cleaner, but limited to leaf nodes. If the node
is not leaf (it has subnodes), then we are forced to print a multiline
quoted string.
If the string has no newlines but spaces or special characters, then the
same rule applies: quote those nodes that are non-leaf, print a block
otherways.
[!] Cannot print blocks at level 0? Or can we?
*/
s := "_"
if g != nil {
s = _string(g.This)
}
if strings.IndexAny(s, "\n\r \t'\",()") != -1 {
// print quoted, but not at level 0
// Do not convert " to \" below if level==0 !
if n > 0 {
buffer.WriteString(sp[:len(sp)-1])
buffer.WriteByte('"')
}
// cp tracks the previous byte so already-escaped quotes (\") are
// not double-escaped.
var c, cp byte
cp = 0
for i := 0; i < len(s); i++ {
c = s[i] // byte, not rune
if c == 13 {
continue // ignore CR's
} else if c == 10 {
buffer.WriteByte('\n')
buffer.WriteString(sp)
} else if c == '"' && n > 0 {
if cp != '\\' {
buffer.WriteString("\\\"")
}
} else {
buffer.WriteByte(c)
}
cp = c
}
if n > 0 {
buffer.WriteString("\"")
}
buffer.WriteString("\n")
} else {
if len(s) == 0 && !show {
// Transparent node: children are emitted at this node's level.
n--
} else {
if len(s) == 0 && show {
s = "_"
}
buffer.WriteString(sp)
buffer.WriteString(s)
buffer.WriteByte('\n')
}
}
// Recurse into subnodes one level deeper.
if g != nil {
for i := 0; i < len(g.Out); i++ {
node := g.Out[i]
node._text(n+1, buffer, show)
}
}
}
// Substitute traverses the graph and replaces the content of every node
// whose string value equals s with v.
func (g *Graph) Substitute(s string, v interface{}) {
	if g == nil {
		return
	}
	for _, child := range g.Out {
		if _string(child.This) == s {
			child.This = v
		}
		child.Substitute(s, v)
	}
}
|
// Package osthrpool provides a pool of locked OS threads.
package osthrpool
import (
"container/heap"
"runtime"
"sync"
"time"
)
// Task represents a task that requires to be run on a locked OS thread.
type Task func()

// Pool represents a pool of locked OS threads.
type Pool struct {
maxSize int // maximum number of concurrent workers
timeout time.Duration // idle period after which a worker exits
workers queue // min-heap of workers ordered by load
mu sync.Mutex // protects workers
exit chan *worker // terminated workers announce themselves here
}
// New returns a pool of locked OS threads that grows up to maxSize and
// shrinks automatically depending on the load. A worker unlocks its
// thread and exits after sitting idle for the given timeout.
func New(maxSize int, timeout time.Duration) *Pool {
	p := &Pool{
		maxSize: maxSize,
		timeout: timeout,
	}
	p.workers = make(queue, 0, maxSize)
	p.exit = make(chan *worker, maxSize)
	return p
}
// Execute executes the given task on a locked OS thread, blocking until
// the task has completed.
func (p *Pool) Execute(t Task) {
// The mutex is held only while mutating the pool: possibly growing it
// and picking the least-loaded worker.
p.mu.Lock()
p.startWorker()
w := p.getWorker()
p.mu.Unlock()
w.tasks <- t // send task for execution
<-w.done // wait on result of task
p.putWorker(w)
}
// startWorker starts a worker if the pool has not reached its maximum
// number of workers. The caller must hold p.mu (see Execute).
func (p *Pool) startWorker() {
if p.workers.Len() < int(p.maxSize) {
if p.workers.Len() == 0 {
// Start background process that collects terminated workers and removes
// them from the pool.
go p.collectWorkers()
}
w := newWorker(p.exit, p.timeout)
heap.Push(&p.workers, w)
go w.work()
}
}
// collectWorkers is a background process that removes terminated
// workers from the worker pool. It stops itself once no active workers
// remain.
//
// Fix: the for/select with a single, unconditional case is equivalent
// to a plain channel receive (staticcheck S1000); simplified
// accordingly — behavior is unchanged.
func (p *Pool) collectWorkers() {
	for {
		w := <-p.exit
		p.mu.Lock()
		heap.Remove(&p.workers, w.index)
		if p.workers.Len() == 0 {
			p.mu.Unlock()
			return
		}
		p.mu.Unlock()
	}
}
// getWorker returns the lightest loaded worker from the worker pool.
// The caller must hold p.mu.
func (p *Pool) getWorker() *worker {
	lightest := heap.Pop(&p.workers).(*worker)
	lightest.pending++
	// Reinsert so the heap reflects the worker's new load.
	heap.Push(&p.workers, lightest)
	return lightest
}
// putWorker returns a worker to the pool, decrementing its load and
// restoring the heap ordering.
func (p *Pool) putWorker(w *worker) {
	p.mu.Lock()
	w.pending--
	heap.Fix(&p.workers, w.index)
	p.mu.Unlock()
}
// worker is a single pool member: a goroutine locked to an OS thread
// that serially runs tasks received on its tasks channel.
type worker struct {
timeout time.Duration // idle period after which the worker exits
exit chan *worker // pool's exit channel; announced on here at termination
tasks chan Task // incoming tasks (unbuffered rendezvous)
done chan struct{} // signals completion of the current task
index int // position in the pool's heap (maintained by queue)
pending int // in-flight task count; the heap ordering key
}
// newWorker builds a worker wired to the pool's exit channel with the
// given idle timeout.
func newWorker(exit chan *worker, timeout time.Duration) *worker {
	w := &worker{
		exit:    exit,
		timeout: timeout,
	}
	w.tasks = make(chan Task)
	w.done = make(chan struct{})
	return w
}
// work is the worker's main loop. It locks the goroutine to an OS
// thread and serially runs incoming tasks; after sitting idle for
// w.timeout it unlocks the thread, announces itself on w.exit and
// returns.
//
// NOTE(review): time.After allocates a fresh timer on every loop
// iteration; a reused time.Timer with Reset would avoid that on busy
// pools — confirm before changing.
func (w *worker) work() {
runtime.LockOSThread()
for {
select {
case task := <-w.tasks:
task()
w.done <- struct{}{}
case <-time.After(w.timeout):
runtime.UnlockOSThread()
w.exit <- w
return
}
}
}
// queue is a priority queue of workers with the lightest loaded worker as head
// of the queue. Together with Push/Pop below it implements
// container/heap's heap.Interface.
type queue []*worker

// Len returns the number of workers in the queue.
func (q queue) Len() int { return len(q) }

// Less orders workers by ascending in-flight task count.
func (q queue) Less(i, j int) bool { return q[i].pending < q[j].pending }

// Swap exchanges two workers and keeps their heap indexes current.
func (q queue) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
q[i].index = i
q[j].index = j
}
// Push appends item to the queue and records its heap index. It is part
// of the heap.Interface contract and is called by package heap, not
// directly.
//
// Fix: the original grew the slice with s[0:n+1], which panics as soon
// as the backing array's capacity is exhausted; append grows safely and
// is otherwise identical.
func (q *queue) Push(item interface{}) {
	w := item.(*worker)
	w.index = len(*q)
	*q = append(*q, w)
}
// Pop removes and returns the last element (heap.Interface contract;
// package heap has already moved the minimum there).
func (q *queue) Pop() interface{} {
	old := *q
	n := len(old)
	w := old[n-1]
	w.index = -1 // for safety
	*q = old[:n-1]
	return w
}
// Revision note: add worker init and exit closures.
// Package osthrpool provides a pool of locked OS threads.
package osthrpool
import (
"container/heap"
"runtime"
"sync"
"time"
)
// Task represents a task that requires to be run on a locked OS thread.
type Task func()

// Pool represents a pool of locked OS threads.
type Pool struct {
maxSize int // maximum number of concurrent workers
timeout time.Duration // idle period after which a worker exits
workers queue // min-heap of workers ordered by load
mu sync.Mutex // protects workers
exit chan *worker // terminated workers announce themselves here
// InitFn is called by a worker just before it starts the work loop. The
// goroutine is locked to an OS thread when InitFn is called.
InitFn func()
// ExitFn is called by a worker just before it exits. The goroutine is still
// locked to an OS thread when ExitFn is called.
ExitFn func()
}
// New returns a pool of locked OS threads that grows up to maxSize and
// shrinks automatically depending on the load. A worker unlocks its
// thread and exits after sitting idle for the given timeout. The
// optional InitFn/ExitFn hooks can be set on the returned Pool.
func New(maxSize int, timeout time.Duration) *Pool {
	p := &Pool{
		maxSize: maxSize,
		timeout: timeout,
	}
	p.workers = make(queue, 0, maxSize)
	p.exit = make(chan *worker, maxSize)
	return p
}
// Execute executes the given task on a locked OS thread, blocking until
// the task has completed.
func (p *Pool) Execute(t Task) {
// The mutex is held only while mutating the pool: possibly growing it
// and picking the least-loaded worker.
p.mu.Lock()
p.startWorker()
w := p.getWorker()
p.mu.Unlock()
w.tasks <- t // send task for execution
<-w.done // wait on result of task
p.putWorker(w)
}
// startWorker starts a worker if the pool has not reached its maximum
// number of workers, passing along the pool's init/exit hooks. The
// caller must hold p.mu (see Execute).
func (p *Pool) startWorker() {
if p.workers.Len() < int(p.maxSize) {
if p.workers.Len() == 0 {
// Start background process that collects terminated workers and removes
// them from the pool.
go p.collectWorkers()
}
w := newWorker(p.exit, p.timeout, p.InitFn, p.ExitFn)
heap.Push(&p.workers, w)
go w.work()
}
}
// collectWorkers is a background process that removes terminated
// workers from the worker pool. It stops itself once no active workers
// remain.
//
// Fix: the for/select with a single, unconditional case is equivalent
// to a plain channel receive (staticcheck S1000); simplified
// accordingly — behavior is unchanged.
func (p *Pool) collectWorkers() {
	for {
		w := <-p.exit
		p.mu.Lock()
		heap.Remove(&p.workers, w.index)
		if p.workers.Len() == 0 {
			p.mu.Unlock()
			return
		}
		p.mu.Unlock()
	}
}
// getWorker returns the lightest loaded worker from the worker pool.
// The caller must hold p.mu.
func (p *Pool) getWorker() *worker {
	lightest := heap.Pop(&p.workers).(*worker)
	lightest.pending++
	// Reinsert so the heap reflects the worker's new load.
	heap.Push(&p.workers, lightest)
	return lightest
}
// putWorker returns a worker to the pool, decrementing its load and
// restoring the heap ordering.
func (p *Pool) putWorker(w *worker) {
	p.mu.Lock()
	w.pending--
	heap.Fix(&p.workers, w.index)
	p.mu.Unlock()
}
// worker is a single pool member: a goroutine locked to an OS thread
// that serially runs tasks received on its tasks channel.
type worker struct {
timeout time.Duration // idle period after which the worker exits
exit chan *worker // pool's exit channel; announced on here at termination
tasks chan Task // incoming tasks (unbuffered rendezvous)
done chan struct{} // signals completion of the current task
index int // position in the pool's heap (maintained by queue)
pending int // in-flight task count; the heap ordering key
initFn func() // optional hook run after locking the OS thread
exitFn func() // optional hook run just before unlocking and exiting
}
// newWorker builds a worker wired to the pool's exit channel, with the
// given idle timeout and optional init/exit hooks (either may be nil).
func newWorker(exit chan *worker, timeout time.Duration, initFn, exitFn func()) *worker {
	w := &worker{
		exit:    exit,
		timeout: timeout,
		initFn:  initFn,
		exitFn:  exitFn,
	}
	w.tasks = make(chan Task)
	w.done = make(chan struct{})
	return w
}
// work is the worker's main loop. It locks the goroutine to an OS
// thread, runs the optional init hook, then serially runs incoming
// tasks; after sitting idle for w.timeout it runs the optional exit
// hook, unlocks the thread, announces itself on w.exit and returns.
//
// NOTE(review): time.After allocates a fresh timer on every loop
// iteration; a reused time.Timer with Reset would avoid that on busy
// pools — confirm before changing.
func (w *worker) work() {
runtime.LockOSThread()
// The init hook runs while the thread is locked.
if w.initFn != nil {
w.initFn()
}
for {
select {
case task := <-w.tasks:
task()
w.done <- struct{}{}
case <-time.After(w.timeout):
// The exit hook runs while the thread is still locked.
if w.exitFn != nil {
w.exitFn()
}
runtime.UnlockOSThread()
w.exit <- w
return
}
}
}
// queue is a priority queue of workers with the lightest loaded worker as head
// of the queue. Together with Push/Pop below it implements
// container/heap's heap.Interface.
type queue []*worker

// Len returns the number of workers in the queue.
func (q queue) Len() int { return len(q) }

// Less orders workers by ascending in-flight task count.
func (q queue) Less(i, j int) bool { return q[i].pending < q[j].pending }

// Swap exchanges two workers and keeps their heap indexes current.
func (q queue) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
q[i].index = i
q[j].index = j
}
// Push appends item to the queue and records its heap index. It is part
// of the heap.Interface contract and is called by package heap, not
// directly.
//
// Fix: the original grew the slice with s[0:n+1], which panics as soon
// as the backing array's capacity is exhausted; append grows safely and
// is otherwise identical.
func (q *queue) Push(item interface{}) {
	w := item.(*worker)
	w.index = len(*q)
	*q = append(*q, w)
}
// Pop removes and returns the last element (heap.Interface contract;
// package heap has already moved the minimum there).
func (q *queue) Pop() interface{} {
	old := *q
	n := len(old)
	w := old[n-1]
	w.index = -1 // for safety
	*q = old[:n-1]
	return w
}
|
package otto
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/otto/app"
"github.com/hashicorp/otto/appfile"
"github.com/hashicorp/otto/context"
"github.com/hashicorp/otto/directory"
"github.com/hashicorp/otto/foundation"
"github.com/hashicorp/otto/infrastructure"
"github.com/hashicorp/otto/ui"
"github.com/hashicorp/terraform/dag"
)
// Core is the main struct to use to interact with Otto as a library.
type Core struct {
// The Appfile and its compiled form, as supplied via CoreConfig.
appfile *appfile.File
appfileCompiled *appfile.Compiled
// Factories for app, infrastructure and foundation implementations,
// keyed by their lookup tuples/names.
apps map[app.Tuple]app.Factory
dir directory.Backend
infras map[string]infrastructure.Factory
foundationMap map[foundation.Tuple]foundation.Factory
// Storage locations; see CoreConfig for their semantics.
dataDir string
localDir string
compileDir string
ui ui.Ui
}
// CoreConfig is configuration for creating a new core with NewCore.
type CoreConfig struct {
// DataDir is the directory where local data will be stored that
// is global to all Otto processes.
DataDir string
// LocalDir is the directory where data local to this single Appfile
// will be stored. This isn't necessarily cleared for compilation.
LocalDir string
// CompileDir is the directory where compiled data will be written.
// Each compilation will clear this directory.
CompileDir string
// Appfile is the appfile that this core will be using for configuration.
// This must be a compiled Appfile.
Appfile *appfile.Compiled
// Directory is the directory where data is stored about this Appfile.
Directory directory.Backend
// Apps is the map of available app implementations.
Apps map[app.Tuple]app.Factory
// Infrastructures is the map of available infrastructures. The
// value is a factory that can create the infrastructure impl.
Infrastructures map[string]infrastructure.Factory
// Foundations is the map of available foundations. The
// value is a factory that can create the impl.
Foundations map[foundation.Tuple]foundation.Factory
// Ui is the Ui that will be used to communicate with the user.
Ui ui.Ui
}
// NewCore creates a new core.
//
// Once this function is called, this CoreConfig should not be used again
// or modified, since the Core may use parts of it without deep copying.
func NewCore(c *CoreConfig) (*Core, error) {
// Construction currently cannot fail; the error return is kept for
// API forward compatibility.
return &Core{
appfile: c.Appfile.File,
appfileCompiled: c.Appfile,
apps: c.Apps,
dir: c.Directory,
infras: c.Infrastructures,
foundationMap: c.Foundations,
dataDir: c.DataDir,
localDir: c.LocalDir,
compileDir: c.CompileDir,
ui: c.Ui,
}, nil
}
// Compile takes the Appfile and compiles all the resulting data:
// the infrastructure, each foundation, every dependency application,
// and finally the main application.
func (c *Core) Compile() error {
// Get the infra implementation for this
infra, infraCtx, err := c.infra()
if err != nil {
return err
}
// Get all the foundation implementations (which are tied as singletons
// to the infrastructure).
foundations, foundationCtxs, err := c.foundations()
if err != nil {
return err
}
// Delete the prior output directory
log.Printf("[INFO] deleting prior compilation contents: %s", c.compileDir)
if err := os.RemoveAll(c.compileDir); err != nil {
return err
}
// Compile the infrastructure for our application
log.Printf("[INFO] running infra compile...")
c.ui.Message("Compiling infra...")
if _, err := infra.Compile(infraCtx); err != nil {
return err
}
// Compile the foundation (not tied to any app). This compilation
// of the foundation is used for `otto infra` to set everything up.
log.Printf("[INFO] running foundation compilations")
for i, f := range foundations {
ctx := foundationCtxs[i]
c.ui.Message(fmt.Sprintf(
"Compiling foundation: %s", ctx.Tuple.Type))
if _, err := f.Compile(ctx); err != nil {
return err
}
}
// Walk through the dependencies and compile all of them.
// We have to compile every dependency for dev building.
var resultLock sync.Mutex
results := make([]*app.CompileResult, 0, len(c.appfileCompiled.Graph.Vertices()))
// Inside the closure body the parameter `app` shadows the imported app
// package; the signature's app.App/app.Context still resolve to the
// package because parameter scope begins at the function body.
err = c.walk(func(app app.App, ctx *app.Context, root bool) error {
if !root {
c.ui.Header(fmt.Sprintf(
"Compiling dependency '%s'...",
ctx.Appfile.Application.Name))
} else {
c.ui.Header(fmt.Sprintf(
"Compiling main application..."))
}
// If this is the root, we set the dev dep fragments.
if root {
// We grab the lock just in case although if we're the
// root this should be serialized.
resultLock.Lock()
ctx.DevDepFragments = make([]string, 0, len(results))
// NOTE(review): a dependency may have appended a nil result below;
// result.DevDepFragmentPath would then panic — confirm Compile
// never returns (nil, nil) for dependencies.
for _, result := range results {
if result.DevDepFragmentPath != "" {
ctx.DevDepFragments = append(
ctx.DevDepFragments, result.DevDepFragmentPath)
}
}
resultLock.Unlock()
}
// Build the contexts for the foundations. We use this
// to also compile the list of foundation dirs.
ctx.FoundationDirs = make([]string, len(foundations))
for i, _ := range foundations {
fCtx := foundationCtxs[i]
fCtx.Dir = filepath.Join(ctx.Dir, fmt.Sprintf("foundation-%s", fCtx.Tuple.Type))
ctx.FoundationDirs[i] = fCtx.Dir
}
// Compile!
result, err := app.Compile(ctx)
if err != nil {
return err
}
// Compile the foundations for this app
subdirs := []string{"app-dev", "app-dev-dep", "app-deploy"}
for i, f := range foundations {
fCtx := foundationCtxs[i]
if result != nil {
fCtx.AppConfig = &result.FoundationConfig
}
if _, err := f.Compile(fCtx); err != nil {
return err
}
// Make sure the subdirs exist
for _, dir := range subdirs {
if err := os.MkdirAll(filepath.Join(fCtx.Dir, dir), 0755); err != nil {
return err
}
}
}
// Store the compilation result for later
resultLock.Lock()
defer resultLock.Unlock()
results = append(results, result)
return nil
})
return err
}
// walk visits every vertex of the compiled Appfile graph, resolving the
// app implementation and context for each, and invokes f with a flag
// indicating whether the vertex is the root application. The first
// error stops further work via the shared atomic flag.
func (c *Core) walk(f func(app.App, *app.Context, bool) error) error {
root, err := c.appfileCompiled.Graph.Root()
if err != nil {
return fmt.Errorf(
"Error loading app: %s", err)
}
// Walk the appfile graph.
var stop int32 = 0
// The named result err is inspected by the deferred func below to set
// the stop flag on any error exit.
return c.appfileCompiled.Graph.Walk(func(raw dag.Vertex) (err error) {
// If we're told to stop (something else had an error), then stop early.
// Graphs walks by default will complete all disjoint parts of the
// graph before failing, but Otto doesn't have to do that.
if atomic.LoadInt32(&stop) != 0 {
return nil
}
// If we exit with an error, then mark the stop atomic
defer func() {
if err != nil {
atomic.StoreInt32(&stop, 1)
}
}()
// Convert to the rich vertex type so that we can access data
v := raw.(*appfile.CompiledGraphVertex)
// Get the context and app for this appfile
appCtx, err := c.appContext(v.File)
if err != nil {
return fmt.Errorf(
"Error loading Appfile for '%s': %s",
dag.VertexName(raw), err)
}
app, err := c.app(appCtx)
if err != nil {
return fmt.Errorf(
"Error loading App implementation for '%s': %s",
dag.VertexName(raw), err)
}
// Call our callback
return f(app, appCtx, raw == root)
})
}
// Build builds the deployable artifact for the currently compiled
// Appfile. It requires infrastructure credentials (prompting for them
// if needed) and delegates to the root application's Build.
func (c *Core) Build() error {
// Get the infra implementation for this
infra, infraCtx, err := c.infra()
if err != nil {
return err
}
// Ensure credentials are loaded/decrypted before building.
if err := c.creds(infra, infraCtx); err != nil {
return err
}
// We only use the root application for this task, upstream dependencies
// don't have an effect on the build process.
root, err := c.appfileCompiled.Graph.Root()
if err != nil {
return err
}
rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
if err != nil {
return fmt.Errorf(
"Error loading App: %s", err)
}
rootApp, err := c.app(rootCtx)
if err != nil {
return fmt.Errorf(
"Error loading App: %s", err)
}
// Just update our shared data so we get the creds
rootCtx.Shared = infraCtx.Shared
return rootApp.Build(rootCtx)
}
// Deploy deploys the application.
//
// Deploy supports subactions, which can be specified with action and args.
// Action can be "" to get the default deploy behavior.
func (c *Core) Deploy(action string, args []string) error {
// Get the infra implementation for this
infra, infraCtx, err := c.infra()
if err != nil {
return err
}
// Special case: don't try to fetch creds during `help` or `info`
if action != "help" && action != "info" {
if err := c.creds(infra, infraCtx); err != nil {
return err
}
}
// TODO: Verify that upstream dependencies are deployed
// We only use the root application for this task, upstream dependencies
// don't have an effect on the build process.
root, err := c.appfileCompiled.Graph.Root()
if err != nil {
return err
}
rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
if err != nil {
return fmt.Errorf(
"Error loading App: %s", err)
}
rootApp, err := c.app(rootCtx)
if err != nil {
return fmt.Errorf(
"Error loading App: %s", err)
}
// Update our shared data so we get the creds
rootCtx.Shared = infraCtx.Shared
// Pass through the requested action
rootCtx.Action = action
rootCtx.ActionArgs = args
return rootApp.Deploy(rootCtx)
}
// Dev starts a dev environment for the current application. For destroying
// and other tasks against the dev environment, use the generic `Execute`
// method. Dependencies are built (or loaded from cache) first, then the
// root application's Dev is invoked.
func (c *Core) Dev() error {
// We need to get the root data separately since we need that for
// all the function calls into the dependencies.
root, err := c.appfileCompiled.Graph.Root()
if err != nil {
return err
}
rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
if err != nil {
return fmt.Errorf(
"Error loading App: %s", err)
}
rootApp, err := c.app(rootCtx)
if err != nil {
return fmt.Errorf(
"Error loading App: %s", err)
}
// Go through all the dependencies and build their immutable
// dev environment pieces for the final configuration.
// Note: the closure's bool parameter `root` shadows the root vertex
// above; the app implementation is named appImpl so the app package
// stays accessible for ReadDevDep/WriteDevDep.
err = c.walk(func(appImpl app.App, ctx *app.Context, root bool) error {
// If it is the root, we just return and do nothing else since
// the root is a special case where we're building the actual
// dev environment.
if root {
return nil
}
// Get the path to where we'd cache the dependency if we have
// cached it...
cachePath := filepath.Join(ctx.CacheDir, "dev-dep.json")
// Check if we've cached this. If so, then use the cache.
if _, err := app.ReadDevDep(cachePath); err == nil {
ctx.Ui.Header(fmt.Sprintf(
"Using cached dev dependency for '%s'",
ctx.Appfile.Application.Name))
return nil
}
// Build the development dependency
dep, err := appImpl.DevDep(rootCtx, ctx)
if err != nil {
return fmt.Errorf(
"Error building dependency for dev '%s': %s",
ctx.Appfile.Application.Name,
err)
}
// If we have a dependency with files, then verify the files
// and store it in our cache directory so we can retrieve it
// later.
if dep != nil && len(dep.Files) > 0 {
if err := dep.RelFiles(ctx.CacheDir); err != nil {
return fmt.Errorf(
"Error caching dependency for dev '%s': %s",
ctx.Appfile.Application.Name,
err)
}
if err := app.WriteDevDep(cachePath, dep); err != nil {
return fmt.Errorf(
"Error caching dependency for dev '%s': %s",
ctx.Appfile.Application.Name,
err)
}
}
return nil
})
if err != nil {
return err
}
// All the development dependencies are built/loaded. We now have
// everything we need to build the complete development environment.
return rootApp.Dev(rootCtx)
}
// Infra manages the infrastructure for this Appfile.
//
// Infra supports subactions, which can be specified with action and args.
// Infra recognizes two special actions: "" (blank string) and "destroy".
// The former expects to create or update the complete infrastructure,
// and the latter will destroy the infrastructure.
//
// Ordering matters: on create, the main infra runs before foundations;
// on destroy, foundations are torn down before the main infra (see the
// mirrored Execute calls below).
func (c *Core) Infra(action string, args []string) error {
// Get the infra implementation for this
infra, infraCtx, err := c.infra()
if err != nil {
return err
}
if err := c.creds(infra, infraCtx); err != nil {
return err
}
// Set the action and action args
infraCtx.Action = action
infraCtx.ActionArgs = args
// If we need the foundations, then get them
var foundations []foundation.Foundation
var foundationCtxs []*foundation.Context
if action == "" || action == "destroy" {
foundations, foundationCtxs, err = c.foundations()
if err != nil {
return err
}
}
// If we're doing anything other than destroying, then
// run the execution now.
if action != "destroy" {
c.ui.Header("Building main infrastructure...")
if err := infra.Execute(infraCtx); err != nil {
return err
}
}
// If we have any foundations, we now run their infra deployment.
// This should only ever execute if action is to deploy or destroy,
// since those are the only cases that we load foundations.
for i, f := range foundations {
ctx := foundationCtxs[i]
ctx.Action = action
ctx.ActionArgs = args
ctx.InfraCreds = infraCtx.InfraCreds
log.Printf(
"[INFO] infra action '%s' on foundation '%s'",
action, ctx.Tuple.Type)
switch action {
case "":
c.ui.Header(fmt.Sprintf(
"Building infrastructure for foundation: %s",
ctx.Tuple.Type))
case "destroy":
c.ui.Header(fmt.Sprintf(
"Destroying infrastructure for foundation: %s",
ctx.Tuple.Type))
}
if err := f.Infra(ctx); err != nil {
return err
}
}
// If the action is destroy, we run the infrastructure execution
// here. We mirror creation above since in the destruction case
// we need to first destroy all applications and foundations that
// are using this infra.
if action == "destroy" {
c.ui.Header("Destroying main infrastructure...")
if err := infra.Execute(infraCtx); err != nil {
return err
}
}
// Output the right thing
switch action {
case "":
infraCtx.Ui.Header("[green]Infrastructure successfully created!")
infraCtx.Ui.Message(
"[green]The infrastructure necessary to deploy this application\n" +
"is now available. You can now deploy using `otto deploy`.")
case "destroy":
infraCtx.Ui.Header("[green]Infrastructure successfully destroyed!")
infraCtx.Ui.Message(
"[green]The infrastructure necessary to run this application and\n" +
"all other applications in this project has been destroyed.")
}
return nil
}
// Status outputs to the UI the status of all the stages of this
// application: dev environment, build artifact, deployment, and
// infrastructure. Status loading runs in a goroutine; a hint is shown
// if it takes longer than 150ms.
//
// Fix: the multi-line "Loading status" message was wrapped in
// fmt.Sprintf with no formatting verbs or arguments (staticcheck
// S1039); the string is now passed directly.
func (c *Core) Status() error {
	// Start loading the status info in a goroutine
	statusCh := make(chan *statusInfo, 1)
	go c.statusInfo(statusCh)

	// Wait for the status. If this takes longer than a certain amount
	// of time then we show a loading message.
	var status *statusInfo
	select {
	case status = <-statusCh:
	case <-time.After(150 * time.Millisecond):
		c.ui.Header("Loading status...")
		c.ui.Message(
			"Depending on your configured directory backend, this may require\n" +
				"network operations and can take some time. On a typical broadband\n" +
				"connection, this shouldn't take more than a few seconds.")
	}
	// If we hit the timeout branch, block until the real status arrives.
	if status == nil {
		status = <-statusCh
	}

	// Create the status texts
	devStatus := "[reset]NOT CREATED"
	if status.Dev.IsReady() {
		devStatus = "[green]CREATED"
	}
	buildStatus := "[reset]NOT BUILT"
	if status.Build != nil {
		buildStatus = "[green]BUILD READY"
	}
	deployStatus := "[reset]NOT DEPLOYED"
	if status.Deploy.IsDeployed() {
		deployStatus = "[green]DEPLOYED"
	} else if status.Deploy.IsFailed() {
		deployStatus = "[reset]DEPLOY FAILED"
	}
	infraStatus := "[reset]NOT CREATED"
	if status.Infra.IsReady() {
		infraStatus = "[green]READY"
	} else if status.Infra.IsPartial() {
		infraStatus = "[yellow]PARTIAL"
	}

	c.ui.Header("Status results...")
	c.ui.Message(fmt.Sprintf("Dev environment: %s", devStatus))
	c.ui.Message(fmt.Sprintf("Build: %s", buildStatus))
	c.ui.Message(fmt.Sprintf("Deploy: %s", deployStatus))
	c.ui.Message(fmt.Sprintf("Infra: %s", infraStatus))
	return nil
}
// Execute executes the given task for this Appfile, dispatching on the
// requested task type. Unknown tasks yield an error.
func (c *Core) Execute(opts *ExecuteOpts) error {
	if opts.Task == ExecuteTaskDev {
		return c.executeApp(opts)
	}
	return fmt.Errorf("unknown task: %s", opts.Task)
}
// creds reads the credentials if we have them, or queries the user
// for infrastructure credentials using the infrastructure if we
// don't have them.
//
// On success the decrypted credentials are stored on infraCtx.InfraCreds.
// Credentials are cached encrypted on disk under the local directory;
// the password itself is never persisted.
func (c *Core) creds(
	infra infrastructure.Infrastructure,
	infraCtx *infrastructure.Context) error {
	// Output to the user some information about what is about to
	// happen here...
	infraCtx.Ui.Header("Detecting infrastructure credentials...")

	// The path to where we put the encrypted creds
	path := filepath.Join(c.localDir, "creds")

	// Determine whether we believe the creds exist already or not.
	// If the file is missing, make sure the parent directory exists so
	// the encrypted file can be written later.
	var exists bool
	if _, err := os.Stat(path); err == nil {
		exists = true
	} else {
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return err
		}
	}

	var creds map[string]string
	if exists {
		infraCtx.Ui.Message(
			"Cached and encrypted infrastructure credentials found.\n" +
				"Otto will now ask you for the password to decrypt these\n" +
				"credentials.\n\n")

		// If they exist, ask for the password
		value, err := infraCtx.Ui.Input(&ui.InputOpts{
			Id:          "creds_password",
			Query:       "Encrypted Credentials Password",
			Description: strings.TrimSpace(credsQueryPassExists),
			Hide:        true,
			EnvVars:     []string{"OTTO_CREDS_PASSWORD"},
		})
		if err != nil {
			return err
		}

		// If the password is not blank, then just read the credentials.
		// A blank password deliberately leaves creds nil so that the user
		// is asked for fresh credentials below.
		if value != "" {
			plaintext, err := cryptRead(path, value)
			if err == nil {
				err = json.Unmarshal(plaintext, &creds)
			}
			if err != nil {
				return fmt.Errorf(
					"error reading encrypted credentials: %s\n\n"+
						"If this error persists, you can force Otto to ask for credentials\n"+
						"again by inputting the empty password as the password.",
					err)
			}
		}
	}

	// If we don't have creds, then we need to query the user via
	// the infrastructure implementation.
	if creds == nil {
		infraCtx.Ui.Message(
			"Existing infrastructure credentials were not found! Otto will\n" +
				"now ask you for infrastructure credentials. These will be encrypted\n" +
				"and saved on disk so this doesn't need to be repeated.\n\n" +
				"IMPORTANT: If you're re-entering new credentials, make sure the\n" +
				"credentials are for the same account, otherwise you may lose\n" +
				"access to your existing infrastructure Otto set up.\n\n")

		var err error
		creds, err = infra.Creds(infraCtx)
		if err != nil {
			return err
		}

		// Now that we have the credentials, we need to ask for the
		// password to encrypt and store them. Loop until a non-empty
		// password is supplied.
		var password string
		for password == "" {
			password, err = infraCtx.Ui.Input(&ui.InputOpts{
				Id:          "creds_password",
				Query:       "Password for Encrypting Credentials",
				Description: strings.TrimSpace(credsQueryPassNew),
				Hide:        true,
				EnvVars:     []string{"OTTO_CREDS_PASSWORD"},
			})
			if err != nil {
				return err
			}
		}

		// With the password, encrypt and write the data
		plaintext, err := json.Marshal(creds)
		if err != nil {
			// creds is a map[string]string, so this shouldn't ever fail
			panic(err)
		}
		if err := cryptWrite(path, password, plaintext); err != nil {
			return fmt.Errorf(
				"error writing encrypted credentials: %s", err)
		}
	}

	// Set the credentials
	infraCtx.InfraCreds = creds
	return nil
}
// executeApp runs the given task against the root application. It is the
// backend for Execute, which has already validated the task, so an
// unrecognized task here is a programmer error and panics.
func (c *Core) executeApp(opts *ExecuteOpts) error {
	// Get the app context and implementation for the root Appfile.
	appCtx, err := c.appContext(c.appfile)
	if err != nil {
		return err
	}
	app, err := c.app(appCtx)
	if err != nil {
		return err
	}

	// Set the action and action args
	appCtx.Action = opts.Action
	appCtx.ActionArgs = opts.Args

	// Dispatch on the task.
	switch opts.Task {
	case ExecuteTaskDev:
		return app.Dev(appCtx)
	default:
		// Fixed typo: "uknown" -> "unknown".
		panic(fmt.Sprintf("unknown task: %s", opts.Task))
	}
}
// appContext builds the app.Context for the given Appfile: the app/infra
// tuple, output and cache directories, and shared settings.
func (c *Core) appContext(f *appfile.File) (*app.Context, error) {
	// We need the configuration for the active infrastructure
	// so that we can build the tuple below
	config := f.ActiveInfrastructure()
	if config == nil {
		return nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			f.Project.Infrastructure)
	}

	// The tuple we're looking for is the application type, the
	// infrastructure type, and the infrastructure flavor. Build that
	// tuple.
	//
	// NOTE: the tuple must use the infrastructure *type* from the active
	// config (config.Type), not the project's infrastructure name —
	// lookups fail whenever the name differs from the type.
	tuple := app.Tuple{
		App:         f.Application.Type,
		Infra:       config.Type,
		InfraFlavor: config.Flavor,
	}

	// The output directory for data. This is either the main app so
	// it goes directly into "app" or it is a dependency and goes into
	// a dep folder.
	outputDir := filepath.Join(c.compileDir, "app")
	if id := f.ID; id != c.appfile.ID {
		outputDir = filepath.Join(
			c.compileDir, fmt.Sprintf("dep-%s", id))
	}

	// The cache directory for this app
	cacheDir := filepath.Join(c.dataDir, "cache", f.ID)
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		return nil, fmt.Errorf(
			"error making cache directory '%s': %s",
			cacheDir, err)
	}

	return &app.Context{
		Dir:         outputDir,
		CacheDir:    cacheDir,
		LocalDir:    c.localDir,
		Tuple:       tuple,
		Appfile:     f,
		Application: f.Application,
		Shared: context.Shared{
			InstallDir: filepath.Join(c.dataDir, "binaries"),
			Directory:  c.dir,
			Ui:         c.ui,
		},
	}, nil
}
// app returns the app.App implementation registered for the tuple in
// the given context.
func (c *Core) app(ctx *app.Context) (app.App, error) {
	log.Printf("[INFO] Loading app implementation for Tuple: %s", ctx.Tuple)

	// Find the factory registered for this tuple.
	factory := app.TupleMap(c.apps).Lookup(ctx.Tuple)
	if factory == nil {
		return nil, fmt.Errorf(
			"app implementation for tuple not found: %s", ctx.Tuple)
	}

	// Instantiate the implementation.
	impl, err := factory()
	if err != nil {
		return nil, fmt.Errorf(
			"app failed to start properly: %s", err)
	}
	return impl, nil
}
// infra returns the infrastructure implementation and context for the
// active infrastructure of the current Appfile.
func (c *Core) infra() (infrastructure.Infrastructure, *infrastructure.Context, error) {
	// Get the infrastructure configuration first: the factory map is
	// keyed by infrastructure *type*, which lives on the active config,
	// not by the project's infrastructure name.
	config := c.appfile.ActiveInfrastructure()
	if config == nil {
		return nil, nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			c.appfile.Project.Infrastructure)
	}

	// Get the infrastructure factory
	f, ok := c.infras[config.Type]
	if !ok {
		return nil, nil, fmt.Errorf(
			"infrastructure type not supported: %s",
			c.appfile.Project.Infrastructure)
	}

	// Start the infrastructure implementation
	infra, err := f()
	if err != nil {
		return nil, nil, err
	}

	// The output directory for data
	outputDir := filepath.Join(
		c.compileDir, fmt.Sprintf("infra-%s", c.appfile.Project.Infrastructure))

	// Build the context
	return infra, &infrastructure.Context{
		Dir:   outputDir,
		Infra: config,
		Shared: context.Shared{
			InstallDir: filepath.Join(c.dataDir, "binaries"),
			Directory:  c.dir,
			Ui:         c.ui,
		},
	}, nil
}
// foundations returns the foundation implementations and their matching
// contexts for the active infrastructure. Returns (nil, nil, nil) when
// the infrastructure declares no foundations.
func (c *Core) foundations() ([]foundation.Foundation, []*foundation.Context, error) {
	// Get the infrastructure configuration
	config := c.appfile.ActiveInfrastructure()
	if config == nil {
		return nil, nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			c.appfile.Project.Infrastructure)
	}

	// If there are no foundations, return nothing.
	if len(config.Foundations) == 0 {
		return nil, nil, nil
	}

	// Create the arrays for our list; both slices stay index-aligned.
	fs := make([]foundation.Foundation, 0, len(config.Foundations))
	ctxs := make([]*foundation.Context, 0, cap(fs))
	for _, f := range config.Foundations {
		// The tuple we're looking for is the foundation type, the
		// infrastructure type, and the infrastructure flavor. Build that
		// tuple.
		tuple := foundation.Tuple{
			Type:        f.Name,
			Infra:       config.Type,
			InfraFlavor: config.Flavor,
		}

		// Look for the matching foundation
		fun := foundation.TupleMap(c.foundationMap).Lookup(tuple)
		if fun == nil {
			return nil, nil, fmt.Errorf(
				"foundation implementation for tuple not found: %s",
				tuple)
		}

		// Instantiate the implementation
		impl, err := fun()
		if err != nil {
			return nil, nil, err
		}

		// The output directory for data
		outputDir := filepath.Join(
			c.compileDir, fmt.Sprintf("foundation-%s", f.Name))

		// Build the context
		ctx := &foundation.Context{
			Config:  f.Config,
			Dir:     outputDir,
			Tuple:   tuple,
			Appfile: c.appfile,
			Shared: context.Shared{
				InstallDir: filepath.Join(c.dataDir, "binaries"),
				Directory:  c.dir,
				Ui:         c.ui,
			},
		}

		// Add to our results
		fs = append(fs, impl)
		ctxs = append(ctxs, ctx)
	}

	return fs, ctxs, nil
}
// credsQueryPassExists is shown when previously saved, encrypted
// credentials are found on disk and the decryption password is needed.
const credsQueryPassExists = `
Infrastructure credentials are required for this operation. Otto found
saved credentials that are password protected. Please enter the password
to decrypt these credentials. You may also just hit <enter> and leave
the password blank to force Otto to ask for the credentials again.
`
// credsQueryPassNew is shown when asking for a password to encrypt
// freshly entered credentials before caching them on disk.
const credsQueryPassNew = `
This password will be used to encrypt and save the credentials so they
don't need to be repeated multiple times.
`
otto: fix infra references to use the active infrastructure type
package otto
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/otto/app"
"github.com/hashicorp/otto/appfile"
"github.com/hashicorp/otto/context"
"github.com/hashicorp/otto/directory"
"github.com/hashicorp/otto/foundation"
"github.com/hashicorp/otto/infrastructure"
"github.com/hashicorp/otto/ui"
"github.com/hashicorp/terraform/dag"
)
// Core is the main struct to use to interact with Otto as a library.
type Core struct {
	appfile         *appfile.File                       // root Appfile
	appfileCompiled *appfile.Compiled                   // compiled Appfile, including the dependency graph
	apps            map[app.Tuple]app.Factory           // registered app implementations
	dir             directory.Backend                   // directory backend storing Appfile state
	infras          map[string]infrastructure.Factory   // registered infrastructure implementations, keyed by type
	foundationMap   map[foundation.Tuple]foundation.Factory // registered foundation implementations
	dataDir         string                              // global data directory shared by all Otto processes
	localDir        string                              // data directory local to this single Appfile
	compileDir      string                              // output directory cleared by each compilation
	ui              ui.Ui                               // user interface for messages and input
}
// CoreConfig is configuration for creating a new core with NewCore.
type CoreConfig struct {
	// DataDir is the directory where local data will be stored that
	// is global to all Otto processes.
	DataDir string

	// LocalDir is the directory where data local to this single Appfile
	// will be stored. This isn't necessarily cleared for compilation.
	LocalDir string

	// CompileDir is the directory where compiled data will be written.
	// Each compilation will clear this directory.
	CompileDir string

	// Appfile is the appfile that this core will be using for configuration.
	// This must be a compiled Appfile.
	Appfile *appfile.Compiled

	// Directory is the directory where data is stored about this Appfile.
	Directory directory.Backend

	// Apps is the map of available app implementations.
	Apps map[app.Tuple]app.Factory

	// Infrastructures is the map of available infrastructures. The
	// value is a factory that can create the infrastructure impl.
	Infrastructures map[string]infrastructure.Factory

	// Foundations is the map of available foundations. The
	// value is a factory that can create the impl.
	Foundations map[foundation.Tuple]foundation.Factory

	// Ui is the Ui that will be used to communicate with the user.
	Ui ui.Ui
}
// NewCore creates a new core.
//
// Once this function is called, this CoreConfig should not be used again
// or modified, since the Core may use parts of it without deep copying.
func NewCore(c *CoreConfig) (*Core, error) {
	core := new(Core)
	core.appfile = c.Appfile.File
	core.appfileCompiled = c.Appfile
	core.apps = c.Apps
	core.dir = c.Directory
	core.infras = c.Infrastructures
	core.foundationMap = c.Foundations
	core.dataDir = c.DataDir
	core.localDir = c.LocalDir
	core.compileDir = c.CompileDir
	core.ui = c.Ui
	return core, nil
}
// Compile takes the Appfile and compiles all the resulting data:
// the infrastructure, the singleton foundations, and every application
// in the dependency graph (dependencies first, root last).
func (c *Core) Compile() error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}

	// Get all the foundation implementations (which are tied as singletons
	// to the infrastructure).
	foundations, foundationCtxs, err := c.foundations()
	if err != nil {
		return err
	}

	// Delete the prior output directory
	log.Printf("[INFO] deleting prior compilation contents: %s", c.compileDir)
	if err := os.RemoveAll(c.compileDir); err != nil {
		return err
	}

	// Compile the infrastructure for our application
	log.Printf("[INFO] running infra compile...")
	c.ui.Message("Compiling infra...")
	if _, err := infra.Compile(infraCtx); err != nil {
		return err
	}

	// Compile the foundation (not tied to any app). This compilation
	// of the foundation is used for `otto infra` to set everything up.
	log.Printf("[INFO] running foundation compilations")
	for i, f := range foundations {
		ctx := foundationCtxs[i]
		c.ui.Message(fmt.Sprintf(
			"Compiling foundation: %s", ctx.Tuple.Type))
		if _, err := f.Compile(ctx); err != nil {
			return err
		}
	}

	// Walk through the dependencies and compile all of them.
	// We have to compile every dependency for dev building.
	// resultLock guards results: Walk may run vertices concurrently.
	var resultLock sync.Mutex
	results := make([]*app.CompileResult, 0, len(c.appfileCompiled.Graph.Vertices()))
	err = c.walk(func(appImpl app.App, ctx *app.Context, root bool) error {
		if !root {
			c.ui.Header(fmt.Sprintf(
				"Compiling dependency '%s'...",
				ctx.Appfile.Application.Name))
		} else {
			// Constant string: no Sprintf needed.
			c.ui.Header("Compiling main application...")
		}

		// If this is the root, we set the dev dep fragments.
		if root {
			// We grab the lock just in case although if we're the
			// root this should be serialized.
			resultLock.Lock()
			ctx.DevDepFragments = make([]string, 0, len(results))
			for _, result := range results {
				if result.DevDepFragmentPath != "" {
					ctx.DevDepFragments = append(
						ctx.DevDepFragments, result.DevDepFragmentPath)
				}
			}
			resultLock.Unlock()
		}

		// Build the contexts for the foundations. We use this
		// to also compile the list of foundation dirs.
		ctx.FoundationDirs = make([]string, len(foundations))
		for i := range foundations {
			fCtx := foundationCtxs[i]
			fCtx.Dir = filepath.Join(ctx.Dir, fmt.Sprintf("foundation-%s", fCtx.Tuple.Type))
			ctx.FoundationDirs[i] = fCtx.Dir
		}

		// Compile!
		result, err := appImpl.Compile(ctx)
		if err != nil {
			return err
		}

		// Compile the foundations for this app
		subdirs := []string{"app-dev", "app-dev-dep", "app-deploy"}
		for i, f := range foundations {
			fCtx := foundationCtxs[i]
			if result != nil {
				fCtx.AppConfig = &result.FoundationConfig
			}
			if _, err := f.Compile(fCtx); err != nil {
				return err
			}

			// Make sure the subdirs exist
			for _, dir := range subdirs {
				if err := os.MkdirAll(filepath.Join(fCtx.Dir, dir), 0755); err != nil {
					return err
				}
			}
		}

		// Store the compilation result for later
		resultLock.Lock()
		defer resultLock.Unlock()
		results = append(results, result)
		return nil
	})
	return err
}
// walk traverses the compiled Appfile dependency graph and invokes f for
// each vertex with its app implementation, app context, and whether the
// vertex is the graph root. The first callback error stops further work.
func (c *Core) walk(f func(app.App, *app.Context, bool) error) error {
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return fmt.Errorf(
			"Error loading app: %s", err)
	}

	// Walk the appfile graph.
	// stop is set atomically because Walk may run vertices concurrently.
	var stop int32 = 0
	return c.appfileCompiled.Graph.Walk(func(raw dag.Vertex) (err error) {
		// If we're told to stop (something else had an error), then stop early.
		// Graphs walks by default will complete all disjoint parts of the
		// graph before failing, but Otto doesn't have to do that.
		if atomic.LoadInt32(&stop) != 0 {
			return nil
		}

		// If we exit with an error, then mark the stop atomic
		defer func() {
			if err != nil {
				atomic.StoreInt32(&stop, 1)
			}
		}()

		// Convert to the rich vertex type so that we can access data
		v := raw.(*appfile.CompiledGraphVertex)

		// Get the context and app for this appfile
		appCtx, err := c.appContext(v.File)
		if err != nil {
			return fmt.Errorf(
				"Error loading Appfile for '%s': %s",
				dag.VertexName(raw), err)
		}
		app, err := c.app(appCtx)
		if err != nil {
			return fmt.Errorf(
				"Error loading App implementation for '%s': %s",
				dag.VertexName(raw), err)
		}

		// Call our callback; raw == root tells it whether this is the
		// main application vertex.
		return f(app, appCtx, raw == root)
	})
}
// Build builds the deployable artifact for the currently compiled
// Appfile.
func (c *Core) Build() error {
	// Resolve the infrastructure and load its credentials, since the
	// build step may need them.
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}
	if err = c.creds(infra, infraCtx); err != nil {
		return err
	}

	// We only use the root application for this task, upstream dependencies
	// don't have an effect on the build process.
	rootVertex, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	rootCtx, err := c.appContext(rootVertex.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf("Error loading App: %s", err)
	}
	rootApp, err := c.app(rootCtx)
	if err != nil {
		return fmt.Errorf("Error loading App: %s", err)
	}

	// Just update our shared data so we get the creds
	rootCtx.Shared = infraCtx.Shared
	return rootApp.Build(rootCtx)
}
// Deploy deploys the application.
//
// Deploy supports subactions, which can be specified with action and args.
// Action can be "" to get the default deploy behavior.
func (c *Core) Deploy(action string, args []string) error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}

	// Special case: don't try to fetch creds during `help` or `info`,
	// since those actions are informational and shouldn't prompt.
	if action != "help" && action != "info" {
		if err := c.creds(infra, infraCtx); err != nil {
			return err
		}
	}

	// TODO: Verify that upstream dependencies are deployed

	// We only use the root application for this task, upstream dependencies
	// don't have an effect on the build process.
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}
	rootApp, err := c.app(rootCtx)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}

	// Update our shared data so we get the creds
	rootCtx.Shared = infraCtx.Shared

	// Pass through the requested action
	rootCtx.Action = action
	rootCtx.ActionArgs = args
	return rootApp.Deploy(rootCtx)
}
// Dev starts a dev environment for the current application. For destroying
// and other tasks against the dev environment, use the generic `Execute`
// method.
//
// Dev first builds (or loads from cache) the dev-dependency artifacts of
// every upstream dependency, then builds the root dev environment.
func (c *Core) Dev() error {
	// We need to get the root data separately since we need that for
	// all the function calls into the dependencies.
	root, err := c.appfileCompiled.Graph.Root()
	if err != nil {
		return err
	}
	rootCtx, err := c.appContext(root.(*appfile.CompiledGraphVertex).File)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}
	rootApp, err := c.app(rootCtx)
	if err != nil {
		return fmt.Errorf(
			"Error loading App: %s", err)
	}

	// Go through all the dependencies and build their immutable
	// dev environment pieces for the final configuration.
	err = c.walk(func(appImpl app.App, ctx *app.Context, root bool) error {
		// If it is the root, we just return and do nothing else since
		// the root is a special case where we're building the actual
		// dev environment.
		if root {
			return nil
		}

		// Get the path to where we'd cache the dependency if we have
		// cached it...
		cachePath := filepath.Join(ctx.CacheDir, "dev-dep.json")

		// Check if we've cached this. If so, then use the cache.
		// A read error simply means we rebuild below.
		if _, err := app.ReadDevDep(cachePath); err == nil {
			ctx.Ui.Header(fmt.Sprintf(
				"Using cached dev dependency for '%s'",
				ctx.Appfile.Application.Name))
			return nil
		}

		// Build the development dependency
		dep, err := appImpl.DevDep(rootCtx, ctx)
		if err != nil {
			return fmt.Errorf(
				"Error building dependency for dev '%s': %s",
				ctx.Appfile.Application.Name,
				err)
		}

		// If we have a dependency with files, then verify the files
		// and store it in our cache directory so we can retrieve it
		// later.
		if dep != nil && len(dep.Files) > 0 {
			if err := dep.RelFiles(ctx.CacheDir); err != nil {
				return fmt.Errorf(
					"Error caching dependency for dev '%s': %s",
					ctx.Appfile.Application.Name,
					err)
			}
			if err := app.WriteDevDep(cachePath, dep); err != nil {
				return fmt.Errorf(
					"Error caching dependency for dev '%s': %s",
					ctx.Appfile.Application.Name,
					err)
			}
		}

		return nil
	})
	if err != nil {
		return err
	}

	// All the development dependencies are built/loaded. We now have
	// everything we need to build the complete development environment.
	return rootApp.Dev(rootCtx)
}
// Infra manages the infrastructure for this Appfile.
//
// Infra supports subactions, which can be specified with action and args.
// Infra recognizes two special actions: "" (blank string) and "destroy".
// The former expects to create or update the complete infrastructure,
// and the latter will destroy the infrastructure.
//
// Ordering matters: on create, the main infrastructure runs before the
// foundations; on destroy, the foundations run first and the main
// infrastructure is torn down last.
func (c *Core) Infra(action string, args []string) error {
	// Get the infra implementation for this
	infra, infraCtx, err := c.infra()
	if err != nil {
		return err
	}
	if err := c.creds(infra, infraCtx); err != nil {
		return err
	}

	// Set the action and action args
	infraCtx.Action = action
	infraCtx.ActionArgs = args

	// If we need the foundations, then get them; only the create ("")
	// and destroy actions touch foundations.
	var foundations []foundation.Foundation
	var foundationCtxs []*foundation.Context
	if action == "" || action == "destroy" {
		foundations, foundationCtxs, err = c.foundations()
		if err != nil {
			return err
		}
	}

	// If we're doing anything other than destroying, then
	// run the execution now.
	if action != "destroy" {
		c.ui.Header("Building main infrastructure...")
		if err := infra.Execute(infraCtx); err != nil {
			return err
		}
	}

	// If we have any foundations, we now run their infra deployment.
	// This should only ever execute if action is to deploy or destroy,
	// since those are the only cases that we load foundations.
	for i, f := range foundations {
		ctx := foundationCtxs[i]
		ctx.Action = action
		ctx.ActionArgs = args
		ctx.InfraCreds = infraCtx.InfraCreds

		log.Printf(
			"[INFO] infra action '%s' on foundation '%s'",
			action, ctx.Tuple.Type)
		switch action {
		case "":
			c.ui.Header(fmt.Sprintf(
				"Building infrastructure for foundation: %s",
				ctx.Tuple.Type))
		case "destroy":
			c.ui.Header(fmt.Sprintf(
				"Destroying infrastructure for foundation: %s",
				ctx.Tuple.Type))
		}

		if err := f.Infra(ctx); err != nil {
			return err
		}
	}

	// If the action is destroy, we run the infrastructure execution
	// here. We mirror creation above since in the destruction case
	// we need to first destroy all applications and foundations that
	// are using this infra.
	if action == "destroy" {
		c.ui.Header("Destroying main infrastructure...")
		if err := infra.Execute(infraCtx); err != nil {
			return err
		}
	}

	// Output the right thing
	switch action {
	case "":
		infraCtx.Ui.Header("[green]Infrastructure successfully created!")
		infraCtx.Ui.Message(
			"[green]The infrastructure necessary to deploy this application\n" +
				"is now available. You can now deploy using `otto deploy`.")
	case "destroy":
		infraCtx.Ui.Header("[green]Infrastructure successfully destroyed!")
		infraCtx.Ui.Message(
			"[green]The infrastructure necessary to run this application and\n" +
				"all other applications in this project has been destroyed.")
	}

	return nil
}
// Status outputs to the UI the status of all the stages of this application.
//
// The status info is loaded in a goroutine; if it takes longer than
// 150ms, a loading message is printed before blocking on the result.
func (c *Core) Status() error {
	// Start loading the status info in a goroutine
	statusCh := make(chan *statusInfo, 1)
	go c.statusInfo(statusCh)

	// Wait for the status. If this takes longer than a certain amount
	// of time then we show a loading message.
	var status *statusInfo
	select {
	case status = <-statusCh:
	case <-time.After(150 * time.Millisecond):
		c.ui.Header("Loading status...")
		// NOTE: the original wrapped this constant string in
		// fmt.Sprintf with no arguments, which `go vet` flags as a
		// non-constant format string; pass the string directly.
		c.ui.Message(
			"Depending on your configured directory backend, this may require\n" +
				"network operations and can take some time. On a typical broadband\n" +
				"connection, this shouldn't take more than a few seconds.")
	}
	if status == nil {
		status = <-statusCh
	}

	// Create the status texts
	devStatus := "[reset]NOT CREATED"
	if status.Dev.IsReady() {
		devStatus = "[green]CREATED"
	}
	buildStatus := "[reset]NOT BUILT"
	if status.Build != nil {
		buildStatus = "[green]BUILD READY"
	}
	deployStatus := "[reset]NOT DEPLOYED"
	if status.Deploy.IsDeployed() {
		deployStatus = "[green]DEPLOYED"
	} else if status.Deploy.IsFailed() {
		deployStatus = "[reset]DEPLOY FAILED"
	}
	infraStatus := "[reset]NOT CREATED"
	if status.Infra.IsReady() {
		infraStatus = "[green]READY"
	} else if status.Infra.IsPartial() {
		infraStatus = "[yellow]PARTIAL"
	}

	c.ui.Header("Status results...")
	c.ui.Message(fmt.Sprintf("Dev environment: %s", devStatus))
	c.ui.Message(fmt.Sprintf("Build: %s", buildStatus))
	c.ui.Message(fmt.Sprintf("Deploy: %s", deployStatus))
	c.ui.Message(fmt.Sprintf("Infra: %s", infraStatus))
	return nil
}
// Execute executes the given task for this Appfile.
func (c *Core) Execute(opts *ExecuteOpts) error {
	// Dispatch on the requested task; anything unrecognized is an error.
	if opts.Task == ExecuteTaskDev {
		return c.executeApp(opts)
	}
	return fmt.Errorf("unknown task: %s", opts.Task)
}
// creds reads the credentials if we have them, or queries the user
// for infrastructure credentials using the infrastructure if we
// don't have them.
//
// On success the decrypted credentials are stored on infraCtx.InfraCreds.
// Credentials are cached encrypted on disk under the local directory;
// the password itself is never persisted.
func (c *Core) creds(
	infra infrastructure.Infrastructure,
	infraCtx *infrastructure.Context) error {
	// Output to the user some information about what is about to
	// happen here...
	infraCtx.Ui.Header("Detecting infrastructure credentials...")

	// The path to where we put the encrypted creds
	path := filepath.Join(c.localDir, "creds")

	// Determine whether we believe the creds exist already or not.
	// If the file is missing, make sure the parent directory exists so
	// the encrypted file can be written later.
	var exists bool
	if _, err := os.Stat(path); err == nil {
		exists = true
	} else {
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return err
		}
	}

	var creds map[string]string
	if exists {
		infraCtx.Ui.Message(
			"Cached and encrypted infrastructure credentials found.\n" +
				"Otto will now ask you for the password to decrypt these\n" +
				"credentials.\n\n")

		// If they exist, ask for the password
		value, err := infraCtx.Ui.Input(&ui.InputOpts{
			Id:          "creds_password",
			Query:       "Encrypted Credentials Password",
			Description: strings.TrimSpace(credsQueryPassExists),
			Hide:        true,
			EnvVars:     []string{"OTTO_CREDS_PASSWORD"},
		})
		if err != nil {
			return err
		}

		// If the password is not blank, then just read the credentials.
		// A blank password deliberately leaves creds nil so that the user
		// is asked for fresh credentials below.
		if value != "" {
			plaintext, err := cryptRead(path, value)
			if err == nil {
				err = json.Unmarshal(plaintext, &creds)
			}
			if err != nil {
				return fmt.Errorf(
					"error reading encrypted credentials: %s\n\n"+
						"If this error persists, you can force Otto to ask for credentials\n"+
						"again by inputting the empty password as the password.",
					err)
			}
		}
	}

	// If we don't have creds, then we need to query the user via
	// the infrastructure implementation.
	if creds == nil {
		infraCtx.Ui.Message(
			"Existing infrastructure credentials were not found! Otto will\n" +
				"now ask you for infrastructure credentials. These will be encrypted\n" +
				"and saved on disk so this doesn't need to be repeated.\n\n" +
				"IMPORTANT: If you're re-entering new credentials, make sure the\n" +
				"credentials are for the same account, otherwise you may lose\n" +
				"access to your existing infrastructure Otto set up.\n\n")

		var err error
		creds, err = infra.Creds(infraCtx)
		if err != nil {
			return err
		}

		// Now that we have the credentials, we need to ask for the
		// password to encrypt and store them. Loop until a non-empty
		// password is supplied.
		var password string
		for password == "" {
			password, err = infraCtx.Ui.Input(&ui.InputOpts{
				Id:          "creds_password",
				Query:       "Password for Encrypting Credentials",
				Description: strings.TrimSpace(credsQueryPassNew),
				Hide:        true,
				EnvVars:     []string{"OTTO_CREDS_PASSWORD"},
			})
			if err != nil {
				return err
			}
		}

		// With the password, encrypt and write the data
		plaintext, err := json.Marshal(creds)
		if err != nil {
			// creds is a map[string]string, so this shouldn't ever fail
			panic(err)
		}
		if err := cryptWrite(path, password, plaintext); err != nil {
			return fmt.Errorf(
				"error writing encrypted credentials: %s", err)
		}
	}

	// Set the credentials
	infraCtx.InfraCreds = creds
	return nil
}
// executeApp runs the given task against the root application. It is the
// backend for Execute, which has already validated the task, so an
// unrecognized task here is a programmer error and panics.
func (c *Core) executeApp(opts *ExecuteOpts) error {
	// Get the app context and implementation for the root Appfile.
	appCtx, err := c.appContext(c.appfile)
	if err != nil {
		return err
	}
	app, err := c.app(appCtx)
	if err != nil {
		return err
	}

	// Set the action and action args
	appCtx.Action = opts.Action
	appCtx.ActionArgs = opts.Args

	// Dispatch on the task.
	switch opts.Task {
	case ExecuteTaskDev:
		return app.Dev(appCtx)
	default:
		// Fixed typo: "uknown" -> "unknown".
		panic(fmt.Sprintf("unknown task: %s", opts.Task))
	}
}
// appContext builds the app.Context for the given Appfile: the app/infra
// tuple, output and cache directories, and shared settings.
func (c *Core) appContext(f *appfile.File) (*app.Context, error) {
	// We need the configuration for the active infrastructure
	// so that we can build the tuple below
	config := f.ActiveInfrastructure()
	if config == nil {
		return nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			f.Project.Infrastructure)
	}

	// The tuple we're looking for is the application type, the
	// infrastructure type, and the infrastructure flavor. Build that
	// tuple. Note the tuple keys off config.Type, not the project's
	// infrastructure name.
	tuple := app.Tuple{
		App:         f.Application.Type,
		Infra:       config.Type,
		InfraFlavor: config.Flavor,
	}

	// The output directory for data. This is either the main app so
	// it goes directly into "app" or it is a dependency and goes into
	// a dep folder.
	outputDir := filepath.Join(c.compileDir, "app")
	if id := f.ID; id != c.appfile.ID {
		outputDir = filepath.Join(
			c.compileDir, fmt.Sprintf("dep-%s", id))
	}

	// The cache directory for this app
	cacheDir := filepath.Join(c.dataDir, "cache", f.ID)
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		return nil, fmt.Errorf(
			"error making cache directory '%s': %s",
			cacheDir, err)
	}

	return &app.Context{
		Dir:         outputDir,
		CacheDir:    cacheDir,
		LocalDir:    c.localDir,
		Tuple:       tuple,
		Appfile:     f,
		Application: f.Application,
		Shared: context.Shared{
			InstallDir: filepath.Join(c.dataDir, "binaries"),
			Directory:  c.dir,
			Ui:         c.ui,
		},
	}, nil
}
// app returns the app.App implementation registered for the tuple in
// the given context.
func (c *Core) app(ctx *app.Context) (app.App, error) {
	log.Printf("[INFO] Loading app implementation for Tuple: %s", ctx.Tuple)

	// Find the factory registered for this tuple.
	factory := app.TupleMap(c.apps).Lookup(ctx.Tuple)
	if factory == nil {
		return nil, fmt.Errorf(
			"app implementation for tuple not found: %s", ctx.Tuple)
	}

	// Instantiate the implementation.
	impl, err := factory()
	if err != nil {
		return nil, fmt.Errorf(
			"app failed to start properly: %s", err)
	}
	return impl, nil
}
// infra returns the infrastructure implementation and context for the
// active infrastructure of the current Appfile. The factory lookup is
// keyed by the infrastructure type on the active config.
func (c *Core) infra() (infrastructure.Infrastructure, *infrastructure.Context, error) {
	// Get the infrastructure configuration
	config := c.appfile.ActiveInfrastructure()
	if config == nil {
		return nil, nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			c.appfile.Project.Infrastructure)
	}

	// Get the infrastructure factory
	f, ok := c.infras[config.Type]
	if !ok {
		return nil, nil, fmt.Errorf(
			"infrastructure type not supported: %s",
			c.appfile.Project.Infrastructure)
	}

	// Start the infrastructure implementation
	infra, err := f()
	if err != nil {
		return nil, nil, err
	}

	// The output directory for data
	outputDir := filepath.Join(
		c.compileDir, fmt.Sprintf("infra-%s", c.appfile.Project.Infrastructure))

	// Build the context
	return infra, &infrastructure.Context{
		Dir:   outputDir,
		Infra: config,
		Shared: context.Shared{
			InstallDir: filepath.Join(c.dataDir, "binaries"),
			Directory:  c.dir,
			Ui:         c.ui,
		},
	}, nil
}
// foundations returns the foundation implementations and their matching
// contexts for the active infrastructure. Returns (nil, nil, nil) when
// the infrastructure declares no foundations.
func (c *Core) foundations() ([]foundation.Foundation, []*foundation.Context, error) {
	// Get the infrastructure configuration
	config := c.appfile.ActiveInfrastructure()
	if config == nil {
		return nil, nil, fmt.Errorf(
			"infrastructure not found in appfile: %s",
			c.appfile.Project.Infrastructure)
	}

	// If there are no foundations, return nothing.
	if len(config.Foundations) == 0 {
		return nil, nil, nil
	}

	// Create the arrays for our list; both slices stay index-aligned.
	fs := make([]foundation.Foundation, 0, len(config.Foundations))
	ctxs := make([]*foundation.Context, 0, cap(fs))
	for _, f := range config.Foundations {
		// The tuple we're looking for is the foundation type, the
		// infrastructure type, and the infrastructure flavor. Build that
		// tuple.
		tuple := foundation.Tuple{
			Type:        f.Name,
			Infra:       config.Type,
			InfraFlavor: config.Flavor,
		}

		// Look for the matching foundation
		fun := foundation.TupleMap(c.foundationMap).Lookup(tuple)
		if fun == nil {
			return nil, nil, fmt.Errorf(
				"foundation implementation for tuple not found: %s",
				tuple)
		}

		// Instantiate the implementation
		impl, err := fun()
		if err != nil {
			return nil, nil, err
		}

		// The output directory for data
		outputDir := filepath.Join(
			c.compileDir, fmt.Sprintf("foundation-%s", f.Name))

		// Build the context
		ctx := &foundation.Context{
			Config:  f.Config,
			Dir:     outputDir,
			Tuple:   tuple,
			Appfile: c.appfile,
			Shared: context.Shared{
				InstallDir: filepath.Join(c.dataDir, "binaries"),
				Directory:  c.dir,
				Ui:         c.ui,
			},
		}

		// Add to our results
		fs = append(fs, impl)
		ctxs = append(ctxs, ctx)
	}

	return fs, ctxs, nil
}
// credsQueryPassExists is shown when previously saved, encrypted
// credentials are found on disk and the decryption password is needed.
const credsQueryPassExists = `
Infrastructure credentials are required for this operation. Otto found
saved credentials that are password protected. Please enter the password
to decrypt these credentials. You may also just hit <enter> and leave
the password blank to force Otto to ask for the credentials again.
`
// credsQueryPassNew is shown when asking for a password to encrypt
// freshly entered credentials before caching them on disk.
const credsQueryPassNew = `
This password will be used to encrypt and save the credentials so they
don't need to be repeated multiple times.
`
|
package rbac
import (
"context"
"strings"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
kuser "k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
rbacvalidation "k8s.io/component-helpers/auth/rbac/validation"
kauthenticationapi "k8s.io/kubernetes/pkg/apis/authentication"
kauthorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/registry/rbac/validation"
e2e "k8s.io/kubernetes/test/e2e/framework"
"github.com/openshift/api/authorization"
"github.com/openshift/api/build"
"github.com/openshift/api/console"
"github.com/openshift/api/image"
"github.com/openshift/api/oauth"
"github.com/openshift/api/project"
"github.com/openshift/api/template"
"github.com/openshift/api/user"
exutil "github.com/openshift/origin/test/extended/util"
)
// read is the standard read-only verb set; copied from bootstrap policy.
var read = []string{"get", "list", "watch"}
// API group names used when building the expected policy rules below;
// copied from bootstrap policy.
const (
	rbacGroup     = rbac.GroupName
	storageGroup  = storage.GroupName
	kAuthzGroup   = kauthorizationapi.GroupName
	kAuthnGroup   = kauthenticationapi.GroupName
	authzGroup    = authorization.GroupName
	buildGroup    = build.GroupName
	imageGroup    = image.GroupName
	oauthGroup    = oauth.GroupName
	projectGroup  = project.GroupName
	templateGroup = template.GroupName
	userGroup     = user.GroupName
	consoleGroup  = console.GroupName

	// Legacy (pre-group) OpenShift resources lived in the empty ("core")
	// API group; these aliases keep the rule definitions readable.
	legacyGroup         = ""
	legacyAuthzGroup    = ""
	legacyBuildGroup    = ""
	legacyImageGroup    = ""
	legacyProjectGroup  = ""
	legacyTemplateGroup = ""
	legacyUserGroup     = ""
	legacyOauthGroup    = ""

	// Provided as CRD via cluster-csi-snapshot-controller-operator
	snapshotGroup = "snapshot.storage.k8s.io"
)
// Do not change any of these lists without approval from the auth and master teams
// Most rules are copied from various cluster roles in bootstrap policy
var (
	// allUnauthenticatedRules are the cluster-wide rules expected for every
	// request, even without authentication.
	allUnauthenticatedRules = []rbacv1.PolicyRule{
		rbacv1helpers.NewRule("get", "create").Groups(buildGroup, legacyBuildGroup).Resources("buildconfigs/webhooks").RuleOrDie(),
		rbacv1helpers.NewRule("impersonate").Groups(kAuthnGroup).Resources("userextras/scopes.authorization.openshift.io").RuleOrDie(),
		rbacv1helpers.NewRule("create").Groups(authzGroup, legacyAuthzGroup).Resources("selfsubjectrulesreviews").RuleOrDie(),
		rbacv1helpers.NewRule("create").Groups(kAuthzGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(),
		rbacv1helpers.NewRule("delete").Groups(oauthGroup, legacyOauthGroup).Resources("oauthaccesstokens", "oauthauthorizetokens").RuleOrDie(),
		// this is openshift specific
		rbacv1helpers.NewRule("get").URLs(
			"/version/openshift",
			"/.well-known",
			"/.well-known/*",
			"/.well-known/oauth-authorization-server",
		).RuleOrDie(),
		// TODO: remove with after 1.15 rebase
		rbacv1helpers.NewRule("get").URLs(
			"/readyz",
		).RuleOrDie(),
		// this is from upstream kube
		rbacv1helpers.NewRule("get").URLs(
			"/healthz", "/livez",
			"/version",
			"/version/",
		).RuleOrDie(),
	}

	// allAuthenticatedRules are the cluster-wide rules expected for every
	// authenticated user, on top of the unauthenticated rules appended below.
	allAuthenticatedRules = append(
		[]rbacv1.PolicyRule{
			rbacv1helpers.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/docker", "builds/optimizeddocker").RuleOrDie(),
			rbacv1helpers.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/jenkinspipeline").RuleOrDie(),
			rbacv1helpers.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/source").RuleOrDie(),
			rbacv1helpers.NewRule("get").Groups(userGroup, legacyUserGroup).Resources("users").Names("~").RuleOrDie(),
			rbacv1helpers.NewRule("list").Groups(projectGroup, legacyProjectGroup).Resources("projectrequests").RuleOrDie(),
			rbacv1helpers.NewRule("get", "list").Groups(authzGroup, legacyAuthzGroup).Resources("clusterroles").RuleOrDie(),
			rbacv1helpers.NewRule(read...).Groups(rbacGroup).Resources("clusterroles").RuleOrDie(),
			rbacv1helpers.NewRule("get", "list").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
			rbacv1helpers.NewRule("get", "list", "watch").Groups(snapshotGroup).Resources("volumesnapshotclasses").RuleOrDie(),
			rbacv1helpers.NewRule("list", "watch").Groups(projectGroup, legacyProjectGroup).Resources("projects").RuleOrDie(),
			// These custom resources are used to extend console functionality
			// The console team is working on eliminating this exception in the near future
			rbacv1helpers.NewRule(read...).Groups(consoleGroup).Resources("consoleclidownloads", "consolelinks", "consoleexternalloglinks", "consolenotifications", "consoleyamlsamples", "consolequickstarts", "consoleplugins").RuleOrDie(),
			// HelmChartRepository instances keep Helm chart repository configuration
			// By default users are able to browse charts from all configured repositories through console UI
			rbacv1helpers.NewRule("get", "list").Groups("helm.openshift.io").Resources("helmchartrepositories").RuleOrDie(),
			// TODO: remove when openshift-apiserver has removed these
			rbacv1helpers.NewRule("get").URLs(
				"/healthz/",
				"/oapi", "/oapi/*",
				"/osapi", "/osapi/",
				"/swaggerapi", "/swaggerapi/*", "/swagger.json", "/swagger-2.0.0.pb-v1",
				"/version/*",
				"/",
			).RuleOrDie(),
			// this is from upstream kube
			rbacv1helpers.NewRule("get").URLs(
				"/",
				"/openapi", "/openapi/*",
				"/api", "/api/*",
				"/apis", "/apis/*",
			).RuleOrDie(),
		},
		allUnauthenticatedRules...,
	)

	// group -> namespace -> rules
	groupNamespaceRules = map[string]map[string][]rbacv1.PolicyRule{
		kuser.AllAuthenticated: {
			"openshift": {
				rbacv1helpers.NewRule(read...).Groups(templateGroup, legacyTemplateGroup).Resources("templates").RuleOrDie(),
				rbacv1helpers.NewRule(read...).Groups(imageGroup, legacyImageGroup).Resources("imagestreams", "imagestreamtags", "imagestreamimages", "imagetags").RuleOrDie(),
				rbacv1helpers.NewRule("get").Groups(imageGroup, legacyImageGroup).Resources("imagestreams/layers").RuleOrDie(),
				rbacv1helpers.NewRule("get").Groups("").Resources("configmaps").RuleOrDie(),
			},
			"openshift-config-managed": {
				rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names("console-public").RuleOrDie(),
				rbacv1helpers.NewRule(read...).Groups("").Resources("configmaps").Names("oauth-serving-cert").RuleOrDie(),
				rbacv1helpers.NewRule("get").Groups("").Resources("configmaps").Names("openshift-network-features").RuleOrDie(),
			},
			"kube-system": {
				// this allows every authenticated user to use in-cluster client certificate termination
				rbacv1helpers.NewRule(read...).Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(),
			},
		},
		kuser.AllUnauthenticated:     {}, // no rules except the cluster wide ones
		"system:authenticated:oauth": {}, // no rules except the cluster wide ones
	}
)
var _ = g.Describe("[sig-auth][Feature:OpenShiftAuthorization] The default cluster RBAC policy", func() {
	defer g.GinkgoRecover()

	oc := exutil.NewCLI("default-rbac-policy")

	g.It("should have correct RBAC rules", func() {
		kubeInformers := informers.NewSharedInformerFactory(oc.AdminKubeClient(), 20*time.Minute)
		ruleResolver := exutil.NewRuleResolver(kubeInformers.Rbac().V1()) // signal what informers we want to use early
		stopCh := make(chan struct{})
		defer func() { close(stopCh) }()
		kubeInformers.Start(stopCh)

		// Wait up to 30 seconds for all four RBAC informers to sync before
		// resolving any rules; resolution against unsynced caches would be
		// incomplete.
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		if ok := cache.WaitForCacheSync(ctx.Done(),
			kubeInformers.Rbac().V1().ClusterRoles().Informer().HasSynced,
			kubeInformers.Rbac().V1().ClusterRoleBindings().Informer().HasSynced,
			kubeInformers.Rbac().V1().Roles().Informer().HasSynced,
			kubeInformers.Rbac().V1().RoleBindings().Informer().HasSynced,
		); !ok {
			exutil.FatalErr("failed to sync RBAC cache")
		}

		// Every namespace is checked so namespace-scoped grants cannot hide.
		namespaces, err := oc.AdminKubeClient().CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
		if err != nil {
			exutil.FatalErr(err)
		}

		g.By("should only allow the system:authenticated group to access certain policy rules", func() {
			testAllGroupRules(ruleResolver, kuser.AllAuthenticated, allAuthenticatedRules, namespaces.Items)
		})
		g.By("should only allow the system:unauthenticated group to access certain policy rules", func() {
			testAllGroupRules(ruleResolver, kuser.AllUnauthenticated, allUnauthenticatedRules, namespaces.Items)
		})
		g.By("should only allow the system:authenticated:oauth group to access certain policy rules", func() {
			testAllGroupRules(ruleResolver, "system:authenticated:oauth", []rbacv1.PolicyRule{
				rbacv1helpers.NewRule("create").Groups(projectGroup, legacyProjectGroup).Resources("projectrequests").RuleOrDie(),
				rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(oauthGroup).Resources("useroauthaccesstokens").RuleOrDie(),
			}, namespaces.Items)
		})
	})
})
// testAllGroupRules verifies the effective rules for group at cluster scope
// and then within every provided namespace, merging the namespace-specific
// expectations from groupNamespaceRules with the cluster-wide ones.
func testAllGroupRules(ruleResolver validation.AuthorizationRuleResolver, group string, expectedClusterRules []rbacv1.PolicyRule, namespaces []corev1.Namespace) {
	// Cluster-wide check first.
	testGroupRules(ruleResolver, group, metav1.NamespaceNone, expectedClusterRules)

	for _, ns := range namespaces {
		// A namespace's expectations are its namespace-scoped rules plus
		// every cluster-wide rule.
		nsRules := groupNamespaceRules[group][ns.Name]
		merged := make([]rbacv1.PolicyRule, 0, len(nsRules)+len(expectedClusterRules))
		merged = append(merged, nsRules...)
		merged = append(merged, expectedClusterRules...)
		testGroupRules(ruleResolver, group, ns.Name, merged)
	}
}
// testGroupRules resolves the effective RBAC rules for group in namespace
// and compares them with expectedRules in both directions: any permission
// the group has beyond the expectations is a hard failure, while stale test
// data (expected rules the group no longer has) is only fatal once more
// than 15 rules are unaccounted for.
func testGroupRules(ruleResolver validation.AuthorizationRuleResolver, group, namespace string, expectedRules []rbacv1.PolicyRule) {
	actualRules, err := ruleResolver.RulesFor(&kuser.DefaultInfo{Groups: []string{group}}, namespace)
	o.Expect(err).NotTo(o.HaveOccurred()) // our default RBAC policy should never have rule resolution errors

	// Direction 1: everything the group can actually do must be predicted.
	if covered, extra := rbacvalidation.Covers(expectedRules, actualRules); !covered {
		e2e.Failf("%s has extra permissions in namespace %q:\n%s", group, namespace, rulesToString(extra))
	}

	// force test data to be cleaned up every so often but allow extra rules to not deadlock new changes
	if covered, unused := rbacvalidation.Covers(actualRules, expectedRules); !covered {
		report := e2e.Logf
		if len(unused) > 15 {
			report = e2e.Failf
		}
		report("test data for %s has too many unnecessary permissions:\n%s", group, rulesToString(unused))
	}
}
// rulesToString renders rules as a sorted, de-duplicated, newline-separated
// list of compact rule descriptions suitable for failure messages.
func rulesToString(rules []rbacv1.PolicyRule) string {
	// Best effort: collapse equivalent rules; fall back to the raw input
	// if compaction fails.
	toPrint := rules
	if compacted, err := validation.CompactRules(rules); err == nil {
		toPrint = compacted
	}

	descriptions := sets.NewString()
	for _, rule := range toPrint {
		descriptions.Insert(rbacv1helpers.CompactString(rule))
	}
	// List() returns the set's contents in sorted order.
	return strings.Join(descriptions.List(), "\n")
}
default RBAC: add rule for restricted-v2 SCC
package rbac
import (
"context"
"strings"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
kuser "k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
rbacvalidation "k8s.io/component-helpers/auth/rbac/validation"
kauthenticationapi "k8s.io/kubernetes/pkg/apis/authentication"
kauthorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/registry/rbac/validation"
e2e "k8s.io/kubernetes/test/e2e/framework"
"github.com/openshift/api/authorization"
"github.com/openshift/api/build"
"github.com/openshift/api/console"
"github.com/openshift/api/image"
"github.com/openshift/api/oauth"
"github.com/openshift/api/project"
"github.com/openshift/api/security"
"github.com/openshift/api/template"
"github.com/openshift/api/user"
exutil "github.com/openshift/origin/test/extended/util"
)
// copied from bootstrap policy
// read is the verb set that grants read-only access to a resource.
var read = []string{"get", "list", "watch"}

// copied from bootstrap policy
// API group name constants; the legacy* names are the pre-API-group ("")
// aliases kept for backwards compatibility.
const (
	rbacGroup     = rbac.GroupName
	storageGroup  = storage.GroupName
	kAuthzGroup   = kauthorizationapi.GroupName
	kAuthnGroup   = kauthenticationapi.GroupName
	authzGroup    = authorization.GroupName
	buildGroup    = build.GroupName
	imageGroup    = image.GroupName
	oauthGroup    = oauth.GroupName
	projectGroup  = project.GroupName
	templateGroup = template.GroupName
	userGroup     = user.GroupName
	consoleGroup  = console.GroupName

	legacyGroup         = ""
	legacyAuthzGroup    = ""
	legacyBuildGroup    = ""
	legacyImageGroup    = ""
	legacyProjectGroup  = ""
	legacyTemplateGroup = ""
	legacyUserGroup     = ""
	legacyOauthGroup    = ""

	// Provided as CRD via cluster-csi-snapshot-controller-operator
	snapshotGroup = "snapshot.storage.k8s.io"
)
// Do not change any of these lists without approval from the auth and master teams
// Most rules are copied from various cluster roles in bootstrap policy
var (
	// allUnauthenticatedRules are the cluster-wide rules expected for every
	// request, even without authentication.
	allUnauthenticatedRules = []rbacv1.PolicyRule{
		rbacv1helpers.NewRule("get", "create").Groups(buildGroup, legacyBuildGroup).Resources("buildconfigs/webhooks").RuleOrDie(),
		rbacv1helpers.NewRule("impersonate").Groups(kAuthnGroup).Resources("userextras/scopes.authorization.openshift.io").RuleOrDie(),
		rbacv1helpers.NewRule("create").Groups(authzGroup, legacyAuthzGroup).Resources("selfsubjectrulesreviews").RuleOrDie(),
		rbacv1helpers.NewRule("create").Groups(kAuthzGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(),
		rbacv1helpers.NewRule("delete").Groups(oauthGroup, legacyOauthGroup).Resources("oauthaccesstokens", "oauthauthorizetokens").RuleOrDie(),
		// this is openshift specific
		rbacv1helpers.NewRule("get").URLs(
			"/version/openshift",
			"/.well-known",
			"/.well-known/*",
			"/.well-known/oauth-authorization-server",
		).RuleOrDie(),
		// TODO: remove with after 1.15 rebase
		rbacv1helpers.NewRule("get").URLs(
			"/readyz",
		).RuleOrDie(),
		// this is from upstream kube
		rbacv1helpers.NewRule("get").URLs(
			"/healthz", "/livez",
			"/version",
			"/version/",
		).RuleOrDie(),
	}

	// allAuthenticatedRules are the cluster-wide rules expected for every
	// authenticated user, on top of the unauthenticated rules appended below.
	allAuthenticatedRules = append(
		[]rbacv1.PolicyRule{
			rbacv1helpers.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/docker", "builds/optimizeddocker").RuleOrDie(),
			rbacv1helpers.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/jenkinspipeline").RuleOrDie(),
			rbacv1helpers.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/source").RuleOrDie(),
			rbacv1helpers.NewRule("get").Groups(userGroup, legacyUserGroup).Resources("users").Names("~").RuleOrDie(),
			rbacv1helpers.NewRule("list").Groups(projectGroup, legacyProjectGroup).Resources("projectrequests").RuleOrDie(),
			rbacv1helpers.NewRule("get", "list").Groups(authzGroup, legacyAuthzGroup).Resources("clusterroles").RuleOrDie(),
			rbacv1helpers.NewRule(read...).Groups(rbacGroup).Resources("clusterroles").RuleOrDie(),
			rbacv1helpers.NewRule("get", "list").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
			rbacv1helpers.NewRule("get", "list", "watch").Groups(snapshotGroup).Resources("volumesnapshotclasses").RuleOrDie(),
			rbacv1helpers.NewRule("list", "watch").Groups(projectGroup, legacyProjectGroup).Resources("projects").RuleOrDie(),
			// every authenticated user may run pods under the restricted-v2 SCC
			rbacv1helpers.NewRule("use").Groups(security.GroupName).Resources("securitycontextconstraints").Names("restricted-v2").RuleOrDie(),
			// These custom resources are used to extend console functionality
			// The console team is working on eliminating this exception in the near future
			rbacv1helpers.NewRule(read...).Groups(consoleGroup).Resources("consoleclidownloads", "consolelinks", "consoleexternalloglinks", "consolenotifications", "consoleyamlsamples", "consolequickstarts", "consoleplugins").RuleOrDie(),
			// HelmChartRepository instances keep Helm chart repository configuration
			// By default users are able to browse charts from all configured repositories through console UI
			rbacv1helpers.NewRule("get", "list").Groups("helm.openshift.io").Resources("helmchartrepositories").RuleOrDie(),
			// TODO: remove when openshift-apiserver has removed these
			rbacv1helpers.NewRule("get").URLs(
				"/healthz/",
				"/oapi", "/oapi/*",
				"/osapi", "/osapi/",
				"/swaggerapi", "/swaggerapi/*", "/swagger.json", "/swagger-2.0.0.pb-v1",
				"/version/*",
				"/",
			).RuleOrDie(),
			// this is from upstream kube
			rbacv1helpers.NewRule("get").URLs(
				"/",
				"/openapi", "/openapi/*",
				"/api", "/api/*",
				"/apis", "/apis/*",
			).RuleOrDie(),
		},
		allUnauthenticatedRules...,
	)

	// group -> namespace -> rules
	groupNamespaceRules = map[string]map[string][]rbacv1.PolicyRule{
		kuser.AllAuthenticated: {
			"openshift": {
				rbacv1helpers.NewRule(read...).Groups(templateGroup, legacyTemplateGroup).Resources("templates").RuleOrDie(),
				rbacv1helpers.NewRule(read...).Groups(imageGroup, legacyImageGroup).Resources("imagestreams", "imagestreamtags", "imagestreamimages", "imagetags").RuleOrDie(),
				rbacv1helpers.NewRule("get").Groups(imageGroup, legacyImageGroup).Resources("imagestreams/layers").RuleOrDie(),
				rbacv1helpers.NewRule("get").Groups("").Resources("configmaps").RuleOrDie(),
			},
			"openshift-config-managed": {
				rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names("console-public").RuleOrDie(),
				rbacv1helpers.NewRule(read...).Groups("").Resources("configmaps").Names("oauth-serving-cert").RuleOrDie(),
				rbacv1helpers.NewRule("get").Groups("").Resources("configmaps").Names("openshift-network-features").RuleOrDie(),
			},
			"kube-system": {
				// this allows every authenticated user to use in-cluster client certificate termination
				rbacv1helpers.NewRule(read...).Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(),
			},
		},
		kuser.AllUnauthenticated:     {}, // no rules except the cluster wide ones
		"system:authenticated:oauth": {}, // no rules except the cluster wide ones
	}
)
var _ = g.Describe("[sig-auth][Feature:OpenShiftAuthorization] The default cluster RBAC policy", func() {
	defer g.GinkgoRecover()

	oc := exutil.NewCLI("default-rbac-policy")

	g.It("should have correct RBAC rules", func() {
		kubeInformers := informers.NewSharedInformerFactory(oc.AdminKubeClient(), 20*time.Minute)
		ruleResolver := exutil.NewRuleResolver(kubeInformers.Rbac().V1()) // signal what informers we want to use early
		stopCh := make(chan struct{})
		defer func() { close(stopCh) }()
		kubeInformers.Start(stopCh)

		// Wait up to 30 seconds for all four RBAC informers to sync before
		// resolving any rules; resolution against unsynced caches would be
		// incomplete.
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		if ok := cache.WaitForCacheSync(ctx.Done(),
			kubeInformers.Rbac().V1().ClusterRoles().Informer().HasSynced,
			kubeInformers.Rbac().V1().ClusterRoleBindings().Informer().HasSynced,
			kubeInformers.Rbac().V1().Roles().Informer().HasSynced,
			kubeInformers.Rbac().V1().RoleBindings().Informer().HasSynced,
		); !ok {
			exutil.FatalErr("failed to sync RBAC cache")
		}

		// Every namespace is checked so namespace-scoped grants cannot hide.
		namespaces, err := oc.AdminKubeClient().CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
		if err != nil {
			exutil.FatalErr(err)
		}

		g.By("should only allow the system:authenticated group to access certain policy rules", func() {
			testAllGroupRules(ruleResolver, kuser.AllAuthenticated, allAuthenticatedRules, namespaces.Items)
		})
		g.By("should only allow the system:unauthenticated group to access certain policy rules", func() {
			testAllGroupRules(ruleResolver, kuser.AllUnauthenticated, allUnauthenticatedRules, namespaces.Items)
		})
		g.By("should only allow the system:authenticated:oauth group to access certain policy rules", func() {
			testAllGroupRules(ruleResolver, "system:authenticated:oauth", []rbacv1.PolicyRule{
				rbacv1helpers.NewRule("create").Groups(projectGroup, legacyProjectGroup).Resources("projectrequests").RuleOrDie(),
				rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(oauthGroup).Resources("useroauthaccesstokens").RuleOrDie(),
			}, namespaces.Items)
		})
	})
})
// testAllGroupRules verifies the effective rules for group at cluster scope
// and then within every provided namespace, merging the namespace-specific
// expectations from groupNamespaceRules with the cluster-wide ones.
func testAllGroupRules(ruleResolver validation.AuthorizationRuleResolver, group string, expectedClusterRules []rbacv1.PolicyRule, namespaces []corev1.Namespace) {
	// Cluster-wide check first.
	testGroupRules(ruleResolver, group, metav1.NamespaceNone, expectedClusterRules)

	for _, ns := range namespaces {
		// A namespace's expectations are its namespace-scoped rules plus
		// every cluster-wide rule.
		nsRules := groupNamespaceRules[group][ns.Name]
		merged := make([]rbacv1.PolicyRule, 0, len(nsRules)+len(expectedClusterRules))
		merged = append(merged, nsRules...)
		merged = append(merged, expectedClusterRules...)
		testGroupRules(ruleResolver, group, ns.Name, merged)
	}
}
// testGroupRules resolves the effective RBAC rules for group in namespace
// and compares them with expectedRules in both directions: any permission
// the group has beyond the expectations is a hard failure, while stale test
// data (expected rules the group no longer has) is only fatal once more
// than 15 rules are unaccounted for.
func testGroupRules(ruleResolver validation.AuthorizationRuleResolver, group, namespace string, expectedRules []rbacv1.PolicyRule) {
	actualRules, err := ruleResolver.RulesFor(&kuser.DefaultInfo{Groups: []string{group}}, namespace)
	o.Expect(err).NotTo(o.HaveOccurred()) // our default RBAC policy should never have rule resolution errors

	// Direction 1: everything the group can actually do must be predicted.
	if covered, extra := rbacvalidation.Covers(expectedRules, actualRules); !covered {
		e2e.Failf("%s has extra permissions in namespace %q:\n%s", group, namespace, rulesToString(extra))
	}

	// force test data to be cleaned up every so often but allow extra rules to not deadlock new changes
	if covered, unused := rbacvalidation.Covers(actualRules, expectedRules); !covered {
		report := e2e.Logf
		if len(unused) > 15 {
			report = e2e.Failf
		}
		report("test data for %s has too many unnecessary permissions:\n%s", group, rulesToString(unused))
	}
}
// rulesToString renders rules as a sorted, de-duplicated, newline-separated
// list of compact rule descriptions suitable for failure messages.
func rulesToString(rules []rbacv1.PolicyRule) string {
	// Best effort: collapse equivalent rules; fall back to the raw input
	// if compaction fails.
	toPrint := rules
	if compacted, err := validation.CompactRules(rules); err == nil {
		toPrint = compacted
	}

	descriptions := sets.NewString()
	for _, rule := range toPrint {
		descriptions.Insert(rbacv1helpers.CompactString(rule))
	}
	// List() returns the set's contents in sorted order.
	return strings.Join(descriptions.List(), "\n")
}
|
/*
Copyright 2022 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package migrate
import (
"context"
"os"
"testing"
"time"
testlogger "github.com/go-logr/logr/testing"
apiextinstall "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/jetstack/cert-manager/cmd/ctl/pkg/upgrade/migrate"
"github.com/jetstack/cert-manager/pkg/webhook/handlers"
"github.com/jetstack/cert-manager/pkg/webhook/handlers/testdata/apis/testgroup/install"
"github.com/jetstack/cert-manager/pkg/webhook/handlers/testdata/apis/testgroup/v1"
"github.com/jetstack/cert-manager/pkg/webhook/handlers/testdata/apis/testgroup/v2"
"github.com/jetstack/cert-manager/test/integration/framework"
)
var (
	// equivalentResources holds the same logical object expressed in each
	// served API version of the TestType CRD, keyed by version name. The
	// test creates whichever entry matches the CRD's current storage
	// version.
	equivalentResources = map[string]client.Object{
		"v1": &v1.TestType{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "object",
				Namespace: "default",
			},
			TestField:          "abc",
			TestFieldImmutable: "def",
		},
		"v2": &v2.TestType{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "object",
				Namespace: "default",
			},
			TestField:          "abc",
			TestFieldImmutable: "def",
		},
	}
)
// newScheme builds a runtime scheme containing both the apiextensions
// types and the TestType test API group.
func newScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	apiextinstall.Install(s)
	install.Install(s)
	return s
}
// TestCtlUpgradeMigrate exercises the `ctl upgrade migrate` flow end to end:
// it creates a TestType object under the CRD's storage version, switches the
// CRD's storage version, runs the migrator, and verifies that
// status.storedVersions collapses back to a single version so the old API
// version can be removed.
func TestCtlUpgradeMigrate(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()

	// Create the control plane with the TestType conversion handlers registered
	scheme := newScheme()

	// name of the testtype CRD resource
	crdName := "testtypes.testgroup.testing.cert-manager.io"
	// NOTE: logr removed the testing.TestLogger struct; NewTestLogger(t) is
	// the supported constructor for a test-scoped logger.
	restCfg, stop := framework.RunControlPlane(t, context.Background(),
		framework.WithCRDDirectory("../../../../pkg/webhook/handlers/testdata/apis/testgroup/crds"),
		framework.WithWebhookConversionHandler(handlers.NewSchemeBackedConverter(testlogger.NewTestLogger(t), scheme)))
	defer stop()

	// Ensure the OpenAPI endpoint has been updated with the TestType CRD
	framework.WaitForOpenAPIResourcesToBeLoaded(t, ctx, restCfg, schema.GroupVersionKind{
		Group:   "testgroup.testing.cert-manager.io",
		Version: "v1",
		Kind:    "TestType",
	})

	// Create an API client
	cl, err := client.New(restCfg, client.Options{Scheme: scheme})
	if err != nil {
		t.Fatal(err)
	}

	// Fetch a copy of the recently created TestType CRD
	crd := &apiext.CustomResourceDefinition{}
	if err := cl.Get(ctx, client.ObjectKey{Name: crdName}, crd); err != nil {
		t.Fatal(err)
	}

	// Identify the current storage version and one non-storage version for this CRD.
	// We'll be creating objects and then changing the storage version on the CRD to
	// the 'nonStorageVersion' and ensuring the migration/upgrade is successful.
	storageVersion, nonStorageVersion := versionsForCRD(crd)
	if storageVersion == "" || nonStorageVersion == "" {
		t.Fatal("this test requires testdata with both a storage and non-storage version set")
	}

	// Ensure the original storage version is the only one on the CRD
	if len(crd.Status.StoredVersions) != 1 || crd.Status.StoredVersions[0] != storageVersion {
		t.Errorf("Expected status.storedVersions to only contain the storage version %q but it was: %v", storageVersion, crd.Status.StoredVersions)
	}

	// Create a resource
	obj := equivalentResources[storageVersion]
	if err := cl.Create(ctx, obj); err != nil {
		t.Errorf("Failed to create test resource: %v", err)
	}

	// Set the storage version to the 'nonStorageVersion'
	setStorageVersion(crd, nonStorageVersion)
	if err := cl.Update(ctx, crd); err != nil {
		t.Fatalf("Failed to update CRD storage version: %v", err)
	}
	// Both versions are now recorded as stored, since an object was persisted
	// under the old storage version before the switch.
	if len(crd.Status.StoredVersions) != 2 || crd.Status.StoredVersions[0] != storageVersion || crd.Status.StoredVersions[1] != nonStorageVersion {
		t.Fatalf("Expected status.storedVersions to contain [%s, %s] but it was: %v", storageVersion, nonStorageVersion, crd.Status.StoredVersions)
	}

	// Run the migrator and migrate all objects to the 'nonStorageVersion' (which is now the new storage version)
	migrator := migrate.NewMigrator(cl, false, os.Stdout, os.Stderr)
	if err := migrator.Run(ctx, nonStorageVersion, []string{crdName}); err != nil {
		t.Errorf("migrator failed to run: %v", err)
	}

	// Check the status.storedVersions field to ensure it only contains one element
	crd = &apiext.CustomResourceDefinition{}
	if err := cl.Get(ctx, client.ObjectKey{Name: crdName}, crd); err != nil {
		t.Fatal(err)
	}
	if len(crd.Status.StoredVersions) != 1 || crd.Status.StoredVersions[0] != nonStorageVersion {
		t.Fatalf("Expected status.storedVersions to be %q but it was: %v", nonStorageVersion, crd.Status.StoredVersions)
	}

	// Remove the previous storage version from the CRD and update it
	removeAPIVersion(crd, storageVersion)
	if err := cl.Update(ctx, crd); err != nil {
		t.Fatalf("Failed to remove old API version: %v", err)
	}

	// Attempt to read a resource list in the new API version
	objList := &unstructured.UnstructuredList{}
	objList.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   crd.Spec.Group,
		Version: nonStorageVersion,
		Kind:    crd.Spec.Names.ListKind,
	})
	if err := cl.List(ctx, objList); err != nil {
		t.Fatalf("Failed to list objects (gvk %v): %v", objList.GroupVersionKind(), err)
	}
	if len(objList.Items) != 1 {
		t.Fatalf("Expected a single TestType resource to exist")
	}
}
// versionsForCRD returns the name of the CRD's current storage version and
// the name of one non-storage version. Either result is the empty string if
// no matching version exists in spec.versions.
func versionsForCRD(crd *apiext.CustomResourceDefinition) (storage, nonstorage string) {
	// Use the named result parameters directly instead of shadowing them
	// with redundant locals.
	for _, v := range crd.Spec.Versions {
		if v.Storage {
			storage = v.Name
		} else {
			nonstorage = v.Name
		}
		// Stop as soon as one of each has been found.
		if storage != "" && nonstorage != "" {
			break
		}
	}
	return storage, nonstorage
}
// setStorageVersion marks newStorageVersion as the CRD's storage version
// and clears the storage flag on every other version, modifying crd's
// spec.versions in place.
func setStorageVersion(crd *apiext.CustomResourceDefinition, newStorageVersion string) {
	for i := range crd.Spec.Versions {
		// Exactly the matching version carries the storage flag.
		crd.Spec.Versions[i].Storage = crd.Spec.Versions[i].Name == newStorageVersion
	}
}
// removeAPIVersion drops the version with the given name from the CRD's
// spec.versions list, modifying crd in place.
func removeAPIVersion(crd *apiext.CustomResourceDefinition, version string) {
	var remaining []apiext.CustomResourceDefinitionVersion
	for i := range crd.Spec.Versions {
		if v := crd.Spec.Versions[i]; v.Name != version {
			remaining = append(remaining, v)
		}
	}
	crd.Spec.Versions = remaining
}
Fix call to NewTestLogger after logr upgrade
Signed-off-by: James Munnelly <27e4c4b82a53c67f369351263d01ba84ab7f0685@apple.com>
/*
Copyright 2022 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package migrate
import (
"context"
"os"
"testing"
"time"
testlogger "github.com/go-logr/logr/testing"
apiextinstall "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/jetstack/cert-manager/cmd/ctl/pkg/upgrade/migrate"
"github.com/jetstack/cert-manager/pkg/webhook/handlers"
"github.com/jetstack/cert-manager/pkg/webhook/handlers/testdata/apis/testgroup/install"
"github.com/jetstack/cert-manager/pkg/webhook/handlers/testdata/apis/testgroup/v1"
"github.com/jetstack/cert-manager/pkg/webhook/handlers/testdata/apis/testgroup/v2"
"github.com/jetstack/cert-manager/test/integration/framework"
)
var (
	// equivalentResources holds the same logical object expressed in each
	// served API version of the TestType CRD, keyed by version name. The
	// test creates whichever entry matches the CRD's current storage
	// version.
	equivalentResources = map[string]client.Object{
		"v1": &v1.TestType{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "object",
				Namespace: "default",
			},
			TestField:          "abc",
			TestFieldImmutable: "def",
		},
		"v2": &v2.TestType{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "object",
				Namespace: "default",
			},
			TestField:          "abc",
			TestFieldImmutable: "def",
		},
	}
)
// newScheme builds a runtime scheme containing both the apiextensions
// types and the TestType test API group.
func newScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	apiextinstall.Install(s)
	install.Install(s)
	return s
}
// TestCtlUpgradeMigrate exercises the `ctl upgrade migrate` flow end to end:
// it creates a TestType object under the CRD's storage version, switches the
// CRD's storage version, runs the migrator, and verifies that
// status.storedVersions collapses back to a single version so the old API
// version can be removed.
func TestCtlUpgradeMigrate(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()

	// Create the control plane with the TestType conversion handlers registered
	scheme := newScheme()

	// name of the testtype CRD resource
	crdName := "testtypes.testgroup.testing.cert-manager.io"
	restCfg, stop := framework.RunControlPlane(t, context.Background(),
		framework.WithCRDDirectory("../../../../pkg/webhook/handlers/testdata/apis/testgroup/crds"),
		framework.WithWebhookConversionHandler(handlers.NewSchemeBackedConverter(testlogger.NewTestLogger(t), scheme)))
	defer stop()

	// Ensure the OpenAPI endpoint has been updated with the TestType CRD
	framework.WaitForOpenAPIResourcesToBeLoaded(t, ctx, restCfg, schema.GroupVersionKind{
		Group:   "testgroup.testing.cert-manager.io",
		Version: "v1",
		Kind:    "TestType",
	})

	// Create an API client
	cl, err := client.New(restCfg, client.Options{Scheme: scheme})
	if err != nil {
		t.Fatal(err)
	}

	// Fetch a copy of the recently created TestType CRD
	crd := &apiext.CustomResourceDefinition{}
	if err := cl.Get(ctx, client.ObjectKey{Name: crdName}, crd); err != nil {
		t.Fatal(err)
	}

	// Identify the current storage version and one non-storage version for this CRD.
	// We'll be creating objects and then changing the storage version on the CRD to
	// the 'nonStorageVersion' and ensuring the migration/upgrade is successful.
	storageVersion, nonStorageVersion := versionsForCRD(crd)
	if storageVersion == "" || nonStorageVersion == "" {
		t.Fatal("this test requires testdata with both a storage and non-storage version set")
	}

	// Ensure the original storage version is the only one on the CRD
	if len(crd.Status.StoredVersions) != 1 || crd.Status.StoredVersions[0] != storageVersion {
		t.Errorf("Expected status.storedVersions to only contain the storage version %q but it was: %v", storageVersion, crd.Status.StoredVersions)
	}

	// Create a resource
	obj := equivalentResources[storageVersion]
	if err := cl.Create(ctx, obj); err != nil {
		t.Errorf("Failed to create test resource: %v", err)
	}

	// Set the storage version to the 'nonStorageVersion'
	setStorageVersion(crd, nonStorageVersion)
	if err := cl.Update(ctx, crd); err != nil {
		t.Fatalf("Failed to update CRD storage version: %v", err)
	}
	// Both versions are now recorded as stored, since an object was persisted
	// under the old storage version before the switch.
	if len(crd.Status.StoredVersions) != 2 || crd.Status.StoredVersions[0] != storageVersion || crd.Status.StoredVersions[1] != nonStorageVersion {
		t.Fatalf("Expected status.storedVersions to contain [%s, %s] but it was: %v", storageVersion, nonStorageVersion, crd.Status.StoredVersions)
	}

	// Run the migrator and migrate all objects to the 'nonStorageVersion' (which is now the new storage version)
	migrator := migrate.NewMigrator(cl, false, os.Stdout, os.Stderr)
	if err := migrator.Run(ctx, nonStorageVersion, []string{crdName}); err != nil {
		t.Errorf("migrator failed to run: %v", err)
	}

	// Check the status.storedVersions field to ensure it only contains one element
	crd = &apiext.CustomResourceDefinition{}
	if err := cl.Get(ctx, client.ObjectKey{Name: crdName}, crd); err != nil {
		t.Fatal(err)
	}
	if len(crd.Status.StoredVersions) != 1 || crd.Status.StoredVersions[0] != nonStorageVersion {
		t.Fatalf("Expected status.storedVersions to be %q but it was: %v", nonStorageVersion, crd.Status.StoredVersions)
	}

	// Remove the previous storage version from the CRD and update it
	removeAPIVersion(crd, storageVersion)
	if err := cl.Update(ctx, crd); err != nil {
		t.Fatalf("Failed to remove old API version: %v", err)
	}

	// Attempt to read a resource list in the new API version
	objList := &unstructured.UnstructuredList{}
	objList.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   crd.Spec.Group,
		Version: nonStorageVersion,
		Kind:    crd.Spec.Names.ListKind,
	})
	if err := cl.List(ctx, objList); err != nil {
		t.Fatalf("Failed to list objects (gvk %v): %v", objList.GroupVersionKind(), err)
	}
	if len(objList.Items) != 1 {
		t.Fatalf("Expected a single TestType resource to exist")
	}
}
// versionsForCRD scans the CRD's spec.versions and returns the name of
// the version flagged as the storage version together with the name of
// one version that is not the storage version. Either result may be
// empty when no matching version exists.
func versionsForCRD(crd *apiext.CustomResourceDefinition) (storage, nonstorage string) {
	for _, version := range crd.Spec.Versions {
		if version.Storage {
			storage = version.Name
		} else {
			nonstorage = version.Name
		}
		// Stop as soon as both roles have been filled.
		if storage != "" && nonstorage != "" {
			return storage, nonstorage
		}
	}
	return storage, nonstorage
}
// setStorageVersion marks newStorageVersion as the storage version on
// the CRD and clears the storage flag from any other version carrying it.
func setStorageVersion(crd *apiext.CustomResourceDefinition, newStorageVersion string) {
	versions := crd.Spec.Versions
	for i := range versions {
		if versions[i].Name == newStorageVersion {
			versions[i].Storage = true
			continue
		}
		if versions[i].Storage {
			versions[i].Storage = false
		}
	}
}
// removeAPIVersion drops the named version from the CRD's spec.versions,
// preserving the relative order of the remaining versions.
func removeAPIVersion(crd *apiext.CustomResourceDefinition, version string) {
	var kept []apiext.CustomResourceDefinitionVersion
	for _, candidate := range crd.Spec.Versions {
		if candidate.Name == version {
			continue
		}
		kept = append(kept, candidate)
	}
	crd.Spec.Versions = kept
}
|
// (c) Copyright 2015 JONNALAGADDA Srinivas
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flow
import "fmt"
import "sort"
import "sync"
// Group represents a specified collection of users. A user belongs
// to zero or more groups.
type Group struct {
	id    uint16       // globally-unique ID
	name  string       // globally-unique name
	users []uint64     // user IDs in this group; kept sorted ascending by addUser
	mutex sync.RWMutex // guards users; id and name are not mutated by any method shown here
}
// NewGroup creates and initialises a group.
//
// Usually, all available groups should be loaded during system
// initialization. Only groups created during runtime should be added
// dynamically. An error is returned when the ID is zero or the name
// is empty.
func NewGroup(id uint16, name string) (*Group, error) {
	if id == 0 {
		return nil, fmt.Errorf("invalid group data -- id: %d, name: %s", id, name)
	}
	if name == "" {
		return nil, fmt.Errorf("invalid group data -- id: %d, name: %s", id, name)
	}
	return &Group{id: id, name: name}, nil
}
// AddUser includes the given user in this group. It takes the group's
// write lock and delegates to addUser; answers `false` when the user
// was already a member.
func (g *Group) AddUser(u uint64) bool {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	inserted := g.addUser(u)
	return inserted
}
// addUser inserts u into the sorted users slice, preserving order.
// Answers `false` when u is already present. The caller must hold
// g.mutex for writing.
func (g *Group) addUser(u uint64) bool {
	pos := sort.Search(len(g.users), func(i int) bool { return g.users[i] >= u })
	if pos < len(g.users) && g.users[pos] == u {
		// Already a member; nothing to insert.
		return false
	}
	// Extend by one slot, then shift the tail right to open position pos.
	g.users = append(g.users, 0)
	copy(g.users[pos+1:], g.users[pos:])
	g.users[pos] = u
	return true
}
// RemoveUser removes the given user from this group. It takes the
// group's write lock and delegates to removeUser.
func (g *Group) RemoveUser(u uint64) bool {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	removed := g.removeUser(u)
	return removed
}
// removeUser deletes the given user from the sorted users slice.
//
// Answers `true` when the user was present and has been removed, and
// `false` when the user was not a member. The caller must hold g.mutex
// for writing.
//
// Bug fix: the membership test was inverted. The original returned
// `false` when the user WAS found, and for an absent user it went on to
// splice the slice anyway — deleting the wrong element, or panicking
// with an out-of-range index when idx == len(g.users).
func (g *Group) removeUser(u uint64) bool {
	idx := sort.Search(len(g.users), func(i int) bool { return g.users[i] >= u })
	if idx >= len(g.users) || g.users[idx] != u {
		// u is not a member of this group.
		return false
	}
	g.users = append(g.users[:idx], g.users[idx+1:]...)
	return true
}
// AddGroup includes all the users in the given group to this group.
// Answers `false` if at least one of other's users was already present
// here.
//
// NOTE(review): only g's mutex is taken; other.users is read without
// holding other's lock — confirm callers serialise concurrent mutation
// of `other`.
func (g *Group) AddGroup(other *Group) bool {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	allNew := true
	for _, member := range other.users {
		if !g.addUser(member) {
			allNew = false
		}
	}
	return allNew
}
// RemoveGroup removes all the users in the given group from this
// group. Answers `false` if removeUser reported `false` for at least
// one of other's users.
//
// NOTE(review): only g's mutex is taken; other.users is read without
// holding other's lock — confirm callers serialise concurrent mutation
// of `other`.
func (g *Group) RemoveGroup(other *Group) bool {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	allRemoved := true
	for _, member := range other.users {
		if !g.removeUser(member) {
			allRemoved = false
		}
	}
	return allRemoved
}
Simplify `group.go` by using a `map` for storing users
// (c) Copyright 2015 JONNALAGADDA Srinivas
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flow
import "fmt"
import "sync"
// Group represents a specified collection of users. A user belongs
// to zero or more groups.
type Group struct {
	id    uint16              // globally-unique ID
	name  string              // globally-unique name
	users map[uint64]struct{} // set of member user IDs (struct{} values carry no data)
	mutex sync.RWMutex        // guards users; id and name are not mutated by any method shown here
}
// NewGroup creates and initialises a group.
//
// Usually, all available groups should be loaded during system
// initialization. Only groups created during runtime should be added
// dynamically. An error is returned when the ID is zero or the name
// is empty.
func NewGroup(id uint16, name string) (*Group, error) {
	if id == 0 || name == "" {
		return nil, fmt.Errorf("invalid group data -- id: %d, name: %s", id, name)
	}
	return &Group{
		id:    id,
		name:  name,
		users: make(map[uint64]struct{}),
	}, nil
}
// AddUser includes the given user in this group.
//
// Answers `true` if the user was not already included in this group;
// `false` otherwise.
func (g *Group) AddUser(u uint64) bool {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	_, exists := g.users[u]
	if !exists {
		g.users[u] = struct{}{}
	}
	return !exists
}
// RemoveUser removes the given user from this group.
//
// Answers `true` if the user was removed from this group now; `false`
// if the user was not a part of this group.
func (g *Group) RemoveUser(u uint64) bool {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	if _, exists := g.users[u]; exists {
		delete(g.users, u)
		return true
	}
	return false
}
// AddGroup includes all the users in the given group to this group.
//
// Answers `true` if at least one user from the other group did not
// already exist in this group; `false` otherwise.
//
// NOTE(review): only g's mutex is taken; other.users is iterated
// without holding other's lock — confirm callers serialise concurrent
// mutation of `other`.
func (g *Group) AddGroup(other *Group) bool {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	before := len(g.users)
	for member := range other.users {
		g.users[member] = struct{}{}
	}
	// The set only grows when at least one member was genuinely new.
	return len(g.users) > before
}
// RemoveGroup removes all the users in the given group from this
// group.
//
// Answers `true` if at least one user from the other group existed in
// this group; `false` otherwise.
//
// NOTE(review): only g's mutex is taken; other.users is iterated
// without holding other's lock — confirm callers serialise concurrent
// mutation of `other`.
func (g *Group) RemoveGroup(other *Group) bool {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	before := len(g.users)
	for member := range other.users {
		delete(g.users, member)
	}
	// The set only shrinks when at least one member was actually present.
	return len(g.users) < before
}
|
package main
import (
"fmt"
"github.com/alexandrebodin/gilibot"
"github.com/alexandrebodin/gilibot/listener"
)
// main wires up a slack-backed gilibot with a catch-all reply handler
// and the test listener, then runs the bot until Start returns.
func main() {
	bot := gilibot.New("slack")

	// Reply to every message, whatever its content.
	bot.ListenFunc(".*", func(c *gilibot.Context) {
		c.Reply([]string{"coucou match everything"})
	})
	bot.RegisterListener(listener.NewTestListener())

	if err := bot.Start(); err != nil {
		fmt.Println(err)
	}
}
test time listener
package main
import (
"fmt"
"github.com/alexandrebodin/gilibot"
"time"
)
// main wires up a slack-backed gilibot that answers "!time" messages
// with the current local time, then runs the bot until Start returns.
func main() {
	bot := gilibot.New("slack")

	bot.ListenFunc("!time", func(c *gilibot.Context) {
		// Go reference-time layout: "Mon Jan 2 2006 15:04:05".
		const layout = "Mon Jan 2 2006 15:04:05"
		c.Reply([]string{time.Now().Format(layout)})
	})

	if err := bot.Start(); err != nil {
		fmt.Println(err)
	}
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the with the actual state of the world by triggering
// actions.
package reconciler
import (
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
kevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)
// Reconciler runs a periodic loop to reconcile the desired state of the world with
// the actual state of the world by triggering attach detach operations.
// Note: This is distinct from the Reconciler implemented by the kubelet volume
// manager. This reconciles state for the attach/detach controller. That
// reconciles state for the kubelet volume manager.
type Reconciler interface {
	// Run starts the reconciliation loop which executes periodically, checks
	// if volumes that should be attached are attached and volumes that should
	// be detached are detached. If not, it will trigger attach/detach
	// operations to rectify. It returns when stopCh is closed.
	Run(stopCh <-chan struct{})
}
// NewReconciler returns a new instance of Reconciler that waits loopPeriod
// between successive executions.
//
// loopPeriod is the amount of time the reconciler loop waits between
// successive executions.
//
// maxWaitForUnmountDuration is the max amount of time the reconciler will wait
// for the volume to be safely unmounted, after this it will detach the volume
// anyway (to handle crashed/unavailable nodes). If during this time the volume
// becomes used by a new pod, the detach request will be aborted and the timer
// cleared.
func NewReconciler(
	loopPeriod time.Duration,
	maxWaitForUnmountDuration time.Duration,
	syncDuration time.Duration,
	disableReconciliationSync bool,
	desiredStateOfWorld cache.DesiredStateOfWorld,
	actualStateOfWorld cache.ActualStateOfWorld,
	attacherDetacher operationexecutor.OperationExecutor,
	nodeStatusUpdater statusupdater.NodeStatusUpdater,
	recorder record.EventRecorder) Reconciler {
	rc := &reconciler{
		loopPeriod:                loopPeriod,
		maxWaitForUnmountDuration: maxWaitForUnmountDuration,
		syncDuration:              syncDuration,
		disableReconciliationSync: disableReconciliationSync,
		desiredStateOfWorld:       desiredStateOfWorld,
		actualStateOfWorld:        actualStateOfWorld,
		attacherDetacher:          attacherDetacher,
		nodeStatusUpdater:         nodeStatusUpdater,
		recorder:                  recorder,
		// Stamp "now" so the first periodic sync waits a full syncDuration.
		timeOfLastSync: time.Now(),
	}
	return rc
}
// reconciler is the Reconciler implementation built by NewReconciler.
// All fields are set once at construction; only timeOfLastSync is
// written afterwards (by updateSyncTime, without locking — assumes a
// single reconciliation goroutine; TODO confirm).
type reconciler struct {
	loopPeriod                time.Duration                       // wait between reconciliation passes
	maxWaitForUnmountDuration time.Duration                       // max wait for unmount before force-detaching
	syncDuration              time.Duration                       // min interval between "still attached" verifications
	desiredStateOfWorld       cache.DesiredStateOfWorld           // volumes that should be attached
	actualStateOfWorld        cache.ActualStateOfWorld            // volumes believed to be attached
	attacherDetacher          operationexecutor.OperationExecutor // executes attach/detach operations
	nodeStatusUpdater         statusupdater.NodeStatusUpdater     // writes attachment lists into node status
	timeOfLastSync            time.Time                           // when sync() last ran
	disableReconciliationSync bool                                // if true, skip the periodic verification entirely
	recorder                  record.EventRecorder                // emits warning events (e.g. multi-attach) to pods
}
// Run invokes the reconciliation loop every loopPeriod via wait.Until,
// blocking until stopCh is closed.
func (rc *reconciler) Run(stopCh <-chan struct{}) {
	wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh)
}
// reconciliationLoopFunc returns the closure executed on every loop
// tick: it runs one reconcile pass and then, unless disabled via the
// disableReconciliation cli option or rate-limited by syncDuration,
// verifies that volumes recorded as attached really are still attached
// and updates the status if they are not.
func (rc *reconciler) reconciliationLoopFunc() func() {
	return func() {
		rc.reconcile()

		switch {
		case rc.disableReconciliationSync:
			glog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.")
		case rc.syncDuration < time.Second:
			glog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.")
		case time.Since(rc.timeOfLastSync) > rc.syncDuration:
			glog.V(5).Info("Starting reconciling attached volumes still attached")
			rc.sync()
		}
	}
}
// sync runs one "volumes still attached" verification pass and, via the
// deferred updateSyncTime, records when it ran so the loop can
// rate-limit the next pass.
func (rc *reconciler) sync() {
	defer rc.updateSyncTime()
	rc.syncStates()
}

// updateSyncTime stamps timeOfLastSync with the current time.
func (rc *reconciler) updateSyncTime() {
	rc.timeOfLastSync = time.Now()
}

// syncStates asks the operation executor to verify that every volume in
// the actual state of the world is still attached to its node, passing
// the actual state so it can be corrected when a volume is gone.
func (rc *reconciler) syncStates() {
	volumesPerNode := rc.actualStateOfWorld.GetAttachedVolumesPerNode()
	rc.attacherDetacher.VerifyVolumesAreAttached(volumesPerNode, rc.actualStateOfWorld)
}
// isMultiAttachForbidden checks if attaching this volume to multiple nodes is definitely not allowed/possible.
// In its current form, this function can only reliably say for which volumes it's definitely forbidden. If it returns
// false, it is not guaranteed that multi-attach is actually supported by the volume type and we must rely on the
// attacher to fail fast in such cases.
// Please see https://github.com/kubernetes/kubernetes/issues/40669 and https://github.com/kubernetes/kubernetes/pull/40148#discussion_r98055047
func (rc *reconciler) isMultiAttachForbidden(volumeSpec *volume.Spec) bool {
	if vol := volumeSpec.Volume; vol != nil {
		// Check for volume types which are known to fail slow or cause trouble when trying to multi-attach
		if vol.AzureDisk != nil || vol.Cinder != nil {
			return true
		}
	}

	pv := volumeSpec.PersistentVolume
	if pv == nil {
		// we don't know if it's supported or not and let the attacher fail later in cases it's not supported
		return false
	}

	// Only if this volume is a persistent volume, we have reliable information on whether it's allowed or not to
	// multi-attach. We trust in the individual volume implementations to not allow unsupported access modes

	// Check for persistent volume types which do not fail when trying to multi-attach
	if pv.Spec.VsphereVolume != nil {
		return false
	}

	accessModes := pv.Spec.AccessModes
	if len(accessModes) == 0 {
		// No access mode specified so we don't know for sure. Let the attacher fail if needed
		return false
	}

	// check if this volume is allowed to be attached to multiple PODs/nodes, if yes, return false
	for _, mode := range accessModes {
		if mode == v1.ReadWriteMany || mode == v1.ReadOnlyMany {
			return false
		}
	}
	return true
}
// reconcile performs one pass of the attach/detach loop: it first
// detaches volumes that are attached but no longer desired, then
// attaches volumes that are desired but not yet attached, and finally
// pushes the resulting attachment lists into node status objects.
func (rc *reconciler) reconcile() {
	// Detaches are triggered before attaches so that volumes referenced by
	// pods that are rescheduled to a different node are detached first.

	// Ensure volumes that should be detached are detached.
	for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
		if !rc.desiredStateOfWorld.VolumeExists(
			attachedVolume.VolumeName, attachedVolume.NodeName) {
			// Don't even try to start an operation if there is already one running
			// This check must be done before we do any other checks, as otherwise the other checks
			// may pass while at the same time the volume leaves the pending state, resulting in
			// double detach attempts
			if rc.attacherDetacher.IsOperationPending(attachedVolume.VolumeName, "") {
				glog.V(10).Infof("Operation for volume %q is already running. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
				continue
			}

			// Set the detach request time
			elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
			if err != nil {
				glog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
				continue
			}
			// Check whether timeout has reached the maximum waiting time
			timeout := elapsedTime > rc.maxWaitForUnmountDuration
			// Check whether volume is still mounted. Skip detach if it is still mounted unless timeout
			if attachedVolume.MountedByNode && !timeout {
				glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
				continue
			}

			// Before triggering volume detach, mark volume as detached and update the node status
			// If it fails to update node status, skip detach volume
			err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
			if err != nil {
				// Not fatal: only logged, the detach below still proceeds.
				glog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
					attachedVolume.VolumeName,
					attachedVolume.NodeName,
					err)
			}

			// Update Node Status to indicate volume is no longer safe to mount.
			err = rc.nodeStatusUpdater.UpdateNodeStatuses()
			if err != nil {
				// Skip detaching this volume if unable to update node status
				glog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
				continue
			}

			// Trigger detach volume which requires verifing safe to detach step
			// If timeout is true, skip verifySafeToDetach check
			glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
			verifySafeToDetach := !timeout
			err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
			if err == nil {
				if !timeout {
					glog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
				} else {
					glog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
				}
			}
			if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
				// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
				// Log all other errors.
				glog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
			}
		}
	}

	rc.attachDesiredVolumes()

	// Update Node Status
	err := rc.nodeStatusUpdater.UpdateNodeStatuses()
	if err != nil {
		glog.Warningf("UpdateNodeStatuses failed with: %v", err)
	}
}
// attachDesiredVolumes walks the desired state of the world and starts
// an attach operation for every volume not yet attached to its target
// node, skipping volumes with a pending operation and volumes whose
// type forbids attachment to multiple nodes while already attached
// elsewhere.
func (rc *reconciler) attachDesiredVolumes() {
	// Ensure volumes that should be attached are attached.
	for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
		if rc.actualStateOfWorld.VolumeNodeExists(volumeToAttach.VolumeName, volumeToAttach.NodeName) {
			// Volume/Node exists, touch it to reset detachRequestedTime
			glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
			rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
			continue
		}
		// Don't even try to start an operation if there is already one running
		if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "") {
			glog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
			continue
		}

		if rc.isMultiAttachForbidden(volumeToAttach.VolumeSpec) {
			nodes := rc.actualStateOfWorld.GetNodesForVolume(volumeToAttach.VolumeName)
			if len(nodes) > 0 {
				// Volume is already attached somewhere; report the conflict
				// only once per volume (flag set via SetMultiAttachError).
				if !volumeToAttach.MultiAttachErrorReported {
					rc.reportMultiAttachError(volumeToAttach, nodes)
					rc.desiredStateOfWorld.SetMultiAttachError(volumeToAttach.VolumeName, volumeToAttach.NodeName)
				}
				continue
			}
		}

		// Volume/Node doesn't exist, spawn a goroutine to attach it
		glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
		err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
		if err == nil {
			glog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
		}
		if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
			// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
			// Log all other errors.
			glog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
		}
	}
}
// reportMultiAttachError sends events and logs situation that a volume that
// should be attached to a node is already attached to different node(s).
// Scheduled pods receive warning events whose messages only name pods in
// their own namespace; a full detailed message is logged for the admin.
func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) {
	// Filter out the current node from list of nodes where the volume is
	// attached.
	// Some methods need []string, some other needs []NodeName, collect both.
	// In theory, these arrays should have always only one element - the
	// controller does not allow more than one attachment. But use array just
	// in case...
	otherNodes := []types.NodeName{}
	otherNodesStr := []string{}
	for _, node := range nodes {
		if node != volumeToAttach.NodeName {
			otherNodes = append(otherNodes, node)
			otherNodesStr = append(otherNodesStr, string(node))
		}
	}

	// Get list of pods that use the volume on the other nodes.
	pods := rc.desiredStateOfWorld.GetVolumePodsOnNodes(otherNodes, volumeToAttach.VolumeName)
	if len(pods) == 0 {
		// We did not find any pods that requests the volume. The pod must have been deleted already.
		simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another")
		for _, pod := range volumeToAttach.ScheduledPods {
			rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
		}
		// Log detailed message to system admin
		nodeList := strings.Join(otherNodesStr, ", ")
		detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already exclusively attached to node %s and can't be attached to another", nodeList))
		glog.Warningf(detailedMsg)
		return
	}

	// There are pods that require the volume and run on another node. Typically
	// it's user error, e.g. a ReplicaSet uses a PVC and has >1 replicas. Let
	// the user know what pods are blocking the volume.
	for _, scheduledPod := range volumeToAttach.ScheduledPods {
		// Each scheduledPod must get a custom message. They can run in
		// different namespaces and user of a namespace should not see names of
		// pods in other namespaces.
		localPodNames := []string{} // Names of pods in scheduledPods's namespace
		otherPods := 0              // Count of pods in other namespaces
		for _, pod := range pods {
			if pod.Namespace == scheduledPod.Namespace {
				localPodNames = append(localPodNames, pod.Name)
			} else {
				otherPods++
			}
		}

		var msg string
		if len(localPodNames) > 0 {
			msg = fmt.Sprintf("Volume is already used by pod(s) %s", strings.Join(localPodNames, ", "))
			if otherPods > 0 {
				msg = fmt.Sprintf("%s and %d pod(s) in different namespaces", msg, otherPods)
			}
		} else {
			// No local pods, there are pods only in different namespaces.
			msg = fmt.Sprintf("Volume is already used by %d pod(s) in different namespaces", otherPods)
		}
		simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", msg)
		rc.recorder.Eventf(scheduledPod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
	}

	// Log all pods for system admin
	podNames := []string{}
	for _, pod := range pods {
		podNames = append(podNames, pod.Namespace+"/"+pod.Name)
	}
	detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already used by pods %s on node %s", strings.Join(podNames, ", "), strings.Join(otherNodesStr, ", ")))
	glog.Warningf(detailedMsg)
}
volume: decrease memory allocations for debugging messages
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the with the actual state of the world by triggering
// actions.
package reconciler
import (
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
kevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)
// Reconciler runs a periodic loop to reconcile the desired state of the world with
// the actual state of the world by triggering attach detach operations.
// Note: This is distinct from the Reconciler implemented by the kubelet volume
// manager. This reconciles state for the attach/detach controller. That
// reconciles state for the kubelet volume manager.
type Reconciler interface {
	// Run starts the reconciliation loop which executes periodically, checks
	// if volumes that should be attached are attached and volumes that should
	// be detached are detached. If not, it will trigger attach/detach
	// operations to rectify. It returns when stopCh is closed.
	Run(stopCh <-chan struct{})
}
// NewReconciler returns a new instance of Reconciler that waits loopPeriod
// between successive executions.
//
// loopPeriod is the amount of time the reconciler loop waits between
// successive executions.
//
// maxWaitForUnmountDuration is the max amount of time the reconciler will wait
// for the volume to be safely unmounted, after this it will detach the volume
// anyway (to handle crashed/unavailable nodes). If during this time the volume
// becomes used by a new pod, the detach request will be aborted and the timer
// cleared.
func NewReconciler(
	loopPeriod time.Duration,
	maxWaitForUnmountDuration time.Duration,
	syncDuration time.Duration,
	disableReconciliationSync bool,
	desiredStateOfWorld cache.DesiredStateOfWorld,
	actualStateOfWorld cache.ActualStateOfWorld,
	attacherDetacher operationexecutor.OperationExecutor,
	nodeStatusUpdater statusupdater.NodeStatusUpdater,
	recorder record.EventRecorder) Reconciler {
	return &reconciler{
		loopPeriod:                loopPeriod,
		maxWaitForUnmountDuration: maxWaitForUnmountDuration,
		syncDuration:              syncDuration,
		disableReconciliationSync: disableReconciliationSync,
		desiredStateOfWorld:       desiredStateOfWorld,
		actualStateOfWorld:        actualStateOfWorld,
		attacherDetacher:          attacherDetacher,
		nodeStatusUpdater:         nodeStatusUpdater,
		// Stamp "now" so the first periodic sync waits a full syncDuration.
		timeOfLastSync: time.Now(),
		recorder:       recorder,
	}
}
// reconciler is the Reconciler implementation built by NewReconciler.
// All fields are set once at construction; only timeOfLastSync is
// written afterwards (by updateSyncTime, without locking — assumes a
// single reconciliation goroutine; TODO confirm).
type reconciler struct {
	loopPeriod                time.Duration                       // wait between reconciliation passes
	maxWaitForUnmountDuration time.Duration                       // max wait for unmount before force-detaching
	syncDuration              time.Duration                       // min interval between "still attached" verifications
	desiredStateOfWorld       cache.DesiredStateOfWorld           // volumes that should be attached
	actualStateOfWorld        cache.ActualStateOfWorld            // volumes believed to be attached
	attacherDetacher          operationexecutor.OperationExecutor // executes attach/detach operations
	nodeStatusUpdater         statusupdater.NodeStatusUpdater     // writes attachment lists into node status
	timeOfLastSync            time.Time                           // when sync() last ran
	disableReconciliationSync bool                                // if true, skip the periodic verification entirely
	recorder                  record.EventRecorder                // emits warning events (e.g. multi-attach) to pods
}
// Run invokes the reconciliation loop every loopPeriod via wait.Until,
// blocking until stopCh is closed.
func (rc *reconciler) Run(stopCh <-chan struct{}) {
	wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh)
}
// reconciliationLoopFunc returns the closure executed on every loop
// tick: it runs one reconcile pass and then, unless disabled via the
// disableReconciliation cli option or rate-limited by syncDuration,
// verifies that volumes recorded as attached really are still attached
// and updates the status if they are not.
func (rc *reconciler) reconciliationLoopFunc() func() {
	return func() {
		rc.reconcile()

		switch {
		case rc.disableReconciliationSync:
			glog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.")
		case rc.syncDuration < time.Second:
			glog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.")
		case time.Since(rc.timeOfLastSync) > rc.syncDuration:
			glog.V(5).Info("Starting reconciling attached volumes still attached")
			rc.sync()
		}
	}
}
// sync runs one "volumes still attached" verification pass and, via the
// deferred updateSyncTime, records when it ran so the loop can
// rate-limit the next pass.
func (rc *reconciler) sync() {
	defer rc.updateSyncTime()
	rc.syncStates()
}

// updateSyncTime stamps timeOfLastSync with the current time.
func (rc *reconciler) updateSyncTime() {
	rc.timeOfLastSync = time.Now()
}

// syncStates asks the operation executor to verify that every volume in
// the actual state of the world is still attached to its node, passing
// the actual state so it can be corrected when a volume is gone.
func (rc *reconciler) syncStates() {
	volumesPerNode := rc.actualStateOfWorld.GetAttachedVolumesPerNode()
	rc.attacherDetacher.VerifyVolumesAreAttached(volumesPerNode, rc.actualStateOfWorld)
}
// isMultiAttachForbidden checks if attaching this volume to multiple nodes is definitely not allowed/possible.
// In its current form, this function can only reliably say for which volumes it's definitely forbidden. If it returns
// false, it is not guaranteed that multi-attach is actually supported by the volume type and we must rely on the
// attacher to fail fast in such cases.
// Please see https://github.com/kubernetes/kubernetes/issues/40669 and https://github.com/kubernetes/kubernetes/pull/40148#discussion_r98055047
func (rc *reconciler) isMultiAttachForbidden(volumeSpec *volume.Spec) bool {
	if vol := volumeSpec.Volume; vol != nil {
		// Check for volume types which are known to fail slow or cause trouble when trying to multi-attach
		if vol.AzureDisk != nil || vol.Cinder != nil {
			return true
		}
	}

	pv := volumeSpec.PersistentVolume
	if pv == nil {
		// we don't know if it's supported or not and let the attacher fail later in cases it's not supported
		return false
	}

	// Only if this volume is a persistent volume, we have reliable information on whether it's allowed or not to
	// multi-attach. We trust in the individual volume implementations to not allow unsupported access modes

	// Check for persistent volume types which do not fail when trying to multi-attach
	if pv.Spec.VsphereVolume != nil {
		return false
	}

	accessModes := pv.Spec.AccessModes
	if len(accessModes) == 0 {
		// No access mode specified so we don't know for sure. Let the attacher fail if needed
		return false
	}

	// check if this volume is allowed to be attached to multiple PODs/nodes, if yes, return false
	for _, mode := range accessModes {
		if mode == v1.ReadWriteMany || mode == v1.ReadOnlyMany {
			return false
		}
	}
	return true
}
// reconcile performs one pass of the attach/detach loop: it first
// detaches volumes that are attached but no longer desired, then
// attaches volumes that are desired but not yet attached, and finally
// pushes the resulting attachment lists into node status objects.
func (rc *reconciler) reconcile() {
	// Detaches are triggered before attaches so that volumes referenced by
	// pods that are rescheduled to a different node are detached first.

	// Ensure volumes that should be detached are detached.
	for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
		if !rc.desiredStateOfWorld.VolumeExists(
			attachedVolume.VolumeName, attachedVolume.NodeName) {
			// Don't even try to start an operation if there is already one running
			// This check must be done before we do any other checks, as otherwise the other checks
			// may pass while at the same time the volume leaves the pending state, resulting in
			// double detach attempts
			if rc.attacherDetacher.IsOperationPending(attachedVolume.VolumeName, "") {
				glog.V(10).Infof("Operation for volume %q is already running. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
				continue
			}

			// Set the detach request time
			elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
			if err != nil {
				glog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
				continue
			}
			// Check whether timeout has reached the maximum waiting time
			timeout := elapsedTime > rc.maxWaitForUnmountDuration
			// Check whether volume is still mounted. Skip detach if it is still mounted unless timeout
			if attachedVolume.MountedByNode && !timeout {
				glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
				continue
			}

			// Before triggering volume detach, mark volume as detached and update the node status
			// If it fails to update node status, skip detach volume
			err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
			if err != nil {
				// Not fatal: only logged, the detach below still proceeds.
				glog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
					attachedVolume.VolumeName,
					attachedVolume.NodeName,
					err)
			}

			// Update Node Status to indicate volume is no longer safe to mount.
			err = rc.nodeStatusUpdater.UpdateNodeStatuses()
			if err != nil {
				// Skip detaching this volume if unable to update node status
				glog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
				continue
			}

			// Trigger detach volume which requires verifing safe to detach step
			// If timeout is true, skip verifySafeToDetach check
			glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
			verifySafeToDetach := !timeout
			err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
			if err == nil {
				if !timeout {
					glog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
				} else {
					glog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
				}
			}
			if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
				// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
				// Log all other errors.
				glog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
			}
		}
	}

	rc.attachDesiredVolumes()

	// Update Node Status
	err := rc.nodeStatusUpdater.UpdateNodeStatuses()
	if err != nil {
		glog.Warningf("UpdateNodeStatuses failed with: %v", err)
	}
}
// attachDesiredVolumes walks the desired state of the world and makes sure
// every volume in it ends up attached to its node, starting attach
// operations for volumes that are not attached yet.
func (rc *reconciler) attachDesiredVolumes() {
	for _, toAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
		// Already attached to this node: just refresh the detach-request
		// timestamp so pending detach logic does not fire.
		if rc.actualStateOfWorld.VolumeNodeExists(toAttach.VolumeName, toAttach.NodeName) {
			if glog.V(5) {
				glog.Infof(toAttach.GenerateMsgDetailed("Volume attached--touching", ""))
			}
			rc.actualStateOfWorld.ResetDetachRequestTime(toAttach.VolumeName, toAttach.NodeName)
			continue
		}
		// Never start a second operation on a volume that already has one
		// in flight.
		if rc.attacherDetacher.IsOperationPending(toAttach.VolumeName, "") {
			if glog.V(10) {
				glog.Infof("Operation for volume %q is already running. Can't start attach for %q", toAttach.VolumeName, toAttach.NodeName)
			}
			continue
		}
		// Volumes that forbid multi-attach must not be attached while they
		// are attached elsewhere; surface the conflict once per volume.
		if rc.isMultiAttachForbidden(toAttach.VolumeSpec) {
			if nodes := rc.actualStateOfWorld.GetNodesForVolume(toAttach.VolumeName); len(nodes) > 0 {
				if !toAttach.MultiAttachErrorReported {
					rc.reportMultiAttachError(toAttach, nodes)
					rc.desiredStateOfWorld.SetMultiAttachError(toAttach.VolumeName, toAttach.NodeName)
				}
				continue
			}
		}
		// Volume/Node doesn't exist, spawn a goroutine to attach it.
		if glog.V(5) {
			glog.Infof(toAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
		}
		if err := rc.attacherDetacher.AttachVolume(toAttach.VolumeToAttach, rc.actualStateOfWorld); err == nil {
			glog.Infof(toAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
		} else if !exponentialbackoff.IsExponentialBackoff(err) {
			// Ignore exponentialbackoff.IsExponentialBackoff errors, they are
			// expected. Log all other errors.
			glog.Errorf(toAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
		}
	}
}
// reportMultiAttachError sends events and logs situation that a volume that
// should be attached to a node is already attached to different node(s).
// Each scheduled pod gets a namespace-scoped event (so pod names do not leak
// across namespaces), and a detailed message is logged for the admin.
func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) {
	// Filter out the current node from list of nodes where the volume is
	// attached.
	// Some methods need []string, some other needs []NodeName, collect both.
	// In theory, these arrays should have always only one element - the
	// controller does not allow more than one attachment. But use array just
	// in case...
	otherNodes := []types.NodeName{}
	otherNodesStr := []string{}
	for _, node := range nodes {
		if node != volumeToAttach.NodeName {
			otherNodes = append(otherNodes, node)
			otherNodesStr = append(otherNodesStr, string(node))
		}
	}
	// Get list of pods that use the volume on the other nodes.
	pods := rc.desiredStateOfWorld.GetVolumePodsOnNodes(otherNodes, volumeToAttach.VolumeName)
	if len(pods) == 0 {
		// We did not find any pods that requests the volume. The pod must have been deleted already.
		simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another")
		for _, pod := range volumeToAttach.ScheduledPods {
			rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
		}
		// Log detailed message to system admin.
		nodeList := strings.Join(otherNodesStr, ", ")
		detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already exclusively attached to node %s and can't be attached to another", nodeList))
		// Pass the message as an argument rather than as the format string:
		// volume/node names may contain '%', which would corrupt the log line
		// (and trips go vet's printf check).
		glog.Warningf("%s", detailedMsg)
		return
	}
	// There are pods that require the volume and run on another node. Typically
	// it's user error, e.g. a ReplicaSet uses a PVC and has >1 replicas. Let
	// the user know what pods are blocking the volume.
	for _, scheduledPod := range volumeToAttach.ScheduledPods {
		// Each scheduledPod must get a custom message. They can run in
		// different namespaces and user of a namespace should not see names of
		// pods in other namespaces.
		localPodNames := []string{} // Names of pods in scheduledPods's namespace
		otherPods := 0              // Count of pods in other namespaces
		for _, pod := range pods {
			if pod.Namespace == scheduledPod.Namespace {
				localPodNames = append(localPodNames, pod.Name)
			} else {
				otherPods++
			}
		}
		var msg string
		if len(localPodNames) > 0 {
			msg = fmt.Sprintf("Volume is already used by pod(s) %s", strings.Join(localPodNames, ", "))
			if otherPods > 0 {
				msg = fmt.Sprintf("%s and %d pod(s) in different namespaces", msg, otherPods)
			}
		} else {
			// No local pods, there are pods only in different namespaces.
			msg = fmt.Sprintf("Volume is already used by %d pod(s) in different namespaces", otherPods)
		}
		simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", msg)
		rc.recorder.Eventf(scheduledPod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
	}
	// Log all pods for system admin.
	podNames := []string{}
	for _, pod := range pods {
		podNames = append(podNames, pod.Namespace+"/"+pod.Name)
	}
	detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already used by pods %s on node %s", strings.Join(podNames, ", "), strings.Join(otherNodesStr, ", ")))
	// Same as above: never use a runtime-built message as the format string.
	glog.Warningf("%s", detailedMsg)
}
|
//go:build linux
// +build linux
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeshutdown
import (
"bytes"
"fmt"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
_ "k8s.io/klog/v2/ktesting/init" // activate ktesting command line flags
"k8s.io/kubernetes/pkg/apis/scheduling"
pkgfeatures "k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
"k8s.io/kubernetes/pkg/kubelet/prober"
probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
)
// lock is to prevent systemDbus from being modified in the case of concurrency.
var lock sync.Mutex

// fakeDbus is a test double for the systemd logind D-Bus connection. It
// records whether the shutdown inhibitor was taken and whether the inhibit
// delay was overridden, so tests can assert on manager behavior.
type fakeDbus struct {
	currentInhibitDelay        time.Duration // delay reported before any override
	overrideSystemInhibitDelay time.Duration // delay reported once OverrideInhibitDelay was called
	shutdownChan               chan bool     // channel returned by MonitorShutdown; tests push fake shutdown events into it
	didInhibitShutdown         bool          // set when InhibitShutdown is called
	didOverrideInhibitDelay    bool          // set when OverrideInhibitDelay is called
}
// CurrentInhibitDelay returns the overridden delay once OverrideInhibitDelay
// has been called, otherwise the initially configured delay.
func (f *fakeDbus) CurrentInhibitDelay() (time.Duration, error) {
	if f.didOverrideInhibitDelay {
		return f.overrideSystemInhibitDelay, nil
	}
	return f.currentInhibitDelay, nil
}

// InhibitShutdown records that an inhibit lock was requested and returns a
// dummy lock value.
func (f *fakeDbus) InhibitShutdown() (systemd.InhibitLock, error) {
	f.didInhibitShutdown = true
	return systemd.InhibitLock(0), nil
}

// ReleaseInhibitLock is a no-op in the fake.
func (f *fakeDbus) ReleaseInhibitLock(lock systemd.InhibitLock) error {
	return nil
}

// ReloadLogindConf is a no-op in the fake.
func (f *fakeDbus) ReloadLogindConf() error {
	return nil
}

// MonitorShutdown hands back the channel injected by the test, which uses it
// to trigger fake shutdown events.
func (f *fakeDbus) MonitorShutdown() (<-chan bool, error) {
	return f.shutdownChan, nil
}

// OverrideInhibitDelay records that an override was requested; the new value
// is then surfaced through CurrentInhibitDelay.
func (f *fakeDbus) OverrideInhibitDelay(inhibitDelayMax time.Duration) error {
	f.didOverrideInhibitDelay = true
	return nil
}
// makePod builds a minimal pod for tests with the given name (also used as
// the UID), priority, and optional termination grace period.
func makePod(name string, priority int32, terminationGracePeriod *int64) *v1.Pod {
	pod := v1.Pod{}
	pod.ObjectMeta = metav1.ObjectMeta{
		Name: name,
		UID:  types.UID(name),
	}
	pod.Spec = v1.PodSpec{
		Priority:                      &priority,
		TerminationGracePeriodSeconds: terminationGracePeriod,
	}
	return &pod
}
// TestManager exercises the full graceful-shutdown flow: it starts the
// manager against a fake D-Bus connection, fires a fake shutdown event, and
// verifies which grace period each active pod is killed with, as well as
// whether the logind inhibit delay was overridden.
func TestManager(t *testing.T) {
	// Swap the systemDbus factory for a fake per test case and restore the
	// original when the test finishes.
	systemDbusTmp := systemDbus
	defer func() {
		systemDbus = systemDbusTmp
	}()
	normalPodNoGracePeriod := makePod("normal-pod-nil-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil /* terminationGracePeriod */)
	criticalPodNoGracePeriod := makePod("critical-pod-nil-grace-period", scheduling.SystemCriticalPriority, nil /* terminationGracePeriod */)
	shortGracePeriod := int64(2)
	normalPodGracePeriod := makePod("normal-pod-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, &shortGracePeriod /* terminationGracePeriod */)
	criticalPodGracePeriod := makePod("critical-pod-grace-period", scheduling.SystemCriticalPriority, &shortGracePeriod /* terminationGracePeriod */)
	longGracePeriod := int64(1000)
	normalPodLongGracePeriod := makePod("normal-pod-long-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, &longGracePeriod /* terminationGracePeriod */)
	var tests = []struct {
		desc                             string
		activePods                       []*v1.Pod
		shutdownGracePeriodRequested     time.Duration
		shutdownGracePeriodCriticalPods  time.Duration
		systemInhibitDelay               time.Duration
		overrideSystemInhibitDelay       time.Duration
		expectedDidOverrideInhibitDelay  bool
		expectedPodToGracePeriodOverride map[string]int64
		expectedError                    error
	}{
		{
			desc:                             "no override (total=30s, critical=10s)",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(40 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10},
		},
		{
			desc:                             "no override (total=30s, critical=10s) pods with terminationGracePeriod and without",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod, normalPodGracePeriod, criticalPodGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(40 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10, "normal-pod-grace-period": 2, "critical-pod-grace-period": 2},
		},
		{
			desc:                             "no override (total=30s, critical=10s) pod with long terminationGracePeriod is overridden",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod, normalPodGracePeriod, criticalPodGracePeriod, normalPodLongGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(40 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10, "normal-pod-grace-period": 2, "critical-pod-grace-period": 2, "normal-pod-long-grace-period": 20},
		},
		{
			desc:                             "no override (total=30, critical=0)",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(0 * time.Second),
			systemInhibitDelay:               time.Duration(40 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 30, "critical-pod-nil-grace-period": 0},
		},
		{
			desc:                             "override successful (total=30, critical=10)",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(5 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(30 * time.Second),
			expectedDidOverrideInhibitDelay:  true,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10},
		},
		{
			desc:                             "override unsuccessful",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(5 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(5 * time.Second),
			expectedDidOverrideInhibitDelay:  true,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 5, "critical-pod-nil-grace-period": 0},
			expectedError:                    fmt.Errorf("unable to update logind InhibitDelayMaxSec to 30s (ShutdownGracePeriod), current value of InhibitDelayMaxSec (5s) is less than requested ShutdownGracePeriod"),
		},
		{
			desc:                            "override unsuccessful, zero time",
			activePods:                      []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:    time.Duration(5 * time.Second),
			shutdownGracePeriodCriticalPods: time.Duration(5 * time.Second),
			systemInhibitDelay:              time.Duration(0 * time.Second),
			overrideSystemInhibitDelay:      time.Duration(0 * time.Second),
			expectedError:                   fmt.Errorf("unable to update logind InhibitDelayMaxSec to 5s (ShutdownGracePeriod), current value of InhibitDelayMaxSec (0s) is less than requested ShutdownGracePeriod"),
		},
		{
			desc:                             "no override, all time to critical pods",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(5 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(5 * time.Second),
			systemInhibitDelay:               time.Duration(5 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(5 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 0, "critical-pod-nil-grace-period": 5},
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			logger, _ := ktesting.NewTestContext(t)
			activePodsFunc := func() []*v1.Pod {
				return tc.activePods
			}
			// PodKillInfo captures one killPodsFunc invocation so the test
			// can verify which pod was killed and with what grace period.
			type PodKillInfo struct {
				Name        string
				GracePeriod int64
			}
			podKillChan := make(chan PodKillInfo, 1)
			killPodsFunc := func(pod *v1.Pod, evict bool, gracePeriodOverride *int64, fn func(podStatus *v1.PodStatus)) error {
				var gracePeriod int64
				if gracePeriodOverride != nil {
					gracePeriod = *gracePeriodOverride
				}
				podKillChan <- PodKillInfo{Name: pod.Name, GracePeriod: gracePeriod}
				return nil
			}
			fakeShutdownChan := make(chan bool)
			fakeDbus := &fakeDbus{currentInhibitDelay: tc.systemInhibitDelay, shutdownChan: fakeShutdownChan, overrideSystemInhibitDelay: tc.overrideSystemInhibitDelay}
			// Hold the package-level lock while the global systemDbus factory
			// points at this test case's fake (released after Start below).
			lock.Lock()
			systemDbus = func() (dbusInhibiter, error) {
				return fakeDbus, nil
			}
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.GracefulNodeShutdown, true)()
			proberManager := probetest.FakeManager{}
			fakeRecorder := &record.FakeRecorder{}
			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
			manager, _ := NewManager(&Config{
				Logger:                          logger,
				ProbeManager:                    proberManager,
				Recorder:                        fakeRecorder,
				NodeRef:                         nodeRef,
				GetPodsFunc:                     activePodsFunc,
				KillPodFunc:                     killPodsFunc,
				SyncNodeStatusFunc:              func() {},
				ShutdownGracePeriodRequested:    tc.shutdownGracePeriodRequested,
				ShutdownGracePeriodCriticalPods: tc.shutdownGracePeriodCriticalPods,
				Clock:                           testingclock.NewFakeClock(time.Now()),
				StateDirectory:                  os.TempDir(),
			})
			err := manager.Start()
			lock.Unlock()
			if tc.expectedError != nil {
				if err == nil {
					t.Errorf("unexpected error message. Got: <nil> want %s", tc.expectedError.Error())
				} else if !strings.Contains(err.Error(), tc.expectedError.Error()) {
					t.Errorf("unexpected error message. Got: %s want %s", err.Error(), tc.expectedError.Error())
				}
			} else {
				assert.NoError(t, err, "expected manager.Start() to not return error")
				assert.True(t, fakeDbus.didInhibitShutdown, "expected that manager inhibited shutdown")
				assert.NoError(t, manager.ShutdownStatus(), "expected that manager does not return error since shutdown is not active")
				assert.Equal(t, manager.Admit(nil).Admit, true)
				// Send fake shutdown event
				select {
				case fakeShutdownChan <- true:
				case <-time.After(1 * time.Second):
					t.Fatal()
				}
				// Wait for all the pods to be killed
				killedPodsToGracePeriods := map[string]int64{}
				for i := 0; i < len(tc.activePods); i++ {
					select {
					case podKillInfo := <-podKillChan:
						killedPodsToGracePeriods[podKillInfo.Name] = podKillInfo.GracePeriod
						continue
					case <-time.After(1 * time.Second):
						t.Fatal()
					}
				}
				assert.Error(t, manager.ShutdownStatus(), "expected that manager returns error since shutdown is active")
				assert.Equal(t, manager.Admit(nil).Admit, false)
				assert.Equal(t, tc.expectedPodToGracePeriodOverride, killedPodsToGracePeriods)
				assert.Equal(t, tc.expectedDidOverrideInhibitDelay, fakeDbus.didOverrideInhibitDelay, "override system inhibit delay differs")
			}
		})
	}
}
// TestFeatureEnabled verifies that NewManager returns a real manager only
// when the GracefulNodeShutdown feature gate is enabled AND a non-zero
// shutdownGracePeriodRequested is configured; otherwise the stub is returned.
func TestFeatureEnabled(t *testing.T) {
	var tests = []struct {
		desc                         string
		shutdownGracePeriodRequested time.Duration
		featureGateEnabled           bool
		expectEnabled                bool
	}{
		{
			desc:                         "shutdownGracePeriodRequested 0; disables feature",
			shutdownGracePeriodRequested: time.Duration(0 * time.Second),
			featureGateEnabled:           true,
			expectEnabled:                false,
		},
		{
			desc:                         "feature gate disabled; disables feature",
			shutdownGracePeriodRequested: time.Duration(100 * time.Second),
			featureGateEnabled:           false,
			expectEnabled:                false,
		},
		{
			desc:                         "feature gate enabled; shutdownGracePeriodRequested > 0; enables feature",
			shutdownGracePeriodRequested: time.Duration(100 * time.Second),
			featureGateEnabled:           true,
			expectEnabled:                true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			logger, _ := ktesting.NewTestContext(t)
			activePodsFunc := func() []*v1.Pod {
				return nil
			}
			killPodsFunc := func(pod *v1.Pod, evict bool, gracePeriodOverride *int64, fn func(*v1.PodStatus)) error {
				return nil
			}
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.GracefulNodeShutdown, tc.featureGateEnabled)()
			proberManager := probetest.FakeManager{}
			fakeRecorder := &record.FakeRecorder{}
			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
			manager, _ := NewManager(&Config{
				Logger:                          logger,
				ProbeManager:                    proberManager,
				Recorder:                        fakeRecorder,
				NodeRef:                         nodeRef,
				GetPodsFunc:                     activePodsFunc,
				KillPodFunc:                     killPodsFunc,
				SyncNodeStatusFunc:              func() {},
				ShutdownGracePeriodRequested:    tc.shutdownGracePeriodRequested,
				ShutdownGracePeriodCriticalPods: 0,
				StateDirectory:                  os.TempDir(),
			})
			// A non-stub manager means the feature is considered active.
			assert.Equal(t, tc.expectEnabled, manager != managerStub{})
		})
	}
}
// TestRestart verifies that the manager reconnects to D-Bus after the
// shutdown-monitor channel closes: closing shutdownChan simulates a dropped
// connection, and each reconnect calls the systemDbus factory again, which
// is counted via connChan (three reconnects expected).
func TestRestart(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)
	// Restore the real systemDbus factory when the test finishes.
	systemDbusTmp := systemDbus
	defer func() {
		systemDbus = systemDbusTmp
	}()
	shutdownGracePeriodRequested := 30 * time.Second
	shutdownGracePeriodCriticalPods := 10 * time.Second
	systemInhibitDelay := 40 * time.Second
	overrideSystemInhibitDelay := 40 * time.Second
	activePodsFunc := func() []*v1.Pod {
		return nil
	}
	killPodsFunc := func(pod *v1.Pod, isEvicted bool, gracePeriodOverride *int64, fn func(*v1.PodStatus)) error {
		return nil
	}
	syncNodeStatus := func() {}
	// shutdownChan is replaced on every (re)connect; guarded by its own mutex
	// because the manager's monitor goroutine races with the test loop.
	var shutdownChan chan bool
	var shutdownChanMut sync.Mutex
	var connChan = make(chan struct{}, 1)
	lock.Lock()
	systemDbus = func() (dbusInhibiter, error) {
		// Signal the test that a (re)connection happened.
		defer func() {
			connChan <- struct{}{}
		}()
		ch := make(chan bool)
		shutdownChanMut.Lock()
		shutdownChan = ch
		shutdownChanMut.Unlock()
		dbus := &fakeDbus{currentInhibitDelay: systemInhibitDelay, shutdownChan: ch, overrideSystemInhibitDelay: overrideSystemInhibitDelay}
		return dbus, nil
	}
	proberManager := probetest.FakeManager{}
	fakeRecorder := &record.FakeRecorder{}
	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
	manager, _ := NewManager(&Config{
		Logger:                          logger,
		ProbeManager:                    proberManager,
		Recorder:                        fakeRecorder,
		NodeRef:                         nodeRef,
		GetPodsFunc:                     activePodsFunc,
		KillPodFunc:                     killPodsFunc,
		SyncNodeStatusFunc:              syncNodeStatus,
		ShutdownGracePeriodRequested:    shutdownGracePeriodRequested,
		ShutdownGracePeriodCriticalPods: shutdownGracePeriodCriticalPods,
		StateDirectory:                  os.TempDir(),
	})
	err := manager.Start()
	lock.Unlock()
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Force three reconnect cycles: wait for a connection, then close the
	// monitor channel so the manager has to dial again.
	for i := 0; i != 3; i++ {
		select {
		case <-time.After(dbusReconnectPeriod * 5):
			t.Fatal("wait dbus connect timeout")
		case <-connChan:
		}
		shutdownChanMut.Lock()
		close(shutdownChan)
		shutdownChanMut.Unlock()
	}
}
// Test_migrateConfig verifies that the legacy pair of settings
// (shutdownGracePeriodRequested, shutdownGracePeriodCriticalPods) is
// translated into the per-priority ShutdownGracePeriodByPodPriority form,
// and that invalid/empty configurations yield nil.
func Test_migrateConfig(t *testing.T) {
	type shutdownConfig struct {
		shutdownGracePeriodRequested    time.Duration
		shutdownGracePeriodCriticalPods time.Duration
	}
	tests := []struct {
		name string
		args shutdownConfig
		want []kubeletconfig.ShutdownGracePeriodByPodPriority
	}{
		{
			name: "both shutdownGracePeriodRequested and shutdownGracePeriodCriticalPods",
			args: shutdownConfig{
				shutdownGracePeriodRequested:    300 * time.Second,
				shutdownGracePeriodCriticalPods: 120 * time.Second,
			},
			want: []kubeletconfig.ShutdownGracePeriodByPodPriority{
				{
					Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
					ShutdownGracePeriodSeconds: 180,
				},
				{
					Priority:                   scheduling.SystemCriticalPriority,
					ShutdownGracePeriodSeconds: 120,
				},
			},
		},
		{
			name: "only shutdownGracePeriodRequested",
			args: shutdownConfig{
				shutdownGracePeriodRequested:    100 * time.Second,
				shutdownGracePeriodCriticalPods: 0 * time.Second,
			},
			want: []kubeletconfig.ShutdownGracePeriodByPodPriority{
				{
					Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
					ShutdownGracePeriodSeconds: 100,
				},
				{
					Priority:                   scheduling.SystemCriticalPriority,
					ShutdownGracePeriodSeconds: 0,
				},
			},
		},
		{
			name: "empty configuration",
			args: shutdownConfig{
				shutdownGracePeriodRequested:    0 * time.Second,
				shutdownGracePeriodCriticalPods: 0 * time.Second,
			},
			want: nil,
		},
		{
			name: "wrong configuration",
			args: shutdownConfig{
				shutdownGracePeriodRequested:    1 * time.Second,
				shutdownGracePeriodCriticalPods: 100 * time.Second,
			},
			want: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := migrateConfig(tt.args.shutdownGracePeriodRequested, tt.args.shutdownGracePeriodCriticalPods)
			// assert.Equal already fails the test with a detailed diff; the
			// previous `if !assert.Equal { t.Errorf }` double-reported every
			// failure.
			assert.Equal(t, tt.want, got)
		})
	}
}
// Test_groupByPriority verifies that pods are bucketed into the shutdown
// group whose priority threshold they fall under: pods below the lowest
// configured priority join the lowest group, pods at/above the highest join
// the highest.
func Test_groupByPriority(t *testing.T) {
	type args struct {
		shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority
		pods                             []*v1.Pod
	}
	tests := []struct {
		name string
		args args
		want []podShutdownGroup
	}{
		{
			name: "migrate config",
			args: args{
				shutdownGracePeriodByPodPriority: migrateConfig(300*time.Second /* shutdownGracePeriodRequested */, 120*time.Second /* shutdownGracePeriodCriticalPods */),
				pods: []*v1.Pod{
					makePod("normal-pod", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil),
					makePod("highest-user-definable-pod", scheduling.HighestUserDefinablePriority, nil),
					makePod("critical-pod", scheduling.SystemCriticalPriority, nil),
				},
			},
			want: []podShutdownGroup{
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
						ShutdownGracePeriodSeconds: 180,
					},
					Pods: []*v1.Pod{
						makePod("normal-pod", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil),
						makePod("highest-user-definable-pod", scheduling.HighestUserDefinablePriority, nil),
					},
				},
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   scheduling.SystemCriticalPriority,
						ShutdownGracePeriodSeconds: 120,
					},
					Pods: []*v1.Pod{
						makePod("critical-pod", scheduling.SystemCriticalPriority, nil),
					},
				},
			},
		},
		{
			name: "pod priority",
			args: args{
				shutdownGracePeriodByPodPriority: []kubeletconfig.ShutdownGracePeriodByPodPriority{
					{
						Priority:                   1,
						ShutdownGracePeriodSeconds: 10,
					},
					{
						Priority:                   2,
						ShutdownGracePeriodSeconds: 20,
					},
					{
						Priority:                   3,
						ShutdownGracePeriodSeconds: 30,
					},
					{
						Priority:                   4,
						ShutdownGracePeriodSeconds: 40,
					},
				},
				pods: []*v1.Pod{
					makePod("pod-0", 0, nil),
					makePod("pod-1", 1, nil),
					makePod("pod-2", 2, nil),
					makePod("pod-3", 3, nil),
					makePod("pod-4", 4, nil),
					makePod("pod-5", 5, nil),
				},
			},
			want: []podShutdownGroup{
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   1,
						ShutdownGracePeriodSeconds: 10,
					},
					Pods: []*v1.Pod{
						makePod("pod-0", 0, nil),
						makePod("pod-1", 1, nil),
					},
				},
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   2,
						ShutdownGracePeriodSeconds: 20,
					},
					Pods: []*v1.Pod{
						makePod("pod-2", 2, nil),
					},
				},
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   3,
						ShutdownGracePeriodSeconds: 30,
					},
					Pods: []*v1.Pod{
						makePod("pod-3", 3, nil),
					},
				},
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   4,
						ShutdownGracePeriodSeconds: 40,
					},
					Pods: []*v1.Pod{
						makePod("pod-4", 4, nil),
						makePod("pod-5", 5, nil),
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := groupByPriority(tt.args.shutdownGracePeriodByPodPriority, tt.args.pods)
			// assert.Equal already fails the test with a detailed diff; the
			// previous `if !assert.Equal { t.Errorf }` double-reported every
			// failure.
			assert.Equal(t, tt.want, got)
		})
	}
}
// buffer is a goroutine-safe wrapper around bytes.Buffer, used to capture
// log output that may be written concurrently.
type buffer struct {
	b  bytes.Buffer
	rw sync.RWMutex
}

// String returns the accumulated contents under a read lock.
func (buf *buffer) String() string {
	buf.rw.RLock()
	defer buf.rw.RUnlock()
	return buf.b.String()
}

// Write appends p under the write lock, satisfying io.Writer.
func (buf *buffer) Write(p []byte) (n int, err error) {
	buf.rw.Lock()
	defer buf.rw.Unlock()
	return buf.b.Write(p)
}
// Test_managerImpl_processShutdownEvent checks the shutdown-event handler's
// logging when killing pods overruns the grace period: the fake clock is
// stepped 60s inside killPodFunc so the per-group deadline is exceeded, and
// the test asserts the timeout message appears in the captured klog output.
func Test_managerImpl_processShutdownEvent(t *testing.T) {
	var (
		probeManager   = probetest.FakeManager{}
		fakeRecorder   = &record.FakeRecorder{}
		syncNodeStatus = func() {}
		nodeRef        = &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
		fakeclock      = testingclock.NewFakeClock(time.Now())
	)
	type fields struct {
		recorder                         record.EventRecorder
		nodeRef                          *v1.ObjectReference
		probeManager                     prober.Manager
		shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority
		getPods                          eviction.ActivePodsFunc
		killPodFunc                      eviction.KillPodFunc
		syncNodeStatus                   func()
		dbusCon                          dbusInhibiter
		inhibitLock                      systemd.InhibitLock
		nodeShuttingDownNow              bool
		clock                            clock.Clock
	}
	tests := []struct {
		name                 string
		fields               fields
		wantErr              bool
		exceptOutputContains string
	}{
		{
			name: "kill pod func take too long",
			fields: fields{
				recorder:     fakeRecorder,
				nodeRef:      nodeRef,
				probeManager: probeManager,
				shutdownGracePeriodByPodPriority: []kubeletconfig.ShutdownGracePeriodByPodPriority{
					{
						Priority:                   1,
						ShutdownGracePeriodSeconds: 10,
					},
					{
						Priority:                   2,
						ShutdownGracePeriodSeconds: 20,
					},
				},
				getPods: func() []*v1.Pod {
					return []*v1.Pod{
						makePod("normal-pod", 1, nil),
						makePod("critical-pod", 2, nil),
					}
				},
				killPodFunc: func(pod *v1.Pod, isEvicted bool, gracePeriodOverride *int64, fn func(*v1.PodStatus)) error {
					// Advance the fake clock past every grace period so the
					// manager's timeout path is exercised.
					fakeclock.Step(60 * time.Second)
					return nil
				},
				syncNodeStatus: syncNodeStatus,
				clock:          fakeclock,
				dbusCon:        &fakeDbus{},
			},
			wantErr:              false,
			exceptOutputContains: "Shutdown manager pod killing time out",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			l := klog.Level(1)
			l.Set("1")
			// hijack the klog output
			tmpWriteBuffer := new(buffer)
			klog.SetOutput(tmpWriteBuffer)
			klog.LogToStderr(false)
			m := &managerImpl{
				logger:                           klog.TODO(), // This test will be updated in a separate commit.
				recorder:                         tt.fields.recorder,
				nodeRef:                          tt.fields.nodeRef,
				probeManager:                     tt.fields.probeManager,
				shutdownGracePeriodByPodPriority: tt.fields.shutdownGracePeriodByPodPriority,
				getPods:                          tt.fields.getPods,
				killPodFunc:                      tt.fields.killPodFunc,
				syncNodeStatus:                   tt.fields.syncNodeStatus,
				dbusCon:                          tt.fields.dbusCon,
				inhibitLock:                      tt.fields.inhibitLock,
				nodeShuttingDownMutex:            sync.Mutex{},
				nodeShuttingDownNow:              tt.fields.nodeShuttingDownNow,
				clock:                            tt.fields.clock,
			}
			if err := m.processShutdownEvent(); (err != nil) != tt.wantErr {
				t.Errorf("managerImpl.processShutdownEvent() error = %v, wantErr %v", err, tt.wantErr)
			}
			// Flush so everything written so far is visible in the buffer.
			klog.Flush()
			log := tmpWriteBuffer.String()
			if !strings.Contains(log, tt.exceptOutputContains) {
				t.Errorf("managerImpl.processShutdownEvent() should log %s, got %s", tt.exceptOutputContains, log)
			}
		})
	}
}
kubelet: avoid manipulating global logger during unit test
The code as it stands now works, but it is still complicated, and previous
versions had race conditions (https://github.com/kubernetes/kubernetes/issues/108040).
Now the test works without modifying global state. The individual test cases
could run in parallel; this just isn't done because they already complete
quickly (about 2 seconds).
//go:build linux
// +build linux
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeshutdown
import (
"fmt"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2/ktesting"
_ "k8s.io/klog/v2/ktesting/init" // activate ktesting command line flags
"k8s.io/kubernetes/pkg/apis/scheduling"
pkgfeatures "k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
"k8s.io/kubernetes/pkg/kubelet/prober"
probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
)
// lock is to prevent systemDbus from being modified in the case of concurrency.
var lock sync.Mutex
type fakeDbus struct {
currentInhibitDelay time.Duration
overrideSystemInhibitDelay time.Duration
shutdownChan chan bool
didInhibitShutdown bool
didOverrideInhibitDelay bool
}
func (f *fakeDbus) CurrentInhibitDelay() (time.Duration, error) {
if f.didOverrideInhibitDelay {
return f.overrideSystemInhibitDelay, nil
}
return f.currentInhibitDelay, nil
}
func (f *fakeDbus) InhibitShutdown() (systemd.InhibitLock, error) {
f.didInhibitShutdown = true
return systemd.InhibitLock(0), nil
}
func (f *fakeDbus) ReleaseInhibitLock(lock systemd.InhibitLock) error {
return nil
}
func (f *fakeDbus) ReloadLogindConf() error {
return nil
}
func (f *fakeDbus) MonitorShutdown() (<-chan bool, error) {
return f.shutdownChan, nil
}
// OverrideInhibitDelay records that the delay was overridden. The requested
// value itself is ignored: CurrentInhibitDelay switches to returning the
// preconfigured overrideSystemInhibitDelay instead.
func (f *fakeDbus) OverrideInhibitDelay(inhibitDelayMax time.Duration) error {
	f.didOverrideInhibitDelay = true
	return nil
}
// makePod builds a minimal v1.Pod for tests: the name doubles as the UID,
// and terminationGracePeriod may be nil to leave the field unset.
func makePod(name string, priority int32, terminationGracePeriod *int64) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			UID:  types.UID(name),
		},
		Spec: v1.PodSpec{
			Priority:                      &priority,
			TerminationGracePeriodSeconds: terminationGracePeriod,
		},
	}
}
// TestManager exercises the graceful-shutdown manager end to end against a
// fake D-Bus connection. Each case checks (a) whether the manager decides to
// override logind's InhibitDelayMaxSec, and (b) that after a simulated
// shutdown signal every active pod is killed with the expected grace-period
// override (the total budget split between normal and critical pods).
func TestManager(t *testing.T) {
	// systemDbus is package-level state; restore it when the test finishes.
	systemDbusTmp := systemDbus
	defer func() {
		systemDbus = systemDbusTmp
	}()
	normalPodNoGracePeriod := makePod("normal-pod-nil-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil /* terminationGracePeriod */)
	criticalPodNoGracePeriod := makePod("critical-pod-nil-grace-period", scheduling.SystemCriticalPriority, nil /* terminationGracePeriod */)
	shortGracePeriod := int64(2)
	normalPodGracePeriod := makePod("normal-pod-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, &shortGracePeriod /* terminationGracePeriod */)
	criticalPodGracePeriod := makePod("critical-pod-grace-period", scheduling.SystemCriticalPriority, &shortGracePeriod /* terminationGracePeriod */)
	longGracePeriod := int64(1000)
	normalPodLongGracePeriod := makePod("normal-pod-long-grace-period", scheduling.DefaultPriorityWhenNoDefaultClassExists, &longGracePeriod /* terminationGracePeriod */)
	var tests = []struct {
		desc                             string
		activePods                       []*v1.Pod
		shutdownGracePeriodRequested     time.Duration
		shutdownGracePeriodCriticalPods  time.Duration
		systemInhibitDelay               time.Duration
		overrideSystemInhibitDelay       time.Duration
		expectedDidOverrideInhibitDelay  bool
		expectedPodToGracePeriodOverride map[string]int64
		expectedError                    error
	}{
		{
			desc:                             "no override (total=30s, critical=10s)",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(40 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10},
		},
		{
			desc:                             "no override (total=30s, critical=10s) pods with terminationGracePeriod and without",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod, normalPodGracePeriod, criticalPodGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(40 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10, "normal-pod-grace-period": 2, "critical-pod-grace-period": 2},
		},
		{
			desc:                             "no override (total=30s, critical=10s) pod with long terminationGracePeriod is overridden",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod, normalPodGracePeriod, criticalPodGracePeriod, normalPodLongGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(40 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10, "normal-pod-grace-period": 2, "critical-pod-grace-period": 2, "normal-pod-long-grace-period": 20},
		},
		{
			desc:                             "no override (total=30, critical=0)",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(0 * time.Second),
			systemInhibitDelay:               time.Duration(40 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 30, "critical-pod-nil-grace-period": 0},
		},
		{
			desc:                             "override successful (total=30, critical=10)",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(5 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(30 * time.Second),
			expectedDidOverrideInhibitDelay:  true,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10},
		},
		{
			desc:                             "override unsuccessful",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(30 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
			systemInhibitDelay:               time.Duration(5 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(5 * time.Second),
			expectedDidOverrideInhibitDelay:  true,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 5, "critical-pod-nil-grace-period": 0},
			expectedError:                    fmt.Errorf("unable to update logind InhibitDelayMaxSec to 30s (ShutdownGracePeriod), current value of InhibitDelayMaxSec (5s) is less than requested ShutdownGracePeriod"),
		},
		{
			desc:                            "override unsuccessful, zero time",
			activePods:                      []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:    time.Duration(5 * time.Second),
			shutdownGracePeriodCriticalPods: time.Duration(5 * time.Second),
			systemInhibitDelay:              time.Duration(0 * time.Second),
			overrideSystemInhibitDelay:      time.Duration(0 * time.Second),
			expectedError:                   fmt.Errorf("unable to update logind InhibitDelayMaxSec to 5s (ShutdownGracePeriod), current value of InhibitDelayMaxSec (0s) is less than requested ShutdownGracePeriod"),
		},
		{
			desc:                             "no override, all time to critical pods",
			activePods:                       []*v1.Pod{normalPodNoGracePeriod, criticalPodNoGracePeriod},
			shutdownGracePeriodRequested:     time.Duration(5 * time.Second),
			shutdownGracePeriodCriticalPods:  time.Duration(5 * time.Second),
			systemInhibitDelay:               time.Duration(5 * time.Second),
			overrideSystemInhibitDelay:       time.Duration(5 * time.Second),
			expectedDidOverrideInhibitDelay:  false,
			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 0, "critical-pod-nil-grace-period": 5},
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			logger, _ := ktesting.NewTestContext(t)
			activePodsFunc := func() []*v1.Pod {
				return tc.activePods
			}
			type PodKillInfo struct {
				Name        string
				GracePeriod int64
			}
			// Buffered so a kill callback never blocks the manager goroutine.
			podKillChan := make(chan PodKillInfo, 1)
			killPodsFunc := func(pod *v1.Pod, evict bool, gracePeriodOverride *int64, fn func(podStatus *v1.PodStatus)) error {
				var gracePeriod int64
				if gracePeriodOverride != nil {
					gracePeriod = *gracePeriodOverride
				}
				podKillChan <- PodKillInfo{Name: pod.Name, GracePeriod: gracePeriod}
				return nil
			}
			fakeShutdownChan := make(chan bool)
			fakeDbus := &fakeDbus{currentInhibitDelay: tc.systemInhibitDelay, shutdownChan: fakeShutdownChan, overrideSystemInhibitDelay: tc.overrideSystemInhibitDelay}
			// Guard the package-level systemDbus hook while this subtest
			// installs its fake and starts the manager.
			lock.Lock()
			systemDbus = func() (dbusInhibiter, error) {
				return fakeDbus, nil
			}
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.GracefulNodeShutdown, true)()
			proberManager := probetest.FakeManager{}
			fakeRecorder := &record.FakeRecorder{}
			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
			manager, _ := NewManager(&Config{
				Logger:                          logger,
				ProbeManager:                    proberManager,
				Recorder:                        fakeRecorder,
				NodeRef:                         nodeRef,
				GetPodsFunc:                     activePodsFunc,
				KillPodFunc:                     killPodsFunc,
				SyncNodeStatusFunc:              func() {},
				ShutdownGracePeriodRequested:    tc.shutdownGracePeriodRequested,
				ShutdownGracePeriodCriticalPods: tc.shutdownGracePeriodCriticalPods,
				Clock:                           testingclock.NewFakeClock(time.Now()),
				StateDirectory:                  os.TempDir(),
			})
			err := manager.Start()
			lock.Unlock()
			if tc.expectedError != nil {
				if err == nil {
					t.Errorf("unexpected error message. Got: <nil> want %s", tc.expectedError.Error())
				} else if !strings.Contains(err.Error(), tc.expectedError.Error()) {
					t.Errorf("unexpected error message. Got: %s want %s", err.Error(), tc.expectedError.Error())
				}
			} else {
				assert.NoError(t, err, "expected manager.Start() to not return error")
				assert.True(t, fakeDbus.didInhibitShutdown, "expected that manager inhibited shutdown")
				assert.NoError(t, manager.ShutdownStatus(), "expected that manager does not return error since shutdown is not active")
				assert.Equal(t, manager.Admit(nil).Admit, true)
				// Send fake shutdown event
				select {
				case fakeShutdownChan <- true:
				case <-time.After(1 * time.Second):
					t.Fatal()
				}
				// Wait for all the pods to be killed
				killedPodsToGracePeriods := map[string]int64{}
				for i := 0; i < len(tc.activePods); i++ {
					select {
					case podKillInfo := <-podKillChan:
						killedPodsToGracePeriods[podKillInfo.Name] = podKillInfo.GracePeriod
						continue
					case <-time.After(1 * time.Second):
						t.Fatal()
					}
				}
				assert.Error(t, manager.ShutdownStatus(), "expected that manager returns error since shutdown is active")
				assert.Equal(t, manager.Admit(nil).Admit, false)
				assert.Equal(t, tc.expectedPodToGracePeriodOverride, killedPodsToGracePeriods)
				assert.Equal(t, tc.expectedDidOverrideInhibitDelay, fakeDbus.didOverrideInhibitDelay, "override system inhibit delay differs")
			}
		})
	}
}
// TestFeatureEnabled verifies that a real shutdown manager is constructed only
// when the GracefulNodeShutdown feature gate is enabled AND a non-zero grace
// period is requested; otherwise NewManager returns the no-op managerStub.
func TestFeatureEnabled(t *testing.T) {
	var tests = []struct {
		desc                         string
		shutdownGracePeriodRequested time.Duration
		featureGateEnabled           bool
		expectEnabled                bool
	}{
		{
			desc:                         "shutdownGracePeriodRequested 0; disables feature",
			shutdownGracePeriodRequested: time.Duration(0 * time.Second),
			featureGateEnabled:           true,
			expectEnabled:                false,
		},
		{
			desc:                         "feature gate disabled; disables feature",
			shutdownGracePeriodRequested: time.Duration(100 * time.Second),
			featureGateEnabled:           false,
			expectEnabled:                false,
		},
		{
			desc:                         "feature gate enabled; shutdownGracePeriodRequested > 0; enables feature",
			shutdownGracePeriodRequested: time.Duration(100 * time.Second),
			featureGateEnabled:           true,
			expectEnabled:                true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			logger, _ := ktesting.NewTestContext(t)
			activePodsFunc := func() []*v1.Pod {
				return nil
			}
			killPodsFunc := func(pod *v1.Pod, evict bool, gracePeriodOverride *int64, fn func(*v1.PodStatus)) error {
				return nil
			}
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.GracefulNodeShutdown, tc.featureGateEnabled)()
			proberManager := probetest.FakeManager{}
			fakeRecorder := &record.FakeRecorder{}
			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
			manager, _ := NewManager(&Config{
				Logger:                          logger,
				ProbeManager:                    proberManager,
				Recorder:                        fakeRecorder,
				NodeRef:                         nodeRef,
				GetPodsFunc:                     activePodsFunc,
				KillPodFunc:                     killPodsFunc,
				SyncNodeStatusFunc:              func() {},
				ShutdownGracePeriodRequested:    tc.shutdownGracePeriodRequested,
				ShutdownGracePeriodCriticalPods: 0,
				StateDirectory:                  os.TempDir(),
			})
			// A stub comparing unequal to managerStub{} means the real
			// implementation was returned, i.e. the feature is active.
			assert.Equal(t, tc.expectEnabled, manager != managerStub{})
		})
	}
}
// TestRestart verifies that the manager re-establishes its D-Bus connection
// after the shutdown-monitor channel closes: closing the channel three times
// must cause the (fake) systemDbus constructor to be invoked again each time.
func TestRestart(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)
	// systemDbus is package-level state; restore it when the test finishes.
	systemDbusTmp := systemDbus
	defer func() {
		systemDbus = systemDbusTmp
	}()
	shutdownGracePeriodRequested := 30 * time.Second
	shutdownGracePeriodCriticalPods := 10 * time.Second
	systemInhibitDelay := 40 * time.Second
	overrideSystemInhibitDelay := 40 * time.Second
	activePodsFunc := func() []*v1.Pod {
		return nil
	}
	killPodsFunc := func(pod *v1.Pod, isEvicted bool, gracePeriodOverride *int64, fn func(*v1.PodStatus)) error {
		return nil
	}
	syncNodeStatus := func() {}
	var shutdownChan chan bool
	var shutdownChanMut sync.Mutex
	// connChan signals each (re)connection attempt made by the manager.
	var connChan = make(chan struct{}, 1)
	lock.Lock()
	systemDbus = func() (dbusInhibiter, error) {
		defer func() {
			connChan <- struct{}{}
		}()
		// Hand the manager a fresh shutdown channel on every connect so the
		// test can close it to simulate a D-Bus disconnect.
		ch := make(chan bool)
		shutdownChanMut.Lock()
		shutdownChan = ch
		shutdownChanMut.Unlock()
		dbus := &fakeDbus{currentInhibitDelay: systemInhibitDelay, shutdownChan: ch, overrideSystemInhibitDelay: overrideSystemInhibitDelay}
		return dbus, nil
	}
	proberManager := probetest.FakeManager{}
	fakeRecorder := &record.FakeRecorder{}
	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
	manager, _ := NewManager(&Config{
		Logger:                          logger,
		ProbeManager:                    proberManager,
		Recorder:                        fakeRecorder,
		NodeRef:                         nodeRef,
		GetPodsFunc:                     activePodsFunc,
		KillPodFunc:                     killPodsFunc,
		SyncNodeStatusFunc:              syncNodeStatus,
		ShutdownGracePeriodRequested:    shutdownGracePeriodRequested,
		ShutdownGracePeriodCriticalPods: shutdownGracePeriodCriticalPods,
		StateDirectory:                  os.TempDir(),
	})
	err := manager.Start()
	lock.Unlock()
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Force three reconnect cycles: wait for a connection, then close the
	// monitor channel to make the manager dial again.
	for i := 0; i != 3; i++ {
		select {
		case <-time.After(dbusReconnectPeriod * 5):
			t.Fatal("wait dbus connect timeout")
		case <-connChan:
		}
		shutdownChanMut.Lock()
		close(shutdownChan)
		shutdownChanMut.Unlock()
	}
}
// Test_migrateConfig checks the translation of the legacy configuration pair
// (shutdownGracePeriodRequested, shutdownGracePeriodCriticalPods) into the
// per-priority ShutdownGracePeriodByPodPriority table: normal pods get the
// remainder of the budget, critical pods their dedicated slice, an all-zero
// config yields nil, and a critical budget exceeding the total is rejected.
func Test_migrateConfig(t *testing.T) {
	type shutdownConfig struct {
		shutdownGracePeriodRequested    time.Duration
		shutdownGracePeriodCriticalPods time.Duration
	}
	tests := []struct {
		name string
		args shutdownConfig
		want []kubeletconfig.ShutdownGracePeriodByPodPriority
	}{
		{
			name: "both shutdownGracePeriodRequested and shutdownGracePeriodCriticalPods",
			args: shutdownConfig{
				shutdownGracePeriodRequested:    300 * time.Second,
				shutdownGracePeriodCriticalPods: 120 * time.Second,
			},
			want: []kubeletconfig.ShutdownGracePeriodByPodPriority{
				{
					Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
					ShutdownGracePeriodSeconds: 180,
				},
				{
					Priority:                   scheduling.SystemCriticalPriority,
					ShutdownGracePeriodSeconds: 120,
				},
			},
		},
		{
			name: "only shutdownGracePeriodRequested",
			args: shutdownConfig{
				shutdownGracePeriodRequested:    100 * time.Second,
				shutdownGracePeriodCriticalPods: 0 * time.Second,
			},
			want: []kubeletconfig.ShutdownGracePeriodByPodPriority{
				{
					Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
					ShutdownGracePeriodSeconds: 100,
				},
				{
					Priority:                   scheduling.SystemCriticalPriority,
					ShutdownGracePeriodSeconds: 0,
				},
			},
		},
		{
			name: "empty configuration",
			args: shutdownConfig{
				shutdownGracePeriodRequested:    0 * time.Second,
				shutdownGracePeriodCriticalPods: 0 * time.Second,
			},
			want: nil,
		},
		{
			// critical budget larger than the total is invalid -> nil
			name: "wrong configuration",
			args: shutdownConfig{
				shutdownGracePeriodRequested:    1 * time.Second,
				shutdownGracePeriodCriticalPods: 100 * time.Second,
			},
			want: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := migrateConfig(tt.args.shutdownGracePeriodRequested, tt.args.shutdownGracePeriodCriticalPods); !assert.Equal(t, tt.want, got) {
				t.Errorf("migrateConfig() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_groupByPriority checks that pods are bucketed into shutdown groups by
// priority: in the expectations below, each pod lands in the group with the
// highest configured Priority that does not exceed the pod's own priority,
// and pods below the lowest threshold fall into the lowest group.
func Test_groupByPriority(t *testing.T) {
	type args struct {
		shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority
		pods                             []*v1.Pod
	}
	tests := []struct {
		name string
		args args
		want []podShutdownGroup
	}{
		{
			name: "migrate config",
			args: args{
				shutdownGracePeriodByPodPriority: migrateConfig(300*time.Second /* shutdownGracePeriodRequested */, 120*time.Second /* shutdownGracePeriodCriticalPods */),
				pods: []*v1.Pod{
					makePod("normal-pod", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil),
					makePod("highest-user-definable-pod", scheduling.HighestUserDefinablePriority, nil),
					makePod("critical-pod", scheduling.SystemCriticalPriority, nil),
				},
			},
			want: []podShutdownGroup{
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   scheduling.DefaultPriorityWhenNoDefaultClassExists,
						ShutdownGracePeriodSeconds: 180,
					},
					Pods: []*v1.Pod{
						makePod("normal-pod", scheduling.DefaultPriorityWhenNoDefaultClassExists, nil),
						makePod("highest-user-definable-pod", scheduling.HighestUserDefinablePriority, nil),
					},
				},
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   scheduling.SystemCriticalPriority,
						ShutdownGracePeriodSeconds: 120,
					},
					Pods: []*v1.Pod{
						makePod("critical-pod", scheduling.SystemCriticalPriority, nil),
					},
				},
			},
		},
		{
			name: "pod priority",
			args: args{
				shutdownGracePeriodByPodPriority: []kubeletconfig.ShutdownGracePeriodByPodPriority{
					{
						Priority:                   1,
						ShutdownGracePeriodSeconds: 10,
					},
					{
						Priority:                   2,
						ShutdownGracePeriodSeconds: 20,
					},
					{
						Priority:                   3,
						ShutdownGracePeriodSeconds: 30,
					},
					{
						Priority:                   4,
						ShutdownGracePeriodSeconds: 40,
					},
				},
				pods: []*v1.Pod{
					makePod("pod-0", 0, nil),
					makePod("pod-1", 1, nil),
					makePod("pod-2", 2, nil),
					makePod("pod-3", 3, nil),
					makePod("pod-4", 4, nil),
					makePod("pod-5", 5, nil),
				},
			},
			want: []podShutdownGroup{
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   1,
						ShutdownGracePeriodSeconds: 10,
					},
					Pods: []*v1.Pod{
						makePod("pod-0", 0, nil),
						makePod("pod-1", 1, nil),
					},
				},
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   2,
						ShutdownGracePeriodSeconds: 20,
					},
					Pods: []*v1.Pod{
						makePod("pod-2", 2, nil),
					},
				},
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   3,
						ShutdownGracePeriodSeconds: 30,
					},
					Pods: []*v1.Pod{
						makePod("pod-3", 3, nil),
					},
				},
				{
					ShutdownGracePeriodByPodPriority: kubeletconfig.ShutdownGracePeriodByPodPriority{
						Priority:                   4,
						ShutdownGracePeriodSeconds: 40,
					},
					Pods: []*v1.Pod{
						makePod("pod-4", 4, nil),
						makePod("pod-5", 5, nil),
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := groupByPriority(tt.args.shutdownGracePeriodByPodPriority, tt.args.pods); !assert.Equal(t, tt.want, got) {
				t.Errorf("groupByPriority() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_managerImpl_processShutdownEvent runs processShutdownEvent with a kill
// function that advances a fake clock far past the per-group budget, and
// asserts that the "pod killing time out" message is logged while no error is
// returned. The log is inspected through the ktesting sink.
func Test_managerImpl_processShutdownEvent(t *testing.T) {
	var (
		probeManager   = probetest.FakeManager{}
		fakeRecorder   = &record.FakeRecorder{}
		syncNodeStatus = func() {}
		nodeRef        = &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
		fakeclock      = testingclock.NewFakeClock(time.Now())
	)
	type fields struct {
		recorder                         record.EventRecorder
		nodeRef                          *v1.ObjectReference
		probeManager                     prober.Manager
		shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority
		getPods                          eviction.ActivePodsFunc
		killPodFunc                      eviction.KillPodFunc
		syncNodeStatus                   func()
		dbusCon                          dbusInhibiter
		inhibitLock                      systemd.InhibitLock
		nodeShuttingDownNow              bool
		clock                            clock.Clock
	}
	tests := []struct {
		name                   string
		fields                 fields
		wantErr                bool
		expectedOutputContains string
	}{
		{
			name: "kill pod func take too long",
			fields: fields{
				recorder:     fakeRecorder,
				nodeRef:      nodeRef,
				probeManager: probeManager,
				shutdownGracePeriodByPodPriority: []kubeletconfig.ShutdownGracePeriodByPodPriority{
					{
						Priority:                   1,
						ShutdownGracePeriodSeconds: 10,
					},
					{
						Priority:                   2,
						ShutdownGracePeriodSeconds: 20,
					},
				},
				getPods: func() []*v1.Pod {
					return []*v1.Pod{
						makePod("normal-pod", 1, nil),
						makePod("critical-pod", 2, nil),
					}
				},
				// Each kill jumps the fake clock by 60s, exceeding every
				// configured grace period and triggering the timeout path.
				killPodFunc: func(pod *v1.Pod, isEvicted bool, gracePeriodOverride *int64, fn func(*v1.PodStatus)) error {
					fakeclock.Step(60 * time.Second)
					return nil
				},
				syncNodeStatus: syncNodeStatus,
				clock:          fakeclock,
				dbusCon:        &fakeDbus{},
			},
			wantErr:                false,
			expectedOutputContains: "Shutdown manager pod killing time out",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			logger, _ := ktesting.NewTestContext(t)
			m := &managerImpl{
				logger:                           logger,
				recorder:                         tt.fields.recorder,
				nodeRef:                          tt.fields.nodeRef,
				probeManager:                     tt.fields.probeManager,
				shutdownGracePeriodByPodPriority: tt.fields.shutdownGracePeriodByPodPriority,
				getPods:                          tt.fields.getPods,
				killPodFunc:                      tt.fields.killPodFunc,
				syncNodeStatus:                   tt.fields.syncNodeStatus,
				dbusCon:                          tt.fields.dbusCon,
				inhibitLock:                      tt.fields.inhibitLock,
				nodeShuttingDownMutex:            sync.Mutex{},
				nodeShuttingDownNow:              tt.fields.nodeShuttingDownNow,
				clock:                            tt.fields.clock,
			}
			if err := m.processShutdownEvent(); (err != nil) != tt.wantErr {
				t.Errorf("managerImpl.processShutdownEvent() error = %v, wantErr %v", err, tt.wantErr)
			}
			underlier, ok := logger.GetSink().(ktesting.Underlier)
			if !ok {
				t.Fatalf("Should have had a ktesting LogSink, got %T", logger.GetSink())
			}
			log := underlier.GetBuffer().String()
			if !strings.Contains(log, tt.expectedOutputContains) {
				// Log will be shown on failure. To see it
				// during a successful run use "go test -v".
				t.Errorf("managerImpl.processShutdownEvent() should have logged %s, see actual output above.", tt.expectedOutputContains)
			}
		})
	}
}
|
package main
/*
www.rtve.es/api/clan/series/spanish/todas (follow redirect)
http://www.rtve.es/api/programas/80170/videos
*/
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
)
// stripchars returns str with every rune that appears in chr removed.
func stripchars(str, chr string) string {
	var out strings.Builder
	for _, r := range str {
		if !strings.ContainsRune(chr, r) {
			out.WriteRune(r)
		}
	}
	return out.String()
}
// Config is the on-disk configuration loaded from the user's JSON file.
type Config struct {
	Dirs         map[string]string // named directories: "cache", "download", "publish", "log"
	Keys         map[string]string // per-class AES keys for the ztnr consumer endpoint
	Programs     []ProgramConfig   // programs to crawl
	Verbose      bool              // enable debug() output to stderr
	Nocache      bool              // bypass the on-disk HTTP cache
	ItemsPerPage int               // page size used for API listing requests
}
// ProgramConfig identifies one program to crawl: its rtve numeric id and a
// human-readable name.
type ProgramConfig struct {
	Id   int
	Name string
}
// load reads and JSON-decodes the configuration file at path f into c.
// Any read or decode error is fatal.
func (c *Config) load(f string) {
	content, err := ioutil.ReadFile(f)
	if err != nil {
		log.Fatal(err)
	}
	err = json.Unmarshal(content, c)
	if err != nil {
		log.Fatal(err)
	}
}
/*
Episode is a representation of each episode as returned by the rtve API,
plus locally computed download state kept under Private.
*/
type Episode struct {
	ShortTitle       string
	LongTitle        string
	ShortDescription string
	LongDescription  string
	Episode          int                // episode number within the program
	ID               int `json:",string"` // numeric id; the API serializes it as a string
	ProgramRef       string
	ProgramInfo      struct {
		Title string
	}
	// Private is filled by Episode.remote, not by the API payload.
	Private struct {
		URL       string // ztnr consumer URL that answered the HEAD probe
		EndURL    string // final URL after redirects, playlist suffix stripped
		Offset    int
		Size      int64  // Content-Length reported by the server
		Ext       string // extension derived from EndURL (".mp4" fallback)
		Videofile string // on-disk name: "<ID><Ext>"
	}
	Qualities []struct {
		Type     string
		Preset   string
		Filesize int64
		Duration int
	}
}
/*
Programa is a representation of the list of available episodes of a program.
*/
type Programa struct {
	Name             string
	WebOficial       string
	WebRtve          string
	Description      string
	LongTitle        string
	ShortDescription string
	LongDescription  string
	ID               int `json:",string"` // numeric id; the API serializes it as a string
	// episodios accumulates episodes fetched by getVideos; being unexported
	// it is not part of the JSON payload.
	episodios []Episode
}
// videosPrograma mirrors the paginated "videos of a program" API response.
type videosPrograma struct {
	Page struct {
		TotalPages  int
		Total       int
		NumElements int
		Number      int // current page number (pages start at 1)
		Offset      int
		Size        int
		Items       []Episode
	}
}
// Programas mirrors the paginated program-listing API response.
type Programas struct {
	Page struct {
		TotalPages int
		Items      []Programa
	}
}
// makeDirs ensures every configured directory exists with mode 0755;
// any creation failure is fatal.
func makeDirs() {
	for _, d := range config.Dirs {
		if err := os.MkdirAll(d, 0755); err != nil {
			log.Fatal(err)
		}
	}
}
// pkcsS7Padding pads data to a multiple of the AES block size (16 bytes)
// using PKCS#7: n bytes of value n are appended, where 1 <= n <= 16 (a full
// extra block when the input is already block-aligned).
func pkcsS7Padding(data []byte) []byte {
	const blockSize = 16
	n := blockSize - len(data)%blockSize
	for i := 0; i < n; i++ {
		data = append(data, byte(n))
	}
	return data
}
// unpkcs7Padding strips PKCS#7 padding from data: the last byte encodes how
// many padding bytes to drop. Malformed input — an empty slice, or a padding
// count larger than the data — is returned unchanged; the previous version
// panicked (index out of range / negative slice bound) in those cases.
func unpkcs7Padding(data []byte) []byte {
	length := len(data)
	if length == 0 {
		return data
	}
	unpadding := int(data[length-1])
	if unpadding > length {
		return data
	}
	return data[:(length - unpadding)]
}
// getTime returns a Unix-milliseconds timestamp 150 hours in the future,
// rounded to the nearest whole hour.
func getTime() int64 {
	future := time.Now().Add(150 * time.Hour).Round(time.Hour)
	return future.UnixNano() / int64(time.Millisecond)
}
// cryptaes AES-CBC-encrypts text using an all-zero IV and PKCS#7 padding and
// returns the ciphertext base64-encoded. The key must be a valid AES key
// length (16, 24 or 32 bytes); anything else is fatal.
func cryptaes(text, key string) string {
	block, err := aes.NewCipher([]byte(key))
	if err != nil {
		log.Fatal(err)
	}
	// Deliberately a zero IV: the remote endpoint expects it.
	iv := make([]byte, 16)
	plaintext := pkcsS7Padding([]byte(text))
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)
	return base64.StdEncoding.EncodeToString(ciphertext)
}
// ztnrurl builds the ztnr consumer URL for a video: the payload
// "<id>_es_<t>" is AES-encrypted with the per-class key from the config and
// appended, base64-encoded, to the class-specific base URL.
func ztnrurl(id int, t int64, clase string) string {
	secret := fmt.Sprintf("%d_es_%d", id, t)
	token := cryptaes(secret, config.Keys[clase])
	return fmt.Sprintf("http://www.rtve.es/ztnr/consumer/%s/video/%s", clase, token)
}
// cacheFile maps a URL to its on-disk cache location: the SHA-256 hex digest
// of the URL, inside the configured cache directory.
func cacheFile(url string) string {
	sum := sha256.Sum256([]byte(url))
	name := fmt.Sprintf("%x", sum)
	return path.Join(config.Dirs["cache"], name)
}
// read fetches url through an on-disk cache and JSON-decodes the payload
// into v. Cached copies younger than 3 hours are reused unless Nocache is
// set. Network and filesystem failures are fatal; HTTP >= 400 and JSON
// decode errors are returned to the caller.
func read(url string, v interface{}) error {
	cache := cacheFile(url)
	fi, err := os.Stat(cache)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	// Refresh when caching is disabled, no cached copy exists, or the copy is
	// older than 3 hours. Short-circuit ordering keeps fi from being
	// dereferenced when os.Stat failed.
	if config.Nocache || os.IsNotExist(err) || time.Now().Unix()-fi.ModTime().Unix() > 3*3600 {
		log.Println("Fetching", url, "to cache", cache)
		// Cache for 3h
		res, err := http.Get(url)
		if err != nil {
			log.Fatalf("read http. Get error: %v", err)
		}
		if res.StatusCode >= 400 {
			// Close the body before bailing out so the connection is not
			// leaked (the original returned without closing).
			res.Body.Close()
			// %d: StatusCode is an int; the original %s verb misprinted it.
			return fmt.Errorf("http. Unexpected HTTP Status Code %d downloading %s", res.StatusCode, url)
		}
		log.Printf("Reading %v", res.Request)
		content, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			log.Fatal(err)
		}
		err = ioutil.WriteFile(cache, content, 0644)
		if err != nil {
			log.Fatal(err)
		}
	}
	content, err := ioutil.ReadFile(cache)
	if err != nil {
		log.Fatal(err)
	}
	err = json.Unmarshal(content, v)
	if err != nil {
		return fmt.Errorf("Got error %v unmarshaling %v", err, content)
	}
	return nil
}
// getVideos fetches one listing page of the program's videos and appends the
// episodes to p.episodios, recursing until the last page is reached.
// A failed page fetch or an empty page aborts with an error.
func (p *Programa) getVideos(programid, page int) error {
	// Pages are 1-based; the API additionally takes an absolute offset.
	offset := (page - 1) * config.ItemsPerPage
	url := fmt.Sprintf("http://www.rtve.es/api/programas/%d/videos?size=%d&offset=%d&page=%d", programid, config.ItemsPerPage, offset, page)
	var videos videosPrograma
	err := read(url, &videos)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("Error downloading page %d of %d", page, programid)
	}
	p.episodios = append(p.episodios, videos.Page.Items...)
	if len(videos.Page.Items) == 0 {
		return fmt.Errorf("Length 0 in videos.Page.Items for programid %d", programid)
	}
	log.Printf("Tenemos %d episodios de %s", videos.Page.NumElements, videos.Page.Items[0].ProgramInfo.Title)
	// Recurse into the next page until the listing is exhausted.
	if videos.Page.Number < videos.Page.TotalPages {
		err = p.getVideos(programid, page+1)
		if err != nil {
			return err
		}
	}
	return nil
}
// remote probes the ztnr consumer endpoint of the given class (e.g. "oceano")
// for this episode via an HTTP HEAD. On status 200 it fills e.Private with
// the final URL (redirects followed, HLS playlist suffix stripped), derived
// extension, target file name and Content-Length. The HTTP status code is
// returned in every case; transport errors are fatal.
func (e *Episode) remote(class string) int {
	t := time.Now().UTC().Round(time.Second)
	ts := t.UnixNano() / int64(time.Millisecond)
	videourl := ztnrurl(e.ID, ts, class)
	res, err := http.Head(videourl)
	if err != nil {
		log.Fatal(err)
	}
	if res.StatusCode == 200 {
		final := res.Request.URL.String()
		// Strip a trailing HLS playlist component when present. The original
		// sliced with the raw Index result, which panicked (negative bound)
		// whenever the marker was absent.
		if ui := strings.Index(final, "/playlist.m3u8"); ui >= 0 {
			e.Private.EndURL = final[0:ui]
		} else {
			e.Private.EndURL = final
		}
		// Same guard for the extension: no dot means no extension, letting
		// the mp4 fallback below kick in instead of panicking.
		if ei := strings.LastIndex(e.Private.EndURL, "."); ei >= 0 {
			e.Private.Ext = e.Private.EndURL[ei:]
		} else {
			e.Private.Ext = ""
		}
		/* FIXME: Hack to ignore m3u8 playlists
		   rtve has changed the endurl to be hls playlists. It would be ideal
		   to download the hls fragments in parallel and then create a single file.
		*/
		if e.Private.Ext == "" {
			e.Private.Ext = ".mp4"
			// log.Printf: the original passed a %v format string to Println.
			log.Printf("WARNING: Empty extension. Forcing mp4. %v", e)
		}
		e.Private.Videofile = fmt.Sprintf("%d%s", e.ID, e.Private.Ext)
		e.Private.Size = res.ContentLength
		e.Private.URL = videourl
	}
	return res.StatusCode
}
// json renders the episode as indented JSON. A marshal failure is logged and
// yields an empty string.
func (e *Episode) json() string {
	out, err := json.MarshalIndent(e, "", " ")
	if err != nil {
		log.Println("json marshall error:", err)
	}
	return string(out)
}
// writeData persists the episode metadata as "<ID>.json" in the download
// directory; a write failure is fatal.
func (e *Episode) writeData() {
	name := fmt.Sprintf("%d.json", e.ID)
	target := path.Join(config.Dirs["download"], name)
	if err := ioutil.WriteFile(target, []byte(e.json()), 0644); err != nil {
		log.Fatal(err)
	}
}
// debug prints its arguments to stderr, but only when verbose mode is on.
func debug(wat ...interface{}) {
	if !config.Verbose {
		return
	}
	fmt.Fprintln(os.Stderr, wat)
}
// stat probes the known content classes in order of preference and reports
// whether any served the episode (HTTP 200). The first hit wins; remote()
// fills e.Private as a side effect.
func (e *Episode) stat() bool {
	for _, class := range []string{"oceano", "carites", "orfeo", "caliope"} {
		code := e.remote(class)
		debug("e.stat()", e.ID, e.humanName(), "statusCode", code)
		if code == 200 {
			return true
		}
	}
	log.Println("No candidates for", e)
	return false
}
// download fetches the resolved video into the download directory, skipping
// the transfer when a local copy is already at least as big as the remote
// and "canonical" (its size matches one of the advertised Qualities).
// Download errors are logged and abort silently rather than being fatal.
func (e *Episode) download() {
	if e.Private.Videofile == "" {
		log.Fatal("e.Private.Videofile is empty when trying to download")
	}
	filename := path.Join(config.Dirs["download"], e.Private.Videofile)
	fi, err := os.Stat(filename)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	// Sizes advertised by the API; a local file whose size appears here is
	// considered a complete ("canonical") copy.
	sizes := map[int64]bool{}
	if !os.IsNotExist(err) {
		if e.Qualities != nil {
			for _, q := range e.Qualities {
				sizes[q.Filesize] = true
			}
		}
		debug("sizes", sizes, len(sizes), "sizes[fi.Size()]=", sizes[fi.Size()], "sizes[fi.Size()+1]=", sizes[fi.Size()+1])
		if fi.Size() >= e.Private.Size && sizes[fi.Size()] {
			// Our file is bigger and canonical
			// fmt.Fprintln(os.Stdout, err, "> Sile", fi.Size(), e.Private.Size)
			return
		}
		if fi.Size() < e.Private.Size {
			if sizes[e.Private.Size] {
				log.Println("Better version of", e.ID, fi.Size(), "available. Remote size:", e.Private.Size)
			} else {
				// There's a greater size available but it's not listed. Better make a backup of the local file.
				log.Println("Larger NOT CANONICAL version of", e.ID, fi.Size(), "available. Remote size:", e.Private.Size)
				log.Println("Backing up", filename, "to", filename+".bak")
				err = os.Rename(filename, filename+".bak")
				if err != nil {
					log.Println("Error moving", filename, "to", filename+".bak", err)
					return
				}
			}
		}
	}
	// Write to a .temp file and rename into place only on success, so a
	// partial transfer never masquerades as a finished download.
	output, err := os.Create(filename + ".temp")
	if err != nil {
		log.Println("Error while creating", filename, "-", err)
		return
	}
	defer output.Close()
	log.Printf("Downloading %s (%d MB) from %s (%s)", e.Private.Videofile, e.Private.Size/1024/1024, e.Private.URL, e.Private.EndURL)
	response, err := http.Get(e.Private.URL)
	if err != nil {
		log.Println("Error while downloading", e.Private.URL, "-", err)
		return
	}
	defer response.Body.Close()
	n, err := io.Copy(output, response.Body)
	if err != nil {
		log.Println("Error while downloading", e.Private.URL, "-", err)
		return
	}
	err = os.Rename(filename+".temp", filename)
	if err != nil {
		log.Println("Error renaming", filename+".temp", "to", filename, err)
		return
	}
	log.Println(filename, "downloaded.", n, "bytes.")
}
// setupLog opens (creating/appending) an hourly-named logfile in the log
// directory and tees the standard logger to it and stdout. The caller owns
// the returned handle; this function never closes it.
func setupLog() *os.File {
	// MarshalText of the hour-truncated UTC time produces the file name;
	// the (unlikely) marshal error is deliberately ignored.
	t, _ := time.Now().UTC().Truncate(time.Hour).MarshalText()
	ts := string(t[:])
	filename := fmt.Sprintf("%s.log", ts)
	logfile := path.Join(config.Dirs["log"], filename)
	log.Println("Logfile:", logfile, config.Dirs["log"])
	f, err := os.OpenFile(logfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf("error opening file: %v", err)
	}
	log.SetFlags(log.LstdFlags)
	log.SetOutput(io.MultiWriter(f, os.Stdout))
	return f
}
// fromURL loads the episode from the rtve API endpoint at url, replacing *e
// with the first item of the response. A fetch/decode failure or an empty
// result set is fatal: the previous version ignored read()'s error and
// indexed Items[0] unconditionally, which panicked on empty responses.
func (e *Episode) fromURL(url string) {
	type RemoteEpisode struct {
		Page struct {
			Items []Episode
		}
	}
	var v RemoteEpisode
	if err := read(url, &v); err != nil {
		log.Fatal(err)
	}
	if len(v.Page.Items) == 0 {
		log.Fatalf("no episode data at %s", url)
	}
	*e = v.Page.Items[0]
}
// fromFile loads the episode from a local JSON metadata file; any read or
// decode error is fatal.
func (e *Episode) fromFile(f string) {
	content, err := ioutil.ReadFile(f)
	if err != nil {
		log.Fatal(err)
	}
	if err := json.Unmarshal(content, e); err != nil {
		log.Fatal(err)
	}
}
// humanName renders "<program title> <episode number> - <long title>".
func (e *Episode) humanName() string {
	return fmt.Sprintf("%s %d - %s", e.ProgramInfo.Title, e.Episode, e.LongTitle)
}
// publish hard-links every downloaded episode (one .json per episode in the
// download dir) into a per-program folder under the publish dir, named with
// the human-readable title. Already-published files (EEXIST) are skipped
// silently; other link failures are logged.
func publish() {
	dirfiles, err := ioutil.ReadDir(config.Dirs["download"])
	if err != nil {
		log.Fatalf("error reading dir: %v", err)
	}
	for _, file := range dirfiles {
		if path.Ext(file.Name()) == ".json" {
			var e Episode
			e.fromFile(path.Join(config.Dirs["download"], file.Name()))
			// "Turno de oficio" is deliberately excluded from publishing.
			if e.ProgramInfo.Title == "Turno de oficio" {
				continue
			}
			dir := path.Join(config.Dirs["publish"], e.ProgramInfo.Title)
			err := os.MkdirAll(dir, 0755)
			if err != nil {
				log.Fatal(err)
			}
			videofile := path.Join(config.Dirs["download"], e.Private.Videofile)
			filename := fmt.Sprintf("%s%s", e.humanName(), e.Private.Ext)
			publishFile := path.Join(dir, filename)
			// fmt.Println(e.ID, publishFile)
			// TODO: Episode should own the integrity-check helpers.
			err = os.Link(videofile, publishFile)
			if err != nil {
				if !os.IsExist(err) {
					log.Printf("Cannot publish %s (%d) to %s", videofile, e.ID, publishFile)
				}
			} else {
				log.Printf("Published %s to %s", videofile, publishFile)
			}
		}
	}
}
// indexFiles walks the download directory and parses every episode .json.
// It is currently a reindexing stub: the loaded Episode is discarded without
// any further processing.
func indexFiles() {
	log.Println("Believe it or not I'm reindexing")
	dirfiles, err := ioutil.ReadDir(config.Dirs["download"])
	if err != nil {
		log.Fatalf("error reading dir: %v", err)
	}
	for _, file := range dirfiles {
		if path.Ext(file.Name()) == ".json" {
			var e Episode
			e.fromFile(path.Join(config.Dirs["download"], file.Name()))
			// fmt.Println(file.Name(), e.ID, e.Private.Size)
			// TODO: Episode should own the integrity-check helpers.
		}
	}
}
// remoteEpisode fetches the metadata for one video id from the rtve API and,
// when a downloadable source exists, persists the metadata and downloads it.
func remoteEpisode(id int) {
	var e Episode
	e.ID = id
	log.Println("Getting remoteEpisode", e.json())
	e.fromURL(fmt.Sprintf("http://www.rtve.es/api/videos/%d", id))
	log.Println("Stat of remoteEpisode", e.json())
	if !e.stat() {
		return
	}
	log.Println("remoteEpisode", e.json())
	e.writeData() // should check if previous steps didn't work
	e.download()
}
// listPrograms prints `{ "id": ..., "name": ... },` snippets for every
// program in rtve's category 490, recursing through the listing pages.
// Note the recursion happens before printing, so later pages are emitted
// before the current one.
func listPrograms(page int) {
	type RemotePrograms struct {
		Page struct {
			Items      []Programa
			Number     int
			TotalPages int
		}
	}
	var rp RemotePrograms
	// var drp RemotePrograms
	url := fmt.Sprintf("http://www.rtve.es/api/agr-programas/490/programas.json?size=%d&page=%d", config.ItemsPerPage, page)
	err := read(url, &rp)
	if err != nil {
		log.Fatal(err)
	}
	if rp.Page.Number < rp.Page.TotalPages {
		listPrograms(rp.Page.Number + 1)
	}
	for _, v := range rp.Page.Items {
		fmt.Printf("{ \"id\": %d, \"name\": \"%s\" },\n", v.ID, v.Name)
		if config.Verbose {
			fmt.Printf("// %s: %s\n", v.Name, v.WebRtve)
		}
	}
}
var config Config
// main loads the user configuration, sets up logging, parses flags, and
// either runs one of the one-shot modes (list programs, show config,
// reindex+publish, fetch a single episode) or the default mirror loop
// over every program in config.Programs.
func main() {
	config.load(os.ExpandEnv("${HOME}/.local/rtve-alacarta.json"))
	setupLog()
	showconfig := false
	doindex := false
	dolist := false
	doepisode := 0
	config.ItemsPerPage = 50
	flag.BoolVar(&showconfig, "sc", false, "show config")
	flag.BoolVar(&config.Nocache, "nc", false, "nocache")
	flag.BoolVar(&config.Verbose, "v", false, "verbose")
	flag.BoolVar(&doindex, "i", false, "reindex the whole thing")
	flag.BoolVar(&dolist, "l", false, "list programs")
	flag.IntVar(&doepisode, "e", 0, "single episode")
	flag.Parse()
	debug("verbose active")
	if dolist {
		listPrograms(1)
		return
	}
	if showconfig {
		fmt.Println(config)
		return
	}
	if doindex {
		indexFiles()
		publish()
		return
	}
	if doepisode > 0 {
		remoteEpisode(doepisode)
		return
	}
	makeDirs()
	// BUG FIX: os.Getpid was passed as a function value to a %d verb
	// (go vet flags this); it must be called.
	log.Printf("Starting %s (PID: %d) at %s", os.Args[0], os.Getpid(), time.Now().UTC())
	for _, v := range config.Programs {
		var p Programa
		err := p.getVideos(v.Id, 1)
		if err != nil {
			log.Println(err)
			continue
		}
		for _, e := range p.episodios {
			if e.stat() {
				e.writeData() // should check if previous steps didn't work
				e.download()
			} else {
				log.Println("Cannot stat", e.humanName())
			}
		}
	}
	log.Printf("Finishing %s (PID: %d) at %s", os.Args[0], os.Getpid(), time.Now().UTC())
}
Use correct URL for download
package main
/*
www.rtve.es/api/clan/series/spanish/todas (follow redirect)
http://www.rtve.es/api/programas/80170/videos
*/
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
)
// stripchars returns str with every rune that occurs in chr removed.
func stripchars(str, chr string) string {
	drop := func(r rune) rune {
		if strings.ContainsRune(chr, r) {
			return -1 // a negative rune tells strings.Map to drop it
		}
		return r
	}
	return strings.Map(drop, str)
}
// Config holds the application settings loaded from the user's JSON
// configuration file (see Config.load and main).
type Config struct {
	Dirs map[string]string // named working directories: "download", "publish", "cache", "log"
	Keys map[string]string // per-class AES keys for the ztnr endpoint (used by cryptaes via ztnrurl)
	Programs []ProgramConfig // programs mirrored on a default run
	Verbose bool // enables debug() output (also set by the -v flag)
	Nocache bool // bypasses the on-disk HTTP cache in read() (also the -nc flag)
	ItemsPerPage int // page size requested from the paged API endpoints
}
// ProgramConfig identifies one rtve.es program to mirror.
type ProgramConfig struct {
	Id int // numeric program id in the rtve.es API
	Name string // human-readable name (informational only)
}
// load reads the JSON configuration at path f into c. Any read or parse
// failure is fatal: the program cannot run unconfigured.
func (c *Config) load(f string) {
	raw, err := ioutil.ReadFile(f)
	if err != nil {
		log.Fatal(err)
	}
	if err = json.Unmarshal(raw, c); err != nil {
		log.Fatal(err)
	}
}
/*
Episode is a representation of each episode. Most fields are decoded
straight from the rtve.es API; Private holds locally derived download
state and is persisted together with the API fields by writeData.
*/
type Episode struct {
	ShortTitle string
	LongTitle string
	ShortDescription string
	LongDescription string
	Episode int
	// The API serializes the numeric id as a JSON string.
	ID int `json:",string"`
	ProgramRef string
	ProgramInfo struct {
		Title string
	}
	// Private is filled in by (*Episode).remote: the resolved media URL,
	// expected size, file extension and local file name.
	Private struct {
		URL string
		EndURL string
		Offset int
		Size int64
		Ext string
		Videofile string
	}
	// Qualities lists the variants advertised by the API; their file
	// sizes are used by download() to decide if a local copy is canonical.
	Qualities []struct {
		Type string
		Preset string
		Filesize int64
		Duration int
	}
}
/*
Programa is a representation of the list of available episodes of a
program, as returned by the rtve.es API.
*/
type Programa struct {
	Name string
	WebOficial string
	WebRtve string
	Description string
	LongTitle string
	ShortDescription string
	LongDescription string
	// The API serializes the numeric id as a JSON string.
	ID int `json:",string"`
	// episodios is filled locally by getVideos (unexported: not part of
	// the API payload).
	episodios []Episode
}
// videosPrograma mirrors the paged JSON envelope returned by the
// /api/programas/{id}/videos endpoint: paging metadata plus the episodes
// of the requested page.
type videosPrograma struct {
	Page struct {
		TotalPages int
		Total int
		NumElements int
		Number int
		Offset int
		Size int
		Items []Episode
	}
}
// Programas mirrors the paged JSON envelope of the program-listing
// endpoints. NOTE(review): appears unused in this file — listPrograms
// declares its own local equivalent; confirm before removing.
type Programas struct {
	Page struct {
		TotalPages int
		Items []Programa
	}
}
// makeDirs ensures every directory named in config.Dirs exists, creating
// missing ones (and parents) with mode 0755. Any failure is fatal.
func makeDirs() {
	for _, d := range config.Dirs {
		if err := os.MkdirAll(d, 0755); err != nil {
			log.Fatal(err)
		}
	}
}
// pkcsS7Padding appends PKCS#7 padding so the result length is a
// multiple of the AES block size (16). A block-aligned input gains a
// full block of padding, as the scheme requires.
func pkcsS7Padding(data []byte) []byte {
	const blockSize = 16
	n := blockSize - len(data)%blockSize
	pad := bytes.Repeat([]byte{byte(n)}, n)
	return append(data, pad...)
}
// unpkcs7Padding strips the PKCS#7 padding appended by pkcsS7Padding:
// the last byte encodes the pad length.
//
// BUG FIX: the original indexed data[length-1] unconditionally (panic on
// empty input) and sliced with an unvalidated pad length (panic when the
// claimed pad exceeds the data). Invalid input is now returned unchanged,
// matching the best-effort style of the rest of the file.
func unpkcs7Padding(data []byte) []byte {
	length := len(data)
	if length == 0 {
		return data
	}
	unpadding := int(data[length-1])
	if unpadding <= 0 || unpadding > length {
		return data // malformed padding: leave the data alone
	}
	return data[:length-unpadding]
}
// getTime returns a Unix timestamp in milliseconds, shifted 150 hours
// into the future and rounded to the nearest hour — presumably the skew
// the remote ztnr endpoint tolerates (TODO confirm).
func getTime() int64 {
	future := time.Now().Add(150 * time.Hour).Round(time.Hour)
	return future.UnixNano() / int64(time.Millisecond)
}
// cryptaes encrypts text with AES-CBC using an all-zero IV, pads the
// plaintext with PKCS#7, and returns the ciphertext base64-encoded.
// The key must be a valid AES key length (16/24/32 bytes) or the program
// aborts. NOTE(review): the fixed zero IV makes output deterministic;
// that appears to be what the remote endpoint expects.
func cryptaes(text, key string) string {
	block, err := aes.NewCipher([]byte(key))
	if err != nil {
		log.Fatal(err)
	}
	iv := make([]byte, aes.BlockSize)
	plaintext := pkcsS7Padding([]byte(text))
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)
	return base64.StdEncoding.EncodeToString(ciphertext)
}
// ztnrurl builds the ztnr consumer URL for a video id: the id and a
// millisecond timestamp are encrypted with the per-class key from the
// configuration and embedded in the URL path.
func ztnrurl(id int, t int64, clase string) string {
	secret := fmt.Sprintf("%d_es_%d", id, t)
	token := cryptaes(secret, config.Keys[clase])
	return fmt.Sprintf("http://www.rtve.es/ztnr/consumer/%s/video/%s", clase, token)
}
// cacheFile maps a URL to its on-disk cache location: the hex-encoded
// SHA-256 of the URL, inside config.Dirs["cache"].
func cacheFile(url string) string {
	sum := sha256.Sum256([]byte(url))
	name := fmt.Sprintf("%x", sum)
	return path.Join(config.Dirs["cache"], name)
}
// read fetches url through an on-disk cache and unmarshals the JSON
// response into v.
//
// The cached copy is reused unless config.Nocache is set, the cache file
// is missing, or it is older than three hours. Transport and cache I/O
// errors are fatal; an HTTP status >= 400 and JSON decode failures are
// returned to the caller.
func read(url string, v interface{}) error {
	cache := cacheFile(url)
	fi, err := os.Stat(cache)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	// Refresh the cache entry after three hours. (A previous comment
	// claimed 12h; 3*3600 is three hours.)
	if config.Nocache || os.IsNotExist(err) || time.Now().Unix()-fi.ModTime().Unix() > 3*3600 {
		log.Println("Fetching", url, "to cache", cache)
		res, err := http.Get(url)
		if err != nil {
			log.Fatalf("read http.Get error: %v", err)
		}
		if res.StatusCode >= 400 {
			// BUG FIX: the body was leaked on this path, and the status
			// code (an int) was formatted with %s.
			res.Body.Close()
			return fmt.Errorf("unexpected HTTP status code %d downloading %s", res.StatusCode, url)
		}
		log.Printf("Reading %v", res.Request)
		content, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			log.Fatal(err)
		}
		if err = ioutil.WriteFile(cache, content, 0644); err != nil {
			log.Fatal(err)
		}
	}
	content, err := ioutil.ReadFile(cache)
	if err != nil {
		log.Fatal(err)
	}
	if err = json.Unmarshal(content, v); err != nil {
		// BUG FIX: the previous message dumped the raw body bytes with
		// %v; name the URL instead.
		return fmt.Errorf("unmarshaling %s: %v", url, err)
	}
	return nil
}
// getVideos fetches one page of the program's episode listing from the
// rtve.es API, appends the items to p.episodios, and recurses until the
// last page. Returns an error on fetch failure or when a page comes back
// empty (episodes appended by earlier pages are kept either way).
func (p *Programa) getVideos(programid, page int) error {
	offset := (page - 1) * config.ItemsPerPage
	url := fmt.Sprintf("http://www.rtve.es/api/programas/%d/videos?size=%d&offset=%d&page=%d", programid, config.ItemsPerPage, offset, page)
	var videos videosPrograma
	err := read(url, &videos)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("Error downloading page %d of %d", page, programid)
	}
	p.episodios = append(p.episodios, videos.Page.Items...)
	// An empty page means the listing ended early or the API misbehaved;
	// bail before indexing Items[0] below.
	if len(videos.Page.Items) == 0 {
		return fmt.Errorf("Length 0 in videos.Page.Items for programid %d", programid)
	}
	log.Printf("Tenemos %d episodios de %s", videos.Page.NumElements, videos.Page.Items[0].ProgramInfo.Title)
	if videos.Page.Number < videos.Page.TotalPages {
		err = p.getVideos(programid, page+1)
		if err != nil {
			return err
		}
	}
	return nil
}
// remote resolves the actual media URL for the episode through the ztnr
// consumer endpoint of the given class, using a HEAD request so nothing
// is downloaded yet. On a 200 response it fills in e.Private (final URL
// trimmed of the playlist suffix, extension, local file name and size)
// and returns the HTTP status code. Transport errors are fatal.
func (e *Episode) remote(class string) int {
	t := time.Now().UTC().Round(time.Second)
	ts := t.UnixNano() / int64(time.Millisecond)
	videourl := ztnrurl(e.ID, ts, class)
	res, err := http.Head(videourl)
	if err != nil {
		log.Fatal(err)
	}
	if res.StatusCode == 200 {
		// The request may have been redirected; keep everything before
		// the HLS playlist suffix as the downloadable URL.
		final := res.Request.URL.String()
		ui := strings.Index(final, "/playlist.m3u8")
		if ui < 0 {
			// BUG FIX: a missing suffix made the slice below panic.
			ui = len(final)
		}
		e.Private.EndURL = final[:ui]
		if ei := strings.LastIndex(e.Private.EndURL, "."); ei >= 0 {
			e.Private.Ext = e.Private.EndURL[ei:]
		} else {
			// BUG FIX: LastIndex == -1 previously panicked; fall through
			// to the mp4 default below.
			e.Private.Ext = ""
		}
		/* FIXME: Hack to ignore m3u8 playlists
		   rtve has changed the endurl to be hls playlists. It would be ideal
		   to download the hls fragments in parallel and then create a single file.
		*/
		if e.Private.Ext == "" {
			e.Private.Ext = ".mp4"
			// BUG FIX: was log.Println with a %v verb printed literally.
			log.Printf("WARNING: Empty extension. Forcing mp4. %v", e)
		}
		e.Private.Videofile = fmt.Sprintf("%d%s", e.ID, e.Private.Ext)
		e.Private.Size = res.ContentLength
		// FIXME: This is not the value we want to store
		e.Private.URL = videourl
	}
	return res.StatusCode
}
// json renders the episode as indented JSON. A marshal failure is
// logged and an empty string is returned.
func (e *Episode) json() string {
	out, err := json.MarshalIndent(e, "", " ")
	if err != nil {
		log.Println("json marshall error:", err)
	}
	return string(out)
}
// writeData persists the episode metadata as <ID>.json in the download
// directory; a write failure is fatal.
func (e *Episode) writeData() {
	dest := path.Join(config.Dirs["download"], fmt.Sprintf("%d.json", e.ID))
	if err := ioutil.WriteFile(dest, []byte(e.json()), 0644); err != nil {
		log.Fatal(err)
	}
}
// debug writes its arguments to stderr, but only when verbose mode is
// enabled in the configuration.
func debug(wat ...interface{}) {
	if config.Verbose {
		// BUG FIX: forward the variadic slice with wat... so the
		// arguments print individually instead of as one []interface{}.
		fmt.Fprintln(os.Stderr, wat...)
	}
}
// stat tries each known ztnr consumer class in order until one answers
// 200 for this episode, letting remote() fill in e.Private. Returns true
// when a working source was found.
func (e *Episode) stat() bool {
	// Consumer classes to try, in order of preference.
	keyorder := []string{"oceano", "carites", "orfeo", "caliope"}
	gotcha := false
	for _, k := range keyorder {
		statusCode := e.remote(k)
		debug("e.stat()", e.ID, e.humanName(), "statusCode", statusCode)
		if statusCode == 200 {
			gotcha = true
			break
		}
	}
	if !gotcha {
		log.Println("No candidates for", e)
	}
	return gotcha
}
// download fetches the episode's media into config.Dirs["download"].
//
// It skips the download when the local copy is already at least as large
// as the remote and its size matches one the API lists as canonical. A
// smaller-than-remote local file that is not canonical is kept as .bak.
// Data is written to <file>.temp and renamed into place only after a
// complete copy, so an interrupted run never leaves a truncated final
// file. All download errors are logged and abort this episode only.
func (e *Episode) download() {
	if e.Private.Videofile == "" {
		log.Fatal("e.Private.Videofile is empty when trying to download")
	}
	filename := path.Join(config.Dirs["download"], e.Private.Videofile)
	fi, err := os.Stat(filename)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	// Sizes the API declares for the available qualities; a local file
	// whose size appears here is considered a complete canonical copy.
	sizes := map[int64]bool{}
	if !os.IsNotExist(err) {
		if e.Qualities != nil {
			for _, q := range e.Qualities {
				sizes[q.Filesize] = true
			}
		}
		debug("sizes", sizes, len(sizes), "sizes[fi.Size()]=", sizes[fi.Size()], "sizes[fi.Size()+1]=", sizes[fi.Size()+1])
		if fi.Size() >= e.Private.Size && sizes[fi.Size()] {
			// Our file is bigger and canonical: nothing to do.
			return
		}
		if fi.Size() < e.Private.Size {
			if sizes[e.Private.Size] {
				log.Println("Better version of", e.ID, fi.Size(), "available. Remote size:", e.Private.Size)
			} else {
				// There's a greater size available but it's not listed.
				// Better make a backup of the local file.
				log.Println("Larger NOT CANONICAL version of", e.ID, fi.Size(), "available. Remote size:", e.Private.Size)
				log.Println("Backing up", filename, "to", filename+".bak")
				err = os.Rename(filename, filename+".bak")
				if err != nil {
					log.Println("Error moving", filename, "to", filename+".bak", err)
					return
				}
			}
		}
	}
	output, err := os.Create(filename + ".temp")
	if err != nil {
		log.Println("Error while creating", filename, "-", err)
		return
	}
	defer output.Close()
	log.Printf("Downloading %s (%d MB) from %s (%s)", e.Private.Videofile, e.Private.Size/1024/1024, e.Private.URL, e.Private.EndURL)
	response, err := http.Get(e.Private.EndURL)
	if err != nil {
		log.Println("Error while downloading", e.Private.EndURL, "-", err)
		return
	}
	defer response.Body.Close()
	// BUG FIX: without this check an HTTP error page (403/404/...) was
	// saved to disk as if it were the video.
	if response.StatusCode >= 400 {
		log.Println("Error while downloading", e.Private.EndURL, "- HTTP status", response.StatusCode)
		return
	}
	n, err := io.Copy(output, response.Body)
	if err != nil {
		log.Println("Error while downloading", e.Private.EndURL, "-", err)
		return
	}
	// BUG FIX: close the temp file before renaming it into place (the
	// deferred Close above then returns a harmless already-closed error).
	if err = output.Close(); err != nil {
		log.Println("Error closing", filename+".temp", err)
		return
	}
	err = os.Rename(filename+".temp", filename)
	if err != nil {
		log.Println("Error renaming", filename+".temp", "to", filename, err)
		return
	}
	log.Println(filename, "downloaded.", n, "bytes.")
}
// setupLog opens (or creates) an hourly log file under config.Dirs["log"]
// named after the current UTC hour, and tees all log output to it and to
// stdout. The open file is returned so a caller could close it; main
// currently ignores it. Failure to open the file is fatal.
func setupLog() *os.File {
	// MarshalText on the truncated time yields an RFC 3339 timestamp,
	// giving one log file per hour; the marshal error is ignored since
	// time.Time.MarshalText only fails for out-of-range years.
	t, _ := time.Now().UTC().Truncate(time.Hour).MarshalText()
	ts := string(t[:])
	filename := fmt.Sprintf("%s.log", ts)
	logfile := path.Join(config.Dirs["log"], filename)
	log.Println("Logfile:", logfile, config.Dirs["log"])
	f, err := os.OpenFile(logfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf("error opening file: %v", err)
	}
	log.SetFlags(log.LstdFlags)
	log.SetOutput(io.MultiWriter(f, os.Stdout))
	return f
}
// fromURL populates e from the single-episode API endpoint. The endpoint
// wraps the episode in a paged envelope; the first (only) item is used.
// A fetch/decode failure or an empty result is fatal, since callers
// immediately use the episode data.
func (e *Episode) fromURL(url string) {
	type RemoteEpisode struct {
		Page struct {
			Items []Episode
		}
	}
	var v RemoteEpisode
	// BUG FIX: the read error was silently discarded, and an empty Items
	// slice made the index below panic.
	if err := read(url, &v); err != nil {
		log.Fatal(err)
	}
	if len(v.Page.Items) == 0 {
		log.Fatalf("no episode data at %s", url)
	}
	*e = v.Page.Items[0]
}
// fromFile populates e from a previously saved metadata JSON file.
// Read or parse failures are fatal.
func (e *Episode) fromFile(f string) {
	raw, err := ioutil.ReadFile(f)
	if err != nil {
		log.Fatal(err)
	}
	if err = json.Unmarshal(raw, e); err != nil {
		log.Fatal(err)
	}
}
// humanName builds the readable identifier
// "<program title> <episode number> - <long title>".
func (e *Episode) humanName() string {
	program := e.ProgramInfo.Title
	return fmt.Sprintf("%s %d - %s", program, e.Episode, e.LongTitle)
}
// publish hard-links every downloaded video into a per-program directory
// under config.Dirs["publish"], using the human-readable episode name as
// the file name. Already-existing links are silently left alone; other
// link failures are logged and skipped.
func publish() {
	dirfiles, err := ioutil.ReadDir(config.Dirs["download"])
	if err != nil {
		log.Fatalf("error reading dir: %v", err)
	}
	for _, file := range dirfiles {
		if path.Ext(file.Name()) == ".json" {
			var e Episode
			e.fromFile(path.Join(config.Dirs["download"], file.Name()))
			// Hard-coded exclusion — presumably this program must not be
			// republished; TODO: move to configuration.
			if e.ProgramInfo.Title == "Turno de oficio" {
				continue
			}
			dir := path.Join(config.Dirs["publish"], e.ProgramInfo.Title)
			err := os.MkdirAll(dir, 0755)
			if err != nil {
				log.Fatal(err)
			}
			videofile := path.Join(config.Dirs["download"], e.Private.Videofile)
			filename := fmt.Sprintf("%s%s", e.humanName(), e.Private.Ext)
			publishFile := path.Join(dir, filename)
			// Episode should own the integrity-check helpers (TODO).
			err = os.Link(videofile, publishFile)
			if err != nil {
				// An existing link is the normal steady state: stay quiet.
				if !os.IsExist(err) {
					log.Printf("Cannot publish %s (%d) to %s", videofile, e.ID, publishFile)
				}
			} else {
				log.Printf("Published %s to %s", videofile, publishFile)
			}
		}
	}
}
// indexFiles walks the download directory and parses every .json
// metadata file. NOTE(review): beyond validating that each file parses
// (fromFile is fatal on error), the loaded Episode is currently unused —
// the intended integrity checks are still TODO.
func indexFiles() {
	log.Println("Believe it or not I'm reindexing")
	dirfiles, err := ioutil.ReadDir(config.Dirs["download"])
	if err != nil {
		log.Fatalf("error reading dir: %v", err)
	}
	for _, file := range dirfiles {
		if path.Ext(file.Name()) == ".json" {
			var e Episode
			e.fromFile(path.Join(config.Dirs["download"], file.Name()))
			// Episode should own the integrity-check helpers (TODO).
		}
	}
}
// remoteEpisode fetches metadata for a single episode id from the API,
// resolves a working media source for it, and if one is found persists
// the metadata and downloads the video.
func remoteEpisode(id int) {
	var e Episode
	e.ID = id
	log.Println("Getting remoteEpisode", e.json())
	e.fromURL(fmt.Sprintf("http://www.rtve.es/api/videos/%d", id))
	log.Println("Stat of remoteEpisode", e.json())
	if e.stat() {
		log.Println("remoteEpisode", e.json())
		e.writeData() // should check if previous steps didn't work
		e.download()
	}
}
// listPrograms prints every program in rtve.es category 490 as JSON
// snippets suitable for pasting into the configuration file. It recurses
// to the last page before printing, so output arrives in reverse page
// order.
func listPrograms(page int) {
	// Local mirror of the paged API envelope for this endpoint.
	type RemotePrograms struct {
		Page struct {
			Items []Programa
			Number int
			TotalPages int
		}
	}
	var rp RemotePrograms
	url := fmt.Sprintf("http://www.rtve.es/api/agr-programas/490/programas.json?size=%d&page=%d", config.ItemsPerPage, page)
	err := read(url, &rp)
	if err != nil {
		log.Fatal(err)
	}
	// Recurse first: later pages are printed before this one.
	if rp.Page.Number < rp.Page.TotalPages {
		listPrograms(rp.Page.Number + 1)
	}
	for _, v := range rp.Page.Items {
		fmt.Printf("{ \"id\": %d, \"name\": \"%s\" },\n", v.ID, v.Name)
		if config.Verbose {
			fmt.Printf("// %s: %s\n", v.Name, v.WebRtve)
		}
	}
}
var config Config
// main loads the user configuration, sets up logging, parses flags, and
// either runs one of the one-shot modes (list programs, show config,
// reindex+publish, fetch a single episode) or the default mirror loop
// over every program in config.Programs.
func main() {
	config.load(os.ExpandEnv("${HOME}/.local/rtve-alacarta.json"))
	setupLog()
	showconfig := false
	doindex := false
	dolist := false
	doepisode := 0
	config.ItemsPerPage = 50
	flag.BoolVar(&showconfig, "sc", false, "show config")
	flag.BoolVar(&config.Nocache, "nc", false, "nocache")
	flag.BoolVar(&config.Verbose, "v", false, "verbose")
	flag.BoolVar(&doindex, "i", false, "reindex the whole thing")
	flag.BoolVar(&dolist, "l", false, "list programs")
	flag.IntVar(&doepisode, "e", 0, "single episode")
	flag.Parse()
	debug("verbose active")
	if dolist {
		listPrograms(1)
		return
	}
	if showconfig {
		fmt.Println(config)
		return
	}
	if doindex {
		indexFiles()
		publish()
		return
	}
	if doepisode > 0 {
		remoteEpisode(doepisode)
		return
	}
	makeDirs()
	// BUG FIX: os.Getpid was passed as a function value to a %d verb
	// (go vet flags this); it must be called.
	log.Printf("Starting %s (PID: %d) at %s", os.Args[0], os.Getpid(), time.Now().UTC())
	for _, v := range config.Programs {
		var p Programa
		err := p.getVideos(v.Id, 1)
		if err != nil {
			log.Println(err)
			continue
		}
		for _, e := range p.episodios {
			if e.stat() {
				e.writeData() // should check if previous steps didn't work
				e.download()
			} else {
				log.Println("Cannot stat", e.humanName())
			}
		}
	}
	log.Printf("Finishing %s (PID: %d) at %s", os.Args[0], os.Getpid(), time.Now().UTC())
}
|
Cosmetic improvements
|
package air
import (
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestNewListener checks listener construction: with no whitelist the
// allowed-relayer set stays nil; with a mixed list of bare IPs and CIDR
// blocks (IPv4 and IPv6) every entry is parsed into an IP net.
func TestNewListener(t *testing.T) {
	a := New()
	a.PROXYEnabled = true
	l := newListener(a)
	assert.NotNil(t, l)
	assert.Nil(t, l.TCPListener)
	assert.NotNil(t, l.a)
	assert.Nil(t, l.allowedPROXYRelayerIPNets)
	// Whitelist entries may be bare IPs or CIDR blocks.
	a = New()
	a.PROXYEnabled = true
	a.PROXYRelayerIPWhitelist = []string{
		"0.0.0.0",
		"::",
		"127.0.0.1",
		"127.0.0.1/32",
		"::1",
		"::1/128",
	}
	l = newListener(a)
	assert.NotNil(t, l)
	assert.Nil(t, l.TCPListener)
	assert.NotNil(t, l.a)
	assert.Len(t, l.allowedPROXYRelayerIPNets, 6)
}
// TestListenerListen verifies listen succeeds on an ephemeral local port
// and fails on an invalid address.
func TestListenerListen(t *testing.T) {
	a := New()
	l := newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	assert.NoError(t, l.Close())
	a = New()
	l = newListener(a)
	assert.Error(t, l.listen(":-1"))
}
// TestListenerAccept exercises Accept across four setups: before listen
// (error), plain accept, PROXY mode (connection is wrapped in a fresh
// *proxyConn), and PROXY mode with a whitelist that does / does not
// match the dialing client.
func TestListenerAccept(t *testing.T) {
	// Accept before listen must fail.
	a := New()
	l := newListener(a)
	c, err := l.Accept()
	assert.Nil(t, c)
	assert.Error(t, err)
	// Plain (non-PROXY) accept.
	a = New()
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err := net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	assert.NoError(t, l.Close())
	// PROXY mode: the accepted conn is a *proxyConn in its zero state
	// (header not read yet).
	a = New()
	a.PROXYEnabled = true
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok := c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	assert.NotNil(t, pc.Conn)
	assert.NotNil(t, pc.bufReader)
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.NotNil(t, pc.readHeaderOnce)
	assert.Nil(t, pc.readHeaderError)
	assert.Zero(t, pc.readHeaderTimeout)
	assert.NoError(t, l.Close())
	// Whitelist containing the client's loopback address.
	a = New()
	a.PROXYEnabled = true
	a.PROXYRelayerIPWhitelist = []string{"127.0.0.1"}
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	assert.NoError(t, l.Close())
	// Whitelist that does NOT match the client; accept still succeeds.
	a = New()
	a.PROXYEnabled = true
	a.PROXYRelayerIPWhitelist = []string{"127.0.0.2"}
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	assert.NoError(t, l.Close())
}
// TestPROXYConnRead checks Read on a PROXY-wrapped conn: non-header data
// passes straight through, while a truncated "PROXY " header makes Read
// fail with nothing delivered to the caller's buffer.
func TestPROXYConnRead(t *testing.T) {
	// Plain payload: delivered unchanged.
	a := New()
	a.PROXYEnabled = true
	l := newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err := net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err := l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok := c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("air"))
		cc.Close()
	}()
	b := make([]byte, 3)
	n, err := pc.Read(b)
	assert.Equal(t, 3, n)
	assert.NoError(t, err)
	assert.Equal(t, "air", string(b))
	assert.NoError(t, l.Close())
	// Truncated PROXY header: Read errors and the buffer stays zeroed.
	a = New()
	a.PROXYEnabled = true
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY "))
		cc.Close()
	}()
	b = make([]byte, 6)
	n, err = pc.Read(b)
	assert.Zero(t, n)
	assert.Error(t, err)
	assert.Equal(t, "\x00\x00\x00\x00\x00\x00", string(b))
	assert.NoError(t, l.Close())
}
// TestPROXYConnLocalAddr checks LocalAddr on a PROXY-wrapped conn:
// without a PROXY header it passes through the underlying conn's
// address; with a header it reports the destination address the header
// declared.
func TestPROXYConnLocalAddr(t *testing.T) {
	// No PROXY header: passthrough.
	a := New()
	a.PROXYEnabled = true
	l := newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err := net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err := l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok := c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("air"))
		cc.Close()
	}()
	b := make([]byte, 3)
	n, err := pc.Read(b)
	assert.Equal(t, 3, n)
	assert.NoError(t, err)
	assert.Equal(t, "air", string(b))
	na := pc.LocalAddr()
	assert.NotNil(t, na)
	assert.Equal(t, c.LocalAddr().Network(), na.Network())
	assert.Equal(t, c.LocalAddr().String(), na.String())
	assert.NoError(t, l.Close())
	// With a PROXY header: LocalAddr is the header's destination.
	a = New()
	a.PROXYEnabled = true
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	na = pc.LocalAddr()
	assert.NotNil(t, na)
	assert.Equal(t, "tcp", na.Network())
	assert.Equal(t, "127.0.0.3:8082", na.String())
	assert.NoError(t, l.Close())
}
// TestPROXYConnRemoteAddr checks RemoteAddr on a PROXY-wrapped conn:
// without a PROXY header it passes through the underlying conn's
// address; with a header it reports the source address the header
// declared.
func TestPROXYConnRemoteAddr(t *testing.T) {
	// No PROXY header: passthrough.
	a := New()
	a.PROXYEnabled = true
	l := newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err := net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err := l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok := c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("air"))
		cc.Close()
	}()
	b := make([]byte, 3)
	n, err := pc.Read(b)
	assert.Equal(t, 3, n)
	assert.NoError(t, err)
	assert.Equal(t, "air", string(b))
	na := pc.RemoteAddr()
	assert.NotNil(t, na)
	assert.Equal(t, c.RemoteAddr().Network(), na.Network())
	assert.Equal(t, c.RemoteAddr().String(), na.String())
	assert.NoError(t, l.Close())
	// With a PROXY header: RemoteAddr is the header's source.
	a = New()
	a.PROXYEnabled = true
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	na = pc.RemoteAddr()
	assert.NotNil(t, na)
	assert.Equal(t, "tcp", na.Network())
	assert.Equal(t, "127.0.0.2:8081", na.String())
	assert.NoError(t, l.Close())
}
// TestPROXYConnReadHeader drives readHeader through eleven scenarios:
// non-PROXY data (no addresses, no error), a valid v1 header, a header
// that arrives after the read-header timeout (silently ignored), reading
// from an already-closed conn, a header split across the default
// timeout, and a series of malformed headers (too few fields, non-TCP
// protocol, bad source/destination IPs, bad source/destination ports) —
// each of which must set readHeaderError without producing addresses.
func TestPROXYConnReadHeader(t *testing.T) {
	// Non-PROXY payload: header parsing is a no-op without error.
	a := New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l := newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err := net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err := l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok := c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("air"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Nil(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Valid v1 header: both addresses parsed.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.NotNil(t, pc.srcAddr)
	assert.NotNil(t, pc.dstAddr)
	assert.NoError(t, pc.readHeaderError)
	assert.Equal(t, "tcp", pc.srcAddr.Network())
	assert.Equal(t, "127.0.0.2:8081", pc.srcAddr.String())
	assert.Equal(t, "tcp", pc.dstAddr.Network())
	assert.Equal(t, "127.0.0.3:8082", pc.dstAddr.String())
	assert.NoError(t, l.Close())
	// Header arriving after the read-header timeout: ignored, no error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(200*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		time.Sleep(150 * time.Millisecond)
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.NoError(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Conn closed before readHeader: the read error is surfaced.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	assert.NoError(t, pc.Close())
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Header split across the client's write deadline (no listener
	// timeout configured): the stalled read errors out.
	a = New()
	a.PROXYEnabled = true
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY "))
		time.Sleep(150 * time.Millisecond)
		cc.Write([]byte("TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Malformed header: too few fields.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Malformed header: unsupported protocol (UDP4).
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY UDP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Malformed header: invalid source IP.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Malformed header: invalid destination IP.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0 8081 8082\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Malformed header: non-numeric source port.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 PORT 8082\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Malformed header: non-numeric destination port.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 PORT\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
}
test: update listener_test.go
package air
import (
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestNewListener(t *testing.T) {
a := New()
a.PROXYEnabled = true
l := newListener(a)
assert.NotNil(t, l)
assert.Nil(t, l.TCPListener)
assert.NotNil(t, l.a)
assert.Nil(t, l.allowedPROXYRelayerIPNets)
a = New()
a.PROXYEnabled = true
a.PROXYRelayerIPWhitelist = []string{
"0.0.0.0",
"::",
"127.0.0.1",
"127.0.0.1/32",
"::1",
"::1/128",
}
l = newListener(a)
assert.NotNil(t, l)
assert.Nil(t, l.TCPListener)
assert.NotNil(t, l.a)
assert.Len(t, l.allowedPROXYRelayerIPNets, 6)
}
func TestListenerListen(t *testing.T) {
a := New()
l := newListener(a)
assert.NoError(t, l.listen("localhost:0"))
assert.NoError(t, l.Close())
a = New()
l = newListener(a)
assert.Error(t, l.listen(":-1"))
}
func TestListenerAccept(t *testing.T) {
a := New()
l := newListener(a)
c, err := l.Accept()
assert.Nil(t, c)
assert.Error(t, err)
a = New()
l = newListener(a)
assert.NoError(t, l.listen("localhost:0"))
cc, err := net.Dial("tcp", l.Addr().String())
assert.NotNil(t, cc)
assert.NoError(t, err)
assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
c, err = l.Accept()
assert.NotNil(t, c)
assert.NoError(t, err)
assert.NoError(t, l.Close())
a = New()
a.PROXYEnabled = true
l = newListener(a)
assert.NoError(t, l.listen("localhost:0"))
cc, err = net.Dial("tcp", l.Addr().String())
assert.NotNil(t, cc)
assert.NoError(t, err)
assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
c, err = l.Accept()
assert.NotNil(t, c)
assert.NoError(t, err)
pc, ok := c.(*proxyConn)
assert.NotNil(t, pc)
assert.True(t, ok)
assert.NotNil(t, pc.Conn)
assert.NotNil(t, pc.bufReader)
assert.Nil(t, pc.srcAddr)
assert.Nil(t, pc.dstAddr)
assert.NotNil(t, pc.readHeaderOnce)
assert.Nil(t, pc.readHeaderError)
assert.Zero(t, pc.readHeaderTimeout)
assert.NoError(t, l.Close())
a = New()
a.PROXYEnabled = true
a.PROXYRelayerIPWhitelist = []string{"127.0.0.1"}
l = newListener(a)
assert.NoError(t, l.listen("localhost:0"))
cc, err = net.Dial("tcp", l.Addr().String())
assert.NotNil(t, cc)
assert.NoError(t, err)
assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
c, err = l.Accept()
assert.NotNil(t, c)
assert.NoError(t, err)
assert.NoError(t, l.Close())
a = New()
a.PROXYEnabled = true
a.PROXYRelayerIPWhitelist = []string{"127.0.0.2"}
l = newListener(a)
assert.NoError(t, l.listen("localhost:0"))
cc, err = net.Dial("tcp", l.Addr().String())
assert.NotNil(t, cc)
assert.NoError(t, err)
assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
c, err = l.Accept()
assert.NotNil(t, c)
assert.NoError(t, err)
assert.NoError(t, l.Close())
}
func TestPROXYConnRead(t *testing.T) {
a := New()
a.PROXYEnabled = true
l := newListener(a)
assert.NoError(t, l.listen("localhost:0"))
cc, err := net.Dial("tcp", l.Addr().String())
assert.NotNil(t, cc)
assert.NoError(t, err)
assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
c, err := l.Accept()
assert.NotNil(t, c)
assert.NoError(t, err)
pc, ok := c.(*proxyConn)
assert.NotNil(t, pc)
assert.True(t, ok)
go func() {
cc.Write([]byte("air"))
cc.Close()
}()
time.Sleep(100 * time.Millisecond)
b := make([]byte, 3)
n, err := pc.Read(b)
assert.Equal(t, 3, n)
assert.NoError(t, err)
assert.Equal(t, "air", string(b))
assert.NoError(t, l.Close())
a = New()
a.PROXYEnabled = true
l = newListener(a)
assert.NoError(t, l.listen("localhost:0"))
cc, err = net.Dial("tcp", l.Addr().String())
assert.NotNil(t, cc)
assert.NoError(t, err)
assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
c, err = l.Accept()
assert.NotNil(t, c)
assert.NoError(t, err)
pc, ok = c.(*proxyConn)
assert.NotNil(t, pc)
assert.True(t, ok)
go func() {
cc.Write([]byte("PROXY "))
cc.Close()
}()
time.Sleep(100 * time.Millisecond)
b = make([]byte, 6)
n, err = pc.Read(b)
assert.Zero(t, n)
assert.Error(t, err)
assert.Equal(t, "\x00\x00\x00\x00\x00\x00", string(b))
assert.NoError(t, l.Close())
}
// TestPROXYConnLocalAddr verifies proxyConn.LocalAddr: without a PROXY
// header it falls back to the underlying connection's local address; with a
// valid header it reports the destination address carried in the header.
func TestPROXYConnLocalAddr(t *testing.T) {
	// Case 1: no PROXY header — LocalAddr mirrors the wrapped conn.
	a := New()
	a.PROXYEnabled = true
	l := newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err := net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err := l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok := c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("air"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	// Read first so the header sniffing has run before LocalAddr is queried.
	b := make([]byte, 3)
	n, err := pc.Read(b)
	assert.Equal(t, 3, n)
	assert.NoError(t, err)
	assert.Equal(t, "air", string(b))
	na := pc.LocalAddr()
	assert.NotNil(t, na)
	assert.Equal(t, c.LocalAddr().Network(), na.Network())
	assert.Equal(t, c.LocalAddr().String(), na.String())
	assert.NoError(t, l.Close())
	// Case 2: valid PROXY header — LocalAddr is the header's dst addr/port.
	a = New()
	a.PROXYEnabled = true
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	na = pc.LocalAddr()
	assert.NotNil(t, na)
	assert.Equal(t, "tcp", na.Network())
	assert.Equal(t, "127.0.0.3:8082", na.String())
	assert.NoError(t, l.Close())
}
// TestPROXYConnRemoteAddr verifies proxyConn.RemoteAddr: without a PROXY
// header it falls back to the underlying connection's remote address; with
// a valid header it reports the source address carried in the header.
func TestPROXYConnRemoteAddr(t *testing.T) {
	// Case 1: no PROXY header — RemoteAddr mirrors the wrapped conn.
	a := New()
	a.PROXYEnabled = true
	l := newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err := net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err := l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok := c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("air"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	// Read first so the header sniffing has run before RemoteAddr is queried.
	b := make([]byte, 3)
	n, err := pc.Read(b)
	assert.Equal(t, 3, n)
	assert.NoError(t, err)
	assert.Equal(t, "air", string(b))
	na := pc.RemoteAddr()
	assert.NotNil(t, na)
	assert.Equal(t, c.RemoteAddr().Network(), na.Network())
	assert.Equal(t, c.RemoteAddr().String(), na.String())
	assert.NoError(t, l.Close())
	// Case 2: valid PROXY header — RemoteAddr is the header's src addr/port.
	a = New()
	a.PROXYEnabled = true
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	na = pc.RemoteAddr()
	assert.NotNil(t, na)
	assert.Equal(t, "tcp", na.Network())
	assert.Equal(t, "127.0.0.2:8081", na.String())
	assert.NoError(t, l.Close())
}
// TestPROXYConnReadHeader exercises proxyConn.readHeader across eleven
// scenarios: a non-PROXY payload, a valid header, a header arriving after
// the read-header timeout, a closed connection, a stalled partial header,
// and six malformed-header variants (too few fields, unsupported protocol,
// bad src/dst IPs, non-numeric src/dst ports). On any failure srcAddr and
// dstAddr must remain nil; readHeaderError is set only for hard errors,
// not for "this just isn't a proxied connection".
func TestPROXYConnReadHeader(t *testing.T) {
	// Scenario 1: payload without a PROXY prefix — not an error, simply
	// not proxied: both addrs stay nil and readHeaderError is nil.
	a := New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l := newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err := net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err := l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok := c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("air"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Nil(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 2: well-formed TCP4 header — src/dst parsed from the line.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.NotNil(t, pc.srcAddr)
	assert.NotNil(t, pc.dstAddr)
	assert.NoError(t, pc.readHeaderError)
	assert.Equal(t, "tcp", pc.srcAddr.Network())
	assert.Equal(t, "127.0.0.2:8081", pc.srcAddr.String())
	assert.Equal(t, "tcp", pc.dstAddr.Network())
	assert.Equal(t, "127.0.0.3:8082", pc.dstAddr.String())
	assert.NoError(t, l.Close())
	// Scenario 3: header only arrives after PROXYReadHeaderTimeout (written
	// at 150ms, timeout 100ms) — treated as a non-proxied connection: addrs
	// stay nil, but no error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(200*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		time.Sleep(150 * time.Millisecond)
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.NoError(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 4: connection closed before readHeader runs — hard error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	assert.NoError(t, pc.Close())
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 5: partial "PROXY " prefix, remainder stalled past the
	// client's 100ms deadline (no read-header timeout configured) — the
	// truncated header line is a hard error.
	a = New()
	a.PROXYEnabled = true
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY "))
		time.Sleep(150 * time.Millisecond)
		cc.Write([]byte("TCP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 6: too few fields in the header line — parse error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 7: unsupported transport ("UDP4") — parse error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY UDP4 127.0.0.2 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 8: malformed source IP ("127.0.0") — parse error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0 127.0.0.3 8081 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 9: malformed destination IP ("127.0.0") — parse error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0 8081 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 10: non-numeric source port — parse error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 PORT 8082\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
	// Scenario 11: non-numeric destination port — parse error.
	a = New()
	a.PROXYEnabled = true
	a.PROXYReadHeaderTimeout = 100 * time.Millisecond
	l = newListener(a)
	assert.NoError(t, l.listen("localhost:0"))
	cc, err = net.Dial("tcp", l.Addr().String())
	assert.NotNil(t, cc)
	assert.NoError(t, err)
	assert.NoError(t, cc.SetDeadline(time.Now().Add(100*time.Millisecond)))
	c, err = l.Accept()
	assert.NotNil(t, c)
	assert.NoError(t, err)
	pc, ok = c.(*proxyConn)
	assert.NotNil(t, pc)
	assert.True(t, ok)
	go func() {
		cc.Write([]byte("PROXY TCP4 127.0.0.2 127.0.0.3 8081 PORT\r\n"))
		cc.Close()
	}()
	time.Sleep(100 * time.Millisecond)
	pc.readHeader()
	assert.Nil(t, pc.srcAddr)
	assert.Nil(t, pc.dstAddr)
	assert.Error(t, pc.readHeaderError)
	assert.NoError(t, l.Close())
}
|
package main
import (
"encoding/json"
"net/http"
"time"
)
// plugStats describes one loaded plugin: its file path and the names of
// the functions it exposes.
type plugStats struct {
	Path      string
	Functions []string
}
// cacheStats tracks hit/miss counters for a single cache.
type cacheStats struct {
	Enabled    bool
	GetCount   uint64
	GetHits    uint64
	HitPercent float64
	SetCount   uint64
}

// setHitPercent recomputes HitPercent from the counters.
//
// Fix: guard against GetCount == 0. The previous code divided
// unconditionally, producing NaN before any gets occurred — and NaN is
// not representable in JSON, so serialization of the stats would fail.
func (cs *cacheStats) setHitPercent() {
	if cs.GetCount == 0 {
		cs.HitPercent = 0
		return
	}
	cs.HitPercent = float64(cs.GetHits) / float64(cs.GetCount)
}
// serverStats aggregates all runtime statistics exposed by the server:
// cache counters, loaded plugins, version, and uptime information.
type serverStats struct {
	InfoCache   cacheStats
	TileCache   cacheStats
	Plugins     []plugStats
	RAISVersion string
	ServerStart time.Time
	// Uptime is derived from ServerStart at serialization time.
	Uptime time.Duration
}
// setUptime refreshes Uptime from the recorded server start time.
func (s *serverStats) setUptime() {
	// time.Since(t) is the idiomatic form of time.Now().Sub(t)
	// (staticcheck S1012); behavior is identical.
	s.Uptime = time.Since(s.ServerStart)
}
// Serialize returns the stats encoded as JSON. (It does not write to any
// writer itself — callers such as ServeHTTP do that.) Derived fields
// (uptime, hit percentages) are recomputed here so they are only paid for
// when a serialization actually happens.
func (s *serverStats) Serialize() ([]byte, error) {
	// Calculate derived stats only on serializations
	s.setUptime()
	s.InfoCache.setHitPercent()
	s.TileCache.setHitPercent()
	return json.Marshal(s)
}
// ServeHTTP responds with the current server statistics as JSON, or a 500
// if serialization fails.
func (s *serverStats) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Renamed the result variable from "json" — it shadowed the
	// encoding/json package inside this function.
	data, err := s.Serialize()
	if err != nil {
		http.Error(w, "error generating json: "+err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(data)
}
rais-server: fix hit percent calculation
package main
import (
"encoding/json"
"net/http"
"time"
)
// plugStats describes one loaded plugin: its file path and the names of
// the functions it exposes.
type plugStats struct {
	Path      string
	Functions []string
}
// cacheStats tracks hit/miss counters for a single cache.
type cacheStats struct {
	Enabled    bool
	GetCount   uint64
	GetHits    uint64
	HitPercent float64
	SetCount   uint64
}

// setHitPercent recomputes HitPercent from the counters; with no gets
// recorded yet, the ratio is reported as 0 (avoids a NaN from 0/0).
func (cs *cacheStats) setHitPercent() {
	cs.HitPercent = 0
	if cs.GetCount > 0 {
		cs.HitPercent = float64(cs.GetHits) / float64(cs.GetCount)
	}
}
// serverStats aggregates all runtime statistics exposed by the server:
// cache counters, loaded plugins, version, and uptime information.
type serverStats struct {
	InfoCache   cacheStats
	TileCache   cacheStats
	Plugins     []plugStats
	RAISVersion string
	ServerStart time.Time
	// Uptime is derived from ServerStart at serialization time.
	Uptime time.Duration
}
// setUptime refreshes Uptime from the recorded server start time.
func (s *serverStats) setUptime() {
	// time.Since(t) is the idiomatic form of time.Now().Sub(t)
	// (staticcheck S1012); behavior is identical.
	s.Uptime = time.Since(s.ServerStart)
}
// Serialize returns the stats encoded as JSON. (It does not write to any
// writer itself — callers such as ServeHTTP do that.) Derived fields
// (uptime, hit percentages) are recomputed here so they are only paid for
// when a serialization actually happens.
func (s *serverStats) Serialize() ([]byte, error) {
	// Calculate derived stats only on serializations
	s.setUptime()
	s.InfoCache.setHitPercent()
	s.TileCache.setHitPercent()
	return json.Marshal(s)
}
// ServeHTTP responds with the current server statistics as JSON, or a 500
// if serialization fails.
func (s *serverStats) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Renamed the result variable from "json" — it shadowed the
	// encoding/json package inside this function.
	data, err := s.Serialize()
	if err != nil {
		http.Error(w, "error generating json: "+err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(data)
}
|
package loader
import (
"encoding/csv"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/brnstz/bus/common"
"github.com/brnstz/bus/models"
"github.com/jmoiron/sqlx"
)
// days lists the GTFS calendar.txt weekday columns, in order.
var days = []string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"}

// datefmt is the GTFS YYYYMMDD date layout in Go reference-time form.
var datefmt = "20060102"
// Loader parses a directory of GTFS (google transit) files into in-memory
// maps and exposes the flattened results via its exported slice fields.
type Loader struct {
	// the dir from which we load google transit files
	dir string
	// mapping from trip id to a trip object
	trips map[string]*models.Trip
	// mapping from stop_id to a slice of trip_ids
	stopTrips map[string][]string
	// mapping trip_id to route_id
	tripRoute map[string]string
	// a map of "{stop_id}-{route_id}" to stop objects. Essentially a
	// list of unique stops by route.
	uniqueStop map[string]*models.Stop
	// mapping of trip_id to service object
	tripService map[string]*models.Service
	// mapping of service_id to map of unique route_id
	serviceRoute map[string]map[string]bool
	// Results populated during init() for callers to consume:
	Stops              []*models.Stop
	ScheduledStopTimes []*models.ScheduledStopTime
	ServiceRouteDays   []*models.ServiceRouteDay
	// NOTE(review): ServiceRouteExceptions is never populated by any
	// method visible in this file — confirm whether it is dead.
	ServiceRouteExceptions []*models.ServiceRouteException
}
// NewLoader creates a Loader for the given google transit directory and
// eagerly loads everything from it before returning.
func NewLoader(dir string) *Loader {
	l := &Loader{
		dir:          dir,
		trips:        map[string]*models.Trip{},
		stopTrips:    map[string][]string{},
		tripRoute:    map[string]string{},
		uniqueStop:   map[string]*models.Stop{},
		tripService:  map[string]*models.Service{},
		serviceRoute: map[string]map[string]bool{},
	}
	l.init()
	return l
}
// init runs the individual load phases in dependency order, then flattens
// the unique-stop map into the exported Stops slice.
func (l *Loader) init() {
	l.loadTrips()
	l.loadStopTrips()
	l.loadTripRoute()
	l.loadUniqueStop()
	l.loadCalendars()
	l.Stops = make([]*models.Stop, 0, len(l.uniqueStop))
	for _, stop := range l.uniqueStop {
		l.Stops = append(l.Stops, stop)
	}
}
// getcsv opens dir/name and returns a CSV reader over it with LazyQuotes
// enabled (GTFS feeds sometimes contain stray quotes). Panics if the file
// cannot be opened.
// NOTE(review): the underlying *os.File is never closed, so each call
// leaks a file descriptor for the life of the process — tolerable for a
// short-lived load pass, but worth confirming.
func getcsv(dir, name string) *csv.Reader {
	f, err := os.Open(path.Join(dir, name))
	if err != nil {
		panic(err)
	}
	r := csv.NewReader(f)
	r.LazyQuotes = true
	return r
}
// find returns the index of col within header, aborting the whole load
// via log.Fatalf if the column is missing. The trailing return is never
// reached; it only satisfies the compiler.
func find(header []string, col string) int {
	for i, name := range header {
		if name == col {
			return i
		}
	}
	log.Fatalf("can't find header col %v", col)
	return -1
}
// loadTrips reads trips.txt and populates l.trips (trip objects),
// l.tripService (trip_id -> service) and l.serviceRoute (service_id ->
// set of route_ids). Any read or parse failure aborts the process.
func (l *Loader) loadTrips() {
	f := getcsv(l.dir, "trips.txt")
	header, err := f.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	// Columns are located by name so feed field order doesn't matter.
	tripIdx := find(header, "trip_id")
	dirIdx := find(header, "direction_id")
	headIdx := find(header, "trip_headsign")
	serviceIdx := find(header, "service_id")
	routeIdx := find(header, "route_id")
	for i := 0; ; i++ {
		rec, err := f.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		direction, err := strconv.Atoi(rec[dirIdx])
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		trip := &models.Trip{
			Id:          rec[tripIdx],
			DirectionId: direction,
			Headsign:    rec[headIdx],
		}
		service := rec[serviceIdx]
		route := rec[routeIdx]
		l.trips[trip.Id] = trip
		// A fresh Service object is created per trip; trips sharing a
		// service_id get distinct (but equal) objects.
		serviceObj := &models.Service{
			Id:      service,
			RouteId: route,
		}
		l.tripService[trip.Id] = serviceObj
		if l.serviceRoute[service] == nil {
			l.serviceRoute[service] = map[string]bool{}
		}
		l.serviceRoute[service][route] = true
	}
}
// loadStopTrips reads stop_times.txt, recording which trips serve each
// stop (l.stopTrips) and building a ScheduledStopTime per record.
// Must run after loadTrips, which populates l.tripService.
func (l *Loader) loadStopTrips() {
	stopTimes := getcsv(l.dir, "stop_times.txt")
	header, err := stopTimes.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	stopIdx := find(header, "stop_id")
	tripIdx := find(header, "trip_id")
	timeIdx := find(header, "departure_time")
	for i := 0; ; i++ {
		rec, err := stopTimes.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		stop := rec[stopIdx]
		trip := rec[tripIdx]
		timeStr := rec[timeIdx]
		l.stopTrips[stop] = append(l.stopTrips[stop], trip)
		// NOTE(review): assumes every trip_id in stop_times.txt was seen in
		// trips.txt; a missing entry makes service nil and panics on the
		// next line — confirm the feed guarantees this.
		service := l.tripService[trip]
		sst, err := models.NewScheduledStopTime(service.RouteId, stop, service.Id, timeStr)
		if err != nil {
			log.Fatal("can't create sst", rec, err)
		}
		l.ScheduledStopTimes = append(l.ScheduledStopTimes, &sst)
	}
}
// loadTripRoute reads trips.txt and builds the trip_id -> route_id map.
func (l *Loader) loadTripRoute() {
	trips := getcsv(l.dir, "trips.txt")
	header, err := trips.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	tripIdx := find(header, "trip_id")
	routeIdx := find(header, "route_id")
	// Fix: removed a stray trips.Read() that was here. The header has
	// already been consumed above, so the extra read silently discarded
	// the first data record — its trip never received a route mapping
	// (loadTrips, reading the same file, has no such extra read).
	for i := 0; ; i++ {
		rec, err := trips.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		trip := rec[tripIdx]
		route := rec[routeIdx]
		l.tripRoute[trip] = route
	}
}
// loadUniqueStop reads stops.txt and creates one models.Stop per
// combination of stop and serving trip's route/direction/headsign,
// de-duplicated into l.uniqueStop by obj.Key(). Stops that appear in no
// trip (absent from l.stopTrips) are skipped. Must run after
// loadStopTrips, loadTripRoute and loadTrips.
func (l *Loader) loadUniqueStop() {
	stops := getcsv(l.dir, "stops.txt")
	header, err := stops.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	stopIdx := find(header, "stop_id")
	stopNameIdx := find(header, "stop_name")
	stopLatIdx := find(header, "stop_lat")
	stopLonIdx := find(header, "stop_lon")
	for i := 0; ; i++ {
		rec, err := stops.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		// Coordinates are trimmed before parsing; some feeds pad them.
		stopLat, err := strconv.ParseFloat(
			strings.TrimSpace(rec[stopLatIdx]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		stopLon, err := strconv.ParseFloat(
			strings.TrimSpace(rec[stopLonIdx]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		trips, exists := l.stopTrips[rec[stopIdx]]
		if exists {
			for _, trip := range trips {
				obj := models.Stop{
					Id:          rec[stopIdx],
					Name:        rec[stopNameIdx],
					Lat:         stopLat,
					Lon:         stopLon,
					RouteId:     l.tripRoute[trip],
					DirectionId: l.trips[trip].DirectionId,
					Headsign:    l.trips[trip].Headsign,
				}
				l.uniqueStop[obj.Key()] = &obj
			}
		}
	}
}
// loadCalendars reads calendar.txt and expands each active weekday of a
// service into one ServiceRouteDay per route that the service covers.
// Must run after loadTrips, which populates l.serviceRoute.
func (l *Loader) loadCalendars() {
	// (local is named "stops" but this reads calendar.txt)
	stops := getcsv(l.dir, "calendar.txt")
	header, err := stops.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	// One column index per weekday name.
	idxs := map[string]int{}
	for _, day := range days {
		idxs[day] = find(header, day)
	}
	serviceIdx := find(header, "service_id")
	startDateIdx := find(header, "start_date")
	endDateIdx := find(header, "end_date")
	for i := 0; ; i++ {
		rec, err := stops.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of calendar.txt", err, i)
		}
		serviceId := rec[serviceIdx]
		startDate, err := time.Parse(datefmt, rec[startDateIdx])
		if err != nil {
			log.Fatalf("can't parse start date %v %v", err, rec[startDateIdx])
		}
		endDate, err := time.Parse(datefmt, rec[endDateIdx])
		if err != nil {
			log.Fatalf("can't parse end date %v %v", err, rec[endDateIdx])
		}
		for day, dayIdx := range idxs {
			dayVal := rec[dayIdx]
			// "1" marks the service as active on that weekday.
			if dayVal != "1" {
				continue
			}
			for route, _ := range l.serviceRoute[serviceId] {
				srd := models.ServiceRouteDay{
					ServiceId: serviceId,
					RouteId:   route,
					Day:       day,
					StartDate: startDate,
					EndDate:   endDate,
				}
				l.ServiceRouteDays = append(l.ServiceRouteDays, &srd)
			}
		}
	}
}
// doOne loads one unzipped transit directory into the db: service route
// days, then stops, then scheduled stop times. Unique-constraint
// violations are expected on re-runs and silently skipped; any other db
// error is logged but does not stop the load.
func doOne(dir string, stype string, db *sqlx.DB) {
	l := NewLoader(dir)
	for i, s := range l.ServiceRouteDays {
		_, err := db.Exec(`
			INSERT INTO service_route_day
			(route_id, service_id, day, start_date, end_date)
			VALUES($1, $2, $3, $4, $5)
		`, s.RouteId, s.ServiceId, s.Day, s.StartDate, s.EndDate,
		)
		if err != nil && !strings.Contains(err.Error(), "violates unique constraint") {
			log.Println("ERROR SERVICE ROUTE DAYS: ", err, s)
		}
		// Progress log every 100 rows (fires at i == 0 too).
		if i%100 == 0 {
			log.Printf("loaded %v service route days", i)
		}
	}
	for i, s := range l.Stops {
		_, err := db.Exec(`
			INSERT INTO stop
			(stop_id, stop_name, direction_id, headsign, route_id,
			 location, stype)
			VALUES($1, $2, $3, $4, $5, ll_to_earth($6, $7), $8)
		`,
			s.Id, s.Name, s.DirectionId, s.Headsign, s.RouteId,
			s.Lat, s.Lon, stype,
		)
		if err != nil && !strings.Contains(err.Error(), "violates unique constraint") {
			log.Println("ERROR STOPS: ", err, s)
		}
		if i%100 == 0 {
			log.Printf("loaded %v stops", i)
		}
	}
	for i, s := range l.ScheduledStopTimes {
		_, err := db.Exec(`
			INSERT INTO scheduled_stop_time
			(route_id, stop_id, service_id, departure_sec)
			VALUES($1, $2, $3, $4)
		`, s.RouteId, s.StopId, s.ServiceId, s.DepartureSec,
		)
		if err != nil && !strings.Contains(err.Error(), "violates unique constraint") {
			log.Println("ERROR SCHEDULED STOP TIMES: ", err, s)
		}
		// Stop times are by far the biggest table; log far less often.
		if i%100000 == 0 {
			log.Printf("loaded %v stop times", i)
		}
	}
}
// LoadForever downloads and loads every configured MTA GTFS feed in an
// endless loop, sleeping 24 hours between passes. Download/unzip failures
// panic (killing the process); per-feed temp dirs are removed after each
// load via the deferred RemoveAll inside the closure.
func LoadForever() {
	for {
		for _, url := range []string{
			"http://web.mta.info/developers/data/nyct/subway/google_transit.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_bronx.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_brooklyn.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_manhattan.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_queens.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_staten_island.zip",
		} {
			// The stop type is inferred from the feed URL.
			var stype string
			if strings.Contains(url, "subway") {
				stype = "subway"
			} else {
				stype = "bus"
			}
			// FIXME: do this in Go, need to make it integrated with loader
			dir, err := ioutil.TempDir(common.TmpDir, "")
			if err != nil {
				panic(err)
			}
			// Download and unzip are shelled out to wget/unzip.
			cmd := exec.Command("wget", url, "-O", path.Join(dir, "file.zip"))
			err = cmd.Run()
			if err != nil {
				panic(err)
			}
			cmd = exec.Command("unzip", path.Join(dir, "file.zip"), "-d", dir)
			err = cmd.Run()
			if err != nil {
				panic(err)
			}
			// Closure scopes the defer so the temp dir is removed per feed,
			// not at function exit (which never happens).
			func() {
				log.Println(url, dir, stype)
				defer os.RemoveAll(dir)
				t1 := time.Now()
				doOne(dir, stype, common.DB)
				t2 := time.Now()
				log.Printf("took %v for %v\n", t2.Sub(t1), dir)
			}()
		}
		log.Println("finished loading, sleeping for 24 hours")
		time.Sleep(time.Hour * 24)
	}
}
loader: move shared config/db access from the common package to the internal conf package
package loader
import (
"encoding/csv"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/brnstz/bus/internal/conf"
"github.com/brnstz/bus/models"
"github.com/jmoiron/sqlx"
)
// days lists the GTFS calendar.txt weekday columns, in order.
var days = []string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"}

// datefmt is the GTFS YYYYMMDD date layout in Go reference-time form.
var datefmt = "20060102"
// Loader parses a directory of GTFS (google transit) files into in-memory
// maps and exposes the flattened results via its exported slice fields.
type Loader struct {
	// the dir from which we load google transit files
	dir string
	// mapping from trip id to a trip object
	trips map[string]*models.Trip
	// mapping from stop_id to a slice of trip_ids
	stopTrips map[string][]string
	// mapping trip_id to route_id
	tripRoute map[string]string
	// a map of "{stop_id}-{route_id}" to stop objects. Essentially a
	// list of unique stops by route.
	uniqueStop map[string]*models.Stop
	// mapping of trip_id to service object
	tripService map[string]*models.Service
	// mapping of service_id to map of unique route_id
	serviceRoute map[string]map[string]bool
	// Results populated during init() for callers to consume:
	Stops              []*models.Stop
	ScheduledStopTimes []*models.ScheduledStopTime
	ServiceRouteDays   []*models.ServiceRouteDay
	// NOTE(review): ServiceRouteExceptions is never populated by any
	// method visible in this file — confirm whether it is dead.
	ServiceRouteExceptions []*models.ServiceRouteException
}
// NewLoader creates a Loader for the given google transit directory and
// eagerly loads everything from it before returning.
func NewLoader(dir string) *Loader {
	l := &Loader{
		dir:          dir,
		trips:        map[string]*models.Trip{},
		stopTrips:    map[string][]string{},
		tripRoute:    map[string]string{},
		uniqueStop:   map[string]*models.Stop{},
		tripService:  map[string]*models.Service{},
		serviceRoute: map[string]map[string]bool{},
	}
	l.init()
	return l
}
// init runs the individual load phases in dependency order, then flattens
// the unique-stop map into the exported Stops slice.
func (l *Loader) init() {
	l.loadTrips()
	l.loadStopTrips()
	l.loadTripRoute()
	l.loadUniqueStop()
	l.loadCalendars()
	l.Stops = make([]*models.Stop, 0, len(l.uniqueStop))
	for _, stop := range l.uniqueStop {
		l.Stops = append(l.Stops, stop)
	}
}
// getcsv opens dir/name and returns a CSV reader over it with LazyQuotes
// enabled (GTFS feeds sometimes contain stray quotes). Panics if the file
// cannot be opened.
// NOTE(review): the underlying *os.File is never closed, so each call
// leaks a file descriptor for the life of the process — tolerable for a
// short-lived load pass, but worth confirming.
func getcsv(dir, name string) *csv.Reader {
	f, err := os.Open(path.Join(dir, name))
	if err != nil {
		panic(err)
	}
	r := csv.NewReader(f)
	r.LazyQuotes = true
	return r
}
// find returns the index of col within header, aborting the whole load
// via log.Fatalf if the column is missing. The trailing return is never
// reached; it only satisfies the compiler.
func find(header []string, col string) int {
	for i, name := range header {
		if name == col {
			return i
		}
	}
	log.Fatalf("can't find header col %v", col)
	return -1
}
// loadTrips reads trips.txt and populates l.trips (trip objects),
// l.tripService (trip_id -> service) and l.serviceRoute (service_id ->
// set of route_ids). Any read or parse failure aborts the process.
func (l *Loader) loadTrips() {
	f := getcsv(l.dir, "trips.txt")
	header, err := f.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	// Columns are located by name so feed field order doesn't matter.
	tripIdx := find(header, "trip_id")
	dirIdx := find(header, "direction_id")
	headIdx := find(header, "trip_headsign")
	serviceIdx := find(header, "service_id")
	routeIdx := find(header, "route_id")
	for i := 0; ; i++ {
		rec, err := f.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		direction, err := strconv.Atoi(rec[dirIdx])
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		trip := &models.Trip{
			Id:          rec[tripIdx],
			DirectionId: direction,
			Headsign:    rec[headIdx],
		}
		service := rec[serviceIdx]
		route := rec[routeIdx]
		l.trips[trip.Id] = trip
		// A fresh Service object is created per trip; trips sharing a
		// service_id get distinct (but equal) objects.
		serviceObj := &models.Service{
			Id:      service,
			RouteId: route,
		}
		l.tripService[trip.Id] = serviceObj
		if l.serviceRoute[service] == nil {
			l.serviceRoute[service] = map[string]bool{}
		}
		l.serviceRoute[service][route] = true
	}
}
// loadStopTrips reads stop_times.txt, recording which trips serve each
// stop (l.stopTrips) and building a ScheduledStopTime per record.
// Must run after loadTrips, which populates l.tripService.
func (l *Loader) loadStopTrips() {
	stopTimes := getcsv(l.dir, "stop_times.txt")
	header, err := stopTimes.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	stopIdx := find(header, "stop_id")
	tripIdx := find(header, "trip_id")
	timeIdx := find(header, "departure_time")
	for i := 0; ; i++ {
		rec, err := stopTimes.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		stop := rec[stopIdx]
		trip := rec[tripIdx]
		timeStr := rec[timeIdx]
		l.stopTrips[stop] = append(l.stopTrips[stop], trip)
		// NOTE(review): assumes every trip_id in stop_times.txt was seen in
		// trips.txt; a missing entry makes service nil and panics on the
		// next line — confirm the feed guarantees this.
		service := l.tripService[trip]
		sst, err := models.NewScheduledStopTime(service.RouteId, stop, service.Id, timeStr)
		if err != nil {
			log.Fatal("can't create sst", rec, err)
		}
		l.ScheduledStopTimes = append(l.ScheduledStopTimes, &sst)
	}
}
// loadTripRoute reads trips.txt and builds the trip_id -> route_id map.
func (l *Loader) loadTripRoute() {
	trips := getcsv(l.dir, "trips.txt")
	header, err := trips.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	tripIdx := find(header, "trip_id")
	routeIdx := find(header, "route_id")
	// Fix: removed a stray trips.Read() that was here. The header has
	// already been consumed above, so the extra read silently discarded
	// the first data record — its trip never received a route mapping
	// (loadTrips, reading the same file, has no such extra read).
	for i := 0; ; i++ {
		rec, err := trips.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		trip := rec[tripIdx]
		route := rec[routeIdx]
		l.tripRoute[trip] = route
	}
}
// loadUniqueStop reads stops.txt and creates one models.Stop per
// combination of stop and serving trip's route/direction/headsign,
// de-duplicated into l.uniqueStop by obj.Key(). Stops that appear in no
// trip (absent from l.stopTrips) are skipped. Must run after
// loadStopTrips, loadTripRoute and loadTrips.
func (l *Loader) loadUniqueStop() {
	stops := getcsv(l.dir, "stops.txt")
	header, err := stops.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	stopIdx := find(header, "stop_id")
	stopNameIdx := find(header, "stop_name")
	stopLatIdx := find(header, "stop_lat")
	stopLonIdx := find(header, "stop_lon")
	for i := 0; ; i++ {
		rec, err := stops.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		// Coordinates are trimmed before parsing; some feeds pad them.
		stopLat, err := strconv.ParseFloat(
			strings.TrimSpace(rec[stopLatIdx]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		stopLon, err := strconv.ParseFloat(
			strings.TrimSpace(rec[stopLonIdx]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		trips, exists := l.stopTrips[rec[stopIdx]]
		if exists {
			for _, trip := range trips {
				obj := models.Stop{
					Id:          rec[stopIdx],
					Name:        rec[stopNameIdx],
					Lat:         stopLat,
					Lon:         stopLon,
					RouteId:     l.tripRoute[trip],
					DirectionId: l.trips[trip].DirectionId,
					Headsign:    l.trips[trip].Headsign,
				}
				l.uniqueStop[obj.Key()] = &obj
			}
		}
	}
}
// loadCalendars reads calendar.txt and expands each active weekday of a
// service into one ServiceRouteDay per route that the service covers.
// Must run after loadTrips, which populates l.serviceRoute.
func (l *Loader) loadCalendars() {
	// (local is named "stops" but this reads calendar.txt)
	stops := getcsv(l.dir, "calendar.txt")
	header, err := stops.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	// One column index per weekday name.
	idxs := map[string]int{}
	for _, day := range days {
		idxs[day] = find(header, day)
	}
	serviceIdx := find(header, "service_id")
	startDateIdx := find(header, "start_date")
	endDateIdx := find(header, "end_date")
	for i := 0; ; i++ {
		rec, err := stops.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of calendar.txt", err, i)
		}
		serviceId := rec[serviceIdx]
		startDate, err := time.Parse(datefmt, rec[startDateIdx])
		if err != nil {
			log.Fatalf("can't parse start date %v %v", err, rec[startDateIdx])
		}
		endDate, err := time.Parse(datefmt, rec[endDateIdx])
		if err != nil {
			log.Fatalf("can't parse end date %v %v", err, rec[endDateIdx])
		}
		for day, dayIdx := range idxs {
			dayVal := rec[dayIdx]
			// "1" marks the service as active on that weekday.
			if dayVal != "1" {
				continue
			}
			for route, _ := range l.serviceRoute[serviceId] {
				srd := models.ServiceRouteDay{
					ServiceId: serviceId,
					RouteId:   route,
					Day:       day,
					StartDate: startDate,
					EndDate:   endDate,
				}
				l.ServiceRouteDays = append(l.ServiceRouteDays, &srd)
			}
		}
	}
}
// doOne loads one unzipped transit directory into the db: service route
// days, then stops, then scheduled stop times. Unique-constraint
// violations are expected on re-runs and silently skipped; any other db
// error is logged but does not stop the load.
func doOne(dir string, stype string, db *sqlx.DB) {
	l := NewLoader(dir)
	for i, s := range l.ServiceRouteDays {
		_, err := db.Exec(`
			INSERT INTO service_route_day
			(route_id, service_id, day, start_date, end_date)
			VALUES($1, $2, $3, $4, $5)
		`, s.RouteId, s.ServiceId, s.Day, s.StartDate, s.EndDate,
		)
		if err != nil && !strings.Contains(err.Error(), "violates unique constraint") {
			log.Println("ERROR SERVICE ROUTE DAYS: ", err, s)
		}
		// Progress log every 100 rows (fires at i == 0 too).
		if i%100 == 0 {
			log.Printf("loaded %v service route days", i)
		}
	}
	for i, s := range l.Stops {
		_, err := db.Exec(`
			INSERT INTO stop
			(stop_id, stop_name, direction_id, headsign, route_id,
			 location, stype)
			VALUES($1, $2, $3, $4, $5, ll_to_earth($6, $7), $8)
		`,
			s.Id, s.Name, s.DirectionId, s.Headsign, s.RouteId,
			s.Lat, s.Lon, stype,
		)
		if err != nil && !strings.Contains(err.Error(), "violates unique constraint") {
			log.Println("ERROR STOPS: ", err, s)
		}
		if i%100 == 0 {
			log.Printf("loaded %v stops", i)
		}
	}
	for i, s := range l.ScheduledStopTimes {
		_, err := db.Exec(`
			INSERT INTO scheduled_stop_time
			(route_id, stop_id, service_id, departure_sec)
			VALUES($1, $2, $3, $4)
		`, s.RouteId, s.StopId, s.ServiceId, s.DepartureSec,
		)
		if err != nil && !strings.Contains(err.Error(), "violates unique constraint") {
			log.Println("ERROR SCHEDULED STOP TIMES: ", err, s)
		}
		// Stop times are by far the biggest table; log far less often.
		if i%100000 == 0 {
			log.Printf("loaded %v stop times", i)
		}
	}
}
// LoadForever downloads and loads every configured MTA GTFS feed in an
// endless loop, sleeping 24 hours between passes. Download/unzip failures
// panic (killing the process); per-feed temp dirs are removed after each
// load via the deferred RemoveAll inside the closure.
func LoadForever() {
	for {
		for _, url := range []string{
			"http://web.mta.info/developers/data/nyct/subway/google_transit.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_bronx.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_brooklyn.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_manhattan.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_queens.zip",
			"http://web.mta.info/developers/data/nyct/bus/google_transit_staten_island.zip",
		} {
			// The stop type is inferred from the feed URL.
			var stype string
			if strings.Contains(url, "subway") {
				stype = "subway"
			} else {
				stype = "bus"
			}
			// FIXME: do this in Go, need to make it integrated with loader
			dir, err := ioutil.TempDir(conf.TmpDir, "")
			if err != nil {
				panic(err)
			}
			// Download and unzip are shelled out to wget/unzip.
			cmd := exec.Command("wget", url, "-O", path.Join(dir, "file.zip"))
			err = cmd.Run()
			if err != nil {
				panic(err)
			}
			cmd = exec.Command("unzip", path.Join(dir, "file.zip"), "-d", dir)
			err = cmd.Run()
			if err != nil {
				panic(err)
			}
			// Closure scopes the defer so the temp dir is removed per feed,
			// not at function exit (which never happens).
			func() {
				log.Println(url, dir, stype)
				defer os.RemoveAll(dir)
				t1 := time.Now()
				doOne(dir, stype, conf.DB)
				t2 := time.Now()
				log.Printf("took %v for %v\n", t2.Sub(t1), dir)
			}()
		}
		log.Println("finished loading, sleeping for 24 hours")
		time.Sleep(time.Hour * 24)
	}
}
|
package loader
import (
"encoding/csv"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/brnstz/bus/internal/conf"
"github.com/brnstz/bus/internal/etc"
"github.com/brnstz/bus/internal/models"
)
var (
	// days lists the GTFS calendar.txt weekday columns, in order.
	days = []string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"}
	// datefmt is the GTFS YYYYMMDD date layout in Go reference-time form.
	datefmt = "20060102"
	// loaderBreak is the pause between full load passes.
	loaderBreak = time.Hour * 24
	// views names database views related to loading. NOTE(review): their
	// use (presumably a refresh after loading) is not visible in this
	// portion of the file — confirm.
	views = []string{"here", "service", "service_exception"}
	// logp is the progress-logging interval, in rows.
	logp = 1000
)
// rskey is the unique key for a route_shape: a shape is identified by the
// route it belongs to plus the trip's direction and headsign.
type rskey struct {
	routeID     string
	directionID int
	headsign    string
}
// Loader parses a directory of GTFS (google transit) files into in-memory
// maps used by the later load phases.
type Loader struct {
	// the dir from which we load google transit files
	dir string
	// mapping from trip id to a trip object
	trips map[string]*models.Trip
	// mapping from stop_id to a slice of trip_ids
	stopTrips map[string][]string
	// mapping of trip_id to service object
	tripService map[string]*models.Service
	// mapping of service_id to map of unique route_id
	serviceRoute map[string]map[string]bool
	// routeIDs, when non-nil, is the set of route IDs to load; nil means
	// load everything (see newLoader and skipRoute).
	routeIDs map[string]bool
	// routeAgency contains the agency for each route after we loadRoutes()
	routeAgency map[string]string
	// mapping trip_id to route_id
	tripRoute map[string]string
	// shapeRoute maps shape_id to route_id (for purposes of adding agency_id
	// to shapes table)
	shapeRoute map[string]string
	// routeShapeCount keeps a running tab of the biggest shape for this
	// route/dir/headsign combo
	/*
		routeShapeCount map[rskey]int
		routeShapeID map[rskey]
	*/
}
// newLoader returns a Loader rooted at dir with all lookup maps
// initialized. If conf.Loader.RouteFilter names any routes, only
// those routes are loaded; otherwise everything in the feed is.
func newLoader(dir string) *Loader {
	ldr := &Loader{
		dir:          dir,
		trips:        make(map[string]*models.Trip),
		stopTrips:    make(map[string][]string),
		tripRoute:    make(map[string]string),
		tripService:  make(map[string]*models.Service),
		serviceRoute: make(map[string]map[string]bool),
		routeAgency:  make(map[string]string),
		shapeRoute:   make(map[string]string),
	}

	// An empty BUS_ROUTE_FILTER value becomes []string{""}; checking
	// the 0th entry's length treats that the same as "no filter".
	// Possibly the conf package should enforce this instead.
	filter := conf.Loader.RouteFilter
	if len(filter) == 0 || len(filter[0]) == 0 {
		return ldr
	}

	ldr.routeIDs = make(map[string]bool, len(filter))
	for _, id := range filter {
		ldr.routeIDs[id] = true
	}
	return ldr
}
// load runs every loading stage in dependency order: loadRoutes fills
// routeAgency (read by loadTrips), loadTrips fills trips/tripService/
// tripRoute/shapeRoute (read by the stop, calendar and shape stages),
// and updateRouteShapes runs last over the freshly loaded shapes.
func (l *Loader) load() {
	l.loadRoutes()
	l.loadTrips()
	l.loadStopTrips()
	l.loadUniqueStop()
	l.loadCalendars()
	l.loadCalendarDates()
	l.loadShapes()
	l.updateRouteShapes()
}
// getcsv opens the named file under dir and returns a CSV reader with
// LazyQuotes enabled (GTFS feeds often contain stray quotes). A
// missing file is fatal via panic.
// NOTE(review): the opened *os.File is never closed and is only
// reachable through the returned reader, so the handle lives until
// process exit — presumably acceptable for this batch loader, but
// worth confirming.
func getcsv(dir, name string) *csv.Reader {
	f, err := os.Open(path.Join(dir, name))
	if err != nil {
		panic(err)
	}
	r := csv.NewReader(f)
	r.LazyQuotes = true
	return r
}
// find returns the index of col within header. A missing column is
// fatal, since every caller depends on the column being present.
func find(header []string, col string) int {
	for idx, name := range header {
		if name == col {
			return idx
		}
	}
	log.Fatalf("can't find header col %v", col)
	return -1
}
// skipRoute returns true if we should skip this route given our
// routeFilter config. A nil routeIDs map means no filter is
// configured, so nothing is skipped.
func (l *Loader) skipRoute(routeID string) bool {
	// Fix: return the condition directly instead of the redundant
	// if/else-return-bool with `== false`.
	return l.routeIDs != nil && !l.routeIDs[routeID]
}
// loadRoutes reads routes.txt, saves each non-filtered route and
// records its agency_id in l.routeAgency for the later stages.
// Any read/parse/save failure is fatal.
func (l *Loader) loadRoutes() {
	var i int
	f := getcsv(l.dir, "routes.txt")
	header, err := f.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	// Resolve column positions once from the header row.
	routeIdx := find(header, "route_id")
	routeTypeIdx := find(header, "route_type")
	routeColorIdx := find(header, "route_color")
	routeTextColorIdx := find(header, "route_text_color")
	routeAgencyIdx := find(header, "agency_id")
	for i = 0; ; i++ {
		rec, err := f.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of routes.txt", err, i)
		}
		route := rec[routeIdx]
		if l.skipRoute(route) {
			continue
		}
		routeType, err := strconv.Atoi(rec[routeTypeIdx])
		if err != nil {
			log.Fatalf("%v on line %v of routes.txt", err, i)
		}
		routeColor := rec[routeColorIdx]
		routeTextColor := rec[routeTextColorIdx]
		agencyID := rec[routeAgencyIdx]
		r, err := models.NewRoute(
			route, routeType, routeColor, routeTextColor, agencyID,
		)
		if err != nil {
			log.Fatalf("%v on line %v of routes.txt", err, i)
		}
		err = r.Save()
		if err != nil {
			log.Fatalf("%v on line %v of routes.txt", err, i)
		}
		// Remember the agency for this route; loadTrips and the
		// stop/shape stages look it up from here.
		l.routeAgency[route] = agencyID
	}
	log.Printf("loaded %v routes", i)
}
// loadTrips reads trips.txt, saves each trip on a non-filtered route,
// and populates the trips/tripService/serviceRoute/tripRoute/
// shapeRoute lookup maps used by the later stages.
func (l *Loader) loadTrips() {
	var i int
	f := getcsv(l.dir, "trips.txt")
	header, err := f.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	tripIdx := find(header, "trip_id")
	dirIdx := find(header, "direction_id")
	headIdx := find(header, "trip_headsign")
	serviceIdx := find(header, "service_id")
	routeIdx := find(header, "route_id")
	shapeIdx := find(header, "shape_id")
	for i = 0; ; i++ {
		rec, err := f.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		direction, err := strconv.Atoi(rec[dirIdx])
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		id := rec[tripIdx]
		service := rec[serviceIdx]
		route := rec[routeIdx]
		shape := rec[shapeIdx]
		// agency was recorded by loadRoutes; empty for unknown routes.
		agency := l.routeAgency[route]
		if l.skipRoute(route) {
			continue
		}
		trip, err := models.NewTrip(
			id, route, agency, service, shape, rec[headIdx], direction,
		)
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		l.trips[trip.TripID] = trip
		serviceObj := &models.Service{
			ID:      service,
			RouteID: route,
		}
		l.tripService[trip.TripID] = serviceObj
		// Track every route that uses this service id.
		if l.serviceRoute[service] == nil {
			l.serviceRoute[service] = map[string]bool{}
		}
		l.serviceRoute[service][route] = true
		err = trip.Save()
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		l.tripRoute[id] = route
		l.shapeRoute[shape] = route
		if i%logp == 0 {
			log.Printf("loaded %v trips", i)
		}
	}
	log.Printf("loaded %v trips", i)
}
// loadStopTrips reads stop_times.txt, records each trip under its
// stop in l.stopTrips, and saves a ScheduledStopTime for every record
// whose trip has a known service (trips dropped by the route filter
// never got a tripService entry, so they are skipped here).
func (l *Loader) loadStopTrips() {
	var i int
	stopTimes := getcsv(l.dir, "stop_times.txt")
	header, err := stopTimes.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	stopIdx := find(header, "stop_id")
	tripIdx := find(header, "trip_id")
	arrivalIdx := find(header, "arrival_time")
	departureIdx := find(header, "departure_time") // fix: was misspelled "depatureIdx"
	sequenceIdx := find(header, "stop_sequence")
	for i = 0; ; i++ {
		rec, err := stopTimes.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		stop := rec[stopIdx]
		trip := rec[tripIdx]
		arrivalStr := rec[arrivalIdx]
		departureStr := rec[departureIdx]
		agencyID := l.routeAgency[l.tripRoute[trip]]
		sequenceStr := rec[sequenceIdx]
		sequence, err := strconv.Atoi(sequenceStr)
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		l.stopTrips[stop] = append(l.stopTrips[stop], trip)
		service, exists := l.tripService[trip]
		if !exists {
			// Trip was filtered out (or unknown): keep the stop->trip
			// index entry above but save no scheduled time.
			continue
		}
		sst, err := models.NewScheduledStopTime(
			service.RouteID, stop, service.ID, arrivalStr, departureStr,
			agencyID, trip, sequence,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		err = sst.Save()
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		if i%logp == 0 {
			log.Printf("loaded %v stop times", i)
		}
	}
	log.Printf("loaded %v stop times", i)
}
// loadUniqueStop reads stops.txt and saves one Stop per trip that
// serves the stop (with that trip's route, direction and headsign).
// Stops with no recorded trips in l.stopTrips are skipped.
func (l *Loader) loadUniqueStop() {
	var i int
	stops := getcsv(l.dir, "stops.txt")
	header, err := stops.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	stopIdx := find(header, "stop_id")
	stopNameIdx := find(header, "stop_name")
	stopLatIdx := find(header, "stop_lat")
	stopLonIdx := find(header, "stop_lon")
	for i = 0; ; i++ {
		rec, err := stops.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		// TrimSpace: some feeds pad the lat/lon columns.
		stopLat, err := strconv.ParseFloat(
			strings.TrimSpace(rec[stopLatIdx]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		stopLon, err := strconv.ParseFloat(
			strings.TrimSpace(rec[stopLonIdx]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		trips, exists := l.stopTrips[rec[stopIdx]]
		if exists {
			// NOTE(review): this saves once per trip, not once per
			// unique route/dir/headsign — presumably Stop.Save
			// upserts so duplicates collapse; confirm in models.
			for _, trip := range trips {
				if l.skipRoute(l.tripRoute[trip]) {
					continue
				}
				obj := models.Stop{
					StopID:      rec[stopIdx],
					Name:        rec[stopNameIdx],
					Lat:         stopLat,
					Lon:         stopLon,
					RouteID:     l.tripRoute[trip],
					DirectionID: l.trips[trip].DirectionID,
					Headsign:    l.trips[trip].Headsign,
					AgencyID:    l.routeAgency[l.tripRoute[trip]],
				}
				err = obj.Save()
				if err != nil {
					log.Fatalf("%v on line %v of stops.txt", err, i)
				}
			}
		}
		if i%logp == 0 {
			log.Printf("loaded %v stops", i)
		}
	}
	log.Printf("loaded %v stops", i)
}
// loadCalendarDates reads calendar_dates.txt and saves a
// ServiceRouteException for every route associated with each
// service_id (association built by loadTrips). Services that map to
// no route (e.g. only filtered routes) produce no rows.
func (l *Loader) loadCalendarDates() {
	cal := getcsv(l.dir, "calendar_dates.txt")
	header, err := cal.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	serviceIdx := find(header, "service_id")
	exceptionDateIdx := find(header, "date")
	exceptionTypeIdx := find(header, "exception_type")
	for i := 0; ; i++ {
		rec, err := cal.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of calendar_dates.txt", err, i)
		}
		serviceId := rec[serviceIdx]
		exceptionDate, err := time.Parse(datefmt, rec[exceptionDateIdx])
		if err != nil {
			log.Fatalf("can't parse exception date %v %v",
				err, rec[exceptionDateIdx])
		}
		exceptionType, err := strconv.Atoi(rec[exceptionTypeIdx])
		if err != nil {
			log.Fatalf("can't parse exception type integer %v %v",
				err, rec[exceptionTypeIdx])
		}
		// Only the two defined exception types are acceptable.
		if !(exceptionType == models.ServiceAdded || exceptionType == models.ServiceRemoved) {
			log.Fatalf("invalid value for exception_type %v", exceptionType)
		}
		// Idiom fix: `for route := range` instead of `for route, _ :=`.
		for route := range l.serviceRoute[serviceId] {
			s := models.ServiceRouteException{
				AgencyID:      l.routeAgency[route],
				ServiceID:     serviceId,
				RouteID:       route,
				ExceptionDate: exceptionDate,
				ExceptionType: exceptionType,
			}
			err = s.Save()
			if err != nil {
				log.Fatalf("%v on line %v of calendar_dates.txt with %v", err, i, s)
			}
		}
	}
}
// loadCalendars reads calendar.txt and saves a ServiceRouteDay for
// every weekday column set to "1", for every route that uses the
// service, bounded by the service's start/end dates.
func (l *Loader) loadCalendars() {
	var i int
	cal := getcsv(l.dir, "calendar.txt")
	header, err := cal.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	// One column per weekday, plus the service id and date range.
	idxs := map[string]int{}
	for _, day := range days {
		idxs[day] = find(header, day)
	}
	serviceIdx := find(header, "service_id")
	startDateIdx := find(header, "start_date")
	endDateIdx := find(header, "end_date")
	for i = 0; ; i++ {
		rec, err := cal.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of calendar.txt", err, i)
		}
		serviceId := rec[serviceIdx]
		startDate, err := time.Parse(datefmt, rec[startDateIdx])
		if err != nil {
			log.Fatalf("can't parse start date %v %v", err, rec[startDateIdx])
		}
		endDate, err := time.Parse(datefmt, rec[endDateIdx])
		if err != nil {
			log.Fatalf("can't parse end date %v %v", err, rec[endDateIdx])
		}
		for day, dayIdx := range idxs {
			dayVal := rec[dayIdx]
			// "1" means the service runs on this weekday.
			if dayVal != "1" {
				continue
			}
			// Idiom fix: `for route := range` instead of `for route, _ :=`.
			for route := range l.serviceRoute[serviceId] {
				srd := models.ServiceRouteDay{
					ServiceID: serviceId,
					RouteID:   route,
					AgencyID:  l.routeAgency[route],
					Day:       day,
					StartDate: startDate,
					EndDate:   endDate,
				}
				err = srd.Save()
				if err != nil {
					log.Fatalf("%v on line %v of calendar.txt with %v", err, i, srd)
				}
			}
		}
		if i%logp == 0 {
			log.Printf("loaded %v calendars", i)
		}
	}
	log.Printf("loaded %v calendars", i)
}
// loadShapes reads shapes.txt and saves every point of every shape
// whose route was recorded by loadTrips in l.shapeRoute and survives
// the route filter.
func (l *Loader) loadShapes() {
	var i int
	shapes := getcsv(l.dir, "shapes.txt")
	header, err := shapes.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	idIDX := find(header, "shape_id")
	latIDX := find(header, "shape_pt_lat")
	lonIDX := find(header, "shape_pt_lon")
	seqIDX := find(header, "shape_pt_sequence")
	for i = 0; ; i++ {
		rec, err := shapes.Read()
		if err == io.EOF {
			break
		}
		// Fix: previously a non-EOF read error was ignored, leaving
		// rec nil and panicking on the index expressions below.
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		lat, err := strconv.ParseFloat(
			strings.TrimSpace(rec[latIDX]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		lon, err := strconv.ParseFloat(
			strings.TrimSpace(rec[lonIDX]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		seq, err := strconv.ParseInt(
			strings.TrimSpace(rec[seqIDX]), 10, 32,
		)
		// Fix: previously a bad sequence value was silently loaded as 0.
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		id := rec[idIDX]
		route := l.shapeRoute[id]
		// Skip shapes whose route we never loaded or filtered out.
		if len(route) < 1 || l.skipRoute(route) {
			continue
		}
		agency := l.routeAgency[route]
		shape, err := models.NewShape(
			id, agency, int(seq), lat, lon,
		)
		// Fix: previously NewShape's error was overwritten by Save.
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		err = shape.Save()
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		if i%logp == 0 {
			log.Printf("loaded %v shapes", i)
		}
	}
	log.Printf("loaded %v shapes", i)
}
// updateRouteShapes updates the route_shape table by identifying
// the "biggest" shapes typical for a route. All work happens in one
// transaction: existing rows are deleted, then each candidate shape
// is upserted in smallest-to-largest order so the last one wins.
func (l *Loader) updateRouteShapes() {
	var err error
	tx, err := etc.DBConn.Beginx()
	if err != nil {
		log.Fatal(err)
	}
	// Commit on success, roll back on failure. Fix: the original
	// discarded Rollback's return value and re-checked the outer err
	// instead (and misspelled "route" in the message).
	defer func() {
		if err == nil {
			if commitErr := tx.Commit(); commitErr != nil {
				log.Println("can't commit route shapes", commitErr)
			}
			return
		}
		if rbErr := tx.Rollback(); rbErr != nil {
			log.Println("can't rollback route shapes", rbErr)
		}
	}()
	// NOTE(review): log.Fatal below exits without running the deferred
	// rollback; the uncommitted tx is abandoned when the process dies.
	// delete existing routes within a transaction (won't take effect
	// unless committed)
	err = models.DeleteRouteShapes(tx)
	if err != nil {
		log.Fatal(err)
	}
	// Get shapes ordered from smallest to largest
	routeShapes, err := models.GetRouteShapes(tx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d route shapes", len(routeShapes))
	for _, rs := range routeShapes {
		// upsert each route so we end up with the most common
		err = rs.Save(tx)
		if err != nil {
			log.Fatal(err)
		} else {
			log.Printf("saved %v", rs)
		}
	}
}
// LoadOnce loads the files in conf.Loader.GTFSURLs, possibly filtering by the
// routes specified in conf.Loader.RouteFilter. If no filter is defined,
// it loads all data in the specified URLs. After loading, the
// materialized views in views are refreshed, one transaction each.
func LoadOnce() {
	for _, url := range conf.Loader.GTFSURLs {
		log.Printf("starting %v", url)
		// FIXME: do this in Go, need to make it integrated with loader
		// (requires external wget and unzip binaries on $PATH; any
		// download or extraction failure panics).
		dir, err := ioutil.TempDir(conf.Loader.TmpDir, "")
		if err != nil {
			panic(err)
		}
		cmd := exec.Command("wget", url, "-O", path.Join(dir, "file.zip"))
		err = cmd.Run()
		if err != nil {
			panic(err)
		}
		cmd = exec.Command("unzip", path.Join(dir, "file.zip"), "-d", dir)
		err = cmd.Run()
		if err != nil {
			panic(err)
		}
		// Wrapped in a func literal so the deferred RemoveAll runs at
		// the end of each iteration, not when LoadOnce returns.
		func() {
			log.Printf("loading: %v in %v", url, dir)
			defer os.RemoveAll(dir)
			t1 := time.Now()
			l := newLoader(dir)
			l.load()
			t2 := time.Now()
			log.Printf("took %v for %v", t2.Sub(t1), url)
		}()
	}
	// Refresh each materialized view in its own transaction, since
	// its ID sequence is reset in a separate statement first.
	for _, view := range views {
		func() {
			var err error
			tx, err := etc.DBConn.Beginx()
			if err != nil {
				log.Fatal("can't create tx to update view", view, err)
			}
			// Commit if both statements succeeded, else roll back.
			defer func() {
				if err == nil {
					commitErr := tx.Commit()
					if commitErr != nil {
						log.Fatal("error committing update to view", view, err)
					}
				} else {
					rollbackErr := tx.Rollback()
					if rollbackErr != nil {
						log.Fatal("error rolling back update to view", view, err)
					}
				}
			}()
			statements := []string{
				fmt.Sprintf("ALTER SEQUENCE %s_seq RESTART WITH 1", view),
				fmt.Sprintf("REFRESH MATERIALIZED VIEW CONCURRENTLY %s", view),
			}
			for _, statement := range statements {
				log.Println(statement)
				_, err = tx.Exec(statement)
				if err != nil {
					log.Println("can't exec", statement, err)
					return
				}
				log.Println("complete")
			}
		}()
	}
}
// LoadForever continuously runs LoadOnce, breaking for 24 hours between loads
// (loaderBreak). It never returns.
func LoadForever() {
	for {
		LoadOnce()
		log.Printf("finished loading, sleeping for %v", loaderBreak)
		time.Sleep(loaderBreak)
	}
}
adding comment
package loader
import (
"encoding/csv"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/brnstz/bus/internal/conf"
"github.com/brnstz/bus/internal/etc"
"github.com/brnstz/bus/internal/models"
)
var (
	// days lists the GTFS calendar.txt weekday columns, in week order.
	days = []string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"}
	// datefmt is the GTFS date layout (YYYYMMDD) for time.Parse.
	datefmt = "20060102"
	// loaderBreak is the pause between LoadForever iterations.
	loaderBreak = time.Hour * 24
	// views names the materialized views refreshed after each load.
	views = []string{"here", "service", "service_exception"}
	// logp is the progress-logging interval (log every logp rows).
	logp = 1000
)
// rskey is the unique key for a route_shape: route + direction +
// headsign together identify one shape variant of a route.
type rskey struct {
	routeID     string
	directionID int
	headsign    string
}
// Loader reads a directory of GTFS text files and persists routes,
// trips, stops, calendars and shapes via the models package. The
// lookup maps below are filled by the earlier load stages and read
// by the later ones (see load for the required ordering).
type Loader struct {
	// the dir from which we load google transit files
	dir string
	// mapping from trip id to a trip object
	trips map[string]*models.Trip
	// mapping from stop_id to a slice of trip_ids
	stopTrips map[string][]string
	// mapping of trip_id to service object
	tripService map[string]*models.Service
	// mapping of service_id to map of unique route_id
	serviceRoute map[string]map[string]bool
	// routeIDs is the optional route filter; nil means "load every
	// route" (populated from conf.Loader.RouteFilter in newLoader,
	// consulted by skipRoute).
	routeIDs map[string]bool
	// routeAgency contains the agency for each route after we loadRoutes()
	routeAgency map[string]string
	// mapping trip_id to route_id
	tripRoute map[string]string
	// shapeRoute maps shape_id to route_id (for purposes of adding agency_id
	// to shapes table)
	shapeRoute map[string]string
	// routeShapeCount keeps a running tab of the biggest shape for this
	// route/dir/headsign combo
	/*
		routeShapeCount map[rskey]int
		routeShapeID map[rskey]
	*/
}
// newLoader returns a Loader rooted at dir with all lookup maps
// initialized. If conf.Loader.RouteFilter names any routes, only
// those routes are loaded; otherwise everything in the feed is.
func newLoader(dir string) *Loader {
	ldr := &Loader{
		dir:          dir,
		trips:        make(map[string]*models.Trip),
		stopTrips:    make(map[string][]string),
		tripRoute:    make(map[string]string),
		tripService:  make(map[string]*models.Service),
		serviceRoute: make(map[string]map[string]bool),
		routeAgency:  make(map[string]string),
		shapeRoute:   make(map[string]string),
	}

	// An empty BUS_ROUTE_FILTER value becomes []string{""}; checking
	// the 0th entry's length treats that the same as "no filter".
	// Possibly the conf package should enforce this instead.
	filter := conf.Loader.RouteFilter
	if len(filter) == 0 || len(filter[0]) == 0 {
		return ldr
	}

	ldr.routeIDs = make(map[string]bool, len(filter))
	for _, id := range filter {
		ldr.routeIDs[id] = true
	}
	return ldr
}
// load runs every loading stage in dependency order: loadRoutes fills
// routeAgency (read by loadTrips), loadTrips fills trips/tripService/
// tripRoute/shapeRoute (read by the stop, calendar and shape stages),
// and updateRouteShapes runs last over the freshly loaded shapes.
func (l *Loader) load() {
	l.loadRoutes()
	l.loadTrips()
	l.loadStopTrips()
	l.loadUniqueStop()
	l.loadCalendars()
	l.loadCalendarDates()
	l.loadShapes()
	l.updateRouteShapes()
}
// getcsv opens the named file under dir and returns a CSV reader with
// LazyQuotes enabled (GTFS feeds often contain stray quotes). A
// missing file is fatal via panic.
// NOTE(review): the opened *os.File is never closed and is only
// reachable through the returned reader, so the handle lives until
// process exit — presumably acceptable for this batch loader, but
// worth confirming.
func getcsv(dir, name string) *csv.Reader {
	f, err := os.Open(path.Join(dir, name))
	if err != nil {
		panic(err)
	}
	r := csv.NewReader(f)
	r.LazyQuotes = true
	return r
}
// find returns the index of col within header. A missing column is
// fatal, since every caller depends on the column being present.
func find(header []string, col string) int {
	for idx, name := range header {
		if name == col {
			return idx
		}
	}
	log.Fatalf("can't find header col %v", col)
	return -1
}
// skipRoute returns true if we should skip this route given our
// routeFilter config. A nil routeIDs map means no filter is
// configured, so nothing is skipped.
func (l *Loader) skipRoute(routeID string) bool {
	// Fix: return the condition directly instead of the redundant
	// if/else-return-bool with `== false`.
	return l.routeIDs != nil && !l.routeIDs[routeID]
}
// loadRoutes reads routes.txt, saves each non-filtered route and
// records its agency_id in l.routeAgency for the later stages.
// Any read/parse/save failure is fatal.
func (l *Loader) loadRoutes() {
	var i int
	f := getcsv(l.dir, "routes.txt")
	header, err := f.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	// Resolve column positions once from the header row.
	routeIdx := find(header, "route_id")
	routeTypeIdx := find(header, "route_type")
	routeColorIdx := find(header, "route_color")
	routeTextColorIdx := find(header, "route_text_color")
	routeAgencyIdx := find(header, "agency_id")
	for i = 0; ; i++ {
		rec, err := f.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of routes.txt", err, i)
		}
		route := rec[routeIdx]
		if l.skipRoute(route) {
			continue
		}
		routeType, err := strconv.Atoi(rec[routeTypeIdx])
		if err != nil {
			log.Fatalf("%v on line %v of routes.txt", err, i)
		}
		routeColor := rec[routeColorIdx]
		routeTextColor := rec[routeTextColorIdx]
		agencyID := rec[routeAgencyIdx]
		r, err := models.NewRoute(
			route, routeType, routeColor, routeTextColor, agencyID,
		)
		if err != nil {
			log.Fatalf("%v on line %v of routes.txt", err, i)
		}
		err = r.Save()
		if err != nil {
			log.Fatalf("%v on line %v of routes.txt", err, i)
		}
		// Remember the agency for this route; loadTrips and the
		// stop/shape stages look it up from here.
		l.routeAgency[route] = agencyID
	}
	log.Printf("loaded %v routes", i)
}
// loadTrips reads trips.txt, saves each trip on a non-filtered route,
// and populates the trips/tripService/serviceRoute/tripRoute/
// shapeRoute lookup maps used by the later stages.
func (l *Loader) loadTrips() {
	var i int
	f := getcsv(l.dir, "trips.txt")
	header, err := f.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	tripIdx := find(header, "trip_id")
	dirIdx := find(header, "direction_id")
	headIdx := find(header, "trip_headsign")
	serviceIdx := find(header, "service_id")
	routeIdx := find(header, "route_id")
	shapeIdx := find(header, "shape_id")
	for i = 0; ; i++ {
		rec, err := f.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		direction, err := strconv.Atoi(rec[dirIdx])
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		id := rec[tripIdx]
		service := rec[serviceIdx]
		route := rec[routeIdx]
		shape := rec[shapeIdx]
		// agency was recorded by loadRoutes; empty for unknown routes.
		agency := l.routeAgency[route]
		if l.skipRoute(route) {
			continue
		}
		trip, err := models.NewTrip(
			id, route, agency, service, shape, rec[headIdx], direction,
		)
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		l.trips[trip.TripID] = trip
		serviceObj := &models.Service{
			ID:      service,
			RouteID: route,
		}
		l.tripService[trip.TripID] = serviceObj
		// Track every route that uses this service id.
		if l.serviceRoute[service] == nil {
			l.serviceRoute[service] = map[string]bool{}
		}
		l.serviceRoute[service][route] = true
		err = trip.Save()
		if err != nil {
			log.Fatalf("%v on line %v of trips.txt", err, i)
		}
		l.tripRoute[id] = route
		l.shapeRoute[shape] = route
		if i%logp == 0 {
			log.Printf("loaded %v trips", i)
		}
	}
	log.Printf("loaded %v trips", i)
}
// loadStopTrips reads stop_times.txt, records each trip under its
// stop in l.stopTrips, and saves a ScheduledStopTime for every record
// whose trip has a known service (trips dropped by the route filter
// never got a tripService entry, so they are skipped here).
func (l *Loader) loadStopTrips() {
	var i int
	stopTimes := getcsv(l.dir, "stop_times.txt")
	header, err := stopTimes.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	stopIdx := find(header, "stop_id")
	tripIdx := find(header, "trip_id")
	arrivalIdx := find(header, "arrival_time")
	departureIdx := find(header, "departure_time") // fix: was misspelled "depatureIdx"
	sequenceIdx := find(header, "stop_sequence")
	for i = 0; ; i++ {
		rec, err := stopTimes.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		stop := rec[stopIdx]
		trip := rec[tripIdx]
		arrivalStr := rec[arrivalIdx]
		departureStr := rec[departureIdx]
		agencyID := l.routeAgency[l.tripRoute[trip]]
		sequenceStr := rec[sequenceIdx]
		sequence, err := strconv.Atoi(sequenceStr)
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		l.stopTrips[stop] = append(l.stopTrips[stop], trip)
		service, exists := l.tripService[trip]
		if !exists {
			// Trip was filtered out (or unknown): keep the stop->trip
			// index entry above but save no scheduled time.
			continue
		}
		sst, err := models.NewScheduledStopTime(
			service.RouteID, stop, service.ID, arrivalStr, departureStr,
			agencyID, trip, sequence,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		err = sst.Save()
		if err != nil {
			log.Fatalf("%v on line %v of stop_times.txt", err, i)
		}
		if i%logp == 0 {
			log.Printf("loaded %v stop times", i)
		}
	}
	log.Printf("loaded %v stop times", i)
}
// loadUniqueStop reads stops.txt and saves one Stop per trip that
// serves the stop (with that trip's route, direction and headsign).
// Stops with no recorded trips in l.stopTrips are skipped.
func (l *Loader) loadUniqueStop() {
	var i int
	stops := getcsv(l.dir, "stops.txt")
	header, err := stops.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	stopIdx := find(header, "stop_id")
	stopNameIdx := find(header, "stop_name")
	stopLatIdx := find(header, "stop_lat")
	stopLonIdx := find(header, "stop_lon")
	for i = 0; ; i++ {
		rec, err := stops.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		// TrimSpace: some feeds pad the lat/lon columns.
		stopLat, err := strconv.ParseFloat(
			strings.TrimSpace(rec[stopLatIdx]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		stopLon, err := strconv.ParseFloat(
			strings.TrimSpace(rec[stopLonIdx]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of stops.txt", err, i)
		}
		trips, exists := l.stopTrips[rec[stopIdx]]
		if exists {
			// NOTE(review): this saves once per trip, not once per
			// unique route/dir/headsign — presumably Stop.Save
			// upserts so duplicates collapse; confirm in models.
			for _, trip := range trips {
				if l.skipRoute(l.tripRoute[trip]) {
					continue
				}
				obj := models.Stop{
					StopID:      rec[stopIdx],
					Name:        rec[stopNameIdx],
					Lat:         stopLat,
					Lon:         stopLon,
					RouteID:     l.tripRoute[trip],
					DirectionID: l.trips[trip].DirectionID,
					Headsign:    l.trips[trip].Headsign,
					AgencyID:    l.routeAgency[l.tripRoute[trip]],
				}
				err = obj.Save()
				if err != nil {
					log.Fatalf("%v on line %v of stops.txt", err, i)
				}
			}
		}
		if i%logp == 0 {
			log.Printf("loaded %v stops", i)
		}
	}
	log.Printf("loaded %v stops", i)
}
// loadCalendarDates reads calendar_dates.txt and saves a
// ServiceRouteException for every route associated with each
// service_id (association built by loadTrips). Services that map to
// no route (e.g. only filtered routes) produce no rows.
func (l *Loader) loadCalendarDates() {
	cal := getcsv(l.dir, "calendar_dates.txt")
	header, err := cal.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	serviceIdx := find(header, "service_id")
	exceptionDateIdx := find(header, "date")
	exceptionTypeIdx := find(header, "exception_type")
	for i := 0; ; i++ {
		rec, err := cal.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of calendar_dates.txt", err, i)
		}
		serviceId := rec[serviceIdx]
		exceptionDate, err := time.Parse(datefmt, rec[exceptionDateIdx])
		if err != nil {
			log.Fatalf("can't parse exception date %v %v",
				err, rec[exceptionDateIdx])
		}
		exceptionType, err := strconv.Atoi(rec[exceptionTypeIdx])
		if err != nil {
			log.Fatalf("can't parse exception type integer %v %v",
				err, rec[exceptionTypeIdx])
		}
		// Only the two defined exception types are acceptable.
		if !(exceptionType == models.ServiceAdded || exceptionType == models.ServiceRemoved) {
			log.Fatalf("invalid value for exception_type %v", exceptionType)
		}
		// Idiom fix: `for route := range` instead of `for route, _ :=`.
		for route := range l.serviceRoute[serviceId] {
			s := models.ServiceRouteException{
				AgencyID:      l.routeAgency[route],
				ServiceID:     serviceId,
				RouteID:       route,
				ExceptionDate: exceptionDate,
				ExceptionType: exceptionType,
			}
			err = s.Save()
			if err != nil {
				log.Fatalf("%v on line %v of calendar_dates.txt with %v", err, i, s)
			}
		}
	}
}
// loadCalendars reads calendar.txt and saves a ServiceRouteDay for
// every weekday column set to "1", for every route that uses the
// service, bounded by the service's start/end dates.
func (l *Loader) loadCalendars() {
	var i int
	cal := getcsv(l.dir, "calendar.txt")
	header, err := cal.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	// One column per weekday, plus the service id and date range.
	idxs := map[string]int{}
	for _, day := range days {
		idxs[day] = find(header, day)
	}
	serviceIdx := find(header, "service_id")
	startDateIdx := find(header, "start_date")
	endDateIdx := find(header, "end_date")
	for i = 0; ; i++ {
		rec, err := cal.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("%v on line %v of calendar.txt", err, i)
		}
		serviceId := rec[serviceIdx]
		startDate, err := time.Parse(datefmt, rec[startDateIdx])
		if err != nil {
			log.Fatalf("can't parse start date %v %v", err, rec[startDateIdx])
		}
		endDate, err := time.Parse(datefmt, rec[endDateIdx])
		if err != nil {
			log.Fatalf("can't parse end date %v %v", err, rec[endDateIdx])
		}
		for day, dayIdx := range idxs {
			dayVal := rec[dayIdx]
			// "1" means the service runs on this weekday.
			if dayVal != "1" {
				continue
			}
			// Idiom fix: `for route := range` instead of `for route, _ :=`.
			for route := range l.serviceRoute[serviceId] {
				srd := models.ServiceRouteDay{
					ServiceID: serviceId,
					RouteID:   route,
					AgencyID:  l.routeAgency[route],
					Day:       day,
					StartDate: startDate,
					EndDate:   endDate,
				}
				err = srd.Save()
				if err != nil {
					log.Fatalf("%v on line %v of calendar.txt with %v", err, i, srd)
				}
			}
		}
		if i%logp == 0 {
			log.Printf("loaded %v calendars", i)
		}
	}
	log.Printf("loaded %v calendars", i)
}
// loadShapes reads shapes.txt and saves every point of every shape
// whose route was recorded by loadTrips in l.shapeRoute and survives
// the route filter.
func (l *Loader) loadShapes() {
	var i int
	shapes := getcsv(l.dir, "shapes.txt")
	header, err := shapes.Read()
	if err != nil {
		log.Fatalf("unable to read header: %v", err)
	}
	idIDX := find(header, "shape_id")
	latIDX := find(header, "shape_pt_lat")
	lonIDX := find(header, "shape_pt_lon")
	seqIDX := find(header, "shape_pt_sequence")
	for i = 0; ; i++ {
		rec, err := shapes.Read()
		if err == io.EOF {
			break
		}
		// Fix: previously a non-EOF read error was ignored, leaving
		// rec nil and panicking on the index expressions below.
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		lat, err := strconv.ParseFloat(
			strings.TrimSpace(rec[latIDX]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		lon, err := strconv.ParseFloat(
			strings.TrimSpace(rec[lonIDX]), 64,
		)
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		seq, err := strconv.ParseInt(
			strings.TrimSpace(rec[seqIDX]), 10, 32,
		)
		// Fix: previously a bad sequence value was silently loaded as 0.
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		id := rec[idIDX]
		route := l.shapeRoute[id]
		// Skip shapes whose route we never loaded or filtered out.
		if len(route) < 1 || l.skipRoute(route) {
			continue
		}
		agency := l.routeAgency[route]
		shape, err := models.NewShape(
			id, agency, int(seq), lat, lon,
		)
		// Fix: previously NewShape's error was overwritten by Save.
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		err = shape.Save()
		if err != nil {
			log.Fatalf("%v on line %v of shapes.txt", err, i)
		}
		if i%logp == 0 {
			log.Printf("loaded %v shapes", i)
		}
	}
	log.Printf("loaded %v shapes", i)
}
// updateRouteShapes updates the route_shape table by identifying
// the "biggest" shapes typical for a route. All work happens in one
// transaction: existing rows are deleted, then each candidate shape
// is upserted in smallest-to-largest order so the last one wins.
func (l *Loader) updateRouteShapes() {
	var err error
	tx, err := etc.DBConn.Beginx()
	if err != nil {
		log.Fatal(err)
	}
	// Commit on success, roll back on failure. Fix: the original
	// discarded Rollback's return value and re-checked the outer err
	// instead (and misspelled "route" in the message).
	defer func() {
		if err == nil {
			if commitErr := tx.Commit(); commitErr != nil {
				log.Println("can't commit route shapes", commitErr)
			}
			return
		}
		if rbErr := tx.Rollback(); rbErr != nil {
			log.Println("can't rollback route shapes", rbErr)
		}
	}()
	// NOTE(review): log.Fatal below exits without running the deferred
	// rollback; the uncommitted tx is abandoned when the process dies.
	// delete existing routes within a transaction (won't take effect
	// unless committed)
	err = models.DeleteRouteShapes(tx)
	if err != nil {
		log.Fatal(err)
	}
	// Get shapes ordered from smallest to largest
	routeShapes, err := models.GetRouteShapes(tx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d route shapes", len(routeShapes))
	for _, rs := range routeShapes {
		// upsert each route so we end up with the most common
		err = rs.Save(tx)
		if err != nil {
			log.Fatal(err)
		} else {
			log.Printf("saved %v", rs)
		}
	}
}
// LoadOnce loads the files in conf.Loader.GTFSURLs, possibly filtering by the
// routes specified in conf.Loader.RouteFilter. If no filter is defined,
// it loads all data in the specified URLs. After loading, the
// materialized views in views are refreshed, one transaction each.
func LoadOnce() {
	for _, url := range conf.Loader.GTFSURLs {
		log.Printf("starting %v", url)
		// FIXME: do this in Go, need to make it integrated with loader
		// (requires external wget and unzip binaries on $PATH; any
		// download or extraction failure panics).
		dir, err := ioutil.TempDir(conf.Loader.TmpDir, "")
		if err != nil {
			panic(err)
		}
		cmd := exec.Command("wget", url, "-O", path.Join(dir, "file.zip"))
		err = cmd.Run()
		if err != nil {
			panic(err)
		}
		cmd = exec.Command("unzip", path.Join(dir, "file.zip"), "-d", dir)
		err = cmd.Run()
		if err != nil {
			panic(err)
		}
		// Wrapped in a func literal so the deferred RemoveAll runs at
		// the end of each iteration, not when LoadOnce returns.
		func() {
			log.Printf("loading: %v in %v", url, dir)
			defer os.RemoveAll(dir)
			t1 := time.Now()
			l := newLoader(dir)
			l.load()
			t2 := time.Now()
			log.Printf("took %v for %v", t2.Sub(t1), url)
		}()
	}
	// Update materialized views. Use a transaction for each one, because
	// we reset each view's primary ID sequence in a separate statement.
	for _, view := range views {
		func() {
			var err error
			tx, err := etc.DBConn.Beginx()
			if err != nil {
				log.Fatal("can't create tx to update view", view, err)
			}
			// Commit if both statements succeeded, else roll back.
			defer func() {
				if err == nil {
					commitErr := tx.Commit()
					if commitErr != nil {
						log.Fatal("error committing update to view", view, err)
					}
				} else {
					rollbackErr := tx.Rollback()
					if rollbackErr != nil {
						log.Fatal("error rolling back update to view", view, err)
					}
				}
			}()
			statements := []string{
				fmt.Sprintf("ALTER SEQUENCE %s_seq RESTART WITH 1", view),
				fmt.Sprintf("REFRESH MATERIALIZED VIEW CONCURRENTLY %s", view),
			}
			for _, statement := range statements {
				log.Println(statement)
				_, err = tx.Exec(statement)
				if err != nil {
					log.Println("can't exec", statement, err)
					return
				}
				log.Println("complete")
			}
		}()
	}
}
// LoadForever continuously runs LoadOnce, breaking for 24 hours between loads
// (loaderBreak). It never returns.
func LoadForever() {
	for {
		LoadOnce()
		log.Printf("finished loading, sleeping for %v", loaderBreak)
		time.Sleep(loaderBreak)
	}
}
|
package loader
import (
"errors"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"go/types"
"log"
"os"
"golang.org/x/tools/go/gcexportdata"
"golang.org/x/tools/go/packages"
)
// Graph resolves patterns and returns packages with all the
// information required to later load type information, and optionally
// syntax trees.
//
// The provided config can set any setting with the exception of Mode.
func Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) {
	// Mode is forced: the load must produce names, the import graph,
	// export-data paths, file lists and type sizes for the Load*
	// functions below; callers may configure everything else.
	cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes
	pkgs, err := packages.Load(&cfg, patterns...)
	if err != nil {
		return nil, err
	}
	// Share one FileSet across the whole graph so positions from
	// different packages are resolved against the same set.
	fset := token.NewFileSet()
	packages.Visit(pkgs, nil, func(pkg *packages.Package) {
		pkg.Fset = fset
	})
	return pkgs, nil
}
// LoadFromExport loads a package from export data. All of its
// dependencies must have been loaded already. On success pkg.Types is
// populated and pkg.IllTyped cleared; on any error pkg stays marked
// IllTyped.
func LoadFromExport(pkg *packages.Package) error {
	// Pessimistically mark ill-typed; cleared at the end on success.
	pkg.IllTyped = true
	// Note: the loop variable deliberately shadows pkg here.
	for path, pkg := range pkg.Imports {
		if pkg.Types == nil {
			return fmt.Errorf("dependency %q hasn't been loaded yet", path)
		}
	}
	if pkg.ExportFile == "" {
		return fmt.Errorf("no export data for %q", pkg.ID)
	}
	f, err := os.Open(pkg.ExportFile)
	if err != nil {
		return err
	}
	defer f.Close()
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		return err
	}
	// Build the map of already-loaded dependency types that
	// gcexportdata needs, visiting the transitive import graph
	// exactly once per package.
	view := make(map[string]*types.Package)  // view seen by gcexportdata
	seen := make(map[*packages.Package]bool) // all visited packages
	var visit func(pkgs map[string]*packages.Package)
	visit = func(pkgs map[string]*packages.Package) {
		for _, pkg := range pkgs {
			if !seen[pkg] {
				seen[pkg] = true
				view[pkg.PkgPath] = pkg.Types
				visit(pkg.Imports)
			}
		}
	}
	visit(pkg.Imports)
	tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath)
	if err != nil {
		return err
	}
	pkg.Types = tpkg
	pkg.IllTyped = false
	return nil
}
// LoadFromSource loads a package from source. All of its dependencies
// must have been loaded already. It parses every compiled Go file,
// type-checks them, and fills pkg.Syntax, pkg.Types and pkg.TypesInfo;
// pkg.IllTyped is cleared only if the checker reports no error.
func LoadFromSource(pkg *packages.Package) error {
	pkg.IllTyped = true
	pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
	// OPT(dh): many packages have few files, much fewer than there
	// are CPU cores. Additionally, parsing each individual file is
	// very fast. A naive parallel implementation of this loop won't
	// be faster, and tends to be slower due to extra scheduling,
	// bookkeeping and potentially false sharing of cache lines.
	pkg.Syntax = make([]*ast.File, len(pkg.CompiledGoFiles))
	for i, file := range pkg.CompiledGoFiles {
		f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments)
		if err != nil {
			pkg.Errors = append(pkg.Errors, convertError(err)...)
			return err
		}
		pkg.Syntax[i] = f
	}
	pkg.TypesInfo = &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Scopes:     make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	// importer resolves an import path to the dependency's
	// already-loaded types; it never loads anything itself.
	importer := func(path string) (*types.Package, error) {
		if path == "unsafe" {
			return types.Unsafe, nil
		}
		if path == "C" {
			// go/packages doesn't tell us that cgo preprocessing
			// failed. When we subsequently try to parse the package,
			// we'll encounter the raw C import.
			return nil, errors.New("cgo preprocessing failed")
		}
		imp := pkg.Imports[path]
		if imp == nil {
			// nil, nil lets the type checker report the missing
			// import at the use site.
			return nil, nil
		}
		if len(imp.Errors) > 0 {
			return nil, imp.Errors[0]
		}
		return imp.Types, nil
	}
	tc := &types.Config{
		Importer: importerFunc(importer),
		// Collect every type error instead of stopping at the first.
		Error: func(err error) {
			pkg.Errors = append(pkg.Errors, convertError(err)...)
		},
	}
	err := types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax)
	if err != nil {
		return err
	}
	pkg.IllTyped = false
	return nil
}
// convertError translates an error produced by a driver, the parser,
// or the type checker into one or more packages.Error values.
func convertError(err error) []packages.Error {
	// taken from go/packages
	switch e := err.(type) {
	case packages.Error:
		// from driver
		return []packages.Error{e}
	case *os.PathError:
		// from parser
		return []packages.Error{{
			Pos:  e.Path + ":1",
			Msg:  e.Err.Error(),
			Kind: packages.ParseError,
		}}
	case scanner.ErrorList:
		// from parser
		var out []packages.Error
		for _, se := range e {
			out = append(out, packages.Error{
				Pos:  se.Pos.String(),
				Msg:  se.Msg,
				Kind: packages.ParseError,
			})
		}
		return out
	case types.Error:
		// from type checker
		return []packages.Error{{
			Pos:  e.Fset.Position(e.Pos).String(),
			Msg:  e.Msg,
			Kind: packages.TypeError,
		}}
	default:
		// unexpected impoverished error from parser?
		// If you see this error message, please file a bug.
		log.Printf("internal error: error %q (%T) without position", err, err)
		return []packages.Error{{
			Pos:  "-",
			Msg:  err.Error(),
			Kind: packages.UnknownError,
		}}
	}
}
// importerFunc adapts an ordinary function to the types.Importer interface.
type importerFunc func(path string) (*types.Package, error)

// Import implements types.Importer by delegating to f.
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
loader: don't return empty packages
Closes gh-646
package loader
import (
"errors"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"go/types"
"log"
"os"
"golang.org/x/tools/go/gcexportdata"
"golang.org/x/tools/go/packages"
)
// Graph resolves patterns and returns packages with all the
// information required to later load type information, and optionally
// syntax trees.
//
// The provided config can set any setting with the exception of Mode.
func Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) {
	cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes
	pkgs, err := packages.Load(&cfg, patterns...)
	if err != nil {
		return nil, err
	}
	// Share one FileSet across the entire package graph.
	fset := token.NewFileSet()
	packages.Visit(pkgs, nil, func(p *packages.Package) {
		p.Fset = fset
	})

	// If a package consists only of test files, then go/packages
	// incorrectly(?) returns an empty package for the non-test
	// variant. Get rid of those packages. See #646.
	// Filter in place, reusing the backing array.
	kept := pkgs[:0]
	for _, p := range pkgs {
		if len(p.CompiledGoFiles) == 0 && p.PkgPath != "unsafe" {
			continue
		}
		kept = append(kept, p)
	}
	return kept, nil
}
// LoadFromExport loads a package from export data. All of its
// dependencies must have been loaded already.
func LoadFromExport(pkg *packages.Package) error {
	// Pessimistically mark the package ill-typed; cleared on success.
	pkg.IllTyped = true
	// NOTE(review): the loop variable deliberately shadows pkg inside
	// this loop; only the outer pkg is used afterwards.
	for path, pkg := range pkg.Imports {
		if pkg.Types == nil {
			return fmt.Errorf("dependency %q hasn't been loaded yet", path)
		}
	}

	if pkg.ExportFile == "" {
		return fmt.Errorf("no export data for %q", pkg.ID)
	}
	f, err := os.Open(pkg.ExportFile)
	if err != nil {
		return err
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f)
	if err != nil {
		return err
	}

	view := make(map[string]*types.Package)  // view seen by gcexportdata
	seen := make(map[*packages.Package]bool) // all visited packages
	// Build the import-path -> *types.Package view over the
	// transitive imports of pkg.
	var visit func(pkgs map[string]*packages.Package)
	visit = func(pkgs map[string]*packages.Package) {
		for _, pkg := range pkgs {
			if !seen[pkg] {
				seen[pkg] = true
				view[pkg.PkgPath] = pkg.Types
				visit(pkg.Imports)
			}
		}
	}
	visit(pkg.Imports)
	tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath)
	if err != nil {
		return err
	}
	pkg.Types = tpkg
	pkg.IllTyped = false
	return nil
}
// LoadFromSource loads a package from source. All of its dependencies
// must have been loaded already.
func LoadFromSource(pkg *packages.Package) error {
	// Pessimistically mark the package ill-typed; this is only
	// cleared after type checking succeeds at the bottom.
	pkg.IllTyped = true
	pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)

	// OPT(dh): many packages have few files, much fewer than there
	// are CPU cores. Additionally, parsing each individual file is
	// very fast. A naive parallel implementation of this loop won't
	// be faster, and tends to be slower due to extra scheduling,
	// bookkeeping and potentially false sharing of cache lines.
	pkg.Syntax = make([]*ast.File, len(pkg.CompiledGoFiles))
	for i, file := range pkg.CompiledGoFiles {
		f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments)
		if err != nil {
			// Record the parse failure on the package and stop; a
			// package with an unparseable file cannot be type checked.
			pkg.Errors = append(pkg.Errors, convertError(err)...)
			return err
		}
		pkg.Syntax[i] = f
	}
	pkg.TypesInfo = &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Scopes:     make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}

	// importer resolves import paths against the dependencies that
	// were already loaded into pkg.Imports.
	importer := func(path string) (*types.Package, error) {
		if path == "unsafe" {
			return types.Unsafe, nil
		}
		if path == "C" {
			// go/packages doesn't tell us that cgo preprocessing
			// failed. When we subsequently try to parse the package,
			// we'll encounter the raw C import.
			return nil, errors.New("cgo preprocessing failed")
		}
		imp := pkg.Imports[path]
		if imp == nil {
			return nil, nil
		}
		if len(imp.Errors) > 0 {
			return nil, imp.Errors[0]
		}
		return imp.Types, nil
	}
	tc := &types.Config{
		Importer: importerFunc(importer),
		Error: func(err error) {
			// Collect every type-checker error instead of stopping
			// at the first.
			pkg.Errors = append(pkg.Errors, convertError(err)...)
		},
	}
	err := types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax)
	if err != nil {
		return err
	}
	pkg.IllTyped = false
	return nil
}
// convertError translates an error produced by a driver, the parser,
// or the type checker into one or more packages.Error values.
func convertError(err error) []packages.Error {
	// taken from go/packages
	switch e := err.(type) {
	case packages.Error:
		// from driver
		return []packages.Error{e}
	case *os.PathError:
		// from parser
		return []packages.Error{{
			Pos:  e.Path + ":1",
			Msg:  e.Err.Error(),
			Kind: packages.ParseError,
		}}
	case scanner.ErrorList:
		// from parser
		var out []packages.Error
		for _, se := range e {
			out = append(out, packages.Error{
				Pos:  se.Pos.String(),
				Msg:  se.Msg,
				Kind: packages.ParseError,
			})
		}
		return out
	case types.Error:
		// from type checker
		return []packages.Error{{
			Pos:  e.Fset.Position(e.Pos).String(),
			Msg:  e.Msg,
			Kind: packages.TypeError,
		}}
	default:
		// unexpected impoverished error from parser?
		// If you see this error message, please file a bug.
		log.Printf("internal error: error %q (%T) without position", err, err)
		return []packages.Error{{
			Pos:  "-",
			Msg:  err.Error(),
			Kind: packages.UnknownError,
		}}
	}
}
// importerFunc adapts an ordinary function to the types.Importer interface.
type importerFunc func(path string) (*types.Package, error)

// Import implements types.Importer by delegating to f.
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
// A FmtFlag value is a set of flags (or 0).
// They control how the Xconv functions format their values.
// See the respective function's documentation for details.
// Individual flags are combined by OR-ing them together.
type FmtFlag int

const ( // fmt.Format flag/prec or verb
	FmtLeft     FmtFlag = 1 << iota // '-'
	FmtSharp                        // '#'
	FmtSign                         // '+'
	FmtUnsigned                     // internal use only (historic: u flag)
	FmtShort                        // verb == 'S' (historic: h flag)
	FmtLong                         // verb == 'L' (historic: l flag)
	FmtComma                        // '.' (== hasPrec) (historic: , flag)
	FmtByte                         // '0' (historic: hh flag)
)
// fmtFlag computes the (internal) FmtFlag
// value given the fmt.State and format verb.
func fmtFlag(s fmt.State, verb rune) FmtFlag {
	// The ' ' flag maps to FmtUnsigned, which is internal-only and
	// must never appear in a user format string.
	if s.Flag(' ') {
		Fatalf("FmtUnsigned in format string")
	}

	flag := FmtFlag(0)
	if s.Flag('-') {
		flag |= FmtLeft
	}
	if s.Flag('#') {
		flag |= FmtSharp
	}
	if s.Flag('+') {
		flag |= FmtSign
	}
	if _, ok := s.Precision(); ok {
		flag |= FmtComma
	}
	if s.Flag('0') {
		flag |= FmtByte
	}

	if verb == 'S' {
		flag |= FmtShort
	} else if verb == 'L' {
		flag |= FmtLong
	}
	return flag
}
// Format conversions:
// TODO(gri) verify these; eliminate those not used anymore
//
// %v Op Node opcodes
// Flags: #: print Go syntax (automatic unless fmtmode == FDbg)
//
// %j *Node Node details
// Flags: 0: suppresses things not relevant until walk
//
// %v *Val Constant values
//
// %v *Sym Symbols
// %S unqualified identifier in any mode
// Flags: +,- #: mode (see below)
// 0: in export mode: unqualified identifier if exported, qualified if not
//
// %v *Type Types
// %S omit "func" and receiver in function types
// %L definition instead of name.
// Flags: +,- #: mode (see below)
// ' ' (only in -/Sym mode) print type identifiers with package name instead of prefix.
//
// %v *Node Nodes
// %S (only in +/debug mode) suppress recursion
// %L (only in Error mode) print "foo (type Bar)"
// Flags: +,- #: mode (see below)
//
// %v Nodes Node lists
// Flags: those of *Node
// .: separate items with ',' instead of ';'
// *Sym, *Type, and *Node types use the flags below to set the format mode
const (
	FErr    = iota // error mode: output meant for the user (default)
	FDbg           // debug mode
	FTypeId        // type-identifier mode
)

// fmtmode is the current (sticky) format mode; see setfmode.
var fmtmode int = FErr

var fmtpkgpfx int // "% v" stickyness for *Type objects
// The mode flags '+', '-', and '#' are sticky; they persist through
// recursions of *Node, *Type, and *Sym values. The ' ' flag is
// sticky only on *Type recursions and only used in %-/*Sym mode.
//
// Example: given a *Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
// Useful format combinations:
// TODO(gri): verify these
//
// *Node, Nodes:
// %+v multiline recursive debug dump of *Node/Nodes
// %+S non-recursive debug dump
//
// *Node:
// %#v Go format
// %L "foo (type Bar)" for error messages
//
// *Type:
// %#v Go format
// %#L type definition instead of name
// %#S omit "func" and receiver in function signature
//
// %-v type identifiers
// %-S type identifiers without "func" and arg names in type signatures (methodsym)
// %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
// setfmode switches the global fmtmode according to the sticky mode
// flags in *flags, clears those flags, and returns the previous mode
// so the caller can restore it afterwards.
func setfmode(flags *FmtFlag) (fm int) {
	fm = fmtmode
	switch {
	case *flags&FmtSign != 0:
		fmtmode = FDbg
	case *flags&FmtSharp != 0:
		// ignore (textual export format no longer supported)
	case *flags&FmtLeft != 0:
		fmtmode = FTypeId
	}

	*flags &^= (FmtSharp | FmtLeft | FmtSign)
	return
}
// goopnames maps Op values to their Go-syntax spellings; used by
// Op.oconv for %#v and non-debug formatting. Entries marked "not
// actual syntax" are descriptive placeholders.
var goopnames = []string{
	OADDR:     "&",
	OADD:      "+",
	OADDSTR:   "+",
	OALIGNOF:  "unsafe.Alignof",
	OANDAND:   "&&",
	OANDNOT:   "&^",
	OAND:      "&",
	OAPPEND:   "append",
	OAS:       "=",
	OAS2:      "=",
	OBREAK:    "break",
	OCALL:     "function call", // not actual syntax
	OCAP:      "cap",
	OCASE:     "case",
	OCLOSE:    "close",
	OCOMPLEX:  "complex",
	OCOM:      "^",
	OCONTINUE: "continue",
	OCOPY:     "copy",
	ODELETE:   "delete",
	ODEFER:    "defer",
	ODIV:      "/",
	OEQ:       "==",
	OFALL:     "fallthrough",
	OFOR:      "for",
	OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge
	OGE:       ">=",
	OGOTO:     "goto",
	OGT:       ">",
	OIF:       "if",
	OIMAG:     "imag",
	OIND:      "*",
	OLEN:      "len",
	OLE:       "<=",
	OLSH:      "<<",
	OLT:       "<",
	OMAKE:     "make",
	OMINUS:    "-",
	OMOD:      "%",
	OMUL:      "*",
	ONEW:      "new",
	ONE:       "!=",
	ONOT:      "!",
	OOFFSETOF: "unsafe.Offsetof",
	OOROR:     "||",
	OOR:       "|",
	OPANIC:    "panic",
	OPLUS:     "+",
	OPRINTN:   "println",
	OPRINT:    "print",
	ORANGE:    "range",
	OREAL:     "real",
	ORECV:     "<-",
	ORECOVER:  "recover",
	ORETURN:   "return",
	ORSH:      ">>",
	OSELECT:   "select",
	OSEND:     "<-",
	OSIZEOF:   "unsafe.Sizeof",
	OSUB:      "-",
	OSWITCH:   "switch",
	OXOR:      "^",
	OXFALL:    "fallthrough",
}
// String returns the default ('%v') formatting of o; this routes
// through Op.Format rather than recursing.
func (o Op) String() string {
	return fmt.Sprint(o)
}

// GoString returns the Go-syntax ('%#v') formatting of o.
func (o Op) GoString() string {
	return fmt.Sprintf("%#v", o)
}
// Format implements fmt.Formatter for Op. Only %v is supported
// (delegating to oconv); any other verb prints an error marker.
func (o Op) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		o.oconv(s, fmtFlag(s, verb))

	default:
		fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
	}
}
// oconv writes the name of op o to s. The Go-syntax spelling
// (goopnames) is preferred when FmtSharp is set or outside debug
// mode; otherwise the internal opnames spelling is used, falling
// back to "O-<n>" for ops with no name in either table.
func (o Op) oconv(s fmt.State, flag FmtFlag) {
	if (flag&FmtSharp != 0) || fmtmode != FDbg {
		if o >= 0 && int(o) < len(goopnames) && goopnames[o] != "" {
			fmt.Fprint(s, goopnames[o])
			return
		}
	}

	if o >= 0 && int(o) < len(opnames) && opnames[o] != "" {
		fmt.Fprint(s, opnames[o])
		return
	}

	fmt.Fprintf(s, "O-%d", int(o))
}
// classnames maps storage-class values to their names; used by
// (*Node).jconv for the class(...) debug field.
var classnames = []string{
	"Pxxx",
	"PEXTERN",
	"PAUTO",
	"PAUTOHEAP",
	"PPARAM",
	"PPARAMOUT",
	"PFUNC",
}
// Format implements fmt.Formatter for *Node. %v, %S and %L format
// the node itself (nconv); %j prints its debug details (jconv); any
// other verb prints an error marker.
func (n *Node) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v', 'S', 'L':
		n.nconv(s, fmtFlag(s, verb))

	case 'j':
		n.jconv(s, fmtFlag(s, verb))

	default:
		fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
	}
}
// *Node details

// jconv writes the %j debug details of n (addressability, class,
// escape-analysis results, various flags) to s. When FmtShort ('S')
// is set (c != 0), the noisier fields are suppressed. Only fields
// with non-default values are printed, each as " name(value)".
func (n *Node) jconv(s fmt.State, flag FmtFlag) {
	// c != 0 selects the short form.
	c := flag & FmtShort

	if c == 0 && n.Addable() {
		fmt.Fprintf(s, " a(%v)", n.Addable())
	}

	if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
		fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
	}

	if n.Pos.IsKnown() {
		fmt.Fprintf(s, " l(%d)", n.Pos.Line())
	}

	if c == 0 && n.Xoffset != BADWIDTH {
		fmt.Fprintf(s, " x(%d)", n.Xoffset)
	}

	if n.Class != 0 {
		if int(n.Class) < len(classnames) {
			fmt.Fprintf(s, " class(%s)", classnames[n.Class])
		} else {
			// Out-of-range class value: print the raw number.
			fmt.Fprintf(s, " class(%d?)", n.Class)
		}
	}

	if n.Colas() {
		fmt.Fprintf(s, " colas(%v)", n.Colas())
	}

	if n.Name != nil && n.Name.Funcdepth != 0 {
		fmt.Fprintf(s, " f(%d)", n.Name.Funcdepth)
	}
	if n.Func != nil && n.Func.Depth != 0 {
		fmt.Fprintf(s, " ff(%d)", n.Func.Depth)
	}

	// Escape analysis result.
	switch n.Esc {
	case EscUnknown:
		break

	case EscHeap:
		fmt.Fprint(s, " esc(h)")

	case EscNone:
		fmt.Fprint(s, " esc(no)")

	case EscNever:
		if c == 0 {
			fmt.Fprint(s, " esc(N)")
		}

	default:
		fmt.Fprintf(s, " esc(%d)", n.Esc)
	}

	if e, ok := n.Opt().(*NodeEscState); ok && e.Loopdepth != 0 {
		fmt.Fprintf(s, " ld(%d)", e.Loopdepth)
	}

	if c == 0 && n.Typecheck != 0 {
		fmt.Fprintf(s, " tc(%d)", n.Typecheck)
	}

	if n.Isddd() {
		fmt.Fprintf(s, " isddd(%v)", n.Isddd())
	}

	if n.Implicit() {
		fmt.Fprintf(s, " implicit(%v)", n.Implicit())
	}

	if n.Embedded != 0 {
		fmt.Fprintf(s, " embedded(%d)", n.Embedded)
	}

	if n.Addrtaken() {
		fmt.Fprint(s, " addrtaken")
	}

	if n.Assigned() {
		fmt.Fprint(s, " assigned")
	}
	if n.Bounded() {
		fmt.Fprint(s, " bounded")
	}
	if n.NonNil() {
		fmt.Fprint(s, " nonnil")
	}

	if c == 0 && n.HasCall() {
		fmt.Fprintf(s, " hascall")
	}

	if c == 0 && n.Used() {
		fmt.Fprintf(s, " used(%v)", n.Used())
	}
}
// Format implements fmt.Formatter for constant values. Only %v is
// supported (delegating to vconv); any other verb prints an error
// marker.
func (v Val) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		v.vconv(s, fmtFlag(s, verb))

	default:
		fmt.Fprintf(s, "%%!%c(Val=%T)", verb, v)
	}
}
// vconv writes the constant value v to s, dispatching on the dynamic
// type of v.U. FmtSharp selects the alternate (Go-syntax) form where
// the two differ: exact forms for integers and floats, and
// "(re+imi)" for complex values.
func (v Val) vconv(s fmt.State, flag FmtFlag) {
	switch u := v.U.(type) {
	case *Mpint:
		if !u.Rune {
			if flag&FmtSharp != 0 {
				fmt.Fprint(s, bconv(u, FmtSharp))
				return
			}
			fmt.Fprint(s, bconv(u, 0))
			return
		}

		// Rune constant: prefer a readable quoted form.
		switch x := u.Int64(); {
		case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
			fmt.Fprintf(s, "'%c'", int(x))

		case 0 <= x && x < 1<<16:
			fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))

		case 0 <= x && x <= utf8.MaxRune:
			fmt.Fprintf(s, "'\\U%08x'", uint64(x))

		default:
			fmt.Fprintf(s, "('\\x00' + %v)", u)
		}

	case *Mpflt:
		if flag&FmtSharp != 0 {
			fmt.Fprint(s, fconv(u, 0))
			return
		}
		fmt.Fprint(s, fconv(u, FmtSharp))
		return

	case *Mpcplx:
		// Use u directly rather than re-asserting v.U.(*Mpcplx);
		// the type switch already established u's type.
		switch {
		case flag&FmtSharp != 0:
			fmt.Fprintf(s, "(%v+%vi)", &u.Real, &u.Imag)

		case u.Real.CmpFloat64(0) == 0:
			fmt.Fprintf(s, "%vi", fconv(&u.Imag, FmtSharp))

		case u.Imag.CmpFloat64(0) == 0:
			fmt.Fprint(s, fconv(&u.Real, FmtSharp))

		case u.Imag.CmpFloat64(0) < 0:
			// Negative imaginary part already carries its sign.
			fmt.Fprintf(s, "(%v%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))

		default:
			fmt.Fprintf(s, "(%v+%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))
		}

	case string:
		fmt.Fprint(s, strconv.Quote(u))

	case bool:
		fmt.Fprint(s, u)

	case *NilVal:
		fmt.Fprint(s, "nil")

	default:
		fmt.Fprintf(s, "<ctype=%d>", v.Ctype())
	}
}
/*
s%,%,\n%g
s%\n+%\n%g
s%^[ ]*T%%g
s%,.*%%g
s%.+% [T&] = "&",%g
s%^ ........*\]%&~%g
s%~ %%g
*/
// etnames maps EType kind values to their names; used by
// EType.String. Most entries drop the leading T of the constant name.
var etnames = []string{
	Txxx:        "Txxx",
	TINT:        "INT",
	TUINT:       "UINT",
	TINT8:       "INT8",
	TUINT8:      "UINT8",
	TINT16:      "INT16",
	TUINT16:     "UINT16",
	TINT32:      "INT32",
	TUINT32:     "UINT32",
	TINT64:      "INT64",
	TUINT64:     "UINT64",
	TUINTPTR:    "UINTPTR",
	TFLOAT32:    "FLOAT32",
	TFLOAT64:    "FLOAT64",
	TCOMPLEX64:  "COMPLEX64",
	TCOMPLEX128: "COMPLEX128",
	TBOOL:       "BOOL",
	TPTR32:      "PTR32",
	TPTR64:      "PTR64",
	TFUNC:       "FUNC",
	TARRAY:      "ARRAY",
	TSLICE:      "SLICE",
	TSTRUCT:     "STRUCT",
	TCHAN:       "CHAN",
	TMAP:        "MAP",
	TINTER:      "INTER",
	TFORW:       "FORW",
	TSTRING:     "STRING",
	TUNSAFEPTR:  "TUNSAFEPTR",
	TANY:        "ANY",
	TIDEAL:      "TIDEAL",
	TNIL:        "TNIL",
	TBLANK:      "TBLANK",
	TFUNCARGS:   "TFUNCARGS",
	TCHANARGS:   "TCHANARGS",
	TINTERMETH:  "TINTERMETH",
	TDDDFIELD:   "TDDDFIELD",
}
// String returns the name of et from the etnames table, or a generic
// "E-<n>" form for kinds without a registered name.
func (et EType) String() string {
	i := int(et)
	if i >= len(etnames) || etnames[i] == "" {
		return fmt.Sprintf("E-%d", et)
	}
	return etnames[i]
}
// symfmt returns the string form of symbol s, package-qualified
// according to the current fmtmode unless FmtShort is set. FmtByte
// additionally strips everything up to the last '.' in the name
// (e.g. a leading "type." on method symbols).
func (s *Sym) symfmt(flag FmtFlag) string {
	if s.Pkg != nil && flag&FmtShort == 0 {
		switch fmtmode {
		case FErr: // This is for the user
			if s.Pkg == builtinpkg || s.Pkg == localpkg {
				return s.Name
			}

			// If the name was used by multiple packages, display the full path,
			if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 {
				return fmt.Sprintf("%q.%s", s.Pkg.Path, s.Name)
			}
			return s.Pkg.Name + "." + s.Name

		case FDbg:
			return s.Pkg.Name + "." + s.Name

		case FTypeId:
			if flag&FmtUnsigned != 0 {
				return s.Pkg.Name + "." + s.Name // dcommontype, typehash
			}
			return s.Pkg.Prefix + "." + s.Name // (methodsym), typesym, weaksym
		}
	}

	if flag&FmtByte != 0 {
		// FmtByte (hh) implies FmtShort (h)
		// skip leading "type." in method name
		name := s.Name
		if i := strings.LastIndex(name, "."); i >= 0 {
			name = name[i+1:]
		}

		if fmtmode == FDbg {
			return fmt.Sprintf("@%q.%s", s.Pkg.Path, name)
		}

		return name
	}

	return s.Name
}
// basicnames maps the basic-type kinds to their Go source spellings;
// used by typefmt for types without a declared name.
var basicnames = []string{
	TINT:        "int",
	TUINT:       "uint",
	TINT8:       "int8",
	TUINT8:      "uint8",
	TINT16:      "int16",
	TUINT16:     "uint16",
	TINT32:      "int32",
	TUINT32:     "uint32",
	TINT64:      "int64",
	TUINT64:     "uint64",
	TUINTPTR:    "uintptr",
	TFLOAT32:    "float32",
	TFLOAT64:    "float64",
	TCOMPLEX64:  "complex64",
	TCOMPLEX128: "complex128",
	TBOOL:       "bool",
	TANY:        "any",
	TSTRING:     "string",
	TNIL:        "nil",
	TIDEAL:      "untyped number",
	TBLANK:      "blank",
}
// typefmt returns the string representation of type t under the
// current fmtmode, honoring FmtShort/FmtLong/FmtUnsigned in flag.
// FmtLong forces the definition of a named type to be printed
// instead of just its name.
func (t *Type) typefmt(flag FmtFlag) string {
	if t == nil {
		return "<T>"
	}

	if t == bytetype || t == runetype {
		// in %-T mode collapse rune and byte with their originals.
		if fmtmode != FTypeId {
			return t.Sym.sconv(FmtShort)
		}
		t = Types[t.Etype]
	}

	if t == errortype {
		return "error"
	}

	// Unless the 'l' flag was specified, if the type has a name, just print that name.
	if flag&FmtLong == 0 && t.Sym != nil && t != Types[t.Etype] {
		switch fmtmode {
		case FTypeId:
			if flag&FmtShort != 0 {
				if t.Vargen != 0 {
					return fmt.Sprintf("%v·%d", t.Sym.sconv(FmtShort), t.Vargen)
				}
				return t.Sym.sconv(FmtShort)
			}

			if flag&FmtUnsigned != 0 {
				return t.Sym.sconv(FmtUnsigned)
			}

			if t.Sym.Pkg == localpkg && t.Vargen != 0 {
				return fmt.Sprintf("%v·%d", t.Sym, t.Vargen)
			}
		}

		return t.Sym.String()
	}

	if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
		prefix := ""
		if fmtmode == FErr && (t == idealbool || t == idealstring) {
			prefix = "untyped "
		}
		return prefix + basicnames[t.Etype]
	}

	if fmtmode == FDbg {
		// In debug mode, prefix the kind name and print the body
		// once with debug mode temporarily disabled to avoid
		// repeating the prefix recursively.
		fmtmode = 0
		str := t.Etype.String() + "-" + t.typefmt(flag)
		fmtmode = FDbg
		return str
	}

	switch t.Etype {
	case TPTR32, TPTR64:
		if fmtmode == FTypeId && (flag&FmtShort != 0) {
			return "*" + t.Elem().tconv(FmtShort)
		}
		return "*" + t.Elem().String()

	case TARRAY:
		if t.isDDDArray() {
			return "[...]" + t.Elem().String()
		}
		return fmt.Sprintf("[%d]%v", t.NumElem(), t.Elem())

	case TSLICE:
		return "[]" + t.Elem().String()

	case TCHAN:
		switch t.ChanDir() {
		case Crecv:
			return "<-chan " + t.Elem().String()

		case Csend:
			return "chan<- " + t.Elem().String()
		}

		if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == Crecv {
			return "chan (" + t.Elem().String() + ")"
		}
		return "chan " + t.Elem().String()

	case TMAP:
		return "map[" + t.Key().String() + "]" + t.Val().String()

	case TINTER:
		if t.IsEmptyInterface() {
			return "interface {}"
		}
		buf := make([]byte, 0, 64)
		buf = append(buf, "interface {"...)
		for i, f := range t.Fields().Slice() {
			if i != 0 {
				buf = append(buf, ';')
			}
			buf = append(buf, ' ')
			switch {
			case f.Sym == nil:
				// Check first that a symbol is defined for this type.
				// Wrong interface definitions may have types lacking a symbol.
				break
			case exportname(f.Sym.Name):
				buf = append(buf, f.Sym.sconv(FmtShort)...)
			default:
				buf = append(buf, f.Sym.sconv(FmtUnsigned)...)
			}
			buf = append(buf, f.Type.tconv(FmtShort)...)
		}
		if t.NumFields() != 0 {
			buf = append(buf, ' ')
		}
		buf = append(buf, '}')
		return string(buf)

	case TFUNC:
		buf := make([]byte, 0, 64)
		if flag&FmtShort != 0 {
			// no leading func
		} else {
			if t.Recv() != nil {
				buf = append(buf, "method"...)
				buf = append(buf, t.Recvs().String()...)
				buf = append(buf, ' ')
			}
			buf = append(buf, "func"...)
		}
		buf = append(buf, t.Params().String()...)

		switch t.Results().NumFields() {
		case 0:
			// nothing to do

		case 1:
			buf = append(buf, ' ')
			buf = append(buf, t.Results().Field(0).Type.String()...) // struct->field->field's type

		default:
			buf = append(buf, ' ')
			buf = append(buf, t.Results().String()...)
		}
		return string(buf)

	case TSTRUCT:
		if m := t.StructType().Map; m != nil {
			mt := m.MapType()
			// Format the bucket struct for map[x]y as map.bucket[x]y.
			// This avoids a recursive print that generates very long names.
			if mt.Bucket == t {
				return "map.bucket[" + m.Key().String() + "]" + m.Val().String()
			}

			if mt.Hmap == t {
				return "map.hdr[" + m.Key().String() + "]" + m.Val().String()
			}

			if mt.Hiter == t {
				return "map.iter[" + m.Key().String() + "]" + m.Val().String()
			}

			yyerror("unknown internal map type")
		}

		buf := make([]byte, 0, 64)
		if t.IsFuncArgStruct() {
			// Multi-value function argument/result tuple: print as a
			// parenthesized list, not a struct.
			buf = append(buf, '(')
			var flag1 FmtFlag
			if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
				flag1 = FmtShort
			}
			for i, f := range t.Fields().Slice() {
				if i != 0 {
					buf = append(buf, ", "...)
				}
				buf = append(buf, fldconv(f, flag1)...)
			}
			buf = append(buf, ')')
		} else {
			buf = append(buf, "struct {"...)
			for i, f := range t.Fields().Slice() {
				if i != 0 {
					buf = append(buf, ';')
				}
				buf = append(buf, ' ')
				buf = append(buf, fldconv(f, FmtLong)...)
			}
			if t.NumFields() != 0 {
				buf = append(buf, ' ')
			}
			buf = append(buf, '}')
		}
		return string(buf)

	case TFORW:
		if t.Sym != nil {
			return "undefined " + t.Sym.String()
		}
		return "undefined"

	case TUNSAFEPTR:
		return "unsafe.Pointer"

	case TDDDFIELD:
		return fmt.Sprintf("%v <%v> %v", t.Etype, t.Sym, t.DDDField())

	case Txxx:
		return "Txxx"
	}

	// Don't know how to handle - fall back to detailed prints.
	return fmt.Sprintf("%v <%v> %v", t.Etype, t.Sym, t.Elem())
}
// Statements which may be rendered with a simplestmt as init.
func stmtwithinit(op Op) bool {
	return op == OIF || op == OFOR || op == OFORUNTIL || op == OSWITCH
}
// stmtfmt writes the statement n to s, rendering its init list either
// inline (simpleinit), as preceding statements (complexinit), or
// inside an extra enclosing block when needed to limit scope.
func (n *Node) stmtfmt(s fmt.State) {
	// some statements allow for an init, but at most one,
	// but we may have an arbitrary number added, eg by typecheck
	// and inlining. If it doesn't fit the syntax, emit an enclosing
	// block starting with the init statements.

	// if we can just say "for" n->ninit; ... then do so
	simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op)

	// otherwise, print the inits as separate statements
	complexinit := n.Ninit.Len() != 0 && !simpleinit && (fmtmode != FErr)

	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
	extrablock := complexinit && stmtwithinit(n.Op)

	if extrablock {
		fmt.Fprint(s, "{")
	}

	if complexinit {
		fmt.Fprintf(s, " %v; ", n.Ninit)
	}

	switch n.Op {
	case ODCL:
		fmt.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type)

	case ODCLFIELD:
		if n.Left != nil {
			fmt.Fprintf(s, "%v %v", n.Left, n.Right)
		} else {
			fmt.Fprintf(s, "%v", n.Right)
		}

	// Don't export "v = <N>" initializing statements, hope they're always
	// preceded by the DCL which will be re-parsed and typechecked to reproduce
	// the "v = <N>" again.
	case OAS:
		if n.Colas() && !complexinit {
			fmt.Fprintf(s, "%v := %v", n.Left, n.Right)
		} else {
			fmt.Fprintf(s, "%v = %v", n.Left, n.Right)
		}

	case OASOP:
		if n.Implicit() {
			// Implicit OASOP is ++ or --.
			if Op(n.Etype) == OADD {
				fmt.Fprintf(s, "%v++", n.Left)
			} else {
				fmt.Fprintf(s, "%v--", n.Left)
			}
			break
		}

		fmt.Fprintf(s, "%v %#v= %v", n.Left, Op(n.Etype), n.Right)

	case OAS2:
		if n.Colas() && !complexinit {
			fmt.Fprintf(s, "%.v := %.v", n.List, n.Rlist)
			break
		}
		fallthrough

	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
		fmt.Fprintf(s, "%.v = %.v", n.List, n.Rlist)

	case ORETURN:
		fmt.Fprintf(s, "return %.v", n.List)

	case ORETJMP:
		fmt.Fprintf(s, "retjmp %v", n.Sym)

	case OPROC:
		fmt.Fprintf(s, "go %v", n.Left)

	case ODEFER:
		fmt.Fprintf(s, "defer %v", n.Left)

	case OIF:
		if simpleinit {
			fmt.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
		} else {
			fmt.Fprintf(s, "if %v { %v }", n.Left, n.Nbody)
		}
		if n.Rlist.Len() != 0 {
			fmt.Fprintf(s, " else { %v }", n.Rlist)
		}

	case OFOR, OFORUNTIL:
		opname := "for"
		if n.Op == OFORUNTIL {
			opname = "foruntil"
		}
		if fmtmode == FErr { // TODO maybe only if FmtShort, same below
			fmt.Fprintf(s, "%s loop", opname)
			break
		}

		fmt.Fprint(s, opname)
		if simpleinit {
			fmt.Fprintf(s, " %v;", n.Ninit.First())
		} else if n.Right != nil {
			fmt.Fprint(s, " ;")
		}

		if n.Left != nil {
			fmt.Fprintf(s, " %v", n.Left)
		}

		if n.Right != nil {
			fmt.Fprintf(s, "; %v", n.Right)
		} else if simpleinit {
			fmt.Fprint(s, ";")
		}

		fmt.Fprintf(s, " { %v }", n.Nbody)

	case ORANGE:
		if fmtmode == FErr {
			fmt.Fprint(s, "for loop")
			break
		}

		if n.List.Len() == 0 {
			fmt.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody)
			break
		}

		fmt.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody)

	case OSELECT, OSWITCH:
		if fmtmode == FErr {
			fmt.Fprintf(s, "%v statement", n.Op)
			break
		}

		fmt.Fprint(s, n.Op.GoString()) // %#v
		if simpleinit {
			fmt.Fprintf(s, " %v;", n.Ninit.First())
		}
		if n.Left != nil {
			fmt.Fprintf(s, " %v ", n.Left)
		}

		fmt.Fprintf(s, " { %v }", n.List)

	case OXCASE:
		if n.List.Len() != 0 {
			fmt.Fprintf(s, "case %.v", n.List)
		} else {
			fmt.Fprint(s, "default")
		}
		fmt.Fprintf(s, ": %v", n.Nbody)

	case OCASE:
		switch {
		case n.Left != nil:
			// single element
			fmt.Fprintf(s, "case %v", n.Left)
		case n.List.Len() > 0:
			// range
			if n.List.Len() != 2 {
				Fatalf("bad OCASE list length %d", n.List.Len())
			}
			fmt.Fprintf(s, "case %v..%v", n.List.First(), n.List.Second())
		default:
			fmt.Fprint(s, "default")
		}
		fmt.Fprintf(s, ": %v", n.Nbody)

	case OBREAK,
		OCONTINUE,
		OGOTO,
		OFALL,
		OXFALL:
		if n.Left != nil {
			fmt.Fprintf(s, "%#v %v", n.Op, n.Left)
		} else {
			fmt.Fprint(s, n.Op.GoString()) // %#v
		}

	case OEMPTY:
		break

	case OLABEL:
		fmt.Fprintf(s, "%v: ", n.Left)
	}

	if extrablock {
		fmt.Fprint(s, "}")
	}
}
// opprec gives the operator precedence used by exprfmt to decide
// where parentheses are required (higher binds tighter). A value of
// -1 marks statement ops that nodefmt routes to stmtfmt instead.
var opprec = []int{
	OALIGNOF:      8,
	OAPPEND:       8,
	OARRAYBYTESTR: 8,
	OARRAYLIT:     8,
	OSLICELIT:     8,
	OARRAYRUNESTR: 8,
	OCALLFUNC:     8,
	OCALLINTER:    8,
	OCALLMETH:     8,
	OCALL:         8,
	OCAP:          8,
	OCLOSE:        8,
	OCONVIFACE:    8,
	OCONVNOP:      8,
	OCONV:         8,
	OCOPY:         8,
	ODELETE:       8,
	OGETG:         8,
	OLEN:          8,
	OLITERAL:      8,
	OMAKESLICE:    8,
	OMAKE:         8,
	OMAPLIT:       8,
	ONAME:         8,
	ONEW:          8,
	ONONAME:       8,
	OOFFSETOF:     8,
	OPACK:         8,
	OPANIC:        8,
	OPAREN:        8,
	OPRINTN:       8,
	OPRINT:        8,
	ORUNESTR:      8,
	OSIZEOF:       8,
	OSTRARRAYBYTE: 8,
	OSTRARRAYRUNE: 8,
	OSTRUCTLIT:    8,
	OTARRAY:       8,
	OTCHAN:        8,
	OTFUNC:        8,
	OTINTER:       8,
	OTMAP:         8,
	OTSTRUCT:      8,
	OINDEXMAP:     8,
	OINDEX:        8,
	OSLICE:        8,
	OSLICESTR:     8,
	OSLICEARR:     8,
	OSLICE3:       8,
	OSLICE3ARR:    8,
	ODOTINTER:     8,
	ODOTMETH:      8,
	ODOTPTR:       8,
	ODOTTYPE2:     8,
	ODOTTYPE:      8,
	ODOT:          8,
	OXDOT:         8,
	OCALLPART:     8,
	OPLUS:         7,
	ONOT:          7,
	OCOM:          7,
	OMINUS:        7,
	OADDR:         7,
	OIND:          7,
	ORECV:         7,
	OMUL:          6,
	ODIV:          6,
	OMOD:          6,
	OLSH:          6,
	ORSH:          6,
	OAND:          6,
	OANDNOT:       6,
	OADD:          5,
	OSUB:          5,
	OOR:           5,
	OXOR:          5,
	OEQ:           4,
	OLT:           4,
	OLE:           4,
	OGE:           4,
	OGT:           4,
	ONE:           4,
	OCMPSTR:       4,
	OCMPIFACE:     4,
	OSEND:         3,
	OANDAND:       2,
	OOROR:         1,

	// Statements handled by stmtfmt
	OAS:         -1,
	OAS2:        -1,
	OAS2DOTTYPE: -1,
	OAS2FUNC:    -1,
	OAS2MAPR:    -1,
	OAS2RECV:    -1,
	OASOP:       -1,
	OBREAK:      -1,
	OCASE:       -1,
	OCONTINUE:   -1,
	ODCL:        -1,
	ODCLFIELD:   -1,
	ODEFER:      -1,
	OEMPTY:      -1,
	OFALL:       -1,
	OFOR:        -1,
	OFORUNTIL:   -1,
	OGOTO:       -1,
	OIF:         -1,
	OLABEL:      -1,
	OPROC:       -1,
	ORANGE:      -1,
	ORETURN:     -1,
	OSELECT:     -1,
	OSWITCH:     -1,
	OXCASE:      -1,
	OXFALL:      -1,

	OEND: 0,
}
// exprfmt writes the expression n to s. prec is the precedence of the
// surrounding context; if n's operator (per opprec) binds less
// tightly, the whole expression is parenthesized.
func (n *Node) exprfmt(s fmt.State, prec int) {
	// Skip implicit dereferences and address-ofs inserted by the
	// compiler; they are not part of the source expression.
	for n != nil && n.Implicit() && (n.Op == OIND || n.Op == OADDR) {
		n = n.Left
	}

	if n == nil {
		fmt.Fprint(s, "<N>")
		return
	}

	nprec := opprec[n.Op]
	if n.Op == OTYPE && n.Sym != nil {
		nprec = 8
	}

	if prec > nprec {
		fmt.Fprintf(s, "(%v)", n)
		return
	}

	switch n.Op {
	case OPAREN:
		fmt.Fprintf(s, "(%v)", n.Left)

	case ODDDARG:
		fmt.Fprint(s, "... argument")

	case OLITERAL: // this is a bit of a mess
		if fmtmode == FErr {
			if n.Orig != nil && n.Orig != n {
				n.Orig.exprfmt(s, prec)
				return
			}
			if n.Sym != nil {
				fmt.Fprint(s, n.Sym.String())
				return
			}
		}
		if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
			n.Orig.exprfmt(s, prec)
			return
		}
		if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != idealbool && n.Type != idealstring {
			// Need parens when type begins with what might
			// be misinterpreted as a unary operator: * or <-.
			if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == Crecv) {
				fmt.Fprintf(s, "(%v)(%v)", n.Type, n.Val())
				return
			} else {
				fmt.Fprintf(s, "%v(%v)", n.Type, n.Val())
				return
			}
		}

		fmt.Fprintf(s, "%v", n.Val())

	// Special case: name used as local variable in export.
	// _ becomes ~b%d internally; print as _ for export
	case ONAME:
		if fmtmode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
			fmt.Fprint(s, "_")
			return
		}
		fallthrough
	case OPACK, ONONAME:
		fmt.Fprint(s, n.Sym.String())

	case OTYPE:
		if n.Type == nil && n.Sym != nil {
			fmt.Fprint(s, n.Sym.String())
			return
		}
		fmt.Fprintf(s, "%v", n.Type)

	case OTARRAY:
		if n.Left != nil {
			fmt.Fprintf(s, "[]%v", n.Left)
			return
		}
		fmt.Fprintf(s, "[]%v", n.Right) // happens before typecheck

	case OTMAP:
		fmt.Fprintf(s, "map[%v]%v", n.Left, n.Right)

	case OTCHAN:
		switch ChanDir(n.Etype) {
		case Crecv:
			fmt.Fprintf(s, "<-chan %v", n.Left)

		case Csend:
			fmt.Fprintf(s, "chan<- %v", n.Left)

		default:
			if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && ChanDir(n.Left.Etype) == Crecv {
				fmt.Fprintf(s, "chan (%v)", n.Left)
			} else {
				fmt.Fprintf(s, "chan %v", n.Left)
			}
		}

	case OTSTRUCT:
		fmt.Fprint(s, "<struct>")

	case OTINTER:
		fmt.Fprint(s, "<inter>")

	case OTFUNC:
		fmt.Fprint(s, "<func>")

	case OCLOSURE:
		if fmtmode == FErr {
			fmt.Fprint(s, "func literal")
			return
		}
		if n.Nbody.Len() != 0 {
			fmt.Fprintf(s, "%v { %v }", n.Type, n.Nbody)
			return
		}
		fmt.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)

	case OCOMPLIT:
		ptrlit := n.Right != nil && n.Right.Implicit() && n.Right.Type != nil && n.Right.Type.IsPtr()
		if fmtmode == FErr {
			if n.Right != nil && n.Right.Type != nil && !n.Implicit() {
				if ptrlit {
					fmt.Fprintf(s, "&%v literal", n.Right.Type.Elem())
					return
				} else {
					fmt.Fprintf(s, "%v literal", n.Right.Type)
					return
				}
			}

			fmt.Fprint(s, "composite literal")
			return
		}
		fmt.Fprintf(s, "(%v{ %.v })", n.Right, n.List)

	case OPTRLIT:
		fmt.Fprintf(s, "&%v", n.Left)

	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
		if fmtmode == FErr {
			fmt.Fprintf(s, "%v literal", n.Type)
			return
		}
		fmt.Fprintf(s, "(%v{ %.v })", n.Type, n.List)

	case OKEY:
		if n.Left != nil && n.Right != nil {
			fmt.Fprintf(s, "%v:%v", n.Left, n.Right)
			return
		}

		if n.Left == nil && n.Right != nil {
			fmt.Fprintf(s, ":%v", n.Right)
			return
		}
		if n.Left != nil && n.Right == nil {
			fmt.Fprintf(s, "%v:", n.Left)
			return
		}
		fmt.Fprint(s, ":")

	case OSTRUCTKEY:
		fmt.Fprintf(s, "%v:%v", n.Sym, n.Left)

	case OCALLPART:
		n.Left.exprfmt(s, nprec)
		if n.Right == nil || n.Right.Sym == nil {
			fmt.Fprint(s, ".<nil>")
			return
		}
		fmt.Fprintf(s, ".%0S", n.Right.Sym)

	case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
		n.Left.exprfmt(s, nprec)
		if n.Sym == nil {
			fmt.Fprint(s, ".<nil>")
			return
		}
		fmt.Fprintf(s, ".%0S", n.Sym)

	case ODOTTYPE, ODOTTYPE2:
		n.Left.exprfmt(s, nprec)
		if n.Right != nil {
			fmt.Fprintf(s, ".(%v)", n.Right)
			return
		}
		fmt.Fprintf(s, ".(%v)", n.Type)

	case OINDEX, OINDEXMAP:
		n.Left.exprfmt(s, nprec)
		fmt.Fprintf(s, "[%v]", n.Right)

	case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
		n.Left.exprfmt(s, nprec)
		fmt.Fprint(s, "[")
		low, high, max := n.SliceBounds()
		if low != nil {
			fmt.Fprint(s, low.String())
		}
		fmt.Fprint(s, ":")
		if high != nil {
			fmt.Fprint(s, high.String())
		}
		if n.Op.IsSlice3() {
			fmt.Fprint(s, ":")
			if max != nil {
				fmt.Fprint(s, max.String())
			}
		}
		fmt.Fprint(s, "]")

	case OCOPY, OCOMPLEX:
		fmt.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right)

	case OCONV,
		OCONVIFACE,
		OCONVNOP,
		OARRAYBYTESTR,
		OARRAYRUNESTR,
		OSTRARRAYBYTE,
		OSTRARRAYRUNE,
		ORUNESTR:
		if n.Type == nil || n.Type.Sym == nil {
			fmt.Fprintf(s, "(%v)", n.Type)
		} else {
			fmt.Fprintf(s, "%v", n.Type)
		}
		if n.Left != nil {
			fmt.Fprintf(s, "(%v)", n.Left)
		} else {
			fmt.Fprintf(s, "(%.v)", n.List)
		}

	case OREAL,
		OIMAG,
		OAPPEND,
		OCAP,
		OCLOSE,
		ODELETE,
		OLEN,
		OMAKE,
		ONEW,
		OPANIC,
		ORECOVER,
		OALIGNOF,
		OOFFSETOF,
		OSIZEOF,
		OPRINT,
		OPRINTN:
		if n.Left != nil {
			fmt.Fprintf(s, "%#v(%v)", n.Op, n.Left)
			return
		}
		if n.Isddd() {
			fmt.Fprintf(s, "%#v(%.v...)", n.Op, n.List)
			return
		}
		fmt.Fprintf(s, "%#v(%.v)", n.Op, n.List)

	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
		n.Left.exprfmt(s, nprec)
		if n.Isddd() {
			fmt.Fprintf(s, "(%.v...)", n.List)
			return
		}
		fmt.Fprintf(s, "(%.v)", n.List)

	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
		if n.List.Len() != 0 { // pre-typecheck
			fmt.Fprintf(s, "make(%v, %.v)", n.Type, n.List)
			return
		}
		if n.Right != nil {
			fmt.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right)
			return
		}
		if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) {
			fmt.Fprintf(s, "make(%v, %v)", n.Type, n.Left)
			return
		}
		fmt.Fprintf(s, "make(%v)", n.Type)

	// Unary
	case OPLUS,
		OMINUS,
		OADDR,
		OCOM,
		OIND,
		ONOT,
		ORECV:
		fmt.Fprint(s, n.Op.GoString()) // %#v
		if n.Left.Op == n.Op {
			// Separate e.g. "- -x" so it doesn't read as "--x".
			fmt.Fprint(s, " ")
		}
		n.Left.exprfmt(s, nprec+1)

	// Binary
	case OADD,
		OAND,
		OANDAND,
		OANDNOT,
		ODIV,
		OEQ,
		OGE,
		OGT,
		OLE,
		OLT,
		OLSH,
		OMOD,
		OMUL,
		ONE,
		OOR,
		OOROR,
		ORSH,
		OSEND,
		OSUB,
		OXOR:
		n.Left.exprfmt(s, nprec)
		fmt.Fprintf(s, " %#v ", n.Op)
		n.Right.exprfmt(s, nprec+1)

	case OADDSTR:
		i := 0
		for _, n1 := range n.List.Slice() {
			if i != 0 {
				fmt.Fprint(s, " + ")
			}
			n1.exprfmt(s, nprec)
			i++
		}

	case OCMPSTR, OCMPIFACE:
		n.Left.exprfmt(s, nprec)
		// TODO(marvin): Fix Node.EType type union.
		fmt.Fprintf(s, " %#v ", Op(n.Etype))
		n.Right.exprfmt(s, nprec+1)

	default:
		fmt.Fprintf(s, "<node %v>", n.Op)
	}
}
// nodefmt formats n for error messages (FErr mode).
// With FmtLong ('L') the output is suffixed with " (type T)" when a
// type is known (the nil type prints as just "nil").
func (n *Node) nodefmt(s fmt.State, flag FmtFlag) {
	t := n.Type

	// we almost always want the original, except in export mode for literals
	// this saves the importer some work, and avoids us having to redo some
	// special casing for package unsafe
	if n.Op != OLITERAL && n.Orig != nil {
		n = n.Orig
	}

	if flag&FmtLong != 0 && t != nil {
		if t.Etype == TNIL {
			fmt.Fprint(s, "nil")
		} else {
			fmt.Fprintf(s, "%v (type %v)", n, t)
		}
		return
	}

	// TODO inlining produces expressions with ninits. we can't print these yet.

	// Negative opprec entries mark statement ops; render them as statements.
	if opprec[n.Op] < 0 {
		n.stmtfmt(s)
		return
	}

	n.exprfmt(s, 0)
}
// nodedump writes a multiline debug dump of n to s (FDbg mode).
// FmtShort ('S') suppresses recursion into children; dumps nested
// deeper than 40 levels are elided as "...".
func (n *Node) nodedump(s fmt.State, flag FmtFlag) {
	if n == nil {
		return
	}

	recur := flag&FmtShort == 0

	if recur {
		indent(s)
		if dumpdepth > 40 {
			fmt.Fprint(s, "...")
			return
		}

		// Dump init statements, if any, before the node itself.
		if n.Ninit.Len() != 0 {
			fmt.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
			indent(s)
		}
	}

	switch n.Op {
	default:
		fmt.Fprintf(s, "%v%j", n.Op, n)

	case OINDREGSP:
		fmt.Fprintf(s, "%v-SP%j", n.Op, n)

	case OLITERAL:
		fmt.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)

	case ONAME, ONONAME:
		if n.Sym != nil {
			fmt.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
		} else {
			fmt.Fprintf(s, "%v%j", n.Op, n)
		}
		// Show the declared (not yet resolved) type expression if present.
		if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
			indent(s)
			fmt.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
		}

	case OASOP:
		fmt.Fprintf(s, "%v-%v%j", n.Op, Op(n.Etype), n)

	case OTYPE:
		fmt.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
		if recur && n.Type == nil && n.Name.Param.Ntype != nil {
			indent(s)
			fmt.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
		}
	}

	// ONAME already printed its symbol above.
	if n.Sym != nil && n.Op != ONAME {
		fmt.Fprintf(s, " %v", n.Sym)
	}

	if n.Type != nil {
		fmt.Fprintf(s, " %v", n.Type)
	}

	if recur {
		if n.Left != nil {
			fmt.Fprintf(s, "%v", n.Left)
		}
		if n.Right != nil {
			fmt.Fprintf(s, "%v", n.Right)
		}
		if n.List.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-list%v", n.Op, n.List)
		}

		if n.Rlist.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
		}

		if n.Nbody.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
		}
	}
}
// "%S" suppresses qualifying with package
func (s *Sym) Format(f fmt.State, verb rune) {
switch verb {
case 'v', 'S':
fmt.Fprint(f, s.sconv(fmtFlag(f, verb)))
default:
fmt.Fprintf(f, "%%!%c(*Sym=%p)", verb, s)
}
}
// String returns s formatted with default flags.
func (s *Sym) String() string {
	return s.sconv(0)
}
// See #16897 before changing the implementation of sconv.

// sconv formats s according to flag. FmtLong is not supported for
// symbols and panics. The global fmtmode installed by setfmode is
// restored before returning.
func (s *Sym) sconv(flag FmtFlag) string {
	if flag&FmtLong != 0 {
		panic("linksymfmt")
	}

	if s == nil {
		return "<S>"
	}

	if s.Name == "_" {
		return "_"
	}

	// setfmode mutates the global fmtmode and strips the mode bits
	// from flag; only fmtmode needs restoring. The old save/restore of
	// the local flag copy ("sf := flag ... flag = sf") was a dead
	// store, since flag is a by-value parameter never read afterward.
	sm := setfmode(&flag)
	str := s.symfmt(flag)
	fmtmode = sm
	return str
}
// String returns t formatted with default flags.
func (t *Type) String() string {
	return t.tconv(0)
}
// ShortString generates a short description of t.
// It is used in autogenerated method names, reflection,
// and itab names.
// It may only be called in FErr mode; any other mode is a fatal
// compiler error.
func (t *Type) ShortString() string {
	if fmtmode != FErr {
		Fatalf("ShortString fmtmode %v", fmtmode)
	}
	return t.tconv(FmtLeft)
}
// LongString generates a complete description of t.
// It is useful for reflection,
// or when a unique fingerprint or hash of a type is required.
// It may only be called in FErr mode; any other mode is a fatal
// compiler error.
func (t *Type) LongString() string {
	if fmtmode != FErr {
		Fatalf("LongString fmtmode %v", fmtmode)
	}
	return t.tconv(FmtLeft | FmtUnsigned)
}
// fldconv formats struct field / function parameter f as "name type".
// FmtShort omits the name entirely; FmtLong qualifies non-exported
// names with their package. Function-argument fields take their name
// from the associated ONAME node where available. The global fmtmode
// installed by setfmode is restored before returning.
func fldconv(f *Field, flag FmtFlag) string {
	if f == nil {
		return "<T>"
	}

	sf := flag
	sm := setfmode(&flag)

	// In FTypeId mode the 'u' (FmtUnsigned) flag is sticky for the
	// whole recursion; count nesting in fmtpkgpfx.
	if fmtmode == FTypeId && (sf&FmtUnsigned != 0) {
		fmtpkgpfx++
	}
	if fmtpkgpfx != 0 {
		flag |= FmtUnsigned
	}

	var name string
	if flag&FmtShort == 0 {
		s := f.Sym

		// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
		// ~r%d is a (formerly) unnamed result.
		if fmtmode == FErr && f.Nname != nil {
			if f.Nname.Orig != nil {
				s = f.Nname.Orig.Sym
				if s != nil && s.Name[0] == '~' {
					if s.Name[1] == 'r' { // originally an unnamed result
						s = nil
					} else if s.Name[1] == 'b' { // originally the blank identifier _
						s = lookup("_")
					}
				}
			} else {
				s = nil
			}
		}

		if s != nil && f.Embedded == 0 {
			if f.Funarg != FunargNone {
				name = f.Nname.String()
			} else if flag&FmtLong != 0 {
				name = fmt.Sprintf("%0S", s)
				if !exportname(name) && flag&FmtUnsigned == 0 {
					name = s.String() // qualify non-exported names (used on structs, not on funarg)
				}
			} else {
				name = s.String()
			}
		}
	}

	var typ string
	if f.Isddd() {
		typ = fmt.Sprintf("...%v", f.Type.Elem())
	} else {
		typ = fmt.Sprintf("%v", f.Type)
	}

	str := typ
	if name != "" {
		str = name + " " + typ
	}

	// Struct field tags are shown quoted, but never for funargs.
	if flag&FmtShort == 0 && f.Funarg == FunargNone && f.Note != "" {
		str += " " + strconv.Quote(f.Note)
	}

	if fmtmode == FTypeId && (sf&FmtUnsigned != 0) {
		fmtpkgpfx--
	}

	// Restore only the global fmtmode; the old trailing "flag = sf"
	// was a dead store to a by-value parameter.
	fmtmode = sm

	return str
}
// "%L" print definition, not name
// "%S" omit 'func' and receiver from function types, short type names
func (t *Type) Format(s fmt.State, verb rune) {
switch verb {
case 'v', 'S', 'L':
fmt.Fprint(s, t.tconv(fmtFlag(s, verb)))
default:
fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
}
}
// See #16897 before changing the implementation of tconv.

// tconv formats type t according to flag. Recursion through
// self-referential types is capped via t.Trecur (deeper nesting prints
// as "<...>"). The global fmtmode installed by setfmode is restored
// before returning.
func (t *Type) tconv(flag FmtFlag) string {
	if t == nil {
		return "<T>"
	}

	if t.Trecur > 4 {
		return "<...>"
	}

	t.Trecur++
	sf := flag
	sm := setfmode(&flag)

	// In FTypeId mode the 'u' (FmtUnsigned) flag is sticky for the
	// whole recursion; count nesting in fmtpkgpfx.
	if fmtmode == FTypeId && (sf&FmtUnsigned != 0) {
		fmtpkgpfx++
	}
	if fmtpkgpfx != 0 {
		flag |= FmtUnsigned
	}

	str := t.typefmt(flag)

	if fmtmode == FTypeId && (sf&FmtUnsigned != 0) {
		fmtpkgpfx--
	}

	// Restore only the globals; the old trailing "flag = sf" was a
	// dead store to a by-value parameter.
	fmtmode = sm
	t.Trecur--

	return str
}
// String returns n formatted with default flags (via Node.Format).
func (n *Node) String() string {
	return fmt.Sprint(n)
}
// "%L" suffix with "(type %T)" where possible
// "%+S" in debug mode, don't recurse, no multiline output
func (n *Node) nconv(s fmt.State, flag FmtFlag) {
if n == nil {
fmt.Fprint(s, "<N>")
return
}
sf := flag
sm := setfmode(&flag)
switch fmtmode {
case FErr:
n.nodefmt(s, flag)
case FDbg:
dumpdepth++
n.nodedump(s, flag)
dumpdepth--
default:
Fatalf("unhandled %%N mode: %d", fmtmode)
}
flag = sf
fmtmode = sm
}
// Format implements fmt.Formatter for node lists. Only verb 'v' is
// supported; anything else reports a formatting error.
func (l Nodes) Format(s fmt.State, verb rune) {
	if verb != 'v' {
		fmt.Fprintf(s, "%%!%c(Nodes)", verb)
		return
	}
	l.hconv(s, fmtFlag(s, verb))
}
// String returns the list formatted with default flags (via Nodes.Format).
func (n Nodes) String() string {
	return fmt.Sprint(n)
}
// Flags: all those of %N plus '.': separate with comma's instead of semicolons.

// hconv formats the node list l. Elements are separated with "; " by
// default, with newlines in FDbg mode, or with ", " when FmtComma
// (precision '.') is set. An empty list in FDbg mode prints "<nil>".
// The global fmtmode installed by setfmode is restored before
// returning.
func (l Nodes) hconv(s fmt.State, flag FmtFlag) {
	if l.Len() == 0 && fmtmode == FDbg {
		fmt.Fprint(s, "<nil>")
		return
	}

	sm := setfmode(&flag)

	sep := "; "
	if fmtmode == FDbg {
		sep = "\n"
	} else if flag&FmtComma != 0 {
		sep = ", "
	}

	for i, n := range l.Slice() {
		fmt.Fprint(s, n)
		if i+1 < l.Len() {
			fmt.Fprint(s, sep)
		}
	}

	// Restore only fmtmode. The old code also saved and restored the
	// local flag copy, which was a dead store.
	fmtmode = sm
}
// dumplist prints the label s followed by a recursive debug dump of l.
func dumplist(s string, l Nodes) {
	fmt.Printf("%s%+v\n", s, l)
}
// Dump prints the label s, n's address, and a recursive debug dump of n.
func Dump(s string, n *Node) {
	fmt.Printf("%s [%p]%+v\n", s, n, n)
}
// TODO(gri) make variable local somehow

// dumpdepth is the current nodedump nesting level.
var dumpdepth int

// indent starts a new line on s, indented by ". " per dumpdepth level.
func indent(s fmt.State) {
	fmt.Fprint(s, "\n")
	for i := 0; i < dumpdepth; i++ {
		fmt.Fprint(s, ". ")
	}
}
cmd/compile: remove FmtFlag save and restore
It is unnecessary.
Passes toolstash -cmp.
Change-Id: I7c03523b6110c3d9bd5ba2b37d9a1e17a7ae570e
Reviewed-on: https://go-review.googlesource.com/38145
Run-TryBot: Josh Bleecher Snyder <cdc248cc4d32e4fbfe04e45228163bcecd48a0b9@gmail.com>
Reviewed-by: Matthew Dempsky <e56ac2c500064424afac424d4b5e53bd0570a731@google.com>
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
// A FmtFlag value is a set of flags (or 0).
// They control how the Xconv functions format their values.
// See the respective function's documentation for details.
// Each flag mirrors either a fmt flag/precision or a format verb,
// as noted alongside each constant.
type FmtFlag int

const ( // fmt.Format flag/prec or verb
	FmtLeft     FmtFlag = 1 << iota // '-'
	FmtSharp                        // '#'
	FmtSign                         // '+'
	FmtUnsigned                     // internal use only (historic: u flag)
	FmtShort                        // verb == 'S' (historic: h flag)
	FmtLong                         // verb == 'L' (historic: l flag)
	FmtComma                        // '.' (== hasPrec) (historic: , flag)
	FmtByte                         // '0' (historic: hh flag)
)
// fmtFlag computes the (internal) FmtFlag
// value given the fmt.State and format verb.
func fmtFlag(s fmt.State, verb rune) FmtFlag {
	var flag FmtFlag
	if s.Flag('-') {
		flag |= FmtLeft
	}
	if s.Flag('#') {
		flag |= FmtSharp
	}
	if s.Flag('+') {
		flag |= FmtSign
	}
	// The ' ' flag would map to FmtUnsigned, which is internal-only
	// and must never appear in a format string.
	if s.Flag(' ') {
		Fatalf("FmtUnsigned in format string")
	}
	// Any precision (e.g. "%.v") selects comma-separated lists.
	if _, ok := s.Precision(); ok {
		flag |= FmtComma
	}
	if s.Flag('0') {
		flag |= FmtByte
	}
	// The verb itself carries the short/long distinction.
	switch verb {
	case 'S':
		flag |= FmtShort
	case 'L':
		flag |= FmtLong
	}
	return flag
}
// Format conversions:
// TODO(gri) verify these; eliminate those not used anymore
//
// %v Op Node opcodes
// Flags: #: print Go syntax (automatic unless fmtmode == FDbg)
//
// %j *Node Node details
// Flags: 0: suppresses things not relevant until walk
//
// %v *Val Constant values
//
// %v *Sym Symbols
// %S unqualified identifier in any mode
// Flags: +,- #: mode (see below)
// 0: in export mode: unqualified identifier if exported, qualified if not
//
// %v *Type Types
// %S omit "func" and receiver in function types
// %L definition instead of name.
// Flags: +,- #: mode (see below)
// ' ' (only in -/Sym mode) print type identifiers with package name instead of prefix.
//
// %v *Node Nodes
// %S (only in +/debug mode) suppress recursion
// %L (only in Error mode) print "foo (type Bar)"
// Flags: +,- #: mode (see below)
//
// %v Nodes Node lists
// Flags: those of *Node
// .: separate items with ',' instead of ';'
// *Sym, *Type, and *Node types use the flags below to set the format mode
const (
	FErr    = iota // error mode: messages intended for the user (default)
	FDbg           // debug mode: verbose internal dumps
	FTypeId        // type-identity mode: unique strings for type symbols
)

// fmtmode is the current (sticky) format mode; see setfmode.
var fmtmode int = FErr

// fmtpkgpfx counts "% v" stickyness nesting for *Type objects.
var fmtpkgpfx int
// The mode flags '+', '-', and '#' are sticky; they persist through
// recursions of *Node, *Type, and *Sym values. The ' ' flag is
// sticky only on *Type recursions and only used in %-/*Sym mode.
//
// Example: given a *Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
// Useful format combinations:
// TODO(gri): verify these
//
// *Node, Nodes:
// %+v multiline recursive debug dump of *Node/Nodes
// %+S non-recursive debug dump
//
// *Node:
// %#v Go format
// %L "foo (type Bar)" for error messages
//
// *Type:
// %#v Go format
// %#L type definition instead of name
// %#S omit "func" and receiver in function signature
//
// %-v type identifiers
// %-S type identifiers without "func" and arg names in type signatures (methodsym)
// %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
// setfmode installs the format mode implied by the mode flags in
// *flags ('+' selects FDbg, '-' selects FTypeId; '#' is ignored) and
// strips those flags. It returns the previous mode so the caller can
// restore it when done.
func setfmode(flags *FmtFlag) (fm int) {
	fm = fmtmode
	if *flags&FmtSign != 0 {
		fmtmode = FDbg
	} else if *flags&FmtSharp != 0 {
		// ignore (textual export format no longer supported)
	} else if *flags&FmtLeft != 0 {
		fmtmode = FTypeId
	}

	*flags &^= (FmtSharp | FmtLeft | FmtSign)
	return
}
// goopnames maps Ops to their Go-syntax spelling (or a short phrase
// where there is no actual syntax). Used by oconv when printing in
// Go-syntax (%#v) or non-debug mode.
var goopnames = []string{
	OADDR:     "&",
	OADD:      "+",
	OADDSTR:   "+",
	OALIGNOF:  "unsafe.Alignof",
	OANDAND:   "&&",
	OANDNOT:   "&^",
	OAND:      "&",
	OAPPEND:   "append",
	OAS:       "=",
	OAS2:      "=",
	OBREAK:    "break",
	OCALL:     "function call", // not actual syntax
	OCAP:      "cap",
	OCASE:     "case",
	OCLOSE:    "close",
	OCOMPLEX:  "complex",
	OCOM:      "^",
	OCONTINUE: "continue",
	OCOPY:     "copy",
	ODELETE:   "delete",
	ODEFER:    "defer",
	ODIV:      "/",
	OEQ:       "==",
	OFALL:     "fallthrough",
	OFOR:      "for",
	OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge
	OGE:       ">=",
	OGOTO:     "goto",
	OGT:       ">",
	OIF:       "if",
	OIMAG:     "imag",
	OIND:      "*",
	OLEN:      "len",
	OLE:       "<=",
	OLSH:      "<<",
	OLT:       "<",
	OMAKE:     "make",
	OMINUS:    "-",
	OMOD:      "%",
	OMUL:      "*",
	ONEW:      "new",
	ONE:       "!=",
	ONOT:      "!",
	OOFFSETOF: "unsafe.Offsetof",
	OOROR:     "||",
	OOR:       "|",
	OPANIC:    "panic",
	OPLUS:     "+",
	OPRINTN:   "println",
	OPRINT:    "print",
	ORANGE:    "range",
	OREAL:     "real",
	ORECV:     "<-",
	ORECOVER:  "recover",
	ORETURN:   "return",
	ORSH:      ">>",
	OSELECT:   "select",
	OSEND:     "<-",
	OSIZEOF:   "unsafe.Sizeof",
	OSUB:      "-",
	OSWITCH:   "switch",
	OXOR:      "^",
	OXFALL:    "fallthrough",
}
// String returns the op formatted with default flags (via Op.Format).
func (o Op) String() string {
	return fmt.Sprint(o)
}

// GoString returns the Go-syntax spelling of the op (the %#v form).
func (o Op) GoString() string {
	return fmt.Sprintf("%#v", o)
}
// Format implements fmt.Formatter for ops. Only verb 'v' is
// supported; anything else reports a formatting error.
func (o Op) Format(s fmt.State, verb rune) {
	if verb != 'v' {
		fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
		return
	}
	o.oconv(s, fmtFlag(s, verb))
}
// oconv writes the name of op o to s: the Go spelling from goopnames
// when printing Go syntax (FmtSharp) or outside debug mode, otherwise
// the internal constant name from opnames, falling back to "O-%d" for
// out-of-range values.
func (o Op) oconv(s fmt.State, flag FmtFlag) {
	if (flag&FmtSharp != 0) || fmtmode != FDbg {
		if o >= 0 && int(o) < len(goopnames) && goopnames[o] != "" {
			fmt.Fprint(s, goopnames[o])
			return
		}
	}

	// Fall back to the internal op constant name.
	if o >= 0 && int(o) < len(opnames) && opnames[o] != "" {
		fmt.Fprint(s, opnames[o])
		return
	}

	fmt.Fprintf(s, "O-%d", int(o))
}
// classnames maps storage-class values to their names, for jconv output.
var classnames = []string{
	"Pxxx",
	"PEXTERN",
	"PAUTO",
	"PAUTOHEAP",
	"PPARAM",
	"PPARAMOUT",
	"PFUNC",
}
// Format implements fmt.Formatter for nodes. Verbs 'v', 'S', and 'L'
// print the node itself; 'j' prints internal node details. Other
// verbs report a formatting error.
func (n *Node) Format(s fmt.State, verb rune) {
	if verb == 'v' || verb == 'S' || verb == 'L' {
		n.nconv(s, fmtFlag(s, verb))
		return
	}
	if verb == 'j' {
		n.jconv(s, fmtFlag(s, verb))
		return
	}
	fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
}
// *Node details

// jconv writes debug details ("%j") for n to s: various flags, source
// line, frame offset, storage class, escape-analysis results, and
// boolean node properties. With FmtShort ('S'), fields that only
// become relevant after walk are suppressed.
func (n *Node) jconv(s fmt.State, flag FmtFlag) {
	c := flag & FmtShort

	if c == 0 && n.Addable() {
		fmt.Fprintf(s, " a(%v)", n.Addable())
	}

	if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
		fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
	}

	if n.Pos.IsKnown() {
		fmt.Fprintf(s, " l(%d)", n.Pos.Line())
	}

	if c == 0 && n.Xoffset != BADWIDTH {
		fmt.Fprintf(s, " x(%d)", n.Xoffset)
	}

	if n.Class != 0 {
		if int(n.Class) < len(classnames) {
			fmt.Fprintf(s, " class(%s)", classnames[n.Class])
		} else {
			fmt.Fprintf(s, " class(%d?)", n.Class)
		}
	}

	if n.Colas() {
		fmt.Fprintf(s, " colas(%v)", n.Colas())
	}

	if n.Name != nil && n.Name.Funcdepth != 0 {
		fmt.Fprintf(s, " f(%d)", n.Name.Funcdepth)
	}
	if n.Func != nil && n.Func.Depth != 0 {
		fmt.Fprintf(s, " ff(%d)", n.Func.Depth)
	}

	switch n.Esc {
	case EscUnknown:
		break

	case EscHeap:
		fmt.Fprint(s, " esc(h)")

	case EscNone:
		fmt.Fprint(s, " esc(no)")

	case EscNever:
		if c == 0 {
			fmt.Fprint(s, " esc(N)")
		}

	default:
		fmt.Fprintf(s, " esc(%d)", n.Esc)
	}

	if e, ok := n.Opt().(*NodeEscState); ok && e.Loopdepth != 0 {
		fmt.Fprintf(s, " ld(%d)", e.Loopdepth)
	}

	if c == 0 && n.Typecheck != 0 {
		fmt.Fprintf(s, " tc(%d)", n.Typecheck)
	}

	if n.Isddd() {
		fmt.Fprintf(s, " isddd(%v)", n.Isddd())
	}

	if n.Implicit() {
		fmt.Fprintf(s, " implicit(%v)", n.Implicit())
	}

	if n.Embedded != 0 {
		fmt.Fprintf(s, " embedded(%d)", n.Embedded)
	}

	if n.Addrtaken() {
		fmt.Fprint(s, " addrtaken")
	}

	if n.Assigned() {
		fmt.Fprint(s, " assigned")
	}
	if n.Bounded() {
		fmt.Fprint(s, " bounded")
	}
	if n.NonNil() {
		fmt.Fprint(s, " nonnil")
	}

	// Use Fprint for the verb-free constant, matching the sibling
	// cases above (Fprintf with no formatting directives is an
	// anti-idiom).
	if c == 0 && n.HasCall() {
		fmt.Fprint(s, " hascall")
	}

	if c == 0 && n.Used() {
		fmt.Fprintf(s, " used(%v)", n.Used())
	}
}
// Format implements fmt.Formatter for constant values. Only verb 'v'
// is supported; anything else reports a formatting error.
func (v Val) Format(s fmt.State, verb rune) {
	if verb != 'v' {
		fmt.Fprintf(s, "%%!%c(Val=%T)", verb, v)
		return
	}
	v.vconv(s, fmtFlag(s, verb))
}
// vconv writes constant value v to s. Runes print as quoted
// characters (escaping to \uXXXX / \UXXXXXXXX as needed), strings are
// quoted, complex values are rendered as "(a+bi)" variants.
// NOTE(review): for *Mpflt the FmtSharp test looks inverted (sharp
// selects fconv flag 0 and vice versa) — this mirrors the upstream
// source; confirm before "fixing".
func (v Val) vconv(s fmt.State, flag FmtFlag) {
	switch u := v.U.(type) {
	case *Mpint:
		if !u.Rune {
			if flag&FmtSharp != 0 {
				fmt.Fprint(s, bconv(u, FmtSharp))
				return
			}
			fmt.Fprint(s, bconv(u, 0))
			return
		}

		switch x := u.Int64(); {
		case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
			fmt.Fprintf(s, "'%c'", int(x))

		case 0 <= x && x < 1<<16:
			fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))

		case 0 <= x && x <= utf8.MaxRune:
			fmt.Fprintf(s, "'\\U%08x'", uint64(x))

		default:
			fmt.Fprintf(s, "('\\x00' + %v)", u)
		}

	case *Mpflt:
		if flag&FmtSharp != 0 {
			fmt.Fprint(s, fconv(u, 0))
			return
		}
		fmt.Fprint(s, fconv(u, FmtSharp))
		return

	case *Mpcplx:
		switch {
		case flag&FmtSharp != 0:
			fmt.Fprintf(s, "(%v+%vi)", &u.Real, &u.Imag)

		case v.U.(*Mpcplx).Real.CmpFloat64(0) == 0:
			fmt.Fprintf(s, "%vi", fconv(&u.Imag, FmtSharp))

		case v.U.(*Mpcplx).Imag.CmpFloat64(0) == 0:
			fmt.Fprint(s, fconv(&u.Real, FmtSharp))

		case v.U.(*Mpcplx).Imag.CmpFloat64(0) < 0:
			fmt.Fprintf(s, "(%v%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))

		default:
			fmt.Fprintf(s, "(%v+%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))
		}

	case string:
		fmt.Fprint(s, strconv.Quote(u))

	case bool:
		fmt.Fprint(s, u)

	case *NilVal:
		fmt.Fprint(s, "nil")

	default:
		fmt.Fprintf(s, "<ctype=%d>", v.Ctype())
	}
}
/*
s%,%,\n%g
s%\n+%\n%g
s%^[ ]*T%%g
s%,.*%%g
s%.+% [T&] = "&",%g
s%^ ........*\]%&~%g
s%~ %%g
*/
// etnames maps EType kinds to their display names, used by
// EType.String (generated originally by the sed script above).
var etnames = []string{
	Txxx:        "Txxx",
	TINT:        "INT",
	TUINT:       "UINT",
	TINT8:       "INT8",
	TUINT8:      "UINT8",
	TINT16:      "INT16",
	TUINT16:     "UINT16",
	TINT32:      "INT32",
	TUINT32:     "UINT32",
	TINT64:      "INT64",
	TUINT64:     "UINT64",
	TUINTPTR:    "UINTPTR",
	TFLOAT32:    "FLOAT32",
	TFLOAT64:    "FLOAT64",
	TCOMPLEX64:  "COMPLEX64",
	TCOMPLEX128: "COMPLEX128",
	TBOOL:       "BOOL",
	TPTR32:      "PTR32",
	TPTR64:      "PTR64",
	TFUNC:       "FUNC",
	TARRAY:      "ARRAY",
	TSLICE:      "SLICE",
	TSTRUCT:     "STRUCT",
	TCHAN:       "CHAN",
	TMAP:        "MAP",
	TINTER:      "INTER",
	TFORW:       "FORW",
	TSTRING:     "STRING",
	TUNSAFEPTR:  "TUNSAFEPTR",
	TANY:        "ANY",
	TIDEAL:      "TIDEAL",
	TNIL:        "TNIL",
	TBLANK:      "TBLANK",
	TFUNCARGS:   "TFUNCARGS",
	TCHANARGS:   "TCHANARGS",
	TINTERMETH:  "TINTERMETH",
	TDDDFIELD:   "TDDDFIELD",
}
// String returns the display name for type kind et, falling back to
// "E-%d" for values outside the etnames table.
func (et EType) String() string {
	if int(et) < len(etnames) && etnames[et] != "" {
		return etnames[et]
	}
	return fmt.Sprintf("E-%d", et)
}
// symfmt formats symbol s, deciding how (and whether) to qualify the
// name with its package according to the current fmtmode:
// FErr qualifies non-local names for the user (full path when the
// package name is ambiguous), FDbg always qualifies, and FTypeId uses
// the unique package prefix unless FmtUnsigned asks for the plain
// package name. FmtShort suppresses qualification entirely; FmtByte
// additionally strips any "type." style prefix from method names.
func (s *Sym) symfmt(flag FmtFlag) string {
	if s.Pkg != nil && flag&FmtShort == 0 {
		switch fmtmode {
		case FErr: // This is for the user
			if s.Pkg == builtinpkg || s.Pkg == localpkg {
				return s.Name
			}

			// If the name was used by multiple packages, display the full path,
			if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 {
				return fmt.Sprintf("%q.%s", s.Pkg.Path, s.Name)
			}
			return s.Pkg.Name + "." + s.Name

		case FDbg:
			return s.Pkg.Name + "." + s.Name

		case FTypeId:
			if flag&FmtUnsigned != 0 {
				return s.Pkg.Name + "." + s.Name // dcommontype, typehash
			}
			return s.Pkg.Prefix + "." + s.Name // (methodsym), typesym, weaksym
		}
	}

	if flag&FmtByte != 0 {
		// FmtByte (hh) implies FmtShort (h)
		// skip leading "type." in method name
		name := s.Name
		if i := strings.LastIndex(name, "."); i >= 0 {
			name = name[i+1:]
		}

		if fmtmode == FDbg {
			return fmt.Sprintf("@%q.%s", s.Pkg.Path, name)
		}

		return name
	}

	return s.Name
}
// basicnames gives the printed Go names of basic types, indexed by EType.
var basicnames = []string{
	TINT:        "int",
	TUINT:       "uint",
	TINT8:       "int8",
	TUINT8:      "uint8",
	TINT16:      "int16",
	TUINT16:     "uint16",
	TINT32:      "int32",
	TUINT32:     "uint32",
	TINT64:      "int64",
	TUINT64:     "uint64",
	TUINTPTR:    "uintptr",
	TFLOAT32:    "float32",
	TFLOAT64:    "float64",
	TCOMPLEX64:  "complex64",
	TCOMPLEX128: "complex128",
	TBOOL:       "bool",
	TANY:        "any",
	TSTRING:     "string",
	TNIL:        "nil",
	TIDEAL:      "untyped number",
	TBLANK:      "blank",
}
// typefmt renders type t as a string according to flag and the
// current fmtmode. Named types usually print just their (possibly
// qualified) name unless FmtLong requests the definition; unnamed
// composite types are rendered structurally, recursing through
// String/tconv/fldconv on their components.
func (t *Type) typefmt(flag FmtFlag) string {
	if t == nil {
		return "<T>"
	}

	if t == bytetype || t == runetype {
		// in %-T mode collapse rune and byte with their originals.
		if fmtmode != FTypeId {
			return t.Sym.sconv(FmtShort)
		}
		t = Types[t.Etype]
	}

	if t == errortype {
		return "error"
	}

	// Unless the 'l' flag was specified, if the type has a name, just print that name.
	if flag&FmtLong == 0 && t.Sym != nil && t != Types[t.Etype] {
		switch fmtmode {
		case FTypeId:
			if flag&FmtShort != 0 {
				if t.Vargen != 0 {
					return fmt.Sprintf("%v·%d", t.Sym.sconv(FmtShort), t.Vargen)
				}
				return t.Sym.sconv(FmtShort)
			}

			if flag&FmtUnsigned != 0 {
				return t.Sym.sconv(FmtUnsigned)
			}

			if t.Sym.Pkg == localpkg && t.Vargen != 0 {
				return fmt.Sprintf("%v·%d", t.Sym, t.Vargen)
			}
		}

		return t.Sym.String()
	}

	if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
		prefix := ""
		if fmtmode == FErr && (t == idealbool || t == idealstring) {
			prefix = "untyped "
		}
		return prefix + basicnames[t.Etype]
	}

	// In debug mode, prefix with the EType kind and re-render the rest
	// in default mode.
	if fmtmode == FDbg {
		fmtmode = 0
		str := t.Etype.String() + "-" + t.typefmt(flag)
		fmtmode = FDbg
		return str
	}

	switch t.Etype {
	case TPTR32, TPTR64:
		if fmtmode == FTypeId && (flag&FmtShort != 0) {
			return "*" + t.Elem().tconv(FmtShort)
		}
		return "*" + t.Elem().String()

	case TARRAY:
		if t.isDDDArray() {
			return "[...]" + t.Elem().String()
		}
		return fmt.Sprintf("[%d]%v", t.NumElem(), t.Elem())

	case TSLICE:
		return "[]" + t.Elem().String()

	case TCHAN:
		switch t.ChanDir() {
		case Crecv:
			return "<-chan " + t.Elem().String()

		case Csend:
			return "chan<- " + t.Elem().String()
		}

		// Parenthesize "chan (<-chan T)" to disambiguate from "chan<- chan T".
		if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == Crecv {
			return "chan (" + t.Elem().String() + ")"
		}
		return "chan " + t.Elem().String()

	case TMAP:
		return "map[" + t.Key().String() + "]" + t.Val().String()

	case TINTER:
		if t.IsEmptyInterface() {
			return "interface {}"
		}
		buf := make([]byte, 0, 64)
		buf = append(buf, "interface {"...)
		for i, f := range t.Fields().Slice() {
			if i != 0 {
				buf = append(buf, ';')
			}
			buf = append(buf, ' ')
			switch {
			case f.Sym == nil:
				// Check first that a symbol is defined for this type.
				// Wrong interface definitions may have types lacking a symbol.
				break

			case exportname(f.Sym.Name):
				buf = append(buf, f.Sym.sconv(FmtShort)...)

			default:
				buf = append(buf, f.Sym.sconv(FmtUnsigned)...)
			}
			buf = append(buf, f.Type.tconv(FmtShort)...)
		}
		if t.NumFields() != 0 {
			buf = append(buf, ' ')
		}
		buf = append(buf, '}')
		return string(buf)

	case TFUNC:
		buf := make([]byte, 0, 64)
		if flag&FmtShort != 0 {
			// no leading func
		} else {
			if t.Recv() != nil {
				buf = append(buf, "method"...)
				buf = append(buf, t.Recvs().String()...)
				buf = append(buf, ' ')
			}
			buf = append(buf, "func"...)
		}
		buf = append(buf, t.Params().String()...)

		switch t.Results().NumFields() {
		case 0:
			// nothing to do

		case 1:
			buf = append(buf, ' ')
			buf = append(buf, t.Results().Field(0).Type.String()...) // struct->field->field's type

		default:
			buf = append(buf, ' ')
			buf = append(buf, t.Results().String()...)
		}
		return string(buf)

	case TSTRUCT:
		if m := t.StructType().Map; m != nil {
			mt := m.MapType()
			// Format the bucket struct for map[x]y as map.bucket[x]y.
			// This avoids a recursive print that generates very long names.
			if mt.Bucket == t {
				return "map.bucket[" + m.Key().String() + "]" + m.Val().String()
			}

			if mt.Hmap == t {
				return "map.hdr[" + m.Key().String() + "]" + m.Val().String()
			}

			if mt.Hiter == t {
				return "map.iter[" + m.Key().String() + "]" + m.Val().String()
			}

			yyerror("unknown internal map type")
		}

		buf := make([]byte, 0, 64)
		if t.IsFuncArgStruct() {
			buf = append(buf, '(')
			var flag1 FmtFlag
			if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
				flag1 = FmtShort
			}
			for i, f := range t.Fields().Slice() {
				if i != 0 {
					buf = append(buf, ", "...)
				}
				buf = append(buf, fldconv(f, flag1)...)
			}
			buf = append(buf, ')')
		} else {
			buf = append(buf, "struct {"...)
			for i, f := range t.Fields().Slice() {
				if i != 0 {
					buf = append(buf, ';')
				}
				buf = append(buf, ' ')
				buf = append(buf, fldconv(f, FmtLong)...)
			}
			if t.NumFields() != 0 {
				buf = append(buf, ' ')
			}
			buf = append(buf, '}')
		}
		return string(buf)

	case TFORW:
		if t.Sym != nil {
			return "undefined " + t.Sym.String()
		}
		return "undefined"

	case TUNSAFEPTR:
		return "unsafe.Pointer"

	case TDDDFIELD:
		return fmt.Sprintf("%v <%v> %v", t.Etype, t.Sym, t.DDDField())

	case Txxx:
		return "Txxx"
	}

	// Don't know how to handle - fall back to detailed prints.
	return fmt.Sprintf("%v <%v> %v", t.Etype, t.Sym, t.Elem())
}
// stmtwithinit reports whether op's statement form may carry a
// simple-statement initializer (e.g. "if x := f(); cond { ... }").
func stmtwithinit(op Op) bool {
	return op == OIF || op == OFOR || op == OFORUNTIL || op == OSWITCH
}
// stmtfmt renders statement node n onto s, including any init
// statements. A single init on an if/for/switch is folded into the
// statement's own initializer syntax; otherwise multiple inits are
// printed as separate statements, wrapped in an extra block when the
// statement form has its own scope.
func (n *Node) stmtfmt(s fmt.State) {
	// some statements allow for an init, but at most one,
	// but we may have an arbitrary number added, eg by typecheck
	// and inlining. If it doesn't fit the syntax, emit an enclosing
	// block starting with the init statements.

	// if we can just say "for" n->ninit; ... then do so
	simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op)

	// otherwise, print the inits as separate statements
	complexinit := n.Ninit.Len() != 0 && !simpleinit && (fmtmode != FErr)

	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
	extrablock := complexinit && stmtwithinit(n.Op)

	if extrablock {
		fmt.Fprint(s, "{")
	}

	if complexinit {
		fmt.Fprintf(s, " %v; ", n.Ninit)
	}

	switch n.Op {
	case ODCL:
		fmt.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type)

	case ODCLFIELD:
		if n.Left != nil {
			fmt.Fprintf(s, "%v %v", n.Left, n.Right)
		} else {
			fmt.Fprintf(s, "%v", n.Right)
		}

	// Don't export "v = <N>" initializing statements, hope they're always
	// preceded by the DCL which will be re-parsed and typechecked to reproduce
	// the "v = <N>" again.
	case OAS:
		if n.Colas() && !complexinit {
			fmt.Fprintf(s, "%v := %v", n.Left, n.Right)
		} else {
			fmt.Fprintf(s, "%v = %v", n.Left, n.Right)
		}

	case OASOP:
		if n.Implicit() {
			if Op(n.Etype) == OADD {
				fmt.Fprintf(s, "%v++", n.Left)
			} else {
				fmt.Fprintf(s, "%v--", n.Left)
			}
			break
		}

		fmt.Fprintf(s, "%v %#v= %v", n.Left, Op(n.Etype), n.Right)

	case OAS2:
		if n.Colas() && !complexinit {
			fmt.Fprintf(s, "%.v := %.v", n.List, n.Rlist)
			break
		}
		fallthrough

	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
		fmt.Fprintf(s, "%.v = %.v", n.List, n.Rlist)

	case ORETURN:
		fmt.Fprintf(s, "return %.v", n.List)

	case ORETJMP:
		fmt.Fprintf(s, "retjmp %v", n.Sym)

	case OPROC:
		fmt.Fprintf(s, "go %v", n.Left)

	case ODEFER:
		fmt.Fprintf(s, "defer %v", n.Left)

	case OIF:
		if simpleinit {
			fmt.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
		} else {
			fmt.Fprintf(s, "if %v { %v }", n.Left, n.Nbody)
		}
		if n.Rlist.Len() != 0 {
			fmt.Fprintf(s, " else { %v }", n.Rlist)
		}

	case OFOR, OFORUNTIL:
		opname := "for"
		if n.Op == OFORUNTIL {
			opname = "foruntil"
		}
		if fmtmode == FErr { // TODO maybe only if FmtShort, same below
			fmt.Fprintf(s, "%s loop", opname)
			break
		}

		fmt.Fprint(s, opname)
		if simpleinit {
			fmt.Fprintf(s, " %v;", n.Ninit.First())
		} else if n.Right != nil {
			fmt.Fprint(s, " ;")
		}

		if n.Left != nil {
			fmt.Fprintf(s, " %v", n.Left)
		}

		if n.Right != nil {
			fmt.Fprintf(s, "; %v", n.Right)
		} else if simpleinit {
			fmt.Fprint(s, ";")
		}

		fmt.Fprintf(s, " { %v }", n.Nbody)

	case ORANGE:
		if fmtmode == FErr {
			fmt.Fprint(s, "for loop")
			break
		}

		if n.List.Len() == 0 {
			fmt.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody)
			break
		}

		fmt.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody)

	case OSELECT, OSWITCH:
		if fmtmode == FErr {
			fmt.Fprintf(s, "%v statement", n.Op)
			break
		}

		fmt.Fprint(s, n.Op.GoString()) // %#v
		if simpleinit {
			fmt.Fprintf(s, " %v;", n.Ninit.First())
		}
		if n.Left != nil {
			fmt.Fprintf(s, " %v ", n.Left)
		}

		fmt.Fprintf(s, " { %v }", n.List)

	case OXCASE:
		if n.List.Len() != 0 {
			fmt.Fprintf(s, "case %.v", n.List)
		} else {
			fmt.Fprint(s, "default")
		}
		fmt.Fprintf(s, ": %v", n.Nbody)

	case OCASE:
		switch {
		case n.Left != nil:
			// single element
			fmt.Fprintf(s, "case %v", n.Left)
		case n.List.Len() > 0:
			// range
			if n.List.Len() != 2 {
				Fatalf("bad OCASE list length %d", n.List.Len())
			}
			fmt.Fprintf(s, "case %v..%v", n.List.First(), n.List.Second())
		default:
			fmt.Fprint(s, "default")
		}
		fmt.Fprintf(s, ": %v", n.Nbody)

	case OBREAK,
		OCONTINUE,
		OGOTO,
		OFALL,
		OXFALL:
		if n.Left != nil {
			fmt.Fprintf(s, "%#v %v", n.Op, n.Left)
		} else {
			fmt.Fprint(s, n.Op.GoString()) // %#v
		}

	case OEMPTY:
		break

	case OLABEL:
		fmt.Fprintf(s, "%v: ", n.Left)
	}

	if extrablock {
		fmt.Fprint(s, "}")
	}
}
// opprec gives each Op a printing precedence for exprfmt (higher
// binds tighter; 8 = primary expressions, 1 = ||). Statement ops are
// marked -1 and rendered by stmtfmt instead.
var opprec = []int{
	OALIGNOF:      8,
	OAPPEND:       8,
	OARRAYBYTESTR: 8,
	OARRAYLIT:     8,
	OSLICELIT:     8,
	OARRAYRUNESTR: 8,
	OCALLFUNC:     8,
	OCALLINTER:    8,
	OCALLMETH:     8,
	OCALL:         8,
	OCAP:          8,
	OCLOSE:        8,
	OCONVIFACE:    8,
	OCONVNOP:      8,
	OCONV:         8,
	OCOPY:         8,
	ODELETE:       8,
	OGETG:         8,
	OLEN:          8,
	OLITERAL:      8,
	OMAKESLICE:    8,
	OMAKE:         8,
	OMAPLIT:       8,
	ONAME:         8,
	ONEW:          8,
	ONONAME:       8,
	OOFFSETOF:     8,
	OPACK:         8,
	OPANIC:        8,
	OPAREN:        8,
	OPRINTN:       8,
	OPRINT:        8,
	ORUNESTR:      8,
	OSIZEOF:       8,
	OSTRARRAYBYTE: 8,
	OSTRARRAYRUNE: 8,
	OSTRUCTLIT:    8,
	OTARRAY:       8,
	OTCHAN:        8,
	OTFUNC:        8,
	OTINTER:       8,
	OTMAP:         8,
	OTSTRUCT:      8,
	OINDEXMAP:     8,
	OINDEX:        8,
	OSLICE:        8,
	OSLICESTR:     8,
	OSLICEARR:     8,
	OSLICE3:       8,
	OSLICE3ARR:    8,
	ODOTINTER:     8,
	ODOTMETH:      8,
	ODOTPTR:       8,
	ODOTTYPE2:     8,
	ODOTTYPE:      8,
	ODOT:          8,
	OXDOT:         8,
	OCALLPART:     8,
	OPLUS:         7,
	ONOT:          7,
	OCOM:          7,
	OMINUS:        7,
	OADDR:         7,
	OIND:          7,
	ORECV:         7,
	OMUL:          6,
	ODIV:          6,
	OMOD:          6,
	OLSH:          6,
	ORSH:          6,
	OAND:          6,
	OANDNOT:       6,
	OADD:          5,
	OSUB:          5,
	OOR:           5,
	OXOR:          5,
	OEQ:           4,
	OLT:           4,
	OLE:           4,
	OGE:           4,
	OGT:           4,
	ONE:           4,
	OCMPSTR:       4,
	OCMPIFACE:     4,
	OSEND:         3,
	OANDAND:       2,
	OOROR:         1,

	// Statements handled by stmtfmt
	OAS:         -1,
	OAS2:        -1,
	OAS2DOTTYPE: -1,
	OAS2FUNC:    -1,
	OAS2MAPR:    -1,
	OAS2RECV:    -1,
	OASOP:       -1,
	OBREAK:      -1,
	OCASE:       -1,
	OCONTINUE:   -1,
	ODCL:        -1,
	ODCLFIELD:   -1,
	ODEFER:      -1,
	OEMPTY:      -1,
	OFALL:       -1,
	OFOR:        -1,
	OFORUNTIL:   -1,
	OGOTO:       -1,
	OIF:         -1,
	OLABEL:      -1,
	OPROC:       -1,
	ORANGE:      -1,
	ORETURN:     -1,
	OSELECT:     -1,
	OSWITCH:     -1,
	OXCASE:      -1,
	OXFALL:      -1,

	OEND: 0,
}
// exprfmt renders expression node n onto s. prec is the precedence of
// the surrounding context; when it exceeds n's own precedence (from
// opprec) the expression is parenthesized. Implicit OIND/OADDR
// wrappers are skipped before printing.
func (n *Node) exprfmt(s fmt.State, prec int) {
	for n != nil && n.Implicit() && (n.Op == OIND || n.Op == OADDR) {
		n = n.Left
	}

	if n == nil {
		fmt.Fprint(s, "<N>")
		return
	}

	nprec := opprec[n.Op]
	if n.Op == OTYPE && n.Sym != nil {
		nprec = 8
	}

	if prec > nprec {
		fmt.Fprintf(s, "(%v)", n)
		return
	}

	switch n.Op {
	case OPAREN:
		fmt.Fprintf(s, "(%v)", n.Left)

	case ODDDARG:
		fmt.Fprint(s, "... argument")

	case OLITERAL: // this is a bit of a mess
		if fmtmode == FErr {
			if n.Orig != nil && n.Orig != n {
				n.Orig.exprfmt(s, prec)
				return
			}
			if n.Sym != nil {
				fmt.Fprint(s, n.Sym.String())
				return
			}
		}
		if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
			n.Orig.exprfmt(s, prec)
			return
		}
		if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != idealbool && n.Type != idealstring {
			// Need parens when type begins with what might
			// be misinterpreted as a unary operator: * or <-.
			if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == Crecv) {
				fmt.Fprintf(s, "(%v)(%v)", n.Type, n.Val())
				return
			} else {
				fmt.Fprintf(s, "%v(%v)", n.Type, n.Val())
				return
			}
		}

		fmt.Fprintf(s, "%v", n.Val())

	// Special case: name used as local variable in export.
	// _ becomes ~b%d internally; print as _ for export
	case ONAME:
		if fmtmode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
			fmt.Fprint(s, "_")
			return
		}
		fallthrough

	case OPACK, ONONAME:
		fmt.Fprint(s, n.Sym.String())

	case OTYPE:
		if n.Type == nil && n.Sym != nil {
			fmt.Fprint(s, n.Sym.String())
			return
		}
		fmt.Fprintf(s, "%v", n.Type)

	case OTARRAY:
		if n.Left != nil {
			fmt.Fprintf(s, "[]%v", n.Left)
			return
		}
		fmt.Fprintf(s, "[]%v", n.Right) // happens before typecheck

	case OTMAP:
		fmt.Fprintf(s, "map[%v]%v", n.Left, n.Right)

	case OTCHAN:
		switch ChanDir(n.Etype) {
		case Crecv:
			fmt.Fprintf(s, "<-chan %v", n.Left)

		case Csend:
			fmt.Fprintf(s, "chan<- %v", n.Left)

		default:
			// Parenthesize nested receive-only channel element types.
			if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && ChanDir(n.Left.Etype) == Crecv {
				fmt.Fprintf(s, "chan (%v)", n.Left)
			} else {
				fmt.Fprintf(s, "chan %v", n.Left)
			}
		}

	case OTSTRUCT:
		fmt.Fprint(s, "<struct>")

	case OTINTER:
		fmt.Fprint(s, "<inter>")

	case OTFUNC:
		fmt.Fprint(s, "<func>")

	case OCLOSURE:
		if fmtmode == FErr {
			fmt.Fprint(s, "func literal")
			return
		}
		if n.Nbody.Len() != 0 {
			fmt.Fprintf(s, "%v { %v }", n.Type, n.Nbody)
			return
		}
		fmt.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)

	case OCOMPLIT:
		ptrlit := n.Right != nil && n.Right.Implicit() && n.Right.Type != nil && n.Right.Type.IsPtr()
		if fmtmode == FErr {
			if n.Right != nil && n.Right.Type != nil && !n.Implicit() {
				if ptrlit {
					fmt.Fprintf(s, "&%v literal", n.Right.Type.Elem())
					return
				} else {
					fmt.Fprintf(s, "%v literal", n.Right.Type)
					return
				}
			}

			fmt.Fprint(s, "composite literal")
			return
		}
		fmt.Fprintf(s, "(%v{ %.v })", n.Right, n.List)

	case OPTRLIT:
		fmt.Fprintf(s, "&%v", n.Left)

	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
		if fmtmode == FErr {
			fmt.Fprintf(s, "%v literal", n.Type)
			return
		}
		fmt.Fprintf(s, "(%v{ %.v })", n.Type, n.List)

	case OKEY:
		if n.Left != nil && n.Right != nil {
			fmt.Fprintf(s, "%v:%v", n.Left, n.Right)
			return
		}

		if n.Left == nil && n.Right != nil {
			fmt.Fprintf(s, ":%v", n.Right)
			return
		}
		if n.Left != nil && n.Right == nil {
			fmt.Fprintf(s, "%v:", n.Left)
			return
		}
		fmt.Fprint(s, ":")

	case OSTRUCTKEY:
		fmt.Fprintf(s, "%v:%v", n.Sym, n.Left)

	case OCALLPART:
		n.Left.exprfmt(s, nprec)
		if n.Right == nil || n.Right.Sym == nil {
			fmt.Fprint(s, ".<nil>")
			return
		}
		fmt.Fprintf(s, ".%0S", n.Right.Sym)

	case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
		n.Left.exprfmt(s, nprec)
		if n.Sym == nil {
			fmt.Fprint(s, ".<nil>")
			return
		}
		fmt.Fprintf(s, ".%0S", n.Sym)

	case ODOTTYPE, ODOTTYPE2:
		n.Left.exprfmt(s, nprec)
		if n.Right != nil {
			fmt.Fprintf(s, ".(%v)", n.Right)
			return
		}
		fmt.Fprintf(s, ".(%v)", n.Type)

	case OINDEX, OINDEXMAP:
		n.Left.exprfmt(s, nprec)
		fmt.Fprintf(s, "[%v]", n.Right)

	case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
		n.Left.exprfmt(s, nprec)
		fmt.Fprint(s, "[")
		low, high, max := n.SliceBounds()
		if low != nil {
			fmt.Fprint(s, low.String())
		}
		fmt.Fprint(s, ":")
		if high != nil {
			fmt.Fprint(s, high.String())
		}
		if n.Op.IsSlice3() {
			fmt.Fprint(s, ":")
			if max != nil {
				fmt.Fprint(s, max.String())
			}
		}
		fmt.Fprint(s, "]")

	case OCOPY, OCOMPLEX:
		fmt.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right)

	case OCONV,
		OCONVIFACE,
		OCONVNOP,
		OARRAYBYTESTR,
		OARRAYRUNESTR,
		OSTRARRAYBYTE,
		OSTRARRAYRUNE,
		ORUNESTR:
		if n.Type == nil || n.Type.Sym == nil {
			fmt.Fprintf(s, "(%v)", n.Type)
		} else {
			fmt.Fprintf(s, "%v", n.Type)
		}
		if n.Left != nil {
			fmt.Fprintf(s, "(%v)", n.Left)
		} else {
			fmt.Fprintf(s, "(%.v)", n.List)
		}

	case OREAL,
		OIMAG,
		OAPPEND,
		OCAP,
		OCLOSE,
		ODELETE,
		OLEN,
		OMAKE,
		ONEW,
		OPANIC,
		ORECOVER,
		OALIGNOF,
		OOFFSETOF,
		OSIZEOF,
		OPRINT,
		OPRINTN:
		if n.Left != nil {
			fmt.Fprintf(s, "%#v(%v)", n.Op, n.Left)
			return
		}
		if n.Isddd() {
			fmt.Fprintf(s, "%#v(%.v...)", n.Op, n.List)
			return
		}
		fmt.Fprintf(s, "%#v(%.v)", n.Op, n.List)

	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
		n.Left.exprfmt(s, nprec)
		if n.Isddd() {
			fmt.Fprintf(s, "(%.v...)", n.List)
			return
		}
		fmt.Fprintf(s, "(%.v)", n.List)

	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
		if n.List.Len() != 0 { // pre-typecheck
			fmt.Fprintf(s, "make(%v, %.v)", n.Type, n.List)
			return
		}
		if n.Right != nil {
			fmt.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right)
			return
		}
		if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) {
			fmt.Fprintf(s, "make(%v, %v)", n.Type, n.Left)
			return
		}
		fmt.Fprintf(s, "make(%v)", n.Type)

	// Unary
	case OPLUS,
		OMINUS,
		OADDR,
		OCOM,
		OIND,
		ONOT,
		ORECV:
		fmt.Fprint(s, n.Op.GoString()) // %#v
		// Separate doubled operators (e.g. "- -x") so they don't merge.
		if n.Left.Op == n.Op {
			fmt.Fprint(s, " ")
		}
		n.Left.exprfmt(s, nprec+1)

	// Binary
	case OADD,
		OAND,
		OANDAND,
		OANDNOT,
		ODIV,
		OEQ,
		OGE,
		OGT,
		OLE,
		OLT,
		OLSH,
		OMOD,
		OMUL,
		ONE,
		OOR,
		OOROR,
		ORSH,
		OSEND,
		OSUB,
		OXOR:
		n.Left.exprfmt(s, nprec)
		fmt.Fprintf(s, " %#v ", n.Op)
		n.Right.exprfmt(s, nprec+1)

	case OADDSTR:
		i := 0
		for _, n1 := range n.List.Slice() {
			if i != 0 {
				fmt.Fprint(s, " + ")
			}
			n1.exprfmt(s, nprec)
			i++
		}

	case OCMPSTR, OCMPIFACE:
		n.Left.exprfmt(s, nprec)
		// TODO(marvin): Fix Node.EType type union.
		fmt.Fprintf(s, " %#v ", Op(n.Etype))
		n.Right.exprfmt(s, nprec+1)

	default:
		fmt.Fprintf(s, "<node %v>", n.Op)
	}
}
// nodefmt formats n onto s in error-message (expression/statement) form.
// When flag&FmtLong is set and a type is known, " (type T)" is appended.
func (n *Node) nodefmt(s fmt.State, flag FmtFlag) {
	t := n.Type

	// we almost always want the original, except in export mode for literals
	// this saves the importer some work, and avoids us having to redo some
	// special casing for package unsafe
	if n.Op != OLITERAL && n.Orig != nil {
		n = n.Orig
	}

	if flag&FmtLong != 0 && t != nil {
		if t.Etype == TNIL {
			fmt.Fprint(s, "nil")
		} else {
			fmt.Fprintf(s, "%v (type %v)", n, t)
		}
		return
	}

	// TODO inlining produces expressions with ninits. we can't print these yet.

	// Ops with negative precedence are statements; everything else is
	// printed as an expression.
	if opprec[n.Op] < 0 {
		n.stmtfmt(s)
		return
	}

	n.exprfmt(s, 0)
}
// nodedump writes a debug dump of n to s. Unless flag&FmtShort is set it
// recurses into initializers, children, and bodies, indenting each level
// via the global dumpdepth counter.
func (n *Node) nodedump(s fmt.State, flag FmtFlag) {
	if n == nil {
		return
	}

	recur := flag&FmtShort == 0

	if recur {
		indent(s)
		// Cap the dump depth so deep or cyclic trees stay readable.
		if dumpdepth > 40 {
			fmt.Fprint(s, "...")
			return
		}

		if n.Ninit.Len() != 0 {
			fmt.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
			indent(s)
		}
	}

	switch n.Op {
	default:
		fmt.Fprintf(s, "%v%j", n.Op, n)

	case OINDREGSP:
		fmt.Fprintf(s, "%v-SP%j", n.Op, n)

	case OLITERAL:
		fmt.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)

	case ONAME, ONONAME:
		if n.Sym != nil {
			fmt.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
		} else {
			fmt.Fprintf(s, "%v%j", n.Op, n)
		}
		// Also show the declared type expression when no type is set yet.
		if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
			indent(s)
			fmt.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
		}

	case OASOP:
		fmt.Fprintf(s, "%v-%v%j", n.Op, Op(n.Etype), n)

	case OTYPE:
		fmt.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
		if recur && n.Type == nil && n.Name.Param.Ntype != nil {
			indent(s)
			fmt.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
		}
	}

	// Trailing symbol/type info (ONAME already printed its symbol above).
	if n.Sym != nil && n.Op != ONAME {
		fmt.Fprintf(s, " %v", n.Sym)
	}

	if n.Type != nil {
		fmt.Fprintf(s, " %v", n.Type)
	}

	if recur {
		if n.Left != nil {
			fmt.Fprintf(s, "%v", n.Left)
		}
		if n.Right != nil {
			fmt.Fprintf(s, "%v", n.Right)
		}
		if n.List.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-list%v", n.Op, n.List)
		}
		if n.Rlist.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
		}
		if n.Nbody.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
		}
	}
}
// Format implements fmt.Formatter for symbols.
// Supported verbs are %v and %S; "%S" suppresses qualifying with package.
// Any other verb produces a diagnostic placeholder.
func (s *Sym) Format(f fmt.State, verb rune) {
	if verb != 'v' && verb != 'S' {
		fmt.Fprintf(f, "%%!%c(*Sym=%p)", verb, s)
		return
	}
	fmt.Fprint(f, s.sconv(fmtFlag(f, verb)))
}
// String implements fmt.Stringer, formatting s with default flags.
func (s *Sym) String() string {
	return s.sconv(0)
}
// See #16897 before changing the implementation of sconv.

// sconv returns the string representation of s under the given flags.
// It temporarily switches the global formatting mode via setfmode and
// restores it before returning.
func (s *Sym) sconv(flag FmtFlag) string {
	// FmtLong is not a valid flag for symbol formatting.
	if flag&FmtLong != 0 {
		panic("linksymfmt")
	}

	if s == nil {
		return "<S>"
	}

	if s.Name == "_" {
		return "_"
	}

	sm := setfmode(&flag)
	str := s.symfmt(flag)
	fmtmode = sm
	return str
}
// String implements fmt.Stringer, formatting t with default flags.
func (t *Type) String() string {
	return t.tconv(0)
}
// ShortString generates a short description of t.
// It is used in autogenerated method names, reflection,
// and itab names.
//
// It may only be called in FErr mode; any other mode is a fatal error.
func (t *Type) ShortString() string {
	if fmtmode != FErr {
		Fatalf("ShortString fmtmode %v", fmtmode)
	}
	return t.tconv(FmtLeft)
}
// LongString generates a complete description of t.
// It is useful for reflection,
// or when a unique fingerprint or hash of a type is required.
//
// It may only be called in FErr mode; any other mode is a fatal error.
func (t *Type) LongString() string {
	if fmtmode != FErr {
		Fatalf("LongString fmtmode %v", fmtmode)
	}
	return t.tconv(FmtLeft | FmtUnsigned)
}
// fldconv returns the string representation of struct field / function
// parameter f under the given flags: "name type" plus, for struct fields,
// a quoted tag suffix. FmtShort suppresses the name.
func fldconv(f *Field, flag FmtFlag) string {
	if f == nil {
		return "<T>"
	}

	unsigned := flag&FmtUnsigned != 0
	sm := setfmode(&flag)

	// In FTypeId mode FmtUnsigned requests package-qualified naming;
	// nesting is tracked through the global fmtpkgpfx counter.
	if fmtmode == FTypeId && unsigned {
		fmtpkgpfx++
	}
	if fmtpkgpfx != 0 {
		flag |= FmtUnsigned
	}

	var name string
	if flag&FmtShort == 0 {
		s := f.Sym

		// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
		// ~r%d is a (formerly) unnamed result.
		if fmtmode == FErr && f.Nname != nil {
			if f.Nname.Orig != nil {
				s = f.Nname.Orig.Sym
				if s != nil && s.Name[0] == '~' {
					if s.Name[1] == 'r' { // originally an unnamed result
						s = nil
					} else if s.Name[1] == 'b' { // originally the blank identifier _
						s = lookup("_")
					}
				}
			} else {
				s = nil
			}
		}

		if s != nil && f.Embedded == 0 {
			if f.Funarg != FunargNone {
				name = f.Nname.String()
			} else if flag&FmtLong != 0 {
				name = fmt.Sprintf("%0S", s)
				if !exportname(name) && flag&FmtUnsigned == 0 {
					name = s.String() // qualify non-exported names (used on structs, not on funarg)
				}
			} else {
				name = s.String()
			}
		}
	}

	// Variadic parameters print as "...elemtype".
	var typ string
	if f.Isddd() {
		typ = fmt.Sprintf("...%v", f.Type.Elem())
	} else {
		typ = fmt.Sprintf("%v", f.Type)
	}

	str := typ
	if name != "" {
		str = name + " " + typ
	}

	// Struct field tags are appended as a quoted string.
	if flag&FmtShort == 0 && f.Funarg == FunargNone && f.Note != "" {
		str += " " + strconv.Quote(f.Note)
	}

	if fmtmode == FTypeId && unsigned {
		fmtpkgpfx--
	}

	// Restore the formatting mode saved by setfmode.
	fmtmode = sm
	return str
}
// Format implements fmt.Formatter for types.
// "%L": print definition, not name.
// "%S": omit 'func' and receiver from function types, short type names.
// Any other verb produces a diagnostic placeholder.
func (t *Type) Format(s fmt.State, verb rune) {
	if verb == 'v' || verb == 'S' || verb == 'L' {
		fmt.Fprint(s, t.tconv(fmtFlag(s, verb)))
		return
	}
	fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
}
// See #16897 before changing the implementation of tconv.

// tconv returns the string representation of t under the given flags,
// bounding recursion so self-referential types cannot loop forever.
func (t *Type) tconv(flag FmtFlag) string {
	if t == nil {
		return "<T>"
	}

	// Bound recursion through the type graph; deeper levels print "<...>".
	if t.Trecur > 4 {
		return "<...>"
	}

	t.Trecur++

	unsigned := flag&FmtUnsigned != 0
	sm := setfmode(&flag)

	// In FTypeId mode FmtUnsigned requests package-qualified naming;
	// nesting is tracked through the global fmtpkgpfx counter.
	if fmtmode == FTypeId && unsigned {
		fmtpkgpfx++
	}
	if fmtpkgpfx != 0 {
		flag |= FmtUnsigned
	}

	str := t.typefmt(flag)

	if fmtmode == FTypeId && unsigned {
		fmtpkgpfx--
	}

	// Restore the saved mode and recursion depth before returning.
	fmtmode = sm
	t.Trecur--
	return str
}
// String implements fmt.Stringer by delegating to fmt.Sprint, which
// in turn uses the node's fmt.Formatter machinery.
func (n *Node) String() string {
	return fmt.Sprint(n)
}
// "%L" suffix with "(type %T)" where possible
// "%+S" in debug mode, don't recurse, no multiline output

// nconv formats n onto s according to the current global fmtmode:
// FErr prints error-message form, FDbg prints a debug dump.
// Any other mode is a fatal error.
func (n *Node) nconv(s fmt.State, flag FmtFlag) {
	if n == nil {
		fmt.Fprint(s, "<N>")
		return
	}

	sm := setfmode(&flag)

	switch fmtmode {
	case FErr:
		n.nodefmt(s, flag)

	case FDbg:
		// Track nesting depth for indentation in dump output.
		dumpdepth++
		n.nodedump(s, flag)
		dumpdepth--

	default:
		Fatalf("unhandled %%N mode: %d", fmtmode)
	}

	// Restore the formatting mode saved by setfmode.
	fmtmode = sm
}
// Format implements fmt.Formatter for node lists; only %v is supported.
// Any other verb produces a diagnostic placeholder.
func (l Nodes) Format(s fmt.State, verb rune) {
	if verb != 'v' {
		fmt.Fprintf(s, "%%!%c(Nodes)", verb)
		return
	}
	l.hconv(s, fmtFlag(s, verb))
}
// String implements fmt.Stringer by delegating to fmt.Sprint, which
// uses the list's fmt.Formatter implementation.
func (n Nodes) String() string {
	return fmt.Sprint(n)
}
// Flags: all those of %N plus '.': separate with comma's instead of semicolons.

// hconv formats the node list l onto s. Elements are separated by "; "
// normally, by newlines in FDbg mode, and by ", " when FmtComma is set.
func (l Nodes) hconv(s fmt.State, flag FmtFlag) {
	// In debug mode make an empty list visible.
	if l.Len() == 0 && fmtmode == FDbg {
		fmt.Fprint(s, "<nil>")
		return
	}

	sm := setfmode(&flag)
	sep := "; "
	if fmtmode == FDbg {
		sep = "\n"
	} else if flag&FmtComma != 0 {
		sep = ", "
	}

	for i, n := range l.Slice() {
		fmt.Fprint(s, n)
		// No separator after the last element.
		if i+1 < l.Len() {
			fmt.Fprint(s, sep)
		}
	}

	// Restore the formatting mode saved by setfmode.
	fmtmode = sm
}
// dumplist prints the label s followed by the debug dump (%+v) of list l.
func dumplist(s string, l Nodes) {
	fmt.Printf("%s%+v\n", s, l)
}

// Dump prints the label s, n's address, and the debug dump (%+v) of n.
func Dump(s string, n *Node) {
	fmt.Printf("%s [%p]%+v\n", s, n, n)
}
// TODO(gri) make variable local somehow
// dumpdepth is the current nesting level used by indent during debug dumps.
var dumpdepth int

// indent prints a newline followed by dumpdepth levels of ". " to s.
func indent(s fmt.State) {
	fmt.Fprint(s, "\n")
	for i := 0; i < dumpdepth; i++ {
		fmt.Fprint(s, ". ")
	}
}
|
// +build local
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"bufio"
"context"
"io"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"github.com/docker/compose-cli/api/compose"
"github.com/docker/compose-cli/api/containers"
"github.com/docker/compose-cli/api/resources"
"github.com/docker/compose-cli/api/secrets"
"github.com/docker/compose-cli/api/volumes"
"github.com/docker/compose-cli/backend"
"github.com/docker/compose-cli/context/cloud"
"github.com/docker/compose-cli/errdefs"
)
// local is a backend implementation that talks to a local Docker engine
// through the standard Docker API client.
type local struct {
	apiClient *client.Client
}

// init registers this backend under the "local" context type so it can
// be selected at runtime.
func init() {
	backend.Register("local", "local", service, cloud.NotImplementedCloudService)
}
// service constructs the local backend, configuring the Docker API
// client from the environment (DOCKER_HOST etc.).
func service(ctx context.Context) (backend.Service, error) {
	apiClient, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return nil, err
	}
	return &local{apiClient: apiClient}, nil
}
// ContainerService exposes this backend's container operations.
func (ms *local) ContainerService() containers.Service {
	return ms
}

// ComposeService is not implemented by the local backend.
func (ms *local) ComposeService() compose.Service {
	return nil
}

// SecretsService is not implemented by the local backend.
func (ms *local) SecretsService() secrets.Service {
	return nil
}

// VolumeService is not implemented by the local backend.
func (ms *local) VolumeService() volumes.Service {
	return nil
}

// ResourceService is not implemented by the local backend.
func (ms *local) ResourceService() resources.Service {
	return nil
}
// Inspect returns the details of the container identified by id.
func (ms *local) Inspect(ctx context.Context, id string) (containers.Container, error) {
	c, err := ms.apiClient.ContainerInspect(ctx, id)
	if err != nil {
		return containers.Container{}, err
	}

	// State and Config may be nil in the engine's response; guard both.
	var status string
	if c.State != nil {
		status = c.State.Status
	}

	var command string
	if c.Config != nil && c.Config.Cmd != nil {
		command = strings.Join(c.Config.Cmd, " ")
	}

	return containers.Container{
		ID:         stringid.TruncateID(c.ID),
		Status:     status,
		Image:      c.Image,
		Command:    command,
		Platform:   c.Platform,
		Config:     toRuntimeConfig(&c),
		HostConfig: toHostConfig(&c),
	}, nil
}
// List returns the containers known to the local engine; when all is
// false only running containers are included.
func (ms *local) List(ctx context.Context, all bool) ([]containers.Container, error) {
	css, err := ms.apiClient.ContainerList(ctx, types.ContainerListOptions{
		All: all,
	})
	if err != nil {
		return []containers.Container{}, err
	}

	var result []containers.Container
	// The loop variable was previously named `container`, shadowing the
	// imported api/types/container package; renamed to avoid confusion.
	for _, c := range css {
		result = append(result, containers.Container{
			ID:    stringid.TruncateID(c.ID),
			Image: c.Image,
			// TODO: `Status` is a human readable string ("Up 24 minutes"),
			// we need to return the `State` instead but first we need to
			// define an enum on the proto side with all the possible container
			// statuses. We also need to add a `Created` property on the gRPC side.
			Status:  c.Status,
			Command: c.Command,
			Ports:   toPorts(c.Ports),
		})
	}
	return result, nil
}
// Run creates and starts a container described by r. If the image is
// missing locally it is pulled and the create is retried once.
func (ms *local) Run(ctx context.Context, r containers.ContainerConfig) error {
	exposedPorts, hostBindings, err := fromPorts(r.Ports)
	if err != nil {
		return err
	}

	containerConfig := &container.Config{
		Image:        r.Image,
		Labels:       r.Labels,
		Env:          r.Environment,
		ExposedPorts: exposedPorts,
	}
	hostConfig := &container.HostConfig{
		PortBindings: hostBindings,
		AutoRemove:   r.AutoRemove,
		Resources: container.Resources{
			NanoCPUs: int64(r.CPULimit * 1e9),
			Memory:   int64(r.MemLimit),
		},
	}

	created, err := ms.apiClient.ContainerCreate(ctx, containerConfig, hostConfig, nil, r.ID)
	if err != nil {
		if !client.IsErrNotFound(err) {
			return err
		}

		// Image not found locally: pull it, then retry the create once.
		// The pull reader was previously bound to a variable named `io`,
		// shadowing the io package; renamed to avoid that.
		pull, err := ms.apiClient.ImagePull(ctx, r.Image, types.ImagePullOptions{})
		if err != nil {
			return err
		}
		scanner := bufio.NewScanner(pull)
		// Read the whole body, otherwise the pulling stops
		for scanner.Scan() {
		}
		if err := scanner.Err(); err != nil {
			pull.Close() // don't leak the pull body on a read error (previously leaked)
			return err
		}
		if err := pull.Close(); err != nil {
			return err
		}

		created, err = ms.apiClient.ContainerCreate(ctx, containerConfig, hostConfig, nil, r.ID)
		if err != nil {
			return err
		}
	}

	return ms.apiClient.ContainerStart(ctx, created.ID, types.ContainerStartOptions{})
}
// Start starts an existing container.
func (ms *local) Start(ctx context.Context, containerID string) error {
	return ms.apiClient.ContainerStart(ctx, containerID, types.ContainerStartOptions{})
}

// Stop stops a running container. A nil timeout lets the engine apply
// its default grace period; otherwise timeout is interpreted in seconds.
func (ms *local) Stop(ctx context.Context, containerID string, timeout *uint32) error {
	var t *time.Duration
	if timeout != nil {
		timeoutValue := time.Duration(*timeout) * time.Second
		t = &timeoutValue
	}
	return ms.apiClient.ContainerStop(ctx, containerID, t)
}

// Kill sends the given signal to a container.
func (ms *local) Kill(ctx context.Context, containerID string, signal string) error {
	return ms.apiClient.ContainerKill(ctx, containerID, signal)
}
// Exec runs request.Command inside the named container with a TTY,
// wiring request.Stdin/request.Stdout to the exec session. It returns
// the error of whichever copy direction finishes first.
func (ms *local) Exec(ctx context.Context, name string, request containers.ExecRequest) error {
	cec, err := ms.apiClient.ContainerExecCreate(ctx, name, types.ExecConfig{
		Cmd:          []string{request.Command},
		Tty:          true,
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		return err
	}
	resp, err := ms.apiClient.ContainerExecAttach(ctx, cec.ID, types.ExecStartCheck{
		Tty: true,
	})
	if err != nil {
		return err
	}
	defer resp.Close()

	readChannel := make(chan error, 10)
	writeChannel := make(chan error, 10)

	// Pump each direction in its own goroutine; each reports its
	// completion (or error) on its channel.
	go func() {
		_, err := io.Copy(request.Stdout, resp.Reader)
		readChannel <- err
	}()
	go func() {
		_, err := io.Copy(resp.Conn, request.Stdin)
		writeChannel <- err
	}()

	// Both cases return, so a bare select suffices; the previous
	// enclosing for-loop could never execute a second iteration.
	select {
	case err := <-readChannel:
		return err
	case err := <-writeChannel:
		return err
	}
}
// Logs streams the logs of containerName to request.Writer, following
// them when request.Follow is set.
func (ms *local) Logs(ctx context.Context, containerName string, request containers.LogsRequest) error {
	// Inspect first: whether the container has a TTY decides the copy
	// strategy below.
	c, err := ms.apiClient.ContainerInspect(ctx, containerName)
	if err != nil {
		return err
	}
	r, err := ms.apiClient.ContainerLogs(ctx, containerName, types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     request.Follow,
	})
	if err != nil {
		return err
	}
	// nolint errcheck
	defer r.Close()

	if c.Config.Tty {
		// TTY: stdout and stderr arrive as one raw stream.
		_, err = io.Copy(request.Writer, r)
	} else {
		// Non-TTY: the stream is multiplexed; stdcopy splits the frames.
		_, err = stdcopy.StdCopy(request.Writer, request.Writer, r)
	}
	return err
}
// Delete removes a container; request.Force removes it even if running.
// A missing container is reported as errdefs.ErrNotFound.
func (ms *local) Delete(ctx context.Context, containerID string, request containers.DeleteRequest) error {
	removeErr := ms.apiClient.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{
		Force: request.Force,
	})
	if client.IsErrNotFound(removeErr) {
		return errors.Wrapf(errdefs.ErrNotFound, "container %q", containerID)
	}
	return removeErr
}
backend.local: Add restart policy support to run
Signed-off-by: Chris Crone <0bd24f27efbdecb22ed8ee46cb16be12b5608d69@docker.com>
// +build local
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"bufio"
"context"
"io"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"github.com/docker/compose-cli/api/compose"
"github.com/docker/compose-cli/api/containers"
"github.com/docker/compose-cli/api/resources"
"github.com/docker/compose-cli/api/secrets"
"github.com/docker/compose-cli/api/volumes"
"github.com/docker/compose-cli/backend"
"github.com/docker/compose-cli/context/cloud"
"github.com/docker/compose-cli/errdefs"
)
// local is a backend implementation that talks to a local Docker engine
// through the standard Docker API client.
type local struct {
	apiClient *client.Client
}

// init registers this backend under the "local" context type so it can
// be selected at runtime.
func init() {
	backend.Register("local", "local", service, cloud.NotImplementedCloudService)
}

// service constructs the local backend, configuring the Docker API
// client from the environment (DOCKER_HOST etc.).
func service(ctx context.Context) (backend.Service, error) {
	apiClient, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return nil, err
	}
	return &local{
		apiClient,
	}, nil
}
// ContainerService exposes this backend's container operations.
func (ms *local) ContainerService() containers.Service {
	return ms
}

// ComposeService is not implemented by the local backend.
func (ms *local) ComposeService() compose.Service {
	return nil
}

// SecretsService is not implemented by the local backend.
func (ms *local) SecretsService() secrets.Service {
	return nil
}

// VolumeService is not implemented by the local backend.
func (ms *local) VolumeService() volumes.Service {
	return nil
}

// ResourceService is not implemented by the local backend.
func (ms *local) ResourceService() resources.Service {
	return nil
}
// Inspect returns the details of the container identified by id.
func (ms *local) Inspect(ctx context.Context, id string) (containers.Container, error) {
	c, err := ms.apiClient.ContainerInspect(ctx, id)
	if err != nil {
		return containers.Container{}, err
	}

	// State and Config may be nil in the engine's response; guard both.
	status := ""
	if c.State != nil {
		status = c.State.Status
	}

	command := ""
	if c.Config != nil &&
		c.Config.Cmd != nil {
		command = strings.Join(c.Config.Cmd, " ")
	}

	rc := toRuntimeConfig(&c)
	hc := toHostConfig(&c)

	return containers.Container{
		ID:         stringid.TruncateID(c.ID),
		Status:     status,
		Image:      c.Image,
		Command:    command,
		Platform:   c.Platform,
		Config:     rc,
		HostConfig: hc,
	}, nil
}
// List returns the containers known to the local engine; when all is
// false only running containers are included.
func (ms *local) List(ctx context.Context, all bool) ([]containers.Container, error) {
	css, err := ms.apiClient.ContainerList(ctx, types.ContainerListOptions{
		All: all,
	})
	if err != nil {
		return []containers.Container{}, err
	}

	var result []containers.Container
	// The loop variable was previously named `container`, shadowing the
	// imported api/types/container package; renamed to avoid confusion.
	for _, c := range css {
		result = append(result, containers.Container{
			ID:    stringid.TruncateID(c.ID),
			Image: c.Image,
			// TODO: `Status` is a human readable string ("Up 24 minutes"),
			// we need to return the `State` instead but first we need to
			// define an enum on the proto side with all the possible container
			// statuses. We also need to add a `Created` property on the gRPC side.
			Status:  c.Status,
			Command: c.Command,
			Ports:   toPorts(c.Ports),
		})
	}
	return result, nil
}
// Run creates and starts a container described by r, applying its
// restart policy. If the image is missing locally it is pulled and the
// create is retried once.
func (ms *local) Run(ctx context.Context, r containers.ContainerConfig) error {
	exposedPorts, hostBindings, err := fromPorts(r.Ports)
	if err != nil {
		return err
	}

	containerConfig := &container.Config{
		Image:        r.Image,
		Labels:       r.Labels,
		Env:          r.Environment,
		ExposedPorts: exposedPorts,
	}
	hostConfig := &container.HostConfig{
		PortBindings:  hostBindings,
		AutoRemove:    r.AutoRemove,
		RestartPolicy: toRestartPolicy(r.RestartPolicyCondition),
		Resources: container.Resources{
			NanoCPUs: int64(r.CPULimit * 1e9),
			Memory:   int64(r.MemLimit),
		},
	}

	created, err := ms.apiClient.ContainerCreate(ctx, containerConfig, hostConfig, nil, r.ID)
	if err != nil {
		if !client.IsErrNotFound(err) {
			return err
		}

		// Image not found locally: pull it, then retry the create once.
		// The pull reader was previously bound to a variable named `io`,
		// shadowing the io package; renamed to avoid that.
		pull, err := ms.apiClient.ImagePull(ctx, r.Image, types.ImagePullOptions{})
		if err != nil {
			return err
		}
		scanner := bufio.NewScanner(pull)
		// Read the whole body, otherwise the pulling stops
		for scanner.Scan() {
		}
		if err := scanner.Err(); err != nil {
			pull.Close() // don't leak the pull body on a read error (previously leaked)
			return err
		}
		if err := pull.Close(); err != nil {
			return err
		}

		created, err = ms.apiClient.ContainerCreate(ctx, containerConfig, hostConfig, nil, r.ID)
		if err != nil {
			return err
		}
	}

	return ms.apiClient.ContainerStart(ctx, created.ID, types.ContainerStartOptions{})
}
// Start starts an existing container.
func (ms *local) Start(ctx context.Context, containerID string) error {
	return ms.apiClient.ContainerStart(ctx, containerID, types.ContainerStartOptions{})
}

// Stop stops a running container. A nil timeout lets the engine apply
// its default grace period; otherwise timeout is interpreted in seconds.
func (ms *local) Stop(ctx context.Context, containerID string, timeout *uint32) error {
	var t *time.Duration
	if timeout != nil {
		timeoutValue := time.Duration(*timeout) * time.Second
		t = &timeoutValue
	}
	return ms.apiClient.ContainerStop(ctx, containerID, t)
}

// Kill sends the given signal to a container.
func (ms *local) Kill(ctx context.Context, containerID string, signal string) error {
	return ms.apiClient.ContainerKill(ctx, containerID, signal)
}
// Exec runs request.Command inside the named container with a TTY,
// wiring request.Stdin/request.Stdout to the exec session. It returns
// the error of whichever copy direction finishes first.
func (ms *local) Exec(ctx context.Context, name string, request containers.ExecRequest) error {
	cec, err := ms.apiClient.ContainerExecCreate(ctx, name, types.ExecConfig{
		Cmd:          []string{request.Command},
		Tty:          true,
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		return err
	}
	resp, err := ms.apiClient.ContainerExecAttach(ctx, cec.ID, types.ExecStartCheck{
		Tty: true,
	})
	if err != nil {
		return err
	}
	defer resp.Close()

	readChannel := make(chan error, 10)
	writeChannel := make(chan error, 10)

	// Pump each direction in its own goroutine; each reports its
	// completion (or error) on its channel.
	go func() {
		_, err := io.Copy(request.Stdout, resp.Reader)
		readChannel <- err
	}()
	go func() {
		_, err := io.Copy(resp.Conn, request.Stdin)
		writeChannel <- err
	}()

	// Both cases return, so a bare select suffices; the previous
	// enclosing for-loop could never execute a second iteration.
	select {
	case err := <-readChannel:
		return err
	case err := <-writeChannel:
		return err
	}
}
// Logs streams the logs of containerName to request.Writer, following
// them when request.Follow is set.
func (ms *local) Logs(ctx context.Context, containerName string, request containers.LogsRequest) error {
	// Inspect first: whether the container has a TTY decides the copy
	// strategy below.
	c, err := ms.apiClient.ContainerInspect(ctx, containerName)
	if err != nil {
		return err
	}
	r, err := ms.apiClient.ContainerLogs(ctx, containerName, types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     request.Follow,
	})
	if err != nil {
		return err
	}
	// nolint errcheck
	defer r.Close()

	if c.Config.Tty {
		// TTY: stdout and stderr arrive as one raw stream.
		_, err = io.Copy(request.Writer, r)
	} else {
		// Non-TTY: the stream is multiplexed; stdcopy splits the frames.
		_, err = stdcopy.StdCopy(request.Writer, request.Writer, r)
	}
	return err
}
// Delete removes a container; request.Force removes it even if running.
// A missing container is reported as errdefs.ErrNotFound.
func (ms *local) Delete(ctx context.Context, containerID string, request containers.DeleteRequest) error {
	err := ms.apiClient.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{
		Force: request.Force,
	})
	if client.IsErrNotFound(err) {
		return errors.Wrapf(errdefs.ErrNotFound, "container %q", containerID)
	}
	return err
}
|
package udata
//---------------------------------------------------------------------------
// CoreOS master user data:
//---------------------------------------------------------------------------
const templMaster = `#cloud-config
hostname: "master-{{.HostID}}.{{.Domain}}"
write_files:
- path: "/etc/hosts"
content: |
127.0.0.1 localhost
$private_ipv4 master-{{.HostID}}.{{.Domain}} master-{{.HostID}}
$private_ipv4 master-{{.HostID}}.int.{{.Domain}} master-{{.HostID}}.int
- path: "/etc/.hosts"
content: |
127.0.0.1 localhost
$private_ipv4 master-{{.HostID}}.{{.Domain}} master-{{.HostID}}
$private_ipv4 master-{{.HostID}}.int.{{.Domain}} master-{{.HostID}}.int
- path: "/etc/resolv.conf"
content: |
search {{.Domain}}
nameserver 8.8.8.8
- path: "/etc/kato.env"
content: |
KATO_CLUSTER_ID={{.ClusterID}}
KATO_MASTER_COUNT={{.MasterCount}}
KATO_ROLE={{.Role}}
KATO_HOST_ID={{.HostID}}
KATO_ZK={{.ZkServers}}
{{if .CaCert}}- path: "/etc/ssl/certs/{{.ClusterID}}.pem"
content: |
{{.CaCert}}
{{- end}}
- path: "/etc/rexray/rexray.env"
- path: "/etc/rexray/config.yml"
{{- if .RexrayStorageDriver }}
content: |
rexray:
storageDrivers:
- {{.RexrayStorageDriver}}
{{.RexrayConfigSnippet}}
{{- end}}
- path: "/home/core/.bashrc"
owner: "core:core"
content: |
[[ $- != *i* ]] && return
alias ls='ls -hF --color=auto --group-directories-first'
alias l='ls -l'
alias ll='ls -la'
alias grep='grep --color=auto'
alias dim='docker images'
alias dps='docker ps'
alias drm='docker rm -v $(docker ps -qaf status=exited)'
alias drmi='docker rmi $(docker images -qf dangling=true)'
alias drmv='docker volume rm $(docker volume ls -qf dangling=true)'
- path: "/home/core/.aws/config"
owner: "core:core"
permissions: "0644"
content: |
[default]
region = {{.Ec2Region}}
- path: "/etc/ssh/sshd_config"
permissions: "0600"
content: |
UsePrivilegeSeparation sandbox
Subsystem sftp internal-sftp
ClientAliveInterval 180
UseDNS no
PermitRootLogin no
AllowUsers core
PasswordAuthentication no
ChallengeResponseAuthentication no
- path: "/opt/bin/ns1dns"
permissions: "0755"
content: |
#!/bin/bash
readonly HOST="$(hostname -s)"
readonly DOMAIN="$(hostname -d)"
readonly APIURL='https://api.nsone.net/v1'
readonly APIKEY='{{.Ns1ApiKey}}'
readonly IP_PUB="$(dig +short myip.opendns.com @resolver1.opendns.com)"
readonly IP_PRI="$(hostname -i)"
declare -A IP=(['ext']="${IP_PUB}" ['int']="${IP_PRI}")
for i in ext int; do
curl -sX GET -H "X-NSONE-Key: ${APIKEY}" \
${APIURL}/zones/${i}.${DOMAIN}/${HOST}.${i}.${DOMAIN}/A | \
grep -q 'record not found' && METHOD='PUT' || METHOD='POST'
curl -sX ${METHOD} -H "X-NSONE-Key: ${APIKEY}" \
${APIURL}/zones/${i}.${DOMAIN}/${HOST}.${i}.${DOMAIN}/A -d "{
\"zone\":\"${i}.${DOMAIN}\",
\"domain\":\"${HOST}.${i}.${DOMAIN}\",
\"type\":\"A\",
\"answers\":[{\"answer\":[\"${IP[${i}]}\"]}]}"
done
- path: "/opt/bin/etchost"
permissions: "0755"
content: |
#!/bin/bash
source /etc/kato.env
PUSH+=$(echo $(hostname -i) $(hostname -f) $(hostname -s))$'\n'
PUSH+=$(echo $(hostname -i) $(hostname -s).int.$(hostname -d) $(hostname -s).int)
etcdctl set /hosts/${KATO_ROLE}/$(hostname -f) "${PUSH}"
KEYS=$(etcdctl ls --recursive /hosts | grep $(hostname -d) | grep -v $(hostname -f) | sort)
for i in $KEYS; do PULL+=$(etcdctl get ${i})$'\n'; done
cat /etc/.hosts > /etc/hosts
echo "${PULL}" >> /etc/hosts
- path: "/opt/bin/loopssh"
permissions: "0755"
content: |
#!/bin/bash
A=$(fleetctl list-machines -fields=ip -no-legend)
for i in $A; do ssh -o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no $i -C "$*"; done
- path: "/opt/bin/awscli"
permissions: "0755"
content: |
#!/bin/bash
docker run -i --rm \
--volume /home/core/.aws:/root/.aws:ro \
--volume ${PWD}:/aws \
h0tbird/awscli "${@}"
- path: "/etc/prometheus/targets/prometheus.yml"
- path: "/etc/prometheus/prometheus.yml"
permissions: "0600"
content: |
global:
scrape_interval: 1m
scrape_timeout: 10s
evaluation_interval: 10s
rule_files:
- /etc/prometheus/prometheus.rules
scrape_configs:
- job_name: 'prometheus'
scrape_interval: 10s
file_sd_configs:
- files:
- /etc/prometheus/targets/prometheus.yml
- path: "/etc/fleet/zookeeper.service"
content: |
[Unit]
Description=Zookeeper
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill zookeeper
ExecStartPre=-/usr/bin/docker rm zookeeper
ExecStartPre=-/usr/bin/docker pull h0tbird/zookeeper:v3.4.8-2
ExecStart=/usr/bin/sh -c "docker run \
--net host \
--name zookeeper \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env ZK_SERVER_ID=${KATO_HOST_ID} \
--env ZK_TICK_TIME=2000 \
--env ZK_INIT_LIMIT=5 \
--env ZK_SYNC_LIMIT=2 \
--env ZK_SERVERS=$${KATO_ZK//:2181/} \
--env ZK_DATA_DIR=/var/lib/zookeeper \
--env ZK_CLIENT_PORT=2181 \
--env ZK_CLIENT_PORT_ADDRESS=$(hostname -i) \
--env JMXDISABLE=true \
h0tbird/zookeeper:v3.4.8-2"
ExecStop=/usr/bin/docker stop -t 5 zookeeper
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/prometheus.service"
content: |
[Unit]
Description=Prometheus Service
After=docker.service rexray.service
Requires=docker.service rexray.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill prometheus
ExecStartPre=-/usr/bin/docker rm -f prometheus
ExecStartPre=-/usr/bin/docker pull prom/prometheus:0.20.0
ExecStartPre=-/usr/bin/docker volume create --name ${KATO_CLUSTER_ID}-prometheus-${KATO_HOST_ID} -d rexray
ExecStart=/usr/bin/sh -c "docker run \
--net host \
--name prometheus \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--volume /etc/prometheus:/etc/prometheus:ro \
--volume ${KATO_CLUSTER_ID}-prometheus-${KATO_HOST_ID}:/prometheus:rw \
prom/prometheus:0.20.0 \
-config.file=/etc/prometheus/prometheus.yml \
-storage.local.path=/prometheus \
-web.console.libraries=/etc/prometheus/console_libraries \
-web.console.templates=/etc/prometheus/consoles \
-web.listen-address=:9191"
ExecStop=/usr/bin/docker stop -t 5 prometheus
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/mesos-master.service"
content: |
[Unit]
Description=Mesos Master
After=docker.service zookeeper.service
Requires=docker.service zookeeper.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill mesos-master
ExecStartPre=-/usr/bin/docker rm mesos-master
ExecStartPre=-/usr/bin/docker pull mesosphere/mesos-master:0.28.1
ExecStart=/usr/bin/sh -c "docker run \
--privileged \
--name mesos-master \
--net host \
--volume /var/lib/mesos:/var/lib/mesos:rw \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
mesosphere/mesos-master:0.28.1 \
--ip=$(hostname -i) \
--zk=zk://${KATO_ZK}/mesos \
--work_dir=/var/lib/mesos/master \
--log_dir=/var/log/mesos \
--quorum=$(($KATO_MASTER_COUNT/2 + 1))"
ExecStop=/usr/bin/docker stop -t 5 mesos-master
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/mesos-node.service"
content: |
[Unit]
Description=Mesos Node
After=docker.service dnsmasq.service
Wants=dnsmasq.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill mesos-node
ExecStartPre=-/usr/bin/docker rm mesos-node
ExecStartPre=-/usr/bin/docker pull mesosphere/mesos-slave:0.28.1
ExecStart=/usr/bin/sh -c "docker run \
--privileged \
--net host \
--pid host \
--name mesos-node \
--volume /sys:/sys \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--volume /usr/bin/docker:/usr/bin/docker:ro \
--volume /var/run/docker.sock:/var/run/docker.sock:rw \
--volume /lib64/libdevmapper.so.1.02:/lib/libdevmapper.so.1.02:ro \
--volume /lib64/libsystemd.so.0:/lib/libsystemd.so.0:ro \
--volume /lib64/libgcrypt.so.20:/lib/libgcrypt.so.20:ro \
--volume /var/lib/mesos:/var/lib/mesos:rw \
--volume /etc/certs:/etc/certs:ro \
mesosphere/mesos-slave:0.28.1 \
--ip=$(hostname -i) \
--containerizers=docker \
--executor_registration_timeout=2mins \
--master=zk://${KATO_ZK}/mesos \
--work_dir=/var/lib/mesos/node \
--log_dir=/var/log/mesos/node"
ExecStop=/usr/bin/docker stop -t 5 mesos-node
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/mesos-dns.service"
content: |
[Unit]
Description=Mesos DNS
After=docker.service zookeeper.service mesos-master.service
Requires=docker.service zookeeper.service mesos-master.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill mesos-dns
ExecStartPre=-/usr/bin/docker rm mesos-dns
ExecStartPre=-/usr/bin/docker pull h0tbird/mesos-dns:v0.5.2-1
ExecStart=/usr/bin/sh -c "docker run \
--name mesos-dns \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env MDNS_ZK=zk://${KATO_ZK}/mesos \
--env MDNS_REFRESHSECONDS=45 \
--env MDNS_LISTENER=$(hostname -i) \
--env MDNS_HTTPON=false \
--env MDNS_TTL=45 \
--env MDNS_RESOLVERS=8.8.8.8 \
--env MDNS_DOMAIN=$(hostname -d | cut -d. -f-2).mesos \
--env MDNS_IPSOURCE=netinfo \
h0tbird/mesos-dns:v0.5.2-1"
ExecStartPost=/usr/bin/sh -c ' \
echo search $(hostname -d | cut -d. -f-2).mesos $(hostname -d) > /etc/resolv.conf && \
echo "nameserver $(hostname -i)" >> /etc/resolv.conf'
ExecStop=/usr/bin/sh -c ' \
echo search $(hostname -d) > /etc/resolv.conf && \
echo "nameserver 8.8.8.8" >> /etc/resolv.conf'
ExecStop=/usr/bin/docker stop -t 5 mesos-dns
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/marathon.service"
content: |
[Unit]
Description=Marathon
After=docker.service zookeeper.service mesos-master.service
Requires=docker.service zookeeper.service mesos-master.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill marathon
ExecStartPre=-/usr/bin/docker rm marathon
ExecStartPre=-/usr/bin/docker pull mesosphere/marathon:v1.1.1
ExecStart=/usr/bin/sh -c "docker run \
--name marathon \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env LIBPROCESS_IP=$(hostname -i) \
--env LIBPROCESS_PORT=9090 \
mesosphere/marathon:v1.1.1 \
--http_address $(hostname -i) \
--master zk://${KATO_ZK}/mesos \
--zk zk://${KATO_ZK}/marathon \
--task_launch_timeout 240000 \
--checkpoint"
ExecStop=/usr/bin/docker stop -t 5 marathon
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/marathon-lb.service"
content: |
[Unit]
Description=marathon-lb
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill marathon-lb
ExecStartPre=-/usr/bin/docker rm marathon-lb
ExecStartPre=-/usr/bin/docker pull mesosphere/marathon-lb:v1.3.0
ExecStart=/usr/bin/sh -c "docker run \
--name marathon-lb \
--net host \
--privileged \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env PORTS=9090,9091 \
mesosphere/marathon-lb:v1.3.0 sse \
--marathon http://marathon:8080 \
--health-check \
--group external \
--group internal"
ExecStop=/usr/bin/docker stop -t 5 marathon-lb
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/cadvisor.service"
content: |
[Unit]
Description=cAdvisor Service
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill cadvisor
ExecStartPre=-/usr/bin/docker rm -f cadvisor
ExecStartPre=-/usr/bin/docker pull google/cadvisor:v0.23.2
ExecStart=/usr/bin/sh -c "docker run \
--net host \
--name cadvisor \
--volume /:/rootfs:ro \
--volume /var/run:/var/run:rw \
--volume /sys:/sys:ro \
--volume /var/lib/docker/:/var/lib/docker:ro \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
google/cadvisor:v0.23.2 \
--listen_ip $(hostname -i) \
--logtostderr \
--port=4194"
ExecStop=/usr/bin/docker stop -t 5 cadvisor
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
- path: "/etc/fleet/dnsmasq.service"
content: |
[Unit]
Description=Lightweight caching DNS proxy
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill dnsmasq
ExecStartPre=-/usr/bin/docker rm -f dnsmasq
ExecStartPre=-/usr/bin/docker pull janeczku/go-dnsmasq:release-1.0.6
ExecStartPre=/usr/bin/sh -c " \
etcdctl member list 2>1 | awk -F [/:] '{print $9}' | tr '\n' ',' > /tmp/ns && \
awk '/^nameserver/ {print $2; exit}' /run/systemd/resolve/resolv.conf >> /tmp/ns"
ExecStart=/usr/bin/sh -c "docker run \
--name dnsmasq \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:rw \
--volume /etc/hosts:/etc/hosts:ro \
janeczku/go-dnsmasq:release-1.0.6 \
--listen $(hostname -i) \
--nameservers $(cat /tmp/ns) \
--hostsfile /etc/hosts \
--hostsfile-poll 60 \
--default-resolver \
--search-domains $(hostname -d | cut -d. -f-2).mesos,$(hostname -d) \
--append-search-domains"
ExecStop=/usr/bin/docker stop -t 5 dnsmasq
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/mongodb.service"
content: |
[Unit]
Description=MongoDB
After=docker.service rexray.service
Requires=docker.service rexray.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill mongodb
ExecStartPre=-/usr/bin/docker rm mongodb
ExecStartPre=-/usr/bin/docker pull mongo:3.2
ExecStartPre=-/usr/bin/docker volume create --name ${KATO_CLUSTER_ID}-pritunl-mongo -d rexray
ExecStart=/usr/bin/sh -c "docker run \
--name mongodb \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--volume ${KATO_CLUSTER_ID}-pritunl-mongo:/data/db:rw \
mongo:3.2 \
--bind_ip 127.0.0.1"
ExecStop=/usr/bin/docker stop -t 5 mongodb
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=edge
- path: "/etc/fleet/pritunl.service"
content: |
[Unit]
Description=Pritunl
After=docker.service mongodb.service
Requires=docker.service mongodb.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill pritunl
ExecStartPre=-/usr/bin/docker rm pritunl
ExecStartPre=-/usr/bin/docker pull h0tbird/pritunl:v1.21.954.48-3
ExecStart=/usr/bin/sh -c "docker run \
--privileged \
--name pritunl \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env MONGODB_URI=mongodb://127.0.0.1:27017/pritunl \
h0tbird/pritunl:v1.21.954.48-3"
ExecStop=/usr/bin/docker stop -t 5 pritunl
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=edge
- path: "/etc/fleet/haproxy-exporter.service"
content: |
[Unit]
Description=Prometheus haproxy exporter
After=docker.service marathon-lb.service
Requires=docker.service marathon-lb.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill haproxy-exporter
ExecStartPre=-/usr/bin/docker rm -f haproxy-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name haproxy-exporter \
katosys/exporters:v0.1.0-1 haproxy_exporter \
-haproxy.scrape-uri 'http://localhost:9090/haproxy?stats;csv' \
-web.listen-address :9102"
ExecStop=/usr/bin/docker stop -t 5 haproxy-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/mesos-master-exporter.service"
content: |
[Unit]
Description=Prometheus mesos master exporter
After=docker.service mesos-master.service
Requires=docker.service mesos-master.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill mesos-exporter
ExecStartPre=-/usr/bin/docker rm -f mesos-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name mesos-exporter \
katosys/exporters:v0.1.0-1 mesos_exporter \
-master http://$(hostname):5050 \
-addr :9104"
ExecStop=/usr/bin/docker stop -t 5 mesos-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/mesos-node-exporter.service"
content: |
[Unit]
Description=Prometheus mesos node exporter
After=docker.service mesos-node.service
Requires=docker.service mesos-node.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill mesos-exporter
ExecStartPre=-/usr/bin/docker rm -f mesos-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name mesos-exporter \
katosys/exporters:v0.1.0-1 mesos_exporter \
-slave http://$(hostname):5051 \
-addr :9104"
ExecStop=/usr/bin/docker stop -t 5 mesos-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/node-exporter.service"
content: |
[Unit]
Description=Prometheus node exporter
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill node-exporter
ExecStartPre=-/usr/bin/docker rm -f node-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name node-exporter \
katosys/exporters:v0.1.0-1 node_exporter \
-web.listen-address :9101"
ExecStop=/usr/bin/docker stop -t 5 node-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
- path: "/etc/fleet/zookeeper-exporter.service"
content: |
[Unit]
Description=Prometheus zookeeper exporter
After=docker.service zookeeper.service
Requires=docker.service zookeeper.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill zookeeper-exporter
ExecStartPre=-/usr/bin/docker rm -f zookeeper-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name zookeeper-exporter \
katosys/exporters:v0.1.0-1 zookeeper_exporter \
-web.listen-address :9103 \
$(echo ${KATO_ZK} | tr , ' ')"
ExecStop=/usr/bin/docker stop -t 5 zookeeper-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/confd.service"
content: |
[Unit]
Description=Lightweight configuration management tool
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill confd
ExecStartPre=-/usr/bin/docker rm -f confd
ExecStartPre=-/usr/bin/docker pull katosys/confd:v0.11.0-2
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name confd \
--volume /etc:/etc:rw \
katosys/confd:v0.11.0-2 \
-node 127.0.0.1:2379 \
-watch"
ExecStop=/usr/bin/docker stop -t 5 confd
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/confd/conf.d/prom-prometheus.toml"
content: |
[template]
src = "prom-prometheus.tmpl"
dest = "/etc/prometheus/targets/prometheus.yml"
keys = [ "/hosts/master" ]
- path: "/etc/confd/templates/prom-prometheus.tmpl"
content: |
- targets:{{"{{"}}range gets "/hosts/master/*"{{"}}"}}
- {{"{{"}}base .Key{{"}}"}}:9191{{"{{"}}end{{"}}"}}
- path: "/etc/confd/conf.d/prom-cadvisor.toml"
content: |
[template]
src = "prom-cadvisor.tmpl"
dest = "/etc/prometheus/targets/cadvisor.yml"
keys = [
"/hosts/master",
"/hosts/worker",
]
- path: "/etc/confd/templates/prom-cadvisor.tmpl"
content: |
- targets:{{"{{"}}range gets "/hosts/master/*"{{"}}"}}
- {{"{{"}}base .Key{{"}}"}}:4194{{"{{"}}end{{"}}"}}
labels:
group: masters
- targets:{{"{{"}}range gets "/hosts/worker/*"{{"}}"}}
- {{"{{"}}base .Key{{"}}"}}:4194{{"{{"}}end{{"}}"}}
labels:
group: workers
coreos:
units:
- name: "etcd2.service"
command: "start"
- name: "fleet.service"
command: "start"
- name: "flanneld.service"
command: "start"
drop-ins:
- name: 50-network-config.conf
content: |
[Service]
ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "{{.FlannelNetwork}}","SubnetLen":{{.FlannelSubnetLen}} ,"SubnetMin": "{{.FlannelSubnetMin}}","SubnetMax": "{{.FlannelSubnetMax}}","Backend": {"Type": "{{.FlannelBackend}}"} }'
- name: "update-ca-certificates.service"
drop-ins:
- name: 50-rehash-certs.conf
content: |
[Unit]
ConditionPathIsSymbolicLink=
[Service]
ExecStart=
ExecStart=/usr/sbin/update-ca-certificates
- name: "ns1dns.service"
command: "start"
content: |
[Unit]
Description=Publish DNS records to nsone
Before=etcd2.service
[Service]
Type=oneshot
ExecStart=/opt/bin/ns1dns
- name: "etchost.service"
command: "start"
content: |
[Unit]
Description=Stores IP and hostname in etcd
Requires=etcd2.service
After=etcd2.service
[Service]
Type=oneshot
ExecStart=/opt/bin/etchost
- name: "etchost.timer"
command: "start"
content: |
[Unit]
Description=Run etchost.service every 5 minutes
[Timer]
OnBootSec=2min
OnUnitActiveSec=5min
- name: "rexray.service"
command: "start"
content: |
[Unit]
Description=REX-Ray volume plugin
Before=docker.service
[Service]
EnvironmentFile=/etc/rexray/rexray.env
ExecStartPre=-/bin/bash -c '\
REXRAY_URL=https://dl.bintray.com/emccode/rexray/stable/0.3.3/rexray-Linux-x86_64-0.3.3.tar.gz; \
[ -f /opt/bin/rexray ] || { curl -sL $${REXRAY_URL} | tar -xz -C /opt/bin; }; \
[ -x /opt/bin/rexray ] || { chmod +x /opt/bin/rexray; }'
ExecStart=/opt/bin/rexray start -f
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
[Install]
WantedBy=docker.service
- name: "format-ephemeral.service"
command: "start"
content: |
[Unit]
Description=Formats the ephemeral drive
After=dev-xvdb.device
Requires=dev-xvdb.device
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/sbin/wipefs -f /dev/xvdb
ExecStart=/usr/sbin/mkfs.ext4 -F /dev/xvdb
- name: "var-lib-docker.mount"
command: "start"
content: |
[Unit]
Description=Mount ephemeral to /var/lib/docker
Requires=format-ephemeral.service
After=format-ephemeral.service
[Mount]
What=/dev/xvdb
Where=/var/lib/docker
Type=ext4
- name: "docker.service"
drop-ins:
- name: "50-docker-opts.conf"
content: |
[Service]
Environment='DOCKER_OPTS=--registry-mirror=http://external-registry-sys.marathon:5000'
- name: "10-wait-docker.conf"
content: |
[Unit]
After=var-lib-docker.mount
Requires=var-lib-docker.mount
flannel:
interface: $private_ipv4
fleet:
public-ip: "$private_ipv4"
metadata: "role=master,id={{.HostID}}"
etcd2:
{{if .EtcdToken }} discovery: https://discovery.etcd.io/{{.EtcdToken}}{{else}} name: "master-{{.HostID}}"
initial-cluster: "master-1=http://master-1:2380,master-2=http://master-2:2380,master-3=http://master-3:2380"
initial-cluster-state: "new"{{end}}
advertise-client-urls: "http://$private_ipv4:2379"
initial-advertise-peer-urls: "http://$private_ipv4:2380"
listen-client-urls: "http://127.0.0.1:2379,http://$private_ipv4:2379"
listen-peer-urls: "http://$private_ipv4:2380"
`
Added cAdvisor scrape targets (2)
package udata
//---------------------------------------------------------------------------
// CoreOS master user data:
//---------------------------------------------------------------------------
const templMaster = `#cloud-config
hostname: "master-{{.HostID}}.{{.Domain}}"
write_files:
- path: "/etc/hosts"
content: |
127.0.0.1 localhost
$private_ipv4 master-{{.HostID}}.{{.Domain}} master-{{.HostID}}
$private_ipv4 master-{{.HostID}}.int.{{.Domain}} master-{{.HostID}}.int
- path: "/etc/.hosts"
content: |
127.0.0.1 localhost
$private_ipv4 master-{{.HostID}}.{{.Domain}} master-{{.HostID}}
$private_ipv4 master-{{.HostID}}.int.{{.Domain}} master-{{.HostID}}.int
- path: "/etc/resolv.conf"
content: |
search {{.Domain}}
nameserver 8.8.8.8
- path: "/etc/kato.env"
content: |
KATO_CLUSTER_ID={{.ClusterID}}
KATO_MASTER_COUNT={{.MasterCount}}
KATO_ROLE={{.Role}}
KATO_HOST_ID={{.HostID}}
KATO_ZK={{.ZkServers}}
{{if .CaCert}}- path: "/etc/ssl/certs/{{.ClusterID}}.pem"
content: |
{{.CaCert}}
{{- end}}
- path: "/etc/rexray/rexray.env"
- path: "/etc/rexray/config.yml"
{{- if .RexrayStorageDriver }}
content: |
rexray:
storageDrivers:
- {{.RexrayStorageDriver}}
{{.RexrayConfigSnippet}}
{{- end}}
- path: "/home/core/.bashrc"
owner: "core:core"
content: |
[[ $- != *i* ]] && return
alias ls='ls -hF --color=auto --group-directories-first'
alias l='ls -l'
alias ll='ls -la'
alias grep='grep --color=auto'
alias dim='docker images'
alias dps='docker ps'
alias drm='docker rm -v $(docker ps -qaf status=exited)'
alias drmi='docker rmi $(docker images -qf dangling=true)'
alias drmv='docker volume rm $(docker volume ls -qf dangling=true)'
- path: "/home/core/.aws/config"
owner: "core:core"
permissions: "0644"
content: |
[default]
region = {{.Ec2Region}}
- path: "/etc/ssh/sshd_config"
permissions: "0600"
content: |
UsePrivilegeSeparation sandbox
Subsystem sftp internal-sftp
ClientAliveInterval 180
UseDNS no
PermitRootLogin no
AllowUsers core
PasswordAuthentication no
ChallengeResponseAuthentication no
- path: "/opt/bin/ns1dns"
permissions: "0755"
content: |
#!/bin/bash
readonly HOST="$(hostname -s)"
readonly DOMAIN="$(hostname -d)"
readonly APIURL='https://api.nsone.net/v1'
readonly APIKEY='{{.Ns1ApiKey}}'
readonly IP_PUB="$(dig +short myip.opendns.com @resolver1.opendns.com)"
readonly IP_PRI="$(hostname -i)"
declare -A IP=(['ext']="${IP_PUB}" ['int']="${IP_PRI}")
for i in ext int; do
curl -sX GET -H "X-NSONE-Key: ${APIKEY}" \
${APIURL}/zones/${i}.${DOMAIN}/${HOST}.${i}.${DOMAIN}/A | \
grep -q 'record not found' && METHOD='PUT' || METHOD='POST'
curl -sX ${METHOD} -H "X-NSONE-Key: ${APIKEY}" \
${APIURL}/zones/${i}.${DOMAIN}/${HOST}.${i}.${DOMAIN}/A -d "{
\"zone\":\"${i}.${DOMAIN}\",
\"domain\":\"${HOST}.${i}.${DOMAIN}\",
\"type\":\"A\",
\"answers\":[{\"answer\":[\"${IP[${i}]}\"]}]}"
done
- path: "/opt/bin/etchost"
permissions: "0755"
content: |
#!/bin/bash
source /etc/kato.env
PUSH+=$(echo $(hostname -i) $(hostname -f) $(hostname -s))$'\n'
PUSH+=$(echo $(hostname -i) $(hostname -s).int.$(hostname -d) $(hostname -s).int)
etcdctl set /hosts/${KATO_ROLE}/$(hostname -f) "${PUSH}"
KEYS=$(etcdctl ls --recursive /hosts | grep $(hostname -d) | grep -v $(hostname -f) | sort)
for i in $KEYS; do PULL+=$(etcdctl get ${i})$'\n'; done
cat /etc/.hosts > /etc/hosts
echo "${PULL}" >> /etc/hosts
- path: "/opt/bin/loopssh"
permissions: "0755"
content: |
#!/bin/bash
A=$(fleetctl list-machines -fields=ip -no-legend)
for i in $A; do ssh -o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no $i -C "$*"; done
- path: "/opt/bin/awscli"
permissions: "0755"
content: |
#!/bin/bash
docker run -i --rm \
--volume /home/core/.aws:/root/.aws:ro \
--volume ${PWD}:/aws \
h0tbird/awscli "${@}"
- path: "/etc/prometheus/targets/prometheus.yml"
- path: "/etc/prometheus/prometheus.yml"
permissions: "0600"
content: |
global:
scrape_interval: 1m
scrape_timeout: 10s
evaluation_interval: 10s
rule_files:
- /etc/prometheus/prometheus.rules
scrape_configs:
- job_name: 'prometheus'
scrape_interval: 10s
file_sd_configs:
- files:
- /etc/prometheus/targets/prometheus.yml
- job_name: 'cAdvisor'
scrape_interval: 10s
file_sd_configs:
- files:
- /etc/prometheus/targets/cadvisor.yml
- path: "/etc/fleet/zookeeper.service"
content: |
[Unit]
Description=Zookeeper
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill zookeeper
ExecStartPre=-/usr/bin/docker rm zookeeper
ExecStartPre=-/usr/bin/docker pull h0tbird/zookeeper:v3.4.8-2
ExecStart=/usr/bin/sh -c "docker run \
--net host \
--name zookeeper \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env ZK_SERVER_ID=${KATO_HOST_ID} \
--env ZK_TICK_TIME=2000 \
--env ZK_INIT_LIMIT=5 \
--env ZK_SYNC_LIMIT=2 \
--env ZK_SERVERS=$${KATO_ZK//:2181/} \
--env ZK_DATA_DIR=/var/lib/zookeeper \
--env ZK_CLIENT_PORT=2181 \
--env ZK_CLIENT_PORT_ADDRESS=$(hostname -i) \
--env JMXDISABLE=true \
h0tbird/zookeeper:v3.4.8-2"
ExecStop=/usr/bin/docker stop -t 5 zookeeper
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/prometheus.service"
content: |
[Unit]
Description=Prometheus Service
After=docker.service rexray.service
Requires=docker.service rexray.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill prometheus
ExecStartPre=-/usr/bin/docker rm -f prometheus
ExecStartPre=-/usr/bin/docker pull prom/prometheus:0.20.0
ExecStartPre=-/usr/bin/docker volume create --name ${KATO_CLUSTER_ID}-prometheus-${KATO_HOST_ID} -d rexray
ExecStart=/usr/bin/sh -c "docker run \
--net host \
--name prometheus \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--volume /etc/prometheus:/etc/prometheus:ro \
--volume ${KATO_CLUSTER_ID}-prometheus-${KATO_HOST_ID}:/prometheus:rw \
prom/prometheus:0.20.0 \
-config.file=/etc/prometheus/prometheus.yml \
-storage.local.path=/prometheus \
-web.console.libraries=/etc/prometheus/console_libraries \
-web.console.templates=/etc/prometheus/consoles \
-web.listen-address=:9191"
ExecStop=/usr/bin/docker stop -t 5 prometheus
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/mesos-master.service"
content: |
[Unit]
Description=Mesos Master
After=docker.service zookeeper.service
Requires=docker.service zookeeper.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill mesos-master
ExecStartPre=-/usr/bin/docker rm mesos-master
ExecStartPre=-/usr/bin/docker pull mesosphere/mesos-master:0.28.1
ExecStart=/usr/bin/sh -c "docker run \
--privileged \
--name mesos-master \
--net host \
--volume /var/lib/mesos:/var/lib/mesos:rw \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
mesosphere/mesos-master:0.28.1 \
--ip=$(hostname -i) \
--zk=zk://${KATO_ZK}/mesos \
--work_dir=/var/lib/mesos/master \
--log_dir=/var/log/mesos \
--quorum=$(($KATO_MASTER_COUNT/2 + 1))"
ExecStop=/usr/bin/docker stop -t 5 mesos-master
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/mesos-node.service"
content: |
[Unit]
Description=Mesos Node
After=docker.service dnsmasq.service
Wants=dnsmasq.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill mesos-node
ExecStartPre=-/usr/bin/docker rm mesos-node
ExecStartPre=-/usr/bin/docker pull mesosphere/mesos-slave:0.28.1
ExecStart=/usr/bin/sh -c "docker run \
--privileged \
--net host \
--pid host \
--name mesos-node \
--volume /sys:/sys \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--volume /usr/bin/docker:/usr/bin/docker:ro \
--volume /var/run/docker.sock:/var/run/docker.sock:rw \
--volume /lib64/libdevmapper.so.1.02:/lib/libdevmapper.so.1.02:ro \
--volume /lib64/libsystemd.so.0:/lib/libsystemd.so.0:ro \
--volume /lib64/libgcrypt.so.20:/lib/libgcrypt.so.20:ro \
--volume /var/lib/mesos:/var/lib/mesos:rw \
--volume /etc/certs:/etc/certs:ro \
mesosphere/mesos-slave:0.28.1 \
--ip=$(hostname -i) \
--containerizers=docker \
--executor_registration_timeout=2mins \
--master=zk://${KATO_ZK}/mesos \
--work_dir=/var/lib/mesos/node \
--log_dir=/var/log/mesos/node"
ExecStop=/usr/bin/docker stop -t 5 mesos-node
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/mesos-dns.service"
content: |
[Unit]
Description=Mesos DNS
After=docker.service zookeeper.service mesos-master.service
Requires=docker.service zookeeper.service mesos-master.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill mesos-dns
ExecStartPre=-/usr/bin/docker rm mesos-dns
ExecStartPre=-/usr/bin/docker pull h0tbird/mesos-dns:v0.5.2-1
ExecStart=/usr/bin/sh -c "docker run \
--name mesos-dns \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env MDNS_ZK=zk://${KATO_ZK}/mesos \
--env MDNS_REFRESHSECONDS=45 \
--env MDNS_LISTENER=$(hostname -i) \
--env MDNS_HTTPON=false \
--env MDNS_TTL=45 \
--env MDNS_RESOLVERS=8.8.8.8 \
--env MDNS_DOMAIN=$(hostname -d | cut -d. -f-2).mesos \
--env MDNS_IPSOURCE=netinfo \
h0tbird/mesos-dns:v0.5.2-1"
ExecStartPost=/usr/bin/sh -c ' \
echo search $(hostname -d | cut -d. -f-2).mesos $(hostname -d) > /etc/resolv.conf && \
echo "nameserver $(hostname -i)" >> /etc/resolv.conf'
ExecStop=/usr/bin/sh -c ' \
echo search $(hostname -d) > /etc/resolv.conf && \
echo "nameserver 8.8.8.8" >> /etc/resolv.conf'
ExecStop=/usr/bin/docker stop -t 5 mesos-dns
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/marathon.service"
content: |
[Unit]
Description=Marathon
After=docker.service zookeeper.service mesos-master.service
Requires=docker.service zookeeper.service mesos-master.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill marathon
ExecStartPre=-/usr/bin/docker rm marathon
ExecStartPre=-/usr/bin/docker pull mesosphere/marathon:v1.1.1
ExecStart=/usr/bin/sh -c "docker run \
--name marathon \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env LIBPROCESS_IP=$(hostname -i) \
--env LIBPROCESS_PORT=9090 \
mesosphere/marathon:v1.1.1 \
--http_address $(hostname -i) \
--master zk://${KATO_ZK}/mesos \
--zk zk://${KATO_ZK}/marathon \
--task_launch_timeout 240000 \
--checkpoint"
ExecStop=/usr/bin/docker stop -t 5 marathon
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/marathon-lb.service"
content: |
[Unit]
Description=marathon-lb
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill marathon-lb
ExecStartPre=-/usr/bin/docker rm marathon-lb
ExecStartPre=-/usr/bin/docker pull mesosphere/marathon-lb:v1.3.0
ExecStart=/usr/bin/sh -c "docker run \
--name marathon-lb \
--net host \
--privileged \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env PORTS=9090,9091 \
mesosphere/marathon-lb:v1.3.0 sse \
--marathon http://marathon:8080 \
--health-check \
--group external \
--group internal"
ExecStop=/usr/bin/docker stop -t 5 marathon-lb
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/cadvisor.service"
content: |
[Unit]
Description=cAdvisor Service
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill cadvisor
ExecStartPre=-/usr/bin/docker rm -f cadvisor
ExecStartPre=-/usr/bin/docker pull google/cadvisor:v0.23.2
ExecStart=/usr/bin/sh -c "docker run \
--net host \
--name cadvisor \
--volume /:/rootfs:ro \
--volume /var/run:/var/run:rw \
--volume /sys:/sys:ro \
--volume /var/lib/docker/:/var/lib/docker:ro \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
google/cadvisor:v0.23.2 \
--listen_ip $(hostname -i) \
--logtostderr \
--port=4194"
ExecStop=/usr/bin/docker stop -t 5 cadvisor
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
- path: "/etc/fleet/dnsmasq.service"
content: |
[Unit]
Description=Lightweight caching DNS proxy
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill dnsmasq
ExecStartPre=-/usr/bin/docker rm -f dnsmasq
ExecStartPre=-/usr/bin/docker pull janeczku/go-dnsmasq:release-1.0.6
ExecStartPre=/usr/bin/sh -c " \
etcdctl member list 2>1 | awk -F [/:] '{print $9}' | tr '\n' ',' > /tmp/ns && \
awk '/^nameserver/ {print $2; exit}' /run/systemd/resolve/resolv.conf >> /tmp/ns"
ExecStart=/usr/bin/sh -c "docker run \
--name dnsmasq \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:rw \
--volume /etc/hosts:/etc/hosts:ro \
janeczku/go-dnsmasq:release-1.0.6 \
--listen $(hostname -i) \
--nameservers $(cat /tmp/ns) \
--hostsfile /etc/hosts \
--hostsfile-poll 60 \
--default-resolver \
--search-domains $(hostname -d | cut -d. -f-2).mesos,$(hostname -d) \
--append-search-domains"
ExecStop=/usr/bin/docker stop -t 5 dnsmasq
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/mongodb.service"
content: |
[Unit]
Description=MongoDB
After=docker.service rexray.service
Requires=docker.service rexray.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill mongodb
ExecStartPre=-/usr/bin/docker rm mongodb
ExecStartPre=-/usr/bin/docker pull mongo:3.2
ExecStartPre=-/usr/bin/docker volume create --name ${KATO_CLUSTER_ID}-pritunl-mongo -d rexray
ExecStart=/usr/bin/sh -c "docker run \
--name mongodb \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--volume ${KATO_CLUSTER_ID}-pritunl-mongo:/data/db:rw \
mongo:3.2 \
--bind_ip 127.0.0.1"
ExecStop=/usr/bin/docker stop -t 5 mongodb
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=edge
- path: "/etc/fleet/pritunl.service"
content: |
[Unit]
Description=Pritunl
After=docker.service mongodb.service
Requires=docker.service mongodb.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill pritunl
ExecStartPre=-/usr/bin/docker rm pritunl
ExecStartPre=-/usr/bin/docker pull h0tbird/pritunl:v1.21.954.48-3
ExecStart=/usr/bin/sh -c "docker run \
--privileged \
--name pritunl \
--net host \
--volume /etc/resolv.conf:/etc/resolv.conf:ro \
--volume /etc/hosts:/etc/hosts:ro \
--env MONGODB_URI=mongodb://127.0.0.1:27017/pritunl \
h0tbird/pritunl:v1.21.954.48-3"
ExecStop=/usr/bin/docker stop -t 5 pritunl
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=edge
- path: "/etc/fleet/haproxy-exporter.service"
content: |
[Unit]
Description=Prometheus haproxy exporter
After=docker.service marathon-lb.service
Requires=docker.service marathon-lb.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill haproxy-exporter
ExecStartPre=-/usr/bin/docker rm -f haproxy-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name haproxy-exporter \
katosys/exporters:v0.1.0-1 haproxy_exporter \
-haproxy.scrape-uri 'http://localhost:9090/haproxy?stats;csv' \
-web.listen-address :9102"
ExecStop=/usr/bin/docker stop -t 5 haproxy-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/mesos-master-exporter.service"
content: |
[Unit]
Description=Prometheus mesos master exporter
After=docker.service mesos-master.service
Requires=docker.service mesos-master.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill mesos-exporter
ExecStartPre=-/usr/bin/docker rm -f mesos-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name mesos-exporter \
katosys/exporters:v0.1.0-1 mesos_exporter \
-master http://$(hostname):5050 \
-addr :9104"
ExecStop=/usr/bin/docker stop -t 5 mesos-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/mesos-node-exporter.service"
content: |
[Unit]
Description=Prometheus mesos node exporter
After=docker.service mesos-node.service
Requires=docker.service mesos-node.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill mesos-exporter
ExecStartPre=-/usr/bin/docker rm -f mesos-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name mesos-exporter \
katosys/exporters:v0.1.0-1 mesos_exporter \
-slave http://$(hostname):5051 \
-addr :9104"
ExecStop=/usr/bin/docker stop -t 5 mesos-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=worker
- path: "/etc/fleet/node-exporter.service"
content: |
[Unit]
Description=Prometheus node exporter
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill node-exporter
ExecStartPre=-/usr/bin/docker rm -f node-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name node-exporter \
katosys/exporters:v0.1.0-1 node_exporter \
-web.listen-address :9101"
ExecStop=/usr/bin/docker stop -t 5 node-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
- path: "/etc/fleet/zookeeper-exporter.service"
content: |
[Unit]
Description=Prometheus zookeeper exporter
After=docker.service zookeeper.service
Requires=docker.service zookeeper.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
EnvironmentFile=/etc/kato.env
ExecStartPre=-/usr/bin/docker kill zookeeper-exporter
ExecStartPre=-/usr/bin/docker rm -f zookeeper-exporter
ExecStartPre=-/usr/bin/docker pull katosys/exporters:v0.1.0-1
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name zookeeper-exporter \
katosys/exporters:v0.1.0-1 zookeeper_exporter \
-web.listen-address :9103 \
$(echo ${KATO_ZK} | tr , ' ')"
ExecStop=/usr/bin/docker stop -t 5 zookeeper-exporter
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/fleet/confd.service"
content: |
[Unit]
Description=Lightweight configuration management tool
After=docker.service
Requires=docker.service
[Service]
Restart=on-failure
RestartSec=10
TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill confd
ExecStartPre=-/usr/bin/docker rm -f confd
ExecStartPre=-/usr/bin/docker pull katosys/confd:v0.11.0-2
ExecStart=/usr/bin/sh -c "docker run --rm \
--net host \
--name confd \
--volume /etc:/etc:rw \
katosys/confd:v0.11.0-2 \
-node 127.0.0.1:2379 \
-watch"
ExecStop=/usr/bin/docker stop -t 5 confd
[Install]
WantedBy=multi-user.target
[X-Fleet]
Global=true
MachineMetadata=role=master
- path: "/etc/confd/conf.d/prom-prometheus.toml"
content: |
[template]
src = "prom-prometheus.tmpl"
dest = "/etc/prometheus/targets/prometheus.yml"
keys = [ "/hosts/master" ]
- path: "/etc/confd/templates/prom-prometheus.tmpl"
content: |
- targets:{{"{{"}}range gets "/hosts/master/*"{{"}}"}}
- {{"{{"}}base .Key{{"}}"}}:9191{{"{{"}}end{{"}}"}}
- path: "/etc/confd/conf.d/prom-cadvisor.toml"
content: |
[template]
src = "prom-cadvisor.tmpl"
dest = "/etc/prometheus/targets/cadvisor.yml"
keys = [
"/hosts/master",
"/hosts/worker",
]
- path: "/etc/confd/templates/prom-cadvisor.tmpl"
content: |
- targets:{{"{{"}}range gets "/hosts/master/*"{{"}}"}}
- {{"{{"}}base .Key{{"}}"}}:4194{{"{{"}}end{{"}}"}}
labels:
group: masters
- targets:{{"{{"}}range gets "/hosts/worker/*"{{"}}"}}
- {{"{{"}}base .Key{{"}}"}}:4194{{"{{"}}end{{"}}"}}
labels:
group: workers
coreos:
units:
- name: "etcd2.service"
command: "start"
- name: "fleet.service"
command: "start"
- name: "flanneld.service"
command: "start"
drop-ins:
- name: 50-network-config.conf
content: |
[Service]
ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "{{.FlannelNetwork}}","SubnetLen":{{.FlannelSubnetLen}} ,"SubnetMin": "{{.FlannelSubnetMin}}","SubnetMax": "{{.FlannelSubnetMax}}","Backend": {"Type": "{{.FlannelBackend}}"} }'
- name: "update-ca-certificates.service"
drop-ins:
- name: 50-rehash-certs.conf
content: |
[Unit]
ConditionPathIsSymbolicLink=
[Service]
ExecStart=
ExecStart=/usr/sbin/update-ca-certificates
- name: "ns1dns.service"
command: "start"
content: |
[Unit]
Description=Publish DNS records to nsone
Before=etcd2.service
[Service]
Type=oneshot
ExecStart=/opt/bin/ns1dns
- name: "etchost.service"
command: "start"
content: |
[Unit]
Description=Stores IP and hostname in etcd
Requires=etcd2.service
After=etcd2.service
[Service]
Type=oneshot
ExecStart=/opt/bin/etchost
- name: "etchost.timer"
command: "start"
content: |
[Unit]
Description=Run etchost.service every 5 minutes
[Timer]
OnBootSec=2min
OnUnitActiveSec=5min
- name: "rexray.service"
command: "start"
content: |
[Unit]
Description=REX-Ray volume plugin
Before=docker.service
[Service]
EnvironmentFile=/etc/rexray/rexray.env
ExecStartPre=-/bin/bash -c '\
REXRAY_URL=https://dl.bintray.com/emccode/rexray/stable/0.3.3/rexray-Linux-x86_64-0.3.3.tar.gz; \
[ -f /opt/bin/rexray ] || { curl -sL $${REXRAY_URL} | tar -xz -C /opt/bin; }; \
[ -x /opt/bin/rexray ] || { chmod +x /opt/bin/rexray; }'
ExecStart=/opt/bin/rexray start -f
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
[Install]
WantedBy=docker.service
- name: "format-ephemeral.service"
command: "start"
content: |
[Unit]
Description=Formats the ephemeral drive
After=dev-xvdb.device
Requires=dev-xvdb.device
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/sbin/wipefs -f /dev/xvdb
ExecStart=/usr/sbin/mkfs.ext4 -F /dev/xvdb
- name: "var-lib-docker.mount"
command: "start"
content: |
[Unit]
Description=Mount ephemeral to /var/lib/docker
Requires=format-ephemeral.service
After=format-ephemeral.service
[Mount]
What=/dev/xvdb
Where=/var/lib/docker
Type=ext4
- name: "docker.service"
drop-ins:
- name: "50-docker-opts.conf"
content: |
[Service]
Environment='DOCKER_OPTS=--registry-mirror=http://external-registry-sys.marathon:5000'
- name: "10-wait-docker.conf"
content: |
[Unit]
After=var-lib-docker.mount
Requires=var-lib-docker.mount
flannel:
interface: $private_ipv4
fleet:
public-ip: "$private_ipv4"
metadata: "role=master,id={{.HostID}}"
etcd2:
{{if .EtcdToken }} discovery: https://discovery.etcd.io/{{.EtcdToken}}{{else}} name: "master-{{.HostID}}"
initial-cluster: "master-1=http://master-1:2380,master-2=http://master-2:2380,master-3=http://master-3:2380"
initial-cluster-state: "new"{{end}}
advertise-client-urls: "http://$private_ipv4:2379"
initial-advertise-peer-urls: "http://$private_ipv4:2380"
listen-client-urls: "http://127.0.0.1:2379,http://$private_ipv4:2379"
listen-peer-urls: "http://$private_ipv4:2380"
`
|
/*
* Copyright 2011 Nan Deng
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package uniqush
import (
"crypto/tls"
"net"
"json"
"os"
"io"
"bytes"
"encoding/hex"
"encoding/binary"
"strconv"
"sync/atomic"
"time"
"fmt"
)
// APNSPushService implements the APNS (Apple Push Notification Service)
// push service type for uniqush.
type APNSPushService struct {
	nextid uint32 // last transaction id handed out; advanced atomically in Push
}
// init registers the APNS push service type with the global push
// service manager so it can be looked up by its name ("apns").
func init() {
	GetPushServiceManager().RegisterPushServiceType(NewAPNSPushService())
}
// NewAPNSPushService returns a fresh, zero-valued APNS push service.
func NewAPNSPushService() *APNSPushService {
	return &APNSPushService{}
}
// Name returns the unique name of this push service type.
func (p *APNSPushService) Name() string {
	const serviceName = "apns"
	return serviceName
}
// SetAsyncFailureProcessor is part of the push-service interface.
// Intentionally a no-op here: APNS failures in this file are reported
// synchronously from Push, so the processor is ignored.
func (p *APNSPushService) SetAsyncFailureProcessor(pfp PushFailureProcessor) {
}
// BuildPushServiceProviderFromMap constructs a PushServiceProvider from
// key/value pairs. Required keys: "service", "cert" (path to the TLS
// client certificate) and "key" (path to the private key). The optional
// "sandbox" == "true" selects Apple's sandbox gateway instead of the
// production gateway. (This file predates Go 1; os.Error/os.NewError
// are the pre-Go1 error APIs.)
func (p *APNSPushService) BuildPushServiceProviderFromMap(kv map[string]string) (*PushServiceProvider, os.Error) {
	psp := NewEmptyPushServiceProvider()
	if service, ok := kv["service"]; ok {
		psp.FixedData["service"] = service
	} else {
		return nil, os.NewError("NoService")
	}
	if cert, ok := kv["cert"]; ok {
		psp.FixedData["cert"] = cert
	} else {
		return nil, os.NewError("NoCertificate")
	}
	if key, ok := kv["key"]; ok {
		psp.FixedData["key"] = key
	} else {
		return nil, os.NewError("NoPrivateKey")
	}
	// The gateway address is volatile data: it depends on sandbox mode,
	// not on the provider's identity.
	if sandbox, ok := kv["sandbox"]; ok {
		if sandbox == "true" {
			psp.VolatileData["addr"] = "gateway.sandbox.push.apple.com:2195"
			return psp, nil
		}
	}
	psp.VolatileData["addr"] = "gateway.push.apple.com:2195"
	return psp, nil
}
// BuildDeliveryPointFromMap constructs a DeliveryPoint from key/value
// pairs. The keys "service", "subscriber" and "devtoken" are all
// required; the first missing key produces a descriptive error.
func (p *APNSPushService) BuildDeliveryPointFromMap(kv map[string]string) (*DeliveryPoint, os.Error) {
	dp := NewEmptyDeliveryPoint()
	required := []struct{ key, errmsg string }{
		{"service", "NoService"},
		{"subscriber", "NoSubscriber"},
		{"devtoken", "NoDevToken"},
	}
	for _, r := range required {
		value, present := kv[r.key]
		if !present {
			return nil, os.NewError(r.errmsg)
		}
		dp.FixedData[r.key] = value
	}
	return dp, nil
}
// toAPNSPayload converts a Notification into the JSON payload APNS
// expects: well-known keys ("msg", "badge", "sound", "img") are folded
// into the "aps" dictionary, "id" and "expiry" are consumed by the
// binary frame instead (see Push), and any other key is passed through
// as a custom top-level field. Returns nil if JSON marshalling fails.
func toAPNSPayload(n *Notification) []byte {
	payload := make(map[string]interface{})
	aps := make(map[string]interface{})
	alert := make(map[string]interface{})
	for k, v := range n.Data {
		switch (k) {
		case "msg":
			alert["body"] = v
		case "badge":
			// badge must be numeric; non-numeric values are dropped
			b, err := strconv.Atoi(v)
			if err != nil {
				continue
			} else {
				aps["badge"] = b
			}
		case "sound":
			aps["sound"] = v
		case "img":
			alert["launch-image"] = v
		case "id":
			// transaction id: encoded into the binary frame, not the payload
			continue
		case "expiry":
			// expiry: encoded into the binary frame, not the payload
			continue
		default:
			// unrecognized keys become custom payload fields
			payload[k] = v
		}
	}
	aps["alert"] = alert
	payload["aps"] = aps
	j, err := json.Marshal(payload)
	if err != nil {
		return nil
	}
	return j
}
// writen writes the whole of buf to w, looping over short writes.
// It returns the first error reported by w, or nil once every byte has
// been written.
//
// Fix: the original loop condition was "n >= 0", which spins forever if
// a writer returns (0, nil). Looping on the remaining length and
// failing a zero-progress write avoids the infinite loop.
func writen(w io.Writer, buf []byte) os.Error {
	for len(buf) > 0 {
		l, err := w.Write(buf)
		if err != nil {
			return err
		}
		if l <= 0 {
			// no progress and no error: bail out instead of looping forever
			return os.NewError("short write")
		}
		buf = buf[l:]
	}
	return nil
}
// Push delivers notification n to the device identified by delivery
// point s through the Apple gateway configured in sp. It opens a fresh
// TLS connection per call, writes one "enhanced format" (command 1)
// binary APNS frame, then waits briefly for an error response — APNS
// only replies on failure. Returns the frame's transaction id as a
// decimal string on success.
// NOTE: pre-Go1 APIs throughout (os.Error, strconv.Atoui,
// time.Seconds, SetReadTimeout).
func (p *APNSPushService) Push(sp *PushServiceProvider,
	s *DeliveryPoint,
	n *Notification) (string, os.Error) {
	// Client certificate/key paths come from the provider's fixed data.
	cert, err := tls.LoadX509KeyPair(sp.FixedData["cert"], sp.FixedData["key"])
	if err != nil {
		return "", NewInvalidPushServiceProviderError(sp)
	}
	conf := &tls.Config {
		Certificates: []tls.Certificate{cert},
	}
	conn, err := net.Dial("tcp", sp.VolatileData["addr"])
	if err != nil {
		return "", NewInvalidPushServiceProviderError(sp)
	}
	tlsconn := tls.Client(conn, conf)
	defer tlsconn.Close()
	err = tlsconn.Handshake()
	if err != nil {
		return "", NewInvalidPushServiceProviderError(sp)
	}
	// The device token is stored hex-encoded on the delivery point.
	devtoken := s.FixedData["devtoken"]
	btoken, err := hex.DecodeString(devtoken)
	if err != nil {
		return "", NewInvalidDeliveryPointError(sp, s)
	}
	bpayload := toAPNSPayload(n)
	if bpayload == nil {
		return "", NewInvalidNotification(sp, s, n)
	}
	// Frame layout: command(1) | id(4) | expiry(4) |
	// token-len(2) | token | payload-len(2) | payload, all big-endian.
	buffer := bytes.NewBuffer([]byte{})
	// command
	binary.Write(buffer, binary.BigEndian, uint8(1))
	// transaction id
	mid := atomic.AddUint32(&(p.nextid), 1)
	if smid, ok := n.Data["id"]; ok {
		// a caller-supplied numeric id overrides the auto-generated one
		imid, err := strconv.Atoui(smid)
		if err == nil {
			mid = uint32(imid)
		}
	}
	binary.Write(buffer, binary.BigEndian, mid)
	// Expiry: defaults to one hour from now, in epoch seconds.
	expiry := uint32(time.Seconds() + 60*60)
	if sexpiry, ok := n.Data["expiry"]; ok {
		uiexp, err := strconv.Atoui(sexpiry)
		if err == nil {
			expiry = uint32(uiexp)
		}
	}
	binary.Write(buffer, binary.BigEndian, expiry)
	// device token
	binary.Write(buffer, binary.BigEndian, uint16(len(btoken)))
	binary.Write(buffer, binary.BigEndian, btoken)
	// payload
	binary.Write(buffer, binary.BigEndian, uint16(len(bpayload)))
	binary.Write(buffer, binary.BigEndian, bpayload)
	pdu := buffer.Bytes()
	err = writen(tlsconn, pdu)
	if err != nil {
		return "", NewInvalidPushServiceProviderError(sp)
	}
	// Wait up to 0.5s (5e8 ns) for a 6-byte error response.
	tlsconn.SetReadTimeout(5E8)
	readb := [6]byte{}
	nr, err := tlsconn.Read(readb[:])
	/* TODO error handling */
	if nr > 0 {
		// readb[1] is the APNS status byte; status 2 is treated as a
		// delivery-point problem, everything else as a provider problem.
		// NOTE(review): confirm this mapping against the APNS spec.
		switch(readb[1]) {
		case 2:
			return "", NewInvalidDeliveryPointError(sp, s)
		default:
			return "", NewInvalidPushServiceProviderError(sp)
		}
	}
	return fmt.Sprintf("%d", mid), nil
}
APNS: code clean
/*
* Copyright 2011 Nan Deng
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package uniqush
import (
"crypto/tls"
"json"
"os"
"io"
"bytes"
"encoding/hex"
"encoding/binary"
"strconv"
"sync/atomic"
"time"
"fmt"
)
// APNSPushService implements the APNS (Apple Push Notification Service)
// push service type for uniqush.
type APNSPushService struct {
	nextid uint32 // last transaction id handed out; advanced atomically in Push
}
// init registers the APNS push service type with the global push
// service manager so it can be looked up by its name ("apns").
func init() {
	GetPushServiceManager().RegisterPushServiceType(NewAPNSPushService())
}
// NewAPNSPushService returns a fresh, zero-valued APNS push service.
func NewAPNSPushService() *APNSPushService {
	return &APNSPushService{}
}
// Name returns the unique name of this push service type.
func (p *APNSPushService) Name() string {
	const serviceName = "apns"
	return serviceName
}
func (p *APNSPushService) SetAsyncFailureProcessor(pfp PushFailureProcessor) {
}
// BuildPushServiceProviderFromMap constructs a PushServiceProvider from
// key/value pairs. Required keys: "service", "cert" (path to the TLS
// client certificate) and "key" (path to the private key). The optional
// "sandbox" == "true" selects Apple's sandbox gateway instead of the
// production gateway. (This file predates Go 1; os.Error/os.NewError
// are the pre-Go1 error APIs.)
func (p *APNSPushService) BuildPushServiceProviderFromMap(kv map[string]string) (*PushServiceProvider, os.Error) {
	psp := NewEmptyPushServiceProvider()
	if service, ok := kv["service"]; ok {
		psp.FixedData["service"] = service
	} else {
		return nil, os.NewError("NoService")
	}
	if cert, ok := kv["cert"]; ok {
		psp.FixedData["cert"] = cert
	} else {
		return nil, os.NewError("NoCertificate")
	}
	if key, ok := kv["key"]; ok {
		psp.FixedData["key"] = key
	} else {
		return nil, os.NewError("NoPrivateKey")
	}
	// The gateway address is volatile data: it depends on sandbox mode,
	// not on the provider's identity.
	if sandbox, ok := kv["sandbox"]; ok {
		if sandbox == "true" {
			psp.VolatileData["addr"] = "gateway.sandbox.push.apple.com:2195"
			return psp, nil
		}
	}
	psp.VolatileData["addr"] = "gateway.push.apple.com:2195"
	return psp, nil
}
// BuildDeliveryPointFromMap constructs a DeliveryPoint from key/value
// pairs. The keys "service", "subscriber" and "devtoken" are all
// required; the first missing key produces a descriptive error.
func (p *APNSPushService) BuildDeliveryPointFromMap(kv map[string]string) (*DeliveryPoint, os.Error) {
	dp := NewEmptyDeliveryPoint()
	required := []struct{ key, errmsg string }{
		{"service", "NoService"},
		{"subscriber", "NoSubscriber"},
		{"devtoken", "NoDevToken"},
	}
	for _, r := range required {
		value, present := kv[r.key]
		if !present {
			return nil, os.NewError(r.errmsg)
		}
		dp.FixedData[r.key] = value
	}
	return dp, nil
}
// toAPNSPayload converts a Notification into the JSON payload APNS
// expects: well-known keys ("msg", "badge", "sound", "img") are folded
// into the "aps" dictionary, "id" and "expiry" are consumed by the
// binary frame instead (see Push), and any other key is passed through
// as a custom top-level field. Returns nil if JSON marshalling fails.
func toAPNSPayload(n *Notification) []byte {
	payload := make(map[string]interface{})
	aps := make(map[string]interface{})
	alert := make(map[string]interface{})
	for k, v := range n.Data {
		switch (k) {
		case "msg":
			alert["body"] = v
		case "badge":
			// badge must be numeric; non-numeric values are dropped
			b, err := strconv.Atoi(v)
			if err != nil {
				continue
			} else {
				aps["badge"] = b
			}
		case "sound":
			aps["sound"] = v
		case "img":
			alert["launch-image"] = v
		case "id":
			// transaction id: encoded into the binary frame, not the payload
			continue
		case "expiry":
			// expiry: encoded into the binary frame, not the payload
			continue
		default:
			// unrecognized keys become custom payload fields
			payload[k] = v
		}
	}
	aps["alert"] = alert
	payload["aps"] = aps
	j, err := json.Marshal(payload)
	if err != nil {
		return nil
	}
	return j
}
// writen writes the whole of buf to w, looping over short writes.
// It returns the first error reported by w, or nil once every byte has
// been written.
//
// Fix: the original loop condition was "n >= 0", which spins forever if
// a writer returns (0, nil). Looping on the remaining length and
// failing a zero-progress write avoids the infinite loop.
func writen(w io.Writer, buf []byte) os.Error {
	for len(buf) > 0 {
		l, err := w.Write(buf)
		if err != nil {
			return err
		}
		if l <= 0 {
			// no progress and no error: bail out instead of looping forever
			return os.NewError("short write")
		}
		buf = buf[l:]
	}
	return nil
}
// Push delivers notification n to the device identified by delivery
// point s through the Apple gateway configured in sp. It opens a fresh
// TLS connection per call (tls.Dial handshakes implicitly; the explicit
// Handshake below is a no-op safety), writes one "enhanced format"
// (command 1) binary APNS frame, then waits briefly for an error
// response — APNS only replies on failure. Returns the frame's
// transaction id as a decimal string on success.
// NOTE: pre-Go1 APIs throughout (os.Error, strconv.Atoui,
// time.Seconds, SetReadTimeout).
func (p *APNSPushService) Push(sp *PushServiceProvider,
	s *DeliveryPoint,
	n *Notification) (string, os.Error) {
	// Client certificate/key paths come from the provider's fixed data.
	cert, err := tls.LoadX509KeyPair(sp.FixedData["cert"], sp.FixedData["key"])
	if err != nil {
		return "", NewInvalidPushServiceProviderError(sp)
	}
	conf := &tls.Config {
		Certificates: []tls.Certificate{cert},
	}
	tlsconn, err := tls.Dial("tcp", sp.VolatileData["addr"], conf)
	if err != nil {
		return "", NewInvalidPushServiceProviderError(sp)
	}
	defer tlsconn.Close()
	err = tlsconn.Handshake()
	if err != nil {
		return "", NewInvalidPushServiceProviderError(sp)
	}
	// The device token is stored hex-encoded on the delivery point.
	devtoken := s.FixedData["devtoken"]
	btoken, err := hex.DecodeString(devtoken)
	if err != nil {
		return "", NewInvalidDeliveryPointError(sp, s)
	}
	bpayload := toAPNSPayload(n)
	if bpayload == nil {
		return "", NewInvalidNotification(sp, s, n)
	}
	// Frame layout: command(1) | id(4) | expiry(4) |
	// token-len(2) | token | payload-len(2) | payload, all big-endian.
	buffer := bytes.NewBuffer([]byte{})
	// command
	binary.Write(buffer, binary.BigEndian, uint8(1))
	// transaction id
	mid := atomic.AddUint32(&(p.nextid), 1)
	if smid, ok := n.Data["id"]; ok {
		// a caller-supplied numeric id overrides the auto-generated one
		imid, err := strconv.Atoui(smid)
		if err == nil {
			mid = uint32(imid)
		}
	}
	binary.Write(buffer, binary.BigEndian, mid)
	// Expiry: defaults to one hour from now, in epoch seconds.
	expiry := uint32(time.Seconds() + 60*60)
	if sexpiry, ok := n.Data["expiry"]; ok {
		uiexp, err := strconv.Atoui(sexpiry)
		if err == nil {
			expiry = uint32(uiexp)
		}
	}
	binary.Write(buffer, binary.BigEndian, expiry)
	// device token
	binary.Write(buffer, binary.BigEndian, uint16(len(btoken)))
	binary.Write(buffer, binary.BigEndian, btoken)
	// payload
	binary.Write(buffer, binary.BigEndian, uint16(len(bpayload)))
	binary.Write(buffer, binary.BigEndian, bpayload)
	pdu := buffer.Bytes()
	err = writen(tlsconn, pdu)
	if err != nil {
		return "", NewInvalidPushServiceProviderError(sp)
	}
	// Wait up to 0.5s (5e8 ns) for a 6-byte error response.
	tlsconn.SetReadTimeout(5E8)
	readb := [6]byte{}
	nr, err := tlsconn.Read(readb[:])
	/* TODO error handling */
	if nr > 0 {
		// readb[1] is the APNS status byte; status 2 is treated as a
		// delivery-point problem, everything else as a provider problem.
		// NOTE(review): confirm this mapping against the APNS spec.
		switch(readb[1]) {
		case 2:
			return "", NewInvalidDeliveryPointError(sp, s)
		default:
			return "", NewInvalidPushServiceProviderError(sp)
		}
	}
	return fmt.Sprintf("%d", mid), nil
}
|
package mpb
import (
"bytes"
"fmt"
"io"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/vbauerster/mpb/decor"
)
const (
rLeft = iota
rFill
rTip
rEmpty
rRight
)
const (
formatLen = 5
etaAlpha = 0.25
)
type barRunes [formatLen]rune
// Bar represents a progress Bar
type Bar struct {
	priority int // render-order weight; defaults to the bar id (see newBar)
	index    int // assigned outside this file — NOTE(review): confirm semantics
	// pointer to running bar, which this bar should replace
	runningBar *Bar
	// completed is set from master Progress goroutine only
	completed        bool
	removeOnComplete bool // drop the bar from output once it completes
	// operateState serializes all bState access onto the serve goroutine
	operateState chan func(*bState)
	// done is closed by Bar's goroutine, after cacheState is written
	done chan struct{}
	// shutdown is closed from master Progress goroutine only
	shutdown   chan struct{}
	cacheState *bState // final state snapshot, valid once done is closed
}
type (
	// bState holds all mutable bar state. It is owned by the bar's
	// serve goroutine and must only be touched via operateState ops.
	bState struct {
		id                   int
		width                int
		runes                barRunes // [left, fill, tip, empty, right] (see rLeft..rRight)
		etaAlpha             float64  // EWMA weight for the per-item time estimate
		total                int64
		current              int64
		totalAutoIncrTrigger int64 // dynamic mode: grow total when within this many % of it
		totalAutoIncrBy      int64 // dynamic mode: amount to grow total by
		trimLeftSpace        bool  // omit the padding space left of the bar
		trimRightSpace       bool  // omit the padding space right of the bar
		toComplete           bool
		dynamic              bool // total may still change (see SetTotal/IncrBy)
		noBarOnComplete      bool // render only decorators once complete
		startTime            time.Time
		timeElapsed          time.Duration
		blockStartTime       time.Time // start of the current increment block
		timePerItem          time.Duration
		aDecorators          []decor.DecoratorFunc
		pDecorators          []decor.DecoratorFunc
		refill               *refill
		bufP, bufB, bufA     *bytes.Buffer // prepend / bar / append render buffers
		panicMsg             string
		// following options are assigned to the *Bar
		priority         int
		removeOnComplete bool
		runningBar       *Bar
	}
	// refill describes a rune used to pre-fill resumed progress up to till.
	refill struct {
		char rune
		till int64
	}
	// renderedState pairs a bar with its rendered line.
	renderedState struct {
		bar        *Bar
		reader     io.Reader
		toComplete bool
	}
)
// newBar constructs a Bar and starts its serving goroutine (registered
// on wg). Options are applied to the initial bState; priority,
// removeOnComplete and runningBar are then copied from the state onto
// the Bar itself, since the bState stays private to the goroutine.
func newBar(wg *sync.WaitGroup, id int, total int64, cancel <-chan struct{}, options ...BarOption) *Bar {
	if total <= 0 {
		// Placeholder for an unknown total: the current unix time acts
		// as an arbitrary large value — NOTE(review): confirm this
		// sentinel choice is intentional.
		total = time.Now().Unix()
	}
	s := &bState{
		id:       id,
		priority: id,
		total:    total,
		etaAlpha: etaAlpha,
	}
	for _, opt := range options {
		if opt != nil {
			opt(s)
		}
	}
	// Pre-size render buffers to the configured bar width.
	s.bufP = bytes.NewBuffer(make([]byte, 0, s.width))
	s.bufB = bytes.NewBuffer(make([]byte, 0, s.width))
	s.bufA = bytes.NewBuffer(make([]byte, 0, s.width))
	b := &Bar{
		priority:         s.priority,
		removeOnComplete: s.removeOnComplete,
		runningBar:       s.runningBar,
		operateState:     make(chan func(*bState)),
		done:             make(chan struct{}),
		shutdown:         make(chan struct{}),
	}
	if b.runningBar != nil {
		// a replacement bar renders in the slot of the bar it replaces
		b.priority = b.runningBar.priority
	}
	go b.serve(wg, s, cancel)
	return b
}
// RemoveAllPrependers drops every prepend decorator from the bar.
func (b *Bar) RemoveAllPrependers() {
	dropAll := func(s *bState) { s.pDecorators = nil }
	select {
	case b.operateState <- dropAll:
	case <-b.done:
	}
}
// RemoveAllAppenders drops every append decorator from the bar.
func (b *Bar) RemoveAllAppenders() {
	dropAll := func(s *bState) { s.aDecorators = nil }
	select {
	case b.operateState <- dropAll:
	case <-b.done:
	}
}
// ProxyReader wraps r so the bar can observe io operations, e.g. when
// the reader is consumed via io.Copy.
func (b *Bar) ProxyReader(r io.Reader) *Reader {
	return &Reader{r, b}
}
// Increment advances the bar by exactly one unit; it is shorthand for
// IncrBy(1).
func (b *Bar) Increment() {
	b.IncrBy(1)
}
// ResumeFill marks the first till units of progress to be drawn with
// the rune r instead of the normal fill rune. Values below 1 are ignored.
func (b *Bar) ResumeFill(r rune, till int64) {
	if till < 1 {
		return
	}
	setRefill := func(s *bState) { s.refill = &refill{r, till} }
	select {
	case b.operateState <- setRefill:
	case <-b.done:
	}
}
// NumOfAppenders reports how many append decorators are installed.
func (b *Bar) NumOfAppenders() int {
	res := make(chan int, 1)
	query := func(s *bState) { res <- len(s.aDecorators) }
	select {
	case b.operateState <- query:
		return <-res
	case <-b.done:
		return len(b.cacheState.aDecorators)
	}
}
// NumOfPrependers reports how many prepend decorators are installed.
func (b *Bar) NumOfPrependers() int {
	res := make(chan int, 1)
	query := func(s *bState) { res <- len(s.pDecorators) }
	select {
	case b.operateState <- query:
		return <-res
	case <-b.done:
		return len(b.cacheState.pDecorators)
	}
}
// ID returns the id of the bar.
func (b *Bar) ID() int {
	res := make(chan int, 1)
	query := func(s *bState) { res <- s.id }
	select {
	case b.operateState <- query:
		return <-res
	case <-b.done:
		return b.cacheState.id
	}
}
// Current returns the bar's current progress value, i.e. the sum of
// all increments so far.
func (b *Bar) Current() int64 {
	res := make(chan int64, 1)
	query := func(s *bState) { res <- s.current }
	select {
	case b.operateState <- query:
		return <-res
	case <-b.done:
		return b.cacheState.current
	}
}
// Total returns the bar's total value.
func (b *Bar) Total() int64 {
	res := make(chan int64, 1)
	query := func(s *bState) { res <- s.total }
	select {
	case b.operateState <- query:
		return <-res
	case <-b.done:
		return b.cacheState.total
	}
}
// SetTotal sets the total dynamically. Pass final == true on the very
// last call, once the total is definitely known; until then the bar
// stays in dynamic mode.
func (b *Bar) SetTotal(total int64, final bool) {
	update := func(s *bState) {
		s.total = total
		s.dynamic = !final
	}
	select {
	case b.operateState <- update:
	case <-b.done:
	}
}
// IncrBy increments the progress bar by n (values below 1 are ignored).
// On the first increment the timing clocks are started; afterwards each
// increment updates the per-item time estimate and elapsed time. In
// dynamic mode the total is auto-grown as progress approaches it;
// otherwise reaching the total marks the bar complete.
//
// Fix: the original captured time.Now() twice ("next" before the branch
// and "now" inside it), so the block start time and the elapsed-time
// computation used slightly different instants. A single timestamp,
// taken once up front, is used for both.
func (b *Bar) IncrBy(n int) {
	if n < 1 {
		return
	}
	now := time.Now()
	select {
	case b.operateState <- func(s *bState) {
		if s.toComplete {
			return
		}
		if s.current == 0 {
			// first increment starts the clocks
			s.startTime = now
			s.blockStartTime = now
		} else {
			s.updateTimePerItemEstimate(n, now, now)
			s.timeElapsed = now.Sub(s.startTime)
		}
		s.current += int64(n)
		if s.dynamic {
			// auto-grow the total when progress is close to it
			curp := decor.CalcPercentage(s.total, s.current, 100)
			if 100-curp <= s.totalAutoIncrTrigger {
				s.total += s.totalAutoIncrBy
			}
		} else if s.current >= s.total {
			s.current = s.total
			s.toComplete = true
		}
	}:
	case <-b.done:
	}
}
// Completed reports whether the bar has reached its completed state.
func (b *Bar) Completed() bool {
	res := make(chan bool, 1)
	query := func(s *bState) { res <- s.toComplete }
	select {
	case b.operateState <- query:
		return <-res
	case <-b.done:
		return b.cacheState.toComplete
	}
}
// serve is the bar's event loop, run in its own goroutine. All state
// mutations arrive as closures on operateState, so bState is only ever
// touched from this goroutine.
func (b *Bar) serve(wg *sync.WaitGroup, s *bState, cancel <-chan struct{}) {
	defer wg.Done()
	for {
		select {
		case op := <-b.operateState:
			op(s)
		case <-cancel:
			s.toComplete = true
			// a nil channel blocks forever, so cancellation fires once
			cancel = nil
		case <-b.shutdown:
			// publish the final state, then signal done to all readers
			b.cacheState = s
			close(b.done)
			return
		}
	}
}
// render asynchronously draws the bar into an io.Reader and delivers it
// (wrapped in a renderedState) on the returned buffered channel. While
// the bar is live, drawing runs on the bar's goroutine; once the bar is
// done, the cached final state is drawn instead.
func (b *Bar) render(debugOut io.Writer, tw int, pSyncer, aSyncer *widthSyncer) <-chan *renderedState {
	ch := make(chan *renderedState, 1)
	go func() {
		select {
		case b.operateState <- func(s *bState) {
			var r io.Reader
			defer func() {
				// recovering if external decorators panic
				if p := recover(); p != nil {
					s.panicMsg = fmt.Sprintf("panic: %v", p)
					// disable the offending decorators and finish the bar
					s.pDecorators = nil
					s.aDecorators = nil
					s.toComplete = true
					// truncate panic msg to one tw line, if necessary
					r = strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", tw), s.panicMsg))
					fmt.Fprintf(debugOut, "%s %s bar id %02d %v\n", "[mpb]", time.Now(), s.id, s.panicMsg)
				}
				ch <- &renderedState{b, r, s.toComplete}
			}()
			r = s.draw(tw, pSyncer, aSyncer)
		}:
		case <-b.done:
			// bar already finished: draw from the immutable cached state
			s := b.cacheState
			var r io.Reader
			if s.panicMsg != "" {
				r = strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", tw), s.panicMsg))
			} else {
				r = s.draw(tw, pSyncer, aSyncer)
			}
			ch <- &renderedState{b, r, s.toComplete}
		}
	}()
	return ch
}
// draw renders the full bar line (prepend decorators, bar graphic,
// append decorators) into an io.Reader. If the line would exceed
// termWidth, the bar section is re-rendered narrower to fit. Must run
// on the bar's goroutine (or on the cached state after completion).
func (s *bState) draw(termWidth int, pSyncer, aSyncer *widthSyncer) io.Reader {
	defer s.bufA.WriteByte('\n')
	if termWidth <= 0 {
		termWidth = s.width
	}
	stat := newStatistics(s)
	// render prepend functions to the left of the bar
	for i, f := range s.pDecorators {
		s.bufP.WriteString(f(stat, pSyncer.Accumulator[i], pSyncer.Distributor[i]))
	}
	// render append functions to the right of the bar
	for i, f := range s.aDecorators {
		s.bufA.WriteString(f(stat, aSyncer.Accumulator[i], aSyncer.Distributor[i]))
	}
	// widths are measured in runes, not bytes
	prependCount := utf8.RuneCount(s.bufP.Bytes())
	appendCount := utf8.RuneCount(s.bufA.Bytes())
	if s.toComplete && s.noBarOnComplete {
		return io.MultiReader(s.bufP, s.bufA)
	}
	s.fillBar(s.width)
	barCount := utf8.RuneCount(s.bufB.Bytes())
	totalCount := prependCount + barCount + appendCount
	if spaceCount := 0; totalCount > termWidth {
		// shrink the bar to fit the terminal, accounting for the
		// optional padding spaces on either side of the bar
		if !s.trimLeftSpace {
			spaceCount++
		}
		if !s.trimRightSpace {
			spaceCount++
		}
		s.fillBar(termWidth - prependCount - appendCount - spaceCount)
	}
	return io.MultiReader(s.bufP, s.bufB, s.bufA)
}
// fillBar renders the bar graphic of the given overall width into
// s.bufB: optional left padding, left edge, refill runes (when
// resuming), fill runes, a tip at the progress boundary, empty runes,
// right edge and optional right padding. Widths of 2 or less render
// just the edges.
func (s *bState) fillBar(width int) {
	defer func() {
		// always close with the right edge (and padding), including on
		// the early return below
		s.bufB.WriteRune(s.runes[rRight])
		if !s.trimRightSpace {
			s.bufB.WriteByte(' ')
		}
	}()
	s.bufB.Reset()
	if !s.trimLeftSpace {
		s.bufB.WriteByte(' ')
	}
	s.bufB.WriteRune(s.runes[rLeft])
	if width <= 2 {
		return
	}
	// bar s.width without leftEnd and rightEnd runes
	barWidth := width - 2
	completedWidth := decor.CalcPercentage(s.total, s.current, int64(barWidth))
	if s.refill != nil {
		till := decor.CalcPercentage(s.total, s.refill.till, int64(barWidth))
		// append refill rune
		var i int64
		for i = 0; i < till; i++ {
			s.bufB.WriteRune(s.refill.char)
		}
		for i = till; i < completedWidth; i++ {
			s.bufB.WriteRune(s.runes[rFill])
		}
	} else {
		var i int64
		for i = 0; i < completedWidth; i++ {
			s.bufB.WriteRune(s.runes[rFill])
		}
	}
	// replace the last written rune with the tip, unless the bar is
	// completely empty or completely full
	if completedWidth < int64(barWidth) && completedWidth > 0 {
		_, size := utf8.DecodeLastRune(s.bufB.Bytes())
		s.bufB.Truncate(s.bufB.Len() - size)
		s.bufB.WriteRune(s.runes[rTip])
	}
	for i := completedWidth; i < int64(barWidth); i++ {
		s.bufB.WriteRune(s.runes[rEmpty])
	}
}
// updateTimePerItemEstimate folds the duration of the block that just
// ended (blockStartTime..now, covering amount items) into the
// exponentially weighted per-item estimate (weight s.etaAlpha on the
// newest sample) and starts a new block at next.
func (s *bState) updateTimePerItemEstimate(amount int, now, next time.Time) {
	lastBlockTime := now.Sub(s.blockStartTime)
	lastItemEstimate := float64(lastBlockTime) / float64(amount)
	s.timePerItem = time.Duration((s.etaAlpha * lastItemEstimate) + (1-s.etaAlpha)*float64(s.timePerItem))
	s.blockStartTime = next
}
// newStatistics produces a snapshot of the bar state for consumption
// by decorator functions.
func newStatistics(s *bState) *decor.Statistics {
	stat := new(decor.Statistics)
	stat.ID = s.id
	stat.Completed = s.toComplete
	stat.Total = s.total
	stat.Current = s.current
	stat.StartTime = s.startTime
	stat.TimeElapsed = s.timeElapsed
	stat.TimePerItemEstimate = s.timePerItem
	return stat
}
// strToBarRunes decodes up to formatLen runes of format into a barRunes
// array; any extra runes are ignored.
//
// Fix: the original loop had no upper bound on i, so a format string
// with more than formatLen runes indexed past the array and panicked.
func strToBarRunes(format string) (array barRunes) {
	for i, n := 0, 0; len(format) > 0 && i < formatLen; i++ {
		array[i], n = utf8.DecodeRuneInString(format)
		format = format[n:]
	}
	return
}
refactoring eta vars
package mpb
import (
"bytes"
"fmt"
"io"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/vbauerster/mpb/decor"
)
const (
rLeft = iota
rFill
rTip
rEmpty
rRight
)
const (
formatLen = 5
etaAlpha = 0.25
)
type barRunes [formatLen]rune
// Bar represents a progress Bar
type Bar struct {
priority int
index int
// pointer to running bar, which this bar should replace
runningBar *Bar
// completed is set from master Progress goroutine only
completed bool
removeOnComplete bool
operateState chan func(*bState)
// done is closed by Bar's goroutine, after cacheState is written
done chan struct{}
// shutdown is closed from master Progress goroutine only
shutdown chan struct{}
cacheState *bState
}
type (
bState struct {
id int
width int
runes barRunes
etaAlpha float64
total int64
current int64
totalAutoIncrTrigger int64
totalAutoIncrBy int64
trimLeftSpace bool
trimRightSpace bool
toComplete bool
dynamic bool
noBarOnComplete bool
startTime time.Time
timeElapsed time.Duration
blockStartTime time.Time
timePerItem time.Duration
aDecorators []decor.DecoratorFunc
pDecorators []decor.DecoratorFunc
refill *refill
bufP, bufB, bufA *bytes.Buffer
panicMsg string
// following options are assigned to the *Bar
priority int
removeOnComplete bool
runningBar *Bar
}
refill struct {
char rune
till int64
}
renderedState struct {
bar *Bar
reader io.Reader
toComplete bool
}
)
func newBar(wg *sync.WaitGroup, id int, total int64, cancel <-chan struct{}, options ...BarOption) *Bar {
if total <= 0 {
total = time.Now().Unix()
}
s := &bState{
id: id,
priority: id,
total: total,
etaAlpha: etaAlpha,
}
for _, opt := range options {
if opt != nil {
opt(s)
}
}
s.bufP = bytes.NewBuffer(make([]byte, 0, s.width))
s.bufB = bytes.NewBuffer(make([]byte, 0, s.width))
s.bufA = bytes.NewBuffer(make([]byte, 0, s.width))
b := &Bar{
priority: s.priority,
removeOnComplete: s.removeOnComplete,
runningBar: s.runningBar,
operateState: make(chan func(*bState)),
done: make(chan struct{}),
shutdown: make(chan struct{}),
}
if b.runningBar != nil {
b.priority = b.runningBar.priority
}
go b.serve(wg, s, cancel)
return b
}
// RemoveAllPrependers removes all prepend functions
func (b *Bar) RemoveAllPrependers() {
select {
case b.operateState <- func(s *bState) { s.pDecorators = nil }:
case <-b.done:
}
}
// RemoveAllAppenders removes all append functions
func (b *Bar) RemoveAllAppenders() {
select {
case b.operateState <- func(s *bState) { s.aDecorators = nil }:
case <-b.done:
}
}
// ProxyReader wrapper for io operations, like io.Copy
func (b *Bar) ProxyReader(r io.Reader) *Reader {
return &Reader{r, b}
}
// Increment is a shorthand for b.IncrBy(1)
func (b *Bar) Increment() {
b.IncrBy(1)
}
// ResumeFill fills bar with different r rune,
// from 0 to till amount of progress.
func (b *Bar) ResumeFill(r rune, till int64) {
if till < 1 {
return
}
select {
case b.operateState <- func(s *bState) { s.refill = &refill{r, till} }:
case <-b.done:
}
}
// NumOfAppenders returns current number of append decorators
func (b *Bar) NumOfAppenders() int {
result := make(chan int, 1)
select {
case b.operateState <- func(s *bState) { result <- len(s.aDecorators) }:
return <-result
case <-b.done:
return len(b.cacheState.aDecorators)
}
}
// NumOfPrependers returns current number of prepend decorators
func (b *Bar) NumOfPrependers() int {
result := make(chan int, 1)
select {
case b.operateState <- func(s *bState) { result <- len(s.pDecorators) }:
return <-result
case <-b.done:
return len(b.cacheState.pDecorators)
}
}
// ID returs id of the bar
func (b *Bar) ID() int {
result := make(chan int, 1)
select {
case b.operateState <- func(s *bState) { result <- s.id }:
return <-result
case <-b.done:
return b.cacheState.id
}
}
// Current returns bar's current number, in other words sum of all increments.
func (b *Bar) Current() int64 {
result := make(chan int64, 1)
select {
case b.operateState <- func(s *bState) { result <- s.current }:
return <-result
case <-b.done:
return b.cacheState.current
}
}
// Total returns bar's total number.
func (b *Bar) Total() int64 {
result := make(chan int64, 1)
select {
case b.operateState <- func(s *bState) { result <- s.total }:
return <-result
case <-b.done:
return b.cacheState.total
}
}
// SetTotal sets total dynamically. The final param indicates the very last set,
// in other words you should set it to true when total is determined.
func (b *Bar) SetTotal(total int64, final bool) {
select {
case b.operateState <- func(s *bState) {
s.total = total
s.dynamic = !final
}:
case <-b.done:
}
}
// IncrBy increments progress bar by amount of n (values below 1 are
// ignored). The timestamp is captured once, before the select, so the
// block start time and the elapsed-time computation agree. On the first
// increment the timing clocks are started; in dynamic mode the total is
// auto-grown as progress approaches it, otherwise reaching the total
// marks the bar complete.
func (b *Bar) IncrBy(n int) {
	if n < 1 {
		return
	}
	now := time.Now()
	select {
	case b.operateState <- func(s *bState) {
		if s.toComplete {
			return
		}
		if s.current == 0 {
			// first increment starts the clocks
			s.startTime = now
			s.blockStartTime = now
		} else {
			s.updateTimePerItemEstimate(n, now)
			s.timeElapsed = now.Sub(s.startTime)
		}
		s.current += int64(n)
		if s.dynamic {
			// auto-grow the total when progress is close to it
			curp := decor.CalcPercentage(s.total, s.current, 100)
			if 100-curp <= s.totalAutoIncrTrigger {
				s.total += s.totalAutoIncrBy
			}
		} else if s.current >= s.total {
			s.current = s.total
			s.toComplete = true
		}
	}:
	case <-b.done:
	}
}
// Completed reports whether the bar is in completed state
func (b *Bar) Completed() bool {
result := make(chan bool, 1)
select {
case b.operateState <- func(s *bState) { result <- s.toComplete }:
return <-result
case <-b.done:
return b.cacheState.toComplete
}
}
func (b *Bar) serve(wg *sync.WaitGroup, s *bState, cancel <-chan struct{}) {
defer wg.Done()
for {
select {
case op := <-b.operateState:
op(s)
case <-cancel:
s.toComplete = true
cancel = nil
case <-b.shutdown:
b.cacheState = s
close(b.done)
return
}
}
}
func (b *Bar) render(debugOut io.Writer, tw int, pSyncer, aSyncer *widthSyncer) <-chan *renderedState {
ch := make(chan *renderedState, 1)
go func() {
select {
case b.operateState <- func(s *bState) {
var r io.Reader
defer func() {
// recovering if external decorators panic
if p := recover(); p != nil {
s.panicMsg = fmt.Sprintf("panic: %v", p)
s.pDecorators = nil
s.aDecorators = nil
s.toComplete = true
// truncate panic msg to one tw line, if necessary
r = strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", tw), s.panicMsg))
fmt.Fprintf(debugOut, "%s %s bar id %02d %v\n", "[mpb]", time.Now(), s.id, s.panicMsg)
}
ch <- &renderedState{b, r, s.toComplete}
}()
r = s.draw(tw, pSyncer, aSyncer)
}:
case <-b.done:
s := b.cacheState
var r io.Reader
if s.panicMsg != "" {
r = strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", tw), s.panicMsg))
} else {
r = s.draw(tw, pSyncer, aSyncer)
}
ch <- &renderedState{b, r, s.toComplete}
}
}()
return ch
}
func (s *bState) draw(termWidth int, pSyncer, aSyncer *widthSyncer) io.Reader {
defer s.bufA.WriteByte('\n')
if termWidth <= 0 {
termWidth = s.width
}
stat := newStatistics(s)
// render prepend functions to the left of the bar
for i, f := range s.pDecorators {
s.bufP.WriteString(f(stat, pSyncer.Accumulator[i], pSyncer.Distributor[i]))
}
for i, f := range s.aDecorators {
s.bufA.WriteString(f(stat, aSyncer.Accumulator[i], aSyncer.Distributor[i]))
}
prependCount := utf8.RuneCount(s.bufP.Bytes())
appendCount := utf8.RuneCount(s.bufA.Bytes())
if s.toComplete && s.noBarOnComplete {
return io.MultiReader(s.bufP, s.bufA)
}
s.fillBar(s.width)
barCount := utf8.RuneCount(s.bufB.Bytes())
totalCount := prependCount + barCount + appendCount
if spaceCount := 0; totalCount > termWidth {
if !s.trimLeftSpace {
spaceCount++
}
if !s.trimRightSpace {
spaceCount++
}
s.fillBar(termWidth - prependCount - appendCount - spaceCount)
}
return io.MultiReader(s.bufP, s.bufB, s.bufA)
}
func (s *bState) fillBar(width int) {
defer func() {
s.bufB.WriteRune(s.runes[rRight])
if !s.trimRightSpace {
s.bufB.WriteByte(' ')
}
}()
s.bufB.Reset()
if !s.trimLeftSpace {
s.bufB.WriteByte(' ')
}
s.bufB.WriteRune(s.runes[rLeft])
if width <= 2 {
return
}
// bar s.width without leftEnd and rightEnd runes
barWidth := width - 2
completedWidth := decor.CalcPercentage(s.total, s.current, int64(barWidth))
if s.refill != nil {
till := decor.CalcPercentage(s.total, s.refill.till, int64(barWidth))
// append refill rune
var i int64
for i = 0; i < till; i++ {
s.bufB.WriteRune(s.refill.char)
}
for i = till; i < completedWidth; i++ {
s.bufB.WriteRune(s.runes[rFill])
}
} else {
var i int64
for i = 0; i < completedWidth; i++ {
s.bufB.WriteRune(s.runes[rFill])
}
}
if completedWidth < int64(barWidth) && completedWidth > 0 {
_, size := utf8.DecodeLastRune(s.bufB.Bytes())
s.bufB.Truncate(s.bufB.Len() - size)
s.bufB.WriteRune(s.runes[rTip])
}
for i := completedWidth; i < int64(barWidth); i++ {
s.bufB.WriteRune(s.runes[rEmpty])
}
}
// updateTimePerItemEstimate folds the duration of the block that just
// ended (blockStartTime..now, covering amount items) into the
// exponentially weighted per-item estimate (weight s.etaAlpha on the
// newest sample) and starts a new block at now.
func (s *bState) updateTimePerItemEstimate(amount int, now time.Time) {
	lastBlockTime := now.Sub(s.blockStartTime)
	lastItemEstimate := float64(lastBlockTime) / float64(amount)
	s.timePerItem = time.Duration((s.etaAlpha * lastItemEstimate) + (1-s.etaAlpha)*float64(s.timePerItem))
	s.blockStartTime = now
}
// newStatistics produces a snapshot of the bar state for consumption
// by decorator functions.
func newStatistics(s *bState) *decor.Statistics {
	stat := new(decor.Statistics)
	stat.ID = s.id
	stat.Completed = s.toComplete
	stat.Total = s.total
	stat.Current = s.current
	stat.StartTime = s.startTime
	stat.TimeElapsed = s.timeElapsed
	stat.TimePerItemEstimate = s.timePerItem
	return stat
}
// strToBarRunes decodes up to formatLen runes of format into a barRunes
// array; any extra runes are ignored.
//
// Fix: the original loop had no upper bound on i, so a format string
// with more than formatLen runes indexed past the array and panicked.
func strToBarRunes(format string) (array barRunes) {
	for i, n := 0, 0; len(format) > 0 && i < formatLen; i++ {
		array[i], n = utf8.DecodeRuneInString(format)
		format = format[n:]
	}
	return
}
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"fmt"
"io/ioutil"
"net/url"
"path/filepath"
"sync"
"time"
html_template "html/template"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/strutil"
)
// Constants for instrumentation.
const namespace = "prometheus"
var (
evalDuration = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Namespace: namespace,
Name: "rule_evaluation_duration_seconds",
Help: "The duration for a rule to execute.",
},
[]string{"rule_type"},
)
evalFailures = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_evaluation_failures_total",
Help: "The total number of rule evaluation failures.",
},
[]string{"rule_type"},
)
evalTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_evaluations_total",
Help: "The total number of rule evaluations.",
},
[]string{"rule_type"},
)
iterationDuration = prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Name: "evaluator_duration_seconds",
Help: "The duration of rule group evaluations.",
Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
})
iterationsSkipped = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "evaluator_iterations_skipped_total",
Help: "The total number of rule group evaluations skipped due to throttled metric storage.",
})
iterationsMissed = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "evaluator_iterations_missed_total",
Help: "The total number of rule group evaluations missed due to slow rule group evaluation.",
})
iterationsScheduled = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "evaluator_iterations_total",
Help: "The total number of scheduled rule group evaluations, whether executed, missed or skipped.",
})
)
// init pre-creates the per-rule-type children of the labelled metrics
// (so both label values exist at zero before any evaluation) and
// registers all collectors with the default registry.
//
// Fix: evalTotal was created and pre-labelled above but never
// registered, unlike every sibling metric; register it as well.
func init() {
	evalTotal.WithLabelValues(string(ruleTypeAlert))
	evalTotal.WithLabelValues(string(ruleTypeRecording))
	evalFailures.WithLabelValues(string(ruleTypeAlert))
	evalFailures.WithLabelValues(string(ruleTypeRecording))
	prometheus.MustRegister(iterationDuration)
	prometheus.MustRegister(iterationsScheduled)
	prometheus.MustRegister(iterationsSkipped)
	prometheus.MustRegister(iterationsMissed)
	prometheus.MustRegister(evalFailures)
	prometheus.MustRegister(evalDuration)
	prometheus.MustRegister(evalTotal)
}
// ruleType distinguishes alerting rules from recording rules; its
// string value is used as the "rule_type" metric label.
type ruleType string

const (
	// Typed constants: the originals were untyped strings, so nothing
	// stopped accidental mixing with plain string values. The
	// string(...) conversions used elsewhere remain valid.
	ruleTypeAlert     ruleType = "alerting"
	ruleTypeRecording ruleType = "recording"
)
// A Rule encapsulates a vector expression which is evaluated at a specified
// interval and acted upon (currently either recorded or used for alerting).
type Rule interface {
	// Name returns the name of the rule.
	Name() string
	// Eval evaluates the rule, including any associated recording or alerting actions.
	Eval(context.Context, time.Time, *promql.Engine, *url.URL) (promql.Vector, error)
	// String returns a human-readable string representation of the rule.
	String() string
	// HTMLSnippet returns a human-readable string representation of the rule,
	// decorated with HTML elements for use in the web frontend.
	HTMLSnippet(pathPrefix string) html_template.HTML
}
// Group is a set of rules that have a logical relation.
type Group struct {
	name     string        // group identifier; also used for the scheduling offset fingerprint
	interval time.Duration // evaluation interval
	rules    []Rule
	opts     *ManagerOptions

	done       chan struct{} // closed by stop() to request termination
	terminated chan struct{} // closed by run() when it has fully exited
}
// NewGroup makes a new Group with the given name, options, and rules.
// The done/terminated channels used for shutdown signaling are created here.
func NewGroup(name string, interval time.Duration, rules []Rule, opts *ManagerOptions) *Group {
	g := new(Group)
	g.name = name
	g.interval = interval
	g.rules = rules
	g.opts = opts
	g.done = make(chan struct{})
	g.terminated = make(chan struct{})
	return g
}
// run executes the group's evaluation loop until stop() is called.
// It first sleeps until the group's consistently slotted start time,
// then evaluates once per interval, counting iterations that were
// missed because an evaluation overran the ticker.
func (g *Group) run() {
	defer close(g.terminated)

	// Wait an initial amount to have consistently slotted intervals.
	select {
	case <-time.After(g.offset()):
	case <-g.done:
		return
	}

	iter := func() {
		iterationsScheduled.Inc()
		start := time.Now()
		g.Eval()
		iterationDuration.Observe(time.Since(start).Seconds())
	}
	lastTriggered := time.Now()
	iter()

	tick := time.NewTicker(g.interval)
	defer tick.Stop()

	for {
		// The nested select gives the done channel priority over a
		// pending tick, so shutdown is not delayed by a ready ticker.
		select {
		case <-g.done:
			return
		default:
			select {
			case <-g.done:
				return
			case <-tick.C:
				// If more than one full interval elapsed since the last
				// evaluation, the intermediate ticks were dropped; count
				// them as missed (and scheduled) iterations.
				missed := (time.Since(lastTriggered).Nanoseconds() / g.interval.Nanoseconds()) - 1
				if missed > 0 {
					iterationsMissed.Add(float64(missed))
					iterationsScheduled.Add(float64(missed))
				}
				lastTriggered = time.Now()
				iter()
			}
		}
	}
}
// stop signals the run loop to terminate and blocks until it has exited.
// It must be called at most once per group: closing g.done twice panics.
func (g *Group) stop() {
	close(g.done)
	<-g.terminated
}
// fingerprint returns a stable hash derived from the group's name; it is
// used to spread groups' evaluation start times across the interval.
func (g *Group) fingerprint() model.Fingerprint {
	return model.LabelSet{"name": model.LabelValue(g.name)}.Fingerprint()
}
// offset returns the duration until the next consistently slotted
// evaluation time. The slot within the interval is derived from the
// group's name fingerprint, so distinct groups start at distinct offsets.
func (g *Group) offset() time.Duration {
	var (
		now  = time.Now().UnixNano()
		ivl  = int64(g.interval)
		slot = int64(uint64(g.fingerprint()) % uint64(g.interval))
	)
	next := now - now%ivl + slot
	if next < now {
		next += ivl
	}
	return time.Duration(next - now)
}
// copyState copies the alerting rule state from the given group. For every
// pair of equal alerting rules, the active alerts of the old rule are
// carried over so firing alerts survive a configuration reload.
func (g *Group) copyState(from *Group) {
	for _, fromRule := range from.rules {
		far, ok := fromRule.(*AlertingRule)
		if !ok {
			continue
		}
		for _, rule := range g.rules {
			ar, ok := rule.(*AlertingRule)
			if !ok {
				continue
			}
			// TODO(fabxc): forbid same alert definitions that are not unique by
			// at least one static label or alertname?
			if far.equal(ar) {
				for fp, a := range far.active {
					ar.active[fp] = a
				}
			}
		}
	}
}
// typeForRule maps a concrete rule implementation to its metric label
// value; it panics on any implementation it does not know about.
func typeForRule(r Rule) ruleType {
	if _, ok := r.(*AlertingRule); ok {
		return ruleTypeAlert
	}
	if _, ok := r.(*RecordingRule); ok {
		return ruleTypeRecording
	}
	panic(fmt.Errorf("unknown rule type: %T", r))
}
// Eval runs a single evaluation cycle in which all rules are evaluated in parallel.
// In the future a single group will be evaluated sequentially to properly handle
// rule dependency.
func (g *Group) Eval() {
	var (
		now = time.Now()
		wg  sync.WaitGroup
	)
	for _, rule := range g.rules {
		rtyp := string(typeForRule(rule))
		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule Rule) {
			defer wg.Done()
			// Observe this rule's evaluation duration, labeled by type.
			defer func(t time.Time) {
				evalDuration.WithLabelValues(rtyp).Observe(time.Since(t).Seconds())
			}(time.Now())
			evalTotal.WithLabelValues(rtyp).Inc()
			vector, err := rule.Eval(g.opts.Context, now, g.opts.QueryEngine, g.opts.ExternalURL)
			if err != nil {
				// Canceled queries are intentional termination of queries. This normally
				// happens on shutdown and thus we skip logging of any errors here.
				if _, ok := err.(promql.ErrQueryCanceled); !ok {
					log.Warnf("Error while evaluating rule %q: %s", rule, err)
				}
				evalFailures.WithLabelValues(rtyp).Inc()
				return
			}
			// Alert notifications are sent before the results are persisted.
			if ar, ok := rule.(*AlertingRule); ok {
				g.sendAlerts(ar)
			}
			var (
				numOutOfOrder = 0
				numDuplicates = 0
			)
			app, err := g.opts.Appendable.Appender()
			if err != nil {
				log.With("err", err).Warn("creating appender failed")
				return
			}
			for _, s := range vector {
				if _, err := app.Add(s.Metric, s.T, s.V); err != nil {
					switch err {
					// Out-of-order and duplicate samples are only counted here and
					// summarized below to avoid one warning per sample.
					case storage.ErrOutOfOrderSample:
						numOutOfOrder++
						log.With("sample", s).With("err", err).Debug("Rule evaluation result discarded")
					case storage.ErrDuplicateSampleForTimestamp:
						numDuplicates++
						log.With("sample", s).With("err", err).Debug("Rule evaluation result discarded")
					default:
						log.With("sample", s).With("err", err).Warn("Rule evaluation result discarded")
					}
				}
			}
			if numOutOfOrder > 0 {
				log.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order result from rule evaluation")
			}
			if numDuplicates > 0 {
				log.With("numDropped", numDuplicates).Warn("Error on ingesting results from rule evaluation with different value but same timestamp")
			}
			if err := app.Commit(); err != nil {
				log.With("err", err).Warn("rule sample appending failed")
			}
		}(rule)
	}
	wg.Wait()
}
// sendAlerts sends alert notifications for the given rule.
// Pending alerts are skipped; only firing (and resolved) alerts are
// forwarded to the notifier. The error return is currently always nil.
func (g *Group) sendAlerts(rule *AlertingRule) error {
	var alerts []*notifier.Alert
	for _, alert := range rule.currentAlerts() {
		// Only send actually firing alerts.
		if alert.State == StatePending {
			continue
		}
		a := &notifier.Alert{
			// The alert only starts firing once the hold duration has passed.
			StartsAt:     alert.ActiveAt.Add(rule.holdDuration),
			Labels:       alert.Labels,
			Annotations:  alert.Annotations,
			GeneratorURL: g.opts.ExternalURL.String() + strutil.GraphLinkForExpression(rule.vector.String()),
		}
		if !alert.ResolvedAt.IsZero() {
			a.EndsAt = alert.ResolvedAt
		}
		alerts = append(alerts, a)
	}
	if len(alerts) > 0 {
		g.opts.Notifier.Send(alerts...)
	}
	return nil
}
// The Manager manages recording and alerting rules.
type Manager struct {
	opts   *ManagerOptions
	groups map[string]*Group // active groups, keyed by group name
	mtx    sync.RWMutex      // guards groups
	block  chan struct{}     // closed by Run() to release waiting groups
}

// Appendable returns an Appender to which evaluation results are written.
type Appendable interface {
	Appender() (storage.Appender, error)
}

// ManagerOptions bundles options for the Manager.
type ManagerOptions struct {
	ExternalURL *url.URL
	QueryEngine *promql.Engine
	Context     context.Context
	Notifier    *notifier.Notifier
	Appendable  Appendable
}
// NewManager returns an implementation of Manager, ready to be started
// by calling the Run method. Groups do not evaluate until Run is called.
func NewManager(o *ManagerOptions) *Manager {
	return &Manager{
		groups: map[string]*Group{},
		opts:   o,
		block:  make(chan struct{}),
	}
}
// Run starts processing of the rule manager. Closing the block channel
// releases every group goroutine waiting in ApplyConfig to begin running.
func (m *Manager) Run() {
	close(m.block)
}
// Stop the rule manager's rule evaluation cycles. Blocks until every
// group's run loop has terminated; the write lock is held throughout.
func (m *Manager) Stop() {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	log.Info("Stopping rule manager...")

	for _, eg := range m.groups {
		eg.stop()
	}

	log.Info("Rule manager stopped.")
}
// ApplyConfig updates the rule manager's state as the config requires. If
// loading the new rules failed the old rule set is restored.
func (m *Manager) ApplyConfig(conf *config.Config) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Get all rule files and load the groups they define.
	var files []string
	for _, pat := range conf.RuleFiles {
		fs, err := filepath.Glob(pat)
		if err != nil {
			// The only error can be a bad pattern.
			return fmt.Errorf("error retrieving rule files for %s: %s", pat, err)
		}
		files = append(files, fs...)
	}

	// To be replaced with a configurable per-group interval.
	groups, err := m.loadGroups(time.Duration(conf.GlobalConfig.EvaluationInterval), files...)
	if err != nil {
		return fmt.Errorf("error loading rules, previous rule set restored: %s", err)
	}

	var wg sync.WaitGroup

	for _, newg := range groups {
		wg.Add(1)

		// If there is an old group with the same identifier, stop it and wait for
		// it to finish the current iteration. Then copy its state into the new group.
		oldg, ok := m.groups[newg.name]
		// Remove it from the old map so it is not stopped a second time below.
		delete(m.groups, newg.name)

		go func(newg *Group) {
			if ok {
				oldg.stop()
				newg.copyState(oldg)
			}
			go func() {
				// Wait with starting evaluation until the rule manager
				// is told to run. This is necessary to avoid running
				// queries against a bootstrapping storage.
				<-m.block
				newg.run()
			}()
			wg.Done()
		}(newg)
	}

	// Stop remaining old groups.
	for _, oldg := range m.groups {
		oldg.stop()
	}

	wg.Wait()
	m.groups = groups

	return nil
}
// loadGroups reads groups from a list of files.
// As there's currently no group syntax a single group named "default" containing
// all rules will be returned.
func (m *Manager) loadGroups(interval time.Duration, filenames ...string) (map[string]*Group, error) {
	rules := []Rule{}
	for _, fn := range filenames {
		content, err := ioutil.ReadFile(fn)
		if err != nil {
			return nil, err
		}
		stmts, err := promql.ParseStmts(string(content))
		if err != nil {
			return nil, fmt.Errorf("error parsing %s: %s", fn, err)
		}

		for _, stmt := range stmts {
			switch r := stmt.(type) {
			case *promql.AlertStmt:
				rules = append(rules, NewAlertingRule(r.Name, r.Expr, r.Duration, r.Labels, r.Annotations))
			case *promql.RecordStmt:
				rules = append(rules, NewRecordingRule(r.Name, r.Expr, r.Labels))
			default:
				panic("retrieval.Manager.LoadRuleFiles: unknown statement type")
			}
		}
	}

	// Currently there is no group syntax implemented. Thus all rules
	// are read into a single default group.
	g := NewGroup("default", interval, rules, m.opts)
	return map[string]*Group{g.name: g}, nil
}
// Rules returns the list of the manager's rules across all groups.
func (m *Manager) Rules() []Rule {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	var all []Rule
	for _, g := range m.groups {
		all = append(all, g.rules...)
	}
	return all
}
// AlertingRules returns the list of the manager's alerting rules.
//
// It iterates the groups directly instead of calling m.Rules(): the
// original implementation acquired the read lock and then called
// m.Rules(), which takes the same read lock again. sync.RWMutex forbids
// recursive read locking — if a writer (Stop/ApplyConfig) queues between
// the two RLock calls, the second RLock blocks and the manager deadlocks.
func (m *Manager) AlertingRules() []*AlertingRule {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	alerts := []*AlertingRule{}
	for _, g := range m.groups {
		for _, rule := range g.rules {
			if alertingRule, ok := rule.(*AlertingRule); ok {
				alerts = append(alerts, alertingRule)
			}
		}
	}
	return alerts
}
Very basic staleness handling for rules.
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"fmt"
"io/ioutil"
"math"
"net/url"
"path/filepath"
"sync"
"time"
html_template "html/template"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/strutil"
)
// Constants for instrumentation.
const namespace = "prometheus"

// Metrics describing rule evaluation, exported under the "prometheus"
// namespace. The *Vec metrics are labeled by rule type.
var (
	// evalDuration observes a single rule's execution time.
	evalDuration = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace: namespace,
			Name:      "rule_evaluation_duration_seconds",
			Help:      "The duration for a rule to execute.",
		},
		[]string{"rule_type"},
	)
	// evalFailures counts rules whose evaluation returned an error.
	evalFailures = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "rule_evaluation_failures_total",
			Help:      "The total number of rule evaluation failures.",
		},
		[]string{"rule_type"},
	)
	// evalTotal counts all rule evaluations, successful or not.
	evalTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "rule_evaluations_total",
			Help:      "The total number of rule evaluations.",
		},
		[]string{"rule_type"},
	)
	// iterationDuration observes how long one whole group evaluation takes.
	iterationDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Namespace:  namespace,
		Name:       "evaluator_duration_seconds",
		Help:       "The duration of rule group evaluations.",
		Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
	})
	// iterationsSkipped counts evaluations skipped due to storage throttling.
	iterationsSkipped = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "evaluator_iterations_skipped_total",
		Help:      "The total number of rule group evaluations skipped due to throttled metric storage.",
	})
	// iterationsMissed counts ticks dropped because an evaluation overran.
	iterationsMissed = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "evaluator_iterations_missed_total",
		Help:      "The total number of rule group evaluations missed due to slow rule group evaluation.",
	})
	// iterationsScheduled counts every evaluation slot, executed or not.
	iterationsScheduled = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "evaluator_iterations_total",
		Help:      "The total number of scheduled rule group evaluations, whether executed, missed or skipped.",
	})
)
// init pre-creates the per-rule-type children of the vector metrics so
// both label values are exported (as zero) before the first evaluation,
// then registers all evaluator metrics with the default registry.
func init() {
	evalTotal.WithLabelValues(string(ruleTypeAlert))
	evalTotal.WithLabelValues(string(ruleTypeRecording))
	evalFailures.WithLabelValues(string(ruleTypeAlert))
	evalFailures.WithLabelValues(string(ruleTypeRecording))

	prometheus.MustRegister(iterationDuration)
	prometheus.MustRegister(iterationsScheduled)
	prometheus.MustRegister(iterationsSkipped)
	prometheus.MustRegister(iterationsMissed)
	prometheus.MustRegister(evalFailures)
	prometheus.MustRegister(evalDuration)
}
// ruleType distinguishes alerting from recording rules; it is used as the
// "rule_type" label value on the evaluation metrics.
type ruleType string

// Values for the "rule_type" metric label.
const (
	ruleTypeAlert     = "alerting"
	ruleTypeRecording = "recording"
)
// A Rule encapsulates a vector expression which is evaluated at a specified
// interval and acted upon (currently either recorded or used for alerting).
type Rule interface {
	// Name returns the name of the rule.
	Name() string
	// Eval evaluates the rule, including any associated recording or alerting actions.
	Eval(context.Context, time.Time, *promql.Engine, *url.URL) (promql.Vector, error)
	// String returns a human-readable string representation of the rule.
	String() string
	// HTMLSnippet returns a human-readable string representation of the rule,
	// decorated with HTML elements for use in the web frontend.
	HTMLSnippet(pathPrefix string) html_template.HTML
}
// Group is a set of rules that have a logical relation.
type Group struct {
	name     string        // group identifier; also used for the scheduling offset fingerprint
	interval time.Duration // evaluation interval
	rules    []Rule
	opts     *ManagerOptions
	// seriesInPreviousEval records the series written by the previous
	// evaluation (keyed by metric string) so series that disappear can be
	// marked stale in the next cycle.
	seriesInPreviousEval map[string]labels.Labels

	done       chan struct{} // closed by stop() to request termination
	terminated chan struct{} // closed by run() when it has fully exited
}
// NewGroup makes a new Group with the given name, options, and rules.
// The staleness-tracking map and the shutdown channels are created here.
func NewGroup(name string, interval time.Duration, rules []Rule, opts *ManagerOptions) *Group {
	g := new(Group)
	g.name = name
	g.interval = interval
	g.rules = rules
	g.opts = opts
	g.seriesInPreviousEval = map[string]labels.Labels{}
	g.done = make(chan struct{})
	g.terminated = make(chan struct{})
	return g
}
// run executes the group's evaluation loop until stop() is called.
// It first sleeps until the group's consistently slotted start time,
// then evaluates once per interval, counting iterations that were
// missed because an evaluation overran the ticker.
func (g *Group) run() {
	defer close(g.terminated)

	// Wait an initial amount to have consistently slotted intervals.
	select {
	case <-time.After(g.offset()):
	case <-g.done:
		return
	}

	iter := func() {
		iterationsScheduled.Inc()
		start := time.Now()
		g.Eval()
		iterationDuration.Observe(time.Since(start).Seconds())
	}
	lastTriggered := time.Now()
	iter()

	tick := time.NewTicker(g.interval)
	defer tick.Stop()

	for {
		// The nested select gives the done channel priority over a
		// pending tick, so shutdown is not delayed by a ready ticker.
		select {
		case <-g.done:
			return
		default:
			select {
			case <-g.done:
				return
			case <-tick.C:
				// If more than one full interval elapsed since the last
				// evaluation, the intermediate ticks were dropped; count
				// them as missed (and scheduled) iterations.
				missed := (time.Since(lastTriggered).Nanoseconds() / g.interval.Nanoseconds()) - 1
				if missed > 0 {
					iterationsMissed.Add(float64(missed))
					iterationsScheduled.Add(float64(missed))
				}
				lastTriggered = time.Now()
				iter()
			}
		}
	}
}
// stop signals the run loop to terminate and blocks until it has exited.
// It must be called at most once per group: closing g.done twice panics.
func (g *Group) stop() {
	close(g.done)
	<-g.terminated
}
// fingerprint returns a stable hash derived from the group's name; it is
// used to spread groups' evaluation start times across the interval.
func (g *Group) fingerprint() model.Fingerprint {
	return model.LabelSet{"name": model.LabelValue(g.name)}.Fingerprint()
}
// offset returns the duration until the next consistently slotted
// evaluation time. The slot within the interval is derived from the
// group's name fingerprint, so distinct groups start at distinct offsets.
func (g *Group) offset() time.Duration {
	var (
		now  = time.Now().UnixNano()
		ivl  = int64(g.interval)
		slot = int64(uint64(g.fingerprint()) % uint64(g.interval))
	)
	next := now - now%ivl + slot
	if next < now {
		next += ivl
	}
	return time.Duration(next - now)
}
// copyState copies the alerting rule and staleness related state from the
// given group. The previous-evaluation series map is taken over wholesale;
// for every pair of equal alerting rules, the active alerts of the old
// rule are carried over so firing alerts survive a configuration reload.
func (g *Group) copyState(from *Group) {
	g.seriesInPreviousEval = from.seriesInPreviousEval
	for _, fromRule := range from.rules {
		far, ok := fromRule.(*AlertingRule)
		if !ok {
			continue
		}
		for _, rule := range g.rules {
			ar, ok := rule.(*AlertingRule)
			if !ok {
				continue
			}
			// TODO(fabxc): forbid same alert definitions that are not unique by
			// at least one static label or alertname?
			if far.equal(ar) {
				for fp, a := range far.active {
					ar.active[fp] = a
				}
			}
		}
	}
}
// typeForRule maps a concrete rule implementation to its metric label
// value; it panics on any implementation it does not know about.
func typeForRule(r Rule) ruleType {
	if _, ok := r.(*AlertingRule); ok {
		return ruleTypeAlert
	}
	if _, ok := r.(*RecordingRule); ok {
		return ruleTypeRecording
	}
	panic(fmt.Errorf("unknown rule type: %T", r))
}
// Eval runs a single evaluation cycle in which all rules are evaluated in parallel.
// In the future a single group will be evaluated sequentially to properly handle
// rule dependency. After all rules have run, series that were written in the
// previous cycle but not in this one are marked stale.
func (g *Group) Eval() {
	var (
		now = time.Now()
		wg  sync.WaitGroup

		mu             sync.Mutex // guards seriesReturned across rule goroutines
		seriesReturned = make(map[string]labels.Labels, len(g.seriesInPreviousEval))
	)
	for _, rule := range g.rules {
		rtyp := string(typeForRule(rule))
		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule Rule) {
			defer wg.Done()
			// Observe this rule's evaluation duration, labeled by type.
			defer func(t time.Time) {
				evalDuration.WithLabelValues(rtyp).Observe(time.Since(t).Seconds())
			}(time.Now())
			evalTotal.WithLabelValues(rtyp).Inc()
			vector, err := rule.Eval(g.opts.Context, now, g.opts.QueryEngine, g.opts.ExternalURL)
			if err != nil {
				// Canceled queries are intentional termination of queries. This normally
				// happens on shutdown and thus we skip logging of any errors here.
				if _, ok := err.(promql.ErrQueryCanceled); !ok {
					log.Warnf("Error while evaluating rule %q: %s", rule, err)
				}
				evalFailures.WithLabelValues(rtyp).Inc()
				return
			}
			// Alert notifications are sent before the results are persisted.
			if ar, ok := rule.(*AlertingRule); ok {
				g.sendAlerts(ar)
			}
			var (
				numOutOfOrder = 0
				numDuplicates = 0
			)
			app, err := g.opts.Appendable.Appender()
			if err != nil {
				log.With("err", err).Warn("creating appender failed")
				return
			}
			for _, s := range vector {
				if _, err := app.Add(s.Metric, s.T, s.V); err != nil {
					switch err {
					// Out-of-order and duplicate samples are only counted here and
					// summarized below to avoid one warning per sample.
					case storage.ErrOutOfOrderSample:
						numOutOfOrder++
						log.With("sample", s).With("err", err).Debug("Rule evaluation result discarded")
					case storage.ErrDuplicateSampleForTimestamp:
						numDuplicates++
						log.With("sample", s).With("err", err).Debug("Rule evaluation result discarded")
					default:
						log.With("sample", s).With("err", err).Warn("Rule evaluation result discarded")
					}
				} else {
					// Remember successfully appended series for staleness handling.
					mu.Lock()
					seriesReturned[s.Metric.String()] = s.Metric
					mu.Unlock()
				}
			}
			if numOutOfOrder > 0 {
				log.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order result from rule evaluation")
			}
			if numDuplicates > 0 {
				log.With("numDropped", numDuplicates).Warn("Error on ingesting results from rule evaluation with different value but same timestamp")
			}
			if err := app.Commit(); err != nil {
				log.With("err", err).Warn("rule sample appending failed")
			}
		}(rule)
	}
	wg.Wait()
	// TODO(bbrazil): This should apply per-rule.
	app, err := g.opts.Appendable.Appender()
	if err != nil {
		log.With("err", err).Warn("creating appender failed")
		return
	}
	for metric, lset := range g.seriesInPreviousEval {
		if _, ok := seriesReturned[metric]; !ok {
			// Series no longer exposed, mark it stale.
			_, err = app.Add(lset, timestamp.FromTime(now), math.Float64frombits(value.StaleNaN))
			switch err {
			case nil:
			case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
				// Do not count these in logging, as this is expected if series
				// is exposed from a different group.
				continue
			default:
				log.With("sample", metric).With("err", err).Warn("adding stale sample failed")
				// NOTE(review): the loop keeps using (and later Commits) the
				// appender after this Rollback — presumably intentional
				// best-effort behavior, but worth confirming against the
				// storage.Appender contract.
				if err := app.Rollback(); err != nil {
					log.With("err", err).Warn("rule stale sample rollback failed")
				}
			}
		}
	}
	if err := app.Commit(); err != nil {
		log.With("err", err).Warn("rule stale sample appending failed")
	}
	g.seriesInPreviousEval = seriesReturned
}
// sendAlerts sends alert notifications for the given rule.
// Pending alerts are skipped; only firing (and resolved) alerts are
// forwarded to the notifier. The error return is currently always nil.
func (g *Group) sendAlerts(rule *AlertingRule) error {
	var alerts []*notifier.Alert
	for _, alert := range rule.currentAlerts() {
		// Only send actually firing alerts.
		if alert.State == StatePending {
			continue
		}
		a := &notifier.Alert{
			// The alert only starts firing once the hold duration has passed.
			StartsAt:     alert.ActiveAt.Add(rule.holdDuration),
			Labels:       alert.Labels,
			Annotations:  alert.Annotations,
			GeneratorURL: g.opts.ExternalURL.String() + strutil.GraphLinkForExpression(rule.vector.String()),
		}
		if !alert.ResolvedAt.IsZero() {
			a.EndsAt = alert.ResolvedAt
		}
		alerts = append(alerts, a)
	}
	if len(alerts) > 0 {
		g.opts.Notifier.Send(alerts...)
	}
	return nil
}
// The Manager manages recording and alerting rules.
type Manager struct {
	opts   *ManagerOptions
	groups map[string]*Group // active groups, keyed by group name
	mtx    sync.RWMutex      // guards groups
	block  chan struct{}     // closed by Run() to release waiting groups
}

// Appendable returns an Appender to which evaluation results are written.
type Appendable interface {
	Appender() (storage.Appender, error)
}

// ManagerOptions bundles options for the Manager.
type ManagerOptions struct {
	ExternalURL *url.URL
	QueryEngine *promql.Engine
	Context     context.Context
	Notifier    *notifier.Notifier
	Appendable  Appendable
}
// NewManager returns an implementation of Manager, ready to be started
// by calling the Run method. Groups do not evaluate until Run is called.
func NewManager(o *ManagerOptions) *Manager {
	return &Manager{
		groups: map[string]*Group{},
		opts:   o,
		block:  make(chan struct{}),
	}
}
// Run starts processing of the rule manager. Closing the block channel
// releases every group goroutine waiting in ApplyConfig to begin running.
func (m *Manager) Run() {
	close(m.block)
}
// Stop the rule manager's rule evaluation cycles. Blocks until every
// group's run loop has terminated; the write lock is held throughout.
func (m *Manager) Stop() {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	log.Info("Stopping rule manager...")

	for _, eg := range m.groups {
		eg.stop()
	}

	log.Info("Rule manager stopped.")
}
// ApplyConfig updates the rule manager's state as the config requires. If
// loading the new rules failed the old rule set is restored.
func (m *Manager) ApplyConfig(conf *config.Config) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Get all rule files and load the groups they define.
	var files []string
	for _, pat := range conf.RuleFiles {
		fs, err := filepath.Glob(pat)
		if err != nil {
			// The only error can be a bad pattern.
			return fmt.Errorf("error retrieving rule files for %s: %s", pat, err)
		}
		files = append(files, fs...)
	}

	// To be replaced with a configurable per-group interval.
	groups, err := m.loadGroups(time.Duration(conf.GlobalConfig.EvaluationInterval), files...)
	if err != nil {
		return fmt.Errorf("error loading rules, previous rule set restored: %s", err)
	}

	var wg sync.WaitGroup

	for _, newg := range groups {
		wg.Add(1)

		// If there is an old group with the same identifier, stop it and wait for
		// it to finish the current iteration. Then copy it into the new group.
		oldg, ok := m.groups[newg.name]
		// Remove it from the old map so it is not stopped a second time below.
		delete(m.groups, newg.name)

		go func(newg *Group) {
			if ok {
				oldg.stop()
				newg.copyState(oldg)
			}
			go func() {
				// Wait with starting evaluation until the rule manager
				// is told to run. This is necessary to avoid running
				// queries against a bootstrapping storage.
				<-m.block
				newg.run()
			}()
			wg.Done()
		}(newg)
	}

	// Stop remaining old groups.
	for _, oldg := range m.groups {
		oldg.stop()
	}

	wg.Wait()
	m.groups = groups

	return nil
}
// loadGroups reads groups from a list of files.
// As there's currently no group syntax a single group named "default" containing
// all rules will be returned.
func (m *Manager) loadGroups(interval time.Duration, filenames ...string) (map[string]*Group, error) {
	rules := []Rule{}
	for _, fn := range filenames {
		content, err := ioutil.ReadFile(fn)
		if err != nil {
			return nil, err
		}
		stmts, err := promql.ParseStmts(string(content))
		if err != nil {
			return nil, fmt.Errorf("error parsing %s: %s", fn, err)
		}

		for _, stmt := range stmts {
			switch r := stmt.(type) {
			case *promql.AlertStmt:
				rules = append(rules, NewAlertingRule(r.Name, r.Expr, r.Duration, r.Labels, r.Annotations))
			case *promql.RecordStmt:
				rules = append(rules, NewRecordingRule(r.Name, r.Expr, r.Labels))
			default:
				panic("retrieval.Manager.LoadRuleFiles: unknown statement type")
			}
		}
	}

	// Currently there is no group syntax implemented. Thus all rules
	// are read into a single default group.
	g := NewGroup("default", interval, rules, m.opts)
	return map[string]*Group{g.name: g}, nil
}
// Rules returns the list of the manager's rules across all groups.
func (m *Manager) Rules() []Rule {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	var all []Rule
	for _, g := range m.groups {
		all = append(all, g.rules...)
	}
	return all
}
// AlertingRules returns the list of the manager's alerting rules.
//
// It iterates the groups directly instead of calling m.Rules(): the
// original implementation acquired the read lock and then called
// m.Rules(), which takes the same read lock again. sync.RWMutex forbids
// recursive read locking — if a writer (Stop/ApplyConfig) queues between
// the two RLock calls, the second RLock blocks and the manager deadlocks.
func (m *Manager) AlertingRules() []*AlertingRule {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	alerts := []*AlertingRule{}
	for _, g := range m.groups {
		for _, rule := range g.rules {
			if alertingRule, ok := rule.(*AlertingRule); ok {
				alerts = append(alerts, alertingRule)
			}
		}
	}
	return alerts
}
|
// Copyright 2013 bee authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Bee is a tool for developling applications based on beego framework.
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"strings"
"text/template"
)
// version is the release number of the bee tool itself.
const version = "1.5.2"

// Command is the unit of execution
type Command struct {
	// Run runs the command.
	// The args are the arguments after the command name.
	Run func(cmd *Command, args []string) int

	// PreRun performs an operation before running the command
	PreRun func(cmd *Command, args []string)

	// UsageLine is the one-line usage message.
	// The first word in the line is taken to be the command name.
	UsageLine string

	// Short is the short description shown in the 'go help' output.
	Short string

	// Long is the long message shown in the 'go help <this-command>' output.
	Long string

	// Flag is a set of flags specific to this command.
	Flag flag.FlagSet

	// CustomFlags indicates that the command will do its own
	// flag parsing.
	CustomFlags bool

	// output out writer if set in SetOutput(w)
	output *io.Writer
}
// Name returns the command's name: the first word in the usage line.
func (c *Command) Name() string {
	name := c.UsageLine
	if i := strings.Index(name, " "); i >= 0 {
		name = name[:i]
	}
	return name
}
// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
func (c *Command) SetOutput(output io.Writer) {
	c.output = &output
}
// Out returns the out writer of the current command.
// If cmd.output is nil, os.Stderr is used.
func (c *Command) Out() io.Writer {
	if c.output != nil {
		return *c.output
	}
	// Fall back to a color-capable wrapper around stderr.
	return NewColorWriter(os.Stderr)
}
// Usage puts out the usage for the command and terminates the process
// with exit code 2.
func (c *Command) Usage() {
	tmpl(cmdUsage, c)
	os.Exit(2)
}
// Runnable reports whether the command can be run; otherwise
// it is a documentation pseudo-command such as importpath.
func (c *Command) Runnable() bool {
	return c.Run != nil
}
// Options collects the command's flags into a map of
// "name[=default]" -> usage text for the help templates. Defaults that
// contain a ':' are shortened to their first segment plus "...".
func (c *Command) Options() map[string]string {
	options := make(map[string]string)
	c.Flag.VisitAll(func(f *flag.Flag) {
		def := f.DefValue
		switch {
		case len(def) == 0:
			options[f.Name] = f.Usage
		case strings.Contains(def, ":"):
			// Truncate the flag's default value by appending '...' at the end
			options[f.Name+"="+strings.Split(def, ":")[0]+":..."] = f.Usage
		default:
			options[f.Name+"="+def] = f.Usage
		}
	})
	return options
}
// availableCommands lists every subcommand the dispatcher in main()
// recognizes; commented-out entries are currently disabled.
var availableCommands = []*Command{
	cmdNew,
	cmdRun,
	cmdPack,
	cmdApiapp,
	cmdHproseapp,
	//cmdRouter,
	//cmdTest,
	cmdBale,
	cmdVersion,
	cmdGenerate,
	//cmdRundocs,
	cmdMigrate,
	cmdFix,
}

// logger is the shared bee logger writing to stdout.
var logger = GetBeeLogger(os.Stdout)
// main parses the command line, dispatches to the matching subcommand,
// and exits with that command's return code. Unknown subcommands print
// an error and exit with code 2.
func main() {
	currentpath, _ := os.Getwd()

	flag.Usage = usage
	flag.Parse()
	log.SetFlags(0)

	args := flag.Args()
	if len(args) < 1 {
		usage()
	}

	if args[0] == "help" {
		help(args[1:])
		return
	}

	for _, cmd := range availableCommands {
		if cmd.Name() == args[0] && cmd.Run != nil {
			cmd.Flag.Usage = func() { cmd.Usage() }
			if cmd.CustomFlags {
				args = args[1:]
			} else {
				cmd.Flag.Parse(args[1:])
				args = cmd.Flag.Args()
			}

			if cmd.PreRun != nil {
				cmd.PreRun(cmd, args)
			}

			// Check if current directory is inside the GOPATH,
			// if so parse the packages inside it.
			// Guard against an empty GOPATH list: indexing [0]
			// unconditionally panics when no GOPATH is configured.
			if gopaths := GetGOPATHs(); len(gopaths) > 0 && strings.Contains(currentpath, gopaths[0]+"/src") {
				parsePackagesFromDir(currentpath)
			}

			// os.Exit never returns; the unreachable `return` that
			// previously followed it has been removed.
			os.Exit(cmd.Run(cmd, args))
		}
	}

	printErrorAndExit("Unknown subcommand")
}
// usageTemplate renders the top-level help screen listing all commands;
// it is executed against the availableCommands slice.
var usageTemplate = `Bee is a Fast and Flexible tool for managing your Beego Web Application.
{{"USAGE" | headline}}
{{"bee command [arguments]" | bold}}
{{"AVAILABLE COMMANDS" | headline}}
{{range .}}{{if .Runnable}}
{{.Name | printf "%-11s" | bold}} {{.Short}}{{end}}{{end}}
Use {{"bee help [command]" | bold}} for more information about a command.
{{"ADDITIONAL HELP TOPICS" | headline}}
{{range .}}{{if not .Runnable}}
{{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}}
Use {{"bee help [topic]" | bold}} for more information about that topic.
`

// helpTemplate renders the detailed help for a single Command.
var helpTemplate = `{{"USAGE" | headline}}
{{.UsageLine | printf "bee %s" | bold}}
{{if .Options}}{{endline}}{{"OPTIONS" | headline}}{{range $k,$v := .Options}}
{{$k | printf "-%-12s" | bold}} {{$v}}{{end}}{{endline}}{{end}}
{{"DESCRIPTION" | headline}}
{{tmpltostr .Long . | trim}}
`

// errorTemplate formats fatal usage errors; %s is filled via Sprintf
// before template execution.
var errorTemplate = `bee: %s.
Use {{"bee help" | bold}} for more information.
`

// cmdUsage is the one-line pointer printed by Command.Usage.
var cmdUsage = `Use {{printf "bee help %s" .Name | bold}} for more information.{{endline}}`
// usage prints the global help screen and exits with code 2.
func usage() {
	tmpl(usageTemplate, availableCommands)
	os.Exit(2)
}
// tmpl executes the given template text against data, writing the result
// to a color-capable stderr writer. Template parse errors panic via
// template.Must; execution errors abort via MustCheck.
func tmpl(text string, data interface{}) {
	output := NewColorWriter(os.Stderr)
	t := template.New("usage").Funcs(BeeFuncMap())
	template.Must(t.Parse(text))
	err := t.Execute(output, data)
	MustCheck(err)
}
// help implements the "bee help [topic]" subcommand. With no argument it
// shows the global usage (which exits); with exactly one argument it
// prints that command's help, otherwise it errors out.
func help(args []string) {
	if len(args) == 0 {
		usage()
	}
	if len(args) != 1 {
		printErrorAndExit("Too many arguments")
	}

	arg := args[0]

	for _, cmd := range availableCommands {
		if cmd.Name() == arg {
			tmpl(helpTemplate, cmd)
			return
		}
	}
	printErrorAndExit("Unknown help topic")
}
// printErrorAndExit renders the error template with the given message
// and terminates the process with exit code 2.
func printErrorAndExit(message string) {
	tmpl(fmt.Sprintf(errorTemplate, message), nil)
	os.Exit(2)
}
bee 1.6.0
// Copyright 2013 bee authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Bee is a tool for developling applications based on beego framework.
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"strings"
"text/template"
)
// version is the release number of the bee tool itself.
const version = "1.6.0"

// Command is the unit of execution
type Command struct {
	// Run runs the command.
	// The args are the arguments after the command name.
	Run func(cmd *Command, args []string) int

	// PreRun performs an operation before running the command
	PreRun func(cmd *Command, args []string)

	// UsageLine is the one-line usage message.
	// The first word in the line is taken to be the command name.
	UsageLine string

	// Short is the short description shown in the 'go help' output.
	Short string

	// Long is the long message shown in the 'go help <this-command>' output.
	Long string

	// Flag is a set of flags specific to this command.
	Flag flag.FlagSet

	// CustomFlags indicates that the command will do its own
	// flag parsing.
	CustomFlags bool

	// output out writer if set in SetOutput(w)
	output *io.Writer
}
// Name returns the command's name: the first word in the usage line.
func (c *Command) Name() string {
	name := c.UsageLine
	if i := strings.Index(name, " "); i >= 0 {
		name = name[:i]
	}
	return name
}
// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
func (c *Command) SetOutput(output io.Writer) {
	c.output = &output
}
// Out returns the out writer of the current command.
// If cmd.output is nil, os.Stderr is used.
func (c *Command) Out() io.Writer {
	if c.output != nil {
		return *c.output
	}
	// Fall back to a color-capable wrapper around stderr.
	return NewColorWriter(os.Stderr)
}
// Usage puts out the usage for the command and terminates the process
// with exit code 2.
func (c *Command) Usage() {
	tmpl(cmdUsage, c)
	os.Exit(2)
}
// Runnable reports whether the command can be run; otherwise
// it is a documentation pseudo-command such as importpath.
func (c *Command) Runnable() bool {
	return c.Run != nil
}
// Options collects the command's flags into a map of
// "name[=default]" -> usage text for the help templates. Defaults that
// contain a ':' are shortened to their first segment plus "...".
func (c *Command) Options() map[string]string {
	options := make(map[string]string)
	c.Flag.VisitAll(func(f *flag.Flag) {
		def := f.DefValue
		switch {
		case len(def) == 0:
			options[f.Name] = f.Usage
		case strings.Contains(def, ":"):
			// Truncate the flag's default value by appending '...' at the end
			options[f.Name+"="+strings.Split(def, ":")[0]+":..."] = f.Usage
		default:
			options[f.Name+"="+def] = f.Usage
		}
	})
	return options
}
// availableCommands is the registry of all bee subcommands, in the
// order they appear in the usage output.
var availableCommands = []*Command{
	cmdNew,
	cmdRun,
	cmdPack,
	cmdApiapp,
	cmdHproseapp,
	//cmdRouter,
	//cmdTest,
	cmdBale,
	cmdVersion,
	cmdGenerate,
	//cmdRundocs,
	cmdMigrate,
	cmdFix,
}
// logger is the shared bee logger writing to stdout.
var logger = GetBeeLogger(os.Stdout)
// main dispatches the first CLI argument to the matching registered
// command, after parsing global and per-command flags. Unknown
// subcommands terminate the process with an error message.
func main() {
	currentpath, _ := os.Getwd()
	flag.Usage = usage
	flag.Parse()
	log.SetFlags(0)
	args := flag.Args()
	if len(args) < 1 {
		usage()
	}
	if args[0] == "help" {
		help(args[1:])
		return
	}
	for _, cmd := range availableCommands {
		if cmd.Name() == args[0] && cmd.Run != nil {
			cmd.Flag.Usage = func() { cmd.Usage() }
			if cmd.CustomFlags {
				args = args[1:]
			} else {
				cmd.Flag.Parse(args[1:])
				args = cmd.Flag.Args()
			}
			if cmd.PreRun != nil {
				cmd.PreRun(cmd, args)
			}
			// Check if the current directory is inside the GOPATH;
			// if so, parse the packages inside it.
			// Guard against an empty GOPATH list: indexing [0]
			// unconditionally would panic when GOPATH is unset.
			if gopaths := GetGOPATHs(); len(gopaths) > 0 && strings.Contains(currentpath, gopaths[0]+"/src") {
				parsePackagesFromDir(currentpath)
			}
			// os.Exit never returns; the old trailing `return` here
			// was unreachable and has been removed.
			os.Exit(cmd.Run(cmd, args))
		}
	}
	printErrorAndExit("Unknown subcommand")
}
// usageTemplate renders the top-level "bee" usage screen; its data is
// the availableCommands slice.
var usageTemplate = `Bee is a Fast and Flexible tool for managing your Beego Web Application.
{{"USAGE" | headline}}
{{"bee command [arguments]" | bold}}
{{"AVAILABLE COMMANDS" | headline}}
{{range .}}{{if .Runnable}}
{{.Name | printf "%-11s" | bold}} {{.Short}}{{end}}{{end}}
Use {{"bee help [command]" | bold}} for more information about a command.
{{"ADDITIONAL HELP TOPICS" | headline}}
{{range .}}{{if not .Runnable}}
{{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}}
Use {{"bee help [topic]" | bold}} for more information about that topic.
`
// helpTemplate renders per-command help; its data is a *Command.
var helpTemplate = `{{"USAGE" | headline}}
{{.UsageLine | printf "bee %s" | bold}}
{{if .Options}}{{endline}}{{"OPTIONS" | headline}}{{range $k,$v := .Options}}
{{$k | printf "-%-12s" | bold}} {{$v}}{{end}}{{endline}}{{end}}
{{"DESCRIPTION" | headline}}
{{tmpltostr .Long . | trim}}
`
// errorTemplate is a fmt format string (the %s is substituted before
// template rendering) used by printErrorAndExit.
var errorTemplate = `bee: %s.
Use {{"bee help" | bold}} for more information.
`
// cmdUsage is the short hint printed by Command.Usage.
var cmdUsage = `Use {{printf "bee help %s" .Name | bold}} for more information.{{endline}}`
// usage prints the top-level usage screen and exits with code 2.
func usage() {
	tmpl(usageTemplate, availableCommands)
	os.Exit(2)
}
// tmpl parses text as a template with the bee helper FuncMap and
// renders it with data to a colorized stderr writer. Parse errors
// panic (template.Must); execution errors are fatal via MustCheck.
func tmpl(text string, data interface{}) {
	w := NewColorWriter(os.Stderr)
	parsed := template.Must(template.New("usage").Funcs(BeeFuncMap()).Parse(text))
	MustCheck(parsed.Execute(w, data))
}
// help implements "bee help [topic]": with no topic it shows the
// general usage (and exits), with exactly one topic it renders that
// command's help. Anything else is an error.
func help(args []string) {
	switch {
	case len(args) == 0:
		usage() // exits with code 2
	case len(args) > 1:
		printErrorAndExit("Too many arguments")
	}
	topic := args[0]
	for _, cmd := range availableCommands {
		if cmd.Name() != topic {
			continue
		}
		tmpl(helpTemplate, cmd)
		return
	}
	printErrorAndExit("Unknown help topic")
}
// printErrorAndExit renders message through errorTemplate and
// terminates the process with exit code 2.
func printErrorAndExit(message string) {
	tmpl(fmt.Sprintf(errorTemplate, message), nil)
	os.Exit(2)
}
|
// Copyright 2016 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package router provides interfaces that need to be satisfied in order to
// implement a new router on tsuru.
package router
import (
"fmt"
"net/url"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/tsuru/config"
"github.com/tsuru/tsuru/db"
"github.com/tsuru/tsuru/db/storage"
"github.com/tsuru/tsuru/log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// routerFactory builds a Router from its registered name and the
// config prefix under which its settings live.
type routerFactory func(routerName, configPrefix string) (Router, error)
// Sentinel errors returned by router implementations; callers compare
// against these to distinguish failure modes.
var (
	ErrBackendExists   = errors.New("Backend already exists")
	ErrBackendNotFound = errors.New("Backend not found")
	ErrBackendSwapped  = errors.New("Backend is swapped cannot remove")
	ErrRouteExists     = errors.New("Route already exists")
	ErrRouteNotFound   = errors.New("Route not found")
	ErrCNameExists     = errors.New("CName already exists")
	ErrCNameNotFound   = errors.New("CName not found")
	ErrCNameNotAllowed = errors.New("CName as router subdomain not allowed")
)
const HttpScheme = "http"
// routers is the process-wide registry of router factories by type name.
var routers = make(map[string]routerFactory)
// Register registers a new router factory under the given type name.
// Not safe for concurrent use; intended to be called from package init.
func Register(name string, r routerFactory) {
	routers[name] = r
}
// Type resolves the router type and config prefix for the named router
// from the "routers:<name>:type" config key. For the legacy "hipache"
// router it falls back to the top-level "hipache:*" config section
// when the key is missing.
func Type(name string) (string, string, error) {
	prefix := "routers:" + name
	routerType, err := config.GetString(prefix + ":type")
	if err == nil {
		return routerType, prefix, nil
	}
	msg := fmt.Sprintf("config key '%s:type' not found", prefix)
	if name != "hipache" {
		return "", "", errors.New(msg)
	}
	log.Errorf("WARNING: %s, fallback to top level '%s:*' router config", msg, name)
	return name, name, nil
}
// Get gets the named router from the registry, instantiating it via
// its registered factory with the resolved config prefix.
func Get(name string) (Router, error) {
	routerType, prefix, err := Type(name)
	if err != nil {
		return nil, err
	}
	factory, ok := routers[routerType]
	if !ok {
		return nil, errors.Errorf("unknown router: %q.", routerType)
	}
	r, ferr := factory(name, prefix)
	if ferr != nil {
		return nil, ferr
	}
	return r, nil
}
// Router is the basic interface of this package. It provides methods for
// managing backends and routes. Each backend can have multiple routes.
type Router interface {
	AddBackend(name string) error
	RemoveBackend(name string) error
	AddRoute(name string, address *url.URL) error
	AddRoutes(name string, address []*url.URL) error
	RemoveRoute(name string, address *url.URL) error
	RemoveRoutes(name string, addresses []*url.URL) error
	Addr(name string) (string, error)
	// Swap change the router between two backends.
	Swap(backend1, backend2 string, cnameOnly bool) error
	// Routes returns a list of routes of a backend.
	Routes(name string) ([]*url.URL, error)
}
// CNameRouter is an optional extension for routers that support
// attaching and detaching CNames to a backend.
type CNameRouter interface {
	Router
	SetCName(cname, name string) error
	UnsetCName(cname, name string) error
	CNames(name string) ([]*url.URL, error)
}
// MessageRouter optionally reports a human-readable startup message.
type MessageRouter interface {
	StartupMessage() (string, error)
}
// CustomHealthcheckRouter optionally accepts per-backend healthcheck
// configuration.
type CustomHealthcheckRouter interface {
	SetHealthcheck(name string, data HealthcheckData) error
}
// HealthChecker optionally reports the router's own health.
type HealthChecker interface {
	HealthCheck() error
}
// OptsRouter optionally accepts extra options when adding a backend.
type OptsRouter interface {
	AddBackendOpts(name string, opts map[string]string) error
}
// HealthcheckData carries the healthcheck settings forwarded to a
// CustomHealthcheckRouter.
type HealthcheckData struct {
	Path   string
	Status int
	Body   string
}
// RouterError wraps an underlying error with the operation that failed.
type RouterError struct {
	Op  string
	Err error
}
// Error implements the error interface.
func (e *RouterError) Error() string {
	return fmt.Sprintf("[router %s] %s", e.Op, e.Err)
}
// collection opens the "routers" MongoDB collection, ensuring the
// unique index on "app". Callers must Close the returned collection.
func collection() (*storage.Collection, error) {
	conn, err := db.Conn()
	if err != nil {
		return nil, err
	}
	coll := conn.Collection("routers")
	err = coll.EnsureIndex(mgo.Index{Key: []string{"app"}, Unique: true})
	if err != nil {
		return nil, err
	}
	return coll, nil
}
// Store stores the app name related with the
// router name. It upserts, so calling it again overwrites the
// existing router/kind for the app.
func Store(appName, routerName, kind string) error {
	coll, err := collection()
	if err != nil {
		return err
	}
	defer coll.Close()
	data := map[string]string{
		"app":    appName,
		"router": routerName,
		"kind":   kind,
	}
	_, err = coll.Upsert(bson.M{"app": appName}, data)
	return err
}
// retrieveRouterData loads the stored router record for an app.
// On lookup failure it returns both the (possibly defaulted) data
// and the mgo error; callers must check err.
func retrieveRouterData(appName string) (map[string]string, error) {
	data := map[string]string{}
	coll, err := collection()
	if err != nil {
		return data, err
	}
	defer coll.Close()
	err = coll.Find(bson.M{"app": appName}).One(&data)
	// Avoid need for data migrations, before kind existed we only supported
	// hipache as a router so we set it as the default here.
	if data["kind"] == "" {
		data["kind"] = "hipache"
	}
	return data, err
}
// Retrieve returns the router name stored for appName, translating a
// missing record into ErrBackendNotFound.
func Retrieve(appName string) (string, error) {
	data, err := retrieveRouterData(appName)
	if err != nil {
		if err == mgo.ErrNotFound {
			return "", ErrBackendNotFound
		}
		return "", err
	}
	return data["router"], nil
}
// Remove deletes the stored router record for appName.
func Remove(appName string) error {
	coll, err := collection()
	if err != nil {
		return err
	}
	defer coll.Close()
	return coll.Remove(bson.M{"app": appName})
}
// swapBackendName exchanges the stored router names of two backends.
// NOTE(review): the two Updates are not transactional; a failure
// between them leaves the records half-swapped.
func swapBackendName(backend1, backend2 string) error {
	coll, err := collection()
	if err != nil {
		return err
	}
	defer coll.Close()
	router1, err := Retrieve(backend1)
	if err != nil {
		return err
	}
	router2, err := Retrieve(backend2)
	if err != nil {
		return err
	}
	update := bson.M{"$set": bson.M{"router": router2}}
	err = coll.Update(bson.M{"app": backend1}, update)
	if err != nil {
		return err
	}
	update = bson.M{"$set": bson.M{"router": router1}}
	return coll.Update(bson.M{"app": backend2}, update)
}
// swapCnames moves each backend's CNames to the other backend.
// It is a no-op for routers that do not implement CNameRouter.
func swapCnames(r Router, backend1, backend2 string) error {
	cnameRouter, ok := r.(CNameRouter)
	if !ok {
		return nil
	}
	cnames1, err := cnameRouter.CNames(backend1)
	if err != nil {
		return err
	}
	cnames2, err := cnameRouter.CNames(backend2)
	if err != nil {
		return err
	}
	// Unset before Set for each cname so the router never sees the
	// same cname attached to two backends at once.
	for _, cname := range cnames1 {
		err = cnameRouter.UnsetCName(cname.Host, backend1)
		if err != nil {
			return err
		}
		err = cnameRouter.SetCName(cname.Host, backend2)
		if err != nil {
			return err
		}
	}
	for _, cname := range cnames2 {
		err = cnameRouter.UnsetCName(cname.Host, backend2)
		if err != nil {
			return err
		}
		err = cnameRouter.SetCName(cname.Host, backend1)
		if err != nil {
			return err
		}
	}
	return nil
}
// swapBackends exchanges all routes between two backends and then
// swaps their stored names. Routes are added to the destination
// before being removed from the source so neither backend is ever
// left without routes mid-swap.
func swapBackends(r Router, backend1, backend2 string) error {
	routes1, err := r.Routes(backend1)
	if err != nil {
		return err
	}
	routes2, err := r.Routes(backend2)
	if err != nil {
		return err
	}
	err = r.AddRoutes(backend1, routes2)
	if err != nil {
		return err
	}
	err = r.AddRoutes(backend2, routes1)
	if err != nil {
		return err
	}
	err = r.RemoveRoutes(backend1, routes1)
	if err != nil {
		return err
	}
	err = r.RemoveRoutes(backend2, routes2)
	if err != nil {
		return err
	}
	return swapBackendName(backend1, backend2)
}
// Swap exchanges routes (or only CNames, when cnameOnly is true)
// between two backends. Both backends must be handled by routers of
// the same kind.
func Swap(r Router, backend1, backend2 string, cnameOnly bool) error {
	data1, err := retrieveRouterData(backend1)
	if err != nil {
		return err
	}
	data2, err := retrieveRouterData(backend2)
	if err != nil {
		return err
	}
	kind1, kind2 := data1["kind"], data2["kind"]
	if kind1 != kind2 {
		return errors.Errorf("swap is only allowed between routers of the same kind. %q uses %q, %q uses %q",
			backend1, kind1, backend2, kind2)
	}
	if cnameOnly {
		return swapCnames(r, backend1, backend2)
	}
	return swapBackends(r, backend1, backend2)
}
// PlanRouter is the JSON-serializable summary of a configured router,
// as returned by List.
type PlanRouter struct {
	Name string `json:"name"`
	Type string `json:"type"`
}
// List returns one PlanRouter per router configured under the
// "routers:" config section, plus a legacy top-level "hipache"
// section when present. Results are sorted by router name.
func List() ([]PlanRouter, error) {
	routerConfig, err := config.Get("routers")
	// Named "configured" rather than "routers": a local called
	// "routers" would shadow the package-level factory registry.
	var configured map[interface{}]interface{}
	if err == nil {
		configured, _ = routerConfig.(map[interface{}]interface{})
	}
	routersList := make([]PlanRouter, 0, len(configured))
	keys := make([]string, 0, len(configured))
	for key := range configured {
		keys = append(keys, key.(string))
	}
	topLevelHipacheConfig, _ := config.Get("hipache")
	if topLevelHipacheConfig != nil {
		keys = append(keys, "hipache")
	}
	sort.Strings(keys)
	for _, name := range keys {
		var routerType string
		// The type may be declared per router; otherwise the router's
		// name doubles as its type (e.g. plain "hipache").
		if props, _ := configured[name].(map[interface{}]interface{}); props != nil {
			routerType, _ = props["type"].(string)
		}
		if routerType == "" {
			routerType = name
		}
		routersList = append(routersList, PlanRouter{Name: name, Type: routerType})
	}
	return routersList, nil
}
// ValidCName returns true if the cname is not a subdomain of the
// router's current domain, false otherwise. DNS names are
// case-insensitive (RFC 4343), so the suffix comparison lowercases
// both sides; the original case-sensitive HasSuffix let mixed-case
// CNames (e.g. "foo.Example.COM") slip past the check.
func ValidCName(cname, domain string) bool {
	return !strings.HasSuffix(strings.ToLower(cname), strings.ToLower(domain))
}
// IsSwapped reports whether the app's stored router name differs from
// its own name (i.e. it is currently swapped with another backend),
// and returns the stored backend name.
func IsSwapped(name string) (bool, string, error) {
	backendName, err := Retrieve(name)
	if err != nil {
		return false, "", err
	}
	return name != backendName, backendName, nil
}
router: adds TLSRouter interface
// Copyright 2016 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package router provides interfaces that need to be satisfied in order to
// implement a new router on tsuru.
package router
import (
"fmt"
"net/url"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/tsuru/config"
"github.com/tsuru/tsuru/db"
"github.com/tsuru/tsuru/db/storage"
"github.com/tsuru/tsuru/log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
type routerFactory func(routerName, configPrefix string) (Router, error)
var (
ErrBackendExists = errors.New("Backend already exists")
ErrBackendNotFound = errors.New("Backend not found")
ErrBackendSwapped = errors.New("Backend is swapped cannot remove")
ErrRouteExists = errors.New("Route already exists")
ErrRouteNotFound = errors.New("Route not found")
ErrCNameExists = errors.New("CName already exists")
ErrCNameNotFound = errors.New("CName not found")
ErrCNameNotAllowed = errors.New("CName as router subdomain not allowed")
)
const HttpScheme = "http"
var routers = make(map[string]routerFactory)
// Register registers a new router.
func Register(name string, r routerFactory) {
routers[name] = r
}
func Type(name string) (string, string, error) {
prefix := "routers:" + name
routerType, err := config.GetString(prefix + ":type")
if err != nil {
msg := fmt.Sprintf("config key '%s:type' not found", prefix)
if name != "hipache" {
return "", "", errors.New(msg)
}
log.Errorf("WARNING: %s, fallback to top level '%s:*' router config", msg, name)
return name, name, nil
}
return routerType, prefix, nil
}
// Get gets the named router from the registry.
func Get(name string) (Router, error) {
routerType, prefix, err := Type(name)
if err != nil {
return nil, err
}
factory, ok := routers[routerType]
if !ok {
return nil, errors.Errorf("unknown router: %q.", routerType)
}
r, err := factory(name, prefix)
if err != nil {
return nil, err
}
return r, nil
}
// Router is the basic interface of this package. It provides methods for
// managing backends and routes. Each backend can have multiple routes.
type Router interface {
AddBackend(name string) error
RemoveBackend(name string) error
AddRoute(name string, address *url.URL) error
AddRoutes(name string, address []*url.URL) error
RemoveRoute(name string, address *url.URL) error
RemoveRoutes(name string, addresses []*url.URL) error
Addr(name string) (string, error)
// Swap change the router between two backends.
Swap(backend1, backend2 string, cnameOnly bool) error
// Routes returns a list of routes of a backend.
Routes(name string) ([]*url.URL, error)
}
type CNameRouter interface {
Router
SetCName(cname, name string) error
UnsetCName(cname, name string) error
CNames(name string) ([]*url.URL, error)
}
type MessageRouter interface {
StartupMessage() (string, error)
}
type CustomHealthcheckRouter interface {
SetHealthcheck(name string, data HealthcheckData) error
}
type HealthChecker interface {
HealthCheck() error
}
type OptsRouter interface {
AddBackendOpts(name string, opts map[string]string) error
}
// TLSRouter is a router that supports adding and removing
// TLS certificates (PEM certificate plus private key) for a given cname.
type TLSRouter interface {
	AddCertificate(cname, certificate, key string) error
	RemoveCertificate(cname string) error
}
type HealthcheckData struct {
Path string
Status int
Body string
}
type RouterError struct {
Op string
Err error
}
func (e *RouterError) Error() string {
return fmt.Sprintf("[router %s] %s", e.Op, e.Err)
}
func collection() (*storage.Collection, error) {
conn, err := db.Conn()
if err != nil {
return nil, err
}
coll := conn.Collection("routers")
err = coll.EnsureIndex(mgo.Index{Key: []string{"app"}, Unique: true})
if err != nil {
return nil, err
}
return coll, nil
}
// Store stores the app name related with the
// router name.
func Store(appName, routerName, kind string) error {
coll, err := collection()
if err != nil {
return err
}
defer coll.Close()
data := map[string]string{
"app": appName,
"router": routerName,
"kind": kind,
}
_, err = coll.Upsert(bson.M{"app": appName}, data)
return err
}
func retrieveRouterData(appName string) (map[string]string, error) {
data := map[string]string{}
coll, err := collection()
if err != nil {
return data, err
}
defer coll.Close()
err = coll.Find(bson.M{"app": appName}).One(&data)
// Avoid need for data migrations, before kind existed we only supported
// hipache as a router so we set is as default here.
if data["kind"] == "" {
data["kind"] = "hipache"
}
return data, err
}
func Retrieve(appName string) (string, error) {
data, err := retrieveRouterData(appName)
if err != nil {
if err == mgo.ErrNotFound {
return "", ErrBackendNotFound
}
return "", err
}
return data["router"], nil
}
func Remove(appName string) error {
coll, err := collection()
if err != nil {
return err
}
defer coll.Close()
return coll.Remove(bson.M{"app": appName})
}
func swapBackendName(backend1, backend2 string) error {
coll, err := collection()
if err != nil {
return err
}
defer coll.Close()
router1, err := Retrieve(backend1)
if err != nil {
return err
}
router2, err := Retrieve(backend2)
if err != nil {
return err
}
update := bson.M{"$set": bson.M{"router": router2}}
err = coll.Update(bson.M{"app": backend1}, update)
if err != nil {
return err
}
update = bson.M{"$set": bson.M{"router": router1}}
return coll.Update(bson.M{"app": backend2}, update)
}
func swapCnames(r Router, backend1, backend2 string) error {
cnameRouter, ok := r.(CNameRouter)
if !ok {
return nil
}
cnames1, err := cnameRouter.CNames(backend1)
if err != nil {
return err
}
cnames2, err := cnameRouter.CNames(backend2)
if err != nil {
return err
}
for _, cname := range cnames1 {
err = cnameRouter.UnsetCName(cname.Host, backend1)
if err != nil {
return err
}
err = cnameRouter.SetCName(cname.Host, backend2)
if err != nil {
return err
}
}
for _, cname := range cnames2 {
err = cnameRouter.UnsetCName(cname.Host, backend2)
if err != nil {
return err
}
err = cnameRouter.SetCName(cname.Host, backend1)
if err != nil {
return err
}
}
return nil
}
func swapBackends(r Router, backend1, backend2 string) error {
routes1, err := r.Routes(backend1)
if err != nil {
return err
}
routes2, err := r.Routes(backend2)
if err != nil {
return err
}
err = r.AddRoutes(backend1, routes2)
if err != nil {
return err
}
err = r.AddRoutes(backend2, routes1)
if err != nil {
return err
}
err = r.RemoveRoutes(backend1, routes1)
if err != nil {
return err
}
err = r.RemoveRoutes(backend2, routes2)
if err != nil {
return err
}
return swapBackendName(backend1, backend2)
}
func Swap(r Router, backend1, backend2 string, cnameOnly bool) error {
data1, err := retrieveRouterData(backend1)
if err != nil {
return err
}
data2, err := retrieveRouterData(backend2)
if err != nil {
return err
}
if data1["kind"] != data2["kind"] {
return errors.Errorf("swap is only allowed between routers of the same kind. %q uses %q, %q uses %q",
backend1, data1["kind"], backend2, data2["kind"])
}
if cnameOnly {
return swapCnames(r, backend1, backend2)
}
return swapBackends(r, backend1, backend2)
}
type PlanRouter struct {
Name string `json:"name"`
Type string `json:"type"`
}
func List() ([]PlanRouter, error) {
routerConfig, err := config.Get("routers")
var routers map[interface{}]interface{}
if err == nil {
routers, _ = routerConfig.(map[interface{}]interface{})
}
routersList := make([]PlanRouter, 0, len(routers))
var keys []string
for key := range routers {
keys = append(keys, key.(string))
}
topLevelHipacheConfig, _ := config.Get("hipache")
if topLevelHipacheConfig != nil {
keys = append(keys, "hipache")
}
sort.Strings(keys)
for _, value := range keys {
var routerType string
routerProperties, _ := routers[value].(map[interface{}]interface{})
if routerProperties != nil {
routerType, _ = routerProperties["type"].(string)
}
if routerType == "" {
routerType = value
}
routersList = append(routersList, PlanRouter{Name: value, Type: routerType})
}
return routersList, nil
}
// ValidCName returns true if the cname is not a subdomain of the
// router's current domain, false otherwise. DNS names are
// case-insensitive (RFC 4343), so the suffix comparison lowercases
// both sides; the original case-sensitive HasSuffix let mixed-case
// CNames (e.g. "foo.Example.COM") slip past the check.
func ValidCName(cname, domain string) bool {
	return !strings.HasSuffix(strings.ToLower(cname), strings.ToLower(domain))
}
func IsSwapped(name string) (bool, string, error) {
backendName, err := Retrieve(name)
if err != nil {
return false, "", err
}
return name != backendName, backendName, nil
}
|
package main
import (
"fmt"
"io"
"sync"
"container/ring"
"github.com/Shopify/exabgp-util/types"
"github.com/Shopify/exabgp-util/util"
)
const (
NotificationsBufSize = 10
StatesBufSize = 10
)
// BGP accumulates exabgp messages read from reader: notifications and
// session states in fixed-size rings, updates in an unbounded slice.
// Each collection is guarded by its own mutex.
type BGP struct {
	reader        io.Reader
	notifications *ring.Ring
	states        *ring.Ring
	updates       []*types.UpdateType
	nMu           sync.Mutex // guards notifications
	sMu           sync.Mutex // guards states
	uMu           sync.Mutex // guards updates
}
// NewBGP builds a BGP with empty buffers reading from reader.
// NOTE(review): the error return is always nil here.
func NewBGP(reader io.Reader) (*BGP, error) {
	return &BGP{reader: reader, notifications: ring.New(NotificationsBufSize), states: ring.New(StatesBufSize), updates: []*types.UpdateType{}}, nil
}
// handleNotificationMessage logs a notification (warning on anything
// other than "shutdown") and records it in the notifications ring
// under nMu.
func (bgp *BGP) handleNotificationMessage(t *types.NotificationType) {
	Log.WithField("notification", fmt.Sprintf("%v", *t)).Debugf("Notification message")
	if *t != types.NotificationType("shutdown") {
		Log.Warnf("Unknown BGP notification message: %s", t)
	}
	mutexed(&bgp.nMu, func() {
		bgp.notifications.Value = t
		bgp.notifications = bgp.notifications.Next()
	})
}
// handleStateMessage logs the session state transition and records it
// in the states ring under sMu. Unknown states are kept but warned about.
func (bgp *BGP) handleStateMessage(t *types.StateType) {
	switch t.State {
	case "connected":
		Log.Info("BGP session connected")
	case "up":
		Log.Info("BGP session up")
	case "down":
		Log.Info("BGP session down")
	default:
		Log.Warnf("Unknown BGP session state: %s", t.State)
	}
	mutexed(&bgp.sMu, func() {
		bgp.states.Value = t
		bgp.states = bgp.states.Next()
	})
}
// handleUpdateMessage appends an update message to the updates slice
// under uMu.
func (bgp *BGP) handleUpdateMessage(t *types.UpdateType) {
	Log.WithField("message", fmt.Sprintf("%v", t.Message)).Debugf("Update message")
	mutexed(&bgp.uMu, func() {
		bgp.updates = append(bgp.updates, t)
	})
}
// ReadMessages consumes the scanner channel until it is closed,
// dispatching each message to the matching handler by type. Messages
// that fail to unmarshal are logged and skipped. Blocks until the
// underlying reader is exhausted.
func (bgp *BGP) ReadMessages() {
	Log.Info("Starting BGP message scanner")
	messageChan := util.ScanMessage(bgp.reader)
	for m := range messageChan {
		Log.Debugf("Received BGP message: %v", *m)
		switch m.Type {
		case types.NotificationMessageType:
			t, err := types.UnmarshalNotificationType(m.Notification)
			if err != nil {
				Log.WithField("error", err).Error("Error unmarshaling BGP notification message")
				continue
			}
			bgp.handleNotificationMessage(t)
		case types.StateMessageType:
			t, err := types.UnmarshalStateType(m.Neighbor)
			if err != nil {
				Log.WithField("error", err).Error("Error unmarshaling BGP state message")
				continue
			}
			bgp.handleStateMessage(t)
		case types.UpdateMessageType:
			t, err := types.UnmarshalUpdateType(m.Neighbor)
			if err != nil {
				Log.WithField("error", err).Error("Error unmarshaling BGP update message")
				continue
			}
			bgp.handleUpdateMessage(t)
		default:
			Log.Warnf("Unknown BGP message type: %s", m.Type)
		}
	}
	Log.Info("Stopped BGP message scanner.")
}
// Notifications returns a snapshot of the buffered notification
// messages (oldest first, at most NotificationsBufSize), copied out
// under nMu.
func (bgp *BGP) Notifications() []*types.NotificationType {
	notifications := make([]*types.NotificationType, 0, NotificationsBufSize)
	mutexed(&bgp.nMu, func() {
		bgp.notifications.Do(
			func(n interface{}) {
				if n != nil {
					notifications = append(notifications, n.(*types.NotificationType))
				}
			})
	})
	return notifications
}
// States returns a snapshot of the buffered session states (oldest
// first, at most StatesBufSize), copied out under sMu.
func (bgp *BGP) States() []*types.StateType {
	states := make([]*types.StateType, 0, StatesBufSize)
	mutexed(&bgp.sMu, func() {
		bgp.states.Do(
			func(n interface{}) {
				if n != nil {
					states = append(states, n.(*types.StateType))
				}
			})
	})
	return states
}
// Updates returns a snapshot copy of all update messages received so
// far. Both the length read and the copy happen under uMu: the
// original sized the destination via len(bgp.updates) before taking
// the lock, racing with concurrent appends in handleUpdateMessage.
func (bgp *BGP) Updates() []*types.UpdateType {
	var updates []*types.UpdateType
	mutexed(&bgp.uMu, func() {
		updates = make([]*types.UpdateType, len(bgp.updates))
		copy(updates, bgp.updates)
	})
	return updates
}
Better debug messaging for BGP messages.
package main
import (
"fmt"
"io"
"sync"
"container/ring"
"github.com/Shopify/exabgp-util/types"
"github.com/Shopify/exabgp-util/util"
"github.com/Sirupsen/logrus"
)
const (
NotificationsBufSize = 10
StatesBufSize = 10
)
type BGP struct {
reader io.Reader
notifications *ring.Ring
states *ring.Ring
updates []*types.UpdateType
nMu sync.Mutex
sMu sync.Mutex
uMu sync.Mutex
}
func NewBGP(reader io.Reader) (*BGP, error) {
return &BGP{reader: reader, notifications: ring.New(NotificationsBufSize), states: ring.New(StatesBufSize), updates: []*types.UpdateType{}}, nil
}
func (bgp *BGP) handleNotificationMessage(t *types.NotificationType) {
Log.WithField("notification", fmt.Sprintf("%s", *t)).Debugf("Notification message")
if *t != types.NotificationType("shutdown") {
Log.Warnf("Unknown BGP notification message: %s", t)
}
mutexed(&bgp.nMu, func() {
bgp.notifications.Value = t
bgp.notifications = bgp.notifications.Next()
})
}
func (bgp *BGP) handleStateMessage(t *types.StateType) {
Log.WithField("state", fmt.Sprintf("%+v", *t)).Debugf("State message")
switch t.State {
case "connected":
Log.Info("BGP session connected")
case "up":
Log.Info("BGP session up")
case "down":
Log.Info("BGP session down")
default:
Log.Warnf("Unknown BGP session state: %s", t.State)
}
mutexed(&bgp.sMu, func() {
bgp.states.Value = t
bgp.states = bgp.states.Next()
})
}
func (bgp *BGP) handleUpdateMessage(t *types.UpdateType) {
Log.WithField("message", fmt.Sprintf("%+v", *t)).Debugf("Update message")
mutexed(&bgp.uMu, func() {
bgp.updates = append(bgp.updates, t)
})
}
func (bgp *BGP) ReadMessages() {
Log.Info("Starting BGP message scanner")
messageChan := util.ScanMessage(bgp.reader)
for m := range messageChan {
Log.WithFields(logrus.Fields{
"type": m.Type,
"message": fmt.Sprintf("%+v", *m),
}).Debugf("Received BGP message")
switch m.Type {
case types.NotificationMessageType:
t, err := types.UnmarshalNotificationType(m.Notification)
if err != nil {
Log.WithField("error", err).Error("Error unmarshaling BGP notification message")
continue
}
bgp.handleNotificationMessage(t)
case types.StateMessageType:
t, err := types.UnmarshalStateType(m.Neighbor)
if err != nil {
Log.WithField("error", err).Error("Error unmarshaling BGP state message")
continue
}
bgp.handleStateMessage(t)
case types.UpdateMessageType:
t, err := types.UnmarshalUpdateType(m.Neighbor)
if err != nil {
Log.WithField("error", err).Error("Error unmarshaling BGP update message")
continue
}
bgp.handleUpdateMessage(t)
default:
Log.WithField("type", m.Type).Warn("Unknown BGP message type")
}
}
Log.Info("Stopped BGP message scanner")
}
func (bgp *BGP) Notifications() []*types.NotificationType {
notifications := make([]*types.NotificationType, 0, NotificationsBufSize)
mutexed(&bgp.nMu, func() {
bgp.notifications.Do(
func(n interface{}) {
if n != nil {
notifications = append(notifications, n.(*types.NotificationType))
}
})
})
return notifications
}
func (bgp *BGP) States() []*types.StateType {
states := make([]*types.StateType, 0, StatesBufSize)
mutexed(&bgp.sMu, func() {
bgp.states.Do(
func(n interface{}) {
if n != nil {
states = append(states, n.(*types.StateType))
}
})
})
return states
}
// Updates returns a snapshot copy of all update messages received so
// far. Both the length read and the copy happen under uMu: the
// original sized the destination via len(bgp.updates) before taking
// the lock, racing with concurrent appends in handleUpdateMessage.
func (bgp *BGP) Updates() []*types.UpdateType {
	var updates []*types.UpdateType
	mutexed(&bgp.uMu, func() {
		updates = make([]*types.UpdateType, len(bgp.updates))
		copy(updates, bgp.updates)
	})
	return updates
}
|
package main
import (
"fmt"
"net/url"
"os"
"github.com/ajm188/slack"
"github.com/ajm188/slack/plugins/github"
)
const (
version = "0.4.0"
)
// setRealNameFields DMs a user who joins the #general channel asking
// them to fill in their Slack real-name fields. The DM channel and the
// user info are fetched concurrently.
// NOTE(review): the unchecked type assertions on event/payload fields
// will panic if the Slack API shape changes — confirm upstream always
// sends these keys.
func setRealNameFields(bot *slack.Bot, event map[string]interface{}) (*slack.Message, slack.Status) {
	channel := event["channel"].(string)
	if channel != bot.Channels["general"] {
		return nil, slack.Continue
	}
	userID := event["user"].(string)
	dmChan := make(chan string)
	userChan := make(chan interface{})
	go func() {
		dm, _ := bot.OpenDirectMessage(userID)
		dmChan <- dm
	}()
	go func() {
		payload, _ := bot.Call("users.info", url.Values{"user": []string{userID}})
		userChan <- payload
	}()
	payload := (<-userChan).(map[string]interface{})
	success := payload["ok"].(bool)
	if !success {
		fmt.Println(payload)
		return nil, slack.Continue
	}
	user := payload["user"].(map[string]interface{})
	nick := user["name"].(string)
	text := "Please set your real name fields. https://hacsoc.slack.com/team/%s."
	text += " Then click \"Edit\"."
	text = fmt.Sprintf(text, nick)
	dm := <-dmChan
	return slack.NewMessage(text, dm), slack.Continue
}
// getSlackId replies to the requesting user with their own Slack ID.
func getSlackId(bot *slack.Bot, event map[string]interface{}) (*slack.Message, slack.Status) {
	user := event["user"].(string)
	return bot.Mention(user, "Your slack id is `"+user+"`.",
		event["channel"].(string)), slack.Continue
}
// sendDM sends a trivial direct message to the requesting user.
func sendDM(bot *slack.Bot, event map[string]interface{}) (*slack.Message, slack.Status) {
	user := event["user"].(string)
	return bot.DirectMessage(user, "hi"), slack.Continue
}
// troll mentions one specific user ("catofnostalgia") on every message
// they send; all other users are passed through untouched.
func troll(bot *slack.Bot, event map[string]interface{}) (*slack.Message, slack.Status) {
	user, ok := event["user"]
	if !ok || user.(string) != bot.Users["catofnostalgia"].ID {
		return nil, slack.Continue
	}
	return bot.Mention(user.(string), "where is the third lambda?",
		event["channel"].(string)), slack.Continue
}
// configureGithubPlugin wires the GitHub OAuth credentials into the
// github plugin package and initializes its shared client.
func configureGithubPlugin(id, secret, token string) {
	github.ClientID = id
	github.ClientSecret = secret
	github.AccessToken = token
	github.SharedClient = github.DefaultClient()
}
func getEnvvar(name string) (envvar string) {
envvar = os.Getenv(name)
if envvar == "" {
fmt.Println("Missing environment variable %s", name)
os.Exit(1)
}
return
}
// main wires up all bot behaviors (responders, reactions, event hooks,
// github plugin) from environment-provided credentials and runs the
// bot until it stops or errors.
func main() {
	token := getEnvvar("SLACKSOC_TOKEN")
	ghClientID := getEnvvar("GH_CLIENT_ID")
	ghClientSecret := getEnvvar("GH_CLIENT_SECRET")
	ghAccessToken := getEnvvar("GH_ACCESS_TOKEN")
	bot := slack.NewBot(token)
	bot.Respond("hi\\z", slack.Respond("hi there!"))
	bot.Respond("pm me", sendDM)
	bot.Respond("((what's)|(tell me) your)? ?version??",
		slack.Respond(fmt.Sprintf("My version is %s. My lib version is %s", version, slack.Version)))
	bot.Respond("((what's)|(tell me))? ?my id??", getSlackId)
	bot.Listen("gentoo", slack.React("funroll-loops"))
	bot.Listen(".+\\bslacksoc\\b", slack.React("raisedeyebrow"))
	bot.Listen("GNU/Linux", slack.React("stallman"))
	bot.OnEvent("message", troll)
	bot.OnEventWithSubtype("message", "channel_join", setRealNameFields)
	configureGithubPlugin(ghClientID, ghClientSecret, ghAccessToken)
	github.OpenIssue(bot, nil)
	fmt.Println("Starting bot")
	if err := bot.Start(); err != nil {
		fmt.Println(err)
	}
}
Update version too
package main
import (
"fmt"
"net/url"
"os"
"github.com/ajm188/slack"
"github.com/ajm188/slack/plugins/github"
)
const (
version = "0.4.1"
)
func setRealNameFields(bot *slack.Bot, event map[string]interface{}) (*slack.Message, slack.Status) {
channel := event["channel"].(string)
if channel != bot.Channels["general"] {
return nil, slack.Continue
}
userID := event["user"].(string)
dmChan := make(chan string)
userChan := make(chan interface{})
go func() {
dm, _ := bot.OpenDirectMessage(userID)
dmChan <- dm
}()
go func() {
payload, _ := bot.Call("users.info", url.Values{"user": []string{userID}})
userChan <- payload
}()
payload := (<-userChan).(map[string]interface{})
success := payload["ok"].(bool)
if !success {
fmt.Println(payload)
return nil, slack.Continue
}
user := payload["user"].(map[string]interface{})
nick := user["name"].(string)
text := "Please set your real name fields. https://hacsoc.slack.com/team/%s."
text += " Then click \"Edit\"."
text = fmt.Sprintf(text, nick)
dm := <-dmChan
return slack.NewMessage(text, dm), slack.Continue
}
func getSlackId(bot *slack.Bot, event map[string]interface{}) (*slack.Message, slack.Status) {
user := event["user"].(string)
return bot.Mention(user, "Your slack id is `" + user + "`.",
event["channel"].(string)), slack.Continue
}
func sendDM(bot *slack.Bot, event map[string]interface{}) (*slack.Message, slack.Status) {
user := event["user"].(string)
return bot.DirectMessage(user, "hi"), slack.Continue
}
func troll(bot *slack.Bot, event map[string]interface{}) (*slack.Message, slack.Status) {
user, ok := event["user"]
if !ok || user.(string) != bot.Users["catofnostalgia"].ID {
return nil, slack.Continue
}
return bot.Mention(user.(string), "where is the third lambda?",
event["channel"].(string)), slack.Continue
}
func configureGithubPlugin(id, secret, token string) {
github.ClientID = id
github.ClientSecret = secret
github.AccessToken = token
github.SharedClient = github.DefaultClient()
}
func getEnvvar(name string) (envvar string) {
envvar = os.Getenv(name)
if envvar == "" {
fmt.Println("Missing environment variable %s", name)
os.Exit(1)
}
return
}
func main() {
token := getEnvvar("SLACKSOC_TOKEN")
ghClientID := getEnvvar("GH_CLIENT_ID")
ghClientSecret := getEnvvar("GH_CLIENT_SECRET")
ghAccessToken := getEnvvar("GH_ACCESS_TOKEN")
bot := slack.NewBot(token)
bot.Respond("hi\\z", slack.Respond("hi there!"))
bot.Respond("pm me", sendDM)
bot.Respond("((what's)|(tell me) your)? ?version??",
slack.Respond(fmt.Sprintf("My version is %s. My lib version is %s", version, slack.Version)))
bot.Respond("((what's)|(tell me))? ?my id??", getSlackId)
bot.Listen("gentoo", slack.React("funroll-loops"))
bot.Listen(".+\\bslacksoc\\b", slack.React("raisedeyebrow"))
bot.Listen("GNU/Linux", slack.React("stallman"))
bot.OnEvent("message", troll)
bot.OnEventWithSubtype("message", "channel_join", setRealNameFields)
configureGithubPlugin(ghClientID, ghClientSecret, ghAccessToken)
github.OpenIssue(bot, nil)
fmt.Println("Starting bot")
if err := bot.Start(); err != nil {
fmt.Println(err)
}
}
|
package proto
import (
pb "code.google.com/p/goprotobuf/proto"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
// FileList accumulates the files found under RootDirectory, each wrapped in
// a protobuf File message.
type FileList struct {
	RootDirectory string
	Files         []*File
}
// VisitDir always descends; it appears to implement the pre-Go1
// filepath visitor interface — confirm whether it is still referenced.
func (fl *FileList) VisitDir(path string, f os.FileInfo) bool {
	return true
}

// VisitFile reads the file at path and appends it to the list.
func (fl *FileList) VisitFile(path string, f os.FileInfo) {
	file := fl.buildFile(path)
	fl.Files = append(fl.Files, file)
}
// CollectFiles walks dir and returns a File for every regular file directly
// inside it. Subdirectories are skipped, so the collection is non-recursive.
func CollectFiles(dir string) []*File {
	fileList := &FileList{
		RootDirectory: dir,
		Files:         make([]*File, 0),
	}
	markFn := func(path string, info os.FileInfo, err error) error {
		// Bug fix: check err before touching info. filepath.Walk invokes
		// the callback with a nil info and a non-nil err when the path
		// cannot be stat'ed (e.g. dir does not exist); the original
		// dereferenced info first and panicked.
		if err != nil || info == nil {
			return filepath.SkipDir
		}
		if info.IsDir() {
			// Bug fix: returning SkipDir for the root directory aborts the
			// entire walk before any file is visited. Descend into the root
			// itself but skip its subdirectories.
			if path == dir {
				return nil
			}
			return filepath.SkipDir
		}
		file := fileList.buildFile(path)
		fileList.Files = append(fileList.Files, file)
		return nil
	}
	filepath.Walk(dir, markFn)
	return fileList.Files
}
// Unpack writes every collected file back to disk under fl.RootDirectory,
// creating parent directories as needed. When verbose is true each written
// path is printed.
func (fl *FileList) Unpack(verbose bool) (err error) {
	for _, file := range fl.Files {
		absolutePath := file.AbsolutePath(fl.RootDirectory)
		path, _ := filepath.Split(absolutePath)
		err = os.MkdirAll(path, 0755)
		if err != nil {
			// NOTE(review): MkdirAll failures panic while Write failures
			// return — inconsistent error handling; confirm intent.
			panic(err.Error())
		}
		err = file.Write(fl.RootDirectory)
		if err != nil {
			return
		}
		if verbose {
			fmt.Printf("* %v\n", absolutePath)
		}
	}
	return
}
// buildFile reads the file at path into memory and wraps it in a File
// message whose Path is relative to fl.RootDirectory. It panics if the file
// cannot be read.
func (fl *FileList) buildFile(path string) *File {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		panic("Could not read file (" + path + "):" + err.Error())
	}
	// The File message stores its payload as a list of byte chunks; a single
	// chunk holds the whole file here.
	pdata := make([][]uint8, 0)
	pdata = append(pdata, data)
	path = relativePath(fl.RootDirectory, path)
	file := &File{
		Path: pb.String(path),
		Data: pdata,
	}
	return file
}
// AbsolutePath joins dir with the file's stored (relative) path.
func (f *File) AbsolutePath(dir string) string {
	return filepath.Join(dir, pb.GetString(f.Path))
}

// Write persists the file's first data chunk to disk under dir with mode
// 0644, wrapping any failure with the target path.
func (f *File) Write(dir string) (err error) {
	absolutePath := f.AbsolutePath(dir)
	err = ioutil.WriteFile(absolutePath, f.Data[0], 0644)
	if err != nil {
		err = errors.New("Couldn't write file (" + absolutePath + "):" + err.Error())
	}
	return
}
// relativePath strips parentDirectory from the front of path and returns
// the remainder (which keeps its leading separator).
func relativePath(parentDirectory string, path string) string {
	cleanedParentPath := filepath.Clean(parentDirectory)
	// Bug fix: strings.Replace removed the first occurrence of the parent
	// string anywhere inside path; TrimPrefix only strips it when it is
	// actually the leading component.
	return strings.TrimPrefix(path, cleanedParentPath)
}
Go 1's filepath.Walk calls the mark function even if the file/dir doesn't exist
package proto
import (
pb "code.google.com/p/goprotobuf/proto"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
// FileList accumulates the files found under RootDirectory, each wrapped in
// a protobuf File message.
type FileList struct {
	RootDirectory string
	Files         []*File
}

// VisitDir always descends; it appears to implement the pre-Go1
// filepath visitor interface — confirm whether it is still referenced.
func (fl *FileList) VisitDir(path string, f os.FileInfo) bool {
	return true
}

// VisitFile reads the file at path and appends it to the list.
func (fl *FileList) VisitFile(path string, f os.FileInfo) {
	file := fl.buildFile(path)
	fl.Files = append(fl.Files, file)
}
// CollectFiles walks dir and returns a File for every file visited.
// Directories are skipped via filepath.SkipDir, so the walk is
// non-recursive.
func CollectFiles(dir string) []*File {
	fileList := &FileList{
		RootDirectory: dir,
		Files:         make([]*File, 0),
	}
	markFn := func(path string, info os.FileInfo, err error) error {
		// Guard added for Go 1: Walk invokes the callback with a nil info
		// (and non-nil err) when the path cannot be stat'ed.
		if info == nil {
			return filepath.SkipDir
		}
		// NOTE(review): returning SkipDir when the root itself is a
		// directory aborts the whole walk before any file is visited —
		// confirm whether that is intended.
		if info.IsDir() {
			return filepath.SkipDir
		}
		if err != nil {
			return err
		}
		file := fileList.buildFile(path)
		fileList.Files = append(fileList.Files, file)
		return nil
	}
	// Walk's own error return is deliberately discarded here.
	filepath.Walk(dir, markFn)
	return fileList.Files
}
// Unpack writes every collected file back to disk under fl.RootDirectory,
// creating parent directories as needed. When verbose is true each written
// path is printed.
func (fl *FileList) Unpack(verbose bool) (err error) {
	for _, file := range fl.Files {
		absolutePath := file.AbsolutePath(fl.RootDirectory)
		path, _ := filepath.Split(absolutePath)
		err = os.MkdirAll(path, 0755)
		if err != nil {
			// NOTE(review): MkdirAll failures panic while Write failures
			// return — inconsistent error handling; confirm intent.
			panic(err.Error())
		}
		err = file.Write(fl.RootDirectory)
		if err != nil {
			return
		}
		if verbose {
			fmt.Printf("* %v\n", absolutePath)
		}
	}
	return
}
// buildFile reads the file at path into memory and wraps it in a File
// message whose Path is relative to fl.RootDirectory. It panics if the file
// cannot be read.
func (fl *FileList) buildFile(path string) *File {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		panic("Could not read file (" + path + "):" + err.Error())
	}
	// The File message stores its payload as a list of byte chunks; a single
	// chunk holds the whole file here.
	pdata := make([][]uint8, 0)
	pdata = append(pdata, data)
	path = relativePath(fl.RootDirectory, path)
	file := &File{
		Path: pb.String(path),
		Data: pdata,
	}
	return file
}

// AbsolutePath joins dir with the file's stored (relative) path.
func (f *File) AbsolutePath(dir string) string {
	return filepath.Join(dir, pb.GetString(f.Path))
}

// Write persists the file's first data chunk to disk under dir with mode
// 0644, wrapping any failure with the target path.
func (f *File) Write(dir string) (err error) {
	absolutePath := f.AbsolutePath(dir)
	err = ioutil.WriteFile(absolutePath, f.Data[0], 0644)
	if err != nil {
		err = errors.New("Couldn't write file (" + absolutePath + "):" + err.Error())
	}
	return
}

// relativePath removes the cleaned parent directory from path.
// NOTE(review): strings.Replace strips the first occurrence anywhere in
// path, not only a leading prefix — confirm paths always start with the
// parent.
func relativePath(parentDirectory string, path string) string {
	cleanedParentPath := filepath.Clean(parentDirectory)
	relativePath := strings.Replace(path, cleanedParentPath, "", 1)
	return relativePath
}
|
package seabird
import (
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/inject"
"github.com/belak/go-plugin"
"github.com/go-irc/irc"
)
// coreConfig holds the "core" section of the TOML config (decoded in
// NewBot via b.Config("core", ...)).
type coreConfig struct {
	Nick string
	User string
	Name string
	Pass string

	// Ping settings; parsed from duration strings such as "30s".
	PingFrequency duration
	PingTimeout   duration

	Host        string
	TLS         bool
	TLSNoVerify bool
	TLSCert     string
	TLSKey      string

	// Cmds are raw IRC lines written right after the 001 welcome.
	Cmds []string

	// Prefix marks lines as bot commands (see NewCommandMux).
	Prefix string

	Plugins []string

	Debug bool
}
// duration wraps time.Duration so it satisfies encoding.TextUnmarshaler and
// can be decoded straight from duration strings in the TOML config.
type duration struct {
	time.Duration
}

// UnmarshalText parses text such as "30s" or "5m" into the wrapped value.
func (d *duration) UnmarshalText(text []byte) error {
	var parseErr error
	d.Duration, parseErr = time.ParseDuration(string(text))
	return parseErr
}
// A Bot is our wrapper around the irc.Client. It could be used for a general
// client, but the provided convenience functions are designed around using this
// package to write a bot.
type Bot struct {
	mux *BasicMux

	// Config stuff: raw TOML primitives per section, decoded lazily by
	// Bot.Config, plus the already-decoded "core" section.
	confValues map[string]toml.Primitive
	md         toml.MetaData
	config     coreConfig

	// Internal things. client is nil until Run creates it.
	client   *irc.Client
	registry *plugin.Registry
	log      *logrus.Entry
	injector inject.Injector
}
// NewBot will return a new Bot given an io.Reader pointing to a
// config file.
func NewBot(confReader io.Reader) (*Bot, error) {
	var err error

	b := &Bot{
		mux:        NewBasicMux(),
		confValues: make(map[string]toml.Primitive),
		md:         toml.MetaData{},
		registry:   plugins.Copy(),
	}

	// Decode the file, but leave all the config sections intact so we can
	// decode those later.
	b.md, err = toml.DecodeReader(confReader, &b.confValues)
	if err != nil {
		return nil, err
	}

	// Load up the core config
	err = b.Config("core", &b.config)
	if err != nil {
		return nil, err
	}

	// Set up logging/debugging
	b.log = logrus.NewEntry(logrus.New())
	if b.config.Debug {
		b.log.Logger.Level = logrus.DebugLevel
	} else {
		b.log.Logger.Level = logrus.InfoLevel
	}

	// Both sub-muxes hang off PRIVMSG of the top-level mux.
	commandMux := NewCommandMux(b.config.Prefix)
	mentionMux := NewMentionMux()
	b.mux.Event("PRIVMSG", commandMux.HandleEvent)
	b.mux.Event("PRIVMSG", mentionMux.HandleEvent)

	// Register all the things we want with the plugin registry.
	b.registry.RegisterProvider(func() (*Bot, *BasicMux, *CommandMux, *MentionMux) {
		return b, b.mux, commandMux, mentionMux
	})

	return b, nil
}
// GetLogger grabs the underlying logger for this bot.
func (b *Bot) GetLogger() *logrus.Entry {
	return b.log
}

// CurrentNick returns the current nick of the bot.
func (b *Bot) CurrentNick() string {
	return b.client.CurrentNick()
}

// Config will decode the config section for the given name into the given
// interface{}. It errors if the section is absent from the config file.
func (b *Bot) Config(name string, c interface{}) error {
	if v, ok := b.confValues[name]; ok {
		return b.md.PrimitiveDecode(v, c)
	}

	return fmt.Errorf("Config section for %q missing", name)
}
// Send is a simple function to send an IRC event
func (b *Bot) Send(m *irc.Message) {
	b.client.WriteMessage(m)
}
// Reply sends a fmt.Sprintf-formatted PRIVMSG back to where m came from:
// the channel if m was a channel message, otherwise the sender directly.
// Each line of a multi-line message is sent as its own PRIVMSG.
func (b *Bot) Reply(m *irc.Message, format string, v ...interface{}) error {
	if len(m.Params) < 1 || len(m.Params[0]) < 1 {
		return errors.New("Invalid IRC message")
	}

	target := m.Prefix.Name
	if m.FromChannel() {
		target = m.Params[0]
	}

	lines := strings.Split(fmt.Sprintf(format, v...), "\n")
	for _, line := range lines {
		msg := &irc.Message{
			Prefix:  &irc.Prefix{},
			Command: "PRIVMSG",
			Params:  []string{target, line},
		}
		b.Send(msg)
	}

	return nil
}
// MentionReply acts the same as Bot.Reply but, when replying into a
// channel, prefixes every line with the sender's nick.
func (b *Bot) MentionReply(m *irc.Message, format string, v ...interface{}) error {
	if len(m.Params) < 1 || len(m.Params[0]) < 1 {
		return errors.New("Invalid IRC message")
	}

	target := m.Prefix.Name
	prefix := ""
	if m.FromChannel() {
		target = m.Params[0]
		prefix = m.Prefix.Name + ": "
	}

	lines := strings.Split(fmt.Sprintf(format, v...), "\n")
	for _, line := range lines {
		msg := &irc.Message{
			Prefix:  &irc.Prefix{},
			Command: "PRIVMSG",
			Params:  []string{target, prefix + line},
		}
		b.Send(msg)
	}

	return nil
}
// PrivateReply is similar to Reply, but it will always send privately
// (directly to the message's sender, never to a channel).
func (b *Bot) PrivateReply(m *irc.Message, format string, v ...interface{}) {
	b.Send(&irc.Message{
		Prefix:  &irc.Prefix{},
		Command: "PRIVMSG",
		Params: []string{
			m.Prefix.Name,
			fmt.Sprintf(format, v...),
		},
	})
}

// CTCPReply is a convenience function to respond to CTCP requests. Per the
// CTCP convention the response goes out as a NOTICE to the sender. It
// errors if m is not a CTCP message.
func (b *Bot) CTCPReply(m *irc.Message, format string, v ...interface{}) error {
	if m.Command != "CTCP" {
		return errors.New("Invalid CTCP message")
	}

	b.Send(&irc.Message{
		Prefix:  &irc.Prefix{},
		Command: "NOTICE",
		Params: []string{
			m.Prefix.Name,
			fmt.Sprintf(format, v...),
		},
	})
	return nil
}
// handshake sends the initial IRC registration sequence (CAP END, NICK,
// USER) from the configured identity.
func (b *Bot) handshake() {
	b.client.Writef("CAP END")
	b.client.Writef("NICK %s", b.config.Nick)
	b.client.Writef("USER %s 0.0.0.0 0.0.0.0 :%s", b.config.User, b.config.Name)
}

// Write will write an raw IRC message to the stream
func (b *Bot) Write(line string) {
	b.client.Write(line)
}

// Writef is a convenience method around fmt.Sprintf and Bot.Write
func (b *Bot) Writef(format string, args ...interface{}) {
	b.client.Writef(format, args...)
}

// FromChannel is a wrapper around the irc package's FromChannel. It's
// more accurate than Message.FromChannel so this should be used
// whenever possible.
func (b *Bot) FromChannel(m *irc.Message) bool {
	return b.client.FromChannel(m)
}
// handler is the bot's irc.Client callback: on the 001 welcome it replays
// the configured raw commands, then every message is passed to the mux.
func (b *Bot) handler(c *irc.Client, m *irc.Message) {
	// Handle the event and pass it along
	if m.Command == "001" {
		b.log.Info("Connected")
		for _, v := range b.config.Cmds {
			b.Write(v)
		}
	}

	b.mux.HandleEvent(b, m)
}
// ConnectAndRun is a convenience function which will pull the
// connection information out of the config and connect, then call
// Run.
func (b *Bot) ConnectAndRun() error {
	// The ReadWriteCloser will contain either a *net.Conn or *tls.Conn
	var c io.ReadWriteCloser
	var err error
	if b.config.TLS {
		conf := &tls.Config{
			// TLSNoVerify intentionally disables cert verification.
			InsecureSkipVerify: b.config.TLSNoVerify,
		}

		// Optional client certificate for servers that authenticate by cert.
		if b.config.TLSCert != "" && b.config.TLSKey != "" {
			var cert tls.Certificate
			cert, err = tls.LoadX509KeyPair(b.config.TLSCert, b.config.TLSKey)
			if err != nil {
				return err
			}

			conf.Certificates = []tls.Certificate{cert}
			conf.BuildNameToCertificate()
		}

		c, err = tls.Dial("tcp", b.config.Host, conf)
	} else {
		c, err = net.Dial("tcp", b.config.Host)
	}

	if err != nil {
		return err
	}

	return b.Run(c)
}
// Run starts the bot and loops until it dies. It accepts a
// ReadWriter. If you wish to use the connection feature from the
// config, use ConnectAndRun.
func (b *Bot) Run(c io.ReadWriter) error {
	var err error
	// Load the configured plugins before touching the network.
	b.injector, err = b.registry.Load(b.config.Plugins, nil)
	if err != nil {
		return err
	}

	// Create a client from the connection we've just opened
	rc := irc.ClientConfig{
		Nick:          b.config.Nick,
		Pass:          b.config.Pass,
		User:          b.config.User,
		Name:          b.config.Name,
		PingFrequency: b.config.PingFrequency.Duration,
		PingTimeout:   b.config.PingTimeout.Duration,
		Handler:       irc.HandlerFunc(b.handler),
	}
	b.client = irc.NewClient(c, rc)

	// Now that we have a client, set up debug callbacks
	b.client.Reader.DebugCallback = func(line string) {
		b.log.Debug("<-- ", strings.Trim(line, "\r\n"))
	}
	b.client.Writer.DebugCallback = func(line string) {
		// RFC 1459 caps a line at 512 bytes; longer lines get truncated by
		// the server, so warn.
		if len(line) > 512 {
			b.log.Warnf("Line longer than 512 chars: %s", strings.Trim(line, "\r\n"))
		}
		b.log.Debug("--> ", strings.Trim(line, "\r\n"))
	}

	// Start the main loop
	return b.client.Run()
}
Move CTCP rewriting from go-irc to seabird
package seabird
import (
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/inject"
"github.com/belak/go-plugin"
"github.com/go-irc/irc"
)
// coreConfig holds the "core" section of the TOML config (decoded in
// NewBot via b.Config("core", ...)).
type coreConfig struct {
	Nick string
	User string
	Name string
	Pass string

	// Ping settings; parsed from duration strings such as "30s".
	PingFrequency duration
	PingTimeout   duration

	Host        string
	TLS         bool
	TLSNoVerify bool
	TLSCert     string
	TLSKey      string

	// Cmds are raw IRC lines written right after the 001 welcome.
	Cmds []string

	// Prefix marks lines as bot commands (see NewCommandMux).
	Prefix string

	Plugins []string

	Debug bool
}
// duration wraps time.Duration so it satisfies encoding.TextUnmarshaler and
// can be decoded straight from duration strings in the TOML config.
type duration struct {
	time.Duration
}

// UnmarshalText parses text such as "30s" or "5m" into the wrapped value.
func (d *duration) UnmarshalText(text []byte) error {
	var err error
	d.Duration, err = time.ParseDuration(string(text))
	return err
}
// A Bot is our wrapper around the irc.Client. It could be used for a general
// client, but the provided convenience functions are designed around using this
// package to write a bot.
type Bot struct {
	mux *BasicMux

	// Config stuff: raw TOML primitives per section, decoded lazily by
	// Bot.Config, plus the already-decoded "core" section.
	confValues map[string]toml.Primitive
	md         toml.MetaData
	config     coreConfig

	// Internal things. client is nil until Run creates it.
	client   *irc.Client
	registry *plugin.Registry
	log      *logrus.Entry
	injector inject.Injector
}
// NewBot will return a new Bot given an io.Reader pointing to a
// config file.
func NewBot(confReader io.Reader) (*Bot, error) {
	var err error

	b := &Bot{
		mux:        NewBasicMux(),
		confValues: make(map[string]toml.Primitive),
		md:         toml.MetaData{},
		registry:   plugins.Copy(),
	}

	// Decode the file, but leave all the config sections intact so we can
	// decode those later.
	b.md, err = toml.DecodeReader(confReader, &b.confValues)
	if err != nil {
		return nil, err
	}

	// Load up the core config
	err = b.Config("core", &b.config)
	if err != nil {
		return nil, err
	}

	// Set up logging/debugging
	b.log = logrus.NewEntry(logrus.New())
	if b.config.Debug {
		b.log.Logger.Level = logrus.DebugLevel
	} else {
		b.log.Logger.Level = logrus.InfoLevel
	}

	// Both sub-muxes hang off PRIVMSG of the top-level mux.
	commandMux := NewCommandMux(b.config.Prefix)
	mentionMux := NewMentionMux()
	b.mux.Event("PRIVMSG", commandMux.HandleEvent)
	b.mux.Event("PRIVMSG", mentionMux.HandleEvent)

	// Register all the things we want with the plugin registry.
	b.registry.RegisterProvider(func() (*Bot, *BasicMux, *CommandMux, *MentionMux) {
		return b, b.mux, commandMux, mentionMux
	})

	return b, nil
}
// GetLogger grabs the underlying logger for this bot.
func (b *Bot) GetLogger() *logrus.Entry {
	return b.log
}

// CurrentNick returns the current nick of the bot.
func (b *Bot) CurrentNick() string {
	return b.client.CurrentNick()
}

// Config will decode the config section for the given name into the given
// interface{}. It errors if the section is absent from the config file.
func (b *Bot) Config(name string, c interface{}) error {
	if v, ok := b.confValues[name]; ok {
		return b.md.PrimitiveDecode(v, c)
	}

	return fmt.Errorf("Config section for %q missing", name)
}

// Send is a simple function to send an IRC event
func (b *Bot) Send(m *irc.Message) {
	b.client.WriteMessage(m)
}
// Reply to an irc.Message with a convenience wrapper around fmt.Sprintf.
// The reply goes to the channel if m was a channel message, otherwise to
// the sender; multi-line output becomes one PRIVMSG per line.
func (b *Bot) Reply(m *irc.Message, format string, v ...interface{}) error {
	if len(m.Params) < 1 || len(m.Params[0]) < 1 {
		return errors.New("Invalid IRC message")
	}

	target := m.Prefix.Name
	if m.FromChannel() {
		target = m.Params[0]
	}

	fullMsg := fmt.Sprintf(format, v...)
	for _, resp := range strings.Split(fullMsg, "\n") {
		b.Send(&irc.Message{
			Prefix:  &irc.Prefix{},
			Command: "PRIVMSG",
			Params: []string{
				target,
				resp,
			},
		})
	}

	return nil
}

// MentionReply acts the same as Bot.Reply but it will prefix it with the user's
// nick if we are in a channel.
func (b *Bot) MentionReply(m *irc.Message, format string, v ...interface{}) error {
	if len(m.Params) < 1 || len(m.Params[0]) < 1 {
		return errors.New("Invalid IRC message")
	}

	target := m.Prefix.Name
	prefix := ""
	if m.FromChannel() {
		target = m.Params[0]
		prefix = m.Prefix.Name + ": "
	}

	fullMsg := fmt.Sprintf(format, v...)
	for _, resp := range strings.Split(fullMsg, "\n") {
		b.Send(&irc.Message{
			Prefix:  &irc.Prefix{},
			Command: "PRIVMSG",
			Params: []string{
				target,
				prefix + resp,
			},
		})
	}

	return nil
}
// PrivateReply is similar to Reply, but it will always send privately
// (directly to the message's sender, never to a channel).
func (b *Bot) PrivateReply(m *irc.Message, format string, v ...interface{}) {
	b.Send(&irc.Message{
		Prefix:  &irc.Prefix{},
		Command: "PRIVMSG",
		Params: []string{
			m.Prefix.Name,
			fmt.Sprintf(format, v...),
		},
	})
}

// CTCPReply is a convenience function to respond to CTCP requests. Per the
// CTCP convention the response goes out as a NOTICE to the sender. The
// "CTCP" command is synthesized by Bot.handler from delimited PRIVMSGs.
func (b *Bot) CTCPReply(m *irc.Message, format string, v ...interface{}) error {
	if m.Command != "CTCP" {
		return errors.New("Invalid CTCP message")
	}

	b.Send(&irc.Message{
		Prefix:  &irc.Prefix{},
		Command: "NOTICE",
		Params: []string{
			m.Prefix.Name,
			fmt.Sprintf(format, v...),
		},
	})
	return nil
}
// handshake sends the initial IRC registration sequence (CAP END, NICK,
// USER) from the configured identity.
func (b *Bot) handshake() {
	b.client.Writef("CAP END")
	b.client.Writef("NICK %s", b.config.Nick)
	b.client.Writef("USER %s 0.0.0.0 0.0.0.0 :%s", b.config.User, b.config.Name)
}

// Write will write an raw IRC message to the stream
func (b *Bot) Write(line string) {
	b.client.Write(line)
}

// Writef is a convenience method around fmt.Sprintf and Bot.Write
func (b *Bot) Writef(format string, args ...interface{}) {
	b.client.Writef(format, args...)
}

// FromChannel is a wrapper around the irc package's FromChannel. It's
// more accurate than Message.FromChannel so this should be used
// whenever possible.
func (b *Bot) FromChannel(m *irc.Message) bool {
	return b.client.FromChannel(m)
}
// handler is the bot's irc.Client callback. On the 001 welcome it replays
// the configured raw commands; PRIVMSGs carrying a \x01-delimited CTCP
// payload are rewritten to a synthetic "CTCP" command before the mux runs.
func (b *Bot) handler(c *irc.Client, m *irc.Message) {
	// Handle the event and pass it along
	if m.Command == "001" {
		b.log.Info("Connected")
		for _, v := range b.config.Cmds {
			b.Write(v)
		}
	} else if m.Command == "PRIVMSG" {
		// Clean up CTCP stuff so plugins don't need to parse it manually.
		// A CTCP payload is the trailing param wrapped in \x01 bytes; strip
		// the delimiters and relabel the command.
		lastArg := m.Trailing()
		lastIdx := len(lastArg) - 1
		if lastIdx > 0 && lastArg[0] == '\x01' && lastArg[lastIdx] == '\x01' {
			m.Command = "CTCP"
			m.Params[len(m.Params)-1] = lastArg[1:lastIdx]
		}
	}

	b.mux.HandleEvent(b, m)
}
// ConnectAndRun is a convenience function which will pull the
// connection information out of the config and connect, then call
// Run.
func (b *Bot) ConnectAndRun() error {
	// The ReadWriteCloser will contain either a *net.Conn or *tls.Conn
	var c io.ReadWriteCloser
	var err error
	if b.config.TLS {
		conf := &tls.Config{
			// TLSNoVerify intentionally disables cert verification.
			InsecureSkipVerify: b.config.TLSNoVerify,
		}

		// Optional client certificate for servers that authenticate by cert.
		if b.config.TLSCert != "" && b.config.TLSKey != "" {
			var cert tls.Certificate
			cert, err = tls.LoadX509KeyPair(b.config.TLSCert, b.config.TLSKey)
			if err != nil {
				return err
			}

			conf.Certificates = []tls.Certificate{cert}
			conf.BuildNameToCertificate()
		}

		c, err = tls.Dial("tcp", b.config.Host, conf)
	} else {
		c, err = net.Dial("tcp", b.config.Host)
	}

	if err != nil {
		return err
	}

	return b.Run(c)
}
// Run starts the bot and loops until it dies. It accepts a
// ReadWriter. If you wish to use the connection feature from the
// config, use ConnectAndRun.
func (b *Bot) Run(c io.ReadWriter) error {
	var err error
	// Load the configured plugins before touching the network.
	b.injector, err = b.registry.Load(b.config.Plugins, nil)
	if err != nil {
		return err
	}

	// Create a client from the connection we've just opened
	rc := irc.ClientConfig{
		Nick:          b.config.Nick,
		Pass:          b.config.Pass,
		User:          b.config.User,
		Name:          b.config.Name,
		PingFrequency: b.config.PingFrequency.Duration,
		PingTimeout:   b.config.PingTimeout.Duration,
		Handler:       irc.HandlerFunc(b.handler),
	}
	b.client = irc.NewClient(c, rc)

	// Now that we have a client, set up debug callbacks
	b.client.Reader.DebugCallback = func(line string) {
		b.log.Debug("<-- ", strings.Trim(line, "\r\n"))
	}
	b.client.Writer.DebugCallback = func(line string) {
		// RFC 1459 caps a line at 512 bytes; longer lines get truncated by
		// the server, so warn.
		if len(line) > 512 {
			b.log.Warnf("Line longer than 512 chars: %s", strings.Trim(line, "\r\n"))
		}
		b.log.Debug("--> ", strings.Trim(line, "\r\n"))
	}

	// Start the main loop
	return b.client.Run()
}
|
package routes
import (
"fmt"
"net/http"
"github.com/DVI-GI-2017/Jira__backend/handlers"
"github.com/DVI-GI-2017/Jira__backend/auth"
)
func NewRouter() (http.Handler, error) {
const apiRoot = "/api/v1"
mux := http.NewServeMux()
mux.HandleFunc(fmt.Sprintf("%s%s", apiRoot, "/login/"), login.Login)
return handlers.Logger(mux), nil
}
// Router wraps an http.ServeMux.
// NOTE(review): currently unused by NewRouter — confirm whether it is
// referenced elsewhere before relying on it.
type Router struct {
	mux http.ServeMux
}
Fix unused package.
package routes
import (
"fmt"
"net/http"
"github.com/DVI-GI-2017/Jira__backend/auth"
"github.com/DVI-GI-2017/Jira__backend/handlers"
)
// NewRouter builds the API mux. Every endpoint is registered under apiRoot
// and the whole mux is wrapped in the request-logging handler.
func NewRouter() (http.Handler, error) {
	const apiRoot = "/api/v1"

	mux := http.NewServeMux()
	mux.HandleFunc(fmt.Sprintf("%s%s", apiRoot, "/login/"), auth.Login)

	return handlers.Logger(mux), nil
}

// Router wraps an http.ServeMux.
// NOTE(review): currently unused by NewRouter — confirm whether it is
// referenced elsewhere before relying on it.
type Router struct {
	mux http.ServeMux
}
|
package unison
import (
"fmt"
"strings"
"github.com/Sirupsen/logrus"
"github.com/bwmarrin/discordgo"
"github.com/s1kx/unison/events"
)
// BotSettings contains the definition of bot behavior.
// It is used for creating the actual bot.
type BotSettings struct {
	Token string

	Commands   []*Command
	EventHooks []*EventHook
	Services   []*Service

	// Every option here is added to an array of accepted prefixes later
	// So you can set values in CommandPrefix and/or CommandPrefixes at the same time
	// This also regards the option CommandInvokedByMention
	CommandPrefix           string
	CommandPrefixes         []string
	CommandInvokedByMention bool
}
// RunBot creates a bot from settings, connects it to Discord, and runs it
// until it terminates.
func RunBot(settings *BotSettings) error {
	// TODO: Validate commands

	// discordgo requires "Bot " prefix for Bot applications
	token := settings.Token
	if !strings.HasPrefix(token, "Bot ") {
		token = "Bot " + token
	}

	// Initialize discord client
	ds, err := discordgo.New(token)
	if err != nil {
		return err
	}

	// Initialize and start bot
	bot, err := newBot(settings, ds)
	if err != nil {
		return err
	}

	// Bug fix: the error from Run was silently discarded and RunBot always
	// returned nil; propagate it to the caller.
	return bot.Run()
}
// Bot is an active bot session.
type Bot struct {
	*BotSettings
	Discord *discordgo.Session

	// Lookup map for name/alias => command
	commandMap map[string]*Command
	// Lookup map for name => hook
	eventHookMap map[string]*EventHook
	// Lookup map for name => service
	serviceMap map[string]*Service

	eventDispatcher *eventDispatcher

	// Contains a generated array of accepted prefixes based on BotSettings
	commandPrefixes         []string
	commandInvokedByMention bool

	// Populated from the READY event in onReady.
	readyState *discordgo.Ready
	User       *discordgo.User
}
// newBot builds a Bot from settings: it registers every command, event hook,
// and service, then assembles the accepted command-prefix list.
func newBot(settings *BotSettings, ds *discordgo.Session) (*Bot, error) {
	// Initialize bot
	bot := &Bot{
		BotSettings:             settings,
		Discord:                 ds,
		commandMap:              make(map[string]*Command),
		eventHookMap:            make(map[string]*EventHook),
		serviceMap:              make(map[string]*Service),
		eventDispatcher:         newEventDispatcher(),
		commandPrefixes:         []string{},
		commandInvokedByMention: settings.CommandInvokedByMention,
	}

	// Register commands
	for _, cmd := range bot.Commands {
		err := bot.RegisterCommand(cmd)
		if err != nil {
			return nil, err
		}
	}

	// Register event hooks
	for _, hook := range bot.EventHooks {
		err := bot.RegisterEventHook(hook)
		if err != nil {
			return nil, err
		}
	}

	// Register services
	for _, srv := range bot.Services {
		err := bot.RegisterService(srv)
		if err != nil {
			return nil, err
		}
	}

	// Generate the array of accepted command prefixes.
	// The singular CommandPrefix is folded into CommandPrefixes first.
	if settings.CommandPrefix != "" {
		settings.CommandPrefixes = append(settings.CommandPrefixes, settings.CommandPrefix)
	}
	for _, prefix := range settings.CommandPrefixes {
		err := bot.RegisterCommandPrefix(prefix)
		if err != nil {
			return nil, err
		}
	}

	// Make sure at least one prefix exists, let's use "!" as default if none was given.
	// (Mention-invoked bots get their prefix later, in onReady.)
	if len(bot.CommandPrefixes) == 0 && !settings.CommandInvokedByMention {
		err := bot.RegisterCommandPrefix(DefaultCommandPrefix) // See command.go
		if err != nil {
			return nil, err
		}
	}

	return bot, nil
}
// GetServiceData returns the value stored under key for the named service,
// or "" when either the service or the key does not exist.
func (bot *Bot) GetServiceData(srvName string, key string) string {
	if val, ok := bot.serviceMap[srvName]; ok {
		if d, s := val.Data[key]; s {
			// key exist
			return d
		}
	}

	return ""
}

// SetServiceData updates an EXISTING key for the named service and returns
// the stored value; it returns "" (and stores nothing) when the service or
// key is absent.
func (bot *Bot) SetServiceData(srvName string, key string, val string) string {
	if v, ok := bot.serviceMap[srvName]; ok {
		if _, s := v.Data[key]; s {
			bot.serviceMap[srvName].Data[key] = val
			return val
		}
	}

	return ""
}
// Run opens the Discord websocket and blocks forever.
func (bot *Bot) Run() error {
	// Add handler to wait for ready state in order to initialize the bot fully.
	bot.Discord.AddHandler(bot.onReady)

	// Open the websocket and begin listening.
	err := bot.Discord.Open()
	if err != nil {
		return fmt.Errorf("error opening connection: %s", err)
	}

	logrus.Info("Bot is now running. Press CTRL-C to exit.")
	// Simple way to keep program running until CTRL-C is pressed.
	// TODO: Add signal handler to exit gracefully.
	<-make(chan struct{})
	return nil
}
// RegisterCommand adds cmd to the command lookup map, rejecting duplicates
// by name.
func (bot *Bot) RegisterCommand(cmd *Command) error {
	name := cmd.Name
	if ex, exists := bot.commandMap[name]; exists {
		return &DuplicateCommandError{Existing: ex, New: cmd, Name: name}
	}
	bot.commandMap[name] = cmd

	// TODO: Register aliases

	return nil
}

// RegisterEventHook adds hook to the hook lookup map and subscribes it with
// the event dispatcher; duplicates by name are rejected.
func (bot *Bot) RegisterEventHook(hook *EventHook) error {
	name := hook.Name
	if ex, exists := bot.eventHookMap[name]; exists {
		return &DuplicateEventHookError{Existing: ex, New: hook}
	}
	bot.eventHookMap[name] = hook

	if len(hook.Events) == 0 {
		logrus.Warnf("Hook '%s' is not subscribed to any events", name)
	}

	bot.eventDispatcher.AddHook(hook)

	return nil
}

// RegisterService adds srv to the service lookup map, rejecting duplicates
// by name.
func (bot *Bot) RegisterService(srv *Service) error {
	name := srv.Name
	if ex, exists := bot.serviceMap[name]; exists {
		return &DuplicateServiceError{Existing: ex, New: srv, Name: name}
	}
	bot.serviceMap[name] = srv

	return nil
}
// RegisterCommandPrefix adds prefix to the set of accepted command
// prefixes. Empty prefixes and duplicates are rejected with typed errors.
func (bot *Bot) RegisterCommandPrefix(prefix string) error {
	// A prefix must contain at least one character.
	if len(prefix) < 1 {
		return &TooShortCommandPrefixError{Prefix: prefix}
	}

	// Reject a prefix that has already been registered.
	for _, registered := range bot.commandPrefixes {
		if registered == prefix {
			return &DuplicateCommandPrefixError{Prefix: prefix}
		}
	}

	bot.commandPrefixes = append(bot.commandPrefixes, prefix)
	return nil
}
// onReady finalizes initialization once Discord reports READY: it records
// the bot identity, optionally adds the mention prefix, starts services,
// and installs the generic event handler.
func (bot *Bot) onReady(ds *discordgo.Session, r *discordgo.Ready) {
	// Set bot state
	bot.readyState = r
	bot.User = r.User

	logrus.WithFields(logrus.Fields{
		"ID":       r.User.ID,
		"Username": r.User.Username,
	}).Infof("Bot is connected and running.")

	// Add a command prefix based on the Bot ID if commandInvokedByMention is set to true
	if bot.commandInvokedByMention {
		bot.RegisterCommandPrefix("<@" + bot.User.ID + ">")
		//TODO[BLOCKER]: What if this fails?
	}

	// Create context for services
	ctx := NewContext(bot, ds)

	// Run services
	for _, srv := range bot.serviceMap {
		if srv.Deactivated {
			continue
		}

		// run service (each in its own goroutine)
		go srv.Action(ctx)
	}

	// Add generic handler for event hooks
	// Add command handler
	bot.Discord.AddHandler(func(ds *discordgo.Session, event interface{}) {
		bot.onEvent(ds, event)
	})
}
// onEvent wraps a raw discordgo event, dispatches it to the subscribed
// hooks, and runs the command handler for newly created messages.
func (bot *Bot) onEvent(ds *discordgo.Session, dv interface{}) {
	// Inspect and wrap event
	ev, err := events.NewDiscordEvent(dv)
	if err != nil {
		logrus.Errorf("event handler: %s", err)
		// Bug fix: the original fell through and dereferenced ev (likely
		// nil after a wrapping failure); bail out instead.
		return
	}

	// Create context for handlers
	ctx := NewContext(bot, ds)

	// Invoke event hooks for the hooks that are subscribed to the event type
	bot.eventDispatcher.Dispatch(ctx, ev)

	// Invoke command handler on new messages
	if ev.Type == events.MessageCreateEvent {
		handleMessageCreate(ctx, ev.Event.(*discordgo.MessageCreate))
	}
}
Fixed #14
package unison
import (
"fmt"
"os"
"os/signal"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/bwmarrin/discordgo"
"github.com/s1kx/unison/events"
)
// termSignal receives OS termination signals in Run and is also handed to
// service contexts so they can observe shutdown.
var termSignal chan os.Signal

// BotSettings contains the definition of bot behavior.
// It is used for creating the actual bot.
type BotSettings struct {
	Token string

	Commands   []*Command
	EventHooks []*EventHook
	Services   []*Service

	// Every option here is added to an array of accepted prefixes later
	// So you can set values in CommandPrefix and/or CommandPrefixes at the same time
	// This also regards the option CommandInvokedByMention
	CommandPrefix           string
	CommandPrefixes         []string
	CommandInvokedByMention bool
}
// RunBot creates a bot from settings, connects it to Discord, and runs it
// until a termination signal arrives.
func RunBot(settings *BotSettings) error {
	// TODO: Validate commands

	// discordgo requires "Bot " prefix for Bot applications
	token := settings.Token
	if !strings.HasPrefix(token, "Bot ") {
		token = "Bot " + token
	}

	// Initialize discord client
	ds, err := discordgo.New(token)
	if err != nil {
		return err
	}

	// Initialize and start bot
	bot, err := newBot(settings, ds)
	if err != nil {
		return err
	}

	// Bug fix: the error from Run was silently discarded and RunBot always
	// returned nil; propagate it to the caller.
	return bot.Run()
}
// Bot is an active bot session.
type Bot struct {
	*BotSettings
	Discord *discordgo.Session

	// Lookup map for name/alias => command
	commandMap map[string]*Command
	// Lookup map for name => hook
	eventHookMap map[string]*EventHook
	// Lookup map for name => service
	serviceMap map[string]*Service

	eventDispatcher *eventDispatcher

	// Contains a generated array of accepted prefixes based on BotSettings
	commandPrefixes         []string
	commandInvokedByMention bool

	// Populated from the READY event in onReady.
	readyState *discordgo.Ready
	User       *discordgo.User
}
// newBot builds a Bot from settings: it registers every command, event hook,
// and service, then assembles the accepted command-prefix list.
func newBot(settings *BotSettings, ds *discordgo.Session) (*Bot, error) {
	// Initialize bot
	bot := &Bot{
		BotSettings:             settings,
		Discord:                 ds,
		commandMap:              make(map[string]*Command),
		eventHookMap:            make(map[string]*EventHook),
		serviceMap:              make(map[string]*Service),
		eventDispatcher:         newEventDispatcher(),
		commandPrefixes:         []string{},
		commandInvokedByMention: settings.CommandInvokedByMention,
	}

	// Register commands
	for _, cmd := range bot.Commands {
		err := bot.RegisterCommand(cmd)
		if err != nil {
			return nil, err
		}
	}

	// Register event hooks
	for _, hook := range bot.EventHooks {
		err := bot.RegisterEventHook(hook)
		if err != nil {
			return nil, err
		}
	}

	// Register services
	for _, srv := range bot.Services {
		err := bot.RegisterService(srv)
		if err != nil {
			return nil, err
		}
	}

	// Generate the array of accepted command prefixes.
	// The singular CommandPrefix is folded into CommandPrefixes first.
	if settings.CommandPrefix != "" {
		settings.CommandPrefixes = append(settings.CommandPrefixes, settings.CommandPrefix)
	}
	for _, prefix := range settings.CommandPrefixes {
		err := bot.RegisterCommandPrefix(prefix)
		if err != nil {
			return nil, err
		}
	}

	// Make sure at least one prefix exists, let's use "!" as default if none was given.
	// (Mention-invoked bots get their prefix later, in onReady.)
	if len(bot.CommandPrefixes) == 0 && !settings.CommandInvokedByMention {
		err := bot.RegisterCommandPrefix(DefaultCommandPrefix) // See command.go
		if err != nil {
			return nil, err
		}
	}

	return bot, nil
}
// GetServiceData returns the value stored under key for the named service,
// or "" when either the service or the key does not exist.
func (bot *Bot) GetServiceData(srvName string, key string) string {
	if val, ok := bot.serviceMap[srvName]; ok {
		if d, s := val.Data[key]; s {
			// key exist
			return d
		}
	}

	return ""
}

// SetServiceData updates an EXISTING key for the named service and returns
// the stored value; it returns "" (and stores nothing) when the service or
// key is absent.
func (bot *Bot) SetServiceData(srvName string, key string, val string) string {
	if v, ok := bot.serviceMap[srvName]; ok {
		if _, s := v.Data[key]; s {
			bot.serviceMap[srvName].Data[key] = val
			return val
		}
	}

	return ""
}
// Run opens the Discord websocket, blocks until a termination signal
// arrives, then closes the session cleanly.
func (bot *Bot) Run() error {
	// Add handler to wait for ready state in order to initialize the bot fully.
	bot.Discord.AddHandler(bot.onReady)

	// Open the websocket and begin listening.
	fmt.Print("Opening WS connection to Discord .. ")
	err := bot.Discord.Open()
	if err != nil {
		return fmt.Errorf("error: %s", err)
	}
	fmt.Println("OK")

	fmt.Println("Bot is now running. Press CTRL-C to exit.")
	// Block until a termination signal is delivered.
	// NOTE(review): os.Kill (SIGKILL) cannot be trapped; listing it here
	// has no effect.
	termSignal = make(chan os.Signal, 1)
	signal.Notify(termSignal, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
	<-termSignal
	fmt.Println("\nShutting down bot..")

	// Cleanly close down the Discord session.
	fmt.Print("\tClosing WS discord connection .. ")
	err = bot.Discord.Close()
	if err != nil {
		fmt.Println("ERROR")
		return err
	}
	fmt.Println("OK")

	return nil
}
// RegisterCommand adds cmd to the command lookup map, rejecting duplicates
// by name.
func (bot *Bot) RegisterCommand(cmd *Command) error {
	name := cmd.Name
	if ex, exists := bot.commandMap[name]; exists {
		return &DuplicateCommandError{Existing: ex, New: cmd, Name: name}
	}
	bot.commandMap[name] = cmd

	// TODO: Register aliases

	return nil
}

// RegisterEventHook adds hook to the hook lookup map and subscribes it with
// the event dispatcher; duplicates by name are rejected.
func (bot *Bot) RegisterEventHook(hook *EventHook) error {
	name := hook.Name
	if ex, exists := bot.eventHookMap[name]; exists {
		return &DuplicateEventHookError{Existing: ex, New: hook}
	}
	bot.eventHookMap[name] = hook

	if len(hook.Events) == 0 {
		logrus.Warnf("Hook '%s' is not subscribed to any events", name)
	}

	bot.eventDispatcher.AddHook(hook)

	return nil
}

// RegisterService adds srv to the service lookup map, rejecting duplicates
// by name.
func (bot *Bot) RegisterService(srv *Service) error {
	name := srv.Name
	if ex, exists := bot.serviceMap[name]; exists {
		return &DuplicateServiceError{Existing: ex, New: srv, Name: name}
	}
	bot.serviceMap[name] = srv

	return nil
}
// RegisterCommandPrefix adds prefix to the set of accepted command
// prefixes. Empty prefixes and duplicates are rejected with typed errors.
func (bot *Bot) RegisterCommandPrefix(prefix string) error {
	// The prefix must have a length of minimum 1
	if len(prefix) < 1 {
		return &TooShortCommandPrefixError{Prefix: prefix}
	}

	// Dont add one that already exists
	exists := false
	for _, existingPrefix := range bot.commandPrefixes {
		if existingPrefix == prefix {
			exists = true
			break
		}
	}
	if !exists {
		// Add the new prefix entry.
		bot.commandPrefixes = append(bot.commandPrefixes, prefix)
	} else {
		// Was not able to add the prefix because it already exists.
		return &DuplicateCommandPrefixError{Prefix: prefix}
	}

	return nil
}
// onReady finalizes initialization once Discord reports READY: it records
// the bot identity, optionally adds the mention prefix, starts services,
// and installs the generic event handler.
func (bot *Bot) onReady(ds *discordgo.Session, r *discordgo.Ready) {
	// Set bot state
	bot.readyState = r
	bot.User = r.User

	logrus.WithFields(logrus.Fields{
		"ID":       r.User.ID,
		"Username": r.User.Username,
	}).Infof("Bot is connected and running.")

	// Add a command prefix based on the Bot ID if commandInvokedByMention is set to true
	if bot.commandInvokedByMention {
		bot.RegisterCommandPrefix("<@" + bot.User.ID + ">")
		//TODO[BLOCKER]: What if this fails?
	}

	// Create context for services
	ctx := NewContext(bot, ds, termSignal)

	// Run services
	for _, srv := range bot.serviceMap {
		if srv.Deactivated {
			continue
		}

		// run service (each in its own goroutine)
		go srv.Action(ctx)
	}

	// Add generic handler for event hooks
	// Add command handler
	bot.Discord.AddHandler(func(ds *discordgo.Session, event interface{}) {
		bot.onEvent(ds, event)
	})
}
// onEvent is the generic discordgo handler: it wraps the raw event, runs
// subscribed event hooks, and forwards message-create events to the
// command handler.
func (bot *Bot) onEvent(ds *discordgo.Session, dv interface{}) {
	// Inspect and wrap event
	ev, err := events.NewDiscordEvent(dv)
	if err != nil {
		logrus.Errorf("event handler: %s", err)
		// Bug fix: ev is not usable after a wrapping error; bail out
		// instead of dereferencing it below.
		return
	}
	// Create context for handlers
	ctx := NewContext(bot, ds, termSignal)
	// Invoke event hooks for the hooks that are subscribed to the event type
	bot.eventDispatcher.Dispatch(ctx, ev)
	// Invoke command handler on new messages
	if ev.Type == events.MessageCreateEvent {
		handleMessageCreate(ctx, ev.Event.(*discordgo.MessageCreate))
	}
}
|
package routes
import (
"context"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"github.com/DVI-GI-2017/Jira__backend/params"
)
// NewRouter creates a router rooted at rootPath with empty GET and POST
// route tables.
func NewRouter(rootPath string) (*router, error) {
	r := &router{
		routes: map[string]map[*regexp.Regexp]Route{
			http.MethodGet:  make(map[*regexp.Regexp]Route),
			http.MethodPost: make(map[*regexp.Regexp]Route),
		},
	}
	if err := r.SetRootPath(rootPath); err != nil {
		return r, err
	}
	return r, nil
}
// router dispatches HTTP requests to handlers registered per method under
// compiled path patterns; paths are resolved relative to root.
type router struct {
	root   *url.URL                          // base path all registered routes are relative to
	routes map[string]map[*regexp.Regexp]Route // method -> compiled pattern -> route
}
// SetRootPath sets the router's root; registered paths are interpreted
// relative to it. Returns an error when path is not a valid URL.
func (r *router) SetRootPath(path string) error {
	parsed, err := url.Parse(path)
	if err != nil {
		return fmt.Errorf("invalid path format %s: %v", path, err)
	}
	r.root = parsed
	return nil
}
// ServeHTTP implements http.Handler. Requests whose path cannot be made
// relative to the root get a 404.
func (r *router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	relPath, err := relativePath(r.root.Path, req.URL.Path)
	if err != nil {
		http.NotFound(w, req)
		// Bug fix: without this return the request was still dispatched
		// to a handler after the 404 had already been written.
		return
	}
	r.handleRequest(w, req, relPath)
}
// handleRequest finds the first registered pattern (for the request method)
// matching path, attaches the parsed parameters to the request context
// under the "params" key and invokes the route's handler.
// Responds 400 on parameter-parsing errors, 404 when no pattern matches,
// and 405 when the method has no route table at all.
// NOTE(review): map iteration order is random, so with overlapping
// patterns the chosen route is nondeterministic — confirm patterns are
// disjoint.
func (r *router) handleRequest(w http.ResponseWriter, req *http.Request, path string) {
	routeMap, ok := r.routes[req.Method]
	if !ok {
		w.WriteHeader(http.StatusMethodNotAllowed)
		fmt.Fprintf(w, "Method: %s not supported", req.Method)
		return
	}
	for pattern, route := range routeMap {
		if !pattern.MatchString(path) {
			continue
		}
		parameters, err := params.NewParams(req, pattern, path)
		if err != nil {
			fmt.Printf("error while parsing params: %v", err)
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		req = req.WithContext(context.WithValue(req.Context(), "params", parameters))
		route.Handler(w, req)
		return
	}
	// Bug fix: a supported method with no matching pattern is a missing
	// resource (404), not a method error (405).
	http.NotFound(w, req)
}
// Add registers route under its compiled pattern. Patterns containing ":"
// are treated as simple named-segment patterns and converted to regexps
// first. Only GET and POST are supported.
func (r *router) Add(route Route) error {
	pattern := route.Pattern
	if strings.Contains(pattern, ":") {
		pattern = convertSimplePatternToRegexp(pattern)
	}
	compiled, err := regexp.Compile(pattern)
	if err != nil {
		return err
	}
	if route.Method != http.MethodGet && route.Method != http.MethodPost {
		return fmt.Errorf("Error method '%s' not supported.", route.Method)
	}
	r.routes[route.Method][compiled] = route
	return nil
}
Add some comments.
package routes
import (
"context"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"github.com/DVI-GI-2017/Jira__backend/params"
)
// NewRouter creates a router rooted at rootPath with empty GET and POST
// route tables.
func NewRouter(rootPath string) (*router, error) {
	r := &router{
		routes: map[string]map[*regexp.Regexp]Route{
			http.MethodGet:  make(map[*regexp.Regexp]Route),
			http.MethodPost: make(map[*regexp.Regexp]Route),
		},
	}
	if err := r.SetRootPath(rootPath); err != nil {
		return r, err
	}
	return r, nil
}
// router dispatches HTTP requests to handlers registered per method under
// compiled path patterns; paths are resolved relative to root.
type router struct {
	root   *url.URL                          // base path all registered routes are relative to
	routes map[string]map[*regexp.Regexp]Route // method -> compiled pattern -> route
}
// SetRootPath sets the router's root; registered paths are interpreted
// relative to it. Returns an error when path is not a valid URL.
func (r *router) SetRootPath(path string) error {
	parsed, err := url.Parse(path)
	if err != nil {
		return fmt.Errorf("invalid path format %s: %v", path, err)
	}
	r.root = parsed
	return nil
}
// ServeHTTP implements http.Handler. Requests whose path cannot be made
// relative to the root get a 404.
func (r *router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	relPath, err := relativePath(r.root.Path, req.URL.Path)
	if err != nil {
		http.NotFound(w, req)
		// Bug fix: without this return the request was still dispatched
		// to a handler after the 404 had already been written.
		return
	}
	r.handleRequest(w, req, relPath)
}
// handleRequest finds the first registered pattern (for the request method)
// matching path, attaches the parsed parameters to the request context
// under the "params" key and invokes the route's handler.
// Responds 400 on parameter-parsing errors, 404 when no pattern matches,
// and 405 when the method has no route table at all.
// NOTE(review): map iteration order is random, so with overlapping
// patterns the chosen route is nondeterministic — confirm patterns are
// disjoint.
func (r *router) handleRequest(w http.ResponseWriter, req *http.Request, path string) {
	routeMap, ok := r.routes[req.Method]
	if !ok {
		w.WriteHeader(http.StatusMethodNotAllowed)
		fmt.Fprintf(w, "Method: %s not supported", req.Method)
		return
	}
	for pattern, route := range routeMap {
		if !pattern.MatchString(path) {
			continue
		}
		parameters, err := params.NewParams(req, pattern, path)
		if err != nil {
			fmt.Printf("error while parsing params: %v", err)
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		req = req.WithContext(context.WithValue(req.Context(), "params", parameters))
		route.Handler(w, req)
		return
	}
	// Bug fix: a supported method with no matching pattern is a missing
	// resource (404), not a method error (405).
	http.NotFound(w, req)
}
// Add registers route under its compiled pattern. Patterns containing ":"
// are treated as simple named-segment patterns and converted to regexps
// first. Only GET and POST are supported.
func (r *router) Add(route Route) error {
	pattern := route.Pattern
	if strings.Contains(pattern, ":") {
		pattern = convertSimplePatternToRegexp(pattern)
	}
	compiled, err := regexp.Compile(pattern)
	if err != nil {
		return err
	}
	if route.Method != http.MethodGet && route.Method != http.MethodPost {
		return fmt.Errorf("Error method '%s' not supported.", route.Method)
	}
	r.routes[route.Method][compiled] = route
	return nil
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/fatih/color"
"gopkg.in/cheggaaa/pb.v1"
)
// attrs store user-defined parameters.
type attrs struct {
	Region      string // AWS region of the bucket
	Bucket      string // bucket whose objects are converted
	Config      string // path to the AWS shared-credentials file ("" = default)
	Section     string // section of the credentials file to use
	Concurrency int    // maximum number of concurrent CopyObject calls
}
// Get user-defined parameters from the CLI. Only -bucket is mandatory;
// region falls back to us-east-1 in main.
var (
	bucketPtr      = flag.String("bucket", "", "Defines bucket. This is a mandatory paramenter!")
	regionPtr      = flag.String("region", "", "Defines region")
	configPtr      = flag.String("config", "", "Allow changing AWS account")
	sectionPtr     = flag.String("section", "default", "Which part of AWS credentials to use")
	concurrencyPtr = flag.Int("maxcon", 10, "Set up maximum concurrency for this task. Default is 10")
)
// logger writes every object/warning pair from info to <bucket>-error.log
// and prints a red summary line to the console.
func logger(bucket string, info map[string]error) {
	endMessg := "Script finished with some errors. Check " + bucket + "-error.log for details"
	// Bug fix: the file was created and then immediately opened a second
	// time; one os.Create (create/truncate, writable) is sufficient.
	logFile, err := os.Create(bucket + "-error.log")
	if err != nil {
		log.Println("Script ended with some errors, but log-file wasn't created due to: ", err)
	} else {
		defer logFile.Close()
		log.SetOutput(logFile)
		for object, warning := range info {
			log.Println("WARNING: Some issues occur while processing ", object, warning)
		}
	}
	color.Red(endMessg)
}
// convert walks every object in attrs.Bucket and re-copies the ones that
// are not already REDUCED_REDUNDANCY with that storage class, running at
// most attrs.Concurrency CopyObject calls in parallel. It returns a map
// from object key to the error encountered for that object.
func convert(attrs attrs) map[string]error {
	warns := map[string]error{}
	var warnsMu sync.Mutex // warns is written from many goroutines
	creds := credentials.NewSharedCredentials(attrs.Config, attrs.Section)
	_, err := creds.Get()
	if err != nil {
		color.Set(color.FgRed)
		log.Fatal(err)
		color.Unset()
	}
	// Create new connection to S3
	svc := s3.New(session.New(), &aws.Config{
		Region:      aws.String(attrs.Region),
		Credentials: creds,
	})
	params := &s3.ListObjectsInput{
		Bucket: aws.String(attrs.Bucket),
	}
	// Bug fix: the ListObjects error was silently discarded, which made
	// the code below dereference a nil response on failure.
	// NOTE(review): ListObjects returns at most one page of keys; large
	// buckets need pagination — confirm whether that matters here.
	resp, err := svc.ListObjects(params)
	if err != nil {
		color.Set(color.FgRed)
		log.Fatal(err)
		color.Unset()
	}
	fmt.Println(len(resp.Contents), " objects in the bucket.")
	// This is used to limit simultaneous goroutines.
	throttle := make(chan int, attrs.Concurrency)
	var wg sync.WaitGroup
	// Loop through the objects in the bucket and create a copy of each
	// object with the REDUCED_REDUNDANCY storage class.
	bar := pb.StartNew(len(resp.Contents))
	for _, content := range resp.Contents {
		if *content.StorageClass != "REDUCED_REDUNDANCY" {
			throttle <- 1
			wg.Add(1)
			// Bug fix: the goroutine captured the shared loop variable;
			// shadow it so each goroutine copies its own object.
			content := content
			go func() {
				defer wg.Done()
				copyParams := &s3.CopyObjectInput{
					Bucket:       aws.String(attrs.Bucket),
					CopySource:   aws.String(attrs.Bucket + "/" + *content.Key),
					Key:          aws.String(*content.Key),
					StorageClass: aws.String("REDUCED_REDUNDANCY"),
				}
				if _, err := svc.CopyObject(copyParams); err != nil {
					// Bug fix: the map write was racy; guard it.
					warnsMu.Lock()
					warns[*content.Key] = err
					warnsMu.Unlock()
				}
				<-throttle
			}()
		}
		bar.Increment()
	}
	// Bug fix: wg.Wait() used to run inside the loop, which serialized
	// every copy; wait once after everything has been started.
	wg.Wait()
	bar.FinishPrint("Done!")
	return warns
}
// main parses the CLI flags, applies defaults, runs the conversion and
// logs any per-object warnings plus the total duration.
func main() {
	start := time.Now()
	var region string
	// Parsing arguments
	flag.Parse()
	if *bucketPtr == "" {
		color.Set(color.FgRed)
		fmt.Println("You haven't define bucket! Please, do it with -bucket= \n Script usage:")
		flag.PrintDefaults()
		// Bug fix: log.Fatal never returns, so the color reset (and the
		// old trailing return) were unreachable; reset before exiting.
		color.Unset()
		log.Fatal("Bucket not specified")
	}
	if *regionPtr == "" {
		region = "us-east-1"
		fmt.Println("You haven't specified region. Default region will be us-east-1")
	} else {
		region = *regionPtr
	}
	attrs := attrs{
		Region: region,
		Bucket: *bucketPtr,
		// Bug fix: the -config flag was parsed but its value never used;
		// the credentials file path was always empty.
		Config:      *configPtr,
		Section:     *sectionPtr,
		Concurrency: *concurrencyPtr,
	}
	warns := convert(attrs)
	if len(warns) > 0 {
		logger(attrs.Bucket, warns)
	}
	elapsed := time.Since(start)
	log.Printf("Convertion took: %s", elapsed)
}
Convert in any S3 storage class
package main
import (
"flag"
"fmt"
"log"
"os"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/fatih/color"
"gopkg.in/cheggaaa/pb.v1"
)
// attrs store user-defined parameters
type attrs struct {
Region string
Bucket string
Config string
Section string
Type string
Concurrency int
}
// Get user-defined parameters from the CLI. Only -bucket is mandatory;
// region falls back to us-east-1 and -type to STANDARD.
var (
	bucketPtr      = flag.String("bucket", "", "Defines bucket. This is a mandatory paramenter!")
	regionPtr      = flag.String("region", "", "Defines region")
	configPtr      = flag.String("config", "", "Allow changing AWS account")
	sectionPtr     = flag.String("section", "default", "Which part of AWS credentials to use")
	typePtr        = flag.String("type", "STANDARD", "Define AWS storage class to convert to: STANDARD, REDUCED_REDUNDANCY, GLACIER. Default: STANDARD")
	concurrencyPtr = flag.Int("maxcon", 10, "Set up maximum concurrency for this task. Default is 10")
)
// checkClass maps a storage class given on the command line to its
// canonical AWS spelling. Only the exact upper- or lower-case spellings of
// STANDARD, REDUCED_REDUNDANCY and GLACIER are accepted; anything else
// aborts the program.
func checkClass(storageType string) string {
	switch storageType {
	case "STANDARD", "standard":
		return "STANDARD"
	case "REDUCED_REDUNDANCY", "reduced_redundancy":
		return "REDUCED_REDUNDANCY"
	case "GLACIER", "glacier":
		return "GLACIER"
	}
	log.Fatal("Unknown S3 storage class!")
	return "" // unreachable; log.Fatal exits
}
// logger writes every object/warning pair from info to <bucket>-error.log
// and prints a red summary line to the console.
func logger(bucket string, info map[string]error) {
	endMessg := "Script finished with some errors. Check " + bucket + "-error.log for details"
	// Bug fix: the file was created and then immediately opened a second
	// time; one os.Create (create/truncate, writable) is sufficient.
	logFile, err := os.Create(bucket + "-error.log")
	if err != nil {
		log.Println("Script ended with some errors, but log-file wasn't created due to: ", err)
	} else {
		defer logFile.Close()
		log.SetOutput(logFile)
		for object, warning := range info {
			log.Println("WARNING: Some issues occur while processing ", object, warning)
		}
	}
	color.Red(endMessg)
}
// convert walks every object in attrs.Bucket and re-copies the ones that
// are not already in the chosen storage class (attrs.Type), running at
// most attrs.Concurrency CopyObject calls in parallel. It returns a map
// from object key to the error encountered for that object.
func convert(attrs attrs) map[string]error {
	warns := map[string]error{}
	var warnsMu sync.Mutex // warns is written from many goroutines
	creds := credentials.NewSharedCredentials(attrs.Config, attrs.Section)
	_, err := creds.Get()
	if err != nil {
		color.Set(color.FgRed)
		log.Fatal(err)
		color.Unset()
	}
	// Create new connection to S3
	svc := s3.New(session.New(), &aws.Config{
		Region:      aws.String(attrs.Region),
		Credentials: creds,
	})
	params := &s3.ListObjectsInput{
		Bucket: aws.String(attrs.Bucket),
	}
	// Bug fix: the ListObjects error was silently discarded, which made
	// the code below dereference a nil response on failure.
	// NOTE(review): ListObjects returns at most one page of keys; large
	// buckets need pagination — confirm whether that matters here.
	resp, err := svc.ListObjects(params)
	if err != nil {
		color.Set(color.FgRed)
		log.Fatal(err)
		color.Unset()
	}
	fmt.Println(len(resp.Contents), " objects in the bucket.")
	// This is used to limit simultaneous goroutines.
	throttle := make(chan int, attrs.Concurrency)
	var wg sync.WaitGroup
	// Loop through the objects in the bucket and create a copy of each
	// object with the storage class you've chosen.
	bar := pb.StartNew(len(resp.Contents))
	for _, content := range resp.Contents {
		if *content.StorageClass != attrs.Type {
			throttle <- 1
			wg.Add(1)
			// Bug fix: the goroutine captured the shared loop variable;
			// shadow it so each goroutine copies its own object.
			content := content
			go func() {
				defer wg.Done()
				copyParams := &s3.CopyObjectInput{
					Bucket:       aws.String(attrs.Bucket),
					CopySource:   aws.String(attrs.Bucket + "/" + *content.Key),
					Key:          aws.String(*content.Key),
					StorageClass: aws.String(attrs.Type),
				}
				if _, err := svc.CopyObject(copyParams); err != nil {
					// Bug fix: the map write was racy; guard it.
					warnsMu.Lock()
					warns[*content.Key] = err
					warnsMu.Unlock()
				}
				<-throttle
			}()
		}
		bar.Increment()
	}
	// Bug fix: wg.Wait() used to run inside the loop, which serialized
	// every copy; wait once after everything has been started.
	wg.Wait()
	bar.FinishPrint("Done!")
	return warns
}
// main parses the CLI flags, applies defaults, validates the target
// storage class, runs the conversion and logs any per-object warnings
// plus the total duration.
func main() {
	start := time.Now()
	var region string
	// Parsing arguments
	flag.Parse()
	if *bucketPtr == "" {
		color.Set(color.FgRed)
		fmt.Println("You haven't define bucket! Please, do it with -bucket= \n Script usage:")
		flag.PrintDefaults()
		// Bug fix: log.Fatal never returns, so the color reset (and the
		// old trailing return) were unreachable; reset before exiting.
		color.Unset()
		log.Fatal("Bucket not specified")
	}
	if *regionPtr == "" {
		region = "us-east-1"
		fmt.Println("You haven't specified region. Default region will be us-east-1")
	} else {
		region = *regionPtr
	}
	storageType := checkClass(*typePtr)
	attrs := attrs{
		Region: region,
		Bucket: *bucketPtr,
		// Bug fix: the -config flag was parsed but its value never used;
		// the credentials file path was always empty.
		Config:      *configPtr,
		Section:     *sectionPtr,
		Type:        storageType,
		Concurrency: *concurrencyPtr,
	}
	warns := convert(attrs)
	if len(warns) > 0 {
		logger(attrs.Bucket, warns)
	}
	elapsed := time.Since(start)
	log.Printf("Convertion took: %s", elapsed)
}
|
package main
/*
www.rtve.es/api/clan/series/spanish/todas (follow redirect)
http://www.rtve.es/api/programas/80170/videos
*/
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
)
// verbose enables debug() output on stderr (set with the -v flag).
var verbose = false

// dirs maps logical directory roles to the filesystem paths the script
// uses (created by makeDirs).
var dirs = map[string]string{
	"base":     "/nas/3TB/Media/In/rtve/",
	"download": "/nas/3TB/Media/In/rtve/d",
	"cache":    "/nas/3TB/Media/In/rtve/cache",
	"log":      "/nas/3TB/Media/In/rtve/log",
	"publish":  "/nas/3TB/Media/Video/Infantil",
}

// keys holds the per-consumer AES keys used to build ztnr video URLs.
var keys = map[string]string{
	"oceano":  "pmku579tg465GDjf1287gDFFED56788C", // Tablet Clan
	"carites": "167Sdfg8r4Kuo94hnserw4Zis87wtiVr", // Tablet RTVE
	"orfeo":   "k0rf30jfpmbn8s0rcl4nTvE0ip3doRan", // Movil Clan
	"caliope": "9qfr0ydg6dGJ3cho2p1mo284dgXcVsdi", // Movil RTVE
}
// stripchars returns str with every rune that occurs in chr removed.
func stripchars(str, chr string) string {
	drop := func(r rune) rune {
		if strings.ContainsRune(chr, r) {
			return -1 // negative value tells strings.Map to drop the rune
		}
		return r
	}
	return strings.Map(drop, str)
}
// Episode is a representation of each episode as returned by the RTVE
// API, plus script-private download bookkeeping under Private.
type Episode struct {
	ShortTitle       string
	LongTitle        string
	ShortDescription string
	LongDescription  string
	Episode          int    // episode number within the program
	ID               int    `json:",string"` // numeric id (the API sends it as a string)
	ProgramRef       string
	// ProgramInfo carries the owning program's title.
	ProgramInfo struct {
		Title string
	}
	// Private is not part of the API payload; it records where and what to
	// download once a working consumer URL has been found (see remote()).
	Private struct {
		URL       string // consumer URL that answered 200
		EndURL    string // final URL after redirects
		Offset    int
		Size      int64  // remote Content-Length
		Ext       string // extension from the final URL (".mp4" fallback)
		Videofile string // "<ID><Ext>" target filename
	}
	// Qualities lists the encodings the API advertises for this episode;
	// download() uses the file sizes to decide whether a local copy is
	// canonical.
	Qualities []struct {
		Type     string
		Preset   string
		Filesize int64
		Duration int
	}
}
// Programa is a representation of one program (series) and, after
// getVideos has run, its list of available episodes.
type Programa struct {
	Name             string
	WebOficial       string
	Description      string
	LongTitle        string
	ShortDescription string
	LongDescription  string
	ID               int `json:",string"` // numeric id (the API sends it as a string)
	episodios        []Episode // filled by getVideos; unexported, not part of the JSON
}
// videosPrograma mirrors the paged response of the
// /api/programas/<id>/videos endpoint.
type videosPrograma struct {
	Page struct {
		TotalPages  int
		Total       int
		NumElements int
		Number      int
		Offset      int
		Size        int
		Items       []Episode
	}
}
// Programas mirrors the paged response of the series-listing endpoint.
// NOTE(review): appears unused in this file; listPrograms declares its own
// local type — verify before removing.
type Programas struct {
	Page struct {
		TotalPages int
		Items      []Programa
	}
}
// makeDirs ensures every directory in dirs exists (creating parents,
// mode 0755). Any failure is fatal.
func makeDirs() {
	for _, dir := range dirs {
		if err := os.MkdirAll(dir, 0755); err != nil {
			log.Fatal(err)
		}
	}
}
// pkcsS7Padding appends PKCS#7 padding for a 16-byte block size: between 1
// and 16 copies of the pad length itself, so the result is always a whole
// number of blocks.
func pkcsS7Padding(data []byte) []byte {
	const blockSize = 16
	pad := blockSize - len(data)%blockSize
	return append(data, bytes.Repeat([]byte{byte(pad)}, pad)...)
}
// unpkcs7Padding strips PKCS#7 padding: the last byte encodes how many
// padding bytes to remove.
// Robustness fix: empty input and an out-of-range pad byte used to panic
// (index/slice out of range); such data is now returned unchanged.
func unpkcs7Padding(data []byte) []byte {
	length := len(data)
	if length == 0 {
		return data
	}
	unpadding := int(data[length-1])
	if unpadding > length {
		return data
	}
	return data[:(length - unpadding)]
}
// getTime returns "now + 150h, rounded to the hour" as Unix milliseconds.
func getTime() int64 {
	future := time.Now().Add(150 * time.Hour).Round(time.Hour)
	return future.UnixNano() / int64(time.Millisecond)
}
// cryptaes AES-CBC-encrypts text with key, using an all-zero IV and PKCS#7
// padding, and returns the ciphertext base64-encoded. An invalid AES key
// length is fatal.
func cryptaes(text, key string) string {
	block, err := aes.NewCipher([]byte(key))
	if err != nil {
		log.Fatal(err)
	}
	iv := make([]byte, aes.BlockSize) // zero IV, matching the remote service
	plaintext := pkcsS7Padding([]byte(text))
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)
	return base64.StdEncoding.EncodeToString(ciphertext)
}
// orfeo builds the "orfeo" (mobile Clan) consumer URL for video id at
// timestamp t (ms): the path segment is the encrypted "<id>_es_<t>".
func orfeo(id int, t int64) string {
	const mobilekey = "k0rf30jfpmbn8s0rcl4nTvE0ip3doRan"
	token := cryptaes(fmt.Sprintf("%d_es_%d", id, t), mobilekey)
	return "http://www.rtve.es/ztnr/consumer/orfeo/video/" + token
}
// ztnrurl builds the consumer URL for video id at timestamp t (ms) for the
// given consumer class, using that class's key from the keys map.
func ztnrurl(id int, t int64, clase string) string {
	secret := fmt.Sprintf("%d_es_%d", id, t)
	return fmt.Sprintf("http://www.rtve.es/ztnr/consumer/%s/video/%s", clase, cryptaes(secret, keys[clase]))
}
// oceano builds the "oceano" (tablet Clan) consumer URL for video id at
// timestamp t (ms): the path segment is the encrypted "<id>_es_<t>".
func oceano(id int, t int64) string {
	const tabletkey = "pmku579tg465GDjf1287gDFFED56788C"
	token := cryptaes(fmt.Sprintf("%d_es_%d", id, t), tabletkey)
	return "http://www.rtve.es/ztnr/consumer/oceano/video/" + token
}
// cacheFile maps url to its on-disk cache path: the hex SHA-256 of the
// URL, inside the cache directory.
func cacheFile(url string) string {
	digest := sha256.Sum256([]byte(url))
	return path.Join(dirs["cache"], fmt.Sprintf("%x", digest))
}
// read fetches url as JSON into v through an on-disk cache: a cached copy
// younger than 3 hours is reused, otherwise the URL is downloaded and the
// cache refreshed. Any failure is fatal; the error return is always nil
// (kept for call-site compatibility).
func read(url string, v interface{}) error {
	cache := cacheFile(url)
	fi, err := os.Stat(cache)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	if os.IsNotExist(err) || time.Now().Unix()-fi.ModTime().Unix() > 3*3600 {
		log.Println("Downloading", url, "to cache")
		res, err := http.Get(url)
		// Bug fix: the Get error was ignored (then overwritten by the
		// ReadAll error), and a failed request dereferenced a nil body.
		if err != nil {
			log.Fatal(err)
		}
		content, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			log.Fatal(err)
		}
		if err = ioutil.WriteFile(cache, content, 0644); err != nil {
			log.Fatal(err)
		}
	}
	content, err := ioutil.ReadFile(cache)
	if err != nil {
		log.Fatal(err)
	}
	if err = json.Unmarshal(content, v); err != nil {
		log.Fatal(err)
	}
	return nil
}
// getVideos loads the episode list of programid (first page, up to 60
// items) into p.episodios.
func (p *Programa) getVideos(programid int) {
	url := fmt.Sprintf("http://www.rtve.es/api/programas/%d/videos?size=60", programid)
	var videos videosPrograma
	err := read(url, &videos)
	if err != nil {
		log.Fatal(err)
	}
	// Bug fix: this check used to run before read() filled the struct, so
	// it could never fire; the format string was also missing the second
	// verb for NumElements.
	if videos.Page.TotalPages > 1 {
		log.Printf("Warning: More than 1 page of results: %d. NumElements: %d", videos.Page.TotalPages, videos.Page.NumElements)
	}
	p.episodios = videos.Page.Items
	// Bug fix: guard against an empty list before indexing Items[0].
	if len(videos.Page.Items) == 0 {
		log.Println("No episodes for program", programid)
		return
	}
	log.Println("Tenemos episodios de", videos.Page.Items[0].ProgramInfo.Title)
}
// remote issues a HEAD request for this episode through the given consumer
// class and, on a 200, records the final URL, extension, target filename
// and size in e.Private. Returns the HTTP status code; a transport error
// is fatal.
func (e *Episode) remote(class string) int {
	t := time.Now().UTC().Round(time.Second)
	ts := t.UnixNano() / int64(time.Millisecond)
	videourl := ztnrurl(e.ID, ts, class)
	res, err := http.Head(videourl)
	if err != nil {
		log.Fatal(err)
	}
	// Fix: HEAD responses carry no payload, but the body must still be
	// closed so the transport can reuse the connection.
	defer res.Body.Close()
	if res.StatusCode == 200 {
		e.Private.Ext = path.Ext(res.Request.URL.Path)
		if e.Private.Ext == "" {
			e.Private.Ext = ".mp4"
			log.Println("WARNING: Empty extension. Forcing mp4.")
		}
		e.Private.Videofile = fmt.Sprintf("%d%s", e.ID, e.Private.Ext)
		e.Private.Size = res.ContentLength
		e.Private.EndURL = res.Request.URL.String()
		e.Private.URL = videourl
	}
	return res.StatusCode
}
// json returns the episode as indented JSON; on a marshalling error it
// logs the error and returns an empty string.
func (e *Episode) json() string {
	out, err := json.MarshalIndent(e, "", " ")
	if err != nil {
		log.Println("json marshall error:", err)
	}
	return string(out)
}
// writeData persists the episode metadata as <ID>.json in the download
// directory. A write failure is fatal.
func (e *Episode) writeData() {
	target := path.Join(dirs["download"], fmt.Sprintf("%d.json", e.ID))
	if err := ioutil.WriteFile(target, []byte(e.json()), 0644); err != nil {
		log.Fatal(err)
	}
}
// debug prints its arguments to stderr when the -v flag is set.
func debug(wat ...interface{}) {
	if verbose {
		// Bug fix: spread the variadic args with "..." so they print as
		// separate space-separated values instead of one bracketed slice.
		fmt.Fprintln(os.Stderr, wat...)
	}
}
// stat probes the consumer endpoints in preference order until one answers
// 200 for this episode (remote() fills e.Private on success). Reports
// whether any candidate was found.
func (e *Episode) stat() bool {
	debug("e.stat()", e.ID, e.humanName())
	for _, consumer := range []string{"oceano", "carites", "orfeo", "caliope"} {
		if e.remote(consumer) == 200 {
			return true
		}
	}
	log.Println("No candidates for", e)
	return false
}
// download fetches the episode's video into the download directory. It
// skips the transfer when the local copy is already at least as large as
// the remote one AND its size matches one of the advertised quality sizes
// ("canonical"). A larger remote paired with a non-canonical local copy
// gets the local file renamed to .bak first. The transfer goes to
// <file>.temp and is renamed into place only after io.Copy succeeds, so an
// interrupted download never clobbers a good file.
func (e *Episode) download() {
	if e.Private.Videofile == "" {
		log.Fatal("e.Private.Videofile is empty when trying to download")
	}
	filename := path.Join(dirs["download"], e.Private.Videofile)
	fi, err := os.Stat(filename)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	// Set of file sizes the API advertises for this episode; used to tell
	// whether the local copy is one of the official encodes.
	sizes := map[int64]bool{}
	if !os.IsNotExist(err) {
		if e.Qualities != nil {
			for _, q := range e.Qualities {
				sizes[q.Filesize] = true
			}
		}
		debug("sizes", sizes, len(sizes), "sizes[fi.Size()]=", sizes[fi.Size()], "sizes[fi.Size()+1]=", sizes[fi.Size()+1])
		if fi.Size() >= e.Private.Size && sizes[fi.Size()] {
			// Our file is bigger and canonical — nothing to do.
			// fmt.Fprintln(os.Stdout, err, "> Sile", fi.Size(), e.Private.Size)
			return
		}
		if fi.Size() < e.Private.Size {
			if sizes[e.Private.Size] {
				log.Println("Better version of", e.ID, fi.Size(), "available. Remote size:", e.Private.Size)
			} else {
				// There's a greater size available but it's not listed. Better make a backup of the local file.
				log.Println("Larger NOT CANONICAL version of", e.ID, fi.Size(), "available. Remote size:", e.Private.Size)
				log.Println("Backing up", filename, "to", filename+".bak")
				err = os.Rename(filename, filename+".bak")
				if err != nil {
					log.Println("Error moving", filename, "to", filename+".bak", err)
					return
				}
			}
		}
	}
	// Download into a .temp file; renamed into place only on success.
	output, err := os.Create(filename + ".temp")
	if err != nil {
		log.Println("Error while creating", filename, "-", err)
		return
	}
	defer output.Close()
	log.Printf("Downloading %s (%d MB) from %s (%s)", e.Private.Videofile, e.Private.Size/1024/1024, e.Private.URL, e.Private.EndURL)
	response, err := http.Get(e.Private.URL)
	if err != nil {
		log.Println("Error while downloading", e.Private.URL, "-", err)
		return
	}
	defer response.Body.Close()
	n, err := io.Copy(output, response.Body)
	if err != nil {
		log.Println("Error while downloading", e.Private.URL, "-", err)
		return
	}
	err = os.Rename(filename+".temp", filename)
	if err != nil {
		log.Println("Error moving", filename+".temp", "to", filename, err)
		return
	}
	log.Println(filename, "downloaded.", n, "bytes.")
}
// setupLog opens (creating/appending) a log file named after the current
// UTC hour and tees the standard logger to both that file and stdout.
// Returns the open file so the caller could close it.
func setupLog() *os.File {
	stamp, _ := time.Now().UTC().Truncate(time.Hour).MarshalText()
	logfile := path.Join(dirs["log"], fmt.Sprintf("%s.log", stamp))
	f, err := os.OpenFile(logfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf("error opening file: %v", err)
	}
	log.SetFlags(log.LstdFlags)
	log.SetOutput(io.MultiWriter(f, os.Stdout))
	return f
}
// fromURL loads episode metadata from the API url (which wraps the episode
// in Page.Items) into e.
func (e *Episode) fromURL(url string) {
	type RemoteEpisode struct {
		Page struct {
			Items []Episode
		}
	}
	var v RemoteEpisode
	if err := read(url, &v); err != nil {
		log.Fatal(err)
	}
	// Bug fix: guard the Items[0] access; an unknown id yields an empty
	// list and used to panic here.
	if len(v.Page.Items) == 0 {
		log.Fatal("no episode data at ", url)
	}
	*e = v.Page.Items[0]
}
// fromFile loads episode metadata from the JSON file f into e; any read or
// parse failure is fatal.
func (e *Episode) fromFile(f string) {
	raw, err := ioutil.ReadFile(f)
	if err != nil {
		log.Fatal(err)
	}
	if err = json.Unmarshal(raw, e); err != nil {
		log.Fatal(err)
	}
}
// humanName returns "<program title> <episode number> - <long title>",
// used for published filenames and log lines.
func (e *Episode) humanName() string {
	name := fmt.Sprintf("%s %d - %s", e.ProgramInfo.Title, e.Episode, e.LongTitle)
	return name
}
// publish hard-links every downloaded episode into the publish tree as
// "<publish>/<program title>/<human name><ext>". Already-published files
// (os.IsExist) are silently left alone; "Turno de oficio" is deliberately
// skipped.
func publish() {
	dirfiles, err := ioutil.ReadDir(dirs["download"])
	if err != nil {
		log.Fatalf("error reading dir: %v", err)
	}
	for _, file := range dirfiles {
		// Episode metadata lives in <ID>.json next to the video file.
		if path.Ext(file.Name()) == ".json" {
			var e Episode
			e.fromFile(path.Join(dirs["download"], file.Name()))
			if e.ProgramInfo.Title == "Turno de oficio" {
				continue
			}
			dir := path.Join(dirs["publish"], e.ProgramInfo.Title)
			err := os.MkdirAll(dir, 0755)
			if err != nil {
				log.Fatal(err)
			}
			videofile := path.Join(dirs["download"], e.Private.Videofile)
			filename := fmt.Sprintf("%s%s", e.humanName(), e.Private.Ext)
			publishFile := path.Join(dir, filename)
			// fmt.Println(e.ID, publishFile)
			// TODO: Episode should own the integrity-check helpers.
			// Hard-link (not copy) into the publish tree.
			err = os.Link(videofile, publishFile)
			if err != nil {
				if !os.IsExist(err) {
					log.Printf("Cannot publish: %d to %s", e.ID, publishFile)
				}
			} else {
				log.Printf("Published %s to %s", videofile, publishFile)
			}
		}
	}
}
// indexFiles walks the download directory's .json metadata files and
// reloads each episode. Currently a scaffold: the reloaded data is not
// used yet.
func indexFiles() {
	log.Println("Believe it or not I'm reindexing")
	entries, err := ioutil.ReadDir(dirs["download"])
	if err != nil {
		log.Fatalf("error reading dir: %v", err)
	}
	for _, entry := range entries {
		if path.Ext(entry.Name()) != ".json" {
			continue
		}
		var e Episode
		e.fromFile(path.Join(dirs["download"], entry.Name()))
		// TODO: Episode should own the integrity-check helpers.
	}
}
// test is a placeholder for ad-hoc experiments wired to the -t flag;
// currently a no-op.
func test(id int) {
}
// remoteEpisode fetches metadata for a single episode id from the API and,
// if any consumer endpoint serves it, writes its metadata file and
// downloads the video.
func remoteEpisode(id int) {
	var e Episode
	e.ID = id
	log.Println("Getting remoteEpisode", e.json())
	e.fromURL(fmt.Sprintf("http://www.rtve.es/api/videos/%d", id))
	log.Println("Stat of remoteEpisode", e.json())
	if !e.stat() {
		return
	}
	log.Println("remoteEpisode", e.json())
	e.writeData() // should check if previous steps didn't work
	e.download()
}
// listPrograms prints "id, // name" for every Spanish Clan series known to
// the API, ready to paste into the programids table in main.
func listPrograms() {
	type remotePrograms struct {
		Page struct {
			Items []Programa
		}
	}
	var list remotePrograms
	if err := read("http://www.rtve.es/api/clan/series/spanish/todas", &list); err != nil {
		log.Fatal(err)
	}
	for _, programa := range list.Page.Items {
		fmt.Printf("%d, // %s\n", programa.ID, programa.Name)
	}
}
// main wires the CLI flags to the different modes (-l list programs,
// -t test, -i reindex+publish, -e single episode) and, by default, crawls
// every program in programids, downloading each episode that can be
// stat'ed.
func main() {
	setupLog()
	dotest := 0
	doindex := false
	dolist := false
	doepisode := 0
	flag.BoolVar(&verbose, "v", false, "verbose")
	flag.BoolVar(&doindex, "i", false, "reindex the whole thing")
	flag.BoolVar(&dolist, "l", false, "list programs")
	flag.IntVar(&dotest, "t", 0, "test algorithms")
	flag.IntVar(&doepisode, "e", 0, "single episode")
	flag.Parse()
	debug("verbose active")
	if dolist {
		listPrograms()
		return
	}
	if dotest > 0 {
		test(dotest)
		return
	}
	if doindex {
		indexFiles()
		publish()
		return
	}
	if doepisode > 0 {
		remoteEpisode(doepisode)
		return
	}
	makeDirs()
	// Bug fix: os.Getpid was logged without being called, printing a
	// function value instead of the PID.
	log.Printf("Starting %s (PID: %d) at %s", os.Args[0], os.Getpid(), time.Now().UTC())
	programids := []int{
		80170, // Pokémon XY
		44450, // Pokémon Advanced Challenge
		41651, // Pokémon Advanced
		68590, // Pokémon Negro y Blanco: Aventuras en Teselia
		49230, // Pokémon Negro y Blanco
		50650, // Desafío Champions Sendokai
		49750, // Scooby Doo Misterios S.A.
		51350, // Jelly Jamm
		78590, // Turno de Oficio
		70450, // Planeta Imaginario
	}
	for _, v := range programids {
		var p Programa
		p.getVideos(v)
		for _, e := range p.episodios {
			if e.stat() {
				e.writeData() // should check if previous steps didn't work
				e.download()
			} else {
				log.Println("Cannot stat", e)
			}
		}
	}
	log.Printf("Finishing %s (PID: %d) at %s", os.Args[0], os.Getpid(), time.Now().UTC())
}
Nuevos programas
package main
/*
www.rtve.es/api/clan/series/spanish/todas (follow redirect)
http://www.rtve.es/api/programas/80170/videos
*/
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
)
// verbose enables debug() output on stderr (set with the -v flag).
var verbose = false

// dirs maps logical directory roles to the filesystem paths the script
// uses (created by makeDirs).
var dirs = map[string]string{
	"base":     "/nas/3TB/Media/In/rtve/",
	"download": "/nas/3TB/Media/In/rtve/d",
	"cache":    "/nas/3TB/Media/In/rtve/cache",
	"log":      "/nas/3TB/Media/In/rtve/log",
	"publish":  "/nas/3TB/Media/Video/Infantil",
}

// keys holds the per-consumer AES keys used to build ztnr video URLs.
var keys = map[string]string{
	"oceano":  "pmku579tg465GDjf1287gDFFED56788C", // Tablet Clan
	"carites": "167Sdfg8r4Kuo94hnserw4Zis87wtiVr", // Tablet RTVE
	"orfeo":   "k0rf30jfpmbn8s0rcl4nTvE0ip3doRan", // Movil Clan
	"caliope": "9qfr0ydg6dGJ3cho2p1mo284dgXcVsdi", // Movil RTVE
}
// stripchars returns str with every rune that occurs in chr removed.
func stripchars(str, chr string) string {
	drop := func(r rune) rune {
		if strings.ContainsRune(chr, r) {
			return -1 // negative value tells strings.Map to drop the rune
		}
		return r
	}
	return strings.Map(drop, str)
}
// Episode is a representation of each episode as returned by the RTVE
// API, plus script-private download bookkeeping under Private.
type Episode struct {
	ShortTitle       string
	LongTitle        string
	ShortDescription string
	LongDescription  string
	Episode          int    // episode number within the program
	ID               int    `json:",string"` // numeric id (the API sends it as a string)
	ProgramRef       string
	// ProgramInfo carries the owning program's title.
	ProgramInfo struct {
		Title string
	}
	// Private is not part of the API payload; it records where and what to
	// download once a working consumer URL has been found (see remote()).
	Private struct {
		URL       string // consumer URL that answered 200
		EndURL    string // final URL after redirects
		Offset    int
		Size      int64  // remote Content-Length
		Ext       string // extension from the final URL (".mp4" fallback)
		Videofile string // "<ID><Ext>" target filename
	}
	// Qualities lists the encodings the API advertises for this episode;
	// download() uses the file sizes to decide whether a local copy is
	// canonical.
	Qualities []struct {
		Type     string
		Preset   string
		Filesize int64
		Duration int
	}
}
// Programa is a representation of one program (series) and, after
// getVideos has run, its list of available episodes.
type Programa struct {
	Name             string
	WebOficial       string
	Description      string
	LongTitle        string
	ShortDescription string
	LongDescription  string
	ID               int `json:",string"` // numeric id (the API sends it as a string)
	episodios        []Episode // filled by getVideos; unexported, not part of the JSON
}
// videosPrograma mirrors the paged response of the
// /api/programas/<id>/videos endpoint.
type videosPrograma struct {
	Page struct {
		TotalPages  int
		Total       int
		NumElements int
		Number      int
		Offset      int
		Size        int
		Items       []Episode
	}
}
// Programas mirrors the paged response of the series-listing endpoint.
// NOTE(review): appears unused in this file; listPrograms declares its own
// local type — verify before removing.
type Programas struct {
	Page struct {
		TotalPages int
		Items      []Programa
	}
}
// makeDirs ensures every directory in dirs exists (creating parents,
// mode 0755). Any failure is fatal.
func makeDirs() {
	for _, dir := range dirs {
		if err := os.MkdirAll(dir, 0755); err != nil {
			log.Fatal(err)
		}
	}
}
// pkcsS7Padding appends PKCS#7 padding for a 16-byte block size: between 1
// and 16 copies of the pad length itself, so the result is always a whole
// number of blocks.
func pkcsS7Padding(data []byte) []byte {
	const blockSize = 16
	pad := blockSize - len(data)%blockSize
	return append(data, bytes.Repeat([]byte{byte(pad)}, pad)...)
}
// unpkcs7Padding strips PKCS#7 padding: the last byte encodes how many
// padding bytes to remove.
// Robustness fix: empty input and an out-of-range pad byte used to panic
// (index/slice out of range); such data is now returned unchanged.
func unpkcs7Padding(data []byte) []byte {
	length := len(data)
	if length == 0 {
		return data
	}
	unpadding := int(data[length-1])
	if unpadding > length {
		return data
	}
	return data[:(length - unpadding)]
}
// getTime returns "now + 150h, rounded to the hour" as Unix milliseconds.
func getTime() int64 {
	future := time.Now().Add(150 * time.Hour).Round(time.Hour)
	return future.UnixNano() / int64(time.Millisecond)
}
// cryptaes AES-CBC-encrypts text with key, using an all-zero IV and PKCS#7
// padding, and returns the ciphertext base64-encoded. An invalid AES key
// length is fatal.
func cryptaes(text, key string) string {
	block, err := aes.NewCipher([]byte(key))
	if err != nil {
		log.Fatal(err)
	}
	iv := make([]byte, aes.BlockSize) // zero IV, matching the remote service
	plaintext := pkcsS7Padding([]byte(text))
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)
	return base64.StdEncoding.EncodeToString(ciphertext)
}
// orfeo builds the "orfeo" (mobile Clan) consumer URL for video id at
// timestamp t (ms): the path segment is the encrypted "<id>_es_<t>".
func orfeo(id int, t int64) string {
	const mobilekey = "k0rf30jfpmbn8s0rcl4nTvE0ip3doRan"
	token := cryptaes(fmt.Sprintf("%d_es_%d", id, t), mobilekey)
	return "http://www.rtve.es/ztnr/consumer/orfeo/video/" + token
}
// ztnrurl builds the consumer URL for video id at timestamp t (ms) for the
// given consumer class, using that class's key from the keys map.
func ztnrurl(id int, t int64, clase string) string {
	secret := fmt.Sprintf("%d_es_%d", id, t)
	return fmt.Sprintf("http://www.rtve.es/ztnr/consumer/%s/video/%s", clase, cryptaes(secret, keys[clase]))
}
// oceano builds the "oceano" (tablet Clan) consumer URL for video id at
// timestamp t (ms): the path segment is the encrypted "<id>_es_<t>".
func oceano(id int, t int64) string {
	const tabletkey = "pmku579tg465GDjf1287gDFFED56788C"
	token := cryptaes(fmt.Sprintf("%d_es_%d", id, t), tabletkey)
	return "http://www.rtve.es/ztnr/consumer/oceano/video/" + token
}
// cacheFile maps url to its on-disk cache path: the hex SHA-256 of the
// URL, inside the cache directory.
func cacheFile(url string) string {
	digest := sha256.Sum256([]byte(url))
	return path.Join(dirs["cache"], fmt.Sprintf("%x", digest))
}
// read fetches url as JSON into v through an on-disk cache: a cached copy
// younger than 3 hours is reused, otherwise the URL is downloaded and the
// cache refreshed. Any failure is fatal; the error return is always nil
// (kept for call-site compatibility).
func read(url string, v interface{}) error {
	cache := cacheFile(url)
	fi, err := os.Stat(cache)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	if os.IsNotExist(err) || time.Now().Unix()-fi.ModTime().Unix() > 3*3600 {
		log.Println("Downloading", url, "to cache")
		res, err := http.Get(url)
		// Bug fix: the Get error was ignored (then overwritten by the
		// ReadAll error), and a failed request dereferenced a nil body.
		if err != nil {
			log.Fatal(err)
		}
		content, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			log.Fatal(err)
		}
		if err = ioutil.WriteFile(cache, content, 0644); err != nil {
			log.Fatal(err)
		}
	}
	content, err := ioutil.ReadFile(cache)
	if err != nil {
		log.Fatal(err)
	}
	if err = json.Unmarshal(content, v); err != nil {
		log.Fatal(err)
	}
	return nil
}
// getVideos loads the episode list of programid (first page, up to 60
// items) into p.episodios.
func (p *Programa) getVideos(programid int) {
	url := fmt.Sprintf("http://www.rtve.es/api/programas/%d/videos?size=60", programid)
	var videos videosPrograma
	err := read(url, &videos)
	if err != nil {
		log.Fatal(err)
	}
	// Bug fix: this check used to run before read() filled the struct, so
	// it could never fire; the format string was also missing the second
	// verb for NumElements.
	if videos.Page.TotalPages > 1 {
		log.Printf("Warning: More than 1 page of results: %d. NumElements: %d", videos.Page.TotalPages, videos.Page.NumElements)
	}
	p.episodios = videos.Page.Items
	// Bug fix: guard against an empty list before indexing Items[0].
	if len(videos.Page.Items) == 0 {
		log.Println("No episodes for program", programid)
		return
	}
	log.Println("Tenemos episodios de", videos.Page.Items[0].ProgramInfo.Title)
}
// remote issues a HEAD request for this episode through the given consumer
// class and, on a 200, records the final URL, extension, target filename
// and size in e.Private. Returns the HTTP status code; a transport error
// is fatal.
func (e *Episode) remote(class string) int {
	t := time.Now().UTC().Round(time.Second)
	ts := t.UnixNano() / int64(time.Millisecond)
	videourl := ztnrurl(e.ID, ts, class)
	res, err := http.Head(videourl)
	if err != nil {
		log.Fatal(err)
	}
	// Fix: HEAD responses carry no payload, but the body must still be
	// closed so the transport can reuse the connection.
	defer res.Body.Close()
	if res.StatusCode == 200 {
		e.Private.Ext = path.Ext(res.Request.URL.Path)
		if e.Private.Ext == "" {
			e.Private.Ext = ".mp4"
			log.Println("WARNING: Empty extension. Forcing mp4.")
		}
		e.Private.Videofile = fmt.Sprintf("%d%s", e.ID, e.Private.Ext)
		e.Private.Size = res.ContentLength
		e.Private.EndURL = res.Request.URL.String()
		e.Private.URL = videourl
	}
	return res.StatusCode
}
// json returns the episode as indented JSON; on a marshalling error it
// logs the error and returns an empty string.
func (e *Episode) json() string {
	out, err := json.MarshalIndent(e, "", " ")
	if err != nil {
		log.Println("json marshall error:", err)
	}
	return string(out)
}
// writeData persists the episode metadata as <ID>.json in the download
// directory. A write failure is fatal.
func (e *Episode) writeData() {
	target := path.Join(dirs["download"], fmt.Sprintf("%d.json", e.ID))
	if err := ioutil.WriteFile(target, []byte(e.json()), 0644); err != nil {
		log.Fatal(err)
	}
}
// debug prints its arguments to stderr when the -v flag is set.
func debug(wat ...interface{}) {
	if verbose {
		// Bug fix: spread the variadic args with "..." so they print as
		// separate space-separated values instead of one bracketed slice.
		fmt.Fprintln(os.Stderr, wat...)
	}
}
// stat probes the known CDN classes in priority order until one answers
// the HEAD request with HTTP 200, reporting whether any candidate video
// was found for this episode.
func (e *Episode) stat() bool {
	debug("e.stat()", e.ID, e.humanName())
	for _, class := range []string{"oceano", "carites", "orfeo", "caliope"} {
		if e.remote(class) == 200 {
			return true
		}
	}
	log.Println("No candidates for", e)
	return false
}
// download fetches the episode video into the download directory. If a
// local copy already exists it is kept when it is at least as large as
// the remote file and its size matches one of the advertised qualities;
// a larger-but-uncanonical remote triggers a .bak backup of the local
// file before re-downloading. The transfer goes to a .temp file that is
// renamed into place only on success.
func (e *Episode) download() {
	if e.Private.Videofile == "" {
		log.Fatal("e.Private.Videofile is empty when trying to download")
	}
	filename := path.Join(dirs["download"], e.Private.Videofile)
	fi, err := os.Stat(filename)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	// Set of canonical sizes advertised for this episode's qualities.
	sizes := map[int64]bool{}
	if !os.IsNotExist(err) {
		if e.Qualities != nil {
			for _, q := range e.Qualities {
				sizes[q.Filesize] = true
			}
		}
		debug("sizes", sizes, len(sizes), "sizes[fi.Size()]=", sizes[fi.Size()], "sizes[fi.Size()+1]=", sizes[fi.Size()+1])
		if fi.Size() >= e.Private.Size && sizes[fi.Size()] {
			// Our file is bigger and canonical: nothing to do.
			return
		}
		if fi.Size() < e.Private.Size {
			if sizes[e.Private.Size] {
				log.Println("Better version of", e.ID, fi.Size(), "available. Remote size:", e.Private.Size)
			} else {
				// There's a greater size available but it's not listed. Better make a backup of the local file.
				log.Println("Larger NOT CANONICAL version of", e.ID, fi.Size(), "available. Remote size:", e.Private.Size)
				log.Println("Backing up", filename, "to", filename+".bak")
				if err = os.Rename(filename, filename+".bak"); err != nil {
					log.Println("Error moving", filename, "to", filename+".bak", err)
					return
				}
			}
		}
	}
	output, err := os.Create(filename + ".temp")
	if err != nil {
		log.Println("Error while creating", filename, "-", err)
		return
	}
	defer output.Close()
	log.Printf("Downloading %s (%d MB) from %s (%s)", e.Private.Videofile, e.Private.Size/1024/1024, e.Private.URL, e.Private.EndURL)
	response, err := http.Get(e.Private.URL)
	if err != nil {
		log.Println("Error while downloading", e.Private.URL, "-", err)
		return
	}
	defer response.Body.Close()
	// BUG FIX: without a status check, an HTML error page would be saved
	// and renamed into place as the video file.
	if response.StatusCode != http.StatusOK {
		log.Println("Error while downloading", e.Private.URL, "- unexpected status:", response.Status)
		return
	}
	n, err := io.Copy(output, response.Body)
	if err != nil {
		log.Println("Error while downloading", e.Private.URL, "-", err)
		return
	}
	err = os.Rename(filename+".temp", filename)
	if err != nil {
		log.Println("Error moving", filename+".temp", "to", filename, err)
		return
	}
	log.Println(filename, "downloaded.", n, "bytes.")
}
// setupLog opens (creating/appending as needed) an hourly-stamped log
// file in the log directory and tees all log output to both the file and
// stdout. The caller owns the returned file handle.
func setupLog() *os.File {
	stamp, _ := time.Now().UTC().Truncate(time.Hour).MarshalText()
	logfile := path.Join(dirs["log"], fmt.Sprintf("%s.log", string(stamp)))
	f, err := os.OpenFile(logfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf("error opening file: %v", err)
	}
	log.SetFlags(log.LstdFlags)
	log.SetOutput(io.MultiWriter(f, os.Stdout))
	return f
}
// fromURL populates the episode from the first item returned by the RTVE
// videos API at url. A failed fetch or an empty result set is fatal.
func (e *Episode) fromURL(url string) {
	type RemoteEpisode struct {
		Page struct {
			Items []Episode
		}
	}
	var v RemoteEpisode
	// BUG FIX: the error from read was silently discarded, and an empty
	// Items slice caused an index-out-of-range panic.
	if err := read(url, &v); err != nil {
		log.Fatal(err)
	}
	if len(v.Page.Items) == 0 {
		log.Fatal("no episode found at ", url)
	}
	*e = v.Page.Items[0]
}
// fromFile loads the episode metadata from the JSON file at f; any read
// or parse failure is fatal.
func (e *Episode) fromFile(f string) {
	raw, err := ioutil.ReadFile(f)
	if err != nil {
		log.Fatal(err)
	}
	if err = json.Unmarshal(raw, e); err != nil {
		log.Fatal(err)
	}
}
// humanName renders "<program> <episode number> - <title>" for file
// names and log lines.
func (e *Episode) humanName() string {
	prefix := fmt.Sprintf("%s %d", e.ProgramInfo.Title, e.Episode)
	return prefix + " - " + e.LongTitle
}
// publish hard-links every downloaded video described by a .json
// metadata file into a per-program subdirectory of the publish
// directory, under a human-readable file name. Episodes of
// "Turno de oficio" are deliberately skipped.
func publish() {
	entries, err := ioutil.ReadDir(dirs["download"])
	if err != nil {
		log.Fatalf("error reading dir: %v", err)
	}
	for _, entry := range entries {
		if path.Ext(entry.Name()) != ".json" {
			continue
		}
		var e Episode
		e.fromFile(path.Join(dirs["download"], entry.Name()))
		if e.ProgramInfo.Title == "Turno de oficio" {
			continue
		}
		dir := path.Join(dirs["publish"], e.ProgramInfo.Title)
		if err := os.MkdirAll(dir, 0755); err != nil {
			log.Fatal(err)
		}
		videofile := path.Join(dirs["download"], e.Private.Videofile)
		publishFile := path.Join(dir, fmt.Sprintf("%s%s", e.humanName(), e.Private.Ext))
		// Episode should own the integrity-check helpers.
		linkErr := os.Link(videofile, publishFile)
		switch {
		case linkErr == nil:
			log.Printf("Published %s to %s", videofile, publishFile)
		case !os.IsExist(linkErr):
			log.Printf("Cannot publish: %d to %s", e.ID, publishFile)
		}
	}
}
// indexFiles walks the download directory and re-parses every .json
// metadata file. The parse itself is the only validation for now; no
// index is materialized yet.
func indexFiles() {
	log.Println("Believe it or not I'm reindexing")
	entries, err := ioutil.ReadDir(dirs["download"])
	if err != nil {
		log.Fatalf("error reading dir: %v", err)
	}
	for _, entry := range entries {
		if path.Ext(entry.Name()) != ".json" {
			continue
		}
		var e Episode
		e.fromFile(path.Join(dirs["download"], entry.Name()))
		// Episode should own the integrity-check helpers.
	}
}
// test is a placeholder hook for ad-hoc experiments triggered with the
// -t flag; it currently does nothing with id.
func test(id int) {
}
// remoteEpisode fetches a single episode by id from the RTVE API and, if
// a playable CDN candidate exists, saves its metadata and downloads the
// video.
func remoteEpisode(id int) {
	e := Episode{ID: id}
	log.Println("Getting remoteEpisode", e.json())
	e.fromURL(fmt.Sprintf("http://www.rtve.es/api/videos/%d", id))
	log.Println("Stat of remoteEpisode", e.json())
	if !e.stat() {
		return
	}
	log.Println("remoteEpisode", e.json())
	e.writeData() // should check if previous steps didn't work
	e.download()
}
// listPrograms prints "id, // name" for every Spanish series known to
// the RTVE "clan" API, ready to paste into the programids table in main.
func listPrograms() {
	type remotePrograms struct {
		Page struct {
			Items []Programa
		}
	}
	var rp remotePrograms
	if err := read("http://www.rtve.es/api/clan/series/spanish/todas", &rp); err != nil {
		log.Fatal(err)
	}
	for _, prog := range rp.Page.Items {
		fmt.Printf("%d, // %s\n", prog.ID, prog.Name)
	}
}
// main parses the command-line flags and either runs one of the one-shot
// modes (-l list programs, -t test hook, -i reindex + publish, -e single
// episode) or crawls the hard-coded program list, downloading every
// episode that can be located on a CDN.
func main() {
	logf := setupLog()
	// Close the log file on exit (setupLog's return value was previously
	// discarded).
	defer logf.Close()
	dotest := 0
	doindex := false
	dolist := false
	doepisode := 0
	flag.BoolVar(&verbose, "v", false, "verbose")
	flag.BoolVar(&doindex, "i", false, "reindex the whole thing")
	flag.BoolVar(&dolist, "l", false, "list programs")
	flag.IntVar(&dotest, "t", 0, "test algorithms")
	flag.IntVar(&doepisode, "e", 0, "single episode")
	flag.Parse()
	debug("verbose active")
	if dolist {
		listPrograms()
		return
	}
	if dotest > 0 {
		test(dotest)
		return
	}
	if doindex {
		indexFiles()
		publish()
		return
	}
	if doepisode > 0 {
		remoteEpisode(doepisode)
		return
	}
	makeDirs()
	// BUG FIX: os.Getpid was passed as a function value, so %d rendered
	// "%!d(func() int=...)" instead of the PID. It must be called.
	log.Printf("Starting %s (PID: %d) at %s", os.Args[0], os.Getpid(), time.Now().UTC())
	programids := []int{
		80170, // Pokémon XY
		44450, // Pokémon Advanced Challenge
		41651, // Pokémon Advanced
		68590, // Pokémon Negro y Blanco: Aventuras en Teselia
		49230, // Pokémon Negro y Blanco
		50650, // Desafío Champions Sendokai
		49750, // Scooby Doo Misterios S.A.
		51350, // Jelly Jamm
		78590, // Turno de Oficio
		70450, // Planeta Imaginario
		57030, // Ruy, el pequeño Cid
		57050, // DArtacan y los tres mosqueperros
		57051, // La vuelta al mundo de Willy Fog
		57052, // David el gnomo
		82170, // Mortadelo y filemon
		61750, // Maya
	}
	for _, v := range programids {
		var p Programa
		p.getVideos(v)
		for _, e := range p.episodios {
			if e.stat() {
				e.writeData() // should check if previous steps didn't work
				e.download()
			} else {
				log.Println("Cannot stat", e)
			}
		}
	}
	log.Printf("Finishing %s (PID: %d) at %s", os.Args[0], os.Getpid(), time.Now().UTC())
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.