CombinedText
stringlengths
4
3.42M
package main import ( // stdlib packages "encoding/json" "flag" "fmt" "log" "os" "runtime" "strconv" "sync" // custom packages "config" "modules/amqpmodule" es "modules/elasticsearchmodule" ) const rxQueue = "conn_scan_results_queue" const rxRoutKey = "conn_scan_results" const esIndex = "observer" const esType = "connection" var broker *amqpmodule.Broker //the 2 following structs represent the cipherscan output. type ScanInfo struct { Target string `json:"target"` Timestamp string `json:"utctimestamp"` ServerSide string `json:"serverside"` CurvesFallback string `json:"curves_fallback"` CipherSuites []Ciphersuite `json:"ciphersuite"` } type Ciphersuite struct { Cipher string `json:"cipher"` Protocols []string `json:"protocols"` PubKey []string `json:"pubkey"` SigAlg []string `json:"sigalg"` Trusted string `json:"trusted"` TicketHint string `json:"ticket_hint"` OCSPStapling string `json:"ocsp_stapling"` PFS string `json:"pfs"` Curves []string `json:"curves,omitempty"` } //the following structs represent the output we want to provide to DB. 
type ConnectionInfo struct { ConnectionTimestamp string `json:"connectionTimestamp"` ServerSide bool `json:"serverside"` CipherSuites map[string]ConnectionCiphersuite `json:"ciphersuite"` CurvesFallback bool `json:"curvesFallback"` } type ConnectionCiphersuite struct { Cipher string `json:"cipher"` Protocols []string `json:"protocols"` PubKey float64 `json:"pubkey"` SigAlg string `json:"sigalg"` TicketHint string `json:"ticket_hint"` OCSPStapling bool `json:"ocsp_stapling"` PFS string `json:"pfs"` Curves []string `json:"curves"` } func failOnError(err error, msg string) { if err != nil { log.Fatalf("%s: %s", msg, err) panic(fmt.Sprintf("%s: %s", msg, err)) } } func panicIf(err error) bool { if err != nil { log.Println(fmt.Sprintf("%s", err)) return true } return false } func stringtoBool(s string) bool { if s == "True" { return true } else { return false } } func (s ScanInfo) toConnInfo() (ConnectionInfo, error) { c := ConnectionInfo{} var err error c.ConnectionTimestamp = s.Timestamp c.ServerSide = stringtoBool(s.ServerSide) c.CurvesFallback = stringtoBool(s.CurvesFallback) c.CipherSuites = make(map[string]ConnectionCiphersuite) pos := 1 for _, cipher := range s.CipherSuites { newcipher := ConnectionCiphersuite{} newcipher.Cipher = cipher.Cipher newcipher.OCSPStapling = stringtoBool(cipher.OCSPStapling) newcipher.PFS = cipher.PFS newcipher.Protocols = cipher.Protocols if len(cipher.PubKey) > 1 { log.Println("Multiple PubKeys for ", s.Target, " at cipher :", cipher.Cipher) } if len(cipher.PubKey) > 0 { newcipher.PubKey, err = strconv.ParseFloat(cipher.PubKey[0], 64) } else { return c, fmt.Errorf("No Public Keys found") } if len(cipher.SigAlg) > 1 { log.Println("Multiple SigAlgs for ", s.Target, " at cipher :", cipher.Cipher) } if len(cipher.SigAlg) > 0 { newcipher.SigAlg = cipher.SigAlg[0] } else { return c, fmt.Errorf("No Signature Algorithms found") } newcipher.TicketHint = cipher.TicketHint if err != nil { return c, err } newcipher.Curves = 
append(newcipher.Curves, cipher.Curves...) c.CipherSuites[strconv.Itoa(pos)] = newcipher pos++ } return c, nil } //worker is the main body of the goroutine that handles each received message. func worker(msgs <-chan []byte) { forever := make(chan bool) defer wg.Done() for d := range msgs { info := ScanInfo{} err := json.Unmarshal(d, &info) panicIf(err) if err != nil { continue } c, err := info.toConnInfo() panicIf(err) if err != nil { continue } id := info.Target jsonConn, err := json.Marshal(c) panicIf(err) if err != nil { continue } err = es.Push(esIndex, esType, id, jsonConn) panicIf(err) } <-forever } func printIntro() { fmt.Println(` ################################## # TLSAnalyzer # ################################## `) } var wg sync.WaitGroup func main() { var ( err error ) printIntro() conf := config.AnalyzerConfig{} var cfgFile string flag.StringVar(&cfgFile, "c", "/etc/observer/analyzer.cfg", "Input file csv format") flag.Parse() _, err = os.Stat(cfgFile) failOnError(err, "Missing configuration file from '-c' or /etc/observer/retriever.cfg") conf, err = config.AnalyzerConfigLoad(cfgFile) if err != nil { conf = config.GetAnalyzerDefaults() } cores := runtime.NumCPU() runtime.GOMAXPROCS(cores * conf.General.GoRoutines) err = es.RegisterConnection(conf.General.ElasticSearch) failOnError(err, "Failed to register ElasticSearch") broker, err = amqpmodule.RegisterURL(conf.General.RabbitMQRelay) failOnError(err, "Failed to register RabbitMQ") msgs, err := broker.Consume(rxQueue, rxRoutKey) if err != nil { failOnError(err, "Failed to Consume from receiving queue") } for i := 0; i < cores; i++ { wg.Add(1) go worker(msgs) } wg.Wait() } moved connection structs to separate package, implemented target connection versioning... 
package main import ( // stdlib packages "encoding/json" "flag" "fmt" "log" "os" "runtime" "sync" // custom packages "config" "connection" "modules/amqpmodule" es "modules/elasticsearchmodule" ) const rxQueue = "conn_scan_results_queue" const rxRoutKey = "conn_scan_results" const esIndex = "observer" const esType = "connection" var broker *amqpmodule.Broker //the 2 following structs represent the cipherscan output. func failOnError(err error, msg string) { if err != nil { log.Fatalf("%s: %s", msg, err) panic(fmt.Sprintf("%s: %s", msg, err)) } } func panicIf(err error) bool { if err != nil { log.Println(fmt.Sprintf("%s", err)) return true } return false } // retrieves stored connections ( if any ) for the given scan target func getConnsforTarget(t string) (map[string]connection.Stored, error) { res, err := es.SearchbyTerm(esIndex, esType, "scanTarget.raw", t) if err != nil { return nil, err } storedConns := make(map[string]connection.Stored) if res.Total > 0 && err != nil { for i := 0; i < res.Total; i++ { s := connection.Stored{} err = json.Unmarshal(*res.Hits[i].Source, &s) if err != nil { panicIf(err) continue } storedConns[res.Hits[i].Id] = s } if len(storedConns) > 0 { return storedConns, nil } } return storedConns, nil } //worker is the main body of the goroutine that handles each received message. func worker(msgs <-chan []byte) { forever := make(chan bool) defer wg.Done() for d := range msgs { info := connection.CipherscanOutput{} err := json.Unmarshal(d, &info) panicIf(err) if err != nil { continue } c, err := info.Stored() panicIf(err) if err != nil { continue } stored, err := getConnsforTarget(c.ScanTarget) if err != nil { panicIf(err) continue } err = updateAndPushConnections(c, stored) panicIf(err) //Should we requeue the connection in case of error? 
} <-forever } func updateAndPushConnections(newconn connection.Stored, conns map[string]connection.Stored) error { err := error(nil) if len(conns) > 0 { for id, conn := range conns { if conn.ObsoletedBy == "" { if newconn.Equal(conn) { log.Println("Updating doc for ", conn.ScanTarget) conn.LastSeenTimestamp = newconn.LastSeenTimestamp jsonConn, err := json.Marshal(conn) if err == nil { _, err = es.Push(esIndex, esType, "", jsonConn) } break } else { log.Println("Pushing new doc for ", conn.ScanTarget) jsonConn, err := json.Marshal(newconn) obsID := "" if err != nil { break } obsID, err = es.Push(esIndex, esType, "", jsonConn) if err != nil { break } conn.ObsoletedBy = obsID jsonConn, err = json.Marshal(conn) obsID, err = es.Push(esIndex, esType, id, jsonConn) } } } } else { log.Println("No older doc found for ", newconn.ScanTarget) jsonConn, err := json.Marshal(newconn) if err == nil { _, err = es.Push(esIndex, esType, "", jsonConn) } } return err } func printIntro() { fmt.Println(` ################################## # TLSAnalyzer # ################################## `) } var wg sync.WaitGroup func main() { var ( err error ) printIntro() conf := config.AnalyzerConfig{} var cfgFile string flag.StringVar(&cfgFile, "c", "/etc/observer/analyzer.cfg", "Input file csv format") flag.Parse() _, err = os.Stat(cfgFile) failOnError(err, "Missing configuration file from '-c' or /etc/observer/retriever.cfg") conf, err = config.AnalyzerConfigLoad(cfgFile) if err != nil { conf = config.GetAnalyzerDefaults() } cores := runtime.NumCPU() runtime.GOMAXPROCS(cores * conf.General.GoRoutines) err = es.RegisterConnection(conf.General.ElasticSearch) failOnError(err, "Failed to register ElasticSearch") broker, err = amqpmodule.RegisterURL(conf.General.RabbitMQRelay) failOnError(err, "Failed to register RabbitMQ") msgs, err := broker.Consume(rxQueue, rxRoutKey) if err != nil { failOnError(err, "Failed to Consume from receiving queue") } for i := 0; i < cores; i++ { wg.Add(1) go worker(msgs) 
} wg.Wait() }
/* Copyright IBM Corp. 2017 All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package cluster_test import ( "context" "io" "sync" "sync/atomic" "testing" "time" "github.com/hyperledger/fabric-protos-go/common" "github.com/hyperledger/fabric-protos-go/orderer" "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/common/metrics/disabled" "github.com/hyperledger/fabric/orderer/common/cluster" "github.com/hyperledger/fabric/orderer/common/cluster/mocks" "github.com/pkg/errors" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc" ) func noopReport(_ error) { } func TestSendSubmitWithReport(t *testing.T) { t.Parallel() node1 := newTestNode(t) node2 := newTestNode(t) var receptionWaitGroup sync.WaitGroup receptionWaitGroup.Add(1) node2.handler.On("OnSubmit", testChannel, mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { receptionWaitGroup.Done() }) defer node1.stop() defer node2.stop() config := []cluster.RemoteNode{node1.nodeInfo, node2.nodeInfo} node1.c.Configure(testChannel, config) node2.c.Configure(testChannel, config) node1RPC := &cluster.RPC{ Logger: flogging.MustGetLogger("test"), Timeout: time.Hour, StreamsByType: cluster.NewStreamsByType(), Channel: testChannel, Comm: node1.c, } // Wait for connections to be established time.Sleep(time.Second * 5) err := node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("1")}}, noopReport) require.NoError(t, err) receptionWaitGroup.Wait() // Wait for message to be received // Restart the node node2.stop() node2.resurrect() var wg2 sync.WaitGroup wg2.Add(1) reportSubmitFailed := func(err error) { require.EqualError(t, err, io.EOF.Error()) defer wg2.Done() } err = node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("2")}}, reportSubmitFailed) require.NoError(t, err) wg2.Wait() // 
Ensure stale stream is cleaned up and removed from the mapping require.Len(t, node1RPC.StreamsByType[cluster.SubmitOperation], 0) // Wait for connection to be re-established time.Sleep(time.Second * 5) // Send again, this time it should be received receptionWaitGroup.Add(1) err = node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("3")}}, noopReport) require.NoError(t, err) receptionWaitGroup.Wait() } func TestRPCChangeDestination(t *testing.T) { // We send a Submit() to 2 different nodes - 1 and 2. // The first invocation of Submit() establishes a stream with node 1 // and the second establishes a stream with node 2. // We define a mock behavior for only a single invocation of Send() on each // of the streams (to node 1 and to node 2), therefore we test that invocation // of rpc.SendSubmit to node 2 doesn't send the message to node 1. comm := &mocks.Communicator{} client1 := &mocks.ClusterClient{} client2 := &mocks.ClusterClient{} metrics := cluster.NewMetrics(&disabled.Provider{}) comm.On("Remote", "mychannel", uint64(1)).Return(&cluster.RemoteContext{ SendBuffSize: 10, Metrics: metrics, Logger: flogging.MustGetLogger("test"), Client: client1, ProbeConn: func(_ *grpc.ClientConn) error { return nil }, }, nil) comm.On("Remote", "mychannel", uint64(2)).Return(&cluster.RemoteContext{ SendBuffSize: 10, Metrics: metrics, Logger: flogging.MustGetLogger("test"), Client: client2, ProbeConn: func(_ *grpc.ClientConn) error { return nil }, }, nil) streamToNode1 := &mocks.StepClient{} streamToNode2 := &mocks.StepClient{} streamToNode1.On("Context", mock.Anything).Return(context.Background()) streamToNode2.On("Context", mock.Anything).Return(context.Background()) client1.On("Step", mock.Anything).Return(streamToNode1, nil).Once() client2.On("Step", mock.Anything).Return(streamToNode2, nil).Once() rpc := &cluster.RPC{ Logger: flogging.MustGetLogger("test"), Timeout: time.Hour, StreamsByType: 
cluster.NewStreamsByType(), Channel: "mychannel", Comm: comm, } var sent sync.WaitGroup sent.Add(2) signalSent := func(_ mock.Arguments) { sent.Done() } streamToNode1.On("Send", mock.Anything).Return(nil).Run(signalSent).Once() streamToNode2.On("Send", mock.Anything).Return(nil).Run(signalSent).Once() streamToNode1.On("Recv").Return(nil, io.EOF) streamToNode2.On("Recv").Return(nil, io.EOF) rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) sent.Wait() streamToNode1.AssertNumberOfCalls(t, "Send", 1) streamToNode2.AssertNumberOfCalls(t, "Send", 1) } func TestSend(t *testing.T) { submitRequest := &orderer.SubmitRequest{Channel: "mychannel"} submitResponse := &orderer.StepResponse{ Payload: &orderer.StepResponse_SubmitRes{ SubmitRes: &orderer.SubmitResponse{Status: common.Status_SUCCESS}, }, } consensusRequest := &orderer.ConsensusRequest{ Channel: "mychannel", } submitReq := wrapSubmitReq(submitRequest) consensusReq := &orderer.StepRequest{ Payload: &orderer.StepRequest_ConsensusRequest{ ConsensusRequest: consensusRequest, }, } submit := func(rpc *cluster.RPC) error { err := rpc.SendSubmit(1, submitRequest, noopReport) return err } step := func(rpc *cluster.RPC) error { return rpc.SendConsensus(1, consensusRequest) } type testCase struct { name string method func(rpc *cluster.RPC) error sendReturns error sendCalledWith *orderer.StepRequest receiveReturns []interface{} stepReturns []interface{} remoteError error expectedErr string } l := &sync.Mutex{} var tst testCase sent := make(chan struct{}) var sendCalls uint32 stream := &mocks.StepClient{} stream.On("Context", mock.Anything).Return(context.Background()) stream.On("Send", mock.Anything).Return(func(*orderer.StepRequest) error { l.Lock() defer l.Unlock() atomic.AddUint32(&sendCalls, 1) sent <- struct{}{} return tst.sendReturns }) for _, tst := range []testCase{ { name: "Send and Receive submit succeed", method: 
submit, sendReturns: nil, stepReturns: []interface{}{stream, nil}, receiveReturns: []interface{}{submitResponse, nil}, sendCalledWith: submitReq, }, { name: "Send step succeed", method: step, sendReturns: nil, stepReturns: []interface{}{stream, nil}, sendCalledWith: consensusReq, }, { name: "Send submit fails", method: submit, sendReturns: errors.New("oops"), stepReturns: []interface{}{stream, nil}, sendCalledWith: submitReq, expectedErr: "stream is aborted", }, { name: "Send step fails", method: step, sendReturns: errors.New("oops"), stepReturns: []interface{}{stream, nil}, sendCalledWith: consensusReq, expectedErr: "stream is aborted", }, { name: "Remote() fails", method: submit, remoteError: errors.New("timed out"), stepReturns: []interface{}{stream, nil}, expectedErr: "timed out", }, { name: "Submit fails with Send", method: submit, stepReturns: []interface{}{nil, errors.New("deadline exceeded")}, expectedErr: "deadline exceeded", }, } { l.Lock() testCase := tst l.Unlock() t.Run(testCase.name, func(t *testing.T) { atomic.StoreUint32(&sendCalls, 0) isSend := testCase.receiveReturns == nil comm := &mocks.Communicator{} client := &mocks.ClusterClient{} client.On("Step", mock.Anything).Return(testCase.stepReturns...) 
rm := &cluster.RemoteContext{ Metrics: cluster.NewMetrics(&disabled.Provider{}), SendBuffSize: 1, Logger: flogging.MustGetLogger("test"), ProbeConn: func(_ *grpc.ClientConn) error { return nil }, Client: client, } defer rm.Abort() comm.On("Remote", "mychannel", uint64(1)).Return(rm, testCase.remoteError) rpc := &cluster.RPC{ Logger: flogging.MustGetLogger("test"), Timeout: time.Hour, StreamsByType: cluster.NewStreamsByType(), Channel: "mychannel", Comm: comm, } err := testCase.method(rpc) if testCase.remoteError == nil && testCase.stepReturns[1] == nil { <-sent } if testCase.stepReturns[1] == nil && testCase.remoteError == nil { require.NoError(t, err) } else { require.EqualError(t, err, testCase.expectedErr) } if testCase.remoteError == nil && testCase.expectedErr == "" && isSend { stream.AssertCalled(t, "Send", testCase.sendCalledWith) // Ensure that if we succeeded - only 1 stream was created despite 2 calls // to Send() were made err := testCase.method(rpc) <-sent require.NoError(t, err) require.Equal(t, 2, int(atomic.LoadUint32(&sendCalls))) client.AssertNumberOfCalls(t, "Step", 1) } }) } } func TestRPCGarbageCollection(t *testing.T) { // Scenario: Send a message to a remote node, and establish a stream // while doing it. // Afterwards - make that stream be aborted, and send a message to a different // remote node. // The first stream should be cleaned from the mapping. 
comm := &mocks.Communicator{} client := &mocks.ClusterClient{} stream := &mocks.StepClient{} remote := &cluster.RemoteContext{ SendBuffSize: 10, Metrics: cluster.NewMetrics(&disabled.Provider{}), Logger: flogging.MustGetLogger("test"), Client: client, ProbeConn: func(_ *grpc.ClientConn) error { return nil }, } var sent sync.WaitGroup defineMocks := func(destination uint64) { sent.Add(1) comm.On("Remote", "mychannel", destination).Return(remote, nil) stream.On("Context", mock.Anything).Return(context.Background()) client.On("Step", mock.Anything).Return(stream, nil).Once() stream.On("Send", mock.Anything).Return(nil).Once().Run(func(_ mock.Arguments) { sent.Done() }) stream.On("Recv").Return(nil, nil) } mapping := cluster.NewStreamsByType() rpc := &cluster.RPC{ Logger: flogging.MustGetLogger("test"), Timeout: time.Hour, StreamsByType: mapping, Channel: "mychannel", Comm: comm, } defineMocks(1) rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) // Wait for the message to arrive sent.Wait() // Ensure the stream is initialized in the mapping require.Len(t, mapping[cluster.SubmitOperation], 1) require.Equal(t, uint64(1), mapping[cluster.SubmitOperation][1].ID) // And the underlying gRPC stream indeed had Send invoked on it. stream.AssertNumberOfCalls(t, "Send", 1) // Abort all streams we currently have that are associated to the remote. remote.Abort() // The stream still exists, as it is not cleaned yet. require.Len(t, mapping[cluster.SubmitOperation], 1) require.Equal(t, uint64(1), mapping[cluster.SubmitOperation][1].ID) // Prepare for the next transmission. defineMocks(2) // Send a message to a different node. rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) // The mapping should be now cleaned from the previous stream. 
require.Len(t, mapping[cluster.SubmitOperation], 1) require.Equal(t, uint64(2), mapping[cluster.SubmitOperation][2].ID) } Unit test flake when rpc server stream not closed (#2935) Testcase expects to fail SubmitRequest after node server restarts, but if restart not closed streams before sendSubmit submitted; test fails. It is random failure. Introduced a delay between restart and submitRequest. Signed-off-by: Parameswaran Selvam <34419c96699e413317b495e16a44e58e2d92c899@in.ibm.com> /* Copyright IBM Corp. 2017 All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package cluster_test import ( "context" "io" "sync" "sync/atomic" "testing" "time" "github.com/hyperledger/fabric-protos-go/common" "github.com/hyperledger/fabric-protos-go/orderer" "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/common/metrics/disabled" "github.com/hyperledger/fabric/orderer/common/cluster" "github.com/hyperledger/fabric/orderer/common/cluster/mocks" "github.com/pkg/errors" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc" ) func noopReport(_ error) { } func TestSendSubmitWithReport(t *testing.T) { t.Parallel() node1 := newTestNode(t) node2 := newTestNode(t) var receptionWaitGroup sync.WaitGroup receptionWaitGroup.Add(1) node2.handler.On("OnSubmit", testChannel, mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { receptionWaitGroup.Done() }) defer node1.stop() defer node2.stop() config := []cluster.RemoteNode{node1.nodeInfo, node2.nodeInfo} node1.c.Configure(testChannel, config) node2.c.Configure(testChannel, config) node1RPC := &cluster.RPC{ Logger: flogging.MustGetLogger("test"), Timeout: time.Hour, StreamsByType: cluster.NewStreamsByType(), Channel: testChannel, Comm: node1.c, } // Wait for connections to be established time.Sleep(time.Second * 5) err := node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: 
[]byte("1")}}, noopReport) require.NoError(t, err) receptionWaitGroup.Wait() // Wait for message to be received // Restart the node node2.stop() node2.resurrect() /* * allow the node2 to restart completely * if restart not complete, the existing stream able to successfully send * the next SubmitRequest which makes the testcase fails. Hence this delay * required */ time.Sleep(time.Second * 5) var wg2 sync.WaitGroup wg2.Add(1) reportSubmitFailed := func(err error) { defer wg2.Done() require.EqualError(t, err, io.EOF.Error()) } err = node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("2")}}, reportSubmitFailed) require.NoError(t, err) wg2.Wait() // Ensure stale stream is cleaned up and removed from the mapping require.Len(t, node1RPC.StreamsByType[cluster.SubmitOperation], 0) // Wait for connection to be re-established time.Sleep(time.Second * 5) // Send again, this time it should be received receptionWaitGroup.Add(1) err = node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("3")}}, noopReport) require.NoError(t, err) receptionWaitGroup.Wait() } func TestRPCChangeDestination(t *testing.T) { // We send a Submit() to 2 different nodes - 1 and 2. // The first invocation of Submit() establishes a stream with node 1 // and the second establishes a stream with node 2. // We define a mock behavior for only a single invocation of Send() on each // of the streams (to node 1 and to node 2), therefore we test that invocation // of rpc.SendSubmit to node 2 doesn't send the message to node 1. 
comm := &mocks.Communicator{} client1 := &mocks.ClusterClient{} client2 := &mocks.ClusterClient{} metrics := cluster.NewMetrics(&disabled.Provider{}) comm.On("Remote", "mychannel", uint64(1)).Return(&cluster.RemoteContext{ SendBuffSize: 10, Metrics: metrics, Logger: flogging.MustGetLogger("test"), Client: client1, ProbeConn: func(_ *grpc.ClientConn) error { return nil }, }, nil) comm.On("Remote", "mychannel", uint64(2)).Return(&cluster.RemoteContext{ SendBuffSize: 10, Metrics: metrics, Logger: flogging.MustGetLogger("test"), Client: client2, ProbeConn: func(_ *grpc.ClientConn) error { return nil }, }, nil) streamToNode1 := &mocks.StepClient{} streamToNode2 := &mocks.StepClient{} streamToNode1.On("Context", mock.Anything).Return(context.Background()) streamToNode2.On("Context", mock.Anything).Return(context.Background()) client1.On("Step", mock.Anything).Return(streamToNode1, nil).Once() client2.On("Step", mock.Anything).Return(streamToNode2, nil).Once() rpc := &cluster.RPC{ Logger: flogging.MustGetLogger("test"), Timeout: time.Hour, StreamsByType: cluster.NewStreamsByType(), Channel: "mychannel", Comm: comm, } var sent sync.WaitGroup sent.Add(2) signalSent := func(_ mock.Arguments) { sent.Done() } streamToNode1.On("Send", mock.Anything).Return(nil).Run(signalSent).Once() streamToNode2.On("Send", mock.Anything).Return(nil).Run(signalSent).Once() streamToNode1.On("Recv").Return(nil, io.EOF) streamToNode2.On("Recv").Return(nil, io.EOF) rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) sent.Wait() streamToNode1.AssertNumberOfCalls(t, "Send", 1) streamToNode2.AssertNumberOfCalls(t, "Send", 1) } func TestSend(t *testing.T) { submitRequest := &orderer.SubmitRequest{Channel: "mychannel"} submitResponse := &orderer.StepResponse{ Payload: &orderer.StepResponse_SubmitRes{ SubmitRes: &orderer.SubmitResponse{Status: common.Status_SUCCESS}, }, } consensusRequest := 
&orderer.ConsensusRequest{ Channel: "mychannel", } submitReq := wrapSubmitReq(submitRequest) consensusReq := &orderer.StepRequest{ Payload: &orderer.StepRequest_ConsensusRequest{ ConsensusRequest: consensusRequest, }, } submit := func(rpc *cluster.RPC) error { err := rpc.SendSubmit(1, submitRequest, noopReport) return err } step := func(rpc *cluster.RPC) error { return rpc.SendConsensus(1, consensusRequest) } type testCase struct { name string method func(rpc *cluster.RPC) error sendReturns error sendCalledWith *orderer.StepRequest receiveReturns []interface{} stepReturns []interface{} remoteError error expectedErr string } l := &sync.Mutex{} var tst testCase sent := make(chan struct{}) var sendCalls uint32 stream := &mocks.StepClient{} stream.On("Context", mock.Anything).Return(context.Background()) stream.On("Send", mock.Anything).Return(func(*orderer.StepRequest) error { l.Lock() defer l.Unlock() atomic.AddUint32(&sendCalls, 1) sent <- struct{}{} return tst.sendReturns }) for _, tst := range []testCase{ { name: "Send and Receive submit succeed", method: submit, sendReturns: nil, stepReturns: []interface{}{stream, nil}, receiveReturns: []interface{}{submitResponse, nil}, sendCalledWith: submitReq, }, { name: "Send step succeed", method: step, sendReturns: nil, stepReturns: []interface{}{stream, nil}, sendCalledWith: consensusReq, }, { name: "Send submit fails", method: submit, sendReturns: errors.New("oops"), stepReturns: []interface{}{stream, nil}, sendCalledWith: submitReq, expectedErr: "stream is aborted", }, { name: "Send step fails", method: step, sendReturns: errors.New("oops"), stepReturns: []interface{}{stream, nil}, sendCalledWith: consensusReq, expectedErr: "stream is aborted", }, { name: "Remote() fails", method: submit, remoteError: errors.New("timed out"), stepReturns: []interface{}{stream, nil}, expectedErr: "timed out", }, { name: "Submit fails with Send", method: submit, stepReturns: []interface{}{nil, errors.New("deadline exceeded")}, 
expectedErr: "deadline exceeded", }, } { l.Lock() testCase := tst l.Unlock() t.Run(testCase.name, func(t *testing.T) { atomic.StoreUint32(&sendCalls, 0) isSend := testCase.receiveReturns == nil comm := &mocks.Communicator{} client := &mocks.ClusterClient{} client.On("Step", mock.Anything).Return(testCase.stepReturns...) rm := &cluster.RemoteContext{ Metrics: cluster.NewMetrics(&disabled.Provider{}), SendBuffSize: 1, Logger: flogging.MustGetLogger("test"), ProbeConn: func(_ *grpc.ClientConn) error { return nil }, Client: client, } defer rm.Abort() comm.On("Remote", "mychannel", uint64(1)).Return(rm, testCase.remoteError) rpc := &cluster.RPC{ Logger: flogging.MustGetLogger("test"), Timeout: time.Hour, StreamsByType: cluster.NewStreamsByType(), Channel: "mychannel", Comm: comm, } err := testCase.method(rpc) if testCase.remoteError == nil && testCase.stepReturns[1] == nil { <-sent } if testCase.stepReturns[1] == nil && testCase.remoteError == nil { require.NoError(t, err) } else { require.EqualError(t, err, testCase.expectedErr) } if testCase.remoteError == nil && testCase.expectedErr == "" && isSend { stream.AssertCalled(t, "Send", testCase.sendCalledWith) // Ensure that if we succeeded - only 1 stream was created despite 2 calls // to Send() were made err := testCase.method(rpc) <-sent require.NoError(t, err) require.Equal(t, 2, int(atomic.LoadUint32(&sendCalls))) client.AssertNumberOfCalls(t, "Step", 1) } }) } } func TestRPCGarbageCollection(t *testing.T) { // Scenario: Send a message to a remote node, and establish a stream // while doing it. // Afterwards - make that stream be aborted, and send a message to a different // remote node. // The first stream should be cleaned from the mapping. 
comm := &mocks.Communicator{} client := &mocks.ClusterClient{} stream := &mocks.StepClient{} remote := &cluster.RemoteContext{ SendBuffSize: 10, Metrics: cluster.NewMetrics(&disabled.Provider{}), Logger: flogging.MustGetLogger("test"), Client: client, ProbeConn: func(_ *grpc.ClientConn) error { return nil }, } var sent sync.WaitGroup defineMocks := func(destination uint64) { sent.Add(1) comm.On("Remote", "mychannel", destination).Return(remote, nil) stream.On("Context", mock.Anything).Return(context.Background()) client.On("Step", mock.Anything).Return(stream, nil).Once() stream.On("Send", mock.Anything).Return(nil).Once().Run(func(_ mock.Arguments) { sent.Done() }) stream.On("Recv").Return(nil, nil) } mapping := cluster.NewStreamsByType() rpc := &cluster.RPC{ Logger: flogging.MustGetLogger("test"), Timeout: time.Hour, StreamsByType: mapping, Channel: "mychannel", Comm: comm, } defineMocks(1) rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) // Wait for the message to arrive sent.Wait() // Ensure the stream is initialized in the mapping require.Len(t, mapping[cluster.SubmitOperation], 1) require.Equal(t, uint64(1), mapping[cluster.SubmitOperation][1].ID) // And the underlying gRPC stream indeed had Send invoked on it. stream.AssertNumberOfCalls(t, "Send", 1) // Abort all streams we currently have that are associated to the remote. remote.Abort() // The stream still exists, as it is not cleaned yet. require.Len(t, mapping[cluster.SubmitOperation], 1) require.Equal(t, uint64(1), mapping[cluster.SubmitOperation][1].ID) // Prepare for the next transmission. defineMocks(2) // Send a message to a different node. rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) // The mapping should be now cleaned from the previous stream. require.Len(t, mapping[cluster.SubmitOperation], 1) require.Equal(t, uint64(2), mapping[cluster.SubmitOperation][2].ID) }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package protobuf import ( "fmt" "io" "log" "reflect" "sort" "strconv" "strings" "k8s.io/klog/v2" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" ) // genProtoIDL produces a .proto IDL. type genProtoIDL struct { generator.DefaultGen localPackage types.Name localGoPackage types.Name imports namer.ImportTracker generateAll bool omitGogo bool omitFieldTypes map[types.Name]struct{} } func (g *genProtoIDL) PackageVars(c *generator.Context) []string { if g.omitGogo { return []string{ fmt.Sprintf("option go_package = %q;", g.localGoPackage.Name), } } return []string{ "option (gogoproto.marshaler_all) = true;", "option (gogoproto.stable_marshaler_all) = true;", "option (gogoproto.sizer_all) = true;", "option (gogoproto.goproto_stringer_all) = false;", "option (gogoproto.stringer_all) = true;", "option (gogoproto.unmarshaler_all) = true;", "option (gogoproto.goproto_unrecognized_all) = false;", "option (gogoproto.goproto_enum_prefix_all) = false;", "option (gogoproto.goproto_getters_all) = false;", fmt.Sprintf("option go_package = %q;", g.localGoPackage.Name), } } func (g *genProtoIDL) Filename() string { return g.OptionalName + ".proto" } func (g *genProtoIDL) FileType() string { return "protoidl" } func (g *genProtoIDL) Namers(c *generator.Context) namer.NameSystems { return namer.NameSystems{ // The local namer returns the correct protobuf name for a proto type // in the context of a package "local": 
localNamer{g.localPackage}, } } // Filter ignores types that are identified as not exportable. func (g *genProtoIDL) Filter(c *generator.Context, t *types.Type) bool { tagVals := types.ExtractCommentTags("+", t.CommentLines)["protobuf"] if tagVals != nil { if tagVals[0] == "false" { // Type specified "false". return false } if tagVals[0] == "true" { // Type specified "true". return true } klog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0]) } if !g.generateAll { // We're not generating everything. return false } seen := map[*types.Type]bool{} ok := isProtoable(seen, t) return ok } func isProtoable(seen map[*types.Type]bool, t *types.Type) bool { if seen[t] { // be optimistic in the case of type cycles. return true } seen[t] = true switch t.Kind { case types.Builtin: return true case types.Alias: return isProtoable(seen, t.Underlying) case types.Slice, types.Pointer: return isProtoable(seen, t.Elem) case types.Map: return isProtoable(seen, t.Key) && isProtoable(seen, t.Elem) case types.Struct: if len(t.Members) == 0 { return true } for _, m := range t.Members { if isProtoable(seen, m.Type) { return true } } return false case types.Func, types.Chan: return false case types.DeclarationOf, types.Unknown, types.Unsupported: return false case types.Interface: return false default: log.Printf("WARNING: type %q is not portable: %s", t.Kind, t.Name) return false } } // isOptionalAlias should return true if the specified type has an underlying type // (is an alias) of a map or slice and has the comment tag protobuf.nullable=true, // indicating that the type should be nullable in protobuf. 
func isOptionalAlias(t *types.Type) bool {
	// Only aliases whose underlying type is a map or slice can be optional.
	if t.Underlying == nil || (t.Underlying.Kind != types.Map && t.Underlying.Kind != types.Slice) {
		return false
	}
	// The alias must opt in explicitly via the protobuf.nullable comment tag.
	if extractBoolTagOrDie("protobuf.nullable", t.CommentLines) == false {
		return false
	}
	return true
}

// Imports returns the .proto import lines accumulated by the import tracker,
// dropping the gogoproto import when gogo extensions are disabled.
func (g *genProtoIDL) Imports(c *generator.Context) (imports []string) {
	lines := []string{}
	// TODO: this could be expressed more cleanly
	for _, line := range g.imports.ImportLines() {
		if g.omitGogo && line == "github.com/gogo/protobuf/gogoproto/gogo.proto" {
			continue
		}
		lines = append(lines, line)
	}
	return lines
}

// GenerateType makes the body of a file implementing a set for type t.
func (g *genProtoIDL) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
	sw := generator.NewSnippetWriter(w, c, "$", "$")
	// Bundle everything the per-type body generator needs, including a
	// locator that can resolve Go types to their protobuf equivalents.
	b := bodyGen{
		locator: &protobufLocator{
			namer:          c.Namers["proto"].(ProtobufFromGoNamer),
			tracker:        g.imports,
			universe:       c.Universe,
			localGoPackage: g.localGoPackage.Package,
		},
		localPackage:   g.localPackage,
		omitGogo:       g.omitGogo,
		omitFieldTypes: g.omitFieldTypes,
		t:              t,
	}
	// Aliases and structs are generated; anything else is an error.
	switch t.Kind {
	case types.Alias:
		return b.doAlias(sw)
	case types.Struct:
		return b.doStruct(sw)
	default:
		return b.unknown(sw)
	}
}

// ProtobufFromGoNamer finds the protobuf name of a type (and its package, and
// the package path) from its Go name.
type ProtobufFromGoNamer interface {
	GoNameToProtoName(name types.Name) types.Name
}

// ProtobufLocator resolves Go types to protobuf types, Go names to Go types,
// and produces cast type names for gogoproto cast options.
type ProtobufLocator interface {
	ProtoTypeFor(t *types.Type) (*types.Type, error)
	GoTypeForName(name types.Name) *types.Type
	CastTypeName(name types.Name) string
}

// protobufLocator is the concrete ProtobufLocator backed by the generator's
// import tracker and type universe.
type protobufLocator struct {
	namer    ProtobufFromGoNamer
	tracker  namer.ImportTracker
	universe types.Universe

	// localGoPackage is the Go import path of the package being generated;
	// names in this package are rendered unqualified.
	localGoPackage string
}

// CastTypeName returns the cast type name of a Go type
// TODO: delegate to a new localgo namer?
func (p protobufLocator) CastTypeName(name types.Name) string { if name.Package == p.localGoPackage { return name.Name } return name.String() } func (p protobufLocator) GoTypeForName(name types.Name) *types.Type { if len(name.Package) == 0 { name.Package = p.localGoPackage } return p.universe.Type(name) } // ProtoTypeFor locates a Protobuf type for the provided Go type (if possible). func (p protobufLocator) ProtoTypeFor(t *types.Type) (*types.Type, error) { switch { // we've already converted the type, or it's a map case t.Kind == types.Protobuf || t.Kind == types.Map: p.tracker.AddType(t) return t, nil } // it's a fundamental type if t, ok := isFundamentalProtoType(t); ok { p.tracker.AddType(t) return t, nil } // it's a message if t.Kind == types.Struct || isOptionalAlias(t) { t := &types.Type{ Name: p.namer.GoNameToProtoName(t.Name), Kind: types.Protobuf, CommentLines: t.CommentLines, } p.tracker.AddType(t) return t, nil } return nil, errUnrecognizedType } type bodyGen struct { locator ProtobufLocator localPackage types.Name omitGogo bool omitFieldTypes map[types.Name]struct{} t *types.Type } func (b bodyGen) unknown(sw *generator.SnippetWriter) error { return fmt.Errorf("not sure how to generate: %#v", b.t) } func (b bodyGen) doAlias(sw *generator.SnippetWriter) error { if !isOptionalAlias(b.t) { return nil } var kind string switch b.t.Underlying.Kind { case types.Map: kind = "map" default: kind = "slice" } optional := &types.Type{ Name: b.t.Name, Kind: types.Struct, CommentLines: b.t.CommentLines, SecondClosestCommentLines: b.t.SecondClosestCommentLines, Members: []types.Member{ { Name: "Items", CommentLines: []string{fmt.Sprintf("items, if empty, will result in an empty %s\n", kind)}, Type: b.t.Underlying, }, }, } nested := b nested.t = optional return nested.doStruct(sw) } func (b bodyGen) doStruct(sw *generator.SnippetWriter) error { if len(b.t.Name.Name) == 0 { return nil } if namer.IsPrivateGoName(b.t.Name.Name) { return nil } var alias *types.Type var 
fields []protoField options := []string{} allOptions := types.ExtractCommentTags("+", b.t.CommentLines) for k, v := range allOptions { switch { case strings.HasPrefix(k, "protobuf.options."): key := strings.TrimPrefix(k, "protobuf.options.") switch key { case "marshal": if v[0] == "false" { if !b.omitGogo { options = append(options, "(gogoproto.marshaler) = false", "(gogoproto.unmarshaler) = false", "(gogoproto.sizer) = false", ) } } default: if !b.omitGogo || !strings.HasPrefix(key, "(gogoproto.") { if key == "(gogoproto.goproto_stringer)" && v[0] == "false" { options = append(options, "(gogoproto.stringer) = false") } options = append(options, fmt.Sprintf("%s = %s", key, v[0])) } } // protobuf.as allows a type to have the same message contents as another Go type case k == "protobuf.as": fields = nil if alias = b.locator.GoTypeForName(types.Name{Name: v[0]}); alias == nil { return fmt.Errorf("type %v references alias %q which does not exist", b.t, v[0]) } // protobuf.embed instructs the generator to use the named type in this package // as an embedded message. case k == "protobuf.embed": fields = []protoField{ { Tag: 1, Name: v[0], Type: &types.Type{ Name: types.Name{ Name: v[0], Package: b.localPackage.Package, Path: b.localPackage.Path, }, }, }, } } } if alias == nil { alias = b.t } // If we don't explicitly embed anything, generate fields by traversing fields. 
if fields == nil { memberFields, err := membersToFields(b.locator, alias, b.localPackage, b.omitFieldTypes) if err != nil { return fmt.Errorf("type %v cannot be converted to protobuf: %v", b.t, err) } fields = memberFields } out := sw.Out() genComment(out, b.t.CommentLines, "") sw.Do(`message $.Name.Name$ { `, b.t) if len(options) > 0 { sort.Strings(options) for _, s := range options { fmt.Fprintf(out, " option %s;\n", s) } fmt.Fprintln(out) } for i, field := range fields { genComment(out, field.CommentLines, " ") fmt.Fprintf(out, " ") switch { case field.Map: case field.Repeated: fmt.Fprintf(out, "repeated ") case field.Required: fmt.Fprintf(out, "required ") default: fmt.Fprintf(out, "optional ") } sw.Do(`$.Type|local$ $.Name$ = $.Tag$`, field) if len(field.Extras) > 0 { extras := []string{} for k, v := range field.Extras { if b.omitGogo && strings.HasPrefix(k, "(gogoproto.") { continue } extras = append(extras, fmt.Sprintf("%s = %s", k, v)) } sort.Strings(extras) if len(extras) > 0 { fmt.Fprintf(out, " [") fmt.Fprint(out, strings.Join(extras, ", ")) fmt.Fprintf(out, "]") } } fmt.Fprintf(out, ";\n") if i != len(fields)-1 { fmt.Fprintf(out, "\n") } } fmt.Fprintf(out, "}\n\n") return nil } type protoField struct { LocalPackage types.Name Tag int Name string Type *types.Type Map bool Repeated bool Optional bool Required bool Nullable bool Extras map[string]string CommentLines []string } var ( errUnrecognizedType = fmt.Errorf("did not recognize the provided type") ) func isFundamentalProtoType(t *types.Type) (*types.Type, bool) { // TODO: when we enable proto3, also include other fundamental types in the google.protobuf package // switch { // case t.Kind == types.Struct && t.Name == types.Name{Package: "time", Name: "Time"}: // return &types.Type{ // Kind: types.Protobuf, // Name: types.Name{Path: "google/protobuf/timestamp.proto", Package: "google.protobuf", Name: "Timestamp"}, // }, true // } switch t.Kind { case types.Slice: if t.Elem.Name.Name == "byte" && 
len(t.Elem.Name.Package) == 0 { return &types.Type{Name: types.Name{Name: "bytes"}, Kind: types.Protobuf}, true } case types.Builtin: switch t.Name.Name { case "string", "uint32", "int32", "uint64", "int64", "bool": return &types.Type{Name: types.Name{Name: t.Name.Name}, Kind: types.Protobuf}, true case "int": return &types.Type{Name: types.Name{Name: "int64"}, Kind: types.Protobuf}, true case "uint": return &types.Type{Name: types.Name{Name: "uint64"}, Kind: types.Protobuf}, true case "float64", "float": return &types.Type{Name: types.Name{Name: "double"}, Kind: types.Protobuf}, true case "float32": return &types.Type{Name: types.Name{Name: "float"}, Kind: types.Protobuf}, true case "uintptr": return &types.Type{Name: types.Name{Name: "uint64"}, Kind: types.Protobuf}, true } // TODO: complex? } return t, false } func memberTypeToProtobufField(locator ProtobufLocator, field *protoField, t *types.Type) error { var err error switch t.Kind { case types.Protobuf: field.Type, err = locator.ProtoTypeFor(t) case types.Builtin: field.Type, err = locator.ProtoTypeFor(t) case types.Map: valueField := &protoField{} if err := memberTypeToProtobufField(locator, valueField, t.Elem); err != nil { return err } keyField := &protoField{} if err := memberTypeToProtobufField(locator, keyField, t.Key); err != nil { return err } // All other protobuf types have kind types.Protobuf, so setting types.Map // here would be very misleading. 
field.Type = &types.Type{ Kind: types.Protobuf, Key: keyField.Type, Elem: valueField.Type, } if !strings.HasPrefix(t.Name.Name, "map[") { field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) } if k, ok := keyField.Extras["(gogoproto.casttype)"]; ok { field.Extras["(gogoproto.castkey)"] = k } if v, ok := valueField.Extras["(gogoproto.casttype)"]; ok { field.Extras["(gogoproto.castvalue)"] = v } field.Map = true case types.Pointer: if err := memberTypeToProtobufField(locator, field, t.Elem); err != nil { return err } field.Nullable = true case types.Alias: if isOptionalAlias(t) { field.Type, err = locator.ProtoTypeFor(t) field.Nullable = true } else { if err := memberTypeToProtobufField(locator, field, t.Underlying); err != nil { log.Printf("failed to alias: %s %s: err %v", t.Name, t.Underlying.Name, err) return err } // If this is not an alias to a slice, cast to the alias if !field.Repeated { if field.Extras == nil { field.Extras = make(map[string]string) } field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) } } case types.Slice: if t.Elem.Name.Name == "byte" && len(t.Elem.Name.Package) == 0 { field.Type = &types.Type{Name: types.Name{Name: "bytes"}, Kind: types.Protobuf} return nil } if err := memberTypeToProtobufField(locator, field, t.Elem); err != nil { return err } field.Repeated = true case types.Struct: if len(t.Name.Name) == 0 { return errUnrecognizedType } field.Type, err = locator.ProtoTypeFor(t) field.Nullable = false default: return errUnrecognizedType } return err } // protobufTagToField extracts information from an existing protobuf tag func protobufTagToField(tag string, field *protoField, m types.Member, t *types.Type, localPackage types.Name) error { if len(tag) == 0 || tag == "-" { return nil } // protobuf:"bytes,3,opt,name=Id,customtype=github.com/gogo/protobuf/test.Uuid" parts := strings.Split(tag, ",") if len(parts) < 3 { return fmt.Errorf("member %q of %q malformed 'protobuf' tag, 
not enough segments\n", m.Name, t.Name) } protoTag, err := strconv.Atoi(parts[1]) if err != nil { return fmt.Errorf("member %q of %q malformed 'protobuf' tag, field ID is %q which is not an integer: %v\n", m.Name, t.Name, parts[1], err) } field.Tag = protoTag // In general there is doesn't make sense to parse the protobuf tags to get the type, // as all auto-generated once will have wire type "bytes", "varint" or "fixed64". // However, sometimes we explicitly set them to have a custom serialization, e.g.: // type Time struct { // time.Time `protobuf:"Timestamp,1,req,name=time"` // } // to force the generator to use a given type (that we manually wrote serialization & // deserialization methods for). switch parts[0] { case "varint", "fixed32", "fixed64", "bytes", "group": default: var name types.Name if last := strings.LastIndex(parts[0], "."); last != -1 { prefix := parts[0][:last] name = types.Name{ Name: parts[0][last+1:], Package: prefix, Path: strings.Replace(prefix, ".", "/", -1), } } else { name = types.Name{ Name: parts[0], Package: localPackage.Package, Path: localPackage.Path, } } field.Type = &types.Type{ Name: name, Kind: types.Protobuf, } } protoExtra := make(map[string]string) for i, extra := range parts[3:] { parts := strings.SplitN(extra, "=", 2) if len(parts) != 2 { return fmt.Errorf("member %q of %q malformed 'protobuf' tag, tag %d should be key=value, got %q\n", m.Name, t.Name, i+4, extra) } switch parts[0] { case "name": protoExtra[parts[0]] = parts[1] case "casttype", "castkey", "castvalue": parts[0] = fmt.Sprintf("(gogoproto.%s)", parts[0]) protoExtra[parts[0]] = strconv.Quote(parts[1]) } } field.Extras = protoExtra if name, ok := protoExtra["name"]; ok { field.Name = name delete(protoExtra, "name") } return nil } func membersToFields(locator ProtobufLocator, t *types.Type, localPackage types.Name, omitFieldTypes map[types.Name]struct{}) ([]protoField, error) { fields := []protoField{} for _, m := range t.Members { if 
namer.IsPrivateGoName(m.Name) { // skip private fields continue } if _, ok := omitFieldTypes[types.Name{Name: m.Type.Name.Name, Package: m.Type.Name.Package}]; ok { continue } tags := reflect.StructTag(m.Tags) field := protoField{ LocalPackage: localPackage, Tag: -1, Extras: make(map[string]string), } protobufTag := tags.Get("protobuf") if protobufTag == "-" { continue } if err := protobufTagToField(protobufTag, &field, m, t, localPackage); err != nil { return nil, err } // extract information from JSON field tag if tag := tags.Get("json"); len(tag) > 0 { parts := strings.Split(tag, ",") if len(field.Name) == 0 && len(parts[0]) != 0 { field.Name = parts[0] } if field.Tag == -1 && field.Name == "-" { continue } } if field.Type == nil { if err := memberTypeToProtobufField(locator, &field, m.Type); err != nil { return nil, fmt.Errorf("unable to embed type %q as field %q in %q: %v", m.Type, field.Name, t.Name, err) } } if len(field.Name) == 0 { field.Name = namer.IL(m.Name) } if field.Map && field.Repeated { // maps cannot be repeated field.Repeated = false field.Nullable = true } if !field.Nullable { field.Extras["(gogoproto.nullable)"] = "false" } if (field.Type.Name.Name == "bytes" && field.Type.Name.Package == "") || (field.Repeated && field.Type.Name.Package == "" && namer.IsPrivateGoName(field.Type.Name.Name)) { delete(field.Extras, "(gogoproto.nullable)") } if field.Name != m.Name { field.Extras["(gogoproto.customname)"] = strconv.Quote(m.Name) } field.CommentLines = m.CommentLines fields = append(fields, field) } // assign tags highest := 0 byTag := make(map[int]*protoField) // fields are in Go struct order, which we preserve for i := range fields { field := &fields[i] tag := field.Tag if tag != -1 { if existing, ok := byTag[tag]; ok { return nil, fmt.Errorf("field %q and %q both have tag %d", field.Name, existing.Name, tag) } byTag[tag] = field } if tag > highest { highest = tag } } // starting from the highest observed tag, assign new field tags for i := 
range fields { field := &fields[i] if field.Tag != -1 { continue } highest++ field.Tag = highest byTag[field.Tag] = field } return fields, nil } func genComment(out io.Writer, lines []string, indent string) { for { l := len(lines) if l == 0 || len(lines[l-1]) != 0 { break } lines = lines[:l-1] } for _, c := range lines { if len(c) == 0 { fmt.Fprintf(out, "%s//\n", indent) // avoid trailing whitespace continue } fmt.Fprintf(out, "%s// %s\n", indent, c) } } func formatProtoFile(source []byte) ([]byte, error) { // TODO; Is there any protobuf formatter? return source, nil } func assembleProtoFile(w io.Writer, f *generator.File) { w.Write(f.Header) fmt.Fprint(w, "syntax = \"proto2\";\n\n") if len(f.PackageName) > 0 { fmt.Fprintf(w, "package %s;\n\n", f.PackageName) } if len(f.Imports) > 0 { imports := []string{} for i := range f.Imports { imports = append(imports, i) } sort.Strings(imports) for _, s := range imports { fmt.Fprintf(w, "import %q;\n", s) } fmt.Fprint(w, "\n") } if f.Vars.Len() > 0 { fmt.Fprintf(w, "%s\n", f.Vars.String()) } w.Write(f.Body.Bytes()) } func NewProtoFile() *generator.DefaultFileType { return &generator.DefaultFileType{ Format: formatProtoFile, Assemble: assembleProtoFile, } } go-to-protobuf: use full path for go_package /* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

package protobuf

import (
	"fmt"
	"io"
	"log"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"k8s.io/klog/v2"

	"k8s.io/gengo/generator"
	"k8s.io/gengo/namer"
	"k8s.io/gengo/types"
)

// genProtoIDL produces a .proto IDL.
type genProtoIDL struct {
	generator.DefaultGen
	localPackage   types.Name
	localGoPackage types.Name
	imports        namer.ImportTracker

	generateAll    bool
	omitGogo       bool
	omitFieldTypes map[types.Name]struct{}
}

// PackageVars returns the file-scoped option statements emitted at the top of
// the generated .proto file. go_package carries the full Go import path
// (localGoPackage.Package) so generated code is importable under the correct
// path; when gogo extensions are omitted only that option is emitted.
func (g *genProtoIDL) PackageVars(c *generator.Context) []string {
	if g.omitGogo {
		return []string{
			fmt.Sprintf("option go_package = %q;", g.localGoPackage.Package),
		}
	}
	return []string{
		"option (gogoproto.marshaler_all) = true;",
		"option (gogoproto.stable_marshaler_all) = true;",
		"option (gogoproto.sizer_all) = true;",
		"option (gogoproto.goproto_stringer_all) = false;",
		"option (gogoproto.stringer_all) = true;",
		"option (gogoproto.unmarshaler_all) = true;",
		"option (gogoproto.goproto_unrecognized_all) = false;",
		"option (gogoproto.goproto_enum_prefix_all) = false;",
		"option (gogoproto.goproto_getters_all) = false;",
		fmt.Sprintf("option go_package = %q;", g.localGoPackage.Package),
	}
}

// Filename names the output file after the generator's optional name.
func (g *genProtoIDL) Filename() string { return g.OptionalName + ".proto" }

// FileType identifies the file type this generator emits.
func (g *genProtoIDL) FileType() string { return "protoidl" }

// Namers registers the "local" namer used inside generated snippets.
func (g *genProtoIDL) Namers(c *generator.Context) namer.NameSystems {
	return namer.NameSystems{
		// The local namer returns the correct protobuf name for a proto type
		// in the context of a package
		"local": localNamer{g.localPackage},
	}
}

// Filter ignores types that are identified as not exportable.
func (g *genProtoIDL) Filter(c *generator.Context, t *types.Type) bool {
	tagVals := types.ExtractCommentTags("+", t.CommentLines)["protobuf"]
	if tagVals != nil {
		if tagVals[0] == "false" {
			// Type specified "false".
			return false
		}
		if tagVals[0] == "true" {
			// Type specified "true".
			return true
		}
		klog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0])
	}
	if !g.generateAll {
		// We're not generating everything.
		return false
	}
	seen := map[*types.Type]bool{}
	ok := isProtoable(seen, t)
	return ok
}

// isProtoable reports whether t can be represented in protobuf, recursing
// through aliases, slices, pointers, maps, and struct members. The seen map
// breaks type cycles.
func isProtoable(seen map[*types.Type]bool, t *types.Type) bool {
	if seen[t] {
		// be optimistic in the case of type cycles.
		return true
	}
	seen[t] = true
	switch t.Kind {
	case types.Builtin:
		return true
	case types.Alias:
		return isProtoable(seen, t.Underlying)
	case types.Slice, types.Pointer:
		return isProtoable(seen, t.Elem)
	case types.Map:
		return isProtoable(seen, t.Key) && isProtoable(seen, t.Elem)
	case types.Struct:
		if len(t.Members) == 0 {
			return true
		}
		// NOTE(review): a struct is accepted if ANY member is protoable —
		// members that are not will surface as errors later in generation.
		for _, m := range t.Members {
			if isProtoable(seen, m.Type) {
				return true
			}
		}
		return false
	case types.Func, types.Chan:
		return false
	case types.DeclarationOf, types.Unknown, types.Unsupported:
		return false
	case types.Interface:
		return false
	default:
		log.Printf("WARNING: type %q is not portable: %s", t.Kind, t.Name)
		return false
	}
}

// isOptionalAlias should return true if the specified type has an underlying type
// (is an alias) of a map or slice and has the comment tag protobuf.nullable=true,
// indicating that the type should be nullable in protobuf.
func isOptionalAlias(t *types.Type) bool {
	// Only aliases whose underlying type is a map or slice can be optional.
	if t.Underlying == nil || (t.Underlying.Kind != types.Map && t.Underlying.Kind != types.Slice) {
		return false
	}
	// The alias must opt in explicitly via the protobuf.nullable comment tag.
	if extractBoolTagOrDie("protobuf.nullable", t.CommentLines) == false {
		return false
	}
	return true
}

// Imports returns the .proto import lines accumulated by the import tracker,
// dropping the gogoproto import when gogo extensions are disabled.
func (g *genProtoIDL) Imports(c *generator.Context) (imports []string) {
	lines := []string{}
	// TODO: this could be expressed more cleanly
	for _, line := range g.imports.ImportLines() {
		if g.omitGogo && line == "github.com/gogo/protobuf/gogoproto/gogo.proto" {
			continue
		}
		lines = append(lines, line)
	}
	return lines
}

// GenerateType makes the body of a file implementing a set for type t.
func (g *genProtoIDL) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") b := bodyGen{ locator: &protobufLocator{ namer: c.Namers["proto"].(ProtobufFromGoNamer), tracker: g.imports, universe: c.Universe, localGoPackage: g.localGoPackage.Package, }, localPackage: g.localPackage, omitGogo: g.omitGogo, omitFieldTypes: g.omitFieldTypes, t: t, } switch t.Kind { case types.Alias: return b.doAlias(sw) case types.Struct: return b.doStruct(sw) default: return b.unknown(sw) } } // ProtobufFromGoNamer finds the protobuf name of a type (and its package, and // the package path) from its Go name. type ProtobufFromGoNamer interface { GoNameToProtoName(name types.Name) types.Name } type ProtobufLocator interface { ProtoTypeFor(t *types.Type) (*types.Type, error) GoTypeForName(name types.Name) *types.Type CastTypeName(name types.Name) string } type protobufLocator struct { namer ProtobufFromGoNamer tracker namer.ImportTracker universe types.Universe localGoPackage string } // CastTypeName returns the cast type name of a Go type // TODO: delegate to a new localgo namer? func (p protobufLocator) CastTypeName(name types.Name) string { if name.Package == p.localGoPackage { return name.Name } return name.String() } func (p protobufLocator) GoTypeForName(name types.Name) *types.Type { if len(name.Package) == 0 { name.Package = p.localGoPackage } return p.universe.Type(name) } // ProtoTypeFor locates a Protobuf type for the provided Go type (if possible). 
func (p protobufLocator) ProtoTypeFor(t *types.Type) (*types.Type, error) { switch { // we've already converted the type, or it's a map case t.Kind == types.Protobuf || t.Kind == types.Map: p.tracker.AddType(t) return t, nil } // it's a fundamental type if t, ok := isFundamentalProtoType(t); ok { p.tracker.AddType(t) return t, nil } // it's a message if t.Kind == types.Struct || isOptionalAlias(t) { t := &types.Type{ Name: p.namer.GoNameToProtoName(t.Name), Kind: types.Protobuf, CommentLines: t.CommentLines, } p.tracker.AddType(t) return t, nil } return nil, errUnrecognizedType } type bodyGen struct { locator ProtobufLocator localPackage types.Name omitGogo bool omitFieldTypes map[types.Name]struct{} t *types.Type } func (b bodyGen) unknown(sw *generator.SnippetWriter) error { return fmt.Errorf("not sure how to generate: %#v", b.t) } func (b bodyGen) doAlias(sw *generator.SnippetWriter) error { if !isOptionalAlias(b.t) { return nil } var kind string switch b.t.Underlying.Kind { case types.Map: kind = "map" default: kind = "slice" } optional := &types.Type{ Name: b.t.Name, Kind: types.Struct, CommentLines: b.t.CommentLines, SecondClosestCommentLines: b.t.SecondClosestCommentLines, Members: []types.Member{ { Name: "Items", CommentLines: []string{fmt.Sprintf("items, if empty, will result in an empty %s\n", kind)}, Type: b.t.Underlying, }, }, } nested := b nested.t = optional return nested.doStruct(sw) } func (b bodyGen) doStruct(sw *generator.SnippetWriter) error { if len(b.t.Name.Name) == 0 { return nil } if namer.IsPrivateGoName(b.t.Name.Name) { return nil } var alias *types.Type var fields []protoField options := []string{} allOptions := types.ExtractCommentTags("+", b.t.CommentLines) for k, v := range allOptions { switch { case strings.HasPrefix(k, "protobuf.options."): key := strings.TrimPrefix(k, "protobuf.options.") switch key { case "marshal": if v[0] == "false" { if !b.omitGogo { options = append(options, "(gogoproto.marshaler) = false", 
"(gogoproto.unmarshaler) = false", "(gogoproto.sizer) = false", ) } } default: if !b.omitGogo || !strings.HasPrefix(key, "(gogoproto.") { if key == "(gogoproto.goproto_stringer)" && v[0] == "false" { options = append(options, "(gogoproto.stringer) = false") } options = append(options, fmt.Sprintf("%s = %s", key, v[0])) } } // protobuf.as allows a type to have the same message contents as another Go type case k == "protobuf.as": fields = nil if alias = b.locator.GoTypeForName(types.Name{Name: v[0]}); alias == nil { return fmt.Errorf("type %v references alias %q which does not exist", b.t, v[0]) } // protobuf.embed instructs the generator to use the named type in this package // as an embedded message. case k == "protobuf.embed": fields = []protoField{ { Tag: 1, Name: v[0], Type: &types.Type{ Name: types.Name{ Name: v[0], Package: b.localPackage.Package, Path: b.localPackage.Path, }, }, }, } } } if alias == nil { alias = b.t } // If we don't explicitly embed anything, generate fields by traversing fields. 
if fields == nil { memberFields, err := membersToFields(b.locator, alias, b.localPackage, b.omitFieldTypes) if err != nil { return fmt.Errorf("type %v cannot be converted to protobuf: %v", b.t, err) } fields = memberFields } out := sw.Out() genComment(out, b.t.CommentLines, "") sw.Do(`message $.Name.Name$ { `, b.t) if len(options) > 0 { sort.Strings(options) for _, s := range options { fmt.Fprintf(out, " option %s;\n", s) } fmt.Fprintln(out) } for i, field := range fields { genComment(out, field.CommentLines, " ") fmt.Fprintf(out, " ") switch { case field.Map: case field.Repeated: fmt.Fprintf(out, "repeated ") case field.Required: fmt.Fprintf(out, "required ") default: fmt.Fprintf(out, "optional ") } sw.Do(`$.Type|local$ $.Name$ = $.Tag$`, field) if len(field.Extras) > 0 { extras := []string{} for k, v := range field.Extras { if b.omitGogo && strings.HasPrefix(k, "(gogoproto.") { continue } extras = append(extras, fmt.Sprintf("%s = %s", k, v)) } sort.Strings(extras) if len(extras) > 0 { fmt.Fprintf(out, " [") fmt.Fprint(out, strings.Join(extras, ", ")) fmt.Fprintf(out, "]") } } fmt.Fprintf(out, ";\n") if i != len(fields)-1 { fmt.Fprintf(out, "\n") } } fmt.Fprintf(out, "}\n\n") return nil } type protoField struct { LocalPackage types.Name Tag int Name string Type *types.Type Map bool Repeated bool Optional bool Required bool Nullable bool Extras map[string]string CommentLines []string } var ( errUnrecognizedType = fmt.Errorf("did not recognize the provided type") ) func isFundamentalProtoType(t *types.Type) (*types.Type, bool) { // TODO: when we enable proto3, also include other fundamental types in the google.protobuf package // switch { // case t.Kind == types.Struct && t.Name == types.Name{Package: "time", Name: "Time"}: // return &types.Type{ // Kind: types.Protobuf, // Name: types.Name{Path: "google/protobuf/timestamp.proto", Package: "google.protobuf", Name: "Timestamp"}, // }, true // } switch t.Kind { case types.Slice: if t.Elem.Name.Name == "byte" && 
len(t.Elem.Name.Package) == 0 { return &types.Type{Name: types.Name{Name: "bytes"}, Kind: types.Protobuf}, true } case types.Builtin: switch t.Name.Name { case "string", "uint32", "int32", "uint64", "int64", "bool": return &types.Type{Name: types.Name{Name: t.Name.Name}, Kind: types.Protobuf}, true case "int": return &types.Type{Name: types.Name{Name: "int64"}, Kind: types.Protobuf}, true case "uint": return &types.Type{Name: types.Name{Name: "uint64"}, Kind: types.Protobuf}, true case "float64", "float": return &types.Type{Name: types.Name{Name: "double"}, Kind: types.Protobuf}, true case "float32": return &types.Type{Name: types.Name{Name: "float"}, Kind: types.Protobuf}, true case "uintptr": return &types.Type{Name: types.Name{Name: "uint64"}, Kind: types.Protobuf}, true } // TODO: complex? } return t, false } func memberTypeToProtobufField(locator ProtobufLocator, field *protoField, t *types.Type) error { var err error switch t.Kind { case types.Protobuf: field.Type, err = locator.ProtoTypeFor(t) case types.Builtin: field.Type, err = locator.ProtoTypeFor(t) case types.Map: valueField := &protoField{} if err := memberTypeToProtobufField(locator, valueField, t.Elem); err != nil { return err } keyField := &protoField{} if err := memberTypeToProtobufField(locator, keyField, t.Key); err != nil { return err } // All other protobuf types have kind types.Protobuf, so setting types.Map // here would be very misleading. 
field.Type = &types.Type{ Kind: types.Protobuf, Key: keyField.Type, Elem: valueField.Type, } if !strings.HasPrefix(t.Name.Name, "map[") { field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) } if k, ok := keyField.Extras["(gogoproto.casttype)"]; ok { field.Extras["(gogoproto.castkey)"] = k } if v, ok := valueField.Extras["(gogoproto.casttype)"]; ok { field.Extras["(gogoproto.castvalue)"] = v } field.Map = true case types.Pointer: if err := memberTypeToProtobufField(locator, field, t.Elem); err != nil { return err } field.Nullable = true case types.Alias: if isOptionalAlias(t) { field.Type, err = locator.ProtoTypeFor(t) field.Nullable = true } else { if err := memberTypeToProtobufField(locator, field, t.Underlying); err != nil { log.Printf("failed to alias: %s %s: err %v", t.Name, t.Underlying.Name, err) return err } // If this is not an alias to a slice, cast to the alias if !field.Repeated { if field.Extras == nil { field.Extras = make(map[string]string) } field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) } } case types.Slice: if t.Elem.Name.Name == "byte" && len(t.Elem.Name.Package) == 0 { field.Type = &types.Type{Name: types.Name{Name: "bytes"}, Kind: types.Protobuf} return nil } if err := memberTypeToProtobufField(locator, field, t.Elem); err != nil { return err } field.Repeated = true case types.Struct: if len(t.Name.Name) == 0 { return errUnrecognizedType } field.Type, err = locator.ProtoTypeFor(t) field.Nullable = false default: return errUnrecognizedType } return err } // protobufTagToField extracts information from an existing protobuf tag func protobufTagToField(tag string, field *protoField, m types.Member, t *types.Type, localPackage types.Name) error { if len(tag) == 0 || tag == "-" { return nil } // protobuf:"bytes,3,opt,name=Id,customtype=github.com/gogo/protobuf/test.Uuid" parts := strings.Split(tag, ",") if len(parts) < 3 { return fmt.Errorf("member %q of %q malformed 'protobuf' tag, 
not enough segments\n", m.Name, t.Name) } protoTag, err := strconv.Atoi(parts[1]) if err != nil { return fmt.Errorf("member %q of %q malformed 'protobuf' tag, field ID is %q which is not an integer: %v\n", m.Name, t.Name, parts[1], err) } field.Tag = protoTag // In general there is doesn't make sense to parse the protobuf tags to get the type, // as all auto-generated once will have wire type "bytes", "varint" or "fixed64". // However, sometimes we explicitly set them to have a custom serialization, e.g.: // type Time struct { // time.Time `protobuf:"Timestamp,1,req,name=time"` // } // to force the generator to use a given type (that we manually wrote serialization & // deserialization methods for). switch parts[0] { case "varint", "fixed32", "fixed64", "bytes", "group": default: var name types.Name if last := strings.LastIndex(parts[0], "."); last != -1 { prefix := parts[0][:last] name = types.Name{ Name: parts[0][last+1:], Package: prefix, Path: strings.Replace(prefix, ".", "/", -1), } } else { name = types.Name{ Name: parts[0], Package: localPackage.Package, Path: localPackage.Path, } } field.Type = &types.Type{ Name: name, Kind: types.Protobuf, } } protoExtra := make(map[string]string) for i, extra := range parts[3:] { parts := strings.SplitN(extra, "=", 2) if len(parts) != 2 { return fmt.Errorf("member %q of %q malformed 'protobuf' tag, tag %d should be key=value, got %q\n", m.Name, t.Name, i+4, extra) } switch parts[0] { case "name": protoExtra[parts[0]] = parts[1] case "casttype", "castkey", "castvalue": parts[0] = fmt.Sprintf("(gogoproto.%s)", parts[0]) protoExtra[parts[0]] = strconv.Quote(parts[1]) } } field.Extras = protoExtra if name, ok := protoExtra["name"]; ok { field.Name = name delete(protoExtra, "name") } return nil } func membersToFields(locator ProtobufLocator, t *types.Type, localPackage types.Name, omitFieldTypes map[types.Name]struct{}) ([]protoField, error) { fields := []protoField{} for _, m := range t.Members { if 
namer.IsPrivateGoName(m.Name) { // skip private fields continue } if _, ok := omitFieldTypes[types.Name{Name: m.Type.Name.Name, Package: m.Type.Name.Package}]; ok { continue } tags := reflect.StructTag(m.Tags) field := protoField{ LocalPackage: localPackage, Tag: -1, Extras: make(map[string]string), } protobufTag := tags.Get("protobuf") if protobufTag == "-" { continue } if err := protobufTagToField(protobufTag, &field, m, t, localPackage); err != nil { return nil, err } // extract information from JSON field tag if tag := tags.Get("json"); len(tag) > 0 { parts := strings.Split(tag, ",") if len(field.Name) == 0 && len(parts[0]) != 0 { field.Name = parts[0] } if field.Tag == -1 && field.Name == "-" { continue } } if field.Type == nil { if err := memberTypeToProtobufField(locator, &field, m.Type); err != nil { return nil, fmt.Errorf("unable to embed type %q as field %q in %q: %v", m.Type, field.Name, t.Name, err) } } if len(field.Name) == 0 { field.Name = namer.IL(m.Name) } if field.Map && field.Repeated { // maps cannot be repeated field.Repeated = false field.Nullable = true } if !field.Nullable { field.Extras["(gogoproto.nullable)"] = "false" } if (field.Type.Name.Name == "bytes" && field.Type.Name.Package == "") || (field.Repeated && field.Type.Name.Package == "" && namer.IsPrivateGoName(field.Type.Name.Name)) { delete(field.Extras, "(gogoproto.nullable)") } if field.Name != m.Name { field.Extras["(gogoproto.customname)"] = strconv.Quote(m.Name) } field.CommentLines = m.CommentLines fields = append(fields, field) } // assign tags highest := 0 byTag := make(map[int]*protoField) // fields are in Go struct order, which we preserve for i := range fields { field := &fields[i] tag := field.Tag if tag != -1 { if existing, ok := byTag[tag]; ok { return nil, fmt.Errorf("field %q and %q both have tag %d", field.Name, existing.Name, tag) } byTag[tag] = field } if tag > highest { highest = tag } } // starting from the highest observed tag, assign new field tags for i := 
range fields { field := &fields[i] if field.Tag != -1 { continue } highest++ field.Tag = highest byTag[field.Tag] = field } return fields, nil } func genComment(out io.Writer, lines []string, indent string) { for { l := len(lines) if l == 0 || len(lines[l-1]) != 0 { break } lines = lines[:l-1] } for _, c := range lines { if len(c) == 0 { fmt.Fprintf(out, "%s//\n", indent) // avoid trailing whitespace continue } fmt.Fprintf(out, "%s// %s\n", indent, c) } } func formatProtoFile(source []byte) ([]byte, error) { // TODO; Is there any protobuf formatter? return source, nil } func assembleProtoFile(w io.Writer, f *generator.File) { w.Write(f.Header) fmt.Fprint(w, "syntax = \"proto2\";\n\n") if len(f.PackageName) > 0 { fmt.Fprintf(w, "package %s;\n\n", f.PackageName) } if len(f.Imports) > 0 { imports := []string{} for i := range f.Imports { imports = append(imports, i) } sort.Strings(imports) for _, s := range imports { fmt.Fprintf(w, "import %q;\n", s) } fmt.Fprint(w, "\n") } if f.Vars.Len() > 0 { fmt.Fprintf(w, "%s\n", f.Vars.String()) } w.Write(f.Body.Bytes()) } func NewProtoFile() *generator.DefaultFileType { return &generator.DefaultFileType{ Format: formatProtoFile, Assemble: assembleProtoFile, } }
// NOTE(review): this excerpt is a before/after pair of ghq's list_test.go —
// the original file, then the commit title "Fix test", then the fixed file.
// The fix makes output comparison order-insensitive (sortLines) and uses
// filepath.Separator instead of a hard-coded "/" for --full-path expectations.
package main

import (
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/urfave/cli"
	"golang.org/x/xerrors"
)

// TestCommandList runs `list` with no flags and expects no error.
func TestCommandList(t *testing.T) {
	_, _, err := capture(func() {
		app := cli.NewApp()
		flagSet := flagSet("list", commandList.Flags)
		c := cli.NewContext(app, flagSet, nil)
		doList(c)
	})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// TestCommandListUnique runs `list --unique` and expects no error.
func TestCommandListUnique(t *testing.T) {
	_, _, err := capture(func() {
		app := cli.NewApp()
		flagSet := flagSet("list", commandList.Flags)
		flagSet.Parse([]string{"--unique"})
		c := cli.NewContext(app, flagSet, nil)
		doList(c)
	})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// TestCommandListUnknown checks that an unknown flag is tolerated by doList.
func TestCommandListUnknown(t *testing.T) {
	_, _, err := capture(func() {
		app := cli.NewApp()
		flagSet := flagSet("list", commandList.Flags)
		flagSet.Parse([]string{"--unknown-flag"})
		c := cli.NewContext(app, flagSet, nil)
		doList(c)
	})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// TestDoList_query (original version): table-driven test of `list <query>`
// against a fake repository tree; compares captured stdout verbatim.
func TestDoList_query(t *testing.T) {
	gitRepos := []string{
		"github.com/motemen/ghq",
		"github.com/motemen/gobump",
		"github.com/motemen/gore",
		"github.com/Songmu/gobump",
		"golang.org/x/crypt",
		"golang.org/x/image",
	}
	svnRepos := []string{
		"github.com/msh5/svntest",
	}
	testCases := []struct {
		name   string
		args   []string
		expect string
	}{{
		name:   "repo match",
		args:   []string{"ghq"},
		expect: "github.com/motemen/ghq\n",
	}, {
		name:   "unique",
		args:   []string{"--unique", "ghq"},
		expect: "ghq\n",
	}, {
		name:   "host only doesn't match",
		args:   []string{"github.com"},
		expect: "",
	}, {
		name:   "host and slash match",
		args:   []string{"golang.org/"},
		expect: "golang.org/x/crypt\ngolang.org/x/image\n",
	}, {
		name:   "host and user",
		args:   []string{"github.com/Songmu"},
		expect: "github.com/Songmu/gobump\n",
	}, {
		name:   "with scheme",
		args:   []string{"https://github.com/motemen/ghq"},
		expect: "github.com/motemen/ghq\n",
	}, {
		name:   "exact",
		args:   []string{"-e", "gobump"},
		expect: "github.com/Songmu/gobump\ngithub.com/motemen/gobump\n",
	}, {
		name:   "query",
		args:   []string{"men/go"},
		expect: "github.com/motemen/gobump\ngithub.com/motemen/gore\n",
	}, {
		name:   "exact query",
		args:   []string{"-e", "men/go"},
		expect: "",
	}, {
		name:   "vcs",
		args:   []string{"--vcs", "svn"},
		expect: "github.com/msh5/svntest\n",
	}}
	withFakeGitBackend(t, func(t *testing.T, tmproot string, _ *_cloneArgs, _ *_updateArgs) {
		// Materialize fake repositories by creating their VCS marker dirs.
		for _, r := range gitRepos {
			os.MkdirAll(filepath.Join(tmproot, r, ".git"), 0755)
		}
		for _, r := range svnRepos {
			os.MkdirAll(filepath.Join(tmproot, r, ".svn"), 0755)
		}
		for _, tc := range testCases {
			t.Run(tc.name, func(t *testing.T) {
				args := append([]string{"ghq", "list"}, tc.args...)
				out, _, _ := capture(func() {
					newApp().Run(args)
				})
				// NOTE(review): exact string comparison here depends on output
				// ordering — the follow-up commit below relaxes this.
				if out != tc.expect {
					t.Errorf("got:\n%s\nexpect:\n%s", out, tc.expect)
				}
				if strings.Contains(tc.name, "unique") {
					return
				}
				// Re-run with --full-path; expected lines are prefixed with tmproot.
				argsFull := append([]string{"ghq", "list", "--full-path"}, tc.args...)
				fullExpect := tc.expect
				if fullExpect != "" {
					fullExpect = tmproot + "/" + strings.TrimSpace(fullExpect)
					fullExpect = strings.ReplaceAll(fullExpect, "\n", "\n"+tmproot+"/")
					fullExpect += "\n"
				}
				out, _, _ = capture(func() {
					newApp().Run(argsFull)
				})
				if out != fullExpect {
					t.Errorf("got:\n%s\nexpect:\n%s", out, fullExpect)
				}
			})
		}
	})
}

// TestDoList_unique: the same repo under two roots must be listed once.
func TestDoList_unique(t *testing.T) {
	defer func(orig []string) { _localRepositoryRoots = orig }(_localRepositoryRoots)
	defer func(orig string) { os.Setenv("GHQ_ROOT", orig) }(os.Getenv("GHQ_ROOT"))
	tmp1 := newTempDir(t)
	defer os.RemoveAll(tmp1)
	tmp2 := newTempDir(t)
	defer os.RemoveAll(tmp2)
	_localRepositoryRoots = nil
	rootPaths := []string{tmp1, tmp2}
	os.Setenv("GHQ_ROOT", strings.Join(rootPaths, string(os.PathListSeparator)))
	for _, rootPath := range rootPaths {
		os.MkdirAll(filepath.Join(rootPath, "github.com/motemen/ghq/.git"), 0755)
	}
	out, _, _ := capture(func() {
		newApp().Run([]string{"ghq", "list", "--unique"})
	})
	if out != "ghq\n" {
		t.Errorf("got: %s, expect: ghq\n", out)
	}
}

// TestDoList_unknownRoot: a nonexistent GHQ_ROOT is not an error.
func TestDoList_unknownRoot(t *testing.T) {
	defer func(orig []string) { _localRepositoryRoots = orig }(_localRepositoryRoots)
	defer tmpEnv("GHQ_ROOT", "/path/to/unknown-ghq")()
	_localRepositoryRoots = nil
	err := newApp().Run([]string{"ghq", "list"})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// TestDoList_notPermittedRoot: an unreadable root yields a permission error.
func TestDoList_notPermittedRoot(t *testing.T) {
	defer func(orig []string) { _localRepositoryRoots = orig }(_localRepositoryRoots)
	tmpdir := newTempDir(t)
	defer func(dir string) {
		os.Chmod(dir, 0755)
		os.RemoveAll(dir)
	}(tmpdir)
	defer tmpEnv("GHQ_ROOT", tmpdir)()
	_localRepositoryRoots = nil
	os.Chmod(tmpdir, 0000)
	err := newApp().Run([]string{"ghq", "list"})
	// NOTE(review): message says ErrNotExist but the check is IsPermission —
	// likely a stale message; confirm intent upstream.
	if !os.IsPermission(xerrors.Unwrap(err)) {
		t.Errorf("error should be ErrNotExist, but: %s", err)
	}
}

// TestDoList_withSystemHiddenDir: an unreadable dot-directory under the root
// must not make listing fail.
func TestDoList_withSystemHiddenDir(t *testing.T) {
	defer func(orig []string) { _localRepositoryRoots = orig }(_localRepositoryRoots)
	tmpdir := newTempDir(t)
	systemHidden := filepath.Join(tmpdir, ".system")
	os.MkdirAll(systemHidden, 0000)
	defer func(dir string) {
		os.Chmod(systemHidden, 0755)
		os.RemoveAll(dir)
	}(tmpdir)
	defer tmpEnv("GHQ_ROOT", tmpdir)()
	_localRepositoryRoots = nil
	err := newApp().Run([]string{"ghq", "list"})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// NOTE(review): the token below is the commit title separating the original
// file from the fixed file; it is embedded metadata, not compilable Go.
Fix test

package main

import (
	"os"
	"path/filepath"
	"sort"
	"strings"
	"testing"

	"github.com/urfave/cli"
	"golang.org/x/xerrors"
)

// TestCommandList runs `list` with no flags and expects no error.
func TestCommandList(t *testing.T) {
	_, _, err := capture(func() {
		app := cli.NewApp()
		flagSet := flagSet("list", commandList.Flags)
		c := cli.NewContext(app, flagSet, nil)
		doList(c)
	})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// TestCommandListUnique runs `list --unique` and expects no error.
func TestCommandListUnique(t *testing.T) {
	_, _, err := capture(func() {
		app := cli.NewApp()
		flagSet := flagSet("list", commandList.Flags)
		flagSet.Parse([]string{"--unique"})
		c := cli.NewContext(app, flagSet, nil)
		doList(c)
	})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// TestCommandListUnknown checks that an unknown flag is tolerated by doList.
func TestCommandListUnknown(t *testing.T) {
	_, _, err := capture(func() {
		app := cli.NewApp()
		flagSet := flagSet("list", commandList.Flags)
		flagSet.Parse([]string{"--unknown-flag"})
		c := cli.NewContext(app, flagSet, nil)
		doList(c)
	})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// sortLines sorts the newline-separated lines of s, making output
// comparisons order-insensitive.
func sortLines(s string) string {
	ss := strings.Split(s, "\n")
	sort.Strings(ss)
	return strings.Join(ss, "\n")
}

// TestDoList_query (fixed version): same table as above, but compares
// sorted output and builds --full-path expectations with the OS separator.
func TestDoList_query(t *testing.T) {
	gitRepos := []string{
		"github.com/motemen/ghq",
		"github.com/motemen/gobump",
		"github.com/motemen/gore",
		"github.com/Songmu/gobump",
		"golang.org/x/crypt",
		"golang.org/x/image",
	}
	svnRepos := []string{
		"github.com/msh5/svntest",
	}
	testCases := []struct {
		name   string
		args   []string
		expect string
	}{{
		name:   "repo match",
		args:   []string{"ghq"},
		expect: "github.com/motemen/ghq\n",
	}, {
		name:   "unique",
		args:   []string{"--unique", "ghq"},
		expect: "ghq\n",
	}, {
		name:   "host only doesn't match",
		args:   []string{"github.com"},
		expect: "",
	}, {
		name:   "host and slash match",
		args:   []string{"golang.org/"},
		expect: "golang.org/x/crypt\ngolang.org/x/image\n",
	}, {
		name:   "host and user",
		args:   []string{"github.com/Songmu"},
		expect: "github.com/Songmu/gobump\n",
	}, {
		name:   "with scheme",
		args:   []string{"https://github.com/motemen/ghq"},
		expect: "github.com/motemen/ghq\n",
	}, {
		name:   "exact",
		args:   []string{"-e", "gobump"},
		expect: "github.com/Songmu/gobump\ngithub.com/motemen/gobump\n",
	}, {
		name:   "query",
		args:   []string{"men/go"},
		expect: "github.com/motemen/gobump\ngithub.com/motemen/gore\n",
	}, {
		name:   "exact query",
		args:   []string{"-e", "men/go"},
		expect: "",
	}, {
		name:   "vcs",
		args:   []string{"--vcs", "svn"},
		expect: "github.com/msh5/svntest\n",
	}}
	withFakeGitBackend(t, func(t *testing.T, tmproot string, _ *_cloneArgs, _ *_updateArgs) {
		// Materialize fake repositories by creating their VCS marker dirs.
		for _, r := range gitRepos {
			os.MkdirAll(filepath.Join(tmproot, r, ".git"), 0755)
		}
		for _, r := range svnRepos {
			os.MkdirAll(filepath.Join(tmproot, r, ".svn"), 0755)
		}
		for _, tc := range testCases {
			t.Run(tc.name, func(t *testing.T) {
				args := append([]string{"ghq", "list"}, tc.args...)
				out, _, _ := capture(func() {
					newApp().Run(args)
				})
				// Order-insensitive comparison (the point of this commit).
				if sortLines(out) != sortLines(tc.expect) {
					t.Errorf("got:\n%s\nexpect:\n%s", out, tc.expect)
				}
				if strings.Contains(tc.name, "unique") {
					return
				}
				// Re-run with --full-path, prefixing expectations with tmproot
				// using the platform separator.
				argsFull := append([]string{"ghq", "list", "--full-path"}, tc.args...)
				fullExpect := tc.expect
				if fullExpect != "" {
					fullExpect = tmproot + string(filepath.Separator) + strings.TrimSpace(fullExpect)
					fullExpect = strings.ReplaceAll(fullExpect, "\n", "\n"+tmproot+string(filepath.Separator))
					fullExpect += "\n"
				}
				out, _, _ = capture(func() {
					newApp().Run(argsFull)
				})
				if sortLines(out) != sortLines(fullExpect) {
					t.Errorf("got:\n%s\nexpect:\n%s", out, fullExpect)
				}
			})
		}
	})
}

// TestDoList_unique: the same repo under two roots must be listed once.
func TestDoList_unique(t *testing.T) {
	defer func(orig []string) { _localRepositoryRoots = orig }(_localRepositoryRoots)
	defer func(orig string) { os.Setenv("GHQ_ROOT", orig) }(os.Getenv("GHQ_ROOT"))
	tmp1 := newTempDir(t)
	defer os.RemoveAll(tmp1)
	tmp2 := newTempDir(t)
	defer os.RemoveAll(tmp2)
	_localRepositoryRoots = nil
	rootPaths := []string{tmp1, tmp2}
	os.Setenv("GHQ_ROOT", strings.Join(rootPaths, string(os.PathListSeparator)))
	for _, rootPath := range rootPaths {
		os.MkdirAll(filepath.Join(rootPath, "github.com/motemen/ghq/.git"), 0755)
	}
	out, _, _ := capture(func() {
		newApp().Run([]string{"ghq", "list", "--unique"})
	})
	if out != "ghq\n" {
		t.Errorf("got: %s, expect: ghq\n", out)
	}
}

// TestDoList_unknownRoot: a nonexistent GHQ_ROOT is not an error.
func TestDoList_unknownRoot(t *testing.T) {
	defer func(orig []string) { _localRepositoryRoots = orig }(_localRepositoryRoots)
	defer tmpEnv("GHQ_ROOT", "/path/to/unknown-ghq")()
	_localRepositoryRoots = nil
	err := newApp().Run([]string{"ghq", "list"})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}

// TestDoList_notPermittedRoot: an unreadable root yields a permission error.
func TestDoList_notPermittedRoot(t *testing.T) {
	defer func(orig []string) { _localRepositoryRoots = orig }(_localRepositoryRoots)
	tmpdir := newTempDir(t)
	defer func(dir string) {
		os.Chmod(dir, 0755)
		os.RemoveAll(dir)
	}(tmpdir)
	defer tmpEnv("GHQ_ROOT", tmpdir)()
	_localRepositoryRoots = nil
	os.Chmod(tmpdir, 0000)
	err := newApp().Run([]string{"ghq", "list"})
	// NOTE(review): message says ErrNotExist but the check is IsPermission —
	// likely a stale message; confirm intent upstream.
	if !os.IsPermission(xerrors.Unwrap(err)) {
		t.Errorf("error should be ErrNotExist, but: %s", err)
	}
}

// TestDoList_withSystemHiddenDir: an unreadable dot-directory under the root
// must not make listing fail.
func TestDoList_withSystemHiddenDir(t *testing.T) {
	defer func(orig []string) { _localRepositoryRoots = orig }(_localRepositoryRoots)
	tmpdir := newTempDir(t)
	systemHidden := filepath.Join(tmpdir, ".system")
	os.MkdirAll(systemHidden, 0000)
	defer func(dir string) {
		os.Chmod(systemHidden, 0755)
		os.RemoveAll(dir)
	}(tmpdir)
	defer tmpEnv("GHQ_ROOT", tmpdir)()
	_localRepositoryRoots = nil
	err := newApp().Run([]string{"ghq", "list"})
	if err != nil {
		t.Errorf("error should be nil, but: %s", err)
	}
}
// NOTE(review): this excerpt is a before/after pair of a kafka client
// constructor — the original file, then the commit message, then the
// retry-aware version.
// Copyright (C) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package kafka

import (
	"os"

	"github.com/Shopify/sarama"
)

// NewClient returns a Kafka client
func NewClient(addresses []string) (sarama.Client, error) {
	config := sarama.NewConfig()
	// Use the local hostname as the client ID; fall back to empty on error.
	hostname, err := os.Hostname()
	if err != nil {
		hostname = ""
	}
	config.ClientID = hostname
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Return.Successes = true
	return sarama.NewClient(addresses, config)
}

// NOTE(review): commit metadata separating the two versions follows.
kafka: handle sarama.ErrOutOfBrokers with retries Change-Id: I354e7a21429307ac397a1a9d9c5faacbda5529c9

// Copyright (C) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package kafka

import (
	"os"
	"time"

	"github.com/Shopify/sarama"
	"github.com/aristanetworks/glog"
)

const (
	// Delay between reconnection attempts when no broker is reachable.
	outOfBrokersBackoff = 30 * time.Second
	// Number of retries after the initial attempt.
	outOfBrokersRetries = 5
)

// NewClient returns a Kafka client
func NewClient(addresses []string) (sarama.Client, error) {
	config := sarama.NewConfig()
	// Use the local hostname as the client ID; fall back to empty on error.
	hostname, err := os.Hostname()
	if err != nil {
		hostname = ""
	}
	config.ClientID = hostname
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Return.Successes = true
	var client sarama.Client
	// Initial attempt plus outOfBrokersRetries retries, backing off between
	// attempts only when the whole cluster is unreachable.
	retries := outOfBrokersRetries + 1
	for retries > 0 {
		client, err = sarama.NewClient(addresses, config)
		retries--
		// NOTE(review): direct equality works because sarama returns this
		// sentinel unwrapped; errors.Is would be the modern equivalent.
		if err == sarama.ErrOutOfBrokers {
			glog.Errorf("Can't connect to the Kafka cluster (%d retries left): %s", retries, err)
			time.Sleep(outOfBrokersBackoff)
		} else {
			// Any other outcome (success or a different error) stops retrying.
			break
		}
	}
	return client, err
}
package llvm /* #include <llvm-c/Core.h> #include <stdlib.h> */ import "C" import "unsafe" import "errors" // TODO: Add comments // TODO: Use Go's reflection in order to simplify bindings? // TODO: Add type safety? type ( // I'm using these weird structs here, because *Ref types are pointers // and Go's spec says that I can't use a pointer as a receiver base // type. Context struct { C C.LLVMContextRef } Module struct { C C.LLVMModuleRef } Type struct { C C.LLVMTypeRef } Value struct { C C.LLVMValueRef } BasicBlock struct { C C.LLVMBasicBlockRef } Builder struct { C C.LLVMBuilderRef } ModuleProvider struct { C C.LLVMModuleProviderRef } MemoryBuffer struct { C C.LLVMMemoryBufferRef } PassManager struct { C C.LLVMPassManagerRef } Use struct { C C.LLVMUseRef } Attribute C.LLVMAttribute Opcode C.LLVMOpcode TypeKind C.LLVMTypeKind Linkage C.LLVMLinkage Visibility C.LLVMVisibility CallConv C.LLVMCallConv IntPredicate C.LLVMIntPredicate FloatPredicate C.LLVMRealPredicate ) func (c Context) IsNil() bool { return c.C == nil } func (c Module) IsNil() bool { return c.C == nil } func (c Type) IsNil() bool { return c.C == nil } func (c Value) IsNil() bool { return c.C == nil } func (c BasicBlock) IsNil() bool { return c.C == nil } func (c Builder) IsNil() bool { return c.C == nil } func (c ModuleProvider) IsNil() bool { return c.C == nil } func (c MemoryBuffer) IsNil() bool { return c.C == nil } func (c PassManager) IsNil() bool { return c.C == nil } func (c Use) IsNil() bool { return c.C == nil } // helpers func llvmTypeRefPtr(t *Type) *C.LLVMTypeRef { return (*C.LLVMTypeRef)(unsafe.Pointer(t)) } func llvmValueRefPtr(t *Value) *C.LLVMValueRef { return (*C.LLVMValueRef)(unsafe.Pointer(t)) } func llvmBasicBlockRefPtr(t *BasicBlock) *C.LLVMBasicBlockRef { return (*C.LLVMBasicBlockRef)(unsafe.Pointer(t)) } func boolToLLVMBool(b bool) C.LLVMBool { if b { return C.LLVMBool(1) } return C.LLVMBool(0) } func llvmValueRefs(values []Value) (*C.LLVMValueRef, C.unsigned) { var pt 
*C.LLVMValueRef ptlen := C.unsigned(len(values)) if ptlen > 0 { pt = llvmValueRefPtr(&values[0]) } return pt, ptlen } //------------------------------------------------------------------------- // llvm.Attribute //------------------------------------------------------------------------- const ( NoneAttribute Attribute = 0 ZExtAttribute Attribute = C.LLVMZExtAttribute SExtAttribute Attribute = C.LLVMSExtAttribute NoReturnAttribute Attribute = C.LLVMNoReturnAttribute InRegAttribute Attribute = C.LLVMInRegAttribute StructRetAttribute Attribute = C.LLVMStructRetAttribute NoUnwindAttribute Attribute = C.LLVMNoUnwindAttribute NoAliasAttribute Attribute = C.LLVMNoAliasAttribute ByValAttribute Attribute = C.LLVMByValAttribute NestAttribute Attribute = C.LLVMNestAttribute ReadNoneAttribute Attribute = C.LLVMReadNoneAttribute ReadOnlyAttribute Attribute = C.LLVMReadOnlyAttribute NoInlineAttribute Attribute = C.LLVMNoInlineAttribute AlwaysInlineAttribute Attribute = C.LLVMAlwaysInlineAttribute OptimizeForSizeAttribute Attribute = C.LLVMOptimizeForSizeAttribute StackProtectAttribute Attribute = C.LLVMStackProtectAttribute StackProtectReqAttribute Attribute = C.LLVMStackProtectReqAttribute Alignment Attribute = C.LLVMAlignment NoCaptureAttribute Attribute = C.LLVMNoCaptureAttribute NoRedZoneAttribute Attribute = C.LLVMNoRedZoneAttribute NoImplicitFloatAttribute Attribute = C.LLVMNoImplicitFloatAttribute NakedAttribute Attribute = C.LLVMNakedAttribute InlineHintAttribute Attribute = C.LLVMInlineHintAttribute StackAlignment Attribute = C.LLVMStackAlignment ) //------------------------------------------------------------------------- // llvm.Opcode //------------------------------------------------------------------------- const ( Ret Opcode = C.LLVMRet Br Opcode = C.LLVMBr Switch Opcode = C.LLVMSwitch IndirectBr Opcode = C.LLVMIndirectBr Invoke Opcode = C.LLVMInvoke Unreachable Opcode = C.LLVMUnreachable // Standard Binary Operators Add Opcode = C.LLVMAdd FAdd Opcode = C.LLVMFAdd 
Sub Opcode = C.LLVMSub FSub Opcode = C.LLVMFSub Mul Opcode = C.LLVMMul FMul Opcode = C.LLVMFMul UDiv Opcode = C.LLVMUDiv SDiv Opcode = C.LLVMSDiv FDiv Opcode = C.LLVMFDiv URem Opcode = C.LLVMURem SRem Opcode = C.LLVMSRem FRem Opcode = C.LLVMFRem // Logical Operators Shl Opcode = C.LLVMShl LShr Opcode = C.LLVMLShr AShr Opcode = C.LLVMAShr And Opcode = C.LLVMAnd Or Opcode = C.LLVMOr Xor Opcode = C.LLVMXor // Memory Operators Alloca Opcode = C.LLVMAlloca Load Opcode = C.LLVMLoad Store Opcode = C.LLVMStore GetElementPtr Opcode = C.LLVMGetElementPtr // Cast Operators Trunc Opcode = C.LLVMTrunc ZExt Opcode = C.LLVMZExt SExt Opcode = C.LLVMSExt FPToUI Opcode = C.LLVMFPToUI FPToSI Opcode = C.LLVMFPToSI UIToFP Opcode = C.LLVMUIToFP SIToFP Opcode = C.LLVMSIToFP FPTrunc Opcode = C.LLVMFPTrunc FPExt Opcode = C.LLVMFPExt PtrToInt Opcode = C.LLVMPtrToInt IntToPtr Opcode = C.LLVMIntToPtr BitCast Opcode = C.LLVMBitCast // Other Operators ICmp Opcode = C.LLVMICmp FCmp Opcode = C.LLVMFCmp PHI Opcode = C.LLVMPHI Call Opcode = C.LLVMCall Select Opcode = C.LLVMSelect // UserOp1 // UserOp2 VAArg Opcode = C.LLVMVAArg ExtractElement Opcode = C.LLVMExtractElement InsertElement Opcode = C.LLVMInsertElement ShuffleVector Opcode = C.LLVMShuffleVector ExtractValue Opcode = C.LLVMExtractValue InsertValue Opcode = C.LLVMInsertValue ) //------------------------------------------------------------------------- // llvm.TypeKind //------------------------------------------------------------------------- const ( VoidTypeKind TypeKind = C.LLVMVoidTypeKind FloatTypeKind TypeKind = C.LLVMFloatTypeKind DoubleTypeKind TypeKind = C.LLVMDoubleTypeKind X86_FP80TypeKind TypeKind = C.LLVMX86_FP80TypeKind FP128TypeKind TypeKind = C.LLVMFP128TypeKind PPC_FP128TypeKind TypeKind = C.LLVMPPC_FP128TypeKind LabelTypeKind TypeKind = C.LLVMLabelTypeKind IntegerTypeKind TypeKind = C.LLVMIntegerTypeKind FunctionTypeKind TypeKind = C.LLVMFunctionTypeKind StructTypeKind TypeKind = C.LLVMStructTypeKind ArrayTypeKind 
TypeKind = C.LLVMArrayTypeKind PointerTypeKind TypeKind = C.LLVMPointerTypeKind VectorTypeKind TypeKind = C.LLVMVectorTypeKind MetadataTypeKind TypeKind = C.LLVMMetadataTypeKind ) //------------------------------------------------------------------------- // llvm.Linkage //------------------------------------------------------------------------- const ( ExternalLinkage Linkage = C.LLVMExternalLinkage AvailableExternallyLinkage Linkage = C.LLVMAvailableExternallyLinkage LinkOnceAnyLinkage Linkage = C.LLVMLinkOnceAnyLinkage LinkOnceODRLinkage Linkage = C.LLVMLinkOnceODRLinkage WeakAnyLinkage Linkage = C.LLVMWeakAnyLinkage WeakODRLinkage Linkage = C.LLVMWeakODRLinkage AppendingLinkage Linkage = C.LLVMAppendingLinkage InternalLinkage Linkage = C.LLVMInternalLinkage PrivateLinkage Linkage = C.LLVMPrivateLinkage DLLImportLinkage Linkage = C.LLVMDLLImportLinkage DLLExportLinkage Linkage = C.LLVMDLLExportLinkage ExternalWeakLinkage Linkage = C.LLVMExternalWeakLinkage GhostLinkage Linkage = C.LLVMGhostLinkage CommonLinkage Linkage = C.LLVMCommonLinkage LinkerPrivateLinkage Linkage = C.LLVMLinkerPrivateLinkage LinkerPrivateWeakLinkage Linkage = C.LLVMLinkerPrivateWeakLinkage //LinkerPrivateWeakDefAutoLinkage Linkage = C.LLVMLinkerPrivateWeakDefAutoLinkage ) //------------------------------------------------------------------------- // llvm.Visibility //------------------------------------------------------------------------- const ( DefaultVisibility Visibility = C.LLVMDefaultVisibility HiddenVisibility Visibility = C.LLVMHiddenVisibility ProtectedVisibility Visibility = C.LLVMProtectedVisibility ) //------------------------------------------------------------------------- // llvm.CallConv //------------------------------------------------------------------------- const ( CCallConv CallConv = C.LLVMCCallConv FastCallConv CallConv = C.LLVMFastCallConv ColdCallConv CallConv = C.LLVMColdCallConv X86StdcallCallConv CallConv = C.LLVMX86StdcallCallConv X86FastcallCallConv CallConv 
= C.LLVMX86FastcallCallConv ) //------------------------------------------------------------------------- // llvm.IntPredicate //------------------------------------------------------------------------- const ( IntEQ IntPredicate = C.LLVMIntEQ IntNE IntPredicate = C.LLVMIntNE IntUGT IntPredicate = C.LLVMIntUGT IntUGE IntPredicate = C.LLVMIntUGE IntULT IntPredicate = C.LLVMIntULT IntULE IntPredicate = C.LLVMIntULE IntSGT IntPredicate = C.LLVMIntSGT IntSGE IntPredicate = C.LLVMIntSGE IntSLT IntPredicate = C.LLVMIntSLT IntSLE IntPredicate = C.LLVMIntSLE ) //------------------------------------------------------------------------- // llvm.FloatPredicate //------------------------------------------------------------------------- const ( FloatPredicateFalse FloatPredicate = C.LLVMRealPredicateFalse FloatOEQ FloatPredicate = C.LLVMRealOEQ FloatOGT FloatPredicate = C.LLVMRealOGT FloatOGE FloatPredicate = C.LLVMRealOGE FloatOLT FloatPredicate = C.LLVMRealOLT FloatOLE FloatPredicate = C.LLVMRealOLE FloatONE FloatPredicate = C.LLVMRealONE FloatORD FloatPredicate = C.LLVMRealORD FloatUNO FloatPredicate = C.LLVMRealUNO FloatUEQ FloatPredicate = C.LLVMRealUEQ FloatUGT FloatPredicate = C.LLVMRealUGT FloatUGE FloatPredicate = C.LLVMRealUGE FloatULT FloatPredicate = C.LLVMRealULT FloatULE FloatPredicate = C.LLVMRealULE FloatUNE FloatPredicate = C.LLVMRealUNE FloatPredicateTrue FloatPredicate = C.LLVMRealPredicateTrue ) //------------------------------------------------------------------------- // llvm.Context //------------------------------------------------------------------------- func NewContext() Context { return Context{C.LLVMContextCreate()} } func GlobalContext() Context { return Context{C.LLVMGetGlobalContext()} } func (c Context) Dispose() { C.LLVMContextDispose(c.C) } func (c Context) MDKindID(name string) (id int) { cname := C.CString(name) id = int(C.LLVMGetMDKindIDInContext(c.C, cname, C.unsigned(len(name)))) C.free(unsafe.Pointer(cname)) return } func MDKindID(name 
string) (id int) { cname := C.CString(name) id = int(C.LLVMGetMDKindID(cname, C.unsigned(len(name)))) C.free(unsafe.Pointer(cname)) return } //------------------------------------------------------------------------- // llvm.Module //------------------------------------------------------------------------- // Create and destroy modules. // See llvm::Module::Module. func NewModule(name string) (m Module) { cname := C.CString(name) m.C = C.LLVMModuleCreateWithName(cname) C.free(unsafe.Pointer(cname)) return } func (c Context) NewModule(name string) (m Module) { cname := C.CString(name) m.C = C.LLVMModuleCreateWithNameInContext(cname, c.C) C.free(unsafe.Pointer(cname)) return } // See llvm::Module::~Module func (m Module) Dispose() { C.LLVMDisposeModule(m.C) } // Data layout. See Module::getDataLayout. func (m Module) DataLayout() string { clayout := C.LLVMGetDataLayout(m.C) return C.GoString(clayout) } func (m Module) SetDataLayout(layout string) { clayout := C.CString(layout) C.LLVMSetDataLayout(m.C, clayout) C.free(unsafe.Pointer(clayout)) } // Target triple. See Module::getTargetTriple. func (m Module) Target() string { ctarget := C.LLVMGetTarget(m.C) return C.GoString(ctarget) } func (m Module) SetTarget(target string) { ctarget := C.CString(target) C.LLVMSetTarget(m.C, ctarget) C.free(unsafe.Pointer(ctarget)) } func (m Module) GetTypeByName(name string) (t Type) { cname := C.CString(name) t.C = C.LLVMGetTypeByName(m.C, cname) C.free(unsafe.Pointer(cname)) return } // See Module::dump. func (m Module) Dump() { C.LLVMDumpModule(m.C) } // See Module::setModuleInlineAsm. 
func (m Module) SetInlineAsm(asm string) { casm := C.CString(asm) C.LLVMSetModuleInlineAsm(m.C, casm) C.free(unsafe.Pointer(casm)) } func (m Module) AddNamedMetadataOperand(name string, operand Value) { cname := C.CString(name) C.LLVMAddNamedMetadataOperand(m.C, cname, operand.C) C.free(unsafe.Pointer(cname)) } //------------------------------------------------------------------------- // llvm.Type //------------------------------------------------------------------------- // LLVM types conform to the following hierarchy: // // types: // integer type // real type // function type // sequence types: // array type // pointer type // vector type // void type // label type // opaque type // See llvm::LLVMTypeKind::getTypeID. func (t Type) TypeKind() TypeKind { return TypeKind(C.LLVMGetTypeKind(t.C)) } // See llvm::LLVMType::getContext. func (t Type) Context() (c Context) { c.C = C.LLVMGetTypeContext(t.C) return } // Operations on integer types func (c Context) Int1Type() (t Type) { t.C = C.LLVMInt1TypeInContext(c.C); return } func (c Context) Int8Type() (t Type) { t.C = C.LLVMInt8TypeInContext(c.C); return } func (c Context) Int16Type() (t Type) { t.C = C.LLVMInt16TypeInContext(c.C); return } func (c Context) Int32Type() (t Type) { t.C = C.LLVMInt32TypeInContext(c.C); return } func (c Context) Int64Type() (t Type) { t.C = C.LLVMInt64TypeInContext(c.C); return } func (c Context) IntType() (t Type, numbits int) { t.C = C.LLVMIntTypeInContext(c.C, C.unsigned(numbits)) return } func Int1Type() (t Type) { t.C = C.LLVMInt1Type(); return } func Int8Type() (t Type) { t.C = C.LLVMInt8Type(); return } func Int16Type() (t Type) { t.C = C.LLVMInt16Type(); return } func Int32Type() (t Type) { t.C = C.LLVMInt32Type(); return } func Int64Type() (t Type) { t.C = C.LLVMInt64Type(); return } func IntType(numbits int) (t Type) { t.C = C.LLVMIntType(C.unsigned(numbits)) return } func (t Type) IntTypeWidth() int { return int(C.LLVMGetIntTypeWidth(t.C)) } // Operations on real types func (c 
Context) FloatType() (t Type) { t.C = C.LLVMFloatTypeInContext(c.C); return } func (c Context) DoubleType() (t Type) { t.C = C.LLVMDoubleTypeInContext(c.C); return } func (c Context) X86FP80Type() (t Type) { t.C = C.LLVMX86FP80TypeInContext(c.C); return } func (c Context) FP128Type() (t Type) { t.C = C.LLVMFP128TypeInContext(c.C); return } func (c Context) PPCFP128Type() (t Type) { t.C = C.LLVMPPCFP128TypeInContext(c.C); return } func FloatType() (t Type) { t.C = C.LLVMFloatType(); return } func DoubleType() (t Type) { t.C = C.LLVMDoubleType(); return } func X86FP80Type() (t Type) { t.C = C.LLVMX86FP80Type(); return } func FP128Type() (t Type) { t.C = C.LLVMFP128Type(); return } func PPCFP128Type() (t Type) { t.C = C.LLVMPPCFP128Type(); return } // Operations on function types func FunctionType(returnType Type, paramTypes []Type, isVarArg bool) (t Type) { var pt *C.LLVMTypeRef var ptlen C.unsigned if len(paramTypes) > 0 { pt = llvmTypeRefPtr(&paramTypes[0]) ptlen = C.unsigned(len(paramTypes)) } t.C = C.LLVMFunctionType(returnType.C, pt, ptlen, boolToLLVMBool(isVarArg)) return } func (t Type) IsFunctionVarArg() bool { return C.LLVMIsFunctionVarArg(t.C) != 0 } func (t Type) ReturnType() (rt Type) { rt.C = C.LLVMGetReturnType(t.C); return } func (t Type) ParamTypesCount() int { return int(C.LLVMCountParamTypes(t.C)) } func (t Type) ParamTypes() []Type { count := t.ParamTypesCount() if count > 0 { out := make([]Type, count) C.LLVMGetParamTypes(t.C, llvmTypeRefPtr(&out[0])) return out } return nil } // Operations on struct types func (c Context) StructType(elementTypes []Type, packed bool) (t Type) { var pt *C.LLVMTypeRef var ptlen C.unsigned if len(elementTypes) > 0 { pt = llvmTypeRefPtr(&elementTypes[0]) ptlen = C.unsigned(len(elementTypes)) } t.C = C.LLVMStructTypeInContext(c.C, pt, ptlen, boolToLLVMBool(packed)) return } func StructType(elementTypes []Type, packed bool) (t Type) { var pt *C.LLVMTypeRef var ptlen C.unsigned if len(elementTypes) > 0 { pt = 
llvmTypeRefPtr(&elementTypes[0]) ptlen = C.unsigned(len(elementTypes)) } t.C = C.LLVMStructType(pt, ptlen, boolToLLVMBool(packed)) return } func (c Context) StructCreateNamed(name string) (t Type) { cname := C.CString(name) t.C = C.LLVMStructCreateNamed(c.C, cname) C.free(unsafe.Pointer(cname)) return } func (t Type) StructSetBody(elementTypes []Type, packed bool) { var pt *C.LLVMTypeRef var ptlen C.unsigned if len(elementTypes) > 0 { pt = llvmTypeRefPtr(&elementTypes[0]) ptlen = C.unsigned(len(elementTypes)) } C.LLVMStructSetBody(t.C, pt, ptlen, boolToLLVMBool(packed)) } func (t Type) IsStructPacked() bool { return C.LLVMIsPackedStruct(t.C) != 0 } func (t Type) StructElementTypesCount() int { return int(C.LLVMCountStructElementTypes(t.C)) } func (t Type) StructElementTypes() []Type { out := make([]Type, t.StructElementTypesCount()) if len(out) > 0 { C.LLVMGetStructElementTypes(t.C, llvmTypeRefPtr(&out[0])) } return out } // Operations on array, pointer, and vector types (sequence types) func ArrayType(elementType Type, elementCount int) (t Type) { t.C = C.LLVMArrayType(elementType.C, C.unsigned(elementCount)) return } func PointerType(elementType Type, addressSpace int) (t Type) { t.C = C.LLVMPointerType(elementType.C, C.unsigned(addressSpace)) return } func VectorType(elementType Type, elementCount int) (t Type) { t.C = C.LLVMVectorType(elementType.C, C.unsigned(elementCount)) return } func (t Type) ElementType() (rt Type) { rt.C = C.LLVMGetElementType(t.C); return } func (t Type) ArrayLength() int { return int(C.LLVMGetArrayLength(t.C)) } func (t Type) PointerAddressSpace() int { return int(C.LLVMGetPointerAddressSpace(t.C)) } func (t Type) VectorSize() int { return int(C.LLVMGetVectorSize(t.C)) } // Operations on other types func (c Context) VoidType() (t Type) { t.C = C.LLVMVoidTypeInContext(c.C); return } func (c Context) LabelType() (t Type) { t.C = C.LLVMLabelTypeInContext(c.C); return } func VoidType() (t Type) { t.C = C.LLVMVoidType(); return } func 
LabelType() (t Type) { t.C = C.LLVMLabelType(); return } //------------------------------------------------------------------------- // llvm.Value //------------------------------------------------------------------------- // Operations on all values func (v Value) Type() (t Type) { t.C = C.LLVMTypeOf(v.C); return } func (v Value) Name() string { return C.GoString(C.LLVMGetValueName(v.C)) } func (v Value) SetName(name string) { cname := C.CString(name) C.LLVMSetValueName(v.C, cname) C.free(unsafe.Pointer(cname)) } func (v Value) Dump() { C.LLVMDumpValue(v.C) } func (v Value) ReplaceAllUsesWith(nv Value) { C.LLVMReplaceAllUsesWith(v.C, nv.C) } func (v Value) HasMetadata() bool { return C.LLVMHasMetadata(v.C) != 0 } func (v Value) Metadata(kind int) (rv Value) { rv.C = C.LLVMGetMetadata(v.C, C.unsigned(kind)) return } func (v Value) SetMetadata(kind int, node Value) { C.LLVMSetMetadata(v.C, C.unsigned(kind), node.C) } // The bulk of LLVM's object model consists of values, which comprise a very // rich type hierarchy. 
//#define LLVM_FOR_EACH_VALUE_SUBCLASS(macro) \ // macro(Argument) \ // macro(BasicBlock) \ // macro(InlineAsm) \ // macro(User) \ // macro(Constant) \ // macro(ConstantAggregateZero) \ // macro(ConstantArray) \ // macro(ConstantExpr) \ // macro(ConstantFP) \ // macro(ConstantInt) \ // macro(ConstantPointerNull) \ // macro(ConstantStruct) \ // macro(ConstantVector) \ // macro(GlobalValue) \ // macro(Function) \ // macro(GlobalAlias) \ // macro(GlobalVariable) \ // macro(UndefValue) \ // macro(Instruction) \ // macro(BinaryOperator) \ // macro(CallInst) \ // macro(IntrinsicInst) \ // macro(DbgInfoIntrinsic) \ // macro(DbgDeclareInst) \ // macro(EHSelectorInst) \ // macro(MemIntrinsic) \ // macro(MemCpyInst) \ // macro(MemMoveInst) \ // macro(MemSetInst) \ // macro(CmpInst) \ // macro(FCmpInst) \ // macro(ICmpInst) \ // macro(ExtractElementInst) \ // macro(GetElementPtrInst) \ // macro(InsertElementInst) \ // macro(InsertValueInst) \ // macro(PHINode) \ // macro(SelectInst) \ // macro(ShuffleVectorInst) \ // macro(StoreInst) \ // macro(TerminatorInst) \ // macro(BranchInst) \ // macro(InvokeInst) \ // macro(ReturnInst) \ // macro(SwitchInst) \ // macro(UnreachableInst) \ // macro(UnwindInst) \ // macro(UnaryInstruction) \ // macro(AllocaInst) \ // macro(CastInst) \ // macro(BitCastInst) \ // macro(FPExtInst) \ // macro(FPToSIInst) \ // macro(FPToUIInst) \ // macro(FPTruncInst) \ // macro(IntToPtrInst) \ // macro(PtrToIntInst) \ // macro(SExtInst) \ // macro(SIToFPInst) \ // macro(TruncInst) \ // macro(UIToFPInst) \ // macro(ZExtInst) \ // macro(ExtractValueInst) \ // macro(LoadInst) \ // macro(VAArgInst) //#define LLVM_DECLARE_VALUE_CAST(name) \ // func (v Value) IsA##name() (rv Value) { rv.C = C.LLVMIsA##name(v.C); return } || //LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DECLARE_VALUE_CAST) // Conversion functions. Generated using preprocess statements above. Return // the input value if it is an instance of the specified class, otherwise NULL. 
// See llvm::dyn_cast_or_null<>.

// Each IsA* helper returns the receiver when it is an instance of the named
// LLVM subclass, and a Value wrapping a NULL handle otherwise.
func (v Value) IsAArgument() (rv Value) { rv.C = C.LLVMIsAArgument(v.C); return }
func (v Value) IsABasicBlock() (rv Value) { rv.C = C.LLVMIsABasicBlock(v.C); return }
func (v Value) IsAInlineAsm() (rv Value) { rv.C = C.LLVMIsAInlineAsm(v.C); return }
func (v Value) IsAUser() (rv Value) { rv.C = C.LLVMIsAUser(v.C); return }
func (v Value) IsAConstant() (rv Value) { rv.C = C.LLVMIsAConstant(v.C); return }
func (v Value) IsAConstantAggregateZero() (rv Value) {
	rv.C = C.LLVMIsAConstantAggregateZero(v.C)
	return
}
func (v Value) IsAConstantArray() (rv Value) { rv.C = C.LLVMIsAConstantArray(v.C); return }
func (v Value) IsAConstantExpr() (rv Value) { rv.C = C.LLVMIsAConstantExpr(v.C); return }
func (v Value) IsAConstantFP() (rv Value) { rv.C = C.LLVMIsAConstantFP(v.C); return }
func (v Value) IsAConstantInt() (rv Value) { rv.C = C.LLVMIsAConstantInt(v.C); return }
func (v Value) IsAConstantPointerNull() (rv Value) { rv.C = C.LLVMIsAConstantPointerNull(v.C); return }
func (v Value) IsAConstantStruct() (rv Value) { rv.C = C.LLVMIsAConstantStruct(v.C); return }
func (v Value) IsAConstantVector() (rv Value) { rv.C = C.LLVMIsAConstantVector(v.C); return }
func (v Value) IsAGlobalValue() (rv Value) { rv.C = C.LLVMIsAGlobalValue(v.C); return }
func (v Value) IsAFunction() (rv Value) { rv.C = C.LLVMIsAFunction(v.C); return }
func (v Value) IsAGlobalAlias() (rv Value) { rv.C = C.LLVMIsAGlobalAlias(v.C); return }
func (v Value) IsAGlobalVariable() (rv Value) { rv.C = C.LLVMIsAGlobalVariable(v.C); return }
func (v Value) IsAUndefValue() (rv Value) { rv.C = C.LLVMIsAUndefValue(v.C); return }
func (v Value) IsAInstruction() (rv Value) { rv.C = C.LLVMIsAInstruction(v.C); return }
func (v Value) IsABinaryOperator() (rv Value) { rv.C = C.LLVMIsABinaryOperator(v.C); return }
func (v Value) IsACallInst() (rv Value) { rv.C = C.LLVMIsACallInst(v.C); return }
func (v Value) IsAIntrinsicInst() (rv Value) { rv.C = C.LLVMIsAIntrinsicInst(v.C); return }
func (v
Value) IsADbgInfoIntrinsic() (rv Value) { rv.C = C.LLVMIsADbgInfoIntrinsic(v.C); return }

// More dyn_cast_or_null-style helpers: intrinsic, memory, comparison and
// vector/aggregate instruction subclasses.
func (v Value) IsADbgDeclareInst() (rv Value) { rv.C = C.LLVMIsADbgDeclareInst(v.C); return }
func (v Value) IsAMemIntrinsic() (rv Value) { rv.C = C.LLVMIsAMemIntrinsic(v.C); return }
func (v Value) IsAMemCpyInst() (rv Value) { rv.C = C.LLVMIsAMemCpyInst(v.C); return }
func (v Value) IsAMemMoveInst() (rv Value) { rv.C = C.LLVMIsAMemMoveInst(v.C); return }
func (v Value) IsAMemSetInst() (rv Value) { rv.C = C.LLVMIsAMemSetInst(v.C); return }
func (v Value) IsACmpInst() (rv Value) { rv.C = C.LLVMIsACmpInst(v.C); return }
func (v Value) IsAFCmpInst() (rv Value) { rv.C = C.LLVMIsAFCmpInst(v.C); return }
func (v Value) IsAICmpInst() (rv Value) { rv.C = C.LLVMIsAICmpInst(v.C); return }
func (v Value) IsAExtractElementInst() (rv Value) { rv.C = C.LLVMIsAExtractElementInst(v.C); return }
func (v Value) IsAGetElementPtrInst() (rv Value) { rv.C = C.LLVMIsAGetElementPtrInst(v.C); return }
func (v Value) IsAInsertElementInst() (rv Value) { rv.C = C.LLVMIsAInsertElementInst(v.C); return }
func (v Value) IsAInsertValueInst() (rv Value) { rv.C = C.LLVMIsAInsertValueInst(v.C); return }
func (v Value) IsAPHINode() (rv Value) { rv.C = C.LLVMIsAPHINode(v.C); return }
func (v Value) IsASelectInst() (rv Value) { rv.C = C.LLVMIsASelectInst(v.C); return }
func (v Value) IsAShuffleVectorInst() (rv Value) { rv.C = C.LLVMIsAShuffleVectorInst(v.C); return }
func (v Value) IsAStoreInst() (rv Value) { rv.C = C.LLVMIsAStoreInst(v.C); return }
func (v Value) IsATerminatorInst() (rv Value) { rv.C = C.LLVMIsATerminatorInst(v.C); return }
func (v Value) IsABranchInst() (rv Value) { rv.C = C.LLVMIsABranchInst(v.C); return }
func (v Value) IsAInvokeInst() (rv Value) { rv.C = C.LLVMIsAInvokeInst(v.C); return }
func (v Value) IsAReturnInst() (rv Value) { rv.C = C.LLVMIsAReturnInst(v.C); return }
func (v Value) IsASwitchInst() (rv Value) { rv.C = C.LLVMIsASwitchInst(v.C); return }
func (v Value) IsAUnreachableInst()
(rv Value) { rv.C = C.LLVMIsAUnreachableInst(v.C); return }

// Cast-instruction subclasses.
func (v Value) IsAUnaryInstruction() (rv Value) { rv.C = C.LLVMIsAUnaryInstruction(v.C); return }
func (v Value) IsAAllocaInst() (rv Value) { rv.C = C.LLVMIsAAllocaInst(v.C); return }
func (v Value) IsACastInst() (rv Value) { rv.C = C.LLVMIsACastInst(v.C); return }
func (v Value) IsABitCastInst() (rv Value) { rv.C = C.LLVMIsABitCastInst(v.C); return }
func (v Value) IsAFPExtInst() (rv Value) { rv.C = C.LLVMIsAFPExtInst(v.C); return }
func (v Value) IsAFPToSIInst() (rv Value) { rv.C = C.LLVMIsAFPToSIInst(v.C); return }
func (v Value) IsAFPToUIInst() (rv Value) { rv.C = C.LLVMIsAFPToUIInst(v.C); return }
func (v Value) IsAFPTruncInst() (rv Value) { rv.C = C.LLVMIsAFPTruncInst(v.C); return }
func (v Value) IsAIntToPtrInst() (rv Value) { rv.C = C.LLVMIsAIntToPtrInst(v.C); return }
func (v Value) IsAPtrToIntInst() (rv Value) { rv.C = C.LLVMIsAPtrToIntInst(v.C); return }
func (v Value) IsASExtInst() (rv Value) { rv.C = C.LLVMIsASExtInst(v.C); return }
func (v Value) IsASIToFPInst() (rv Value) { rv.C = C.LLVMIsASIToFPInst(v.C); return }
func (v Value) IsATruncInst() (rv Value) { rv.C = C.LLVMIsATruncInst(v.C); return }
func (v Value) IsAUIToFPInst() (rv Value) { rv.C = C.LLVMIsAUIToFPInst(v.C); return }
func (v Value) IsAZExtInst() (rv Value) { rv.C = C.LLVMIsAZExtInst(v.C); return }
func (v Value) IsAExtractValueInst() (rv Value) { rv.C = C.LLVMIsAExtractValueInst(v.C); return }
func (v Value) IsALoadInst() (rv Value) { rv.C = C.LLVMIsALoadInst(v.C); return }
func (v Value) IsAVAArgInst() (rv Value) { rv.C = C.LLVMIsAVAArgInst(v.C); return }

// Operations on Uses: iterate the use-list of a value.
func (v Value) FirstUse() (u Use) { u.C = C.LLVMGetFirstUse(v.C); return }
func (u Use) NextUse() (ru Use) { ru.C = C.LLVMGetNextUse(u.C); return }
func (u Use) User() (v Value) { v.C = C.LLVMGetUser(u.C); return }
func (u Use) UsedValue() (v Value) { v.C = C.LLVMGetUsedValue(u.C); return }

// Operations on Users
func (v Value) Operand(i int) (rv Value)
{ rv.C = C.LLVMGetOperand(v.C, C.unsigned(i)); return }
func (v Value) SetOperand(i int, op Value) { C.LLVMSetOperand(v.C, C.unsigned(i), op.C) }
func (v Value) OperandsCount() int { return int(C.LLVMGetNumOperands(v.C)) }

// Operations on constants of any type
func ConstNull(t Type) (v Value) { v.C = C.LLVMConstNull(t.C); return }
func ConstAllOnes(t Type) (v Value) { v.C = C.LLVMConstAllOnes(t.C); return }
func Undef(t Type) (v Value) { v.C = C.LLVMGetUndef(t.C); return }
func (v Value) IsConstant() bool { return C.LLVMIsConstant(v.C) != 0 }
func (v Value) IsNull() bool { return C.LLVMIsNull(v.C) != 0 }
func (v Value) IsUndef() bool { return C.LLVMIsUndef(v.C) != 0 }
func ConstPointerNull(t Type) (v Value) { v.C = C.LLVMConstPointerNull(t.C); return }

// Operations on metadata
// MDString creates a metadata string in context c; the C string copy is freed
// after LLVM has consumed it.
func (c Context) MDString(str string) (v Value) {
	cstr := C.CString(str)
	v.C = C.LLVMMDStringInContext(c.C, cstr, C.unsigned(len(str)))
	C.free(unsafe.Pointer(cstr))
	return
}
func MDString(str string) (v Value) {
	cstr := C.CString(str)
	v.C = C.LLVMMDString(cstr, C.unsigned(len(str)))
	C.free(unsafe.Pointer(cstr))
	return
}
func (c Context) MDNode(vals []Value) (v Value) {
	ptr, nvals := llvmValueRefs(vals)
	v.C = C.LLVMMDNodeInContext(c.C, ptr, nvals)
	return
}
func MDNode(vals []Value) (v Value) {
	ptr, nvals := llvmValueRefs(vals)
	v.C = C.LLVMMDNode(ptr, nvals)
	return
}

// Operations on scalar constants
func ConstInt(t Type, n uint64, signExtend bool) (v Value) {
	v.C = C.LLVMConstInt(t.C, C.ulonglong(n), boolToLLVMBool(signExtend))
	return
}
func ConstIntFromString(t Type, str string, radix int) (v Value) {
	cstr := C.CString(str)
	v.C = C.LLVMConstIntOfString(t.C, cstr, C.uint8_t(radix))
	C.free(unsafe.Pointer(cstr))
	return
}
func ConstFloat(t Type, n float64) (v Value) {
	v.C = C.LLVMConstReal(t.C, C.double(n))
	return
}
func ConstFloatFromString(t Type, str string) (v Value) {
	cstr := C.CString(str)
	v.C = C.LLVMConstRealOfString(t.C, cstr)
	C.free(unsafe.Pointer(cstr))
	return
}
func (v Value)
ZExtValue() uint64 { return uint64(C.LLVMConstIntGetZExtValue(v.C)) }
func (v Value) SExtValue() int64 { return int64(C.LLVMConstIntGetSExtValue(v.C)) }

// Operations on composite constants
// Note: addnull is inverted when passed to LLVM (DontNullTerminate semantics).
func (c Context) ConstString(str string, addnull bool) (v Value) {
	cstr := C.CString(str)
	v.C = C.LLVMConstStringInContext(c.C, cstr, C.unsigned(len(str)), boolToLLVMBool(!addnull))
	C.free(unsafe.Pointer(cstr))
	return
}
func (c Context) ConstStruct(constVals []Value, packed bool) (v Value) {
	ptr, nvals := llvmValueRefs(constVals)
	v.C = C.LLVMConstStructInContext(c.C, ptr, nvals, boolToLLVMBool(packed))
	return
}
func ConstString(str string, addnull bool) (v Value) {
	cstr := C.CString(str)
	v.C = C.LLVMConstString(cstr, C.unsigned(len(str)), boolToLLVMBool(!addnull))
	C.free(unsafe.Pointer(cstr))
	return
}
func ConstArray(t Type, constVals []Value) (v Value) {
	ptr, nvals := llvmValueRefs(constVals)
	v.C = C.LLVMConstArray(t.C, ptr, nvals)
	return
}
func ConstStruct(constVals []Value, packed bool) (v Value) {
	ptr, nvals := llvmValueRefs(constVals)
	v.C = C.LLVMConstStruct(ptr, nvals, boolToLLVMBool(packed))
	return
}
// NOTE(review): packed is accepted but never used (the underlying
// LLVMConstVector call takes no packed flag); parameter kept for interface
// compatibility -- confirm with callers.
func ConstVector(scalarConstVals []Value, packed bool) (v Value) {
	ptr, nvals := llvmValueRefs(scalarConstVals)
	v.C = C.LLVMConstVector(ptr, nvals)
	return
}

// Constant expressions
func (v Value) Opcode() Opcode { return Opcode(C.LLVMGetConstOpcode(v.C)) }
func (v Value) InstructionOpcode() Opcode { return Opcode(C.LLVMGetInstructionOpcode(v.C)) }
func AlignOf(t Type) (v Value) { v.C = C.LLVMAlignOf(t.C); return }
func SizeOf(t Type) (v Value) { v.C = C.LLVMSizeOf(t.C); return }
func ConstNeg(v Value) (rv Value) { rv.C = C.LLVMConstNeg(v.C); return }
func ConstNSWNeg(v Value) (rv Value) { rv.C = C.LLVMConstNSWNeg(v.C); return }
func ConstNUWNeg(v Value) (rv Value) { rv.C = C.LLVMConstNUWNeg(v.C); return }
func ConstFNeg(v Value) (rv Value) { rv.C = C.LLVMConstFNeg(v.C); return }
func ConstNot(v Value) (rv Value) { rv.C = C.LLVMConstNot(v.C); return }
func ConstAdd(lhs, rhs
Value) (v Value) { v.C = C.LLVMConstAdd(lhs.C, rhs.C); return }

// Constant-expression binary arithmetic: each wraps the corresponding
// LLVMConst* constant folder over two constant operands.
func ConstNSWAdd(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNSWAdd(lhs.C, rhs.C); return }
func ConstNUWAdd(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNUWAdd(lhs.C, rhs.C); return }
func ConstFAdd(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFAdd(lhs.C, rhs.C); return }
func ConstSub(lhs, rhs Value) (v Value) { v.C = C.LLVMConstSub(lhs.C, rhs.C); return }
func ConstNSWSub(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNSWSub(lhs.C, rhs.C); return }
func ConstNUWSub(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNUWSub(lhs.C, rhs.C); return }
func ConstFSub(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFSub(lhs.C, rhs.C); return }
func ConstMul(lhs, rhs Value) (v Value) { v.C = C.LLVMConstMul(lhs.C, rhs.C); return }
func ConstNSWMul(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNSWMul(lhs.C, rhs.C); return }
func ConstNUWMul(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNUWMul(lhs.C, rhs.C); return }
func ConstFMul(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFMul(lhs.C, rhs.C); return }
func ConstUDiv(lhs, rhs Value) (v Value) { v.C = C.LLVMConstUDiv(lhs.C, rhs.C); return }
func ConstSDiv(lhs, rhs Value) (v Value) { v.C = C.LLVMConstSDiv(lhs.C, rhs.C); return }
func ConstExactSDiv(lhs, rhs Value) (v Value) { v.C = C.LLVMConstExactSDiv(lhs.C, rhs.C); return }
func ConstFDiv(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFDiv(lhs.C, rhs.C); return }
func ConstURem(lhs, rhs Value) (v Value) { v.C = C.LLVMConstURem(lhs.C, rhs.C); return }
func ConstSRem(lhs, rhs Value) (v Value) { v.C = C.LLVMConstSRem(lhs.C, rhs.C); return }
func ConstFRem(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFRem(lhs.C, rhs.C); return }
func ConstAnd(lhs, rhs Value) (v Value) { v.C = C.LLVMConstAnd(lhs.C, rhs.C); return }
func ConstOr(lhs, rhs Value) (v Value) { v.C = C.LLVMConstOr(lhs.C, rhs.C); return }
func ConstXor(lhs, rhs Value) (v Value) { v.C = C.LLVMConstXor(lhs.C, rhs.C); return }
func ConstICmp(pred IntPredicate, lhs, rhs
Value) (v Value) {
	v.C = C.LLVMConstICmp(C.LLVMIntPredicate(pred), lhs.C, rhs.C)
	return
}
func ConstFCmp(pred FloatPredicate, lhs, rhs Value) (v Value) {
	v.C = C.LLVMConstFCmp(C.LLVMRealPredicate(pred), lhs.C, rhs.C)
	return
}
func ConstShl(lhs, rhs Value) (v Value) { v.C = C.LLVMConstShl(lhs.C, rhs.C); return }
func ConstLShr(lhs, rhs Value) (v Value) { v.C = C.LLVMConstLShr(lhs.C, rhs.C); return }
func ConstAShr(lhs, rhs Value) (v Value) { v.C = C.LLVMConstAShr(lhs.C, rhs.C); return }
func ConstGEP(v Value, indices []Value) (rv Value) {
	ptr, nvals := llvmValueRefs(indices)
	rv.C = C.LLVMConstGEP(v.C, ptr, nvals)
	return
}
func ConstInBoundsGEP(v Value, indices []Value) (rv Value) {
	ptr, nvals := llvmValueRefs(indices)
	rv.C = C.LLVMConstInBoundsGEP(v.C, ptr, nvals)
	return
}

// Constant cast expressions.
func ConstTrunc(v Value, t Type) (rv Value) { rv.C = C.LLVMConstTrunc(v.C, t.C); return }
func ConstSExt(v Value, t Type) (rv Value) { rv.C = C.LLVMConstSExt(v.C, t.C); return }
func ConstZExt(v Value, t Type) (rv Value) { rv.C = C.LLVMConstZExt(v.C, t.C); return }
func ConstFPTrunc(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPTrunc(v.C, t.C); return }
func ConstFPExt(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPExt(v.C, t.C); return }
func ConstUIToFP(v Value, t Type) (rv Value) { rv.C = C.LLVMConstUIToFP(v.C, t.C); return }
func ConstSIToFP(v Value, t Type) (rv Value) { rv.C = C.LLVMConstSIToFP(v.C, t.C); return }
func ConstFPToUI(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPToUI(v.C, t.C); return }
func ConstFPToSI(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPToSI(v.C, t.C); return }
func ConstPtrToInt(v Value, t Type) (rv Value) { rv.C = C.LLVMConstPtrToInt(v.C, t.C); return }
func ConstIntToPtr(v Value, t Type) (rv Value) { rv.C = C.LLVMConstIntToPtr(v.C, t.C); return }
func ConstBitCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstBitCast(v.C, t.C); return }
func ConstZExtOrBitCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstZExtOrBitCast(v.C, t.C); return }
func
ConstSExtOrBitCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstSExtOrBitCast(v.C, t.C); return }
func ConstTruncOrBitCast(v Value, t Type) (rv Value) {
	rv.C = C.LLVMConstTruncOrBitCast(v.C, t.C)
	return
}
func ConstPointerCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstPointerCast(v.C, t.C); return }
func ConstIntCast(v Value, t Type, signed bool) (rv Value) {
	rv.C = C.LLVMConstIntCast(v.C, t.C, boolToLLVMBool(signed))
	return
}
func ConstFPCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPCast(v.C, t.C); return }
func ConstSelect(cond, iftrue, iffalse Value) (rv Value) {
	rv.C = C.LLVMConstSelect(cond.C, iftrue.C, iffalse.C)
	return
}
func ConstExtractElement(vec, i Value) (rv Value) {
	rv.C = C.LLVMConstExtractElement(vec.C, i.C)
	return
}
func ConstInsertElement(vec, elem, i Value) (rv Value) {
	rv.C = C.LLVMConstInsertElement(vec.C, elem.C, i.C)
	return
}
func ConstShuffleVector(veca, vecb, mask Value) (rv Value) {
	rv.C = C.LLVMConstShuffleVector(veca.C, vecb.C, mask.C)
	return
}

//TODO
//LLVMValueRef LLVMConstExtractValue(LLVMValueRef AggConstant, unsigned *IdxList,
// unsigned NumIdx);

// ConstExtractValue panics when indices is empty: the underlying C call
// requires at least one index.
func ConstExtractValue(agg Value, indices []uint32) (rv Value) {
	n := len(indices)
	if n == 0 {
		panic("one or more indices are required")
	}
	ptr := (*C.unsigned)(&indices[0])
	rv.C = C.LLVMConstExtractValue(agg.C, ptr, C.unsigned(n))
	return
}

// ConstInsertValue panics when indices is empty, mirroring ConstExtractValue.
func ConstInsertValue(agg, val Value, indices []uint32) (rv Value) {
	n := len(indices)
	if n == 0 {
		panic("one or more indices are required")
	}
	ptr := (*C.unsigned)(&indices[0])
	rv.C = C.LLVMConstInsertValue(agg.C, val.C, ptr, C.unsigned(n))
	return
}
func BlockAddress(f Value, bb BasicBlock) (v Value) {
	v.C = C.LLVMBlockAddress(f.C, bb.C)
	return
}

// Operations on global variables, functions, and aliases (globals)
func (v Value) GlobalParent() (m Module) { m.C = C.LLVMGetGlobalParent(v.C); return }
func (v Value) IsDeclaration() bool { return C.LLVMIsDeclaration(v.C) != 0 }
func (v Value) Linkage() Linkage { return
Linkage(C.LLVMGetLinkage(v.C)) }

// Linkage/section/visibility/alignment accessors for globals.
func (v Value) SetLinkage(l Linkage) { C.LLVMSetLinkage(v.C, C.LLVMLinkage(l)) }
func (v Value) Section() string { return C.GoString(C.LLVMGetSection(v.C)) }
func (v Value) SetSection(str string) {
	cstr := C.CString(str)
	C.LLVMSetSection(v.C, cstr)
	C.free(unsafe.Pointer(cstr))
}
func (v Value) Visibility() Visibility { return Visibility(C.LLVMGetVisibility(v.C)) }
func (v Value) SetVisibility(vi Visibility) { C.LLVMSetVisibility(v.C, C.LLVMVisibility(vi)) }
func (v Value) Alignment() int { return int(C.LLVMGetAlignment(v.C)) }
func (v Value) SetAlignment(a int) { C.LLVMSetAlignment(v.C, C.unsigned(a)) }

// Operations on global variables
func AddGlobal(m Module, t Type, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMAddGlobal(m.C, t.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func AddGlobalInAddressSpace(m Module, t Type, name string, addressSpace int) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMAddGlobalInAddressSpace(m.C, t.C, cname, C.unsigned(addressSpace))
	C.free(unsafe.Pointer(cname))
	return
}
func (m Module) NamedGlobal(name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMGetNamedGlobal(m.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (m Module) FirstGlobal() (v Value) { v.C = C.LLVMGetFirstGlobal(m.C); return }
func (m Module) LastGlobal() (v Value) { v.C = C.LLVMGetLastGlobal(m.C); return }
func NextGlobal(v Value) (rv Value) { rv.C = C.LLVMGetNextGlobal(v.C); return }
func PrevGlobal(v Value) (rv Value) { rv.C = C.LLVMGetPreviousGlobal(v.C); return }
func (v Value) EraseFromParentAsGlobal() { C.LLVMDeleteGlobal(v.C) }
func (v Value) Initializer() (rv Value) { rv.C = C.LLVMGetInitializer(v.C); return }
func (v Value) SetInitializer(cv Value) { C.LLVMSetInitializer(v.C, cv.C) }
func (v Value) IsThreadLocal() bool { return C.LLVMIsThreadLocal(v.C) != 0 }
func (v Value) SetThreadLocal(tl bool) { C.LLVMSetThreadLocal(v.C, boolToLLVMBool(tl)) }
func (v Value) IsGlobalConstant() bool {
return C.LLVMIsGlobalConstant(v.C) != 0 }
func (v Value) SetGlobalConstant(gc bool) { C.LLVMSetGlobalConstant(v.C, boolToLLVMBool(gc)) }

// Operations on aliases
func AddAlias(m Module, t Type, aliasee Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMAddAlias(m.C, t.C, aliasee.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}

// Operations on functions
func AddFunction(m Module, name string, ft Type) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMAddFunction(m.C, cname, ft.C)
	C.free(unsafe.Pointer(cname))
	return
}
func (m Module) NamedFunction(name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMGetNamedFunction(m.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (m Module) FirstFunction() (v Value) { v.C = C.LLVMGetFirstFunction(m.C); return }
func (m Module) LastFunction() (v Value) { v.C = C.LLVMGetLastFunction(m.C); return }
func NextFunction(v Value) (rv Value) { rv.C = C.LLVMGetNextFunction(v.C); return }
func PrevFunction(v Value) (rv Value) { rv.C = C.LLVMGetPreviousFunction(v.C); return }
func (v Value) EraseFromParentAsFunction() { C.LLVMDeleteFunction(v.C) }
func (v Value) IntrinsicID() int { return int(C.LLVMGetIntrinsicID(v.C)) }
func (v Value) FunctionCallConv() CallConv {
	return CallConv(C.LLVMCallConv(C.LLVMGetFunctionCallConv(v.C)))
}
func (v Value) SetFunctionCallConv(cc CallConv) { C.LLVMSetFunctionCallConv(v.C, C.unsigned(cc)) }
func (v Value) GC() string { return C.GoString(C.LLVMGetGC(v.C)) }
func (v Value) SetGC(name string) {
	cname := C.CString(name)
	C.LLVMSetGC(v.C, cname)
	C.free(unsafe.Pointer(cname))
}
func (v Value) AddFunctionAttr(a Attribute) { C.LLVMAddFunctionAttr(v.C, C.LLVMAttribute(a)) }
func (v Value) FunctionAttr() Attribute { return Attribute(C.LLVMGetFunctionAttr(v.C)) }
func (v Value) RemoveFunctionAttr(a Attribute) { C.LLVMRemoveFunctionAttr(v.C, C.LLVMAttribute(a)) }

// Operations on parameters
func (v Value) ParamsCount() int { return int(C.LLVMCountParams(v.C)) }
func (v Value) Params()
[]Value {
	out := make([]Value, v.ParamsCount())
	if len(out) > 0 {
		C.LLVMGetParams(v.C, llvmValueRefPtr(&out[0]))
	}
	return out
}
func (v Value) Param(i int) (rv Value) { rv.C = C.LLVMGetParam(v.C, C.unsigned(i)); return }
func (v Value) ParamParent() (rv Value) { rv.C = C.LLVMGetParamParent(v.C); return }
func (v Value) FirstParam() (rv Value) { rv.C = C.LLVMGetFirstParam(v.C); return }
func (v Value) LastParam() (rv Value) { rv.C = C.LLVMGetLastParam(v.C); return }
func NextParam(v Value) (rv Value) { rv.C = C.LLVMGetNextParam(v.C); return }
func PrevParam(v Value) (rv Value) { rv.C = C.LLVMGetPreviousParam(v.C); return }
func (v Value) AddAttribute(a Attribute) { C.LLVMAddAttribute(v.C, C.LLVMAttribute(a)) }
func (v Value) RemoveAttribute(a Attribute) { C.LLVMRemoveAttribute(v.C, C.LLVMAttribute(a)) }
func (v Value) Attribute() Attribute { return Attribute(C.LLVMGetAttribute(v.C)) }
func (v Value) SetParamAlignment(align int) { C.LLVMSetParamAlignment(v.C, C.unsigned(align)) }

// Operations on basic blocks
func (bb BasicBlock) AsValue() (v Value) { v.C = C.LLVMBasicBlockAsValue(bb.C); return }
func (v Value) IsBasicBlock() bool { return C.LLVMValueIsBasicBlock(v.C) != 0 }
func (v Value) AsBasicBlock() (bb BasicBlock) { bb.C = C.LLVMValueAsBasicBlock(v.C); return }
func (bb BasicBlock) Parent() (v Value) { v.C = C.LLVMGetBasicBlockParent(bb.C); return }
func (v Value) BasicBlocksCount() int { return int(C.LLVMCountBasicBlocks(v.C)) }

// BasicBlocks returns all basic blocks of the function v.
// BUGFIX: guard the &out[0] access so a function with zero basic blocks
// returns an empty slice instead of panicking with index out of range.
// This mirrors the len(out) > 0 guard already used by Params above.
func (v Value) BasicBlocks() []BasicBlock {
	out := make([]BasicBlock, v.BasicBlocksCount())
	if len(out) > 0 {
		C.LLVMGetBasicBlocks(v.C, llvmBasicBlockRefPtr(&out[0]))
	}
	return out
}
func (v Value) FirstBasicBlock() (bb BasicBlock) { bb.C = C.LLVMGetFirstBasicBlock(v.C); return }
func (v Value) LastBasicBlock() (bb BasicBlock) { bb.C = C.LLVMGetLastBasicBlock(v.C); return }
func NextBasicBlock(bb BasicBlock) (rbb BasicBlock) { rbb.C = C.LLVMGetNextBasicBlock(bb.C); return }
func PrevBasicBlock(bb BasicBlock) (rbb BasicBlock) { rbb.C =
C.LLVMGetPreviousBasicBlock(bb.C); return }
func (v Value) EntryBasicBlock() (bb BasicBlock) { bb.C = C.LLVMGetEntryBasicBlock(v.C); return }

// AddBasicBlock appends a new block named name to function f in context c.
func (c Context) AddBasicBlock(f Value, name string) (bb BasicBlock) {
	cname := C.CString(name)
	bb.C = C.LLVMAppendBasicBlockInContext(c.C, f.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}

// InsertBasicBlock inserts a new block named name before ref in context c.
func (c Context) InsertBasicBlock(ref BasicBlock, name string) (bb BasicBlock) {
	cname := C.CString(name)
	bb.C = C.LLVMInsertBasicBlockInContext(c.C, ref.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func AddBasicBlock(f Value, name string) (bb BasicBlock) {
	cname := C.CString(name)
	bb.C = C.LLVMAppendBasicBlock(f.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func InsertBasicBlock(ref BasicBlock, name string) (bb BasicBlock) {
	cname := C.CString(name)
	bb.C = C.LLVMInsertBasicBlock(ref.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (bb BasicBlock) EraseFromParent() { C.LLVMDeleteBasicBlock(bb.C) }
func (bb BasicBlock) MoveBefore(pos BasicBlock) { C.LLVMMoveBasicBlockBefore(bb.C, pos.C) }
func (bb BasicBlock) MoveAfter(pos BasicBlock) { C.LLVMMoveBasicBlockAfter(bb.C, pos.C) }

// Operations on instructions
func (v Value) InstructionParent() (bb BasicBlock) { bb.C = C.LLVMGetInstructionParent(v.C); return }
func (bb BasicBlock) FirstInstruction() (v Value) { v.C = C.LLVMGetFirstInstruction(bb.C); return }
func (bb BasicBlock) LastInstruction() (v Value) { v.C = C.LLVMGetLastInstruction(bb.C); return }
func NextInstruction(v Value) (rv Value) { rv.C = C.LLVMGetNextInstruction(v.C); return }
func PrevInstruction(v Value) (rv Value) { rv.C = C.LLVMGetPreviousInstruction(v.C); return }

// Operations on call sites
func (v Value) SetInstructionCallConv(cc CallConv) {
	C.LLVMSetInstructionCallConv(v.C, C.unsigned(cc))
}
func (v Value) InstructionCallConv() CallConv {
	return CallConv(C.LLVMCallConv(C.LLVMGetInstructionCallConv(v.C)))
}
func (v Value) AddInstrAttribute(i int, a Attribute) { C.LLVMAddInstrAttribute(v.C, C.unsigned(i),
C.LLVMAttribute(a)) }
func (v Value) RemoveInstrAttribute(i int, a Attribute) {
	C.LLVMRemoveInstrAttribute(v.C, C.unsigned(i), C.LLVMAttribute(a))
}
func (v Value) SetInstrParamAlignment(i int, align int) {
	C.LLVMSetInstrParamAlignment(v.C, C.unsigned(i), C.unsigned(align))
}

// Operations on call instructions (only)
func (v Value) IsTailCall() bool { return C.LLVMIsTailCall(v.C) != 0 }
func (v Value) SetTailCall(is bool) { C.LLVMSetTailCall(v.C, boolToLLVMBool(is)) }

// Operations on phi nodes
// NOTE(review): AddIncoming indexes blocks[0] without a length guard, so
// calling it with empty slices panics; vals and blocks are presumably
// expected to be non-empty and of equal length -- confirm with callers.
func (v Value) AddIncoming(vals []Value, blocks []BasicBlock) {
	ptr, nvals := llvmValueRefs(vals)
	C.LLVMAddIncoming(v.C, ptr, llvmBasicBlockRefPtr(&blocks[0]), nvals)
}
func (v Value) IncomingCount() int { return int(C.LLVMCountIncoming(v.C)) }
func (v Value) IncomingValue(i int) (rv Value) {
	rv.C = C.LLVMGetIncomingValue(v.C, C.unsigned(i))
	return
}
func (v Value) IncomingBlock(i int) (bb BasicBlock) {
	bb.C = C.LLVMGetIncomingBlock(v.C, C.unsigned(i))
	return
}

//-------------------------------------------------------------------------
// llvm.Builder
//-------------------------------------------------------------------------

// An instruction builder represents a point within a basic block, and is the
// exclusive means of building instructions using the C interface.
func (c Context) NewBuilder() (b Builder) { b.C = C.LLVMCreateBuilderInContext(c.C); return } func NewBuilder() (b Builder) { b.C = C.LLVMCreateBuilder(); return } func (b Builder) SetInsertPoint(block BasicBlock, instr Value) { C.LLVMPositionBuilder(b.C, block.C, instr.C) } func (b Builder) SetInsertPointBefore(instr Value) { C.LLVMPositionBuilderBefore(b.C, instr.C) } func (b Builder) SetInsertPointAtEnd(block BasicBlock) { C.LLVMPositionBuilderAtEnd(b.C, block.C) } func (b Builder) GetInsertBlock() (bb BasicBlock) { bb.C = C.LLVMGetInsertBlock(b.C); return } func (b Builder) ClearInsertionPoint() { C.LLVMClearInsertionPosition(b.C) } func (b Builder) Insert(instr Value) { C.LLVMInsertIntoBuilder(b.C, instr.C) } func (b Builder) InsertWithName(instr Value, name string) { cname := C.CString(name) C.LLVMInsertIntoBuilderWithName(b.C, instr.C, cname) C.free(unsafe.Pointer(cname)) } func (b Builder) Dispose() { C.LLVMDisposeBuilder(b.C) } // Metadata func (b Builder) SetCurrentDebugLocation(v Value) { C.LLVMSetCurrentDebugLocation(b.C, v.C) } func (b Builder) CurrentDebugLocation() (v Value) { v.C = C.LLVMGetCurrentDebugLocation(b.C); return } func (b Builder) SetInstDebugLocation(v Value) { C.LLVMSetCurrentDebugLocation(b.C, v.C) } // Terminators func (b Builder) CreateRetVoid() (rv Value) { rv.C = C.LLVMBuildRetVoid(b.C); return } func (b Builder) CreateRet(v Value) (rv Value) { rv.C = C.LLVMBuildRet(b.C, v.C); return } func (b Builder) CreateAggregateRet(vs []Value) (rv Value) { ptr, nvals := llvmValueRefs(vs) rv.C = C.LLVMBuildAggregateRet(b.C, ptr, nvals) return } func (b Builder) CreateBr(bb BasicBlock) (rv Value) { rv.C = C.LLVMBuildBr(b.C, bb.C); return } func (b Builder) CreateCondBr(ifv Value, thenb, elseb BasicBlock) (rv Value) { rv.C = C.LLVMBuildCondBr(b.C, ifv.C, thenb.C, elseb.C) return } func (b Builder) CreateSwitch(v Value, elseb BasicBlock, numCases int) (rv Value) { rv.C = C.LLVMBuildSwitch(b.C, v.C, elseb.C, C.unsigned(numCases)) return } func (b 
Builder) CreateIndirectBr(addr Value, numDests int) (rv Value) {
	rv.C = C.LLVMBuildIndirectBr(b.C, addr.C, C.unsigned(numDests))
	return
}
func (b Builder) CreateInvoke(fn Value, args []Value, then, catch BasicBlock, name string) (rv Value) {
	cname := C.CString(name)
	ptr, nvals := llvmValueRefs(args)
	rv.C = C.LLVMBuildInvoke(b.C, fn.C, ptr, nvals, then.C, catch.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateUnreachable() (rv Value) { rv.C = C.LLVMBuildUnreachable(b.C); return }

// Add a case to the switch instruction
func (v Value) AddCase(on Value, dest BasicBlock) { C.LLVMAddCase(v.C, on.C, dest.C) }

// Add a destination to the indirectbr instruction
func (v Value) AddDest(dest BasicBlock) { C.LLVMAddDestination(v.C, dest.C) }

// Arithmetic
// Each Create* builder emits the corresponding instruction at the builder's
// insertion point; the temporary C name string is freed before returning.
func (b Builder) CreateAdd(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildAdd(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNSWAdd(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildNSWAdd(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNUWAdd(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildNUWAdd(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateFAdd(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildFAdd(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateSub(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildSub(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNSWSub(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildNSWSub(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNUWSub(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildNUWSub(b.C,
lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return }

// Floating-point, multiplication, division and remainder builders.
func (b Builder) CreateFSub(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildFSub(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateMul(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildMul(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNSWMul(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildNSWMul(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNUWMul(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildNUWMul(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateFMul(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildFMul(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateUDiv(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildUDiv(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateSDiv(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildSDiv(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateExactSDiv(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildExactSDiv(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateFDiv(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildFDiv(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateURem(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildURem(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateSRem(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C =
C.LLVMBuildSRem(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return }

// Remainder, shift, bitwise-logic and negation builders.
func (b Builder) CreateFRem(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildFRem(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateShl(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildShl(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateLShr(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildLShr(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateAShr(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildAShr(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateAnd(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildAnd(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateOr(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildOr(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateXor(lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildXor(b.C, lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}

// CreateBinOp builds the binary instruction selected by op.
func (b Builder) CreateBinOp(op Opcode, lhs, rhs Value, name string) (v Value) {
	cname := C.CString(name)
	v.C = C.LLVMBuildBinOp(b.C, C.LLVMOpcode(op), lhs.C, rhs.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNeg(v Value, name string) (rv Value) {
	cname := C.CString(name)
	rv.C = C.LLVMBuildNeg(b.C, v.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNSWNeg(v Value, name string) (rv Value) {
	cname := C.CString(name)
	rv.C = C.LLVMBuildNSWNeg(b.C, v.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}
func (b Builder) CreateNUWNeg(v Value, name string) (rv Value) { cname := C.CString(name) rv.C =
C.LLVMBuildNUWNeg(b.C, v.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFNeg(v Value, name string) (rv Value) { cname := C.CString(name) rv.C = C.LLVMBuildFNeg(b.C, v.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNot(v Value, name string) (rv Value) { cname := C.CString(name) rv.C = C.LLVMBuildNot(b.C, v.C, cname) C.free(unsafe.Pointer(cname)) return } // Memory func (b Builder) CreateMalloc(t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildMalloc(b.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateArrayMalloc(t Type, val Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildArrayMalloc(b.C, t.C, val.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateAlloca(t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildAlloca(b.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateArrayAlloca(t Type, val Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildArrayAlloca(b.C, t.C, val.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFree(p Value) (v Value) { v.C = C.LLVMBuildFree(b.C, p.C) return } func (b Builder) CreateLoad(p Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildLoad(b.C, p.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateStore(val Value, p Value) (v Value) { v.C = C.LLVMBuildStore(b.C, val.C, p.C) return } func (b Builder) CreateGEP(p Value, indices []Value, name string) (v Value) { cname := C.CString(name) ptr, nvals := llvmValueRefs(indices) v.C = C.LLVMBuildGEP(b.C, p.C, ptr, nvals, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateInBoundsGEP(p Value, indices []Value, name string) (v Value) { cname := C.CString(name) ptr, nvals := llvmValueRefs(indices) v.C = C.LLVMBuildInBoundsGEP(b.C, p.C, ptr, nvals, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) 
CreateStructGEP(p Value, i int, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildStructGEP(b.C, p.C, C.unsigned(i), cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateGlobalString(str, name string) (v Value) { cstr := C.CString(str) cname := C.CString(name) v.C = C.LLVMBuildGlobalString(b.C, cstr, cname) C.free(unsafe.Pointer(cname)) C.free(unsafe.Pointer(cstr)) return } func (b Builder) CreateGlobalStringPtr(str, name string) (v Value) { cstr := C.CString(str) cname := C.CString(name) v.C = C.LLVMBuildGlobalStringPtr(b.C, cstr, cname) C.free(unsafe.Pointer(cname)) C.free(unsafe.Pointer(cstr)) return } // Casts func (b Builder) CreateTrunc(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildTrunc(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateZExt(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildZExt(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSExt(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSExt(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPToUI(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPToUI(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPToSI(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPToSI(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateUIToFP(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildUIToFP(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSIToFP(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSIToFP(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPTrunc(val Value, t Type, 
name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPTrunc(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPExt(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPExt(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreatePtrToInt(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildPtrToInt(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateIntToPtr(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildIntToPtr(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateBitCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildBitCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateZExtOrBitCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildZExtOrBitCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSExtOrBitCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSExtOrBitCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateTruncOrBitCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildTruncOrBitCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateCast(val Value, op Opcode, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildCast(b.C, C.LLVMOpcode(op), val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } // func (b Builder) CreatePointerCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildPointerCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateIntCast(val Value, t Type, name string) (v Value) { cname := 
C.CString(name) v.C = C.LLVMBuildIntCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } // Comparisons func (b Builder) CreateICmp(pred IntPredicate, lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildICmp(b.C, C.LLVMIntPredicate(pred), lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFCmp(pred FloatPredicate, lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFCmp(b.C, C.LLVMRealPredicate(pred), lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } // Miscellaneous instructions func (b Builder) CreatePHI(t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildPhi(b.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateCall(fn Value, args []Value, name string) (v Value) { cname := C.CString(name) ptr, nvals := llvmValueRefs(args) v.C = C.LLVMBuildCall(b.C, fn.C, ptr, nvals, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSelect(ifv, thenv, elsev Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSelect(b.C, ifv.C, thenv.C, elsev.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateVAArg(list Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildVAArg(b.C, list.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateExtractElement(vec, i Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildExtractElement(b.C, vec.C, i.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateInsertElement(vec, elt, i Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildInsertElement(b.C, vec.C, elt.C, i.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) 
CreateShuffleVector(v1, v2, mask Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildShuffleVector(b.C, v1.C, v2.C, mask.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateExtractValue(agg Value, i int, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildExtractValue(b.C, agg.C, C.unsigned(i), cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateInsertValue(agg, elt Value, i int, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildInsertValue(b.C, agg.C, elt.C, C.unsigned(i), cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateIsNull(val Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildIsNull(b.C, val.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateIsNotNull(val Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildIsNotNull(b.C, val.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreatePtrDiff(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildPtrDiff(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } //------------------------------------------------------------------------- // llvm.ModuleProvider //------------------------------------------------------------------------- // Changes the type of M so it can be passed to FunctionPassManagers and the // JIT. They take ModuleProviders for historical reasons. func NewModuleProviderForModule(m Module) (mp ModuleProvider) { mp.C = C.LLVMCreateModuleProviderForExistingModule(m.C) return } // Destroys the module M. 
func (mp ModuleProvider) Dispose() { C.LLVMDisposeModuleProvider(mp.C) } //------------------------------------------------------------------------- // llvm.MemoryBuffer //------------------------------------------------------------------------- func NewMemoryBufferFromFile(path string) (b MemoryBuffer, err error) { var cmsg *C.char cpath := C.CString(path) fail := C.LLVMCreateMemoryBufferWithContentsOfFile(cpath, &b.C, &cmsg) if fail != 0 { b.C = nil err = errors.New(C.GoString(cmsg)) C.LLVMDisposeMessage(cmsg) } else { err = nil } C.free(unsafe.Pointer(cpath)) return } func NewMemoryBufferFromStdin() (b MemoryBuffer, err error) { var cmsg *C.char fail := C.LLVMCreateMemoryBufferWithSTDIN(&b.C, &cmsg) if fail != 0 { b.C = nil err = errors.New(C.GoString(cmsg)) C.LLVMDisposeMessage(cmsg) } else { err = nil } return } func (b MemoryBuffer) Dispose() { C.LLVMDisposeMemoryBuffer(b.C) } //------------------------------------------------------------------------- // llvm.PassManager //------------------------------------------------------------------------- // Constructs a new whole-module pass pipeline. This type of pipeline is // suitable for link-time optimization and whole-module transformations. // See llvm::PassManager::PassManager. func NewPassManager() (pm PassManager) { pm.C = C.LLVMCreatePassManager(); return } // Constructs a new function-by-function pass pipeline over the module // provider. It does not take ownership of the module provider. This type of // pipeline is suitable for code generation and JIT compilation tasks. // See llvm::FunctionPassManager::FunctionPassManager. func NewFunctionPassManagerForModule(m Module) (pm PassManager) { pm.C = C.LLVMCreateFunctionPassManagerForModule(m.C) return } // Deprecated: Use LLVMCreateFunctionPassManagerForModule instead. 
//LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef MP); //XXX: don't port this // Initializes, executes on the provided module, and finalizes all of the // passes scheduled in the pass manager. Returns 1 if any of the passes // modified the module, 0 otherwise. See llvm::PassManager::run(Module&). func (pm PassManager) Run(m Module) bool { return C.LLVMRunPassManager(pm.C, m.C) != 0 } // Initializes all of the function passes scheduled in the function pass // manager. Returns 1 if any of the passes modified the module, 0 otherwise. // See llvm::FunctionPassManager::doInitialization. func (pm PassManager) InitializeFunc() bool { return C.LLVMInitializeFunctionPassManager(pm.C) != 0 } // Executes all of the function passes scheduled in the function pass manager // on the provided function. Returns 1 if any of the passes modified the // function, false otherwise. // See llvm::FunctionPassManager::run(Function&). func (pm PassManager) RunFunc(f Value) bool { return C.LLVMRunFunctionPassManager(pm.C, f.C) != 0 } // Finalizes all of the function passes scheduled in in the function pass // manager. Returns 1 if any of the passes modified the module, 0 otherwise. // See llvm::FunctionPassManager::doFinalization. func (pm PassManager) FinalizeFunc() bool { return C.LLVMFinalizeFunctionPassManager(pm.C) != 0 } // Frees the memory of a pass pipeline. For function pipelines, does not free // the module provider. // See llvm::PassManagerBase::~PassManagerBase. func (pm PassManager) Dispose() { C.LLVMDisposePassManager(pm.C) } // vim: set ft=go: add some missing Attribute constants package llvm /* #include <llvm-c/Core.h> #include <stdlib.h> */ import "C" import "unsafe" import "errors" // TODO: Add comments // TODO: Use Go's reflection in order to simplify bindings? // TODO: Add type safety? type ( // I'm using these weird structs here, because *Ref types are pointers // and Go's spec says that I can't use a pointer as a receiver base // type. 
Context struct { C C.LLVMContextRef } Module struct { C C.LLVMModuleRef } Type struct { C C.LLVMTypeRef } Value struct { C C.LLVMValueRef } BasicBlock struct { C C.LLVMBasicBlockRef } Builder struct { C C.LLVMBuilderRef } ModuleProvider struct { C C.LLVMModuleProviderRef } MemoryBuffer struct { C C.LLVMMemoryBufferRef } PassManager struct { C C.LLVMPassManagerRef } Use struct { C C.LLVMUseRef } Attribute C.LLVMAttribute Opcode C.LLVMOpcode TypeKind C.LLVMTypeKind Linkage C.LLVMLinkage Visibility C.LLVMVisibility CallConv C.LLVMCallConv IntPredicate C.LLVMIntPredicate FloatPredicate C.LLVMRealPredicate ) func (c Context) IsNil() bool { return c.C == nil } func (c Module) IsNil() bool { return c.C == nil } func (c Type) IsNil() bool { return c.C == nil } func (c Value) IsNil() bool { return c.C == nil } func (c BasicBlock) IsNil() bool { return c.C == nil } func (c Builder) IsNil() bool { return c.C == nil } func (c ModuleProvider) IsNil() bool { return c.C == nil } func (c MemoryBuffer) IsNil() bool { return c.C == nil } func (c PassManager) IsNil() bool { return c.C == nil } func (c Use) IsNil() bool { return c.C == nil } // helpers func llvmTypeRefPtr(t *Type) *C.LLVMTypeRef { return (*C.LLVMTypeRef)(unsafe.Pointer(t)) } func llvmValueRefPtr(t *Value) *C.LLVMValueRef { return (*C.LLVMValueRef)(unsafe.Pointer(t)) } func llvmBasicBlockRefPtr(t *BasicBlock) *C.LLVMBasicBlockRef { return (*C.LLVMBasicBlockRef)(unsafe.Pointer(t)) } func boolToLLVMBool(b bool) C.LLVMBool { if b { return C.LLVMBool(1) } return C.LLVMBool(0) } func llvmValueRefs(values []Value) (*C.LLVMValueRef, C.unsigned) { var pt *C.LLVMValueRef ptlen := C.unsigned(len(values)) if ptlen > 0 { pt = llvmValueRefPtr(&values[0]) } return pt, ptlen } //------------------------------------------------------------------------- // llvm.Attribute //------------------------------------------------------------------------- const ( NoneAttribute Attribute = 0 ZExtAttribute Attribute = C.LLVMZExtAttribute 
SExtAttribute Attribute = C.LLVMSExtAttribute
	NoReturnAttribute Attribute = C.LLVMNoReturnAttribute
	InRegAttribute Attribute = C.LLVMInRegAttribute
	StructRetAttribute Attribute = C.LLVMStructRetAttribute
	NoUnwindAttribute Attribute = C.LLVMNoUnwindAttribute
	NoAliasAttribute Attribute = C.LLVMNoAliasAttribute
	ByValAttribute Attribute = C.LLVMByValAttribute
	NestAttribute Attribute = C.LLVMNestAttribute
	ReadNoneAttribute Attribute = C.LLVMReadNoneAttribute
	ReadOnlyAttribute Attribute = C.LLVMReadOnlyAttribute
	NoInlineAttribute Attribute = C.LLVMNoInlineAttribute
	AlwaysInlineAttribute Attribute = C.LLVMAlwaysInlineAttribute
	OptimizeForSizeAttribute Attribute = C.LLVMOptimizeForSizeAttribute
	StackProtectAttribute Attribute = C.LLVMStackProtectAttribute
	StackProtectReqAttribute Attribute = C.LLVMStackProtectReqAttribute
	Alignment Attribute = C.LLVMAlignment
	NoCaptureAttribute Attribute = C.LLVMNoCaptureAttribute
	NoRedZoneAttribute Attribute = C.LLVMNoRedZoneAttribute
	NoImplicitFloatAttribute Attribute = C.LLVMNoImplicitFloatAttribute
	NakedAttribute Attribute = C.LLVMNakedAttribute
	InlineHintAttribute Attribute = C.LLVMInlineHintAttribute
	StackAlignment Attribute = C.LLVMStackAlignment
	ReturnsTwiceAttribute Attribute = C.LLVMReturnsTwice
	UWTableAttribute Attribute = C.LLVMUWTable
	NonLazyBindAttribute Attribute = C.LLVMNonLazyBind
)

//-------------------------------------------------------------------------
// llvm.Opcode
//-------------------------------------------------------------------------

const (
	Ret Opcode = C.LLVMRet
	Br Opcode = C.LLVMBr
	Switch Opcode = C.LLVMSwitch
	IndirectBr Opcode = C.LLVMIndirectBr
	Invoke Opcode = C.LLVMInvoke
	Unreachable Opcode = C.LLVMUnreachable

	// Standard Binary Operators
	Add Opcode = C.LLVMAdd
	FAdd Opcode = C.LLVMFAdd
	Sub Opcode = C.LLVMSub
	FSub Opcode = C.LLVMFSub
	Mul Opcode = C.LLVMMul
	FMul Opcode = C.LLVMFMul
	UDiv Opcode = C.LLVMUDiv
	SDiv Opcode = C.LLVMSDiv
	FDiv Opcode = C.LLVMFDiv
	URem Opcode = C.LLVMURem
	SRem Opcode = C.LLVMSRem
	FRem Opcode = C.LLVMFRem

	// Logical Operators
	Shl Opcode = C.LLVMShl
	LShr Opcode = C.LLVMLShr
	AShr Opcode = C.LLVMAShr
	And Opcode = C.LLVMAnd
	Or Opcode = C.LLVMOr
	Xor Opcode = C.LLVMXor

	// Memory Operators
	Alloca Opcode = C.LLVMAlloca
	Load Opcode = C.LLVMLoad
	Store Opcode = C.LLVMStore
	GetElementPtr Opcode = C.LLVMGetElementPtr

	// Cast Operators
	Trunc Opcode = C.LLVMTrunc
	ZExt Opcode = C.LLVMZExt
	SExt Opcode = C.LLVMSExt
	FPToUI Opcode = C.LLVMFPToUI
	FPToSI Opcode = C.LLVMFPToSI
	UIToFP Opcode = C.LLVMUIToFP
	SIToFP Opcode = C.LLVMSIToFP
	FPTrunc Opcode = C.LLVMFPTrunc
	FPExt Opcode = C.LLVMFPExt
	PtrToInt Opcode = C.LLVMPtrToInt
	IntToPtr Opcode = C.LLVMIntToPtr
	BitCast Opcode = C.LLVMBitCast

	// Other Operators
	ICmp Opcode = C.LLVMICmp
	FCmp Opcode = C.LLVMFCmp
	PHI Opcode = C.LLVMPHI
	Call Opcode = C.LLVMCall
	Select Opcode = C.LLVMSelect
	// UserOp1
	// UserOp2
	VAArg Opcode = C.LLVMVAArg
	ExtractElement Opcode = C.LLVMExtractElement
	InsertElement Opcode = C.LLVMInsertElement
	ShuffleVector Opcode = C.LLVMShuffleVector
	ExtractValue Opcode = C.LLVMExtractValue
	InsertValue Opcode = C.LLVMInsertValue
)

//-------------------------------------------------------------------------
// llvm.TypeKind
//-------------------------------------------------------------------------

const (
	VoidTypeKind TypeKind = C.LLVMVoidTypeKind
	FloatTypeKind TypeKind = C.LLVMFloatTypeKind
	DoubleTypeKind TypeKind = C.LLVMDoubleTypeKind
	X86_FP80TypeKind TypeKind = C.LLVMX86_FP80TypeKind
	FP128TypeKind TypeKind = C.LLVMFP128TypeKind
	PPC_FP128TypeKind TypeKind = C.LLVMPPC_FP128TypeKind
	LabelTypeKind TypeKind = C.LLVMLabelTypeKind
	IntegerTypeKind TypeKind = C.LLVMIntegerTypeKind
	FunctionTypeKind TypeKind = C.LLVMFunctionTypeKind
	StructTypeKind TypeKind = C.LLVMStructTypeKind
	ArrayTypeKind TypeKind = C.LLVMArrayTypeKind
	PointerTypeKind TypeKind = C.LLVMPointerTypeKind
	VectorTypeKind TypeKind = C.LLVMVectorTypeKind
	MetadataTypeKind TypeKind = C.LLVMMetadataTypeKind
)
//------------------------------------------------------------------------- // llvm.Linkage //------------------------------------------------------------------------- const ( ExternalLinkage Linkage = C.LLVMExternalLinkage AvailableExternallyLinkage Linkage = C.LLVMAvailableExternallyLinkage LinkOnceAnyLinkage Linkage = C.LLVMLinkOnceAnyLinkage LinkOnceODRLinkage Linkage = C.LLVMLinkOnceODRLinkage WeakAnyLinkage Linkage = C.LLVMWeakAnyLinkage WeakODRLinkage Linkage = C.LLVMWeakODRLinkage AppendingLinkage Linkage = C.LLVMAppendingLinkage InternalLinkage Linkage = C.LLVMInternalLinkage PrivateLinkage Linkage = C.LLVMPrivateLinkage DLLImportLinkage Linkage = C.LLVMDLLImportLinkage DLLExportLinkage Linkage = C.LLVMDLLExportLinkage ExternalWeakLinkage Linkage = C.LLVMExternalWeakLinkage GhostLinkage Linkage = C.LLVMGhostLinkage CommonLinkage Linkage = C.LLVMCommonLinkage LinkerPrivateLinkage Linkage = C.LLVMLinkerPrivateLinkage LinkerPrivateWeakLinkage Linkage = C.LLVMLinkerPrivateWeakLinkage //LinkerPrivateWeakDefAutoLinkage Linkage = C.LLVMLinkerPrivateWeakDefAutoLinkage ) //------------------------------------------------------------------------- // llvm.Visibility //------------------------------------------------------------------------- const ( DefaultVisibility Visibility = C.LLVMDefaultVisibility HiddenVisibility Visibility = C.LLVMHiddenVisibility ProtectedVisibility Visibility = C.LLVMProtectedVisibility ) //------------------------------------------------------------------------- // llvm.CallConv //------------------------------------------------------------------------- const ( CCallConv CallConv = C.LLVMCCallConv FastCallConv CallConv = C.LLVMFastCallConv ColdCallConv CallConv = C.LLVMColdCallConv X86StdcallCallConv CallConv = C.LLVMX86StdcallCallConv X86FastcallCallConv CallConv = C.LLVMX86FastcallCallConv ) //------------------------------------------------------------------------- // llvm.IntPredicate 
//------------------------------------------------------------------------- const ( IntEQ IntPredicate = C.LLVMIntEQ IntNE IntPredicate = C.LLVMIntNE IntUGT IntPredicate = C.LLVMIntUGT IntUGE IntPredicate = C.LLVMIntUGE IntULT IntPredicate = C.LLVMIntULT IntULE IntPredicate = C.LLVMIntULE IntSGT IntPredicate = C.LLVMIntSGT IntSGE IntPredicate = C.LLVMIntSGE IntSLT IntPredicate = C.LLVMIntSLT IntSLE IntPredicate = C.LLVMIntSLE ) //------------------------------------------------------------------------- // llvm.FloatPredicate //------------------------------------------------------------------------- const ( FloatPredicateFalse FloatPredicate = C.LLVMRealPredicateFalse FloatOEQ FloatPredicate = C.LLVMRealOEQ FloatOGT FloatPredicate = C.LLVMRealOGT FloatOGE FloatPredicate = C.LLVMRealOGE FloatOLT FloatPredicate = C.LLVMRealOLT FloatOLE FloatPredicate = C.LLVMRealOLE FloatONE FloatPredicate = C.LLVMRealONE FloatORD FloatPredicate = C.LLVMRealORD FloatUNO FloatPredicate = C.LLVMRealUNO FloatUEQ FloatPredicate = C.LLVMRealUEQ FloatUGT FloatPredicate = C.LLVMRealUGT FloatUGE FloatPredicate = C.LLVMRealUGE FloatULT FloatPredicate = C.LLVMRealULT FloatULE FloatPredicate = C.LLVMRealULE FloatUNE FloatPredicate = C.LLVMRealUNE FloatPredicateTrue FloatPredicate = C.LLVMRealPredicateTrue ) //------------------------------------------------------------------------- // llvm.Context //------------------------------------------------------------------------- func NewContext() Context { return Context{C.LLVMContextCreate()} } func GlobalContext() Context { return Context{C.LLVMGetGlobalContext()} } func (c Context) Dispose() { C.LLVMContextDispose(c.C) } func (c Context) MDKindID(name string) (id int) { cname := C.CString(name) id = int(C.LLVMGetMDKindIDInContext(c.C, cname, C.unsigned(len(name)))) C.free(unsafe.Pointer(cname)) return } func MDKindID(name string) (id int) { cname := C.CString(name) id = int(C.LLVMGetMDKindID(cname, C.unsigned(len(name)))) 
C.free(unsafe.Pointer(cname)) return } //------------------------------------------------------------------------- // llvm.Module //------------------------------------------------------------------------- // Create and destroy modules. // See llvm::Module::Module. func NewModule(name string) (m Module) { cname := C.CString(name) m.C = C.LLVMModuleCreateWithName(cname) C.free(unsafe.Pointer(cname)) return } func (c Context) NewModule(name string) (m Module) { cname := C.CString(name) m.C = C.LLVMModuleCreateWithNameInContext(cname, c.C) C.free(unsafe.Pointer(cname)) return } // See llvm::Module::~Module func (m Module) Dispose() { C.LLVMDisposeModule(m.C) } // Data layout. See Module::getDataLayout. func (m Module) DataLayout() string { clayout := C.LLVMGetDataLayout(m.C) return C.GoString(clayout) } func (m Module) SetDataLayout(layout string) { clayout := C.CString(layout) C.LLVMSetDataLayout(m.C, clayout) C.free(unsafe.Pointer(clayout)) } // Target triple. See Module::getTargetTriple. func (m Module) Target() string { ctarget := C.LLVMGetTarget(m.C) return C.GoString(ctarget) } func (m Module) SetTarget(target string) { ctarget := C.CString(target) C.LLVMSetTarget(m.C, ctarget) C.free(unsafe.Pointer(ctarget)) } func (m Module) GetTypeByName(name string) (t Type) { cname := C.CString(name) t.C = C.LLVMGetTypeByName(m.C, cname) C.free(unsafe.Pointer(cname)) return } // See Module::dump. func (m Module) Dump() { C.LLVMDumpModule(m.C) } // See Module::setModuleInlineAsm. 
func (m Module) SetInlineAsm(asm string) { casm := C.CString(asm) C.LLVMSetModuleInlineAsm(m.C, casm) C.free(unsafe.Pointer(casm)) } func (m Module) AddNamedMetadataOperand(name string, operand Value) { cname := C.CString(name) C.LLVMAddNamedMetadataOperand(m.C, cname, operand.C) C.free(unsafe.Pointer(cname)) } //------------------------------------------------------------------------- // llvm.Type //------------------------------------------------------------------------- // LLVM types conform to the following hierarchy: // // types: // integer type // real type // function type // sequence types: // array type // pointer type // vector type // void type // label type // opaque type // See llvm::LLVMTypeKind::getTypeID. func (t Type) TypeKind() TypeKind { return TypeKind(C.LLVMGetTypeKind(t.C)) } // See llvm::LLVMType::getContext. func (t Type) Context() (c Context) { c.C = C.LLVMGetTypeContext(t.C) return } // Operations on integer types func (c Context) Int1Type() (t Type) { t.C = C.LLVMInt1TypeInContext(c.C); return } func (c Context) Int8Type() (t Type) { t.C = C.LLVMInt8TypeInContext(c.C); return } func (c Context) Int16Type() (t Type) { t.C = C.LLVMInt16TypeInContext(c.C); return } func (c Context) Int32Type() (t Type) { t.C = C.LLVMInt32TypeInContext(c.C); return } func (c Context) Int64Type() (t Type) { t.C = C.LLVMInt64TypeInContext(c.C); return } func (c Context) IntType() (t Type, numbits int) { t.C = C.LLVMIntTypeInContext(c.C, C.unsigned(numbits)) return } func Int1Type() (t Type) { t.C = C.LLVMInt1Type(); return } func Int8Type() (t Type) { t.C = C.LLVMInt8Type(); return } func Int16Type() (t Type) { t.C = C.LLVMInt16Type(); return } func Int32Type() (t Type) { t.C = C.LLVMInt32Type(); return } func Int64Type() (t Type) { t.C = C.LLVMInt64Type(); return } func IntType(numbits int) (t Type) { t.C = C.LLVMIntType(C.unsigned(numbits)) return } func (t Type) IntTypeWidth() int { return int(C.LLVMGetIntTypeWidth(t.C)) } // Operations on real types func (c 
Context) FloatType() (t Type) { t.C = C.LLVMFloatTypeInContext(c.C); return } func (c Context) DoubleType() (t Type) { t.C = C.LLVMDoubleTypeInContext(c.C); return } func (c Context) X86FP80Type() (t Type) { t.C = C.LLVMX86FP80TypeInContext(c.C); return } func (c Context) FP128Type() (t Type) { t.C = C.LLVMFP128TypeInContext(c.C); return } func (c Context) PPCFP128Type() (t Type) { t.C = C.LLVMPPCFP128TypeInContext(c.C); return } func FloatType() (t Type) { t.C = C.LLVMFloatType(); return } func DoubleType() (t Type) { t.C = C.LLVMDoubleType(); return } func X86FP80Type() (t Type) { t.C = C.LLVMX86FP80Type(); return } func FP128Type() (t Type) { t.C = C.LLVMFP128Type(); return } func PPCFP128Type() (t Type) { t.C = C.LLVMPPCFP128Type(); return } // Operations on function types func FunctionType(returnType Type, paramTypes []Type, isVarArg bool) (t Type) { var pt *C.LLVMTypeRef var ptlen C.unsigned if len(paramTypes) > 0 { pt = llvmTypeRefPtr(&paramTypes[0]) ptlen = C.unsigned(len(paramTypes)) } t.C = C.LLVMFunctionType(returnType.C, pt, ptlen, boolToLLVMBool(isVarArg)) return } func (t Type) IsFunctionVarArg() bool { return C.LLVMIsFunctionVarArg(t.C) != 0 } func (t Type) ReturnType() (rt Type) { rt.C = C.LLVMGetReturnType(t.C); return } func (t Type) ParamTypesCount() int { return int(C.LLVMCountParamTypes(t.C)) } func (t Type) ParamTypes() []Type { count := t.ParamTypesCount() if count > 0 { out := make([]Type, count) C.LLVMGetParamTypes(t.C, llvmTypeRefPtr(&out[0])) return out } return nil } // Operations on struct types func (c Context) StructType(elementTypes []Type, packed bool) (t Type) { var pt *C.LLVMTypeRef var ptlen C.unsigned if len(elementTypes) > 0 { pt = llvmTypeRefPtr(&elementTypes[0]) ptlen = C.unsigned(len(elementTypes)) } t.C = C.LLVMStructTypeInContext(c.C, pt, ptlen, boolToLLVMBool(packed)) return } func StructType(elementTypes []Type, packed bool) (t Type) { var pt *C.LLVMTypeRef var ptlen C.unsigned if len(elementTypes) > 0 { pt = 
llvmTypeRefPtr(&elementTypes[0]) ptlen = C.unsigned(len(elementTypes)) } t.C = C.LLVMStructType(pt, ptlen, boolToLLVMBool(packed)) return } func (c Context) StructCreateNamed(name string) (t Type) { cname := C.CString(name) t.C = C.LLVMStructCreateNamed(c.C, cname) C.free(unsafe.Pointer(cname)) return } func (t Type) StructSetBody(elementTypes []Type, packed bool) { var pt *C.LLVMTypeRef var ptlen C.unsigned if len(elementTypes) > 0 { pt = llvmTypeRefPtr(&elementTypes[0]) ptlen = C.unsigned(len(elementTypes)) } C.LLVMStructSetBody(t.C, pt, ptlen, boolToLLVMBool(packed)) } func (t Type) IsStructPacked() bool { return C.LLVMIsPackedStruct(t.C) != 0 } func (t Type) StructElementTypesCount() int { return int(C.LLVMCountStructElementTypes(t.C)) } func (t Type) StructElementTypes() []Type { out := make([]Type, t.StructElementTypesCount()) if len(out) > 0 { C.LLVMGetStructElementTypes(t.C, llvmTypeRefPtr(&out[0])) } return out } // Operations on array, pointer, and vector types (sequence types) func ArrayType(elementType Type, elementCount int) (t Type) { t.C = C.LLVMArrayType(elementType.C, C.unsigned(elementCount)) return } func PointerType(elementType Type, addressSpace int) (t Type) { t.C = C.LLVMPointerType(elementType.C, C.unsigned(addressSpace)) return } func VectorType(elementType Type, elementCount int) (t Type) { t.C = C.LLVMVectorType(elementType.C, C.unsigned(elementCount)) return } func (t Type) ElementType() (rt Type) { rt.C = C.LLVMGetElementType(t.C); return } func (t Type) ArrayLength() int { return int(C.LLVMGetArrayLength(t.C)) } func (t Type) PointerAddressSpace() int { return int(C.LLVMGetPointerAddressSpace(t.C)) } func (t Type) VectorSize() int { return int(C.LLVMGetVectorSize(t.C)) } // Operations on other types func (c Context) VoidType() (t Type) { t.C = C.LLVMVoidTypeInContext(c.C); return } func (c Context) LabelType() (t Type) { t.C = C.LLVMLabelTypeInContext(c.C); return } func VoidType() (t Type) { t.C = C.LLVMVoidType(); return } func 
LabelType() (t Type) { t.C = C.LLVMLabelType(); return } //------------------------------------------------------------------------- // llvm.Value //------------------------------------------------------------------------- // Operations on all values func (v Value) Type() (t Type) { t.C = C.LLVMTypeOf(v.C); return } func (v Value) Name() string { return C.GoString(C.LLVMGetValueName(v.C)) } func (v Value) SetName(name string) { cname := C.CString(name) C.LLVMSetValueName(v.C, cname) C.free(unsafe.Pointer(cname)) } func (v Value) Dump() { C.LLVMDumpValue(v.C) } func (v Value) ReplaceAllUsesWith(nv Value) { C.LLVMReplaceAllUsesWith(v.C, nv.C) } func (v Value) HasMetadata() bool { return C.LLVMHasMetadata(v.C) != 0 } func (v Value) Metadata(kind int) (rv Value) { rv.C = C.LLVMGetMetadata(v.C, C.unsigned(kind)) return } func (v Value) SetMetadata(kind int, node Value) { C.LLVMSetMetadata(v.C, C.unsigned(kind), node.C) } // The bulk of LLVM's object model consists of values, which comprise a very // rich type hierarchy. 
//#define LLVM_FOR_EACH_VALUE_SUBCLASS(macro) \ // macro(Argument) \ // macro(BasicBlock) \ // macro(InlineAsm) \ // macro(User) \ // macro(Constant) \ // macro(ConstantAggregateZero) \ // macro(ConstantArray) \ // macro(ConstantExpr) \ // macro(ConstantFP) \ // macro(ConstantInt) \ // macro(ConstantPointerNull) \ // macro(ConstantStruct) \ // macro(ConstantVector) \ // macro(GlobalValue) \ // macro(Function) \ // macro(GlobalAlias) \ // macro(GlobalVariable) \ // macro(UndefValue) \ // macro(Instruction) \ // macro(BinaryOperator) \ // macro(CallInst) \ // macro(IntrinsicInst) \ // macro(DbgInfoIntrinsic) \ // macro(DbgDeclareInst) \ // macro(EHSelectorInst) \ // macro(MemIntrinsic) \ // macro(MemCpyInst) \ // macro(MemMoveInst) \ // macro(MemSetInst) \ // macro(CmpInst) \ // macro(FCmpInst) \ // macro(ICmpInst) \ // macro(ExtractElementInst) \ // macro(GetElementPtrInst) \ // macro(InsertElementInst) \ // macro(InsertValueInst) \ // macro(PHINode) \ // macro(SelectInst) \ // macro(ShuffleVectorInst) \ // macro(StoreInst) \ // macro(TerminatorInst) \ // macro(BranchInst) \ // macro(InvokeInst) \ // macro(ReturnInst) \ // macro(SwitchInst) \ // macro(UnreachableInst) \ // macro(UnwindInst) \ // macro(UnaryInstruction) \ // macro(AllocaInst) \ // macro(CastInst) \ // macro(BitCastInst) \ // macro(FPExtInst) \ // macro(FPToSIInst) \ // macro(FPToUIInst) \ // macro(FPTruncInst) \ // macro(IntToPtrInst) \ // macro(PtrToIntInst) \ // macro(SExtInst) \ // macro(SIToFPInst) \ // macro(TruncInst) \ // macro(UIToFPInst) \ // macro(ZExtInst) \ // macro(ExtractValueInst) \ // macro(LoadInst) \ // macro(VAArgInst) //#define LLVM_DECLARE_VALUE_CAST(name) \ // func (v Value) IsA##name() (rv Value) { rv.C = C.LLVMIsA##name(v.C); return } || //LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DECLARE_VALUE_CAST) // Conversion functions. Generated using preprocess statements above. Return // the input value if it is an instance of the specified class, otherwise NULL. 
// See llvm::dyn_cast_or_null<>. func (v Value) IsAArgument() (rv Value) { rv.C = C.LLVMIsAArgument(v.C); return } func (v Value) IsABasicBlock() (rv Value) { rv.C = C.LLVMIsABasicBlock(v.C); return } func (v Value) IsAInlineAsm() (rv Value) { rv.C = C.LLVMIsAInlineAsm(v.C); return } func (v Value) IsAUser() (rv Value) { rv.C = C.LLVMIsAUser(v.C); return } func (v Value) IsAConstant() (rv Value) { rv.C = C.LLVMIsAConstant(v.C); return } func (v Value) IsAConstantAggregateZero() (rv Value) { rv.C = C.LLVMIsAConstantAggregateZero(v.C) return } func (v Value) IsAConstantArray() (rv Value) { rv.C = C.LLVMIsAConstantArray(v.C); return } func (v Value) IsAConstantExpr() (rv Value) { rv.C = C.LLVMIsAConstantExpr(v.C); return } func (v Value) IsAConstantFP() (rv Value) { rv.C = C.LLVMIsAConstantFP(v.C); return } func (v Value) IsAConstantInt() (rv Value) { rv.C = C.LLVMIsAConstantInt(v.C); return } func (v Value) IsAConstantPointerNull() (rv Value) { rv.C = C.LLVMIsAConstantPointerNull(v.C); return } func (v Value) IsAConstantStruct() (rv Value) { rv.C = C.LLVMIsAConstantStruct(v.C); return } func (v Value) IsAConstantVector() (rv Value) { rv.C = C.LLVMIsAConstantVector(v.C); return } func (v Value) IsAGlobalValue() (rv Value) { rv.C = C.LLVMIsAGlobalValue(v.C); return } func (v Value) IsAFunction() (rv Value) { rv.C = C.LLVMIsAFunction(v.C); return } func (v Value) IsAGlobalAlias() (rv Value) { rv.C = C.LLVMIsAGlobalAlias(v.C); return } func (v Value) IsAGlobalVariable() (rv Value) { rv.C = C.LLVMIsAGlobalVariable(v.C); return } func (v Value) IsAUndefValue() (rv Value) { rv.C = C.LLVMIsAUndefValue(v.C); return } func (v Value) IsAInstruction() (rv Value) { rv.C = C.LLVMIsAInstruction(v.C); return } func (v Value) IsABinaryOperator() (rv Value) { rv.C = C.LLVMIsABinaryOperator(v.C); return } func (v Value) IsACallInst() (rv Value) { rv.C = C.LLVMIsACallInst(v.C); return } func (v Value) IsAIntrinsicInst() (rv Value) { rv.C = C.LLVMIsAIntrinsicInst(v.C); return } func (v 
Value) IsADbgInfoIntrinsic() (rv Value) { rv.C = C.LLVMIsADbgInfoIntrinsic(v.C); return } func (v Value) IsADbgDeclareInst() (rv Value) { rv.C = C.LLVMIsADbgDeclareInst(v.C); return } func (v Value) IsAMemIntrinsic() (rv Value) { rv.C = C.LLVMIsAMemIntrinsic(v.C); return } func (v Value) IsAMemCpyInst() (rv Value) { rv.C = C.LLVMIsAMemCpyInst(v.C); return } func (v Value) IsAMemMoveInst() (rv Value) { rv.C = C.LLVMIsAMemMoveInst(v.C); return } func (v Value) IsAMemSetInst() (rv Value) { rv.C = C.LLVMIsAMemSetInst(v.C); return } func (v Value) IsACmpInst() (rv Value) { rv.C = C.LLVMIsACmpInst(v.C); return } func (v Value) IsAFCmpInst() (rv Value) { rv.C = C.LLVMIsAFCmpInst(v.C); return } func (v Value) IsAICmpInst() (rv Value) { rv.C = C.LLVMIsAICmpInst(v.C); return } func (v Value) IsAExtractElementInst() (rv Value) { rv.C = C.LLVMIsAExtractElementInst(v.C); return } func (v Value) IsAGetElementPtrInst() (rv Value) { rv.C = C.LLVMIsAGetElementPtrInst(v.C); return } func (v Value) IsAInsertElementInst() (rv Value) { rv.C = C.LLVMIsAInsertElementInst(v.C); return } func (v Value) IsAInsertValueInst() (rv Value) { rv.C = C.LLVMIsAInsertValueInst(v.C); return } func (v Value) IsAPHINode() (rv Value) { rv.C = C.LLVMIsAPHINode(v.C); return } func (v Value) IsASelectInst() (rv Value) { rv.C = C.LLVMIsASelectInst(v.C); return } func (v Value) IsAShuffleVectorInst() (rv Value) { rv.C = C.LLVMIsAShuffleVectorInst(v.C); return } func (v Value) IsAStoreInst() (rv Value) { rv.C = C.LLVMIsAStoreInst(v.C); return } func (v Value) IsATerminatorInst() (rv Value) { rv.C = C.LLVMIsATerminatorInst(v.C); return } func (v Value) IsABranchInst() (rv Value) { rv.C = C.LLVMIsABranchInst(v.C); return } func (v Value) IsAInvokeInst() (rv Value) { rv.C = C.LLVMIsAInvokeInst(v.C); return } func (v Value) IsAReturnInst() (rv Value) { rv.C = C.LLVMIsAReturnInst(v.C); return } func (v Value) IsASwitchInst() (rv Value) { rv.C = C.LLVMIsASwitchInst(v.C); return } func (v Value) IsAUnreachableInst() 
(rv Value) { rv.C = C.LLVMIsAUnreachableInst(v.C); return } func (v Value) IsAUnaryInstruction() (rv Value) { rv.C = C.LLVMIsAUnaryInstruction(v.C); return } func (v Value) IsAAllocaInst() (rv Value) { rv.C = C.LLVMIsAAllocaInst(v.C); return } func (v Value) IsACastInst() (rv Value) { rv.C = C.LLVMIsACastInst(v.C); return } func (v Value) IsABitCastInst() (rv Value) { rv.C = C.LLVMIsABitCastInst(v.C); return } func (v Value) IsAFPExtInst() (rv Value) { rv.C = C.LLVMIsAFPExtInst(v.C); return } func (v Value) IsAFPToSIInst() (rv Value) { rv.C = C.LLVMIsAFPToSIInst(v.C); return } func (v Value) IsAFPToUIInst() (rv Value) { rv.C = C.LLVMIsAFPToUIInst(v.C); return } func (v Value) IsAFPTruncInst() (rv Value) { rv.C = C.LLVMIsAFPTruncInst(v.C); return } func (v Value) IsAIntToPtrInst() (rv Value) { rv.C = C.LLVMIsAIntToPtrInst(v.C); return } func (v Value) IsAPtrToIntInst() (rv Value) { rv.C = C.LLVMIsAPtrToIntInst(v.C); return } func (v Value) IsASExtInst() (rv Value) { rv.C = C.LLVMIsASExtInst(v.C); return } func (v Value) IsASIToFPInst() (rv Value) { rv.C = C.LLVMIsASIToFPInst(v.C); return } func (v Value) IsATruncInst() (rv Value) { rv.C = C.LLVMIsATruncInst(v.C); return } func (v Value) IsAUIToFPInst() (rv Value) { rv.C = C.LLVMIsAUIToFPInst(v.C); return } func (v Value) IsAZExtInst() (rv Value) { rv.C = C.LLVMIsAZExtInst(v.C); return } func (v Value) IsAExtractValueInst() (rv Value) { rv.C = C.LLVMIsAExtractValueInst(v.C); return } func (v Value) IsALoadInst() (rv Value) { rv.C = C.LLVMIsALoadInst(v.C); return } func (v Value) IsAVAArgInst() (rv Value) { rv.C = C.LLVMIsAVAArgInst(v.C); return } // Operations on Uses func (v Value) FirstUse() (u Use) { u.C = C.LLVMGetFirstUse(v.C); return } func (u Use) NextUse() (ru Use) { ru.C = C.LLVMGetNextUse(u.C); return } func (u Use) User() (v Value) { v.C = C.LLVMGetUser(u.C); return } func (u Use) UsedValue() (v Value) { v.C = C.LLVMGetUsedValue(u.C); return } // Operations on Users func (v Value) Operand(i int) (rv Value) 
{ rv.C = C.LLVMGetOperand(v.C, C.unsigned(i)); return } func (v Value) SetOperand(i int, op Value) { C.LLVMSetOperand(v.C, C.unsigned(i), op.C) } func (v Value) OperandsCount() int { return int(C.LLVMGetNumOperands(v.C)) } // Operations on constants of any type func ConstNull(t Type) (v Value) { v.C = C.LLVMConstNull(t.C); return } func ConstAllOnes(t Type) (v Value) { v.C = C.LLVMConstAllOnes(t.C); return } func Undef(t Type) (v Value) { v.C = C.LLVMGetUndef(t.C); return } func (v Value) IsConstant() bool { return C.LLVMIsConstant(v.C) != 0 } func (v Value) IsNull() bool { return C.LLVMIsNull(v.C) != 0 } func (v Value) IsUndef() bool { return C.LLVMIsUndef(v.C) != 0 } func ConstPointerNull(t Type) (v Value) { v.C = C.LLVMConstPointerNull(t.C); return } // Operations on metadata func (c Context) MDString(str string) (v Value) { cstr := C.CString(str) v.C = C.LLVMMDStringInContext(c.C, cstr, C.unsigned(len(str))) C.free(unsafe.Pointer(cstr)) return } func MDString(str string) (v Value) { cstr := C.CString(str) v.C = C.LLVMMDString(cstr, C.unsigned(len(str))) C.free(unsafe.Pointer(cstr)) return } func (c Context) MDNode(vals []Value) (v Value) { ptr, nvals := llvmValueRefs(vals) v.C = C.LLVMMDNodeInContext(c.C, ptr, nvals) return } func MDNode(vals []Value) (v Value) { ptr, nvals := llvmValueRefs(vals) v.C = C.LLVMMDNode(ptr, nvals) return } // Operations on scalar constants func ConstInt(t Type, n uint64, signExtend bool) (v Value) { v.C = C.LLVMConstInt(t.C, C.ulonglong(n), boolToLLVMBool(signExtend)) return } func ConstIntFromString(t Type, str string, radix int) (v Value) { cstr := C.CString(str) v.C = C.LLVMConstIntOfString(t.C, cstr, C.uint8_t(radix)) C.free(unsafe.Pointer(cstr)) return } func ConstFloat(t Type, n float64) (v Value) { v.C = C.LLVMConstReal(t.C, C.double(n)) return } func ConstFloatFromString(t Type, str string) (v Value) { cstr := C.CString(str) v.C = C.LLVMConstRealOfString(t.C, cstr) C.free(unsafe.Pointer(cstr)) return } func (v Value) 
ZExtValue() uint64 { return uint64(C.LLVMConstIntGetZExtValue(v.C)) } func (v Value) SExtValue() int64 { return int64(C.LLVMConstIntGetSExtValue(v.C)) } // Operations on composite constants func (c Context) ConstString(str string, addnull bool) (v Value) { cstr := C.CString(str) v.C = C.LLVMConstStringInContext(c.C, cstr, C.unsigned(len(str)), boolToLLVMBool(!addnull)) C.free(unsafe.Pointer(cstr)) return } func (c Context) ConstStruct(constVals []Value, packed bool) (v Value) { ptr, nvals := llvmValueRefs(constVals) v.C = C.LLVMConstStructInContext(c.C, ptr, nvals, boolToLLVMBool(packed)) return } func ConstString(str string, addnull bool) (v Value) { cstr := C.CString(str) v.C = C.LLVMConstString(cstr, C.unsigned(len(str)), boolToLLVMBool(!addnull)) C.free(unsafe.Pointer(cstr)) return } func ConstArray(t Type, constVals []Value) (v Value) { ptr, nvals := llvmValueRefs(constVals) v.C = C.LLVMConstArray(t.C, ptr, nvals) return } func ConstStruct(constVals []Value, packed bool) (v Value) { ptr, nvals := llvmValueRefs(constVals) v.C = C.LLVMConstStruct(ptr, nvals, boolToLLVMBool(packed)) return } func ConstVector(scalarConstVals []Value, packed bool) (v Value) { ptr, nvals := llvmValueRefs(scalarConstVals) v.C = C.LLVMConstVector(ptr, nvals) return } // Constant expressions func (v Value) Opcode() Opcode { return Opcode(C.LLVMGetConstOpcode(v.C)) } func (v Value) InstructionOpcode() Opcode { return Opcode(C.LLVMGetInstructionOpcode(v.C)) } func AlignOf(t Type) (v Value) { v.C = C.LLVMAlignOf(t.C); return } func SizeOf(t Type) (v Value) { v.C = C.LLVMSizeOf(t.C); return } func ConstNeg(v Value) (rv Value) { rv.C = C.LLVMConstNeg(v.C); return } func ConstNSWNeg(v Value) (rv Value) { rv.C = C.LLVMConstNSWNeg(v.C); return } func ConstNUWNeg(v Value) (rv Value) { rv.C = C.LLVMConstNUWNeg(v.C); return } func ConstFNeg(v Value) (rv Value) { rv.C = C.LLVMConstFNeg(v.C); return } func ConstNot(v Value) (rv Value) { rv.C = C.LLVMConstNot(v.C); return } func ConstAdd(lhs, rhs 
Value) (v Value) { v.C = C.LLVMConstAdd(lhs.C, rhs.C); return } func ConstNSWAdd(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNSWAdd(lhs.C, rhs.C); return } func ConstNUWAdd(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNUWAdd(lhs.C, rhs.C); return } func ConstFAdd(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFAdd(lhs.C, rhs.C); return } func ConstSub(lhs, rhs Value) (v Value) { v.C = C.LLVMConstSub(lhs.C, rhs.C); return } func ConstNSWSub(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNSWSub(lhs.C, rhs.C); return } func ConstNUWSub(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNUWSub(lhs.C, rhs.C); return } func ConstFSub(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFSub(lhs.C, rhs.C); return } func ConstMul(lhs, rhs Value) (v Value) { v.C = C.LLVMConstMul(lhs.C, rhs.C); return } func ConstNSWMul(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNSWMul(lhs.C, rhs.C); return } func ConstNUWMul(lhs, rhs Value) (v Value) { v.C = C.LLVMConstNUWMul(lhs.C, rhs.C); return } func ConstFMul(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFMul(lhs.C, rhs.C); return } func ConstUDiv(lhs, rhs Value) (v Value) { v.C = C.LLVMConstUDiv(lhs.C, rhs.C); return } func ConstSDiv(lhs, rhs Value) (v Value) { v.C = C.LLVMConstSDiv(lhs.C, rhs.C); return } func ConstExactSDiv(lhs, rhs Value) (v Value) { v.C = C.LLVMConstExactSDiv(lhs.C, rhs.C); return } func ConstFDiv(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFDiv(lhs.C, rhs.C); return } func ConstURem(lhs, rhs Value) (v Value) { v.C = C.LLVMConstURem(lhs.C, rhs.C); return } func ConstSRem(lhs, rhs Value) (v Value) { v.C = C.LLVMConstSRem(lhs.C, rhs.C); return } func ConstFRem(lhs, rhs Value) (v Value) { v.C = C.LLVMConstFRem(lhs.C, rhs.C); return } func ConstAnd(lhs, rhs Value) (v Value) { v.C = C.LLVMConstAnd(lhs.C, rhs.C); return } func ConstOr(lhs, rhs Value) (v Value) { v.C = C.LLVMConstOr(lhs.C, rhs.C); return } func ConstXor(lhs, rhs Value) (v Value) { v.C = C.LLVMConstXor(lhs.C, rhs.C); return } func ConstICmp(pred IntPredicate, lhs, rhs 
Value) (v Value) { v.C = C.LLVMConstICmp(C.LLVMIntPredicate(pred), lhs.C, rhs.C) return } func ConstFCmp(pred FloatPredicate, lhs, rhs Value) (v Value) { v.C = C.LLVMConstFCmp(C.LLVMRealPredicate(pred), lhs.C, rhs.C) return } func ConstShl(lhs, rhs Value) (v Value) { v.C = C.LLVMConstShl(lhs.C, rhs.C); return } func ConstLShr(lhs, rhs Value) (v Value) { v.C = C.LLVMConstLShr(lhs.C, rhs.C); return } func ConstAShr(lhs, rhs Value) (v Value) { v.C = C.LLVMConstAShr(lhs.C, rhs.C); return } func ConstGEP(v Value, indices []Value) (rv Value) { ptr, nvals := llvmValueRefs(indices) rv.C = C.LLVMConstGEP(v.C, ptr, nvals) return } func ConstInBoundsGEP(v Value, indices []Value) (rv Value) { ptr, nvals := llvmValueRefs(indices) rv.C = C.LLVMConstInBoundsGEP(v.C, ptr, nvals) return } func ConstTrunc(v Value, t Type) (rv Value) { rv.C = C.LLVMConstTrunc(v.C, t.C); return } func ConstSExt(v Value, t Type) (rv Value) { rv.C = C.LLVMConstSExt(v.C, t.C); return } func ConstZExt(v Value, t Type) (rv Value) { rv.C = C.LLVMConstZExt(v.C, t.C); return } func ConstFPTrunc(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPTrunc(v.C, t.C); return } func ConstFPExt(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPExt(v.C, t.C); return } func ConstUIToFP(v Value, t Type) (rv Value) { rv.C = C.LLVMConstUIToFP(v.C, t.C); return } func ConstSIToFP(v Value, t Type) (rv Value) { rv.C = C.LLVMConstSIToFP(v.C, t.C); return } func ConstFPToUI(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPToUI(v.C, t.C); return } func ConstFPToSI(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPToSI(v.C, t.C); return } func ConstPtrToInt(v Value, t Type) (rv Value) { rv.C = C.LLVMConstPtrToInt(v.C, t.C); return } func ConstIntToPtr(v Value, t Type) (rv Value) { rv.C = C.LLVMConstIntToPtr(v.C, t.C); return } func ConstBitCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstBitCast(v.C, t.C); return } func ConstZExtOrBitCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstZExtOrBitCast(v.C, t.C); return } func 
ConstSExtOrBitCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstSExtOrBitCast(v.C, t.C); return } func ConstTruncOrBitCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstTruncOrBitCast(v.C, t.C) return } func ConstPointerCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstPointerCast(v.C, t.C); return } func ConstIntCast(v Value, t Type, signed bool) (rv Value) { rv.C = C.LLVMConstIntCast(v.C, t.C, boolToLLVMBool(signed)) return } func ConstFPCast(v Value, t Type) (rv Value) { rv.C = C.LLVMConstFPCast(v.C, t.C); return } func ConstSelect(cond, iftrue, iffalse Value) (rv Value) { rv.C = C.LLVMConstSelect(cond.C, iftrue.C, iffalse.C) return } func ConstExtractElement(vec, i Value) (rv Value) { rv.C = C.LLVMConstExtractElement(vec.C, i.C) return } func ConstInsertElement(vec, elem, i Value) (rv Value) { rv.C = C.LLVMConstInsertElement(vec.C, elem.C, i.C) return } func ConstShuffleVector(veca, vecb, mask Value) (rv Value) { rv.C = C.LLVMConstShuffleVector(veca.C, vecb.C, mask.C) return } //TODO //LLVMValueRef LLVMConstExtractValue(LLVMValueRef AggConstant, unsigned *IdxList, // unsigned NumIdx); func ConstExtractValue(agg Value, indices []uint32) (rv Value) { n := len(indices) if n == 0 { panic("one or more indices are required") } ptr := (*C.unsigned)(&indices[0]) rv.C = C.LLVMConstExtractValue(agg.C, ptr, C.unsigned(n)) return } func ConstInsertValue(agg, val Value, indices []uint32) (rv Value) { n := len(indices) if n == 0 { panic("one or more indices are required") } ptr := (*C.unsigned)(&indices[0]) rv.C = C.LLVMConstInsertValue(agg.C, val.C, ptr, C.unsigned(n)) return } func BlockAddress(f Value, bb BasicBlock) (v Value) { v.C = C.LLVMBlockAddress(f.C, bb.C) return } // Operations on global variables, functions, and aliases (globals) func (v Value) GlobalParent() (m Module) { m.C = C.LLVMGetGlobalParent(v.C); return } func (v Value) IsDeclaration() bool { return C.LLVMIsDeclaration(v.C) != 0 } func (v Value) Linkage() Linkage { return 
Linkage(C.LLVMGetLinkage(v.C)) } func (v Value) SetLinkage(l Linkage) { C.LLVMSetLinkage(v.C, C.LLVMLinkage(l)) } func (v Value) Section() string { return C.GoString(C.LLVMGetSection(v.C)) } func (v Value) SetSection(str string) { cstr := C.CString(str) C.LLVMSetSection(v.C, cstr) C.free(unsafe.Pointer(cstr)) } func (v Value) Visibility() Visibility { return Visibility(C.LLVMGetVisibility(v.C)) } func (v Value) SetVisibility(vi Visibility) { C.LLVMSetVisibility(v.C, C.LLVMVisibility(vi)) } func (v Value) Alignment() int { return int(C.LLVMGetAlignment(v.C)) } func (v Value) SetAlignment(a int) { C.LLVMSetAlignment(v.C, C.unsigned(a)) } // Operations on global variables func AddGlobal(m Module, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMAddGlobal(m.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func AddGlobalInAddressSpace(m Module, t Type, name string, addressSpace int) (v Value) { cname := C.CString(name) v.C = C.LLVMAddGlobalInAddressSpace(m.C, t.C, cname, C.unsigned(addressSpace)) C.free(unsafe.Pointer(cname)) return } func (m Module) NamedGlobal(name string) (v Value) { cname := C.CString(name) v.C = C.LLVMGetNamedGlobal(m.C, cname) C.free(unsafe.Pointer(cname)) return } func (m Module) FirstGlobal() (v Value) { v.C = C.LLVMGetFirstGlobal(m.C); return } func (m Module) LastGlobal() (v Value) { v.C = C.LLVMGetLastGlobal(m.C); return } func NextGlobal(v Value) (rv Value) { rv.C = C.LLVMGetNextGlobal(v.C); return } func PrevGlobal(v Value) (rv Value) { rv.C = C.LLVMGetPreviousGlobal(v.C); return } func (v Value) EraseFromParentAsGlobal() { C.LLVMDeleteGlobal(v.C) } func (v Value) Initializer() (rv Value) { rv.C = C.LLVMGetInitializer(v.C); return } func (v Value) SetInitializer(cv Value) { C.LLVMSetInitializer(v.C, cv.C) } func (v Value) IsThreadLocal() bool { return C.LLVMIsThreadLocal(v.C) != 0 } func (v Value) SetThreadLocal(tl bool) { C.LLVMSetThreadLocal(v.C, boolToLLVMBool(tl)) } func (v Value) IsGlobalConstant() bool { 
return C.LLVMIsGlobalConstant(v.C) != 0 } func (v Value) SetGlobalConstant(gc bool) { C.LLVMSetGlobalConstant(v.C, boolToLLVMBool(gc)) } // Operations on aliases func AddAlias(m Module, t Type, aliasee Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMAddAlias(m.C, t.C, aliasee.C, cname) C.free(unsafe.Pointer(cname)) return } // Operations on functions func AddFunction(m Module, name string, ft Type) (v Value) { cname := C.CString(name) v.C = C.LLVMAddFunction(m.C, cname, ft.C) C.free(unsafe.Pointer(cname)) return } func (m Module) NamedFunction(name string) (v Value) { cname := C.CString(name) v.C = C.LLVMGetNamedFunction(m.C, cname) C.free(unsafe.Pointer(cname)) return } func (m Module) FirstFunction() (v Value) { v.C = C.LLVMGetFirstFunction(m.C); return } func (m Module) LastFunction() (v Value) { v.C = C.LLVMGetLastFunction(m.C); return } func NextFunction(v Value) (rv Value) { rv.C = C.LLVMGetNextFunction(v.C); return } func PrevFunction(v Value) (rv Value) { rv.C = C.LLVMGetPreviousFunction(v.C); return } func (v Value) EraseFromParentAsFunction() { C.LLVMDeleteFunction(v.C) } func (v Value) IntrinsicID() int { return int(C.LLVMGetIntrinsicID(v.C)) } func (v Value) FunctionCallConv() CallConv { return CallConv(C.LLVMCallConv(C.LLVMGetFunctionCallConv(v.C))) } func (v Value) SetFunctionCallConv(cc CallConv) { C.LLVMSetFunctionCallConv(v.C, C.unsigned(cc)) } func (v Value) GC() string { return C.GoString(C.LLVMGetGC(v.C)) } func (v Value) SetGC(name string) { cname := C.CString(name) C.LLVMSetGC(v.C, cname) C.free(unsafe.Pointer(cname)) } func (v Value) AddFunctionAttr(a Attribute) { C.LLVMAddFunctionAttr(v.C, C.LLVMAttribute(a)) } func (v Value) FunctionAttr() Attribute { return Attribute(C.LLVMGetFunctionAttr(v.C)) } func (v Value) RemoveFunctionAttr(a Attribute) { C.LLVMRemoveFunctionAttr(v.C, C.LLVMAttribute(a)) } // Operations on parameters func (v Value) ParamsCount() int { return int(C.LLVMCountParams(v.C)) } func (v Value) Params() 
[]Value {
	out := make([]Value, v.ParamsCount())
	if len(out) > 0 {
		C.LLVMGetParams(v.C, llvmValueRefPtr(&out[0]))
	}
	return out
}

// Param returns the i'th parameter of the function v.
func (v Value) Param(i int) (rv Value) { rv.C = C.LLVMGetParam(v.C, C.unsigned(i)); return }

// ParamParent returns the function that the parameter v belongs to.
func (v Value) ParamParent() (rv Value) { rv.C = C.LLVMGetParamParent(v.C); return }

// FirstParam returns the first parameter of the function v.
func (v Value) FirstParam() (rv Value) { rv.C = C.LLVMGetFirstParam(v.C); return }

// LastParam returns the last parameter of the function v.
func (v Value) LastParam() (rv Value) { rv.C = C.LLVMGetLastParam(v.C); return }

// NextParam returns the parameter following v in its function.
func NextParam(v Value) (rv Value) { rv.C = C.LLVMGetNextParam(v.C); return }

// PrevParam returns the parameter preceding v in its function.
func PrevParam(v Value) (rv Value) { rv.C = C.LLVMGetPreviousParam(v.C); return }

// AddAttribute adds the attribute a to the parameter v.
func (v Value) AddAttribute(a Attribute) { C.LLVMAddAttribute(v.C, C.LLVMAttribute(a)) }

// RemoveAttribute removes the attribute a from the parameter v.
func (v Value) RemoveAttribute(a Attribute) { C.LLVMRemoveAttribute(v.C, C.LLVMAttribute(a)) }

// Attribute returns the attributes of the parameter v.
func (v Value) Attribute() Attribute { return Attribute(C.LLVMGetAttribute(v.C)) }

// SetParamAlignment sets the alignment of the parameter v.
func (v Value) SetParamAlignment(align int) { C.LLVMSetParamAlignment(v.C, C.unsigned(align)) }

// Operations on basic blocks

// AsValue returns the basic block viewed as a Value.
func (bb BasicBlock) AsValue() (v Value) { v.C = C.LLVMBasicBlockAsValue(bb.C); return }

// IsBasicBlock reports whether the value is a basic block.
func (v Value) IsBasicBlock() bool { return C.LLVMValueIsBasicBlock(v.C) != 0 }

// AsBasicBlock converts the value to a BasicBlock.
func (v Value) AsBasicBlock() (bb BasicBlock) { bb.C = C.LLVMValueAsBasicBlock(v.C); return }

// Parent returns the function that the basic block belongs to.
func (bb BasicBlock) Parent() (v Value) { v.C = C.LLVMGetBasicBlockParent(bb.C); return }

// BasicBlocksCount returns the number of basic blocks in the function v.
func (v Value) BasicBlocksCount() int { return int(C.LLVMCountBasicBlocks(v.C)) }

// BasicBlocks returns all basic blocks of the function v.
// A guard is required (as in Params above): for a function declaration
// with no body the slice is empty and &out[0] would panic.
func (v Value) BasicBlocks() []BasicBlock {
	out := make([]BasicBlock, v.BasicBlocksCount())
	if len(out) > 0 {
		C.LLVMGetBasicBlocks(v.C, llvmBasicBlockRefPtr(&out[0]))
	}
	return out
}

// FirstBasicBlock returns the first basic block of the function v.
func (v Value) FirstBasicBlock() (bb BasicBlock) { bb.C = C.LLVMGetFirstBasicBlock(v.C); return }

// LastBasicBlock returns the last basic block of the function v.
func (v Value) LastBasicBlock() (bb BasicBlock) { bb.C = C.LLVMGetLastBasicBlock(v.C); return }

// NextBasicBlock returns the basic block following bb in its function.
func NextBasicBlock(bb BasicBlock) (rbb BasicBlock) { rbb.C = C.LLVMGetNextBasicBlock(bb.C); return }

// PrevBasicBlock returns the basic block preceding bb in its function.
func PrevBasicBlock(bb BasicBlock) (rbb BasicBlock) { rbb.C =
C.LLVMGetPreviousBasicBlock(bb.C); return }

// EntryBasicBlock returns the entry basic block of the function v.
func (v Value) EntryBasicBlock() (bb BasicBlock) { bb.C = C.LLVMGetEntryBasicBlock(v.C); return }

// AddBasicBlock appends a new basic block with the given name to the
// end of the function f, in the context c.
func (c Context) AddBasicBlock(f Value, name string) (bb BasicBlock) {
	cname := C.CString(name)
	bb.C = C.LLVMAppendBasicBlockInContext(c.C, f.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}

// InsertBasicBlock creates a new basic block with the given name,
// inserted immediately before ref, in the context c.
func (c Context) InsertBasicBlock(ref BasicBlock, name string) (bb BasicBlock) {
	cname := C.CString(name)
	bb.C = C.LLVMInsertBasicBlockInContext(c.C, ref.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}

// AddBasicBlock appends a new basic block with the given name to the
// end of the function f, in the global context.
func AddBasicBlock(f Value, name string) (bb BasicBlock) {
	cname := C.CString(name)
	bb.C = C.LLVMAppendBasicBlock(f.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}

// InsertBasicBlock creates a new basic block with the given name,
// inserted immediately before ref, in the global context.
func InsertBasicBlock(ref BasicBlock, name string) (bb BasicBlock) {
	cname := C.CString(name)
	bb.C = C.LLVMInsertBasicBlock(ref.C, cname)
	C.free(unsafe.Pointer(cname))
	return
}

// EraseFromParent removes the basic block from its function and
// destroys it.
func (bb BasicBlock) EraseFromParent() { C.LLVMDeleteBasicBlock(bb.C) }

// MoveBefore moves the basic block immediately before pos.
func (bb BasicBlock) MoveBefore(pos BasicBlock) { C.LLVMMoveBasicBlockBefore(bb.C, pos.C) }

// MoveAfter moves the basic block immediately after pos.
func (bb BasicBlock) MoveAfter(pos BasicBlock) { C.LLVMMoveBasicBlockAfter(bb.C, pos.C) }

// Operations on instructions

// InstructionParent returns the basic block containing the instruction v.
func (v Value) InstructionParent() (bb BasicBlock) { bb.C = C.LLVMGetInstructionParent(v.C); return }

// FirstInstruction returns the first instruction of the basic block.
func (bb BasicBlock) FirstInstruction() (v Value) { v.C = C.LLVMGetFirstInstruction(bb.C); return }

// LastInstruction returns the last instruction of the basic block.
func (bb BasicBlock) LastInstruction() (v Value) { v.C = C.LLVMGetLastInstruction(bb.C); return }

// NextInstruction returns the instruction following v in its block.
func NextInstruction(v Value) (rv Value) { rv.C = C.LLVMGetNextInstruction(v.C); return }

// PrevInstruction returns the instruction preceding v in its block.
func PrevInstruction(v Value) (rv Value) { rv.C = C.LLVMGetPreviousInstruction(v.C); return }

// Operations on call sites

// SetInstructionCallConv sets the calling convention of the call
// instruction v.
func (v Value) SetInstructionCallConv(cc CallConv) {
	C.LLVMSetInstructionCallConv(v.C, C.unsigned(cc))
}

// InstructionCallConv returns the calling convention of the call
// instruction v.
func (v Value) InstructionCallConv() CallConv {
	return CallConv(C.LLVMCallConv(C.LLVMGetInstructionCallConv(v.C)))
}

// AddInstrAttribute adds the attribute a to the i'th call-site operand.
func (v Value) AddInstrAttribute(i int, a Attribute) {
	C.LLVMAddInstrAttribute(v.C, C.unsigned(i), C.LLVMAttribute(a))
}

// RemoveInstrAttribute removes the attribute a from the i'th call-site
// operand.
func (v Value) RemoveInstrAttribute(i int, a Attribute) {
	C.LLVMRemoveInstrAttribute(v.C, C.unsigned(i), C.LLVMAttribute(a))
}

// SetInstrParamAlignment sets the alignment of the i'th call-site
// operand.
func (v Value) SetInstrParamAlignment(i int, align int) {
	C.LLVMSetInstrParamAlignment(v.C, C.unsigned(i), C.unsigned(align))
}

// Operations on call instructions (only)

// IsTailCall reports whether the call instruction v is a tail call.
func (v Value) IsTailCall() bool { return C.LLVMIsTailCall(v.C) != 0 }

// SetTailCall marks (or unmarks) the call instruction v as a tail call.
func (v Value) SetTailCall(is bool) { C.LLVMSetTailCall(v.C, boolToLLVMBool(is)) }

// Operations on phi nodes

// AddIncoming adds incoming (value, predecessor-block) pairs to the phi
// node v. vals and blocks must have equal length; a mismatch would make
// the C side read past the end of blocks, so it is rejected up front.
// Adding zero pairs is a no-op.
func (v Value) AddIncoming(vals []Value, blocks []BasicBlock) {
	if len(vals) != len(blocks) {
		panic("number of values and blocks must match")
	}
	if len(vals) == 0 {
		return
	}
	ptr, nvals := llvmValueRefs(vals)
	C.LLVMAddIncoming(v.C, ptr, llvmBasicBlockRefPtr(&blocks[0]), nvals)
}

// IncomingCount returns the number of incoming edges of the phi node v.
func (v Value) IncomingCount() int { return int(C.LLVMCountIncoming(v.C)) }

// IncomingValue returns the i'th incoming value of the phi node v.
func (v Value) IncomingValue(i int) (rv Value) {
	rv.C = C.LLVMGetIncomingValue(v.C, C.unsigned(i))
	return
}

// IncomingBlock returns the i'th incoming basic block of the phi node v.
func (v Value) IncomingBlock(i int) (bb BasicBlock) {
	bb.C = C.LLVMGetIncomingBlock(v.C, C.unsigned(i))
	return
}

//-------------------------------------------------------------------------
// llvm.Builder
//-------------------------------------------------------------------------

// An instruction builder represents a point within a basic block, and is the
// exclusive means of building instructions using the C interface.
func (c Context) NewBuilder() (b Builder) { b.C = C.LLVMCreateBuilderInContext(c.C); return } func NewBuilder() (b Builder) { b.C = C.LLVMCreateBuilder(); return } func (b Builder) SetInsertPoint(block BasicBlock, instr Value) { C.LLVMPositionBuilder(b.C, block.C, instr.C) } func (b Builder) SetInsertPointBefore(instr Value) { C.LLVMPositionBuilderBefore(b.C, instr.C) } func (b Builder) SetInsertPointAtEnd(block BasicBlock) { C.LLVMPositionBuilderAtEnd(b.C, block.C) } func (b Builder) GetInsertBlock() (bb BasicBlock) { bb.C = C.LLVMGetInsertBlock(b.C); return } func (b Builder) ClearInsertionPoint() { C.LLVMClearInsertionPosition(b.C) } func (b Builder) Insert(instr Value) { C.LLVMInsertIntoBuilder(b.C, instr.C) } func (b Builder) InsertWithName(instr Value, name string) { cname := C.CString(name) C.LLVMInsertIntoBuilderWithName(b.C, instr.C, cname) C.free(unsafe.Pointer(cname)) } func (b Builder) Dispose() { C.LLVMDisposeBuilder(b.C) } // Metadata func (b Builder) SetCurrentDebugLocation(v Value) { C.LLVMSetCurrentDebugLocation(b.C, v.C) } func (b Builder) CurrentDebugLocation() (v Value) { v.C = C.LLVMGetCurrentDebugLocation(b.C); return } func (b Builder) SetInstDebugLocation(v Value) { C.LLVMSetCurrentDebugLocation(b.C, v.C) } // Terminators func (b Builder) CreateRetVoid() (rv Value) { rv.C = C.LLVMBuildRetVoid(b.C); return } func (b Builder) CreateRet(v Value) (rv Value) { rv.C = C.LLVMBuildRet(b.C, v.C); return } func (b Builder) CreateAggregateRet(vs []Value) (rv Value) { ptr, nvals := llvmValueRefs(vs) rv.C = C.LLVMBuildAggregateRet(b.C, ptr, nvals) return } func (b Builder) CreateBr(bb BasicBlock) (rv Value) { rv.C = C.LLVMBuildBr(b.C, bb.C); return } func (b Builder) CreateCondBr(ifv Value, thenb, elseb BasicBlock) (rv Value) { rv.C = C.LLVMBuildCondBr(b.C, ifv.C, thenb.C, elseb.C) return } func (b Builder) CreateSwitch(v Value, elseb BasicBlock, numCases int) (rv Value) { rv.C = C.LLVMBuildSwitch(b.C, v.C, elseb.C, C.unsigned(numCases)) return } func (b 
Builder) CreateIndirectBr(addr Value, numDests int) (rv Value) { rv.C = C.LLVMBuildIndirectBr(b.C, addr.C, C.unsigned(numDests)) return } func (b Builder) CreateInvoke(fn Value, args []Value, then, catch BasicBlock, name string) (rv Value) { cname := C.CString(name) ptr, nvals := llvmValueRefs(args) rv.C = C.LLVMBuildInvoke(b.C, fn.C, ptr, nvals, then.C, catch.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateUnreachable() (rv Value) { rv.C = C.LLVMBuildUnreachable(b.C); return } // Add a case to the switch instruction func (v Value) AddCase(on Value, dest BasicBlock) { C.LLVMAddCase(v.C, on.C, dest.C) } // Add a destination to the indirectbr instruction func (v Value) AddDest(dest BasicBlock) { C.LLVMAddDestination(v.C, dest.C) } // Arithmetic func (b Builder) CreateAdd(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildAdd(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNSWAdd(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildNSWAdd(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNUWAdd(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildNUWAdd(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFAdd(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFAdd(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSub(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSub(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNSWSub(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildNSWSub(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNUWSub(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildNUWSub(b.C, 
lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFSub(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFSub(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateMul(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildMul(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNSWMul(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildNSWMul(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNUWMul(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildNUWMul(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFMul(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFMul(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateUDiv(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildUDiv(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSDiv(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSDiv(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateExactSDiv(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildExactSDiv(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFDiv(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFDiv(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateURem(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildURem(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSRem(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = 
C.LLVMBuildSRem(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFRem(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFRem(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateShl(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildShl(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateLShr(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildLShr(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateAShr(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildAShr(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateAnd(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildAnd(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateOr(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildOr(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateXor(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildXor(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateBinOp(op Opcode, lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildBinOp(b.C, C.LLVMOpcode(op), lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNeg(v Value, name string) (rv Value) { cname := C.CString(name) rv.C = C.LLVMBuildNeg(b.C, v.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNSWNeg(v Value, name string) (rv Value) { cname := C.CString(name) rv.C = C.LLVMBuildNSWNeg(b.C, v.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNUWNeg(v Value, name string) (rv Value) { cname := C.CString(name) rv.C = 
C.LLVMBuildNUWNeg(b.C, v.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFNeg(v Value, name string) (rv Value) { cname := C.CString(name) rv.C = C.LLVMBuildFNeg(b.C, v.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateNot(v Value, name string) (rv Value) { cname := C.CString(name) rv.C = C.LLVMBuildNot(b.C, v.C, cname) C.free(unsafe.Pointer(cname)) return } // Memory func (b Builder) CreateMalloc(t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildMalloc(b.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateArrayMalloc(t Type, val Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildArrayMalloc(b.C, t.C, val.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateAlloca(t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildAlloca(b.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateArrayAlloca(t Type, val Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildArrayAlloca(b.C, t.C, val.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFree(p Value) (v Value) { v.C = C.LLVMBuildFree(b.C, p.C) return } func (b Builder) CreateLoad(p Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildLoad(b.C, p.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateStore(val Value, p Value) (v Value) { v.C = C.LLVMBuildStore(b.C, val.C, p.C) return } func (b Builder) CreateGEP(p Value, indices []Value, name string) (v Value) { cname := C.CString(name) ptr, nvals := llvmValueRefs(indices) v.C = C.LLVMBuildGEP(b.C, p.C, ptr, nvals, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateInBoundsGEP(p Value, indices []Value, name string) (v Value) { cname := C.CString(name) ptr, nvals := llvmValueRefs(indices) v.C = C.LLVMBuildInBoundsGEP(b.C, p.C, ptr, nvals, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) 
CreateStructGEP(p Value, i int, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildStructGEP(b.C, p.C, C.unsigned(i), cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateGlobalString(str, name string) (v Value) { cstr := C.CString(str) cname := C.CString(name) v.C = C.LLVMBuildGlobalString(b.C, cstr, cname) C.free(unsafe.Pointer(cname)) C.free(unsafe.Pointer(cstr)) return } func (b Builder) CreateGlobalStringPtr(str, name string) (v Value) { cstr := C.CString(str) cname := C.CString(name) v.C = C.LLVMBuildGlobalStringPtr(b.C, cstr, cname) C.free(unsafe.Pointer(cname)) C.free(unsafe.Pointer(cstr)) return } // Casts func (b Builder) CreateTrunc(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildTrunc(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateZExt(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildZExt(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSExt(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSExt(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPToUI(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPToUI(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPToSI(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPToSI(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateUIToFP(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildUIToFP(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSIToFP(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSIToFP(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPTrunc(val Value, t Type, 
name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPTrunc(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPExt(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPExt(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreatePtrToInt(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildPtrToInt(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateIntToPtr(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildIntToPtr(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateBitCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildBitCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateZExtOrBitCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildZExtOrBitCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSExtOrBitCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSExtOrBitCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateTruncOrBitCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildTruncOrBitCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateCast(val Value, op Opcode, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildCast(b.C, C.LLVMOpcode(op), val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } // func (b Builder) CreatePointerCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildPointerCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateIntCast(val Value, t Type, name string) (v Value) { cname := 
C.CString(name) v.C = C.LLVMBuildIntCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFPCast(val Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFPCast(b.C, val.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } // Comparisons func (b Builder) CreateICmp(pred IntPredicate, lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildICmp(b.C, C.LLVMIntPredicate(pred), lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateFCmp(pred FloatPredicate, lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildFCmp(b.C, C.LLVMRealPredicate(pred), lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } // Miscellaneous instructions func (b Builder) CreatePHI(t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildPhi(b.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateCall(fn Value, args []Value, name string) (v Value) { cname := C.CString(name) ptr, nvals := llvmValueRefs(args) v.C = C.LLVMBuildCall(b.C, fn.C, ptr, nvals, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateSelect(ifv, thenv, elsev Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildSelect(b.C, ifv.C, thenv.C, elsev.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateVAArg(list Value, t Type, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildVAArg(b.C, list.C, t.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateExtractElement(vec, i Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildExtractElement(b.C, vec.C, i.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateInsertElement(vec, elt, i Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildInsertElement(b.C, vec.C, elt.C, i.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) 
CreateShuffleVector(v1, v2, mask Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildShuffleVector(b.C, v1.C, v2.C, mask.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateExtractValue(agg Value, i int, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildExtractValue(b.C, agg.C, C.unsigned(i), cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateInsertValue(agg, elt Value, i int, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildInsertValue(b.C, agg.C, elt.C, C.unsigned(i), cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateIsNull(val Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildIsNull(b.C, val.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreateIsNotNull(val Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildIsNotNull(b.C, val.C, cname) C.free(unsafe.Pointer(cname)) return } func (b Builder) CreatePtrDiff(lhs, rhs Value, name string) (v Value) { cname := C.CString(name) v.C = C.LLVMBuildPtrDiff(b.C, lhs.C, rhs.C, cname) C.free(unsafe.Pointer(cname)) return } //------------------------------------------------------------------------- // llvm.ModuleProvider //------------------------------------------------------------------------- // Changes the type of M so it can be passed to FunctionPassManagers and the // JIT. They take ModuleProviders for historical reasons. func NewModuleProviderForModule(m Module) (mp ModuleProvider) { mp.C = C.LLVMCreateModuleProviderForExistingModule(m.C) return } // Destroys the module M. 
func (mp ModuleProvider) Dispose() { C.LLVMDisposeModuleProvider(mp.C) } //------------------------------------------------------------------------- // llvm.MemoryBuffer //------------------------------------------------------------------------- func NewMemoryBufferFromFile(path string) (b MemoryBuffer, err error) { var cmsg *C.char cpath := C.CString(path) fail := C.LLVMCreateMemoryBufferWithContentsOfFile(cpath, &b.C, &cmsg) if fail != 0 { b.C = nil err = errors.New(C.GoString(cmsg)) C.LLVMDisposeMessage(cmsg) } else { err = nil } C.free(unsafe.Pointer(cpath)) return } func NewMemoryBufferFromStdin() (b MemoryBuffer, err error) { var cmsg *C.char fail := C.LLVMCreateMemoryBufferWithSTDIN(&b.C, &cmsg) if fail != 0 { b.C = nil err = errors.New(C.GoString(cmsg)) C.LLVMDisposeMessage(cmsg) } else { err = nil } return } func (b MemoryBuffer) Dispose() { C.LLVMDisposeMemoryBuffer(b.C) } //------------------------------------------------------------------------- // llvm.PassManager //------------------------------------------------------------------------- // Constructs a new whole-module pass pipeline. This type of pipeline is // suitable for link-time optimization and whole-module transformations. // See llvm::PassManager::PassManager. func NewPassManager() (pm PassManager) { pm.C = C.LLVMCreatePassManager(); return } // Constructs a new function-by-function pass pipeline over the module // provider. It does not take ownership of the module provider. This type of // pipeline is suitable for code generation and JIT compilation tasks. // See llvm::FunctionPassManager::FunctionPassManager. func NewFunctionPassManagerForModule(m Module) (pm PassManager) { pm.C = C.LLVMCreateFunctionPassManagerForModule(m.C) return } // Deprecated: Use LLVMCreateFunctionPassManagerForModule instead. 
//LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef MP); //XXX: don't port this // Initializes, executes on the provided module, and finalizes all of the // passes scheduled in the pass manager. Returns 1 if any of the passes // modified the module, 0 otherwise. See llvm::PassManager::run(Module&). func (pm PassManager) Run(m Module) bool { return C.LLVMRunPassManager(pm.C, m.C) != 0 } // Initializes all of the function passes scheduled in the function pass // manager. Returns 1 if any of the passes modified the module, 0 otherwise. // See llvm::FunctionPassManager::doInitialization. func (pm PassManager) InitializeFunc() bool { return C.LLVMInitializeFunctionPassManager(pm.C) != 0 } // Executes all of the function passes scheduled in the function pass manager // on the provided function. Returns 1 if any of the passes modified the // function, false otherwise. // See llvm::FunctionPassManager::run(Function&). func (pm PassManager) RunFunc(f Value) bool { return C.LLVMRunFunctionPassManager(pm.C, f.C) != 0 } // Finalizes all of the function passes scheduled in in the function pass // manager. Returns 1 if any of the passes modified the module, 0 otherwise. // See llvm::FunctionPassManager::doFinalization. func (pm PassManager) FinalizeFunc() bool { return C.LLVMFinalizeFunctionPassManager(pm.C) != 0 } // Frees the memory of a pass pipeline. For function pipelines, does not free // the module provider. // See llvm::PassManagerBase::~PassManagerBase. func (pm PassManager) Dispose() { C.LLVMDisposePassManager(pm.C) } // vim: set ft=go:
package orm import ( "context" "database/sql" "fmt" "os" "github.com/inconshreveable/log15" "github.com/pkg/errors" "github.com/rbastic/dyndao/object" "github.com/rbastic/dyndao/schema" ) // NOTE: For foreign key filling, we do not check to see if there are conflicts // with regards to the uniqueness of primary key names. func pkQueryValsFromKV(obj *object.Object, sch *schema.Schema, parentTableName string) (map[string]interface{}, error) { qv := make(map[string]interface{}) schemaTable := sch.GetTable(parentTableName) if schemaTable == nil { return nil, fmt.Errorf("pkQueryValsFromKV: no schema table for table named %s", parentTableName) } schemaPrimary := schemaTable.Primary for fName, field := range schemaTable.Fields { if field.IsIdentity || field.IsForeignKey || field.Name == schemaPrimary { qv[fName] = obj.Get(fName) } } return qv, nil } func (o ORM) recurseAndSave(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { rowsAff, err := o.SaveObject(ctx, tx, obj) if err != nil { return 0, err } table := o.s.GetTable(obj.Type) pkVal := obj.Get(table.Primary) for _, v := range obj.Children { for _, childObj := range v { // set the primary key in the child object, if it exists in the child object's table childTable, ok := o.s.Tables[childObj.Type] if !ok { return 0, fmt.Errorf("recurseAndSave: Unknown child object type %s for parent type %s", childObj.Type, obj.Type) } // TODO: support propagation of additional primary keys that are // saved from previous recursive saves // ... check if the child schema table contains // the parent's primary key field as a name _, ok = childTable.Fields[table.Primary] if ok { // set in the child object if the table contains the primary childObj.Set(table.Primary, pkVal) } aff, err := o.recurseAndSave(ctx, tx, childObj) if err != nil { return rowsAff + aff, err } } } return rowsAff, err } // SaveAllInsideTx will attempt to save an entire nested object structure inside of a single transaction. 
func (o ORM) SaveAllInsideTx(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { // TODO: Review this code for how it uses transactions / rollbacks. rowsAff, err := o.recurseAndSave(ctx, tx, obj) if err != nil { err2 := tx.Rollback() if err2 != nil { // TODO: Not sure if this wrap is right. return 0, errors.Wrap(err, err2.Error()) } return 0, err } return rowsAff, nil } // SaveAll will attempt to save an entire nested object structure inside of a single transaction. // It begins the transaction, attempts to recursively save the object and all of it's children, // and any of the children's children, and then will finally rollback/commit as necessary. func (o ORM) SaveAll(ctx context.Context, obj *object.Object) (int64, error) { tx, err := o.RawConn.BeginTx(ctx, nil) if err != nil { return 0, err } rowsAff, err := o.SaveAllInsideTx(ctx, tx, obj) if err != nil { rollErr := tx.Rollback() if rollErr != nil { // TODO: Not sure if this wrap is right. return 0, errors.Wrap(err, rollErr.Error()) } return 0, err } err = tx.Commit() if err != nil { rollErr := tx.Rollback() if rollErr != nil { // TODO: Not sure if this wrap is right. return 0, errors.Wrap(err, rollErr.Error()) } return 0, err } return rowsAff, nil } // SaveObjectButErrorIfInsert function will UPDATE a record and error if it // appears that an INSERT should have been performed. This could be necessary in // situations where an INSERT would compromise the integrity of the data. If // given a transaction, it will use that to attempt to insert the data. 
func (o ORM) SaveObjectButErrorIfInsert(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { objTable := o.s.GetTable(obj.Type) if objTable == nil { return 0, errors.New("SaveObjectButErrorIfInsert: unknown object table " + obj.Type) } if obj.GetSaved() { return 0, nil } fieldMap := objTable.Fields pk := objTable.Primary if pk == "" { return 0, errors.New("SaveObjectButErrorIfInsert: empty primary key for " + obj.Type) } f := fieldMap[pk] if f == nil { return 0, errors.New("SaveObjectButErrorIfInsert: empty field " + pk + " for " + obj.Type) } // Check the primary key to see if we should insert or update _, ok := obj.KV[f.Name] if !ok { return 0, fmt.Errorf("SaveObjectButErrorIfInsert: Expected to perform Update on obj: %v", obj) } return o.Update(ctx, tx, obj) } // SaveObject function will INSERT or UPDATE a record. It does not attempt to // save any of the children. If given a transaction, it will use that to // attempt to insert the data. func (o ORM) SaveObject(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { objTable := o.s.GetTable(obj.Type) if objTable == nil { return 0, errors.New("SaveObject: unknown object table " + obj.Type) } if obj.GetSaved() { return 0, nil } fieldMap := objTable.Fields pk := objTable.Primary if pk == "" { return 0, errors.New("SaveObject: empty primary key for " + obj.Type) } f := fieldMap[pk] if f == nil { return 0, errors.New("SaveObject: empty field " + pk + " for " + obj.Type) } // Check the primary key to see if we should insert or update _, ok := obj.KV[f.Name] if !ok { return o.Insert(ctx, tx, obj) } return o.Update(ctx, tx, obj) } func stmtFromDbOrTx(ctx context.Context, o ORM, tx *sql.Tx, sqlStr string) (*sql.Stmt, error) { var stmt *sql.Stmt var err error if tx != nil { stmt, err = tx.PrepareContext(ctx, sqlStr) } else { stmt, err = o.RawConn.PrepareContext(ctx, sqlStr) } return stmt, err } // Insert function will INSERT a record, given an optional transaction and an object. 
// It returns the number of rows affected (int64) and any error that may have occurred. func (o ORM) Insert(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { objTable := o.s.GetTable(obj.Type) if objTable == nil { if os.Getenv("DEBUG_INSERT") != "" { log15.Info("orm/save error", "error", "thing was unknown") } return 0, errors.New("Insert: unknown object table " + obj.Type) } sqlStr, bindArgs, err := o.sqlGen.BindingInsert(o.s, obj.Type, obj.KV) if err != nil { if os.Getenv("DEBUG_INSERT") != "" { log15.Info("orm/save error", "error", err) } return 0, err } if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("Insert/sqlStr=", sqlStr, "bindArgs=", bindArgs) } // FIXME: Possible bug in rana ora.v4? I wouldn't have expected that I'd // have to append a parameter like this, based on reading the code. if !o.sqlGen.CallerSuppliesPrimaryKey() { if o.sqlGen.FixLastInsertIDbug() { var lastID int64 bindArgs = append(bindArgs, &lastID) } } stmt, err := stmtFromDbOrTx(ctx, o, tx, sqlStr) if err != nil { if os.Getenv("DEBUG_INSERT") != "" { log15.Info("orm/save error", "error", err) } return 0, err } defer func() { err := stmt.Close() if err != nil { fmt.Println(err) // TODO: logging implementation } }() res, err := stmt.ExecContext(ctx, bindArgs...) if err != nil { if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("orm/save error", err) } return 0, errors.Wrap(err, "Insert/ExecContext") } // If we are not expecting the caller to supply the primary key, // then we should not try to capture the last value (for example, // using LAST_INSERT_ID() with MySQL..) // TODO: Should CallerSuppliesPrimaryKey be per-table? 
if !o.sqlGen.CallerSuppliesPrimaryKey() { newID, err := res.LastInsertId() if err != nil { if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("orm/save error", err) } return 0, err } if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("DEBUG Insert received newID=", newID) } obj.Set(objTable.Primary, newID) // Set the new primary key in the object } rowsAff, err := res.RowsAffected() if err != nil { if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("orm/save error", err) } return 0, err } obj.SetSaved(true) // Note that the object has been recently saved obj.ResetChangedFields() // Reset the 'changed fields', if any return rowsAff, nil } // Update function will UPDATE a record ... func (o ORM) Update(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { sqlStr, bindArgs, bindWhere, err := o.sqlGen.BindingUpdate(o.s, obj) if err != nil { if os.Getenv("DEBUG_UPDATE") != "" { fmt.Println("Update/sqlStr, err=", err) } return 0, err } if os.Getenv("DEBUG_UPDATE") != "" { fmt.Println("Update/sqlStr=", sqlStr, "bindArgs=", bindArgs, "bindWhere=", bindWhere) } stmt, err := stmtFromDbOrTx(ctx, o, tx, sqlStr) if err != nil { return 0, err } defer func() { err := stmt.Close() if err != nil { fmt.Println(err) // TODO logging implementation } }() allBind := append(bindArgs, bindWhere...) res, err := stmt.ExecContext(ctx, allBind...) if err != nil { return 0, errors.Wrap(err, "Update") } rowsAff, err := res.RowsAffected() if err != nil { return 0, err } obj.SetSaved(true) // Note that the object has been recently saved obj.ResetChangedFields() // Reset the 'changed fields', if any return rowsAff, nil } add a few comments package orm import ( "context" "database/sql" "fmt" "os" "github.com/inconshreveable/log15" "github.com/pkg/errors" "github.com/rbastic/dyndao/object" "github.com/rbastic/dyndao/schema" ) // NOTE: For foreign key filling, we do not check to see if there are conflicts // with regards to the uniqueness of primary key names. 
func pkQueryValsFromKV(obj *object.Object, sch *schema.Schema, parentTableName string) (map[string]interface{}, error) { qv := make(map[string]interface{}) schemaTable := sch.GetTable(parentTableName) if schemaTable == nil { return nil, fmt.Errorf("pkQueryValsFromKV: no schema table for table named %s", parentTableName) } schemaPrimary := schemaTable.Primary for fName, field := range schemaTable.Fields { if field.IsIdentity || field.IsForeignKey || field.Name == schemaPrimary { qv[fName] = obj.Get(fName) } } return qv, nil } func (o ORM) recurseAndSave(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { rowsAff, err := o.SaveObject(ctx, tx, obj) if err != nil { return 0, err } table := o.s.GetTable(obj.Type) pkVal := obj.Get(table.Primary) for _, v := range obj.Children { for _, childObj := range v { // set the primary key in the child object, if it exists in the child object's table childTable, ok := o.s.Tables[childObj.Type] if !ok { return 0, fmt.Errorf("recurseAndSave: Unknown child object type %s for parent type %s", childObj.Type, obj.Type) } // TODO: support propagation of additional primary keys that are // saved from previous recursive saves // ... check if the child schema table contains // the parent's primary key field as a name _, ok = childTable.Fields[table.Primary] if ok { // set in the child object if the table contains the primary childObj.Set(table.Primary, pkVal) } aff, err := o.recurseAndSave(ctx, tx, childObj) if err != nil { return rowsAff + aff, err } } } return rowsAff, err } // SaveAllInsideTx will attempt to save an entire nested object structure inside of a single transaction. func (o ORM) SaveAllInsideTx(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { // TODO: Review this code for how it uses transactions / rollbacks. rowsAff, err := o.recurseAndSave(ctx, tx, obj) if err != nil { err2 := tx.Rollback() if err2 != nil { // TODO: Not sure if this wrap is right. 
return 0, errors.Wrap(err, err2.Error()) } return 0, err } return rowsAff, nil } // SaveAll will attempt to save an entire nested object structure inside of a single transaction. // It begins the transaction, attempts to recursively save the object and all of it's children, // and any of the children's children, and then will finally rollback/commit as necessary. func (o ORM) SaveAll(ctx context.Context, obj *object.Object) (int64, error) { tx, err := o.RawConn.BeginTx(ctx, nil) if err != nil { return 0, err } rowsAff, err := o.SaveAllInsideTx(ctx, tx, obj) if err != nil { rollErr := tx.Rollback() if rollErr != nil { // TODO: Not sure if this wrap is right. return 0, errors.Wrap(err, rollErr.Error()) } return 0, err } err = tx.Commit() if err != nil { rollErr := tx.Rollback() if rollErr != nil { // TODO: Not sure if this wrap is right. return 0, errors.Wrap(err, rollErr.Error()) } return 0, err } return rowsAff, nil } // SaveObjectButErrorIfInsert function will UPDATE a record and error if it // appears that an INSERT should have been performed. This could be necessary in // situations where an INSERT would compromise the integrity of the data. If // given a transaction, it will use that to attempt to insert the data. 
func (o ORM) SaveObjectButErrorIfInsert(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { objTable := o.s.GetTable(obj.Type) // skip if object has invalid type if objTable == nil { return 0, errors.New("SaveObjectButErrorIfInsert: unknown object table " + obj.Type) } // skip objects that are saved if obj.GetSaved() { return 0, nil } // ensure we have a primary key pk := objTable.Primary if pk == "" { return 0, errors.New("SaveObjectButErrorIfInsert: empty primary key for " + obj.Type) } // ensure the primary key has a field config fieldMap := objTable.Fields f := fieldMap[pk] if f == nil { return 0, errors.New("SaveObjectButErrorIfInsert: empty field " + pk + " for " + obj.Type) } // Check the primary key to see if we should insert or update _, ok := obj.KV[f.Name] if !ok { return 0, fmt.Errorf("SaveObjectButErrorIfInsert: Expected to perform Update on obj: %v", obj) } return o.Update(ctx, tx, obj) } // SaveObject function will INSERT or UPDATE a record. It does not attempt to // save any of the children. If given a transaction, it will use that to // attempt to insert the data. 
func (o ORM) SaveObject(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { objTable := o.s.GetTable(obj.Type) // skip if object has invalid type if objTable == nil { return 0, errors.New("SaveObject: unknown object table " + obj.Type) } // skip if object is saved if obj.GetSaved() { return 0, nil } // retrieve primary key value pk := objTable.Primary if pk == "" { return 0, errors.New("SaveObject: empty primary key for " + obj.Type) } // skip if primary key has no field configuration in table schema fieldMap := objTable.Fields f := fieldMap[pk] if f == nil { return 0, errors.New("SaveObject: empty field " + pk + " for " + obj.Type) } // Check the primary key to see if we should insert or update _, ok := obj.KV[f.Name] if !ok { return o.Insert(ctx, tx, obj) } return o.Update(ctx, tx, obj) } // use transaction if needed, otherwise just execute a non-transactionalized operation func stmtFromDbOrTx(ctx context.Context, o ORM, tx *sql.Tx, sqlStr string) (*sql.Stmt, error) { var stmt *sql.Stmt var err error if tx != nil { stmt, err = tx.PrepareContext(ctx, sqlStr) } else { stmt, err = o.RawConn.PrepareContext(ctx, sqlStr) } return stmt, err } // Insert function will INSERT a record, given an optional transaction and an object. // It returns the number of rows affected (int64) and any error that may have occurred. func (o ORM) Insert(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { objTable := o.s.GetTable(obj.Type) if objTable == nil { if os.Getenv("DEBUG_INSERT") != "" { log15.Info("orm/save error", "error", "thing was unknown") } return 0, errors.New("Insert: unknown object table " + obj.Type) } sqlStr, bindArgs, err := o.sqlGen.BindingInsert(o.s, obj.Type, obj.KV) if err != nil { if os.Getenv("DEBUG_INSERT") != "" { log15.Info("orm/save error", "error", err) } return 0, err } if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("Insert/sqlStr=", sqlStr, "bindArgs=", bindArgs) } // FIXME: Possible bug in rana ora.v4? 
I wouldn't have expected that I'd // have to append a parameter like this, based on reading the code. if !o.sqlGen.CallerSuppliesPrimaryKey() { if o.sqlGen.FixLastInsertIDbug() { var lastID int64 bindArgs = append(bindArgs, &lastID) } } stmt, err := stmtFromDbOrTx(ctx, o, tx, sqlStr) if err != nil { if os.Getenv("DEBUG_INSERT") != "" { log15.Info("orm/save error", "error", err) } return 0, err } defer func() { err := stmt.Close() if err != nil { fmt.Println(err) // TODO: logging implementation } }() res, err := stmt.ExecContext(ctx, bindArgs...) if err != nil { if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("orm/save error", err) } return 0, errors.Wrap(err, "Insert/ExecContext") } // If we are not expecting the caller to supply the primary key, // then we should not try to capture the last value (for example, // using LAST_INSERT_ID() with MySQL..) // TODO: Should CallerSuppliesPrimaryKey be per-table? if !o.sqlGen.CallerSuppliesPrimaryKey() { newID, err := res.LastInsertId() if err != nil { if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("orm/save error", err) } return 0, err } if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("DEBUG Insert received newID=", newID) } obj.Set(objTable.Primary, newID) // Set the new primary key in the object } rowsAff, err := res.RowsAffected() if err != nil { if os.Getenv("DEBUG_INSERT") != "" { fmt.Println("orm/save error", err) } return 0, err } obj.SetSaved(true) // Note that the object has been recently saved obj.ResetChangedFields() // Reset the 'changed fields', if any return rowsAff, nil } // Update function will UPDATE a record ... 
func (o ORM) Update(ctx context.Context, tx *sql.Tx, obj *object.Object) (int64, error) { sqlStr, bindArgs, bindWhere, err := o.sqlGen.BindingUpdate(o.s, obj) if err != nil { if os.Getenv("DEBUG_UPDATE") != "" { fmt.Println("Update/sqlStr, err=", err) } return 0, err } if os.Getenv("DEBUG_UPDATE") != "" { fmt.Println("Update/sqlStr=", sqlStr, "bindArgs=", bindArgs, "bindWhere=", bindWhere) } stmt, err := stmtFromDbOrTx(ctx, o, tx, sqlStr) if err != nil { return 0, err } defer func() { err := stmt.Close() if err != nil { fmt.Println(err) // TODO logging implementation } }() allBind := append(bindArgs, bindWhere...) res, err := stmt.ExecContext(ctx, allBind...) if err != nil { return 0, errors.Wrap(err, "Update") } rowsAff, err := res.RowsAffected() if err != nil { return 0, err } obj.SetSaved(true) // Note that the object has been recently saved obj.ResetChangedFields() // Reset the 'changed fields', if any return rowsAff, nil }
// Package pagination is usually used to compute the web pagination. package pagination import "math" // Pagination is a struct used to get a pagination result. type Pagination struct { // The number of the current page Current int // The number of the elements in every page Limit int // The total number of the pages Total int // The number of the first page in the page group. Start int // The number of the last page in the page group. End int // The number of the previous page outside the page group. // It's 0 if there is no previous page. Prev int // The number of the next page outside the page group. // It's 0 if there is no next page. Next int // Whether there is the previous page. HasPrev bool // Whether there is the next page. HasNext bool // The page group, which contains all the produced pages. Pages []Page } // Page stands for a page. type Page struct { // Whether this page is the current page. Active bool // The number of this page. Number int } // NewPagination creates a new pagination. // // total is the total number of all the elements. currentPage is the current // page number, that's, which page the current is. limit is the number of the // elements in every page. number is the total number of the pages to be // produced. // // For example, there are 100 elements in total, and there are 10 elements in // every page. The current is on page one, and we want to produce 5 pages. // // p := NewPagination(100, 1, 10, 5) func NewPagination(total, currentPage, limit, number int) Pagination { p := Pagination{Current: currentPage, Limit: limit} if currentPage < 1 { currentPage = 1 } if limit < 1 { limit = 10 } p.Total = int(math.Ceil(float64(total) / float64(limit))) if p.Total >= 1 { // Compute the Start ant the End page number. 
if p.Total <= number { p.Start = 1 p.End = p.Total } else { p.Start = currentPage - int(math.Floor(float64(number)/float64(2))) p.End = currentPage + int(math.Floor(float64(number)/float64(2))) if p.Start < 1 { p.End += int(math.Abs(float64(p.Start))) + 1 p.Start = 1 } else if p.End > p.Total { p.Start -= (p.End - number) p.End = number } } // Compute whether there are the previous and the next pages. if p.Start > 1 { p.HasPrev = true p.Prev = p.Start - 1 } if p.End < p.Total { p.HasNext = true p.Next = p.End + 1 } // Compute the number of each page from start to end. for i := p.Start; i <= p.End; i++ { page := Page{Number: i} if i == currentPage { page.Active = true } p.Pages = append(p.Pages, page) } } else { p.Total = 0 } return p } Avoid to calculate repeatedly // Package pagination is usually used to compute the web pagination. package pagination import "math" // Pagination is a struct used to get a pagination result. type Pagination struct { // The number of the current page Current int // The number of the elements in every page Limit int // The total number of the pages Total int // The number of the first page in the page group. Start int // The number of the last page in the page group. End int // The number of the previous page outside the page group. // It's 0 if there is no previous page. Prev int // The number of the next page outside the page group. // It's 0 if there is no next page. Next int // Whether there is the previous page. HasPrev bool // Whether there is the next page. HasNext bool // The page group, which contains all the produced pages. Pages []Page } // Page stands for a page. type Page struct { // Whether this page is the current page. Active bool // The number of this page. Number int } // NewPagination creates a new pagination. // // total is the total number of all the elements. currentPage is the current // page number, that's, which page the current is. limit is the number of the // elements in every page. 
number is the total number of the pages to be // produced. // // For example, there are 100 elements in total, and there are 10 elements in // every page. The current is on page one, and we want to produce 5 pages. // // p := NewPagination(100, 1, 10, 5) func NewPagination(total, currentPage, limit, number int) Pagination { p := Pagination{Current: currentPage, Limit: limit} if currentPage < 1 { currentPage = 1 } if limit < 1 { limit = 10 } p.Total = int(math.Ceil(float64(total) / float64(limit))) if p.Total >= 1 { // Compute the Start ant the End page number. if p.Total <= number { p.Start = 1 p.End = p.Total } else { half := int(math.Floor(float64(number) / float64(2))) p.Start = currentPage - half p.End = currentPage + half if p.Start < 1 { p.End += int(math.Abs(float64(p.Start))) + 1 p.Start = 1 } else if p.End > p.Total { p.Start -= (p.End - number) p.End = number } } // Compute whether there are the previous and the next pages. if p.Start > 1 { p.HasPrev = true p.Prev = p.Start - 1 } if p.End < p.Total { p.HasNext = true p.Next = p.End + 1 } // Compute the number of each page from start to end. for i := p.Start; i <= p.End; i++ { page := Page{Number: i} if i == currentPage { page.Active = true } p.Pages = append(p.Pages, page) } } else { p.Total = 0 } return p }
package daemon

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

var (
	// ErrWouldBlock indicates that the pid-file is already locked by
	// another process. (Message lowercased per Go error-string convention,
	// mirroring strerror(EAGAIN).)
	ErrWouldBlock = errors.New("daemon: resource temporarily unavailable")
)

// LockFile wraps *os.File and provides functions for locking of files.
type LockFile struct {
	*os.File
}

// NewLockFile returns a new LockFile with the given File.
func NewLockFile(file *os.File) *LockFile {
	return &LockFile{file}
}

// CreatePidFile opens the named file, applies an exclusive lock and writes
// the current process id to the file. On any failure after the file was
// created, the file is removed again.
func CreatePidFile(name string, perm os.FileMode) (lock *LockFile, err error) {
	if lock, err = OpenLockFile(name, perm); err != nil {
		return
	}
	if err = lock.Lock(); err != nil {
		lock.Remove()
		return
	}
	if err = lock.WritePid(); err != nil {
		lock.Remove()
	}
	return
}

// OpenLockFile opens the named file with flags os.O_RDWR|os.O_CREATE and the
// specified perm. If successful, the function returns a LockFile for the
// opened file.
func OpenLockFile(name string, perm os.FileMode) (lock *LockFile, err error) {
	var file *os.File
	if file, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE, perm); err == nil {
		lock = &LockFile{file}
	}
	return
}

// Lock applies an exclusive lock on an open file. If the file is already
// locked, it returns an error (typically ErrWouldBlock).
func (file *LockFile) Lock() error {
	return lockFile(file.Fd())
}

// Unlock removes the exclusive lock on an open file.
func (file *LockFile) Unlock() error {
	return unlockFile(file.Fd())
}

// ReadPidFile reads the process id from the file with the given name and
// returns the pid. If unable to read from the file, it returns an error.
func ReadPidFile(name string) (pid int, err error) {
	var file *os.File
	if file, err = os.OpenFile(name, os.O_RDONLY, 0640); err != nil {
		return
	}
	defer file.Close()

	lock := &LockFile{file}
	pid, err = lock.ReadPid()
	return
}

// WritePid writes the current process id to an open file.
func (file *LockFile) WritePid() (err error) {
	// NOTE(review): os.SEEK_SET is deprecated in favor of io.SeekStart;
	// kept to avoid raising the file's minimum Go version.
	if _, err = file.Seek(0, os.SEEK_SET); err != nil {
		return
	}
	var fileLen int
	if fileLen, err = fmt.Fprint(file, os.Getpid()); err != nil {
		return
	}
	// Truncate in case a previous, longer pid string was stored here.
	if err = file.Truncate(int64(fileLen)); err != nil {
		return
	}
	err = file.Sync()
	return
}

// ReadPid reads the process id from the file and returns the pid.
// If unable to read from the file, it returns an error.
func (file *LockFile) ReadPid() (pid int, err error) {
	if _, err = file.Seek(0, os.SEEK_SET); err != nil {
		return
	}
	_, err = fmt.Fscan(file, &pid)
	return
}

// Remove removes the lock, then closes and removes the open file.
func (file *LockFile) Remove() error {
	defer file.Close()

	if err := file.Unlock(); err != nil {
		return err
	}

	// TODO(yar): keep filename?
	name, err := GetFdName(file.Fd())
	if err != nil {
		return err
	}

	return syscall.Unlink(name)
}

// GetFdName returns the file name for the given descriptor.
//
// BUG(yar): GetFdName returns an error for some *nix platforms when the full
// name length of the file is greater than 0x1000.
func GetFdName(fd uintptr) (name string, err error) {
	return getFdName(fd)
}
// This file definse the logOuter interface and several types of logOuter. // // emptyOuter = logOuter where both Out and Outf are noops // lineOuter = logOuter where a newline is inserted after every call to // Out and Outf // fatalLineOuter = logOuter that logs message with inserted newline then // exits with call to os.EXIT(1) package golog import ( "io" "os" "sync" ) type LogOuter interface { // Output a LogMessage (to a file, to stderr, to a tester, etc). Output // must be safe to call from multiple threads. Output(*LogMessage) } type writerLogOuter struct { lock sync.Mutex io.Writer } func (f *writerLogOuter) Output(m *LogMessage) { f.lock.Lock() defer f.lock.Unlock() // TODO(awreece) Handle short write? // Make sure to insert a newline. f.Write([]byte(formatLogMessage(m, true))) } // Returns a LogOuter wrapping the io.Writer. func NewWriterLogOuter(f io.Writer) LogOuter { return &writerLogOuter{io.Writer: f} } // Returns a LogOuter wrapping the file, or an error if the file cannot be // opened. func NewFileLogOuter(filename string) (LogOuter, os.Error) { if file, err := os.Create(filename); err != nil { return nil, err } else { return NewWriterLogOuter(file), nil } panic("Code never reaches here, this mollifies the compiler.") } // We want to allow an abitrary testing framework. type TestController interface { // We will assume that testers insert newlines in manner similar to // the FEATURE of testing.T where it inserts extra newlines. >.< Log(...interface{}) FailNow() } type testLogOuter struct { TestController } func (t *testLogOuter) Output(m *LogMessage) { // Don't insert an additional log message since the tester inserts them // for us. t.Log(formatLogMessage(m, false)) } // Return a LogOuter wrapping the TestControlller. func NewTestLogOuter(t TestController) LogOuter { return &testLogOuter{t} } Implement UDP logger // This file definse the logOuter interface and several types of logOuter. 
// // emptyOuter = logOuter where both Out and Outf are noops // lineOuter = logOuter where a newline is inserted after every call to // Out and Outf // fatalLineOuter = logOuter that logs message with inserted newline then // exits with call to os.EXIT(1) package golog import ( "io" "json" "net" "os" "sync" ) type LogOuter interface { // Output a LogMessage (to a file, to stderr, to a tester, etc). Output // must be safe to call from multiple threads. Output(*LogMessage) } type writerLogOuter struct { lock sync.Mutex io.Writer } func (f *writerLogOuter) Output(m *LogMessage) { f.lock.Lock() defer f.lock.Unlock() // TODO(awreece) Handle short write? // Make sure to insert a newline. f.Write([]byte(formatLogMessage(m, true))) } // Returns a LogOuter wrapping the io.Writer. func NewWriterLogOuter(f io.Writer) LogOuter { return &writerLogOuter{io.Writer: f} } // Returns a LogOuter wrapping the file, or an error if the file cannot be // opened. func NewFileLogOuter(filename string) (LogOuter, os.Error) { if file, err := os.Create(filename); err != nil { return nil, err } else { return NewWriterLogOuter(file), nil } panic("Code never reaches here, this mollifies the compiler.") } // We want to allow an abitrary testing framework. type TestController interface { // We will assume that testers insert newlines in manner similar to // the FEATURE of testing.T where it inserts extra newlines. >.< Log(...interface{}) FailNow() } type testLogOuter struct { TestController } func (t *testLogOuter) Output(m *LogMessage) { // Don't insert an additional log message since the tester inserts them // for us. t.Log(formatLogMessage(m, false)) } // Return a LogOuter wrapping the TestControlller. func NewTestLogOuter(t TestController) LogOuter { return &testLogOuter{t} } type udpLogOuter struct { conn net.PacketConn raddr net.Addr } // Returns a LogOuter that forwards LogMessages in json format to UDP network // address. TODO(awreece): Use protobuf? 
func NewUDPLogOuter(raddr string) (LogOuter, os.Error) { var addr *net.UDPAddr var err os.Error var conn net.PacketConn if addr, err = net.ResolveUDPAddr("udp", raddr); err != nil { return nil, err } if conn, err = net.DialUDP("udp", nil, addr); err != nil { return nil, err } return &udpLogOuter{conn, addr}, nil } func (o *udpLogOuter) Output(m *LogMessage) { if bytes, err := json.Marshal(m); err == nil { // TODO Handle error? o.conn.WriteTo(bytes, o.raddr) } }
/* Copyright 2021 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr import ( "context" "testing" ) // testLogSink is a Logger just for testing that does nothing. type testLogSink struct{} func (l *testLogSink) Init(RuntimeInfo) { } func (l *testLogSink) Enabled(int) bool { return false } func (l *testLogSink) Info(level int, msg string, keysAndValues ...interface{}) { } func (l *testLogSink) Error(err error, msg string, keysAndValues ...interface{}) { } func (l *testLogSink) WithValues(keysAndValues ...interface{}) LogSink { return l } func (l *testLogSink) WithName(name string) LogSink { return l } // Verify that it actually implements the interface var _ LogSink = &testLogSink{} func TestContext(t *testing.T) { ctx := context.TODO() if out, err := FromContext(ctx); err == nil { t.Errorf("expected error, got %#v", out) } out := FromContextOrDiscard(ctx) if _, ok := out.sink.(discardLogger); !ok { t.Errorf("expected a discardLogger, got %#v", out) } sink := &testLogSink{} logger := New(sink) lctx := NewContext(ctx, logger) if out, err := FromContext(lctx); err != nil { t.Errorf("unexpected error: %v", err) } else if p := out.sink.(*testLogSink); p != sink { t.Errorf("expected output to be the same as input: got in=%p, out=%p", sink, p) } out = FromContextOrDiscard(lctx) if p := out.sink.(*testLogSink); p != sink { t.Errorf("expected output to be the same as input: got in=%p, out=%p", sink, p) } } // testCallDepthLogSink is a Logger just for testing that does nothing. 
type testCallDepthLogSink struct { *testLogSink depth int } func (l *testCallDepthLogSink) WithCallDepth(depth int) LogSink { return &testCallDepthLogSink{l.testLogSink, l.depth + depth} } // Verify that it actually implements the interface var _ CallDepthLogSink = &testCallDepthLogSink{} func TestWithCallDepth(t *testing.T) { // Test an impl that does not support it. t.Run("not supported", func(t *testing.T) { in := &testLogSink{} l := New(in) out := l.WithCallDepth(42) if p := out.sink.(*testLogSink); p != in { t.Errorf("expected output to be the same as input: got in=%p, out=%p", in, p) } }) // Test an impl that does support it. t.Run("supported", func(t *testing.T) { in := &testCallDepthLogSink{&testLogSink{}, 0} l := New(in) out := l.WithCallDepth(42) if out.sink.(*testCallDepthLogSink) == in { t.Errorf("expected output to be different than input: got in=out=%p", in) } if cdl := out.sink.(*testCallDepthLogSink); cdl.depth != 42 { t.Errorf("expected depth=42, got %d", cdl.depth) } }) } Add test for notFoundError /* Copyright 2021 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr import ( "context" "testing" ) // testLogSink is a Logger just for testing that does nothing. 
type testLogSink struct{} func (l *testLogSink) Init(RuntimeInfo) { } func (l *testLogSink) Enabled(int) bool { return false } func (l *testLogSink) Info(level int, msg string, keysAndValues ...interface{}) { } func (l *testLogSink) Error(err error, msg string, keysAndValues ...interface{}) { } func (l *testLogSink) WithValues(keysAndValues ...interface{}) LogSink { return l } func (l *testLogSink) WithName(name string) LogSink { return l } // Verify that it actually implements the interface var _ LogSink = &testLogSink{} func TestContext(t *testing.T) { ctx := context.TODO() if out, err := FromContext(ctx); err == nil { t.Errorf("expected error, got %#v", out) } else if _, ok := err.(notFoundError); !ok { t.Errorf("expected a notFoundError, got %#v", err) } out := FromContextOrDiscard(ctx) if _, ok := out.sink.(discardLogger); !ok { t.Errorf("expected a discardLogger, got %#v", out) } sink := &testLogSink{} logger := New(sink) lctx := NewContext(ctx, logger) if out, err := FromContext(lctx); err != nil { t.Errorf("unexpected error: %v", err) } else if p := out.sink.(*testLogSink); p != sink { t.Errorf("expected output to be the same as input: got in=%p, out=%p", sink, p) } out = FromContextOrDiscard(lctx) if p := out.sink.(*testLogSink); p != sink { t.Errorf("expected output to be the same as input: got in=%p, out=%p", sink, p) } } // testCallDepthLogSink is a Logger just for testing that does nothing. type testCallDepthLogSink struct { *testLogSink depth int } func (l *testCallDepthLogSink) WithCallDepth(depth int) LogSink { return &testCallDepthLogSink{l.testLogSink, l.depth + depth} } // Verify that it actually implements the interface var _ CallDepthLogSink = &testCallDepthLogSink{} func TestWithCallDepth(t *testing.T) { // Test an impl that does not support it. 
t.Run("not supported", func(t *testing.T) { in := &testLogSink{} l := New(in) out := l.WithCallDepth(42) if p := out.sink.(*testLogSink); p != in { t.Errorf("expected output to be the same as input: got in=%p, out=%p", in, p) } }) // Test an impl that does support it. t.Run("supported", func(t *testing.T) { in := &testCallDepthLogSink{&testLogSink{}, 0} l := New(in) out := l.WithCallDepth(42) if out.sink.(*testCallDepthLogSink) == in { t.Errorf("expected output to be different than input: got in=out=%p", in) } if cdl := out.sink.(*testCallDepthLogSink); cdl.depth != 42 { t.Errorf("expected depth=42, got %d", cdl.depth) } }) }
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"code.cloudfoundry.org/lager"
	"github.com/concourse/baggageclaim/baggageclaimcmd"
	concourseWorker "github.com/concourse/concourse/worker"
	"github.com/concourse/concourse/worker/beacon"
	workerConfig "github.com/concourse/concourse/worker/start"
	"github.com/concourse/concourse/worker/sweeper"
	"github.com/concourse/concourse/worker/tsa"
	"github.com/concourse/flag"
	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
	"github.com/tedsuo/ifrit/sigmon"
)

// WorkerCommand is the flag-parsed configuration for running a worker:
// the Garden and Baggageclaim servers plus (optionally) TSA registration.
type WorkerCommand struct {
	Worker workerConfig.Config

	TSA tsa.Config `group:"TSA Configuration" namespace:"tsa"`

	Certs Certs

	WorkDir flag.Dir `long:"work-dir" required:"true" description:"Directory in which to place container data."`

	BindIP   flag.IP `long:"bind-ip"   default:"127.0.0.1" description:"IP address on which to listen for the Garden server."`
	BindPort uint16  `long:"bind-port" default:"7777"      description:"Port on which to listen for the Garden server."`

	PeerIP flag.IP `long:"peer-ip" description:"IP used to reach this worker from the ATC nodes."`

	Garden GardenBackend `group:"Garden Configuration" namespace:"garden"`

	Baggageclaim baggageclaimcmd.BaggageclaimCommand `group:"Baggageclaim Configuration" namespace:"baggageclaim"`

	ResourceTypes flag.Dir `long:"resource-types" description:"Path to directory containing resource types the worker should advertise."`

	Logger flag.Lager
}

// Execute builds the runner and blocks until it exits or a signal arrives.
func (cmd *WorkerCommand) Execute(args []string) error {
	runner, err := cmd.Runner(args)
	if err != nil {
		return err
	}

	return <-ifrit.Invoke(sigmon.New(runner)).Wait()
}

// Runner assembles the process group: garden, baggageclaim and — when a TSA
// key is configured — the beacon (registration) and sweeper processes.
func (cmd *WorkerCommand) Runner(args []string) (ifrit.Runner, error) {
	logger, _ := cmd.Logger.Logger("worker")

	worker, gardenRunner, err := cmd.gardenRunner(logger.Session("garden"))
	if err != nil {
		return nil, err
	}

	worker.Version = WorkerVersion

	baggageclaimRunner, err := cmd.baggageclaimRunner(logger.Session("baggageclaim"))
	if err != nil {
		return nil, err
	}

	members := grouper.Members{
		{
			Name:   "garden",
			Runner: gardenRunner,
		},
		{
			Name:   "baggageclaim",
			Runner: baggageclaimRunner,
		},
	}

	if cmd.TSA.WorkerPrivateKey != nil {
		beaconConfig := beacon.Config{
			TSAConfig: cmd.TSA,
		}

		if cmd.PeerIP.IP != nil {
			// The worker is directly reachable: advertise its own addresses.
			worker.GardenAddr = fmt.Sprintf("%s:%d", cmd.PeerIP.IP, cmd.BindPort)
			worker.BaggageclaimURL = fmt.Sprintf("http://%s:%d", cmd.PeerIP.IP, cmd.Baggageclaim.BindPort)
			// The mode lives on the nested Registration struct, not on a
			// flat RegistrationMode field.
			beaconConfig.Registration.Mode = "direct"
		} else {
			// No peer IP: tunnel Garden/Baggageclaim through the TSA.
			beaconConfig.Registration.Mode = "forward"
			beaconConfig.GardenForwardAddr = fmt.Sprintf("%s:%d", cmd.BindIP.IP, cmd.BindPort)
			beaconConfig.BaggageclaimForwardAddr = fmt.Sprintf("%s:%d", cmd.Baggageclaim.BindIP.IP, cmd.Baggageclaim.BindPort)
			worker.GardenAddr = beaconConfig.GardenForwardAddr
			worker.BaggageclaimURL = fmt.Sprintf("http://%s", beaconConfig.BaggageclaimForwardAddr)
		}

		members = append(members, grouper.Member{
			Name: "beacon",
			Runner: concourseWorker.BeaconRunner(
				logger.Session("beacon"),
				worker,
				beaconConfig,
			),
		})

		members = append(members, grouper.Member{
			Name: "sweeper",
			Runner: sweeper.NewSweeperRunner(
				logger,
				worker,
				beaconConfig,
			),
		})
	}

	return grouper.NewParallel(os.Interrupt, members), nil
}

// workerName returns the configured worker name, falling back to the
// machine's hostname.
func (cmd *WorkerCommand) workerName() (string, error) {
	if cmd.Worker.Name != "" {
		return cmd.Worker.Name, nil
	}

	return os.Hostname()
}

// baggageclaimRunner prepares the volumes/overlays directories under the
// work dir and returns the baggageclaim process runner.
func (cmd *WorkerCommand) baggageclaimRunner(logger lager.Logger) (ifrit.Runner, error) {
	volumesDir := filepath.Join(cmd.WorkDir.Path(), "volumes")

	err := os.MkdirAll(volumesDir, 0755)
	if err != nil {
		return nil, err
	}

	cmd.Baggageclaim.VolumesDir = flag.Dir(volumesDir)
	cmd.Baggageclaim.OverlaysDir = filepath.Join(cmd.WorkDir.Path(), "overlays")

	return cmd.Baggageclaim.Runner(nil)
}
package gcng import ( "errors" "time" "code.cloudfoundry.org/garden" "code.cloudfoundry.org/garden/client" "code.cloudfoundry.org/garden/client/connection" "code.cloudfoundry.org/lager" "github.com/concourse/atc/dbng" ) const HijackedContainerTimeout = 5 * time.Minute //go:generate counterfeiter . containerFactory type containerFactory interface { MarkContainersForDeletion() error FindContainersMarkedForDeletion() ([]dbng.DestroyingContainer, error) FindHijackedContainersForDeletion() ([]dbng.CreatedContainer, error) } type containerCollector struct { logger lager.Logger containerFactory containerFactory workerProvider dbng.WorkerFactory gardenClientFactory GardenClientFactory } func NewContainerCollector( logger lager.Logger, containerFactory containerFactory, workerProvider dbng.WorkerFactory, gardenClientFactory GardenClientFactory, ) Collector { return &containerCollector{ logger: logger, containerFactory: containerFactory, workerProvider: workerProvider, gardenClientFactory: gardenClientFactory, } } type GardenClientFactory func(*dbng.Worker) (garden.Client, error) func NewGardenClientFactory() GardenClientFactory { return func(w *dbng.Worker) (garden.Client, error) { if w.GardenAddr == nil { return nil, errors.New("worker-does-not-have-garden-address") } gconn := connection.New("tcp", *w.GardenAddr) return client.New(gconn), nil } } func (c *containerCollector) Run() error { workers, err := c.workerProvider.Workers() if err != nil { c.logger.Error("failed-to-get-workers", err) return err } workersByName := map[string]*dbng.Worker{} for _, w := range workers { workersByName[w.Name] = w } err = c.markHijackedContainersAsDestroying(workersByName) if err != nil { return err } err = c.containerFactory.MarkContainersForDeletion() if err != nil { c.logger.Error("marking-build-containers-for-deletion", err) } containersToDelete, err := c.findContainersToDelete() if err != nil { return err } for _, container := range containersToDelete { 
c.tryToDestroyContainer(container, workersByName) } c.logger.Debug("completed-deleting-containers") return nil } func (c *containerCollector) markHijackedContainersAsDestroying(workersByName map[string]*dbng.Worker) error { hijackedContainersForDeletion, err := c.containerFactory.FindHijackedContainersForDeletion() if err != nil { c.logger.Error("failed-to-get-hijacked-containers-for-deletion", err) return err } for _, hijackedContainer := range hijackedContainersForDeletion { w, found := workersByName[hijackedContainer.WorkerName()] if !found { c.logger.Info("worker-not-found", lager.Data{ "worker-name": hijackedContainer.WorkerName(), }) continue } gclient, err := c.gardenClientFactory(w) if err != nil { c.logger.Error("failed-to-get-garden-client-for-worker", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), }) continue } gardenContainer, err := gclient.Lookup(hijackedContainer.Handle()) if err != nil { if _, ok := err.(garden.ContainerNotFoundError); ok { c.logger.Debug("hijacked-container-not-found-in-garden", lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) _, err = hijackedContainer.Destroying() if err != nil { c.logger.Error("failed-to-mark-container-as-destroying", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) continue } } c.logger.Error("failed-to-lookup-garden-container", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) continue } else { err = gardenContainer.SetGraceTime(HijackedContainerTimeout) if err != nil { c.logger.Error("failed-to-set-grace-time-on-hijacked-container", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) continue } _, err = hijackedContainer.Discontinue() if err != nil { c.logger.Error("failed-to-mark-container-as-destroying", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": 
hijackedContainer.Handle(), }) continue } } } return nil } func (c *containerCollector) findContainersToDelete() ([]dbng.DestroyingContainer, error) { containers, err := c.containerFactory.FindContainersMarkedForDeletion() if err != nil { c.logger.Error("find-containers-for-deletion", err) return nil, err } containerHandles := []string{} for _, container := range containers { containerHandles = append(containerHandles, container.Handle()) } c.logger.Debug("found-containers-for-deletion", lager.Data{ "containers": containerHandles, }) return containers, nil } func (c *containerCollector) tryToDestroyContainer(container dbng.DestroyingContainer, workersByName map[string]*dbng.Worker) { w, found := workersByName[container.WorkerName()] if !found { c.logger.Info("worker-not-found", lager.Data{ "worker-name": container.WorkerName(), }) return } gclient, err := c.gardenClientFactory(w) if err != nil { c.logger.Error("failed-to-get-garden-client-for-worker", err, lager.Data{ "worker-name": container.WorkerName(), }) return } if container.IsDiscontinued() { _, err := gclient.Lookup(container.Handle()) if err != nil { if _, ok := err.(garden.ContainerNotFoundError); ok { c.logger.Debug("discontinued-container-no-longer-present-in-garden", lager.Data{ "handle": container.Handle(), }) } else { c.logger.Error("failed-to-lookup-container-in-garden", err, lager.Data{ "worker-name": container.WorkerName(), }) return } } else { c.logger.Debug("discontinued-container-still-present-in-garden", lager.Data{ "handle": container.Handle(), }) return } } else { err = gclient.Destroy(container.Handle()) if err != nil { if _, ok := err.(garden.ContainerNotFoundError); ok { c.logger.Debug("container-no-longer-present-in-garden", lager.Data{ "handle": container.Handle(), }) } else { c.logger.Error("failed-to-destroy-garden-container", err, lager.Data{ "worker-name": container.WorkerName(), "handle": container.Handle(), }) return } } } ok, err := container.Destroy() if err != nil { 
c.logger.Error("failed-to-destroy-database-container", err, lager.Data{ "handle": container.Handle(), }) return } if !ok { c.logger.Info("container-provider-container-not-found", lager.Data{ "handle": container.Handle(), }) return } c.logger.Debug("completed-deleting-container", lager.Data{ "handle": container.Handle(), }) } do not hyphenate errors package gcng import ( "errors" "time" "code.cloudfoundry.org/garden" "code.cloudfoundry.org/garden/client" "code.cloudfoundry.org/garden/client/connection" "code.cloudfoundry.org/lager" "github.com/concourse/atc/dbng" ) const HijackedContainerTimeout = 5 * time.Minute //go:generate counterfeiter . containerFactory type containerFactory interface { MarkContainersForDeletion() error FindContainersMarkedForDeletion() ([]dbng.DestroyingContainer, error) FindHijackedContainersForDeletion() ([]dbng.CreatedContainer, error) } type containerCollector struct { logger lager.Logger containerFactory containerFactory workerProvider dbng.WorkerFactory gardenClientFactory GardenClientFactory } func NewContainerCollector( logger lager.Logger, containerFactory containerFactory, workerProvider dbng.WorkerFactory, gardenClientFactory GardenClientFactory, ) Collector { return &containerCollector{ logger: logger, containerFactory: containerFactory, workerProvider: workerProvider, gardenClientFactory: gardenClientFactory, } } type GardenClientFactory func(*dbng.Worker) (garden.Client, error) func NewGardenClientFactory() GardenClientFactory { return func(w *dbng.Worker) (garden.Client, error) { if w.GardenAddr == nil { return nil, errors.New("worker does not have a garden address") } gconn := connection.New("tcp", *w.GardenAddr) return client.New(gconn), nil } } func (c *containerCollector) Run() error { workers, err := c.workerProvider.Workers() if err != nil { c.logger.Error("failed-to-get-workers", err) return err } workersByName := map[string]*dbng.Worker{} for _, w := range workers { workersByName[w.Name] = w } err = 
c.markHijackedContainersAsDestroying(workersByName) if err != nil { return err } err = c.containerFactory.MarkContainersForDeletion() if err != nil { c.logger.Error("marking-build-containers-for-deletion", err) } containersToDelete, err := c.findContainersToDelete() if err != nil { return err } for _, container := range containersToDelete { c.tryToDestroyContainer(container, workersByName) } c.logger.Debug("completed-deleting-containers") return nil } func (c *containerCollector) markHijackedContainersAsDestroying(workersByName map[string]*dbng.Worker) error { hijackedContainersForDeletion, err := c.containerFactory.FindHijackedContainersForDeletion() if err != nil { c.logger.Error("failed-to-get-hijacked-containers-for-deletion", err) return err } for _, hijackedContainer := range hijackedContainersForDeletion { w, found := workersByName[hijackedContainer.WorkerName()] if !found { c.logger.Info("worker-not-found", lager.Data{ "worker-name": hijackedContainer.WorkerName(), }) continue } gclient, err := c.gardenClientFactory(w) if err != nil { c.logger.Error("failed-to-get-garden-client-for-worker", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), }) continue } gardenContainer, err := gclient.Lookup(hijackedContainer.Handle()) if err != nil { if _, ok := err.(garden.ContainerNotFoundError); ok { c.logger.Debug("hijacked-container-not-found-in-garden", lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) _, err = hijackedContainer.Destroying() if err != nil { c.logger.Error("failed-to-mark-container-as-destroying", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) continue } } c.logger.Error("failed-to-lookup-garden-container", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) continue } else { err = gardenContainer.SetGraceTime(HijackedContainerTimeout) if err != nil { 
c.logger.Error("failed-to-set-grace-time-on-hijacked-container", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) continue } _, err = hijackedContainer.Discontinue() if err != nil { c.logger.Error("failed-to-mark-container-as-destroying", err, lager.Data{ "worker-name": hijackedContainer.WorkerName(), "handle": hijackedContainer.Handle(), }) continue } } } return nil } func (c *containerCollector) findContainersToDelete() ([]dbng.DestroyingContainer, error) { containers, err := c.containerFactory.FindContainersMarkedForDeletion() if err != nil { c.logger.Error("find-containers-for-deletion", err) return nil, err } containerHandles := []string{} for _, container := range containers { containerHandles = append(containerHandles, container.Handle()) } c.logger.Debug("found-containers-for-deletion", lager.Data{ "containers": containerHandles, }) return containers, nil } func (c *containerCollector) tryToDestroyContainer(container dbng.DestroyingContainer, workersByName map[string]*dbng.Worker) { w, found := workersByName[container.WorkerName()] if !found { c.logger.Info("worker-not-found", lager.Data{ "worker-name": container.WorkerName(), }) return } gclient, err := c.gardenClientFactory(w) if err != nil { c.logger.Error("failed-to-get-garden-client-for-worker", err, lager.Data{ "worker-name": container.WorkerName(), }) return } if container.IsDiscontinued() { _, err := gclient.Lookup(container.Handle()) if err != nil { if _, ok := err.(garden.ContainerNotFoundError); ok { c.logger.Debug("discontinued-container-no-longer-present-in-garden", lager.Data{ "handle": container.Handle(), }) } else { c.logger.Error("failed-to-lookup-container-in-garden", err, lager.Data{ "worker-name": container.WorkerName(), }) return } } else { c.logger.Debug("discontinued-container-still-present-in-garden", lager.Data{ "handle": container.Handle(), }) return } } else { err = gclient.Destroy(container.Handle()) if err != nil { if _, ok 
:= err.(garden.ContainerNotFoundError); ok { c.logger.Debug("container-no-longer-present-in-garden", lager.Data{ "handle": container.Handle(), }) } else { c.logger.Error("failed-to-destroy-garden-container", err, lager.Data{ "worker-name": container.WorkerName(), "handle": container.Handle(), }) return } } } ok, err := container.Destroy() if err != nil { c.logger.Error("failed-to-destroy-database-container", err, lager.Data{ "handle": container.Handle(), }) return } if !ok { c.logger.Info("container-provider-container-not-found", lager.Data{ "handle": container.Handle(), }) return } c.logger.Debug("completed-deleting-container", lager.Data{ "handle": container.Handle(), }) }
package torrent import ( "bufio" "bytes" "errors" "fmt" "io" "math/rand" "net" "sort" "strconv" "strings" "sync/atomic" "time" "github.com/RoaringBitmap/roaring" "github.com/anacrolix/log" "github.com/anacrolix/missinggo/iter" "github.com/anacrolix/missinggo/v2/bitmap" "github.com/anacrolix/multiless" "github.com/anacrolix/chansync" "github.com/anacrolix/torrent/bencode" "github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/mse" pp "github.com/anacrolix/torrent/peer_protocol" request_strategy "github.com/anacrolix/torrent/request-strategy" ) type PeerSource string const ( PeerSourceTracker = "Tr" PeerSourceIncoming = "I" PeerSourceDhtGetPeers = "Hg" // Peers we found by searching a DHT. PeerSourceDhtAnnouncePeer = "Ha" // Peers that were announced to us by a DHT. PeerSourcePex = "X" // The peer was given directly, such as through a magnet link. PeerSourceDirect = "M" ) type peerRequestState struct { data []byte } type PeerRemoteAddr interface { String() string } // Since we have to store all the requests in memory, we can't reasonably exceed what would be // indexable with the memory space available. type ( maxRequests = int requestState = request_strategy.PeerRequestState ) type Peer struct { // First to ensure 64-bit alignment for atomics. See #262. _stats ConnStats t *Torrent peerImpl callbacks *Callbacks outgoing bool Network string RemoteAddr PeerRemoteAddr // True if the connection is operating over MSE obfuscation. headerEncrypted bool cryptoMethod mse.CryptoMethod Discovery PeerSource trusted bool closed chansync.SetOnce // Set true after we've added our ConnStats generated during handshake to // other ConnStat instances as determined when the *Torrent became known. reconciledHandshakeStats bool lastMessageReceived time.Time completedHandshake time.Time lastUsefulChunkReceived time.Time lastChunkSent time.Time // Stuff controlled by the local peer. 
needRequestUpdate string requestState requestState updateRequestsTimer *time.Timer lastBecameInterested time.Time priorInterest time.Duration lastStartedExpectingToReceiveChunks time.Time cumulativeExpectedToReceiveChunks time.Duration _chunksReceivedWhileExpecting int64 choking bool piecesReceivedSinceLastRequestUpdate maxRequests maxPiecesReceivedBetweenRequestUpdates maxRequests // Chunks that we might reasonably expect to receive from the peer. Due to latency, buffering, // and implementation differences, we may receive chunks that are no longer in the set of // requests actually want. This could use a roaring.BSI if the memory use becomes noticeable. validReceiveChunks map[RequestIndex]int // Indexed by metadata piece, set to true if posted and pending a // response. metadataRequests []bool sentHaves bitmap.Bitmap // Stuff controlled by the remote peer. peerInterested bool peerChoking bool peerRequests map[Request]*peerRequestState PeerPrefersEncryption bool // as indicated by 'e' field in extension handshake PeerListenPort int // The highest possible number of pieces the torrent could have based on // communication with the peer. Generally only useful until we have the // torrent info. peerMinPieces pieceIndex // Pieces we've accepted chunks for from the peer. peerTouchedPieces map[pieceIndex]struct{} peerAllowedFast roaring.Bitmap PeerMaxRequests maxRequests // Maximum pending requests the peer allows. PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber PeerClientName atomic.Value logger log.Logger } // Maintains the state of a BitTorrent-protocol based connection with a peer. type PeerConn struct { Peer // A string that should identify the PeerConn's net.Conn endpoints. The net.Conn could // be wrapping WebRTC, uTP, or TCP etc. Used in writing the conn status for peers. connString string // See BEP 3 etc. PeerID PeerID PeerExtensionBytes pp.PeerExtensionBits // The actual Conn, used for closing, and setting socket options. 
Do not use methods on this // while holding any mutexes. conn net.Conn // The Reader and Writer for this Conn, with hooks installed for stats, // limiting, deadlines etc. w io.Writer r io.Reader messageWriter peerConnMsgWriter uploadTimer *time.Timer pex pexConnState // The pieces the peer has claimed to have. _peerPieces roaring.Bitmap // The peer has everything. This can occur due to a special message, when // we may not even know the number of pieces in the torrent yet. peerSentHaveAll bool } func (cn *PeerConn) connStatusString() string { return fmt.Sprintf("%+-55q %s %s", cn.PeerID, cn.PeerExtensionBytes, cn.connString) } func (cn *Peer) updateExpectingChunks() { if cn.expectingChunks() { if cn.lastStartedExpectingToReceiveChunks.IsZero() { cn.lastStartedExpectingToReceiveChunks = time.Now() } } else { if !cn.lastStartedExpectingToReceiveChunks.IsZero() { cn.cumulativeExpectedToReceiveChunks += time.Since(cn.lastStartedExpectingToReceiveChunks) cn.lastStartedExpectingToReceiveChunks = time.Time{} } } } func (cn *Peer) expectingChunks() bool { if cn.requestState.Requests.IsEmpty() { return false } if !cn.requestState.Interested { return false } if !cn.peerChoking { return true } haveAllowedFastRequests := false cn.peerAllowedFast.Iterate(func(i uint32) bool { haveAllowedFastRequests = roaringBitmapRangeCardinality( &cn.requestState.Requests, cn.t.pieceRequestIndexOffset(pieceIndex(i)), cn.t.pieceRequestIndexOffset(pieceIndex(i+1)), ) == 0 return !haveAllowedFastRequests }) return haveAllowedFastRequests } func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool { return cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(piece)) } // Returns true if the connection is over IPv6. func (cn *PeerConn) ipv6() bool { ip := cn.remoteIp() if ip.To4() != nil { return false } return len(ip) == net.IPv6len } // Returns true the if the dialer/initiator has the lower client peer ID. TODO: Find the // specification for this. 
func (cn *PeerConn) isPreferredDirection() bool { return bytes.Compare(cn.t.cl.peerID[:], cn.PeerID[:]) < 0 == cn.outgoing } // Returns whether the left connection should be preferred over the right one, // considering only their networking properties. If ok is false, we can't // decide. func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) (left, ok bool) { var ml multiLess ml.NextBool(l.isPreferredDirection(), r.isPreferredDirection()) ml.NextBool(!l.utp(), !r.utp()) ml.NextBool(l.ipv6(), r.ipv6()) return ml.FinalOk() } func (cn *Peer) cumInterest() time.Duration { ret := cn.priorInterest if cn.requestState.Interested { ret += time.Since(cn.lastBecameInterested) } return ret } func (cn *PeerConn) peerHasAllPieces() (all bool, known bool) { if cn.peerSentHaveAll { return true, true } if !cn.t.haveInfo() { return false, false } return cn._peerPieces.GetCardinality() == uint64(cn.t.numPieces()), true } func (cn *Peer) locker() *lockWithDeferreds { return cn.t.cl.locker() } func (cn *Peer) supportsExtension(ext pp.ExtensionName) bool { _, ok := cn.PeerExtensionIDs[ext] return ok } // The best guess at number of pieces in the torrent for this peer. func (cn *Peer) bestPeerNumPieces() pieceIndex { if cn.t.haveInfo() { return cn.t.numPieces() } return cn.peerMinPieces } func (cn *Peer) completedString() string { have := pieceIndex(cn.peerPieces().GetCardinality()) if all, _ := cn.peerHasAllPieces(); all { have = cn.bestPeerNumPieces() } return fmt.Sprintf("%d/%d", have, cn.bestPeerNumPieces()) } func (cn *PeerConn) onGotInfo(info *metainfo.Info) { cn.setNumPieces(info.NumPieces()) } // Correct the PeerPieces slice length. Return false if the existing slice is invalid, such as by // receiving badly sized BITFIELD, or invalid HAVE messages. 
func (cn *PeerConn) setNumPieces(num pieceIndex) { cn._peerPieces.RemoveRange(bitmap.BitRange(num), bitmap.ToEnd) cn.peerPiecesChanged() } func (cn *PeerConn) peerPieces() *roaring.Bitmap { return &cn._peerPieces } func eventAgeString(t time.Time) string { if t.IsZero() { return "never" } return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds()) } func (cn *PeerConn) connectionFlags() (ret string) { c := func(b byte) { ret += string([]byte{b}) } if cn.cryptoMethod == mse.CryptoMethodRC4 { c('E') } else if cn.headerEncrypted { c('e') } ret += string(cn.Discovery) if cn.utp() { c('U') } return } func (cn *PeerConn) utp() bool { return parseNetworkString(cn.Network).Udp } // Inspired by https://github.com/transmission/transmission/wiki/Peer-Status-Text. func (cn *Peer) statusFlags() (ret string) { c := func(b byte) { ret += string([]byte{b}) } if cn.requestState.Interested { c('i') } if cn.choking { c('c') } c('-') ret += cn.connectionFlags() c('-') if cn.peerInterested { c('i') } if cn.peerChoking { c('c') } return } func (cn *Peer) downloadRate() float64 { num := cn._stats.BytesReadUsefulData.Int64() if num == 0 { return 0 } return float64(num) / cn.totalExpectingTime().Seconds() } func (cn *Peer) numRequestsByPiece() (ret map[pieceIndex]int) { ret = make(map[pieceIndex]int) cn.requestState.Requests.Iterate(func(x uint32) bool { ret[pieceIndex(x/cn.t.chunksPerRegularPiece())]++ return true }) return } func (cn *Peer) writeStatus(w io.Writer, t *Torrent) { // \t isn't preserved in <pre> blocks? 
if cn.closed.IsSet() { fmt.Fprint(w, "CLOSED: ") } fmt.Fprintln(w, cn.connStatusString()) prio, err := cn.peerPriority() prioStr := fmt.Sprintf("%08x", prio) if err != nil { prioStr += ": " + err.Error() } fmt.Fprintf(w, " bep40-prio: %v\n", prioStr) fmt.Fprintf(w, " last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n", eventAgeString(cn.lastMessageReceived), eventAgeString(cn.completedHandshake), eventAgeString(cn.lastHelpful()), cn.cumInterest(), cn.totalExpectingTime(), ) fmt.Fprintf(w, " %s completed, %d pieces touched, good chunks: %v/%v:%v reqq: %d+%v/(%d/%d):%d/%d, flags: %s, dr: %.1f KiB/s\n", cn.completedString(), len(cn.peerTouchedPieces), &cn._stats.ChunksReadUseful, &cn._stats.ChunksRead, &cn._stats.ChunksWritten, cn.requestState.Requests.GetCardinality(), cn.requestState.Cancelled.GetCardinality(), cn.nominalMaxRequests(), cn.PeerMaxRequests, len(cn.peerRequests), localClientReqq, cn.statusFlags(), cn.downloadRate()/(1<<10), ) fmt.Fprintf(w, " requested pieces:") type pieceNumRequestsType struct { piece pieceIndex numRequests int } var pieceNumRequests []pieceNumRequestsType for piece, count := range cn.numRequestsByPiece() { pieceNumRequests = append(pieceNumRequests, pieceNumRequestsType{piece, count}) } sort.Slice(pieceNumRequests, func(i, j int) bool { return pieceNumRequests[i].piece < pieceNumRequests[j].piece }) for _, elem := range pieceNumRequests { fmt.Fprintf(w, " %v(%v)", elem.piece, elem.numRequests) } fmt.Fprintf(w, "\n") } func (p *Peer) close() { if !p.closed.Set() { return } if p.updateRequestsTimer != nil { p.updateRequestsTimer.Stop() } p.peerImpl.onClose() if p.t != nil { p.t.decPeerPieceAvailability(p) } for _, f := range p.callbacks.PeerClosed { f(p) } } func (cn *PeerConn) onClose() { if cn.pex.IsEnabled() { cn.pex.Close() } cn.tickleWriter() if cn.conn != nil { go cn.conn.Close() } if cb := cn.callbacks.PeerConnClosed; cb != nil { cb(cn) } } // Peer definitely has a piece, for purposes of requesting. 
So it's not sufficient that we think // they do (known=true). func (cn *Peer) peerHasPiece(piece pieceIndex) bool { if all, known := cn.peerHasAllPieces(); all && known { return true } return cn.peerPieces().ContainsInt(piece) } // 64KiB, but temporarily less to work around an issue with WebRTC. TODO: Update when // https://github.com/pion/datachannel/issues/59 is fixed. const writeBufferHighWaterLen = 1 << 15 // Writes a message into the write buffer. Returns whether it's okay to keep writing. Writing is // done asynchronously, so it may be that we're not able to honour backpressure from this method. func (cn *PeerConn) write(msg pp.Message) bool { torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1) // We don't need to track bytes here because the connection's Writer has that behaviour injected // (although there's some delay between us buffering the message, and the connection writer // flushing it out.). notFull := cn.messageWriter.write(msg) // Last I checked only Piece messages affect stats, and we don't write those. cn.wroteMsg(&msg) cn.tickleWriter() return notFull } func (cn *PeerConn) requestMetadataPiece(index int) { eID := cn.PeerExtensionIDs[pp.ExtensionNameMetadata] if eID == pp.ExtensionDeleteNumber { return } if index < len(cn.metadataRequests) && cn.metadataRequests[index] { return } cn.logger.WithDefaultLevel(log.Debug).Printf("requesting metadata piece %d", index) cn.write(pp.MetadataExtensionRequestMsg(eID, index)) for index >= len(cn.metadataRequests) { cn.metadataRequests = append(cn.metadataRequests, false) } cn.metadataRequests[index] = true } func (cn *PeerConn) requestedMetadataPiece(index int) bool { return index < len(cn.metadataRequests) && cn.metadataRequests[index] } // The actual value to use as the maximum outbound requests. 
func (cn *Peer) nominalMaxRequests() (ret maxRequests) { return maxRequests(clamp(1, int64(cn.PeerMaxRequests), 2048)) } func (cn *Peer) totalExpectingTime() (ret time.Duration) { ret = cn.cumulativeExpectedToReceiveChunks if !cn.lastStartedExpectingToReceiveChunks.IsZero() { ret += time.Since(cn.lastStartedExpectingToReceiveChunks) } return } func (cn *PeerConn) onPeerSentCancel(r Request) { if _, ok := cn.peerRequests[r]; !ok { torrent.Add("unexpected cancels received", 1) return } if cn.fastEnabled() { cn.reject(r) } else { delete(cn.peerRequests, r) } } func (cn *PeerConn) choke(msg messageWriter) (more bool) { if cn.choking { return true } cn.choking = true more = msg(pp.Message{ Type: pp.Choke, }) if cn.fastEnabled() { for r := range cn.peerRequests { // TODO: Don't reject pieces in allowed fast set. cn.reject(r) } } else { cn.peerRequests = nil } return } func (cn *PeerConn) unchoke(msg func(pp.Message) bool) bool { if !cn.choking { return true } cn.choking = false return msg(pp.Message{ Type: pp.Unchoke, }) } func (cn *Peer) setInterested(interested bool) bool { if cn.requestState.Interested == interested { return true } cn.requestState.Interested = interested if interested { cn.lastBecameInterested = time.Now() } else if !cn.lastBecameInterested.IsZero() { cn.priorInterest += time.Since(cn.lastBecameInterested) } cn.updateExpectingChunks() // log.Printf("%p: setting interest: %v", cn, interested) return cn.writeInterested(interested) } func (pc *PeerConn) writeInterested(interested bool) bool { return pc.write(pp.Message{ Type: func() pp.MessageType { if interested { return pp.Interested } else { return pp.NotInterested } }(), }) } // The function takes a message to be sent, and returns true if more messages // are okay. type messageWriter func(pp.Message) bool // This function seems to only used by Peer.request. It's all logic checks, so maybe we can no-op it // when we want to go fast. 
func (cn *Peer) shouldRequest(r RequestIndex) error { pi := pieceIndex(r / cn.t.chunksPerRegularPiece()) if !cn.peerHasPiece(pi) { return errors.New("requesting piece peer doesn't have") } if !cn.t.peerIsActive(cn) { panic("requesting but not in active conns") } if cn.closed.IsSet() { panic("requesting when connection is closed") } if cn.t.hashingPiece(pi) { panic("piece is being hashed") } if cn.t.pieceQueuedForHash(pi) { panic("piece is queued for hash") } if cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(pi)) { // This could occur if we made a request with the fast extension, and then got choked and // haven't had the request rejected yet. if !cn.requestState.Requests.Contains(r) { panic("peer choking and piece not allowed fast") } } return nil } func (cn *Peer) mustRequest(r RequestIndex) bool { more, err := cn.request(r) if err != nil { panic(err) } return more } func (cn *Peer) request(r RequestIndex) (more bool, err error) { if err := cn.shouldRequest(r); err != nil { panic(err) } if cn.requestState.Requests.Contains(r) { return true, nil } if maxRequests(cn.requestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() { return true, errors.New("too many outstanding requests") } cn.requestState.Requests.Add(r) if cn.validReceiveChunks == nil { cn.validReceiveChunks = make(map[RequestIndex]int) } cn.validReceiveChunks[r]++ cn.t.pendingRequests[r] = cn cn.t.lastRequested[r] = time.Now() cn.updateExpectingChunks() ppReq := cn.t.requestIndexToRequest(r) for _, f := range cn.callbacks.SentRequest { f(PeerRequestEvent{cn, ppReq}) } return cn.peerImpl._request(ppReq), nil } func (me *PeerConn) _request(r Request) bool { return me.write(pp.Message{ Type: pp.Request, Index: r.Index, Begin: r.Begin, Length: r.Length, }) } func (me *Peer) cancel(r RequestIndex) { if !me.deleteRequest(r) { panic("request not existing should have been guarded") } if me._cancel(r) { if !me.requestState.Cancelled.CheckedAdd(r) { panic("request already cancelled") } } 
if me.isLowOnRequests() { me.updateRequests("Peer.cancel") } } func (me *PeerConn) _cancel(r RequestIndex) bool { me.write(makeCancelMessage(me.t.requestIndexToRequest(r))) // Transmission does not send rejects for received cancels. See // https://github.com/transmission/transmission/pull/2275. return me.fastEnabled() && !me.remoteIsTransmission() } func (cn *PeerConn) fillWriteBuffer() { if !cn.maybeUpdateActualRequestState() { return } if cn.pex.IsEnabled() { if flow := cn.pex.Share(cn.write); !flow { return } } cn.upload(cn.write) } func (cn *PeerConn) have(piece pieceIndex) { if cn.sentHaves.Get(bitmap.BitIndex(piece)) { return } cn.write(pp.Message{ Type: pp.Have, Index: pp.Integer(piece), }) cn.sentHaves.Add(bitmap.BitIndex(piece)) } func (cn *PeerConn) postBitfield() { if cn.sentHaves.Len() != 0 { panic("bitfield must be first have-related message sent") } if !cn.t.haveAnyPieces() { return } cn.write(pp.Message{ Type: pp.Bitfield, Bitfield: cn.t.bitfield(), }) cn.sentHaves = bitmap.Bitmap{cn.t._completedPieces.Clone()} } // Sets a reason to update requests, and if there wasn't already one, handle it. func (cn *Peer) updateRequests(reason string) { if cn.needRequestUpdate != "" { return } cn.needRequestUpdate = reason cn.handleUpdateRequests() } func (cn *PeerConn) handleUpdateRequests() { // The writer determines the request state as needed when it can write. cn.tickleWriter() } // Emits the indices in the Bitmaps bms in order, never repeating any index. // skip is mutated during execution, and its initial values will never be // emitted. 
func iterBitmapsDistinct(skip *bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func {
	return func(cb iter.Callback) {
		for _, bm := range bms {
			if !iter.All(
				func(_i interface{}) bool {
					i := _i.(int)
					if skip.Contains(bitmap.BitIndex(i)) {
						return true
					}
					// Mark emitted so later bitmaps can't repeat it.
					skip.Add(bitmap.BitIndex(i))
					return cb(i)
				},
				bm.Iter,
			) {
				return
			}
		}
	}
}

// peerPiecesChanged reacts to any change in the peer's claimed piece set.
func (cn *Peer) peerPiecesChanged() {
	cn.t.maybeDropMutuallyCompletePeer(cn)
}

// raisePeerMinPieces ratchets up the lower bound on the torrent's piece count
// implied by this peer's messages.
func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
	if newMin > cn.peerMinPieces {
		cn.peerMinPieces = newMin
	}
}

// peerSentHave handles a HAVE message for the given piece.
func (cn *PeerConn) peerSentHave(piece pieceIndex) error {
	if cn.t.haveInfo() && piece >= cn.t.numPieces() || piece < 0 {
		return errors.New("invalid piece")
	}
	if cn.peerHasPiece(piece) {
		// Already known; nothing changes.
		return nil
	}
	cn.raisePeerMinPieces(piece + 1)
	if !cn.peerHasPiece(piece) {
		cn.t.incPieceAvailability(piece)
	}
	cn._peerPieces.Add(uint32(piece))
	if cn.t.wantPieceIndex(piece) {
		cn.updateRequests("have")
	}
	cn.peerPiecesChanged()
	return nil
}

// peerSentBitfield replaces the peer's claimed piece set from a BITFIELD
// message, adjusting per-piece availability for every bit that changed.
func (cn *PeerConn) peerSentBitfield(bf []bool) error {
	if len(bf)%8 != 0 {
		panic("expected bitfield length divisible by 8")
	}
	// We know that the last byte means that at most the last 7 bits are wasted.
	cn.raisePeerMinPieces(pieceIndex(len(bf) - 7))
	if cn.t.haveInfo() && len(bf) > int(cn.t.numPieces()) {
		// Ignore known excess pieces.
		bf = bf[:cn.t.numPieces()]
	}
	pp := cn.newPeerPieces()
	cn.peerSentHaveAll = false
	for i, have := range bf {
		if have {
			cn.raisePeerMinPieces(pieceIndex(i) + 1)
			if !pp.Contains(bitmap.BitIndex(i)) {
				cn.t.incPieceAvailability(i)
			}
		} else {
			if pp.Contains(bitmap.BitIndex(i)) {
				cn.t.decPieceAvailability(i)
			}
		}
		if have {
			cn._peerPieces.Add(uint32(i))
			if cn.t.wantPieceIndex(i) {
				cn.updateRequests("bitfield")
			}
		} else {
			cn._peerPieces.Remove(uint32(i))
		}
	}
	cn.peerPiecesChanged()
	return nil
}

// onPeerHasAllPieces switches the peer into have-all state, crediting
// availability for every piece it wasn't already credited with.
func (cn *PeerConn) onPeerHasAllPieces() {
	t := cn.t
	if t.haveInfo() {
		npp, pc := cn.newPeerPieces(), t.numPieces()
		for i := 0; i < pc; i += 1 {
			if !npp.Contains(bitmap.BitIndex(i)) {
				t.incPieceAvailability(i)
			}
		}
	}
	cn.peerSentHaveAll = true
	// The explicit piece set is redundant once peerSentHaveAll is set.
	cn._peerPieces.Clear()
	if !cn.t._pendingPieces.IsEmpty() {
		cn.updateRequests("Peer.onPeerHasAllPieces")
	}
	cn.peerPiecesChanged()
}

// onPeerSentHaveAll handles the fast-extension HAVE ALL message.
func (cn *PeerConn) onPeerSentHaveAll() error {
	cn.onPeerHasAllPieces()
	return nil
}

// peerSentHaveNone handles the fast-extension HAVE NONE message, dropping any
// availability previously credited to this peer.
func (cn *PeerConn) peerSentHaveNone() error {
	cn.t.decPeerPieceAvailability(&cn.Peer)
	cn._peerPieces.Clear()
	cn.peerSentHaveAll = false
	cn.peerPiecesChanged()
	return nil
}

// requestPendingMetadata asks the peer for the metadata (info) pieces we
// still lack, if the peer supports the metadata extension.
func (c *PeerConn) requestPendingMetadata() {
	if c.t.haveInfo() {
		return
	}
	if c.PeerExtensionIDs[pp.ExtensionNameMetadata] == 0 {
		// Peer doesn't support this.
		return
	}
	// Request metadata pieces that we don't have in a random order.
	var pending []int
	for index := 0; index < c.t.metadataPieceCount(); index++ {
		if !c.t.haveMetadataPiece(index) && !c.requestedMetadataPiece(index) {
			pending = append(pending, index)
		}
	}
	rand.Shuffle(len(pending), func(i, j int) { pending[i], pending[j] = pending[j], pending[i] })
	for _, i := range pending {
		c.requestMetadataPiece(i)
	}
}

// wroteMsg records expvar and per-connection stats for an outgoing message.
func (cn *PeerConn) wroteMsg(msg *pp.Message) {
	torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1)
	if msg.Type == pp.Extended {
		for name, id := range cn.PeerExtensionIDs {
			if id != msg.ExtendedID {
				continue
			}
			torrent.Add(fmt.Sprintf("Extended messages written for protocol %q", name), 1)
		}
	}
	cn.allStats(func(cs *ConnStats) { cs.wroteMsg(msg) })
}

// After handshake, we know what Torrent and Client stats to include for a
// connection.
func (cn *Peer) postHandshakeStats(f func(*ConnStats)) {
	t := cn.t
	f(&t.stats)
	f(&t.cl.stats)
}

// All ConnStats that include this connection. Some objects are not known
// until the handshake is complete, after which it's expected to reconcile the
// differences.
func (cn *Peer) allStats(f func(*ConnStats)) {
	f(&cn._stats)
	if cn.reconciledHandshakeStats {
		cn.postHandshakeStats(f)
	}
}

// wroteBytes accounts n bytes written to the peer across all stat scopes.
func (cn *PeerConn) wroteBytes(n int64) {
	cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesWritten }))
}

// readBytes accounts n bytes read from the peer across all stat scopes.
func (cn *Peer) readBytes(n int64) {
	cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesRead }))
}

// Returns whether the connection could be useful to us. We're seeding and
// they want data, we don't have metainfo and they can provide it, etc.
func (c *Peer) useful() bool {
	t := c.t
	if c.closed.IsSet() {
		return false
	}
	if !t.haveInfo() {
		// Without the info, the peer is useful iff it can supply metadata.
		return c.supportsExtension("ut_metadata")
	}
	if t.seeding() && c.peerInterested {
		return true
	}
	if c.peerHasWantedPieces() {
		return true
	}
	return false
}

// lastHelpful returns the most recent time this peer did something for us
// (sent a useful chunk, or — while seeding — accepted a chunk from us).
func (c *Peer) lastHelpful() (ret time.Time) {
	ret = c.lastUsefulChunkReceived
	if c.t.seeding() && c.lastChunkSent.After(ret) {
		ret = c.lastChunkSent
	}
	return
}

// fastEnabled reports whether BEP 6 (fast extension) is active: both sides
// must support it.
func (c *PeerConn) fastEnabled() bool {
	return c.PeerExtensionBytes.SupportsFast() && c.t.cl.config.Extensions.SupportsFast()
}

// reject sends a fast-extension REJECT for r and forgets the peer's request.
func (c *PeerConn) reject(r Request) {
	if !c.fastEnabled() {
		panic("fast not enabled")
	}
	c.write(r.ToMsg(pp.Reject))
	delete(c.peerRequests, r)
}

// onReadRequest validates an incoming REQUEST from the peer and, if
// acceptable, kicks off an asynchronous read of the chunk data.
func (c *PeerConn) onReadRequest(r Request) error {
	requestedChunkLengths.Add(strconv.FormatUint(r.Length.Uint64(), 10), 1)
	if _, ok := c.peerRequests[r]; ok {
		torrent.Add("duplicate requests received", 1)
		return nil
	}
	if c.choking {
		torrent.Add("requests received while choking", 1)
		if c.fastEnabled() {
			torrent.Add("requests rejected while choking", 1)
			c.reject(r)
		}
		return nil
	}
	// TODO: What if they've already requested this?
	if len(c.peerRequests) >= localClientReqq {
		torrent.Add("requests received while queue full", 1)
		if c.fastEnabled() {
			c.reject(r)
		}
		// BEP 6 says we may close here if we choose.
		return nil
	}
	if !c.t.havePiece(pieceIndex(r.Index)) {
		// This isn't necessarily them screwing up. We can drop pieces
		// from our storage, and can't communicate this to peers
		// except by reconnecting.
		requestsReceivedForMissingPieces.Add(1)
		return fmt.Errorf("peer requested piece we don't have: %v", r.Index.Int())
	}
	// Check this after we know we have the piece, so that the piece length will be known.
	if r.Begin+r.Length > c.t.pieceLength(pieceIndex(r.Index)) {
		torrent.Add("bad requests received", 1)
		return errors.New("bad Request")
	}
	if c.peerRequests == nil {
		c.peerRequests = make(map[Request]*peerRequestState, localClientReqq)
	}
	value := &peerRequestState{}
	c.peerRequests[r] = value
	go c.peerRequestDataReader(r, value)
	// c.tickleWriter()
	return nil
}

// peerRequestDataReader reads the requested chunk from storage off the client
// lock, then either stores the data on the request state (waking the writer)
// or reports the failure.
func (c *PeerConn) peerRequestDataReader(r Request, prs *peerRequestState) {
	b, err := readPeerRequestData(r, c)
	c.locker().Lock()
	defer c.locker().Unlock()
	if err != nil {
		c.peerRequestDataReadFailed(err, r)
	} else {
		if b == nil {
			panic("data must be non-nil to trigger send")
		}
		prs.data = b
		c.tickleWriter()
	}
}

// If this is maintained correctly, we might be able to support optional synchronous reading for
// chunk sending, the way it used to work.
func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
	c.logger.WithDefaultLevel(log.Warning).Printf("error reading chunk for peer Request %v: %v", r, err)
	if c.t.closed.IsSet() {
		return
	}
	i := pieceIndex(r.Index)
	if c.t.pieceComplete(i) {
		// There used to be more code here that just duplicated the following break. Piece
		// completions are currently cached, so I'm not sure how helpful this update is, except to
		// pull any completion changes pushed to the storage backend in failed reads that got us
		// here.
		c.t.updatePieceCompletion(i)
	}
	// If we failed to send a chunk, choke the peer to ensure they flush all their requests. We've
	// probably dropped a piece from storage, but there's no way to communicate this to the peer. If
	// they ask for it again, we'll kick them to allow us to send them an updated bitfield on the
	// next connect. TODO: Support rejecting here too.
	if c.choking {
		c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
	}
	c.choke(c.write)
}

// readPeerRequestData reads the chunk described by r from torrent storage.
// A short read without an error is a bug in the storage layer.
func readPeerRequestData(r Request, c *PeerConn) ([]byte, error) {
	b := make([]byte, r.Length)
	p := c.t.info.Piece(int(r.Index))
	n, err := c.t.readAt(b, p.Offset()+int64(r.Begin))
	if n == len(b) {
		if err == io.EOF {
			// A full read that hit EOF exactly is still a success.
			err = nil
		}
	} else {
		if err == nil {
			panic("expected error")
		}
	}
	return b, err
}

// runSafeExtraneous runs incidental work (like expvar counting) in a
// goroutine; the dead else-branch is a toggle left for debugging.
func runSafeExtraneous(f func()) {
	if true {
		go f()
	} else {
		f()
	}
}

// logProtocolBehaviour logs peer-protocol oddities annotated with the peer's
// id and client name.
func (c *PeerConn) logProtocolBehaviour(level log.Level, format string, arg ...interface{}) {
	c.logger.WithLevel(level).WithContextText(fmt.Sprintf(
		"peer id %q, ext v %q", c.PeerID, c.PeerClientName.Load(),
	)).SkipCallers(1).Printf(format, arg...)
}

// Processes incoming BitTorrent wire-protocol messages. The client lock is held upon entry and
// exit. Returning will end the connection.
func (c *PeerConn) mainReadLoop() (err error) {
	defer func() {
		if err != nil {
			torrent.Add("connection.mainReadLoop returned with error", 1)
		} else {
			torrent.Add("connection.mainReadLoop returned with no error", 1)
		}
	}()
	t := c.t
	cl := t.cl
	decoder := pp.Decoder{
		R:         bufio.NewReaderSize(c.r, 1<<17),
		MaxLength: 256 * 1024,
		Pool:      &t.chunkPool,
	}
	for {
		var msg pp.Message
		// Decode without holding the client lock, since it blocks on the
		// network.
		func() {
			cl.unlock()
			defer cl.lock()
			err = decoder.Decode(&msg)
		}()
		if cb := c.callbacks.ReadMessage; cb != nil && err == nil {
			cb(c, &msg)
		}
		if t.closed.IsSet() || c.closed.IsSet() {
			return nil
		}
		if err != nil {
			return err
		}
		c.lastMessageReceived = time.Now()
		if msg.Keepalive {
			receivedKeepalives.Add(1)
			continue
		}
		messageTypesReceived.Add(msg.Type.String(), 1)
		if msg.Type.FastExtension() && !c.fastEnabled() {
			runSafeExtraneous(func() { torrent.Add("fast messages received when extension is disabled", 1) })
			return fmt.Errorf("received fast extension message (type=%v) but extension is disabled", msg.Type)
		}
		switch msg.Type {
		case pp.Choke:
			if c.peerChoking {
				break
			}
			if !c.fastEnabled() {
				c.deleteAllRequests()
			} else {
				// We don't decrement pending requests here, let's wait for the peer to either
				// reject or satisfy the outstanding requests. Additionally, some peers may unchoke
				// us and resume where they left off, we don't want to have piled on to those chunks
				// in the meanwhile. I think a peer's ability to abuse this should be limited: they
				// could let us request a lot of stuff, then choke us and never reject, but they're
				// only a single peer, our chunk balancing should smooth over this abuse.
			}
			c.peerChoking = true
			c.updateExpectingChunks()
		case pp.Unchoke:
			if !c.peerChoking {
				// Some clients do this for some reason. Transmission doesn't error on this, so we
				// won't for consistency.
				c.logProtocolBehaviour(log.Debug, "received unchoke when already unchoked")
				break
			}
			c.peerChoking = false
			// Count requests that survived the choke (non-allowed-fast ones).
			preservedCount := 0
			c.requestState.Requests.Iterate(func(x uint32) bool {
				if !c.peerAllowedFast.Contains(x / c.t.chunksPerRegularPiece()) {
					preservedCount++
				}
				return true
			})
			if preservedCount != 0 {
				// TODO: Yes this is a debug log but I'm not happy with the state of the logging lib
				// right now.
				c.logger.WithLevel(log.Debug).Printf(
					"%v requests were preserved while being choked (fast=%v)",
					preservedCount,
					c.fastEnabled())
				torrent.Add("requestsPreservedThroughChoking", int64(preservedCount))
			}
			if !c.t._pendingPieces.IsEmpty() {
				c.updateRequests("unchoked")
			}
			c.updateExpectingChunks()
		case pp.Interested:
			c.peerInterested = true
			c.tickleWriter()
		case pp.NotInterested:
			c.peerInterested = false
			// We don't clear their requests since it isn't clear in the spec.
			// We'll probably choke them for this, which will clear them if
			// appropriate, and is clearly specified.
		case pp.Have:
			err = c.peerSentHave(pieceIndex(msg.Index))
		case pp.Bitfield:
			err = c.peerSentBitfield(msg.Bitfield)
		case pp.Request:
			r := newRequestFromMessage(&msg)
			err = c.onReadRequest(r)
		case pp.Piece:
			c.doChunkReadStats(int64(len(msg.Piece)))
			err = c.receiveChunk(&msg)
			if len(msg.Piece) == int(t.chunkSize) {
				// Full-size chunk buffers go back to the pool.
				t.chunkPool.Put(&msg.Piece)
			}
			if err != nil {
				err = fmt.Errorf("receiving chunk: %w", err)
			}
		case pp.Cancel:
			req := newRequestFromMessage(&msg)
			c.onPeerSentCancel(req)
		case pp.Port:
			ipa, ok := tryIpPortFromNetAddr(c.RemoteAddr)
			if !ok {
				break
			}
			pingAddr := net.UDPAddr{
				IP:   ipa.IP,
				Port: ipa.Port,
			}
			if msg.Port != 0 {
				pingAddr.Port = int(msg.Port)
			}
			cl.eachDhtServer(func(s DhtServer) {
				go s.Ping(&pingAddr)
			})
		case pp.Suggest:
			torrent.Add("suggests received", 1)
			log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).SetLevel(log.Debug).Log(c.t.logger)
			c.updateRequests("suggested")
		case pp.HaveAll:
			err = c.onPeerSentHaveAll()
		case pp.HaveNone:
			err = c.peerSentHaveNone()
		case pp.Reject:
			req := newRequestFromMessage(&msg)
			if !c.remoteRejectedRequest(c.t.requestIndexFromRequest(req)) {
				log.Printf("received invalid reject [request=%v, peer=%v]", req, c)
				err = fmt.Errorf("received invalid reject [request=%v]", req)
			}
		case pp.AllowedFast:
			torrent.Add("allowed fasts received", 1)
			log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).SetLevel(log.Debug).Log(c.t.logger)
			c.updateRequests("PeerConn.mainReadLoop allowed fast")
		case pp.Extended:
			err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload)
		default:
			err = fmt.Errorf("received unknown message type: %#v", msg.Type)
		}
		if err != nil {
			return err
		}
	}
}

// Returns true if it was valid to reject the request.
func (c *Peer) remoteRejectedRequest(r RequestIndex) bool {
	// The reject is valid if the index was either outstanding or pending
	// cancel-acknowledgement.
	if !c.deleteRequest(r) && !c.requestState.Cancelled.CheckedRemove(r) {
		return false
	}
	if c.isLowOnRequests() {
		c.updateRequests("Peer.remoteRejectedRequest")
	}
	c.decExpectedChunkReceive(r)
	return true
}

// decExpectedChunkReceive decrements the expected-arrival count for r,
// removing the map entry at zero. Panics if nothing was expected.
func (c *Peer) decExpectedChunkReceive(r RequestIndex) {
	count := c.validReceiveChunks[r]
	if count == 1 {
		delete(c.validReceiveChunks, r)
	} else if count > 1 {
		c.validReceiveChunks[r] = count - 1
	} else {
		panic(r)
	}
}

// onReadExtendedMsg dispatches a BEP 10 extended message by its extension ID:
// the extended handshake, metadata exchange, or PEX.
func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err error) {
	defer func() {
		// TODO: Should we still do this?
		if err != nil {
			// These clients use their own extension IDs for outgoing message
			// types, which is incorrect.
			if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) || strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
				err = nil
			}
		}
	}()
	t := c.t
	cl := t.cl
	switch id {
	case pp.HandshakeExtendedID:
		var d pp.ExtendedHandshakeMessage
		if err := bencode.Unmarshal(payload, &d); err != nil {
			c.logger.Printf("error parsing extended handshake message %q: %s", payload, err)
			return fmt.Errorf("unmarshalling extended handshake payload: %w", err)
		}
		if cb := c.callbacks.ReadExtendedHandshake; cb != nil {
			cb(c, &d)
		}
		// c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
		if d.Reqq != 0 {
			c.PeerMaxRequests = d.Reqq
		}
		c.PeerClientName.Store(d.V)
		if c.PeerExtensionIDs == nil {
			c.PeerExtensionIDs = make(map[pp.ExtensionName]pp.ExtensionNumber, len(d.M))
		}
		c.PeerListenPort = d.Port
		c.PeerPrefersEncryption = d.Encryption
		for name, id := range d.M {
			if _, ok := c.PeerExtensionIDs[name]; !ok {
				peersSupportingExtension.Add(
					// expvar.Var.String must produce valid JSON. "ut_payme\xeet_address" was being
					// entered here which caused problems later when unmarshalling.
					strconv.Quote(string(name)),
					1)
			}
			c.PeerExtensionIDs[name] = id
		}
		if d.MetadataSize != 0 {
			if err = t.setMetadataSize(d.MetadataSize); err != nil {
				return fmt.Errorf("setting metadata size to %d: %w", d.MetadataSize, err)
			}
		}
		c.requestPendingMetadata()
		if !t.cl.config.DisablePEX {
			t.pex.Add(c) // we learnt enough now
			c.pex.Init(c)
		}
		return nil
	case metadataExtendedId:
		err := cl.gotMetadataExtensionMsg(payload, t, c)
		if err != nil {
			return fmt.Errorf("handling metadata extension message: %w", err)
		}
		return nil
	case pexExtendedId:
		if !c.pex.IsEnabled() {
			return nil // or hang-up maybe?
		}
		return c.pex.Recv(payload)
	default:
		return fmt.Errorf("unexpected extended message ID: %v", id)
	}
}

// Set both the Reader and Writer for the connection from a single ReadWriter.
func (cn *PeerConn) setRW(rw io.ReadWriter) {
	cn.r = rw
	cn.w = rw
}

// Returns the Reader and Writer as a combined ReadWriter.
func (cn *PeerConn) rw() io.ReadWriter {
	return struct {
		io.Reader
		io.Writer
	}{cn.r, cn.w}
}

// doChunkReadStats accounts a received chunk of the given size in all stat
// scopes for this connection.
func (c *Peer) doChunkReadStats(size int64) {
	c.allStats(func(cs *ConnStats) { cs.receivedChunk(size) })
}

// Handle a received chunk from a peer.
func (c *Peer) receiveChunk(msg *pp.Message) error {
	chunksReceived.Add("total", 1)
	ppReq := newRequestFromMessage(msg)
	req := c.t.requestIndexFromRequest(ppReq)
	if c.peerChoking {
		chunksReceived.Add("while choked", 1)
	}
	if c.validReceiveChunks[req] <= 0 {
		chunksReceived.Add("unexpected", 1)
		return errors.New("received unexpected chunk")
	}
	c.decExpectedChunkReceive(req)
	if c.peerChoking && c.peerAllowedFast.Contains(bitmap.BitIndex(ppReq.Index)) {
		chunksReceived.Add("due to allowed fast", 1)
	}
	// The request needs to be deleted immediately to prevent cancels occurring asynchronously when
	// have actually already received the piece, while we have the Client unlocked to write the data
	// out.
	intended := false
	{
		if c.requestState.Requests.Contains(req) {
			for _, f := range c.callbacks.ReceivedRequested {
				f(PeerMessageEvent{c, msg})
			}
		}
		// Request has been satisfied.
		if c.deleteRequest(req) || c.requestState.Cancelled.CheckedRemove(req) {
			intended = true
			if !c.peerChoking {
				c._chunksReceivedWhileExpecting++
			}
			if c.isLowOnRequests() {
				c.updateRequests("Peer.receiveChunk deleted request")
			}
		} else {
			chunksReceived.Add("unintended", 1)
		}
	}
	t := c.t
	cl := t.cl
	// Do we actually want this chunk?
	if t.haveChunk(ppReq) {
		// panic(fmt.Sprintf("%+v", ppReq))
		chunksReceived.Add("redundant", 1)
		c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
		return nil
	}
	piece := &t.pieces[ppReq.Index]
	c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful }))
	c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData }))
	if intended {
		c.piecesReceivedSinceLastRequestUpdate++
		c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
	}
	for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData {
		f(ReceivedUsefulDataEvent{c, msg})
	}
	c.lastUsefulChunkReceived = time.Now()
	// Need to record that it hasn't been written yet, before we attempt to do
	// anything with it.
	piece.incrementPendingWrites()
	// Record that we have the chunk, so we aren't trying to download it while
	// waiting for it to be written to storage.
	piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))
	// Cancel pending requests for this chunk from *other* peers.
	if p := t.pendingRequests[req]; p != nil {
		if p == c {
			panic("should not be pending request from conn that just received it")
		}
		p.cancel(req)
	}
	err := func() error {
		cl.unlock()
		defer cl.lock()
		concurrentChunkWrites.Add(1)
		defer concurrentChunkWrites.Add(-1)
		// Write the chunk out. Note that the upper bound on chunk writing concurrency will be the
		// number of connections. We write inline with receiving the chunk (with this lock dance),
		// because we want to handle errors synchronously and I haven't thought of a nice way to
		// defer any concurrency to the storage and have that notify the client of errors. TODO: Do
		// that instead.
		return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	}()
	piece.decrementPendingWrites()
	if err != nil {
		c.logger.WithDefaultLevel(log.Error).Printf("writing received chunk %v: %v", req, err)
		t.pendRequest(req)
		// Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
		// request update runs while we're writing the chunk that just failed. Then we never do a
		// fresh update after pending the failed request.
		c.updateRequests("Peer.receiveChunk error writing chunk")
		t.onWriteChunkErr(err)
		return nil
	}
	c.onDirtiedPiece(pieceIndex(ppReq.Index))
	// We need to ensure the piece is only queued once, so only the last chunk writer gets this job.
	if t.pieceAllDirty(pieceIndex(ppReq.Index)) && piece.pendingWrites == 0 {
		t.queuePieceCheck(pieceIndex(ppReq.Index))
		// We don't pend all chunks here anymore because we don't want code dependent on the dirty
		// chunk status (such as the haveChunk call above) to have to check all the various other
		// piece states like queued for hash, hashing etc. This does mean that we need to be sure
		// that chunk pieces are pended at an appropriate time later however.
	}
	cl.event.Broadcast()
	// We do this because we've written a chunk, and may change PieceState.Partial.
	t.publishPieceChange(pieceIndex(ppReq.Index))
	return nil
}

// onDirtiedPiece records that this peer contributed data to the given piece,
// both on the peer and in the piece's dirtiers set.
func (c *Peer) onDirtiedPiece(piece pieceIndex) {
	if c.peerTouchedPieces == nil {
		c.peerTouchedPieces = make(map[pieceIndex]struct{})
	}
	c.peerTouchedPieces[piece] = struct{}{}
	ds := &c.t.pieces[piece].dirtiers
	if *ds == nil {
		*ds = make(map[*Peer]struct{})
	}
	(*ds)[c] = struct{}{}
}

// uploadAllowed reports whether we should be sending this peer data at all.
func (c *PeerConn) uploadAllowed() bool {
	if c.t.cl.config.NoUpload {
		return false
	}
	if c.t.dataUploadDisallowed {
		return false
	}
	if c.t.seeding() {
		return true
	}
	if !c.peerHasWantedPieces() {
		return false
	}
	// Don't upload more than 100 KiB more than we download.
	if c._stats.BytesWrittenData.Int64() >= c._stats.BytesReadData.Int64()+100<<10 {
		return false
	}
	return true
}

// setRetryUploadTimer arranges for the writer to be tickled after delay,
// reusing a single timer per connection.
func (c *PeerConn) setRetryUploadTimer(delay time.Duration) {
	if c.uploadTimer == nil {
		c.uploadTimer = time.AfterFunc(delay, c.tickleWriter)
	} else {
		c.uploadTimer.Reset(delay)
	}
}

// Also handles choking and unchoking of the remote peer.
func (c *PeerConn) upload(msg func(pp.Message) bool) bool {
	// Breaking or completing this loop means we don't want to upload to the
	// peer anymore, and we choke them.
another:
	for c.uploadAllowed() {
		// We want to upload to the peer.
		if !c.unchoke(msg) {
			return false
		}
		for r, state := range c.peerRequests {
			if state.data == nil {
				// Chunk data hasn't been read from storage yet.
				continue
			}
			res := c.t.cl.config.UploadRateLimiter.ReserveN(time.Now(), int(r.Length))
			if !res.OK() {
				panic(fmt.Sprintf("upload rate limiter burst size < %d", r.Length))
			}
			delay := res.Delay()
			if delay > 0 {
				res.Cancel()
				c.setRetryUploadTimer(delay)
				// Hard to say what to return here.
				return true
			}
			more := c.sendChunk(r, msg, state)
			delete(c.peerRequests, r)
			if !more {
				return false
			}
			// Restart iteration since we mutated peerRequests.
			goto another
		}
		return true
	}
	return c.choke(msg)
}

func (cn *PeerConn) drop() {
	cn.t.dropConnection(cn)
}

// netGoodPiecesDirtied is good pieces minus bad pieces this peer contributed
// to — a crude trust score.
func (cn *Peer) netGoodPiecesDirtied() int64 {
	return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
}

// peerHasWantedPieces reports whether the peer claims any piece we still want.
func (c *Peer) peerHasWantedPieces() bool {
	if all, _ := c.peerHasAllPieces(); all {
		return !c.t.haveAllPieces() && !c.t._pendingPieces.IsEmpty()
	}
	if !c.t.haveInfo() {
		return !c.peerPieces().IsEmpty()
	}
	return c.peerPieces().Intersects(&c.t._pendingPieces)
}

// Returns true if an outstanding request is removed. Cancelled requests should be handled
// separately.
func (c *Peer) deleteRequest(r RequestIndex) bool {
	if !c.requestState.Requests.CheckedRemove(r) {
		return false
	}
	for _, f := range c.callbacks.DeletedRequest {
		f(PeerRequestEvent{c, c.t.requestIndexToRequest(r)})
	}
	c.updateExpectingChunks()
	if c.t.requestingPeer(r) != c {
		panic("only one peer should have a given request at a time")
	}
	delete(c.t.pendingRequests, r)
	delete(c.t.lastRequested, r)
	return true
}

// deleteAllRequests removes every outstanding request. Iterates a clone
// because deleteRequest mutates the underlying bitmap.
func (c *Peer) deleteAllRequests() {
	c.requestState.Requests.Clone().Iterate(func(x uint32) bool {
		if !c.deleteRequest(x) {
			panic("request should exist")
		}
		return true
	})
	if !c.requestState.Requests.IsEmpty() {
		panic(c.requestState.Requests.GetCardinality())
	}
}

// This is called when something has changed that should wake the writer, such as putting stuff into
// the writeBuffer, or changing some state that the writer can act on.
func (c *PeerConn) tickleWriter() {
	c.messageWriter.writeCond.Broadcast()
}

// sendChunk writes a PIECE message carrying the previously-read data for r.
func (c *PeerConn) sendChunk(r Request, msg func(pp.Message) bool, state *peerRequestState) (more bool) {
	c.lastChunkSent = time.Now()
	return msg(pp.Message{
		Type:  pp.Piece,
		Index: r.Index,
		Begin: r.Begin,
		Piece: state.data,
	})
}

// setTorrent associates the connection with a torrent, exactly once.
func (c *PeerConn) setTorrent(t *Torrent) {
	if c.t != nil {
		panic("connection already associated with a torrent")
	}
	c.t = t
	c.logger.WithDefaultLevel(log.Debug).Printf("set torrent=%v", t)
	t.reconcileHandshakeStats(c)
}

// peerPriority computes the BEP 40 canonical peer priority for this peer.
func (c *Peer) peerPriority() (peerPriority, error) {
	return bep40Priority(c.remoteIpPort(), c.t.cl.publicAddr(c.remoteIp()))
}

func (c *Peer) remoteIp() net.IP {
	host, _, _ := net.SplitHostPort(c.RemoteAddr.String())
	return net.ParseIP(host)
}

func (c *Peer) remoteIpPort() IpPort {
	ipa, _ := tryIpPortFromNetAddr(c.RemoteAddr)
	return IpPort{ipa.IP, uint16(ipa.Port)}
}

// pexPeerFlags derives the PEX flags we advertise for this connection.
func (c *PeerConn) pexPeerFlags() pp.PexPeerFlags {
	f := pp.PexPeerFlags(0)
	if c.PeerPrefersEncryption {
		f |= pp.PexPrefersEncryption
	}
	if c.outgoing {
		f |= pp.PexOutgoingConn
	}
	if c.utp() {
		f |= pp.PexSupportsUtp
	}
	return f
}

// This returns the address to use if we want to dial the peer again. It incorporates the peer's
// advertised listen port.
func (c *PeerConn) dialAddr() PeerRemoteAddr {
	if !c.outgoing && c.PeerListenPort != 0 {
		// Copy the address and substitute the advertised listen port.
		switch addr := c.RemoteAddr.(type) {
		case *net.TCPAddr:
			dialAddr := *addr
			dialAddr.Port = c.PeerListenPort
			return &dialAddr
		case *net.UDPAddr:
			dialAddr := *addr
			dialAddr.Port = c.PeerListenPort
			return &dialAddr
		}
	}
	return c.RemoteAddr
}

// pexEvent builds the PEX event describing this connection for gossip.
func (c *PeerConn) pexEvent(t pexEventType) pexEvent {
	f := c.pexPeerFlags()
	addr := c.dialAddr()
	return pexEvent{t, addr, f, nil}
}

func (c *PeerConn) String() string {
	return fmt.Sprintf("%T %p [id=%q, exts=%v, v=%q]", c, c, c.PeerID, c.PeerExtensionBytes, c.PeerClientName.Load())
}

func (c *Peer) trust() connectionTrust {
	return connectionTrust{c.trusted, c.netGoodPiecesDirtied()}
}

// connectionTrust orders peers by explicit trust, then by net good pieces.
type connectionTrust struct {
	Implicit            bool
	NetGoodPiecesDirted int64
}

func (l connectionTrust) Less(r connectionTrust) bool {
	return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).Less()
}

// Returns the pieces the peer could have based on their claims. If we don't know how many pieces
// are in the torrent, it could be a very large range the peer has sent HaveAll.
func (cn *PeerConn) PeerPieces() *roaring.Bitmap {
	cn.locker().RLock()
	defer cn.locker().RUnlock()
	return cn.newPeerPieces()
}

// Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims.
func (cn *Peer) newPeerPieces() *roaring.Bitmap {
	// TODO: Can we use copy on write?
	ret := cn.peerPieces().Clone()
	if all, _ := cn.peerHasAllPieces(); all {
		if cn.t.haveInfo() {
			ret.AddRange(0, bitmap.BitRange(cn.t.numPieces()))
		} else {
			ret.AddRange(0, bitmap.ToEnd)
		}
	}
	return ret
}

func (cn *Peer) stats() *ConnStats {
	return &cn._stats
}

// TryAsPeerConn down-casts the peer implementation to *PeerConn when it is one.
func (p *Peer) TryAsPeerConn() (*PeerConn, bool) {
	pc, ok := p.peerImpl.(*PeerConn)
	return pc, ok
}

func (pc *PeerConn) isLowOnRequests() bool {
	return pc.requestState.Requests.IsEmpty() && pc.requestState.Cancelled.IsEmpty()
}

func (p *Peer) uncancelledRequests() uint64 {
	return p.requestState.Requests.GetCardinality()
}

// remoteIsTransmission detects Transmission by its Azureus-style peer-id prefix.
func (pc *PeerConn) remoteIsTransmission() bool {
	return bytes.HasPrefix(pc.PeerID[:], []byte("-TR")) && pc.PeerID[7] == '-'
}

Assert that a request isn't cancelled before requesting it again

package torrent

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net"
	"sort"
	"strconv"
	"strings"
	"sync/atomic"
	"time"

	"github.com/RoaringBitmap/roaring"
	"github.com/anacrolix/log"
	"github.com/anacrolix/missinggo/iter"
	"github.com/anacrolix/missinggo/v2/bitmap"
	"github.com/anacrolix/multiless"

	"github.com/anacrolix/chansync"
	"github.com/anacrolix/torrent/bencode"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/mse"
	pp "github.com/anacrolix/torrent/peer_protocol"
	request_strategy "github.com/anacrolix/torrent/request-strategy"
)

// PeerSource records how we learned about a peer.
type PeerSource string

const (
	PeerSourceTracker         = "Tr"
	PeerSourceIncoming        = "I"
	PeerSourceDhtGetPeers     = "Hg" // Peers we found by searching a DHT.
	PeerSourceDhtAnnouncePeer = "Ha" // Peers that were announced to us by a DHT.
	PeerSourcePex             = "X"
	// The peer was given directly, such as through a magnet link.
	PeerSourceDirect = "M"
)

// peerRequestState holds the chunk data for a request the remote peer made of
// us, once it has been read from storage.
type peerRequestState struct {
	data []byte
}

// PeerRemoteAddr is the minimal address interface required of a peer's
// remote endpoint.
type PeerRemoteAddr interface {
	String() string
}

// Since we have to store all the requests in memory, we can't reasonably exceed what would be
// indexable with the memory space available.
type (
	maxRequests  = int
	requestState = request_strategy.PeerRequestState
)

// Peer holds the protocol-agnostic state for a connection with a peer.
type Peer struct {
	// First to ensure 64-bit alignment for atomics. See #262.
	_stats ConnStats

	t *Torrent

	peerImpl
	callbacks *Callbacks

	outgoing   bool
	Network    string
	RemoteAddr PeerRemoteAddr
	// True if the connection is operating over MSE obfuscation.
	headerEncrypted bool
	cryptoMethod    mse.CryptoMethod
	Discovery       PeerSource
	trusted         bool
	closed          chansync.SetOnce
	// Set true after we've added our ConnStats generated during handshake to
	// other ConnStat instances as determined when the *Torrent became known.
	reconciledHandshakeStats bool

	lastMessageReceived     time.Time
	completedHandshake      time.Time
	lastUsefulChunkReceived time.Time
	lastChunkSent           time.Time

	// Stuff controlled by the local peer.
	needRequestUpdate    string
	requestState         requestState
	updateRequestsTimer  *time.Timer
	lastBecameInterested time.Time
	priorInterest        time.Duration

	lastStartedExpectingToReceiveChunks time.Time
	cumulativeExpectedToReceiveChunks   time.Duration
	_chunksReceivedWhileExpecting       int64

	choking                                bool
	piecesReceivedSinceLastRequestUpdate   maxRequests
	maxPiecesReceivedBetweenRequestUpdates maxRequests
	// Chunks that we might reasonably expect to receive from the peer. Due to latency, buffering,
	// and implementation differences, we may receive chunks that are no longer in the set of
	// requests actually want. This could use a roaring.BSI if the memory use becomes noticeable.
	validReceiveChunks map[RequestIndex]int
	// Indexed by metadata piece, set to true if posted and pending a
	// response.
	metadataRequests []bool
	sentHaves        bitmap.Bitmap

	// Stuff controlled by the remote peer.
	peerInterested        bool
	peerChoking           bool
	peerRequests          map[Request]*peerRequestState
	PeerPrefersEncryption bool // as indicated by 'e' field in extension handshake
	PeerListenPort        int
	// The highest possible number of pieces the torrent could have based on
	// communication with the peer. Generally only useful until we have the
	// torrent info.
	peerMinPieces pieceIndex
	// Pieces we've accepted chunks for from the peer.
	peerTouchedPieces map[pieceIndex]struct{}
	peerAllowedFast   roaring.Bitmap

	PeerMaxRequests  maxRequests // Maximum pending requests the peer allows.
	PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
	PeerClientName   atomic.Value

	logger log.Logger
}

// Maintains the state of a BitTorrent-protocol based connection with a peer.
type PeerConn struct {
	Peer

	// A string that should identify the PeerConn's net.Conn endpoints. The net.Conn could
	// be wrapping WebRTC, uTP, or TCP etc. Used in writing the conn status for peers.
	connString string

	// See BEP 3 etc.
	PeerID             PeerID
	PeerExtensionBytes pp.PeerExtensionBits

	// The actual Conn, used for closing, and setting socket options. Do not use methods on this
	// while holding any mutexes.
	conn net.Conn
	// The Reader and Writer for this Conn, with hooks installed for stats,
	// limiting, deadlines etc.
	w io.Writer
	r io.Reader

	messageWriter peerConnMsgWriter

	uploadTimer *time.Timer
	pex         pexConnState

	// The pieces the peer has claimed to have.
	_peerPieces roaring.Bitmap
	// The peer has everything. This can occur due to a special message, when
	// we may not even know the number of pieces in the torrent yet.
	peerSentHaveAll bool
}

func (cn *PeerConn) connStatusString() string {
	return fmt.Sprintf("%+-55q %s %s", cn.PeerID, cn.PeerExtensionBytes, cn.connString)
}

// updateExpectingChunks maintains the timer state tracking how long we've
// been expecting chunks from this peer, accumulating elapsed time when
// expectation ends.
func (cn *Peer) updateExpectingChunks() {
	if cn.expectingChunks() {
		if cn.lastStartedExpectingToReceiveChunks.IsZero() {
			cn.lastStartedExpectingToReceiveChunks = time.Now()
		}
	} else {
		if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
			cn.cumulativeExpectedToReceiveChunks += time.Since(cn.lastStartedExpectingToReceiveChunks)
			cn.lastStartedExpectingToReceiveChunks = time.Time{}
		}
	}
}

// expectingChunks reports whether our outstanding requests should plausibly
// produce chunks: we're interested with requests, and either unchoked or
// holding requests in allowed-fast pieces.
func (cn *Peer) expectingChunks() bool {
	if cn.requestState.Requests.IsEmpty() {
		return false
	}
	if !cn.requestState.Interested {
		return false
	}
	if !cn.peerChoking {
		return true
	}
	haveAllowedFastRequests := false
	cn.peerAllowedFast.Iterate(func(i uint32) bool {
		haveAllowedFastRequests = roaringBitmapRangeCardinality(
			&cn.requestState.Requests,
			cn.t.pieceRequestIndexOffset(pieceIndex(i)),
			cn.t.pieceRequestIndexOffset(pieceIndex(i+1)),
		) == 0
		return !haveAllowedFastRequests
	})
	return haveAllowedFastRequests
}

// remoteChokingPiece reports whether the peer's choke applies to this piece
// (i.e. it isn't covered by allowed-fast).
func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool {
	return cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(piece))
}

// Returns true if the connection is over IPv6.
func (cn *PeerConn) ipv6() bool {
	ip := cn.remoteIp()
	if ip.To4() != nil {
		return false
	}
	return len(ip) == net.IPv6len
}

// Returns true the if the dialer/initiator has the lower client peer ID. TODO: Find the
// specification for this.
func (cn *PeerConn) isPreferredDirection() bool {
	return bytes.Compare(cn.t.cl.peerID[:], cn.PeerID[:]) < 0 == cn.outgoing
}

// Returns whether the left connection should be preferred over the right one,
// considering only their networking properties. If ok is false, we can't
// decide.
func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) (left, ok bool) { var ml multiLess ml.NextBool(l.isPreferredDirection(), r.isPreferredDirection()) ml.NextBool(!l.utp(), !r.utp()) ml.NextBool(l.ipv6(), r.ipv6()) return ml.FinalOk() } func (cn *Peer) cumInterest() time.Duration { ret := cn.priorInterest if cn.requestState.Interested { ret += time.Since(cn.lastBecameInterested) } return ret } func (cn *PeerConn) peerHasAllPieces() (all bool, known bool) { if cn.peerSentHaveAll { return true, true } if !cn.t.haveInfo() { return false, false } return cn._peerPieces.GetCardinality() == uint64(cn.t.numPieces()), true } func (cn *Peer) locker() *lockWithDeferreds { return cn.t.cl.locker() } func (cn *Peer) supportsExtension(ext pp.ExtensionName) bool { _, ok := cn.PeerExtensionIDs[ext] return ok } // The best guess at number of pieces in the torrent for this peer. func (cn *Peer) bestPeerNumPieces() pieceIndex { if cn.t.haveInfo() { return cn.t.numPieces() } return cn.peerMinPieces } func (cn *Peer) completedString() string { have := pieceIndex(cn.peerPieces().GetCardinality()) if all, _ := cn.peerHasAllPieces(); all { have = cn.bestPeerNumPieces() } return fmt.Sprintf("%d/%d", have, cn.bestPeerNumPieces()) } func (cn *PeerConn) onGotInfo(info *metainfo.Info) { cn.setNumPieces(info.NumPieces()) } // Correct the PeerPieces slice length. Return false if the existing slice is invalid, such as by // receiving badly sized BITFIELD, or invalid HAVE messages. 
func (cn *PeerConn) setNumPieces(num pieceIndex) {
	cn._peerPieces.RemoveRange(bitmap.BitRange(num), bitmap.ToEnd)
	cn.peerPiecesChanged()
}

// The raw bitmap of pieces the peer has explicitly claimed (ignores HAVE-ALL).
func (cn *PeerConn) peerPieces() *roaring.Bitmap {
	return &cn._peerPieces
}

// Human-readable age of an event timestamp, "never" for the zero time.
func eventAgeString(t time.Time) string {
	if t.IsZero() {
		return "never"
	}
	return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds())
}

// Flags describing the connection transport/encryption: E/e for RC4/header
// encryption, the discovery source rune, U for uTP.
func (cn *PeerConn) connectionFlags() (ret string) {
	c := func(b byte) {
		ret += string([]byte{b})
	}
	if cn.cryptoMethod == mse.CryptoMethodRC4 {
		c('E')
	} else if cn.headerEncrypted {
		c('e')
	}
	ret += string(cn.Discovery)
	if cn.utp() {
		c('U')
	}
	return
}

// Whether the connection's network is UDP-based (uTP).
func (cn *PeerConn) utp() bool {
	return parseNetworkString(cn.Network).Udp
}

// Inspired by https://github.com/transmission/transmission/wiki/Peer-Status-Text.
func (cn *Peer) statusFlags() (ret string) {
	c := func(b byte) {
		ret += string([]byte{b})
	}
	if cn.requestState.Interested {
		c('i')
	}
	if cn.choking {
		c('c')
	}
	c('-')
	ret += cn.connectionFlags()
	c('-')
	if cn.peerInterested {
		c('i')
	}
	if cn.peerChoking {
		c('c')
	}
	return
}

// Useful-data download rate in bytes/second, averaged over the time we were
// expecting chunks. Zero if nothing useful has been read.
func (cn *Peer) downloadRate() float64 {
	num := cn._stats.BytesReadUsefulData.Int64()
	if num == 0 {
		return 0
	}
	return float64(num) / cn.totalExpectingTime().Seconds()
}

// Count of outstanding requests grouped by the piece they belong to.
func (cn *Peer) numRequestsByPiece() (ret map[pieceIndex]int) {
	ret = make(map[pieceIndex]int)
	cn.requestState.Requests.Iterate(func(x uint32) bool {
		ret[pieceIndex(x/cn.t.chunksPerRegularPiece())]++
		return true
	})
	return
}

// Writes a multi-line, human-readable status report for this peer to w.
func (cn *Peer) writeStatus(w io.Writer, t *Torrent) {
	// \t isn't preserved in <pre> blocks?
	if cn.closed.IsSet() {
		fmt.Fprint(w, "CLOSED: ")
	}
	fmt.Fprintln(w, cn.connStatusString())
	prio, err := cn.peerPriority()
	prioStr := fmt.Sprintf("%08x", prio)
	if err != nil {
		prioStr += ": " + err.Error()
	}
	fmt.Fprintf(w, " bep40-prio: %v\n", prioStr)
	fmt.Fprintf(w, " last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n",
		eventAgeString(cn.lastMessageReceived),
		eventAgeString(cn.completedHandshake),
		eventAgeString(cn.lastHelpful()),
		cn.cumInterest(),
		cn.totalExpectingTime(),
	)
	fmt.Fprintf(w,
		" %s completed, %d pieces touched, good chunks: %v/%v:%v reqq: %d+%v/(%d/%d):%d/%d, flags: %s, dr: %.1f KiB/s\n",
		cn.completedString(),
		len(cn.peerTouchedPieces),
		&cn._stats.ChunksReadUseful,
		&cn._stats.ChunksRead,
		&cn._stats.ChunksWritten,
		cn.requestState.Requests.GetCardinality(),
		cn.requestState.Cancelled.GetCardinality(),
		cn.nominalMaxRequests(),
		cn.PeerMaxRequests,
		len(cn.peerRequests),
		localClientReqq,
		cn.statusFlags(),
		cn.downloadRate()/(1<<10),
	)
	fmt.Fprintf(w, " requested pieces:")
	type pieceNumRequestsType struct {
		piece       pieceIndex
		numRequests int
	}
	var pieceNumRequests []pieceNumRequestsType
	for piece, count := range cn.numRequestsByPiece() {
		pieceNumRequests = append(pieceNumRequests, pieceNumRequestsType{piece, count})
	}
	// Map iteration order is random; sort for a stable report.
	sort.Slice(pieceNumRequests, func(i, j int) bool {
		return pieceNumRequests[i].piece < pieceNumRequests[j].piece
	})
	for _, elem := range pieceNumRequests {
		fmt.Fprintf(w, " %v(%v)", elem.piece, elem.numRequests)
	}
	fmt.Fprintf(w, "\n")
}

// Closes the peer, idempotently: stops timers, runs the impl-specific close
// hook, releases piece-availability counts, and fires PeerClosed callbacks.
func (p *Peer) close() {
	if !p.closed.Set() {
		// Already closed.
		return
	}
	if p.updateRequestsTimer != nil {
		p.updateRequestsTimer.Stop()
	}
	p.peerImpl.onClose()
	if p.t != nil {
		p.t.decPeerPieceAvailability(p)
	}
	for _, f := range p.callbacks.PeerClosed {
		f(p)
	}
}

// PeerConn-specific close hook: shuts down PEX, wakes the writer so it can
// exit, and closes the underlying net.Conn off the lock (hence the goroutine).
func (cn *PeerConn) onClose() {
	if cn.pex.IsEnabled() {
		cn.pex.Close()
	}
	cn.tickleWriter()
	if cn.conn != nil {
		go cn.conn.Close()
	}
	if cb := cn.callbacks.PeerConnClosed; cb != nil {
		cb(cn)
	}
}

// Peer definitely has a piece, for purposes of requesting.
So it's not sufficient that we think // they do (known=true). func (cn *Peer) peerHasPiece(piece pieceIndex) bool { if all, known := cn.peerHasAllPieces(); all && known { return true } return cn.peerPieces().ContainsInt(piece) } // 64KiB, but temporarily less to work around an issue with WebRTC. TODO: Update when // https://github.com/pion/datachannel/issues/59 is fixed. const writeBufferHighWaterLen = 1 << 15 // Writes a message into the write buffer. Returns whether it's okay to keep writing. Writing is // done asynchronously, so it may be that we're not able to honour backpressure from this method. func (cn *PeerConn) write(msg pp.Message) bool { torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1) // We don't need to track bytes here because the connection's Writer has that behaviour injected // (although there's some delay between us buffering the message, and the connection writer // flushing it out.). notFull := cn.messageWriter.write(msg) // Last I checked only Piece messages affect stats, and we don't write those. cn.wroteMsg(&msg) cn.tickleWriter() return notFull } func (cn *PeerConn) requestMetadataPiece(index int) { eID := cn.PeerExtensionIDs[pp.ExtensionNameMetadata] if eID == pp.ExtensionDeleteNumber { return } if index < len(cn.metadataRequests) && cn.metadataRequests[index] { return } cn.logger.WithDefaultLevel(log.Debug).Printf("requesting metadata piece %d", index) cn.write(pp.MetadataExtensionRequestMsg(eID, index)) for index >= len(cn.metadataRequests) { cn.metadataRequests = append(cn.metadataRequests, false) } cn.metadataRequests[index] = true } func (cn *PeerConn) requestedMetadataPiece(index int) bool { return index < len(cn.metadataRequests) && cn.metadataRequests[index] } // The actual value to use as the maximum outbound requests. 
func (cn *Peer) nominalMaxRequests() (ret maxRequests) { return maxRequests(clamp(1, int64(cn.PeerMaxRequests), 2048)) } func (cn *Peer) totalExpectingTime() (ret time.Duration) { ret = cn.cumulativeExpectedToReceiveChunks if !cn.lastStartedExpectingToReceiveChunks.IsZero() { ret += time.Since(cn.lastStartedExpectingToReceiveChunks) } return } func (cn *PeerConn) onPeerSentCancel(r Request) { if _, ok := cn.peerRequests[r]; !ok { torrent.Add("unexpected cancels received", 1) return } if cn.fastEnabled() { cn.reject(r) } else { delete(cn.peerRequests, r) } } func (cn *PeerConn) choke(msg messageWriter) (more bool) { if cn.choking { return true } cn.choking = true more = msg(pp.Message{ Type: pp.Choke, }) if cn.fastEnabled() { for r := range cn.peerRequests { // TODO: Don't reject pieces in allowed fast set. cn.reject(r) } } else { cn.peerRequests = nil } return } func (cn *PeerConn) unchoke(msg func(pp.Message) bool) bool { if !cn.choking { return true } cn.choking = false return msg(pp.Message{ Type: pp.Unchoke, }) } func (cn *Peer) setInterested(interested bool) bool { if cn.requestState.Interested == interested { return true } cn.requestState.Interested = interested if interested { cn.lastBecameInterested = time.Now() } else if !cn.lastBecameInterested.IsZero() { cn.priorInterest += time.Since(cn.lastBecameInterested) } cn.updateExpectingChunks() // log.Printf("%p: setting interest: %v", cn, interested) return cn.writeInterested(interested) } func (pc *PeerConn) writeInterested(interested bool) bool { return pc.write(pp.Message{ Type: func() pp.MessageType { if interested { return pp.Interested } else { return pp.NotInterested } }(), }) } // The function takes a message to be sent, and returns true if more messages // are okay. type messageWriter func(pp.Message) bool // This function seems to only used by Peer.request. It's all logic checks, so maybe we can no-op it // when we want to go fast. 
// Validates that it's currently sane to request chunk index r from this peer.
// Returns an error for recoverable conditions; panics on broken invariants.
func (cn *Peer) shouldRequest(r RequestIndex) error {
	pi := pieceIndex(r / cn.t.chunksPerRegularPiece())
	if cn.requestState.Cancelled.Contains(r) {
		return errors.New("request is cancelled and waiting acknowledgement")
	}
	if !cn.peerHasPiece(pi) {
		return errors.New("requesting piece peer doesn't have")
	}
	if !cn.t.peerIsActive(cn) {
		panic("requesting but not in active conns")
	}
	if cn.closed.IsSet() {
		panic("requesting when connection is closed")
	}
	if cn.t.hashingPiece(pi) {
		panic("piece is being hashed")
	}
	if cn.t.pieceQueuedForHash(pi) {
		panic("piece is queued for hash")
	}
	if cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(pi)) {
		// This could occur if we made a request with the fast extension, and then got choked and
		// haven't had the request rejected yet.
		if !cn.requestState.Requests.Contains(r) {
			panic("peer choking and piece not allowed fast")
		}
	}
	return nil
}

// Like request, but panics on error.
func (cn *Peer) mustRequest(r RequestIndex) bool {
	more, err := cn.request(r)
	if err != nil {
		panic(err)
	}
	return more
}

// Records the request in local and torrent-level state and sends it to the
// peer. Returns whether more messages may be written, and an error if the
// request queue is full.
func (cn *Peer) request(r RequestIndex) (more bool, err error) {
	if err := cn.shouldRequest(r); err != nil {
		panic(err)
	}
	if cn.requestState.Requests.Contains(r) {
		// Already requested; nothing to do.
		return true, nil
	}
	if maxRequests(cn.requestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() {
		return true, errors.New("too many outstanding requests")
	}
	cn.requestState.Requests.Add(r)
	if cn.validReceiveChunks == nil {
		cn.validReceiveChunks = make(map[RequestIndex]int)
	}
	// Track how many chunks for this index we may legitimately receive.
	cn.validReceiveChunks[r]++
	cn.t.pendingRequests[r] = cn
	cn.t.lastRequested[r] = time.Now()
	cn.updateExpectingChunks()
	ppReq := cn.t.requestIndexToRequest(r)
	for _, f := range cn.callbacks.SentRequest {
		f(PeerRequestEvent{cn, ppReq})
	}
	return cn.peerImpl._request(ppReq), nil
}

// Actually writes the REQUEST message onto the wire.
func (me *PeerConn) _request(r Request) bool {
	return me.write(pp.Message{
		Type:   pp.Request,
		Index:  r.Index,
		Begin:  r.Begin,
		Length: r.Length,
	})
}

// Cancels an outstanding request. If the transport expects an explicit
// acknowledgement (fast extension), the request moves to the Cancelled set
// until the peer rejects or satisfies it.
func (me *Peer) cancel(r RequestIndex) {
	if !me.deleteRequest(r) {
		panic("request not existing should have been guarded")
	}
	if me._cancel(r) {
		if !me.requestState.Cancelled.CheckedAdd(r) {
			panic("request already cancelled")
		}
	}
	if me.isLowOnRequests() {
		me.updateRequests("Peer.cancel")
	}
}

// Sends a CANCEL. Returns whether we should expect a reject acknowledgement.
func (me *PeerConn) _cancel(r RequestIndex) bool {
	me.write(makeCancelMessage(me.t.requestIndexToRequest(r)))
	// Transmission does not send rejects for received cancels. See
	// https://github.com/transmission/transmission/pull/2275.
	return me.fastEnabled() && !me.remoteIsTransmission()
}

// Called by the connection writer to top up the outgoing buffer: request
// state first, then PEX, then uploads.
func (cn *PeerConn) fillWriteBuffer() {
	if !cn.maybeUpdateActualRequestState() {
		// Writer buffer is already full; don't pile on.
		return
	}
	if cn.pex.IsEnabled() {
		if flow := cn.pex.Share(cn.write); !flow {
			return
		}
	}
	cn.upload(cn.write)
}

// Sends a HAVE for the piece, once only per piece per connection.
func (cn *PeerConn) have(piece pieceIndex) {
	if cn.sentHaves.Get(bitmap.BitIndex(piece)) {
		return
	}
	cn.write(pp.Message{
		Type:  pp.Have,
		Index: pp.Integer(piece),
	})
	cn.sentHaves.Add(bitmap.BitIndex(piece))
}

// Sends our initial BITFIELD, which must precede any HAVE messages. Skipped
// entirely if we have no pieces yet.
func (cn *PeerConn) postBitfield() {
	if cn.sentHaves.Len() != 0 {
		panic("bitfield must be first have-related message sent")
	}
	if !cn.t.haveAnyPieces() {
		return
	}
	cn.write(pp.Message{
		Type:     pp.Bitfield,
		Bitfield: cn.t.bitfield(),
	})
	cn.sentHaves = bitmap.Bitmap{cn.t._completedPieces.Clone()}
}

// Sets a reason to update requests, and if there wasn't already one, handle it.
func (cn *Peer) updateRequests(reason string) {
	if cn.needRequestUpdate != "" {
		return
	}
	cn.needRequestUpdate = reason
	cn.handleUpdateRequests()
}

func (cn *PeerConn) handleUpdateRequests() {
	// The writer determines the request state as needed when it can write.
	cn.tickleWriter()
}

// Emits the indices in the Bitmaps bms in order, never repeating any index.
// skip is mutated during execution, and its initial values will never be
// emitted.
func iterBitmapsDistinct(skip *bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func {
	return func(cb iter.Callback) {
		for _, bm := range bms {
			if !iter.All(
				func(_i interface{}) bool {
					i := _i.(int)
					if skip.Contains(bitmap.BitIndex(i)) {
						return true
					}
					skip.Add(bitmap.BitIndex(i))
					return cb(i)
				},
				bm.Iter,
			) {
				// Consumer asked to stop.
				return
			}
		}
	}
}

// Hook run whenever the set of pieces the peer claims changes.
func (cn *Peer) peerPiecesChanged() {
	cn.t.maybeDropMutuallyCompletePeer(cn)
}

// Raises the lower bound on the number of pieces the peer implies exist.
func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
	if newMin > cn.peerMinPieces {
		cn.peerMinPieces = newMin
	}
}

// Handles a HAVE message: validates the index, updates availability counts
// and the claimed-pieces bitmap, and may trigger a request update.
func (cn *PeerConn) peerSentHave(piece pieceIndex) error {
	if cn.t.haveInfo() && piece >= cn.t.numPieces() || piece < 0 {
		return errors.New("invalid piece")
	}
	if cn.peerHasPiece(piece) {
		// Already known; nothing changes.
		return nil
	}
	cn.raisePeerMinPieces(piece + 1)
	if !cn.peerHasPiece(piece) {
		cn.t.incPieceAvailability(piece)
	}
	cn._peerPieces.Add(uint32(piece))
	if cn.t.wantPieceIndex(piece) {
		cn.updateRequests("have")
	}
	cn.peerPiecesChanged()
	return nil
}

// Handles a BITFIELD message: reconciles piece availability counts against
// the previous claims, then replaces the claimed-pieces bitmap.
func (cn *PeerConn) peerSentBitfield(bf []bool) error {
	if len(bf)%8 != 0 {
		panic("expected bitfield length divisible by 8")
	}
	// We know that the last byte means that at most the last 7 bits are wasted.
	cn.raisePeerMinPieces(pieceIndex(len(bf) - 7))
	if cn.t.haveInfo() && len(bf) > int(cn.t.numPieces()) {
		// Ignore known excess pieces.
		bf = bf[:cn.t.numPieces()]
	}
	pp := cn.newPeerPieces()
	cn.peerSentHaveAll = false
	for i, have := range bf {
		// Adjust torrent-wide availability for the delta versus previous claims.
		if have {
			cn.raisePeerMinPieces(pieceIndex(i) + 1)
			if !pp.Contains(bitmap.BitIndex(i)) {
				cn.t.incPieceAvailability(i)
			}
		} else {
			if pp.Contains(bitmap.BitIndex(i)) {
				cn.t.decPieceAvailability(i)
			}
		}
		if have {
			cn._peerPieces.Add(uint32(i))
			if cn.t.wantPieceIndex(i) {
				cn.updateRequests("bitfield")
			}
		} else {
			cn._peerPieces.Remove(uint32(i))
		}
	}
	cn.peerPiecesChanged()
	return nil
}

// Marks the peer as having everything, crediting availability for any pieces
// not previously claimed, and clears the explicit bitmap (HAVE-ALL subsumes it).
func (cn *PeerConn) onPeerHasAllPieces() {
	t := cn.t
	if t.haveInfo() {
		npp, pc := cn.newPeerPieces(), t.numPieces()
		for i := 0; i < pc; i += 1 {
			if !npp.Contains(bitmap.BitIndex(i)) {
				t.incPieceAvailability(i)
			}
		}
	}
	cn.peerSentHaveAll = true
	cn._peerPieces.Clear()
	if !cn.t._pendingPieces.IsEmpty() {
		cn.updateRequests("Peer.onPeerHasAllPieces")
	}
	cn.peerPiecesChanged()
}

// Handles the fast-extension HAVE_ALL message.
func (cn *PeerConn) onPeerSentHaveAll() error {
	cn.onPeerHasAllPieces()
	return nil
}

// Handles the fast-extension HAVE_NONE message: releases all availability
// this peer contributed and clears its claims.
func (cn *PeerConn) peerSentHaveNone() error {
	cn.t.decPeerPieceAvailability(&cn.Peer)
	cn._peerPieces.Clear()
	cn.peerSentHaveAll = false
	cn.peerPiecesChanged()
	return nil
}

// Requests any ut_metadata pieces we still need, in random order, if the peer
// supports the extension and we lack the metainfo.
func (c *PeerConn) requestPendingMetadata() {
	if c.t.haveInfo() {
		return
	}
	if c.PeerExtensionIDs[pp.ExtensionNameMetadata] == 0 {
		// Peer doesn't support this.
		return
	}
	// Request metadata pieces that we don't have in a random order.
	var pending []int
	for index := 0; index < c.t.metadataPieceCount(); index++ {
		if !c.t.haveMetadataPiece(index) && !c.requestedMetadataPiece(index) {
			pending = append(pending, index)
		}
	}
	rand.Shuffle(len(pending), func(i, j int) { pending[i], pending[j] = pending[j], pending[i] })
	for _, i := range pending {
		c.requestMetadataPiece(i)
	}
}

// Stats hook for every message we write.
func (cn *PeerConn) wroteMsg(msg *pp.Message) {
	torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1)
	if msg.Type == pp.Extended {
		for name, id := range cn.PeerExtensionIDs {
			if id != msg.ExtendedID {
				continue
			}
			torrent.Add(fmt.Sprintf("Extended messages written for protocol %q", name), 1)
		}
	}
	cn.allStats(func(cs *ConnStats) { cs.wroteMsg(msg) })
}

// After handshake, we know what Torrent and Client stats to include for a
// connection.
func (cn *Peer) postHandshakeStats(f func(*ConnStats)) {
	t := cn.t
	f(&t.stats)
	f(&t.cl.stats)
}

// All ConnStats that include this connection. Some objects are not known
// until the handshake is complete, after which it's expected to reconcile the
// differences.
func (cn *Peer) allStats(f func(*ConnStats)) {
	f(&cn._stats)
	if cn.reconciledHandshakeStats {
		cn.postHandshakeStats(f)
	}
}

// Accounts bytes written to the connection at all stat levels.
func (cn *PeerConn) wroteBytes(n int64) {
	cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesWritten }))
}

// Accounts bytes read from the connection at all stat levels.
func (cn *Peer) readBytes(n int64) {
	cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesRead }))
}

// Returns whether the connection could be useful to us. We're seeding and
// they want data, we don't have metainfo and they can provide it, etc.
func (c *Peer) useful() bool {
	t := c.t
	if c.closed.IsSet() {
		return false
	}
	if !t.haveInfo() {
		return c.supportsExtension("ut_metadata")
	}
	if t.seeding() && c.peerInterested {
		return true
	}
	if c.peerHasWantedPieces() {
		return true
	}
	return false
}

// The most recent time this peer was helpful: received a useful chunk, or
// (when seeding) we sent them one.
func (c *Peer) lastHelpful() (ret time.Time) {
	ret = c.lastUsefulChunkReceived
	if c.t.seeding() && c.lastChunkSent.After(ret) {
		ret = c.lastChunkSent
	}
	return
}

// Whether the fast extension (BEP 6) is active: both sides must support it.
func (c *PeerConn) fastEnabled() bool {
	return c.PeerExtensionBytes.SupportsFast() && c.t.cl.config.Extensions.SupportsFast()
}

// Sends a REJECT for the request and forgets it. Only valid with the fast
// extension.
func (c *PeerConn) reject(r Request) {
	if !c.fastEnabled() {
		panic("fast not enabled")
	}
	c.write(r.ToMsg(pp.Reject))
	delete(c.peerRequests, r)
}

// Handles an incoming REQUEST from the peer: dedupes, enforces choke state
// and queue limits, validates bounds, then queues an async data read.
func (c *PeerConn) onReadRequest(r Request) error {
	requestedChunkLengths.Add(strconv.FormatUint(r.Length.Uint64(), 10), 1)
	if _, ok := c.peerRequests[r]; ok {
		torrent.Add("duplicate requests received", 1)
		return nil
	}
	if c.choking {
		torrent.Add("requests received while choking", 1)
		if c.fastEnabled() {
			torrent.Add("requests rejected while choking", 1)
			c.reject(r)
		}
		return nil
	}
	// TODO: What if they've already requested this?
	if len(c.peerRequests) >= localClientReqq {
		torrent.Add("requests received while queue full", 1)
		if c.fastEnabled() {
			c.reject(r)
		}
		// BEP 6 says we may close here if we choose.
		return nil
	}
	if !c.t.havePiece(pieceIndex(r.Index)) {
		// This isn't necessarily them screwing up. We can drop pieces
		// from our storage, and can't communicate this to peers
		// except by reconnecting.
		requestsReceivedForMissingPieces.Add(1)
		return fmt.Errorf("peer requested piece we don't have: %v", r.Index.Int())
	}
	// Check this after we know we have the piece, so that the piece length will be known.
	if r.Begin+r.Length > c.t.pieceLength(pieceIndex(r.Index)) {
		torrent.Add("bad requests received", 1)
		return errors.New("bad Request")
	}
	if c.peerRequests == nil {
		c.peerRequests = make(map[Request]*peerRequestState, localClientReqq)
	}
	value := &peerRequestState{}
	c.peerRequests[r] = value
	// Read the chunk data off the lock; the writer is tickled when it's ready.
	go c.peerRequestDataReader(r, value)
	// c.tickleWriter()
	return nil
}

// Runs in its own goroutine: reads chunk data from storage, then under the
// lock either records it for sending or reports the failure.
func (c *PeerConn) peerRequestDataReader(r Request, prs *peerRequestState) {
	b, err := readPeerRequestData(r, c)
	c.locker().Lock()
	defer c.locker().Unlock()
	if err != nil {
		c.peerRequestDataReadFailed(err, r)
	} else {
		if b == nil {
			panic("data must be non-nil to trigger send")
		}
		prs.data = b
		c.tickleWriter()
	}
}

// If this is maintained correctly, we might be able to support optional synchronous reading for
// chunk sending, the way it used to work.
func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
	c.logger.WithDefaultLevel(log.Warning).Printf("error reading chunk for peer Request %v: %v", r, err)
	if c.t.closed.IsSet() {
		return
	}
	i := pieceIndex(r.Index)
	if c.t.pieceComplete(i) {
		// There used to be more code here that just duplicated the following break. Piece
		// completions are currently cached, so I'm not sure how helpful this update is, except to
		// pull any completion changes pushed to the storage backend in failed reads that got us
		// here.
		c.t.updatePieceCompletion(i)
	}
	// If we failed to send a chunk, choke the peer to ensure they flush all their requests. We've
	// probably dropped a piece from storage, but there's no way to communicate this to the peer. If
	// they ask for it again, we'll kick them to allow us to send them an updated bitfield on the
	// next connect. TODO: Support rejecting here too.
	if c.choking {
		c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
	}
	c.choke(c.write)
}

// Reads the requested chunk from torrent storage. A short read without an
// error is an invariant violation; EOF on a complete read is not an error.
func readPeerRequestData(r Request, c *PeerConn) ([]byte, error) {
	b := make([]byte, r.Length)
	p := c.t.info.Piece(int(r.Index))
	n, err := c.t.readAt(b, p.Offset()+int64(r.Begin))
	if n == len(b) {
		if err == io.EOF {
			err = nil
		}
	} else {
		if err == nil {
			panic("expected error")
		}
	}
	return b, err
}

// Runs incidental work off the hot path. The dead branch is kept so the
// synchronous variant can be restored easily for debugging.
func runSafeExtraneous(f func()) {
	if true {
		go f()
	} else {
		f()
	}
}

// Logs peer-behaviour oddities with identifying context for the remote client.
func (c *PeerConn) logProtocolBehaviour(level log.Level, format string, arg ...interface{}) {
	c.logger.WithLevel(level).WithContextText(fmt.Sprintf(
		"peer id %q, ext v %q", c.PeerID, c.PeerClientName.Load(),
	)).SkipCallers(1).Printf(format, arg...)
}

// Processes incoming BitTorrent wire-protocol messages. The client lock is held upon entry and
// exit. Returning will end the connection.
func (c *PeerConn) mainReadLoop() (err error) {
	defer func() {
		if err != nil {
			torrent.Add("connection.mainReadLoop returned with error", 1)
		} else {
			torrent.Add("connection.mainReadLoop returned with no error", 1)
		}
	}()
	t := c.t
	cl := t.cl

	decoder := pp.Decoder{
		R:         bufio.NewReaderSize(c.r, 1<<17),
		MaxLength: 256 * 1024,
		Pool:      &t.chunkPool,
	}
	for {
		var msg pp.Message
		// Decode without holding the client lock; network reads can block.
		func() {
			cl.unlock()
			defer cl.lock()
			err = decoder.Decode(&msg)
		}()
		if cb := c.callbacks.ReadMessage; cb != nil && err == nil {
			cb(c, &msg)
		}
		if t.closed.IsSet() || c.closed.IsSet() {
			return nil
		}
		if err != nil {
			return err
		}
		c.lastMessageReceived = time.Now()
		if msg.Keepalive {
			receivedKeepalives.Add(1)
			continue
		}
		messageTypesReceived.Add(msg.Type.String(), 1)
		if msg.Type.FastExtension() && !c.fastEnabled() {
			runSafeExtraneous(func() { torrent.Add("fast messages received when extension is disabled", 1) })
			return fmt.Errorf("received fast extension message (type=%v) but extension is disabled", msg.Type)
		}
		switch msg.Type {
		case pp.Choke:
			if c.peerChoking {
				break
			}
			if !c.fastEnabled() {
				c.deleteAllRequests()
			} else {
				// We don't decrement pending requests here, let's wait for the peer to either
				// reject or satisfy the outstanding requests. Additionally, some peers may unchoke
				// us and resume where they left off, we don't want to have piled on to those chunks
				// in the meanwhile. I think a peer's ability to abuse this should be limited: they
				// could let us request a lot of stuff, then choke us and never reject, but they're
				// only a single peer, our chunk balancing should smooth over this abuse.
			}
			c.peerChoking = true
			c.updateExpectingChunks()
		case pp.Unchoke:
			if !c.peerChoking {
				// Some clients do this for some reason. Transmission doesn't error on this, so we
				// won't for consistency.
				c.logProtocolBehaviour(log.Debug, "received unchoke when already unchoked")
				break
			}
			c.peerChoking = false
			preservedCount := 0
			c.requestState.Requests.Iterate(func(x uint32) bool {
				if !c.peerAllowedFast.Contains(x / c.t.chunksPerRegularPiece()) {
					preservedCount++
				}
				return true
			})
			if preservedCount != 0 {
				// TODO: Yes this is a debug log but I'm not happy with the state of the logging lib
				// right now.
				c.logger.WithLevel(log.Debug).Printf(
					"%v requests were preserved while being choked (fast=%v)",
					preservedCount,
					c.fastEnabled())
				torrent.Add("requestsPreservedThroughChoking", int64(preservedCount))
			}
			if !c.t._pendingPieces.IsEmpty() {
				c.updateRequests("unchoked")
			}
			c.updateExpectingChunks()
		case pp.Interested:
			c.peerInterested = true
			c.tickleWriter()
		case pp.NotInterested:
			c.peerInterested = false
			// We don't clear their requests since it isn't clear in the spec.
			// We'll probably choke them for this, which will clear them if
			// appropriate, and is clearly specified.
		case pp.Have:
			err = c.peerSentHave(pieceIndex(msg.Index))
		case pp.Bitfield:
			err = c.peerSentBitfield(msg.Bitfield)
		case pp.Request:
			r := newRequestFromMessage(&msg)
			err = c.onReadRequest(r)
		case pp.Piece:
			c.doChunkReadStats(int64(len(msg.Piece)))
			err = c.receiveChunk(&msg)
			if len(msg.Piece) == int(t.chunkSize) {
				t.chunkPool.Put(&msg.Piece)
			}
			if err != nil {
				err = fmt.Errorf("receiving chunk: %w", err)
			}
		case pp.Cancel:
			req := newRequestFromMessage(&msg)
			c.onPeerSentCancel(req)
		case pp.Port:
			ipa, ok := tryIpPortFromNetAddr(c.RemoteAddr)
			if !ok {
				break
			}
			pingAddr := net.UDPAddr{
				IP:   ipa.IP,
				Port: ipa.Port,
			}
			if msg.Port != 0 {
				pingAddr.Port = int(msg.Port)
			}
			cl.eachDhtServer(func(s DhtServer) {
				go s.Ping(&pingAddr)
			})
		case pp.Suggest:
			torrent.Add("suggests received", 1)
			log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).SetLevel(log.Debug).Log(c.t.logger)
			c.updateRequests("suggested")
		case pp.HaveAll:
			err = c.onPeerSentHaveAll()
		case pp.HaveNone:
			err = c.peerSentHaveNone()
		case pp.Reject:
			req := newRequestFromMessage(&msg)
			if !c.remoteRejectedRequest(c.t.requestIndexFromRequest(req)) {
				log.Printf("received invalid reject [request=%v, peer=%v]", req, c)
				err = fmt.Errorf("received invalid reject [request=%v]", req)
			}
		case pp.AllowedFast:
			torrent.Add("allowed fasts received", 1)
			log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).SetLevel(log.Debug).Log(c.t.logger)
			c.updateRequests("PeerConn.mainReadLoop allowed fast")
		case pp.Extended:
			err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload)
		default:
			err = fmt.Errorf("received unknown message type: %#v", msg.Type)
		}
		if err != nil {
			return err
		}
	}
}

// Returns true if it was valid to reject the request.
func (c *Peer) remoteRejectedRequest(r RequestIndex) bool {
	// Valid if it was either an active request or one awaiting cancel acknowledgement.
	if !c.deleteRequest(r) && !c.requestState.Cancelled.CheckedRemove(r) {
		return false
	}
	if c.isLowOnRequests() {
		c.updateRequests("Peer.remoteRejectedRequest")
	}
	c.decExpectedChunkReceive(r)
	return true
}

// Decrements the count of chunks we may legitimately receive for r; panics if
// none were expected (bookkeeping bug).
func (c *Peer) decExpectedChunkReceive(r RequestIndex) {
	count := c.validReceiveChunks[r]
	if count == 1 {
		delete(c.validReceiveChunks, r)
	} else if count > 1 {
		c.validReceiveChunks[r] = count - 1
	} else {
		panic(r)
	}
}

// Dispatches an incoming extended (BEP 10) message by its extension ID.
func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err error) {
	defer func() {
		// TODO: Should we still do this?
		if err != nil {
			// These clients use their own extension IDs for outgoing message
			// types, which is incorrect.
			if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) || strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
				err = nil
			}
		}
	}()
	t := c.t
	cl := t.cl
	switch id {
	case pp.HandshakeExtendedID:
		var d pp.ExtendedHandshakeMessage
		if err := bencode.Unmarshal(payload, &d); err != nil {
			c.logger.Printf("error parsing extended handshake message %q: %s", payload, err)
			return fmt.Errorf("unmarshalling extended handshake payload: %w", err)
		}
		if cb := c.callbacks.ReadExtendedHandshake; cb != nil {
			cb(c, &d)
		}
		// c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
		if d.Reqq != 0 {
			c.PeerMaxRequests = d.Reqq
		}
		c.PeerClientName.Store(d.V)
		if c.PeerExtensionIDs == nil {
			c.PeerExtensionIDs = make(map[pp.ExtensionName]pp.ExtensionNumber, len(d.M))
		}
		c.PeerListenPort = d.Port
		c.PeerPrefersEncryption = d.Encryption
		for name, id := range d.M {
			if _, ok := c.PeerExtensionIDs[name]; !ok {
				peersSupportingExtension.Add(
					// expvar.Var.String must produce valid JSON. "ut_payme\xeet_address" was being
					// entered here which caused problems later when unmarshalling.
					strconv.Quote(string(name)),
					1)
			}
			c.PeerExtensionIDs[name] = id
		}
		if d.MetadataSize != 0 {
			if err = t.setMetadataSize(d.MetadataSize); err != nil {
				return fmt.Errorf("setting metadata size to %d: %w", d.MetadataSize, err)
			}
		}
		c.requestPendingMetadata()
		if !t.cl.config.DisablePEX {
			t.pex.Add(c) // we learnt enough now
			c.pex.Init(c)
		}
		return nil
	case metadataExtendedId:
		err := cl.gotMetadataExtensionMsg(payload, t, c)
		if err != nil {
			return fmt.Errorf("handling metadata extension message: %w", err)
		}
		return nil
	case pexExtendedId:
		if !c.pex.IsEnabled() {
			return nil // or hang-up maybe?
		}
		return c.pex.Recv(payload)
	default:
		return fmt.Errorf("unexpected extended message ID: %v", id)
	}
}

// Set both the Reader and Writer for the connection from a single ReadWriter.
func (cn *PeerConn) setRW(rw io.ReadWriter) {
	cn.r = rw
	cn.w = rw
}

// Returns the Reader and Writer as a combined ReadWriter.
func (cn *PeerConn) rw() io.ReadWriter {
	return struct {
		io.Reader
		io.Writer
	}{cn.r, cn.w}
}

// Accounts a received chunk of the given size at all stat levels.
func (c *Peer) doChunkReadStats(size int64) {
	c.allStats(func(cs *ConnStats) { cs.receivedChunk(size) })
}

// Handle a received chunk from a peer.
func (c *Peer) receiveChunk(msg *pp.Message) error {
	chunksReceived.Add("total", 1)

	ppReq := newRequestFromMessage(msg)
	req := c.t.requestIndexFromRequest(ppReq)

	if c.peerChoking {
		chunksReceived.Add("while choked", 1)
	}

	if c.validReceiveChunks[req] <= 0 {
		chunksReceived.Add("unexpected", 1)
		return errors.New("received unexpected chunk")
	}
	c.decExpectedChunkReceive(req)

	if c.peerChoking && c.peerAllowedFast.Contains(bitmap.BitIndex(ppReq.Index)) {
		chunksReceived.Add("due to allowed fast", 1)
	}

	// The request needs to be deleted immediately to prevent cancels occurring asynchronously when
	// have actually already received the piece, while we have the Client unlocked to write the data
	// out.
	intended := false
	{
		if c.requestState.Requests.Contains(req) {
			for _, f := range c.callbacks.ReceivedRequested {
				f(PeerMessageEvent{c, msg})
			}
		}
		// Request has been satisfied.
		if c.deleteRequest(req) || c.requestState.Cancelled.CheckedRemove(req) {
			intended = true
			if !c.peerChoking {
				c._chunksReceivedWhileExpecting++
			}
			if c.isLowOnRequests() {
				c.updateRequests("Peer.receiveChunk deleted request")
			}
		} else {
			chunksReceived.Add("unintended", 1)
		}
	}

	t := c.t
	cl := t.cl

	// Do we actually want this chunk?
	if t.haveChunk(ppReq) {
		// panic(fmt.Sprintf("%+v", ppReq))
		chunksReceived.Add("redundant", 1)
		c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
		return nil
	}

	piece := &t.pieces[ppReq.Index]

	c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful }))
	c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData }))
	if intended {
		c.piecesReceivedSinceLastRequestUpdate++
		c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
	}
	for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData {
		f(ReceivedUsefulDataEvent{c, msg})
	}
	c.lastUsefulChunkReceived = time.Now()

	// Need to record that it hasn't been written yet, before we attempt to do
	// anything with it.
	piece.incrementPendingWrites()
	// Record that we have the chunk, so we aren't trying to download it while
	// waiting for it to be written to storage.
	piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))

	// Cancel pending requests for this chunk from *other* peers.
	if p := t.pendingRequests[req]; p != nil {
		if p == c {
			panic("should not be pending request from conn that just received it")
		}
		p.cancel(req)
	}

	err := func() error {
		cl.unlock()
		defer cl.lock()
		concurrentChunkWrites.Add(1)
		defer concurrentChunkWrites.Add(-1)
		// Write the chunk out. Note that the upper bound on chunk writing concurrency will be the
		// number of connections. We write inline with receiving the chunk (with this lock dance),
		// because we want to handle errors synchronously and I haven't thought of a nice way to
		// defer any concurrency to the storage and have that notify the client of errors. TODO: Do
		// that instead.
		return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	}()

	piece.decrementPendingWrites()

	if err != nil {
		c.logger.WithDefaultLevel(log.Error).Printf("writing received chunk %v: %v", req, err)
		t.pendRequest(req)
		// Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
		// request update runs while we're writing the chunk that just failed. Then we never do a
		// fresh update after pending the failed request.
		c.updateRequests("Peer.receiveChunk error writing chunk")
		t.onWriteChunkErr(err)
		return nil
	}

	c.onDirtiedPiece(pieceIndex(ppReq.Index))

	// We need to ensure the piece is only queued once, so only the last chunk writer gets this job.
	if t.pieceAllDirty(pieceIndex(ppReq.Index)) && piece.pendingWrites == 0 {
		t.queuePieceCheck(pieceIndex(ppReq.Index))
		// We don't pend all chunks here anymore because we don't want code dependent on the dirty
		// chunk status (such as the haveChunk call above) to have to check all the various other
		// piece states like queued for hash, hashing etc. This does mean that we need to be sure
		// that chunk pieces are pended at an appropriate time later however.
	}

	cl.event.Broadcast()
	// We do this because we've written a chunk, and may change PieceState.Partial.
	t.publishPieceChange(pieceIndex(ppReq.Index))

	return nil
}

// Records that this peer contributed data to the piece, for blame purposes if
// the piece later fails its hash check.
func (c *Peer) onDirtiedPiece(piece pieceIndex) {
	if c.peerTouchedPieces == nil {
		c.peerTouchedPieces = make(map[pieceIndex]struct{})
	}
	c.peerTouchedPieces[piece] = struct{}{}
	ds := &c.t.pieces[piece].dirtiers
	if *ds == nil {
		*ds = make(map[*Peer]struct{})
	}
	(*ds)[c] = struct{}{}
}

// Whether we currently want to upload to this peer at all.
func (c *PeerConn) uploadAllowed() bool {
	if c.t.cl.config.NoUpload {
		return false
	}
	if c.t.dataUploadDisallowed {
		return false
	}
	if c.t.seeding() {
		return true
	}
	if !c.peerHasWantedPieces() {
		return false
	}
	// Don't upload more than 100 KiB more than we download.
	if c._stats.BytesWrittenData.Int64() >= c._stats.BytesReadData.Int64()+100<<10 {
		return false
	}
	return true
}

// Arranges for the writer to be woken after delay, reusing a single timer.
func (c *PeerConn) setRetryUploadTimer(delay time.Duration) {
	if c.uploadTimer == nil {
		c.uploadTimer = time.AfterFunc(delay, c.tickleWriter)
	} else {
		c.uploadTimer.Reset(delay)
	}
}

// Also handles choking and unchoking of the remote peer.
func (c *PeerConn) upload(msg func(pp.Message) bool) bool {
	// Breaking or completing this loop means we don't want to upload to the
	// peer anymore, and we choke them.
another:
	for c.uploadAllowed() {
		// We want to upload to the peer.
		if !c.unchoke(msg) {
			return false
		}
		for r, state := range c.peerRequests {
			if state.data == nil {
				// Data read hasn't completed yet; skip this request for now.
				continue
			}
			res := c.t.cl.config.UploadRateLimiter.ReserveN(time.Now(), int(r.Length))
			if !res.OK() {
				panic(fmt.Sprintf("upload rate limiter burst size < %d", r.Length))
			}
			delay := res.Delay()
			if delay > 0 {
				res.Cancel()
				c.setRetryUploadTimer(delay)
				// Hard to say what to return here.
				return true
			}
			more := c.sendChunk(r, msg, state)
			delete(c.peerRequests, r)
			if !more {
				return false
			}
			// Restart iteration: the map was mutated, and allowance may have changed.
			goto another
		}
		return true
	}
	return c.choke(msg)
}

// Drops this connection from its torrent.
func (cn *PeerConn) drop() {
	cn.t.dropConnection(cn)
}

// Net count of good minus bad pieces this peer has contributed to.
func (cn *Peer) netGoodPiecesDirtied() int64 {
	return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
}

// Whether the peer claims any pieces that we still want.
func (c *Peer) peerHasWantedPieces() bool {
	if all, _ := c.peerHasAllPieces(); all {
		return !c.t.haveAllPieces() && !c.t._pendingPieces.IsEmpty()
	}
	if !c.t.haveInfo() {
		return !c.peerPieces().IsEmpty()
	}
	return c.peerPieces().Intersects(&c.t._pendingPieces)
}

// Returns true if an outstanding request is removed. Cancelled requests should be handled
// separately.
func (c *Peer) deleteRequest(r RequestIndex) bool {
	if !c.requestState.Requests.CheckedRemove(r) {
		return false
	}
	for _, f := range c.callbacks.DeletedRequest {
		f(PeerRequestEvent{c, c.t.requestIndexToRequest(r)})
	}
	c.updateExpectingChunks()
	if c.t.requestingPeer(r) != c {
		panic("only one peer should have a given request at a time")
	}
	delete(c.t.pendingRequests, r)
	delete(c.t.lastRequested, r)
	return true
}

// Deletes every outstanding request. Iterates over a clone because
// deleteRequest mutates the underlying bitmap.
func (c *Peer) deleteAllRequests() {
	c.requestState.Requests.Clone().Iterate(func(x uint32) bool {
		if !c.deleteRequest(x) {
			panic("request should exist")
		}
		return true
	})
	if !c.requestState.Requests.IsEmpty() {
		panic(c.requestState.Requests.GetCardinality())
	}
}

// This is called when something has changed that should wake the writer, such as putting stuff into
// the writeBuffer, or changing some state that the writer can act on.
func (c *PeerConn) tickleWriter() { c.messageWriter.writeCond.Broadcast() } func (c *PeerConn) sendChunk(r Request, msg func(pp.Message) bool, state *peerRequestState) (more bool) { c.lastChunkSent = time.Now() return msg(pp.Message{ Type: pp.Piece, Index: r.Index, Begin: r.Begin, Piece: state.data, }) } func (c *PeerConn) setTorrent(t *Torrent) { if c.t != nil { panic("connection already associated with a torrent") } c.t = t c.logger.WithDefaultLevel(log.Debug).Printf("set torrent=%v", t) t.reconcileHandshakeStats(c) } func (c *Peer) peerPriority() (peerPriority, error) { return bep40Priority(c.remoteIpPort(), c.t.cl.publicAddr(c.remoteIp())) } func (c *Peer) remoteIp() net.IP { host, _, _ := net.SplitHostPort(c.RemoteAddr.String()) return net.ParseIP(host) } func (c *Peer) remoteIpPort() IpPort { ipa, _ := tryIpPortFromNetAddr(c.RemoteAddr) return IpPort{ipa.IP, uint16(ipa.Port)} } func (c *PeerConn) pexPeerFlags() pp.PexPeerFlags { f := pp.PexPeerFlags(0) if c.PeerPrefersEncryption { f |= pp.PexPrefersEncryption } if c.outgoing { f |= pp.PexOutgoingConn } if c.utp() { f |= pp.PexSupportsUtp } return f } // This returns the address to use if we want to dial the peer again. It incorporates the peer's // advertised listen port. 
func (c *PeerConn) dialAddr() PeerRemoteAddr {
	if !c.outgoing && c.PeerListenPort != 0 {
		// For incoming connections, substitute the peer's advertised listen
		// port for the (ephemeral) remote port, keeping the IP.
		switch addr := c.RemoteAddr.(type) {
		case *net.TCPAddr:
			dialAddr := *addr
			dialAddr.Port = c.PeerListenPort
			return &dialAddr
		case *net.UDPAddr:
			dialAddr := *addr
			dialAddr.Port = c.PeerListenPort
			return &dialAddr
		}
	}
	return c.RemoteAddr
}

// pexEvent builds a PEX event of the given type for this connection.
func (c *PeerConn) pexEvent(t pexEventType) pexEvent {
	f := c.pexPeerFlags()
	addr := c.dialAddr()
	return pexEvent{t, addr, f, nil}
}

// String implements fmt.Stringer for logging and debugging.
func (c *PeerConn) String() string {
	return fmt.Sprintf("%T %p [id=%q, exts=%v, v=%q]", c, c, c.PeerID, c.PeerExtensionBytes, c.PeerClientName.Load())
}

// trust summarizes how much we trust this peer for comparison purposes.
func (c *Peer) trust() connectionTrust {
	return connectionTrust{c.trusted, c.netGoodPiecesDirtied()}
}

// connectionTrust orders peers by explicit trust, then by net good pieces
// dirtied.
type connectionTrust struct {
	Implicit            bool
	NetGoodPiecesDirted int64
}

func (l connectionTrust) Less(r connectionTrust) bool {
	return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).Less()
}

// Returns the pieces the peer could have based on their claims. If we don't know how many pieces
// are in the torrent, it could be a very large range the peer has sent HaveAll.
func (cn *PeerConn) PeerPieces() *roaring.Bitmap {
	cn.locker().RLock()
	defer cn.locker().RUnlock()
	return cn.newPeerPieces()
}

// Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims.
func (cn *Peer) newPeerPieces() *roaring.Bitmap {
	// TODO: Can we use copy on write?
	ret := cn.peerPieces().Clone()
	if all, _ := cn.peerHasAllPieces(); all {
		if cn.t.haveInfo() {
			ret.AddRange(0, bitmap.BitRange(cn.t.numPieces()))
		} else {
			// Piece count unknown: mark the entire possible range.
			ret.AddRange(0, bitmap.ToEnd)
		}
	}
	return ret
}

func (cn *Peer) stats() *ConnStats {
	return &cn._stats
}

// TryAsPeerConn returns the underlying *PeerConn if this Peer is one.
func (p *Peer) TryAsPeerConn() (*PeerConn, bool) {
	pc, ok := p.peerImpl.(*PeerConn)
	return pc, ok
}

// isLowOnRequests reports whether there are no outstanding or cancelled
// requests on this connection.
func (pc *PeerConn) isLowOnRequests() bool {
	return pc.requestState.Requests.IsEmpty() && pc.requestState.Cancelled.IsEmpty()
}

func (p *Peer) uncancelledRequests() uint64 {
	return p.requestState.Requests.GetCardinality()
}

// remoteIsTransmission heuristically detects the Transmission client from its
// Azureus-style peer ID prefix ("-TRxxxx-").
func (pc *PeerConn) remoteIsTransmission() bool {
	return bytes.HasPrefix(pc.PeerID[:], []byte("-TR")) && pc.PeerID[7] == '-'
}
package borg // Think of borg events like inotify events. We're interested in changes to // them all. All events are sent to your instance of borg. The filtering // is done once they've arrived. Let's make keys look like directories to // aid this comparison. // // Example spec: // // /<slug_id>/proc/<type>/<upid>/... // // Example interactive session: // // $ borgd -i -a :9999 // >> ls /proc/123_a3c_a12b3c45/beanstalkd/12345/ // exe // env // lock // >> cat /proc/123_a3c_a12b3c45/beanstalkd/12345/* // beanstalkd -l 0.0.0.0 -p 4563 // PORT=4563 // 123.4.5.678:9999 // >> // // Example code: // // me, err := borg.ListenAndServe(listenAddr) // if err != nil { // log.Exitf("listen failed: %v", err) // } // // // Handle a specific type of key notification. // // The : signals a named variable part. // me.HandleFunc( // "/proc/:slug/beanstalkd/:upid/lock", // func (msg *borg.Message) { // if msg.Value == myId { // cmd := beanstalkd .... // ... launch beanstalkd ... // me.Echo(cmd, "/proc/<slug>/beanstalkd/<upid>/cmd") // } // }, // ) import ( "borg/paxos" "borg/proto" "borg/store" "borg/util" "bufio" "fmt" "io" "log" "net" "os" "strconv" "strings" ) const ( mSeqn = iota mFrom mTo mCmd mBody mNumParts ) // NOT IPv6-compatible. 
func getPort(addr string) uint64 { parts := strings.Split(addr, ":", -1) port, err := strconv.Btoui64(parts[len(parts) - 1], 10) if err != nil { fmt.Printf("error getting port from %q\n", addr) } return port } func RecvUdp(conn net.PacketConn, ch chan paxos.Msg) { for { pkt := make([]byte, 3000) // make sure it's big enough n, addr, err := conn.ReadFrom(pkt) if err != nil { fmt.Println(err) continue } msg := parse(string(pkt[0:n])) msg.From = getPort(addr.String()) ch <- msg } } func parse(s string) paxos.Msg { parts := strings.Split(s, ":", mNumParts) if len(parts) != mNumParts { panic(s) } seqn, err := strconv.Btoui64(parts[mSeqn], 10) if err != nil { panic(s) } from, err := strconv.Btoui64(parts[mFrom], 10) if err != nil { panic(s) } var to uint64 if parts[mTo] == "*" { to = 0 } else { to, err = strconv.Btoui64(parts[mTo], 10) if err != nil { panic(err) } } return paxos.Msg{seqn, from, to, parts[mCmd], parts[mBody]} } type FuncPutter func (paxos.Msg) func (f FuncPutter) Put(m paxos.Msg) { f(m) } func printMsg(m paxos.Msg) { fmt.Printf("should send %v\n", m) } func NewUdpPutter(me uint64, addrs []net.Addr, conn net.PacketConn) paxos.Putter { put := func(m paxos.Msg) { pkt := fmt.Sprintf("%d:%d:%d:%s:%s", m.Seqn, me, m.To, m.Cmd, m.Body) fmt.Printf("send udp packet %q\n", pkt) b := []byte(pkt) var to []net.Addr if m.To == 0 { to = addrs } else { to = []net.Addr{&net.UDPAddr{net.ParseIP("127.0.0.1"), int(m.To)}} } for _, addr := range to { n, err := conn.WriteTo(b, addr) if err != nil { fmt.Println(err) continue } if n != len(b) { fmt.Printf("sent <%d> bytes, wanted to send <%d>\n", n, len(b)) continue } } } return FuncPutter(put) } type Node struct { id string listenAddr string logger *log.Logger nodes []net.Addr store *store.Store manager *paxos.Manager } func New(id string, listenAddr string, logger *log.Logger) *Node { if id == "" { b := make([]byte, 8) util.RandBytes(b) id = fmt.Sprintf("%x", b) } return &Node{ listenAddr:listenAddr, logger:logger, 
store:store.New(logger), id:id, } } func (n *Node) Init() { var basePort int var err os.Error basePort, err = strconv.Atoi((n.listenAddr)[1:]) n.nodes = make([]net.Addr, 5) if err != nil { fmt.Println(err) return } n.nodes[0] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 0} n.nodes[1] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 1} n.nodes[2] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 2} n.nodes[3] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 3} n.nodes[4] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 4} nodeKey := "/node/" + n.id mut, err := store.EncodeSet(nodeKey, n.listenAddr) if err != nil { panic(err) } n.store.Apply(1, mut) n.logger.Logf("registered %s at %s\n", n.id, n.listenAddr) n.manager = paxos.NewManager(uint64(basePort), 2, uint64(len(n.nodes)), n.logger) } // TODO this function should take only an address and get all necessary info // from the other existing nodes. func (n *Node) Join(master string) { parts := strings.Split(master, "=", 2) if len(parts) < 2 { panic(fmt.Sprintf("bad master address: %s", master)) } mid, addr := parts[0], parts[1] var basePort int var err os.Error n.nodes = make([]net.Addr, 5) basePort, err = strconv.Atoi((addr)[1:]) if err != nil { fmt.Println(err) return } n.nodes[0] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 0} n.nodes[1] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 1} n.nodes[2] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 2} n.nodes[3] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 3} n.nodes[4] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 4} n.logger.Logf("attempting to attach to %v\n", n.nodes) n.logger.Logf("TODO: get a snapshot") // TODO remove all this fake stuff and talk to the other nodes // BEGIN FAKE STUFF nodeKey := "/node/" + mid mut, err := store.EncodeSet(nodeKey, addr) if err != nil { panic(err) } n.store.Apply(1, mut) // END OF FAKE STUFF selfport, err := strconv.Atoi((n.listenAddr)[1:]) n.manager = 
paxos.NewManager(uint64(selfport), 2, uint64(len(n.nodes)), n.logger) } func (n *Node) server(conn net.Conn) { br := bufio.NewReader(conn) for { parts, err := proto.Decode(br) if err != nil { n.logger.Log(err) continue } if len(parts) == 0 { continue } n.logger.Log("got", parts) switch parts[0] { case "set": mutation, err := store.EncodeSet(parts[1], parts[2]) if err != nil { io.WriteString(conn, fmt.Sprintf("-ERR: %s", err)) } v := n.manager.Propose(mutation) if v == mutation { proto.Encode(conn, "OK") } else { io.WriteString(conn, "-ERR: failed") } case "get": //read from store //return value default: io.WriteString(conn, "-ERR: unknown command") } } } func (n *Node) accept(l net.Listener) { for { c, err := l.Accept() if err != nil { n.logger.Log(err) continue } go n.server(c) } } func (n *Node) RunForever() { me, err := strconv.Btoui64((n.listenAddr)[1:], 10) if err != nil { fmt.Println(err) return } n.logger.Logf("attempting to listen on %s\n", n.listenAddr) tcpListener, err := net.Listen("tcp", n.listenAddr) if err != nil { n.logger.Log(err) return } go n.accept(tcpListener) udpConn, err := net.ListenPacket("udp", n.listenAddr) if err != nil { fmt.Println(err) return } udpCh := make(chan paxos.Msg) go RecvUdp(udpConn, udpCh) udpPutter := NewUdpPutter(me, n.nodes, udpConn) //n.manager.Init(FuncPutter(printMsg)) n.manager.Init(udpPutter) go func() { for pkt := range udpCh { fmt.Printf("got udp packet: %#v\n", pkt) n.manager.Put(pkt) } }() for { n.store.Apply(n.manager.Recv()) } } get client command package borg // Think of borg events like inotify events. We're interested in changes to // them all. All events are sent to your instance of borg. The filtering // is done once they've arrived. Let's make keys look like directories to // aid this comparison. // // Example spec: // // /<slug_id>/proc/<type>/<upid>/... 
// // Example interactive session: // // $ borgd -i -a :9999 // >> ls /proc/123_a3c_a12b3c45/beanstalkd/12345/ // exe // env // lock // >> cat /proc/123_a3c_a12b3c45/beanstalkd/12345/* // beanstalkd -l 0.0.0.0 -p 4563 // PORT=4563 // 123.4.5.678:9999 // >> // // Example code: // // me, err := borg.ListenAndServe(listenAddr) // if err != nil { // log.Exitf("listen failed: %v", err) // } // // // Handle a specific type of key notification. // // The : signals a named variable part. // me.HandleFunc( // "/proc/:slug/beanstalkd/:upid/lock", // func (msg *borg.Message) { // if msg.Value == myId { // cmd := beanstalkd .... // ... launch beanstalkd ... // me.Echo(cmd, "/proc/<slug>/beanstalkd/<upid>/cmd") // } // }, // ) import ( "borg/paxos" "borg/proto" "borg/store" "borg/util" "bufio" "fmt" "io" "log" "net" "os" "strconv" "strings" ) const ( mSeqn = iota mFrom mTo mCmd mBody mNumParts ) // NOT IPv6-compatible. func getPort(addr string) uint64 { parts := strings.Split(addr, ":", -1) port, err := strconv.Btoui64(parts[len(parts) - 1], 10) if err != nil { fmt.Printf("error getting port from %q\n", addr) } return port } func RecvUdp(conn net.PacketConn, ch chan paxos.Msg) { for { pkt := make([]byte, 3000) // make sure it's big enough n, addr, err := conn.ReadFrom(pkt) if err != nil { fmt.Println(err) continue } msg := parse(string(pkt[0:n])) msg.From = getPort(addr.String()) ch <- msg } } func parse(s string) paxos.Msg { parts := strings.Split(s, ":", mNumParts) if len(parts) != mNumParts { panic(s) } seqn, err := strconv.Btoui64(parts[mSeqn], 10) if err != nil { panic(s) } from, err := strconv.Btoui64(parts[mFrom], 10) if err != nil { panic(s) } var to uint64 if parts[mTo] == "*" { to = 0 } else { to, err = strconv.Btoui64(parts[mTo], 10) if err != nil { panic(err) } } return paxos.Msg{seqn, from, to, parts[mCmd], parts[mBody]} } type FuncPutter func (paxos.Msg) func (f FuncPutter) Put(m paxos.Msg) { f(m) } func printMsg(m paxos.Msg) { fmt.Printf("should send %v\n", m) } 
func NewUdpPutter(me uint64, addrs []net.Addr, conn net.PacketConn) paxos.Putter { put := func(m paxos.Msg) { pkt := fmt.Sprintf("%d:%d:%d:%s:%s", m.Seqn, me, m.To, m.Cmd, m.Body) fmt.Printf("send udp packet %q\n", pkt) b := []byte(pkt) var to []net.Addr if m.To == 0 { to = addrs } else { to = []net.Addr{&net.UDPAddr{net.ParseIP("127.0.0.1"), int(m.To)}} } for _, addr := range to { n, err := conn.WriteTo(b, addr) if err != nil { fmt.Println(err) continue } if n != len(b) { fmt.Printf("sent <%d> bytes, wanted to send <%d>\n", n, len(b)) continue } } } return FuncPutter(put) } type Node struct { id string listenAddr string logger *log.Logger nodes []net.Addr store *store.Store manager *paxos.Manager } func New(id string, listenAddr string, logger *log.Logger) *Node { if id == "" { b := make([]byte, 8) util.RandBytes(b) id = fmt.Sprintf("%x", b) } return &Node{ listenAddr:listenAddr, logger:logger, store:store.New(logger), id:id, } } func (n *Node) Init() { var basePort int var err os.Error basePort, err = strconv.Atoi((n.listenAddr)[1:]) n.nodes = make([]net.Addr, 5) if err != nil { fmt.Println(err) return } n.nodes[0] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 0} n.nodes[1] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 1} n.nodes[2] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 2} n.nodes[3] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 3} n.nodes[4] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 4} nodeKey := "/node/" + n.id mut, err := store.EncodeSet(nodeKey, n.listenAddr) if err != nil { panic(err) } n.store.Apply(1, mut) n.logger.Logf("registered %s at %s\n", n.id, n.listenAddr) n.manager = paxos.NewManager(uint64(basePort), 2, uint64(len(n.nodes)), n.logger) } // TODO this function should take only an address and get all necessary info // from the other existing nodes. 
func (n *Node) Join(master string) { parts := strings.Split(master, "=", 2) if len(parts) < 2 { panic(fmt.Sprintf("bad master address: %s", master)) } mid, addr := parts[0], parts[1] var basePort int var err os.Error n.nodes = make([]net.Addr, 5) basePort, err = strconv.Atoi((addr)[1:]) if err != nil { fmt.Println(err) return } n.nodes[0] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 0} n.nodes[1] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 1} n.nodes[2] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 2} n.nodes[3] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 3} n.nodes[4] = &net.UDPAddr{net.ParseIP("127.0.0.1"), basePort + 4} n.logger.Logf("attempting to attach to %v\n", n.nodes) n.logger.Logf("TODO: get a snapshot") // TODO remove all this fake stuff and talk to the other nodes // BEGIN FAKE STUFF nodeKey := "/node/" + mid mut, err := store.EncodeSet(nodeKey, addr) if err != nil { panic(err) } n.store.Apply(1, mut) // END OF FAKE STUFF selfport, err := strconv.Atoi((n.listenAddr)[1:]) n.manager = paxos.NewManager(uint64(selfport), 2, uint64(len(n.nodes)), n.logger) } func (n *Node) server(conn net.Conn) { br := bufio.NewReader(conn) for { parts, err := proto.Decode(br) if err != nil { n.logger.Log(err) continue } if len(parts) == 0 { continue } n.logger.Log("got", parts) switch parts[0] { case "set": mutation, err := store.EncodeSet(parts[1], parts[2]) if err != nil { io.WriteString(conn, fmt.Sprintf("-ERR: %s", err)) } v := n.manager.Propose(mutation) if v == mutation { proto.Encode(conn, "OK") } else { io.WriteString(conn, "-ERR: failed") } case "get": body, ok := n.store.Lookup(parts[1]) if ok { proto.Encode(conn, body) } else { io.WriteString(conn, "-ERR: missing") } default: io.WriteString(conn, "-ERR: unknown command") } } } func (n *Node) accept(l net.Listener) { for { c, err := l.Accept() if err != nil { n.logger.Log(err) continue } go n.server(c) } } func (n *Node) RunForever() { me, err := strconv.Btoui64((n.listenAddr)[1:], 10) 
if err != nil { fmt.Println(err) return } n.logger.Logf("attempting to listen on %s\n", n.listenAddr) tcpListener, err := net.Listen("tcp", n.listenAddr) if err != nil { n.logger.Log(err) return } go n.accept(tcpListener) udpConn, err := net.ListenPacket("udp", n.listenAddr) if err != nil { fmt.Println(err) return } udpCh := make(chan paxos.Msg) go RecvUdp(udpConn, udpCh) udpPutter := NewUdpPutter(me, n.nodes, udpConn) //n.manager.Init(FuncPutter(printMsg)) n.manager.Init(udpPutter) go func() { for pkt := range udpCh { fmt.Printf("got udp packet: %#v\n", pkt) n.manager.Put(pkt) } }() for { n.store.Apply(n.manager.Recv()) } }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package helm import ( "fmt" "io/ioutil" "path/filepath" "strings" "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chart/loader" "istio.io/istio/operator/pkg/util" "istio.io/istio/operator/pkg/vfs" ) const ( // DefaultProfileFilename is the name of the default profile yaml file. DefaultProfileFilename = "default.yaml" ChartsSubdirName = "charts" profilesRoot = "profiles" ) // VFSRenderer is a helm template renderer that uses compiled-in helm charts. type VFSRenderer struct { namespace string componentName string helmChartDirPath string chart *chart.Chart started bool } // NewVFSRenderer creates a VFSRenderer with the given relative path to helm charts, component name and namespace and // a base values YAML string. func NewVFSRenderer(helmChartDirPath, componentName, namespace string) *VFSRenderer { scope.Debugf("NewVFSRenderer with helmChart=%s, componentName=%s, namespace=%s", helmChartDirPath, componentName, namespace) return &VFSRenderer{ namespace: namespace, componentName: componentName, helmChartDirPath: helmChartDirPath, } } // Run implements the TemplateRenderer interface. 
func (h *VFSRenderer) Run() error { if err := CheckCompiledInCharts(); err != nil { return err } scope.Debugf("Run VFSRenderer with helmChart=%s, componentName=%s, namespace=%s", h.helmChartDirPath, h.componentName, h.namespace) if err := h.loadChart(); err != nil { return err } h.started = true return nil } // RenderManifest renders the current helm templates with the current values and returns the resulting YAML manifest // string. func (h *VFSRenderer) RenderManifest(values string) (string, error) { if !h.started { return "", fmt.Errorf("VFSRenderer for %s not started in renderChart", h.componentName) } return renderChart(h.namespace, values, h.chart) } // LoadValuesVFS loads the compiled in file corresponding to the given profile name. func LoadValuesVFS(profileName string) (string, error) { path := filepath.Join(profilesRoot, BuiltinProfileToFilename(profileName)) scope.Infof("Loading values from compiled in VFS at path %s", path) b, err := vfs.ReadFile(path) return string(b), err } func LoadValues(profileName string, chartsDir string) (string, error) { path := filepath.Join(chartsDir, profilesRoot, BuiltinProfileToFilename(profileName)) scope.Infof("Loading values at path %s", path) b, err := ioutil.ReadFile(path) return string(b), err } func readProfiles(chartsDir string) (map[string]bool, error) { profiles := map[string]bool{} switch chartsDir { case "": profilePaths, err := vfs.ReadDir(chartsDir) if err != nil { return nil, fmt.Errorf("failed to read profiles: %v", err) } for _, f := range profilePaths { profiles[strings.TrimSuffix(f, ".yaml")] = true } default: dir, err := ioutil.ReadDir(filepath.Join(chartsDir, profilesRoot)) if err != nil { return nil, fmt.Errorf("failed to read profiles: %v", err) } for _, f := range dir { profiles[strings.TrimSuffix(f.Name(), ".yaml")] = true } } return profiles, nil } // loadChart implements the TemplateRenderer interface. 
func (h *VFSRenderer) loadChart() error { prefix := h.helmChartDirPath fnames, err := vfs.GetFilesRecursive(prefix) if err != nil { return err } var bfs []*loader.BufferedFile for _, fname := range fnames { b, err := vfs.ReadFile(fname) if err != nil { return err } // Helm expects unix / separator, but on windows this will be \ name := strings.ReplaceAll(stripPrefix(fname, prefix), string(filepath.Separator), "/") bf := &loader.BufferedFile{ Name: name, Data: b, } bfs = append(bfs, bf) scope.Debugf("Chart loaded: %s", bf.Name) } h.chart, err = loader.LoadFiles(bfs) return err } func BuiltinProfileToFilename(name string) string { if name == "" { return DefaultProfileFilename } return name + ".yaml" } // stripPrefix removes the the given prefix from prefix. func stripPrefix(path, prefix string) string { pl := len(strings.Split(prefix, string(filepath.Separator))) pv := strings.Split(path, string(filepath.Separator)) return strings.Join(pv[pl:], string(filepath.Separator)) } // list all the profiles. func ListProfiles(charts string) ([]string, error) { profiles, err := readProfiles(charts) if err != nil { return nil, err } return util.StringBoolMapToSlice(profiles), nil } // CheckCompiledInCharts tests for the presence of compiled in charts. These can be missing if a developer creates // binaries using go build instead of make and tries to use compiled in charts. func CheckCompiledInCharts() error { if _, err := vfs.Stat(ChartsSubdirName); err != nil { return fmt.Errorf("compiled in charts not found in this development build, use --charts with local charts instead or run make gen-charts") } return nil } Add gen-charts warning message to all paths (#24744) * Add gen-charts warning message to all paths * Update warning message * Update warning message again * Remove check in local filesystem path // Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package helm import ( "fmt" "io/ioutil" "path/filepath" "strings" "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chart/loader" "istio.io/istio/operator/pkg/util" "istio.io/istio/operator/pkg/vfs" ) const ( // DefaultProfileFilename is the name of the default profile yaml file. DefaultProfileFilename = "default.yaml" ChartsSubdirName = "charts" profilesRoot = "profiles" ) // VFSRenderer is a helm template renderer that uses compiled-in helm charts. type VFSRenderer struct { namespace string componentName string helmChartDirPath string chart *chart.Chart started bool } // NewVFSRenderer creates a VFSRenderer with the given relative path to helm charts, component name and namespace and // a base values YAML string. func NewVFSRenderer(helmChartDirPath, componentName, namespace string) *VFSRenderer { scope.Debugf("NewVFSRenderer with helmChart=%s, componentName=%s, namespace=%s", helmChartDirPath, componentName, namespace) return &VFSRenderer{ namespace: namespace, componentName: componentName, helmChartDirPath: helmChartDirPath, } } // Run implements the TemplateRenderer interface. func (h *VFSRenderer) Run() error { if err := CheckCompiledInCharts(); err != nil { return err } scope.Debugf("Run VFSRenderer with helmChart=%s, componentName=%s, namespace=%s", h.helmChartDirPath, h.componentName, h.namespace) if err := h.loadChart(); err != nil { return err } h.started = true return nil } // RenderManifest renders the current helm templates with the current values and returns the resulting YAML manifest // string. 
func (h *VFSRenderer) RenderManifest(values string) (string, error) { if !h.started { return "", fmt.Errorf("VFSRenderer for %s not started in renderChart", h.componentName) } return renderChart(h.namespace, values, h.chart) } // LoadValuesVFS loads the compiled in file corresponding to the given profile name. func LoadValuesVFS(profileName string) (string, error) { if err := CheckCompiledInCharts(); err != nil { return "", err } path := filepath.Join(profilesRoot, BuiltinProfileToFilename(profileName)) scope.Infof("Loading values from compiled in VFS at path %s", path) b, err := vfs.ReadFile(path) return string(b), err } func LoadValues(profileName string, chartsDir string) (string, error) { path := filepath.Join(chartsDir, profilesRoot, BuiltinProfileToFilename(profileName)) scope.Infof("Loading values at path %s", path) b, err := ioutil.ReadFile(path) return string(b), err } func readProfiles(chartsDir string) (map[string]bool, error) { profiles := map[string]bool{} switch chartsDir { case "": if err := CheckCompiledInCharts(); err != nil { return nil, err } profilePaths, err := vfs.ReadDir(chartsDir) if err != nil { return nil, fmt.Errorf("failed to read profiles: %v", err) } for _, f := range profilePaths { profiles[strings.TrimSuffix(f, ".yaml")] = true } default: dir, err := ioutil.ReadDir(filepath.Join(chartsDir, profilesRoot)) if err != nil { return nil, fmt.Errorf("failed to read profiles: %v", err) } for _, f := range dir { profiles[strings.TrimSuffix(f.Name(), ".yaml")] = true } } return profiles, nil } // loadChart implements the TemplateRenderer interface. 
func (h *VFSRenderer) loadChart() error { prefix := h.helmChartDirPath fnames, err := vfs.GetFilesRecursive(prefix) if err != nil { return err } var bfs []*loader.BufferedFile for _, fname := range fnames { b, err := vfs.ReadFile(fname) if err != nil { return err } // Helm expects unix / separator, but on windows this will be \ name := strings.ReplaceAll(stripPrefix(fname, prefix), string(filepath.Separator), "/") bf := &loader.BufferedFile{ Name: name, Data: b, } bfs = append(bfs, bf) scope.Debugf("Chart loaded: %s", bf.Name) } h.chart, err = loader.LoadFiles(bfs) return err } func BuiltinProfileToFilename(name string) string { if name == "" { return DefaultProfileFilename } return name + ".yaml" } // stripPrefix removes the the given prefix from prefix. func stripPrefix(path, prefix string) string { pl := len(strings.Split(prefix, string(filepath.Separator))) pv := strings.Split(path, string(filepath.Separator)) return strings.Join(pv[pl:], string(filepath.Separator)) } // list all the profiles. func ListProfiles(charts string) ([]string, error) { profiles, err := readProfiles(charts) if err != nil { return nil, err } return util.StringBoolMapToSlice(profiles), nil } // CheckCompiledInCharts tests for the presence of compiled in charts. These can be missing if a developer creates // binaries using go build instead of make and tries to use compiled in charts. func CheckCompiledInCharts() error { if _, err := vfs.Stat(ChartsSubdirName); err != nil { return fmt.Errorf("compiled in charts not found in this development build, use --charts with " + "local charts instead (e.g. istioctl install --charts manifests/) or run make gen-charts and rebuild istioctl") } return nil }
package kiwitaxi import ( "encoding/csv" "github.com/gocarina/gocsv" "io" "net/http" "net/url" ) type KiwitaxiApi struct { token string log LoggerInterface } type LoggerInterface interface { Debug(...interface{}) } // NewKiwitaxiApi creates a new instance KiwitaxiApi. func NewKiwitaxiApi(token string) *KiwitaxiApi { gocsv.SetCSVReader(func(in io.Reader) *csv.Reader { csvin := csv.NewReader(in) csvin.Comma = '\t' csvin.LazyQuotes = true return csvin }) return &KiwitaxiApi{ token: token, } } type Coordinates struct { Lon float64 `json:"lon" bson:"lon"` Lan float64 `json:"lat" bson:"lat"` } func (a *KiwitaxiApi) SetLogger(logger LoggerInterface) { a.log = logger } func (a *KiwitaxiApi) getCsvWithParams(path string, args map[string]string, v interface{}) error { apiUrl, err := url.Parse("https://kiwitaxi.com/" + path) if err != nil { return err } params := url.Values{} params.Add("security_token", a.token) for k, v := range args { if v == "" { continue } params.Add(k, v) } apiUrl.RawQuery = params.Encode() if a.log != nil { a.log.Debug("API Send: " + apiUrl.String()) } client := &http.Client{} req, _ := http.NewRequest("GET", apiUrl.String(), nil) res, err := client.Do(req) if err != nil { return err } defer res.Body.Close() return gocsv.Unmarshal(NewFixerReader(res.Body), v) } func (a *KiwitaxiApi) getCsv(path string, v interface{}) error { return a.getCsvWithParams(path, map[string]string{}, v) } Delete unused code package kiwitaxi import ( "encoding/csv" "github.com/gocarina/gocsv" "io" "net/http" "net/url" ) type KiwitaxiApi struct { token string log LoggerInterface } type LoggerInterface interface { Debug(...interface{}) } // NewKiwitaxiApi creates a new instance KiwitaxiApi. 
func NewKiwitaxiApi(token string) *KiwitaxiApi { gocsv.SetCSVReader(func(in io.Reader) *csv.Reader { csvin := csv.NewReader(in) csvin.Comma = '\t' csvin.LazyQuotes = true return csvin }) return &KiwitaxiApi{ token: token, } } func (a *KiwitaxiApi) SetLogger(logger LoggerInterface) { a.log = logger } func (a *KiwitaxiApi) getCsvWithParams(path string, args map[string]string, v interface{}) error { apiUrl, err := url.Parse("https://kiwitaxi.com/" + path) if err != nil { return err } params := url.Values{} params.Add("security_token", a.token) for k, v := range args { if v == "" { continue } params.Add(k, v) } apiUrl.RawQuery = params.Encode() if a.log != nil { a.log.Debug("API Send: " + apiUrl.String()) } client := &http.Client{} req, _ := http.NewRequest("GET", apiUrl.String(), nil) res, err := client.Do(req) if err != nil { return err } defer res.Body.Close() return gocsv.Unmarshal(NewFixerReader(res.Body), v) } func (a *KiwitaxiApi) getCsv(path string, v interface{}) error { return a.getCsvWithParams(path, map[string]string{}, v) }
// Model types for the Twitch REST API ("kraken"-era), decoded from JSON.
package twitch2go

import (
	"encoding/json"
	"time"
)

// Direction, SortBy and VideoSort are string enums used as list-query options.
type Direction string
type SortBy string
type VideoSort string

const (
	ASC           Direction = "asc"
	DESC          Direction = "desc"
	CreatedAt     SortBy    = "created_at"
	LastBroadcast SortBy    = "last_broadcast"
	Login         SortBy    = "login"
	Views         VideoSort = "views"
	Time          VideoSort = "time"
)

// Channel Twitch Channel Data
type Channel struct {
	Mature                       bool        `json:"mature"`
	Status                       string      `json:"status"`
	BroadcasterLanguage          string      `json:"broadcaster_language"`
	DisplayName                  string      `json:"display_name"`
	Game                         string      `json:"game"`
	Language                     string      `json:"language"`
	ID                           json.Number `json:"_id,number"`
	Name                         string      `json:"name"`
	CreatedAt                    time.Time   `json:"created_at"`
	UpdatedAt                    time.Time   `json:"updated_at"`
	Logo                         string      `json:"logo"`
	VideoBanner                  string      `json:"video_banner"`
	ProfileBanner                string      `json:"profile_banner"`
	ProfileBannerBackgroundColor string      `json:"profile_banner_background_color"`
	Partner                      bool        `json:"partner"`
	URL                          string      `json:"url"`
	Views                        int64       `json:"views"`
	Followers                    int64       `json:"followers"`
}

// Post is a channel-feed post.
type Post struct {
	ID        json.Number `json:"id,number"`
	CreatedAt time.Time   `json:"created_at"`
	Deleted   bool        `json:"deleted"`
	Emotes    []string    `json:"emotes"`
	Body      string      `json:"body"`
	User      User        `json:"user"`
}

// Follower data for twitch channel
type Follow struct {
	CreatedAt     time.Time         `json:"created_at"`
	Links         map[string]string `json:"_links"`
	Notifications bool              `json:"notifications"`
	User          User              `json:"user"`
	Channel       Channel           `json:"channel"`
}

// Followers is a cursor-paginated page of Follow records.
type Followers struct {
	Total   int64    `json:"_total"`
	Cursor  string   `json:"_cursor"`
	Follows []Follow `json:"follows"`
}

// Subscription links a subscribing User to a channel.
type Subscription struct {
	ID        json.Number `json:"_id,number"`
	CreatedAt time.Time   `json:"created_at"`
	User      User        `json:"user"`
}

// Subscribers is a cursor-paginated page of Subscription records.
type Subscribers struct {
	Total         int64          `json:"_total"`
	Cursor        string         `json:"_cursor"`
	Subscriptions []Subscription `json:"subscriptions"`
}

// UserSearchResult is the response of a user search.
type UserSearchResult struct {
	Total int64  `json:"_total"`
	Users []User `json:"users"`
}

// User Twitch User Data
type User struct {
	Type             string        `json:"type"`
	Name             string        `json:"name"`
	CreatedAt        time.Time     `json:"created_at"`
	UpdatedAt        time.Time     `json:"updated_at"`
	Logo             string        `json:"logo"`
	ID               json.Number   `json:"_id,number"`
	DisplayName      string        `json:"display_name"`
	Bio              string        `json:"bio"`
	Email            string        `json:"email"`
	EmailVerified    bool          `json:"email_verified"`
	Partnered        bool          `json:"partnered"`
	TwitterConnected bool          `json:"twitter_connected"`
	Notifications    Notifications `json:"notifications"`
}

// Notifications holds a user's notification preferences.
type Notifications struct {
	Email bool `json:"email"`
	Push  bool `json:"push"`
}

// Editors lists the users allowed to edit a channel.
type Editors struct {
	Users []User `json:"Users"`
}

// Videos is a page of Video records.
type Videos struct {
	Total  int     `json:"_total"`
	Videos []Video `json:"videos"`
}

// Video is an archived broadcast or highlight.
type Video struct {
	ID              string      `json:"_id"`
	BroadcastID     json.Number `json:"broadcast_id,number"`
	BroadcastType   string      `json:"broadcast_type"`
	Channel         Channel     `json:"channel"`
	CreatedAt       time.Time   `json:"created_at"`
	Description     string      `json:"description"`
	DescriptionHTML string      `json:"description_html"`
	Fps             Fps         `json:"fps"`
	Game            string      `json:"game"`
	Language        string      `json:"language"`
	Length          int         `json:"length"`
	Preview         Preview     `json:"preview"`
	PublishedAt     time.Time   `json:"published_at"`
	Resolutions     Resolutions `json:"resolutions"`
	Status          string      `json:"status"`
	TagList         string      `json:"tag_list"`
	Thumbnails      Thumbnails  `json:"thumbnails"`
	Title           string      `json:"title"`
	URL             string      `json:"url"`
	Viewable        string      `json:"viewable"`
	ViewableAt      interface{} `json:"viewable_at"`
	Views           int         `json:"views"`
}

// Thumbnail is one thumbnail image reference.
type Thumbnail struct {
	Type string `json:"type"`
	URL  string `json:"url"`
}

// Thumbnails groups thumbnails by size class.
type Thumbnails struct {
	Large    []Thumbnail `json:"large"`
	Medium   []Thumbnail `json:"medium"`
	Small    []Thumbnail `json:"small"`
	Template []Thumbnail `json:"template"`
}

// Resolutions holds per-quality resolution strings.
type Resolutions struct {
	Chunked string `json:"chunked"`
	High    string `json:"high"`
	Low     string `json:"low"`
	Medium  string `json:"medium"`
	Mobile  string `json:"mobile"`
}

// Preview holds preview-image URLs by size.
type Preview struct {
	Large    string `json:"large"`
	Medium   string `json:"medium"`
	Small    string `json:"small"`
	Template string `json:"template"`
}

// Fps holds per-quality frame rates.
type Fps struct {
	Chunked float64 `json:"chunked"`
	High    float64 `json:"high"`
	Low     float64 `json:"low"`
	Medium  float64 `json:"medium"`
	Mobile  float64 `json:"mobile"`
}

// Stream is a live broadcast.
type Stream struct {
	ID          json.Number `json:"_id,number"`
	Game        string      `json:"game"`
	CommunityID string      `json:"community_id"`
	Viewers     int         `json:"viewers"`
	VideoHeight int         `json:"video_height"`
	AverageFps  float64     `json:"average_fps"`
	Delay       int         `json:"delay"`
	CreatedAt   time.Time   `json:"created_at"`
	IsPlaylist  bool        `json:"is_playlist"`
	Preview     Preview     `json:"preview"`
	Channel     Channel     `json:"channel"`
}

// StreamResponse wraps a single Stream.
type StreamResponse struct {
	Stream Stream `json:"stream"`
}

// FollowedStream is a page of streams the user follows.
type FollowedStream struct {
	Total   int64    `json:"_total"`
	Streams []Stream `json:"streams"`
}

// ResponseError is the API's error envelope.
type ResponseError struct {
	Error   string      `json:"error"`
	Message string      `json:"message"`
	Status  json.Number `json:"status,number"`
}

Changed all int and int64 to uint for sake of sameness Signed-off-by: Ken Herner <f8dc7dc5a2771d08866ac106f08d2a43aa090425@gmail.com>

// ---- second copy of the file follows (revision after the commit
// ---- message above: all count fields changed from int/int64 to uint) ----

package twitch2go

import (
	"encoding/json"
	"time"
)

type Direction string
type SortBy string
type VideoSort string

const (
	ASC           Direction = "asc"
	DESC          Direction = "desc"
	CreatedAt     SortBy    = "created_at"
	LastBroadcast SortBy    = "last_broadcast"
	Login         SortBy    = "login"
	Views         VideoSort = "views"
	Time          VideoSort = "time"
)

// Channel Twitch Channel Data
type Channel struct {
	Mature                       bool        `json:"mature"`
	Status                       string      `json:"status"`
	BroadcasterLanguage          string      `json:"broadcaster_language"`
	DisplayName                  string      `json:"display_name"`
	Game                         string      `json:"game"`
	Language                     string      `json:"language"`
	ID                           json.Number `json:"_id,number"`
	Name                         string      `json:"name"`
	CreatedAt                    time.Time   `json:"created_at"`
	UpdatedAt                    time.Time   `json:"updated_at"`
	Logo                         string      `json:"logo"`
	VideoBanner                  string      `json:"video_banner"`
	ProfileBanner                string      `json:"profile_banner"`
	ProfileBannerBackgroundColor string      `json:"profile_banner_background_color"`
	Partner                      bool        `json:"partner"`
	URL                          string      `json:"url"`
	Views                        uint        `json:"views"`
	Followers                    uint        `json:"followers"`
}

type Post struct {
	ID        json.Number `json:"id,number"`
	CreatedAt time.Time   `json:"created_at"`
	Deleted   bool        `json:"deleted"`
	Emotes    []string    `json:"emotes"`
	Body      string      `json:"body"`
	User      User        `json:"user"`
}

// Follower data for twitch channel
type Follow struct {
	CreatedAt     time.Time         `json:"created_at"`
	Links         map[string]string `json:"_links"`
	Notifications bool              `json:"notifications"`
	User          User              `json:"user"`
	Channel       Channel           `json:"channel"`
}

type Followers struct {
	Total   uint     `json:"_total"`
	Cursor  string   `json:"_cursor"`
	Follows []Follow `json:"follows"`
}

type Subscription struct {
	ID        json.Number `json:"_id,number"`
	CreatedAt time.Time   `json:"created_at"`
	User      User        `json:"user"`
}

type Subscribers struct {
	Total         uint           `json:"_total"`
	Cursor        string         `json:"_cursor"`
	Subscriptions []Subscription `json:"subscriptions"`
}

type UserSearchResult struct {
	Total uint   `json:"_total"`
	Users []User `json:"users"`
}

// User Twitch User Data
type User struct {
	Type             string        `json:"type"`
	Name             string        `json:"name"`
	CreatedAt        time.Time     `json:"created_at"`
	UpdatedAt        time.Time     `json:"updated_at"`
	Logo             string        `json:"logo"`
	ID               json.Number   `json:"_id,number"`
	DisplayName      string        `json:"display_name"`
	Bio              string        `json:"bio"`
	Email            string        `json:"email"`
	EmailVerified    bool          `json:"email_verified"`
	Partnered        bool          `json:"partnered"`
	TwitterConnected bool          `json:"twitter_connected"`
	Notifications    Notifications `json:"notifications"`
}

type Notifications struct {
	Email bool `json:"email"`
	Push  bool `json:"push"`
}

type Editors struct {
	Users []User `json:"Users"`
}

type Videos struct {
	Total  uint    `json:"_total"`
	Videos []Video `json:"videos"`
}

type Video struct {
	ID              string      `json:"_id"`
	BroadcastID     json.Number `json:"broadcast_id,number"`
	BroadcastType   string      `json:"broadcast_type"`
	Channel         Channel     `json:"channel"`
	CreatedAt       time.Time   `json:"created_at"`
	Description     string      `json:"description"`
	DescriptionHTML string      `json:"description_html"`
	Fps             Fps         `json:"fps"`
	Game            string      `json:"game"`
	Language        string      `json:"language"`
	Length          uint        `json:"length"`
	Preview         Preview     `json:"preview"`
	PublishedAt     time.Time   `json:"published_at"`
	Resolutions     Resolutions `json:"resolutions"`
	Status          string      `json:"status"`
	TagList         string      `json:"tag_list"`
	Thumbnails      Thumbnails  `json:"thumbnails"`
	Title           string      `json:"title"`
	URL             string      `json:"url"`
	Viewable        string      `json:"viewable"`
	ViewableAt      interface{} `json:"viewable_at"`
	Views           uint        `json:"views"`
}

type Thumbnail struct {
	Type string `json:"type"`
	URL  string `json:"url"`
}

type Thumbnails struct {
	Large    []Thumbnail `json:"large"`
	Medium   []Thumbnail `json:"medium"`
	Small    []Thumbnail `json:"small"`
	Template []Thumbnail `json:"template"`
}

type Resolutions struct {
	Chunked string `json:"chunked"`
	High    string `json:"high"`
	Low     string `json:"low"`
	Medium  string `json:"medium"`
	Mobile  string `json:"mobile"`
}

type Preview struct {
	Large    string `json:"large"`
	Medium   string `json:"medium"`
	Small    string `json:"small"`
	Template string `json:"template"`
}

type Fps struct {
	Chunked float64 `json:"chunked"`
	High    float64 `json:"high"`
	Low     float64 `json:"low"`
	Medium  float64 `json:"medium"`
	Mobile  float64 `json:"mobile"`
}

type Stream struct {
	ID          json.Number `json:"_id,number"`
	Game        string      `json:"game"`
	CommunityID string      `json:"community_id"`
	Viewers     uint        `json:"viewers"`
	VideoHeight uint        `json:"video_height"`
	AverageFps  float64     `json:"average_fps"`
	Delay       uint        `json:"delay"`
	CreatedAt   time.Time   `json:"created_at"`
	IsPlaylist  bool        `json:"is_playlist"`
	Preview     Preview     `json:"preview"`
	Channel     Channel     `json:"channel"`
}

type StreamResponse struct {
	Stream Stream `json:"stream"`
}

type FollowedStream struct {
	Total   uint     `json:"_total"`
	Streams []Stream `json:"streams"`
}

type ResponseError struct {
	Error   string      `json:"error"`
	Message string      `json:"message"`
	Status  json.Number `json:"status,number"`
}
// Package tripeg models the triangular peg-solitaire board game.
package tripeg

import (
	"math"
	"math/rand"
	"time"
)

// Hole struct that contains information
// about a hole in the board, its location
// and whether or not it has a peg in it.
type Hole struct {
	Row   int     // max of 5 (five board rows)
	Col   int     // max of 9 (columns are spaced 2 apart, offset per row)
	Peg   bool    // true while a peg occupies this hole
	Links []*Hole // Other Holes the hole is connected to
}

// Jump moves a peg from one hole to another.
// If it can jump, it removes the peg from the
// overHole hole. Returns true only when the jump was legal and applied.
func (h *Hole) Jump(b *Board, overHole *Hole) bool {
	if !overHole.Peg {
		// If there is no peg in the overHole, no jump possible.
		return false
	}
	rDif := h.Row - overHole.Row
	cDif := overHole.Col - h.Col
	if cDif == 0 && rDif == 0 {
		// Holes are the same, not valid.
		return false
	}
	if math.Abs(float64(rDif)) > 1 {
		// The over hole must be in an adjacent row (vertical distance <= 1).
		return false
	}
	if rDif > 0 && math.Abs(float64(cDif)) > 1 {
		// For a diagonal jump the over hole must be in an adjacent column.
		return false
	}
	if rDif == 0 && math.Abs(float64(cDif)) > 2 {
		return false
		// A horizontal jump can only reach over the next column (spacing 2).
	}
	targetR := 0
	targetC := 0
	if rDif == 0 {
		// This is a horizontal jump: landing row is unchanged.
		targetR = h.Row
	}
	if rDif > 0 {
		targetR = overHole.Row - 1
		// This is a jump up.
	}
	if rDif < 0 {
		targetR = overHole.Row + 1
		// This is a jump down.
	}
	if cDif < 0 {
		x := 1
		if rDif == 0 {
			x = 2 // horizontal moves step 2 columns per hole
		}
		targetC = overHole.Col - x
		// This is a jump left.
	}
	if cDif > 0 {
		x := 1
		if rDif == 0 {
			x = 2
		}
		targetC = overHole.Col + x
		// This is a jump right.
	}
	targetHole := b.GetHole(targetR, targetC)
	if targetHole == nil {
		return false // landing square is off the board
	}
	if targetHole.Peg {
		return false // landing square must be empty
	}
	// Apply the move: jumper leaves, jumped peg is removed, landing filled.
	h.Peg = false
	overHole.Peg = false
	targetHole.Peg = true
	return true
}

// Board contains all the holes that contain the pegs.
type Board struct {
	Holes   []*Hole
	MoveLog []string
}

// GetHole gets a pointer to a hole based on the row,col coordinates;
// returns nil when the coordinates are out of range or unused.
func (b Board) GetHole(r, c int) *Hole {
	if r < 0 || r > 6 || c < 0 || c > 9 {
		return nil
	}
	for _, v := range b.Holes {
		if v.Col == c && v.Row == r {
			return v
		}
	}
	return nil
}

// BuildBoard makes a board of peg holes.
// All holes have a peg except one randomly assigned.
// The top row has 1, then 2,3,4,5 for a total of 15 holes.
// empty selects the 1-based empty hole; 0 picks one at random.
func BuildBoard(empty int) Board {
	var b Board
	s2 := rand.NewSource(time.Now().UnixNano())
	r2 := rand.New(s2)
	if empty == 0 {
		empty = r2.Intn(15)
	} else {
		empty-- // convert to 0-based index
	}
	for r := 1; r < 6; r++ {
		for c := 1; c < r+1; c++ {
			// Center each row: columns spread 2 apart around column 5.
			col := 4 - (r) + (c * 2)
			h := Hole{Row: r, Col: col, Peg: true}
			if empty == len(b.Holes) {
				h.Peg = false
			}
			b.Holes = append(b.Holes, &h)
		}
	}
	return b
}

// Solve is a placeholder; the intended algorithm is sketched below.
func (b *Board) Solve() {
	b.MoveLog = []string{}
	// Find out how many holes
	// can make a legal Move
	// randomly pick one of those holes
	// find out how many moves can be made from that hole
	// randomly pick one of those
	// try again until there are no moves to make
	// or 14 legal moves have been made, (winner)
	// Print out all the winning moves
}

// String renders the board: '*' = peg, 'O' = empty hole, ' ' = no hole.
func (b Board) String() string {
	result := "\n"
	for r := 1; r < 6; r++ {
		for c := 1; c < 10; c++ {
			h := b.GetHole(r, c)
			mark := " "
			if h != nil {
				mark = "O"
				if h.Peg {
					mark = "*"
				}
			}
			result += mark
		}
		result += "\n"
	}
	return result
}

// even reports whether number is even. NOTE(review): currently unused.
func even(number int) bool {
	return number%2 == 0
}

redoing solve

// ---- second copy of the file follows (revision after the commit
// ---- message above; only Solve changed) ----

package tripeg

import (
	"math"
	"math/rand"
	"time"
)

// Hole struct that contains information
// about a hole in the board, its location
// and whether or not it has a peg in it.
type Hole struct {
	Row   int     // max of 5
	Col   int     // max of 9
	Peg   bool
	Links []*Hole // Other Holes the hole is connected to
}

// Jump moves a peg from one hole to another.
// If it can jump, it removes the peg from the
// overHole hole.
func (h *Hole) Jump(b *Board, overHole *Hole) bool {
	if !overHole.Peg {
		// If there is no peg in the overHole, no jump possible.
		return false
	}
	rDif := h.Row - overHole.Row
	cDif := overHole.Col - h.Col
	if cDif == 0 && rDif == 0 {
		// Holes are the same, not valid.
		return false
	}
	if math.Abs(float64(rDif)) > 1 {
		// The over hole must be in an adjacent row.
		return false
	}
	if rDif > 0 && math.Abs(float64(cDif)) > 1 {
		// For a diagonal jump the over hole must be in an adjacent column.
		return false
	}
	if rDif == 0 && math.Abs(float64(cDif)) > 2 {
		return false
		// A horizontal jump can only reach over the next column.
	}
	targetR := 0
	targetC := 0
	if rDif == 0 {
		// This is a horizontal jump.
		targetR = h.Row
	}
	if rDif > 0 {
		targetR = overHole.Row - 1
		// This is a jump up.
	}
	if rDif < 0 {
		targetR = overHole.Row + 1
		// This is a jump down.
	}
	if cDif < 0 {
		x := 1
		if rDif == 0 {
			x = 2
		}
		targetC = overHole.Col - x
		// This is a jump left.
	}
	if cDif > 0 {
		x := 1
		if rDif == 0 {
			x = 2
		}
		targetC = overHole.Col + x
		// This is a jump right.
	}
	targetHole := b.GetHole(targetR, targetC)
	if targetHole == nil {
		return false
	}
	if targetHole.Peg {
		return false
	}
	h.Peg = false
	overHole.Peg = false
	targetHole.Peg = true
	return true
}

// Board contains all the holes that contain the pegs.
type Board struct {
	Holes   []*Hole
	MoveLog []string
}

// GetHole gets a pointer to a hole based on the row,col coordinates.
func (b Board) GetHole(r, c int) *Hole {
	if r < 0 || r > 6 || c < 0 || c > 9 {
		return nil
	}
	for _, v := range b.Holes {
		if v.Col == c && v.Row == r {
			return v
		}
	}
	return nil
}

// BuildBoard makes a board of peg holes.
// All holes have a peg except one randomly assigned.
// The top row has 1, then 2,3,4,5 for a total of 15 holes.
func BuildBoard(empty int) Board {
	var b Board
	s2 := rand.NewSource(time.Now().UnixNano())
	r2 := rand.New(s2)
	if empty == 0 {
		empty = r2.Intn(15)
	} else {
		empty--
	}
	for r := 1; r < 6; r++ {
		for c := 1; c < r+1; c++ {
			col := 4 - (r) + (c * 2)
			h := Hole{Row: r, Col: col, Peg: true}
			if empty == len(b.Holes) {
				h.Peg = false
			}
			b.Holes = append(b.Holes, &h)
		}
	}
	return b
}

// Solve is work in progress: only the "up-left" direction is attempted.
func (b *Board) Solve() {
	b.MoveLog = []string{}
	// Find out how many holes
	// can make a legal Move
	// randomly pick one of those holes
	// find out how many moves can be made from that hole
	// randomly pick one of those
	// try again until there are no moves to make
	// or 14 legal moves have been made, (winner)
	// Print out all the winning moves
	cMoves := []*Hole{}
	_ = cMoves
	for _, v := range b.Holes {
		if v.Peg == true {
			// NOTE(review): GetHole can return nil (e.g. row 0), and
			// Jump dereferences overHole without a nil check — this
			// will panic for top-row holes. Needs a guard.
			o := b.GetHole(v.Row-1, v.Col-2)
			if v.Jump(b, o) {
				//upleft
			}
			//upright
			//right
			//left
			//downleft
			//downright
		}
	}
}

// String renders the board: '*' = peg, 'O' = empty hole, ' ' = no hole.
func (b Board) String() string {
	result := "\n"
	for r := 1; r < 6; r++ {
		for c := 1; c < 10; c++ {
			h := b.GetHole(r, c)
			mark := " "
			if h != nil {
				mark = "O"
				if h.Peg {
					mark = "*"
				}
			}
			result += mark
		}
		result += "\n"
	}
	return result
}

// even reports whether number is even. NOTE(review): currently unused.
func even(number int) bool {
	return number%2 == 0
}
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Extended and bugfixes by Miek Gieben package dns import ( "encoding/base64" "fmt" "net" "strconv" "strings" "time" ) // Packet formats // Wire constants and supported types. const ( // valid RR_Header.Rrtype and Question.qtype TypeA uint16 = 1 TypeNS uint16 = 2 TypeMD uint16 = 3 TypeMF uint16 = 4 TypeCNAME uint16 = 5 TypeSOA uint16 = 6 TypeMB uint16 = 7 TypeMG uint16 = 8 TypeMR uint16 = 9 TypeNULL uint16 = 10 TypeWKS uint16 = 11 TypePTR uint16 = 12 TypeHINFO uint16 = 13 TypeMINFO uint16 = 14 TypeMX uint16 = 15 TypeTXT uint16 = 16 TypeRP uint16 = 17 TypeAFSDB uint16 = 18 TypeRT uint16 = 21 TypeSIG uint16 = 24 TypeKEY uint16 = 25 TypeAAAA uint16 = 28 TypeLOC uint16 = 29 TypeNXT uint16 = 30 TypeSRV uint16 = 33 TypeNAPTR uint16 = 35 TypeKX uint16 = 36 TypeCERT uint16 = 37 TypeDNAME uint16 = 39 TypeOPT uint16 = 41 // EDNS TypeDS uint16 = 43 TypeSSHFP uint16 = 44 TypeIPSECKEY uint16 = 45 TypeRRSIG uint16 = 46 TypeNSEC uint16 = 47 TypeDNSKEY uint16 = 48 TypeDHCID uint16 = 49 TypeNSEC3 uint16 = 50 TypeNSEC3PARAM uint16 = 51 TypeTLSA uint16 = 52 TypeHIP uint16 = 55 TypeTALINK uint16 = 58 TypeSPF uint16 = 99 TypeTKEY uint16 = 249 TypeTSIG uint16 = 250 // valid Question.Qtype only TypeIXFR uint16 = 251 TypeAXFR uint16 = 252 TypeMAILB uint16 = 253 TypeMAILA uint16 = 254 TypeANY uint16 = 255 TypeURI uint16 = 256 TypeTA uint16 = 32768 TypeDLV uint16 = 32769 // valid Question.Qclass ClassINET = 1 ClassCSNET = 2 ClassCHAOS = 3 ClassHESIOD = 4 ClassNONE = 254 ClassANY = 255 // Msg.rcode RcodeSuccess = 0 RcodeFormatError = 1 RcodeServerFailure = 2 RcodeNameError = 3 RcodeNotImplemented = 4 RcodeRefused = 5 RcodeYXDomain = 6 RcodeYXRrset = 7 RcodeNXRrset = 8 RcodeNotAuth = 9 RcodeNotZone = 10 RcodeBadSig = 16 // TSIG RcodeBadKey = 17 RcodeBadTime = 18 RcodeBadMode = 19 // TKEY RcodeBadName = 20 RcodeBadAlg = 21 RcodeBadTrunc 
= 22 // TSIG // Opcode OpcodeQuery = 0 OpcodeIQuery = 1 OpcodeStatus = 2 // There is no 3 OpcodeNotify = 4 OpcodeUpdate = 5 ) // The wire format for the DNS packet header. type Header struct { Id uint16 Bits uint16 Qdcount, Ancount, Nscount, Arcount uint16 } const ( // Header.Bits _QR = 1 << 15 // query/response (response=1) _AA = 1 << 10 // authoritative _TC = 1 << 9 // truncated _RD = 1 << 8 // recursion desired _RA = 1 << 7 // recursion available _Z = 1 << 6 // Z _AD = 1 << 5 // authticated data _CD = 1 << 4 // checking disabled _LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. ) // DNS queries. type Question struct { Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) Qtype uint16 Qclass uint16 } func (q *Question) String() (s string) { // prefix with ; (as in dig) if len(q.Name) == 0 { s = ";.\t" // root label } else { s = ";" + q.Name + "\t" } if _, ok := Class_str[q.Qclass]; ok { s += Class_str[q.Qclass] + "\t" } else { s += "CLASS" + strconv.Itoa(int(q.Qtype)) } if _, ok := Rr_str[q.Qtype]; ok { s += " " + Rr_str[q.Qtype] } else { s += " " + "TYPE" + strconv.Itoa(int(q.Qtype)) } return s } func (q *Question) Len() int { l := len(q.Name) + 1 return l + 4 } type RR_ANY struct { Hdr RR_Header // Does not have any rdata } func (rr *RR_ANY) Header() *RR_Header { return &rr.Hdr } func (rr *RR_ANY) String() string { return rr.Hdr.String() } func (rr *RR_ANY) Len() int { return rr.Hdr.Len() } func (rr *RR_ANY) Copy() RR { return &RR_ANY{*rr.Hdr.CopyHeader()} } type RR_CNAME struct { Hdr RR_Header Target string `dns:"cdomain-name"` } func (rr *RR_CNAME) Header() *RR_Header { return &rr.Hdr } func (rr *RR_CNAME) String() string { return rr.Hdr.String() + rr.Target } func (rr *RR_CNAME) Len() int { l := len(rr.Target) + 1 return rr.Hdr.Len() + l } func (rr *RR_CNAME) Copy() RR { return &RR_CNAME{*rr.Hdr.CopyHeader(), rr.Target} } type RR_HINFO struct { Hdr RR_Header Cpu string Os string } func (rr *RR_HINFO) Header() *RR_Header { 
return &rr.Hdr } func (rr *RR_HINFO) String() string { return rr.Hdr.String() + rr.Cpu + " " + rr.Os } func (rr *RR_HINFO) Len() int { return rr.Hdr.Len() + len(rr.Cpu) + len(rr.Os) } func (rr *RR_HINFO) Copy() RR { return &RR_HINFO{*rr.Hdr.CopyHeader(), rr.Cpu, rr.Os} } type RR_MB struct { Hdr RR_Header Mb string `dns:"cdomain-name"` } func (rr *RR_MB) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MB) String() string { return rr.Hdr.String() + rr.Mb } func (rr *RR_MB) Len() int { l := len(rr.Mb) + 1 return rr.Hdr.Len() + l } func (rr *RR_MB) Copy() RR { return &RR_MB{*rr.Hdr.CopyHeader(), rr.Mb} } type RR_MG struct { Hdr RR_Header Mg string `dns:"cdomain-name"` } func (rr *RR_MG) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MG) String() string { return rr.Hdr.String() + rr.Mg } func (rr *RR_MG) Len() int { l := len(rr.Mg) + 1 return rr.Hdr.Len() + l } func (rr *RR_MG) Copy() RR { return &RR_MG{*rr.Hdr.CopyHeader(), rr.Mg} } type RR_MINFO struct { Hdr RR_Header Rmail string `dns:"cdomain-name"` Email string `dns:"cdomain-name"` } func (rr *RR_MINFO) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MINFO) String() string { return rr.Hdr.String() + rr.Rmail + " " + rr.Email } func (rr *RR_MINFO) Len() int { l := len(rr.Rmail) + 1 n := len(rr.Email) + 1 return rr.Hdr.Len() + l + n } func (rr *RR_MINFO) Copy() RR { return &RR_MINFO{*rr.Hdr.CopyHeader(), rr.Rmail, rr.Email} } type RR_MR struct { Hdr RR_Header Mr string `dns:"cdomain-name"` } func (rr *RR_MR) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MR) String() string { return rr.Hdr.String() + rr.Mr } func (rr *RR_MR) Len() int { l := len(rr.Mr) + 1 return rr.Hdr.Len() + l } func (rr *RR_MR) Copy() RR { return &RR_MR{*rr.Hdr.CopyHeader(), rr.Mr} } type RR_MF struct { Hdr RR_Header Mf string `dns:"cdomain-name"` } func (rr *RR_MF) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MF) String() string { return rr.Hdr.String() + " " + rr.Mf } func (rr *RR_MF) Len() int { return rr.Hdr.Len() + 
len(rr.Mf) + 1 } func (rr *RR_MF) Copy() RR { return &RR_MF{*rr.Hdr.CopyHeader(), rr.Mf} } type RR_MD struct { Hdr RR_Header Md string `dns:"cdomain-name"` } func (rr *RR_MD) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MD) String() string { return rr.Hdr.String() + " " + rr.Md } func (rr *RR_MD) Len() int { return rr.Hdr.Len() + len(rr.Md) + 1 } func (rr *RR_MD) Copy() RR { return &RR_MD{*rr.Hdr.CopyHeader(), rr.Md} } type RR_MX struct { Hdr RR_Header Pref uint16 Mx string `dns:"cdomain-name"` } func (rr *RR_MX) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MX) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Pref)) + " " + rr.Mx } func (rr *RR_MX) Len() int { l := len(rr.Mx) + 1 return rr.Hdr.Len() + l + 2 } func (rr *RR_MX) Copy() RR { return &RR_MX{*rr.Hdr.CopyHeader(), rr.Pref, rr.Mx} } type RR_AFSDB struct { Hdr RR_Header Subtype uint16 Hostname string `dns:"cdomain-name"` } func (rr *RR_AFSDB) Header() *RR_Header { return &rr.Hdr } func (rr *RR_AFSDB) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + rr.Hostname } func (rr *RR_AFSDB) Len() int { l := len(rr.Hostname) + 1 return rr.Hdr.Len() + l + 2 } func (rr *RR_AFSDB) Copy() RR { return &RR_AFSDB{*rr.Hdr.CopyHeader(), rr.Subtype, rr.Hostname} } type RR_RT struct { Hdr RR_Header Preference uint16 Host string `dns:"cdomain-name"` } func (rr *RR_RT) Header() *RR_Header { return &rr.Hdr } func (rr *RR_RT) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + rr.Host } func (rr *RR_RT) Len() int { l := len(rr.Host) + 1 return rr.Hdr.Len() + l + 2 } func (rr *RR_RT) Copy() RR { return &RR_RT{*rr.Hdr.CopyHeader(), rr.Preference, rr.Host} } type RR_NS struct { Hdr RR_Header Ns string `dns:"cdomain-name"` } func (rr *RR_NS) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NS) String() string { return rr.Hdr.String() + rr.Ns } func (rr *RR_NS) Len() int { l := len(rr.Ns) + 1 return rr.Hdr.Len() + l } func (rr *RR_NS) Copy() RR 
{ return &RR_NS{*rr.Hdr.CopyHeader(), rr.Ns} } type RR_PTR struct { Hdr RR_Header Ptr string `dns:"cdomain-name"` } func (rr *RR_PTR) Header() *RR_Header { return &rr.Hdr } func (rr *RR_PTR) String() string { return rr.Hdr.String() + rr.Ptr } func (rr *RR_PTR) Len() int { l := len(rr.Ptr) + 1 return rr.Hdr.Len() + l } func (rr *RR_PTR) Copy() RR { return &RR_PTR{*rr.Hdr.CopyHeader(), rr.Ptr} } type RR_RP struct { Hdr RR_Header Mbox string `dns:"domain-name"` Txt string `dns:"domain-name"` } func (rr *RR_RP) Header() *RR_Header { return &rr.Hdr } func (rr *RR_RP) String() string { return rr.Hdr.String() + rr.Mbox + " " + rr.Txt } func (rr *RR_RP) Len() int { return rr.Hdr.Len() + len(rr.Mbox) + 1 + len(rr.Txt) + 1 } func (rr *RR_RP) Copy() RR { return &RR_RP{*rr.Hdr.CopyHeader(), rr.Mbox, rr.Txt} } type RR_SOA struct { Hdr RR_Header Ns string `dns:"cdomain-name"` Mbox string `dns:"cdomain-name"` Serial uint32 Refresh uint32 Retry uint32 Expire uint32 Minttl uint32 } func (rr *RR_SOA) Header() *RR_Header { return &rr.Hdr } func (rr *RR_SOA) String() string { return rr.Hdr.String() + rr.Ns + " " + rr.Mbox + " " + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.FormatInt(int64(rr.Refresh), 10) + " " + strconv.FormatInt(int64(rr.Retry), 10) + " " + strconv.FormatInt(int64(rr.Expire), 10) + " " + strconv.FormatInt(int64(rr.Minttl), 10) } func (rr *RR_SOA) Len() int { l := len(rr.Ns) + 1 n := len(rr.Mbox) + 1 return rr.Hdr.Len() + l + n + 20 } func (rr *RR_SOA) Copy() RR { return &RR_SOA{*rr.Hdr.CopyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} } type RR_TXT struct { Hdr RR_Header Txt []string `dns:"txt"` } func (rr *RR_TXT) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TXT) String() string { s := rr.Hdr.String() for i, s1 := range rr.Txt { if i > 0 { s += " " + strconv.QuoteToASCII(s1) } else { s += strconv.QuoteToASCII(s1) } } return s } func (rr *RR_TXT) Len() int { l := rr.Hdr.Len() for _, t := range rr.Txt { l += 
len(t) } return l } func (rr *RR_TXT) Copy() RR { return &RR_TXT{*rr.Hdr.CopyHeader(), rr.Txt} } type RR_SPF struct { Hdr RR_Header Txt []string `dns:"txt"` } func (rr *RR_SPF) Header() *RR_Header { return &rr.Hdr } func (rr *RR_SPF) String() string { s := rr.Hdr.String() for i, s1 := range rr.Txt { if i > 0 { s += " " + "\"" + s1 + "\"" } else { s += "\"" + s1 + "\"" } } return s } func (rr *RR_SPF) Len() int { l := rr.Hdr.Len() for _, t := range rr.Txt { l += len(t) } return l } func (rr *RR_SPF) Copy() RR { return &RR_SPF{*rr.Hdr.CopyHeader(), rr.Txt} } type RR_SRV struct { Hdr RR_Header Priority uint16 Weight uint16 Port uint16 Target string `dns:"domain-name"` } func (rr *RR_SRV) Header() *RR_Header { return &rr.Hdr } func (rr *RR_SRV) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + " " + strconv.Itoa(int(rr.Weight)) + " " + strconv.Itoa(int(rr.Port)) + " " + rr.Target } func (rr *RR_SRV) Len() int { l := len(rr.Target) + 1 return rr.Hdr.Len() + l + 6 } func (rr *RR_SRV) Copy() RR { return &RR_SRV{*rr.Hdr.CopyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} } type RR_NAPTR struct { Hdr RR_Header Order uint16 Pref uint16 Flags string Service string Regexp string Replacement string `dns:"domain-name"` } func (rr *RR_NAPTR) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NAPTR) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Order)) + " " + strconv.Itoa(int(rr.Pref)) + " " + "\"" + rr.Flags + "\" " + "\"" + rr.Service + "\" " + "\"" + rr.Regexp + "\" " + rr.Replacement } func (rr *RR_NAPTR) Len() int { return rr.Hdr.Len() + 4 + len(rr.Flags) + len(rr.Service) + len(rr.Regexp) + len(rr.Replacement) + 1 } func (rr *RR_NAPTR) Copy() RR { return &RR_NAPTR{*rr.Hdr.CopyHeader(), rr.Order, rr.Pref, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} } // See RFC 4398. 
type RR_CERT struct { Hdr RR_Header Type uint16 KeyTag uint16 Algorithm uint8 Certificate string `dns:"base64"` } func (rr *RR_CERT) Header() *RR_Header { return &rr.Hdr } func (rr *RR_CERT) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Type)) + " " + strconv.Itoa(int(rr.KeyTag)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + rr.Certificate } func (rr *RR_CERT) Len() int { return rr.Hdr.Len() + 5 + base64.StdEncoding.DecodedLen(len(rr.Certificate)) } func (rr *RR_CERT) Copy() RR { return &RR_CERT{*rr.Hdr.CopyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} } // See RFC 2672. type RR_DNAME struct { Hdr RR_Header Target string `dns:"domain-name"` } func (rr *RR_DNAME) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DNAME) String() string { return rr.Hdr.String() + rr.Target } func (rr *RR_DNAME) Len() int { l := len(rr.Target) + 1 return rr.Hdr.Len() + l } func (rr *RR_DNAME) Copy() RR { return &RR_DNAME{*rr.Hdr.CopyHeader(), rr.Target} } type RR_A struct { Hdr RR_Header A net.IP `dns:"a"` } func (rr *RR_A) Header() *RR_Header { return &rr.Hdr } func (rr *RR_A) String() string { return rr.Hdr.String() + rr.A.String() } func (rr *RR_A) Len() int { return rr.Hdr.Len() + net.IPv4len } func (rr *RR_A) Copy() RR { return &RR_A{*rr.Hdr.CopyHeader(), rr.A} } type RR_AAAA struct { Hdr RR_Header AAAA net.IP `dns:"aaaa"` } func (rr *RR_AAAA) Header() *RR_Header { return &rr.Hdr } func (rr *RR_AAAA) String() string { return rr.Hdr.String() + rr.AAAA.String() } func (rr *RR_AAAA) Len() int { return rr.Hdr.Len() + net.IPv6len } func (rr *RR_AAAA) Copy() RR { return &RR_AAAA{*rr.Hdr.CopyHeader(), rr.AAAA} } type RR_LOC struct { Hdr RR_Header Version uint8 Size uint8 HorizPre uint8 VertPre uint8 Latitude uint32 Longitude uint32 Altitude uint32 } func (rr *RR_LOC) Header() *RR_Header { return &rr.Hdr } func (rr *RR_LOC) String() string { s := rr.Hdr.String() // Copied from ldns // Latitude lat := rr.Latitude north := "N" if lat > _LOC_EQUATOR { lat = 
lat - _LOC_EQUATOR } else { north = "S" lat = _LOC_EQUATOR - lat } h := lat / (1000 * 60 * 60) lat = lat % (1000 * 60 * 60) m := lat / (1000 * 60) lat = lat % (1000 * 60) s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float32(lat) / 1000), north) // Longitude lon := rr.Longitude east := "E" if lon > _LOC_EQUATOR { lon = lon - _LOC_EQUATOR } else { east = "W" lon = _LOC_EQUATOR - lon } h = lon / (1000 * 60 * 60) lon = lon % (1000 * 60 * 60) m = lon / (1000 * 60) lon = lon % (1000 * 60) s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float32(lon) / 1000), east) s1 := rr.Altitude / 100.00 s1 -= 100000 if rr.Altitude%100 == 0 { s += fmt.Sprintf("%.2fm ", float32(s1)) } else { s += fmt.Sprintf("%.0fm ", float32(s1)) } s += cmToString((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " s += cmToString((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " s += cmToString((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" return s } func (rr *RR_LOC) Len() int { return rr.Hdr.Len() + 4 + 12 } func (rr *RR_LOC) Copy() RR { return &RR_LOC{*rr.Hdr.CopyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} } type RR_RRSIG struct { Hdr RR_Header TypeCovered uint16 Algorithm uint8 Labels uint8 OrigTtl uint32 Expiration uint32 Inception uint32 KeyTag uint16 SignerName string `dns:"domain-name"` Signature string `dns:"base64"` } func (rr *RR_RRSIG) Header() *RR_Header { return &rr.Hdr } func (rr *RR_RRSIG) String() string { return rr.Hdr.String() + Rr_str[rr.TypeCovered] + " " + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.Labels)) + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + " " + TimeToString(rr.Expiration) + " " + TimeToString(rr.Inception) + " " + strconv.Itoa(int(rr.KeyTag)) + " " + rr.SignerName + " " + rr.Signature } func (rr *RR_RRSIG) Len() int { return rr.Hdr.Len() + len(rr.SignerName) + 1 + base64.StdEncoding.DecodedLen(len(rr.Signature)) + 18 } func (rr *RR_RRSIG) Copy() RR { return &RR_RRSIG{*rr.Hdr.CopyHeader(), rr.TypeCovered, 
rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} } type RR_NSEC struct { Hdr RR_Header NextDomain string `dns:"domain-name"` TypeBitMap []uint16 `dns:"nsec"` } func (rr *RR_NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NSEC) String() string { s := rr.Hdr.String() + rr.NextDomain for i := 0; i < len(rr.TypeBitMap); i++ { if _, ok := Rr_str[rr.TypeBitMap[i]]; ok { s += " " + Rr_str[rr.TypeBitMap[i]] } else { s += " " + "TYPE" + strconv.Itoa(int(rr.TypeBitMap[i])) } } return s } func (rr *RR_NSEC) Len() int { l := len(rr.NextDomain) + 1 return rr.Hdr.Len() + l + 32 + 1 // TODO: +32 is max type bitmap } func (rr *RR_NSEC) Copy() RR { return &RR_NSEC{*rr.Hdr.CopyHeader(), rr.NextDomain, rr.TypeBitMap} } type RR_DS struct { Hdr RR_Header KeyTag uint16 Algorithm uint8 DigestType uint8 Digest string `dns:"hex"` } func (rr *RR_DS) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DS) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.DigestType)) + " " + strings.ToUpper(rr.Digest) } func (rr *RR_DS) Len() int { return rr.Hdr.Len() + 4 + len(rr.Digest)/2 } func (rr *RR_DS) Copy() RR { return &RR_DS{*rr.Hdr.CopyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } type RR_DLV struct { Hdr RR_Header KeyTag uint16 Algorithm uint8 DigestType uint8 Digest string `dns:"hex"` } func (rr *RR_DLV) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DLV) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.DigestType)) + " " + strings.ToUpper(rr.Digest) } func (rr *RR_DLV) Len() int { return rr.Hdr.Len() + 4 + len(rr.Digest)/2 } func (rr *RR_DLV) Copy() RR { return &RR_DLV{*rr.Hdr.CopyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } type RR_KX struct { Hdr RR_Header Pref uint16 Exchanger string `dns:"domain-name"` } 
func (rr *RR_KX) Header() *RR_Header { return &rr.Hdr } func (rr *RR_KX) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Pref)) + " " + rr.Exchanger } func (rr *RR_KX) Len() int { return 0 } func (rr *RR_KX) Copy() RR { return &RR_KX{*rr.Hdr.CopyHeader(), rr.Pref, rr.Exchanger} } type RR_TA struct { Hdr RR_Header KeyTag uint16 Algorithm uint8 DigestType uint8 Digest string `dns:"hex"` } func (rr *RR_TA) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TA) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.DigestType)) + " " + strings.ToUpper(rr.Digest) } func (rr *RR_TA) Len() int { return rr.Hdr.Len() + 4 + len(rr.Digest)/2 } func (rr *RR_TA) Copy() RR { return &RR_TA{*rr.Hdr.CopyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } type RR_TALINK struct { Hdr RR_Header PreviousName string `dns:"domain"` NextName string `dns:"domain"` } func (rr *RR_TALINK) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TALINK) String() string { return rr.Hdr.String() + " " + rr.PreviousName + " " + rr.NextName } func (rr *RR_TALINK) Len() int { return rr.Hdr.Len() + len(rr.PreviousName) + len(rr.NextName) + 2 } func (rr *RR_TALINK) Copy() RR { return &RR_TALINK{*rr.Hdr.CopyHeader(), rr.PreviousName, rr.NextName} } type RR_SSHFP struct { Hdr RR_Header Algorithm uint8 Type uint8 FingerPrint string `dns:"hex"` } func (rr *RR_SSHFP) Header() *RR_Header { return &rr.Hdr } func (rr *RR_SSHFP) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.Type)) + " " + strings.ToUpper(rr.FingerPrint) } func (rr *RR_SSHFP) Len() int { return rr.Hdr.Len() + 2 + len(rr.FingerPrint)/2 } func (rr *RR_SSHFP) Copy() RR { return &RR_SSHFP{*rr.Hdr.CopyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} } type RR_IPSECKEY struct { Hdr RR_Header Precedence uint8 GatewayType uint8 Algorithm uint8 Gateway string `dns:"ipseckey"` PublicKey 
string `dns:"base64"` } func (rr *RR_IPSECKEY) Header() *RR_Header { return &rr.Hdr } func (rr *RR_IPSECKEY) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + " " + strconv.Itoa(int(rr.GatewayType)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + rr.Gateway + " " + rr.PublicKey } func (rr *RR_IPSECKEY) Len() int { return rr.Hdr.Len() + 3 + len(rr.Gateway) + 1 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) } func (rr *RR_IPSECKEY) Copy() RR { return &RR_IPSECKEY{*rr.Hdr.CopyHeader(), rr.Precedence, rr.GatewayType, rr.Algorithm, rr.Gateway, rr.PublicKey} } type RR_DNSKEY struct { Hdr RR_Header Flags uint16 Protocol uint8 Algorithm uint8 PublicKey string `dns:"base64"` } func (rr *RR_DNSKEY) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DNSKEY) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + " " + strconv.Itoa(int(rr.Protocol)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + rr.PublicKey } func (rr *RR_DNSKEY) Len() int { return rr.Hdr.Len() + 4 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) } func (rr *RR_DNSKEY) Copy() RR { return &RR_DNSKEY{*rr.Hdr.CopyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} } type RR_NSEC3 struct { Hdr RR_Header Hash uint8 Flags uint8 Iterations uint16 SaltLength uint8 Salt string `dns:"size-hex"` HashLength uint8 NextDomain string `dns:"size-base32"` TypeBitMap []uint16 `dns:"nsec"` } func (rr *RR_NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NSEC3) String() string { s := rr.Hdr.String() s += strconv.Itoa(int(rr.Hash)) + " " + strconv.Itoa(int(rr.Flags)) + " " + strconv.Itoa(int(rr.Iterations)) + " " + saltString(rr.Salt) + " " + rr.NextDomain for i := 0; i < len(rr.TypeBitMap); i++ { if _, ok := Rr_str[rr.TypeBitMap[i]]; ok { s += " " + Rr_str[rr.TypeBitMap[i]] } else { s += " " + "TYPE" + strconv.Itoa(int(rr.TypeBitMap[i])) } } return s } func (rr *RR_NSEC3) Len() int { return rr.Hdr.Len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + 32 
// TODO: +32 is MAX type bit map } func (rr *RR_NSEC3) Copy() RR { return &RR_NSEC3{*rr.Hdr.CopyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, rr.TypeBitMap} } type RR_NSEC3PARAM struct { Hdr RR_Header Hash uint8 Flags uint8 Iterations uint16 SaltLength uint8 Salt string `dns:"hex"` } func (rr *RR_NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NSEC3PARAM) String() string { s := rr.Hdr.String() s += strconv.Itoa(int(rr.Hash)) + " " + strconv.Itoa(int(rr.Flags)) + " " + strconv.Itoa(int(rr.Iterations)) + " " + saltString(rr.Salt) return s } func (rr *RR_NSEC3PARAM) Len() int { return rr.Hdr.Len() + 2 + 4 + 1 + len(rr.Salt)/2 } func (rr *RR_NSEC3PARAM) Copy() RR { return &RR_NSEC3PARAM{*rr.Hdr.CopyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} } type RR_TKEY struct { Hdr RR_Header Algorithm string `dns:"domain-name"` Inception uint32 Expiration uint32 Mode uint16 Error uint16 KeySize uint16 Key string OtherLen uint16 OtherData string } func (rr *RR_TKEY) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TKEY) String() string { // It has no presentation format return "" } func (rr *RR_TKEY) Len() int { return rr.Hdr.Len() + len(rr.Algorithm) + 1 + 4 + 4 + 6 + len(rr.Key) + 2 + len(rr.OtherData) } func (rr *RR_TKEY) Copy() RR { return &RR_TKEY{*rr.Hdr.CopyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} } // RR_RFC3597 representes an unknown RR. 
type RR_RFC3597 struct { Hdr RR_Header Rdata string `dns:"hex"` } func (rr *RR_RFC3597) Header() *RR_Header { return &rr.Hdr } func (rr *RR_RFC3597) String() string { s := rr.Hdr.String() s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata return s } func (rr *RR_RFC3597) Len() int { return rr.Hdr.Len() + len(rr.Rdata)/2 } func (rr *RR_RFC3597) Copy() RR { return &RR_RFC3597{*rr.Hdr.CopyHeader(), rr.Rdata} } type RR_URI struct { Hdr RR_Header Priority uint16 Weight uint16 Target string `dns:"txt"` } func (rr *RR_URI) Header() *RR_Header { return &rr.Hdr } func (rr *RR_URI) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + " " + strconv.Itoa(int(rr.Weight)) + " " + rr.Target } func (rr *RR_URI) Len() int { return rr.Hdr.Len() + 4 + len(rr.Target) + 1 } func (rr *RR_URI) Copy() RR { return &RR_URI{*rr.Hdr.CopyHeader(), rr.Weight, rr.Priority, rr.Target} } type RR_DHCID struct { Hdr RR_Header Digest string `dns:"base64"` } func (rr *RR_DHCID) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DHCID) String() string { return rr.Hdr.String() + rr.Digest } func (rr *RR_DHCID) Len() int { return rr.Hdr.Len() + base64.StdEncoding.DecodedLen(len(rr.Digest)) } func (rr *RR_DHCID) Copy() RR { return &RR_DHCID{*rr.Hdr.CopyHeader(), rr.Digest} } type RR_TLSA struct { Hdr RR_Header Usage uint8 Selector uint8 MatchingType uint8 Certificate string `dns:"hex"` } func (rr *RR_TLSA) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TLSA) String() string { return rr.Hdr.String() + " " + strconv.Itoa(int(rr.Usage)) + " " + strconv.Itoa(int(rr.Selector)) + " " + strconv.Itoa(int(rr.MatchingType)) + " " + rr.Certificate } func (rr *RR_TLSA) Len() int { return rr.Hdr.Len() + 3 + len(rr.Certificate)/2 } func (rr *RR_TLSA) Copy() RR { return &RR_TLSA{*rr.Hdr.CopyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} } type RR_HIP struct { Hdr RR_Header HitLength uint8 PublicKeyAlgorithm uint8 PublicKeyLength uint16 Hit string `dns:"hex"` 
PublicKey string `dns:"base64"` RendezvousServers []string `dns:"domain-name"` } func (rr *RR_HIP) Header() *RR_Header { return &rr.Hdr } func (rr *RR_HIP) String() string { s := rr.Hdr.String() + " " + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + " " + rr.Hit + " " + rr.PublicKey for _, d := range rr.RendezvousServers { s += " " + d } return s } func (rr *RR_HIP) Len() int { l := rr.Hdr.Len() + 4 + len(rr.Hit)/2 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) for _, d := range rr.RendezvousServers { l += len(d) + 1 } return l } func (rr *RR_HIP) Copy() RR { return &RR_HIP{*rr.Hdr.CopyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, rr.RendezvousServers} } type RR_WKS struct { Hdr RR_Header Address net.IP `dns:"a"` Protocol uint8 BitMap []uint16 `dns:"wks"` } func (rr *RR_WKS) Header() *RR_Header { return &rr.Hdr } func (rr *RR_WKS) String() string { s := rr.Hdr.String() + rr.Address.String() for i := 0; i < len(rr.BitMap); i++ { // should lookup the port s += " " + strconv.Itoa(int(rr.BitMap[i])) } return s } func (rr *RR_WKS) Len() int { return rr.Hdr.Len() + net.IPv4len + 1 } func (rr *RR_WKS) Copy() RR { return &RR_WKS{*rr.Hdr.CopyHeader(), rr.Address, rr.Protocol, rr.BitMap} } // TimeToString translates the RRSIG's incep. and expir. times to the // string representation used when printing the record. // It takes serial arithmetic (RFC 1982) into account. func TimeToString(t uint32) string { mod := ((int64(t) - time.Now().Unix()) / year68) - 1 if mod < 0 { mod = 0 } ti := time.Unix(int64(t)-(mod*year68), 0).UTC() return ti.Format("20060102150405") } // StringToTime translates the RRSIG's incep. and expir. times from // string values like "20110403154150" to an 32 bit integer. // It takes serial arithmetic (RFC 1982) into account. 
func StringToTime(s string) (uint32, error) { t, e := time.Parse("20060102150405", s) if e != nil { return 0, e } mod := (t.Unix() / year68) - 1 if mod < 0 { mod = 0 } return uint32(t.Unix() - (mod * year68)), nil } // saltString converts a NSECX salt to uppercase and // returns "-" when it is empty func saltString(s string) string { if len(s) == 0 { return "-" } return strings.ToUpper(s) } func cmToString(mantissa, exponent uint8) string { switch exponent { case 0, 1: if exponent == 1 { mantissa *= 10 } return fmt.Sprintf("%.02f", float32(mantissa)) default: s := fmt.Sprintf("%d", mantissa) for i := uint8(0); i < exponent-2; i++ { s += "0" } return s } panic("dns: not reached") } // Map of constructors for each RR wire type. var rr_mk = map[uint16]func() RR{ TypeCNAME: func() RR { return new(RR_CNAME) }, TypeHINFO: func() RR { return new(RR_HINFO) }, TypeMB: func() RR { return new(RR_MB) }, TypeMG: func() RR { return new(RR_MG) }, TypeMD: func() RR { return new(RR_MD) }, TypeMF: func() RR { return new(RR_MF) }, TypeMINFO: func() RR { return new(RR_MINFO) }, TypeRP: func() RR { return new(RR_RP) }, TypeAFSDB: func() RR { return new(RR_AFSDB) }, TypeMR: func() RR { return new(RR_MR) }, TypeMX: func() RR { return new(RR_MX) }, TypeNS: func() RR { return new(RR_NS) }, TypePTR: func() RR { return new(RR_PTR) }, TypeSOA: func() RR { return new(RR_SOA) }, TypeRT: func() RR { return new(RR_RT) }, TypeTXT: func() RR { return new(RR_TXT) }, TypeSRV: func() RR { return new(RR_SRV) }, TypeNAPTR: func() RR { return new(RR_NAPTR) }, TypeDNAME: func() RR { return new(RR_DNAME) }, TypeA: func() RR { return new(RR_A) }, TypeWKS: func() RR { return new(RR_WKS) }, TypeAAAA: func() RR { return new(RR_AAAA) }, TypeLOC: func() RR { return new(RR_LOC) }, TypeOPT: func() RR { return new(RR_OPT) }, TypeDS: func() RR { return new(RR_DS) }, TypeCERT: func() RR { return new(RR_CERT) }, TypeKX: func() RR { return new(RR_KX) }, TypeSPF: func() RR { return new(RR_SPF) }, TypeTALINK: func() RR { 
return new(RR_TALINK) }, TypeSSHFP: func() RR { return new(RR_SSHFP) }, TypeRRSIG: func() RR { return new(RR_RRSIG) }, TypeNSEC: func() RR { return new(RR_NSEC) }, TypeDNSKEY: func() RR { return new(RR_DNSKEY) }, TypeNSEC3: func() RR { return new(RR_NSEC3) }, TypeDHCID: func() RR { return new(RR_DHCID) }, TypeNSEC3PARAM: func() RR { return new(RR_NSEC3PARAM) }, TypeTKEY: func() RR { return new(RR_TKEY) }, TypeTSIG: func() RR { return new(RR_TSIG) }, TypeURI: func() RR { return new(RR_URI) }, TypeTA: func() RR { return new(RR_TA) }, TypeDLV: func() RR { return new(RR_DLV) }, TypeTLSA: func() RR { return new(RR_TLSA) }, TypeHIP: func() RR { return new(RR_HIP) }, } add more time functions // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Extended and bugfixes by Miek Gieben package dns import ( "encoding/base64" "fmt" "net" "strconv" "strings" "time" ) // Packet formats // Wire constants and supported types. 
const (
	// valid RR_Header.Rrtype and Question.qtype
	TypeA          uint16 = 1
	TypeNS         uint16 = 2
	TypeMD         uint16 = 3
	TypeMF         uint16 = 4
	TypeCNAME      uint16 = 5
	TypeSOA        uint16 = 6
	TypeMB         uint16 = 7
	TypeMG         uint16 = 8
	TypeMR         uint16 = 9
	TypeNULL       uint16 = 10
	TypeWKS        uint16 = 11
	TypePTR        uint16 = 12
	TypeHINFO      uint16 = 13
	TypeMINFO      uint16 = 14
	TypeMX         uint16 = 15
	TypeTXT        uint16 = 16
	TypeRP         uint16 = 17
	TypeAFSDB      uint16 = 18
	TypeRT         uint16 = 21
	TypeSIG        uint16 = 24
	TypeKEY        uint16 = 25
	TypeAAAA       uint16 = 28
	TypeLOC        uint16 = 29
	TypeNXT        uint16 = 30
	TypeSRV        uint16 = 33
	TypeNAPTR      uint16 = 35
	TypeKX         uint16 = 36
	TypeCERT       uint16 = 37
	TypeDNAME      uint16 = 39
	TypeOPT        uint16 = 41 // EDNS
	TypeDS         uint16 = 43
	TypeSSHFP      uint16 = 44
	TypeIPSECKEY   uint16 = 45
	TypeRRSIG      uint16 = 46
	TypeNSEC       uint16 = 47
	TypeDNSKEY     uint16 = 48
	TypeDHCID      uint16 = 49
	TypeNSEC3      uint16 = 50
	TypeNSEC3PARAM uint16 = 51
	TypeTLSA       uint16 = 52
	TypeHIP        uint16 = 55
	TypeTALINK     uint16 = 58
	TypeSPF        uint16 = 99
	TypeTKEY       uint16 = 249
	TypeTSIG       uint16 = 250

	// valid Question.Qtype only (meta-queries, not record types)
	TypeIXFR  uint16 = 251
	TypeAXFR  uint16 = 252
	TypeMAILB uint16 = 253
	TypeMAILA uint16 = 254
	TypeANY   uint16 = 255
	TypeURI   uint16 = 256
	TypeTA    uint16 = 32768
	TypeDLV   uint16 = 32769

	// valid Question.Qclass
	ClassINET   = 1
	ClassCSNET  = 2
	ClassCHAOS  = 3
	ClassHESIOD = 4
	ClassNONE   = 254
	ClassANY    = 255

	// Msg.rcode (response codes; values above 15 travel in TSIG/TKEY rdata)
	RcodeSuccess        = 0
	RcodeFormatError    = 1
	RcodeServerFailure  = 2
	RcodeNameError      = 3
	RcodeNotImplemented = 4
	RcodeRefused        = 5
	RcodeYXDomain       = 6
	RcodeYXRrset        = 7
	RcodeNXRrset        = 8
	RcodeNotAuth        = 9
	RcodeNotZone        = 10
	RcodeBadSig         = 16 // TSIG
	RcodeBadKey         = 17
	RcodeBadTime        = 18
	RcodeBadMode        = 19 // TKEY
	RcodeBadName        = 20
	RcodeBadAlg         = 21
	RcodeBadTrunc       = 22 // TSIG

	// Opcode
	OpcodeQuery  = 0
	OpcodeIQuery = 1
	OpcodeStatus = 2
	// There is no 3
	OpcodeNotify = 4
	OpcodeUpdate = 5
)

// The wire format for the DNS packet header.
type Header struct { Id uint16 Bits uint16 Qdcount, Ancount, Nscount, Arcount uint16 } const ( // Header.Bits _QR = 1 << 15 // query/response (response=1) _AA = 1 << 10 // authoritative _TC = 1 << 9 // truncated _RD = 1 << 8 // recursion desired _RA = 1 << 7 // recursion available _Z = 1 << 6 // Z _AD = 1 << 5 // authticated data _CD = 1 << 4 // checking disabled _LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. ) // DNS queries. type Question struct { Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) Qtype uint16 Qclass uint16 } func (q *Question) String() (s string) { // prefix with ; (as in dig) if len(q.Name) == 0 { s = ";.\t" // root label } else { s = ";" + q.Name + "\t" } if _, ok := Class_str[q.Qclass]; ok { s += Class_str[q.Qclass] + "\t" } else { s += "CLASS" + strconv.Itoa(int(q.Qtype)) } if _, ok := Rr_str[q.Qtype]; ok { s += " " + Rr_str[q.Qtype] } else { s += " " + "TYPE" + strconv.Itoa(int(q.Qtype)) } return s } func (q *Question) Len() int { l := len(q.Name) + 1 return l + 4 } type RR_ANY struct { Hdr RR_Header // Does not have any rdata } func (rr *RR_ANY) Header() *RR_Header { return &rr.Hdr } func (rr *RR_ANY) String() string { return rr.Hdr.String() } func (rr *RR_ANY) Len() int { return rr.Hdr.Len() } func (rr *RR_ANY) Copy() RR { return &RR_ANY{*rr.Hdr.CopyHeader()} } type RR_CNAME struct { Hdr RR_Header Target string `dns:"cdomain-name"` } func (rr *RR_CNAME) Header() *RR_Header { return &rr.Hdr } func (rr *RR_CNAME) String() string { return rr.Hdr.String() + rr.Target } func (rr *RR_CNAME) Len() int { l := len(rr.Target) + 1 return rr.Hdr.Len() + l } func (rr *RR_CNAME) Copy() RR { return &RR_CNAME{*rr.Hdr.CopyHeader(), rr.Target} } type RR_HINFO struct { Hdr RR_Header Cpu string Os string } func (rr *RR_HINFO) Header() *RR_Header { return &rr.Hdr } func (rr *RR_HINFO) String() string { return rr.Hdr.String() + rr.Cpu + " " + rr.Os } func (rr *RR_HINFO) Len() int { return rr.Hdr.Len() + len(rr.Cpu) + 
len(rr.Os) } func (rr *RR_HINFO) Copy() RR { return &RR_HINFO{*rr.Hdr.CopyHeader(), rr.Cpu, rr.Os} } type RR_MB struct { Hdr RR_Header Mb string `dns:"cdomain-name"` } func (rr *RR_MB) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MB) String() string { return rr.Hdr.String() + rr.Mb } func (rr *RR_MB) Len() int { l := len(rr.Mb) + 1 return rr.Hdr.Len() + l } func (rr *RR_MB) Copy() RR { return &RR_MB{*rr.Hdr.CopyHeader(), rr.Mb} } type RR_MG struct { Hdr RR_Header Mg string `dns:"cdomain-name"` } func (rr *RR_MG) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MG) String() string { return rr.Hdr.String() + rr.Mg } func (rr *RR_MG) Len() int { l := len(rr.Mg) + 1 return rr.Hdr.Len() + l } func (rr *RR_MG) Copy() RR { return &RR_MG{*rr.Hdr.CopyHeader(), rr.Mg} } type RR_MINFO struct { Hdr RR_Header Rmail string `dns:"cdomain-name"` Email string `dns:"cdomain-name"` } func (rr *RR_MINFO) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MINFO) String() string { return rr.Hdr.String() + rr.Rmail + " " + rr.Email } func (rr *RR_MINFO) Len() int { l := len(rr.Rmail) + 1 n := len(rr.Email) + 1 return rr.Hdr.Len() + l + n } func (rr *RR_MINFO) Copy() RR { return &RR_MINFO{*rr.Hdr.CopyHeader(), rr.Rmail, rr.Email} } type RR_MR struct { Hdr RR_Header Mr string `dns:"cdomain-name"` } func (rr *RR_MR) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MR) String() string { return rr.Hdr.String() + rr.Mr } func (rr *RR_MR) Len() int { l := len(rr.Mr) + 1 return rr.Hdr.Len() + l } func (rr *RR_MR) Copy() RR { return &RR_MR{*rr.Hdr.CopyHeader(), rr.Mr} } type RR_MF struct { Hdr RR_Header Mf string `dns:"cdomain-name"` } func (rr *RR_MF) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MF) String() string { return rr.Hdr.String() + " " + rr.Mf } func (rr *RR_MF) Len() int { return rr.Hdr.Len() + len(rr.Mf) + 1 } func (rr *RR_MF) Copy() RR { return &RR_MF{*rr.Hdr.CopyHeader(), rr.Mf} } type RR_MD struct { Hdr RR_Header Md string `dns:"cdomain-name"` } func (rr 
*RR_MD) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MD) String() string { return rr.Hdr.String() + " " + rr.Md } func (rr *RR_MD) Len() int { return rr.Hdr.Len() + len(rr.Md) + 1 } func (rr *RR_MD) Copy() RR { return &RR_MD{*rr.Hdr.CopyHeader(), rr.Md} } type RR_MX struct { Hdr RR_Header Pref uint16 Mx string `dns:"cdomain-name"` } func (rr *RR_MX) Header() *RR_Header { return &rr.Hdr } func (rr *RR_MX) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Pref)) + " " + rr.Mx } func (rr *RR_MX) Len() int { l := len(rr.Mx) + 1 return rr.Hdr.Len() + l + 2 } func (rr *RR_MX) Copy() RR { return &RR_MX{*rr.Hdr.CopyHeader(), rr.Pref, rr.Mx} } type RR_AFSDB struct { Hdr RR_Header Subtype uint16 Hostname string `dns:"cdomain-name"` } func (rr *RR_AFSDB) Header() *RR_Header { return &rr.Hdr } func (rr *RR_AFSDB) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + rr.Hostname } func (rr *RR_AFSDB) Len() int { l := len(rr.Hostname) + 1 return rr.Hdr.Len() + l + 2 } func (rr *RR_AFSDB) Copy() RR { return &RR_AFSDB{*rr.Hdr.CopyHeader(), rr.Subtype, rr.Hostname} } type RR_RT struct { Hdr RR_Header Preference uint16 Host string `dns:"cdomain-name"` } func (rr *RR_RT) Header() *RR_Header { return &rr.Hdr } func (rr *RR_RT) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + rr.Host } func (rr *RR_RT) Len() int { l := len(rr.Host) + 1 return rr.Hdr.Len() + l + 2 } func (rr *RR_RT) Copy() RR { return &RR_RT{*rr.Hdr.CopyHeader(), rr.Preference, rr.Host} } type RR_NS struct { Hdr RR_Header Ns string `dns:"cdomain-name"` } func (rr *RR_NS) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NS) String() string { return rr.Hdr.String() + rr.Ns } func (rr *RR_NS) Len() int { l := len(rr.Ns) + 1 return rr.Hdr.Len() + l } func (rr *RR_NS) Copy() RR { return &RR_NS{*rr.Hdr.CopyHeader(), rr.Ns} } type RR_PTR struct { Hdr RR_Header Ptr string `dns:"cdomain-name"` } func (rr *RR_PTR) Header() *RR_Header { return 
&rr.Hdr } func (rr *RR_PTR) String() string { return rr.Hdr.String() + rr.Ptr } func (rr *RR_PTR) Len() int { l := len(rr.Ptr) + 1 return rr.Hdr.Len() + l } func (rr *RR_PTR) Copy() RR { return &RR_PTR{*rr.Hdr.CopyHeader(), rr.Ptr} } type RR_RP struct { Hdr RR_Header Mbox string `dns:"domain-name"` Txt string `dns:"domain-name"` } func (rr *RR_RP) Header() *RR_Header { return &rr.Hdr } func (rr *RR_RP) String() string { return rr.Hdr.String() + rr.Mbox + " " + rr.Txt } func (rr *RR_RP) Len() int { return rr.Hdr.Len() + len(rr.Mbox) + 1 + len(rr.Txt) + 1 } func (rr *RR_RP) Copy() RR { return &RR_RP{*rr.Hdr.CopyHeader(), rr.Mbox, rr.Txt} } type RR_SOA struct { Hdr RR_Header Ns string `dns:"cdomain-name"` Mbox string `dns:"cdomain-name"` Serial uint32 Refresh uint32 Retry uint32 Expire uint32 Minttl uint32 } func (rr *RR_SOA) Header() *RR_Header { return &rr.Hdr } func (rr *RR_SOA) String() string { return rr.Hdr.String() + rr.Ns + " " + rr.Mbox + " " + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.FormatInt(int64(rr.Refresh), 10) + " " + strconv.FormatInt(int64(rr.Retry), 10) + " " + strconv.FormatInt(int64(rr.Expire), 10) + " " + strconv.FormatInt(int64(rr.Minttl), 10) } func (rr *RR_SOA) Len() int { l := len(rr.Ns) + 1 n := len(rr.Mbox) + 1 return rr.Hdr.Len() + l + n + 20 } func (rr *RR_SOA) Copy() RR { return &RR_SOA{*rr.Hdr.CopyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} } type RR_TXT struct { Hdr RR_Header Txt []string `dns:"txt"` } func (rr *RR_TXT) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TXT) String() string { s := rr.Hdr.String() for i, s1 := range rr.Txt { if i > 0 { s += " " + strconv.QuoteToASCII(s1) } else { s += strconv.QuoteToASCII(s1) } } return s } func (rr *RR_TXT) Len() int { l := rr.Hdr.Len() for _, t := range rr.Txt { l += len(t) } return l } func (rr *RR_TXT) Copy() RR { return &RR_TXT{*rr.Hdr.CopyHeader(), rr.Txt} } type RR_SPF struct { Hdr RR_Header Txt []string `dns:"txt"` } func (rr 
*RR_SPF) Header() *RR_Header { return &rr.Hdr } func (rr *RR_SPF) String() string { s := rr.Hdr.String() for i, s1 := range rr.Txt { if i > 0 { s += " " + "\"" + s1 + "\"" } else { s += "\"" + s1 + "\"" } } return s } func (rr *RR_SPF) Len() int { l := rr.Hdr.Len() for _, t := range rr.Txt { l += len(t) } return l } func (rr *RR_SPF) Copy() RR { return &RR_SPF{*rr.Hdr.CopyHeader(), rr.Txt} } type RR_SRV struct { Hdr RR_Header Priority uint16 Weight uint16 Port uint16 Target string `dns:"domain-name"` } func (rr *RR_SRV) Header() *RR_Header { return &rr.Hdr } func (rr *RR_SRV) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + " " + strconv.Itoa(int(rr.Weight)) + " " + strconv.Itoa(int(rr.Port)) + " " + rr.Target } func (rr *RR_SRV) Len() int { l := len(rr.Target) + 1 return rr.Hdr.Len() + l + 6 } func (rr *RR_SRV) Copy() RR { return &RR_SRV{*rr.Hdr.CopyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} } type RR_NAPTR struct { Hdr RR_Header Order uint16 Pref uint16 Flags string Service string Regexp string Replacement string `dns:"domain-name"` } func (rr *RR_NAPTR) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NAPTR) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Order)) + " " + strconv.Itoa(int(rr.Pref)) + " " + "\"" + rr.Flags + "\" " + "\"" + rr.Service + "\" " + "\"" + rr.Regexp + "\" " + rr.Replacement } func (rr *RR_NAPTR) Len() int { return rr.Hdr.Len() + 4 + len(rr.Flags) + len(rr.Service) + len(rr.Regexp) + len(rr.Replacement) + 1 } func (rr *RR_NAPTR) Copy() RR { return &RR_NAPTR{*rr.Hdr.CopyHeader(), rr.Order, rr.Pref, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} } // See RFC 4398. 
type RR_CERT struct { Hdr RR_Header Type uint16 KeyTag uint16 Algorithm uint8 Certificate string `dns:"base64"` } func (rr *RR_CERT) Header() *RR_Header { return &rr.Hdr } func (rr *RR_CERT) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Type)) + " " + strconv.Itoa(int(rr.KeyTag)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + rr.Certificate } func (rr *RR_CERT) Len() int { return rr.Hdr.Len() + 5 + base64.StdEncoding.DecodedLen(len(rr.Certificate)) } func (rr *RR_CERT) Copy() RR { return &RR_CERT{*rr.Hdr.CopyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} } // See RFC 2672. type RR_DNAME struct { Hdr RR_Header Target string `dns:"domain-name"` } func (rr *RR_DNAME) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DNAME) String() string { return rr.Hdr.String() + rr.Target } func (rr *RR_DNAME) Len() int { l := len(rr.Target) + 1 return rr.Hdr.Len() + l } func (rr *RR_DNAME) Copy() RR { return &RR_DNAME{*rr.Hdr.CopyHeader(), rr.Target} } type RR_A struct { Hdr RR_Header A net.IP `dns:"a"` } func (rr *RR_A) Header() *RR_Header { return &rr.Hdr } func (rr *RR_A) String() string { return rr.Hdr.String() + rr.A.String() } func (rr *RR_A) Len() int { return rr.Hdr.Len() + net.IPv4len } func (rr *RR_A) Copy() RR { return &RR_A{*rr.Hdr.CopyHeader(), rr.A} } type RR_AAAA struct { Hdr RR_Header AAAA net.IP `dns:"aaaa"` } func (rr *RR_AAAA) Header() *RR_Header { return &rr.Hdr } func (rr *RR_AAAA) String() string { return rr.Hdr.String() + rr.AAAA.String() } func (rr *RR_AAAA) Len() int { return rr.Hdr.Len() + net.IPv6len } func (rr *RR_AAAA) Copy() RR { return &RR_AAAA{*rr.Hdr.CopyHeader(), rr.AAAA} } type RR_LOC struct { Hdr RR_Header Version uint8 Size uint8 HorizPre uint8 VertPre uint8 Latitude uint32 Longitude uint32 Altitude uint32 } func (rr *RR_LOC) Header() *RR_Header { return &rr.Hdr } func (rr *RR_LOC) String() string { s := rr.Hdr.String() // Copied from ldns // Latitude lat := rr.Latitude north := "N" if lat > _LOC_EQUATOR { lat = 
lat - _LOC_EQUATOR } else { north = "S" lat = _LOC_EQUATOR - lat } h := lat / (1000 * 60 * 60) lat = lat % (1000 * 60 * 60) m := lat / (1000 * 60) lat = lat % (1000 * 60) s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float32(lat) / 1000), north) // Longitude lon := rr.Longitude east := "E" if lon > _LOC_EQUATOR { lon = lon - _LOC_EQUATOR } else { east = "W" lon = _LOC_EQUATOR - lon } h = lon / (1000 * 60 * 60) lon = lon % (1000 * 60 * 60) m = lon / (1000 * 60) lon = lon % (1000 * 60) s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float32(lon) / 1000), east) s1 := rr.Altitude / 100.00 s1 -= 100000 if rr.Altitude%100 == 0 { s += fmt.Sprintf("%.2fm ", float32(s1)) } else { s += fmt.Sprintf("%.0fm ", float32(s1)) } s += cmToString((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " s += cmToString((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " s += cmToString((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" return s } func (rr *RR_LOC) Len() int { return rr.Hdr.Len() + 4 + 12 } func (rr *RR_LOC) Copy() RR { return &RR_LOC{*rr.Hdr.CopyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} } type RR_RRSIG struct { Hdr RR_Header TypeCovered uint16 Algorithm uint8 Labels uint8 OrigTtl uint32 Expiration uint32 Inception uint32 KeyTag uint16 SignerName string `dns:"domain-name"` Signature string `dns:"base64"` } func (rr *RR_RRSIG) Header() *RR_Header { return &rr.Hdr } func (rr *RR_RRSIG) String() string { return rr.Hdr.String() + Rr_str[rr.TypeCovered] + " " + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.Labels)) + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + " " + TimeToString(rr.Expiration) + " " + TimeToString(rr.Inception) + " " + strconv.Itoa(int(rr.KeyTag)) + " " + rr.SignerName + " " + rr.Signature } func (rr *RR_RRSIG) Len() int { return rr.Hdr.Len() + len(rr.SignerName) + 1 + base64.StdEncoding.DecodedLen(len(rr.Signature)) + 18 } func (rr *RR_RRSIG) Copy() RR { return &RR_RRSIG{*rr.Hdr.CopyHeader(), rr.TypeCovered, 
rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} } type RR_NSEC struct { Hdr RR_Header NextDomain string `dns:"domain-name"` TypeBitMap []uint16 `dns:"nsec"` } func (rr *RR_NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NSEC) String() string { s := rr.Hdr.String() + rr.NextDomain for i := 0; i < len(rr.TypeBitMap); i++ { if _, ok := Rr_str[rr.TypeBitMap[i]]; ok { s += " " + Rr_str[rr.TypeBitMap[i]] } else { s += " " + "TYPE" + strconv.Itoa(int(rr.TypeBitMap[i])) } } return s } func (rr *RR_NSEC) Len() int { l := len(rr.NextDomain) + 1 return rr.Hdr.Len() + l + 32 + 1 // TODO: +32 is max type bitmap } func (rr *RR_NSEC) Copy() RR { return &RR_NSEC{*rr.Hdr.CopyHeader(), rr.NextDomain, rr.TypeBitMap} } type RR_DS struct { Hdr RR_Header KeyTag uint16 Algorithm uint8 DigestType uint8 Digest string `dns:"hex"` } func (rr *RR_DS) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DS) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.DigestType)) + " " + strings.ToUpper(rr.Digest) } func (rr *RR_DS) Len() int { return rr.Hdr.Len() + 4 + len(rr.Digest)/2 } func (rr *RR_DS) Copy() RR { return &RR_DS{*rr.Hdr.CopyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } type RR_DLV struct { Hdr RR_Header KeyTag uint16 Algorithm uint8 DigestType uint8 Digest string `dns:"hex"` } func (rr *RR_DLV) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DLV) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.DigestType)) + " " + strings.ToUpper(rr.Digest) } func (rr *RR_DLV) Len() int { return rr.Hdr.Len() + 4 + len(rr.Digest)/2 } func (rr *RR_DLV) Copy() RR { return &RR_DLV{*rr.Hdr.CopyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } type RR_KX struct { Hdr RR_Header Pref uint16 Exchanger string `dns:"domain-name"` } 
func (rr *RR_KX) Header() *RR_Header { return &rr.Hdr } func (rr *RR_KX) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Pref)) + " " + rr.Exchanger } func (rr *RR_KX) Len() int { return 0 } func (rr *RR_KX) Copy() RR { return &RR_KX{*rr.Hdr.CopyHeader(), rr.Pref, rr.Exchanger} } type RR_TA struct { Hdr RR_Header KeyTag uint16 Algorithm uint8 DigestType uint8 Digest string `dns:"hex"` } func (rr *RR_TA) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TA) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.DigestType)) + " " + strings.ToUpper(rr.Digest) } func (rr *RR_TA) Len() int { return rr.Hdr.Len() + 4 + len(rr.Digest)/2 } func (rr *RR_TA) Copy() RR { return &RR_TA{*rr.Hdr.CopyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } type RR_TALINK struct { Hdr RR_Header PreviousName string `dns:"domain"` NextName string `dns:"domain"` } func (rr *RR_TALINK) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TALINK) String() string { return rr.Hdr.String() + " " + rr.PreviousName + " " + rr.NextName } func (rr *RR_TALINK) Len() int { return rr.Hdr.Len() + len(rr.PreviousName) + len(rr.NextName) + 2 } func (rr *RR_TALINK) Copy() RR { return &RR_TALINK{*rr.Hdr.CopyHeader(), rr.PreviousName, rr.NextName} } type RR_SSHFP struct { Hdr RR_Header Algorithm uint8 Type uint8 FingerPrint string `dns:"hex"` } func (rr *RR_SSHFP) Header() *RR_Header { return &rr.Hdr } func (rr *RR_SSHFP) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + " " + strconv.Itoa(int(rr.Type)) + " " + strings.ToUpper(rr.FingerPrint) } func (rr *RR_SSHFP) Len() int { return rr.Hdr.Len() + 2 + len(rr.FingerPrint)/2 } func (rr *RR_SSHFP) Copy() RR { return &RR_SSHFP{*rr.Hdr.CopyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} } type RR_IPSECKEY struct { Hdr RR_Header Precedence uint8 GatewayType uint8 Algorithm uint8 Gateway string `dns:"ipseckey"` PublicKey 
string `dns:"base64"` } func (rr *RR_IPSECKEY) Header() *RR_Header { return &rr.Hdr } func (rr *RR_IPSECKEY) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + " " + strconv.Itoa(int(rr.GatewayType)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + rr.Gateway + " " + rr.PublicKey } func (rr *RR_IPSECKEY) Len() int { return rr.Hdr.Len() + 3 + len(rr.Gateway) + 1 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) } func (rr *RR_IPSECKEY) Copy() RR { return &RR_IPSECKEY{*rr.Hdr.CopyHeader(), rr.Precedence, rr.GatewayType, rr.Algorithm, rr.Gateway, rr.PublicKey} } type RR_DNSKEY struct { Hdr RR_Header Flags uint16 Protocol uint8 Algorithm uint8 PublicKey string `dns:"base64"` } func (rr *RR_DNSKEY) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DNSKEY) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + " " + strconv.Itoa(int(rr.Protocol)) + " " + strconv.Itoa(int(rr.Algorithm)) + " " + rr.PublicKey } func (rr *RR_DNSKEY) Len() int { return rr.Hdr.Len() + 4 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) } func (rr *RR_DNSKEY) Copy() RR { return &RR_DNSKEY{*rr.Hdr.CopyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} } type RR_NSEC3 struct { Hdr RR_Header Hash uint8 Flags uint8 Iterations uint16 SaltLength uint8 Salt string `dns:"size-hex"` HashLength uint8 NextDomain string `dns:"size-base32"` TypeBitMap []uint16 `dns:"nsec"` } func (rr *RR_NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NSEC3) String() string { s := rr.Hdr.String() s += strconv.Itoa(int(rr.Hash)) + " " + strconv.Itoa(int(rr.Flags)) + " " + strconv.Itoa(int(rr.Iterations)) + " " + saltString(rr.Salt) + " " + rr.NextDomain for i := 0; i < len(rr.TypeBitMap); i++ { if _, ok := Rr_str[rr.TypeBitMap[i]]; ok { s += " " + Rr_str[rr.TypeBitMap[i]] } else { s += " " + "TYPE" + strconv.Itoa(int(rr.TypeBitMap[i])) } } return s } func (rr *RR_NSEC3) Len() int { return rr.Hdr.Len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + 32 
// TODO: +32 is MAX type bit map } func (rr *RR_NSEC3) Copy() RR { return &RR_NSEC3{*rr.Hdr.CopyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, rr.TypeBitMap} } type RR_NSEC3PARAM struct { Hdr RR_Header Hash uint8 Flags uint8 Iterations uint16 SaltLength uint8 Salt string `dns:"hex"` } func (rr *RR_NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *RR_NSEC3PARAM) String() string { s := rr.Hdr.String() s += strconv.Itoa(int(rr.Hash)) + " " + strconv.Itoa(int(rr.Flags)) + " " + strconv.Itoa(int(rr.Iterations)) + " " + saltString(rr.Salt) return s } func (rr *RR_NSEC3PARAM) Len() int { return rr.Hdr.Len() + 2 + 4 + 1 + len(rr.Salt)/2 } func (rr *RR_NSEC3PARAM) Copy() RR { return &RR_NSEC3PARAM{*rr.Hdr.CopyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} } type RR_TKEY struct { Hdr RR_Header Algorithm string `dns:"domain-name"` Inception uint32 Expiration uint32 Mode uint16 Error uint16 KeySize uint16 Key string OtherLen uint16 OtherData string } func (rr *RR_TKEY) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TKEY) String() string { // It has no presentation format return "" } func (rr *RR_TKEY) Len() int { return rr.Hdr.Len() + len(rr.Algorithm) + 1 + 4 + 4 + 6 + len(rr.Key) + 2 + len(rr.OtherData) } func (rr *RR_TKEY) Copy() RR { return &RR_TKEY{*rr.Hdr.CopyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} } // RR_RFC3597 representes an unknown RR. 
type RR_RFC3597 struct { Hdr RR_Header Rdata string `dns:"hex"` } func (rr *RR_RFC3597) Header() *RR_Header { return &rr.Hdr } func (rr *RR_RFC3597) String() string { s := rr.Hdr.String() s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata return s } func (rr *RR_RFC3597) Len() int { return rr.Hdr.Len() + len(rr.Rdata)/2 } func (rr *RR_RFC3597) Copy() RR { return &RR_RFC3597{*rr.Hdr.CopyHeader(), rr.Rdata} } type RR_URI struct { Hdr RR_Header Priority uint16 Weight uint16 Target string `dns:"txt"` } func (rr *RR_URI) Header() *RR_Header { return &rr.Hdr } func (rr *RR_URI) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + " " + strconv.Itoa(int(rr.Weight)) + " " + rr.Target } func (rr *RR_URI) Len() int { return rr.Hdr.Len() + 4 + len(rr.Target) + 1 } func (rr *RR_URI) Copy() RR { return &RR_URI{*rr.Hdr.CopyHeader(), rr.Weight, rr.Priority, rr.Target} } type RR_DHCID struct { Hdr RR_Header Digest string `dns:"base64"` } func (rr *RR_DHCID) Header() *RR_Header { return &rr.Hdr } func (rr *RR_DHCID) String() string { return rr.Hdr.String() + rr.Digest } func (rr *RR_DHCID) Len() int { return rr.Hdr.Len() + base64.StdEncoding.DecodedLen(len(rr.Digest)) } func (rr *RR_DHCID) Copy() RR { return &RR_DHCID{*rr.Hdr.CopyHeader(), rr.Digest} } type RR_TLSA struct { Hdr RR_Header Usage uint8 Selector uint8 MatchingType uint8 Certificate string `dns:"hex"` } func (rr *RR_TLSA) Header() *RR_Header { return &rr.Hdr } func (rr *RR_TLSA) String() string { return rr.Hdr.String() + " " + strconv.Itoa(int(rr.Usage)) + " " + strconv.Itoa(int(rr.Selector)) + " " + strconv.Itoa(int(rr.MatchingType)) + " " + rr.Certificate } func (rr *RR_TLSA) Len() int { return rr.Hdr.Len() + 3 + len(rr.Certificate)/2 } func (rr *RR_TLSA) Copy() RR { return &RR_TLSA{*rr.Hdr.CopyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} } type RR_HIP struct { Hdr RR_Header HitLength uint8 PublicKeyAlgorithm uint8 PublicKeyLength uint16 Hit string `dns:"hex"` 
PublicKey string `dns:"base64"` RendezvousServers []string `dns:"domain-name"` } func (rr *RR_HIP) Header() *RR_Header { return &rr.Hdr } func (rr *RR_HIP) String() string { s := rr.Hdr.String() + " " + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + " " + rr.Hit + " " + rr.PublicKey for _, d := range rr.RendezvousServers { s += " " + d } return s } func (rr *RR_HIP) Len() int { l := rr.Hdr.Len() + 4 + len(rr.Hit)/2 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) for _, d := range rr.RendezvousServers { l += len(d) + 1 } return l } func (rr *RR_HIP) Copy() RR { return &RR_HIP{*rr.Hdr.CopyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, rr.RendezvousServers} } type RR_WKS struct { Hdr RR_Header Address net.IP `dns:"a"` Protocol uint8 BitMap []uint16 `dns:"wks"` } func (rr *RR_WKS) Header() *RR_Header { return &rr.Hdr } func (rr *RR_WKS) String() string { s := rr.Hdr.String() + rr.Address.String() for i := 0; i < len(rr.BitMap); i++ { // should lookup the port s += " " + strconv.Itoa(int(rr.BitMap[i])) } return s } func (rr *RR_WKS) Len() int { return rr.Hdr.Len() + net.IPv4len + 1 } func (rr *RR_WKS) Copy() RR { return &RR_WKS{*rr.Hdr.CopyHeader(), rr.Address, rr.Protocol, rr.BitMap} } // TimeToString translates the RRSIG's incep. and expir. times to the // string representation used when printing the record. // It takes serial arithmetic (RFC 1982) into account. func TimeToString(t uint32) string { mod := ((int64(t) - time.Now().Unix()) / year68) - 1 if mod < 0 { mod = 0 } ti := time.Unix(int64(t)-(mod*year68), 0).UTC() return ti.Format("20060102150405") } // StringToTime translates the RRSIG's incep. and expir. times from // string values like "20110403154150" to an 32 bit integer. // It takes serial arithmetic (RFC 1982) into account. 
func StringToTime(s string) (uint32, error) { t, e := time.Parse("20060102150405", s) if e != nil { return 0, e } mod := (t.Unix() / year68) - 1 if mod < 0 { mod = 0 } return uint32(t.Unix() - (mod * year68)), nil } // TimeToUint32 translates a time.Time to a 32 bit value which // can be used as the RRSIG's inception or expiration times. func TimeToUint32(t time.Time) uint32 { // TODO(mg): rfc1982 mod := (t.Unix() / year68) - 1 if mod < 0 { mod = 0 } return uint32(t.Unix() - (mod * year68)) } // saltString converts a NSECX salt to uppercase and // returns "-" when it is empty func saltString(s string) string { if len(s) == 0 { return "-" } return strings.ToUpper(s) } func cmToString(mantissa, exponent uint8) string { switch exponent { case 0, 1: if exponent == 1 { mantissa *= 10 } return fmt.Sprintf("%.02f", float32(mantissa)) default: s := fmt.Sprintf("%d", mantissa) for i := uint8(0); i < exponent-2; i++ { s += "0" } return s } panic("dns: not reached") } // Map of constructors for each RR wire type. 
var rr_mk = map[uint16]func() RR{ TypeCNAME: func() RR { return new(RR_CNAME) }, TypeHINFO: func() RR { return new(RR_HINFO) }, TypeMB: func() RR { return new(RR_MB) }, TypeMG: func() RR { return new(RR_MG) }, TypeMD: func() RR { return new(RR_MD) }, TypeMF: func() RR { return new(RR_MF) }, TypeMINFO: func() RR { return new(RR_MINFO) }, TypeRP: func() RR { return new(RR_RP) }, TypeAFSDB: func() RR { return new(RR_AFSDB) }, TypeMR: func() RR { return new(RR_MR) }, TypeMX: func() RR { return new(RR_MX) }, TypeNS: func() RR { return new(RR_NS) }, TypePTR: func() RR { return new(RR_PTR) }, TypeSOA: func() RR { return new(RR_SOA) }, TypeRT: func() RR { return new(RR_RT) }, TypeTXT: func() RR { return new(RR_TXT) }, TypeSRV: func() RR { return new(RR_SRV) }, TypeNAPTR: func() RR { return new(RR_NAPTR) }, TypeDNAME: func() RR { return new(RR_DNAME) }, TypeA: func() RR { return new(RR_A) }, TypeWKS: func() RR { return new(RR_WKS) }, TypeAAAA: func() RR { return new(RR_AAAA) }, TypeLOC: func() RR { return new(RR_LOC) }, TypeOPT: func() RR { return new(RR_OPT) }, TypeDS: func() RR { return new(RR_DS) }, TypeCERT: func() RR { return new(RR_CERT) }, TypeKX: func() RR { return new(RR_KX) }, TypeSPF: func() RR { return new(RR_SPF) }, TypeTALINK: func() RR { return new(RR_TALINK) }, TypeSSHFP: func() RR { return new(RR_SSHFP) }, TypeRRSIG: func() RR { return new(RR_RRSIG) }, TypeNSEC: func() RR { return new(RR_NSEC) }, TypeDNSKEY: func() RR { return new(RR_DNSKEY) }, TypeNSEC3: func() RR { return new(RR_NSEC3) }, TypeDHCID: func() RR { return new(RR_DHCID) }, TypeNSEC3PARAM: func() RR { return new(RR_NSEC3PARAM) }, TypeTKEY: func() RR { return new(RR_TKEY) }, TypeTSIG: func() RR { return new(RR_TSIG) }, TypeURI: func() RR { return new(RR_URI) }, TypeTA: func() RR { return new(RR_TA) }, TypeDLV: func() RR { return new(RR_DLV) }, TypeTLSA: func() RR { return new(RR_TLSA) }, TypeHIP: func() RR { return new(RR_HIP) }, }
// NOTE(review): this span contains TWO concatenated copies of the same
// ExampleRender example file, separated by stray prose ("exmaple for render
// json") — this looks like a diff/extraction artifact, not valid Go. The
// second copy differs from the first only by a commented-out
// fmt.Println(out) line. Confirm at the source and keep a single copy.
package generator_test import ( "encoding/json" "fmt" "os" "path/filepath" "github.com/wolfmetr/mock-ass/generator" ) func ExampleRender() { type testTplJson struct { FirstName string `json:"first_name"` LastName string `json:"last_name"` EmptyField string `json:"empty_field"` } // *Chain fields is not actually random. Its has once random seed var testTemplateJson = `{ "first_name": "{{ FirstNameChain(1) }}", "last_name": "{{ LastNameChain(1) }}", "full_name": "{{ FullName() }}", "city": "{{ City() }}", "country": "{{ FullCountry() }}", "country2": "{{ TwoLetterCountry() }}", "country3": "{{ ThreeLetterCountry() }}", "isActive": {{ BooleanString() }}, "float": {{ Float(12) }}, "float2": {{ Float(10, 15) }}, "float3": {{ Float(10, 15, 2) }}, "float4": {{ Float(10, 15, 3) }}, "ip_v4": "{{ IPv4() }}", "person": [ {% for x in Range(5) %} { "first_name": "{{ FirstNameChain(forloop.Counter0) }}", "last_name": "{{ LastNameChain(forloop.Counter0) }}", "full_name": "{{ FullNameChain(forloop.Counter0) }}", "age": {{ Number(10, 100) }}, "email": "{{ Email() }}" }{% if not forloop.Last %}, {% endif %} {% endfor %} ] }` wd, _ := os.Getwd() path := filepath.Join(wd, "testdata") collection, err := generator.InitCollectionFromPath(path) if err != nil { panic(err) } out, err := generator.Render(testTemplateJson, "my random source hash", collection) if err != nil { panic(err) } var parsedTpl testTplJson err = json.Unmarshal([]byte(out), &parsedTpl) if err != nil { panic(err) } fmt.Printf("parsedTpl: %+v", parsedTpl) // Output: // parsedTpl: {FirstName:Grace LastName:Johnson EmptyField:} } exmaple for render json package generator_test import ( "encoding/json" "fmt" "os" "path/filepath" "github.com/wolfmetr/mock-ass/generator" ) func ExampleRender() { type testTplJson struct { FirstName string `json:"first_name"` LastName string `json:"last_name"` EmptyField string `json:"empty_field"` } // *Chain fields is not actually random. 
Its has once random seed var testTemplateJson = `{ "first_name": "{{ FirstNameChain(1) }}", "last_name": "{{ LastNameChain(1) }}", "full_name": "{{ FullName() }}", "city": "{{ City() }}", "country": "{{ FullCountry() }}", "country2": "{{ TwoLetterCountry() }}", "country3": "{{ ThreeLetterCountry() }}", "isActive": {{ BooleanString() }}, "float": {{ Float(12) }}, "float2": {{ Float(10, 15) }}, "float3": {{ Float(10, 15, 2) }}, "float4": {{ Float(10, 15, 3) }}, "ip_v4": "{{ IPv4() }}", "person": [ {% for x in Range(5) %} { "first_name": "{{ FirstNameChain(forloop.Counter0) }}", "last_name": "{{ LastNameChain(forloop.Counter0) }}", "full_name": "{{ FullNameChain(forloop.Counter0) }}", "age": {{ Number(10, 100) }}, "email": "{{ Email() }}" }{% if not forloop.Last %}, {% endif %} {% endfor %} ] }` wd, _ := os.Getwd() path := filepath.Join(wd, "testdata") collection, err := generator.InitCollectionFromPath(path) if err != nil { panic(err) } out, err := generator.Render(testTemplateJson, "my random source hash", collection) if err != nil { panic(err) } var parsedTpl testTplJson err = json.Unmarshal([]byte(out), &parsedTpl) if err != nil { panic(err) } // fmt.Println(out) // true random render fmt.Printf("parsedTpl: %+v", parsedTpl) // Output: // parsedTpl: {FirstName:Grace LastName:Johnson EmptyField:} }
// Copyright 2017, TCN Inc. // All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of TCN Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// NOTE(review): this span contains TWO concatenated versions of the same
// templates file — the original, then stray prose ("added before hooks to
// templates"), a second license header, and the revised version whose
// ReturnConvertHelpers adds a "before_hook" template that the four Sql*
// method templates invoke. This is a diff/extraction artifact; only the
// second version should survive deduplication. The const bodies are Go
// text/template source that generates gRPC service methods — any token
// change here alters the generated code, so the text below is untouched.
package templates const ReturnConvertHelpers= ` {{define "addr"}}{{if .IsMessage}}&{{end}}{{end}} {{define "base"}}{{if .IsEnum}}{{.EnumName}}({{.Name}}){{else}}{{.Name}}{{end}}{{end}} {{define "mapping"}}{{if .IsMapped}}.ToProto(){{end}}{{end}} ` const SqlUnaryMethodTemplate = `{{define "sql_unary_method"}}// sql unary {{.GetName}} func (s* {{.GetServiceName}}Impl) {{.GetName}} (ctx context.Context, req *{{.GetInputType}}) (*{{.GetOutputType}}, error) { var ( {{range $field, $type := .GetFieldsWithLocalTypesFor .GetOutputTypeStruct}} {{$field}} {{$type}}{{end}} ) err := s.SqlDB.QueryRow({{.GetQuery}} {{.GetQueryParamString true}}). Scan({{range $index,$t :=.GetTypeDescArrayForStruct .GetOutputTypeStruct}} &{{$t.Name}},{{end}}) if err != nil { if err == sql.ErrNoRows { return nil, grpc.Errorf(codes.NotFound, "%+v doesn't exist", req) } else if strings.Contains(err.Error(), "duplicate key") { return nil, grpc.Errorf(codes.AlreadyExists, "%+v already exists", req) } return nil, grpc.Errorf(codes.Unknown, err.Error()) } res := &{{.GetOutputType}}{ {{range $field, $type := .GetTypeDescForFieldsInStruct .GetOutputTypeStruct}} {{$field}}: {{template "addr" $type}}{{template "base" $type}}{{template "mapping" $type}},{{end}} } return res, nil } {{end}}` const SqlServerStreamingMethodTemplate = `{{define "sql_server_streaming_method"}}// sql server streaming {{.GetName}} func (s *{{.GetServiceName}}Impl) {{.GetName}}(req *{{.GetInputType}}, stream {{.GetServiceName}}_{{.GetName}}Server) error { var ( {{range $field, $type := .GetFieldsWithLocalTypesFor .GetOutputTypeStruct}} {{$field}} {{$type}}{{end}} ) rows, err := s.SqlDB.Query({{.GetQuery}} {{.GetQueryParamString true}}) if err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } defer rows.Close() for rows.Next() { err = rows.Err() if err != nil { if err == sql.ErrNoRows { return grpc.Errorf(codes.NotFound, "%+v doesn't exist", req) } else if strings.Contains(err.Error(), "duplicate key") { return 
grpc.Errorf(codes.AlreadyExists, "%+v already exists", req) } return grpc.Errorf(codes.Unknown, err.Error()) } err := rows.Scan({{range $index,$t :=.GetTypeDescArrayForStruct .GetOutputTypeStruct}} &{{$t.Name}},{{end}}) if err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } res := &{{.GetOutputType}}{ {{range $field, $type := .GetTypeDescForFieldsInStruct .GetOutputTypeStruct}} {{$field}}: {{template "addr" $type}}{{template "base" $type}}{{template "mapping" $type}},{{end}} } stream.Send(res) } return nil }{{end}}` const SqlClientStreamingMethodTemplate = `{{define "sql_client_streaming_method"}}// sql client streaming {{.GetName}} func (s *{{.GetServiceName}}Impl) {{.GetName}}(stream {{.GetServiceName}}_{{.GetName}}Server) error { tx, err := s.SqlDB.Begin() if err != nil { return err } stmt, err:= tx.Prepare({{.GetQuery}}) if err != nil { return err } totalAffected := int64(0) for { req, err := stream.Recv() if err == io.EOF { break } if err != nil { tx.Rollback() return grpc.Errorf(codes.Unknown, err.Error()) } affected, err := stmt.Exec({{.GetQueryParamString false}}) if err != nil { tx.Rollback() if err == sql.ErrNoRows { return grpc.Errorf(codes.NotFound, "%+v doesn't exist", req) } else if strings.Contains(err.Error(), "duplicate key") { return grpc.Errorf(codes.AlreadyExists, "%+v already exists", req) } return grpc.Errorf(codes.Unknown, err.Error()) } num, err := affected.RowsAffected() if err != nil { tx.Rollback() return grpc.Errorf(codes.Unknown, err.Error()) } totalAffected += num } err = tx.Commit() if err != nil { fmt.Println("Commiting transaction failed, rolling back...") return grpc.Errorf(codes.Unknown, err.Error()) } stream.SendAndClose(&{{.GetOutputType}}{ Count: totalAffected }) return nil }{{end}}` const SqlBidiStreamingMethodTemplate = `{{define "sql_bidi_streaming_method"}}// sql bidi streaming {{.GetName}} func (s *{{.GetServiceName}}Impl) {{.GetName}}(stream {{.GetServiceName}}_{{.GetName}}Server) error { stmt, err := 
s.SqlDB.Prepare({{.GetQuery}}) if err != nil { return err } defer stmt.Close() for { req, err := stream.Recv() if err == io.EOF { break } if err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } var ( {{range $field, $type := .GetFieldsWithLocalTypesFor .GetOutputTypeStruct}} {{$field}} {{$type}}{{end}} ) err = stmt.QueryRow({{.GetQueryParamString false}}). Scan({{range $index,$t :=.GetTypeDescArrayForStruct .GetOutputTypeStruct}} &{{$t.Name}},{{end}}) if err != nil { if err == sql.ErrNoRows { return grpc.Errorf(codes.NotFound, "%+v doesn't exist", req) } else if strings.Contains(err.Error(), "duplicate key") { return grpc.Errorf(codes.AlreadyExists, "%+v already exists", req) } return grpc.Errorf(codes.Unknown, err.Error()) } res := &{{.GetOutputType}}{ {{range $field, $type := .GetTypeDescForFieldsInStruct .GetOutputTypeStruct}} {{$field}}: {{template "addr" $type}}{{template "base" $type}}{{template "mapping" $type}},{{end}} } if err := stream.Send(res); err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } } return nil } {{end}}` added before hooks to templates // Copyright 2017, TCN Inc. // All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of TCN Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package templates const ReturnConvertHelpers= ` {{define "addr"}}{{if .IsMessage}}&{{end}}{{end}} {{define "base"}}{{if .IsEnum}}{{.EnumName}}({{.Name}}){{else}}{{.Name}}{{end}}{{end}} {{define "mapping"}}{{if .IsMapped}}.ToProto(){{end}}{{end}} {{define "before_hook"}} {{/* Our before hook template give it a Method as dot*/}} {{$before := .GetMethodOption.GetBefore}} {{if $before}} beforeRes, err := {{.GetGoPackage $before.GetPackage}}.{{$before.GetName}}(req) if err != nil { return nil, grpc.Errorf(codes.Unknown, err.Error()) } if beforeRes != nil { {{if .IsClientStreaming}} continue {{end}} {{if and .IsBidiStreaming (not .IsSpanner)}} err := stream.Send(beforeRes) if err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } continue {{end}} {{if or .IsUnary .IsServerStreaming}} return beforeRes, nil {{end}} } {{end}} {{end}} ` const SqlUnaryMethodTemplate = `{{define "sql_unary_method"}}// sql unary {{.GetName}} func (s* {{.GetServiceName}}Impl) {{.GetName}} (ctx context.Context, req *{{.GetInputType}}) (*{{.GetOutputType}}, error) { var ( {{range $field, $type := .GetFieldsWithLocalTypesFor .GetOutputTypeStruct}} {{$field}} {{$type}}{{end}} ) {{template "before_hook" .}} err := 
s.SqlDB.QueryRow({{.GetQuery}} {{.GetQueryParamString true}}). Scan({{range $index,$t :=.GetTypeDescArrayForStruct .GetOutputTypeStruct}} &{{$t.Name}},{{end}}) if err != nil { if err == sql.ErrNoRows { return nil, grpc.Errorf(codes.NotFound, "%+v doesn't exist", req) } else if strings.Contains(err.Error(), "duplicate key") { return nil, grpc.Errorf(codes.AlreadyExists, "%+v already exists", req) } return nil, grpc.Errorf(codes.Unknown, err.Error()) } res := &{{.GetOutputType}}{ {{range $field, $type := .GetTypeDescForFieldsInStruct .GetOutputTypeStruct}} {{$field}}: {{template "addr" $type}}{{template "base" $type}}{{template "mapping" $type}},{{end}} } return res, nil } {{end}}` const SqlServerStreamingMethodTemplate = `{{define "sql_server_streaming_method"}}// sql server streaming {{.GetName}} func (s *{{.GetServiceName}}Impl) {{.GetName}}(req *{{.GetInputType}}, stream {{.GetServiceName}}_{{.GetName}}Server) error { var ( {{range $field, $type := .GetFieldsWithLocalTypesFor .GetOutputTypeStruct}} {{$field}} {{$type}}{{end}} ) {{template "before_hook" .}} rows, err := s.SqlDB.Query({{.GetQuery}} {{.GetQueryParamString true}}) if err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } defer rows.Close() for rows.Next() { err = rows.Err() if err != nil { if err == sql.ErrNoRows { return grpc.Errorf(codes.NotFound, "%+v doesn't exist", req) } else if strings.Contains(err.Error(), "duplicate key") { return grpc.Errorf(codes.AlreadyExists, "%+v already exists", req) } return grpc.Errorf(codes.Unknown, err.Error()) } err := rows.Scan({{range $index,$t :=.GetTypeDescArrayForStruct .GetOutputTypeStruct}} &{{$t.Name}},{{end}}) if err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } res := &{{.GetOutputType}}{ {{range $field, $type := .GetTypeDescForFieldsInStruct .GetOutputTypeStruct}} {{$field}}: {{template "addr" $type}}{{template "base" $type}}{{template "mapping" $type}},{{end}} } stream.Send(res) } return nil }{{end}}` const 
SqlClientStreamingMethodTemplate = `{{define "sql_client_streaming_method"}}// sql client streaming {{.GetName}} func (s *{{.GetServiceName}}Impl) {{.GetName}}(stream {{.GetServiceName}}_{{.GetName}}Server) error { tx, err := s.SqlDB.Begin() if err != nil { return err } stmt, err:= tx.Prepare({{.GetQuery}}) if err != nil { return err } totalAffected := int64(0) for { req, err := stream.Recv() if err == io.EOF { break } if err != nil { tx.Rollback() return grpc.Errorf(codes.Unknown, err.Error()) } {{template "before_hook" .}} affected, err := stmt.Exec({{.GetQueryParamString false}}) if err != nil { tx.Rollback() if err == sql.ErrNoRows { return grpc.Errorf(codes.NotFound, "%+v doesn't exist", req) } else if strings.Contains(err.Error(), "duplicate key") { return grpc.Errorf(codes.AlreadyExists, "%+v already exists", req) } return grpc.Errorf(codes.Unknown, err.Error()) } num, err := affected.RowsAffected() if err != nil { tx.Rollback() return grpc.Errorf(codes.Unknown, err.Error()) } totalAffected += num } err = tx.Commit() if err != nil { fmt.Println("Commiting transaction failed, rolling back...") return grpc.Errorf(codes.Unknown, err.Error()) } stream.SendAndClose(&{{.GetOutputType}}{ Count: totalAffected }) return nil }{{end}}` const SqlBidiStreamingMethodTemplate = `{{define "sql_bidi_streaming_method"}}// sql bidi streaming {{.GetName}} func (s *{{.GetServiceName}}Impl) {{.GetName}}(stream {{.GetServiceName}}_{{.GetName}}Server) error { stmt, err := s.SqlDB.Prepare({{.GetQuery}}) if err != nil { return err } defer stmt.Close() for { req, err := stream.Recv() if err == io.EOF { break } if err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } {{template "before_hook" .}} var ( {{range $field, $type := .GetFieldsWithLocalTypesFor .GetOutputTypeStruct}} {{$field}} {{$type}}{{end}} ) err = stmt.QueryRow({{.GetQueryParamString false}}). 
Scan({{range $index,$t :=.GetTypeDescArrayForStruct .GetOutputTypeStruct}} &{{$t.Name}},{{end}}) if err != nil { if err == sql.ErrNoRows { return grpc.Errorf(codes.NotFound, "%+v doesn't exist", req) } else if strings.Contains(err.Error(), "duplicate key") { return grpc.Errorf(codes.AlreadyExists, "%+v already exists", req) } return grpc.Errorf(codes.Unknown, err.Error()) } res := &{{.GetOutputType}}{ {{range $field, $type := .GetTypeDescForFieldsInStruct .GetOutputTypeStruct}} {{$field}}: {{template "addr" $type}}{{template "base" $type}}{{template "mapping" $type}},{{end}} } if err := stream.Send(res); err != nil { return grpc.Errorf(codes.Unknown, err.Error()) } } return nil } {{end}}`
// NOTE(review): this span contains TWO concatenated copies of the juju lxd
// provider test file, separated by the stray review note "Use Check and
// Assert appropriately." — a diff/extraction artifact. The second copy is
// the corrected version (c.Assert for preconditions whose failure should
// abort the test, c.Check for ordinary expectations); only it should be
// kept after deduplication.
// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // +build go1.3 package lxd_test import ( "strings" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/environs" envtesting "github.com/juju/juju/environs/testing" "github.com/juju/juju/provider/lxd" ) var ( _ = gc.Suite(&providerSuite{}) _ = gc.Suite(&ProviderFunctionalSuite{}) ) type providerSuite struct { lxd.BaseSuite provider environs.EnvironProvider } func (s *providerSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) provider, err := environs.Provider("lxd") c.Check(err, jc.ErrorIsNil) s.provider = provider } func (s *providerSuite) TestRegistered(c *gc.C) { c.Assert(s.provider, gc.Equals, lxd.Provider) } func (s *providerSuite) TestValidate(c *gc.C) { validCfg, err := s.provider.Validate(s.Config, nil) c.Check(err, jc.ErrorIsNil) validAttrs := validCfg.AllAttrs() c.Assert(s.Config.AllAttrs(), gc.DeepEquals, validAttrs) } func (s *providerSuite) TestSecretAttrs(c *gc.C) { obtainedAttrs, err := s.provider.SecretAttrs(s.Config) c.Check(err, jc.ErrorIsNil) c.Assert(obtainedAttrs, gc.HasLen, 0) } func (s *providerSuite) TestBoilerplateConfig(c *gc.C) { // (wwitzel3) purposefully duplicated here so that this test will // fail if someone updates lxd/config.go without updating this test. var expected = ` lxd: type: lxd # namespace identifies the namespace to associate with containers # created by the provider. It is prepended to the container names. # By default the environment's name is used as the namespace. # # namespace: lxd # remote-url is the URL to the LXD API server to use for managing # containers, if any. If not specified then the locally running LXD # server is used. # # Note: Juju does not set up remotes for you. 
Run the following # commands on an LXD remote's host to install LXD: # # add-apt-repository ppa:ubuntu-lxc/lxd-stable # apt-get update # apt-get install lxd # # Before using a locally running LXD (the default for this provider) # after installing it, either through Juju or the LXD CLI ("lxc"), # you must either log out and back in or run this command: # # newgrp lxd # # You will also need to prepare the "ubuntu" image that Juju uses: # # lxc remote add images images.linuxcontainers.org # lxd-images import ubuntu --alias ubuntu # # See: https://linuxcontainers.org/lxd/getting-started-cli/ # # remote-url: # The cert and key the client should use to connect to the remote # may also be provided. If not then they are auto-generated. # # client-cert: # client-key: `[1:] boilerplateConfig := s.provider.BoilerplateConfig() c.Check(boilerplateConfig, gc.Equals, expected) c.Check(strings.Split(boilerplateConfig, "\n"), jc.DeepEquals, strings.Split(expected, "\n")) } type ProviderFunctionalSuite struct { lxd.BaseSuite provider environs.EnvironProvider } func (s *ProviderFunctionalSuite) SetUpTest(c *gc.C) { if !s.IsRunningLocally(c) { c.Skip("LXD not running locally") } s.BaseSuite.SetUpTest(c) provider, err := environs.Provider("lxd") c.Check(err, jc.ErrorIsNil) s.provider = provider } func (s *ProviderFunctionalSuite) TestOpen(c *gc.C) { env, err := s.provider.Open(s.Config) c.Check(err, jc.ErrorIsNil) envConfig := env.Config() c.Assert(envConfig.Name(), gc.Equals, "testenv") } func (s *ProviderFunctionalSuite) TestPrepareForBootstrap(c *gc.C) { env, err := s.provider.PrepareForBootstrap(envtesting.BootstrapContext(c), s.Config) c.Check(err, jc.ErrorIsNil) c.Check(env, gc.NotNil) } Use Check and Assert appropriately. // Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
// +build go1.3 package lxd_test import ( "strings" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/environs" envtesting "github.com/juju/juju/environs/testing" "github.com/juju/juju/provider/lxd" ) var ( _ = gc.Suite(&providerSuite{}) _ = gc.Suite(&ProviderFunctionalSuite{}) ) type providerSuite struct { lxd.BaseSuite provider environs.EnvironProvider } func (s *providerSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) provider, err := environs.Provider("lxd") c.Assert(err, jc.ErrorIsNil) s.provider = provider } func (s *providerSuite) TestRegistered(c *gc.C) { c.Check(s.provider, gc.Equals, lxd.Provider) } func (s *providerSuite) TestValidate(c *gc.C) { validCfg, err := s.provider.Validate(s.Config, nil) c.Assert(err, jc.ErrorIsNil) validAttrs := validCfg.AllAttrs() c.Check(s.Config.AllAttrs(), gc.DeepEquals, validAttrs) } func (s *providerSuite) TestSecretAttrs(c *gc.C) { obtainedAttrs, err := s.provider.SecretAttrs(s.Config) c.Assert(err, jc.ErrorIsNil) c.Check(obtainedAttrs, gc.HasLen, 0) } func (s *providerSuite) TestBoilerplateConfig(c *gc.C) { // (wwitzel3) purposefully duplicated here so that this test will // fail if someone updates lxd/config.go without updating this test. var expected = ` lxd: type: lxd # namespace identifies the namespace to associate with containers # created by the provider. It is prepended to the container names. # By default the environment's name is used as the namespace. # # namespace: lxd # remote-url is the URL to the LXD API server to use for managing # containers, if any. If not specified then the locally running LXD # server is used. # # Note: Juju does not set up remotes for you. 
Run the following # commands on an LXD remote's host to install LXD: # # add-apt-repository ppa:ubuntu-lxc/lxd-stable # apt-get update # apt-get install lxd # # Before using a locally running LXD (the default for this provider) # after installing it, either through Juju or the LXD CLI ("lxc"), # you must either log out and back in or run this command: # # newgrp lxd # # You will also need to prepare the "ubuntu" image that Juju uses: # # lxc remote add images images.linuxcontainers.org # lxd-images import ubuntu --alias ubuntu # # See: https://linuxcontainers.org/lxd/getting-started-cli/ # # remote-url: # The cert and key the client should use to connect to the remote # may also be provided. If not then they are auto-generated. # # client-cert: # client-key: `[1:] boilerplateConfig := s.provider.BoilerplateConfig() c.Check(boilerplateConfig, gc.Equals, expected) c.Check(strings.Split(boilerplateConfig, "\n"), jc.DeepEquals, strings.Split(expected, "\n")) } type ProviderFunctionalSuite struct { lxd.BaseSuite provider environs.EnvironProvider } func (s *ProviderFunctionalSuite) SetUpTest(c *gc.C) { if !s.IsRunningLocally(c) { c.Skip("LXD not running locally") } s.BaseSuite.SetUpTest(c) provider, err := environs.Provider("lxd") c.Assert(err, jc.ErrorIsNil) s.provider = provider } func (s *ProviderFunctionalSuite) TestOpen(c *gc.C) { env, err := s.provider.Open(s.Config) c.Assert(err, jc.ErrorIsNil) envConfig := env.Config() c.Check(envConfig.Name(), gc.Equals, "testenv") } func (s *ProviderFunctionalSuite) TestPrepareForBootstrap(c *gc.C) { env, err := s.provider.PrepareForBootstrap(envtesting.BootstrapContext(c), s.Config) c.Assert(err, jc.ErrorIsNil) c.Check(env, gc.NotNil) }
// Copyright (c) 2016 Pulcy.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE(review): this span is a before/after commit record, not one compilable
// file. The same provider source appears twice, separated by the commit
// message; the second copy differs only in GetInstances, which gains an
// existence check for the cluster's ".vagrant" directory so that instances
// are no longer reported for a cluster that was never created.
// maskAny, NotImplementedError, privateClusterDevice and
// fetchVagrantInsecureSSHKey are defined elsewhere in this package — not
// visible here; their exact semantics are assumed, TODO confirm against the
// rest of the package.

package vagrant

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/op/go-logging"

	"github.com/pulcy/quark/providers"
	"github.com/pulcy/quark/templates"
)

const (
	// fileMode is applied to every file and folder this provider creates.
	fileMode = os.FileMode(0775)
	// Template inputs and the file names rendered into vp.folder by CreateCluster.
	cloudConfigTemplate = "templates/cloud-config.tmpl"
	vagrantFileTemplate = "templates/Vagrantfile.tmpl"
	vagrantFileName     = "Vagrantfile"
	configTemplate      = "templates/config.rb.tmpl"
	configFileName      = "config.rb"
	userDataFileName    = "user-data"
)

var (
	// images lists the image IDs accepted by CreateCluster ("coreos-<channel>").
	images = []string{
		"coreos-alpha",
		"coreos-beta",
		"coreos-stable",
	}
)

// vagrantProvider implements providers.CloudProvider on top of a local
// Vagrant checkout living in `folder`.
type vagrantProvider struct {
	Logger *logging.Logger
	// folder is the directory holding the Vagrantfile and cluster state (.vagrant).
	folder string
	// instanceCount is how many VMs the cluster has; defaults to 3 and is
	// overwritten by CreateCluster from the cluster options.
	instanceCount int
}

// NewProvider returns a Vagrant-backed CloudProvider rooted at folder.
func NewProvider(logger *logging.Logger, folder string) providers.CloudProvider {
	return &vagrantProvider{
		Logger:        logger,
		folder:        folder,
		instanceCount: 3,
	}
}

// ShowInstanceTypes is not supported by the Vagrant provider.
func (vp *vagrantProvider) ShowInstanceTypes() error {
	return maskAny(NotImplementedError)
}

// ShowRegions is not supported by the Vagrant provider.
func (vp *vagrantProvider) ShowRegions() error {
	return maskAny(NotImplementedError)
}

// ShowImages prints the supported image IDs, one per line.
func (vp *vagrantProvider) ShowImages() error {
	fmt.Printf("Images\n%s\n", strings.Join(images, "\n"))
	return nil
}

// ShowKeys is not supported by the Vagrant provider.
func (vp *vagrantProvider) ShowKeys() error {
	return maskAny(NotImplementedError)
}

// Create a machine instance
func (vp *vagrantProvider) CreateInstance(log *logging.Logger, options providers.CreateInstanceOptions, dnsProvider providers.DnsProvider) (providers.ClusterInstance, error) {
	return providers.ClusterInstance{}, maskAny(NotImplementedError)
}

// Create an entire cluster
func (vp *vagrantProvider) CreateCluster(log *logging.Logger, options providers.CreateClusterOptions, dnsProvider providers.DnsProvider) error {
	// Ensure folder exists
	if err := os.MkdirAll(vp.folder, fileMode|os.ModeDir); err != nil {
		return maskAny(err)
	}
	// A ".vagrant" directory marks an already-created cluster; refuse to overwrite.
	if _, err := os.Stat(filepath.Join(vp.folder, ".vagrant")); err == nil {
		return maskAny(fmt.Errorf("Vagrant in %s already exists", vp.folder))
	}
	// Image ID must be "coreos-<channel>"; the channel feeds the templates.
	parts := strings.Split(options.ImageID, "-")
	if len(parts) != 2 || parts[0] != "coreos" {
		return maskAny(fmt.Errorf("Invalid image ID, expected 'coreos-alpha|beta|stable', got '%s'", options.ImageID))
	}
	updateChannel := parts[1]
	vopts := struct {
		InstanceCount int
		UpdateChannel string
	}{
		InstanceCount: options.InstanceCount,
		UpdateChannel: updateChannel,
	}
	vp.instanceCount = options.InstanceCount
	// Vagrantfile
	content, err := templates.Render(vagrantFileTemplate, vopts)
	if err != nil {
		return maskAny(err)
	}
	if err := ioutil.WriteFile(filepath.Join(vp.folder, vagrantFileName), []byte(content), fileMode); err != nil {
		return maskAny(err)
	}
	// config.rb
	content, err = templates.Render(configTemplate, vopts)
	if err != nil {
		return maskAny(err)
	}
	if err := ioutil.WriteFile(filepath.Join(vp.folder, configFileName), []byte(content), fileMode); err != nil {
		return maskAny(err)
	}
	// Fetch SSH keys
	sshKeys, err := providers.FetchSSHKeys(options.SSHKeyGithubAccount)
	if err != nil {
		return maskAny(err)
	}
	// Fetch vagrant insecure private key
	insecureKey, err := fetchVagrantInsecureSSHKey()
	if err != nil {
		return maskAny(err)
	}
	sshKeys = append(sshKeys, insecureKey)
	// user-data
	instanceOptions, err := options.NewCreateInstanceOptions(true, 0)
	if err != nil {
		return maskAny(err)
	}
	opts := instanceOptions.NewCloudConfigOptions()
	// "$private_ipv4" is substituted per-machine; presumably by Vagrant/cloud-init — TODO confirm.
	opts.PrivateIPv4 = "$private_ipv4"
	opts.SshKeys = sshKeys
	content, err = templates.Render(cloudConfigTemplate, opts)
	if err != nil {
		return maskAny(err)
	}
	if err := ioutil.WriteFile(filepath.Join(vp.folder, userDataFileName), []byte(content), fileMode); err != nil {
		return maskAny(err)
	}
	// Start
	cmd := exec.Command("vagrant", "up")
	cmd.Dir = vp.folder
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
	if err := cmd.Run(); err != nil {
		return maskAny(err)
	}
	// Run initial setup
	instances, err := vp.GetInstances(providers.ClusterInfo{})
	if err != nil {
		return maskAny(err)
	}
	clusterMembers, err := instances.AsClusterMemberList(log, nil)
	if err != nil {
		return maskAny(err)
	}
	for index, instance := range instances {
		iso := providers.InitialSetupOptions{
			ClusterMembers: clusterMembers,
			FleetMetadata:  instanceOptions.CreateFleetMetadata(true, index),
		}
		if err := instance.InitialSetup(log, instanceOptions, iso); err != nil {
			return maskAny(err)
		}
	}
	return nil
}

// Get names of instances of a cluster
// NOTE(review): this (pre-fix) version fabricates the instance list from
// vp.instanceCount without checking whether the cluster exists — the defect
// the commit below fixes.
func (vp *vagrantProvider) GetInstances(info_ providers.ClusterInfo) (providers.ClusterInstanceList, error) {
	instances := providers.ClusterInstanceList{}
	for i := 1; i <= vp.instanceCount; i++ {
		// IPs are fixed by convention: 192.168.33.101, .102, ...
		instances = append(instances, providers.ClusterInstance{
			Name:                 fmt.Sprintf("core-%02d", i),
			PrivateIpv4:          fmt.Sprintf("192.168.33.%d", 100+i),
			PublicIpv4:           fmt.Sprintf("192.168.33.%d", 100+i),
			PublicIpv6:           "",
			PrivateClusterDevice: privateClusterDevice,
		})
	}
	return instances, nil
}

// Remove all instances of a cluster
func (vp *vagrantProvider) DeleteCluster(info providers.ClusterInfo, dnsProvider providers.DnsProvider) error {
	// Start
	cmd := exec.Command("vagrant", "destroy", "-f")
	cmd.Dir = vp.folder
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
	if err := cmd.Run(); err != nil {
		return maskAny(err)
	}
	// Best-effort cleanup of the cluster marker; error intentionally ignored.
	os.RemoveAll(filepath.Join(vp.folder, ".vagrant"))
	return nil
}

// DeleteInstance is not supported by the Vagrant provider.
func (vp *vagrantProvider) DeleteInstance(info providers.ClusterInstanceInfo, dnsProvider providers.DnsProvider) error {
	return maskAny(NotImplementedError)
}

// ShowDomainRecords is not supported by the Vagrant provider.
func (vp *vagrantProvider) ShowDomainRecords(domain string) error {
	return maskAny(NotImplementedError)
}

Vagrant provider returned instances even if the cluster was not created

// Copyright (c) 2016 Pulcy.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE(review): second ("after") copy of the same file. Identical to the
// first copy except for GetInstances; see comments there.

package vagrant

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/op/go-logging"

	"github.com/pulcy/quark/providers"
	"github.com/pulcy/quark/templates"
)

const (
	fileMode            = os.FileMode(0775)
	cloudConfigTemplate = "templates/cloud-config.tmpl"
	vagrantFileTemplate = "templates/Vagrantfile.tmpl"
	vagrantFileName     = "Vagrantfile"
	configTemplate      = "templates/config.rb.tmpl"
	configFileName      = "config.rb"
	userDataFileName    = "user-data"
)

var (
	images = []string{
		"coreos-alpha",
		"coreos-beta",
		"coreos-stable",
	}
)

type vagrantProvider struct {
	Logger        *logging.Logger
	folder        string
	instanceCount int
}

func NewProvider(logger *logging.Logger, folder string) providers.CloudProvider {
	return &vagrantProvider{
		Logger:        logger,
		folder:        folder,
		instanceCount: 3,
	}
}

func (vp *vagrantProvider) ShowInstanceTypes() error {
	return maskAny(NotImplementedError)
}

func (vp *vagrantProvider) ShowRegions() error {
	return maskAny(NotImplementedError)
}

func (vp *vagrantProvider) ShowImages() error {
	fmt.Printf("Images\n%s\n", strings.Join(images, "\n"))
	return nil
}

func (vp *vagrantProvider) ShowKeys() error {
	return maskAny(NotImplementedError)
}

// Create a machine instance
func (vp *vagrantProvider) CreateInstance(log *logging.Logger, options providers.CreateInstanceOptions, dnsProvider providers.DnsProvider) (providers.ClusterInstance, error) {
	return providers.ClusterInstance{}, maskAny(NotImplementedError)
}

// Create an entire cluster
func (vp *vagrantProvider) CreateCluster(log *logging.Logger, options providers.CreateClusterOptions, dnsProvider providers.DnsProvider) error {
	// Ensure folder exists
	if err := os.MkdirAll(vp.folder, fileMode|os.ModeDir); err != nil {
		return maskAny(err)
	}
	if _, err := os.Stat(filepath.Join(vp.folder, ".vagrant")); err == nil {
		return maskAny(fmt.Errorf("Vagrant in %s already exists", vp.folder))
	}
	parts := strings.Split(options.ImageID, "-")
	if len(parts) != 2 || parts[0] != "coreos" {
		return maskAny(fmt.Errorf("Invalid image ID, expected 'coreos-alpha|beta|stable', got '%s'", options.ImageID))
	}
	updateChannel := parts[1]
	vopts := struct {
		InstanceCount int
		UpdateChannel string
	}{
		InstanceCount: options.InstanceCount,
		UpdateChannel: updateChannel,
	}
	vp.instanceCount = options.InstanceCount
	// Vagrantfile
	content, err := templates.Render(vagrantFileTemplate, vopts)
	if err != nil {
		return maskAny(err)
	}
	if err := ioutil.WriteFile(filepath.Join(vp.folder, vagrantFileName), []byte(content), fileMode); err != nil {
		return maskAny(err)
	}
	// config.rb
	content, err = templates.Render(configTemplate, vopts)
	if err != nil {
		return maskAny(err)
	}
	if err := ioutil.WriteFile(filepath.Join(vp.folder, configFileName), []byte(content), fileMode); err != nil {
		return maskAny(err)
	}
	// Fetch SSH keys
	sshKeys, err := providers.FetchSSHKeys(options.SSHKeyGithubAccount)
	if err != nil {
		return maskAny(err)
	}
	// Fetch vagrant insecure private key
	insecureKey, err := fetchVagrantInsecureSSHKey()
	if err != nil {
		return maskAny(err)
	}
	sshKeys = append(sshKeys, insecureKey)
	// user-data
	instanceOptions, err := options.NewCreateInstanceOptions(true, 0)
	if err != nil {
		return maskAny(err)
	}
	opts := instanceOptions.NewCloudConfigOptions()
	opts.PrivateIPv4 = "$private_ipv4"
	opts.SshKeys = sshKeys
	content, err = templates.Render(cloudConfigTemplate, opts)
	if err != nil {
		return maskAny(err)
	}
	if err := ioutil.WriteFile(filepath.Join(vp.folder, userDataFileName), []byte(content), fileMode); err != nil {
		return maskAny(err)
	}
	// Start
	cmd := exec.Command("vagrant", "up")
	cmd.Dir = vp.folder
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
	if err := cmd.Run(); err != nil {
		return maskAny(err)
	}
	// Run initial setup
	instances, err := vp.GetInstances(providers.ClusterInfo{})
	if err != nil {
		return maskAny(err)
	}
	clusterMembers, err := instances.AsClusterMemberList(log, nil)
	if err != nil {
		return maskAny(err)
	}
	for index, instance := range instances {
		iso := providers.InitialSetupOptions{
			ClusterMembers: clusterMembers,
			FleetMetadata:  instanceOptions.CreateFleetMetadata(true, index),
		}
		if err := instance.InitialSetup(log, instanceOptions, iso); err != nil {
			return maskAny(err)
		}
	}
	return nil
}

// Get names of instances of a cluster
// Fixed version: returns nil (no instances, no error) when the ".vagrant"
// marker directory is absent, i.e. the cluster was never created.
func (vp *vagrantProvider) GetInstances(info_ providers.ClusterInfo) (providers.ClusterInstanceList, error) {
	if _, err := os.Stat(filepath.Join(vp.folder, ".vagrant")); os.IsNotExist(err) {
		// Cluster does not exist
		return nil, nil
	}
	instances := providers.ClusterInstanceList{}
	for i := 1; i <= vp.instanceCount; i++ {
		instances = append(instances, providers.ClusterInstance{
			Name:                 fmt.Sprintf("core-%02d", i),
			PrivateIpv4:          fmt.Sprintf("192.168.33.%d", 100+i),
			PublicIpv4:           fmt.Sprintf("192.168.33.%d", 100+i),
			PublicIpv6:           "",
			PrivateClusterDevice: privateClusterDevice,
		})
	}
	return instances, nil
}

// Remove all instances of a cluster
func (vp *vagrantProvider) DeleteCluster(info providers.ClusterInfo, dnsProvider providers.DnsProvider) error {
	// Start
	cmd := exec.Command("vagrant", "destroy", "-f")
	cmd.Dir = vp.folder
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
	if err := cmd.Run(); err != nil {
		return maskAny(err)
	}
	os.RemoveAll(filepath.Join(vp.folder, ".vagrant"))
	return nil
}

func (vp *vagrantProvider) DeleteInstance(info providers.ClusterInstanceInfo, dnsProvider providers.DnsProvider) error {
	return maskAny(NotImplementedError)
}

func (vp *vagrantProvider) ShowDomainRecords(domain string) error {
	return maskAny(NotImplementedError)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// NOTE(review): before/after commit record of the historical (pre-Go 1)
// bytes.Buffer, separated by the original codereview message. The dialect is
// pre-Go 1: explicit semicolons, os.Error/os.EOF instead of error/io.EOF,
// and *[N]byte values used where []byte is expected (&b.bootstrap,
// b.Write(&b.oneByte)) — legal only in that era's compiler. Do not modernize;
// the record documents the change that resets b.off when the buffer empties.

package bytes

// Simple byte buffer for marshaling data.

import (
	"io";
	"os";
)

// Copy from string to byte array at offset doff. Assume there's room.
func copyString(dst []byte, doff int, str string) {
	for soff := 0; soff < len(str); soff++ {
		dst[doff] = str[soff];
		doff++;
	}
}

// A Buffer is a variable-sized buffer of bytes
// with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
	buf       []byte;   // contents are the bytes buf[off : len(buf)]
	off       int;      // read at &buf[off], write at &buf[len(buf)]
	oneByte   [1]byte;  // avoid allocation of slice on each WriteByte
	bootstrap [64]byte; // memory to hold first slice; helps small buffers (Printf) avoid allocation.
}

// Bytes returns the contents of the unread portion of the buffer;
// len(b.Bytes()) == b.Len().
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }

// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
func (b *Buffer) String() string {
	if b == nil {
		// Special case, useful in debugging.
		return "<nil>"
	}
	return string(b.buf[b.off:]);
}

// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }

// Truncate discards all but the first n unread bytes from the buffer.
// It is an error to call b.Truncate(n) with n > b.Len().
func (b *Buffer) Truncate(n int) {
	if n == 0 {
		// Reuse buffer space.
		b.off = 0
	}
	b.buf = b.buf[0 : b.off+n];
}

// Reset resets the buffer so it has no content.
// b.Reset() is the same as b.Truncate(0).
func (b *Buffer) Reset() { b.Truncate(0) }

// Resize buffer to guarantee enough space for n more bytes.
// After this call, the state of b.buf is inconsistent.
// It must be fixed up as is done in Write and WriteString.
func (b *Buffer) resize(n int) {
	var buf []byte;
	if b.buf == nil && n <= len(b.bootstrap) {
		// First allocation: serve small buffers from the inline array.
		buf = &b.bootstrap
	} else {
		buf = b.buf;
		if len(b.buf)+n > cap(b.buf) {
			// not enough space anywhere
			buf = make([]byte, 2*cap(b.buf)+n)
		}
		// Slide unread bytes to the front of (possibly new) storage.
		copy(buf, b.buf[b.off:]);
	}
	b.buf = buf;
	b.off = 0;
}

// Write appends the contents of p to the buffer. The return
// value n is the length of p; err is always nil.
func (b *Buffer) Write(p []byte) (n int, err os.Error) {
	m := b.Len();
	n = len(p);
	if len(b.buf)+n > cap(b.buf) {
		b.resize(n)
	}
	b.buf = b.buf[0 : b.off+m+n];
	copy(b.buf[b.off+m:], p);
	return n, nil;
}

// WriteString appends the contents of s to the buffer. The return
// value n is the length of s; err is always nil.
func (b *Buffer) WriteString(s string) (n int, err os.Error) {
	m := b.Len();
	n = len(s);
	if len(b.buf)+n > cap(b.buf) {
		b.resize(n)
	}
	b.buf = b.buf[0 : b.off+m+n];
	copyString(b.buf, b.off+m, s);
	return n, nil;
}

// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512

// ReadFrom reads data from r until EOF and appends it to the buffer.
// The return value n is the number of bytes read.
// Any error except os.EOF encountered during the read
// is also returned.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err os.Error) {
	for {
		if cap(b.buf)-len(b.buf) < MinRead {
			var newBuf []byte;
			// can we get space without allocation?
			if b.off+cap(b.buf)-len(b.buf) >= MinRead {
				// reuse beginning of buffer
				newBuf = b.buf[0 : len(b.buf)-b.off]
			} else {
				// not enough space at end; put space on end
				newBuf = make([]byte, len(b.buf)-b.off, 2*(cap(b.buf)-b.off)+MinRead)
			}
			copy(newBuf, b.buf[b.off:]);
			b.buf = newBuf;
			b.off = 0;
		}
		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]);
		b.buf = b.buf[b.off : len(b.buf)+m];
		n += int64(m);
		if e == os.EOF {
			break
		}
		if e != nil {
			return n, e
		}
	}
	return n, nil; // err is EOF, so return nil explicitly
}

// WriteTo writes data to w until the buffer is drained or an error
// occurs. The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err os.Error) {
	for b.off < len(b.buf) {
		m, e := w.Write(b.buf[b.off:]);
		n += int64(m);
		b.off += m;
		if e != nil {
			return n, e
		}
	}
	return;
}

// WriteByte appends the byte c to the buffer.
// The returned error is always nil, but is included
// to match bufio.Writer's WriteByte.
func (b *Buffer) WriteByte(c byte) os.Error {
	b.oneByte[0] = c;
	b.Write(&b.oneByte);
	return nil;
}

// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is os.EOF even if len(p) is zero;
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err os.Error) {
	if b.off >= len(b.buf) {
		return 0, os.EOF
	}
	m := b.Len();
	n = len(p);
	if n > m {
		// more bytes requested than available
		n = m
	}
	copy(p, b.buf[b.off:b.off+n]);
	b.off += n;
	return n, err;
}

// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error os.EOF.
func (b *Buffer) ReadByte() (c byte, err os.Error) {
	if b.off >= len(b.buf) {
		return 0, os.EOF
	}
	c = b.buf[b.off];
	b.off++;
	return c, nil;
}

// NewBuffer creates and initializes a new Buffer
// using buf as its initial contents.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }

// NewBufferString creates and initializes a new Buffer
// using string s as its initial contents.
func NewBufferString(s string) *Buffer {
	buf := make([]byte, len(s));
	copyString(buf, 0, s);
	return &Buffer{buf: buf};
}

When the buffer is empty, reset b.off to the beginning of the buffer
to avoid growing unnecessarily.

R=rsc
CC=golang-dev
http://codereview.appspot.com/176071

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// NOTE(review): second ("after") copy. Identical to the first except that
// Write, WriteString, ReadFrom, WriteTo, Read and ReadByte now call
// Truncate(0) when the buffer is observed empty, so b.off returns to 0 and
// the existing capacity is reused instead of growing.

package bytes

// Simple byte buffer for marshaling data.

import (
	"io";
	"os";
)

// Copy from string to byte array at offset doff. Assume there's room.
func copyString(dst []byte, doff int, str string) {
	for soff := 0; soff < len(str); soff++ {
		dst[doff] = str[soff];
		doff++;
	}
}

// A Buffer is a variable-sized buffer of bytes
// with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
	buf       []byte;   // contents are the bytes buf[off : len(buf)]
	off       int;      // read at &buf[off], write at &buf[len(buf)]
	oneByte   [1]byte;  // avoid allocation of slice on each WriteByte
	bootstrap [64]byte; // memory to hold first slice; helps small buffers (Printf) avoid allocation.
}

// Bytes returns the contents of the unread portion of the buffer;
// len(b.Bytes()) == b.Len().
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }

// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
func (b *Buffer) String() string {
	if b == nil {
		// Special case, useful in debugging.
		return "<nil>"
	}
	return string(b.buf[b.off:]);
}

// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }

// Truncate discards all but the first n unread bytes from the buffer.
// It is an error to call b.Truncate(n) with n > b.Len().
func (b *Buffer) Truncate(n int) {
	if n == 0 {
		// Reuse buffer space.
		b.off = 0
	}
	b.buf = b.buf[0 : b.off+n];
}

// Reset resets the buffer so it has no content.
// b.Reset() is the same as b.Truncate(0).
func (b *Buffer) Reset() { b.Truncate(0) }

// Resize buffer to guarantee enough space for n more bytes.
// After this call, the state of b.buf is inconsistent.
// It must be fixed up as is done in Write and WriteString.
func (b *Buffer) resize(n int) {
	var buf []byte;
	if b.buf == nil && n <= len(b.bootstrap) {
		buf = &b.bootstrap
	} else {
		buf = b.buf;
		if len(b.buf)+n > cap(b.buf) {
			// not enough space anywhere
			buf = make([]byte, 2*cap(b.buf)+n)
		}
		copy(buf, b.buf[b.off:]);
	}
	b.buf = buf;
	b.off = 0;
}

// Write appends the contents of p to the buffer. The return
// value n is the length of p; err is always nil.
func (b *Buffer) Write(p []byte) (n int, err os.Error) {
	m := b.Len();
	// If buffer is empty, reset to recover space.
	if m == 0 && b.off != 0 {
		b.Truncate(0)
	}
	n = len(p);
	if len(b.buf)+n > cap(b.buf) {
		b.resize(n)
	}
	b.buf = b.buf[0 : b.off+m+n];
	copy(b.buf[b.off+m:], p);
	return n, nil;
}

// WriteString appends the contents of s to the buffer. The return
// value n is the length of s; err is always nil.
func (b *Buffer) WriteString(s string) (n int, err os.Error) {
	m := b.Len();
	// If buffer is empty, reset to recover space.
	if m == 0 && b.off != 0 {
		b.Truncate(0)
	}
	n = len(s);
	if len(b.buf)+n > cap(b.buf) {
		b.resize(n)
	}
	b.buf = b.buf[0 : b.off+m+n];
	copyString(b.buf, b.off+m, s);
	return n, nil;
}

// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512

// ReadFrom reads data from r until EOF and appends it to the buffer.
// The return value n is the number of bytes read.
// Any error except os.EOF encountered during the read
// is also returned.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err os.Error) {
	// If buffer is empty, reset to recover space.
	if b.off >= len(b.buf) {
		b.Truncate(0)
	}
	for {
		if cap(b.buf)-len(b.buf) < MinRead {
			var newBuf []byte;
			// can we get space without allocation?
			if b.off+cap(b.buf)-len(b.buf) >= MinRead {
				// reuse beginning of buffer
				newBuf = b.buf[0 : len(b.buf)-b.off]
			} else {
				// not enough space at end; put space on end
				newBuf = make([]byte, len(b.buf)-b.off, 2*(cap(b.buf)-b.off)+MinRead)
			}
			copy(newBuf, b.buf[b.off:]);
			b.buf = newBuf;
			b.off = 0;
		}
		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]);
		b.buf = b.buf[b.off : len(b.buf)+m];
		n += int64(m);
		if e == os.EOF {
			break
		}
		if e != nil {
			return n, e
		}
	}
	return n, nil; // err is EOF, so return nil explicitly
}

// WriteTo writes data to w until the buffer is drained or an error
// occurs. The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err os.Error) {
	for b.off < len(b.buf) {
		m, e := w.Write(b.buf[b.off:]);
		n += int64(m);
		b.off += m;
		if e != nil {
			return n, e
		}
	}
	// Buffer is now empty; reset.
	b.Truncate(0);
	return;
}

// WriteByte appends the byte c to the buffer.
// The returned error is always nil, but is included
// to match bufio.Writer's WriteByte.
func (b *Buffer) WriteByte(c byte) os.Error {
	b.oneByte[0] = c;
	b.Write(&b.oneByte);
	return nil;
}

// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is os.EOF even if len(p) is zero;
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err os.Error) {
	if b.off >= len(b.buf) {
		// Buffer is empty, reset to recover space.
		b.Truncate(0);
		return 0, os.EOF;
	}
	m := b.Len();
	n = len(p);
	if n > m {
		// more bytes requested than available
		n = m
	}
	copy(p, b.buf[b.off:b.off+n]);
	b.off += n;
	return n, err;
}

// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error os.EOF.
func (b *Buffer) ReadByte() (c byte, err os.Error) {
	if b.off >= len(b.buf) {
		// Buffer is empty, reset to recover space.
		b.Truncate(0);
		return 0, os.EOF;
	}
	c = b.buf[b.off];
	b.off++;
	return c, nil;
}

// NewBuffer creates and initializes a new Buffer
// using buf as its initial contents.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }

// NewBufferString creates and initializes a new Buffer
// using string s as its initial contents.
func NewBufferString(s string) *Buffer {
	buf := make([]byte, len(s));
	copyString(buf, 0, s);
	return &Buffer{buf: buf};
}
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fmt_test import ( "bytes" . "fmt" "io" "math" "runtime" // for the malloc count test only "strings" "testing" "time" ) type ( renamedBool bool renamedInt int renamedInt8 int8 renamedInt16 int16 renamedInt32 int32 renamedInt64 int64 renamedUint uint renamedUint8 uint8 renamedUint16 uint16 renamedUint32 uint32 renamedUint64 uint64 renamedUintptr uintptr renamedString string renamedBytes []byte renamedFloat32 float32 renamedFloat64 float64 renamedComplex64 complex64 renamedComplex128 complex128 ) func TestFmtInterface(t *testing.T) { var i1 interface{} i1 = "abc" s := Sprintf("%s", i1) if s != "abc" { t.Errorf(`Sprintf("%%s", empty("abc")) = %q want %q`, s, "abc") } } const b32 uint32 = 1<<32 - 1 const b64 uint64 = 1<<64 - 1 var array = [5]int{1, 2, 3, 4, 5} var iarray = [4]interface{}{1, "hello", 2.5, nil} var slice = array[:] var islice = iarray[:] type A struct { i int j uint s string x []int } type I int func (i I) String() string { return Sprintf("<%d>", int(i)) } type B struct { I I j int } type C struct { i int B } type F int func (f F) Format(s State, c rune) { Fprintf(s, "<%c=F(%d)>", c, int(f)) } type G int func (g G) GoString() string { return Sprintf("GoString(%d)", int(g)) } type S struct { F F // a struct field that Formats G G // a struct field that GoStrings } type SI struct { I interface{} } // A type with a String method with pointer receiver for testing %p type P int var pValue P func (p *P) String() string { return "String(p)" } var b byte var fmttests = []struct { fmt string val interface{} out string }{ {"%d", 12345, "12345"}, {"%v", 12345, "12345"}, {"%t", true, "true"}, // basic string {"%s", "abc", "abc"}, {"%x", "abc", "616263"}, {"%x", "xyz", "78797a"}, {"%X", "xyz", "78797A"}, {"%q", "abc", `"abc"`}, // basic bytes {"%s", []byte("abc"), "abc"}, {"%x", []byte("abc"), "616263"}, 
{"% x", []byte("abc\xff"), "61 62 63 ff"}, {"% X", []byte("abc\xff"), "61 62 63 FF"}, {"%x", []byte("xyz"), "78797a"}, {"%X", []byte("xyz"), "78797A"}, {"%q", []byte("abc"), `"abc"`}, // escaped strings {"%#q", `abc`, "`abc`"}, {"%#q", `"`, "`\"`"}, {"1 %#q", `\n`, "1 `\\n`"}, {"2 %#q", "\n", `2 "\n"`}, {"%q", `"`, `"\""`}, {"%q", "\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`}, {"%q", "abc\xffdef", `"abc\xffdef"`}, {"%q", "\u263a", `"☺"`}, {"%+q", "\u263a", `"\u263a"`}, {"%q", "\U0010ffff", `"\U0010ffff"`}, // escaped characters {"%q", 'x', `'x'`}, {"%q", 0, `'\x00'`}, {"%q", '\n', `'\n'`}, {"%q", '\u0e00', `'\u0e00'`}, // not a printable rune. {"%q", '\U000c2345', `'\U000c2345'`}, // not a printable rune. {"%q", int64(0x7FFFFFFF), `%!q(int64=2147483647)`}, {"%q", uint64(0xFFFFFFFF), `%!q(uint64=4294967295)`}, {"%q", '"', `'"'`}, {"%q", '\'', `'\''`}, {"%q", "\u263a", `"☺"`}, {"%+q", "\u263a", `"\u263a"`}, // width {"%5s", "abc", " abc"}, {"%2s", "\u263a", " ☺"}, {"%-5s", "abc", "abc "}, {"%-8q", "abc", `"abc" `}, {"%05s", "abc", "00abc"}, {"%08q", "abc", `000"abc"`}, {"%5s", "abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"}, {"%.5s", "abcdefghijklmnopqrstuvwxyz", "abcde"}, {"%.5s", "日本語日本語", "日本語日本"}, {"%.5s", []byte("日本語日本語"), "日本語日本"}, {"%.5q", "abcdefghijklmnopqrstuvwxyz", `"abcde"`}, {"%.3q", "日本語日本語", `"日本語"`}, {"%.3q", []byte("日本語日本語"), `"日本語"`}, {"%10.1q", "日本語日本語", ` "日"`}, // integers {"%d", 12345, "12345"}, {"%d", -12345, "-12345"}, {"%10d", 12345, " 12345"}, {"%10d", -12345, " -12345"}, {"%+10d", 12345, " +12345"}, {"%010d", 12345, "0000012345"}, {"%010d", -12345, "-000012345"}, {"%-10d", 12345, "12345 "}, {"%010.3d", 1, " 001"}, {"%010.3d", -1, " -001"}, {"%+d", 12345, "+12345"}, {"%+d", -12345, "-12345"}, {"%+d", 0, "+0"}, {"% d", 0, " 0"}, {"% d", 12345, " 12345"}, {"%.0d", 0, ""}, {"%.d", 0, ""}, // unicode format {"%U", 0x1, "U+0001"}, {"%U", uint(0x1), "U+0001"}, {"%.8U", 0x2, "U+00000002"}, {"%U", 0x1234, "U+1234"}, {"%U", 0x12345, "U+12345"}, 
{"%10.6U", 0xABC, " U+000ABC"}, {"%-10.6U", 0xABC, "U+000ABC "}, {"%U", '\n', `U+000A`}, {"%#U", '\n', `U+000A`}, {"%U", 'x', `U+0078`}, {"%#U", 'x', `U+0078 'x'`}, {"%U", '\u263a', `U+263A`}, {"%#U", '\u263a', `U+263A '☺'`}, // floats {"%+.3e", 0.0, "+0.000e+00"}, {"%+.3e", 1.0, "+1.000e+00"}, {"%+.3f", -1.0, "-1.000"}, {"% .3E", -1.0, "-1.000E+00"}, {"% .3e", 1.0, " 1.000e+00"}, {"%+.3g", 0.0, "+0"}, {"%+.3g", 1.0, "+1"}, {"%+.3g", -1.0, "-1"}, {"% .3g", -1.0, "-1"}, {"% .3g", 1.0, " 1"}, // complex values {"%+.3e", 0i, "(+0.000e+00+0.000e+00i)"}, {"%+.3f", 0i, "(+0.000+0.000i)"}, {"%+.3g", 0i, "(+0+0i)"}, {"%+.3e", 1 + 2i, "(+1.000e+00+2.000e+00i)"}, {"%+.3f", 1 + 2i, "(+1.000+2.000i)"}, {"%+.3g", 1 + 2i, "(+1+2i)"}, {"%.3e", 0i, "(0.000e+00+0.000e+00i)"}, {"%.3f", 0i, "(0.000+0.000i)"}, {"%.3g", 0i, "(0+0i)"}, {"%.3e", 1 + 2i, "(1.000e+00+2.000e+00i)"}, {"%.3f", 1 + 2i, "(1.000+2.000i)"}, {"%.3g", 1 + 2i, "(1+2i)"}, {"%.3e", -1 - 2i, "(-1.000e+00-2.000e+00i)"}, {"%.3f", -1 - 2i, "(-1.000-2.000i)"}, {"%.3g", -1 - 2i, "(-1-2i)"}, {"% .3E", -1 - 2i, "(-1.000E+00-2.000E+00i)"}, {"%+.3g", complex64(1 + 2i), "(+1+2i)"}, {"%+.3g", complex128(1 + 2i), "(+1+2i)"}, // erroneous formats {"", 2, "%!(EXTRA int=2)"}, {"%d", "hello", "%!d(string=hello)"}, // old test/fmt_test.go {"%d", 1234, "1234"}, {"%d", -1234, "-1234"}, {"%d", uint(1234), "1234"}, {"%d", uint32(b32), "4294967295"}, {"%d", uint64(b64), "18446744073709551615"}, {"%o", 01234, "1234"}, {"%#o", 01234, "01234"}, {"%o", uint32(b32), "37777777777"}, {"%o", uint64(b64), "1777777777777777777777"}, {"%x", 0x1234abcd, "1234abcd"}, {"%#x", 0x1234abcd, "0x1234abcd"}, {"%x", b32 - 0x1234567, "fedcba98"}, {"%X", 0x1234abcd, "1234ABCD"}, {"%X", b32 - 0x1234567, "FEDCBA98"}, {"%#X", 0, "0X0"}, {"%x", b64, "ffffffffffffffff"}, {"%b", 7, "111"}, {"%b", b64, "1111111111111111111111111111111111111111111111111111111111111111"}, {"%b", -6, "-110"}, {"%e", 1.0, "1.000000e+00"}, {"%e", 1234.5678e3, "1.234568e+06"}, {"%e", 
1234.5678e-8, "1.234568e-05"}, {"%e", -7.0, "-7.000000e+00"}, {"%e", -1e-9, "-1.000000e-09"}, {"%f", 1234.5678e3, "1234567.800000"}, {"%f", 1234.5678e-8, "0.000012"}, {"%f", -7.0, "-7.000000"}, {"%f", -1e-9, "-0.000000"}, {"%g", 1234.5678e3, "1.2345678e+06"}, {"%g", float32(1234.5678e3), "1.2345678e+06"}, {"%g", 1234.5678e-8, "1.2345678e-05"}, {"%g", -7.0, "-7"}, {"%g", -1e-9, "-1e-09"}, {"%g", float32(-1e-9), "-1e-09"}, {"%E", 1.0, "1.000000E+00"}, {"%E", 1234.5678e3, "1.234568E+06"}, {"%E", 1234.5678e-8, "1.234568E-05"}, {"%E", -7.0, "-7.000000E+00"}, {"%E", -1e-9, "-1.000000E-09"}, {"%G", 1234.5678e3, "1.2345678E+06"}, {"%G", float32(1234.5678e3), "1.2345678E+06"}, {"%G", 1234.5678e-8, "1.2345678E-05"}, {"%G", -7.0, "-7"}, {"%G", -1e-9, "-1E-09"}, {"%G", float32(-1e-9), "-1E-09"}, {"%c", 'x', "x"}, {"%c", 0xe4, "ä"}, {"%c", 0x672c, "本"}, {"%c", '日', "日"}, {"%20.8d", 1234, " 00001234"}, {"%20.8d", -1234, " -00001234"}, {"%20d", 1234, " 1234"}, {"%-20.8d", 1234, "00001234 "}, {"%-20.8d", -1234, "-00001234 "}, {"%-#20.8x", 0x1234abc, "0x01234abc "}, {"%-#20.8X", 0x1234abc, "0X01234ABC "}, {"%-#20.8o", 01234, "00001234 "}, {"%.20b", 7, "00000000000000000111"}, {"%20.5s", "qwertyuiop", " qwert"}, {"%.5s", "qwertyuiop", "qwert"}, {"%-20.5s", "qwertyuiop", "qwert "}, {"%20c", 'x', " x"}, {"%-20c", 'x', "x "}, {"%20.6e", 1.2345e3, " 1.234500e+03"}, {"%20.6e", 1.2345e-3, " 1.234500e-03"}, {"%20e", 1.2345e3, " 1.234500e+03"}, {"%20e", 1.2345e-3, " 1.234500e-03"}, {"%20.8e", 1.2345e3, " 1.23450000e+03"}, {"%20f", 1.23456789e3, " 1234.567890"}, {"%20f", 1.23456789e-3, " 0.001235"}, {"%20f", 12345678901.23456789, " 12345678901.234568"}, {"%-20f", 1.23456789e3, "1234.567890 "}, {"%20.8f", 1.23456789e3, " 1234.56789000"}, {"%20.8f", 1.23456789e-3, " 0.00123457"}, {"%g", 1.23456789e3, "1234.56789"}, {"%g", 1.23456789e-3, "0.00123456789"}, {"%g", 1.23456789e20, "1.23456789e+20"}, {"%20e", math.Inf(1), " +Inf"}, {"%-20f", math.Inf(-1), "-Inf "}, {"%20g", math.NaN(), " NaN"}, // 
arrays {"%v", array, "[1 2 3 4 5]"}, {"%v", iarray, "[1 hello 2.5 <nil>]"}, {"%v", &array, "&[1 2 3 4 5]"}, {"%v", &iarray, "&[1 hello 2.5 <nil>]"}, // slices {"%v", slice, "[1 2 3 4 5]"}, {"%v", islice, "[1 hello 2.5 <nil>]"}, {"%v", &slice, "&[1 2 3 4 5]"}, {"%v", &islice, "&[1 hello 2.5 <nil>]"}, // complexes with %v {"%v", 1 + 2i, "(1+2i)"}, {"%v", complex64(1 + 2i), "(1+2i)"}, {"%v", complex128(1 + 2i), "(1+2i)"}, // structs {"%v", A{1, 2, "a", []int{1, 2}}, `{1 2 a [1 2]}`}, {"%+v", A{1, 2, "a", []int{1, 2}}, `{i:1 j:2 s:a x:[1 2]}`}, // +v on structs with Stringable items {"%+v", B{1, 2}, `{I:<1> j:2}`}, {"%+v", C{1, B{2, 3}}, `{i:1 B:{I:<2> j:3}}`}, // q on Stringable items {"%s", I(23), `<23>`}, {"%q", I(23), `"<23>"`}, {"%x", I(23), `3c32333e`}, {"%d", I(23), `23`}, // Stringer applies only to string formats. // go syntax {"%#v", A{1, 2, "a", []int{1, 2}}, `fmt_test.A{i:1, j:0x2, s:"a", x:[]int{1, 2}}`}, {"%#v", &b, "(*uint8)(0xPTR)"}, {"%#v", TestFmtInterface, "(func(*testing.T))(0xPTR)"}, {"%#v", make(chan int), "(chan int)(0xPTR)"}, {"%#v", uint64(1<<64 - 1), "0xffffffffffffffff"}, {"%#v", 1000000000, "1000000000"}, {"%#v", map[string]int{"a": 1}, `map[string]int{"a":1}`}, {"%#v", map[string]B{"a": {1, 2}}, `map[string]fmt_test.B{"a":fmt_test.B{I:1, j:2}}`}, {"%#v", []string{"a", "b"}, `[]string{"a", "b"}`}, {"%#v", SI{}, `fmt_test.SI{I:interface {}(nil)}`}, {"%#v", []int(nil), `[]int(nil)`}, {"%#v", []int{}, `[]int{}`}, {"%#v", array, `[5]int{1, 2, 3, 4, 5}`}, {"%#v", &array, `&[5]int{1, 2, 3, 4, 5}`}, {"%#v", iarray, `[4]interface {}{1, "hello", 2.5, interface {}(nil)}`}, {"%#v", &iarray, `&[4]interface {}{1, "hello", 2.5, interface {}(nil)}`}, {"%#v", map[int]byte(nil), `map[int]uint8(nil)`}, {"%#v", map[int]byte{}, `map[int]uint8{}`}, // slices with other formats {"%#x", []int{1, 2, 15}, `[0x1 0x2 0xf]`}, {"%x", []int{1, 2, 15}, `[1 2 f]`}, {"%d", []int{1, 2, 15}, `[1 2 15]`}, {"%d", []byte{1, 2, 15}, `[1 2 15]`}, {"%q", []string{"a", "b"}, `["a" 
"b"]`}, // renamings {"%v", renamedBool(true), "true"}, {"%d", renamedBool(true), "%!d(fmt_test.renamedBool=true)"}, {"%o", renamedInt(8), "10"}, {"%d", renamedInt8(-9), "-9"}, {"%v", renamedInt16(10), "10"}, {"%v", renamedInt32(-11), "-11"}, {"%X", renamedInt64(255), "FF"}, {"%v", renamedUint(13), "13"}, {"%o", renamedUint8(14), "16"}, {"%X", renamedUint16(15), "F"}, {"%d", renamedUint32(16), "16"}, {"%X", renamedUint64(17), "11"}, {"%o", renamedUintptr(18), "22"}, {"%x", renamedString("thing"), "7468696e67"}, {"%d", renamedBytes([]byte{1, 2, 15}), `[1 2 15]`}, {"%q", renamedBytes([]byte("hello")), `"hello"`}, {"%v", renamedFloat32(22), "22"}, {"%v", renamedFloat64(33), "33"}, {"%v", renamedComplex64(3 + 4i), "(3+4i)"}, {"%v", renamedComplex128(4 - 3i), "(4-3i)"}, // Formatter {"%x", F(1), "<x=F(1)>"}, {"%x", G(2), "2"}, {"%+v", S{F(4), G(5)}, "{F:<v=F(4)> G:5}"}, // GoStringer {"%#v", G(6), "GoString(6)"}, {"%#v", S{F(7), G(8)}, "fmt_test.S{F:<v=F(7)>, G:GoString(8)}"}, // %T {"%T", (4 - 3i), "complex128"}, {"%T", renamedComplex128(4 - 3i), "fmt_test.renamedComplex128"}, {"%T", intVal, "int"}, {"%6T", &intVal, " *int"}, // %p {"p0=%p", new(int), "p0=0xPTR"}, {"p1=%s", &pValue, "p1=String(p)"}, // String method... {"p2=%p", &pValue, "p2=0xPTR"}, // ... 
not called with %p {"p4=%#p", new(int), "p4=PTR"}, // %p on non-pointers {"%p", make(chan int), "0xPTR"}, {"%p", make(map[int]int), "0xPTR"}, {"%p", make([]int, 1), "0xPTR"}, {"%p", 27, "%!p(int=27)"}, // not a pointer at all // %d on Stringer should give integer if possible {"%s", time.Time{}.Month(), "January"}, {"%d", time.Time{}.Month(), "1"}, // erroneous things {"%s %", "hello", "hello %!(NOVERB)"}, {"%s %.2", "hello", "hello %!(NOVERB)"}, {"%d", "hello", "%!d(string=hello)"}, {"no args", "hello", "no args%!(EXTRA string=hello)"}, {"%s", nil, "%!s(<nil>)"}, {"%T", nil, "<nil>"}, {"%-1", 100, "%!(NOVERB)%!(EXTRA int=100)"}, } func TestSprintf(t *testing.T) { for _, tt := range fmttests { s := Sprintf(tt.fmt, tt.val) if i := strings.Index(tt.out, "PTR"); i >= 0 { j := i for ; j < len(s); j++ { c := s[j] if (c < '0' || c > '9') && (c < 'a' || c > 'f') && (c < 'A' || c > 'F') { break } } s = s[0:i] + "PTR" + s[j:] } if s != tt.out { if _, ok := tt.val.(string); ok { // Don't requote the already-quoted strings. // It's too confusing to read the errors. 
t.Errorf("Sprintf(%q, %q) = <%s> want <%s>", tt.fmt, tt.val, s, tt.out) } else { t.Errorf("Sprintf(%q, %v) = %q want %q", tt.fmt, tt.val, s, tt.out) } } } } func BenchmarkSprintfEmpty(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("") } } func BenchmarkSprintfString(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("%s", "hello") } } func BenchmarkSprintfInt(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("%d", 5) } } func BenchmarkSprintfIntInt(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("%d %d", 5, 6) } } func BenchmarkSprintfPrefixedInt(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("This is some meaningless prefix text that needs to be scanned %d", 6) } } func TestCountMallocs(t *testing.T) { if testing.Short() { return } runtime.UpdateMemStats() mallocs := 0 - runtime.MemStats.Mallocs for i := 0; i < 100; i++ { Sprintf("") } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"\"): %d\n", mallocs/100) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < 100; i++ { Sprintf("xxx") } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"xxx\"): %d\n", mallocs/100) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < 100; i++ { Sprintf("%x", i) } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"%%x\"): %d\n", mallocs/100) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < 100; i++ { Sprintf("%s", "hello") } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"%%s\"): %d\n", mallocs/100) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < 100; i++ { Sprintf("%x %x", i, i) } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"%%x %%x\"): %d\n", mallocs/100) buf := new(bytes.Buffer) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for 
i := 0; i < 100; i++ { buf.Reset() Fprintf(buf, "%x %x %x", i, i, i) } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Fprintf(buf, \"%%x %%x %%x\"): %d\n", mallocs/100) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < 100; i++ { buf.Reset() Fprintf(buf, "%s", "hello") } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Fprintf(buf, \"%%s\"): %d\n", mallocs/100) } type flagPrinter struct{} func (*flagPrinter) Format(f State, c rune) { s := "%" for i := 0; i < 128; i++ { if f.Flag(i) { s += string(i) } } if w, ok := f.Width(); ok { s += Sprintf("%d", w) } if p, ok := f.Precision(); ok { s += Sprintf(".%d", p) } s += string(c) io.WriteString(f, "["+s+"]") } var flagtests = []struct { in string out string }{ {"%a", "[%a]"}, {"%-a", "[%-a]"}, {"%+a", "[%+a]"}, {"%#a", "[%#a]"}, {"% a", "[% a]"}, {"%0a", "[%0a]"}, {"%1.2a", "[%1.2a]"}, {"%-1.2a", "[%-1.2a]"}, {"%+1.2a", "[%+1.2a]"}, {"%-+1.2a", "[%+-1.2a]"}, {"%-+1.2abc", "[%+-1.2a]bc"}, {"%-1.2abc", "[%-1.2a]bc"}, } func TestFlagParser(t *testing.T) { var flagprinter flagPrinter for _, tt := range flagtests { s := Sprintf(tt.in, &flagprinter) if s != tt.out { t.Errorf("Sprintf(%q, &flagprinter) => %q, want %q", tt.in, s, tt.out) } } } func TestStructPrinter(t *testing.T) { var s struct { a string b string c int } s.a = "abc" s.b = "def" s.c = 123 var tests = []struct { fmt string out string }{ {"%v", "{abc def 123}"}, {"%+v", "{a:abc b:def c:123}"}, } for _, tt := range tests { out := Sprintf(tt.fmt, s) if out != tt.out { t.Errorf("Sprintf(%q, &s) = %q, want %q", tt.fmt, out, tt.out) } } } // Check map printing using substrings so we don't depend on the print order. 
// presentInMap verifies that each expected entry in a occurs somewhere in the
// printed map s and is followed by a separator (space or closing bracket),
// so the check is independent of Go's randomized map iteration order.
func presentInMap(s string, a []string, t *testing.T) {
	for _, want := range a {
		at := strings.Index(s, want)
		if at < 0 {
			t.Errorf("map print: expected to find %q in %q", want, s)
		}
		// The match must be terminated by ' ' or ']' so that an entry is not
		// merely a prefix of a longer key:value pair.
		end := at + len(want)
		if end >= len(s) || (s[end] != ' ' && s[end] != ']') {
			t.Errorf("map print: %q not properly terminated in %q", want, s)
		}
	}
}

// TestMapPrinter checks map formatting through both Sprint and Sprintf("%v").
func TestMapPrinter(t *testing.T) {
	empty := make(map[int]string)
	if got := Sprint(empty); got != "map[]" {
		t.Errorf("empty map printed as %q not %q", got, "map[]")
	}
	filled := map[int]string{1: "one", 2: "two", 3: "three"}
	entries := []string{"1:one", "2:two", "3:three"}
	presentInMap(Sprintf("%v", filled), entries, t)
	presentInMap(Sprint(filled), entries, t)
}

// TestEmptyMap checks that nil and freshly-made maps print identically.
func TestEmptyMap(t *testing.T) {
	const emptyMapStr = "map[]"
	var nilMap map[string]int
	if got := Sprint(nilMap); got != emptyMapStr {
		t.Errorf("nil map printed as %q not %q", got, emptyMapStr)
	}
	if got := Sprint(make(map[string]int)); got != emptyMapStr {
		t.Errorf("empty map printed as %q not %q", got, emptyMapStr)
	}
}

// Check that Sprint (and hence Print, Fprint) puts spaces in the right places,
// that is, between arg pairs in which neither is a string.
func TestBlank(t *testing.T) {
	const expect = "<1>:1 2 3!"
	if got := Sprint("<", 1, ">:", 1, 2, 3, "!"); got != expect {
		t.Errorf("got %q expected %q", got, expect)
	}
}

// Check that Sprintln (and hence Println, Fprintln) puts spaces in the right places,
// that is, between all arg pairs.
func TestBlankln(t *testing.T) {
	got := Sprintln("<", 1, ">:", 1, 2, 3, "!")
	expect := "< 1 >: 1 2 3 !\n"
	if got != expect {
		t.Errorf("got %q expected %q", got, expect)
	}
}

// Check Formatter with Sprint, Sprintln, Sprintf
func TestFormatterPrintln(t *testing.T) {
	f := F(1)
	expect := "<v=F(1)>\n"
	s := Sprint(f, "\n")
	if s != expect {
		t.Errorf("Sprint wrong with Formatter: expected %q got %q", expect, s)
	}
	s = Sprintln(f)
	if s != expect {
		t.Errorf("Sprintln wrong with Formatter: expected %q got %q", expect, s)
	}
	s = Sprintf("%v\n", f)
	if s != expect {
		t.Errorf("Sprintf wrong with Formatter: expected %q got %q", expect, s)
	}
}

// args collects its arguments into a slice for the table-driven tests below.
func args(a ...interface{}) []interface{} { return a }

// startests exercises '*' width/precision arguments, including erroneous ones.
// NOTE(review): spacing inside some expected-output literals looks collapsed by
// extraction — verify against the canonical fmt test table before relying on them.
var startests = []struct {
	fmt string
	in  []interface{}
	out string
}{
	{"%*d", args(4, 42), " 42"},
	{"%.*d", args(4, 42), "0042"},
	{"%*.*d", args(8, 4, 42), " 0042"},
	{"%0*d", args(4, 42), "0042"},
	{"%-*d", args(4, 42), "42 "},
	// erroneous
	{"%*d", args(nil, 42), "%!(BADWIDTH)42"},
	{"%.*d", args(nil, 42), "%!(BADPREC)42"},
	{"%*d", args(5, "foo"), "%!d(string= foo)"},
	{"%*% %d", args(20, 5), "% 5"},
	{"%*", args(4), "%!(NOVERB)"},
	{"%*d", args(int32(4), 42), "%!(BADWIDTH)42"},
}

// TestWidthAndPrecision runs the startests table through Sprintf.
func TestWidthAndPrecision(t *testing.T) {
	for _, tt := range startests {
		s := Sprintf(tt.fmt, tt.in...)
		if s != tt.out {
			t.Errorf("%q: got %q expected %q", tt.fmt, s, tt.out)
		}
	}
}

// A type that panics in String.
type Panic struct {
	message interface{}
}

// Value receiver.
func (p Panic) GoString() string {
	panic(p.message)
}

// Value receiver.
func (p Panic) String() string {
	panic(p.message)
}

// A type that panics in Format.
type PanicF struct {
	message interface{}
}

// Value receiver.
func (p PanicF) Format(f State, c rune) { panic(p.message) } var panictests = []struct { fmt string in interface{} out string }{ // String {"%s", (*Panic)(nil), "<nil>"}, // nil pointer special case {"%s", Panic{io.ErrUnexpectedEOF}, "%s(PANIC=unexpected EOF)"}, {"%s", Panic{3}, "%s(PANIC=3)"}, // GoString {"%#v", (*Panic)(nil), "<nil>"}, // nil pointer special case {"%#v", Panic{io.ErrUnexpectedEOF}, "%v(PANIC=unexpected EOF)"}, {"%#v", Panic{3}, "%v(PANIC=3)"}, // Format {"%s", (*PanicF)(nil), "<nil>"}, // nil pointer special case {"%s", PanicF{io.ErrUnexpectedEOF}, "%s(PANIC=unexpected EOF)"}, {"%s", PanicF{3}, "%s(PANIC=3)"}, } func TestPanics(t *testing.T) { for _, tt := range panictests { s := Sprintf(tt.fmt, tt.in) if s != tt.out { t.Errorf("%q: got %q expected %q", tt.fmt, s, tt.out) } } } fmt: benchmark floating point. mallocs per Sprintf("%x"): 1 mallocs per Sprintf("%g"): 4 R=golang-dev, gri CC=golang-dev https://golang.org/cl/5449106 // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fmt_test import ( "bytes" . 
"fmt" "io" "math" "runtime" // for the malloc count test only "strings" "testing" "time" ) type ( renamedBool bool renamedInt int renamedInt8 int8 renamedInt16 int16 renamedInt32 int32 renamedInt64 int64 renamedUint uint renamedUint8 uint8 renamedUint16 uint16 renamedUint32 uint32 renamedUint64 uint64 renamedUintptr uintptr renamedString string renamedBytes []byte renamedFloat32 float32 renamedFloat64 float64 renamedComplex64 complex64 renamedComplex128 complex128 ) func TestFmtInterface(t *testing.T) { var i1 interface{} i1 = "abc" s := Sprintf("%s", i1) if s != "abc" { t.Errorf(`Sprintf("%%s", empty("abc")) = %q want %q`, s, "abc") } } const b32 uint32 = 1<<32 - 1 const b64 uint64 = 1<<64 - 1 var array = [5]int{1, 2, 3, 4, 5} var iarray = [4]interface{}{1, "hello", 2.5, nil} var slice = array[:] var islice = iarray[:] type A struct { i int j uint s string x []int } type I int func (i I) String() string { return Sprintf("<%d>", int(i)) } type B struct { I I j int } type C struct { i int B } type F int func (f F) Format(s State, c rune) { Fprintf(s, "<%c=F(%d)>", c, int(f)) } type G int func (g G) GoString() string { return Sprintf("GoString(%d)", int(g)) } type S struct { F F // a struct field that Formats G G // a struct field that GoStrings } type SI struct { I interface{} } // A type with a String method with pointer receiver for testing %p type P int var pValue P func (p *P) String() string { return "String(p)" } var b byte var fmttests = []struct { fmt string val interface{} out string }{ {"%d", 12345, "12345"}, {"%v", 12345, "12345"}, {"%t", true, "true"}, // basic string {"%s", "abc", "abc"}, {"%x", "abc", "616263"}, {"%x", "xyz", "78797a"}, {"%X", "xyz", "78797A"}, {"%q", "abc", `"abc"`}, // basic bytes {"%s", []byte("abc"), "abc"}, {"%x", []byte("abc"), "616263"}, {"% x", []byte("abc\xff"), "61 62 63 ff"}, {"% X", []byte("abc\xff"), "61 62 63 FF"}, {"%x", []byte("xyz"), "78797a"}, {"%X", []byte("xyz"), "78797A"}, {"%q", []byte("abc"), `"abc"`}, // escaped 
strings {"%#q", `abc`, "`abc`"}, {"%#q", `"`, "`\"`"}, {"1 %#q", `\n`, "1 `\\n`"}, {"2 %#q", "\n", `2 "\n"`}, {"%q", `"`, `"\""`}, {"%q", "\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`}, {"%q", "abc\xffdef", `"abc\xffdef"`}, {"%q", "\u263a", `"☺"`}, {"%+q", "\u263a", `"\u263a"`}, {"%q", "\U0010ffff", `"\U0010ffff"`}, // escaped characters {"%q", 'x', `'x'`}, {"%q", 0, `'\x00'`}, {"%q", '\n', `'\n'`}, {"%q", '\u0e00', `'\u0e00'`}, // not a printable rune. {"%q", '\U000c2345', `'\U000c2345'`}, // not a printable rune. {"%q", int64(0x7FFFFFFF), `%!q(int64=2147483647)`}, {"%q", uint64(0xFFFFFFFF), `%!q(uint64=4294967295)`}, {"%q", '"', `'"'`}, {"%q", '\'', `'\''`}, {"%q", "\u263a", `"☺"`}, {"%+q", "\u263a", `"\u263a"`}, // width {"%5s", "abc", " abc"}, {"%2s", "\u263a", " ☺"}, {"%-5s", "abc", "abc "}, {"%-8q", "abc", `"abc" `}, {"%05s", "abc", "00abc"}, {"%08q", "abc", `000"abc"`}, {"%5s", "abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"}, {"%.5s", "abcdefghijklmnopqrstuvwxyz", "abcde"}, {"%.5s", "日本語日本語", "日本語日本"}, {"%.5s", []byte("日本語日本語"), "日本語日本"}, {"%.5q", "abcdefghijklmnopqrstuvwxyz", `"abcde"`}, {"%.3q", "日本語日本語", `"日本語"`}, {"%.3q", []byte("日本語日本語"), `"日本語"`}, {"%10.1q", "日本語日本語", ` "日"`}, // integers {"%d", 12345, "12345"}, {"%d", -12345, "-12345"}, {"%10d", 12345, " 12345"}, {"%10d", -12345, " -12345"}, {"%+10d", 12345, " +12345"}, {"%010d", 12345, "0000012345"}, {"%010d", -12345, "-000012345"}, {"%-10d", 12345, "12345 "}, {"%010.3d", 1, " 001"}, {"%010.3d", -1, " -001"}, {"%+d", 12345, "+12345"}, {"%+d", -12345, "-12345"}, {"%+d", 0, "+0"}, {"% d", 0, " 0"}, {"% d", 12345, " 12345"}, {"%.0d", 0, ""}, {"%.d", 0, ""}, // unicode format {"%U", 0x1, "U+0001"}, {"%U", uint(0x1), "U+0001"}, {"%.8U", 0x2, "U+00000002"}, {"%U", 0x1234, "U+1234"}, {"%U", 0x12345, "U+12345"}, {"%10.6U", 0xABC, " U+000ABC"}, {"%-10.6U", 0xABC, "U+000ABC "}, {"%U", '\n', `U+000A`}, {"%#U", '\n', `U+000A`}, {"%U", 'x', `U+0078`}, {"%#U", 'x', `U+0078 'x'`}, {"%U", '\u263a', `U+263A`}, 
{"%#U", '\u263a', `U+263A '☺'`}, // floats {"%+.3e", 0.0, "+0.000e+00"}, {"%+.3e", 1.0, "+1.000e+00"}, {"%+.3f", -1.0, "-1.000"}, {"% .3E", -1.0, "-1.000E+00"}, {"% .3e", 1.0, " 1.000e+00"}, {"%+.3g", 0.0, "+0"}, {"%+.3g", 1.0, "+1"}, {"%+.3g", -1.0, "-1"}, {"% .3g", -1.0, "-1"}, {"% .3g", 1.0, " 1"}, // complex values {"%+.3e", 0i, "(+0.000e+00+0.000e+00i)"}, {"%+.3f", 0i, "(+0.000+0.000i)"}, {"%+.3g", 0i, "(+0+0i)"}, {"%+.3e", 1 + 2i, "(+1.000e+00+2.000e+00i)"}, {"%+.3f", 1 + 2i, "(+1.000+2.000i)"}, {"%+.3g", 1 + 2i, "(+1+2i)"}, {"%.3e", 0i, "(0.000e+00+0.000e+00i)"}, {"%.3f", 0i, "(0.000+0.000i)"}, {"%.3g", 0i, "(0+0i)"}, {"%.3e", 1 + 2i, "(1.000e+00+2.000e+00i)"}, {"%.3f", 1 + 2i, "(1.000+2.000i)"}, {"%.3g", 1 + 2i, "(1+2i)"}, {"%.3e", -1 - 2i, "(-1.000e+00-2.000e+00i)"}, {"%.3f", -1 - 2i, "(-1.000-2.000i)"}, {"%.3g", -1 - 2i, "(-1-2i)"}, {"% .3E", -1 - 2i, "(-1.000E+00-2.000E+00i)"}, {"%+.3g", complex64(1 + 2i), "(+1+2i)"}, {"%+.3g", complex128(1 + 2i), "(+1+2i)"}, // erroneous formats {"", 2, "%!(EXTRA int=2)"}, {"%d", "hello", "%!d(string=hello)"}, // old test/fmt_test.go {"%d", 1234, "1234"}, {"%d", -1234, "-1234"}, {"%d", uint(1234), "1234"}, {"%d", uint32(b32), "4294967295"}, {"%d", uint64(b64), "18446744073709551615"}, {"%o", 01234, "1234"}, {"%#o", 01234, "01234"}, {"%o", uint32(b32), "37777777777"}, {"%o", uint64(b64), "1777777777777777777777"}, {"%x", 0x1234abcd, "1234abcd"}, {"%#x", 0x1234abcd, "0x1234abcd"}, {"%x", b32 - 0x1234567, "fedcba98"}, {"%X", 0x1234abcd, "1234ABCD"}, {"%X", b32 - 0x1234567, "FEDCBA98"}, {"%#X", 0, "0X0"}, {"%x", b64, "ffffffffffffffff"}, {"%b", 7, "111"}, {"%b", b64, "1111111111111111111111111111111111111111111111111111111111111111"}, {"%b", -6, "-110"}, {"%e", 1.0, "1.000000e+00"}, {"%e", 1234.5678e3, "1.234568e+06"}, {"%e", 1234.5678e-8, "1.234568e-05"}, {"%e", -7.0, "-7.000000e+00"}, {"%e", -1e-9, "-1.000000e-09"}, {"%f", 1234.5678e3, "1234567.800000"}, {"%f", 1234.5678e-8, "0.000012"}, {"%f", -7.0, "-7.000000"}, {"%f", 
-1e-9, "-0.000000"}, {"%g", 1234.5678e3, "1.2345678e+06"}, {"%g", float32(1234.5678e3), "1.2345678e+06"}, {"%g", 1234.5678e-8, "1.2345678e-05"}, {"%g", -7.0, "-7"}, {"%g", -1e-9, "-1e-09"}, {"%g", float32(-1e-9), "-1e-09"}, {"%E", 1.0, "1.000000E+00"}, {"%E", 1234.5678e3, "1.234568E+06"}, {"%E", 1234.5678e-8, "1.234568E-05"}, {"%E", -7.0, "-7.000000E+00"}, {"%E", -1e-9, "-1.000000E-09"}, {"%G", 1234.5678e3, "1.2345678E+06"}, {"%G", float32(1234.5678e3), "1.2345678E+06"}, {"%G", 1234.5678e-8, "1.2345678E-05"}, {"%G", -7.0, "-7"}, {"%G", -1e-9, "-1E-09"}, {"%G", float32(-1e-9), "-1E-09"}, {"%c", 'x', "x"}, {"%c", 0xe4, "ä"}, {"%c", 0x672c, "本"}, {"%c", '日', "日"}, {"%20.8d", 1234, " 00001234"}, {"%20.8d", -1234, " -00001234"}, {"%20d", 1234, " 1234"}, {"%-20.8d", 1234, "00001234 "}, {"%-20.8d", -1234, "-00001234 "}, {"%-#20.8x", 0x1234abc, "0x01234abc "}, {"%-#20.8X", 0x1234abc, "0X01234ABC "}, {"%-#20.8o", 01234, "00001234 "}, {"%.20b", 7, "00000000000000000111"}, {"%20.5s", "qwertyuiop", " qwert"}, {"%.5s", "qwertyuiop", "qwert"}, {"%-20.5s", "qwertyuiop", "qwert "}, {"%20c", 'x', " x"}, {"%-20c", 'x', "x "}, {"%20.6e", 1.2345e3, " 1.234500e+03"}, {"%20.6e", 1.2345e-3, " 1.234500e-03"}, {"%20e", 1.2345e3, " 1.234500e+03"}, {"%20e", 1.2345e-3, " 1.234500e-03"}, {"%20.8e", 1.2345e3, " 1.23450000e+03"}, {"%20f", 1.23456789e3, " 1234.567890"}, {"%20f", 1.23456789e-3, " 0.001235"}, {"%20f", 12345678901.23456789, " 12345678901.234568"}, {"%-20f", 1.23456789e3, "1234.567890 "}, {"%20.8f", 1.23456789e3, " 1234.56789000"}, {"%20.8f", 1.23456789e-3, " 0.00123457"}, {"%g", 1.23456789e3, "1234.56789"}, {"%g", 1.23456789e-3, "0.00123456789"}, {"%g", 1.23456789e20, "1.23456789e+20"}, {"%20e", math.Inf(1), " +Inf"}, {"%-20f", math.Inf(-1), "-Inf "}, {"%20g", math.NaN(), " NaN"}, // arrays {"%v", array, "[1 2 3 4 5]"}, {"%v", iarray, "[1 hello 2.5 <nil>]"}, {"%v", &array, "&[1 2 3 4 5]"}, {"%v", &iarray, "&[1 hello 2.5 <nil>]"}, // slices {"%v", slice, "[1 2 3 4 5]"}, {"%v", 
islice, "[1 hello 2.5 <nil>]"}, {"%v", &slice, "&[1 2 3 4 5]"}, {"%v", &islice, "&[1 hello 2.5 <nil>]"}, // complexes with %v {"%v", 1 + 2i, "(1+2i)"}, {"%v", complex64(1 + 2i), "(1+2i)"}, {"%v", complex128(1 + 2i), "(1+2i)"}, // structs {"%v", A{1, 2, "a", []int{1, 2}}, `{1 2 a [1 2]}`}, {"%+v", A{1, 2, "a", []int{1, 2}}, `{i:1 j:2 s:a x:[1 2]}`}, // +v on structs with Stringable items {"%+v", B{1, 2}, `{I:<1> j:2}`}, {"%+v", C{1, B{2, 3}}, `{i:1 B:{I:<2> j:3}}`}, // q on Stringable items {"%s", I(23), `<23>`}, {"%q", I(23), `"<23>"`}, {"%x", I(23), `3c32333e`}, {"%d", I(23), `23`}, // Stringer applies only to string formats. // go syntax {"%#v", A{1, 2, "a", []int{1, 2}}, `fmt_test.A{i:1, j:0x2, s:"a", x:[]int{1, 2}}`}, {"%#v", &b, "(*uint8)(0xPTR)"}, {"%#v", TestFmtInterface, "(func(*testing.T))(0xPTR)"}, {"%#v", make(chan int), "(chan int)(0xPTR)"}, {"%#v", uint64(1<<64 - 1), "0xffffffffffffffff"}, {"%#v", 1000000000, "1000000000"}, {"%#v", map[string]int{"a": 1}, `map[string]int{"a":1}`}, {"%#v", map[string]B{"a": {1, 2}}, `map[string]fmt_test.B{"a":fmt_test.B{I:1, j:2}}`}, {"%#v", []string{"a", "b"}, `[]string{"a", "b"}`}, {"%#v", SI{}, `fmt_test.SI{I:interface {}(nil)}`}, {"%#v", []int(nil), `[]int(nil)`}, {"%#v", []int{}, `[]int{}`}, {"%#v", array, `[5]int{1, 2, 3, 4, 5}`}, {"%#v", &array, `&[5]int{1, 2, 3, 4, 5}`}, {"%#v", iarray, `[4]interface {}{1, "hello", 2.5, interface {}(nil)}`}, {"%#v", &iarray, `&[4]interface {}{1, "hello", 2.5, interface {}(nil)}`}, {"%#v", map[int]byte(nil), `map[int]uint8(nil)`}, {"%#v", map[int]byte{}, `map[int]uint8{}`}, // slices with other formats {"%#x", []int{1, 2, 15}, `[0x1 0x2 0xf]`}, {"%x", []int{1, 2, 15}, `[1 2 f]`}, {"%d", []int{1, 2, 15}, `[1 2 15]`}, {"%d", []byte{1, 2, 15}, `[1 2 15]`}, {"%q", []string{"a", "b"}, `["a" "b"]`}, // renamings {"%v", renamedBool(true), "true"}, {"%d", renamedBool(true), "%!d(fmt_test.renamedBool=true)"}, {"%o", renamedInt(8), "10"}, {"%d", renamedInt8(-9), "-9"}, {"%v", 
renamedInt16(10), "10"}, {"%v", renamedInt32(-11), "-11"}, {"%X", renamedInt64(255), "FF"}, {"%v", renamedUint(13), "13"}, {"%o", renamedUint8(14), "16"}, {"%X", renamedUint16(15), "F"}, {"%d", renamedUint32(16), "16"}, {"%X", renamedUint64(17), "11"}, {"%o", renamedUintptr(18), "22"}, {"%x", renamedString("thing"), "7468696e67"}, {"%d", renamedBytes([]byte{1, 2, 15}), `[1 2 15]`}, {"%q", renamedBytes([]byte("hello")), `"hello"`}, {"%v", renamedFloat32(22), "22"}, {"%v", renamedFloat64(33), "33"}, {"%v", renamedComplex64(3 + 4i), "(3+4i)"}, {"%v", renamedComplex128(4 - 3i), "(4-3i)"}, // Formatter {"%x", F(1), "<x=F(1)>"}, {"%x", G(2), "2"}, {"%+v", S{F(4), G(5)}, "{F:<v=F(4)> G:5}"}, // GoStringer {"%#v", G(6), "GoString(6)"}, {"%#v", S{F(7), G(8)}, "fmt_test.S{F:<v=F(7)>, G:GoString(8)}"}, // %T {"%T", (4 - 3i), "complex128"}, {"%T", renamedComplex128(4 - 3i), "fmt_test.renamedComplex128"}, {"%T", intVal, "int"}, {"%6T", &intVal, " *int"}, // %p {"p0=%p", new(int), "p0=0xPTR"}, {"p1=%s", &pValue, "p1=String(p)"}, // String method... {"p2=%p", &pValue, "p2=0xPTR"}, // ... 
not called with %p {"p4=%#p", new(int), "p4=PTR"}, // %p on non-pointers {"%p", make(chan int), "0xPTR"}, {"%p", make(map[int]int), "0xPTR"}, {"%p", make([]int, 1), "0xPTR"}, {"%p", 27, "%!p(int=27)"}, // not a pointer at all // %d on Stringer should give integer if possible {"%s", time.Time{}.Month(), "January"}, {"%d", time.Time{}.Month(), "1"}, // erroneous things {"%s %", "hello", "hello %!(NOVERB)"}, {"%s %.2", "hello", "hello %!(NOVERB)"}, {"%d", "hello", "%!d(string=hello)"}, {"no args", "hello", "no args%!(EXTRA string=hello)"}, {"%s", nil, "%!s(<nil>)"}, {"%T", nil, "<nil>"}, {"%-1", 100, "%!(NOVERB)%!(EXTRA int=100)"}, } func TestSprintf(t *testing.T) { for _, tt := range fmttests { s := Sprintf(tt.fmt, tt.val) if i := strings.Index(tt.out, "PTR"); i >= 0 { j := i for ; j < len(s); j++ { c := s[j] if (c < '0' || c > '9') && (c < 'a' || c > 'f') && (c < 'A' || c > 'F') { break } } s = s[0:i] + "PTR" + s[j:] } if s != tt.out { if _, ok := tt.val.(string); ok { // Don't requote the already-quoted strings. // It's too confusing to read the errors. 
t.Errorf("Sprintf(%q, %q) = <%s> want <%s>", tt.fmt, tt.val, s, tt.out) } else { t.Errorf("Sprintf(%q, %v) = %q want %q", tt.fmt, tt.val, s, tt.out) } } } } func BenchmarkSprintfEmpty(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("") } } func BenchmarkSprintfString(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("%s", "hello") } } func BenchmarkSprintfInt(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("%d", 5) } } func BenchmarkSprintfIntInt(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("%d %d", 5, 6) } } func BenchmarkSprintfPrefixedInt(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("This is some meaningless prefix text that needs to be scanned %d", 6) } } func BenchmarkSprintfFloat(b *testing.B) { for i := 0; i < b.N; i++ { Sprintf("%g", 5.23184) } } func TestCountMallocs(t *testing.T) { if testing.Short() { return } const N = 100 runtime.UpdateMemStats() mallocs := 0 - runtime.MemStats.Mallocs for i := 0; i < N; i++ { Sprintf("") } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"\"): %d\n", mallocs/N) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < N; i++ { Sprintf("xxx") } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"xxx\"): %d\n", mallocs/N) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < N; i++ { Sprintf("%x", i) } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"%%x\"): %d\n", mallocs/N) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < N; i++ { Sprintf("%s", "hello") } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"%%s\"): %d\n", mallocs/N) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < N; i++ { Sprintf("%x %x", i, i) } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"%%x %%x\"): %d\n", mallocs/N) 
runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < N; i++ { Sprintf("%g", 3.14159) } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Sprintf(\"%%g\"): %d\n", mallocs/N) buf := new(bytes.Buffer) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < N; i++ { buf.Reset() Fprintf(buf, "%x %x %x", i, i, i) } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Fprintf(buf, \"%%x %%x %%x\"): %d\n", mallocs/N) runtime.UpdateMemStats() mallocs = 0 - runtime.MemStats.Mallocs for i := 0; i < N; i++ { buf.Reset() Fprintf(buf, "%s", "hello") } runtime.UpdateMemStats() mallocs += runtime.MemStats.Mallocs Printf("mallocs per Fprintf(buf, \"%%s\"): %d\n", mallocs/N) } type flagPrinter struct{} func (*flagPrinter) Format(f State, c rune) { s := "%" for i := 0; i < 128; i++ { if f.Flag(i) { s += string(i) } } if w, ok := f.Width(); ok { s += Sprintf("%d", w) } if p, ok := f.Precision(); ok { s += Sprintf(".%d", p) } s += string(c) io.WriteString(f, "["+s+"]") } var flagtests = []struct { in string out string }{ {"%a", "[%a]"}, {"%-a", "[%-a]"}, {"%+a", "[%+a]"}, {"%#a", "[%#a]"}, {"% a", "[% a]"}, {"%0a", "[%0a]"}, {"%1.2a", "[%1.2a]"}, {"%-1.2a", "[%-1.2a]"}, {"%+1.2a", "[%+1.2a]"}, {"%-+1.2a", "[%+-1.2a]"}, {"%-+1.2abc", "[%+-1.2a]bc"}, {"%-1.2abc", "[%-1.2a]bc"}, } func TestFlagParser(t *testing.T) { var flagprinter flagPrinter for _, tt := range flagtests { s := Sprintf(tt.in, &flagprinter) if s != tt.out { t.Errorf("Sprintf(%q, &flagprinter) => %q, want %q", tt.in, s, tt.out) } } } func TestStructPrinter(t *testing.T) { var s struct { a string b string c int } s.a = "abc" s.b = "def" s.c = 123 var tests = []struct { fmt string out string }{ {"%v", "{abc def 123}"}, {"%+v", "{a:abc b:def c:123}"}, } for _, tt := range tests { out := Sprintf(tt.fmt, s) if out != tt.out { t.Errorf("Sprintf(%q, &s) = %q, want %q", tt.fmt, out, tt.out) } } } // Check map printing 
using substrings so we don't depend on the print order. func presentInMap(s string, a []string, t *testing.T) { for i := 0; i < len(a); i++ { loc := strings.Index(s, a[i]) if loc < 0 { t.Errorf("map print: expected to find %q in %q", a[i], s) } // make sure the match ends here loc += len(a[i]) if loc >= len(s) || (s[loc] != ' ' && s[loc] != ']') { t.Errorf("map print: %q not properly terminated in %q", a[i], s) } } } func TestMapPrinter(t *testing.T) { m0 := make(map[int]string) s := Sprint(m0) if s != "map[]" { t.Errorf("empty map printed as %q not %q", s, "map[]") } m1 := map[int]string{1: "one", 2: "two", 3: "three"} a := []string{"1:one", "2:two", "3:three"} presentInMap(Sprintf("%v", m1), a, t) presentInMap(Sprint(m1), a, t) } func TestEmptyMap(t *testing.T) { const emptyMapStr = "map[]" var m map[string]int s := Sprint(m) if s != emptyMapStr { t.Errorf("nil map printed as %q not %q", s, emptyMapStr) } m = make(map[string]int) s = Sprint(m) if s != emptyMapStr { t.Errorf("empty map printed as %q not %q", s, emptyMapStr) } } // Check that Sprint (and hence Print, Fprint) puts spaces in the right places, // that is, between arg pairs in which neither is a string. func TestBlank(t *testing.T) { got := Sprint("<", 1, ">:", 1, 2, 3, "!") expect := "<1>:1 2 3!" if got != expect { t.Errorf("got %q expected %q", got, expect) } } // Check that Sprintln (and hence Println, Fprintln) puts spaces in the right places, // that is, between all arg pairs. 
func TestBlankln(t *testing.T) {
	got := Sprintln("<", 1, ">:", 1, 2, 3, "!")
	expect := "< 1 >: 1 2 3 !\n"
	if got != expect {
		t.Errorf("got %q expected %q", got, expect)
	}
}

// Check Formatter with Sprint, Sprintln, Sprintf
func TestFormatterPrintln(t *testing.T) {
	f := F(1)
	expect := "<v=F(1)>\n"
	s := Sprint(f, "\n")
	if s != expect {
		t.Errorf("Sprint wrong with Formatter: expected %q got %q", expect, s)
	}
	s = Sprintln(f)
	if s != expect {
		t.Errorf("Sprintln wrong with Formatter: expected %q got %q", expect, s)
	}
	s = Sprintf("%v\n", f)
	if s != expect {
		t.Errorf("Sprintf wrong with Formatter: expected %q got %q", expect, s)
	}
}

// args collects its arguments into a slice for the table-driven tests below.
func args(a ...interface{}) []interface{} { return a }

// startests exercises '*' width/precision arguments, including erroneous ones.
// NOTE(review): spacing inside some expected-output literals looks collapsed by
// extraction — verify against the canonical fmt test table before relying on them.
var startests = []struct {
	fmt string
	in  []interface{}
	out string
}{
	{"%*d", args(4, 42), " 42"},
	{"%.*d", args(4, 42), "0042"},
	{"%*.*d", args(8, 4, 42), " 0042"},
	{"%0*d", args(4, 42), "0042"},
	{"%-*d", args(4, 42), "42 "},
	// erroneous
	{"%*d", args(nil, 42), "%!(BADWIDTH)42"},
	{"%.*d", args(nil, 42), "%!(BADPREC)42"},
	{"%*d", args(5, "foo"), "%!d(string= foo)"},
	{"%*% %d", args(20, 5), "% 5"},
	{"%*", args(4), "%!(NOVERB)"},
	{"%*d", args(int32(4), 42), "%!(BADWIDTH)42"},
}

// TestWidthAndPrecision runs the startests table through Sprintf.
func TestWidthAndPrecision(t *testing.T) {
	for _, tt := range startests {
		s := Sprintf(tt.fmt, tt.in...)
		if s != tt.out {
			t.Errorf("%q: got %q expected %q", tt.fmt, s, tt.out)
		}
	}
}

// A type that panics in String.
type Panic struct {
	message interface{}
}

// Value receiver.
func (p Panic) GoString() string {
	panic(p.message)
}

// Value receiver.
func (p Panic) String() string {
	panic(p.message)
}

// A type that panics in Format.
type PanicF struct {
	message interface{}
}

// Value receiver.
// Format implements the Formatter interface by panicking with the stored
// message, so the tests below can verify fmt's panic-recovery output.
func (p PanicF) Format(f State, c rune) {
	panic(p.message)
}

// panictests checks that a panic inside String, GoString, or Format is
// recovered and reported in the formatted output instead of crashing.
var panictests = []struct {
	fmt string
	in  interface{}
	out string
}{
	// String
	{"%s", (*Panic)(nil), "<nil>"}, // nil pointer special case
	{"%s", Panic{io.ErrUnexpectedEOF}, "%s(PANIC=unexpected EOF)"},
	{"%s", Panic{3}, "%s(PANIC=3)"},
	// GoString
	{"%#v", (*Panic)(nil), "<nil>"}, // nil pointer special case
	{"%#v", Panic{io.ErrUnexpectedEOF}, "%v(PANIC=unexpected EOF)"},
	{"%#v", Panic{3}, "%v(PANIC=3)"},
	// Format
	{"%s", (*PanicF)(nil), "<nil>"}, // nil pointer special case
	{"%s", PanicF{io.ErrUnexpectedEOF}, "%s(PANIC=unexpected EOF)"},
	{"%s", PanicF{3}, "%s(PANIC=3)"},
}

// TestPanics runs the panictests table through Sprintf.
func TestPanics(t *testing.T) {
	for _, tt := range panictests {
		s := Sprintf(tt.fmt, tt.in)
		if s != tt.out {
			t.Errorf("%q: got %q expected %q", tt.fmt, s, tt.out)
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file implements scopes and the objects they contain.

package ast

import (
	"bytes"
	"fmt"
	"go/token"
)

// A Scope maintains the set of named language entities declared
// in the scope and a link to the immediately surrounding (outer)
// scope.
//
type Scope struct {
	Outer   *Scope
	Objects map[string]*Object
}

// NewScope creates a new scope nested in the outer scope.
func NewScope(outer *Scope) *Scope {
	const n = 4 // initial scope capacity
	return &Scope{outer, make(map[string]*Object, n)}
}

// Lookup returns the object with the given name if it is
// found in scope s, otherwise it returns nil. Outer scopes
// are ignored.
//
func (s *Scope) Lookup(name string) *Object {
	return s.Objects[name]
}

// Insert attempts to insert a named object obj into the scope s.
// If the scope already contains an object alt with the same name,
// Insert leaves the scope unchanged and returns alt. Otherwise
// it inserts obj and returns nil.
//
func (s *Scope) Insert(obj *Object) (alt *Object) {
	if alt = s.Objects[obj.Name]; alt == nil {
		s.Objects[obj.Name] = obj
	}
	return
}

// Debugging support
func (s *Scope) String() string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "scope %p {", s)
	if s != nil && len(s.Objects) > 0 {
		fmt.Fprintln(&buf)
		for _, obj := range s.Objects {
			fmt.Fprintf(&buf, "\t%s %s\n", obj.Kind, obj.Name)
		}
	}
	fmt.Fprintf(&buf, "}\n")
	return buf.String()
}

// ----------------------------------------------------------------------------
// Objects

// An Object describes a named language entity such as a package,
// constant, type, variable, function (incl. methods), or label.
//
// The Data field contains object-specific data:
//
//	Kind    Data type         Data value
//	Pkg     *types.Package    package scope
//	Con     int               iota for the respective declaration
//	Con     != nil            constant value
//	Typ     *Scope            (used as method scope during type checking - transient)
//
type Object struct {
	Kind ObjKind
	Name string      // declared name
	Decl interface{} // corresponding Field, XxxSpec, FuncDecl, LabeledStmt, AssignStmt, Scope; or nil
	Data interface{} // object-specific data; or nil
	Type interface{} // placeholder for type information; may be nil
}

// NewObj creates a new object of a given kind and name.
func NewObj(kind ObjKind, name string) *Object {
	return &Object{Kind: kind, Name: name}
}

// Pos computes the source position of the declaration of an object name.
// The result may be an invalid position if it cannot be computed
// (obj.Decl may be nil or not correct).
func (obj *Object) Pos() token.Pos {
	name := obj.Name
	switch d := obj.Decl.(type) {
	case *Field:
		for _, n := range d.Names {
			if n.Name == name {
				return n.Pos()
			}
		}
	case *ImportSpec:
		if d.Name != nil && d.Name.Name == name {
			return d.Name.Pos()
		}
		return d.Path.Pos()
	case *ValueSpec:
		for _, n := range d.Names {
			if n.Name == name {
				return n.Pos()
			}
		}
	case *TypeSpec:
		if d.Name.Name == name {
			return d.Name.Pos()
		}
	case *FuncDecl:
		if d.Name.Name == name {
			return d.Name.Pos()
		}
	case *LabeledStmt:
		if d.Label.Name == name {
			return d.Label.Pos()
		}
	case *AssignStmt:
		for _, x := range d.Lhs {
			if ident, isIdent := x.(*Ident); isIdent && ident.Name == name {
				return ident.Pos()
			}
		}
	case *Scope:
		// predeclared object - nothing to do for now
	}
	return token.NoPos
}

// ObjKind describes what an object represents.
type ObjKind int

// The list of possible Object kinds.
const (
	Bad ObjKind = iota // for error handling
	Pkg                // package
	Con                // constant
	Typ                // type
	Var                // variable
	Fun                // function or method
	Lbl                // label
)

// objKindStrings maps each ObjKind to its display name; indexed by kind.
var objKindStrings = [...]string{
	Bad: "bad",
	Pkg: "package",
	Con: "const",
	Typ: "type",
	Var: "var",
	Fun: "func",
	Lbl: "label",
}

func (kind ObjKind) String() string { return objKindStrings[kind] }

go/ast: fix typo in comment

LGTM=bradfitz
R=golang-codereviews, bradfitz
CC=golang-codereviews
https://codereview.appspot.com/111330043

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file implements scopes and the objects they contain.

package ast

import (
	"bytes"
	"fmt"
	"go/token"
)

// A Scope maintains the set of named language entities declared
// in the scope and a link to the immediately surrounding (outer)
// scope.
//
type Scope struct {
	Outer   *Scope
	Objects map[string]*Object
}

// NewScope creates a new scope nested in the outer scope.
func NewScope(outer *Scope) *Scope {
	const n = 4 // initial scope capacity
	return &Scope{outer, make(map[string]*Object, n)}
}

// Lookup returns the object with the given name if it is
// found in scope s, otherwise it returns nil. Outer scopes
// are ignored.
//
func (s *Scope) Lookup(name string) *Object {
	return s.Objects[name]
}

// Insert attempts to insert a named object obj into the scope s.
// If the scope already contains an object alt with the same name,
// Insert leaves the scope unchanged and returns alt. Otherwise
// it inserts obj and returns nil.
//
func (s *Scope) Insert(obj *Object) (alt *Object) {
	if alt = s.Objects[obj.Name]; alt == nil {
		s.Objects[obj.Name] = obj
	}
	return
}

// Debugging support
func (s *Scope) String() string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "scope %p {", s)
	if s != nil && len(s.Objects) > 0 {
		fmt.Fprintln(&buf)
		for _, obj := range s.Objects {
			fmt.Fprintf(&buf, "\t%s %s\n", obj.Kind, obj.Name)
		}
	}
	fmt.Fprintf(&buf, "}\n")
	return buf.String()
}

// ----------------------------------------------------------------------------
// Objects

// An Object describes a named language entity such as a package,
// constant, type, variable, function (incl. methods), or label.
//
// The Data field contains object-specific data:
//
//	Kind    Data type         Data value
//	Pkg     *types.Package    package scope
//	Con     int               iota for the respective declaration
//	Con     != nil            constant value
//	Typ     *Scope            (used as method scope during type checking - transient)
//
type Object struct {
	Kind ObjKind
	Name string      // declared name
	Decl interface{} // corresponding Field, XxxSpec, FuncDecl, LabeledStmt, AssignStmt, Scope; or nil
	Data interface{} // object-specific data; or nil
	Type interface{} // placeholder for type information; may be nil
}

// NewObj creates a new object of a given kind and name.
func NewObj(kind ObjKind, name string) *Object {
	return &Object{Kind: kind, Name: name}
}

// Pos computes the source position of the declaration of an object name.
// The result may be an invalid position if it cannot be computed
// (obj.Decl may be nil or not correct).
func (obj *Object) Pos() token.Pos {
	name := obj.Name
	// Find the identifier matching the object's name within its
	// declaration node; the declaration kind determines where to look.
	switch d := obj.Decl.(type) {
	case *Field:
		for _, n := range d.Names {
			if n.Name == name {
				return n.Pos()
			}
		}
	case *ImportSpec:
		if d.Name != nil && d.Name.Name == name {
			return d.Name.Pos()
		}
		return d.Path.Pos()
	case *ValueSpec:
		for _, n := range d.Names {
			if n.Name == name {
				return n.Pos()
			}
		}
	case *TypeSpec:
		if d.Name.Name == name {
			return d.Name.Pos()
		}
	case *FuncDecl:
		if d.Name.Name == name {
			return d.Name.Pos()
		}
	case *LabeledStmt:
		if d.Label.Name == name {
			return d.Label.Pos()
		}
	case *AssignStmt:
		for _, x := range d.Lhs {
			if ident, isIdent := x.(*Ident); isIdent && ident.Name == name {
				return ident.Pos()
			}
		}
	case *Scope:
		// predeclared object - nothing to do for now
	}
	return token.NoPos
}

// ObjKind describes what an object represents.
type ObjKind int

// The list of possible Object kinds.
const (
	Bad ObjKind = iota // for error handling
	Pkg                // package
	Con                // constant
	Typ                // type
	Var                // variable
	Fun                // function or method
	Lbl                // label
)

// objKindStrings maps each ObjKind to its display name; indexed by kind.
var objKindStrings = [...]string{
	Bad: "bad",
	Pkg: "package",
	Con: "const",
	Typ: "type",
	Var: "var",
	Fun: "func",
	Lbl: "label",
}

func (kind ObjKind) String() string { return objKindStrings[kind] }
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// http://golang.org/doc/articles/laws_of_reflection.html
package reflect

import (
	"strconv"
	"sync"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of methods in the type's method set.
	NumMethod() int

	// Name returns the type's name within its package.
	// It returns an empty string for unnamed types.
	Name() string

	// PkgPath returns a named type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
	// the package path will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for equality,
	// compare the Types directly.
	String() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements returns true if the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo returns true if a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo returns true if a value of the type is convertible to type u.
	ConvertibleTo(u Type) bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Ptr: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic returns true if a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ... float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the first struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	// Unexported methods keep Type implementations confined to this package.
	runtimeType() *runtimeType
	common() *commonType
	uncommon() *uncommonType
}

/*
 * These data structures are known to the compiler (../../cmd/gc/reflect.c).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.h.
 */

// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Ptr
	Slice
	String
	Struct
	UnsafePointer
)

// The compiler can only construct empty interface values at
// compile time; non-empty interface values get created
// during initialization. Type is an empty interface
// so that the compiler can lay out references as data.
// The underlying type is *reflect.ArrayType and so on.
type runtimeType interface{}

// commonType is the common implementation of most values.
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
type commonType struct {
	size          uintptr      // size in bytes
	hash          uint32       // hash of type; avoids computation in hash tables
	_             uint8        // unused/padding
	align         uint8        // alignment of variable with this type
	fieldAlign    uint8        // alignment of struct field with this type
	kind          uint8        // enumeration for C
	alg           *uintptr     // algorithm table (../runtime/runtime.h:/Alg)
	gc            uintptr      // garbage collection data
	string        *string      // string form; unnecessary but undeniably useful
	*uncommonType              // (relatively) uncommon fields
	ptrToThis     *runtimeType // pointer to this type, if used in binary or has methods
}

// Method on non-interface type
type method struct {
	name    *string        // name of method
	pkgPath *string        // nil for exported Names; otherwise import path
	mtyp    *runtimeType   // method type (without receiver)
	typ     *runtimeType   // .(*FuncType) underneath (with receiver)
	ifn     unsafe.Pointer // fn used in interface call (one-word receiver)
	tfn     unsafe.Pointer // fn used for normal method call
}

// uncommonType is present only for types with names or methods
// (if T is a named type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe an unnamed type with no methods.
type uncommonType struct {
	name    *string  // name of type
	pkgPath *string  // import path; nil for built-in types like int, string
	methods []method // methods associated with type
}

// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)

// arrayType represents a fixed array type.
type arrayType struct {
	commonType `reflect:"array"`
	elem       *runtimeType // array element type
	slice      *runtimeType // slice type
	len        uintptr
}

// chanType represents a channel type.
type chanType struct {
	commonType `reflect:"chan"`
	elem       *runtimeType // channel element type
	dir        uintptr      // channel direction (ChanDir)
}

// funcType represents a function type.
type funcType struct {
	commonType `reflect:"func"`
	dotdotdot  bool           // last input parameter is ...
	in         []*runtimeType // input parameter types
	out        []*runtimeType // output parameter types
}

// imethod represents a method on an interface type
type imethod struct {
	name    *string      // name of method
	pkgPath *string      // nil for exported Names; otherwise import path
	typ     *runtimeType // .(*FuncType) underneath
}

// interfaceType represents an interface type.
type interfaceType struct {
	commonType `reflect:"interface"`
	methods    []imethod // sorted by hash
}

// mapType represents a map type.
type mapType struct {
	commonType `reflect:"map"`
	key        *runtimeType // map key type
	elem       *runtimeType // map element (value) type
}

// ptrType represents a pointer type.
type ptrType struct {
	commonType `reflect:"ptr"`
	elem       *runtimeType // pointer element (pointed at) type
}

// sliceType represents a slice type.
type sliceType struct {
	commonType `reflect:"slice"`
	elem       *runtimeType // slice element type
}

// Struct field
type structField struct {
	name    *string      // nil for embedded fields
	pkgPath *string      // nil for exported Names; otherwise import path
	typ     *runtimeType // type of field
	tag     *string      // nil if no tag
	offset  uintptr      // byte offset of field within struct
}

// structType represents a struct type.
type structType struct {
	commonType `reflect:"struct"`
	fields     []structField // sorted by offset
}

/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */

// Method represents a single method.
type Method struct {
	// Name is the method name.
	// PkgPath is the package path that qualifies a lower case (unexported)
	// method name. It is empty for upper case (exported) method names.
	// The combination of PkgPath and Name uniquely identifies a method
	// in a method set.
	// See http://golang.org/ref/spec#Uniqueness_of_identifiers
	Name    string
	PkgPath string

	Type  Type  // method type
	Func  Value // func with receiver as first argument
	Index int   // index for Type.Method
}

// High bit says whether type has
// embedded pointers, to help garbage collector.
const kindMask = 0x7f

func (k Kind) String() string {
	if int(k) < len(kindNames) {
		return kindNames[k]
	}
	return "kind" + strconv.Itoa(int(k))
}

var kindNames = []string{
	Invalid:       "invalid",
	Bool:          "bool",
	Int:           "int",
	Int8:          "int8",
	Int16:         "int16",
	Int32:         "int32",
	Int64:         "int64",
	Uint:          "uint",
	Uint8:         "uint8",
	Uint16:        "uint16",
	Uint32:        "uint32",
	Uint64:        "uint64",
	Uintptr:       "uintptr",
	Float32:       "float32",
	Float64:       "float64",
	Complex64:     "complex64",
	Complex128:    "complex128",
	Array:         "array",
	Chan:          "chan",
	Func:          "func",
	Interface:     "interface",
	Map:           "map",
	Ptr:           "ptr",
	Slice:         "slice",
	String:        "string",
	Struct:        "struct",
	UnsafePointer: "unsafe.Pointer",
}

func (t *uncommonType) uncommon() *uncommonType {
	return t
}

// PkgPath tolerates a nil receiver: unnamed types have no uncommonType.
func (t *uncommonType) PkgPath() string {
	if t == nil || t.pkgPath == nil {
		return ""
	}
	return *t.pkgPath
}

// Name tolerates a nil receiver: unnamed types have no uncommonType.
func (t *uncommonType) Name() string {
	if t == nil || t.name == nil {
		return ""
	}
	return *t.name
}

// toType converts a *commonType to the public Type interface,
// preserving a literal nil rather than a typed-nil interface.
func (t *commonType) toType() Type {
	if t == nil {
		return nil
	}
	return t
}

func (t *commonType) String() string { return *t.string }

func (t *commonType) Size() uintptr { return t.size }

func (t *commonType) Bits() int {
	if t == nil {
		panic("reflect: Bits of nil Type")
	}
	k := t.Kind()
	if k < Int || k > Complex128 {
		panic("reflect: Bits of non-arithmetic Type " + t.String())
	}
	return int(t.size) * 8
}

func (t *commonType) Align() int { return int(t.align) }

func (t *commonType) FieldAlign() int { return int(t.fieldAlign) }

func (t *commonType) Kind() Kind { return Kind(t.kind & kindMask) }

func (t *commonType) common() *commonType { return t }

func (t *uncommonType) Method(i int) (m Method) {
	if t == nil || i < 0 || i >= len(t.methods) {
		panic("reflect: Method index out of range")
	}
	p := &t.methods[i]
	if p.name != nil {
		m.Name = *p.name
	}
	fl := flag(Func) << flagKindShift
	if p.pkgPath != nil {
		m.PkgPath = *p.pkgPath
		fl |= flagRO // unexported method: mark Value read-only
	}
	mt := toCommonType(p.typ)
	m.Type = mt
	fn := p.tfn
	m.Func = Value{mt, fn, fl}
	m.Index = i
	return
}

func (t *uncommonType) NumMethod() int {
	if t == nil {
		return 0
	}
	return len(t.methods)
}

func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *method
	for i := range t.methods {
		p = &t.methods[i]
		if p.name != nil && *p.name == name {
			return t.Method(i), true
		}
	}
	return
}

// TODO(rsc): 6g supplies these, but they are not
// as efficient as they could be: they have commonType
// as the receiver instead of *commonType.
func (t *commonType) NumMethod() int {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.NumMethod()
	}
	return t.uncommonType.NumMethod()
}

func (t *commonType) Method(i int) (m Method) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.Method(i)
	}
	return t.uncommonType.Method(i)
}

func (t *commonType) MethodByName(name string) (m Method, ok bool) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.MethodByName(name)
	}
	return t.uncommonType.MethodByName(name)
}

func (t *commonType) PkgPath() string {
	return t.uncommonType.PkgPath()
}

func (t *commonType) Name() string {
	return t.uncommonType.Name()
}

func (t *commonType) ChanDir() ChanDir {
	if t.Kind() != Chan {
		panic("reflect: ChanDir of non-chan type")
	}
	tt := (*chanType)(unsafe.Pointer(t))
	return ChanDir(tt.dir)
}

func (t *commonType) IsVariadic() bool {
	if t.Kind() != Func {
		panic("reflect: IsVariadic of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return tt.dotdotdot
}

func (t *commonType) Elem() Type {
	switch t.Kind() {
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Chan:
		tt := (*chanType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Map:
		tt := (*mapType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Ptr:
		tt := (*ptrType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Slice:
		tt := (*sliceType)(unsafe.Pointer(t))
		return toType(tt.elem)
	}
	panic("reflect: Elem of invalid type")
}

func (t *commonType) Field(i int) StructField {
	if t.Kind() != Struct {
		panic("reflect: Field of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.Field(i)
}

func (t *commonType) FieldByIndex(index []int) StructField {
	if t.Kind() != Struct {
		panic("reflect: FieldByIndex of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByIndex(index)
}

func (t *commonType) FieldByName(name string) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByName of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByName(name)
}

func (t *commonType) FieldByNameFunc(match func(string) bool) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByNameFunc of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByNameFunc(match)
}

func (t *commonType) In(i int) Type {
	if t.Kind() != Func {
		panic("reflect: In of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return toType(tt.in[i])
}

func (t *commonType) Key() Type {
	if t.Kind() != Map {
		panic("reflect: Key of non-map type")
	}
	tt := (*mapType)(unsafe.Pointer(t))
	return toType(tt.key)
}

func (t *commonType) Len() int {
	if t.Kind() != Array {
		panic("reflect: Len of non-array type")
	}
	tt := (*arrayType)(unsafe.Pointer(t))
	return int(tt.len)
}

func (t *commonType) NumField() int {
	if t.Kind() != Struct {
		panic("reflect: NumField of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return len(tt.fields)
}

func (t *commonType) NumIn() int {
	if t.Kind() != Func {
		panic("reflect: NumIn of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return len(tt.in)
}

func (t *commonType) NumOut() int {
	if t.Kind() != Func {
		panic("reflect: NumOut of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return len(tt.out)
}
func (t *commonType) Out(i int) Type {
	if t.Kind() != Func {
		panic("reflect: Out of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return toType(tt.out[i])
}

func (d ChanDir) String() string {
	switch d {
	case SendDir:
		return "chan<-"
	case RecvDir:
		return "<-chan"
	case BothDir:
		return "chan"
	}
	return "ChanDir" + strconv.Itoa(int(d))
}

// Method returns the i'th method in the type's method set.
func (t *interfaceType) Method(i int) (m Method) {
	if i < 0 || i >= len(t.methods) {
		return
	}
	p := &t.methods[i]
	m.Name = *p.name
	if p.pkgPath != nil {
		m.PkgPath = *p.pkgPath
	}
	m.Type = toType(p.typ)
	m.Index = i
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.methods) }

// MethodByName returns the method with the given name in the type's method set.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *imethod
	for i := range t.methods {
		p = &t.methods[i]
		if *p.name == name {
			return t.Method(i), true
		}
	}
	return
}

// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See http://golang.org/ref/spec#Uniqueness_of_identifiers
	Name    string
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an anonymous field
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified.
func (tag StructTag) Get(key string) string {
	for tag != "" {
		// skip leading space
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// scan to colon.
		// a space or a quote is a syntax error
		i = 0
		for i < len(tag) && tag[i] != ' ' && tag[i] != ':' && tag[i] != '"' {
			i++
		}
		if i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// scan quoted string to find value
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				i++ // skip the escaped character
			}
			i++
		}
		if i >= len(tag) {
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, _ := strconv.Unquote(qvalue)
			return value
		}
	}
	return ""
}

// Field returns the i'th struct field.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		return
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	if p.name != nil {
		f.Name = *p.name
	} else {
		// Anonymous (embedded) field: the name comes from the type,
		// dereferencing a pointer type if necessary.
		t := f.Type
		if t.Kind() == Ptr {
			t = t.Elem()
		}
		f.Name = t.Name()
		f.Anonymous = true
	}
	if p.pkgPath != nil {
		f.PkgPath = *p.pkgPath
	}
	if p.tag != nil {
		f.Tag = StructTag(*p.tag)
	}
	f.Offset = p.offset

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) { f.Type = Type(t.toType()) for i, x := range index { if i > 0 { ft := f.Type if ft.Kind() == Ptr && ft.Elem().Kind() == Struct { ft = ft.Elem() } f.Type = ft } f = f.Type.Field(x) } return } // A fieldScan represents an item on the fieldByNameFunc scan work list. type fieldScan struct { typ *structType index []int } // FieldByNameFunc returns the struct field with a name that satisfies the // match function and a boolean to indicate if the field was found. func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) { // This uses the same condition that the Go language does: there must be a unique instance // of the match at a given depth level. If there are multiple instances of a match at the // same depth, they annihilate each other and inhibit any possible match at a lower level. // The algorithm is breadth first search, one depth level at a time. // The current and next slices are work queues: // current lists the fields to visit on this depth level, // and next lists the fields on the next lower level. current := []fieldScan{} next := []fieldScan{{typ: t}} // nextCount records the number of times an embedded type has been // encountered and considered for queueing in the 'next' slice. // We only queue the first one, but we increment the count on each. // If a struct type T can be reached more than once at a given depth level, // then it annihilates itself and need not be considered at all when we // process that next depth level. var nextCount map[*structType]int // visited records the structs that have been considered already. // Embedded pointer fields can create cycles in the graph of // reachable embedded types; visited avoids following those cycles. // It also avoids duplicated effort: if we didn't find the field in an // embedded type T at level 2, we won't find it in one at level 4 either. 
visited := map[*structType]bool{} for len(next) > 0 { current, next = next, current[:0] count := nextCount nextCount = nil // Process all the fields at this depth, now listed in 'current'. // The loop queues embedded fields found in 'next', for processing during the next // iteration. The multiplicity of the 'current' field counts is recorded // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. for _, scan := range current { t := scan.typ if visited[t] { // We've looked through this type before, at a higher level. // That higher level would shadow the lower level we're now at, // so this one can't be useful to us. Ignore it. continue } visited[t] = true for i := range t.fields { f := &t.fields[i] // Find name and type for field f. var fname string var ntyp *commonType if f.name != nil { fname = *f.name } else { // Anonymous field of type T or *T. // Name taken from type. ntyp = toCommonType(f.typ) if ntyp.Kind() == Ptr { ntyp = ntyp.Elem().common() } fname = ntyp.Name() } // Does it match? if match(fname) { // Potential match if count[t] > 1 || ok { // Name appeared multiple times at this level: annihilate. return StructField{}, false } result = t.Field(i) result.Index = nil result.Index = append(result.Index, scan.index...) result.Index = append(result.Index, i) ok = true continue } // Queue embedded struct fields for processing with next level, // but only if we haven't seen a match yet at this level and only // if the embedded types haven't alredy been queued. if ok || ntyp == nil || ntyp.Kind() != Struct { continue } styp := (*structType)(unsafe.Pointer(ntyp)) if nextCount[styp] > 0 { nextCount[styp]++ continue } if nextCount == nil { nextCount = map[*structType]int{} } nextCount[styp] = 1 var index []int index = append(index, scan.index...) 
index = append(index, i) next = append(next, fieldScan{styp, index}) } } if ok { break } } return } // FieldByName returns the struct field with the given name // and a boolean to indicate if the field was found. func (t *structType) FieldByName(name string) (f StructField, present bool) { // Quick check for top-level name, or struct without anonymous fields. hasAnon := false if name != "" { for i := range t.fields { tf := &t.fields[i] if tf.name == nil { hasAnon = true continue } if *tf.name == name { return t.Field(i), true } } } if !hasAnon { return } return t.FieldByNameFunc(func(s string) bool { return s == name }) } // Convert runtime type to reflect type. func toCommonType(p *runtimeType) *commonType { if p == nil { return nil } return (*p).(*commonType) } func toType(p *runtimeType) Type { if p == nil { return nil } return (*p).(*commonType) } // TypeOf returns the reflection Type of the value in the interface{}. // TypeOf(nil) returns nil. func TypeOf(i interface{}) Type { eface := *(*emptyInterface)(unsafe.Pointer(&i)) return toType(eface.typ) } // ptrMap is the cache for PtrTo. var ptrMap struct { sync.RWMutex m map[*commonType]*ptrType } func (t *commonType) runtimeType() *runtimeType { // The runtimeType always precedes the commonType in memory. // Adjust pointer to find it. var rt struct { i runtimeType ct commonType } return (*runtimeType)(unsafe.Pointer(uintptr(unsafe.Pointer(t)) - unsafe.Offsetof(rt.ct))) } // PtrTo returns the pointer type with element t. // For example, if t represents type Foo, PtrTo(t) represents *Foo. func PtrTo(t Type) Type { return t.(*commonType).ptrTo() } func (ct *commonType) ptrTo() *commonType { if p := ct.ptrToThis; p != nil { return toCommonType(p) } // Otherwise, synthesize one. // This only happens for pointers with no methods. // We keep the mapping in a map on the side, because // this operation is rare and a separate map lets us keep // the type structures in read-only memory. 
ptrMap.RLock() if m := ptrMap.m; m != nil { if p := m[ct]; p != nil { ptrMap.RUnlock() return &p.commonType } } ptrMap.RUnlock() ptrMap.Lock() if ptrMap.m == nil { ptrMap.m = make(map[*commonType]*ptrType) } p := ptrMap.m[ct] if p != nil { // some other goroutine won the race and created it ptrMap.Unlock() return &p.commonType } var rt struct { i runtimeType ptrType } rt.i = &rt.commonType // initialize p using *byte's ptrType as a prototype. p = &rt.ptrType var ibyte interface{} = (*byte)(nil) bp := (*ptrType)(unsafe.Pointer((**(**runtimeType)(unsafe.Pointer(&ibyte))).(*commonType))) *p = *bp s := "*" + *ct.string p.string = &s // For the type structures linked into the binary, the // compiler provides a good hash of the string. // Create a good hash for the new string by using // the FNV-1 hash's mixing function to combine the // old hash and the new "*". p.hash = ct.hash*16777619 ^ '*' p.uncommonType = nil p.ptrToThis = nil p.elem = (*runtimeType)(unsafe.Pointer(uintptr(unsafe.Pointer(ct)) - unsafe.Offsetof(rt.ptrType))) ptrMap.m[ct] = p ptrMap.Unlock() return &p.commonType } func (t *commonType) Implements(u Type) bool { if u == nil { panic("reflect: nil type passed to Type.Implements") } if u.Kind() != Interface { panic("reflect: non-interface type passed to Type.Implements") } return implements(u.(*commonType), t) } func (t *commonType) AssignableTo(u Type) bool { if u == nil { panic("reflect: nil type passed to Type.AssignableTo") } uu := u.(*commonType) return directlyAssignable(uu, t) || implements(uu, t) } func (t *commonType) ConvertibleTo(u Type) bool { if u == nil { panic("reflect: nil type passed to Type.AssignableTo") } uu := u.(*commonType) return convertOp(uu, t) != nil } // implements returns true if the type V implements the interface type T. 
func implements(T, V *commonType) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.methods) == 0 {
		// The empty interface is implemented by everything.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.c.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			vm := &v.methods[j]
			if vm.name == tm.name && vm.pkgPath == tm.pkgPath && vm.typ == tm.typ {
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.uncommon()
	if v == nil {
		// V has no methods at all, so it cannot satisfy a
		// non-empty interface.
		return false
	}
	i := 0
	for j := 0; j < len(v.methods); j++ {
		tm := &t.methods[i]
		vm := &v.methods[j]
		if vm.name == tm.name && vm.pkgPath == tm.pkgPath && vm.mtyp == tm.typ {
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}

// directlyAssignable returns true if a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// http://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *commonType) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must be unnamed
	// and they must have the same kind.
	if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
		return false
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V)
}

func haveIdenticalUnderlyingType(T, V *commonType) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Elem() == V.Elem() && T.Len() == V.Len()

	case Chan:
		// Special case:
		// x is a bidirectional channel value, T is a channel type,
		// and x's type V and T have identical element types.
		if V.ChanDir() == BothDir && T.Elem() == V.Elem() {
			return true
		}

		// Otherwise continue test for identical underlying type.
		return V.ChanDir() == T.ChanDir() && T.Elem() == V.Elem()

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
			return false
		}
		for i, typ := range t.in {
			if typ != v.in[i] {
				return false
			}
		}
		for i, typ := range t.out {
			if typ != v.out[i] {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return T.Key() == V.Key() && T.Elem() == V.Elem()

	case Ptr, Slice:
		return T.Elem() == V.Elem()

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			if tf.name != vf.name || tf.pkgPath != vf.pkgPath ||
				tf.typ != vf.typ || tf.tag != vf.tag || tf.offset != vf.offset {
				return false
			}
		}
		return true
	}

	return false
}

// NOTE(review): the following lines appear to be a stray commit message
// pasted into the file (this text is not valid Go) — confirm and remove.
reflect: fix mistake in error message Pointed out by James Chalfant after submit of CL 6500065. TBR=golang-dev CC=golang-dev http://codereview.appspot.com/6543067

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types.  The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// http://golang.org/doc/articles/laws_of_reflection.html
package reflect

import (
	"strconv"
	"sync"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types.  Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods.  Calling a method
// inappropriate to the kind of type causes a run-time panic.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of methods in the type's method set.
	NumMethod() int

	// Name returns the type's name within its package.
	// It returns an empty string for unnamed types.
	Name() string

	// PkgPath returns a named type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
	// the package path will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types.  To test for equality,
	// compare the Types directly.
	String() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements returns true if the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo returns true if a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo returns true if a value of the type is convertible to type u.
	ConvertibleTo(u Type) bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Ptr: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic returns true if a function type's final input parameter
	// is a "..." parameter.  If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ... float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence.  It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the first struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	// Unexported methods keep Type implementations internal to
	// this package.
	runtimeType() *runtimeType
	common() *commonType
	uncommon() *uncommonType
}

/*
 * These data structures are known to the compiler (../../cmd/gc/reflect.c).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.h.
 */

// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Ptr
	Slice
	String
	Struct
	UnsafePointer
)

// The compiler can only construct empty interface values at
// compile time; non-empty interface values get created
// during initialization.  Type is an empty interface
// so that the compiler can lay out references as data.
// The underlying type is *reflect.ArrayType and so on.
type runtimeType interface{}

// commonType is the common implementation of most values.
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
type commonType struct {
	size          uintptr      // size in bytes
	hash          uint32       // hash of type; avoids computation in hash tables
	_             uint8        // unused/padding
	align         uint8        // alignment of variable with this type
	fieldAlign    uint8        // alignment of struct field with this type
	kind          uint8        // enumeration for C
	alg           *uintptr     // algorithm table (../runtime/runtime.h:/Alg)
	gc            uintptr      // garbage collection data
	string        *string      // string form; unnecessary but undeniably useful
	*uncommonType              // (relatively) uncommon fields
	ptrToThis     *runtimeType // pointer to this type, if used in binary or has methods
}

// Method on non-interface type
type method struct {
	name    *string        // name of method
	pkgPath *string        // nil for exported Names; otherwise import path
	mtyp    *runtimeType   // method type (without receiver)
	typ     *runtimeType   // .(*FuncType) underneath (with receiver)
	ifn     unsafe.Pointer // fn used in interface call (one-word receiver)
	tfn     unsafe.Pointer // fn used for normal method call
}

// uncommonType is present only for types with names or methods
// (if T is a named type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe an unnamed type with no methods.
type uncommonType struct {
	name    *string  // name of type
	pkgPath *string  // import path; nil for built-in types like int, string
	methods []method // methods associated with type
}

// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)

// arrayType represents a fixed array type.
type arrayType struct {
	commonType `reflect:"array"`
	elem       *runtimeType // array element type
	slice      *runtimeType // slice type
	len        uintptr
}

// chanType represents a channel type.
type chanType struct {
	commonType `reflect:"chan"`
	elem       *runtimeType // channel element type
	dir        uintptr      // channel direction (ChanDir)
}

// funcType represents a function type.
type funcType struct {
	commonType `reflect:"func"`
	dotdotdot  bool           // last input parameter is ...
	in         []*runtimeType // input parameter types
	out        []*runtimeType // output parameter types
}

// imethod represents a method on an interface type
type imethod struct {
	name    *string      // name of method
	pkgPath *string      // nil for exported Names; otherwise import path
	typ     *runtimeType // .(*FuncType) underneath
}

// interfaceType represents an interface type.
type interfaceType struct {
	commonType `reflect:"interface"`
	methods    []imethod // sorted by hash
}

// mapType represents a map type.
type mapType struct {
	commonType `reflect:"map"`
	key        *runtimeType // map key type
	elem       *runtimeType // map element (value) type
}

// ptrType represents a pointer type.
type ptrType struct {
	commonType `reflect:"ptr"`
	elem       *runtimeType // pointer element (pointed at) type
}

// sliceType represents a slice type.
type sliceType struct {
	commonType `reflect:"slice"`
	elem       *runtimeType // slice element type
}

// Struct field
type structField struct {
	name    *string      // nil for embedded fields
	pkgPath *string      // nil for exported Names; otherwise import path
	typ     *runtimeType // type of field
	tag     *string      // nil if no tag
	offset  uintptr      // byte offset of field within struct
}

// structType represents a struct type.
type structType struct {
	commonType `reflect:"struct"`
	fields     []structField // sorted by offset
}

/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */

// Method represents a single method.
type Method struct {
	// Name is the method name.
	// PkgPath is the package path that qualifies a lower case (unexported)
	// method name.  It is empty for upper case (exported) method names.
	// The combination of PkgPath and Name uniquely identifies a method
	// in a method set.
	// See http://golang.org/ref/spec#Uniqueness_of_identifiers
	Name    string
	PkgPath string

	Type  Type  // method type
	Func  Value // func with receiver as first argument
	Index int   // index for Type.Method
}

// High bit says whether type has
// embedded pointers, to help the garbage collector.
const kindMask = 0x7f

// String returns the name of k, or "kind" plus the numeric value for
// kinds outside the named range.
func (k Kind) String() string {
	if int(k) < len(kindNames) {
		return kindNames[k]
	}
	return "kind" + strconv.Itoa(int(k))
}

var kindNames = []string{
	Invalid:       "invalid",
	Bool:          "bool",
	Int:           "int",
	Int8:          "int8",
	Int16:         "int16",
	Int32:         "int32",
	Int64:         "int64",
	Uint:          "uint",
	Uint8:         "uint8",
	Uint16:        "uint16",
	Uint32:        "uint32",
	Uint64:        "uint64",
	Uintptr:       "uintptr",
	Float32:       "float32",
	Float64:       "float64",
	Complex64:     "complex64",
	Complex128:    "complex128",
	Array:         "array",
	Chan:          "chan",
	Func:          "func",
	Interface:     "interface",
	Map:           "map",
	Ptr:           "ptr",
	Slice:         "slice",
	String:        "string",
	Struct:        "struct",
	UnsafePointer: "unsafe.Pointer",
}

func (t *uncommonType) uncommon() *uncommonType {
	return t
}

// PkgPath returns the import path of the type, or "" for a nil
// receiver or a built-in type.
func (t *uncommonType) PkgPath() string {
	if t == nil || t.pkgPath == nil {
		return ""
	}
	return *t.pkgPath
}

// Name returns the type's name, or "" for a nil receiver or an
// unnamed type.
func (t *uncommonType) Name() string {
	if t == nil || t.name == nil {
		return ""
	}
	return *t.name
}

func (t *commonType) toType() Type {
	// A nil *commonType must become a nil Type interface value,
	// not an interface holding a nil pointer.
	if t == nil {
		return nil
	}
	return t
}

func (t *commonType) String() string { return *t.string }

func (t *commonType) Size() uintptr { return t.size }

// Bits returns the size of the type in bits.
// It panics for non-arithmetic kinds.
func (t *commonType) Bits() int {
	if t == nil {
		panic("reflect: Bits of nil Type")
	}
	k := t.Kind()
	if k < Int || k > Complex128 {
		panic("reflect: Bits of non-arithmetic Type " + t.String())
	}
	return int(t.size) * 8
}

func (t *commonType) Align() int { return int(t.align) }

func (t *commonType) FieldAlign() int { return int(t.fieldAlign) }

// Kind masks off the high flag bit (see kindMask) to recover the Kind.
func (t *commonType) Kind() Kind { return Kind(t.kind & kindMask) }

func (t *commonType) common() *commonType { return t }

func (t *uncommonType) Method(i int) (m Method) {
	if t == nil || i < 0 || i >= len(t.methods) {
		panic("reflect: Method index out of range")
	}
	p := &t.methods[i]
	if p.name != nil {
		m.Name = *p.name
	}
	fl := flag(Func) << flagKindShift
	if p.pkgPath != nil {
		// Unexported method: mark the Func value read-only.
		m.PkgPath = *p.pkgPath
		fl |= flagRO
	}
	mt := toCommonType(p.typ)
	m.Type = mt
	fn := p.tfn
	m.Func = Value{mt, fn, fl}
	m.Index = i
	return
}

func (t *uncommonType) NumMethod() int {
	if t == nil {
		return 0
	}
	return len(t.methods)
}

func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *method
	for i := range t.methods {
		p = &t.methods[i]
		if p.name != nil && *p.name == name {
			return t.Method(i), true
		}
	}
	return
}

// TODO(rsc): 6g supplies these, but they are not
// as efficient as they could be: they have commonType
// as the receiver instead of *commonType.
func (t *commonType) NumMethod() int {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.NumMethod()
	}
	return t.uncommonType.NumMethod()
}

func (t *commonType) Method(i int) (m Method) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.Method(i)
	}
	return t.uncommonType.Method(i)
}

func (t *commonType) MethodByName(name string) (m Method, ok bool) {
	if t.Kind() == Interface {
		tt := (*interfaceType)(unsafe.Pointer(t))
		return tt.MethodByName(name)
	}
	return t.uncommonType.MethodByName(name)
}

func (t *commonType) PkgPath() string {
	return t.uncommonType.PkgPath()
}

func (t *commonType) Name() string {
	return t.uncommonType.Name()
}

// ChanDir returns the channel direction.
// It panics if t's Kind is not Chan.
func (t *commonType) ChanDir() ChanDir {
	if t.Kind() != Chan {
		panic("reflect: ChanDir of non-chan type")
	}
	tt := (*chanType)(unsafe.Pointer(t))
	return ChanDir(tt.dir)
}

// IsVariadic reports whether the final input parameter is "...".
// It panics if t's Kind is not Func.
func (t *commonType) IsVariadic() bool {
	if t.Kind() != Func {
		panic("reflect: IsVariadic of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return tt.dotdotdot
}

// Elem returns the element type.
// It panics unless t's Kind is Array, Chan, Map, Ptr, or Slice.
func (t *commonType) Elem() Type {
	switch t.Kind() {
	case Array:
		tt := (*arrayType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Chan:
		tt := (*chanType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Map:
		tt := (*mapType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Ptr:
		tt := (*ptrType)(unsafe.Pointer(t))
		return toType(tt.elem)
	case Slice:
		tt := (*sliceType)(unsafe.Pointer(t))
		return toType(tt.elem)
	}
	panic("reflect: Elem of invalid type")
}

func (t *commonType) Field(i int) StructField {
	if t.Kind() != Struct {
		panic("reflect: Field of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.Field(i)
}

func (t *commonType) FieldByIndex(index []int) StructField {
	if t.Kind() != Struct {
		panic("reflect: FieldByIndex of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByIndex(index)
}

func (t *commonType) FieldByName(name string) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByName of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByName(name)
}

func (t *commonType) FieldByNameFunc(match func(string) bool) (StructField, bool) {
	if t.Kind() != Struct {
		panic("reflect: FieldByNameFunc of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return tt.FieldByNameFunc(match)
}

func (t *commonType) In(i int) Type {
	if t.Kind() != Func {
		panic("reflect: In of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return toType(tt.in[i])
}

func (t *commonType) Key() Type {
	if t.Kind() != Map {
		panic("reflect: Key of non-map type")
	}
	tt := (*mapType)(unsafe.Pointer(t))
	return toType(tt.key)
}

func (t *commonType) Len() int {
	if t.Kind() != Array {
		panic("reflect: Len of non-array type")
	}
	tt := (*arrayType)(unsafe.Pointer(t))
	return int(tt.len)
}

func (t *commonType) NumField() int {
	if t.Kind() != Struct {
		panic("reflect: NumField of non-struct type")
	}
	tt := (*structType)(unsafe.Pointer(t))
	return len(tt.fields)
}

func (t *commonType) NumIn() int {
	if t.Kind() != Func {
		panic("reflect: NumIn of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return len(tt.in)
}

func (t *commonType) NumOut() int {
	if t.Kind() != Func {
		panic("reflect: NumOut of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return len(tt.out)
}

func (t *commonType) Out(i int) Type {
	if t.Kind() != Func {
		panic("reflect: Out of non-func type")
	}
	tt := (*funcType)(unsafe.Pointer(t))
	return toType(tt.out[i])
}

// String returns the arrow notation for a channel direction:
// "chan<-", "<-chan", or "chan".
func (d ChanDir) String() string {
	switch d {
	case SendDir:
		return "chan<-"
	case RecvDir:
		return "<-chan"
	case BothDir:
		return "chan"
	}
	return "ChanDir" + strconv.Itoa(int(d))
}

// Method returns the i'th method in the type's method set.
func (t *interfaceType) Method(i int) (m Method) {
	if i < 0 || i >= len(t.methods) {
		return
	}
	p := &t.methods[i]
	m.Name = *p.name
	if p.pkgPath != nil {
		m.PkgPath = *p.pkgPath
	}
	m.Type = toType(p.typ)
	m.Index = i
	return
}

// NumMethod returns the number of interface methods in the type's method set.
func (t *interfaceType) NumMethod() int { return len(t.methods) }

// MethodByName returns the method with the given name in the type's
// method set and a boolean indicating whether the method was found.
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
	if t == nil {
		return
	}
	var p *imethod
	for i := range t.methods {
		p = &t.methods[i]
		if *p.name == name {
			return t.Method(i), true
		}
	}
	return
}

// A StructField describes a single field in a struct.
type StructField struct {
	// Name is the field name.
	// PkgPath is the package path that qualifies a lower case (unexported)
	// field name. It is empty for upper case (exported) field names.
	// See http://golang.org/ref/spec#Uniqueness_of_identifiers
	Name    string
	PkgPath string

	Type      Type      // field type
	Tag       StructTag // field tag string
	Offset    uintptr   // offset within struct, in bytes
	Index     []int     // index sequence for Type.FieldByIndex
	Anonymous bool      // is an anonymous field
}

// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':').  Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified.
func (tag StructTag) Get(key string) string {
	for tag != "" {
		// skip leading space
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// scan to colon.
		// a space or a quote is a syntax error
		i = 0
		for i < len(tag) && tag[i] != ' ' && tag[i] != ':' && tag[i] != '"' {
			i++
		}
		if i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// scan quoted string to find value
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				// skip the character escaped by the backslash
				i++
			}
			i++
		}
		if i >= len(tag) {
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, _ := strconv.Unquote(qvalue)
			return value
		}
	}
	return ""
}

// Field returns the i'th struct field.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		// Out-of-range index: return the zero StructField.
		return
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	if p.name != nil {
		f.Name = *p.name
	} else {
		// Anonymous (embedded) field: the name is taken from the type,
		// dereferencing a pointer type first.
		t := f.Type
		if t.Kind() == Ptr {
			t = t.Elem()
		}
		f.Name = t.Name()
		f.Anonymous = true
	}
	if p.pkgPath != nil {
		f.PkgPath = *p.pkgPath
	}
	if p.tag != nil {
		f.Tag = StructTag(*p.tag)
	}
	f.Offset = p.offset

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type.  It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect.  One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance.  This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = Type(t.toType())
	for i, x := range index {
		if i > 0 {
			// After the first hop, step through an embedded
			// pointer-to-struct automatically so index paths
			// produced for *T embedding keep working.
			ft := f.Type
			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}

// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType // struct type to examine
	index []int       // index path from the root struct to typ
}

// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.fields {
				f := &t.fields[i]
				// Find name and type for field f.
				var fname string
				var ntyp *commonType
				if f.name != nil {
					fname = *f.name
				} else {
					// Anonymous field of type T or *T.
					// Name taken from type.
					ntyp = toCommonType(f.typ)
					if ntyp.Kind() == Ptr {
						ntyp = ntyp.Elem().common()
					}
					fname = ntyp.Name()
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != Struct {
					continue
				}
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp]++
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		if ok {
			break
		}
	}
	return
}

// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without anonymous fields.
	hasAnon := false
	if name != "" {
		for i := range t.fields {
			tf := &t.fields[i]
			if tf.name == nil {
				hasAnon = true
				continue
			}
			if *tf.name == name {
				return t.Field(i), true
			}
		}
	}
	if !hasAnon {
		return
	}
	// Fall back to the full breadth-first embedded-field search.
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}

// Convert runtime type to reflect type.
func toCommonType(p *runtimeType) *commonType {
	if p == nil {
		return nil
	}
	return (*p).(*commonType)
}

// toType converts a runtime type to a reflect Type, mapping nil to nil.
func toType(p *runtimeType) Type {
	if p == nil {
		return nil
	}
	return (*p).(*commonType)
}

// TypeOf returns the reflection Type of the value in the interface{}.
// TypeOf(nil) returns nil.
func TypeOf(i interface{}) Type {
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	return toType(eface.typ)
}

// ptrMap is the cache for PtrTo.
var ptrMap struct {
	sync.RWMutex
	m map[*commonType]*ptrType
}

func (t *commonType) runtimeType() *runtimeType {
	// The runtimeType always precedes the commonType in memory.
	// Adjust pointer to find it.
	var rt struct {
		i  runtimeType
		ct commonType
	}
	return (*runtimeType)(unsafe.Pointer(uintptr(unsafe.Pointer(t)) - unsafe.Offsetof(rt.ct)))
}

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
	return t.(*commonType).ptrTo()
}

func (ct *commonType) ptrTo() *commonType {
	// Fast path: the linker already emitted the pointer type.
	if p := ct.ptrToThis; p != nil {
		return toCommonType(p)
	}

	// Otherwise, synthesize one.
	// This only happens for pointers with no methods.
	// We keep the mapping in a map on the side, because
	// this operation is rare and a separate map lets us keep
	// the type structures in read-only memory.
	ptrMap.RLock()
	if m := ptrMap.m; m != nil {
		if p := m[ct]; p != nil {
			ptrMap.RUnlock()
			return &p.commonType
		}
	}
	ptrMap.RUnlock()
	ptrMap.Lock()
	if ptrMap.m == nil {
		ptrMap.m = make(map[*commonType]*ptrType)
	}
	p := ptrMap.m[ct]
	if p != nil {
		// some other goroutine won the race and created it
		ptrMap.Unlock()
		return &p.commonType
	}

	var rt struct {
		i runtimeType
		ptrType
	}
	rt.i = &rt.commonType

	// initialize p using *byte's ptrType as a prototype.
	p = &rt.ptrType
	var ibyte interface{} = (*byte)(nil)
	bp := (*ptrType)(unsafe.Pointer((**(**runtimeType)(unsafe.Pointer(&ibyte))).(*commonType)))
	*p = *bp

	s := "*" + *ct.string
	p.string = &s

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	p.hash = ct.hash*16777619 ^ '*'

	p.uncommonType = nil
	p.ptrToThis = nil
	p.elem = (*runtimeType)(unsafe.Pointer(uintptr(unsafe.Pointer(ct)) - unsafe.Offsetof(rt.ptrType)))

	ptrMap.m[ct] = p
	ptrMap.Unlock()
	return &p.commonType
}

// Implements reports whether the type implements the interface type u.
// It panics if u is nil or not an interface type.
func (t *commonType) Implements(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.Implements")
	}
	if u.Kind() != Interface {
		panic("reflect: non-interface type passed to Type.Implements")
	}
	return implements(u.(*commonType), t)
}

// AssignableTo reports whether a value of the type is assignable to type u.
func (t *commonType) AssignableTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.AssignableTo")
	}
	uu := u.(*commonType)
	return directlyAssignable(uu, t) || implements(uu, t)
}

// ConvertibleTo reports whether a value of the type is convertible to type u.
func (t *commonType) ConvertibleTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.ConvertibleTo")
	}
	uu := u.(*commonType)
	return convertOp(uu, t) != nil
}

// implements returns true if the type V implements the interface type T.
func implements(T, V *commonType) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.methods) == 0 {
		// The empty interface is implemented by everything.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.c.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			vm := &v.methods[j]
			// Pointer comparison of names/types is valid here because
			// these strings and type descriptors are interned by the
			// compiler/linker.
			if vm.name == tm.name && vm.pkgPath == tm.pkgPath && vm.typ == tm.typ {
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.uncommon()
	if v == nil {
		// V has no methods at all, but T requires at least one.
		return false
	}
	i := 0
	for j := 0; j < len(v.methods); j++ {
		tm := &t.methods[i]
		vm := &v.methods[j]
		if vm.name == tm.name && vm.pkgPath == tm.pkgPath && vm.mtyp == tm.typ {
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}

// directlyAssignable returns true if a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// http://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *commonType) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must be unnamed
	// and they must have the same kind.
	if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
		return false
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V)
}

func haveIdenticalUnderlyingType(T, V *commonType) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Elem() == V.Elem() && T.Len() == V.Len()

	case Chan:
		// Special case:
		// x is a bidirectional channel value, T is a channel type,
		// and x's type V and T have identical element types.
		if V.ChanDir() == BothDir && T.Elem() == V.Elem() {
			return true
		}

		// Otherwise continue test for identical underlying type.
		return V.ChanDir() == T.ChanDir() && T.Elem() == V.Elem()

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		// Identical signatures: same variadicity, arity, and
		// pairwise-identical parameter and result types.
		if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
			return false
		}
		for i, typ := range t.in {
			if typ != v.in[i] {
				return false
			}
		}
		for i, typ := range t.out {
			if typ != v.out[i] {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return T.Key() == V.Key() && T.Elem() == V.Elem()

	case Ptr, Slice:
		return T.Elem() == V.Elem()

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		// Fields must match pairwise in name, package path, type,
		// tag, and offset.
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			if tf.name != vf.name || tf.pkgPath != vf.pkgPath ||
				tf.typ != vf.typ || tf.tag != vf.tag || tf.offset != vf.offset {
				return false
			}
		}
		return true
	}

	return false
}
// Copyright (c) 2015-2019 Magnus Bäck <magnus@noun.se> package main import ( "errors" "fmt" "os" "path/filepath" "runtime" "strings" semver "github.com/Masterminds/semver/v3" "github.com/alecthomas/kingpin" "github.com/magnusbaeck/logstash-filter-verifier/logging" "github.com/magnusbaeck/logstash-filter-verifier/logstash" "github.com/magnusbaeck/logstash-filter-verifier/testcase" "github.com/mattn/go-shellwords" oplogging "github.com/op/go-logging" ) var ( // GitSummary contains "git describe" output and is automatically // populated via linker options when building with govvv. GitSummary = "(unknown)" log = logging.MustGetLogger() loglevels = []string{"CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG"} autoVersion = "auto" defaultKeptEnvVars = []string{ "PATH", } defaultLogstashPaths = []string{ "/opt/logstash/bin/logstash", "/usr/share/logstash/bin/logstash", } // Flags diffCommand = kingpin. Flag("diff-command", "Set the command to run to compare two events. The command will receive the two files to compare as arguments."). Default("diff -u"). String() keptEnvVars = kingpin. Flag("keep-env", fmt.Sprintf("Add this environment variable to the list of variables that will be preserved from the calling process's environment. Initial list of variables: %s", strings.Join(defaultKeptEnvVars, ", "))). PlaceHolder("VARNAME"). Strings() loglevel = kingpin. Flag("loglevel", fmt.Sprintf("Set the desired level of logging (one of: %s).", strings.Join(loglevels, ", "))). Default("WARNING"). Enum(loglevels...) logstashArgs = kingpin. Flag("logstash-arg", "Command line arguments, which are passed to Logstash. Flag and value have to be provided as a flag each, e.g.: --logstash-arg=-n --logstash-arg=InstanceName"). PlaceHolder("ARG"). Strings() logstashOutput = kingpin. Flag("logstash-output", "Print the debug output of logstash."). Default("false"). Bool() logstashPaths = kingpin. 
Flag("logstash-path", "Add a path to the list of Logstash executable paths that will be tried in order (first match is used)."). PlaceHolder("PATH"). Strings() logstashVersion = kingpin. Flag("logstash-version", "The version of Logstash that's being targeted."). PlaceHolder("VERSION"). Default(autoVersion). String() unixSockets = kingpin. Flag("sockets", "Use Unix domain sockets for the communication with Logstash."). Default("false"). Bool() unixSocketCommTimeout = kingpin. Flag("sockets-timeout", "Timeout (duration) for the communication with Logstash via Unix domain sockets. Has no effect unless --sockets is used."). Default("60s"). Duration() // Arguments testcasePath = kingpin. Arg("testcases", "Test case file or a directory containing one or more test case files."). Required(). ExistingFileOrDir() configPaths = kingpin. Arg("config", "Logstash configuration file or a directory containing one or more configuration files."). Required(). ExistingFilesOrDirs() ) // findExecutable examines the passed file paths and returns the first // one that is an existing executable file. func findExecutable(paths []string) (string, error) { for _, p := range paths { stat, err := os.Stat(p) if err != nil { log.Debugf("Logstash path candidate rejected: %s", err) continue } if !stat.Mode().IsRegular() || stat.Mode().Perm()&0111 != 0111 { log.Debugf("Logstash path candidate not an executable regular file: %s", p) continue } log.Debugf("Logstash path candidate accepted: %s", p) return p, nil } return "", fmt.Errorf("no existing executable found among candidates: %s", strings.Join(paths, ", ")) } // runTests runs Logstash with a set of configuration files against a // slice of test cases and compares the actual events against the // expected set. Returns an error if at least one test case fails or // if there's a problem running the tests. 
func runTests(inv *logstash.Invocation, tests []testcase.TestCaseSet, diffCommand []string, keptEnvVars []string) error { ok := true for _, t := range tests { fmt.Printf("Running tests in %s...\n", filepath.Base(t.File)) p, err := logstash.NewProcess(inv, t.Codec, t.InputFields, keptEnvVars) if err != nil { return err } defer p.Release() if err = p.Start(); err != nil { return err } for _, line := range t.InputLines { _, err = p.Input.Write([]byte(line + "\n")) if err != nil { return err } } if err = p.Input.Close(); err != nil { return err } result, err := p.Wait() if err != nil || *logstashOutput { message := getLogstashOutputMessage(result.Output, result.Log) if err != nil { return fmt.Errorf("Error running Logstash: %s.%s", err, message) } userError("%s", message) } if err = t.Compare(result.Events, false, diffCommand); err != nil { userError("Testcase failed, continuing with the rest: %s", err) ok = false } } if !ok { return errors.New("one or more testcases failed") } return nil } // runParallelTests runs multiple set of configuration in a single // instance of Logstash against a slice of test cases and compares // the actual events against the expected set. Returns an error if // at least one test case fails or if there's a problem running the tests. func runParallelTests(inv *logstash.Invocation, tests []testcase.TestCaseSet, diffCommand []string, keptEnvVars []string) error { var testStreams []*logstash.TestStream badCodecs := map[string]string{ "json": "json_lines", "plain": "line", } for _, t := range tests { if repl, ok := badCodecs[t.Codec]; ok { log.Warning( "The testcase file %q uses the %q codec. That codec "+ "will most likely not work as expected when --sockets is used. 
Try %q instead.", t.File, t.Codec, repl) } } for _, t := range tests { ts, err := logstash.NewTestStream(t.Codec, t.InputFields, *unixSocketCommTimeout) if err != nil { logstash.CleanupTestStreams(testStreams) return err } testStreams = append(testStreams, ts) } p, err := logstash.NewParallelProcess(inv, testStreams, keptEnvVars) if err != nil { return err } defer p.Release() if err = p.Start(); err != nil { return err } for i, t := range tests { for _, line := range t.InputLines { _, err = testStreams[i].Write([]byte(line + "\n")) if err != nil { return err } } if err = testStreams[i].Close(); err != nil { return err } } result, err := p.Wait() if err != nil || *logstashOutput { message := getLogstashOutputMessage(result.Output, result.Log) if err != nil { return fmt.Errorf("Error running Logstash: %s.%s", err, message) } userError("%s", message) } ok := true for i, t := range tests { if err = t.Compare(result.Events[i], false, diffCommand); err != nil { userError("Testcase failed, continuing with the rest: %s", err) ok = false } } if !ok { return errors.New("one or more testcases failed") } return nil } // getLogstashOutputMessage examines the test result and prepares a // message describing the process's output, log output, or neither // (resulting in an empty string). func getLogstashOutputMessage(output string, log string) string { var message string if output != "" { message += fmt.Sprintf("\nProcess output:\n%s", output) } else { message += "\nThe process wrote nothing to stdout or stderr." } if log != "" { message += fmt.Sprintf("\nLog:\n%s", log) } else { message += "\nThe process wrote nothing to its logfile." } return message } // prefixedUserError prints an error message to stderr and prefixes it // with the name of the program file (e.g. "logstash-filter-verifier: // something bad happened."). func prefixedUserError(format string, a ...interface{}) { basename := filepath.Base(os.Args[0]) message := fmt.Sprintf(format, a...) 
if strings.HasSuffix(message, "\n") { fmt.Fprintf(os.Stderr, "%s: %s", basename, message) } else { fmt.Fprintf(os.Stderr, "%s: %s\n", basename, message) } } // userError prints an error message to stderr. func userError(format string, a ...interface{}) { if strings.HasSuffix(format, "\n") { fmt.Fprintf(os.Stderr, format, a...) } else { fmt.Fprintf(os.Stderr, format+"\n", a...) } } // mainEntrypoint functions as the main function of the program and // returns the desired exit code. func mainEntrypoint() int { kingpin.Version(fmt.Sprintf("%s %s", kingpin.CommandLine.Name, GitSummary)) kingpin.Parse() level, err := oplogging.LogLevel(*loglevel) if err != nil { prefixedUserError("Bad loglevel: %s", *loglevel) return 1 } logging.SetLevel(level) diffCmd, err := shellwords.NewParser().Parse(*diffCommand) if err != nil { userError("Error parsing diff command %q: %s", *diffCommand, err) return 1 } tests, err := testcase.DiscoverTests(*testcasePath) if err != nil { userError(err.Error()) return 1 } allKeptEnvVars := append(defaultKeptEnvVars, *keptEnvVars...) logstashPath, err := findExecutable(append(*logstashPaths, defaultLogstashPaths...)) if err != nil { userError("Error locating Logstash: %s", err) return 1 } var targetVersion *semver.Version if *logstashVersion == autoVersion { targetVersion, err = logstash.DetectVersion(logstashPath, allKeptEnvVars) if err != nil { userError("Could not auto-detect the Logstash version: %s", err) return 1 } } else { targetVersion, err = semver.NewVersion(*logstashVersion) if err != nil { userError("The given Logstash version %q could not be parsed as a version number (%s).", *logstashVersion, err) return 1 } } inv, err := logstash.NewInvocation(logstashPath, *logstashArgs, targetVersion, *configPaths...) 
if err != nil { userError("An error occurred while setting up the Logstash environment: %s", err) return 1 } defer inv.Release() if *unixSockets { if runtime.GOOS == "windows" { userError("Use of Unix domain sockets for communication with Logstash is not supported on Windows.") return 1 } fmt.Println("Use Unix domain sockets.") if err = runParallelTests(inv, tests, diffCmd, allKeptEnvVars); err != nil { userError(err.Error()) return 1 } } else { if err = runTests(inv, tests, diffCmd, allKeptEnvVars); err != nil { userError(err.Error()) return 1 } } return 0 } func main() { os.Exit(mainEntrypoint()) } do not check Logstash path is executable file if OS is windows // Copyright (c) 2015-2019 Magnus Bäck <magnus@noun.se> package main import ( "errors" "fmt" "os" "path/filepath" "runtime" "strings" semver "github.com/Masterminds/semver/v3" "github.com/alecthomas/kingpin" "github.com/magnusbaeck/logstash-filter-verifier/logging" "github.com/magnusbaeck/logstash-filter-verifier/logstash" "github.com/magnusbaeck/logstash-filter-verifier/testcase" "github.com/mattn/go-shellwords" oplogging "github.com/op/go-logging" ) var ( // GitSummary contains "git describe" output and is automatically // populated via linker options when building with govvv. GitSummary = "(unknown)" log = logging.MustGetLogger() loglevels = []string{"CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG"} autoVersion = "auto" defaultKeptEnvVars = []string{ "PATH", } defaultLogstashPaths = []string{ "/opt/logstash/bin/logstash", "/usr/share/logstash/bin/logstash", } // Flags diffCommand = kingpin. Flag("diff-command", "Set the command to run to compare two events. The command will receive the two files to compare as arguments."). Default("diff -u"). String() keptEnvVars = kingpin. Flag("keep-env", fmt.Sprintf("Add this environment variable to the list of variables that will be preserved from the calling process's environment. Initial list of variables: %s", strings.Join(defaultKeptEnvVars, ", "))). 
PlaceHolder("VARNAME"). Strings() loglevel = kingpin. Flag("loglevel", fmt.Sprintf("Set the desired level of logging (one of: %s).", strings.Join(loglevels, ", "))). Default("WARNING"). Enum(loglevels...) logstashArgs = kingpin. Flag("logstash-arg", "Command line arguments, which are passed to Logstash. Flag and value have to be provided as a flag each, e.g.: --logstash-arg=-n --logstash-arg=InstanceName"). PlaceHolder("ARG"). Strings() logstashOutput = kingpin. Flag("logstash-output", "Print the debug output of logstash."). Default("false"). Bool() logstashPaths = kingpin. Flag("logstash-path", "Add a path to the list of Logstash executable paths that will be tried in order (first match is used)."). PlaceHolder("PATH"). Strings() logstashVersion = kingpin. Flag("logstash-version", "The version of Logstash that's being targeted."). PlaceHolder("VERSION"). Default(autoVersion). String() unixSockets = kingpin. Flag("sockets", "Use Unix domain sockets for the communication with Logstash."). Default("false"). Bool() unixSocketCommTimeout = kingpin. Flag("sockets-timeout", "Timeout (duration) for the communication with Logstash via Unix domain sockets. Has no effect unless --sockets is used."). Default("60s"). Duration() // Arguments testcasePath = kingpin. Arg("testcases", "Test case file or a directory containing one or more test case files."). Required(). ExistingFileOrDir() configPaths = kingpin. Arg("config", "Logstash configuration file or a directory containing one or more configuration files."). Required(). ExistingFilesOrDirs() ) // findExecutable examines the passed file paths and returns the first // one that is an existing executable file. 
func findExecutable(paths []string) (string, error) { for _, p := range paths { stat, err := os.Stat(p) if err != nil { log.Debugf("Logstash path candidate rejected: %s", err) continue } if !stat.Mode().IsRegular() { log.Debugf("Logstash path candidate not a regular file: %s", p) continue } if runtime.GOOS != "windows" && stat.Mode().Perm()&0111 != 0111 { log.Debugf("Logstash path candidate not an executable file: %s", p) continue } log.Debugf("Logstash path candidate accepted: %s", p) return p, nil } return "", fmt.Errorf("no existing executable found among candidates: %s", strings.Join(paths, ", ")) } // runTests runs Logstash with a set of configuration files against a // slice of test cases and compares the actual events against the // expected set. Returns an error if at least one test case fails or // if there's a problem running the tests. func runTests(inv *logstash.Invocation, tests []testcase.TestCaseSet, diffCommand []string, keptEnvVars []string) error { ok := true for _, t := range tests { fmt.Printf("Running tests in %s...\n", filepath.Base(t.File)) p, err := logstash.NewProcess(inv, t.Codec, t.InputFields, keptEnvVars) if err != nil { return err } defer p.Release() if err = p.Start(); err != nil { return err } for _, line := range t.InputLines { _, err = p.Input.Write([]byte(line + "\n")) if err != nil { return err } } if err = p.Input.Close(); err != nil { return err } result, err := p.Wait() if err != nil || *logstashOutput { message := getLogstashOutputMessage(result.Output, result.Log) if err != nil { return fmt.Errorf("Error running Logstash: %s.%s", err, message) } userError("%s", message) } if err = t.Compare(result.Events, false, diffCommand); err != nil { userError("Testcase failed, continuing with the rest: %s", err) ok = false } } if !ok { return errors.New("one or more testcases failed") } return nil } // runParallelTests runs multiple set of configuration in a single // instance of Logstash against a slice of test cases and compares // 
the actual events against the expected set. Returns an error if // at least one test case fails or if there's a problem running the tests. func runParallelTests(inv *logstash.Invocation, tests []testcase.TestCaseSet, diffCommand []string, keptEnvVars []string) error { var testStreams []*logstash.TestStream badCodecs := map[string]string{ "json": "json_lines", "plain": "line", } for _, t := range tests { if repl, ok := badCodecs[t.Codec]; ok { log.Warning( "The testcase file %q uses the %q codec. That codec "+ "will most likely not work as expected when --sockets is used. Try %q instead.", t.File, t.Codec, repl) } } for _, t := range tests { ts, err := logstash.NewTestStream(t.Codec, t.InputFields, *unixSocketCommTimeout) if err != nil { logstash.CleanupTestStreams(testStreams) return err } testStreams = append(testStreams, ts) } p, err := logstash.NewParallelProcess(inv, testStreams, keptEnvVars) if err != nil { return err } defer p.Release() if err = p.Start(); err != nil { return err } for i, t := range tests { for _, line := range t.InputLines { _, err = testStreams[i].Write([]byte(line + "\n")) if err != nil { return err } } if err = testStreams[i].Close(); err != nil { return err } } result, err := p.Wait() if err != nil || *logstashOutput { message := getLogstashOutputMessage(result.Output, result.Log) if err != nil { return fmt.Errorf("Error running Logstash: %s.%s", err, message) } userError("%s", message) } ok := true for i, t := range tests { if err = t.Compare(result.Events[i], false, diffCommand); err != nil { userError("Testcase failed, continuing with the rest: %s", err) ok = false } } if !ok { return errors.New("one or more testcases failed") } return nil } // getLogstashOutputMessage examines the test result and prepares a // message describing the process's output, log output, or neither // (resulting in an empty string). 
func getLogstashOutputMessage(output string, log string) string { var message string if output != "" { message += fmt.Sprintf("\nProcess output:\n%s", output) } else { message += "\nThe process wrote nothing to stdout or stderr." } if log != "" { message += fmt.Sprintf("\nLog:\n%s", log) } else { message += "\nThe process wrote nothing to its logfile." } return message } // prefixedUserError prints an error message to stderr and prefixes it // with the name of the program file (e.g. "logstash-filter-verifier: // something bad happened."). func prefixedUserError(format string, a ...interface{}) { basename := filepath.Base(os.Args[0]) message := fmt.Sprintf(format, a...) if strings.HasSuffix(message, "\n") { fmt.Fprintf(os.Stderr, "%s: %s", basename, message) } else { fmt.Fprintf(os.Stderr, "%s: %s\n", basename, message) } } // userError prints an error message to stderr. func userError(format string, a ...interface{}) { if strings.HasSuffix(format, "\n") { fmt.Fprintf(os.Stderr, format, a...) } else { fmt.Fprintf(os.Stderr, format+"\n", a...) } } // mainEntrypoint functions as the main function of the program and // returns the desired exit code. func mainEntrypoint() int { kingpin.Version(fmt.Sprintf("%s %s", kingpin.CommandLine.Name, GitSummary)) kingpin.Parse() level, err := oplogging.LogLevel(*loglevel) if err != nil { prefixedUserError("Bad loglevel: %s", *loglevel) return 1 } logging.SetLevel(level) diffCmd, err := shellwords.NewParser().Parse(*diffCommand) if err != nil { userError("Error parsing diff command %q: %s", *diffCommand, err) return 1 } tests, err := testcase.DiscoverTests(*testcasePath) if err != nil { userError(err.Error()) return 1 } allKeptEnvVars := append(defaultKeptEnvVars, *keptEnvVars...) 
logstashPath, err := findExecutable(append(*logstashPaths, defaultLogstashPaths...)) if err != nil { userError("Error locating Logstash: %s", err) return 1 } var targetVersion *semver.Version if *logstashVersion == autoVersion { targetVersion, err = logstash.DetectVersion(logstashPath, allKeptEnvVars) if err != nil { userError("Could not auto-detect the Logstash version: %s", err) return 1 } } else { targetVersion, err = semver.NewVersion(*logstashVersion) if err != nil { userError("The given Logstash version %q could not be parsed as a version number (%s).", *logstashVersion, err) return 1 } } inv, err := logstash.NewInvocation(logstashPath, *logstashArgs, targetVersion, *configPaths...) if err != nil { userError("An error occurred while setting up the Logstash environment: %s", err) return 1 } defer inv.Release() if *unixSockets { if runtime.GOOS == "windows" { userError("Use of Unix domain sockets for communication with Logstash is not supported on Windows.") return 1 } fmt.Println("Use Unix domain sockets.") if err = runParallelTests(inv, tests, diffCmd, allKeptEnvVars); err != nil { userError(err.Error()) return 1 } } else { if err = runTests(inv, tests, diffCmd, allKeptEnvVars); err != nil { userError(err.Error()) return 1 } } return 0 } func main() { os.Exit(mainEntrypoint()) }
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// NOTE(review): this chunk holds two copies of sync/rwmutex.go — the version
// before and after the comment-tidying commit quoted between them. Only the
// Unlock doc comment differs between the copies; the locking logic is identical.

package sync

// An RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers
// or a single writer.
// RWMutexes can be created as part of other
// structures; the zero value for a RWMutex is
// an unlocked mutex.
//
// Writers take priority over Readers: no new RLocks
// are granted while a blocked Lock call is waiting.
type RWMutex struct {
	w           Mutex  // held if there are pending readers or writers
	r           Mutex  // held if the w is being rd
	readerCount uint32 // number of pending readers
}

// RLock locks rw for reading.
// If the lock is already locked for writing or there is a writer already waiting
// to release the lock, RLock blocks until the writer has released the lock.
func (rw *RWMutex) RLock() {
	// Use rw.r.Lock() to block granting the RLock if a goroutine
	// is waiting for its Lock. This is to prevent starvation of W in
	// this situation:
	//   A: rw.RLock() // granted
	//   W: rw.Lock()  // waiting for rw.w().Lock()
	//   B: rw.RLock() // granted
	//   C: rw.RLock() // granted
	//   B: rw.RUnlock()
	//   ... (new readers come and go indefinitely, W is starving)
	rw.r.Lock()
	// NOTE(review): xadd is an atomic add helper defined elsewhere in this
	// package (not in view); it returns the new value of the counter.
	if xadd(&rw.readerCount, 1) == 1 {
		// The first reader locks rw.w, so writers will be blocked
		// while the readers have the RLock.
		rw.w.Lock()
	}
	rw.r.Unlock()
}

// RUnlock undoes a single RLock call;
// it does not affect other simultaneous readers.
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
	if xadd(&rw.readerCount, -1) == 0 {
		// last reader finished, enable writers
		rw.w.Unlock()
	}
}

// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
// To ensure that the lock eventually becomes available,
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
	// Holding rw.r while taking rw.w blocks new RLock calls until this
	// writer gets (and later releases) rw.w — see the starvation note in RLock.
	rw.r.Lock()
	rw.w.Lock()
	rw.r.Unlock()
}

// Unlock unlocks rw for writing.
// It is a run-time error if rw is not locked for writing
// on entry to Unlock.
//
// Like for Mutexes,
// a locked RWMutex is not associated with a particular goroutine.
// It is allowed for one goroutine to RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
	rw.w.Unlock()
}

sync: tidy a couple of comments.

no semantic change.

R=rsc
CC=golang-dev
http://codereview.appspot.com/4128048

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

// An RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers
// or a single writer.
// RWMutexes can be created as part of other
// structures; the zero value for a RWMutex is
// an unlocked mutex.
//
// Writers take priority over Readers: no new RLocks
// are granted while a blocked Lock call is waiting.
type RWMutex struct {
	w           Mutex  // held if there are pending readers or writers
	r           Mutex  // held if the w is being rd
	readerCount uint32 // number of pending readers
}

// RLock locks rw for reading.
// If the lock is already locked for writing or there is a writer already waiting
// to release the lock, RLock blocks until the writer has released the lock.
func (rw *RWMutex) RLock() {
	// Use rw.r.Lock() to block granting the RLock if a goroutine
	// is waiting for its Lock. This is to prevent starvation of W in
	// this situation:
	//   A: rw.RLock() // granted
	//   W: rw.Lock()  // waiting for rw.w().Lock()
	//   B: rw.RLock() // granted
	//   C: rw.RLock() // granted
	//   B: rw.RUnlock()
	//   ... (new readers come and go indefinitely, W is starving)
	rw.r.Lock()
	if xadd(&rw.readerCount, 1) == 1 {
		// The first reader locks rw.w, so writers will be blocked
		// while the readers have the RLock.
		rw.w.Lock()
	}
	rw.r.Unlock()
}

// RUnlock undoes a single RLock call;
// it does not affect other simultaneous readers.
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
	if xadd(&rw.readerCount, -1) == 0 {
		// last reader finished, enable writers
		rw.w.Unlock()
	}
}

// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
// To ensure that the lock eventually becomes available,
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
	rw.r.Lock()
	rw.w.Lock()
	rw.r.Unlock()
}

// Unlock unlocks rw for writing. It is a run-time error if rw is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular
// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
	rw.w.Unlock()
}
// NOTE(review): this chunk holds two copies of the bytemark "update server"
// command file — the version before and after the commit quoted between them.
// The pre-commit updateServer had a bug: it built []error{updateMemory(c), ...},
// which calls EVERY update function before the first error is even inspected,
// so later steps (including the rename, which must run last) executed even
// after an earlier step had failed. The first copy below is fixed to
// short-circuit on the first error, matching the intent the commit describes.

package update

import (
	"fmt"

	"github.com/BytemarkHosting/bytemark-client/cmd/bytemark/app"
	"github.com/BytemarkHosting/bytemark-client/cmd/bytemark/app/args"
	"github.com/BytemarkHosting/bytemark-client/cmd/bytemark/app/with"
	cutil "github.com/BytemarkHosting/bytemark-client/cmd/bytemark/commands/util"
	"github.com/BytemarkHosting/bytemark-client/cmd/bytemark/util"
	"github.com/BytemarkHosting/bytemark-client/lib"
	"github.com/urfave/cli"
)

func init() {
	Commands = append(Commands, cli.Command{
		Name:      "server",
		Usage:     "update a server's configuration",
		UsageText: "update server [flags] <server>",
		Description: `Updates the configuration of an existing Cloud Server.

Note that for changes to cores, memory or hardware profile to take effect you will need to restart the server.

--hw-profile the hardware profile used. Hardware profiles can be simply thought of as what virtual motherboard you're using - generally you want a pretty recent one for maximum speed, but if you're running a very old or experimental OS (e.g. DOS or OS/2 or something) you may require the compatibility one. See "bytemark hwprofiles" for which ones are currently available.

Memory is specified in GiB by default, but can be suffixed with an M to indicate that it is provided in MiB.

Updating a server's name also allows it to be moved between groups and accounts you administer.

EXAMPLES

bytemark update server --new-name boron oxygen
	This will rename the server called oxygen in your default group to boron, still in your default group.

bytemark update server --new-name sunglasses.development sunglasses
	This will move the server called sunglasses into the development group, keeping its name as sunglasses,

bytemark update server --new-name rennes.bretagne.france charata.chaco.argentina
	This will move the server called charata in the chaco group in the argentina account, placing it in the bretagne group in the france account and rename it to rennes.`,
		Flags: append(app.OutputFlags("server", "object"),
			cutil.ForceFlag,
			cli.GenericFlag{
				Name:  "memory",
				Value: new(util.SizeSpecFlag),
				Usage: "How much memory the server will have available, specified in GiB or with GiB/MiB units.",
			},
			cli.StringFlag{
				Name:  "hw-profile",
				Usage: "The hardware profile to use. See `bytemark profiles` for a list of hardware profiles available.",
			},
			cli.BoolFlag{
				Name:  "hw-profile-lock",
				Usage: "Locks the hardware profile (prevents it from being automatically upgraded when we release a newer version)",
			},
			cli.GenericFlag{
				Name:  "new-name",
				Usage: "A new name for the server",
				Value: new(app.VirtualMachineNameFlag),
			},
			cli.IntFlag{
				Name:  "cores",
				Usage: "the number of cores that should be available to the VM",
			},
			cli.StringFlag{
				Name:  "cd-url",
				Usage: "An HTTP(S) URL for an ISO image file to attach as a cdrom.",
			},
			cli.BoolFlag{
				Name:  "remove-cd",
				Usage: "Removes any current cdrom, as if the cd were ejected.",
			},
			cli.GenericFlag{
				Name:  "server",
				Usage: "The server to update",
				Value: new(app.VirtualMachineNameFlag),
			},
		),
		Action: app.Action(args.Optional("new-name", "hwprofile", "memory"), with.RequiredFlags("server"), with.VirtualMachine("server"), with.Auth, updateServer),
	})
}

// updateMemory resizes the server's memory if the --memory flag was given,
// prompting for confirmation (unless --force) when the size would increase.
func updateMemory(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	memory := c.Size("memory")

	if memory == 0 {
		return nil
	}
	if c.VirtualMachine.Memory < memory {
		// Size flags are in MiB here; divide by 1024 to report GiB in the prompt.
		if !c.Bool("force") && !util.PromptYesNo(c.Prompter(), fmt.Sprintf("You're increasing the memory by %dGiB - this may cost more, are you sure?", (memory-c.VirtualMachine.Memory)/1024)) {
			return util.UserRequestedExit{}
		}
	}
	return c.Client().SetVirtualMachineMemory(vmName, memory)
}

// updateHwProfile applies --hw-profile / --hw-profile-lock. Locking without
// naming a profile is a usage error.
func updateHwProfile(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	hwProfile := c.String("hw-profile")
	hwProfileLock := c.Bool("hw-profile-lock")
	if hwProfile == "" {
		if hwProfileLock {
			return c.Help("Must specify a hardware profile to lock")
		}
		return nil
	}

	return c.Client().SetVirtualMachineHardwareProfile(vmName, hwProfile, hwProfileLock)
}

// updateCores changes the core count if --cores was given, prompting for
// confirmation (unless --force) when the count would increase.
func updateCores(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	cores := c.Int("cores")
	if cores == 0 {
		return nil
	}
	if c.VirtualMachine.Cores < cores {
		if !c.Bool("force") && !util.PromptYesNo(c.Prompter(), fmt.Sprintf("You are increasing the number of cores from %d to %d. This may cause your VM to cost more, are you sure?", c.VirtualMachine.Cores, cores)) {
			return util.UserRequestedExit{}
		}
	}
	return c.Client().SetVirtualMachineCores(vmName, cores)
}

// updateName renames/moves the server if --new-name was given.
func updateName(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	newName := c.VirtualMachineName("new-name")
	if newName.VirtualMachine == "" {
		return nil
	}
	return c.Client().MoveVirtualMachine(vmName, newName)
}

// updateCdrom attaches the ISO at --cd-url, or ejects the cdrom if --remove-cd.
func updateCdrom(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	cdURL := c.String("cd-url")
	removeCD := c.Bool("remove-cd")
	if cdURL == "" && !removeCD {
		return nil
	}
	err := c.Client().SetVirtualMachineCDROM(vmName, cdURL)
	if _, ok := err.(lib.InternalServerError); ok {
		return c.Help("Couldn't set the server's cdrom - check that you have provided a valid public HTTP url")
	}
	return err
}

// updateServer runs each update step in order, stopping at the first error so
// that later steps — in particular the rename, which must happen last — are
// not attempted after an earlier step has failed.
func updateServer(c *app.Context) error {
	if err := updateMemory(c); err != nil {
		return err
	}
	if err := updateHwProfile(c); err != nil {
		return err
	}
	if err := updateCores(c); err != nil {
		return err
	}
	if err := updateCdrom(c); err != nil {
		return err
	}
	return updateName(c) // needs to be last
}

update server stops processing after encountering the first error

package update

import (
	"fmt"

	"github.com/BytemarkHosting/bytemark-client/cmd/bytemark/app"
	"github.com/BytemarkHosting/bytemark-client/cmd/bytemark/app/args"
	"github.com/BytemarkHosting/bytemark-client/cmd/bytemark/app/with"
	cutil "github.com/BytemarkHosting/bytemark-client/cmd/bytemark/commands/util"
	"github.com/BytemarkHosting/bytemark-client/cmd/bytemark/util"
	"github.com/BytemarkHosting/bytemark-client/lib"
	"github.com/urfave/cli"
)

func init() {
	Commands = append(Commands, cli.Command{
		Name:      "server",
		Usage:     "update a server's configuration",
		UsageText: "update server [flags] <server>",
		Description: `Updates the configuration of an existing Cloud Server.

Note that for changes to cores, memory or hardware profile to take effect you will need to restart the server.

--hw-profile the hardware profile used. Hardware profiles can be simply thought of as what virtual motherboard you're using - generally you want a pretty recent one for maximum speed, but if you're running a very old or experimental OS (e.g. DOS or OS/2 or something) you may require the compatibility one. See "bytemark hwprofiles" for which ones are currently available.

Memory is specified in GiB by default, but can be suffixed with an M to indicate that it is provided in MiB.

Updating a server's name also allows it to be moved between groups and accounts you administer.

EXAMPLES

bytemark update server --new-name boron oxygen
	This will rename the server called oxygen in your default group to boron, still in your default group.

bytemark update server --new-name sunglasses.development sunglasses
	This will move the server called sunglasses into the development group, keeping its name as sunglasses,

bytemark update server --new-name rennes.bretagne.france charata.chaco.argentina
	This will move the server called charata in the chaco group in the argentina account, placing it in the bretagne group in the france account and rename it to rennes.`,
		Flags: append(app.OutputFlags("server", "object"),
			cutil.ForceFlag,
			cli.GenericFlag{
				Name:  "memory",
				Value: new(util.SizeSpecFlag),
				Usage: "How much memory the server will have available, specified in GiB or with GiB/MiB units.",
			},
			cli.StringFlag{
				Name:  "hw-profile",
				Usage: "The hardware profile to use. See `bytemark profiles` for a list of hardware profiles available.",
			},
			cli.BoolFlag{
				Name:  "hw-profile-lock",
				Usage: "Locks the hardware profile (prevents it from being automatically upgraded when we release a newer version)",
			},
			cli.GenericFlag{
				Name:  "new-name",
				Usage: "A new name for the server",
				Value: new(app.VirtualMachineNameFlag),
			},
			cli.IntFlag{
				Name:  "cores",
				Usage: "the number of cores that should be available to the VM",
			},
			cli.StringFlag{
				Name:  "cd-url",
				Usage: "An HTTP(S) URL for an ISO image file to attach as a cdrom.",
			},
			cli.BoolFlag{
				Name:  "remove-cd",
				Usage: "Removes any current cdrom, as if the cd were ejected.",
			},
			cli.GenericFlag{
				Name:  "server",
				Usage: "The server to update",
				Value: new(app.VirtualMachineNameFlag),
			},
		),
		Action: app.Action(args.Optional("new-name", "hwprofile", "memory"), with.RequiredFlags("server"), with.VirtualMachine("server"), with.Auth, updateServer),
	})
}

func updateMemory(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	memory := c.Size("memory")

	if memory == 0 {
		return nil
	}
	if c.VirtualMachine.Memory < memory {
		if !c.Bool("force") && !util.PromptYesNo(c.Prompter(), fmt.Sprintf("You're increasing the memory by %dGiB - this may cost more, are you sure?", (memory-c.VirtualMachine.Memory)/1024)) {
			return util.UserRequestedExit{}
		}
	}
	return c.Client().SetVirtualMachineMemory(vmName, memory)
}

func updateHwProfile(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	hwProfile := c.String("hw-profile")
	hwProfileLock := c.Bool("hw-profile-lock")
	if hwProfile == "" {
		if hwProfileLock {
			return c.Help("Must specify a hardware profile to lock")
		}
		return nil
	}

	return c.Client().SetVirtualMachineHardwareProfile(vmName, hwProfile, hwProfileLock)
}

func updateCores(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	cores := c.Int("cores")
	if cores == 0 {
		return nil
	}
	if c.VirtualMachine.Cores < cores {
		if !c.Bool("force") && !util.PromptYesNo(c.Prompter(), fmt.Sprintf("You are increasing the number of cores from %d to %d. This may cause your VM to cost more, are you sure?", c.VirtualMachine.Cores, cores)) {
			return util.UserRequestedExit{}
		}
	}
	return c.Client().SetVirtualMachineCores(vmName, cores)
}

func updateName(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	newName := c.VirtualMachineName("new-name")
	if newName.VirtualMachine == "" {
		return nil
	}
	return c.Client().MoveVirtualMachine(vmName, newName)
}

func updateCdrom(c *app.Context) error {
	vmName := c.VirtualMachineName("server")
	cdURL := c.String("cd-url")
	removeCD := c.Bool("remove-cd")
	if cdURL == "" && !removeCD {
		return nil
	}
	err := c.Client().SetVirtualMachineCDROM(vmName, cdURL)
	if _, ok := err.(lib.InternalServerError); ok {
		return c.Help("Couldn't set the server's cdrom - check that you have provided a valid public HTTP url")
	}
	return err
}

func updateServer(c *app.Context) error {
	for _, f := range [](func(*app.Context) error){
		updateMemory,
		updateHwProfile,
		updateCores,
		updateCdrom,
		updateName, // needs to be last
	} {
		err := f(c)
		if err != nil {
			return err
		}
	}
	return nil
}
// NOTE(review): this chunk holds two copies of the scollector netbackup
// collector file — before and after the comment-formatting commit quoted
// between them. Fix applied to BOTH copies: c_netbackup_jobs emitted the
// "netbackup.backup.duration" datapoint twice per job with identical value
// and tags (the second Add was clearly an accidental duplicate); the
// duplicate has been removed.

package collectors

import (
	"encoding/csv"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"time"

	"github.com/StackExchange/scollector/metadata"
	"github.com/StackExchange/scollector/opentsdb"
	"github.com/StackExchange/scollector/util"
)

func init() {
	collectors = append(collectors, &IntervalCollector{F: c_netbackup_jobs})
	collectors = append(collectors, &IntervalCollector{F: c_netbackup_frequency})
}

//jobtype
// 0=backup, 1=archive, 2=restore, 3=verify, 4=duplicate, 5=import, 6=catalog backup, 7=vault, 8=label
// 9=erase, 10=tape request, 11=tape clean, 12=format tape, 13=physical inventory, 14=qualification
// 15=database recovery, 16=media contents, 17=image delete, 18=LiveUpdate

//state
// 0=queued, 1=active, 2=wait for retry, 3=done, 4=suspended, 5=incomplete

//NOTE!!
//This depends on the retention of the job log being greater than the jobs, else things are going to go unkonwn
//See http://www.symantec.com/connect/forums/netbackup-75-activity-monitor-job-logs
//In my case I created the two registry entries mentioned in that link (KEEP_JOB_HOURS) and (KEEP_JOBS_SUCCESSFUL_HOURS)
//I also changed the rentention under "Clean-up" under the master server properties via the Java Admin Console. One of those
//seems to have worked. This *is* netbackup, so I wish you the best of luck ;-)

// nbJob mirrors one record of `bpdbjobs -report -all_columns` output; field
// order must match the CSV column order, since nbUnmarhsall maps columns to
// fields positionally via reflection.
type nbJob struct {
	Jobid             string
	Jobtype           int
	State             int
	Status            string
	Class             string
	Schedule          string
	Client            string
	Server            string
	Started           time.Time
	Elapsed           string
	Ended             time.Time
	Stunit            string
	Try               string
	Operation         string
	Kbytes            int
	Files             int
	Pathlastwritten   string
	Percent           string
	Jobpid            string
	Owner             string
	Subtype           string
	Classtype         string
	Schedule_Type     string
	Priority          string
	Group             string
	Masterserver      string
	Retentionunits    string
	Retentionperiod   string
	Compression       string
	Kbyteslastwritten string
	Fileslastwritten  string
}

var timeType = reflect.TypeOf(time.Time{})

// nbUnmarhsall reads one CSV record and fills v's fields positionally.
// Supported field kinds: string, int (empty column left as zero), and
// time.Time (column parsed as a Unix timestamp).
func nbUnmarhsall(reader *csv.Reader, v interface{}) error {
	record, err := reader.Read()
	if err != nil {
		return err
	}
	if len(record) < 32 {
		return fmt.Errorf("record too short, expected at least 32 fields, got %v", len(record))
	}
	s := reflect.ValueOf(v).Elem()
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)
		switch f.Kind() {
		case reflect.String:
			f.SetString(record[i])
		case reflect.Int:
			var ival int64
			if record[i] == "" {
				continue
			}
			ival, err = strconv.ParseInt(record[i], 10, 64)
			if err != nil {
				return err
			}
			f.SetInt(ival)
		case reflect.Struct:
			switch f.Type() {
			case timeType:
				ival, err := strconv.ParseInt(record[i], 10, 64)
				if err != nil {
					return err
				}
				t := time.Unix(ival, 0)
				f.Set(reflect.ValueOf(t))
			default:
				return fmt.Errorf("unsupported type: %s", f.Type())
			}
		default:
			return fmt.Errorf("unsupported type: %s", f.Type())
		}
	}
	return nil
}

// c_netbackup_jobs reports, for the most recent finished backup job of each
// class/schedule/client combination, its status, duration, age, file count
// and size.
func c_netbackup_jobs() (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	latest := make(map[string]nbJob)
	if err := util.ReadCommand(func(line string) error {
		if len(line) < 32 {
			return nil
		}
		var r nbJob
		reader := csv.NewReader(strings.NewReader(line))
		if err := nbUnmarhsall(reader, &r); err != nil {
			return err
		}
		// Only backup jobs (jobtype 0) that are done (3) or incomplete (5).
		if r.Jobtype != 0 {
			return nil
		}
		if r.State != 3 && r.State != 5 {
			return nil
		}
		key := r.Class + r.Schedule + r.Client
		if existing, ok := latest[key]; !ok {
			latest[key] = r
		} else if r.Started.After(existing.Started) {
			latest[key] = r
		}
		return nil
	}, "bpdbjobs", "-report", "-all_columns"); err != nil {
		return nil, err
	}
	now := time.Now()
	for _, r := range latest {
		tags := opentsdb.TagSet{"class": r.Class, "client": r.Client, "schedule": r.Schedule}
		Add(&md, "netbackup.backup.status", r.Status, tags, metadata.Gauge, metadata.StatusCode, "")
		Add(&md, "netbackup.backup.duration", r.Elapsed, tags, metadata.Gauge, metadata.Second, "")
		Add(&md, "netbackup.backup.attempt_age", now.Sub(r.Ended).Seconds(), tags, metadata.Gauge, metadata.Second, "")
		Add(&md, "netbackup.backup.no_files", r.Files, tags, metadata.Gauge, metadata.Count, "")
		Add(&md, "netbackup.backup.kbytes", r.Kbytes, tags, metadata.Gauge, metadata.KBytes, "")
	}
	return md, nil
}

// c_netbackup_frequency reports the configured backup frequency for every
// client of every policy/schedule, parsed from `bppllist -L -allpolicies`.
func c_netbackup_frequency() (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	var class, schedule string
	var clients []string
	if err := util.ReadCommand(func(line string) error {
		if strings.HasPrefix(line, "Policy Name:") {
			clients = nil
			f := strings.Fields(line)
			if len(f) == 3 {
				class = f[2]
				return nil
			}
			return fmt.Errorf("error parsing policy: %v", line)
		}
		if strings.HasPrefix(line, "Client/HW/OS/Pri/DMI/CIT:") {
			f := strings.Fields(line)
			if len(f) == 9 {
				clients = append(clients, f[1])
				return nil
			}
			return fmt.Errorf("error parsing client")
		}
		if strings.HasPrefix(line, "Schedule:") {
			f := strings.Fields(line)
			if len(f) > 1 {
				schedule = f[1]
				return nil
			}
			// NOTE(review): message says "client" but this is the Schedule
			// branch — kept as-is to preserve log output; confirm and fix
			// the wording in a follow-up.
			return fmt.Errorf("error parsing client: %v", line)
		}
		if strings.HasPrefix(strings.TrimSpace(line), "Frequency:") {
			f := strings.Fields(line)
			if len(f) == 5 {
				freq := strings.TrimLeft(f[3], "(")
				for _, client := range clients {
					tags := opentsdb.TagSet{"class": class, "client": client, "schedule": schedule}
					Add(&md, "netbackup.backup.frequency", freq, tags, metadata.Gauge, metadata.Second, "")
				}
				return nil
			}
			return fmt.Errorf("error parsing frequency: %v", line)
		}
		return nil
	}, "bppllist", "-L", "-allpolicies"); err != nil {
		return nil, err
	}
	return md, nil
}

cmd/scollector: Format comment

package collectors

import (
	"encoding/csv"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"time"

	"github.com/StackExchange/scollector/metadata"
	"github.com/StackExchange/scollector/opentsdb"
	"github.com/StackExchange/scollector/util"
)

func init() {
	collectors = append(collectors, &IntervalCollector{F: c_netbackup_jobs})
	collectors = append(collectors, &IntervalCollector{F: c_netbackup_frequency})
}

//jobtype
// 0=backup, 1=archive, 2=restore, 3=verify, 4=duplicate, 5=import, 6=catalog backup, 7=vault, 8=label
// 9=erase, 10=tape request, 11=tape clean, 12=format tape, 13=physical inventory, 14=qualification
// 15=database recovery, 16=media contents, 17=image delete, 18=LiveUpdate

//state
// 0=queued, 1=active, 2=wait for retry, 3=done, 4=suspended, 5=incomplete

//NOTE!!
// This depends on the retention of the job log being greater than the jobs,
// else things are going to go unknown. See
// http://www.symantec.com/connect/forums/netbackup-75-activity-monitor-job-logs
// In my case I created the two registry entries mentioned in that link
// (KEEP_JOB_HOURS) and (KEEP_JOBS_SUCCESSFUL_HOURS). I also changed the
// rentention under "Clean-up" under the master server properties via the Java
// Admin Console. One of those seems to have worked. This *is* netbackup, so I
// wish you the best of luck ;-).
type nbJob struct {
	Jobid             string
	Jobtype           int
	State             int
	Status            string
	Class             string
	Schedule          string
	Client            string
	Server            string
	Started           time.Time
	Elapsed           string
	Ended             time.Time
	Stunit            string
	Try               string
	Operation         string
	Kbytes            int
	Files             int
	Pathlastwritten   string
	Percent           string
	Jobpid            string
	Owner             string
	Subtype           string
	Classtype         string
	Schedule_Type     string
	Priority          string
	Group             string
	Masterserver      string
	Retentionunits    string
	Retentionperiod   string
	Compression       string
	Kbyteslastwritten string
	Fileslastwritten  string
}

var timeType = reflect.TypeOf(time.Time{})

func nbUnmarhsall(reader *csv.Reader, v interface{}) error {
	record, err := reader.Read()
	if err != nil {
		return err
	}
	if len(record) < 32 {
		return fmt.Errorf("record too short, expected at least 32 fields, got %v", len(record))
	}
	s := reflect.ValueOf(v).Elem()
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)
		switch f.Kind() {
		case reflect.String:
			f.SetString(record[i])
		case reflect.Int:
			var ival int64
			if record[i] == "" {
				continue
			}
			ival, err = strconv.ParseInt(record[i], 10, 64)
			if err != nil {
				return err
			}
			f.SetInt(ival)
		case reflect.Struct:
			switch f.Type() {
			case timeType:
				ival, err := strconv.ParseInt(record[i], 10, 64)
				if err != nil {
					return err
				}
				t := time.Unix(ival, 0)
				f.Set(reflect.ValueOf(t))
			default:
				return fmt.Errorf("unsupported type: %s", f.Type())
			}
		default:
			return fmt.Errorf("unsupported type: %s", f.Type())
		}
	}
	return nil
}

func c_netbackup_jobs() (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	latest := make(map[string]nbJob)
	if err := util.ReadCommand(func(line string) error {
		if len(line) < 32 {
			return nil
		}
		var r nbJob
		reader := csv.NewReader(strings.NewReader(line))
		if err := nbUnmarhsall(reader, &r); err != nil {
			return err
		}
		if r.Jobtype != 0 {
			return nil
		}
		if r.State != 3 && r.State != 5 {
			return nil
		}
		key := r.Class + r.Schedule + r.Client
		if existing, ok := latest[key]; !ok {
			latest[key] = r
		} else if r.Started.After(existing.Started) {
			latest[key] = r
		}
		return nil
	}, "bpdbjobs", "-report", "-all_columns"); err != nil {
		return nil, err
	}
	now := time.Now()
	for _, r := range latest {
		tags := opentsdb.TagSet{"class": r.Class, "client": r.Client, "schedule": r.Schedule}
		Add(&md, "netbackup.backup.status", r.Status, tags, metadata.Gauge, metadata.StatusCode, "")
		Add(&md, "netbackup.backup.duration", r.Elapsed, tags, metadata.Gauge, metadata.Second, "")
		Add(&md, "netbackup.backup.attempt_age", now.Sub(r.Ended).Seconds(), tags, metadata.Gauge, metadata.Second, "")
		Add(&md, "netbackup.backup.no_files", r.Files, tags, metadata.Gauge, metadata.Count, "")
		Add(&md, "netbackup.backup.kbytes", r.Kbytes, tags, metadata.Gauge, metadata.KBytes, "")
	}
	return md, nil
}

func c_netbackup_frequency() (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	var class, schedule string
	var clients []string
	if err := util.ReadCommand(func(line string) error {
		if strings.HasPrefix(line, "Policy Name:") {
			clients = nil
			f := strings.Fields(line)
			if len(f) == 3 {
				class = f[2]
				return nil
			}
			return fmt.Errorf("error parsing policy: %v", line)
		}
		if strings.HasPrefix(line, "Client/HW/OS/Pri/DMI/CIT:") {
			f := strings.Fields(line)
			if len(f) == 9 {
				clients = append(clients, f[1])
				return nil
			}
			return fmt.Errorf("error parsing client")
		}
		if strings.HasPrefix(line, "Schedule:") {
			f := strings.Fields(line)
			if len(f) > 1 {
				schedule = f[1]
				return nil
			}
			return fmt.Errorf("error parsing client: %v", line)
		}
		if strings.HasPrefix(strings.TrimSpace(line), "Frequency:") {
			f := strings.Fields(line)
			if len(f) == 5 {
				freq := strings.TrimLeft(f[3], "(")
				for _, client := range clients {
					tags := opentsdb.TagSet{"class": class, "client": client, "schedule": schedule}
					Add(&md, "netbackup.backup.frequency", freq, tags, metadata.Gauge, metadata.Second, "")
				}
				return nil
			}
			return fmt.Errorf("error parsing frequency: %v", line)
		}
		return nil
	}, "bppllist", "-L", "-allpolicies"); err != nil {
		return nil, err
	}
	return md, nil
}
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE(review): this chunk holds two copies of the "generate model" command —
// before and after the commit (quoted between them) that added the
// --skip-flatten flag. Only the SkipFlattening field and its pass-through to
// Server differ between the copies.

package generate

import (
	"errors"
	"log"
)

// Model the generate model file command
type Model struct {
	// shared embeds options common to the generate subcommands — declared
	// elsewhere in this package (not in view); presumably it provides
	// ExistingModels, used below.
	shared
	Name           []string `long:"name" short:"n" description:"the model to generate"`
	NoStruct       bool     `long:"skip-struct" description:"when present will not generate the model struct"`
	DumpData       bool     `long:"dump-data" description:"when present dumps the json for the template generator instead of generating files"`
	SkipValidation bool     `long:"skip-validation" description:"skips validation of spec prior to generation"`
}

// Execute generates a model file
//
// Generation is delegated to the Server command configured to emit models
// only (main, spec, support and operations generation are all excluded).
func (m *Model) Execute(args []string) error {
	// Dumping template data is only meaningful for a single model.
	if m.DumpData && len(m.Name) > 1 {
		return errors.New("only 1 model at a time is supported for dumping data")
	}

	if m.ExistingModels != "" {
		log.Println("Warning: Ignoring existing-models flag when generating models.")
	}
	s := &Server{
		shared:         m.shared,
		Models:         m.Name,
		DumpData:       m.DumpData,
		ExcludeMain:    true,
		ExcludeSpec:    true,
		SkipSupport:    true,
		SkipOperations: true,
		SkipModels:     m.NoStruct,
		SkipValidation: m.SkipValidation,
	}
	return s.Execute(args)
}

support --skip-flatten for model generation

// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package generate

import (
	"errors"
	"log"
)

// Model the generate model file command
type Model struct {
	shared
	Name            []string `long:"name" short:"n" description:"the model to generate"`
	NoStruct        bool     `long:"skip-struct" description:"when present will not generate the model struct"`
	DumpData        bool     `long:"dump-data" description:"when present dumps the json for the template generator instead of generating files"`
	SkipFlattening  bool     `long:"skip-flatten" description:"skips flattening of spec prior to generation"`
	SkipValidation  bool     `long:"skip-validation" description:"skips validation of spec prior to generation"`
}

// Execute generates a model file
func (m *Model) Execute(args []string) error {
	if m.DumpData && len(m.Name) > 1 {
		return errors.New("only 1 model at a time is supported for dumping data")
	}

	if m.ExistingModels != "" {
		log.Println("Warning: Ignoring existing-models flag when generating models.")
	}
	s := &Server{
		shared:         m.shared,
		Models:         m.Name,
		DumpData:       m.DumpData,
		ExcludeMain:    true,
		ExcludeSpec:    true,
		SkipSupport:    true,
		SkipOperations: true,
		SkipModels:     m.NoStruct,
		SkipFlattening: m.SkipFlattening,
		SkipValidation: m.SkipValidation,
	}
	return s.Execute(args)
}
// Copyright 2017 The Wuffs Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cgen

import (
	"fmt"
	"strings"

	a "github.com/google/wuffs/lang/ast"
	t "github.com/google/wuffs/lang/token"
)

// genFilenameLineComments is whether to print "// foo.wuffs:123\n" comments in
// the generated code. This can be useful for debugging, although it is not
// enabled by default as it can lead to many spurious changes in the generated
// C code (due to line numbers changing) when editing Wuffs code.
const genFilenameLineComments = false

// writeStatement emits the C code for a single Wuffs statement node,
// dispatching on the node's AST kind.
func (g *gen) writeStatement(b *buffer, n *a.Node, depth uint32) error {
	if depth > a.MaxBodyDepth {
		return fmt.Errorf("body recursion depth too large")
	}
	depth++

	if n.Kind() == a.KAssert {
		// Assertions only apply at compile-time.
		return nil
	}

	mightIntroduceTemporaries := false
	switch n.Kind() {
	case a.KAssign:
		n := n.AsAssign()
		mightIntroduceTemporaries = n.RHS().Effect().Coroutine()
	case a.KExpr:
		// TODO.
	}
	if mightIntroduceTemporaries {
		// Put n's code into its own block, to restrict the scope of the
		// tPrefix temporary variables. This helps avoid "jump bypasses
		// variable initialization" compiler warnings with the coroutine
		// suspension points.
		b.writes("{\n")
		defer b.writes("}\n")
	}

	if genFilenameLineComments {
		filename, line := n.AsRaw().FilenameLine()
		if i := strings.LastIndexByte(filename, '/'); i >= 0 {
			filename = filename[i+1:]
		}
		if i := strings.LastIndexByte(filename, '\\'); i >= 0 {
			filename = filename[i+1:]
		}
		b.printf("// %s:%d\n", filename, line)
	}

	switch n.Kind() {
	case a.KAssign:
		n := n.AsAssign()
		return g.writeStatementAssign(b, n.Operator(), n.LHS(), n.RHS(), depth)
	case a.KExpr:
		// A bare expression statement is an assignment with no LHS.
		return g.writeStatementAssign(b, 0, nil, n.AsExpr(), depth)
	case a.KIOBind:
		return g.writeStatementIOBind(b, n.AsIOBind(), depth)
	case a.KIf:
		return g.writeStatementIf(b, n.AsIf(), depth)
	case a.KIterate:
		return g.writeStatementIterate(b, n.AsIterate(), depth)
	case a.KJump:
		return g.writeStatementJump(b, n.AsJump(), depth)
	case a.KRet:
		return g.writeStatementRet(b, n.AsRet(), depth)
	case a.KVar:
		return nil
	case a.KWhile:
		return g.writeStatementWhile(b, n.AsWhile(), depth)
	}
	return fmt.Errorf("unrecognized ast.Kind (%s) for writeStatement", n.Kind())
}

// writeStatementAssign emits C code for "lhs op rhs". A nil lhs (with op 0)
// means a bare expression statement whose value is discarded (or whose status
// result is checked).
func (g *gen) writeStatementAssign(b *buffer, op t.ID, lhs *a.Expr, rhs *a.Expr, depth uint32) error {
	// TODO: clean this method body up.
	if depth > a.MaxExprDepth {
		return fmt.Errorf("expression recursion depth too large")
	}
	depth++

	if lhs != nil {
		if rhs.Effect().Coroutine() {
			if err := g.writeQuestionCall(b, rhs, depth, op == t.IDEqQuestion); err != nil {
				return err
			}
		}

		lhsBuf := buffer(nil)
		if err := g.writeExpr(&lhsBuf, lhs, depth); err != nil {
			return err
		}

		opName, closer := "", ""
		if lTyp := lhs.MType(); lTyp.IsArrayType() {
			// Array assignment lowers to a memcpy of the whole array.
			b.writes("memcpy(")
			opName, closer = ",", fmt.Sprintf(", sizeof(%s))", lhsBuf)
		} else {
			switch op {
			case t.IDTildeSatPlusEq, t.IDTildeSatMinusEq:
				// Saturating arithmetic lowers to a helper-function call.
				uBits := uintBits(lTyp.QID())
				if uBits == 0 {
					return fmt.Errorf("unsupported tilde-operator type %q", lTyp.Str(g.tm))
				}
				uOp := "add"
				if op != t.IDTildeSatPlusEq {
					uOp = "sub"
				}
				b.printf("wuffs_base__u%d__sat_%s_indirect(&", uBits, uOp)
				opName, closer = ",", ")"
			default:
				opName = cOpName(op)
				if opName == "" {
					return fmt.Errorf("unrecognized operator %q", op.AmbiguousForm().Str(g.tm))
				}
			}
		}

		b.writex(lhsBuf)
		b.writes(opName)
		if err := g.writeExpr(b, rhs, depth); err != nil {
			return err
		}
		b.writes(closer)
		b.writes(";\n")
		return nil
	}

	// No LHS: a bare expression statement.
	if rhs.Effect().Coroutine() {
		if err := g.writeCoroSuspPoint(b, false); err != nil {
			return err
		}
	}
	if err := g.writeBuiltinQuestionCall(b, rhs, depth); err != errNoSuchBuiltin {
		return err
	}
	if err := g.writeSaveExprDerivedVars(b, rhs); err != nil {
		return err
	}
	if rhs.Effect().Optional() {
		b.writes("status = ")
	}
	// TODO: drop the "Other" in writeExprOther.
	if err := g.writeExprOther(b, rhs, depth); err != nil {
		return err
	}
	b.writes(";\n")
	if err := g.writeLoadExprDerivedVars(b, rhs); err != nil {
		return err
	}
	if rhs.Effect().Optional() {
		target := "exit"
		if rhs.Effect().Coroutine() {
			target = "suspend"
		}
		b.printf("if (status) { goto %s; }\n", target)
	}
	return nil
}

// writeStatementIOBind emits C code for an io_bind or io_limit block: it
// saves the bound IO variable (and its iop/io1 pointers for local IO vars),
// rebinds or limits it, emits the body, then restores the saved state.
func (g *gen) writeStatementIOBind(b *buffer, n *a.IOBind, depth uint32) error {
	if g.currFunk.ioBinds > maxIOBinds {
		return fmt.Errorf("too many temporary variables required")
	}
	ioBindNum := g.currFunk.ioBinds
	g.currFunk.ioBinds++

	// TODO: do these variables need to be func-scoped (bigger scope)
	// instead of block-scoped (smaller scope) if the coro_susp_point
	// switch can jump past this initialization??
	b.writes("{\n")
	{
		e := n.IO()
		// TODO: restrict (in the type checker or parser) that e is either a
		// local variable or args.foo?
		prefix := vPrefix
		if e.Operator() != 0 {
			prefix = aPrefix
		}
		cTyp := "reader"
		if e.MType().QID()[1] == t.IDIOWriter {
			cTyp = "writer"
		}
		name := e.Ident().Str(g.tm)
		b.printf("wuffs_base__io_%s %s%d_%s%s = %s%s;\n",
			cTyp, oPrefix, ioBindNum, prefix, name, prefix, name)

		// TODO: save / restore all iop vars, not just for local IO vars? How
		// does this work if the io_bind body advances these pointers, either
		// directly or by calling other funcs?
		if e.Operator() == 0 {
			b.printf("uint8_t *%s%d_%s%s%s = %s%s%s;\n",
				oPrefix, ioBindNum, iopPrefix, prefix, name, iopPrefix, prefix, name)
			b.printf("uint8_t *%s%d_%s%s%s = %s%s%s;\n",
				oPrefix, ioBindNum, io1Prefix, prefix, name, io1Prefix, prefix, name)
		}

		if n.Keyword() == t.IDIOBind {
			b.printf("wuffs_base__io_%s__set(&%s%s, &u_%s, &iop_%s%s, &io1_%s%s,",
				cTyp, prefix, name, name, prefix, name, prefix, name)
			if err := g.writeExpr(b, n.Arg1(), 0); err != nil {
				return err
			}
			b.writes(");\n")
		} else {
			// TODO: restrict (in the type checker or parser) that e is
			// args.foo?
			b.printf("wuffs_base__io_%s__set_limit(&%s%s, iop_%s%s,\n",
				cTyp, prefix, name, prefix, name)
			if err := g.writeExpr(b, n.Arg1(), 0); err != nil {
				return err
			}
			b.writes(");\n")
		}
	}

	for _, o := range n.Body() {
		if err := g.writeStatement(b, o, depth); err != nil {
			return err
		}
	}

	{
		// Restore the saved IO state.
		e := n.IO()
		prefix := vPrefix
		if e.Operator() != 0 {
			prefix = aPrefix
		}
		name := e.Ident().Str(g.tm)
		b.printf("%s%s = %s%d_%s%s;\n",
			prefix, name, oPrefix, ioBindNum, prefix, name)
		if e.Operator() == 0 {
			b.printf("%s%s%s = %s%d_%s%s%s;\n",
				iopPrefix, prefix, name, oPrefix, ioBindNum, iopPrefix, prefix, name)
			b.printf("%s%s%s = %s%d_%s%s%s;\n",
				io1Prefix, prefix, name, oPrefix, ioBindNum, io1Prefix, prefix, name)
		}
	}
	b.writes("}\n")
	return nil
}

// writeStatementIf emits a C if/else-if/else chain for a Wuffs if statement.
func (g *gen) writeStatementIf(b *buffer, n *a.If, depth uint32) error {
	for {
		condition := buffer(nil)
		if err := g.writeExpr(&condition, n.Condition(), 0); err != nil {
			return err
		}
		// Calling trimParens avoids clang's -Wparentheses-equality warning.
		b.printf("if (%s) {\n", trimParens(condition))
		for _, o := range n.BodyIfTrue() {
			if err := g.writeStatement(b, o, depth); err != nil {
				return err
			}
		}
		if bif := n.BodyIfFalse(); len(bif) > 0 {
			b.writes("} else {\n")
			for _, o := range bif {
				if err := g.writeStatement(b, o, depth); err != nil {
					return err
				}
			}
			break
		}
		n = n.ElseIf()
		if n == nil {
			break
		}
		b.writes("} else ")
	}
	b.writes("}\n")
	return nil
}

// writeStatementIterate emits C code for an iterate loop over a slice,
// generating one (possibly unrolled) round per iterate/else-iterate arm.
func (g *gen) writeStatementIterate(b *buffer, n *a.Iterate, depth uint32) error {
	assigns := n.Assigns()
	if len(assigns) == 0 {
		return nil
	}
	if len(assigns) != 1 {
		return fmt.Errorf("TODO: iterate over more than one assign")
	}
	o := assigns[0].AsAssign()
	name := o.LHS().Ident().Str(g.tm)
	b.writes("{\n")

	// TODO: don't assume that the slice is a slice of base.u8. In
	// particular, the code gen can be subtle if the slice element type has
	// zero size, such as the empty struct.
	b.printf("wuffs_base__slice_u8 %sslice_%s =", iPrefix, name)
	if err := g.writeExpr(b, o.RHS(), 0); err != nil {
		return err
	}
	b.writes(";\n")
	b.printf("%s%s = %sslice_%s;\n", vPrefix, name, iPrefix, name)

	// TODO: look at n.HasContinue() and n.HasBreak().

	round := uint32(0)
	for ; n != nil; n = n.ElseIterate() {
		length := n.Length().SmallPowerOf2Value()
		unroll := n.Unroll().SmallPowerOf2Value()
		for {
			if err := g.writeIterateRound(b, name, n.Body(), round, depth, length, unroll); err != nil {
				return err
			}
			round++

			// Emit one unrolled round, then one non-unrolled round to mop up
			// the remainder.
			if unroll == 1 {
				break
			}
			unroll = 1
		}
	}

	b.writes("}\n")
	return nil
}

// writeStatementJump emits a goto to the enclosing loop's break or continue
// label.
func (g *gen) writeStatementJump(b *buffer, n *a.Jump, depth uint32) error {
	jt, err := g.currFunk.jumpTarget(n.JumpTarget())
	if err != nil {
		return err
	}
	keyword := "continue"
	if n.Keyword() == t.IDBreak {
		keyword = "break"
	}
	b.printf("goto label_%d_%s;\n", jt, keyword)
	return nil
}

// writeStatementRet emits C code for a return or yield statement. Optional
// (status-returning) functions set the status variable and jump to the ok /
// exit / suspend labels; other functions emit a plain C return.
func (g *gen) writeStatementRet(b *buffer, n *a.Ret, depth uint32) error {
	retExpr := n.Value()

	if g.currFunk.astFunc.Effect().Optional() {
		isError, isOK := false, false
		b.writes("status = ")
		if retExpr.Operator() == 0 && retExpr.Ident() == t.IDOk {
			b.writes("NULL")
			isOK = true
		} else {
			if retExpr.Ident().IsStrLiteral(g.tm) {
				msg, _ := t.Unescape(retExpr.Ident().Str(g.tm))
				isError = statusMsgIsError(msg)
				isOK = statusMsgIsWarning(msg)
			}
			// TODO: check that retExpr has no call-suspendibles.
			if err := g.writeExpr(
				b, retExpr, depth); err != nil {
				return err
			}
		}
		b.writes(";")

		if n.Keyword() == t.IDYield {
			return g.writeCoroSuspPoint(b, true)
		}

		if isError {
			b.writes("goto exit;")
		} else if isOK {
			g.currFunk.hasGotoOK = true
			b.writes("goto ok;")
		} else {
			g.currFunk.hasGotoOK = true
			// TODO: the "goto exit"s can be "goto ok".
			b.writes("if (wuffs_base__status__is_error(status)) { goto exit; }" +
				"else if (wuffs_base__status__is_suspension(status)) { " +
				"status = wuffs_base__error__cannot_return_a_suspension; goto exit; } goto ok;")
		}
		return nil
	}

	b.writes("return ")
	if g.currFunk.astFunc.Out() == nil {
		return fmt.Errorf("TODO: allow empty return type (when not suspendible)")
	} else if err := g.writeExpr(b, retExpr, depth); err != nil {
		return err
	}
	b.writeb(';')
	return nil
}

// writeStatementWhile emits a C while loop, with continue/break labels when
// the Wuffs loop body uses labeled jumps.
func (g *gen) writeStatementWhile(b *buffer, n *a.While, depth uint32) error {
	if n.HasContinue() {
		jt, err := g.currFunk.jumpTarget(n)
		if err != nil {
			return err
		}
		b.printf("label_%d_continue:;\n", jt)
	}
	condition := buffer(nil)
	if err := g.writeExpr(&condition, n.Condition(), 0); err != nil {
		return err
	}
	// Calling trimParens avoids clang's -Wparentheses-equality warning.
	b.printf("while (%s) {\n", trimParens(condition))
	for _, o := range n.Body() {
		if err := g.writeStatement(b, o, depth); err != nil {
			return err
		}
	}
	b.writes("}\n")
	if n.HasBreak() {
		jt, err := g.currFunk.jumpTarget(n)
		if err != nil {
			return err
		}
		b.printf("label_%d_break:;\n", jt)
	}
	return nil
}

// writeIterateRound emits one round of an iterate loop: a while loop that
// advances the iteration slice by length bytes, unroll times per pass.
func (g *gen) writeIterateRound(b *buffer, name string, body []*a.Node, round uint32, depth uint32, length int, unroll int) error {
	b.printf("%s%s.len = %d;\n", vPrefix, name, length)
	b.printf("uint8_t* %send%d_%s = %sslice_%s.ptr + (%sslice_%s.len / %d) * %d;\n",
		iPrefix, round, name, iPrefix, name, iPrefix, name, length*unroll, length*unroll)
	b.printf("while (%s%s.ptr < %send%d_%s) {\n", vPrefix, name, iPrefix, round, name)
	for i := 0; i < unroll; i++ {
		for _, o := range body {
			if err := g.writeStatement(b, o, depth); err != nil {
				return err
			}
		}
		b.printf("%s%s.ptr += %d;\n", vPrefix, name, length)
	}
	b.writes("}\n")
	return nil
}

// writeCoroSuspPoint emits the next numbered coroutine suspension point
// macro for the current function.
func (g *gen) writeCoroSuspPoint(b *buffer, maybeSuspend bool) error {
	const maxCoroSuspPoint = 0xFFFFFFFF
	g.currFunk.coroSuspPoint++
	if g.currFunk.coroSuspPoint == maxCoroSuspPoint {
		return fmt.Errorf("too many coroutine suspension points required")
	}

	macro := ""
	if maybeSuspend {
		macro = "_MAYBE_SUSPEND"
	}
	b.printf("WUFFS_BASE__COROUTINE_SUSPENSION_POINT%s(%d);\n", macro, g.currFunk.coroSuspPoint)
	return nil
}

// writeQuestionCall emits a status-returning ("?") call. For "=?" the status
// goes into a fresh temporary; otherwise it goes into the status variable
// with a suspend check.
func (g *gen) writeQuestionCall(b *buffer, n *a.Expr, depth uint32, eqQuestion bool) error {
	if depth > a.MaxExprDepth {
		return fmt.Errorf("expression recursion depth too large")
	}
	depth++

	if !eqQuestion && n.Effect().Coroutine() {
		if err := g.writeCoroSuspPoint(b, false); err != nil {
			return err
		}
	}
	if err := g.writeBuiltinQuestionCall(b, n, depth); err != errNoSuchBuiltin {
		return err
	}
	if err := g.writeSaveExprDerivedVars(b, n); err != nil {
		return err
	}
	if eqQuestion {
		if g.currFunk.tempW > maxTemp {
			return fmt.Errorf("too many temporary variables required")
		}
		temp := g.currFunk.tempW
		g.currFunk.tempW++
		b.printf("wuffs_base__status %s%d = ", tPrefix, temp)
	} else {
		b.writes("status = ")
	}
	if err := g.writeExprUserDefinedCall(b, n, depth); err != nil {
		return err
	}
	b.writes(";\n")
	if err := g.writeLoadExprDerivedVars(b, n); err != nil {
		return err
	}
	if !eqQuestion {
		b.writes("if (status) { goto suspend; }\n")
	}
	return nil
}

// trimParens returns b without its outermost parentheses, if present.
func trimParens(b []byte) []byte {
	if len(b) > 1 && b[0] == '(' && b[len(b)-1] == ')' {
		return b[1 : len(b)-1]
	}
	return b
}
package proxy

import (
	"net/url"
	"testing"
)

// TestEmptyBlacklist verifies that an empty blacklist never rejects a
// request, whatever the hostname.
func TestEmptyBlacklist(t *testing.T) {
	bl := NewEmptyBlacklist()
	hostnames := []string{"http://localhost", "http://google.com"}
	for i := range hostnames {
		// Named u rather than url so the net/url package is not shadowed.
		u, err := url.Parse(hostnames[i])
		if err != nil {
			t.Fatalf("error with parsing url '%s'", err)
		}
		req := Request{
			URL:    *u,
			Method: GET,
		}
		// Format the URL explicitly: Request has no String method, so
		// passing req itself to %s would trip go vet's printf check.
		if e := bl.IsBlacklisted(req); e != nil {
			t.Fatalf("EmptyBlacklist should never reject anything hostname - %s", req.URL.String())
		}
	}
}
package protocol import ( "io" "github.com/v2ray/v2ray-core/common/alloc" "github.com/v2ray/v2ray-core/common/log" v2net "github.com/v2ray/v2ray-core/common/net" "github.com/v2ray/v2ray-core/proxy" "github.com/v2ray/v2ray-core/transport" ) const ( socksVersion = byte(0x05) socks4Version = byte(0x04) AuthNotRequired = byte(0x00) AuthGssApi = byte(0x01) AuthUserPass = byte(0x02) AuthNoMatchingMethod = byte(0xFF) Socks4RequestGranted = byte(90) Socks4RequestRejected = byte(91) ) // Authentication request header of Socks5 protocol type Socks5AuthenticationRequest struct { version byte nMethods byte authMethods [256]byte } func (request *Socks5AuthenticationRequest) HasAuthMethod(method byte) bool { for i := 0; i < int(request.nMethods); i++ { if request.authMethods[i] == method { return true } } return false } func ReadAuthentication(reader io.Reader) (auth Socks5AuthenticationRequest, auth4 Socks4AuthenticationRequest, err error) { buffer := alloc.NewSmallBuffer() defer buffer.Release() nBytes, err := reader.Read(buffer.Value) if err != nil { return } if nBytes < 2 { log.Warning("Socks: expected 2 bytes read, but only ", nBytes, " bytes read") err = transport.ErrCorruptedPacket return } if buffer.Value[0] == socks4Version { auth4.Version = buffer.Value[0] auth4.Command = buffer.Value[1] auth4.Port = v2net.PortFromBytes(buffer.Value[2:4]) copy(auth4.IP[:], buffer.Value[4:8]) err = Socks4Downgrade return } auth.version = buffer.Value[0] if auth.version != socksVersion { log.Warning("Socks: Unknown protocol version ", auth.version) err = proxy.ErrInvalidProtocolVersion return } auth.nMethods = buffer.Value[1] if auth.nMethods <= 0 { log.Warning("Socks: Zero length of authentication methods") err = proxy.ErrInvalidAuthentication return } if nBytes-2 != int(auth.nMethods) { log.Warning("Socks: Unmatching number of auth methods, expecting ", auth.nMethods, ", but got ", nBytes) err = proxy.ErrInvalidAuthentication return } copy(auth.authMethods[:], buffer.Value[2:nBytes]) 
return } type Socks5AuthenticationResponse struct { version byte authMethod byte } func NewAuthenticationResponse(authMethod byte) *Socks5AuthenticationResponse { return &Socks5AuthenticationResponse{ version: socksVersion, authMethod: authMethod, } } func WriteAuthentication(writer io.Writer, r *Socks5AuthenticationResponse) error { _, err := writer.Write([]byte{r.version, r.authMethod}) return err } type Socks5UserPassRequest struct { version byte username string password string } func (request Socks5UserPassRequest) Username() string { return request.username } func (request Socks5UserPassRequest) Password() string { return request.password } func (request Socks5UserPassRequest) AuthDetail() string { return request.username + ":" + request.password } func ReadUserPassRequest(reader io.Reader) (request Socks5UserPassRequest, err error) { buffer := alloc.NewSmallBuffer() defer buffer.Release() _, err = reader.Read(buffer.Value[0:2]) if err != nil { return } request.version = buffer.Value[0] nUsername := buffer.Value[1] nBytes, err := reader.Read(buffer.Value[:nUsername]) if err != nil { return } request.username = string(buffer.Value[:nBytes]) _, err = reader.Read(buffer.Value[0:1]) if err != nil { return } nPassword := buffer.Value[0] nBytes, err = reader.Read(buffer.Value[:nPassword]) if err != nil { return } request.password = string(buffer.Value[:nBytes]) return } type Socks5UserPassResponse struct { version byte status byte } func NewSocks5UserPassResponse(status byte) Socks5UserPassResponse { return Socks5UserPassResponse{ version: socksVersion, status: status, } } func WriteUserPassResponse(writer io.Writer, response Socks5UserPassResponse) error { _, err := writer.Write([]byte{response.version, response.status}) return err } const ( AddrTypeIPv4 = byte(0x01) AddrTypeIPv6 = byte(0x04) AddrTypeDomain = byte(0x03) CmdConnect = byte(0x01) CmdBind = byte(0x02) CmdUdpAssociate = byte(0x03) ) type Socks5Request struct { Version byte Command byte AddrType byte 
IPv4 [4]byte Domain string IPv6 [16]byte Port v2net.Port } func ReadRequest(reader io.Reader) (request *Socks5Request, err error) { buffer := alloc.NewSmallBuffer() defer buffer.Release() _, err = io.ReadFull(reader, buffer.Value[:4]) if err != nil { return } request = &Socks5Request{ Version: buffer.Value[0], Command: buffer.Value[1], // buffer[2] is a reserved field AddrType: buffer.Value[3], } switch request.AddrType { case AddrTypeIPv4: _, err = io.ReadFull(reader, request.IPv4[:]) if err != nil { return } case AddrTypeDomain: _, err = io.ReadFull(reader, buffer.Value[0:1]) if err != nil { return } domainLength := buffer.Value[0] _, err = io.ReadFull(reader, buffer.Value[:domainLength]) if err != nil { return } request.Domain = string(append([]byte(nil), buffer.Value[:domainLength]...)) case AddrTypeIPv6: _, err = io.ReadFull(reader, request.IPv6[:]) if err != nil { return } default: log.Warning("Socks: Unexpected address type ", request.AddrType) err = transport.ErrCorruptedPacket return } _, err = io.ReadFull(reader, buffer.Value[:2]) if err != nil { return } request.Port = v2net.PortFromBytes(buffer.Value[:2]) return } func (request *Socks5Request) Destination() v2net.Destination { switch request.AddrType { case AddrTypeIPv4: return v2net.TCPDestination(v2net.IPAddress(request.IPv4[:]), request.Port) case AddrTypeIPv6: return v2net.TCPDestination(v2net.IPAddress(request.IPv6[:]), request.Port) case AddrTypeDomain: return v2net.TCPDestination(v2net.ParseAddress(request.Domain), request.Port) default: panic("Unknown address type") } } const ( ErrorSuccess = byte(0x00) ErrorGeneralFailure = byte(0x01) ErrorConnectionNotAllowed = byte(0x02) ErrorNetworkUnreachable = byte(0x03) ErrorHostUnUnreachable = byte(0x04) ErrorConnectionRefused = byte(0x05) ErrorTTLExpired = byte(0x06) ErrorCommandNotSupported = byte(0x07) ErrorAddressTypeNotSupported = byte(0x08) ) type Socks5Response struct { Version byte Error byte AddrType byte IPv4 [4]byte Domain string IPv6 [16]byte 
Port v2net.Port } func NewSocks5Response() *Socks5Response { return &Socks5Response{ Version: socksVersion, } } func (r *Socks5Response) SetIPv4(ipv4 []byte) { r.AddrType = AddrTypeIPv4 copy(r.IPv4[:], ipv4) } func (r *Socks5Response) SetIPv6(ipv6 []byte) { r.AddrType = AddrTypeIPv6 copy(r.IPv6[:], ipv6) } func (r *Socks5Response) SetDomain(domain string) { r.AddrType = AddrTypeDomain r.Domain = domain } func (r *Socks5Response) Write(writer io.Writer) { writer.Write([]byte{r.Version, r.Error, 0x00 /* reserved */, r.AddrType}) switch r.AddrType { case 0x01: writer.Write(r.IPv4[:]) case 0x03: writer.Write([]byte{byte(len(r.Domain))}) writer.Write([]byte(r.Domain)) case 0x04: writer.Write(r.IPv6[:]) } writer.Write(r.Port.Bytes(nil)) } remove use of small buffer package protocol import ( "io" "github.com/v2ray/v2ray-core/common/alloc" "github.com/v2ray/v2ray-core/common/log" v2net "github.com/v2ray/v2ray-core/common/net" "github.com/v2ray/v2ray-core/proxy" "github.com/v2ray/v2ray-core/transport" ) const ( socksVersion = byte(0x05) socks4Version = byte(0x04) AuthNotRequired = byte(0x00) AuthGssApi = byte(0x01) AuthUserPass = byte(0x02) AuthNoMatchingMethod = byte(0xFF) Socks4RequestGranted = byte(90) Socks4RequestRejected = byte(91) ) // Authentication request header of Socks5 protocol type Socks5AuthenticationRequest struct { version byte nMethods byte authMethods [256]byte } func (request *Socks5AuthenticationRequest) HasAuthMethod(method byte) bool { for i := 0; i < int(request.nMethods); i++ { if request.authMethods[i] == method { return true } } return false } func ReadAuthentication(reader io.Reader) (auth Socks5AuthenticationRequest, auth4 Socks4AuthenticationRequest, err error) { buffer := make([]byte, 256) nBytes, err := reader.Read(buffer) if err != nil { return } if nBytes < 2 { log.Warning("Socks: expected 2 bytes read, but only ", nBytes, " bytes read") err = transport.ErrCorruptedPacket return } if buffer[0] == socks4Version { auth4.Version = buffer[0] 
auth4.Command = buffer[1] auth4.Port = v2net.PortFromBytes(buffer[2:4]) copy(auth4.IP[:], buffer[4:8]) err = Socks4Downgrade return } auth.version = buffer[0] if auth.version != socksVersion { log.Warning("Socks: Unknown protocol version ", auth.version) err = proxy.ErrInvalidProtocolVersion return } auth.nMethods = buffer[1] if auth.nMethods <= 0 { log.Warning("Socks: Zero length of authentication methods") err = proxy.ErrInvalidAuthentication return } if nBytes-2 != int(auth.nMethods) { log.Warning("Socks: Unmatching number of auth methods, expecting ", auth.nMethods, ", but got ", nBytes) err = proxy.ErrInvalidAuthentication return } copy(auth.authMethods[:], buffer[2:nBytes]) return } type Socks5AuthenticationResponse struct { version byte authMethod byte } func NewAuthenticationResponse(authMethod byte) *Socks5AuthenticationResponse { return &Socks5AuthenticationResponse{ version: socksVersion, authMethod: authMethod, } } func WriteAuthentication(writer io.Writer, r *Socks5AuthenticationResponse) error { _, err := writer.Write([]byte{r.version, r.authMethod}) return err } type Socks5UserPassRequest struct { version byte username string password string } func (request Socks5UserPassRequest) Username() string { return request.username } func (request Socks5UserPassRequest) Password() string { return request.password } func (request Socks5UserPassRequest) AuthDetail() string { return request.username + ":" + request.password } func ReadUserPassRequest(reader io.Reader) (request Socks5UserPassRequest, err error) { buffer := alloc.NewSmallBuffer() defer buffer.Release() _, err = reader.Read(buffer.Value[0:2]) if err != nil { return } request.version = buffer.Value[0] nUsername := buffer.Value[1] nBytes, err := reader.Read(buffer.Value[:nUsername]) if err != nil { return } request.username = string(buffer.Value[:nBytes]) _, err = reader.Read(buffer.Value[0:1]) if err != nil { return } nPassword := buffer.Value[0] nBytes, err = reader.Read(buffer.Value[:nPassword]) 
if err != nil { return } request.password = string(buffer.Value[:nBytes]) return } type Socks5UserPassResponse struct { version byte status byte } func NewSocks5UserPassResponse(status byte) Socks5UserPassResponse { return Socks5UserPassResponse{ version: socksVersion, status: status, } } func WriteUserPassResponse(writer io.Writer, response Socks5UserPassResponse) error { _, err := writer.Write([]byte{response.version, response.status}) return err } const ( AddrTypeIPv4 = byte(0x01) AddrTypeIPv6 = byte(0x04) AddrTypeDomain = byte(0x03) CmdConnect = byte(0x01) CmdBind = byte(0x02) CmdUdpAssociate = byte(0x03) ) type Socks5Request struct { Version byte Command byte AddrType byte IPv4 [4]byte Domain string IPv6 [16]byte Port v2net.Port } func ReadRequest(reader io.Reader) (request *Socks5Request, err error) { buffer := alloc.NewSmallBuffer() defer buffer.Release() _, err = io.ReadFull(reader, buffer.Value[:4]) if err != nil { return } request = &Socks5Request{ Version: buffer.Value[0], Command: buffer.Value[1], // buffer[2] is a reserved field AddrType: buffer.Value[3], } switch request.AddrType { case AddrTypeIPv4: _, err = io.ReadFull(reader, request.IPv4[:]) if err != nil { return } case AddrTypeDomain: _, err = io.ReadFull(reader, buffer.Value[0:1]) if err != nil { return } domainLength := buffer.Value[0] _, err = io.ReadFull(reader, buffer.Value[:domainLength]) if err != nil { return } request.Domain = string(append([]byte(nil), buffer.Value[:domainLength]...)) case AddrTypeIPv6: _, err = io.ReadFull(reader, request.IPv6[:]) if err != nil { return } default: log.Warning("Socks: Unexpected address type ", request.AddrType) err = transport.ErrCorruptedPacket return } _, err = io.ReadFull(reader, buffer.Value[:2]) if err != nil { return } request.Port = v2net.PortFromBytes(buffer.Value[:2]) return } func (request *Socks5Request) Destination() v2net.Destination { switch request.AddrType { case AddrTypeIPv4: return 
v2net.TCPDestination(v2net.IPAddress(request.IPv4[:]), request.Port) case AddrTypeIPv6: return v2net.TCPDestination(v2net.IPAddress(request.IPv6[:]), request.Port) case AddrTypeDomain: return v2net.TCPDestination(v2net.ParseAddress(request.Domain), request.Port) default: panic("Unknown address type") } } const ( ErrorSuccess = byte(0x00) ErrorGeneralFailure = byte(0x01) ErrorConnectionNotAllowed = byte(0x02) ErrorNetworkUnreachable = byte(0x03) ErrorHostUnUnreachable = byte(0x04) ErrorConnectionRefused = byte(0x05) ErrorTTLExpired = byte(0x06) ErrorCommandNotSupported = byte(0x07) ErrorAddressTypeNotSupported = byte(0x08) ) type Socks5Response struct { Version byte Error byte AddrType byte IPv4 [4]byte Domain string IPv6 [16]byte Port v2net.Port } func NewSocks5Response() *Socks5Response { return &Socks5Response{ Version: socksVersion, } } func (r *Socks5Response) SetIPv4(ipv4 []byte) { r.AddrType = AddrTypeIPv4 copy(r.IPv4[:], ipv4) } func (r *Socks5Response) SetIPv6(ipv6 []byte) { r.AddrType = AddrTypeIPv6 copy(r.IPv6[:], ipv6) } func (r *Socks5Response) SetDomain(domain string) { r.AddrType = AddrTypeDomain r.Domain = domain } func (r *Socks5Response) Write(writer io.Writer) { writer.Write([]byte{r.Version, r.Error, 0x00 /* reserved */, r.AddrType}) switch r.AddrType { case 0x01: writer.Write(r.IPv4[:]) case 0x03: writer.Write([]byte{byte(len(r.Domain))}) writer.Write([]byte(r.Domain)) case 0x04: writer.Write(r.IPv6[:]) } writer.Write(r.Port.Bytes(nil)) }
package medtronic import ( "log" ) // Carbs values are represented as either grams or 10x exchanges. type Carbs int const ( CarbUnits Command = 0x88 GlucoseUnits Command = 0x89 ) type CarbUnitsType byte //go:generate stringer -type CarbUnitsType const ( Grams CarbUnitsType = 1 Exchanges CarbUnitsType = 2 ) // Glucose values are represented as either mg/dL or μmol/L, // so all conversions must include a GlucoseUnitsType parameter. type Glucose int type GlucoseUnitsType byte const ( MgPerDeciLiter GlucoseUnitsType = 1 MmolPerLiter GlucoseUnitsType = 2 ) func (u GlucoseUnitsType) String() string { switch u { case MgPerDeciLiter: return "mg/dL" case MmolPerLiter: return "mmol/L" default: log.Panicf("unknown glucose unit %d", u) } panic("unreachable") } func (pump *Pump) whichUnits(cmd Command) byte { data := pump.Execute(cmd) if pump.Error() != nil { return 0 } if len(data) < 2 || data[0] != 1 { pump.BadResponse(cmd, data) return 0 } return data[1] } func intToGlucose(n int, t GlucoseUnitsType) Glucose { if t == MmolPerLiter { // Convert 10x mmol/L to μmol/L return Glucose(n) * 100 } else { return Glucose(n) } } func byteToGlucose(n byte, t GlucoseUnitsType) Glucose { return intToGlucose(int(n), t) } func (pump *Pump) CarbUnits() CarbUnitsType { return CarbUnitsType(pump.whichUnits(CarbUnits)) } func (pump *Pump) GlucoseUnits() GlucoseUnitsType { return GlucoseUnitsType(pump.whichUnits(GlucoseUnits)) } // Quantities and rates of insulin delivery are represented in milliunits. 
type Insulin int func milliUnitsPerStroke(newerPump bool) Insulin { if newerPump { return 25 } else { return 100 } } func intToInsulin(strokes int, newerPump bool) Insulin { return Insulin(strokes) * milliUnitsPerStroke(newerPump) } func byteToInsulin(strokes uint8, newerPump bool) Insulin { return intToInsulin(int(strokes), newerPump) } func twoByteInsulin(data []byte, newerPump bool) Insulin { return Insulin(twoByteUint(data)) * milliUnitsPerStroke(newerPump) } Print Insulin values as floating point number of units package medtronic import ( "fmt" "log" ) // Carbs values are represented as either grams or 10x exchanges. type Carbs int const ( CarbUnits Command = 0x88 GlucoseUnits Command = 0x89 ) type CarbUnitsType byte //go:generate stringer -type CarbUnitsType const ( Grams CarbUnitsType = 1 Exchanges CarbUnitsType = 2 ) // Glucose values are represented as either mg/dL or μmol/L, // so all conversions must include a GlucoseUnitsType parameter. type Glucose int type GlucoseUnitsType byte const ( MgPerDeciLiter GlucoseUnitsType = 1 MmolPerLiter GlucoseUnitsType = 2 ) func (u GlucoseUnitsType) String() string { switch u { case MgPerDeciLiter: return "mg/dL" case MmolPerLiter: return "mmol/L" default: log.Panicf("unknown glucose unit %d", u) } panic("unreachable") } func (pump *Pump) whichUnits(cmd Command) byte { data := pump.Execute(cmd) if pump.Error() != nil { return 0 } if len(data) < 2 || data[0] != 1 { pump.BadResponse(cmd, data) return 0 } return data[1] } func intToGlucose(n int, t GlucoseUnitsType) Glucose { if t == MmolPerLiter { // Convert 10x mmol/L to μmol/L return Glucose(n) * 100 } else { return Glucose(n) } } func byteToGlucose(n byte, t GlucoseUnitsType) Glucose { return intToGlucose(int(n), t) } func (pump *Pump) CarbUnits() CarbUnitsType { return CarbUnitsType(pump.whichUnits(CarbUnits)) } func (pump *Pump) GlucoseUnits() GlucoseUnitsType { return GlucoseUnitsType(pump.whichUnits(GlucoseUnits)) } // Quantities and rates of insulin delivery are 
represented in milliunits. type Insulin int func (r Insulin) String() string { return fmt.Sprintf("%g", float64(r)/1000) } func milliUnitsPerStroke(newerPump bool) Insulin { if newerPump { return 25 } else { return 100 } } func intToInsulin(strokes int, newerPump bool) Insulin { return Insulin(strokes) * milliUnitsPerStroke(newerPump) } func byteToInsulin(strokes uint8, newerPump bool) Insulin { return intToInsulin(int(strokes), newerPump) } func twoByteInsulin(data []byte, newerPump bool) Insulin { return Insulin(twoByteUint(data)) * milliUnitsPerStroke(newerPump) }
package instance import ( bosherr "github.com/cloudfoundry/bosh-utils/errors" "bosh-google-cpi/api" "bosh-google-cpi/util" computebeta "google.golang.org/api/compute/v0.beta" ) // The list of metadata key-value pairs that should be applied as labels var labelList []string = []string{"director", "name", "id", "deployment", "job"} func (i GoogleInstanceService) SetMetadata(id string, vmMetadata Metadata) error { // Find the instance instance, found, err := i.FindBeta(id, "") if err != nil { return err } if !found { return api.NewVMNotFoundError(id) } // We need to reuse the original instance metadata fingerprint and items metadata := instance.Metadata metadataMap := make(map[string]string) // Grab the original metadata items for _, item := range metadata.Items { metadataMap[item.Key] = item.Value } // Add or override the new metadata items. for key, value := range vmMetadata { metadataMap[key] = value.(string) } // Set the new metadata items var metadataItems []*computebeta.MetadataItems for key, value := range metadataMap { mValue := value metadataItems = append(metadataItems, &computebeta.MetadataItems{Key: key, Value: mValue}) } metadata.Items = metadataItems i.logger.Debug(googleInstanceServiceLogTag, "Setting metadata for Google Instance '%s'", id) operation, err := i.computeServiceB.Instances.SetMetadata(i.project, util.ResourceSplitter(instance.Zone), id, metadata).Do() if err != nil { return bosherr.WrapErrorf(err, "Failed to set metadata for Google Instance '%s'", id) } if _, err = i.operationService.WaiterB(operation, instance.Zone, ""); err != nil { return bosherr.WrapErrorf(err, "Failed to set metadata for Google Instance '%s'", id) } // Repeat the metadata process, but with labels labelsMap := make(map[string]string) for k, v := range instance.Labels { labelsMap[k] = v } for _, l := range labelList { if v, ok := vmMetadata[l]; ok { labelsMap[l] = v.(string) } } labelsRequest := &computebeta.InstancesSetLabelsRequest{ LabelFingerprint: 
instance.LabelFingerprint, Labels: labelsMap, } i.logger.Debug(googleInstanceServiceLogTag, "Setting labels for Google Instance '%s'", id) operation, err = i.computeServiceB.Instances.SetLabels(i.project, util.ResourceSplitter(instance.Zone), id, labelsRequest).Do() if err != nil { return bosherr.WrapErrorf(err, "Failed to set labels for Google Instance '%s'", id) } if _, err = i.operationService.WaiterB(operation, instance.Zone, ""); err != nil { return bosherr.WrapErrorf(err, "Failed to set labels for Google Instance '%s'", id) } return nil } Sanitize label strings package instance import ( "strings" "bosh-google-cpi/api" "bosh-google-cpi/util" bosherr "github.com/cloudfoundry/bosh-utils/errors" computebeta "google.golang.org/api/compute/v0.beta" ) // The list of metadata key-value pairs that should be applied as labels var labelList []string = []string{"director", "name", "id", "deployment", "job"} func (i GoogleInstanceService) SetMetadata(id string, vmMetadata Metadata) error { // Find the instance instance, found, err := i.FindBeta(id, "") if err != nil { return err } if !found { return api.NewVMNotFoundError(id) } // We need to reuse the original instance metadata fingerprint and items metadata := instance.Metadata metadataMap := make(map[string]string) // Grab the original metadata items for _, item := range metadata.Items { metadataMap[item.Key] = item.Value } // Add or override the new metadata items. 
for key, value := range vmMetadata { metadataMap[key] = value.(string) } // Set the new metadata items var metadataItems []*computebeta.MetadataItems for key, value := range metadataMap { mValue := value metadataItems = append(metadataItems, &computebeta.MetadataItems{Key: key, Value: mValue}) } metadata.Items = metadataItems i.logger.Debug(googleInstanceServiceLogTag, "Setting metadata for Google Instance '%s'", id) operation, err := i.computeServiceB.Instances.SetMetadata(i.project, util.ResourceSplitter(instance.Zone), id, metadata).Do() if err != nil { return bosherr.WrapErrorf(err, "Failed to set metadata for Google Instance '%s'", id) } if _, err = i.operationService.WaiterB(operation, instance.Zone, ""); err != nil { return bosherr.WrapErrorf(err, "Failed to set metadata for Google Instance '%s'", id) } // Repeat the metadata process, but with labels labelsMap := make(map[string]string) for k, v := range instance.Labels { labelsMap[k] = v } for _, l := range labelList { if v, ok := vmMetadata[l]; ok { labelsMap[l] = saveLabel(v.(string)) } } labelsRequest := &computebeta.InstancesSetLabelsRequest{ LabelFingerprint: instance.LabelFingerprint, Labels: labelsMap, } i.logger.Debug(googleInstanceServiceLogTag, "Setting labels for Google Instance '%s'", id) operation, err = i.computeServiceB.Instances.SetLabels(i.project, util.ResourceSplitter(instance.Zone), id, labelsRequest).Do() if err != nil { return bosherr.WrapErrorf(err, "Failed to set labels for Google Instance '%s'", id) } if _, err = i.operationService.WaiterB(operation, instance.Zone, ""); err != nil { return bosherr.WrapErrorf(err, "Failed to set labels for Google Instance '%s'", id) } return nil } func safeLabel(s string, maxlen int) string { s = strings.Replace(s, "/", "", -1) s = strings.Replace(s, "-", "", -1) if len(s) > maxlen { s = s[0:maxlen] } return s }
package main import ( "encoding/json" "errors" "fmt" "github.com/coreos/go-etcd/etcd" logging "github.com/op/go-logging" "koding/db/mongodb/modelhelper" "koding/newkite/dnode" "koding/newkite/kite" "koding/newkite/kodingkey" "koding/newkite/protocol" "koding/newkite/token" "koding/tools/config" stdlog "log" "net" "os" "strconv" "strings" "time" ) const ( HEARTBEAT_INTERVAL = 1 * time.Minute HEARTBEAT_DELAY = 2 * time.Minute ) var log = logging.MustGetLogger("Kontrol") type Kontrol struct { etcd *etcd.Client watcherHub *watcherHub } func NewKontrol() *Kontrol { return &Kontrol{ etcd: etcd.NewClient(nil), // TODO read machine list from config watcherHub: newWatcherHub(), } } func main() { setupLogging() kontrol := NewKontrol() options := &protocol.Options{ Kitename: "kontrol", Version: "1", Port: strconv.Itoa(config.Current.NewKontrol.Port), Region: "localhost", Environment: "development", } k := kite.New(options) k.KontrolEnabled = false k.Authenticators["kodingKey"] = kontrol.AuthenticateFromKodingKey k.Authenticators["sessionID"] = kontrol.AuthenticateFromSessionID k.HandleFunc("register", kontrol.handleRegister) k.HandleFunc("getKites", kontrol.handleGetKites) go kontrol.WatchEtcd() k.Run() } func setupLogging() { log.Module = "Kontrol" logging.SetFormatter(logging.MustStringFormatter("%{level:-8s} ▶ %{message}")) stderrBackend := logging.NewLogBackend(os.Stderr, "", stdlog.LstdFlags|stdlog.Lshortfile) stderrBackend.Color = true syslogBackend, _ := logging.NewSyslogBackend(log.Module) logging.SetBackend(stderrBackend, syslogBackend) } // registerValue is the type of the value that is saved to etcd. type registerValue struct { PublicIP string Port string KodingKey string } func (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) { log.Info("Register request from: %#v", r.RemoteKite.Kite) // Only accept requests with kodingKey because we need this info // for generating tokens for this kite. 
if r.Authentication.Type != "kodingKey" { return nil, fmt.Errorf("Unexpected authentication type: %s", r.Authentication.Type) } // Set PublicIP address if it's empty. if r.RemoteKite.PublicIP == "" { r.RemoteKite.PublicIP, _, _ = net.SplitHostPort(r.RemoteAddr) } key, err := getKiteKey(r.RemoteKite.Kite) if err != nil { return nil, err } rv := &registerValue{ PublicIP: r.RemoteKite.PublicIP, Port: r.RemoteKite.Port, KodingKey: r.Authentication.Key, } valueBytes, _ := json.Marshal(rv) value := string(valueBytes) ttl := uint64(HEARTBEAT_DELAY / time.Second) // setKey sets the value of the Kite in etcd. setKey := func() (prevValue string, err error) { resp, err := k.etcd.Set(key, value, ttl) if err != nil { log.Critical("etcd error: %s", err) return } prevValue = resp.PrevValue // Set the TTL for the username. Otherwise, empty dirs remain in etcd. _, err = k.etcd.UpdateDir("/kites/"+r.RemoteKite.Username, ttl) if err != nil { log.Critical("etcd error: %s", err) return } return } // Register to etcd. prev, err := setKey() if err != nil { return nil, errors.New("Internal error") } if prev != "" { log.Notice("Kite (%s) is already registered. Doing nothing.", key) } else { // Request heartbeat from the Kite. heartbeatFunc := func(p *dnode.Partial) { prev, err := setKey() if err == nil && prev == "" { log.Warning("Came heartbeat but the Kite (%s) is not registered. Re-registering it. It may be an indication that the heartbeat delay is too short.", key) } } heartbeatArgs := []interface{}{ HEARTBEAT_INTERVAL / time.Second, dnode.Callback(heartbeatFunc), } _, err := r.RemoteKite.Call("heartbeat", heartbeatArgs) if err != nil { return nil, err } } log.Info("Kite registered: %s", key) k.watcherHub.Notify(&r.RemoteKite.Kite, protocol.Register) r.RemoteKite.OnDisconnect(func() { // Delete from etcd, WatchEtcd() will get the event // and will notify watchers of this Kite for deregistration. 
k.etcd.Delete(key) }) // send response back to the kite, also identify him with the new name response := protocol.RegisterResult{ Result: protocol.AllowKite, Username: r.RemoteKite.Username, PublicIP: r.RemoteKite.PublicIP, } return response, nil } // getKiteKey returns a string representing the kite uniquely // that is suitable to use as a key for etcd. func getKiteKey(k protocol.Kite) (string, error) { // Order is important. fields := map[string]string{ "username": k.Username, "environment": k.Environment, "name": k.Name, "version": k.Version, "region": k.Region, "hostname": k.Hostname, "id": k.ID, } // Validate fields. for k, v := range fields { if v == "" { return "", fmt.Errorf("Empty Kite field: %s", k) } if strings.ContainsRune(v, '/') { return "", fmt.Errorf("Field \"%s\" must not contain '/'", k) } } // Build key. key := "/" for _, v := range fields { key = key + v + "/" } key = strings.TrimSuffix(key, "/") return "/kites" + key, nil } // getQueryKey returns the etcd key for the query. func getQueryKey(q *KontrolQuery) (string, error) { fields := []string{ q.Username, q.Environment, q.Name, q.Version, q.Region, q.Hostname, q.ID, } // Validate query and build key. path := "/" empty := false for _, f := range fields { if f == "" { empty = true } else { if empty { return "", errors.New("Invalid query") } path = path + f + "/" } } return "/kites" + path, nil } func (k *Kontrol) handleGetKites(r *kite.Request) (interface{}, error) { var args []*dnode.Partial err := r.Args.Unmarshal(&args) if err != nil { return nil, err } if len(args) != 1 && len(args) != 2 { return nil, errors.New("Invalid number of arguments") } var query KontrolQuery err = args[0].Unmarshal(&query) if err != nil { return nil, errors.New("Invalid query argument") } // To be called when a Kite is registered or deregistered matching the query. 
var watchCallback dnode.Function if len(args) == 2 { err = args[1].Unmarshal(&watchCallback) if err != nil { return nil, errors.New("Invalid callback argument") } } // We do not allow access to other's kites for now. if r.Username != query.Username { return nil, errors.New("Not your Kite") } return k.getKites(r, query, watchCallback) } func (k *Kontrol) getKites(r *kite.Request, query KontrolQuery, watchCallback dnode.Function) ([]protocol.KiteWithToken, error) { key, err := getQueryKey(&query) if err != nil { return nil, err } resp, err := k.etcd.GetAll(key, false) if err != nil { if etcdErr, ok := err.(etcd.EtcdError); ok { if etcdErr.ErrorCode == 100 { // Key Not Found return make([]protocol.KiteWithToken, 0), nil } } log.Critical("etcd error: %s", err) return nil, fmt.Errorf("Internal error") } kvs := flatten(resp.Kvs) kitesWithToken, err := addTokenToKites(kvs, r.Username) if err != nil { return nil, err } // Register callbacks to our watcher hub. // It will call them when a Kite registered/unregistered matching the query. if watchCallback != nil { k.watcherHub.RegisterWatcher(r.RemoteKite, &query, watchCallback) } return kitesWithToken, nil } // flatten converts the recursive etcd directory structure to flat one that contains Kites. func flatten(in []etcd.KeyValuePair) []etcd.KeyValuePair { var out []etcd.KeyValuePair for _, kv := range in { if kv.Dir { out = append(out, flatten(kv.KVPairs)...) continue } out = append(out, kv) } return out } func addTokenToKites(kvs []etcd.KeyValuePair, username string) ([]protocol.KiteWithToken, error) { kitesWithToken := make([]protocol.KiteWithToken, len(kvs)) for i, kv := range kvs { kite, kodingKey, err := kiteFromEtcdKV(kv.Key, kv.Value) if err != nil { return nil, err } // Generate token. key, err := kodingkey.FromString(kodingKey) if err != nil { return nil, fmt.Errorf("Koding Key is invalid at Kite: %s", key) } // username is from requester, key is from kite owner. 
tokenString, err := token.NewToken(username, kite.ID).EncryptString(key) if err != nil { return nil, errors.New("Server error: Cannot generate a token") } kitesWithToken[i] = protocol.KiteWithToken{ Kite: *kite, Token: tokenString, } } return kitesWithToken, nil } // kiteFromEtcdKV returns a *protocol.Kite and Koding Key string from an etcd key. // etcd key is like: /kites/devrim/development/mathworker/1/localhost/tardis.local/662ed473-351f-4c9f-786b-99cf02cdaadb func kiteFromEtcdKV(key, value string) (*protocol.Kite, string, error) { fields := strings.Split(strings.TrimPrefix(key, "/"), "/") if len(fields) != 8 || (len(fields) > 0 && fields[0] != "kites") { return nil, "", fmt.Errorf("Invalid Kite: %s", key) } kite := new(protocol.Kite) kite.Username = fields[1] kite.Environment = fields[2] kite.Name = fields[3] kite.Version = fields[4] kite.Region = fields[5] kite.Hostname = fields[6] kite.ID = fields[7] rv := new(registerValue) json.Unmarshal([]byte(value), rv) kite.PublicIP = rv.PublicIP kite.Port = rv.Port return kite, rv.KodingKey, nil } // WatchEtcd watches all Kite changes on etcd cluster // and notifies registered watchers on this Kontrol instance. func (k *Kontrol) WatchEtcd() { getIndex: resp, err := k.etcd.Set("/_kontrol_get_index", "OK", 1) if err != nil { log.Critical("etcd error 1: %s", err.Error()) time.Sleep(time.Second) goto getIndex } index := resp.ModifiedIndex log.Info("etcd: index = %d", index) receiver := make(chan *etcd.Response) go func() { watch: resp, err = k.etcd.WatchAll("/kites", index+1, receiver, nil) if err != nil { log.Critical("etcd error 2: %s", err) time.Sleep(time.Second) goto watch } }() // Channel is never closed. for resp := range receiver { // log.Debug("etcd: change received: %#v", resp) index = resp.ModifiedIndex // Notify deregistration events. 
if strings.HasPrefix(resp.Key, "/kites") && (resp.Action == "delete" || resp.Action == "expire") { kite, _, err := kiteFromEtcdKV(resp.Key, resp.Value) if err == nil { k.watcherHub.Notify(kite, protocol.Deregister) } } } } func (k *Kontrol) AuthenticateFromSessionID(options *kite.CallOptions) error { username, err := findUsernameFromSessionID(options.Authentication.Key) if err != nil { return err } options.Kite.Username = username return nil } func findUsernameFromSessionID(sessionID string) (string, error) { session, err := modelhelper.GetSession(sessionID) if err != nil { return "", err } return session.Username, nil } func (k *Kontrol) AuthenticateFromKodingKey(options *kite.CallOptions) error { username, err := findUsernameFromKey(options.Authentication.Key) if err != nil { return err } options.Kite.Username = username return nil } func findUsernameFromKey(key string) (string, error) { kodingKey, err := modelhelper.GetKodingKeysByKey(key) if err != nil { return "", fmt.Errorf("register kodingkey err %s", err) } account, err := modelhelper.GetAccountById(kodingKey.Owner) if err != nil { return "", fmt.Errorf("register get user err %s", err) } if account.Profile.Nickname == "" { return "", errors.New("nickname is empty, could not register kite") } return account.Profile.Nickname, nil } kite: read etcd config from config package main import ( "encoding/json" "errors" "fmt" "github.com/coreos/go-etcd/etcd" logging "github.com/op/go-logging" "koding/db/mongodb/modelhelper" "koding/newkite/dnode" "koding/newkite/kite" "koding/newkite/kodingkey" "koding/newkite/protocol" "koding/newkite/token" "koding/tools/config" stdlog "log" "net" "os" "strconv" "strings" "time" ) const ( HEARTBEAT_INTERVAL = 1 * time.Minute HEARTBEAT_DELAY = 2 * time.Minute ) var log = logging.MustGetLogger("Kontrol") type Kontrol struct { etcd *etcd.Client watcherHub *watcherHub } func NewKontrol() *Kontrol { machines := make([]string, len(config.Current.Etcd)) for i, s := range 
config.Current.Etcd { machines[i] = "http://" + s.Host + ":" + strconv.FormatUint(uint64(s.Port), 10) } fmt.Printf("--- machines: %#v\n", machines) return &Kontrol{ etcd: etcd.NewClient(machines), watcherHub: newWatcherHub(), } } func main() { setupLogging() kontrol := NewKontrol() options := &protocol.Options{ Kitename: "kontrol", Version: "1", Port: strconv.Itoa(config.Current.NewKontrol.Port), Region: "localhost", Environment: "development", } k := kite.New(options) k.KontrolEnabled = false k.Authenticators["kodingKey"] = kontrol.AuthenticateFromKodingKey k.Authenticators["sessionID"] = kontrol.AuthenticateFromSessionID k.HandleFunc("register", kontrol.handleRegister) k.HandleFunc("getKites", kontrol.handleGetKites) go kontrol.WatchEtcd() k.Run() } func setupLogging() { log.Module = "Kontrol" logging.SetFormatter(logging.MustStringFormatter("%{level:-8s} ▶ %{message}")) stderrBackend := logging.NewLogBackend(os.Stderr, "", stdlog.LstdFlags|stdlog.Lshortfile) stderrBackend.Color = true syslogBackend, _ := logging.NewSyslogBackend(log.Module) logging.SetBackend(stderrBackend, syslogBackend) } // registerValue is the type of the value that is saved to etcd. type registerValue struct { PublicIP string Port string KodingKey string } func (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) { log.Info("Register request from: %#v", r.RemoteKite.Kite) // Only accept requests with kodingKey because we need this info // for generating tokens for this kite. if r.Authentication.Type != "kodingKey" { return nil, fmt.Errorf("Unexpected authentication type: %s", r.Authentication.Type) } // Set PublicIP address if it's empty. 
if r.RemoteKite.PublicIP == "" { r.RemoteKite.PublicIP, _, _ = net.SplitHostPort(r.RemoteAddr) } key, err := getKiteKey(r.RemoteKite.Kite) if err != nil { return nil, err } rv := &registerValue{ PublicIP: r.RemoteKite.PublicIP, Port: r.RemoteKite.Port, KodingKey: r.Authentication.Key, } valueBytes, _ := json.Marshal(rv) value := string(valueBytes) ttl := uint64(HEARTBEAT_DELAY / time.Second) // setKey sets the value of the Kite in etcd. setKey := func() (prevValue string, err error) { resp, err := k.etcd.Set(key, value, ttl) if err != nil { log.Critical("etcd error: %s", err) return } prevValue = resp.PrevValue // Set the TTL for the username. Otherwise, empty dirs remain in etcd. _, err = k.etcd.UpdateDir("/kites/"+r.RemoteKite.Username, ttl) if err != nil { log.Critical("etcd error: %s", err) return } return } // Register to etcd. prev, err := setKey() if err != nil { return nil, errors.New("Internal error") } if prev != "" { log.Notice("Kite (%s) is already registered. Doing nothing.", key) } else { // Request heartbeat from the Kite. heartbeatFunc := func(p *dnode.Partial) { prev, err := setKey() if err == nil && prev == "" { log.Warning("Came heartbeat but the Kite (%s) is not registered. Re-registering it. It may be an indication that the heartbeat delay is too short.", key) } } heartbeatArgs := []interface{}{ HEARTBEAT_INTERVAL / time.Second, dnode.Callback(heartbeatFunc), } _, err := r.RemoteKite.Call("heartbeat", heartbeatArgs) if err != nil { return nil, err } } log.Info("Kite registered: %s", key) k.watcherHub.Notify(&r.RemoteKite.Kite, protocol.Register) r.RemoteKite.OnDisconnect(func() { // Delete from etcd, WatchEtcd() will get the event // and will notify watchers of this Kite for deregistration. 
k.etcd.Delete(key) }) // send response back to the kite, also identify him with the new name response := protocol.RegisterResult{ Result: protocol.AllowKite, Username: r.RemoteKite.Username, PublicIP: r.RemoteKite.PublicIP, } return response, nil } // getKiteKey returns a string representing the kite uniquely // that is suitable to use as a key for etcd. func getKiteKey(k protocol.Kite) (string, error) { // Order is important. fields := map[string]string{ "username": k.Username, "environment": k.Environment, "name": k.Name, "version": k.Version, "region": k.Region, "hostname": k.Hostname, "id": k.ID, } // Validate fields. for k, v := range fields { if v == "" { return "", fmt.Errorf("Empty Kite field: %s", k) } if strings.ContainsRune(v, '/') { return "", fmt.Errorf("Field \"%s\" must not contain '/'", k) } } // Build key. key := "/" for _, v := range fields { key = key + v + "/" } key = strings.TrimSuffix(key, "/") return "/kites" + key, nil } // getQueryKey returns the etcd key for the query. func getQueryKey(q *KontrolQuery) (string, error) { fields := []string{ q.Username, q.Environment, q.Name, q.Version, q.Region, q.Hostname, q.ID, } // Validate query and build key. path := "/" empty := false for _, f := range fields { if f == "" { empty = true } else { if empty { return "", errors.New("Invalid query") } path = path + f + "/" } } return "/kites" + path, nil } func (k *Kontrol) handleGetKites(r *kite.Request) (interface{}, error) { var args []*dnode.Partial err := r.Args.Unmarshal(&args) if err != nil { return nil, err } if len(args) != 1 && len(args) != 2 { return nil, errors.New("Invalid number of arguments") } var query KontrolQuery err = args[0].Unmarshal(&query) if err != nil { return nil, errors.New("Invalid query argument") } // To be called when a Kite is registered or deregistered matching the query. 
var watchCallback dnode.Function if len(args) == 2 { err = args[1].Unmarshal(&watchCallback) if err != nil { return nil, errors.New("Invalid callback argument") } } // We do not allow access to other's kites for now. if r.Username != query.Username { return nil, errors.New("Not your Kite") } return k.getKites(r, query, watchCallback) } func (k *Kontrol) getKites(r *kite.Request, query KontrolQuery, watchCallback dnode.Function) ([]protocol.KiteWithToken, error) { key, err := getQueryKey(&query) if err != nil { return nil, err } resp, err := k.etcd.GetAll(key, false) if err != nil { if etcdErr, ok := err.(etcd.EtcdError); ok { if etcdErr.ErrorCode == 100 { // Key Not Found return make([]protocol.KiteWithToken, 0), nil } } log.Critical("etcd error: %s", err) return nil, fmt.Errorf("Internal error") } kvs := flatten(resp.Kvs) kitesWithToken, err := addTokenToKites(kvs, r.Username) if err != nil { return nil, err } // Register callbacks to our watcher hub. // It will call them when a Kite registered/unregistered matching the query. if watchCallback != nil { k.watcherHub.RegisterWatcher(r.RemoteKite, &query, watchCallback) } return kitesWithToken, nil } // flatten converts the recursive etcd directory structure to flat one that contains Kites. func flatten(in []etcd.KeyValuePair) []etcd.KeyValuePair { var out []etcd.KeyValuePair for _, kv := range in { if kv.Dir { out = append(out, flatten(kv.KVPairs)...) continue } out = append(out, kv) } return out } func addTokenToKites(kvs []etcd.KeyValuePair, username string) ([]protocol.KiteWithToken, error) { kitesWithToken := make([]protocol.KiteWithToken, len(kvs)) for i, kv := range kvs { kite, kodingKey, err := kiteFromEtcdKV(kv.Key, kv.Value) if err != nil { return nil, err } // Generate token. key, err := kodingkey.FromString(kodingKey) if err != nil { return nil, fmt.Errorf("Koding Key is invalid at Kite: %s", key) } // username is from requester, key is from kite owner. 
tokenString, err := token.NewToken(username, kite.ID).EncryptString(key) if err != nil { return nil, errors.New("Server error: Cannot generate a token") } kitesWithToken[i] = protocol.KiteWithToken{ Kite: *kite, Token: tokenString, } } return kitesWithToken, nil } // kiteFromEtcdKV returns a *protocol.Kite and Koding Key string from an etcd key. // etcd key is like: /kites/devrim/development/mathworker/1/localhost/tardis.local/662ed473-351f-4c9f-786b-99cf02cdaadb func kiteFromEtcdKV(key, value string) (*protocol.Kite, string, error) { fields := strings.Split(strings.TrimPrefix(key, "/"), "/") if len(fields) != 8 || (len(fields) > 0 && fields[0] != "kites") { return nil, "", fmt.Errorf("Invalid Kite: %s", key) } kite := new(protocol.Kite) kite.Username = fields[1] kite.Environment = fields[2] kite.Name = fields[3] kite.Version = fields[4] kite.Region = fields[5] kite.Hostname = fields[6] kite.ID = fields[7] rv := new(registerValue) json.Unmarshal([]byte(value), rv) kite.PublicIP = rv.PublicIP kite.Port = rv.Port return kite, rv.KodingKey, nil } // WatchEtcd watches all Kite changes on etcd cluster // and notifies registered watchers on this Kontrol instance. func (k *Kontrol) WatchEtcd() { getIndex: resp, err := k.etcd.Set("/_kontrol_get_index", "OK", 1) if err != nil { log.Critical("etcd error 1: %s", err.Error()) time.Sleep(time.Second) goto getIndex } index := resp.ModifiedIndex log.Info("etcd: index = %d", index) receiver := make(chan *etcd.Response) go func() { watch: resp, err = k.etcd.WatchAll("/kites", index+1, receiver, nil) if err != nil { log.Critical("etcd error 2: %s", err) time.Sleep(time.Second) goto watch } }() // Channel is never closed. for resp := range receiver { // log.Debug("etcd: change received: %#v", resp) index = resp.ModifiedIndex // Notify deregistration events. 
if strings.HasPrefix(resp.Key, "/kites") && (resp.Action == "delete" || resp.Action == "expire") { kite, _, err := kiteFromEtcdKV(resp.Key, resp.Value) if err == nil { k.watcherHub.Notify(kite, protocol.Deregister) } } } } func (k *Kontrol) AuthenticateFromSessionID(options *kite.CallOptions) error { username, err := findUsernameFromSessionID(options.Authentication.Key) if err != nil { return err } options.Kite.Username = username return nil } func findUsernameFromSessionID(sessionID string) (string, error) { session, err := modelhelper.GetSession(sessionID) if err != nil { return "", err } return session.Username, nil } func (k *Kontrol) AuthenticateFromKodingKey(options *kite.CallOptions) error { username, err := findUsernameFromKey(options.Authentication.Key) if err != nil { return err } options.Kite.Username = username return nil } func findUsernameFromKey(key string) (string, error) { kodingKey, err := modelhelper.GetKodingKeysByKey(key) if err != nil { return "", fmt.Errorf("register kodingkey err %s", err) } account, err := modelhelper.GetAccountById(kodingKey.Owner) if err != nil { return "", fmt.Errorf("register get user err %s", err) } if account.Profile.Nickname == "" { return "", errors.New("nickname is empty, could not register kite") } return account.Profile.Nickname, nil }
package mpb import ( "bytes" "container/heap" "context" "fmt" "io" "math" "os" "sync" "time" "github.com/vbauerster/mpb/v8/cwriter" ) const ( prr = 150 * time.Millisecond // default RefreshRate ) // DoneError represents an error when `*mpb.Progress` is done but its functionality is requested. var DoneError = fmt.Errorf("%T instance can't be reused after it's done!", (*Progress)(nil)) // Progress represents a container that renders one or more progress bars. type Progress struct { ctx context.Context uwg *sync.WaitGroup cwg *sync.WaitGroup bwg *sync.WaitGroup operateState chan func(*pState) interceptIo chan func(io.Writer) done chan struct{} refreshCh chan time.Time once sync.Once cancel func() } // pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine. type pState struct { bHeap priorityQueue heapUpdated bool pMatrix map[int][]chan int aMatrix map[int][]chan int // for reuse purposes rows []io.Reader pool []*Bar // following are provided/overrided by user idCount int reqWidth int popPriority int popCompleted bool outputDiscarded bool rr time.Duration uwg *sync.WaitGroup externalRefresh <-chan interface{} renderDelay <-chan struct{} shutdownNotifier chan struct{} queueBars map[*Bar]*Bar output io.Writer debugOut io.Writer } // New creates new Progress container instance. It's not possible to // reuse instance after (*Progress).Wait method has been called. func New(options ...ContainerOption) *Progress { return NewWithContext(context.Background(), options...) } // NewWithContext creates new Progress container instance with provided // context. It's not possible to reuse instance after (*Progress).Wait // method has been called. 
func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { ctx, cancel := context.WithCancel(ctx) s := &pState{ bHeap: priorityQueue{}, rows: make([]io.Reader, 0, 64), pool: make([]*Bar, 0, 64), rr: prr, queueBars: make(map[*Bar]*Bar), output: os.Stdout, popPriority: math.MinInt32, } for _, opt := range options { if opt != nil { opt(s) } } p := &Progress{ ctx: ctx, uwg: s.uwg, cwg: new(sync.WaitGroup), bwg: new(sync.WaitGroup), operateState: make(chan func(*pState)), interceptIo: make(chan func(io.Writer)), done: make(chan struct{}), cancel: cancel, } p.cwg.Add(1) go p.serve(s, cwriter.New(s.output)) return p } // AddBar creates a bar with default bar filler. func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { return p.New(total, BarStyle(), options...) } // AddSpinner creates a bar with default spinner filler. func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { return p.New(total, SpinnerStyle(), options...) } // New creates a bar by calling `Build` method on provided `BarFillerBuilder`. func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { return p.AddFiller(total, builder.Build(), options...) } // AddFiller creates a bar which renders itself by provided filler. // If `total <= 0` triggering complete event by increment methods is disabled. // Panics if *Progress instance is done, i.e. called after (*Progress).Wait(). func (p *Progress) AddFiller(total int64, filler BarFiller, options ...BarOption) *Bar { if filler == nil { filler = NopStyle().Build() } p.bwg.Add(1) result := make(chan *Bar) select { case p.operateState <- func(ps *pState) { bs := ps.makeBarState(total, filler, options...) 
bar := newBar(p, bs) if bs.wait.bar != nil { ps.queueBars[bs.wait.bar] = bar } else { heap.Push(&ps.bHeap, bar) ps.heapUpdated = true } ps.idCount++ result <- bar }: bar := <-result return bar case <-p.done: p.bwg.Done() panic(DoneError) } } func (p *Progress) traverseBars(cb func(b *Bar) bool) { sync := make(chan struct{}) select { case p.operateState <- func(s *pState) { for i := 0; i < s.bHeap.Len(); i++ { bar := s.bHeap[i] if !cb(bar) { break } } close(sync) }: <-sync case <-p.done: } } // UpdateBarPriority same as *Bar.SetPriority(int). func (p *Progress) UpdateBarPriority(b *Bar, priority int) { select { case p.operateState <- func(s *pState) { if b.index < 0 { return } b.priority = priority heap.Fix(&s.bHeap, b.index) }: case <-p.done: } } // BarCount returns bars count. func (p *Progress) BarCount() int { result := make(chan int) select { case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }: return <-result case <-p.done: return 0 } } // Write is implementation of io.Writer. // Writing to `*mpb.Progress` will print lines above a running bar. // Writes aren't flushed immediatly, but at next refresh cycle. // If Write is called after `*mpb.Progress` is done, `mpb.DoneError` // is returned. func (p *Progress) Write(b []byte) (int, error) { type result struct { n int err error } ch := make(chan *result) select { case p.interceptIo <- func(w io.Writer) { n, err := w.Write(b) ch <- &result{n, err} }: res := <-ch return res.n, res.err case <-p.done: return 0, DoneError } } // Wait waits for all bars to complete and finally shutdowns container. // After this method has been called, there is no way to reuse *Progress // instance. 
func (p *Progress) Wait() { // wait for user wg, if any if p.uwg != nil { p.uwg.Wait() } // wait for bars to quit, if any p.bwg.Wait() p.once.Do(p.shutdown) // wait for container to quit p.cwg.Wait() } func (p *Progress) shutdown() { close(p.done) } func (p *Progress) serve(s *pState, cw *cwriter.Writer) { defer p.cwg.Done() render := func(out io.Writer) { err := s.render(cw) for err != nil { if out != nil { _, err = fmt.Fprintln(out, err) } else { panic(err) } out = nil } } p.refreshCh = s.newTicker(p.done) for { select { case op := <-p.operateState: op(s) case fn := <-p.interceptIo: fn(cw) case <-p.refreshCh: render(s.debugOut) case <-s.shutdownNotifier: for s.heapUpdated { render(s.debugOut) } return } } } func (s *pState) render(cw *cwriter.Writer) error { if s.heapUpdated { s.updateSyncMatrix() s.heapUpdated = false } syncWidth(s.pMatrix) syncWidth(s.aMatrix) width, height, err := cw.GetTermSize() if err != nil { width = s.reqWidth height = s.bHeap.Len() } for i := 0; i < s.bHeap.Len(); i++ { bar := s.bHeap[i] go bar.render(width) } return s.flush(cw, height) } func (s *pState) flush(cw *cwriter.Writer, height int) error { var wg sync.WaitGroup var popCount int for s.bHeap.Len() > 0 { var usedRows int b := heap.Pop(&s.bHeap).(*Bar) frame := <-b.frameCh if frame.recovered { s.heapUpdated = true continue } for i := len(frame.rows) - 1; i >= 0; i-- { if row := frame.rows[i]; len(s.rows) < height { s.rows = append(s.rows, row) usedRows++ } else { wg.Add(1) go func() { _, _ = io.Copy(io.Discard, row) wg.Done() }() } } if frame.shutdown != 0 { b.Wait() // waiting for b.done, so it's safe to read b.bs drop := b.bs.dropOnComplete if qb, ok := s.queueBars[b]; ok { delete(s.queueBars, b) qb.priority = b.priority s.pool = append(s.pool, qb) drop = true } else if s.popCompleted && !b.bs.noPop { if frame.shutdown > 1 { popCount += usedRows drop = true } else { s.popPriority++ b.priority = s.popPriority } } if drop { s.heapUpdated = true continue } } s.pool = append(s.pool, 
b) } wg.Add(1) go func() { for _, b := range s.pool { heap.Push(&s.bHeap, b) } wg.Done() }() readRows := len(s.rows) for i := readRows - 1; i >= 0; i-- { _, err := cw.ReadFrom(s.rows[i]) if err != nil { if s.debugOut != nil { fmt.Fprintf(s.debugOut, "cw.ReadFrom: %s\n", err.Error()) } readRows-- } } err := cw.Flush(readRows - popCount) wg.Wait() s.rows = s.rows[:0] s.pool = s.pool[:0] return err } func (s *pState) newTicker(done <-chan struct{}) chan time.Time { ch := make(chan time.Time) if s.shutdownNotifier == nil { s.shutdownNotifier = make(chan struct{}) } go func() { if s.renderDelay != nil { <-s.renderDelay } var internalRefresh <-chan time.Time if !s.outputDiscarded { if s.externalRefresh == nil { ticker := time.NewTicker(s.rr) defer ticker.Stop() internalRefresh = ticker.C } } else { s.externalRefresh = nil } for { select { case t := <-internalRefresh: ch <- t case x := <-s.externalRefresh: if t, ok := x.(time.Time); ok { ch <- t } else { ch <- time.Now() } case <-done: close(s.shutdownNotifier) return } } }() return ch } func (s *pState) updateSyncMatrix() { s.pMatrix = make(map[int][]chan int) s.aMatrix = make(map[int][]chan int) for i := 0; i < s.bHeap.Len(); i++ { bar := s.bHeap[i] table := bar.wSyncTable() pRow, aRow := table[0], table[1] for i, ch := range pRow { s.pMatrix[i] = append(s.pMatrix[i], ch) } for i, ch := range aRow { s.aMatrix[i] = append(s.aMatrix[i], ch) } } } func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState { bs := &bState{ id: s.idCount, priority: s.idCount, reqWidth: s.reqWidth, total: total, filler: filler, debugOut: s.debugOut, } if total > 0 { bs.triggerComplete = true } for _, opt := range options { if opt != nil { opt(bs) } } if bs.middleware != nil { bs.filler = bs.middleware(filler) bs.middleware = nil } for i := 0; i < len(bs.buffers); i++ { bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512)) } bs.subscribeDecorators() return bs } func syncWidth(matrix map[int][]chan int) { for _, 
column := range matrix { go maxWidthDistributor(column) } } func maxWidthDistributor(column []chan int) { var maxWidth int for _, ch := range column { if w := <-ch; w > maxWidth { maxWidth = w } } for _, ch := range column { ch <- maxWidth } } handle frame.err in flush package mpb import ( "bytes" "container/heap" "context" "fmt" "io" "math" "os" "sync" "time" "github.com/vbauerster/mpb/v8/cwriter" ) const ( prr = 150 * time.Millisecond // default RefreshRate ) // DoneError represents an error when `*mpb.Progress` is done but its functionality is requested. var DoneError = fmt.Errorf("%T instance can't be reused after it's done!", (*Progress)(nil)) // Progress represents a container that renders one or more progress bars. type Progress struct { ctx context.Context uwg *sync.WaitGroup cwg *sync.WaitGroup bwg *sync.WaitGroup operateState chan func(*pState) interceptIo chan func(io.Writer) done chan struct{} refreshCh chan time.Time once sync.Once cancel func() } // pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine. type pState struct { bHeap priorityQueue heapUpdated bool pMatrix map[int][]chan int aMatrix map[int][]chan int // for reuse purposes rows []io.Reader pool []*Bar // following are provided/overrided by user idCount int reqWidth int popPriority int popCompleted bool outputDiscarded bool rr time.Duration uwg *sync.WaitGroup externalRefresh <-chan interface{} renderDelay <-chan struct{} shutdownNotifier chan struct{} queueBars map[*Bar]*Bar output io.Writer debugOut io.Writer } // New creates new Progress container instance. It's not possible to // reuse instance after (*Progress).Wait method has been called. func New(options ...ContainerOption) *Progress { return NewWithContext(context.Background(), options...) } // NewWithContext creates new Progress container instance with provided // context. It's not possible to reuse instance after (*Progress).Wait // method has been called. 
func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { ctx, cancel := context.WithCancel(ctx) s := &pState{ bHeap: priorityQueue{}, rows: make([]io.Reader, 0, 64), pool: make([]*Bar, 0, 64), rr: prr, queueBars: make(map[*Bar]*Bar), output: os.Stdout, popPriority: math.MinInt32, } for _, opt := range options { if opt != nil { opt(s) } } p := &Progress{ ctx: ctx, uwg: s.uwg, cwg: new(sync.WaitGroup), bwg: new(sync.WaitGroup), operateState: make(chan func(*pState)), interceptIo: make(chan func(io.Writer)), done: make(chan struct{}), cancel: cancel, } p.cwg.Add(1) go p.serve(s, cwriter.New(s.output)) return p } // AddBar creates a bar with default bar filler. func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { return p.New(total, BarStyle(), options...) } // AddSpinner creates a bar with default spinner filler. func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { return p.New(total, SpinnerStyle(), options...) } // New creates a bar by calling `Build` method on provided `BarFillerBuilder`. func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { return p.AddFiller(total, builder.Build(), options...) } // AddFiller creates a bar which renders itself by provided filler. // If `total <= 0` triggering complete event by increment methods is disabled. // Panics if *Progress instance is done, i.e. called after (*Progress).Wait(). func (p *Progress) AddFiller(total int64, filler BarFiller, options ...BarOption) *Bar { if filler == nil { filler = NopStyle().Build() } p.bwg.Add(1) result := make(chan *Bar) select { case p.operateState <- func(ps *pState) { bs := ps.makeBarState(total, filler, options...) 
bar := newBar(p, bs) if bs.wait.bar != nil { ps.queueBars[bs.wait.bar] = bar } else { heap.Push(&ps.bHeap, bar) ps.heapUpdated = true } ps.idCount++ result <- bar }: bar := <-result return bar case <-p.done: p.bwg.Done() panic(DoneError) } } func (p *Progress) traverseBars(cb func(b *Bar) bool) { sync := make(chan struct{}) select { case p.operateState <- func(s *pState) { for i := 0; i < s.bHeap.Len(); i++ { bar := s.bHeap[i] if !cb(bar) { break } } close(sync) }: <-sync case <-p.done: } } // UpdateBarPriority same as *Bar.SetPriority(int). func (p *Progress) UpdateBarPriority(b *Bar, priority int) { select { case p.operateState <- func(s *pState) { if b.index < 0 { return } b.priority = priority heap.Fix(&s.bHeap, b.index) }: case <-p.done: } } // BarCount returns bars count. func (p *Progress) BarCount() int { result := make(chan int) select { case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }: return <-result case <-p.done: return 0 } } // Write is implementation of io.Writer. // Writing to `*mpb.Progress` will print lines above a running bar. // Writes aren't flushed immediatly, but at next refresh cycle. // If Write is called after `*mpb.Progress` is done, `mpb.DoneError` // is returned. func (p *Progress) Write(b []byte) (int, error) { type result struct { n int err error } ch := make(chan *result) select { case p.interceptIo <- func(w io.Writer) { n, err := w.Write(b) ch <- &result{n, err} }: res := <-ch return res.n, res.err case <-p.done: return 0, DoneError } } // Wait waits for all bars to complete and finally shutdowns container. // After this method has been called, there is no way to reuse *Progress // instance. 
func (p *Progress) Wait() {
	// wait for user wg, if any
	if p.uwg != nil {
		p.uwg.Wait()
	}

	// wait for bars to quit, if any
	p.bwg.Wait()

	// shutdown is guarded by sync.Once so repeated Wait calls are safe
	p.once.Do(p.shutdown)

	// wait for container to quit
	p.cwg.Wait()
}

// shutdown signals the serving goroutine (and anything selecting on p.done)
// that the container is finished.
func (p *Progress) shutdown() {
	close(p.done)
}

// serve is the container's event loop. It owns *pState exclusively: all
// mutation requests arrive as closures over operateState/interceptIo, and
// rendering is driven by the refresh channel.
func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
	defer p.cwg.Done()
	// render reports a render error once to out (if non-nil); with a nil out
	// the error is fatal. The loop runs at most twice: out is nilled after
	// the first report, so a failing Fprintln escalates to panic.
	render := func(out io.Writer) {
		err := s.render(cw)
		for err != nil {
			if out != nil {
				_, err = fmt.Fprintln(out, err)
			} else {
				panic(err)
			}
			out = nil
		}
	}
	p.refreshCh = s.newTicker(p.done)
	for {
		select {
		case op := <-p.operateState:
			op(s)
		case fn := <-p.interceptIo:
			fn(cw)
		case <-p.refreshCh:
			render(s.debugOut)
		case <-s.shutdownNotifier:
			// Keep rendering until the heap stabilizes, then exit the loop.
			for s.heapUpdated {
				render(s.debugOut)
			}
			return
		}
	}
}

// render recomputes the width-sync matrices if the heap changed, kicks off
// one render goroutine per bar, and flushes the frames to the writer.
func (s *pState) render(cw *cwriter.Writer) error {
	if s.heapUpdated {
		s.updateSyncMatrix()
		s.heapUpdated = false
	}
	syncWidth(s.pMatrix)
	syncWidth(s.aMatrix)

	width, height, err := cw.GetTermSize()
	if err != nil {
		// No terminal (e.g. output redirected): fall back to requested width
		// and let every bar occupy a row.
		width = s.reqWidth
		height = s.bHeap.Len()
	}
	for i := 0; i < s.bHeap.Len(); i++ {
		bar := s.bHeap[i]
		go bar.render(width)
	}

	return s.flush(cw, height)
}

// flush drains every bar's frame, writes up to height rows to cw, and
// handles bar completion bookkeeping (pop/drop/queue promotion). The first
// frame error encountered is returned after the writer has been flushed.
func (s *pState) flush(cw *cwriter.Writer, height int) (err error) {
	var wg sync.WaitGroup
	var popCount int

	for s.bHeap.Len() > 0 {
		b := heap.Pop(&s.bHeap).(*Bar)
		frame := <-b.frameCh
		if frame.err != nil {
			// Remember only the first error; keep draining remaining bars.
			if err == nil {
				err = frame.err
			}
			continue
		}
		var usedRows int
		for i := len(frame.rows) - 1; i >= 0; i-- {
			if row := frame.rows[i]; len(s.rows) < height {
				s.rows = append(s.rows, row)
				usedRows++
			} else {
				// Row doesn't fit the terminal: drain it so the bar's
				// renderer isn't blocked.
				wg.Add(1)
				go func() {
					_, _ = io.Copy(io.Discard, row)
					wg.Done()
				}()
			}
		}
		if frame.shutdown != 0 {
			b.Wait() // waiting for b.done, so it's safe to read b.bs
			drop := b.bs.dropOnComplete
			if qb, ok := s.queueBars[b]; ok {
				// Promote the bar that was queued behind this one.
				delete(s.queueBars, b)
				qb.priority = b.priority
				s.pool = append(s.pool, qb)
				drop = true
			} else if s.popCompleted && !b.bs.noPop {
				if frame.shutdown > 1 {
					popCount += usedRows
					drop = true
				} else {
					s.popPriority++
					b.priority = s.popPriority
				}
			}
			if drop {
				s.heapUpdated = true
				continue
			}
		}
		s.pool = append(s.pool, b)
	}

	if err != nil {
		return err
	}

	// Re-push surviving bars concurrently with the terminal writes below.
	wg.Add(1)
	go func() {
		for _, b := range s.pool {
			heap.Push(&s.bHeap, b)
		}
		wg.Done()
	}()

	for i := len(s.rows) - 1; i >= 0; i-- {
		_, err := cw.ReadFrom(s.rows[i])
		if err != nil {
			wg.Wait()
			return err
		}
	}

	err = cw.Flush(len(s.rows) - popCount)
	wg.Wait()
	s.rows = s.rows[:0]
	s.pool = s.pool[:0]
	return err
}

// newTicker starts the refresh-signal goroutine and returns the channel it
// feeds. Ticks come from the internal ticker (unless output is discarded) or
// from a user supplied external refresh source; closing done stops the
// goroutine and closes shutdownNotifier.
func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
	ch := make(chan time.Time)
	if s.shutdownNotifier == nil {
		s.shutdownNotifier = make(chan struct{})
	}
	go func() {
		if s.renderDelay != nil {
			<-s.renderDelay
		}
		var internalRefresh <-chan time.Time
		if !s.outputDiscarded {
			if s.externalRefresh == nil {
				ticker := time.NewTicker(s.rr)
				defer ticker.Stop()
				internalRefresh = ticker.C
			}
		} else {
			// Discarded output: drop any external refresh source too, so the
			// select below blocks on done only (nil channels never fire).
			s.externalRefresh = nil
		}
		for {
			select {
			case t := <-internalRefresh:
				ch <- t
			case x := <-s.externalRefresh:
				if t, ok := x.(time.Time); ok {
					ch <- t
				} else {
					ch <- time.Now()
				}
			case <-done:
				close(s.shutdownNotifier)
				return
			}
		}
	}()
	return ch
}

// updateSyncMatrix rebuilds the prepend/append width-sync matrices from the
// per-bar sync tables, column by column.
func (s *pState) updateSyncMatrix() {
	s.pMatrix = make(map[int][]chan int)
	s.aMatrix = make(map[int][]chan int)
	for i := 0; i < s.bHeap.Len(); i++ {
		bar := s.bHeap[i]
		table := bar.wSyncTable()

		pRow, aRow := table[0], table[1]
		for i, ch := range pRow {
			s.pMatrix[i] = append(s.pMatrix[i], ch)
		}

		for i, ch := range aRow {
			s.aMatrix[i] = append(s.aMatrix[i], ch)
		}
	}
}

// makeBarState builds a bState for a new bar, applying bar options and any
// configured middleware, and pre-allocating the render buffers.
func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState {
	bs := &bState{
		id:       s.idCount,
		priority: s.idCount,
		reqWidth: s.reqWidth,
		total:    total,
		filler:   filler,
		debugOut: s.debugOut,
	}

	// Complete-event triggering is enabled only for a positive total.
	if total > 0 {
		bs.triggerComplete = true
	}

	for _, opt := range options {
		if opt != nil {
			opt(bs)
		}
	}

	if bs.middleware != nil {
		bs.filler = bs.middleware(filler)
		bs.middleware = nil
	}

	for i := 0; i < len(bs.buffers); i++ {
		bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512))
	}

	bs.subscribeDecorators()

	return bs
}

// syncWidth runs one width distributor per matrix column, concurrently.
func syncWidth(matrix map[int][]chan int) {
	for _, column := range matrix {
		go maxWidthDistributor(column)
	}
}

// maxWidthDistributor receives one width from every channel in the column,
// then sends the maximum back to each, so all bars in the column align.
func maxWidthDistributor(column []chan int) {
	var maxWidth int
	for _, ch := range column {
		if w := <-ch; w > maxWidth {
			maxWidth = w
		}
	}
	for _, ch := range column {
		ch <- maxWidth
	}
}
package backup import ( "fmt" "io/ioutil" "os" "path/filepath" "gopkg.in/yaml.v2" "github.com/lxc/lxd/lxd/db" "github.com/lxc/lxd/shared" "github.com/lxc/lxd/shared/api" ) // Config represents the config of a backup that can be stored in a backup.yaml file (or embedded in index.yaml). type Config struct { Container *api.Instance `yaml:"container,omitempty"` // Used by VM backups too. Snapshots []*api.InstanceSnapshot `yaml:"snapshots,omitempty"` Pool *api.StoragePool `yaml:"pool,omitempty"` Volume *api.StorageVolume `yaml:"volume,omitempty"` } // ParseConfigYamlFile decodes the YAML file at path specified into a Config. func ParseConfigYamlFile(path string) (*Config, error) { data, err := ioutil.ReadFile(path) if err != nil { return nil, err } backup := Config{} if err := yaml.Unmarshal(data, &backup); err != nil { return nil, err } return &backup, nil } // updateRootDevicePool updates the root disk device in the supplied list of devices to the pool // specified. Returns true if a root disk device has been found and updated otherwise false. func updateRootDevicePool(devices map[string]map[string]string, poolName string) bool { if devices != nil { devName, _, err := shared.GetRootDiskDevice(devices) if err == nil { devices[devName]["pool"] = poolName return true } } return false } // UpdateInstanceConfigStoragePool changes the pool information in the backup.yaml to the pool specified in b.Pool. func UpdateInstanceConfigStoragePool(c *db.Cluster, b Info, mountPath string) error { // Load the storage pool. _, pool, err := c.GetStoragePool(b.Pool) if err != nil { return err } f := func(path string) error { // Read in the backup.yaml file. backup, err := ParseConfigYamlFile(path) if err != nil { return err } rootDiskDeviceFound := false // Change the pool in the backup.yaml. 
backup.Pool = pool if updateRootDevicePool(backup.Container.Devices, pool.Name) { rootDiskDeviceFound = true } if updateRootDevicePool(backup.Container.ExpandedDevices, pool.Name) { rootDiskDeviceFound = true } for _, snapshot := range backup.Snapshots { updateRootDevicePool(snapshot.Devices, pool.Name) updateRootDevicePool(snapshot.ExpandedDevices, pool.Name) } if !rootDiskDeviceFound { return fmt.Errorf("No root device could be found") } file, err := os.Create(path) if err != nil { return err } defer file.Close() data, err := yaml.Marshal(&backup) if err != nil { return err } _, err = file.Write(data) if err != nil { return err } return nil } err = f(filepath.Join(mountPath, "backup.yaml")) if err != nil { return err } return nil } lxd/backup/backup/config: Adds VolumeSnapshots to Config struct Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com> package backup import ( "fmt" "io/ioutil" "os" "path/filepath" "gopkg.in/yaml.v2" "github.com/lxc/lxd/lxd/db" "github.com/lxc/lxd/shared" "github.com/lxc/lxd/shared/api" ) // Config represents the config of a backup that can be stored in a backup.yaml file (or embedded in index.yaml). type Config struct { Container *api.Instance `yaml:"container,omitempty"` // Used by VM backups too. Snapshots []*api.InstanceSnapshot `yaml:"snapshots,omitempty"` Pool *api.StoragePool `yaml:"pool,omitempty"` Volume *api.StorageVolume `yaml:"volume,omitempty"` VolumeSnapshots []*api.StorageVolumeSnapshot `yaml:"volume_snapshots,omitempty"` } // ParseConfigYamlFile decodes the YAML file at path specified into a Config. func ParseConfigYamlFile(path string) (*Config, error) { data, err := ioutil.ReadFile(path) if err != nil { return nil, err } backup := Config{} if err := yaml.Unmarshal(data, &backup); err != nil { return nil, err } return &backup, nil } // updateRootDevicePool updates the root disk device in the supplied list of devices to the pool // specified. 
Returns true if a root disk device has been found and updated otherwise false.
func updateRootDevicePool(devices map[string]map[string]string, poolName string) bool {
	if devices != nil {
		// Locate the root disk device, if any; a lookup error simply means
		// there is no root disk device in this device map.
		devName, _, err := shared.GetRootDiskDevice(devices)
		if err == nil {
			devices[devName]["pool"] = poolName
			return true
		}
	}

	return false
}

// UpdateInstanceConfigStoragePool changes the pool information in the backup.yaml to the pool specified in b.Pool.
func UpdateInstanceConfigStoragePool(c *db.Cluster, b Info, mountPath string) error {
	// Load the storage pool.
	_, pool, err := c.GetStoragePool(b.Pool)
	if err != nil {
		return err
	}

	f := func(path string) error {
		// Read in the backup.yaml file.
		backup, err := ParseConfigYamlFile(path)
		if err != nil {
			return err
		}

		rootDiskDeviceFound := false

		// Change the pool in the backup.yaml.
		backup.Pool = pool
		if updateRootDevicePool(backup.Container.Devices, pool.Name) {
			rootDiskDeviceFound = true
		}

		if updateRootDevicePool(backup.Container.ExpandedDevices, pool.Name) {
			rootDiskDeviceFound = true
		}

		for _, snapshot := range backup.Snapshots {
			updateRootDevicePool(snapshot.Devices, pool.Name)
			updateRootDevicePool(snapshot.ExpandedDevices, pool.Name)
		}

		if !rootDiskDeviceFound {
			return fmt.Errorf("No root device could be found")
		}

		// Marshal the updated config *before* truncating the file with
		// os.Create, so a marshalling failure cannot destroy the existing
		// backup.yaml on disk.
		data, err := yaml.Marshal(&backup)
		if err != nil {
			return err
		}

		file, err := os.Create(path)
		if err != nil {
			return err
		}
		defer file.Close()

		// Write out the updated backup.yaml.
		_, err = file.Write(data)
		if err != nil {
			return err
		}

		return nil
	}

	err = f(filepath.Join(mountPath, "backup.yaml"))
	if err != nil {
		return err
	}

	return nil
}
package db

import (
	"fmt"
	"go/ast"
	"net/url"
	"reflect"
	"sort"
	"strings"

	"github.com/lxc/lxd/lxd/db/generate/lex"
	"github.com/pkg/errors"
)

// Packages returns the AST packages in which to search for structs.
//
// By default it includes the lxd/db and shared/api packages.
func Packages() (map[string]*ast.Package, error) {
	packages := map[string]*ast.Package{}

	for _, name := range defaultPackages {
		pkg, err := lex.Parse(name)
		if err != nil {
			return nil, errors.Wrapf(err, "Parse %q", name)
		}

		// Key packages by their last path element, e.g. "api" or "db".
		parts := strings.Split(name, "/")
		packages[parts[len(parts)-1]] = pkg
	}

	return packages, nil
}

// defaultPackages lists the import paths scanned by Packages.
var defaultPackages = []string{
	"github.com/lxc/lxd/shared/api",
	"github.com/lxc/lxd/lxd/db",
}

// Filters parses all filtering statements defined for the given entity. It
// returns all supported combinations of filters, sorted by number of criteria.
func Filters(pkg *ast.Package, kind string, entity string) [][]string {
	objects := pkg.Scope.Objects
	filters := [][]string{}

	// Filter declarations follow the naming scheme "<entity><Kind>By<A>And<B>...".
	prefix := fmt.Sprintf("%s%sBy", lex.Minuscule(lex.Camel(entity)), lex.Camel(kind))

	for name := range objects {
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		rest := name[len(prefix):]
		filters = append(filters, strings.Split(rest, "And"))
	}

	return sortFilters(filters)
}

// RefFilters parses all filtering statements defined for the given entity reference.
func RefFilters(pkg *ast.Package, entity string, ref string) [][]string {
	objects := pkg.Scope.Objects
	filters := [][]string{}

	// Reference filter declarations follow "<entity><Ref>RefBy<A>And<B>...".
	prefix := fmt.Sprintf("%s%sRefBy", lex.Minuscule(lex.Camel(entity)), lex.Capital(ref))

	for name := range objects {
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		rest := name[len(prefix):]
		filters = append(filters, strings.Split(rest, "And"))
	}

	return sortFilters(filters)
}

// sortFilters orders filter combinations by descending number of criteria,
// breaking ties by comparing the (reverse-sorted) criteria lexicographically.
// Panics if two identical filter combinations are found, since the comparator
// would otherwise be non-deterministic.
func sortFilters(filters [][]string) [][]string {
	sort.Slice(filters, func(i, j int) bool {
		n1 := len(filters[i])
		n2 := len(filters[j])
		if n1 != n2 {
			return n1 > n2
		}
		f1 := sortFilter(filters[i])
		f2 := sortFilter(filters[j])
		for k := range f1 {
			if f1[k] == f2[k] {
				continue
			}
			return f1[k] > f2[k]
		}
		panic("duplicate filter")
	})
	return filters
}

// sortFilter returns a copy of the filter's criteria sorted in reverse
// lexicographic order. The input slice is not modified.
func sortFilter(filter []string) []string {
	f := make([]string, len(filter))
	copy(f, filter)
	sort.Sort(sort.Reverse(sort.StringSlice(f)))
	return f
}

// Criteria returns a list of criteria, i.e. the exported field names of the
// entity's "<Entity>Filter" struct.
func Criteria(pkg *ast.Package, entity string) ([]string, error) {
	name := fmt.Sprintf("%sFilter", lex.Camel(entity))
	str := findStruct(pkg.Scope, name)
	if str == nil {
		return nil, fmt.Errorf("No filter declared for %q", entity)
	}
	criteria := []string{}
	for _, f := range str.Fields.List {
		// Each filter field must declare exactly one exported name.
		if len(f.Names) != 1 {
			return nil, fmt.Errorf("Unexpected fields number")
		}
		if !f.Names[0].IsExported() {
			return nil, fmt.Errorf("Unexported field name")
		}
		criteria = append(criteria, f.Names[0].Name)
	}
	return criteria, nil
}

// Parse the structure declaration with the given name found in the given Go
// package.
func Parse(pkg *ast.Package, name string) (*Mapping, error) { str := findStruct(pkg.Scope, name) if str == nil { return nil, fmt.Errorf("No declaration found for %q", name) } fields, err := parseStruct(str) if err != nil { return nil, errors.Wrapf(err, "Failed to parse %q", name) } m := &Mapping{ Package: pkg.Name, Name: name, Fields: fields, } return m, nil } // Find the StructType node for the structure with the given name func findStruct(scope *ast.Scope, name string) *ast.StructType { obj := scope.Lookup(name) if obj == nil { return nil } typ, ok := obj.Decl.(*ast.TypeSpec) if !ok { return nil } str, ok := typ.Type.(*ast.StructType) if !ok { return nil } return str } // Extract field information from the given structure. func parseStruct(str *ast.StructType) ([]*Field, error) { fields := make([]*Field, 0) for _, f := range str.Fields.List { if len(f.Names) == 0 { // Check if this is a parent struct. ident, ok := f.Type.(*ast.Ident) if !ok { continue } typ, ok := ident.Obj.Decl.(*ast.TypeSpec) if !ok { continue } parentStr, ok := typ.Type.(*ast.StructType) if !ok { continue } parentFields, err := parseStruct(parentStr) if err != nil { return nil, errors.Wrapf(err, "Failed to parse parent struct") } fields = append(fields, parentFields...) continue } if len(f.Names) != 1 { return nil, fmt.Errorf("Expected a single field name, got %q", f.Names) } field, err := parseField(f) if err != nil { return nil, err } // Don't add field if it has been ignored. 
if field != nil { fields = append(fields, field) } } return fields, nil } func parseField(f *ast.Field) (*Field, error) { name := f.Names[0] if !name.IsExported() { //return nil, fmt.Errorf("Unexported field name %q", name.Name) } // Ignore fields that are marked with a tag of `db:"ingore"` if f.Tag != nil { tag := f.Tag.Value tagValue := reflect.StructTag(tag[1 : len(tag)-1]).Get("db") if tagValue == "ignore" { return nil, nil } } typeName := parseType(f.Type) if typeName == "" { return nil, fmt.Errorf("Unsupported type for field %q", name.Name) } typeObj := Type{ Name: typeName, } if IsColumnType(typeName) { typeObj.Code = TypeColumn } else if strings.HasPrefix(typeName, "[]") { typeObj.Code = TypeSlice } else if strings.HasPrefix(typeName, "map[") { typeObj.Code = TypeMap } else { return nil, fmt.Errorf("Unsupported type for field %q", name.Name) } var config url.Values if f.Tag != nil { tag := f.Tag.Value var err error config, err = url.ParseQuery(reflect.StructTag(tag[1 : len(tag)-1]).Get("db")) if err != nil { return nil, errors.Wrap(err, "Parse 'db' structure tag") } } field := Field{ Name: name.Name, Type: typeObj, Config: config, } return &field, nil } func parseType(x ast.Expr) string { switch t := x.(type) { case *ast.StarExpr: // Pointers are not supported. return "" case *ast.SelectorExpr: return parseType(t.X) + "." 
+ t.Sel.String() case *ast.Ident: s := t.String() if s == "byte" { return "uint8" } return s case *ast.ArrayType: return "[" + parseType(t.Len) + "]" + parseType(t.Elt) case *ast.MapType: return "map[" + parseType(t.Key) + "]" + parseType(t.Value) case *ast.BasicLit: return t.Value case nil: return "" default: return "" } } lxd/db/generate/db/parse: support pointers Signed-off-by: Max Asnaashari <291db2cb5046c5ac435c5cc205a1b9bc80a27f4b@canonical.com> package db import ( "fmt" "go/ast" "net/url" "reflect" "sort" "strings" "github.com/lxc/lxd/lxd/db/generate/lex" "github.com/pkg/errors" ) // Packages returns the the AST packages in which to search for structs. // // By default it includes the lxd/db and shared/api packages. func Packages() (map[string]*ast.Package, error) { packages := map[string]*ast.Package{} for _, name := range defaultPackages { pkg, err := lex.Parse(name) if err != nil { return nil, errors.Wrapf(err, "Parse %q", name) } parts := strings.Split(name, "/") packages[parts[len(parts)-1]] = pkg } return packages, nil } var defaultPackages = []string{ "github.com/lxc/lxd/shared/api", "github.com/lxc/lxd/lxd/db", } // Filters parses all filtering statement defined for the given entity. It // returns all supported combinations of filters, sorted by number of criteria. func Filters(pkg *ast.Package, kind string, entity string) [][]string { objects := pkg.Scope.Objects filters := [][]string{} prefix := fmt.Sprintf("%s%sBy", lex.Minuscule(lex.Camel(entity)), lex.Camel(kind)) for name := range objects { if !strings.HasPrefix(name, prefix) { continue } rest := name[len(prefix):] filters = append(filters, strings.Split(rest, "And")) } return sortFilters(filters) } // RefFilters parses all filtering statement defined for the given entity reference. 
func RefFilters(pkg *ast.Package, entity string, ref string) [][]string {
	objects := pkg.Scope.Objects
	filters := [][]string{}

	// Reference filter declarations follow "<entity><Ref>RefBy<A>And<B>...".
	prefix := fmt.Sprintf("%s%sRefBy", lex.Minuscule(lex.Camel(entity)), lex.Capital(ref))

	for name := range objects {
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		rest := name[len(prefix):]
		filters = append(filters, strings.Split(rest, "And"))
	}

	return sortFilters(filters)
}

// sortFilters orders filter combinations by descending number of criteria,
// breaking ties by comparing the (reverse-sorted) criteria lexicographically.
// Panics if two identical filter combinations are found, since the comparator
// would otherwise be non-deterministic.
func sortFilters(filters [][]string) [][]string {
	sort.Slice(filters, func(i, j int) bool {
		n1 := len(filters[i])
		n2 := len(filters[j])
		if n1 != n2 {
			return n1 > n2
		}
		f1 := sortFilter(filters[i])
		f2 := sortFilter(filters[j])
		for k := range f1 {
			if f1[k] == f2[k] {
				continue
			}
			return f1[k] > f2[k]
		}
		panic("duplicate filter")
	})
	return filters
}

// sortFilter returns a copy of the filter's criteria sorted in reverse
// lexicographic order. The input slice is not modified.
func sortFilter(filter []string) []string {
	f := make([]string, len(filter))
	copy(f, filter)
	sort.Sort(sort.Reverse(sort.StringSlice(f)))
	return f
}

// Criteria returns a list of criteria, i.e. the exported field names of the
// entity's "<Entity>Filter" struct.
func Criteria(pkg *ast.Package, entity string) ([]string, error) {
	name := fmt.Sprintf("%sFilter", lex.Camel(entity))
	str := findStruct(pkg.Scope, name)
	if str == nil {
		return nil, fmt.Errorf("No filter declared for %q", entity)
	}
	criteria := []string{}
	for _, f := range str.Fields.List {
		// Each filter field must declare exactly one exported name.
		if len(f.Names) != 1 {
			return nil, fmt.Errorf("Unexpected fields number")
		}
		if !f.Names[0].IsExported() {
			return nil, fmt.Errorf("Unexported field name")
		}
		criteria = append(criteria, f.Names[0].Name)
	}
	return criteria, nil
}

// Parse the structure declaration with the given name found in the given Go
// package.
func Parse(pkg *ast.Package, name string) (*Mapping, error) { str := findStruct(pkg.Scope, name) if str == nil { return nil, fmt.Errorf("No declaration found for %q", name) } fields, err := parseStruct(str) if err != nil { return nil, errors.Wrapf(err, "Failed to parse %q", name) } m := &Mapping{ Package: pkg.Name, Name: name, Fields: fields, } return m, nil } // Find the StructType node for the structure with the given name func findStruct(scope *ast.Scope, name string) *ast.StructType { obj := scope.Lookup(name) if obj == nil { return nil } typ, ok := obj.Decl.(*ast.TypeSpec) if !ok { return nil } str, ok := typ.Type.(*ast.StructType) if !ok { return nil } return str } // Extract field information from the given structure. func parseStruct(str *ast.StructType) ([]*Field, error) { fields := make([]*Field, 0) for _, f := range str.Fields.List { if len(f.Names) == 0 { // Check if this is a parent struct. ident, ok := f.Type.(*ast.Ident) if !ok { continue } typ, ok := ident.Obj.Decl.(*ast.TypeSpec) if !ok { continue } parentStr, ok := typ.Type.(*ast.StructType) if !ok { continue } parentFields, err := parseStruct(parentStr) if err != nil { return nil, errors.Wrapf(err, "Failed to parse parent struct") } fields = append(fields, parentFields...) continue } if len(f.Names) != 1 { return nil, fmt.Errorf("Expected a single field name, got %q", f.Names) } field, err := parseField(f) if err != nil { return nil, err } // Don't add field if it has been ignored. 
if field != nil { fields = append(fields, field) } } return fields, nil } func parseField(f *ast.Field) (*Field, error) { name := f.Names[0] if !name.IsExported() { //return nil, fmt.Errorf("Unexported field name %q", name.Name) } // Ignore fields that are marked with a tag of `db:"ingore"` if f.Tag != nil { tag := f.Tag.Value tagValue := reflect.StructTag(tag[1 : len(tag)-1]).Get("db") if tagValue == "ignore" { return nil, nil } } typeName := parseType(f.Type) if typeName == "" { return nil, fmt.Errorf("Unsupported type for field %q", name.Name) } typeObj := Type{ Name: typeName, } if IsColumnType(typeName) { typeObj.Code = TypeColumn } else if strings.HasPrefix(typeName, "[]") { typeObj.Code = TypeSlice } else if strings.HasPrefix(typeName, "map[") { typeObj.Code = TypeMap } else { return nil, fmt.Errorf("Unsupported type for field %q", name.Name) } var config url.Values if f.Tag != nil { tag := f.Tag.Value var err error config, err = url.ParseQuery(reflect.StructTag(tag[1 : len(tag)-1]).Get("db")) if err != nil { return nil, errors.Wrap(err, "Parse 'db' structure tag") } } field := Field{ Name: name.Name, Type: typeObj, Config: config, } return &field, nil } func parseType(x ast.Expr) string { switch t := x.(type) { case *ast.StarExpr: return parseType(t.X) case *ast.SelectorExpr: return parseType(t.X) + "." + t.Sel.String() case *ast.Ident: s := t.String() if s == "byte" { return "uint8" } return s case *ast.ArrayType: return "[" + parseType(t.Len) + "]" + parseType(t.Elt) case *ast.MapType: return "map[" + parseType(t.Key) + "]" + parseType(t.Value) case *ast.BasicLit: return t.Value case nil: return "" default: return "" } }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package main / gke.go provides the Google Container Engine (GKE) // kubetest deployer via newGKE(). // // TODO(zmerlynn): Pull this out to a separate package? package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "regexp" "sort" "strconv" "strings" "time" "k8s.io/test-infra/kubetest/util" ) const ( defaultPool = "default" e2eAllow = "tcp:22,tcp:80,tcp:8080,tcp:9090,tcp:30000-32767,udp:30000-32767" defaultCreate = "container clusters create --quiet" ) var ( gkeAdditionalZones = flag.String("gke-additional-zones", "", "(gke only) List of additional Google Compute Engine zones to use. Clusters are created symmetrically across zones by default, see --gke-shape for details.") gkeNodeLocations = flag.String("gke-node-locations", "", "(gke only) List of Google Compute Engine zones to use.") gkeEnvironment = flag.String("gke-environment", "", "(gke only) Container API endpoint to use, one of 'test', 'staging', 'prod', or a custom https:// URL") gkeShape = flag.String("gke-shape", `{"default":{"Nodes":3,"MachineType":"n1-standard-2"}}`, `(gke only) A JSON description of node pools to create. The node pool 'default' is required and used for initial cluster creation. All node pools are symmetric across zones, so the cluster total node count is {total nodes in --gke-shape} * {1 + (length of --gke-additional-zones)}. 
Example: '{"default":{"Nodes":999,"MachineType:":"n1-standard-1"},"heapster":{"Nodes":1, "MachineType":"n1-standard-8", "ExtraArgs": []}}`) gkeCreateArgs = flag.String("gke-create-args", "", "(gke only) (deprecated, use a modified --gke-create-command') Additional arguments passed directly to 'gcloud container clusters create'") gkeCommandGroup = flag.String("gke-command-group", "", "(gke only) Use a different gcloud track (e.g. 'alpha') for all 'gcloud container' commands. Note: This is added to --gke-create-command on create. You should only use --gke-command-group if you need to change the gcloud track for *every* gcloud container command.") gkeCreateCommand = flag.String("gke-create-command", defaultCreate, "(gke only) gcloud subcommand used to create a cluster. Modify if you need to pass arbitrary arguments to create.") gkeCustomSubnet = flag.String("gke-custom-subnet", "", "(gke only) if specified, we create a custom subnet with the specified options and use it for the gke cluster. The format should be '<subnet-name> --region=<subnet-gcp-region> --range=<subnet-cidr> <any other optional params>'.") gkeSubnetMode = flag.String("gke-subnet-mode", "auto", "(gke only) subnet creation mode of the GKE cluster network.") gkeReleaseChannel = flag.String("gke-release-channel", "", "(gke only) if specified, bring up GKE clusters from that release channel.") gkeSingleZoneNodeInstanceGroup = flag.Bool("gke-single-zone-node-instance-group", true, "(gke only) Add instance groups from a single zone to the NODE_INSTANCE_GROUP env variable.") gkeNodePorts = flag.String("gke-node-ports", "", "(gke only) List of ports on nodes to open, allowing e.g. master to connect to pods on private nodes. 
The format should be 'protocol[:port[-port]],[...]' as in gcloud compute firewall-rules create --allow.") gkeCreateNat = flag.Bool("gke-create-nat", false, "(gke only) Configure Cloud NAT allowing outbound connections in cluster with private nodes.") gkeNatMinPortsPerVm = flag.Int("gke-nat-min-ports-per-vm", 64, "(gke only) Specify number of ports per cluster VM for NAT router. Number of ports * number of nodes / 64k = number of auto-allocated IP addresses (there is a hard limit of 100 IPs).") // poolRe matches instance group URLs of the form `https://www.googleapis.com/compute/v1/projects/some-project/zones/a-zone/instanceGroupManagers/gke-some-cluster-some-pool-90fcb815-grp`. Match meaning: // m[0]: path starting with zones/ // m[1]: zone // m[2]: pool name (passed to e2es) // m[3]: unique hash (used as nonce for firewall rules) poolRe = regexp.MustCompile(`zones/([^/]+)/instanceGroupManagers/(gke-.*-([0-9a-f]{8})-grp)$`) urlRe = regexp.MustCompile(`https://.*/`) ) type gkeNodePool struct { Nodes int MachineType string ExtraArgs []string } type gkeDeployer struct { project string zone string region string location string additionalZones string nodeLocations string nodePorts string cluster string shape map[string]gkeNodePool network string subnetwork string subnetMode string subnetworkRegion string createNat bool natMinPortsPerVm int image string imageFamily string imageProject string commandGroup []string createCommand []string singleZoneNodeInstanceGroup bool sshProxyInstanceName string setup bool kubecfg string instanceGroups []*ig } type ig struct { path string zone string name string uniq string } var _ deployer = &gkeDeployer{} func newGKE(provider, project, zone, region, network, image, imageFamily, imageProject, cluster, sshProxyInstanceName string, testArgs *string, upgradeArgs *string) (*gkeDeployer, error) { if provider != "gke" { return nil, fmt.Errorf("--provider must be 'gke' for GKE deployment, found %q", provider) } g := &gkeDeployer{} if cluster 
== "" { return nil, fmt.Errorf("--cluster must be set for GKE deployment") } g.cluster = cluster if project == "" { return nil, fmt.Errorf("--gcp-project must be set for GKE deployment") } g.project = project if zone == "" && region == "" { return nil, fmt.Errorf("--gcp-zone or --gcp-region must be set for GKE deployment") } else if zone != "" && region != "" { return nil, fmt.Errorf("--gcp-zone and --gcp-region cannot both be set") } if zone != "" { g.zone = zone g.location = "--zone=" + zone } else if region != "" { g.region = region g.location = "--region=" + region } if network == "" { return nil, fmt.Errorf("--gcp-network must be set for GKE deployment") } g.network = network if strings.ToUpper(image) == "CUSTOM" { if imageFamily == "" || imageProject == "" { return nil, fmt.Errorf("--image-family and --image-project must be set for GKE deployment if --gcp-node-image=CUSTOM") } } g.imageFamily = imageFamily g.imageProject = imageProject g.image = image g.additionalZones = *gkeAdditionalZones g.nodeLocations = *gkeNodeLocations g.nodePorts = *gkeNodePorts g.createNat = *gkeCreateNat g.natMinPortsPerVm = *gkeNatMinPortsPerVm err := json.Unmarshal([]byte(*gkeShape), &g.shape) if err != nil { return nil, fmt.Errorf("--gke-shape must be valid JSON, unmarshal error: %v, JSON: %q", err, *gkeShape) } if _, ok := g.shape[defaultPool]; !ok { return nil, fmt.Errorf("--gke-shape must include a node pool named 'default', found %q", *gkeShape) } switch subnetMode := *gkeSubnetMode; subnetMode { case "auto", "custom": g.subnetMode = subnetMode default: return nil, fmt.Errorf("--gke-subnet-mode must be set either to 'auto' or 'custom', got: %s", subnetMode) } g.commandGroup = strings.Fields(*gkeCommandGroup) g.createCommand = append([]string{}, g.commandGroup...) g.createCommand = append(g.createCommand, strings.Fields(*gkeCreateCommand)...) 
createArgs := strings.Fields(*gkeCreateArgs) if len(createArgs) > 0 { log.Printf("--gke-create-args is deprecated, please use '--gke-create-command=%s %s'", defaultCreate, *gkeCreateArgs) } g.createCommand = append(g.createCommand, createArgs...) if err := util.MigrateOptions([]util.MigratedOption{{ Env: "CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER", Option: gkeEnvironment, Name: "--gke-environment", }}); err != nil { return nil, err } var endpoint string switch env := *gkeEnvironment; { case env == "test": endpoint = "https://test-container.sandbox.googleapis.com/" case env == "staging": endpoint = "https://staging-container.sandbox.googleapis.com/" case env == "staging2": endpoint = "https://staging2-container.sandbox.googleapis.com/" case env == "prod": endpoint = "https://container.googleapis.com/" case urlRe.MatchString(env): endpoint = env default: return nil, fmt.Errorf("--gke-environment must be one of {test,staging,prod} or match %v, found %q", urlRe, env) } if err := os.Setenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER", endpoint); err != nil { return nil, err } // Override kubecfg to a temporary file rather than trashing the user's. f, err := ioutil.TempFile("", "gke-kubecfg") if err != nil { return nil, err } defer f.Close() kubecfg := f.Name() if err := f.Chmod(0600); err != nil { return nil, err } g.kubecfg = kubecfg // We want no KUBERNETES_PROVIDER, but to set // KUBERNETES_CONFORMANCE_PROVIDER and // KUBERNETES_CONFORMANCE_TEST. This prevents ginkgo-e2e.sh from // using the cluster/gke functions. // // We do this in the deployer constructor so that // cluster/gce/list-resources.sh outputs the same provider for the // extent of the binary. (It seems like it belongs in TestSetup, // but that way leads to madness.) // // TODO(zmerlynn): This is gross. 
if err := os.Unsetenv("KUBERNETES_PROVIDER"); err != nil { return nil, err } if err := os.Setenv("KUBERNETES_CONFORMANCE_TEST", "yes"); err != nil { return nil, err } if err := os.Setenv("KUBERNETES_CONFORMANCE_PROVIDER", "gke"); err != nil { return nil, err } // TODO(zmerlynn): Another snafu of cluster/gke/list-resources.sh: // Set KUBE_GCE_INSTANCE_PREFIX so that we don't accidentally pick // up CLUSTER_NAME later. if err := os.Setenv("KUBE_GCE_INSTANCE_PREFIX", "gke-"+g.cluster); err != nil { return nil, err } // set --num-nodes flag for ginkgo, since NUM_NODES is not set for gke deployer. numNodes := strconv.Itoa(g.shape[defaultPool].Nodes) // testArgs can be empty, and we need to support this case *testArgs = strings.Join(util.SetFieldDefault(strings.Fields(*testArgs), "--num-nodes", numNodes), " ") if *upgradeArgs != "" { // --upgrade-target will be passed to e2e upgrade framework to get a valid update version. // See usage from https://github.com/kubernetes/kubernetes/blob/master/hack/get-build.sh for supported targets. // Here we special case for gke-latest and will extract an actual valid gke version. // - gke-latest will be resolved to the latest gke version, and // - gke-latest-1.7 will be resolved to the latest 1.7 patch version supported on gke. 
fields, val, exist := util.ExtractField(strings.Fields(*upgradeArgs), "--upgrade-target") if exist { if strings.HasPrefix(val, "gke-latest") { releasePrefix := "" if strings.HasPrefix(val, "gke-latest-") { releasePrefix = strings.TrimPrefix(val, "gke-latest-") } if val, err = getLatestGKEVersion(project, zone, region, releasePrefix); err != nil { return nil, fmt.Errorf("fail to get latest gke version : %v", err) } } fields = util.SetFieldDefault(fields, "--upgrade-target", val) } *upgradeArgs = strings.Join(util.SetFieldDefault(fields, "--num-nodes", numNodes), " ") } g.singleZoneNodeInstanceGroup = *gkeSingleZoneNodeInstanceGroup g.sshProxyInstanceName = sshProxyInstanceName return g, nil } func (g *gkeDeployer) Up() error { // Create network if it doesn't exist. if control.NoOutput(exec.Command("gcloud", "compute", "networks", "describe", g.network, "--project="+g.project, "--format=value(name)")) != nil { // Assume error implies non-existent. log.Printf("Couldn't describe network '%s', assuming it doesn't exist and creating it", g.network) if err := control.FinishRunning(exec.Command("gcloud", "compute", "networks", "create", g.network, "--project="+g.project, "--subnet-mode="+g.subnetMode)); err != nil { return err } } // Create a custom subnet in that network if it was asked for. if *gkeCustomSubnet != "" { customSubnetFields := strings.Fields(*gkeCustomSubnet) createSubnetCommand := []string{"compute", "networks", "subnets", "create"} createSubnetCommand = append(createSubnetCommand, "--project="+g.project, "--network="+g.network) createSubnetCommand = append(createSubnetCommand, customSubnetFields...) 
		if err := control.FinishRunning(exec.Command("gcloud", createSubnetCommand...)); err != nil {
			return err
		}
		// Positional fields of --gke-custom-subnet: <subnet-name> <region ...>.
		g.subnetwork = customSubnetFields[0]
		g.subnetworkRegion = customSubnetFields[1]
	}

	// Build the cluster-create invocation: the (env-expanded) create command
	// plus flags derived from the default node pool shape.
	def := g.shape[defaultPool]
	args := make([]string, len(g.createCommand))
	for i := range args {
		// Allow $VAR references in --gke-create-command to be expanded at run time.
		args[i] = os.ExpandEnv(g.createCommand[i])
	}
	args = append(args,
		"--project="+g.project,
		g.location,
		"--network="+g.network,
	)
	if def.Nodes > 0 {
		args = append(args, "--num-nodes="+strconv.Itoa(def.Nodes))
	}
	if def.MachineType != "" {
		args = append(args, "--machine-type="+def.MachineType)
	}
	if g.image != "" {
		args = append(args, "--image-type="+g.image)
	}
	args = append(args, def.ExtraArgs...)
	if strings.ToUpper(g.image) == "CUSTOM" {
		args = append(args, "--image-family="+g.imageFamily)
		args = append(args, "--image-project="+g.imageProject)

		// gcloud enables node auto-upgrade by default, which doesn't work with CUSTOM image.
		// We disable auto-upgrade explicitly here.
		args = append(args, "--no-enable-autoupgrade")

		// Custom images are not supported with shielded nodes (which is enabled by default) in GKE.
		args = append(args, "--no-enable-shielded-nodes")
	}
	if g.subnetwork != "" {
		args = append(args, "--subnetwork="+g.subnetwork)
	}
	if g.additionalZones != "" {
		args = append(args, "--additional-zones="+g.additionalZones)
		// MULTIZONE is read by the e2e shell tooling downstream.
		if err := os.Setenv("MULTIZONE", "true"); err != nil {
			return fmt.Errorf("error setting MULTIZONE env variable: %v", err)
		}
	}
	if g.nodeLocations != "" {
		args = append(args, "--node-locations="+g.nodeLocations)
		numNodeLocations := strings.Split(g.nodeLocations, ",")
		if len(numNodeLocations) > 1 {
			// Multiple node locations make the cluster effectively multizonal.
			if err := os.Setenv("MULTIZONE", "true"); err != nil {
				return fmt.Errorf("error setting MULTIZONE env variable: %v", err)
			}
		}
	}
	if *gkeReleaseChannel != "" {
		args = append(args, "--release-channel="+*gkeReleaseChannel)
	} else {
		// TODO(zmerlynn): The version should be plumbed through Extract
		// or a separate flag rather than magic env variables.
if v := os.Getenv("CLUSTER_API_VERSION"); v != "" { args = append(args, "--cluster-version="+v) } } args = append(args, g.cluster) if err := control.FinishRunning(exec.Command("gcloud", args...)); err != nil { return fmt.Errorf("error creating cluster: %v", err) } for poolName, pool := range g.shape { if poolName == defaultPool { continue } poolArgs := []string{"node-pools", "create", poolName, "--cluster=" + g.cluster, "--project=" + g.project, g.location, "--num-nodes=" + strconv.Itoa(pool.Nodes)} if pool.MachineType != "" { poolArgs = append(poolArgs, "--machine-type="+pool.MachineType) } poolArgs = append(poolArgs, pool.ExtraArgs...) if err := control.FinishRunning(exec.Command("gcloud", g.containerArgs(poolArgs...)...)); err != nil { return fmt.Errorf("error creating node pool %q: %v", poolName, err) } } return nil } func (g *gkeDeployer) IsUp() error { return isUp(g) } // DumpClusterLogs for GKE generates a small script that wraps // log-dump.sh with the appropriate shell-fu to get the cluster // dumped. // // TODO(zmerlynn): This whole path is really gross, but this seemed // the least gross hack to get this done. func (g *gkeDeployer) DumpClusterLogs(localPath, gcsPath string) error { // gkeLogDumpTemplate is a template of a shell script where // - %[1]s is the project // - %[2]s is the zone // - %[3]s is the OS distribution of nodes // - %[4]s is a filter composed of the instance groups // - %[5]s is the zone for logexporter daemonset (defined only for multizonal or regional clusters) // - %[6]s is the log-dump.sh command line const gkeLogDumpTemplate = ` function log_dump_custom_get_instances() { if [[ $1 == "master" ]]; then return 0; fi gcloud compute instances list '--project=%[1]s' '--filter=%[4]s' '--format=get(name)' } export -f log_dump_custom_get_instances # Set below vars that log-dump.sh expects in order to use scp with gcloud. 
export PROJECT=%[1]s export ZONE='%[2]s' export KUBERNETES_PROVIDER=gke export KUBE_NODE_OS_DISTRIBUTION='%[3]s' export LOGEXPORTER_ZONE='%[5]s' %[6]s ` // Prevent an obvious injection. if strings.Contains(localPath, "'") || strings.Contains(gcsPath, "'") { return fmt.Errorf("%q or %q contain single quotes - nice try", localPath, gcsPath) } // Generate a slice of filters to be OR'd together below if err := g.getInstanceGroups(); err != nil { return err } perZoneFilters := make(map[string][]string) for _, ig := range g.instanceGroups { filter := fmt.Sprintf("(metadata.created-by ~ %s)", ig.path) perZoneFilters[ig.zone] = append(perZoneFilters[ig.zone], filter) } isMultizonalOrRegional := len(perZoneFilters) > 1 // Generate the log-dump.sh command-line var dumpCmd string if gcsPath == "" { dumpCmd = fmt.Sprintf("./cluster/log-dump/log-dump.sh '%s'", localPath) } else { dumpCmd = fmt.Sprintf("./cluster/log-dump/log-dump.sh '%s' '%s'", localPath, gcsPath) } // Try to setup cluster access if it's possible. If credentials are already set, this will be no-op. Access to // GKE cluster is required for log-exporter. if err := g.getKubeConfig(); err != nil { log.Printf("error while setting up kubeconfig: %v", err) } // Make sure the firewall rule is created. It's needed so the log-dump.sh can ssh into nodes. // If cluster-up operation failed for some reasons (e.g. some nodes didn't register) the // firewall rule isn't automatically created as the TestSetup is not being executed. If firewall // rule was successfully created, the ensureFirewall call will be no-op. 
	// Best effort: log-dump.sh needs SSH access to the nodes, which requires
	// the e2e firewall rule; keep going even if we cannot ensure it.
	if err := g.ensureFirewall(); err != nil {
		log.Printf("error while ensuring firewall rule: %v", err)
	}
	// Run the dump once per zone, accumulating (rather than aborting on)
	// per-zone failures so every zone gets a chance to dump.
	var errorMessages []string
	for zone, filters := range perZoneFilters {
		logexporterZone := ""
		if isMultizonalOrRegional {
			// Only pin logexporter to a zone for multizonal/regional clusters.
			logexporterZone = zone
		}
		err := control.FinishRunning(exec.Command("bash", "-c", fmt.Sprintf(gkeLogDumpTemplate,
			g.project,
			zone,
			os.Getenv("NODE_OS_DISTRIBUTION"),
			strings.Join(filters, " OR "),
			logexporterZone,
			dumpCmd)))
		if err != nil {
			errorMessages = append(errorMessages, err.Error())
		}
	}
	if len(errorMessages) > 0 {
		return fmt.Errorf("errors while dumping logs: %s", strings.Join(errorMessages, ", "))
	}
	return nil
}

// TestSetup ensures cluster access (kubeconfig), instance-group discovery,
// firewalling, NAT and the SSH bastion are in place, then exports the env
// variables the e2e scripts expect. It is a no-op after the first successful run.
func (g *gkeDeployer) TestSetup() error {
	if g.setup {
		// Ensure setup is a singleton.
		return nil
	}
	if err := g.getKubeConfig(); err != nil {
		return err
	}
	if err := g.getInstanceGroups(); err != nil {
		return err
	}
	if err := g.ensureFirewall(); err != nil {
		return err
	}
	if err := g.ensureNat(); err != nil {
		return err
	}
	if err := g.setupBastion(); err != nil {
		return err
	}
	if err := g.setupEnv(); err != nil {
		return err
	}
	g.setup = true
	return nil
}

// setupBastion locates the configured SSH proxy (bastion) instance, adds a
// NAT access config to it if needed, and exports it via KUBE_SSH_BASTION.
// No-op when no proxy instance name was configured.
func (g *gkeDeployer) setupBastion() error {
	if g.sshProxyInstanceName == "" {
		return nil
	}
	var filtersToTry []string
	// Use exact name first, VM does not have to belong to the cluster
	exactFilter := "name=" + g.sshProxyInstanceName
	filtersToTry = append(filtersToTry, exactFilter)
	// As a fallback - use proxy instance name as a regex but check only cluster nodes
	var igFilters []string
	for _, ig := range g.instanceGroups {
		igFilters = append(igFilters, fmt.Sprintf("(metadata.created-by ~ %s)", ig.path))
	}
	fuzzyFilter := fmt.Sprintf("(name ~ %s) AND (%s)", g.sshProxyInstanceName, strings.Join(igFilters, " OR "))
	filtersToTry = append(filtersToTry, fuzzyFilter)
	var bastion, zone string
	for _, filter := range filtersToTry {
		log.Printf("Checking for proxy instance with filter: %q", filter)
		output, err := exec.Command("gcloud", "compute", "instances", "list",
			"--filter="+filter,
			"--format=value(name,zone)",
			"--limit=1",
			"--project="+g.project).Output()
		if err != nil {
			return fmt.Errorf("listing instances failed: %s", util.ExecError(err))
		}
		if len(output) == 0 {
			// Nothing matched this filter; fall through to the next one.
			continue
		}
		// Proxy instance found
		fields := strings.Split(strings.TrimSpace(string(output)), "\t")
		if len(fields) != 2 {
			return fmt.Errorf("error parsing instances list output %q", output)
		}
		bastion, zone = fields[0], fields[1]
		break
	}
	if bastion == "" {
		return fmt.Errorf("proxy instance %q not found", g.sshProxyInstanceName)
	}
	log.Printf("Found proxy instance %q", bastion)
	log.Printf("Adding NAT access config if not present")
	// Best effort: the access config may already exist, so the error is ignored.
	control.NoOutput(exec.Command("gcloud", "compute", "instances", "add-access-config", bastion, "--zone="+zone, "--project="+g.project))
	err := setKubeShhBastionEnv(g.project, zone, bastion)
	if err != nil {
		return fmt.Errorf("setting KUBE_SSH_BASTION variable failed: %s", util.ExecError(err))
	}
	return nil
}

// getKubeConfig fetches cluster credentials into the deployer's private
// kubeconfig file (g.kubecfg), unless that file is already non-empty.
func (g *gkeDeployer) getKubeConfig() error {
	info, err := os.Stat(g.kubecfg)
	if err != nil {
		return err
	}
	if info.Size() > 0 {
		// Assume that if we already have it, it's good.
		return nil
	}
	// Point gcloud's credential write at our private kubeconfig.
	if err := os.Setenv("KUBECONFIG", g.kubecfg); err != nil {
		return err
	}
	if err := control.FinishRunning(exec.Command("gcloud", g.containerArgs("clusters", "get-credentials", g.cluster,
		"--project="+g.project,
		g.location)...)); err != nil {
		return fmt.Errorf("error executing get-credentials: %v", err)
	}
	return nil
}

// setupEnv is to appease ginkgo-e2e.sh and other pieces of the e2e infrastructure. It
// would be nice to handle this elsewhere, and not with env
// variables. c.f. kubernetes/test-infra#3330.
func (g *gkeDeployer) setupEnv() error {
	// If singleZoneNodeInstanceGroup is true, set NODE_INSTANCE_GROUP to the
	// names of instance groups that are in the same zone as the lexically first
	// instance group. Otherwise set NODE_INSTANCE_GROUP to the names of all
	// instance groups.
	var filt []string
	zone := g.instanceGroups[0].zone
	for _, ig := range g.instanceGroups {
		if !g.singleZoneNodeInstanceGroup || ig.zone == zone {
			filt = append(filt, ig.name)
		}
	}
	if err := os.Setenv("NODE_INSTANCE_GROUP", strings.Join(filt, ",")); err != nil {
		return fmt.Errorf("error setting NODE_INSTANCE_GROUP: %v", err)
	}
	return nil
}

// ensureFirewall creates the cluster-unique e2e firewall rule (see
// getClusterFirewall) if it does not already exist, targeting the node tag
// discovered from a cluster instance. No-op on the default network.
func (g *gkeDeployer) ensureFirewall() error {
	if g.network == "default" {
		return nil
	}
	firewall, err := g.getClusterFirewall()
	if err != nil {
		return fmt.Errorf("error getting unique firewall: %v", err)
	}
	if control.NoOutput(exec.Command("gcloud", "compute", "firewall-rules", "describe", firewall,
		"--project="+g.project,
		"--format=value(name)")) == nil {
		// Assume that if this unique firewall exists, it's good to go.
		return nil
	}
	log.Printf("Couldn't describe firewall '%s', assuming it doesn't exist and creating it", firewall)

	// Discover the node tag from any one instance of the first instance group.
	tagOut, err := exec.Command("gcloud", "compute", "instances", "list",
		"--project="+g.project,
		"--filter=metadata.created-by ~ "+g.instanceGroups[0].path,
		"--limit=1",
		"--format=get(tags.items)").Output()
	if err != nil {
		return fmt.Errorf("instances list failed: %s", util.ExecError(err))
	}
	tag := strings.TrimSpace(string(tagOut))
	if tag == "" {
		return fmt.Errorf("instances list returned no instances (or instance has no tags)")
	}

	// Open e2eAllow plus any extra node ports the caller requested.
	allowPorts := e2eAllow
	if g.nodePorts != "" {
		allowPorts += "," + g.nodePorts
	}
	if err := control.FinishRunning(exec.Command("gcloud", "compute", "firewall-rules", "create", firewall,
		"--project="+g.project,
		"--network="+g.network,
		"--allow="+allowPorts,
		"--target-tags="+tag)); err != nil {
		return fmt.Errorf("error creating e2e firewall: %v", err)
	}
	return nil
}

// getInstanceGroups populates g.instanceGroups (path/zone/name/uniq hash per
// node pool) from the cluster's instanceGroupUrls, sorted for determinism.
// Cached after the first successful call.
func (g *gkeDeployer) getInstanceGroups() error {
	if len(g.instanceGroups) > 0 {
		return nil
	}
	igs, err := exec.Command("gcloud", g.containerArgs("clusters", "describe", g.cluster,
		"--format=value(instanceGroupUrls)",
		"--project="+g.project,
		g.location)...).Output()
	if err != nil {
		return fmt.Errorf("instance group URL fetch failed: %s", util.ExecError(err))
	}
	igURLs := strings.Split(strings.TrimSpace(string(igs)), ";")
	if len(igURLs) == 0 {
		return fmt.Errorf("no instance group URLs returned by gcloud, output %q", string(igs))
	}
	// Sort so the "lexically first" group referenced elsewhere is stable.
	sort.Strings(igURLs)
	for _, igURL := range igURLs {
		m := poolRe.FindStringSubmatch(igURL)
		if len(m) == 0 {
			return fmt.Errorf("instanceGroupUrl %q did not match regex %v", igURL, poolRe)
		}
		g.instanceGroups = append(g.instanceGroups, &ig{path: m[0], zone: m[1], name: m[2], uniq: m[3]})
	}
	return nil
}

// getClusterFirewall returns the name of the firewall rule that should cover
// this cluster's nodes.
func (g *gkeDeployer) getClusterFirewall() (string, error) {
	if err := g.getInstanceGroups(); err != nil {
		return "", err
	}
	// We want to ensure that there's an e2e-ports-* firewall rule
	// that maps to the cluster nodes, but the target tag for the
	// nodes can be slow to get. Use the hash from the lexically first
	// node pool instead.
	return "e2e-ports-" + g.instanceGroups[0].uniq, nil
}

// This function ensures that all firewall-rules are deleted from specific network.
// We also want to keep in logs that there were some resources leaking.
// It returns the number of leaked rules it had to delete.
func (g *gkeDeployer) cleanupNetworkFirewalls() (int, error) {
	fws, err := exec.Command("gcloud", "compute", "firewall-rules", "list",
		"--format=value(name)",
		"--project="+g.project,
		"--filter=network:"+g.network).Output()
	if err != nil {
		return 0, fmt.Errorf("firewall rules list failed: %s", util.ExecError(err))
	}
	if len(fws) > 0 {
		fwList := strings.Split(strings.TrimSpace(string(fws)), "\n")
		log.Printf("Network %s has %v undeleted firewall rules %v", g.network, len(fwList), fwList)
		commandArgs := []string{"compute", "firewall-rules", "delete", "-q"}
		commandArgs = append(commandArgs, fwList...)
commandArgs = append(commandArgs, "--project="+g.project) errFirewall := control.FinishRunning(exec.Command("gcloud", commandArgs...)) if errFirewall != nil { return 0, fmt.Errorf("error deleting firewall: %v", errFirewall) } return len(fwList), nil } return 0, nil } func (g *gkeDeployer) ensureNat() error { if !g.createNat { return nil } if g.network == "default" { return fmt.Errorf("NAT router should be set manually for the default network") } region, err := g.getRegion(g.region, g.zone) if err != nil { return fmt.Errorf("error finding region for NAT router: %v", err) } nat := g.getNatName() // Create this unique router only if it does not exist yet. if control.NoOutput(exec.Command("gcloud", "compute", "routers", "describe", nat, "--project="+g.project, "--region="+region, "--format=value(name)")) != nil { log.Printf("Couldn't describe router '%s', assuming it doesn't exist and creating it", nat) if err := control.FinishRunning(exec.Command("gcloud", "compute", "routers", "create", nat, "--project="+g.project, "--network="+g.network, "--region="+region)); err != nil { return fmt.Errorf("error creating NAT router: %v", err) } } // Create this unique NAT configuration only if it does not exist yet. 
	if control.NoOutput(exec.Command("gcloud", "compute", "routers", "nats", "describe", nat,
		"--project="+g.project,
		"--router="+nat,
		"--router-region="+region,
		"--format=value(name)")) != nil {
		log.Printf("Couldn't describe NAT '%s', assuming it doesn't exist and creating it", nat)
		if err := control.FinishRunning(exec.Command("gcloud", "compute", "routers", "nats", "create", nat,
			"--project="+g.project,
			"--router="+nat,
			"--router-region="+region,
			"--auto-allocate-nat-external-ips",
			"--min-ports-per-vm="+strconv.Itoa(g.natMinPortsPerVm),
			"--nat-primary-subnet-ip-ranges")); err != nil {
			return fmt.Errorf("error adding NAT to a router: %v", err)
		}
	}
	return nil
}

// getRegion resolves the region to operate in: the explicit region if one
// was given, otherwise the region containing the given zone (looked up via
// gcloud).
func (g *gkeDeployer) getRegion(region, zone string) (string, error) {
	if region != "" {
		return region, nil
	}
	result, err := exec.Command("gcloud", "compute", "zones", "list",
		"--filter=name="+zone,
		"--format=value(region)",
		"--project="+g.project).Output()
	if err != nil {
		return "", fmt.Errorf("error resolving region of %s zone: %v", zone, err)
	}
	// gcloud terminates its output with a newline; strip it.
	return strings.TrimSuffix(string(result), "\n"), nil
}

// getNatName returns the cluster-unique name used for the NAT router.
func (g *gkeDeployer) getNatName() string {
	return "nat-router-" + g.cluster
}

// cleanupNat deletes the NAT router created by ensureNat (which also removes
// its NAT configuration). No-op unless NAT creation was requested.
func (g *gkeDeployer) cleanupNat() error {
	if !g.createNat {
		return nil
	}
	region, err := g.getRegion(g.region, g.zone)
	if err != nil {
		return fmt.Errorf("error finding region for NAT router: %v", err)
	}
	nat := g.getNatName()
	// Delete NAT router. That will remove NAT configuration as well.
if control.NoOutput(exec.Command("gcloud", "compute", "routers", "describe", nat, "--project="+g.project, "--region="+region, "--format=value(name)")) == nil { log.Printf("Found NAT router '%s', deleting", nat) err = control.FinishRunning(exec.Command("gcloud", "compute", "routers", "delete", "-q", nat, "--project="+g.project, "--region="+region)) if err != nil { return fmt.Errorf("error deleting NAT router: %v", err) } } else { log.Printf("Found no NAT router '%s', assuming resources are clean", nat) } return nil } func (g *gkeDeployer) Down() error { firewall, err := g.getClusterFirewall() if err != nil { // This is expected if the cluster doesn't exist. return nil } g.instanceGroups = nil // We best-effort try all of these and report errors as appropriate. errCluster := control.FinishRunning(exec.Command( "gcloud", g.containerArgs("clusters", "delete", "-q", g.cluster, "--project="+g.project, g.location)...)) // don't delete default network if g.network == "default" { if errCluster != nil { log.Printf("Error deleting cluster using default network, allow the error for now %s", errCluster) } return nil } var errFirewall error if control.NoOutput(exec.Command("gcloud", "compute", "firewall-rules", "describe", firewall, "--project="+g.project, "--format=value(name)")) == nil { log.Printf("Found rules for firewall '%s', deleting them", firewall) errFirewall = control.FinishRunning(exec.Command("gcloud", "compute", "firewall-rules", "delete", "-q", firewall, "--project="+g.project)) } else { log.Printf("Found no rules for firewall '%s', assuming resources are clean", firewall) } numLeakedFWRules, errCleanFirewalls := g.cleanupNetworkFirewalls() errNat := g.cleanupNat() var errSubnet error if g.subnetwork != "" { errSubnet = control.FinishRunning(exec.Command("gcloud", "compute", "networks", "subnets", "delete", "-q", g.subnetwork, g.subnetworkRegion, "--project="+g.project)) } errNetwork := control.FinishRunning(exec.Command("gcloud", "compute", "networks", "delete", 
"-q", g.network, "--project="+g.project)) if errCluster != nil { return fmt.Errorf("error deleting cluster: %v", errCluster) } if errFirewall != nil { return fmt.Errorf("error deleting firewall: %v", errFirewall) } if errCleanFirewalls != nil { return fmt.Errorf("error cleaning-up firewalls: %v", errCleanFirewalls) } if errNat != nil { return fmt.Errorf("error cleaning-up NAT: %v", errNat) } if errSubnet != nil { return fmt.Errorf("error deleting subnetwork: %v", errSubnet) } if errNetwork != nil { return fmt.Errorf("error deleting network: %v", errNetwork) } if numLeakedFWRules > 0 { // Leaked firewall rules are cleaned up already, print a warning instead of failing hard log.Println("Warning: leaked firewall rules") } return nil } func (g *gkeDeployer) containerArgs(args ...string) []string { return append(append(append([]string{}, g.commandGroup...), "container"), args...) } func (g *gkeDeployer) GetClusterCreated(gcpProject string) (time.Time, error) { res, err := control.Output(exec.Command( "gcloud", "compute", "instance-groups", "list", "--project="+gcpProject, "--format=json(name,creationTimestamp)")) if err != nil { return time.Time{}, fmt.Errorf("list instance-group failed : %v", err) } created, err := getLatestClusterUpTime(string(res)) if err != nil { return time.Time{}, fmt.Errorf("parse time failed : got gcloud res %s, err %v", string(res), err) } return created, nil } func (g *gkeDeployer) KubectlCommand() (*exec.Cmd, error) { return nil, nil } Clean up gke kubetest provider /* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ // Package main / gke.go provides the Google Container Engine (GKE) // kubetest deployer via newGKE(). // // TODO(zmerlynn): Pull this out to a separate package? package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "regexp" "sort" "strconv" "strings" "time" "k8s.io/test-infra/kubetest/util" ) const ( defaultPool = "default" e2eAllow = "tcp:22,tcp:80,tcp:8080,tcp:9090,tcp:30000-32767,udp:30000-32767" defaultCreate = "container clusters create --quiet" ) var ( gkeAdditionalZones = flag.String("gke-additional-zones", "", "(gke only) List of additional Google Compute Engine zones to use. Clusters are created symmetrically across zones by default, see --gke-shape for details.") gkeNodeLocations = flag.String("gke-node-locations", "", "(gke only) List of Google Compute Engine zones to use.") gkeEnvironment = flag.String("gke-environment", "", "(gke only) Container API endpoint to use, one of 'test', 'staging', 'prod', or a custom https:// URL") gkeShape = flag.String("gke-shape", `{"default":{"Nodes":3,"MachineType":"n1-standard-2"}}`, `(gke only) A JSON description of node pools to create. The node pool 'default' is required and used for initial cluster creation. All node pools are symmetric across zones, so the cluster total node count is {total nodes in --gke-shape} * {1 + (length of --gke-additional-zones)}. Example: '{"default":{"Nodes":999,"MachineType:":"n1-standard-1"},"heapster":{"Nodes":1, "MachineType":"n1-standard-8", "ExtraArgs": []}}`) gkeCreateArgs = flag.String("gke-create-args", "", "(gke only) (deprecated, use a modified --gke-create-command') Additional arguments passed directly to 'gcloud container clusters create'") gkeCommandGroup = flag.String("gke-command-group", "", "(gke only) Use a different gcloud track (e.g. 'alpha') for all 'gcloud container' commands. Note: This is added to --gke-create-command on create. 
You should only use --gke-command-group if you need to change the gcloud track for *every* gcloud container command.") gkeCreateCommand = flag.String("gke-create-command", defaultCreate, "(gke only) gcloud subcommand used to create a cluster. Modify if you need to pass arbitrary arguments to create.") gkeCustomSubnet = flag.String("gke-custom-subnet", "", "(gke only) if specified, we create a custom subnet with the specified options and use it for the gke cluster. The format should be '<subnet-name> --region=<subnet-gcp-region> --range=<subnet-cidr> <any other optional params>'.") gkeSubnetMode = flag.String("gke-subnet-mode", "auto", "(gke only) subnet creation mode of the GKE cluster network.") gkeReleaseChannel = flag.String("gke-release-channel", "", "(gke only) if specified, bring up GKE clusters from that release channel.") gkeSingleZoneNodeInstanceGroup = flag.Bool("gke-single-zone-node-instance-group", true, "(gke only) Add instance groups from a single zone to the NODE_INSTANCE_GROUP env variable.") gkeNodePorts = flag.String("gke-node-ports", "", "(gke only) List of ports on nodes to open, allowing e.g. master to connect to pods on private nodes. The format should be 'protocol[:port[-port]],[...]' as in gcloud compute firewall-rules create --allow.") gkeCreateNat = flag.Bool("gke-create-nat", false, "(gke only) Configure Cloud NAT allowing outbound connections in cluster with private nodes.") gkeNatMinPortsPerVm = flag.Int("gke-nat-min-ports-per-vm", 64, "(gke only) Specify number of ports per cluster VM for NAT router. Number of ports * number of nodes / 64k = number of auto-allocated IP addresses (there is a hard limit of 100 IPs).") // poolRe matches instance group URLs of the form `https://www.googleapis.com/compute/v1/projects/some-project/zones/a-zone/instanceGroupManagers/gke-some-cluster-some-pool-90fcb815-grp`. 
Match meaning: // m[0]: path starting with zones/ // m[1]: zone // m[2]: pool name (passed to e2es) // m[3]: unique hash (used as nonce for firewall rules) poolRe = regexp.MustCompile(`zones/([^/]+)/instanceGroupManagers/(gke-.*-([0-9a-f]{8})-grp)$`) urlRe = regexp.MustCompile(`https://.*/`) ) type gkeNodePool struct { Nodes int MachineType string ExtraArgs []string } type gkeDeployer struct { project string zone string region string location string additionalZones string nodeLocations string nodePorts string cluster string shape map[string]gkeNodePool network string subnetwork string subnetMode string subnetworkRegion string createNat bool natMinPortsPerVm int image string imageFamily string imageProject string commandGroup []string createCommand []string singleZoneNodeInstanceGroup bool sshProxyInstanceName string setup bool kubecfg string instanceGroups []*ig } type ig struct { path string zone string name string uniq string } var _ deployer = &gkeDeployer{} func newGKE(provider, project, zone, region, network, image, imageFamily, imageProject, cluster, sshProxyInstanceName string, testArgs *string, upgradeArgs *string) (*gkeDeployer, error) { if provider != "gke" { return nil, fmt.Errorf("--provider must be 'gke' for GKE deployment, found %q", provider) } g := &gkeDeployer{} if cluster == "" { return nil, fmt.Errorf("--cluster must be set for GKE deployment") } g.cluster = cluster if project == "" { return nil, fmt.Errorf("--gcp-project must be set for GKE deployment") } g.project = project if zone == "" && region == "" { return nil, fmt.Errorf("--gcp-zone or --gcp-region must be set for GKE deployment") } else if zone != "" && region != "" { return nil, fmt.Errorf("--gcp-zone and --gcp-region cannot both be set") } if zone != "" { g.zone = zone g.location = "--zone=" + zone } else if region != "" { g.region = region g.location = "--region=" + region } if network == "" { return nil, fmt.Errorf("--gcp-network must be set for GKE deployment") } g.network = 
network if strings.ToUpper(image) == "CUSTOM" { if imageFamily == "" || imageProject == "" { return nil, fmt.Errorf("--image-family and --image-project must be set for GKE deployment if --gcp-node-image=CUSTOM") } } g.imageFamily = imageFamily g.imageProject = imageProject g.image = image g.additionalZones = *gkeAdditionalZones g.nodeLocations = *gkeNodeLocations g.nodePorts = *gkeNodePorts g.createNat = *gkeCreateNat g.natMinPortsPerVm = *gkeNatMinPortsPerVm err := json.Unmarshal([]byte(*gkeShape), &g.shape) if err != nil { return nil, fmt.Errorf("--gke-shape must be valid JSON, unmarshal error: %v, JSON: %q", err, *gkeShape) } if _, ok := g.shape[defaultPool]; !ok { return nil, fmt.Errorf("--gke-shape must include a node pool named 'default', found %q", *gkeShape) } switch subnetMode := *gkeSubnetMode; subnetMode { case "auto", "custom": g.subnetMode = subnetMode default: return nil, fmt.Errorf("--gke-subnet-mode must be set either to 'auto' or 'custom', got: %s", subnetMode) } g.commandGroup = strings.Fields(*gkeCommandGroup) g.createCommand = append([]string{}, g.commandGroup...) g.createCommand = append(g.createCommand, strings.Fields(*gkeCreateCommand)...) createArgs := strings.Fields(*gkeCreateArgs) if len(createArgs) > 0 { log.Printf("--gke-create-args is deprecated, please use '--gke-create-command=%s %s'", defaultCreate, *gkeCreateArgs) } g.createCommand = append(g.createCommand, createArgs...) 
if err := util.MigrateOptions([]util.MigratedOption{{ Env: "CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER", Option: gkeEnvironment, Name: "--gke-environment", }}); err != nil { return nil, err } var endpoint string switch env := *gkeEnvironment; { case env == "test": endpoint = "https://test-container.sandbox.googleapis.com/" case env == "staging": endpoint = "https://staging-container.sandbox.googleapis.com/" case env == "staging2": endpoint = "https://staging2-container.sandbox.googleapis.com/" case env == "prod": endpoint = "https://container.googleapis.com/" case urlRe.MatchString(env): endpoint = env default: return nil, fmt.Errorf("--gke-environment must be one of {test,staging,prod} or match %v, found %q", urlRe, env) } if err := os.Setenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER", endpoint); err != nil { return nil, err } // Override kubecfg to a temporary file rather than trashing the user's. f, err := ioutil.TempFile("", "gke-kubecfg") if err != nil { return nil, err } defer f.Close() kubecfg := f.Name() if err := f.Chmod(0600); err != nil { return nil, err } g.kubecfg = kubecfg // We want no KUBERNETES_PROVIDER, but to set // KUBERNETES_CONFORMANCE_PROVIDER and // KUBERNETES_CONFORMANCE_TEST. This prevents ginkgo-e2e.sh from // using the cluster/gke functions. // // We do this in the deployer constructor so that // cluster/gce/list-resources.sh outputs the same provider for the // extent of the binary. (It seems like it belongs in TestSetup, // but that way leads to madness.) // // TODO(zmerlynn): This is gross. if err := os.Unsetenv("KUBERNETES_PROVIDER"); err != nil { return nil, err } if err := os.Setenv("KUBERNETES_CONFORMANCE_TEST", "yes"); err != nil { return nil, err } if err := os.Setenv("KUBERNETES_CONFORMANCE_PROVIDER", "gke"); err != nil { return nil, err } // TODO(zmerlynn): Another snafu of cluster/gke/list-resources.sh: // Set KUBE_GCE_INSTANCE_PREFIX so that we don't accidentally pick // up CLUSTER_NAME later. 
if err := os.Setenv("KUBE_GCE_INSTANCE_PREFIX", "gke-"+g.cluster); err != nil { return nil, err } // set --num-nodes flag for ginkgo, since NUM_NODES is not set for gke deployer. numNodes := strconv.Itoa(g.shape[defaultPool].Nodes) // testArgs can be empty, and we need to support this case *testArgs = strings.Join(util.SetFieldDefault(strings.Fields(*testArgs), "--num-nodes", numNodes), " ") if *upgradeArgs != "" { // --upgrade-target will be passed to e2e upgrade framework to get a valid update version. // See usage from https://github.com/kubernetes/kubernetes/blob/master/hack/get-build.sh for supported targets. // Here we special case for gke-latest and will extract an actual valid gke version. // - gke-latest will be resolved to the latest gke version, and // - gke-latest-1.7 will be resolved to the latest 1.7 patch version supported on gke. fields, val, exist := util.ExtractField(strings.Fields(*upgradeArgs), "--upgrade-target") if exist { if strings.HasPrefix(val, "gke-latest") { releasePrefix := "" if strings.HasPrefix(val, "gke-latest-") { releasePrefix = strings.TrimPrefix(val, "gke-latest-") } if val, err = getLatestGKEVersion(project, zone, region, releasePrefix); err != nil { return nil, fmt.Errorf("fail to get latest gke version : %v", err) } } fields = util.SetFieldDefault(fields, "--upgrade-target", val) } *upgradeArgs = strings.Join(util.SetFieldDefault(fields, "--num-nodes", numNodes), " ") } g.singleZoneNodeInstanceGroup = *gkeSingleZoneNodeInstanceGroup g.sshProxyInstanceName = sshProxyInstanceName return g, nil } func (g *gkeDeployer) Up() error { // Create network if it doesn't exist. if control.NoOutput(exec.Command("gcloud", "compute", "networks", "describe", g.network, "--project="+g.project, "--format=value(name)")) != nil { // Assume error implies non-existent. 
log.Printf("Couldn't describe network '%s', assuming it doesn't exist and creating it", g.network) if err := control.FinishRunning(exec.Command("gcloud", "compute", "networks", "create", g.network, "--project="+g.project, "--subnet-mode="+g.subnetMode)); err != nil { return err } } // Create a custom subnet in that network if it was asked for. if *gkeCustomSubnet != "" { customSubnetFields := strings.Fields(*gkeCustomSubnet) createSubnetCommand := []string{"compute", "networks", "subnets", "create"} createSubnetCommand = append(createSubnetCommand, "--project="+g.project, "--network="+g.network) createSubnetCommand = append(createSubnetCommand, customSubnetFields...) if err := control.FinishRunning(exec.Command("gcloud", createSubnetCommand...)); err != nil { return err } g.subnetwork = customSubnetFields[0] g.subnetworkRegion = customSubnetFields[1] } def := g.shape[defaultPool] args := make([]string, len(g.createCommand)) for i := range args { args[i] = os.ExpandEnv(g.createCommand[i]) } args = append(args, "--project="+g.project, g.location, "--network="+g.network, ) if def.Nodes > 0 { args = append(args, "--num-nodes="+strconv.Itoa(def.Nodes)) } if def.MachineType != "" { args = append(args, "--machine-type="+def.MachineType) } if g.image != "" { args = append(args, "--image-type="+g.image) } args = append(args, def.ExtraArgs...) if strings.ToUpper(g.image) == "CUSTOM" { args = append(args, "--image-family="+g.imageFamily) args = append(args, "--image-project="+g.imageProject) // gcloud enables node auto-upgrade by default, which doesn't work with CUSTOM image. // We disable auto-upgrade explicitly here. args = append(args, "--no-enable-autoupgrade") // Custom images are not supported with shielded nodes (which is enaled by default) in GKE. 
args = append(args, "--no-enable-shielded-nodes") } if g.subnetwork != "" { args = append(args, "--subnetwork="+g.subnetwork) } if g.additionalZones != "" { args = append(args, "--additional-zones="+g.additionalZones) if err := os.Setenv("MULTIZONE", "true"); err != nil { return fmt.Errorf("error setting MULTIZONE env variable: %v", err) } } if g.nodeLocations != "" { args = append(args, "--node-locations="+g.nodeLocations) numNodeLocations := strings.Split(g.nodeLocations, ",") if len(numNodeLocations) > 1 { if err := os.Setenv("MULTIZONE", "true"); err != nil { return fmt.Errorf("error setting MULTIZONE env variable: %v", err) } } } if *gkeReleaseChannel != "" { args = append(args, "--release-channel="+*gkeReleaseChannel) } else { // TODO(zmerlynn): The version should be plumbed through Extract // or a separate flag rather than magic env variables. if v := os.Getenv("CLUSTER_API_VERSION"); v != "" { args = append(args, "--cluster-version="+v) } } args = append(args, g.cluster) if err := control.FinishRunning(exec.Command("gcloud", args...)); err != nil { return fmt.Errorf("error creating cluster: %v", err) } for poolName, pool := range g.shape { if poolName == defaultPool { continue } poolArgs := []string{"node-pools", "create", poolName, "--cluster=" + g.cluster, "--project=" + g.project, g.location, "--num-nodes=" + strconv.Itoa(pool.Nodes)} if pool.MachineType != "" { poolArgs = append(poolArgs, "--machine-type="+pool.MachineType) } poolArgs = append(poolArgs, pool.ExtraArgs...) if err := control.FinishRunning(exec.Command("gcloud", g.containerArgs(poolArgs...)...)); err != nil { return fmt.Errorf("error creating node pool %q: %v", poolName, err) } } return nil } func (g *gkeDeployer) IsUp() error { return isUp(g) } // DumpClusterLogs for GKE generates a small script that wraps // log-dump.sh with the appropriate shell-fu to get the cluster // dumped. 
//
// TODO(zmerlynn): This whole path is really gross, but this seemed
// the least gross hack to get this done.
func (g *gkeDeployer) DumpClusterLogs(localPath, gcsPath string) error {
	// gkeLogDumpTemplate is a template of a shell script where
	// - %[1]s is the project
	// - %[2]s is the zone
	// - %[3]s is the OS distribution of nodes
	// - %[4]s is a filter composed of the instance groups
	// - %[5]s is the log-dump.sh command line
	const gkeLogDumpTemplate = `
function log_dump_custom_get_instances() {
  if [[ $1 == "master" ]]; then return 0; fi

  gcloud compute instances list '--project=%[1]s' '--filter=%[4]s' '--format=get(name)'
}
export -f log_dump_custom_get_instances
# Set below vars that log-dump.sh expects in order to use scp with gcloud.
export PROJECT=%[1]s
export ZONE='%[2]s'
export KUBERNETES_PROVIDER=gke
export KUBE_NODE_OS_DISTRIBUTION='%[3]s'
%[5]s
`
	// Prevent an obvious injection.
	if strings.Contains(localPath, "'") || strings.Contains(gcsPath, "'") {
		return fmt.Errorf("%q or %q contain single quotes - nice try", localPath, gcsPath)
	}

	// Generate a slice of filters to be OR'd together below
	if err := g.getInstanceGroups(); err != nil {
		return err
	}
	perZoneFilters := make(map[string][]string)
	for _, ig := range g.instanceGroups {
		filter := fmt.Sprintf("(metadata.created-by ~ %s)", ig.path)
		perZoneFilters[ig.zone] = append(perZoneFilters[ig.zone], filter)
	}

	// Generate the log-dump.sh command-line
	var dumpCmd string
	if gcsPath == "" {
		dumpCmd = fmt.Sprintf("./cluster/log-dump/log-dump.sh '%s'", localPath)
	} else {
		dumpCmd = fmt.Sprintf("./cluster/log-dump/log-dump.sh '%s' '%s'", localPath, gcsPath)
	}

	// Try to setup cluster access if it's possible. If credentials are already set, this will be no-op. Access to
	// GKE cluster is required for log-exporter.
	if err := g.getKubeConfig(); err != nil {
		log.Printf("error while setting up kubeconfig: %v", err)
	}

	// Make sure the firewall rule is created. It's needed so the log-dump.sh can ssh into nodes.
	// If cluster-up operation failed for some reasons (e.g. some nodes didn't register) the
	// firewall rule isn't automatically created as the TestSetup is not being executed. If firewall
	// rule was successfully created, the ensureFirewall call will be no-op.
	if err := g.ensureFirewall(); err != nil {
		log.Printf("error while ensuring firewall rule: %v", err)
	}

	// Run the dump script once per zone; collect failures instead of aborting
	// on the first one so every zone gets a chance to dump.
	var errorMessages []string
	for zone, filters := range perZoneFilters {
		err := control.FinishRunning(exec.Command("bash", "-c", fmt.Sprintf(gkeLogDumpTemplate,
			g.project,
			zone,
			os.Getenv("NODE_OS_DISTRIBUTION"),
			strings.Join(filters, " OR "),
			dumpCmd)))
		if err != nil {
			errorMessages = append(errorMessages, err.Error())
		}
	}
	if len(errorMessages) > 0 {
		return fmt.Errorf("errors while dumping logs: %s", strings.Join(errorMessages, ", "))
	}
	return nil
}

// TestSetup performs the one-time post-Up setup (kubeconfig, instance groups,
// firewall, NAT, bastion, env vars). g.setup makes it a no-op after the first
// successful run.
func (g *gkeDeployer) TestSetup() error {
	if g.setup {
		// Ensure setup is a singleton.
		return nil
	}
	if err := g.getKubeConfig(); err != nil {
		return err
	}
	if err := g.getInstanceGroups(); err != nil {
		return err
	}
	if err := g.ensureFirewall(); err != nil {
		return err
	}
	if err := g.ensureNat(); err != nil {
		return err
	}
	if err := g.setupBastion(); err != nil {
		return err
	}
	if err := g.setupEnv(); err != nil {
		return err
	}
	g.setup = true
	return nil
}

// setupBastion locates the SSH proxy VM named by g.sshProxyInstanceName,
// ensures it has a NAT access config, and exports it via KUBE_SSH_BASTION.
// No-op when no proxy instance name was configured.
func (g *gkeDeployer) setupBastion() error {
	if g.sshProxyInstanceName == "" {
		return nil
	}
	var filtersToTry []string
	// Use exact name first, VM does not have to belong to the cluster
	exactFilter := "name=" + g.sshProxyInstanceName
	filtersToTry = append(filtersToTry, exactFilter)

	// As a fallback - use proxy instance name as a regex but check only cluster nodes
	var igFilters []string
	for _, ig := range g.instanceGroups {
		igFilters = append(igFilters, fmt.Sprintf("(metadata.created-by ~ %s)", ig.path))
	}
	fuzzyFilter := fmt.Sprintf("(name ~ %s) AND (%s)", g.sshProxyInstanceName, strings.Join(igFilters, " OR "))
	filtersToTry = append(filtersToTry, fuzzyFilter)

	var bastion, zone string
	for _, filter := range filtersToTry {
		log.Printf("Checking for proxy instance with filter: %q", filter)
		output, err := exec.Command("gcloud", "compute", "instances", "list",
			"--filter="+filter,
			"--format=value(name,zone)",
			"--limit=1",
			"--project="+g.project).Output()
		if err != nil {
			return fmt.Errorf("listing instances failed: %s", util.ExecError(err))
		}
		if len(output) == 0 {
			continue
		}
		// Proxy instance found; gcloud's value() format is tab-separated.
		fields := strings.Split(strings.TrimSpace(string(output)), "\t")
		if len(fields) != 2 {
			return fmt.Errorf("error parsing instances list output %q", output)
		}
		bastion, zone = fields[0], fields[1]
		break
	}
	if bastion == "" {
		return fmt.Errorf("proxy instance %q not found", g.sshProxyInstanceName)
	}
	log.Printf("Found proxy instance %q", bastion)
	log.Printf("Adding NAT access config if not present")
	// Best-effort: error is intentionally ignored, the access config may already exist.
	control.NoOutput(exec.Command("gcloud", "compute", "instances", "add-access-config", bastion,
		"--zone="+zone,
		"--project="+g.project))
	// NOTE(review): helper name looks misspelled ("Shh" vs "Ssh") — it is defined elsewhere in this package.
	err := setKubeShhBastionEnv(g.project, zone, bastion)
	if err != nil {
		return fmt.Errorf("setting KUBE_SSH_BASTION variable failed: %s", util.ExecError(err))
	}
	return nil
}

// getKubeConfig points KUBECONFIG at g.kubecfg and populates it via
// `gcloud container clusters get-credentials` when the file is empty.
// The file must already exist (os.Stat error is returned as-is).
func (g *gkeDeployer) getKubeConfig() error {
	info, err := os.Stat(g.kubecfg)
	if err != nil {
		return err
	}
	if info.Size() > 0 {
		// Assume that if we already have it, it's good.
		return nil
	}
	if err := os.Setenv("KUBECONFIG", g.kubecfg); err != nil {
		return err
	}
	if err := control.FinishRunning(exec.Command("gcloud", g.containerArgs("clusters", "get-credentials", g.cluster,
		"--project="+g.project,
		g.location)...)); err != nil {
		return fmt.Errorf("error executing get-credentials: %v", err)
	}
	return nil
}

// setupEnv is to appease ginkgo-e2e.sh and other pieces of the e2e infrastructure. It
// would be nice to handle this elsewhere, and not with env
// variables. c.f. kubernetes/test-infra#3330.
func (g *gkeDeployer) setupEnv() error {
	// If singleZoneNodeInstanceGroup is true, set NODE_INSTANCE_GROUP to the
	// names of instance groups that are in the same zone as the lexically first
	// instance group. Otherwise set NODE_INSTANCE_GROUP to the names of all
	// instance groups.
	var filt []string
	zone := g.instanceGroups[0].zone
	for _, ig := range g.instanceGroups {
		if !g.singleZoneNodeInstanceGroup || ig.zone == zone {
			filt = append(filt, ig.name)
		}
	}
	if err := os.Setenv("NODE_INSTANCE_GROUP", strings.Join(filt, ",")); err != nil {
		return fmt.Errorf("error setting NODE_INSTANCE_GROUP: %v", err)
	}
	return nil
}

// ensureFirewall makes sure the cluster's unique e2e-ports-* firewall rule
// exists, creating it from the first node's network tag when missing.
// No-op on the default network.
func (g *gkeDeployer) ensureFirewall() error {
	if g.network == "default" {
		return nil
	}
	firewall, err := g.getClusterFirewall()
	if err != nil {
		return fmt.Errorf("error getting unique firewall: %v", err)
	}
	if control.NoOutput(exec.Command("gcloud", "compute", "firewall-rules", "describe", firewall,
		"--project="+g.project,
		"--format=value(name)")) == nil {
		// Assume that if this unique firewall exists, it's good to go.
		return nil
	}
	log.Printf("Couldn't describe firewall '%s', assuming it doesn't exist and creating it", firewall)

	// Derive the target tag from an arbitrary instance of the first instance group.
	tagOut, err := exec.Command("gcloud", "compute", "instances", "list",
		"--project="+g.project,
		"--filter=metadata.created-by ~ "+g.instanceGroups[0].path,
		"--limit=1",
		"--format=get(tags.items)").Output()
	if err != nil {
		return fmt.Errorf("instances list failed: %s", util.ExecError(err))
	}
	tag := strings.TrimSpace(string(tagOut))
	if tag == "" {
		return fmt.Errorf("instances list returned no instances (or instance has no tags)")
	}

	allowPorts := e2eAllow
	if g.nodePorts != "" {
		allowPorts += "," + g.nodePorts
	}
	if err := control.FinishRunning(exec.Command("gcloud", "compute", "firewall-rules", "create", firewall,
		"--project="+g.project,
		"--network="+g.network,
		"--allow="+allowPorts,
		"--target-tags="+tag)); err != nil {
		return fmt.Errorf("error creating e2e firewall: %v", err)
	}
	return nil
}

// getInstanceGroups lazily populates g.instanceGroups by parsing the
// cluster's instanceGroupUrls (sorted, matched against poolRe).
func (g *gkeDeployer) getInstanceGroups() error {
	if len(g.instanceGroups) > 0 {
		return nil
	}
	igs, err := exec.Command("gcloud", g.containerArgs("clusters", "describe", g.cluster,
		"--format=value(instanceGroupUrls)",
		"--project="+g.project,
		g.location)...).Output()
	if err != nil {
		return fmt.Errorf("instance group URL fetch failed: %s", util.ExecError(err))
	}
	igURLs := strings.Split(strings.TrimSpace(string(igs)), ";")
	if len(igURLs) == 0 {
		return fmt.Errorf("no instance group URLs returned by gcloud, output %q", string(igs))
	}
	sort.Strings(igURLs)
	for _, igURL := range igURLs {
		m := poolRe.FindStringSubmatch(igURL)
		if len(m) == 0 {
			return fmt.Errorf("instanceGroupUrl %q did not match regex %v", igURL, poolRe)
		}
		g.instanceGroups = append(g.instanceGroups, &ig{path: m[0], zone: m[1], name: m[2], uniq: m[3]})
	}
	return nil
}

// getClusterFirewall returns the name of the cluster's unique e2e firewall rule.
func (g *gkeDeployer) getClusterFirewall() (string, error) {
	if err := g.getInstanceGroups(); err != nil {
		return "", err
	}
	// We want to ensure that there's an e2e-ports-* firewall rule
	// that maps to the cluster nodes, but the target tag for the
	// nodes can be slow to get. Use the hash from the lexically first
	// node pool instead.
	return "e2e-ports-" + g.instanceGroups[0].uniq, nil
}

// This function ensures that all firewall-rules are deleted from specific network.
// We also want to keep in logs that there were some resources leaking.
// Returns the number of rules that had leaked (and were deleted).
func (g *gkeDeployer) cleanupNetworkFirewalls() (int, error) {
	fws, err := exec.Command("gcloud", "compute", "firewall-rules", "list",
		"--format=value(name)",
		"--project="+g.project,
		"--filter=network:"+g.network).Output()
	if err != nil {
		return 0, fmt.Errorf("firewall rules list failed: %s", util.ExecError(err))
	}
	if len(fws) > 0 {
		fwList := strings.Split(strings.TrimSpace(string(fws)), "\n")
		log.Printf("Network %s has %v undeleted firewall rules %v", g.network, len(fwList), fwList)
		commandArgs := []string{"compute", "firewall-rules", "delete", "-q"}
		commandArgs = append(commandArgs, fwList...)
		commandArgs = append(commandArgs, "--project="+g.project)
		errFirewall := control.FinishRunning(exec.Command("gcloud", commandArgs...))
		if errFirewall != nil {
			return 0, fmt.Errorf("error deleting firewall: %v", errFirewall)
		}
		return len(fwList), nil
	}
	return 0, nil
}

// ensureNat creates the cluster's Cloud Router and NAT config when
// g.createNat is set; both creations are skipped if they already exist.
func (g *gkeDeployer) ensureNat() error {
	if !g.createNat {
		return nil
	}
	if g.network == "default" {
		return fmt.Errorf("NAT router should be set manually for the default network")
	}
	region, err := g.getRegion(g.region, g.zone)
	if err != nil {
		return fmt.Errorf("error finding region for NAT router: %v", err)
	}
	nat := g.getNatName()
	// Create this unique router only if it does not exist yet.
	if control.NoOutput(exec.Command("gcloud", "compute", "routers", "describe", nat,
		"--project="+g.project,
		"--region="+region,
		"--format=value(name)")) != nil {
		log.Printf("Couldn't describe router '%s', assuming it doesn't exist and creating it", nat)
		if err := control.FinishRunning(exec.Command("gcloud", "compute", "routers", "create", nat,
			"--project="+g.project,
			"--network="+g.network,
			"--region="+region)); err != nil {
			return fmt.Errorf("error creating NAT router: %v", err)
		}
	}
	// Create this unique NAT configuration only if it does not exist yet.
	if control.NoOutput(exec.Command("gcloud", "compute", "routers", "nats", "describe", nat,
		"--project="+g.project,
		"--router="+nat,
		"--router-region="+region,
		"--format=value(name)")) != nil {
		log.Printf("Couldn't describe NAT '%s', assuming it doesn't exist and creating it", nat)
		if err := control.FinishRunning(exec.Command("gcloud", "compute", "routers", "nats", "create", nat,
			"--project="+g.project,
			"--router="+nat,
			"--router-region="+region,
			"--auto-allocate-nat-external-ips",
			"--min-ports-per-vm="+strconv.Itoa(g.natMinPortsPerVm),
			"--nat-primary-subnet-ip-ranges")); err != nil {
			return fmt.Errorf("error adding NAT to a router: %v", err)
		}
	}
	return nil
}

// getRegion returns region as-is when non-empty; otherwise it resolves the
// region that zone belongs to via gcloud.
func (g *gkeDeployer) getRegion(region, zone string) (string, error) {
	if region != "" {
		return region, nil
	}
	result, err := exec.Command("gcloud", "compute", "zones", "list",
		"--filter=name="+zone,
		"--format=value(region)",
		"--project="+g.project).Output()
	if err != nil {
		return "", fmt.Errorf("error resolving region of %s zone: %v", zone, err)
	}
	return strings.TrimSuffix(string(result), "\n"), nil
}

// getNatName returns the deterministic per-cluster NAT router name.
func (g *gkeDeployer) getNatName() string {
	return "nat-router-" + g.cluster
}

// cleanupNat deletes the per-cluster NAT router (which also removes its NAT
// configuration) when g.createNat is set and the router still exists.
func (g *gkeDeployer) cleanupNat() error {
	if !g.createNat {
		return nil
	}
	region, err := g.getRegion(g.region, g.zone)
	if err != nil {
		return fmt.Errorf("error finding region for NAT router: %v", err)
	}
	nat := g.getNatName()
	// Delete NAT router. That will remove NAT configuration as well.
	if control.NoOutput(exec.Command("gcloud", "compute", "routers", "describe", nat,
		"--project="+g.project,
		"--region="+region,
		"--format=value(name)")) == nil {
		log.Printf("Found NAT router '%s', deleting", nat)
		err = control.FinishRunning(exec.Command("gcloud", "compute", "routers", "delete", "-q", nat,
			"--project="+g.project,
			"--region="+region))
		if err != nil {
			return fmt.Errorf("error deleting NAT router: %v", err)
		}
	} else {
		log.Printf("Found no NAT router '%s', assuming resources are clean", nat)
	}
	return nil
}

// Down tears down the cluster and, for non-default networks, best-effort
// deletes the firewall rules, NAT, subnetwork and network created for it.
// All deletions are attempted before the first error is reported.
func (g *gkeDeployer) Down() error {
	firewall, err := g.getClusterFirewall()
	if err != nil {
		// This is expected if the cluster doesn't exist.
		return nil
	}
	g.instanceGroups = nil

	// We best-effort try all of these and report errors as appropriate.
	errCluster := control.FinishRunning(exec.Command(
		"gcloud", g.containerArgs("clusters", "delete", "-q", g.cluster,
			"--project="+g.project,
			g.location)...))

	// don't delete default network
	if g.network == "default" {
		if errCluster != nil {
			log.Printf("Error deleting cluster using default network, allow the error for now %s", errCluster)
		}
		return nil
	}

	var errFirewall error
	if control.NoOutput(exec.Command("gcloud", "compute", "firewall-rules", "describe", firewall,
		"--project="+g.project,
		"--format=value(name)")) == nil {
		log.Printf("Found rules for firewall '%s', deleting them", firewall)
		errFirewall = control.FinishRunning(exec.Command("gcloud", "compute", "firewall-rules", "delete", "-q", firewall,
			"--project="+g.project))
	} else {
		log.Printf("Found no rules for firewall '%s', assuming resources are clean", firewall)
	}
	numLeakedFWRules, errCleanFirewalls := g.cleanupNetworkFirewalls()
	errNat := g.cleanupNat()
	var errSubnet error
	if g.subnetwork != "" {
		errSubnet = control.FinishRunning(exec.Command("gcloud", "compute", "networks", "subnets", "delete", "-q", g.subnetwork,
			g.subnetworkRegion, "--project="+g.project))
	}
	errNetwork := control.FinishRunning(exec.Command("gcloud", "compute", "networks", "delete", "-q", g.network,
		"--project="+g.project))
	if errCluster != nil {
		return fmt.Errorf("error deleting cluster: %v", errCluster)
	}
	if errFirewall != nil {
		return fmt.Errorf("error deleting firewall: %v", errFirewall)
	}
	if errCleanFirewalls != nil {
		return fmt.Errorf("error cleaning-up firewalls: %v", errCleanFirewalls)
	}
	if errNat != nil {
		return fmt.Errorf("error cleaning-up NAT: %v", errNat)
	}
	if errSubnet != nil {
		return fmt.Errorf("error deleting subnetwork: %v", errSubnet)
	}
	if errNetwork != nil {
		return fmt.Errorf("error deleting network: %v", errNetwork)
	}
	if numLeakedFWRules > 0 {
		// Leaked firewall rules are cleaned up already, print a warning instead of failing hard
		log.Println("Warning: leaked firewall rules")
	}
	return nil
}

// containerArgs prefixes args with the configured gcloud command group
// (e.g. "beta") and the "container" component.
func (g *gkeDeployer) containerArgs(args ...string) []string {
	return append(append(append([]string{}, g.commandGroup...), "container"), args...)
}

// GetClusterCreated returns the most recent creationTimestamp among the
// project's instance groups, as parsed by getLatestClusterUpTime.
func (g *gkeDeployer) GetClusterCreated(gcpProject string) (time.Time, error) {
	res, err := control.Output(exec.Command(
		"gcloud",
		"compute",
		"instance-groups",
		"list",
		"--project="+gcpProject,
		"--format=json(name,creationTimestamp)"))
	if err != nil {
		return time.Time{}, fmt.Errorf("list instance-group failed : %v", err)
	}
	created, err := getLatestClusterUpTime(string(res))
	if err != nil {
		return time.Time{}, fmt.Errorf("parse time failed : got gcloud res %s, err %v", string(res), err)
	}
	return created, nil
}

// KubectlCommand deliberately returns (nil, nil); callers presumably fall
// back to a default kubectl invocation — TODO confirm against the interface's
// other implementations.
func (g *gkeDeployer) KubectlCommand() (*exec.Cmd, error) { return nil, nil }
package main

import (
	"crypto/rand"
	"encoding/base64"
	"log"
)

// User holds the chat-visible name and authorization state of one user.
type User struct {
	UserName     string
	AuthCode     string // one-time login code; empty until DoLogin is called
	IsAuthorized bool
}

// Users maps a chat user id to that user's state.
type Users map[int]*User

// length of random code in bytes
const CODE_BYTES_LENGTH = 15

// AddNew registers a user under userID if not already present.
// An existing user's state (name, code, authorization) is left untouched.
func (users Users) AddNew(userID int, userName string) {
	if _, ok := users[userID]; !ok {
		users[userID] = &User{
			UserName:     userName,
			AuthCode:     "",
			IsAuthorized: false,
		}
	}
}

// DoLogin starts a login attempt: it revokes any current authorization and
// issues a fresh random code. Unknown user ids are ignored (previously this
// dereferenced a nil map entry and panicked).
func (users Users) DoLogin(userID int) {
	user, ok := users[userID]
	if !ok {
		return
	}
	user.IsAuthorized = false
	user.AuthCode = getRandomCode()
}

// IsValidCode reports whether code matches the pending code for userID and,
// on success, marks the user as authorized. The empty code never matches.
// Unknown user ids are rejected (previously a nil-entry panic).
func (users Users) IsValidCode(userID int, code string) bool {
	user, ok := users[userID]
	if !ok {
		return false
	}
	valid := code != "" && code == user.AuthCode
	if valid {
		user.IsAuthorized = true
	}
	return valid
}

// IsAuthorized reports whether userID is known and has completed login.
func (users Users) IsAuthorized(userID int) bool {
	user, ok := users[userID]
	return ok && user.IsAuthorized
}

// getRandomCode returns CODE_BYTES_LENGTH cryptographically random bytes,
// URL-safe base64 encoded. Aborts the process if the system RNG fails.
func getRandomCode() string {
	buffer := make([]byte, CODE_BYTES_LENGTH)
	_, err := rand.Read(buffer)
	if err != nil {
		log.Fatal("Get code error:", err)
	}
	return base64.URLEncoding.EncodeToString(buffer)
}
package main /** * Utility methods/classes for sysdash */ import ( "bytes" "fmt" "log" "os/exec" "path/filepath" "regexp" "strconv" "strings" "syscall" "unicode/utf8" ui "github.com/gizak/termui" ) //////////////////////////////////////////// // Utility: Formatting //////////////////////////////////////////// /** * Make a string as wide as requested, with stuff left justified and right justified. * * width: How wide to get. * left: What text goes on the left? * right: What text goes on the right? * fillChar: What character to use as the filler. */ func fitAStringToWidth(width int, left string, right string, fillChar string) string { leftLen := utf8.RuneCountInString(left) rightLen := utf8.RuneCountInString(right) fillCharLen := utf8.RuneCountInString(fillChar) // Usually 1 // Figure out how many filler chars we need fillLen := width - (leftLen + rightLen) fillRunes := (fillLen - 1 + fillCharLen) / fillCharLen if fillRunes < 0 { fillRunes = 0 } fillStr := strings.Repeat(fillChar, fillRunes) return fmt.Sprintf("%s %s %s", left, fillStr, right) } func rightJustify(width int, str string) string { rightJustfyLen := width - utf8.RuneCountInString(str) var rightJustify = "" if rightJustfyLen > 0 { rightJustify = strings.Repeat(" ", rightJustfyLen) } return rightJustify + str } func centerString(width int, str string) string { start := (width / 2) - (utf8.RuneCountInString(str) / 2) if start > 0 { return fmt.Sprintf("%s%s", strings.Repeat(" ", start), str) } else { return str } } var ANSI_REGEXP = regexp.MustCompile(`\x1B\[(([0-9]{1,2})?(;)?([0-9]{1,2})?)?[m,K,H,f,J]`) func stripANSI(str string) string { return ANSI_REGEXP.ReplaceAllLiteralString(str, "") } func prettyPrintBytes(bytes uint64) string { if bytes > (1024 * 1024 * 1024) { gb := float64(bytes) / float64(1024*1024*1024) return fmt.Sprintf("%0.2fG", gb) } else if bytes > (1024 * 1024) { mb := float64(bytes) / float64(1024*1024) return fmt.Sprintf("%0.2fM", mb) } else if bytes > (1024) { kb := float64(bytes) / 
float64(1024) return fmt.Sprintf("%0.2fK", kb) } else { return fmt.Sprintf("%dbytes", bytes) } } var FG_BG_REGEXP = regexp.MustCompile("(fg|bg|FG|BG)-") // Colors according to where value is in the min/max range func percentToAttribute(value int, minValue int, maxValue int, invert bool) ui.Attribute { return ui.StringToAttribute(FG_BG_REGEXP.ReplaceAllLiteralString(percentToAttributeString(value, minValue, maxValue, invert), "")) } // Colors according to where value is in the min/max range func percentToAttributeString(value int, minValue int, maxValue int, invert bool) string { span := float64(maxValue - minValue) fvalue := float64(value) // If invert is set... if invert { // "good" is close to min and "bad" is closer to max if fvalue > 0.90*span { return "fg-red,fg-bold" } else if fvalue > 0.75*span { return "fg-red" } else if fvalue > 0.50*span { return "fg-yellow,fg-bold" } else if fvalue > 0.25*span { return "fg-green" } else if fvalue > 0.05*span { return "fg-green,fg-bold" } else { return "fg-blue,fg-bold" } } else { // "good" is close to max and "bad" is closer to min if fvalue < 0.10*span { return "fg-red,fg-bold" } else if fvalue < 0.25*span { return "fg-red" } else if fvalue < 0.50*span { return "fg-yellow,fg-bold" } else if fvalue < 0.75*span { return "fg-green" } else if fvalue < 0.95*span { return "fg-green,fg-bold" } else { return "fg-blue,fg-bold" } } } //////////////////////////////////////////// // Utility: Command Exec //////////////////////////////////////////// func execAndGetOutput(name string, workingDirectory *string, args ...string) (stdout string, exitCode int, err error) { cmd := exec.Command(name, args...) 
var out bytes.Buffer cmd.Stdout = &out if workingDirectory != nil { cmd.Dir = *workingDirectory } err = cmd.Run() // Getting the exit code is platform dependant, this code isn't portable exitCode = 0 if err != nil { // Based on: https://stackoverflow.com/questions/10385551/get-exit-code-go if exitError, ok := err.(*exec.ExitError); ok { ws := exitError.Sys().(syscall.WaitStatus) exitCode = ws.ExitStatus() } else { // Failed, but on a platform where this conversion doesn't work... exitCode = 1 } } stdout = out.String() return } //////////////////////////////////////////// // Utility: Paths //////////////////////////////////////////// func normalizePath(osPathname string) string { // Get absolute path with no symlinks nolinksPath, symErr := filepath.EvalSymlinks(osPathname) if symErr != nil { log.Printf("Error evaluating file symlinks (%v): %v", osPathname, symErr) return osPathname } else { fullName, pathErr := filepath.Abs(nolinksPath) if pathErr != nil { log.Printf("Error getting absolute path (%v): %v", nolinksPath, pathErr) return nolinksPath } else { return fullName } } } //////////////////////////////////////////// // Utility: 8-bit ANSI Colors //////////////////////////////////////////// /** * Converts 8-bit color into 3/4-bit color. 
* https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit */ func Color8BitAsString(index int) string { retval := "fg-black" if index < 16 { switch index { case 0: retval = "fg-black" case 1: retval = "fg-red" case 2: retval = "fg-green" case 3: retval = "fg-yellow" case 4: retval = "fg-blue" case 5: retval = "fg-magenta" case 6: retval = "fg-cyan" case 7: retval = "fg-white" case 8: retval = "fg-black,fg-bold" case 9: retval = "fg-red,fg-bold" case 10: retval = "fg-green,fg-bold" case 11: retval = "fg-yellow,fg-bold" case 12: retval = "fg-blue,fg-bold" case 13: retval = "fg-magenta,fg-bold" case 14: retval = "fg-cyan,fg-bold" case 15: retval = "fg-white,fg-bold" } } else if index < 232 { // Palletized colors i := index - 16 r := i / 36 i -= r * 36 g := i / 6 i -= g * 6 b := i smallColor := "fg-black" if r >= 3 { // Red on if g >= 3 { // Green on if b >= 3 { // Blue on smallColor = "fg-white,fg-bold" } else { // Blue off smallColor = "fg-yellow,fg-bold" } } else { // Green off if b >= 3 { // Blue on smallColor = "fg-magenta,fg-bold" } else { // Blue off smallColor = "fg-red,fg-bold" } } } else { // Red off if g >= 3 { // Green on if b >= 3 { // Blue on smallColor = "fg-cyan,fg-bold" } else { // Blue off smallColor = "fg-green,fg-bold" } } else { // Green off if b >= 3 { // Blue on smallColor = "fg-blue,fg-bold" } else { // Blue off smallColor = "fg-black" } } } retval = smallColor } else { // Grayscale colors if index < 238 { retval = "fg-black" } else if index < 244 { retval = "fg-white" } else if index < 250 { retval = "fg-black,fg-bold" } else if index < 256 { retval = "fg-white,fg-bold" } } return retval } ////////////////////////////////////////////// // Utility: Convert ANSI to (fg-color) syntax ////////////////////////////////////////////// var ANSI_COLOR_GROUPING_REGEXP = regexp.MustCompile(`\x1B\x5B(?P<sgr>(?:[0-9]+;?)+)m(?P<content>[^\x1B]+)\x1B\x5B0?m`) var ANSI_COLOR_MAPPINGS = map[int]string{ 1: "fg-bold", 30: "fg-black", 31: "fg-red", 32: "fg-green", 33: 
"fg-yellow", 34: "fg-blue", 35: "fg-magenta", 36: "fg-cyan", 37: "fg-white", 40: "fg-black", 41: "fg-red", 42: "fg-green", 43: "fg-yellow", 44: "fg-blue", 45: "fg-magenta", 46: "fg-cyan", 47: "fg-white", } func palletizedColorToString(index int) string { return Color8BitAsString(index) } func rgbColorToString(r int, g int, b int) string { log.Printf("We don't know how to handle RGB color yet. Color: #%02x%02x%02x)", r, g, b) return "fg-white" } // Returns how many elements were consumed and the color string func SGR256ColorToString(parts []int) (int, string) { if len(parts) < 1 { log.Printf("Error parsing 256-color SGR code (bad length). Length: %d, Parts: %v", len(parts), parts) return 1, "fg-white" } switch parts[0] { case 2: if len(parts) < 4 { log.Printf("Error parsing 256-color SGR code (not enough numbers for RGB). Parts: %v", parts) return 1, "fg-white" } else { return 4, rgbColorToString(parts[1], parts[2], parts[3]) } case 5: if len(parts) < 2 { log.Printf("Error parsing 256-color SGR code (no index for palette). Parts: %v", parts) return 1, "fg-white" } else { return 2, palletizedColorToString(parts[1]) } default: log.Printf("Error parsing 256-color SGR code (bad code). 
Code: %d, Parts: %v", parts[0], parts) return 1, "fg-white" } } func SGRToColorString(sgr string) string { parts := strings.Split(sgr, ";") iparts := make([]int, len(parts)) for i, x := range parts { iparts[i], _ = strconv.Atoi(x) } i := 0 retval := "" appendRet := func(str string) { if len(retval) > 0 { retval += "," + str } else { retval += str } } for i < len(iparts) { if val, ok := ANSI_COLOR_MAPPINGS[iparts[i]]; ok { // if it's in the map, use that appendRet(val) } else { switch iparts[i] { case 38: // Foreground palette or RGB relevantSlice := iparts[i+1:] consumed, color := SGR256ColorToString(relevantSlice) i += consumed appendRet(color) case 48: // Background palette or RGB relevantSlice := iparts[i+1:] consumed, color := SGR256ColorToString(relevantSlice) color = strings.Replace(color, "fg", "bg", -1) i += consumed appendRet(color) } } i++ } return retval } func ConvertANSIToColorStrings(ansi string) string { log.Printf("Looking for matches in '%v'", ansi) retval := ANSI_COLOR_GROUPING_REGEXP.ReplaceAllStringFunc(ansi, func(matchStr string) string { // matchStr should be the regexp, let's match it again to get the groupings matches := ANSI_COLOR_GROUPING_REGEXP.FindStringSubmatch(matchStr) // 0 is the whole string, 1+ are match groups sgr := matches[1] content := matches[2] colorStr := SGRToColorString(sgr) coloredContent := fmt.Sprintf("[%v](%v)", content, colorStr) return coloredContent }) return stripANSI(retval) } Unnecessary logging. package main /** * Utility methods/classes for sysdash */ import ( "bytes" "fmt" "log" "os/exec" "path/filepath" "regexp" "strconv" "strings" "syscall" "unicode/utf8" ui "github.com/gizak/termui" ) //////////////////////////////////////////// // Utility: Formatting //////////////////////////////////////////// /** * Make a string as wide as requested, with stuff left justified and right justified. * * width: How wide to get. * left: What text goes on the left? * right: What text goes on the right? 
* fillChar: What character to use as the filler. */
func fitAStringToWidth(width int, left string, right string, fillChar string) string {
	leftLen := utf8.RuneCountInString(left)
	rightLen := utf8.RuneCountInString(right)
	fillCharLen := utf8.RuneCountInString(fillChar) // Usually 1

	// Figure out how many filler runes we need (ceiling division so a
	// multi-rune fillChar still covers the gap).
	fillLen := width - (leftLen + rightLen)
	fillRunes := (fillLen - 1 + fillCharLen) / fillCharLen

	if fillRunes < 0 {
		fillRunes = 0
	}

	fillStr := strings.Repeat(fillChar, fillRunes)

	// NOTE(review): the two literal spaces below add 2 columns beyond `width`;
	// presumably intentional visual padding around the filler — confirm with callers.
	return fmt.Sprintf("%s %s %s", left, fillStr, right)
}

// rightJustify pads str with leading spaces so it occupies width columns.
// Strings already at or over width are returned unchanged.
func rightJustify(width int, str string) string {
	rightJustfyLen := width - utf8.RuneCountInString(str)

	var rightJustify = ""

	if rightJustfyLen > 0 {
		rightJustify = strings.Repeat(" ", rightJustfyLen)
	}

	return rightJustify + str
}

// centerString pads str with leading spaces so it is (roughly) centered in
// width columns. No trailing padding is added.
func centerString(width int, str string) string {
	start := (width / 2) - (utf8.RuneCountInString(str) / 2)

	if start > 0 {
		return fmt.Sprintf("%s%s", strings.Repeat(" ", start), str)
	} else {
		return str
	}
}

// Matches simple ANSI escape sequences (SGR and a few cursor/erase codes).
// FIX: the terminator class was previously `[m,K,H,f,J]`, which also matched
// a literal comma — commas in a character class are literals, not separators —
// causing non-ANSI text of the form ESC[..., to be stripped.
var ANSI_REGEXP = regexp.MustCompile(`\x1B\[(([0-9]{1,2})?(;)?([0-9]{1,2})?)?[mKHfJ]`)

// stripANSI removes ANSI escape sequences matched by ANSI_REGEXP from str.
func stripANSI(str string) string {
	return ANSI_REGEXP.ReplaceAllLiteralString(str, "")
}

// prettyPrintBytes renders a byte count using binary units (G/M/K), with two
// decimal places, falling back to a raw "<n>bytes" for small values.
func prettyPrintBytes(bytes uint64) string {
	if bytes > (1024 * 1024 * 1024) {
		gb := float64(bytes) / float64(1024*1024*1024)
		return fmt.Sprintf("%0.2fG", gb)
	} else if bytes > (1024 * 1024) {
		mb := float64(bytes) / float64(1024*1024)
		return fmt.Sprintf("%0.2fM", mb)
	} else if bytes > (1024) {
		kb := float64(bytes) / float64(1024)
		return fmt.Sprintf("%0.2fK", kb)
	} else {
		return fmt.Sprintf("%dbytes", bytes)
	}
}

// Strips the "fg-"/"bg-" prefixes off attribute strings before handing them
// to the ui library.
var FG_BG_REGEXP = regexp.MustCompile("(fg|bg|FG|BG)-")

// Colors according to where value is in the min/max range
func percentToAttribute(value int, minValue int, maxValue int, invert bool) ui.Attribute {
	return ui.StringToAttribute(FG_BG_REGEXP.ReplaceAllLiteralString(
		percentToAttributeString(value, minValue, maxValue, invert), ""))
}

// Colors according to where value is in the min/max range.
// With invert set, low values are "good" (blue/green) and high values are
// "bad" (red); otherwise the scale runs the opposite way.
func percentToAttributeString(value int, minValue int, maxValue int, invert bool) string {
	span := float64(maxValue - minValue)
	fvalue := float64(value)

	// If invert is set...
	if invert {
		// "good" is close to min and "bad" is closer to max
		if fvalue > 0.90*span {
			return "fg-red,fg-bold"
		} else if fvalue > 0.75*span {
			return "fg-red"
		} else if fvalue > 0.50*span {
			return "fg-yellow,fg-bold"
		} else if fvalue > 0.25*span {
			return "fg-green"
		} else if fvalue > 0.05*span {
			return "fg-green,fg-bold"
		} else {
			return "fg-blue,fg-bold"
		}
	} else {
		// "good" is close to max and "bad" is closer to min
		if fvalue < 0.10*span {
			return "fg-red,fg-bold"
		} else if fvalue < 0.25*span {
			return "fg-red"
		} else if fvalue < 0.50*span {
			return "fg-yellow,fg-bold"
		} else if fvalue < 0.75*span {
			return "fg-green"
		} else if fvalue < 0.95*span {
			return "fg-green,fg-bold"
		} else {
			return "fg-blue,fg-bold"
		}
	}
}

////////////////////////////////////////////
// Utility: Command Exec
////////////////////////////////////////////

// execAndGetOutput runs the named command with args (optionally in
// workingDirectory), returning its captured stdout, exit code, and any error.
func execAndGetOutput(name string, workingDirectory *string, args ...string) (stdout string, exitCode int, err error) {
	cmd := exec.Command(name, args...)

	var out bytes.Buffer
	cmd.Stdout = &out

	if workingDirectory != nil {
		cmd.Dir = *workingDirectory
	}

	err = cmd.Run()

	// Getting the exit code is platform dependant, this code isn't portable
	exitCode = 0

	if err != nil {
		// Based on: https://stackoverflow.com/questions/10385551/get-exit-code-go
		if exitError, ok := err.(*exec.ExitError); ok {
			ws := exitError.Sys().(syscall.WaitStatus)
			exitCode = ws.ExitStatus()
		} else {
			// Failed, but on a platform where this conversion doesn't work...
			exitCode = 1
		}
	}

	stdout = out.String()

	return
}

////////////////////////////////////////////
// Utility: Paths
////////////////////////////////////////////

// normalizePath resolves symlinks and returns an absolute path for
// osPathname. On any resolution error it logs and returns the best path it
// has so far rather than failing.
func normalizePath(osPathname string) string {
	// Get absolute path with no symlinks
	nolinksPath, symErr := filepath.EvalSymlinks(osPathname)
	if symErr != nil {
		log.Printf("Error evaluating file symlinks (%v): %v", osPathname, symErr)
		return osPathname
	} else {
		fullName, pathErr := filepath.Abs(nolinksPath)
		if pathErr != nil {
			log.Printf("Error getting absolute path (%v): %v", nolinksPath, pathErr)
			return nolinksPath
		} else {
			return fullName
		}
	}
}

////////////////////////////////////////////
// Utility: 8-bit ANSI Colors
////////////////////////////////////////////

/**
 * Converts 8-bit color into 3/4-bit color.
 * https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit
 */
func Color8BitAsString(index int) string {
	retval := "fg-black"

	if index < 16 {
		// The 16 standard/bright colors map directly.
		switch index {
		case 0:
			retval = "fg-black"
		case 1:
			retval = "fg-red"
		case 2:
			retval = "fg-green"
		case 3:
			retval = "fg-yellow"
		case 4:
			retval = "fg-blue"
		case 5:
			retval = "fg-magenta"
		case 6:
			retval = "fg-cyan"
		case 7:
			retval = "fg-white"
		case 8:
			retval = "fg-black,fg-bold"
		case 9:
			retval = "fg-red,fg-bold"
		case 10:
			retval = "fg-green,fg-bold"
		case 11:
			retval = "fg-yellow,fg-bold"
		case 12:
			retval = "fg-blue,fg-bold"
		case 13:
			retval = "fg-magenta,fg-bold"
		case 14:
			retval = "fg-cyan,fg-bold"
		case 15:
			retval = "fg-white,fg-bold"
		}
	} else if index < 232 {
		// Palletized colors: 6x6x6 cube, each channel in 0..5; treat a
		// channel as "on" when it is >= 3.
		i := index - 16
		r := i / 36
		i -= r * 36
		g := i / 6
		i -= g * 6
		b := i

		smallColor := "fg-black"

		if r >= 3 {
			// Red on
			if g >= 3 {
				// Green on
				if b >= 3 {
					// Blue on
					smallColor = "fg-white,fg-bold"
				} else {
					// Blue off
					smallColor = "fg-yellow,fg-bold"
				}
			} else {
				// Green off
				if b >= 3 {
					// Blue on
					smallColor = "fg-magenta,fg-bold"
				} else {
					// Blue off
					smallColor = "fg-red,fg-bold"
				}
			}
		} else {
			// Red off
			if g >= 3 {
				// Green on
				if b >= 3 {
					// Blue on
					smallColor = "fg-cyan,fg-bold"
				} else {
					// Blue off
					smallColor = "fg-green,fg-bold"
				}
			} else {
				// Green off
				if b >= 3 {
					// Blue on
					smallColor = "fg-blue,fg-bold"
				} else {
					// Blue off
					smallColor = "fg-black"
				}
			}
		}

		retval = smallColor
	} else {
		// Grayscale colors: bucket the 24 gray levels into 4 approximations.
		if index < 238 {
			retval = "fg-black"
		} else if index < 244 {
			retval = "fg-white"
		} else if index < 250 {
			retval = "fg-black,fg-bold"
		} else if index < 256 {
			retval = "fg-white,fg-bold"
		}
	}

	return retval
}

//////////////////////////////////////////////
// Utility: Convert ANSI to (fg-color) syntax
//////////////////////////////////////////////

// Captures one SGR escape (the parameter list and the text it colors, up to
// the next reset).
var ANSI_COLOR_GROUPING_REGEXP = regexp.MustCompile(`\x1B\x5B(?P<sgr>(?:[0-9]+;?)+)m(?P<content>[^\x1B]+)\x1B\x5B0?m`)

// Direct SGR-parameter -> attribute-string mappings (bold plus the 8
// foreground and 8 background colors; backgrounds reuse "fg-" names here).
var ANSI_COLOR_MAPPINGS = map[int]string{
	1:  "fg-bold",
	30: "fg-black",
	31: "fg-red",
	32: "fg-green",
	33: "fg-yellow",
	34: "fg-blue",
	35: "fg-magenta",
	36: "fg-cyan",
	37: "fg-white",
	40: "fg-black",
	41: "fg-red",
	42: "fg-green",
	43: "fg-yellow",
	44: "fg-blue",
	45: "fg-magenta",
	46: "fg-cyan",
	47: "fg-white",
}

// palletizedColorToString maps a 256-color palette index to an attribute
// string via Color8BitAsString.
func palletizedColorToString(index int) string {
	return Color8BitAsString(index)
}

// rgbColorToString is a stub: 24-bit color is not supported yet, so it logs
// and falls back to white.
// FIX: removed a stray unbalanced ")" from the log message.
func rgbColorToString(r int, g int, b int) string {
	log.Printf("We don't know how to handle RGB color yet. Color: #%02x%02x%02x", r, g, b)
	return "fg-white"
}

// Returns how many elements were consumed and the color string
func SGR256ColorToString(parts []int) (int, string) {
	if len(parts) < 1 {
		log.Printf("Error parsing 256-color SGR code (bad length). Length: %d, Parts: %v", len(parts), parts)
		return 1, "fg-white"
	}

	switch parts[0] {
	case 2:
		// 2;r;g;b — truecolor
		if len(parts) < 4 {
			log.Printf("Error parsing 256-color SGR code (not enough numbers for RGB). Parts: %v", parts)
			return 1, "fg-white"
		} else {
			return 4, rgbColorToString(parts[1], parts[2], parts[3])
		}
	case 5:
		// 5;n — palette index
		if len(parts) < 2 {
			log.Printf("Error parsing 256-color SGR code (no index for palette). Parts: %v", parts)
			return 1, "fg-white"
		} else {
			return 2, palletizedColorToString(parts[1])
		}
	default:
		log.Printf("Error parsing 256-color SGR code (bad code). Code: %d, Parts: %v", parts[0], parts)
		return 1, "fg-white"
	}
}

// SGRToColorString converts a semicolon-separated SGR parameter list (e.g.
// "1;31" or "38;5;208") into a comma-separated attribute string.
func SGRToColorString(sgr string) string {
	parts := strings.Split(sgr, ";")
	iparts := make([]int, len(parts))
	for i, x := range parts {
		iparts[i], _ = strconv.Atoi(x)
	}

	i := 0
	retval := ""

	appendRet := func(str string) {
		if len(retval) > 0 {
			retval += "," + str
		} else {
			retval += str
		}
	}

	for i < len(iparts) {
		if val, ok := ANSI_COLOR_MAPPINGS[iparts[i]]; ok {
			// if it's in the map, use that
			appendRet(val)
		} else {
			switch iparts[i] {
			case 38:
				// Foreground palette or RGB
				relevantSlice := iparts[i+1:]
				consumed, color := SGR256ColorToString(relevantSlice)
				i += consumed
				appendRet(color)
			case 48:
				// Background palette or RGB
				relevantSlice := iparts[i+1:]
				consumed, color := SGR256ColorToString(relevantSlice)
				color = strings.Replace(color, "fg", "bg", -1)
				i += consumed
				appendRet(color)
			}
		}

		i++
	}

	return retval
}

// ConvertANSIToColorStrings rewrites SGR-colored spans as "[text](attrs)"
// markup and strips any remaining ANSI escapes.
func ConvertANSIToColorStrings(ansi string) string {
	retval := ANSI_COLOR_GROUPING_REGEXP.ReplaceAllStringFunc(ansi, func(matchStr string) string {
		// matchStr should be the regexp, let's match it again to get the groupings
		matches := ANSI_COLOR_GROUPING_REGEXP.FindStringSubmatch(matchStr)

		// 0 is the whole string, 1+ are match groups
		sgr := matches[1]
		content := matches[2]

		colorStr := SGRToColorString(sgr)
		coloredContent := fmt.Sprintf("[%v](%v)", content, colorStr)

		return coloredContent
	})

	return stripANSI(retval)
}
package main

import (
	"bytes"
	"encoding/json"
	"image"
	"image/jpeg"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"time"

	_ "image/png"

	"github.com/nfnt/resize"
	"github.com/serbe/ncp"
)

// config mirrors config.json: NNM-Club credentials, PostgreSQL settings,
// listen address, poster image directory, optional proxy, and debug flags.
type config struct {
	Nnm struct {
		Login    string `json:"login"`
		Password string `json:"password"`
	} `json:"nnmclub"`
	Db struct {
		User     string `json:"user"`
		Password string `json:"password"`
		Name     string `json:"name"`
		// Sslmode  string `json:"sslmode"`
	} `json:"postgresql"`
	Address string `json:"address"`
	ImgDir  string `json:"imgdir"`
	Proxy   string `json:"proxy"`
	Debug   bool   `json:"debug"`
	DebugDB bool   `json:"debugdb"`
}

// getConfig reads and decodes ./config.json. On a read error the zero config
// is returned along with the error.
func getConfig() (config, error) {
	c := config{}
	file, err := ioutil.ReadFile("./config.json")
	if err != nil {
		return c, err
	}
	err = json.Unmarshal(file, &c)
	return c, err
}

// func contain(args []string, str string) bool {
// 	result := false
// 	for _, item := range args {
// 		if item == str {
// 			result = true
// 			return result
// 		}
// 	}
// 	return result
// }

// exit terminates the process: status 0 when err is nil, 1 otherwise.
func exit(err error) {
	if err == nil {
		os.Exit(0)
	} else {
		os.Exit(1)
	}
}

// checkName replaces an all-uppercase film name (a common artifact in the
// source data) with the properly-cased name looked up via getMovieName.
// Lookup failures leave the name unchanged.
func (a *App) checkName(ncf ncp.Film) ncp.Film {
	if ncf.Name != strings.ToUpper(ncf.Name) {
		return ncf
	}
	name, err := a.getMovieName(ncf)
	if err == nil {
		ncf.Name = name
		return ncf
	}
	return ncf
}

// getFromURL fetches url with a 5-second timeout and returns the response
// body. The body is always closed, even when reading fails (previously the
// connection leaked on a read error).
func getFromURL(url string) ([]byte, error) {
	timeout := time.Duration(5) * time.Second
	client := http.Client{
		Timeout: timeout,
	}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	body, readErr := ioutil.ReadAll(resp.Body)
	closeErr := resp.Body.Close()
	if readErr != nil {
		return nil, readErr
	}
	return body, closeErr
}

// decodeImage decodes a JPEG/PNG byte slice and scales it to 150px wide
// (height preserved) with Lanczos resampling.
func decodeImage(body []byte) (image.Image, error) {
	var img image.Image
	img, _, err := image.Decode(bytes.NewReader(body))
	if err != nil {
		return img, err
	}
	n := resize.Resize(150, 0, img, resize.Lanczos3)
	return n, nil
}

// generateName derives a poster filename from its URL: strips "/" and ":",
// keeps at most the last 16 characters before the 4-character extension, and
// appends ".jpg". Names too short to carry an extension are used as-is
// (previously this sliced out of range and panicked).
func generateName(url string) string {
	name := strings.Replace(url, "/", "", -1)
	name = strings.Replace(name, ":", "", -1)
	if len(name) >= 20 {
		name = name[len(name)-20 : len(name)-4]
	} else if len(name) >= 4 {
		name = name[:len(name)-4]
	}
	name = name + ".jpg"
	return name
}

// getPoster downloads, resizes, and saves the poster at url into the app's
// image directory, returning the generated filename.
func (a *App) getPoster(url string) (string, error) {
	body, err := getFromURL(url)
	if err != nil {
		return "", err
	}
	img, err := decodeImage(body)
	if err != nil {
		return "", err
	}
	posterName := generateName(url)
	out, err := os.Create(a.hd + posterName)
	if err != nil {
		return "", err
	}
	err = jpeg.Encode(out, img, nil)
	_ = out.Close()
	return posterName, err
}

// existsFile reports whether path exists. Stat errors other than
// "not exist" (e.g. permission problems) are treated as "exists".
func existsFile(path string) bool {
	_, err := os.Stat(path)
	if err == nil {
		return true
	}
	if os.IsNotExist(err) {
		return false
	}
	return true
}

// findStringInSlice returns the index of s in list, or -1 if absent.
func findStringInSlice(list []string, s string) int {
	for i, b := range list {
		if b == s {
			return i
		}
	}
	return -1
}

// deleteFromSlice removes the first occurrence of s from list. If s is not
// present the slice is returned unchanged (previously this panicked on a
// -1 index from findStringInSlice).
func deleteFromSlice(list []string, s string) []string {
	sis := findStringInSlice(list, s)
	if sis < 0 {
		return list
	}
	list = append(list[:sis], list[sis+1:]...)
	return list
}

// createDir creates path (owner-only permissions) unless it already exists.
func createDir(path string) error {
	if existsFile(path) {
		return nil
	}
	return os.Mkdir(path, 0700)
}
package GoSDK import ( "bytes" "crypto/tls" "encoding/json" "fmt" "io/ioutil" "math/rand" "net/http" "net/url" "os" "os/exec" "strings" "time" cbErr "github.com/clearblade/go-utils/errors" mqttTypes "github.com/clearblade/mqtt_parsing" mqtt "github.com/clearblade/paho.mqtt.golang" ) var ( //CB_ADDR is the address of the ClearBlade Platform you are speaking with CB_ADDR = "https://platform.clearblade.com" //CB_MSG_ADDR is the messaging address you wish to speak to CB_MSG_ADDR = "platform.clearblade.com:1883" _HEADER_KEY_KEY = "ClearBlade-SystemKey" _HEADER_SECRET_KEY = "ClearBlade-SystemSecret" ) var tr = &http.Transport{ // TLSClientConfig: &tls.Config{InsecureSkipVerify: false}, TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } const ( createDevUser = iota createUser ) //Client is a convience interface for API consumers, if they want to use the same functions for both developer users and unprivleged users type Client interface { //session bookkeeping calls Authenticate() error Logout() error //data calls CreateData(string, interface{}) ([]interface{}, error) CreateDataByName(string, string, interface{}) ([]interface{}, error) InsertData(string, interface{}) error UpdateData(string, *Query, map[string]interface{}) error UpdateDataByName(string, string, *Query, map[string]interface{}) (UpdateResponse, error) GetData(string, *Query) (map[string]interface{}, error) GetDataByName(string, *Query) (map[string]interface{}, error) GetDataByKeyAndName(string, string, *Query) (map[string]interface{}, error) DeleteData(string, *Query) error GetItemCount(string) (int, error) GetDataTotal(string, *Query) (map[string]interface{}, error) GetColumns(string, string, string) ([]interface{}, error) //mqtt calls SetMqttClient(MqttClient) InitializeMQTT(string, string, int, *tls.Config, *LastWillPacket) error Publish(string, []byte, int) error Subscribe(string, int) (<-chan *mqttTypes.Publish, error) Unsubscribe(string) error Disconnect() error // Device calls 
GetDevices(string, *Query) ([]interface{}, error) GetDevice(string, string) (map[string]interface{}, error) CreateDevice(string, string, map[string]interface{}) (map[string]interface{}, error) UpdateDevice(string, string, map[string]interface{}) (map[string]interface{}, error) DeleteDevice(string, string) error UpdateDevices(string, *Query, map[string]interface{}) ([]interface{}, error) DeleteDevices(string, *Query) error // Adaptor calls GetAdaptors(string) ([]interface{}, error) GetAdaptor(string, string) (map[string]interface{}, error) CreateAdaptor(string, string, map[string]interface{}) (map[string]interface{}, error) UpdateAdaptor(string, string, map[string]interface{}) (map[string]interface{}, error) DeleteAdaptor(string, string) error DeployAdaptor(string, string, map[string]interface{}) (map[string]interface{}, error) ControlAdaptor(string, string, map[string]interface{}) (map[string]interface{}, error) // Adaptor File calls GetAdaptorFiles(string, string) ([]interface{}, error) GetAdaptorFile(string, string, string) (map[string]interface{}, error) CreateAdaptorFile(string, string, string, map[string]interface{}) (map[string]interface{}, error) UpdateAdaptorFile(string, string, string, map[string]interface{}) (map[string]interface{}, error) DeleteAdaptorFile(string, string, string) error } type MqttClient interface { mqtt.Client } //cbClient will supply various information that differs between privleged and unprivleged users //this interface is meant to be unexported type cbClient interface { credentials() ([][]string, error) //the inner slice is a tuple of "Header":"Value" preamble() string setToken(string) getToken() string getSystemInfo() (string, string) getMessageId() uint16 getHttpAddr() string getMqttAddr() string getEdgeProxy() *EdgeProxy } // receiver for methods that can be shared between users/devs/devices type client struct{} //UserClient is the type for users type UserClient struct { client UserToken string mrand *rand.Rand MQTTClient 
MqttClient SystemKey string SystemSecret string Email string Password string HttpAddr string MqttAddr string edgeProxy *EdgeProxy } type DeviceClient struct { client DeviceName string ActiveKey string DeviceToken string mrand *rand.Rand MQTTClient MqttClient SystemKey string SystemSecret string HttpAddr string MqttAddr string edgeProxy *EdgeProxy } //DevClient is the type for developers type DevClient struct { client DevToken string mrand *rand.Rand MQTTClient MqttClient Email string Password string HttpAddr string MqttAddr string edgeProxy *EdgeProxy } type EdgeProxy struct { SystemKey string EdgeName string } //CbReq is a wrapper around an HTTP request type CbReq struct { Body interface{} Method string Endpoint string QueryString string Headers map[string][]string HttpAddr string MqttAddr string } //CbResp is a wrapper around an HTTP response type CbResp struct { Body interface{} StatusCode int } func (u *UserClient) getHttpAddr() string { return u.HttpAddr } func (d *DevClient) getHttpAddr() string { return d.HttpAddr } func (u *UserClient) getMqttAddr() string { return u.MqttAddr } func (d *DevClient) getMqttAddr() string { return d.MqttAddr } func (u *UserClient) getEdgeProxy() *EdgeProxy { return u.edgeProxy } func (d *DevClient) getEdgeProxy() *EdgeProxy { return d.edgeProxy } func (d *DeviceClient) getEdgeProxy() *EdgeProxy { return d.edgeProxy } func (u *UserClient) SetMqttClient(c MqttClient) { u.MQTTClient = c } func (d *DevClient) SetMqttClient(c MqttClient) { d.MQTTClient = c } func (d *DeviceClient) SetMqttClient(c MqttClient) { d.MQTTClient = c } func NewDeviceClient(systemkey, systemsecret, deviceName, activeKey string) *DeviceClient { return &DeviceClient{ DeviceName: deviceName, DeviceToken: "", ActiveKey: activeKey, mrand: rand.New(rand.NewSource(time.Now().UnixNano())), MQTTClient: nil, SystemKey: systemkey, SystemSecret: systemsecret, HttpAddr: CB_ADDR, MqttAddr: CB_MSG_ADDR, } } //NewUserClient allocates a new UserClient struct func 
NewUserClient(systemkey, systemsecret, email, password string) *UserClient { return &UserClient{ UserToken: "", mrand: rand.New(rand.NewSource(time.Now().UnixNano())), MQTTClient: nil, SystemSecret: systemsecret, SystemKey: systemkey, Email: email, Password: password, HttpAddr: CB_ADDR, MqttAddr: CB_MSG_ADDR, } } //NewDevClient allocates a new DevClient struct func NewDevClient(email, password string) *DevClient { return &DevClient{ DevToken: "", mrand: rand.New(rand.NewSource(time.Now().UnixNano())), MQTTClient: nil, Email: email, Password: password, HttpAddr: CB_ADDR, MqttAddr: CB_MSG_ADDR, } } func NewDevClientWithToken(token, email string) *DevClient { return &DevClient{ DevToken: token, mrand: rand.New(rand.NewSource(time.Now().UnixNano())), MQTTClient: nil, Email: email, Password: "", HttpAddr: CB_ADDR, MqttAddr: CB_MSG_ADDR, } } func NewUserClientWithAddrs(httpAddr, mqttAddr, systemKey, systemSecret, email, password string) *UserClient { return &UserClient{ UserToken: "", mrand: rand.New(rand.NewSource(time.Now().UnixNano())), MQTTClient: nil, SystemSecret: systemSecret, SystemKey: systemKey, Email: email, Password: password, HttpAddr: httpAddr, MqttAddr: mqttAddr, } } func NewDevClientWithAddrs(httpAddr, mqttAddr, email, password string) *DevClient { return &DevClient{ DevToken: "", mrand: rand.New(rand.NewSource(time.Now().UnixNano())), MQTTClient: nil, Email: email, Password: password, HttpAddr: httpAddr, MqttAddr: mqttAddr, } } func NewDevClientWithTokenAndAddrs(httpAddr, mqttAddr, token, email string) *DevClient { return &DevClient{ DevToken: token, mrand: rand.New(rand.NewSource(time.Now().UnixNano())), MQTTClient: nil, Email: email, Password: "", HttpAddr: httpAddr, MqttAddr: mqttAddr, } } func NewDeviceClientWithAddrs(httpAddr, mqttAddr, systemkey, systemsecret, deviceName, activeKey string) *DeviceClient { return &DeviceClient{ DeviceName: deviceName, DeviceToken: "", ActiveKey: activeKey, mrand: rand.New(rand.NewSource(time.Now().UnixNano())), 
MQTTClient: nil, SystemKey: systemkey, SystemSecret: systemsecret, HttpAddr: httpAddr, MqttAddr: mqttAddr, } } func NewEdgeProxyDevClient(email, password, systemKey, edgeName string) (*DevClient, error) { d := NewDevClient(email, password) if err := d.startProxyToEdge(systemKey, edgeName); err != nil { return nil, err } return d, nil } func NewEdgeProxyUserClient(email, password, systemKey, systemSecret, edgeName string) (*UserClient, error) { u := NewUserClient(systemKey, systemSecret, email, password) if err := u.startProxyToEdge(systemKey, edgeName); err != nil { return nil, err } return u, nil } func NewEdgeProxyDeviceClient(systemkey, systemsecret, deviceName, activeKey, edgeName string) (*DeviceClient, error) { d := NewDeviceClient(systemkey, systemsecret, deviceName, activeKey) if err := d.startProxyToEdge(systemkey, edgeName); err != nil { return nil, err } return d, nil } func (u *UserClient) startProxyToEdge(systemKey, edgeName string) error { if systemKey == "" || edgeName == "" { return fmt.Errorf("systemKey and edgeName required") } u.edgeProxy = &EdgeProxy{systemKey, edgeName} return nil } func (u *UserClient) stopProxyToEdge() error { if u.edgeProxy == nil { return fmt.Errorf("Requests are not being proxied to edge") } u.edgeProxy = nil return nil } func (d *DevClient) startProxyToEdge(systemKey, edgeName string) error { if systemKey == "" || edgeName == "" { return fmt.Errorf("systemKey and edgeName required") } d.edgeProxy = &EdgeProxy{systemKey, edgeName} return nil } func (d *DevClient) stopProxyToEdge() error { if d.edgeProxy == nil { return fmt.Errorf("No edge proxy active") } d.edgeProxy = nil return nil } func (d *DeviceClient) startProxyToEdge(systemKey, edgeName string) error { if systemKey == "" || edgeName == "" { return fmt.Errorf("systemKey and edgeName required") } d.edgeProxy = &EdgeProxy{systemKey, edgeName} return nil } func (d *DeviceClient) stopProxyToEdge() error { if d.edgeProxy == nil { return fmt.Errorf("No edge proxy active") 
} d.edgeProxy = nil return nil } //Authenticate retrieves a token from the specified Clearblade Platform func (u *UserClient) Authenticate() error { return authenticate(u, u.Email, u.Password) } func (u *UserClient) AuthAnon() error { return authAnon(u) } //Authenticate retrieves a token from the specified Clearblade Platform func (d *DevClient) Authenticate() error { return authenticate(d, d.Email, d.Password) } //Register creates a new user func (u *UserClient) Register(username, password string) error { if u.UserToken == "" { return fmt.Errorf("Must be logged in to create users") } _, err := register(u, createUser, username, password, u.SystemKey, u.SystemSecret, "", "", "", "") return err } //RegisterUser creates a new user, returning the body of the response. func (u *UserClient) RegisterUser(username, password string) (map[string]interface{}, error) { if u.UserToken == "" { return nil, fmt.Errorf("Must be logged in to create users") } resp, err := register(u, createUser, username, password, u.SystemKey, u.SystemSecret, "", "", "", "") if err != nil { return nil, err } return resp, nil } //Registers a new developer func (d *DevClient) Register(username, password, fname, lname, org string) error { resp, err := register(d, createDevUser, username, password, "", "", fname, lname, org, "") if err != nil { return err } else { d.DevToken = resp["dev_token"].(string) return nil } } func (d *DevClient) RegisterNewUser(username, password, systemkey, systemsecret string) (map[string]interface{}, error) { if d.DevToken == "" { return nil, fmt.Errorf("Must authenticate first") } return register(d, createUser, username, password, systemkey, systemsecret, "", "", "", "") } //Register creates a new developer user func (d *DevClient) RegisterDevUser(username, password, fname, lname, org string) (map[string]interface{}, error) { resp, err := register(d, createDevUser, username, password, "", "", fname, lname, org, "") if err != nil { return nil, err } return resp, nil } 
//Register creates a new developer user func (d *DevClient) RegisterDevUserWithKey(username, password, fname, lname, org, key string) (map[string]interface{}, error) { resp, err := register(d, createDevUser, username, password, "", "", fname, lname, org, key) if err != nil { return nil, err } return resp, nil } //Logout ends the session func (u *UserClient) Logout() error { return logout(u) } //Logout ends the session func (d *DevClient) Logout() error { return logout(d) } //Check Auth of Developer func (d *DevClient) CheckAuth() error { return checkAuth(d) } func checkAuth(c cbClient) error { creds, err := c.credentials() if err != nil { return err } //log.Println("Checking user auth") resp, err := post(c, c.preamble()+"/checkauth", nil, creds, nil) if err != nil { return err } body := resp.Body.(map[string]interface{}) if body["is_authenticated"] != nil && body["is_authenticated"].(bool) { return nil } if resp.StatusCode != 200 { return cbErr.CreateResponseFromMap(resp.Body) } return nil } //Below are some shared functions func authenticate(c cbClient, username, password string) error { var creds [][]string switch c.(type) { case *UserClient: var err error creds, err = c.credentials() if err != nil { return err } case *DevClient: } resp, err := post(c, c.preamble()+"/auth", map[string]interface{}{ "email": username, "password": password, }, creds, nil) if err != nil { return err } if resp.StatusCode != 200 { return cbErr.CreateResponseFromMap(resp.Body) } var token string = "" switch c.(type) { case *UserClient: token = resp.Body.(map[string]interface{})["user_token"].(string) case *DevClient: token = resp.Body.(map[string]interface{})["dev_token"].(string) } if token == "" { return fmt.Errorf("Token not present i response from platform %+v", resp.Body) } c.setToken(token) return nil } func authAnon(c cbClient) error { creds, err := c.credentials() if err != nil { return fmt.Errorf("Invalid client: %+s", err.Error()) } resp, err := post(c, c.preamble()+"/anon", 
nil, creds, nil) if err != nil { return fmt.Errorf("Error retrieving anon user token: %s", err.Error()) } if resp.StatusCode != 200 { return cbErr.CreateResponseFromMap(resp.Body) } token := resp.Body.(map[string]interface{})["user_token"].(string) if token == "" { return fmt.Errorf("Token not present in response from platform %+v", resp.Body) } c.setToken(token) return nil } func register(c cbClient, kind int, username, password, syskey, syssec, fname, lname, org, key string) (map[string]interface{}, error) { payload := map[string]interface{}{ "email": username, "password": password, } var endpoint string headers := make(map[string][]string) var creds [][]string switch kind { case createDevUser: endpoint = "/admin/reg" payload["fname"] = fname payload["lname"] = lname payload["org"] = org if key != "" { payload["key"] = key } case createUser: switch c.(type) { case *DevClient: if syskey == "" { return nil, fmt.Errorf("System key required") } endpoint = fmt.Sprintf("/admin/user/%s", syskey) case *UserClient: if syskey == "" { return nil, fmt.Errorf("System key required") } if syssec == "" { return nil, fmt.Errorf("System secret required") } endpoint = "/api/v/1/user/reg" headers["Clearblade-Systemkey"] = []string{syskey} headers["Clearblade-Systemsecret"] = []string{syssec} default: return nil, fmt.Errorf("unreachable code detected") } var err error creds, err = c.credentials() if err != nil { return nil, err } default: return nil, fmt.Errorf("Cannot create that kind of user") } resp, err := post(c, endpoint, payload, creds, headers) if err != nil { return nil, err } if resp.StatusCode != 200 { return nil, cbErr.CreateResponseFromMap(resp.Body) } var token string = "" switch kind { case createDevUser: token = resp.Body.(map[string]interface{})["dev_token"].(string) case createUser: token = resp.Body.(map[string]interface{})["user_id"].(string) } if token == "" { return nil, fmt.Errorf("Token not present in response from platform %+v", resp.Body) } return 
resp.Body.(map[string]interface{}), nil } func logout(c cbClient) error { creds, err := c.credentials() if err != nil { return err } resp, err := post(c, c.preamble()+"/logout", nil, creds, nil) if err != nil { return err } if resp.StatusCode != 200 { return cbErr.CreateResponseFromMap(resp.Body) } return nil } func do(c cbClient, r *CbReq, creds [][]string) (*CbResp, error) { checkForEdgeProxy(c, r) var bodyToSend *bytes.Buffer if r.Body != nil { b, jsonErr := json.Marshal(r.Body) if jsonErr != nil { return nil, fmt.Errorf("JSON Encoding Error: %v", jsonErr) } bodyToSend = bytes.NewBuffer(b) } else { bodyToSend = nil } url := c.getHttpAddr() + r.Endpoint if r.QueryString != "" { url += "?" + r.QueryString } var req *http.Request var reqErr error if bodyToSend != nil { req, reqErr = http.NewRequest(r.Method, url, bodyToSend) } else { req, reqErr = http.NewRequest(r.Method, url, nil) } if reqErr != nil { return nil, fmt.Errorf("Request Creation Error: %s", reqErr) } req.Close = true for hed, val := range r.Headers { for _, vv := range val { req.Header.Add(hed, vv) } } for _, c := range creds { if len(c) != 2 { return nil, fmt.Errorf("Request Creation Error: Invalid credential header supplied") } req.Header.Add(c[0], c[1]) } cli := &http.Client{Transport: tr} resp, err := cli.Do(req) if err != nil { return nil, fmt.Errorf("Error Making Request: %v", err) } defer resp.Body.Close() body, readErr := ioutil.ReadAll(resp.Body) if readErr != nil { return nil, fmt.Errorf("Error Reading Response Body: %v", readErr) } var d interface{} if len(body) == 0 { return &CbResp{ Body: nil, StatusCode: resp.StatusCode, }, nil } buf := bytes.NewBuffer(body) dec := json.NewDecoder(buf) decErr := dec.Decode(&d) var bod interface{} if decErr != nil { // return nil, fmt.Errorf("JSON Decoding Error: %v\n With Body: %v\n", decErr, string(body)) bod = string(body) } switch d.(type) { case []interface{}: bod = d case map[string]interface{}: bod = d default: bod = string(body) } return &CbResp{ 
Body: bod, StatusCode: resp.StatusCode, }, nil } //standard http verbs func get(c cbClient, endpoint string, query map[string]string, creds [][]string, headers map[string][]string) (*CbResp, error) { req := &CbReq{ Body: nil, Method: "GET", Endpoint: endpoint, QueryString: query_to_string(query), Headers: headers, } return do(c, req, creds) } func post(c cbClient, endpoint string, body interface{}, creds [][]string, headers map[string][]string) (*CbResp, error) { req := &CbReq{ Body: body, Method: "POST", Endpoint: endpoint, QueryString: "", Headers: headers, } return do(c, req, creds) } func put(c cbClient, endpoint string, body interface{}, heads [][]string, headers map[string][]string) (*CbResp, error) { req := &CbReq{ Body: body, Method: "PUT", Endpoint: endpoint, QueryString: "", Headers: headers, } return do(c, req, heads) } func delete(c cbClient, endpoint string, query map[string]string, heads [][]string, headers map[string][]string) (*CbResp, error) { req := &CbReq{ Body: nil, Method: "DELETE", Endpoint: endpoint, Headers: headers, QueryString: query_to_string(query), } return do(c, req, heads) } func deleteWithBody(c cbClient, endpoint string, body interface{}, heads [][]string, headers map[string][]string) (*CbResp, error) { req := &CbReq{ Body: body, Method: "DELETE", Endpoint: endpoint, Headers: headers, QueryString: "", } return do(c, req, heads) } func query_to_string(query map[string]string) string { qryStr := "" for k, v := range query { qryStr += k + "=" + v + "&" } return strings.TrimSuffix(qryStr, "&") } func checkForEdgeProxy(c cbClient, r *CbReq) { edgeProxy := c.getEdgeProxy() if r.Headers == nil { r.Headers = map[string][]string{} } if edgeProxy != nil { r.Headers["Clearblade-Systemkey"] = []string{edgeProxy.SystemKey} r.Headers["Clearblade-Edge"] = []string{edgeProxy.EdgeName} } } func parseEdgeConfig(e EdgeConfig) *exec.Cmd { cmd := exec.Command("edge", "-edge-ip=localhost", "-edge-id="+e.EdgeName, "-edge-cookie="+e.EdgeToken, 
"-platform-ip="+e.PlatformIP, "-platform-port="+e.PlatformPort, "-parent-system="+e.ParentSystem, ) if p := e.HttpPort; p != "" { cmd.Args = append(cmd.Args, "-edge-listen-port="+p) } if p := e.MqttPort; p != "" { cmd.Args = append(cmd.Args, "-broker-tcp-port="+p) } if p := e.MqttTlsPort; p != "" { cmd.Args = append(cmd.Args, "-broker-tls-port="+p) } if p := e.WsPort; p != "" { cmd.Args = append(cmd.Args, "-broker-ws-port="+p) } if p := e.WssPort; p != "" { cmd.Args = append(cmd.Args, "-broker-wss-port="+p) } if p := e.AuthPort; p != "" { cmd.Args = append(cmd.Args, "-mqtt-auth-port="+p) } if p := e.AuthWsPort; p != "" { cmd.Args = append(cmd.Args, "-mqtt-ws-auth-port="+p) } if p := e.AdapterRootDir; p != "" { cmd.Args = append(cmd.Args, "-adaptors-root-dir="+p) } if e.Lean { cmd.Args = append(cmd.Args, "-lean-mode") } if e.Cache { cmd.Args = append(cmd.Args, "-local") } if p := e.LogLevel; p != "" { cmd.Args = append(cmd.Args, "-log-level="+p) } if e.Insecure { cmd.Args = append(cmd.Args, "-insecure=true") } if e.DevMode { cmd.Args = append(cmd.Args, "-development-mode=true") } if s := e.Stdout; s != nil { cmd.Stdout = s } else { cmd.Stdout = os.Stdout } if s := e.Stderr; s != nil { cmd.Stderr = s } else { cmd.Stderr = os.Stderr } return cmd } func makeSliceOfMaps(inIF interface{}) ([]map[string]interface{}, error) { switch inIF.(type) { case []interface{}: in := inIF.([]interface{}) rval := make([]map[string]interface{}, len(in)) for i, val := range in { valMap, ok := val.(map[string]interface{}) if !ok { return nil, fmt.Errorf("expected item to be a map, got %T", val) } rval[i] = valMap } return rval, nil case []map[string]interface{}: return inIF.([]map[string]interface{}), nil default: return nil, fmt.Errorf("Expected list of maps, got %T", inIF) } } func createQueryMap(query *Query) (map[string]string, error) { var qry map[string]string if query != nil { queryMap := query.serialize() queryBytes, err := json.Marshal(queryMap) if err != nil { return nil, err } 
qry = map[string]string{ "query": url.QueryEscape(string(queryBytes)), } } else { qry = nil } return qry, nil } added timeout to http calls package GoSDK import ( "bytes" "crypto/tls" "encoding/json" "fmt" "io/ioutil" "math/rand" "net/http" "net/url" "os" "os/exec" "strings" "time" cbErr "github.com/clearblade/go-utils/errors" mqttTypes "github.com/clearblade/mqtt_parsing" mqtt "github.com/clearblade/paho.mqtt.golang" ) var ( //CB_ADDR is the address of the ClearBlade Platform you are speaking with CB_ADDR = "https://platform.clearblade.com" //CB_MSG_ADDR is the messaging address you wish to speak to CB_MSG_ADDR = "platform.clearblade.com:1883" _HEADER_KEY_KEY = "ClearBlade-SystemKey" _HEADER_SECRET_KEY = "ClearBlade-SystemSecret" ) var tr = &http.Transport{ // TLSClientConfig: &tls.Config{InsecureSkipVerify: false}, TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } const ( createDevUser = iota createUser ) //Client is a convience interface for API consumers, if they want to use the same functions for both developer users and unprivleged users type Client interface { //session bookkeeping calls Authenticate() error Logout() error //data calls CreateData(string, interface{}) ([]interface{}, error) CreateDataByName(string, string, interface{}) ([]interface{}, error) InsertData(string, interface{}) error UpdateData(string, *Query, map[string]interface{}) error UpdateDataByName(string, string, *Query, map[string]interface{}) (UpdateResponse, error) GetData(string, *Query) (map[string]interface{}, error) GetDataByName(string, *Query) (map[string]interface{}, error) GetDataByKeyAndName(string, string, *Query) (map[string]interface{}, error) DeleteData(string, *Query) error GetItemCount(string) (int, error) GetDataTotal(string, *Query) (map[string]interface{}, error) GetColumns(string, string, string) ([]interface{}, error) //mqtt calls SetMqttClient(MqttClient) InitializeMQTT(string, string, int, *tls.Config, *LastWillPacket) error Publish(string, []byte, int) 
	// (continuation: "error" completes the Publish method signature begun
	// on the previous line.)
	error
	Subscribe(string, int) (<-chan *mqttTypes.Publish, error)
	Unsubscribe(string) error
	Disconnect() error

	// Device calls
	GetDevices(string, *Query) ([]interface{}, error)
	GetDevice(string, string) (map[string]interface{}, error)
	CreateDevice(string, string, map[string]interface{}) (map[string]interface{}, error)
	UpdateDevice(string, string, map[string]interface{}) (map[string]interface{}, error)
	DeleteDevice(string, string) error
	UpdateDevices(string, *Query, map[string]interface{}) ([]interface{}, error)
	DeleteDevices(string, *Query) error

	// Adaptor calls
	GetAdaptors(string) ([]interface{}, error)
	GetAdaptor(string, string) (map[string]interface{}, error)
	CreateAdaptor(string, string, map[string]interface{}) (map[string]interface{}, error)
	UpdateAdaptor(string, string, map[string]interface{}) (map[string]interface{}, error)
	DeleteAdaptor(string, string) error
	DeployAdaptor(string, string, map[string]interface{}) (map[string]interface{}, error)
	ControlAdaptor(string, string, map[string]interface{}) (map[string]interface{}, error)

	// Adaptor File calls
	GetAdaptorFiles(string, string) ([]interface{}, error)
	GetAdaptorFile(string, string, string) (map[string]interface{}, error)
	CreateAdaptorFile(string, string, string, map[string]interface{}) (map[string]interface{}, error)
	UpdateAdaptorFile(string, string, string, map[string]interface{}) (map[string]interface{}, error)
	DeleteAdaptorFile(string, string, string) error
}

// MqttClient is the SDK's MQTT client abstraction; it embeds the paho
// mqtt.Client interface.
type MqttClient interface {
	mqtt.Client
}

//cbClient will supply various information that differs between privleged and unprivleged users
//this interface is meant to be unexported
type cbClient interface {
	credentials() ([][]string, error) //the inner slice is a tuple of "Header":"Value"
	preamble() string
	setToken(string)
	getToken() string
	getSystemInfo() (string, string)
	getMessageId() uint16
	getHttpAddr() string
	getMqttAddr() string
	getEdgeProxy() *EdgeProxy
}

// receiver for methods that can be shared between users/devs/devices
type client struct{}

//UserClient is the type for users
type UserClient struct {
	client
	UserToken    string
	mrand        *rand.Rand
	MQTTClient   MqttClient
	SystemKey    string
	SystemSecret string
	Email        string
	Password     string
	HttpAddr     string
	MqttAddr     string
	edgeProxy    *EdgeProxy
}

// DeviceClient is the client type for devices that authenticate with a
// device name plus active key.
type DeviceClient struct {
	client
	DeviceName   string
	ActiveKey    string
	DeviceToken  string
	mrand        *rand.Rand
	MQTTClient   MqttClient
	SystemKey    string
	SystemSecret string
	HttpAddr     string
	MqttAddr     string
	edgeProxy    *EdgeProxy
}

//DevClient is the type for developers
type DevClient struct {
	client
	DevToken   string
	mrand      *rand.Rand
	MQTTClient MqttClient
	Email      string
	Password   string
	HttpAddr   string
	MqttAddr   string
	edgeProxy  *EdgeProxy
}

// EdgeProxy identifies an edge that client requests should be proxied
// through (see checkForEdgeProxy).
type EdgeProxy struct {
	SystemKey string
	EdgeName  string
}

//CbReq is a wrapper around an HTTP request
type CbReq struct {
	Body        interface{}
	Method      string
	Endpoint    string
	QueryString string
	Headers     map[string][]string
	HttpAddr    string
	MqttAddr    string
}

//CbResp is a wrapper around an HTTP response
type CbResp struct {
	Body       interface{}
	StatusCode int
}

// Simple accessors required by the cbClient interface.
func (u *UserClient) getHttpAddr() string {
	return u.HttpAddr
}

func (d *DevClient) getHttpAddr() string {
	return d.HttpAddr
}

func (u *UserClient) getMqttAddr() string {
	return u.MqttAddr
}

func (d *DevClient) getMqttAddr() string {
	return d.MqttAddr
}

func (u *UserClient) getEdgeProxy() *EdgeProxy {
	return u.edgeProxy
}

func (d *DevClient) getEdgeProxy() *EdgeProxy {
	return d.edgeProxy
}

func (d *DeviceClient) getEdgeProxy() *EdgeProxy {
	return d.edgeProxy
}

func (u *UserClient) SetMqttClient(c MqttClient) {
	u.MQTTClient = c
}

func (d *DevClient) SetMqttClient(c MqttClient) {
	d.MQTTClient = c
}

func (d *DeviceClient) SetMqttClient(c MqttClient) {
	d.MQTTClient = c
}

// NewDeviceClient allocates a DeviceClient pointed at the default platform
// addresses (CB_ADDR / CB_MSG_ADDR).
func NewDeviceClient(systemkey, systemsecret, deviceName, activeKey string) *DeviceClient {
	return &DeviceClient{
		DeviceName:  deviceName,
		DeviceToken: "",
		ActiveKey:   activeKey,
		mrand:       rand.New(rand.NewSource(time.Now().UnixNano())),
		MQTTClient:  nil,
		SystemKey:   systemkey,
		SystemSecret:
		// (continuation: value for the SystemSecret field begun on the
		// previous line.)
		systemsecret,
		HttpAddr: CB_ADDR,
		MqttAddr: CB_MSG_ADDR,
	}
}

//NewUserClient allocates a new UserClient struct
func NewUserClient(systemkey, systemsecret, email, password string) *UserClient {
	return &UserClient{
		UserToken:    "",
		mrand:        rand.New(rand.NewSource(time.Now().UnixNano())),
		MQTTClient:   nil,
		SystemSecret: systemsecret,
		SystemKey:    systemkey,
		Email:        email,
		Password:     password,
		HttpAddr:     CB_ADDR,
		MqttAddr:     CB_MSG_ADDR,
	}
}

//NewDevClient allocates a new DevClient struct
func NewDevClient(email, password string) *DevClient {
	return &DevClient{
		DevToken:   "",
		mrand:      rand.New(rand.NewSource(time.Now().UnixNano())),
		MQTTClient: nil,
		Email:      email,
		Password:   password,
		HttpAddr:   CB_ADDR,
		MqttAddr:   CB_MSG_ADDR,
	}
}

// NewDevClientWithToken allocates a DevClient that reuses an existing dev
// token instead of a password.
func NewDevClientWithToken(token, email string) *DevClient {
	return &DevClient{
		DevToken:   token,
		mrand:      rand.New(rand.NewSource(time.Now().UnixNano())),
		MQTTClient: nil,
		Email:      email,
		Password:   "",
		HttpAddr:   CB_ADDR,
		MqttAddr:   CB_MSG_ADDR,
	}
}

// NewUserClientWithAddrs is NewUserClient with explicit HTTP and MQTT
// addresses.
func NewUserClientWithAddrs(httpAddr, mqttAddr, systemKey, systemSecret, email, password string) *UserClient {
	return &UserClient{
		UserToken:    "",
		mrand:        rand.New(rand.NewSource(time.Now().UnixNano())),
		MQTTClient:   nil,
		SystemSecret: systemSecret,
		SystemKey:    systemKey,
		Email:        email,
		Password:     password,
		HttpAddr:     httpAddr,
		MqttAddr:     mqttAddr,
	}
}

// NewDevClientWithAddrs is NewDevClient with explicit HTTP and MQTT
// addresses.
func NewDevClientWithAddrs(httpAddr, mqttAddr, email, password string) *DevClient {
	return &DevClient{
		DevToken:   "",
		mrand:      rand.New(rand.NewSource(time.Now().UnixNano())),
		MQTTClient: nil,
		Email:      email,
		Password:   password,
		HttpAddr:   httpAddr,
		MqttAddr:   mqttAddr,
	}
}

// NewDevClientWithTokenAndAddrs is NewDevClientWithToken with explicit
// addresses.
func NewDevClientWithTokenAndAddrs(httpAddr, mqttAddr, token, email string) *DevClient {
	return &DevClient{
		DevToken:   token,
		mrand:      rand.New(rand.NewSource(time.Now().UnixNano())),
		MQTTClient: nil,
		Email:      email,
		Password:   "",
		HttpAddr:   httpAddr,
		MqttAddr:   mqttAddr,
	}
}

// NewDeviceClientWithAddrs is NewDeviceClient with explicit addresses.
func NewDeviceClientWithAddrs(httpAddr, mqttAddr, systemkey, systemsecret, deviceName, activeKey string) *DeviceClient {
	return &DeviceClient{
		DeviceName:   deviceName,
		DeviceToken:  "",
		ActiveKey:    activeKey,
		mrand:        rand.New(rand.NewSource(time.Now().UnixNano())),
		MQTTClient:   nil,
		SystemKey:    systemkey,
		SystemSecret: systemsecret,
		HttpAddr:     httpAddr,
		MqttAddr:     mqttAddr,
	}
}

// NewEdgeProxyDevClient creates a DevClient whose requests are proxied
// through the named edge.
func NewEdgeProxyDevClient(email, password, systemKey, edgeName string) (*DevClient, error) {
	d := NewDevClient(email, password)
	if err := d.startProxyToEdge(systemKey, edgeName); err != nil {
		return nil, err
	}
	return d, nil
}

// NewEdgeProxyUserClient creates a UserClient whose requests are proxied
// through the named edge.
func NewEdgeProxyUserClient(email, password, systemKey, systemSecret, edgeName string) (*UserClient, error) {
	u := NewUserClient(systemKey, systemSecret, email, password)
	if err := u.startProxyToEdge(systemKey, edgeName); err != nil {
		return nil, err
	}
	return u, nil
}

// NewEdgeProxyDeviceClient creates a DeviceClient whose requests are
// proxied through the named edge.
func NewEdgeProxyDeviceClient(systemkey, systemsecret, deviceName, activeKey, edgeName string) (*DeviceClient, error) {
	d := NewDeviceClient(systemkey, systemsecret, deviceName, activeKey)
	if err := d.startProxyToEdge(systemkey, edgeName); err != nil {
		return nil, err
	}
	return d, nil
}

// startProxyToEdge enables edge proxying for subsequent requests.
func (u *UserClient) startProxyToEdge(systemKey, edgeName string) error {
	if systemKey == "" || edgeName == "" {
		return fmt.Errorf("systemKey and edgeName required")
	}
	u.edgeProxy = &EdgeProxy{systemKey, edgeName}
	return nil
}

func (u *UserClient) stopProxyToEdge() error {
	if u.edgeProxy == nil {
		return fmt.Errorf("Requests are not being proxied to edge")
	}
	u.edgeProxy = nil
	return nil
}

func (d *DevClient) startProxyToEdge(systemKey, edgeName string) error {
	if systemKey == "" || edgeName == "" {
		return fmt.Errorf("systemKey and edgeName required")
	}
	d.edgeProxy = &EdgeProxy{systemKey, edgeName}
	return nil
}

func (d *DevClient) stopProxyToEdge() error {
	if d.edgeProxy == nil {
		return fmt.Errorf("No edge proxy active")
	}
	d.edgeProxy = nil
	return nil
}

func (d *DeviceClient) startProxyToEdge(systemKey, edgeName string) error {
	if systemKey == "" || edgeName == "" {
		return fmt.Errorf("systemKey and edgeName required")
	}
	d.edgeProxy = &EdgeProxy{systemKey, edgeName}
	return nil
}

func (d *DeviceClient) stopProxyToEdge() error {
	if d.edgeProxy == nil {
		return fmt.Errorf("No edge proxy active")
	}
	d.edgeProxy = nil
	return nil
}

//Authenticate retrieves a token from the specified Clearblade Platform
func (u *UserClient) Authenticate() error {
	return authenticate(u, u.Email, u.Password)
}

// AuthAnon authenticates as an anonymous user of the system.
func (u *UserClient) AuthAnon() error {
	return authAnon(u)
}

//Authenticate retrieves a token from the specified Clearblade Platform
func (d *DevClient) Authenticate() error {
	return authenticate(d, d.Email, d.Password)
}

//Register creates a new user
func (u *UserClient) Register(username, password string) error {
	if u.UserToken == "" {
		return fmt.Errorf("Must be logged in to create users")
	}
	_, err := register(u, createUser, username, password, u.SystemKey, u.SystemSecret, "", "", "", "")
	return err
}

//RegisterUser creates a new user, returning the body of the response.
func (u *UserClient) RegisterUser(username, password string) (map[string]interface{}, error) {
	if u.UserToken == "" {
		return nil, fmt.Errorf("Must be logged in to create users")
	}
	resp, err := register(u, createUser, username, password, u.SystemKey, u.SystemSecret, "", "", "", "")
	if err != nil {
		return nil, err
	}
	return resp, nil
}

//Registers a new developer
func (d *DevClient) Register(username, password, fname, lname, org string) error {
	resp, err := register(d, createDevUser, username, password, "", "", fname, lname, org, "")
	if err != nil {
		return err
	} else {
		d.DevToken = resp["dev_token"].(string)
		return nil
	}
}

// RegisterNewUser creates a user in the given system; the developer must
// already be authenticated.
func (d *DevClient) RegisterNewUser(username, password, systemkey, systemsecret string) (map[string]interface{}, error) {
	if d.DevToken == "" {
		return nil, fmt.Errorf("Must authenticate first")
	}
	return register(d, createUser, username, password, systemkey, systemsecret, "", "", "", "")
}

//Register creates a new developer user
func (d *DevClient) RegisterDevUser(username, password, fname, lname, org string) (map[string]interface{}, error) {
	resp, err := register(d,
createDevUser, username, password, "", "", fname, lname, org, "") if err != nil { return nil, err } return resp, nil } //Register creates a new developer user func (d *DevClient) RegisterDevUserWithKey(username, password, fname, lname, org, key string) (map[string]interface{}, error) { resp, err := register(d, createDevUser, username, password, "", "", fname, lname, org, key) if err != nil { return nil, err } return resp, nil } //Logout ends the session func (u *UserClient) Logout() error { return logout(u) } //Logout ends the session func (d *DevClient) Logout() error { return logout(d) } //Check Auth of Developer func (d *DevClient) CheckAuth() error { return checkAuth(d) } func checkAuth(c cbClient) error { creds, err := c.credentials() if err != nil { return err } //log.Println("Checking user auth") resp, err := post(c, c.preamble()+"/checkauth", nil, creds, nil) if err != nil { return err } body := resp.Body.(map[string]interface{}) if body["is_authenticated"] != nil && body["is_authenticated"].(bool) { return nil } if resp.StatusCode != 200 { return cbErr.CreateResponseFromMap(resp.Body) } return nil } //Below are some shared functions func authenticate(c cbClient, username, password string) error { var creds [][]string switch c.(type) { case *UserClient: var err error creds, err = c.credentials() if err != nil { return err } case *DevClient: } resp, err := post(c, c.preamble()+"/auth", map[string]interface{}{ "email": username, "password": password, }, creds, nil) if err != nil { return err } if resp.StatusCode != 200 { return cbErr.CreateResponseFromMap(resp.Body) } var token string = "" switch c.(type) { case *UserClient: token = resp.Body.(map[string]interface{})["user_token"].(string) case *DevClient: token = resp.Body.(map[string]interface{})["dev_token"].(string) } if token == "" { return fmt.Errorf("Token not present i response from platform %+v", resp.Body) } c.setToken(token) return nil } func authAnon(c cbClient) error { creds, err := 
c.credentials() if err != nil { return fmt.Errorf("Invalid client: %+s", err.Error()) } resp, err := post(c, c.preamble()+"/anon", nil, creds, nil) if err != nil { return fmt.Errorf("Error retrieving anon user token: %s", err.Error()) } if resp.StatusCode != 200 { return cbErr.CreateResponseFromMap(resp.Body) } token := resp.Body.(map[string]interface{})["user_token"].(string) if token == "" { return fmt.Errorf("Token not present in response from platform %+v", resp.Body) } c.setToken(token) return nil } func register(c cbClient, kind int, username, password, syskey, syssec, fname, lname, org, key string) (map[string]interface{}, error) { payload := map[string]interface{}{ "email": username, "password": password, } var endpoint string headers := make(map[string][]string) var creds [][]string switch kind { case createDevUser: endpoint = "/admin/reg" payload["fname"] = fname payload["lname"] = lname payload["org"] = org if key != "" { payload["key"] = key } case createUser: switch c.(type) { case *DevClient: if syskey == "" { return nil, fmt.Errorf("System key required") } endpoint = fmt.Sprintf("/admin/user/%s", syskey) case *UserClient: if syskey == "" { return nil, fmt.Errorf("System key required") } if syssec == "" { return nil, fmt.Errorf("System secret required") } endpoint = "/api/v/1/user/reg" headers["Clearblade-Systemkey"] = []string{syskey} headers["Clearblade-Systemsecret"] = []string{syssec} default: return nil, fmt.Errorf("unreachable code detected") } var err error creds, err = c.credentials() if err != nil { return nil, err } default: return nil, fmt.Errorf("Cannot create that kind of user") } resp, err := post(c, endpoint, payload, creds, headers) if err != nil { return nil, err } if resp.StatusCode != 200 { return nil, cbErr.CreateResponseFromMap(resp.Body) } var token string = "" switch kind { case createDevUser: token = resp.Body.(map[string]interface{})["dev_token"].(string) case createUser: token = 
		// (continuation: extract the new user's id from the register
		// response body begun on the previous line.)
		resp.Body.(map[string]interface{})["user_id"].(string)
	}
	if token == "" {
		return nil, fmt.Errorf("Token not present in response from platform %+v", resp.Body)
	}
	return resp.Body.(map[string]interface{}), nil
}

// logout invalidates the client's session token server-side.
func logout(c cbClient) error {
	creds, err := c.credentials()
	if err != nil {
		return err
	}
	resp, err := post(c, c.preamble()+"/logout", nil, creds, nil)
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return cbErr.CreateResponseFromMap(resp.Body)
	}
	return nil
}

// do executes a single HTTP request described by r, attaching the supplied
// credential headers, and wraps the decoded response in a CbResp. Bodies
// that fail JSON decoding are returned as raw strings.
func do(c cbClient, r *CbReq, creds [][]string) (*CbResp, error) {
	checkForEdgeProxy(c, r)
	var bodyToSend *bytes.Buffer
	if r.Body != nil {
		b, jsonErr := json.Marshal(r.Body)
		if jsonErr != nil {
			return nil, fmt.Errorf("JSON Encoding Error: %v", jsonErr)
		}
		bodyToSend = bytes.NewBuffer(b)
	} else {
		bodyToSend = nil
	}
	url := c.getHttpAddr() + r.Endpoint
	if r.QueryString != "" {
		url += "?" + r.QueryString
	}
	var req *http.Request
	var reqErr error
	if bodyToSend != nil {
		req, reqErr = http.NewRequest(r.Method, url, bodyToSend)
	} else {
		req, reqErr = http.NewRequest(r.Method, url, nil)
	}
	if reqErr != nil {
		return nil, fmt.Errorf("Request Creation Error: %s", reqErr)
	}
	req.Close = true
	for hed, val := range r.Headers {
		for _, vv := range val {
			req.Header.Add(hed, vv)
		}
	}
	// Each credential is a ["Header", "Value"] pair.
	// Note: the loop variable c shadows the cbClient parameter of the
	// same name; the parameter is not used past this point.
	for _, c := range creds {
		if len(c) != 2 {
			return nil, fmt.Errorf("Request Creation Error: Invalid credential header supplied")
		}
		req.Header.Add(c[0], c[1])
	}
	// 30-second timeout so SDK calls cannot hang indefinitely.
	cli := &http.Client{
		Transport: tr,
		Timeout:   time.Second * 30,
	}
	resp, err := cli.Do(req)
	if err != nil {
		return nil, fmt.Errorf("Error Making Request: %v", err)
	}
	defer resp.Body.Close()
	body, readErr := ioutil.ReadAll(resp.Body)
	if readErr != nil {
		return nil, fmt.Errorf("Error Reading Response Body: %v", readErr)
	}
	var d interface{}
	if len(body) == 0 {
		return &CbResp{
			Body:       nil,
			StatusCode: resp.StatusCode,
		}, nil
	}
	buf := bytes.NewBuffer(body)
	dec := json.NewDecoder(buf)
	decErr := dec.Decode(&d)
	var bod interface{}
	if decErr != nil {
		// return nil, fmt.Errorf("JSON Decoding Error: %v\n With Body: %v\n", decErr, string(body))
		bod = string(body)
	}
	switch d.(type) {
	case []interface{}:
		bod = d
	case map[string]interface{}:
		bod = d
	default:
		bod = string(body)
	}
	return &CbResp{
		Body:       bod,
		StatusCode: resp.StatusCode,
	}, nil
}

//standard http verbs

func get(c cbClient, endpoint string, query map[string]string, creds [][]string, headers map[string][]string) (*CbResp, error) {
	req := &CbReq{
		Body:        nil,
		Method:      "GET",
		Endpoint:    endpoint,
		QueryString: query_to_string(query),
		Headers:     headers,
	}
	return do(c, req, creds)
}

func post(c cbClient, endpoint string, body interface{}, creds [][]string, headers map[string][]string) (*CbResp, error) {
	req := &CbReq{
		Body:        body,
		Method:      "POST",
		Endpoint:    endpoint,
		QueryString: "",
		Headers:     headers,
	}
	return do(c, req, creds)
}

func put(c cbClient, endpoint string, body interface{}, heads [][]string, headers map[string][]string) (*CbResp, error) {
	req := &CbReq{
		Body:        body,
		Method:      "PUT",
		Endpoint:    endpoint,
		QueryString: "",
		Headers:     headers,
	}
	return do(c, req, heads)
}

func delete(c cbClient, endpoint string, query map[string]string, heads [][]string, headers map[string][]string) (*CbResp, error) {
	req := &CbReq{
		Body:        nil,
		Method:      "DELETE",
		Endpoint:    endpoint,
		Headers:     headers,
		QueryString: query_to_string(query),
	}
	return do(c, req, heads)
}

func deleteWithBody(c cbClient, endpoint string, body interface{}, heads [][]string, headers map[string][]string) (*CbResp, error) {
	req := &CbReq{
		Body:        body,
		Method:      "DELETE",
		Endpoint:    endpoint,
		Headers:     headers,
		QueryString: "",
	}
	return do(c, req, heads)
}

// query_to_string renders a query map as "k=v&k2=v2"; map iteration order,
// and therefore parameter order, is unspecified.
func query_to_string(query map[string]string) string {
	qryStr := ""
	for k, v := range query {
		qryStr += k + "=" + v + "&"
	}
	return strings.TrimSuffix(qryStr, "&")
}

// checkForEdgeProxy adds the edge-proxy headers to the request when the
// client has an edge proxy configured.
func checkForEdgeProxy(c cbClient, r *CbReq) {
	edgeProxy := c.getEdgeProxy()
	if r.Headers == nil {
		r.Headers = map[string][]string{}
	}
	if edgeProxy != nil {
		r.Headers["Clearblade-Systemkey"] = []string{edgeProxy.SystemKey}
		// (continuation of checkForEdgeProxy from the previous line.)
		r.Headers["Clearblade-Edge"] = []string{edgeProxy.EdgeName}
	}
}

// parseEdgeConfig builds the exec.Cmd used to launch a local "edge" binary
// from an EdgeConfig. Required identity/platform flags are always passed;
// optional ports and feature flags are appended only when set. Output is
// wired to e.Stdout/e.Stderr, defaulting to this process's stdout/stderr.
func parseEdgeConfig(e EdgeConfig) *exec.Cmd {
	cmd := exec.Command("edge",
		"-edge-ip=localhost",
		"-edge-id="+e.EdgeName,
		"-edge-cookie="+e.EdgeToken,
		"-platform-ip="+e.PlatformIP,
		"-platform-port="+e.PlatformPort,
		"-parent-system="+e.ParentSystem,
	)
	if p := e.HttpPort; p != "" {
		cmd.Args = append(cmd.Args, "-edge-listen-port="+p)
	}
	if p := e.MqttPort; p != "" {
		cmd.Args = append(cmd.Args, "-broker-tcp-port="+p)
	}
	if p := e.MqttTlsPort; p != "" {
		cmd.Args = append(cmd.Args, "-broker-tls-port="+p)
	}
	if p := e.WsPort; p != "" {
		cmd.Args = append(cmd.Args, "-broker-ws-port="+p)
	}
	if p := e.WssPort; p != "" {
		cmd.Args = append(cmd.Args, "-broker-wss-port="+p)
	}
	if p := e.AuthPort; p != "" {
		cmd.Args = append(cmd.Args, "-mqtt-auth-port="+p)
	}
	if p := e.AuthWsPort; p != "" {
		cmd.Args = append(cmd.Args, "-mqtt-ws-auth-port="+p)
	}
	if p := e.AdapterRootDir; p != "" {
		cmd.Args = append(cmd.Args, "-adaptors-root-dir="+p)
	}
	if e.Lean {
		cmd.Args = append(cmd.Args, "-lean-mode")
	}
	if e.Cache {
		cmd.Args = append(cmd.Args, "-local")
	}
	if p := e.LogLevel; p != "" {
		cmd.Args = append(cmd.Args, "-log-level="+p)
	}
	if e.Insecure {
		cmd.Args = append(cmd.Args, "-insecure=true")
	}
	if e.DevMode {
		cmd.Args = append(cmd.Args, "-development-mode=true")
	}
	if s := e.Stdout; s != nil {
		cmd.Stdout = s
	} else {
		cmd.Stdout = os.Stdout
	}
	if s := e.Stderr; s != nil {
		cmd.Stderr = s
	} else {
		cmd.Stderr = os.Stderr
	}
	return cmd
}

// makeSliceOfMaps normalizes a decoded JSON list into []map[string]interface{}.
// It accepts either []interface{} (each element must itself be a map) or an
// already-typed []map[string]interface{}; anything else is an error.
func makeSliceOfMaps(inIF interface{}) ([]map[string]interface{}, error) {
	switch inIF.(type) {
	case []interface{}:
		in := inIF.([]interface{})
		rval := make([]map[string]interface{}, len(in))
		for i, val := range in {
			valMap, ok := val.(map[string]interface{})
			if !ok {
				return nil, fmt.Errorf("expected item to be a map, got %T", val)
			}
			rval[i] = valMap
		}
		return rval, nil
	case []map[string]interface{}:
		return inIF.([]map[string]interface{}), nil
	default:
		return nil, fmt.Errorf("Expected list of maps, got %T", inIF)
	}
}
func createQueryMap(query *Query) (map[string]string, error) { var qry map[string]string if query != nil { queryMap := query.serialize() queryBytes, err := json.Marshal(queryMap) if err != nil { return nil, err } qry = map[string]string{ "query": url.QueryEscape(string(queryBytes)), } } else { qry = nil } return qry, nil }
package dukedb import ( "encoding/json" "errors" "fmt" "reflect" "sort" "strconv" "strings" "time" "github.com/theduke/go-apperror" ) /** * String utils. */ func Pluralize(str string) string { if str[len(str)-1] == 'y' { str = str[1:len(str)-1] + "ie" } if str[len(str)-1] != 's' { str += "s" } return str } // Convert a CamelCase string to underscore version, eg camel_case. func CamelCaseToUnderscore(str string) string { u := "" didChange := false for i, c := range str { if c >= 65 && c <= 90 { if i == 0 { u += string(byte(c + 32)) didChange = true continue } if !didChange { u += "_" didChange = true } u += string(byte(c + 32)) } else { u += string(byte(c)) didChange = false } } return u } func LowerCaseFirst(str string) string { if len(str) == 0 { return "" } newStr := "" doReplace := true for _, c := range str { x := int(c) if doReplace { if x >= 65 && x <= 90 { newStr += string(x + 32) } else { doReplace = false newStr += string(c) } } else { newStr += string(c) } } return newStr } // Given the internal name of a filter like "eq" or "lte", return a SQL operator like = or <. // WARNING: panics if an unsupported filter is given. func FilterToSqlCondition(filter string) (string, apperror.Error) { typ := "" switch filter { case "eq": typ = "=" case "neq": typ = "!=" case "lt": typ = "<" case "lte": typ = "<=" case "gt": typ = ">" case "gte": typ = ">=" case "like": typ = "LIKE" case "in": typ = "IN" default: return "", &apperror.Err{ Code: "unknown_filter", Message: "Unknown filter '" + filter + "'", } } return typ, nil } /** * Generic interface variable handling/comparison functions. 
*/ func IsNumericKind(kind reflect.Kind) bool { switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64: return true default: return false } } func IsZero(val interface{}) bool { if val == nil { return true } reflVal := reflect.ValueOf(val) reflType := reflVal.Type() if reflType.Kind() == reflect.Slice { return reflVal.Len() < 1 } if reflType.Kind() == reflect.Map { return reflVal.Len() < 1 } return val == reflect.Zero(reflType).Interface() } func SaveConvert(val interface{}, typ reflect.Type) interface{} { defer func() { recover() }() return reflect.ValueOf(val).Convert(typ).Interface() } // Convert converts an arbitrary interface value to a different type. // The rawType argument may either be a reflect.Type or an acutal instance of the type. func Convert(value interface{}, rawType interface{}) (interface{}, error) { var typ reflect.Type if t, ok := rawType.(reflect.Type); ok { typ = t } else { typ = reflect.TypeOf(rawType) } kind := typ.Kind() reflVal := reflect.ValueOf(value) valType := reflect.TypeOf(value) valKind := valType.Kind() if typ == valType { // Same type, nothing to convert. return value, nil } isPointer := kind == reflect.Ptr var pointerType reflect.Type if isPointer { pointerType = typ.Elem() } // If target value is a pointer and the value is not (and the types match), // create a new pointer pointing to the value. if isPointer && valType == pointerType { newVal := reflect.New(valType) newVal.Elem().Set(reflVal) return newVal.Interface(), nil } // Parse dates into time.Time. 
isTime := kind == reflect.Struct && typ.PkgPath() == "time" && typ.Name() == "Time" isTimePointer := isPointer && pointerType.Kind() == reflect.Struct && pointerType.PkgPath() == "time" && pointerType.Name() == "Time" if (isTime || isTimePointer) && valKind == reflect.String { date, err := time.Parse(time.RFC3339, value.(string)) if err != nil { return nil, apperror.Wrap(err, "time_parse_error", "Invalid time format", true) } if isTime { return date, nil } else { return &date, nil } } // Special handling for string to bool. if kind == reflect.String && valKind == reflect.Bool { str := strings.TrimSpace(value.(string)) switch str { case "y", "yes", "1": return true, nil case "n", "no", "0": return false, nil } } // Special handling for string target. if kind == reflect.String { if bytes, ok := value.([]byte); ok { return string(bytes), nil } return fmt.Sprintf("%v", value), nil } // If value is string, and target type is numeric, // parse to float and then convert with reflect. if valType.Kind() == reflect.String && IsNumericKind(kind) { num, err := strconv.ParseFloat(value.(string), 64) if err != nil { return nil, err } return reflect.ValueOf(num).Convert(typ).Interface(), nil } // No custom handling worked, so try to convert with reflect. // We have to accept the panic. converted := SaveConvert(value, typ) if converted == nil { return nil, errors.New(fmt.Sprintf("Cannot convert %v to %v", valType.String(), kind)) } return converted, nil } // Convert a string value to the specified type if possible. // Returns an error for unsupported types. 
func ConvertStringToType(value string, typ reflect.Kind) (interface{}, error) { switch typ { case reflect.Int: x, err := strconv.Atoi(value) return interface{}(x), err case reflect.Int64: x, err := strconv.ParseInt(value, 10, 64) return interface{}(x), err case reflect.Uint: x, err := strconv.ParseUint(value, 10, 32) if err != nil { return nil, err } return uint(x), nil case reflect.Uint64: x, err := strconv.ParseUint(value, 10, 64) return interface{}(x), err case reflect.String: return interface{}(value), nil default: return nil, errors.New(fmt.Sprintf("cannot_convert_to_%v", typ)) } } func CompareValues(condition string, a, b interface{}) (bool, apperror.Error) { nilType := reflect.TypeOf(nil) typA := reflect.TypeOf(a) typB := reflect.TypeOf(b) if a == nil || typA == nilType { a = float64(0) typA = reflect.TypeOf(a) } if b == nil || typB == nilType { b = float64(0) typB = reflect.TypeOf(b) a, b = b, a typA, typB = typB, typA } kindA := typA.Kind() if kindA == reflect.Ptr { val := reflect.ValueOf(a) if !val.IsValid() || val.IsNil() { a = float64(0) } else { a = reflect.ValueOf(a).Elem().Interface() } typA = reflect.TypeOf(a) kindA = typA.Kind() } kindB := typB.Kind() if kindB == reflect.Ptr { val := reflect.ValueOf(b) if !val.IsValid() || val.IsNil() { b = float64(0) } else { b = reflect.ValueOf(b).Elem().Interface() } typB = reflect.TypeOf(b) kindB = typB.Kind() } // Compare time.Time values numerically. 
if kindA == reflect.Struct && typA.PkgPath() == "time" && typA.Name() == "Time" { t := a.(time.Time) if t.IsZero() { a = float64(0) } else { a = float64(t.UnixNano()) } typA = reflect.TypeOf(a) kindA = typA.Kind() } if kindB == reflect.Struct && typB.PkgPath() == "time" && typB.Name() == "Time" { t := b.(time.Time) if t.IsZero() { b = float64(0) } else { b = float64(t.UnixNano()) } typB = reflect.TypeOf(b) kindB = typB.Kind() } if IsNumericKind(kindA) || IsNumericKind(kindB) { var err error a, err = Convert(a, float64(0)) if err != nil { return false, apperror.New("conversion_error", err) } b, err = Convert(b, float64(0)) if err != nil { return false, apperror.New("conversion_error", err) } return CompareFloat64Values(condition, a.(float64), b.(float64)) } if kindA == reflect.String { return CompareStringValues(condition, a.(string), b.(string)) } if condition == "eq" || condition == "neq" { convertedB, err := Convert(b, a) if err != nil { return false, apperror.New(err.Error()) } if condition == "eq" { return a == convertedB, nil } else { return a != convertedB, nil } } return false, apperror.New( "impossible_comparison", fmt.Sprintf("Cannot compare type %v(value %v) to type %v(value %v)", kindA, a, kindB, b)) } func CompareStringValues(condition, a, b string) (bool, apperror.Error) { // Check different possible filters. switch condition { case "eq": return a == b, nil case "neq": return a != b, nil case "like": return strings.Contains(a, b), nil case "lt": return a < b, nil case "lte": return a <= b, nil case "gt": return a > b, nil case "gte": return a >= b, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: fmt.Sprintf("Unknown filter type '%v'", condition), } } } func CompareFloat64Values(condition string, a, b float64) (bool, apperror.Error) { // Check different possible filters. 
switch condition { case "eq": return a == b, nil case "neq": return a != b, nil case "like": return false, apperror.New("invalid_filter_comparison", "LIKE filter can only be used for string values, not numbers") case "lt": return a < b, nil case "lte": return a <= b, nil case "gt": return a > b, nil case "gte": return a >= b, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: fmt.Sprintf("Unknown filter type '%v'", condition), } } } func CompareNumericValues(condition string, a, b interface{}) (bool, apperror.Error) { typ := reflect.TypeOf(a).Kind() switch typ { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return CompareIntValues(condition, a, b) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return CompareUintValues(condition, a, b) case reflect.Float32, reflect.Float64: return CompareFloatValues(condition, a, b) default: return false, &apperror.Err{ Code: "unsupported_type_for_numeric_comparison", Message: fmt.Sprintf( "For a numeric comparision with %v, a numeric type is expected. 
Got: %v", condition, typ), } } } func NumericToInt64(x interface{}) (int64, apperror.Error) { var val int64 switch reflect.TypeOf(x).Kind() { case reflect.Int: val = int64(x.(int)) case reflect.Int8: val = int64(x.(int8)) case reflect.Int16: val = int64(x.(int16)) case reflect.Int32: val = int64(x.(int32)) case reflect.Int64: val = x.(int64) case reflect.Uint: val = int64(x.(uint)) case reflect.Uint8: val = int64(x.(uint8)) case reflect.Uint16: val = int64(x.(uint16)) case reflect.Uint32: val = int64(x.(uint32)) case reflect.Uint64: val = int64(x.(uint64)) case reflect.Float32: val = int64(x.(float32)) case reflect.Float64: val = int64(x.(float64)) case reflect.String: x, err := strconv.ParseInt(x.(string), 10, 64) if err != nil { return int64(0), &apperror.Err{Code: "non_numeric_string"} } val = x default: return int64(0), &apperror.Err{Code: "non_numeric_type"} } return val, nil } func NumericToUint64(x interface{}) (uint64, apperror.Error) { var val uint64 switch reflect.TypeOf(x).Kind() { case reflect.Int: val = uint64(x.(int)) case reflect.Int8: val = uint64(x.(int8)) case reflect.Int16: val = uint64(x.(int16)) case reflect.Int32: val = uint64(x.(int32)) case reflect.Int64: val = uint64(x.(int64)) case reflect.Uint: val = uint64(x.(uint)) case reflect.Uint8: val = uint64(x.(uint8)) case reflect.Uint16: val = uint64(x.(uint16)) case reflect.Uint32: val = uint64(x.(uint32)) case reflect.Uint64: val = x.(uint64) case reflect.Float32: val = uint64(x.(float32)) case reflect.Float64: val = uint64(x.(float64)) case reflect.String: x, err := strconv.ParseInt(x.(string), 10, 64) if err != nil { return uint64(0), &apperror.Err{Code: "non_numeric_string"} } val = uint64(x) default: panic("nonnumeric") return uint64(0), &apperror.Err{Code: "non_numeric_type"} } return val, nil } func NumericToFloat64(x interface{}) (float64, apperror.Error) { var val float64 switch reflect.TypeOf(x).Kind() { case reflect.Int: val = float64(x.(int)) case reflect.Int8: val = 
float64(x.(int8)) case reflect.Int16: val = float64(x.(int16)) case reflect.Int32: val = float64(x.(int32)) case reflect.Int64: val = float64(x.(int64)) case reflect.Uint: val = float64(x.(uint)) case reflect.Uint8: val = float64(x.(uint8)) case reflect.Uint16: val = float64(x.(uint16)) case reflect.Uint32: val = float64(x.(uint32)) case reflect.Uint64: val = float64(x.(uint64)) case reflect.Float32: val = float64(x.(float32)) case reflect.Float64: val = x.(float64) case reflect.String: x, err := strconv.ParseFloat(x.(string), 64) if err != nil { return val, &apperror.Err{Code: "non_numeric_string"} } val = x default: return float64(0), &apperror.Err{Code: "non_numeric_type"} } return val, nil } func CompareIntValues(condition string, a, b interface{}) (bool, apperror.Error) { aVal, errA := NumericToInt64(a) bVal, errB := NumericToInt64(b) if errA != nil { return false, errA } if errB != nil { return false, errB } switch condition { case "eq": return aVal == bVal, nil case "neq": return aVal != bVal, nil case "lt": return aVal < bVal, nil case "lte": return aVal <= bVal, nil case "gt": return aVal > bVal, nil case "gte": return aVal >= bVal, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: "Unknown filter type: " + condition, } } } func CompareUintValues(condition string, a, b interface{}) (bool, apperror.Error) { aVal, errA := NumericToUint64(a) bVal, errB := NumericToUint64(b) if errA != nil { return false, errA } if errB != nil { return false, errB } switch condition { case "eq": return aVal == bVal, nil case "neq": return aVal != bVal, nil case "lt": return aVal < bVal, nil case "lte": return aVal <= bVal, nil case "gt": return aVal > bVal, nil case "gte": return aVal >= bVal, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: "Unknown filter type: " + condition, } } } func CompareFloatValues(condition string, a, b interface{}) (bool, apperror.Error) { aVal, errA := NumericToFloat64(a) bVal, errB := 
NumericToFloat64(b) if errA != nil { return false, errA } if errB != nil { return false, errB } switch condition { case "eq": return aVal == bVal, nil case "neq": return aVal != bVal, nil case "lt": return aVal < bVal, nil case "lte": return aVal <= bVal, nil case "gt": return aVal > bVal, nil case "gte": return aVal >= bVal, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: "Unknown filter type: " + condition, } } } /** * Helpers for creating and converting structs and slices. */ // Set a pointer to a value with reflect. // If the value is a pointer to a type, and the the pointer target is not, // the value is automatically dereferenced. func SetPointer(ptr, val interface{}) { ptrVal := reflect.ValueOf(ptr) ptrType := ptrVal.Type() if ptrType.Kind() != reflect.Ptr { panic("Pointer expected") } target := ptrVal.Elem() targetType := target.Type() value := reflect.ValueOf(val) valueType := value.Type() if valueType.Kind() == reflect.Ptr && targetType.Kind() != reflect.Ptr { value = value.Elem() } target.Set(value) } func SetSlicePointer(ptr interface{}, values []interface{}) { target := reflect.ValueOf(ptr) if target.Type().Kind() != reflect.Ptr { panic("Must supply pointer to slice") } slice := target.Elem() sliceType := slice.Type() if sliceType.Kind() != reflect.Slice { panic("Must supply pointer to slice") } usePtr := sliceType.Elem().Kind() == reflect.Ptr for _, val := range values { if usePtr { slice = reflect.Append(slice, reflect.ValueOf(val)) } else { slice = reflect.Append(slice, reflect.ValueOf(val).Elem()) } } target.Elem().Set(slice) } // Returns pointer to a new struct with the same type as the given struct. func NewStruct(typ interface{}) (interface{}, error) { // Build new struct. 
item := reflect.ValueOf(typ) if item.Type().Kind() == reflect.Ptr { item = item.Elem() } if item.Type().Kind() != reflect.Struct { return nil, errors.New("struct_expected") } return reflect.New(reflect.TypeOf(item.Interface())).Interface(), nil } // Build a new slice that can contain elements of the given type. func NewSlice(typ interface{}) interface{} { // Build new array. // See http://stackoverflow.com/questions/25384640/why-golang-reflect-makeslice-returns-un-addressable-value // Create a slice to begin with var myType reflect.Type if t, ok := typ.(reflect.Type); ok { myType = t } else { myType = reflect.TypeOf(typ) } slice := reflect.MakeSlice(reflect.SliceOf(myType), 0, 0) // Create a pointer to a slice value and set it to the slice x := reflect.New(slice.Type()) x.Elem().Set(slice) return x.Elem().Interface() } func ConvertInterfaceToSlice(slice interface{}) ([]interface{}, error) { reflSlice := reflect.ValueOf(slice) if reflSlice.Type().Kind() == reflect.Ptr { reflSlice = reflSlice.Elem() } if reflSlice.Type().Kind() != reflect.Slice { return nil, errors.New("slice_expected") } result := make([]interface{}, 0) for i := 0; i < reflSlice.Len(); i++ { itemVal := reflSlice.Index(i) if itemVal.Type().Kind() == reflect.Struct { itemVal = itemVal.Addr() } result = append(result, itemVal.Interface()) } return result, nil } func InterfaceToTypedSlice(itemType reflect.Type, slice []interface{}) interface{} { newSlice := reflect.ValueOf(NewSlice(itemType)) for _, item := range slice { newSlice = reflect.Append(newSlice, reflect.ValueOf(item)) } return newSlice.Interface() } // Convert a slice of type interface{} to a []Model slice. 
func InterfaceToModelSlice(slice interface{}) ([]Model, error) { reflSlice := reflect.ValueOf(slice) if reflSlice.Type().Kind() == reflect.Ptr { reflSlice = reflSlice.Elem() } if reflSlice.Type().Kind() != reflect.Slice { return nil, errors.New("slice_expected") } result := make([]Model, 0) for i := 0; i < reflSlice.Len(); i++ { itemVal := reflSlice.Index(i) if itemVal.Type().Kind() == reflect.Struct { itemVal = itemVal.Addr() } item := itemVal.Interface() // Check that slice items actually implement model interface. // Only needed once. modelItem, ok := item.(Model) if i == 0 && !ok { return nil, errors.New("slice_values_do_not_implement_model_if") } result = append(result, modelItem) } return result, nil } // Convert a slice of type []Model to []interface{}. func ModelToInterfaceSlice(models []Model) []interface{} { slice := make([]interface{}, 0) for _, m := range models { slice = append(slice, m.(interface{})) } return slice } /** * Sorter for sorting structs by field. */ type structFieldSorter struct { items []interface{} field string ascending bool } func (s structFieldSorter) Len() int { return len(s.items) } func (s structFieldSorter) Swap(i, j int) { s.items[i], s.items[j] = s.items[j], s.items[i] } func (s structFieldSorter) Less(i, j int) bool { valA, err := GetStructFieldValue(s.items[i], s.field) if err != nil { panic("Sorting failure: " + err.Error()) } valB, err := GetStructFieldValue(s.items[j], s.field) if err != nil { panic("Sorting failure: " + err.Error()) } less, err := CompareValues("lt", valA, valB) if err != nil { panic("Sorting failure: " + err.Error()) } if s.ascending { return less } else { return !less } } func StructFieldSorter(items []interface{}, field string, asc bool) structFieldSorter { return structFieldSorter{ items: items, field: field, ascending: asc, } } func SortStructSlice(items []interface{}, field string, ascending bool) { sort.Sort(StructFieldSorter(items, field, ascending)) } /** * Setting and getting fields from a 
struct with reflect. */ // Given a struct or a pointer to a struct, retrieve the value of a field from // the struct with reflection. func GetStructFieldValue(s interface{}, fieldName string) (interface{}, apperror.Error) { // Check if struct is valid. if s == nil { return nil, &apperror.Err{Code: "pointer_or_struct_expected"} } // Check if it is a pointer, and if so, dereference it. v := reflect.ValueOf(s) if v.Type().Kind() == reflect.Ptr { v = v.Elem() } if v.Type().Kind() != reflect.Struct { return nil, &apperror.Err{Code: "struct_expected"} } field := v.FieldByName(fieldName) if !field.IsValid() { return nil, &apperror.Err{ Code: "field_not_found", Message: fmt.Sprintf("struct %v does not have field '%v'", v.Type(), fieldName), } } return field.Interface(), nil } func GetStructField(s interface{}, fieldName string) (reflect.Value, apperror.Error) { // Check if struct is valid. if s == nil { return reflect.Value{}, &apperror.Err{Code: "pointer_or_struct_expected"} } // Check if it is a pointer, and if so, dereference it. v := reflect.ValueOf(s) if v.Type().Kind() == reflect.Ptr { v = v.Elem() } if v.Type().Kind() != reflect.Struct { return reflect.Value{}, &apperror.Err{Code: "struct_expected"} } field := v.FieldByName(fieldName) if !field.IsValid() { return reflect.Value{}, &apperror.Err{ Code: "field_not_found", Message: fmt.Sprintf("struct does not have field '%v'", fieldName), } } return field, nil } // Given a pointer to a struct, set the given field to the given value. // If the target value is not a string, it will be automatically converted // to the proper type. // Returns an error if no pointer to a struct is given, if the field does not // exist, or if the string value can not be converted to the actual type. 
func SetStructFieldValueFromString(obj interface{}, fieldName string, val string) apperror.Error { objVal := reflect.ValueOf(obj) if objVal.Type().Kind() != reflect.Ptr { return &apperror.Err{Code: "pointer_expected"} } objVal = objVal.Elem() if objVal.Type().Kind() != reflect.Struct { return &apperror.Err{Code: "pointer_to_struct_expected"} } field := objVal.FieldByName(fieldName) if !field.IsValid() { return &apperror.Err{ Code: "unknown_field", Message: fmt.Sprintf("Field %v does not exist on %v", fieldName, objVal), } } //fieldType, _ := objType.FieldByName(fieldName) convertedVal, err := ConvertStringToType(val, field.Type().Kind()) if err != nil { return &apperror.Err{Code: err.Error()} } field.Set(reflect.ValueOf(convertedVal)) return nil } func GetModelCollection(model interface{}) (string, apperror.Error) { // If the model implements .Collection(), call it. if hook, ok := model.(ModelCollectionHook); ok { collection := hook.Collection() if collection != "" { return collection, nil } } typ := reflect.TypeOf(model) // Dereference pointer. if typ.Kind() == reflect.Ptr { typ = typ.Elem() } // Check if it is a struct. 
if typ.Kind() != reflect.Struct { return "", &apperror.Err{ Code: "invalid_model", Message: fmt.Sprintf("Expected model struct or pointer to struct, got %v", typ), } } collection := CamelCaseToUnderscore(typ.Name()) collection = Pluralize(collection) return collection, nil } func MustGetModelCollection(model interface{}) string { collection, err := GetModelCollection(model) if err != nil { panic("Could not determine collection: " + err.Error()) } return collection } func GetModelID(info *ModelInfo, m interface{}) (interface{}, apperror.Error) { val, err := GetStructFieldValue(m, info.PkField) if err != nil { return nil, err } return val, nil } func GetModelSliceFieldValues(models []interface{}, fieldName string) ([]interface{}, apperror.Error) { vals := make([]interface{}, 0) for _, model := range models { val, err := GetStructFieldValue(model, fieldName) if err != nil { return nil, err } vals = append(vals, val) } return vals, nil } // Set a struct field. // Returns an error if the object is not a struct or a pointer to a struct, or if // the field does not exist. func SetStructField(obj interface{}, fieldName string, value interface{}) error { val := reflect.ValueOf(obj) // Make sure obj is a pointer. if val.Type().Kind() != reflect.Ptr { return errors.New("pointer_to_struct_expected") } // Dereference pointer. val = val.Elem() // Make surre obj points to a struct. if val.Type().Kind() != reflect.Struct { return errors.New("struct_expected") } field := val.FieldByName(fieldName) if !field.IsValid() { return errors.New("unknown_field") } field.Set(reflect.ValueOf(value)) return nil } // Given a struct, set the specified field that contains either a single Model // or a model slice to the given models. // If the target field type is struct or pointer to struct, it will be set to // the first model in []models. // If it is a slice, it will be set to the models with the correct type. 
func SetStructModelField(obj interface{}, fieldName string, models []interface{}) error { objVal := reflect.ValueOf(obj) if objVal.Type().Kind() != reflect.Ptr { return errors.New("pointer_expected") } if objVal.Elem().Type().Kind() != reflect.Struct { return errors.New("pointer_to_struct_expected") } field := objVal.Elem().FieldByName(fieldName) if !field.IsValid() { return errors.New("unknown_field") } fieldType := field.Type().Kind() // TODO: in each clause, check that the target field conforms to the vale to set. if fieldType == reflect.Struct { field.Set(reflect.ValueOf(models[0]).Elem()) } else if fieldType == reflect.Ptr { ptr := reflect.New(reflect.ValueOf(models[0]).Type()) ptr.Elem().Set(reflect.ValueOf(models[0])) field.Set(ptr.Elem()) } else if fieldType == reflect.Slice { sliceType := field.Type().Elem() slice := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, 0) for _, model := range models { val := reflect.ValueOf(model) if val.Type().Kind() == reflect.Ptr { val = val.Elem() } if sliceType.Kind() == reflect.Ptr { slice = reflect.Append(slice, val.Addr()) } else { slice = reflect.Append(slice, val) } } field.Set(slice) } else { return errors.New("unsupported_field_type") } return nil } func ModelToMap(info *ModelInfo, model interface{}, forBackend, marshal bool, includeRelations bool) (map[string]interface{}, apperror.Error) { data := make(map[string]interface{}) for fieldName := range info.FieldInfo { field := info.FieldInfo[fieldName] if field.Ignore { continue } if field.IsRelation() && !includeRelations { continue } // Todo: avoid repeated work by GetStructFieldValue() val, err := GetStructFieldValue(model, fieldName) if err != nil { return nil, err } // Ignore zero values if specified. // Note that numeric fields are not included, since 0 is their zero value. // Also, primary keys are ignored. 
if field.IgnoreIfZero && !field.PrimaryKey && !IsNumericKind(field.Type.Kind()) && IsZero(val) { continue } reflVal := reflect.ValueOf(val) if reflVal.IsValid() { reflType := reflVal.Type() if reflType.Kind() == reflect.Ptr && forBackend { if reflVal.IsNil() { val = nil } else { val = reflVal.Elem().Interface() } } } if forBackend && field.Marshal { if IsZero(val) { continue } js, err := json.Marshal(val) if err != nil { return nil, &apperror.Err{ Code: "marshal_error", Message: fmt.Sprintf("Could not marshal %v.%v to json: %v", info.Name, fieldName, err), } } val = js } name := fieldName if forBackend { name = field.BackendName } else if marshal { name = field.MarshalName } data[name] = val } return data, nil } func ModelToJson(info *ModelInfo, model Model, includeRelations bool) ([]byte, apperror.Error) { if info == nil { var err apperror.Error info, err = BuildModelInfo(model) if err != nil { return nil, err } } data, err := ModelToMap(info, model, false, true, includeRelations) if err != nil { return nil, err } js, err2 := json.Marshal(data) if err2 != nil { return nil, &apperror.Err{ Code: "json_marshal_error", Message: err2.Error(), } } return js, nil } // ModelFieldDiff compares two models and returns a list of fields that are different. 
func ModelFieldDiff(info *ModelInfo, m1, m2 interface{}) []string { m1Data, _ := ModelToMap(info, m1, false, false, false) m2Data, _ := ModelToMap(info, m2, false, false, false) diff := make([]string, 0) for key, m1Val := range m1Data { if m2Val, ok := m2Data[key]; ok { if m1Val != m2Val { diff = append(diff, key) } } } return diff } func BuildModelFromMap(info *ModelInfo, data map[string]interface{}) (interface{}, apperror.Error) { model, err := NewStruct(info.Item) if err != nil { return nil, &apperror.Err{ Code: "model_build_error", Message: err.Error(), } } err = UpdateModelFromData(info, model, data) if err != nil { return nil, &apperror.Err{ Code: "model_update_error", Message: err.Error(), } } return model, nil } func UpdateModelFromData(info *ModelInfo, obj interface{}, data map[string]interface{}) apperror.Error { ptrVal := reflect.ValueOf(obj) if ptrVal.Type().Kind() != reflect.Ptr { return &apperror.Err{ Code: "pointer_expected", } } val := ptrVal.Elem() if val.Type().Kind() != reflect.Struct { return &apperror.Err{ Code: "pointer_to_struct_expected", } } for key := range data { // Try to find field by backend name. fieldInfo := info.FieldByBackendName(key) if fieldInfo == nil { // Does not match a backend name. // Try to find field by marshal name to support unmarshalled data. fieldInfo = info.FieldByMarshalName(key) // If key does not match a marshal name either, just assume it to be a plain struct field name. if fieldInfo == nil { fieldInfo = info.FieldInfo[key] } } if fieldInfo == nil { continue } if fieldInfo.Ignore { continue } // Need special handling for point type. 
if strings.HasSuffix(fieldInfo.StructType, "go-dukedb.Point") { p := new(Point) _, err := fmt.Sscanf(data[key].(string), "(%f,%f)", &p.Lat, &p.Lon) if err != nil { return &apperror.Err{ Code: "point_conversion_error", Message: fmt.Sprintf("Could not parse point specification: %v", data[key]), } } if fieldInfo.Type.Kind() == reflect.Ptr { data[key] = p } else { data[key] = *p } } // Handle marshalled fields. if fieldInfo.Marshal { var marshalledData []byte if strVal, ok := data[key].(string); ok { if strVal != "" { marshalledData = []byte(strVal) } } else if bytes, ok := data[key].([]byte); ok { if len(bytes) > 0 { marshalledData = bytes } } if marshalledData != nil { itemVal := reflect.New(fieldInfo.Type) itemPtr := itemVal.Interface() if err := json.Unmarshal(marshalledData, itemPtr); err != nil { return apperror.Wrap(err, "marshal_field_unmarshal_error", fmt.Sprintf("Could not unmarshal the content of field %v", fieldInfo.Name)) } data[key] = itemVal.Elem().Interface() } else { continue } } SetModelValue(fieldInfo, val.FieldByName(fieldInfo.Name), data[key]) } return nil } func SetModelValue(info *FieldInfo, field reflect.Value, rawValue interface{}) { val := reflect.ValueOf(rawValue) // Skip invalid. if !val.IsValid() { return } valKind := val.Type().Kind() fieldKind := info.Type.Kind() // For the same type, skip complicated comparison/casting. if valKind == fieldKind { field.Set(val) return } // Try to convert using Convert() method. convertedVal, err := Convert(rawValue, info.Type) if err == nil { field.Set(reflect.ValueOf(convertedVal)) return } // Nothing worked. 
panic(fmt.Sprintf("Could not convert type %v (%v) to type %v: %v", valKind, rawValue, fieldKind, err)) } func BuildModelSliceFromMap(info *ModelInfo, items []map[string]interface{}) (interface{}, apperror.Error) { slice := NewSlice(info.Item) sliceVal := reflect.ValueOf(slice) for _, data := range items { model, err := BuildModelFromMap(info, data) if err != nil { return nil, err } sliceVal = reflect.Append(sliceVal, reflect.ValueOf(model)) } return sliceVal.Interface(), nil } /** * Query related. */ func NormalizeSelectStatement(stmt *SelectStatement, info *ModelInfo, allInfo ModelInfos) apperror.Error { // Fix nesting (joins and fields). if err := stmt.FixNesting(); err != nil { return err } // Normalize fields. for _, field := range stmt.Fields { if err := NormalizeExpression(field, info, allInfo); err != nil { return err } } // Normalize filter. if err := NormalizeExpression(stmt.Filter, info, allInfo); err != nil { return err } // Normalize sorts. for _, sort := range stmt.Sorts { if err := NormalizeExpression(sort, info, allInfo); err != nil { return err } } // Normalize joins. 
for _, join := range stmt.Joins { if err := NormalizeExpression(join, info, allInfo); err != nil { return err } } return nil } func NormalizeExpression(expression Expression, info *ModelInfo, allInfo ModelInfos) apperror.Error { if expression == nil { return nil } switch expr := expression.(type) { case *UpdateStatement: if err := NormalizeExpression(&expr.MutationStmt, info, allInfo); err != nil { return err } if err := NormalizeExpression(expr.Select, info, allInfo); err != nil { return err } case MutationStatement: backendName := allInfo.FindBackendName(expr.GetCollection()) if backendName == "" { return apperror.New("unknown_collection", fmt.Sprintf("The collection %v does not exist", expr.GetCollection())) } expr.SetCollection(backendName) for _, fieldVal := range expr.GetValues() { if err := NormalizeExpression(fieldVal, info, allInfo); err != nil { return err } } case *SelectStatement: if err := NormalizeSelectStatement(expr, info, allInfo); err != nil { return err } case *JoinStatement: if err := NormalizeExpression(expr.JoinCondition, info, allInfo); err != nil { return err } if err := NormalizeExpression(&expr.SelectStatement, info, allInfo); err != nil { return err } case MultiExpression: for _, expr := range expr.GetExpressions() { if err := NormalizeExpression(expr, info, allInfo); err != nil { return err } } case NestedExpression: if err := NormalizeExpression(expr.GetExpression(), info, allInfo); err != nil { return err } case FilterExpression: if err := NormalizeExpression(expr.GetField(), info, allInfo); err != nil { return err } if err := NormalizeExpression(expr.GetClause(), info, allInfo); err != nil { return err } case *SortExpression: if err := NormalizeExpression(expr.Field, info, allInfo); err != nil { return err } case *FieldValueExpression: if err := NormalizeExpression(expr.Field, info, allInfo); err != nil { return err } case *CollectionFieldIdentifierExpression: colInfo := info // First, normalize the collection name, if set. 
if expr.Collection != "" { backendName := "" for _, info := range allInfo { if expr.Collection == info.Collection { backendName = info.BackendName colInfo = info break } else if expr.Collection == info.BackendName { backendName = info.BackendName colInfo = info break } else if expr.Collection == info.MarshalName { backendName = info.BackendName colInfo = info break } } if backendName == "" { return apperror.New("unknown_collection", fmt.Sprintf("The collection %v does not exist", expr.Collection), true) } expr.Collection = backendName } // We found a valid collection name. // Now normalize the field name. fieldName := colInfo.FindBackendName(expr.Field) if fieldName == "" { return apperror.New("unknown_field", fmt.Sprintf("The collection %v has no field %v", colInfo.Collection, expr.Field)) } expr.Field = fieldName case *IdentifierExpression: fieldName := info.FindBackendName(expr.Identifier) if fieldName == "" { return apperror.New("unknown_field", fmt.Sprintf("The collection %v has no field %v", info.Collection, expr.Identifier)) } default: panic(fmt.Sprintf("Unhandled expression type: %v\n", reflect.TypeOf(expr))) } return nil } /** * Model hooks. */ func ValidateModel(info *ModelInfo, m interface{}) apperror.Error { val := reflect.ValueOf(m).Elem() for fieldName, fieldInfo := range info.FieldInfo { // Fill in default values. if fieldInfo.Default != "" { fieldVal := val.FieldByName(fieldName) if IsZero(fieldVal.Interface()) { convertedVal, err := Convert(fieldInfo.Default, fieldInfo.Type) if err != nil { msg := fmt.Sprintf("Could not convert the default value '%v' for field %v to type %v", fieldInfo.Default, fieldName, fieldInfo.Type.Kind()) return apperror.Wrap(err, "default_value_conversion_error", msg) } fieldVal.Set(reflect.ValueOf(convertedVal)) } } // If NotNull is set to true, and the field is not a primary key, validate that it is // not zero. 
// Note: numeric fields will not be checked, since their zero value is "0", which might // be a valid field value. if fieldInfo.NotNull && !fieldInfo.PrimaryKey && !IsNumericKind(fieldInfo.Type.Kind()) { fieldVal := val.FieldByName(fieldName) if IsZero(fieldVal.Interface()) { return &apperror.Err{ Code: "empty_required_field", Message: fmt.Sprintf("The required field %v is empty", fieldName), Public: true, } } } else if fieldInfo.Min > 0 || fieldInfo.Max > 0 { // Either min or max is set, so check length. fieldVal := val.FieldByName(fieldName) var length float64 if fieldInfo.Type.Kind() == reflect.String { length = float64(fieldVal.Len()) } else if IsNumericKind(fieldInfo.Type.Kind()) { length = fieldVal.Convert(reflect.TypeOf(float64(0))).Interface().(float64) } else { // Not string or numeric, so can't check. continue } if fieldInfo.Min > 0 && length < fieldInfo.Min { return &apperror.Err{ Code: "shorter_than_min_length", Message: fmt.Sprintf("The field %v is shorter than the minimum length %v", fieldName, fieldInfo.Min), } } else if fieldInfo.Max > 0 && length > fieldInfo.Max { return &apperror.Err{ Code: "longer_than_max_length", Message: fmt.Sprintf("The field %v is longer than the maximum length %v", fieldName, fieldInfo.Max), } } } } // If the model implements ModelValidateHook, call it. if validator, ok := m.(ModelValidateHook); ok { if err := validator.Validate(); err != nil { // Check if error is an apperror, and return it if so. if apperr, ok := err.(apperror.Error); ok { return apperr } else { // Not an apperror, so create a new one. 
return apperror.New(err.Error()) } } } return nil } func CallModelHook(b Backend, m interface{}, hook string) apperror.Error { switch hook { case "Validate": if h, ok := m.(ModelValidateHook); ok { err := h.Validate() if err == nil { return nil } else if apperr, ok := err.(apperror.Error); ok { return apperr } else { return apperror.Wrap(err, "validation_error") } } return nil case "BeforeCreate": if h, ok := m.(ModelBeforeCreateHook); ok { err := h.BeforeCreate(b) if err == nil { return nil } else if apperr, ok := err.(apperror.Error); ok { return apperr } else { return apperror.Wrap(err, "before_create_error") } } return nil case "AfterCreate": if h, ok := m.(ModelAfterCreateHook); ok { h.AfterCreate(b) } return nil case "BeforeUpdate": if h, ok := m.(ModelBeforeUpdateHook); ok { err := h.BeforeUpdate(b) if err == nil { return nil } else if apperr, ok := err.(apperror.Error); ok { return apperr } else { return apperror.Wrap(err, "before_update_error") } } return nil case "AfterUpdate": if h, ok := m.(ModelAfterUpdateHook); ok { h.AfterUpdate(b) } return nil case "BeforeDelete": if h, ok := m.(ModelBeforeDeleteHook); ok { err := h.BeforeDelete(b) if err == nil { return nil } else if apperr, ok := err.(apperror.Error); ok { return apperr } else { return apperror.Wrap(err, "before_create_error") } } return nil case "AfterDelete": if h, ok := m.(ModelAfterDeleteHook); ok { h.AfterDelete(b) } return nil case "AfterQuery": if h, ok := m.(ModelAfterQueryHook); ok { h.AfterQuery(b) } return nil default: return &apperror.Err{ Code: "invalid_hook", Message: fmt.Sprintf("Unknown hook %v", hook), } } } fixed utils package dukedb import ( "encoding/json" "errors" "fmt" "reflect" "sort" "strconv" "strings" "time" "github.com/theduke/go-apperror" ) /** * String utils. 
*/ func Pluralize(str string) string { if str[len(str)-1] == 'y' { str = str[1:len(str)-1] + "ie" } if str[len(str)-1] != 's' { str += "s" } return str } // Convert a CamelCase string to underscore version, eg camel_case. func CamelCaseToUnderscore(str string) string { u := "" didChange := false for i, c := range str { if c >= 65 && c <= 90 { if i == 0 { u += string(byte(c + 32)) didChange = true continue } if !didChange { u += "_" didChange = true } u += string(byte(c + 32)) } else { u += string(byte(c)) didChange = false } } return u } func LowerCaseFirst(str string) string { if len(str) == 0 { return "" } newStr := "" doReplace := true for _, c := range str { x := int(c) if doReplace { if x >= 65 && x <= 90 { newStr += string(x + 32) } else { doReplace = false newStr += string(c) } } else { newStr += string(c) } } return newStr } // Given the internal name of a filter like "eq" or "lte", return a SQL operator like = or <. // WARNING: panics if an unsupported filter is given. func FilterToSqlCondition(filter string) (string, apperror.Error) { typ := "" switch filter { case "eq": typ = "=" case "neq": typ = "!=" case "lt": typ = "<" case "lte": typ = "<=" case "gt": typ = ">" case "gte": typ = ">=" case "like": typ = "LIKE" case "in": typ = "IN" default: return "", &apperror.Err{ Code: "unknown_filter", Message: "Unknown filter '" + filter + "'", } } return typ, nil } /** * Generic interface variable handling/comparison functions. 
*/ func IsNumericKind(kind reflect.Kind) bool { switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64: return true default: return false } } func IsZero(val interface{}) bool { if val == nil { return true } reflVal := reflect.ValueOf(val) reflType := reflVal.Type() if reflType.Kind() == reflect.Slice { return reflVal.Len() < 1 } if reflType.Kind() == reflect.Map { return reflVal.Len() < 1 } return val == reflect.Zero(reflType).Interface() } func SaveConvert(val interface{}, typ reflect.Type) interface{} { defer func() { recover() }() return reflect.ValueOf(val).Convert(typ).Interface() } // Convert converts an arbitrary interface value to a different type. // The rawType argument may either be a reflect.Type or an acutal instance of the type. func Convert(value interface{}, rawType interface{}) (interface{}, error) { var typ reflect.Type if t, ok := rawType.(reflect.Type); ok { typ = t } else { typ = reflect.TypeOf(rawType) } kind := typ.Kind() reflVal := reflect.ValueOf(value) valType := reflect.TypeOf(value) valKind := valType.Kind() if typ == valType { // Same type, nothing to convert. return value, nil } isPointer := kind == reflect.Ptr var pointerType reflect.Type if isPointer { pointerType = typ.Elem() } // If target value is a pointer and the value is not (and the types match), // create a new pointer pointing to the value. if isPointer && valType == pointerType { newVal := reflect.New(valType) newVal.Elem().Set(reflVal) return newVal.Interface(), nil } // Parse dates into time.Time. 
isTime := kind == reflect.Struct && typ.PkgPath() == "time" && typ.Name() == "Time" isTimePointer := isPointer && pointerType.Kind() == reflect.Struct && pointerType.PkgPath() == "time" && pointerType.Name() == "Time" if (isTime || isTimePointer) && valKind == reflect.String { date, err := time.Parse(time.RFC3339, value.(string)) if err != nil { return nil, apperror.Wrap(err, "time_parse_error", "Invalid time format", true) } if isTime { return date, nil } else { return &date, nil } } // Special handling for string to bool. if kind == reflect.String && valKind == reflect.Bool { str := strings.TrimSpace(value.(string)) switch str { case "y", "yes", "1": return true, nil case "n", "no", "0": return false, nil } } // Special handling for string target. if kind == reflect.String { if bytes, ok := value.([]byte); ok { return string(bytes), nil } return fmt.Sprintf("%v", value), nil } // If value is string, and target type is numeric, // parse to float and then convert with reflect. if valType.Kind() == reflect.String && IsNumericKind(kind) { num, err := strconv.ParseFloat(value.(string), 64) if err != nil { return nil, err } return reflect.ValueOf(num).Convert(typ).Interface(), nil } // No custom handling worked, so try to convert with reflect. // We have to accept the panic. converted := SaveConvert(value, typ) if converted == nil { return nil, errors.New(fmt.Sprintf("Cannot convert %v to %v", valType.String(), kind)) } return converted, nil } // Convert a string value to the specified type if possible. // Returns an error for unsupported types. 
func ConvertStringToType(value string, typ reflect.Kind) (interface{}, error) { switch typ { case reflect.Int: x, err := strconv.Atoi(value) return interface{}(x), err case reflect.Int64: x, err := strconv.ParseInt(value, 10, 64) return interface{}(x), err case reflect.Uint: x, err := strconv.ParseUint(value, 10, 32) if err != nil { return nil, err } return uint(x), nil case reflect.Uint64: x, err := strconv.ParseUint(value, 10, 64) return interface{}(x), err case reflect.String: return interface{}(value), nil default: return nil, errors.New(fmt.Sprintf("cannot_convert_to_%v", typ)) } } func CompareValues(condition string, a, b interface{}) (bool, apperror.Error) { nilType := reflect.TypeOf(nil) typA := reflect.TypeOf(a) typB := reflect.TypeOf(b) if a == nil || typA == nilType { a = float64(0) typA = reflect.TypeOf(a) } if b == nil || typB == nilType { b = float64(0) typB = reflect.TypeOf(b) a, b = b, a typA, typB = typB, typA } kindA := typA.Kind() if kindA == reflect.Ptr { val := reflect.ValueOf(a) if !val.IsValid() || val.IsNil() { a = float64(0) } else { a = reflect.ValueOf(a).Elem().Interface() } typA = reflect.TypeOf(a) kindA = typA.Kind() } kindB := typB.Kind() if kindB == reflect.Ptr { val := reflect.ValueOf(b) if !val.IsValid() || val.IsNil() { b = float64(0) } else { b = reflect.ValueOf(b).Elem().Interface() } typB = reflect.TypeOf(b) kindB = typB.Kind() } // Compare time.Time values numerically. 
if kindA == reflect.Struct && typA.PkgPath() == "time" && typA.Name() == "Time" { t := a.(time.Time) if t.IsZero() { a = float64(0) } else { a = float64(t.UnixNano()) } typA = reflect.TypeOf(a) kindA = typA.Kind() } if kindB == reflect.Struct && typB.PkgPath() == "time" && typB.Name() == "Time" { t := b.(time.Time) if t.IsZero() { b = float64(0) } else { b = float64(t.UnixNano()) } typB = reflect.TypeOf(b) kindB = typB.Kind() } if IsNumericKind(kindA) || IsNumericKind(kindB) { var err error a, err = Convert(a, float64(0)) if err != nil { return false, apperror.New("conversion_error", err) } b, err = Convert(b, float64(0)) if err != nil { return false, apperror.New("conversion_error", err) } return CompareFloat64Values(condition, a.(float64), b.(float64)) } if kindA == reflect.String { return CompareStringValues(condition, a.(string), b.(string)) } if condition == "eq" || condition == "neq" { convertedB, err := Convert(b, a) if err != nil { return false, apperror.New(err.Error()) } if condition == "eq" { return a == convertedB, nil } else { return a != convertedB, nil } } return false, apperror.New( "impossible_comparison", fmt.Sprintf("Cannot compare type %v(value %v) to type %v(value %v)", kindA, a, kindB, b)) } func CompareStringValues(condition, a, b string) (bool, apperror.Error) { // Check different possible filters. switch condition { case "eq": return a == b, nil case "neq": return a != b, nil case "like": return strings.Contains(a, b), nil case "lt": return a < b, nil case "lte": return a <= b, nil case "gt": return a > b, nil case "gte": return a >= b, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: fmt.Sprintf("Unknown filter type '%v'", condition), } } } func CompareFloat64Values(condition string, a, b float64) (bool, apperror.Error) { // Check different possible filters. 
switch condition { case "eq": return a == b, nil case "neq": return a != b, nil case "like": return false, apperror.New("invalid_filter_comparison", "LIKE filter can only be used for string values, not numbers") case "lt": return a < b, nil case "lte": return a <= b, nil case "gt": return a > b, nil case "gte": return a >= b, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: fmt.Sprintf("Unknown filter type '%v'", condition), } } } func CompareNumericValues(condition string, a, b interface{}) (bool, apperror.Error) { typ := reflect.TypeOf(a).Kind() switch typ { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return CompareIntValues(condition, a, b) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return CompareUintValues(condition, a, b) case reflect.Float32, reflect.Float64: return CompareFloatValues(condition, a, b) default: return false, &apperror.Err{ Code: "unsupported_type_for_numeric_comparison", Message: fmt.Sprintf( "For a numeric comparision with %v, a numeric type is expected. 
Got: %v", condition, typ), } } } func NumericToInt64(x interface{}) (int64, apperror.Error) { var val int64 switch reflect.TypeOf(x).Kind() { case reflect.Int: val = int64(x.(int)) case reflect.Int8: val = int64(x.(int8)) case reflect.Int16: val = int64(x.(int16)) case reflect.Int32: val = int64(x.(int32)) case reflect.Int64: val = x.(int64) case reflect.Uint: val = int64(x.(uint)) case reflect.Uint8: val = int64(x.(uint8)) case reflect.Uint16: val = int64(x.(uint16)) case reflect.Uint32: val = int64(x.(uint32)) case reflect.Uint64: val = int64(x.(uint64)) case reflect.Float32: val = int64(x.(float32)) case reflect.Float64: val = int64(x.(float64)) case reflect.String: x, err := strconv.ParseInt(x.(string), 10, 64) if err != nil { return int64(0), &apperror.Err{Code: "non_numeric_string"} } val = x default: return int64(0), &apperror.Err{Code: "non_numeric_type"} } return val, nil } func NumericToUint64(x interface{}) (uint64, apperror.Error) { var val uint64 switch reflect.TypeOf(x).Kind() { case reflect.Int: val = uint64(x.(int)) case reflect.Int8: val = uint64(x.(int8)) case reflect.Int16: val = uint64(x.(int16)) case reflect.Int32: val = uint64(x.(int32)) case reflect.Int64: val = uint64(x.(int64)) case reflect.Uint: val = uint64(x.(uint)) case reflect.Uint8: val = uint64(x.(uint8)) case reflect.Uint16: val = uint64(x.(uint16)) case reflect.Uint32: val = uint64(x.(uint32)) case reflect.Uint64: val = x.(uint64) case reflect.Float32: val = uint64(x.(float32)) case reflect.Float64: val = uint64(x.(float64)) case reflect.String: x, err := strconv.ParseInt(x.(string), 10, 64) if err != nil { return uint64(0), &apperror.Err{Code: "non_numeric_string"} } val = uint64(x) default: panic("nonnumeric") return uint64(0), &apperror.Err{Code: "non_numeric_type"} } return val, nil } func NumericToFloat64(x interface{}) (float64, apperror.Error) { var val float64 switch reflect.TypeOf(x).Kind() { case reflect.Int: val = float64(x.(int)) case reflect.Int8: val = 
float64(x.(int8)) case reflect.Int16: val = float64(x.(int16)) case reflect.Int32: val = float64(x.(int32)) case reflect.Int64: val = float64(x.(int64)) case reflect.Uint: val = float64(x.(uint)) case reflect.Uint8: val = float64(x.(uint8)) case reflect.Uint16: val = float64(x.(uint16)) case reflect.Uint32: val = float64(x.(uint32)) case reflect.Uint64: val = float64(x.(uint64)) case reflect.Float32: val = float64(x.(float32)) case reflect.Float64: val = x.(float64) case reflect.String: x, err := strconv.ParseFloat(x.(string), 64) if err != nil { return val, &apperror.Err{Code: "non_numeric_string"} } val = x default: return float64(0), &apperror.Err{Code: "non_numeric_type"} } return val, nil } func CompareIntValues(condition string, a, b interface{}) (bool, apperror.Error) { aVal, errA := NumericToInt64(a) bVal, errB := NumericToInt64(b) if errA != nil { return false, errA } if errB != nil { return false, errB } switch condition { case "eq": return aVal == bVal, nil case "neq": return aVal != bVal, nil case "lt": return aVal < bVal, nil case "lte": return aVal <= bVal, nil case "gt": return aVal > bVal, nil case "gte": return aVal >= bVal, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: "Unknown filter type: " + condition, } } } func CompareUintValues(condition string, a, b interface{}) (bool, apperror.Error) { aVal, errA := NumericToUint64(a) bVal, errB := NumericToUint64(b) if errA != nil { return false, errA } if errB != nil { return false, errB } switch condition { case "eq": return aVal == bVal, nil case "neq": return aVal != bVal, nil case "lt": return aVal < bVal, nil case "lte": return aVal <= bVal, nil case "gt": return aVal > bVal, nil case "gte": return aVal >= bVal, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: "Unknown filter type: " + condition, } } } func CompareFloatValues(condition string, a, b interface{}) (bool, apperror.Error) { aVal, errA := NumericToFloat64(a) bVal, errB := 
NumericToFloat64(b) if errA != nil { return false, errA } if errB != nil { return false, errB } switch condition { case "eq": return aVal == bVal, nil case "neq": return aVal != bVal, nil case "lt": return aVal < bVal, nil case "lte": return aVal <= bVal, nil case "gt": return aVal > bVal, nil case "gte": return aVal >= bVal, nil default: return false, &apperror.Err{ Code: "unknown_filter", Message: "Unknown filter type: " + condition, } } } /** * Helpers for creating and converting structs and slices. */ // Set a pointer to a value with reflect. // If the value is a pointer to a type, and the the pointer target is not, // the value is automatically dereferenced. func SetPointer(ptr, val interface{}) { ptrVal := reflect.ValueOf(ptr) ptrType := ptrVal.Type() if ptrType.Kind() != reflect.Ptr { panic("Pointer expected") } target := ptrVal.Elem() targetType := target.Type() value := reflect.ValueOf(val) valueType := value.Type() if valueType.Kind() == reflect.Ptr && targetType.Kind() != reflect.Ptr { value = value.Elem() } target.Set(value) } func SetSlicePointer(ptr interface{}, values []interface{}) { target := reflect.ValueOf(ptr) if target.Type().Kind() != reflect.Ptr { panic("Must supply pointer to slice") } slice := target.Elem() sliceType := slice.Type() if sliceType.Kind() != reflect.Slice { panic("Must supply pointer to slice") } usePtr := sliceType.Elem().Kind() == reflect.Ptr for _, val := range values { if usePtr { slice = reflect.Append(slice, reflect.ValueOf(val)) } else { slice = reflect.Append(slice, reflect.ValueOf(val).Elem()) } } target.Elem().Set(slice) } // Returns pointer to a new struct with the same type as the given struct. func NewStruct(typ interface{}) (interface{}, error) { // Build new struct. 
item := reflect.ValueOf(typ) if item.Type().Kind() == reflect.Ptr { item = item.Elem() } if item.Type().Kind() != reflect.Struct { return nil, errors.New("struct_expected") } return reflect.New(reflect.TypeOf(item.Interface())).Interface(), nil } // Build a new slice that can contain elements of the given type. func NewSlice(typ interface{}) interface{} { // Build new array. // See http://stackoverflow.com/questions/25384640/why-golang-reflect-makeslice-returns-un-addressable-value // Create a slice to begin with var myType reflect.Type if t, ok := typ.(reflect.Type); ok { myType = t } else { myType = reflect.TypeOf(typ) } slice := reflect.MakeSlice(reflect.SliceOf(myType), 0, 0) // Create a pointer to a slice value and set it to the slice x := reflect.New(slice.Type()) x.Elem().Set(slice) return x.Elem().Interface() } func ConvertInterfaceToSlice(slice interface{}) ([]interface{}, error) { reflSlice := reflect.ValueOf(slice) if reflSlice.Type().Kind() == reflect.Ptr { reflSlice = reflSlice.Elem() } if reflSlice.Type().Kind() != reflect.Slice { return nil, errors.New("slice_expected") } result := make([]interface{}, 0) for i := 0; i < reflSlice.Len(); i++ { itemVal := reflSlice.Index(i) if itemVal.Type().Kind() == reflect.Struct { itemVal = itemVal.Addr() } result = append(result, itemVal.Interface()) } return result, nil } func InterfaceToTypedSlice(itemType reflect.Type, slice []interface{}) interface{} { newSlice := reflect.ValueOf(NewSlice(itemType)) for _, item := range slice { newSlice = reflect.Append(newSlice, reflect.ValueOf(item)) } return newSlice.Interface() } // Convert a slice of type interface{} to a []Model slice. 
func InterfaceToModelSlice(slice interface{}) ([]Model, error) { reflSlice := reflect.ValueOf(slice) if reflSlice.Type().Kind() == reflect.Ptr { reflSlice = reflSlice.Elem() } if reflSlice.Type().Kind() != reflect.Slice { return nil, errors.New("slice_expected") } result := make([]Model, 0) for i := 0; i < reflSlice.Len(); i++ { itemVal := reflSlice.Index(i) if itemVal.Type().Kind() == reflect.Struct { itemVal = itemVal.Addr() } item := itemVal.Interface() // Check that slice items actually implement model interface. // Only needed once. modelItem, ok := item.(Model) if i == 0 && !ok { return nil, errors.New("slice_values_do_not_implement_model_if") } result = append(result, modelItem) } return result, nil } // Convert a slice of type []Model to []interface{}. func ModelToInterfaceSlice(models []Model) []interface{} { slice := make([]interface{}, 0) for _, m := range models { slice = append(slice, m.(interface{})) } return slice } /** * Sorter for sorting structs by field. */ type structFieldSorter struct { items []interface{} field string ascending bool } func (s structFieldSorter) Len() int { return len(s.items) } func (s structFieldSorter) Swap(i, j int) { s.items[i], s.items[j] = s.items[j], s.items[i] } func (s structFieldSorter) Less(i, j int) bool { valA, err := GetStructFieldValue(s.items[i], s.field) if err != nil { panic("Sorting failure: " + err.Error()) } valB, err := GetStructFieldValue(s.items[j], s.field) if err != nil { panic("Sorting failure: " + err.Error()) } less, err := CompareValues("lt", valA, valB) if err != nil { panic("Sorting failure: " + err.Error()) } if s.ascending { return less } else { return !less } } func StructFieldSorter(items []interface{}, field string, asc bool) structFieldSorter { return structFieldSorter{ items: items, field: field, ascending: asc, } } func SortStructSlice(items []interface{}, field string, ascending bool) { sort.Sort(StructFieldSorter(items, field, ascending)) } /** * Setting and getting fields from a 
struct with reflect. */ // Given a struct or a pointer to a struct, retrieve the value of a field from // the struct with reflection. func GetStructFieldValue(s interface{}, fieldName string) (interface{}, apperror.Error) { // Check if struct is valid. if s == nil { return nil, &apperror.Err{Code: "pointer_or_struct_expected"} } // Check if it is a pointer, and if so, dereference it. v := reflect.ValueOf(s) if v.Type().Kind() == reflect.Ptr { v = v.Elem() } if v.Type().Kind() != reflect.Struct { return nil, &apperror.Err{Code: "struct_expected"} } field := v.FieldByName(fieldName) if !field.IsValid() { return nil, &apperror.Err{ Code: "field_not_found", Message: fmt.Sprintf("struct %v does not have field '%v'", v.Type(), fieldName), } } return field.Interface(), nil } func GetStructField(s interface{}, fieldName string) (reflect.Value, apperror.Error) { // Check if struct is valid. if s == nil { return reflect.Value{}, &apperror.Err{Code: "pointer_or_struct_expected"} } // Check if it is a pointer, and if so, dereference it. v := reflect.ValueOf(s) if v.Type().Kind() == reflect.Ptr { v = v.Elem() } if v.Type().Kind() != reflect.Struct { return reflect.Value{}, &apperror.Err{Code: "struct_expected"} } field := v.FieldByName(fieldName) if !field.IsValid() { return reflect.Value{}, &apperror.Err{ Code: "field_not_found", Message: fmt.Sprintf("struct does not have field '%v'", fieldName), } } return field, nil } // Given a pointer to a struct, set the given field to the given value. // If the target value is not a string, it will be automatically converted // to the proper type. // Returns an error if no pointer to a struct is given, if the field does not // exist, or if the string value can not be converted to the actual type. 
// SetStructFieldValueFromString parses val and assigns it to the named field
// of the struct pointed to by obj. Conversion is done by
// ConvertStringToType based on the field's kind.
func SetStructFieldValueFromString(obj interface{}, fieldName string, val string) apperror.Error {
	objVal := reflect.ValueOf(obj)
	if objVal.Type().Kind() != reflect.Ptr {
		return &apperror.Err{Code: "pointer_expected"}
	}
	objVal = objVal.Elem()
	if objVal.Type().Kind() != reflect.Struct {
		return &apperror.Err{Code: "pointer_to_struct_expected"}
	}

	field := objVal.FieldByName(fieldName)
	if !field.IsValid() {
		return &apperror.Err{
			Code:    "unknown_field",
			Message: fmt.Sprintf("Field %v does not exist on %v", fieldName, objVal),
		}
	}

	//fieldType, _ := objType.FieldByName(fieldName)
	convertedVal, err := ConvertStringToType(val, field.Type().Kind())
	if err != nil {
		// The conversion error message is reused as the apperror code.
		return &apperror.Err{Code: err.Error()}
	}

	field.Set(reflect.ValueOf(convertedVal))
	return nil
}

// GetModelCollection determines the collection name for a model: either via
// its ModelCollectionHook, or derived from the struct name
// (CamelCase -> underscore, pluralized).
func GetModelCollection(model interface{}) (string, apperror.Error) {
	// If the model implements .Collection(), call it.
	if hook, ok := model.(ModelCollectionHook); ok {
		collection := hook.Collection()
		if collection != "" {
			return collection, nil
		}
	}

	typ := reflect.TypeOf(model)
	// Dereference pointer.
	if typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	// Check if it is a struct.
	if typ.Kind() != reflect.Struct {
		return "", &apperror.Err{
			Code:    "invalid_model",
			Message: fmt.Sprintf("Expected model struct or pointer to struct, got %v", typ),
		}
	}

	collection := CamelCaseToUnderscore(typ.Name())
	collection = Pluralize(collection)
	return collection, nil
}

// MustGetModelCollection is like GetModelCollection but panics on failure.
func MustGetModelCollection(model interface{}) string {
	collection, err := GetModelCollection(model)
	if err != nil {
		panic("Could not determine collection: " + err.Error())
	}
	return collection
}

// GetModelID returns the value of the model's primary key field, as named by
// info.PkField.
func GetModelID(info *ModelInfo, m interface{}) (interface{}, apperror.Error) {
	val, err := GetStructFieldValue(m, info.PkField)
	if err != nil {
		return nil, err
	}
	return val, nil
}

// GetModelSliceFieldValues collects the value of fieldName from each model,
// failing on the first lookup error.
func GetModelSliceFieldValues(models []interface{}, fieldName string) ([]interface{}, apperror.Error) {
	vals := make([]interface{}, 0)
	for _, model := range models {
		val, err := GetStructFieldValue(model, fieldName)
		if err != nil {
			return nil, err
		}
		vals = append(vals, val)
	}
	return vals, nil
}

// Set a struct field.
// Returns an error if the object is not a struct or a pointer to a struct, or if
// the field does not exist.
// NOTE(review): field.Set panics if value's type is not assignable to the
// field — no conversion is attempted here.
func SetStructField(obj interface{}, fieldName string, value interface{}) error {
	val := reflect.ValueOf(obj)
	// Make sure obj is a pointer.
	if val.Type().Kind() != reflect.Ptr {
		return errors.New("pointer_to_struct_expected")
	}
	// Dereference pointer.
	val = val.Elem()
	// Make sure obj points to a struct.
	if val.Type().Kind() != reflect.Struct {
		return errors.New("struct_expected")
	}

	field := val.FieldByName(fieldName)
	if !field.IsValid() {
		return errors.New("unknown_field")
	}

	field.Set(reflect.ValueOf(value))
	return nil
}

// Given a struct, set the specified field that contains either a single Model
// or a model slice to the given models.
// If the target field type is struct or pointer to struct, it will be set to
// the first model in []models.
// If it is a slice, it will be set to the models with the correct type.
func SetStructModelField(obj interface{}, fieldName string, models []interface{}) error { objVal := reflect.ValueOf(obj) if objVal.Type().Kind() != reflect.Ptr { return errors.New("pointer_expected") } if objVal.Elem().Type().Kind() != reflect.Struct { return errors.New("pointer_to_struct_expected") } field := objVal.Elem().FieldByName(fieldName) if !field.IsValid() { return errors.New("unknown_field") } fieldType := field.Type().Kind() // TODO: in each clause, check that the target field conforms to the vale to set. if fieldType == reflect.Struct { field.Set(reflect.ValueOf(models[0]).Elem()) } else if fieldType == reflect.Ptr { ptr := reflect.New(reflect.ValueOf(models[0]).Type()) ptr.Elem().Set(reflect.ValueOf(models[0])) field.Set(ptr.Elem()) } else if fieldType == reflect.Slice { sliceType := field.Type().Elem() slice := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, 0) for _, model := range models { val := reflect.ValueOf(model) if val.Type().Kind() == reflect.Ptr { val = val.Elem() } if sliceType.Kind() == reflect.Ptr { slice = reflect.Append(slice, val.Addr()) } else { slice = reflect.Append(slice, val) } } field.Set(slice) } else { return errors.New("unsupported_field_type") } return nil } func ModelToMap(info *ModelInfo, model interface{}, forBackend, marshal bool, includeRelations bool) (map[string]interface{}, apperror.Error) { data := make(map[string]interface{}) for fieldName := range info.FieldInfo { field := info.FieldInfo[fieldName] if field.Ignore { continue } if field.IsRelation() && !includeRelations { continue } // Todo: avoid repeated work by GetStructFieldValue() val, err := GetStructFieldValue(model, fieldName) if err != nil { return nil, err } // Ignore zero values if specified. // Note that numeric fields are not included, since 0 is their zero value. // Also, primary keys are ignored. 
if field.IgnoreIfZero && !field.PrimaryKey && !IsNumericKind(field.Type.Kind()) && IsZero(val) { continue } reflVal := reflect.ValueOf(val) if reflVal.IsValid() { reflType := reflVal.Type() if reflType.Kind() == reflect.Ptr && forBackend { if reflVal.IsNil() { val = nil } else { val = reflVal.Elem().Interface() } } } if forBackend && field.Marshal { if IsZero(val) { continue } js, err := json.Marshal(val) if err != nil { return nil, &apperror.Err{ Code: "marshal_error", Message: fmt.Sprintf("Could not marshal %v.%v to json: %v", info.Name, fieldName, err), } } val = js } name := fieldName if forBackend { name = field.BackendName } else if marshal { name = field.MarshalName } data[name] = val } return data, nil } func ModelToJson(info *ModelInfo, model Model, includeRelations bool) ([]byte, apperror.Error) { if info == nil { var err apperror.Error info, err = BuildModelInfo(model) if err != nil { return nil, err } } data, err := ModelToMap(info, model, false, true, includeRelations) if err != nil { return nil, err } js, err2 := json.Marshal(data) if err2 != nil { return nil, &apperror.Err{ Code: "json_marshal_error", Message: err2.Error(), } } return js, nil } // ModelFieldDiff compares two models and returns a list of fields that are different. 
func ModelFieldDiff(info *ModelInfo, m1, m2 interface{}) []string { m1Data, _ := ModelToMap(info, m1, false, false, false) m2Data, _ := ModelToMap(info, m2, false, false, false) diff := make([]string, 0) for key, m1Val := range m1Data { if m2Val, ok := m2Data[key]; ok { if m1Val != m2Val { diff = append(diff, key) } } } return diff } func BuildModelFromMap(info *ModelInfo, data map[string]interface{}) (interface{}, apperror.Error) { model, err := NewStruct(info.Item) if err != nil { return nil, &apperror.Err{ Code: "model_build_error", Message: err.Error(), } } err = UpdateModelFromData(info, model, data) if err != nil { return nil, &apperror.Err{ Code: "model_update_error", Message: err.Error(), } } return model, nil } func UpdateModelFromData(info *ModelInfo, obj interface{}, data map[string]interface{}) apperror.Error { ptrVal := reflect.ValueOf(obj) if ptrVal.Type().Kind() != reflect.Ptr { return &apperror.Err{ Code: "pointer_expected", } } val := ptrVal.Elem() if val.Type().Kind() != reflect.Struct { return &apperror.Err{ Code: "pointer_to_struct_expected", } } for key := range data { // Try to find field by backend name. fieldInfo := info.FieldByBackendName(key) if fieldInfo == nil { // Does not match a backend name. // Try to find field by marshal name to support unmarshalled data. fieldInfo = info.FieldByMarshalName(key) // If key does not match a marshal name either, just assume it to be a plain struct field name. if fieldInfo == nil { fieldInfo = info.FieldInfo[key] } } if fieldInfo == nil { continue } if fieldInfo.Ignore { continue } // Need special handling for point type. 
if strings.HasSuffix(fieldInfo.StructType, "go-dukedb.Point") { p := new(Point) _, err := fmt.Sscanf(data[key].(string), "(%f,%f)", &p.Lat, &p.Lon) if err != nil { return &apperror.Err{ Code: "point_conversion_error", Message: fmt.Sprintf("Could not parse point specification: %v", data[key]), } } if fieldInfo.Type.Kind() == reflect.Ptr { data[key] = p } else { data[key] = *p } } // Handle marshalled fields. if fieldInfo.Marshal { var marshalledData []byte if strVal, ok := data[key].(string); ok { if strVal != "" { marshalledData = []byte(strVal) } } else if bytes, ok := data[key].([]byte); ok { if len(bytes) > 0 { marshalledData = bytes } } if marshalledData != nil { itemVal := reflect.New(fieldInfo.Type) itemPtr := itemVal.Interface() if err := json.Unmarshal(marshalledData, itemPtr); err != nil { return apperror.Wrap(err, "marshal_field_unmarshal_error", fmt.Sprintf("Could not unmarshal the content of field %v", fieldInfo.Name)) } data[key] = itemVal.Elem().Interface() } else { continue } } SetModelValue(fieldInfo, val.FieldByName(fieldInfo.Name), data[key]) } return nil } func SetModelValue(info *FieldInfo, field reflect.Value, rawValue interface{}) { val := reflect.ValueOf(rawValue) // Skip invalid. if !val.IsValid() { return } valKind := val.Type().Kind() fieldKind := info.Type.Kind() // For the same type, skip complicated comparison/casting. if valKind == fieldKind { field.Set(val) return } // Try to convert using Convert() method. convertedVal, err := Convert(rawValue, info.Type) if err == nil { field.Set(reflect.ValueOf(convertedVal)) return } // Nothing worked. 
panic(fmt.Sprintf("Could not convert type %v (%v) to type %v: %v", valKind, rawValue, fieldKind, err)) } func BuildModelSliceFromMap(info *ModelInfo, items []map[string]interface{}) (interface{}, apperror.Error) { slice := NewSlice(info.Item) sliceVal := reflect.ValueOf(slice) for _, data := range items { model, err := BuildModelFromMap(info, data) if err != nil { return nil, err } sliceVal = reflect.Append(sliceVal, reflect.ValueOf(model)) } return sliceVal.Interface(), nil } /** * Query related. */ func NormalizeSelectStatement(stmt SelectStatement, info *ModelInfo, allInfo ModelInfos) apperror.Error { // Fix nesting (joins and fields). if err := stmt.FixNesting(); err != nil { return err } // Normalize fields. for _, field := range stmt.Fields() { if err := NormalizeExpression(field, info, allInfo); err != nil { return err } } // Normalize filter. if err := NormalizeExpression(stmt.Filter(), info, allInfo); err != nil { return err } // Normalize sorts. for _, sort := range stmt.Sorts() { if err := NormalizeExpression(sort, info, allInfo); err != nil { return err } } // Normalize joins. for _, join := range stmt.Joins() { if err := NormalizeExpression(join, info, allInfo); err != nil { return err } } return nil } func NormalizeExpression(expression Expression, info *ModelInfo, allInfo ModelInfos) apperror.Error { if expression == nil { return nil } switch expr := expression.(type) { case UpdateStatement: if err := NormalizeExpression(expr.Select(), info, allInfo); err != nil { return err } // Same as for CreateStatement. 
backendName := allInfo.FindBackendName(expr.Collection()) if backendName == "" { return apperror.New("unknown_collection", fmt.Sprintf("The collection %v does not exist", expr.Collection())) } expr.SetCollection(backendName) for _, fieldVal := range expr.Values() { if err := NormalizeExpression(fieldVal, info, allInfo); err != nil { return err } } case CreateStatement: backendName := allInfo.FindBackendName(expr.Collection()) if backendName == "" { return apperror.New("unknown_collection", fmt.Sprintf("The collection %v does not exist", expr.Collection())) } expr.SetCollection(backendName) for _, fieldVal := range expr.Values() { if err := NormalizeExpression(fieldVal, info, allInfo); err != nil { return err } } case SelectStatement: if err := NormalizeSelectStatement(expr, info, allInfo); err != nil { return err } case JoinStatement: if err := NormalizeExpression(expr.JoinCondition(), info, allInfo); err != nil { return err } if err := NormalizeExpression(expr.SelectStatement(), info, allInfo); err != nil { return err } case MultiExpression: for _, expr := range expr.Expressions() { if err := NormalizeExpression(expr, info, allInfo); err != nil { return err } } case NestedExpression: if err := NormalizeExpression(expr.Expression(), info, allInfo); err != nil { return err } case FilterExpression: if err := NormalizeExpression(expr.Field(), info, allInfo); err != nil { return err } if err := NormalizeExpression(expr.Clause(), info, allInfo); err != nil { return err } case SortExpression: // Ignore. // Should actually be handled by NestedExpression clause above. case FieldValueExpression: if err := NormalizeExpression(expr.Field(), info, allInfo); err != nil { return err } if err := NormalizeExpression(expr.Value(), info, allInfo); err != nil { return err } case CollectionFieldIdentifierExpression: colInfo := info // First, normalize the collection name, if set. 
if expr.Collection() != "" { backendName := "" for _, info := range allInfo { backendName = allInfo.FindBackendName(expr.Collection()) } if backendName == "" { return apperror.New("unknown_collection", fmt.Sprintf("The collection %v does not exist", expr.Collection), true) } expr.SetCollection(backendName) } // We found a valid collection name. // Now normalize the field name. fieldName := colInfo.FindBackendName(expr.Field()) if fieldName == "" { return apperror.New("unknown_field", fmt.Sprintf("The collection %v has no field %v", colInfo.Collection, expr.Field)) } expr.SetField(fieldName) case IdentifierExpression: fieldName := info.FindBackendName(expr.Identifier()) if fieldName == "" { return apperror.New("unknown_field", fmt.Sprintf("The collection %v has no field %v", info.Collection, expr.Identifier)) } expr.SetIdentifier(fieldName) default: panic(fmt.Sprintf("Unhandled expression type: %v\n", reflect.TypeOf(expr))) } return nil } /** * Model hooks. */ func ValidateModel(info *ModelInfo, m interface{}) apperror.Error { val := reflect.ValueOf(m).Elem() for fieldName, fieldInfo := range info.FieldInfo { // Fill in default values. if fieldInfo.Default != "" { fieldVal := val.FieldByName(fieldName) if IsZero(fieldVal.Interface()) { convertedVal, err := Convert(fieldInfo.Default, fieldInfo.Type) if err != nil { msg := fmt.Sprintf("Could not convert the default value '%v' for field %v to type %v", fieldInfo.Default, fieldName, fieldInfo.Type.Kind()) return apperror.Wrap(err, "default_value_conversion_error", msg) } fieldVal.Set(reflect.ValueOf(convertedVal)) } } // If NotNull is set to true, and the field is not a primary key, validate that it is // not zero. // Note: numeric fields will not be checked, since their zero value is "0", which might // be a valid field value. 
if fieldInfo.NotNull && !fieldInfo.PrimaryKey && !IsNumericKind(fieldInfo.Type.Kind()) { fieldVal := val.FieldByName(fieldName) if IsZero(fieldVal.Interface()) { return &apperror.Err{ Code: "empty_required_field", Message: fmt.Sprintf("The required field %v is empty", fieldName), Public: true, } } } else if fieldInfo.Min > 0 || fieldInfo.Max > 0 { // Either min or max is set, so check length. fieldVal := val.FieldByName(fieldName) var length float64 if fieldInfo.Type.Kind() == reflect.String { length = float64(fieldVal.Len()) } else if IsNumericKind(fieldInfo.Type.Kind()) { length = fieldVal.Convert(reflect.TypeOf(float64(0))).Interface().(float64) } else { // Not string or numeric, so can't check. continue } if fieldInfo.Min > 0 && length < fieldInfo.Min { return &apperror.Err{ Code: "shorter_than_min_length", Message: fmt.Sprintf("The field %v is shorter than the minimum length %v", fieldName, fieldInfo.Min), } } else if fieldInfo.Max > 0 && length > fieldInfo.Max { return &apperror.Err{ Code: "longer_than_max_length", Message: fmt.Sprintf("The field %v is longer than the maximum length %v", fieldName, fieldInfo.Max), } } } } // If the model implements ModelValidateHook, call it. if validator, ok := m.(ModelValidateHook); ok { if err := validator.Validate(); err != nil { // Check if error is an apperror, and return it if so. if apperr, ok := err.(apperror.Error); ok { return apperr } else { // Not an apperror, so create a new one. 
return apperror.New(err.Error()) } } } return nil } func CallModelHook(b Backend, m interface{}, hook string) apperror.Error { switch hook { case "Validate": if h, ok := m.(ModelValidateHook); ok { err := h.Validate() if err == nil { return nil } else if apperr, ok := err.(apperror.Error); ok { return apperr } else { return apperror.Wrap(err, "validation_error") } } return nil case "BeforeCreate": if h, ok := m.(ModelBeforeCreateHook); ok { err := h.BeforeCreate(b) if err == nil { return nil } else if apperr, ok := err.(apperror.Error); ok { return apperr } else { return apperror.Wrap(err, "before_create_error") } } return nil case "AfterCreate": if h, ok := m.(ModelAfterCreateHook); ok { h.AfterCreate(b) } return nil case "BeforeUpdate": if h, ok := m.(ModelBeforeUpdateHook); ok { err := h.BeforeUpdate(b) if err == nil { return nil } else if apperr, ok := err.(apperror.Error); ok { return apperr } else { return apperror.Wrap(err, "before_update_error") } } return nil case "AfterUpdate": if h, ok := m.(ModelAfterUpdateHook); ok { h.AfterUpdate(b) } return nil case "BeforeDelete": if h, ok := m.(ModelBeforeDeleteHook); ok { err := h.BeforeDelete(b) if err == nil { return nil } else if apperr, ok := err.(apperror.Error); ok { return apperr } else { return apperror.Wrap(err, "before_create_error") } } return nil case "AfterDelete": if h, ok := m.(ModelAfterDeleteHook); ok { h.AfterDelete(b) } return nil case "AfterQuery": if h, ok := m.(ModelAfterQueryHook); ok { h.AfterQuery(b) } return nil default: return &apperror.Err{ Code: "invalid_hook", Message: fmt.Sprintf("Unknown hook %v", hook), } } }
package btrfs import ( "fmt" "github.com/dennwc/btrfs/mtab" "os" "path/filepath" "strings" "syscall" "unsafe" ) func isBtrfs(path string) (bool, error) { var stfs syscall.Statfs_t if err := syscall.Statfs(path, &stfs); err != nil { return false, &os.PathError{Op: "statfs", Path: path, Err: err} } return stfs.Type == SuperMagic, nil } func findMountRoot(path string) (string, error) { mounts, err := mtab.Mounts() if err != nil { return "", err } longest := "" isBtrfs := false for _, m := range mounts { if !strings.HasPrefix(path, m.Mount) { continue } if len(longest) < len(m.Mount) { longest = m.Mount isBtrfs = m.Type == "btrfs" } } if longest == "" { return "", os.ErrNotExist } else if !isBtrfs { return "", ErrNotBtrfs{Path: longest} } return filepath.Abs(longest) } // openDir does the following checks before calling Open: // 1: path is in a btrfs filesystem // 2: path is a directory func openDir(path string) (*os.File, error) { if ok, err := isBtrfs(path); err != nil { return nil, err } else if !ok { return nil, ErrNotBtrfs{Path: path} } file, err := os.Open(path) if err != nil { return nil, err } else if st, err := file.Stat(); err != nil { file.Close() return nil, err } else if !st.IsDir() { file.Close() return nil, fmt.Errorf("not a directory: %s", path) } return file, nil } type searchResult struct { TransID uint64 ObjectID objectID Type treeKeyType Offset uint64 Data []byte } func treeSearchRaw(mnt *os.File, key btrfs_ioctl_search_key) (out []searchResult, _ error) { args := btrfs_ioctl_search_args{ key: key, } if err := iocTreeSearch(mnt, &args); err != nil { return nil, err } out = make([]searchResult, 0, args.key.nr_items) buf := args.buf[:] for i := 0; i < int(args.key.nr_items); i++ { h := (*btrfs_ioctl_search_header)(unsafe.Pointer(&buf[0])) buf = buf[unsafe.Sizeof(btrfs_ioctl_search_header{}):] out = append(out, searchResult{ TransID: h.transid, ObjectID: h.objectid, Offset: h.offset, Type: h.typ, Data: buf[:h.len:h.len], // TODO: reallocate? 
}) buf = buf[h.len:] } return out, nil } Fix "constant 2435016766 overflows int32" on GOARCH=arm Fix #1 Signed-off-by: Akihiro Suda <10ddef0dea2a66ab193b07198faa879adcb213d8@hco.ntt.co.jp> package btrfs import ( "fmt" "github.com/dennwc/btrfs/mtab" "os" "path/filepath" "strings" "syscall" "unsafe" ) func isBtrfs(path string) (bool, error) { var stfs syscall.Statfs_t if err := syscall.Statfs(path, &stfs); err != nil { return false, &os.PathError{Op: "statfs", Path: path, Err: err} } return int64(stfs.Type) == SuperMagic, nil } func findMountRoot(path string) (string, error) { mounts, err := mtab.Mounts() if err != nil { return "", err } longest := "" isBtrfs := false for _, m := range mounts { if !strings.HasPrefix(path, m.Mount) { continue } if len(longest) < len(m.Mount) { longest = m.Mount isBtrfs = m.Type == "btrfs" } } if longest == "" { return "", os.ErrNotExist } else if !isBtrfs { return "", ErrNotBtrfs{Path: longest} } return filepath.Abs(longest) } // openDir does the following checks before calling Open: // 1: path is in a btrfs filesystem // 2: path is a directory func openDir(path string) (*os.File, error) { if ok, err := isBtrfs(path); err != nil { return nil, err } else if !ok { return nil, ErrNotBtrfs{Path: path} } file, err := os.Open(path) if err != nil { return nil, err } else if st, err := file.Stat(); err != nil { file.Close() return nil, err } else if !st.IsDir() { file.Close() return nil, fmt.Errorf("not a directory: %s", path) } return file, nil } type searchResult struct { TransID uint64 ObjectID objectID Type treeKeyType Offset uint64 Data []byte } func treeSearchRaw(mnt *os.File, key btrfs_ioctl_search_key) (out []searchResult, _ error) { args := btrfs_ioctl_search_args{ key: key, } if err := iocTreeSearch(mnt, &args); err != nil { return nil, err } out = make([]searchResult, 0, args.key.nr_items) buf := args.buf[:] for i := 0; i < int(args.key.nr_items); i++ { h := (*btrfs_ioctl_search_header)(unsafe.Pointer(&buf[0])) buf = 
buf[unsafe.Sizeof(btrfs_ioctl_search_header{}):] out = append(out, searchResult{ TransID: h.transid, ObjectID: h.objectid, Offset: h.offset, Type: h.typ, Data: buf[:h.len:h.len], // TODO: reallocate? }) buf = buf[h.len:] } return out, nil }
package conduit import ( "encoding/json" "github.com/karlseguin/typed" "io/ioutil" "net/http" "net/url" "reflect" "strings" "testing" ) // containsString checks whether s contains e. func containsString(s []string, e string) bool { for _, a := range s { if a == e { return true } } return false } // call does the heavy lifting of sending a request to conduit, // handling error responses by returning *ConduitError, // and unmarshalling the JSON result into the specified // result interface{}. func call(endpointURL string, params interface{}, result interface{}) error { form := url.Values{} form.Add("output", "json") if params != nil { b, err := json.Marshal(params) if err != nil { return err } form.Add("params", string(b)) _, isConduitConnect := params.(*pConduitConnect) if isConduitConnect { form.Add("__conduit__", "true") } } req, err := http.NewRequest("POST", endpointURL, strings.NewReader(form.Encode())) if err != nil { return err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") client := &http.Client{} resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return err } jsonBody, err := typed.Json(body) if err != nil { return err } // parse any error conduit returned first if jsonBody.String("error_code") != "" { return &ConduitError{ code: jsonBody.String("error_code"), info: jsonBody.String("error_info"), } } // if no error, parse the expected result resultBytes, err := jsonBody.ToBytes("result") if err != nil { return err } if result != nil { if err = json.Unmarshal(resultBytes, &result); err != nil { return err } } return nil } /* Test Helpers */ func expect(t *testing.T, a interface{}, b interface{}) { if a != b { t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) } } Check for resultBytes being nil, instead of result package conduit import ( "encoding/json" "io/ioutil" "net/http" "net/url" "reflect" "strings" 
// containsString reports whether e occurs in s.
func containsString(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
package osgridconverter import ( "math" ) // toDegrees converts radians to numeric degrees func toDegrees(input float64) float64 { return input * 180 / math.Pi } // toRadians converts numeric degrees to radians func toRadians(input float64) float64 { return input * math.Pi / 180 } add comments package osgridconverter import ( "math" ) // toDegrees converts radians to numeric degrees. func toDegrees(input float64) float64 { return input * 180 / math.Pi } // toRadians converts numeric degrees to radians. func toRadians(input float64) float64 { return input * math.Pi / 180 }
/* Copyright (c) 2015-2016 Christopher Young Distributable under the terms of The "BSD New"" License that can be found in the LICENSE file, herein included as part of this header. managementinterface.go: Web interfaces (JSON and websocket), web server for web interface HTML. */ package main import ( "database/sql" "github.com/elgs/gosqljson" _ "github.com/mattn/go-sqlite3" "encoding/hex" "encoding/json" "fmt" humanize "github.com/dustin/go-humanize" "golang.org/x/net/websocket" "io" "io/ioutil" "log" "net/http" "os" "os/exec" "strings" "syscall" "text/template" "time" ) type SettingMessage struct { Setting string `json:"setting"` Value bool `json:"state"` } // Weather updates channel. var weatherUpdate *uibroadcaster var trafficUpdate *uibroadcaster // Tables in the database that can be queried var tables = map[string]string{ "flights": "startup", "status": "status", "uat": "messages", "es": "es_messages", "ownship": "mySituation", "traffic": "traffic"} /* The /weather websocket starts off by sending the current buffer of weather messages, then sends updates as they are received. */ func handleWeatherWS(conn *websocket.Conn) { // Subscribe the socket to receive updates. weatherUpdate.AddSocket(conn) // Connection closes when function returns. Since uibroadcast is writing and we don't need to read anything (for now), just keep it busy. for { buf := make([]byte, 1024) _, err := conn.Read(buf) if err != nil { break } if buf[0] != 0 { // Dummy. continue } time.Sleep(1 * time.Second) } } // Works just as weather updates do. func handleTrafficWS(conn *websocket.Conn) { trafficMutex.Lock() for _, traf := range traffic { if !traf.Position_valid { // Don't send unless a valid position exists. continue } trafficJSON, _ := json.Marshal(&traf) conn.Write(trafficJSON) } // Subscribe the socket to receive updates. trafficUpdate.AddSocket(conn) trafficMutex.Unlock() // Connection closes when function returns. 
Since uibroadcast is writing and we don't need to read anything (for now), just keep it busy. for { buf := make([]byte, 1024) _, err := conn.Read(buf) if err != nil { break } if buf[0] != 0 { // Dummy. continue } time.Sleep(1 * time.Second) } } func handleStatusWS(conn *websocket.Conn) { // log.Printf("Web client connected.\n") timer := time.NewTicker(1 * time.Second) for { // The below is not used, but should be if something needs to be streamed from the web client ever in the future. /* var msg SettingMessage err := websocket.JSON.Receive(conn, &msg) if err == io.EOF { break } else if err != nil { log.Printf("handleStatusWS: %s\n", err.Error()) } else { // Use 'msg'. } */ // Send status. <-timer.C update, _ := json.Marshal(&globalStatus) _, err := conn.Write(update) if err != nil { // log.Printf("Web client disconnected.\n") break } } } func handleSituationWS(conn *websocket.Conn) { timer := time.NewTicker(100 * time.Millisecond) for { <-timer.C situationJSON, _ := json.Marshal(&mySituation) _, err := conn.Write(situationJSON) if err != nil { break } } } // AJAX call - /getStatus. Responds with current global status // a webservice call for the same data available on the websocket but when only a single update is needed func handleStatusRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) statusJSON, _ := json.Marshal(&globalStatus) fmt.Fprintf(w, "%s\n", statusJSON) } // AJAX call - /getSituation. Responds with current situation (lat/lon/gdspeed/track/pitch/roll/heading/etc.) func handleSituationRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) situationJSON, _ := json.Marshal(&mySituation) fmt.Fprintf(w, "%s\n", situationJSON) } // AJAX call - /getTowers. Responds with all ADS-B ground towers that have sent messages that we were able to parse, along with its stats. 
func handleTowersRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) ADSBTowerMutex.Lock() towersJSON, err := json.Marshal(&ADSBTowers) if err != nil { log.Printf("Error sending tower JSON data: %s\n", err.Error()) } // for testing purposes, we can return a fixed reply // towersJSON = []byte(`{"(38.490880,-76.135554)":{"Lat":38.49087953567505,"Lng":-76.13555431365967,"Signal_strength_last_minute":100,"Signal_strength_max":67,"Messages_last_minute":1,"Messages_total":1059},"(38.978698,-76.309276)":{"Lat":38.97869825363159,"Lng":-76.30927562713623,"Signal_strength_last_minute":495,"Signal_strength_max":32,"Messages_last_minute":45,"Messages_total":83},"(39.179285,-76.668413)":{"Lat":39.17928457260132,"Lng":-76.66841268539429,"Signal_strength_last_minute":50,"Signal_strength_max":24,"Messages_last_minute":1,"Messages_total":16},"(39.666309,-74.315300)":{"Lat":39.66630935668945,"Lng":-74.31529998779297,"Signal_strength_last_minute":9884,"Signal_strength_max":35,"Messages_last_minute":4,"Messages_total":134}}`) fmt.Fprintf(w, "%s\n", towersJSON) ADSBTowerMutex.Unlock() } // AJAX call - /getSatellites. Responds with all GNSS satellites that are being tracked, along with status information. func handleSatellitesRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) satelliteMutex.Lock() satellitesJSON, err := json.Marshal(&Satellites) if err != nil { log.Printf("Error sending GNSS satellite JSON data: %s\n", err.Error()) } fmt.Fprintf(w, "%s\n", satellitesJSON) satelliteMutex.Unlock() } // AJAX call - /getSettings. Responds with all stratux.conf data. func handleSettingsGetRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) settingsJSON, _ := json.Marshal(&globalSettings) fmt.Fprintf(w, "%s\n", settingsJSON) } // AJAX call - /setSettings. receives via POST command, any/all stratux.conf data. 
func handleSettingsSetRequest(w http.ResponseWriter, r *http.Request) { // define header in support of cross-domain AJAX setNoCache(w) setJSONHeaders(w) w.Header().Set("Access-Control-Allow-Method", "GET, POST, OPTIONS") w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") // for an OPTION method request, we return header without processing. // this insures we are recognized as supporting cross-domain AJAX REST calls if r.Method == "POST" { // raw, _ := httputil.DumpRequest(r, true) // log.Printf("handleSettingsSetRequest:raw: %s\n", raw) decoder := json.NewDecoder(r.Body) for { var msg map[string]interface{} // support arbitrary JSON err := decoder.Decode(&msg) if err == io.EOF { break } else if err != nil { log.Printf("handleSettingsSetRequest:error: %s\n", err.Error()) } else { for key, val := range msg { // log.Printf("handleSettingsSetRequest:json: testing for key:%s of type %s\n", key, reflect.TypeOf(val)) switch key { case "UAT_Enabled": globalSettings.UAT_Enabled = val.(bool) case "ES_Enabled": globalSettings.ES_Enabled = val.(bool) case "Ping_Enabled": globalSettings.Ping_Enabled = val.(bool) case "GPS_Enabled": globalSettings.GPS_Enabled = val.(bool) case "AHRS_Enabled": globalSettings.AHRS_Enabled = val.(bool) case "DEBUG": globalSettings.DEBUG = val.(bool) case "DisplayTrafficSource": globalSettings.DisplayTrafficSource = val.(bool) case "ReplayLog": v := val.(bool) if v != globalSettings.ReplayLog { // Don't mark the files unless there is a change. globalSettings.ReplayLog = v } case "PPM": globalSettings.PPM = int(val.(float64)) case "FlightLogLevel": globalSettings.FlightLogLevel = int(val.(float64)) case "Baud": if serialOut, ok := globalSettings.SerialOutputs["/dev/serialout0"]; ok { //FIXME: Only one device for now. newBaud := int(val.(float64)) if newBaud == serialOut.Baud { // Same baud rate. No change. 
continue } log.Printf("changing /dev/serialout0 baud rate from %d to %d.\n", serialOut.Baud, newBaud) serialOut.Baud = newBaud // Close the port if it is open. if serialOut.serialPort != nil { log.Printf("closing /dev/serialout0 for baud rate change.\n") serialOut.serialPort.Close() serialOut.serialPort = nil } globalSettings.SerialOutputs["/dev/serialout0"] = serialOut } case "WatchList": globalSettings.WatchList = val.(string) case "OwnshipModeS": // Expecting a hex string less than 6 characters (24 bits) long. if len(val.(string)) > 6 { // Too long. continue } // Pad string, must be 6 characters long. vals := strings.ToUpper(val.(string)) for len(vals) < 6 { vals = "0" + vals } hexn, err := hex.DecodeString(vals) if err != nil { // Number not valid. log.Printf("handleSettingsSetRequest:OwnshipModeS: %s\n", err.Error()) continue } globalSettings.OwnshipModeS = fmt.Sprintf("%02X%02X%02X", hexn[0], hexn[1], hexn[2]) default: log.Printf("handleSettingsSetRequest:json: unrecognized key:%s\n", key) } } saveSettings() } } // while it may be redundent, we return the latest settings settingsJSON, _ := json.Marshal(&globalSettings) fmt.Fprintf(w, "%s\n", settingsJSON) } } func handleShutdownRequest(w http.ResponseWriter, r *http.Request) { syscall.Sync() syscall.Reboot(syscall.LINUX_REBOOT_CMD_POWER_OFF) } func doReboot() { syscall.Sync() syscall.Reboot(syscall.LINUX_REBOOT_CMD_RESTART) } func handleRebootRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) w.Header().Set("Access-Control-Allow-Method", "GET, POST, OPTIONS") w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") go delayReboot() } // AJAX call - /getClients. Responds with all connected clients. 
func handleClientsGetRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) clientsJSON, _ := json.Marshal(&outSockets) fmt.Fprintf(w, "%s\n", clientsJSON) } func getSQL(url string) (string, error) { var sql string = "" path := strings.Split(url, "/") // everything starts with "/flightlog" if path[1] != "flightlog" { return "", errors.New("Error - missing required 'flightlog' prefix") } // have to at least specify a table if len(path) < 3 { return "", errors.New("Error - missing parameters") } // table name is the 3rd item in the path var table string = tables[path[2]] if table == "" { return "", errors.New("Error - invalid table name") } // unless this is a request for the list of flights, a flight ID is required if table != "startup" && len(path) < 4 { return "", errors.New("Error - request must include a flight ID parameter") } // Return everything for the selected table and flight (startup) if table == "startup" { sql = fmt.Sprintf("SELECT * FROM startup ORDER BY id ASC\n") } else { startup, _ := strconv.Atoi(path[3]) sql = fmt.Sprintf("SELECT * FROM %s WHERE startup_id = %d ORDER BY timestamp_id ASC\n", table, startup) } // Limit value (max number of records to return) if len(path) > 4 { limit, err := strconv.Atoi(path[4]) if (err != nil) { return "", errors.New("Error - limit value must be an integer") } sql = sql + "LIMIT " + strconv.Itoa(limit); } // Offset value (from 0) if len(path) > 5 { offset, err := strconv.Atoi(path[5]) if (err != nil) { return "", errors.New("Error - offset value must be an integer") } sql = sql + " OFFSET " + strconv.Itoa(offset); } // SELECT * FROM {table} // WHERE startup_id = {startup} ORDER BY timestamp_id ASC // LIMIT {count} OFFSET {offset} return sql, nil } func handleFlightLogRequest(w http.ResponseWriter, r *http.Request) { db, err := sql.Open("sqlite3", dataLogFilef) if err != nil { log.Printf("sql.Open(): %s\n", err.Error()) } defer func() { db.Close() dataLogStarted = false }() stmt, err := 
getSQL(r.URL.String()) if err != nil { fmt.Println(err) return } m, err := gosqljson.QueryDbToMapJSON(db, "any", stmt) if err != nil { fmt.Println(err) return } setNoCache(w) setJSONHeaders(w) fmt.Fprintf(w, "%s\n", m) } func delayReboot() { time.Sleep(1 * time.Second) doReboot() } // Upload an update file. func handleUpdatePostRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) r.ParseMultipartForm(1024 * 1024 * 32) // ~32MB update. file, handler, err := r.FormFile("update_file") if err != nil { log.Printf("Update failed from %s (%s).\n", r.RemoteAddr, err.Error()) return } defer file.Close() // Special hardware builds. Don't allow an update unless the filename contains the hardware build name. if (len(globalStatus.HardwareBuild) > 0) && !strings.Contains(strings.ToLower(handler.Filename), strings.ToLower(globalStatus.HardwareBuild)) { w.WriteHeader(404) return } updateFile := fmt.Sprintf("/root/update-stratux-v.sh") f, err := os.OpenFile(updateFile, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { log.Printf("Update failed from %s (%s).\n", r.RemoteAddr, err.Error()) return } defer f.Close() io.Copy(f, file) log.Printf("%s uploaded %s for update.\n", r.RemoteAddr, updateFile) // Successful update upload. Now reboot. 
go delayReboot() } func setNoCache(w http.ResponseWriter) { w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Set("Pragma", "no-cache") w.Header().Set("Expires", "0") } func setJSONHeaders(w http.ResponseWriter) { w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") } func defaultServer(w http.ResponseWriter, r *http.Request) { // setNoCache(w) http.FileServer(http.Dir("/var/www")).ServeHTTP(w, r) } func handleroPartitionRebuild(w http.ResponseWriter, r *http.Request) { out, err := exec.Command("/usr/sbin/rebuild_ro_part.sh").Output() var ret_err error if err != nil { ret_err = fmt.Errorf("Rebuild RO Partition error: %s", err.Error()) } else { ret_err = fmt.Errorf("Rebuild RO Partition success: %s", out) } addSystemError(ret_err) } // https://gist.github.com/alexisrobert/982674. // Copyright (c) 2010-2014 Alexis ROBERT <alexis.robert@gmail.com>. const dirlisting_tpl = `<?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> <!-- Modified from lighttpd directory listing --> <head> <title>Index of {{.Name}}</title> <style type="text/css"> a, a:active {text-decoration: none; color: blue;} a:visited {color: #48468F;} a:hover, a:focus {text-decoration: underline; color: red;} body {background-color: #F5F5F5;} h2 {margin-bottom: 12px;} table {margin-left: 12px;} th, td { font: 90% monospace; text-align: left;} th { font-weight: bold; padding-right: 14px; padding-bottom: 3px;} td {padding-right: 14px;} td.s, th.s {text-align: right;} div.list { background-color: white; border-top: 1px solid #646464; border-bottom: 1px solid #646464; padding-top: 10px; padding-bottom: 14px;} div.foot { font: 90% monospace; color: #787878; padding-top: 4px;} </style> </head> <body> <h2>Index of {{.Name}}</h2> <div class="list"> <table summary="Directory Listing" 
cellpadding="0" cellspacing="0"> <thead><tr><th class="n">Name</th><th>Last Modified</th><th>Size (bytes)</th><th class="dl">Options</th></tr></thead> <tbody> {{range .Children_files}} <tr><td class="n"><a href="/logs/stratux/{{.Name}}">{{.Name}}</a></td><td>{{.Mtime}}</td><td>{{.Size}}</td><td class="dl"><a href="/logs/stratux/{{.Name}}">Download</a></td></tr> {{end}} </tbody> </table> </div> <div class="foot">{{.ServerUA}}</div> </body> </html>` type fileInfo struct { Name string Mtime string Size string } // Manages directory listings type dirlisting struct { Name string Children_files []fileInfo ServerUA string } //FIXME: This needs to be switched to show a "sessions log" from the sqlite database. func viewLogs(w http.ResponseWriter, r *http.Request) { var logPath string if _, err := os.Stat("/etc/FlightBox"); !os.IsNotExist(err) { logPath = "/root/log/" } else { // if not using the FlightBox config, use "normal" log file locations logPath = "/var/log/stratux/" } names, err := ioutil.ReadDir(logPath) if err != nil { return } fi := make([]fileInfo, 0) for _, val := range names { if val.Name()[0] == '.' 
{ continue } // Remove hidden files from listing if !val.IsDir() { mtime := val.ModTime().Format("2006-Jan-02 15:04:05") sz := humanize.Comma(val.Size()) fi = append(fi, fileInfo{Name: val.Name(), Mtime: mtime, Size: sz}) } } tpl, err := template.New("tpl").Parse(dirlisting_tpl) if err != nil { return } data := dirlisting{Name: r.URL.Path, ServerUA: "Stratux " + stratuxVersion + "/" + stratuxBuild, Children_files: fi} err = tpl.Execute(w, data) if err != nil { log.Printf("viewLogs() error: %s\n", err.Error()) } } func managementInterface() { weatherUpdate = NewUIBroadcaster() trafficUpdate = NewUIBroadcaster() http.HandleFunc("/", defaultServer) var logPath string if _, err := os.Stat("/etc/FlightBox"); !os.IsNotExist(err) { logPath = "/root/log" } else { // if not using the FlightBox config, use "normal" log file locations logPath = "/var/log" } http.Handle("/logs/", http.StripPrefix("/logs/", http.FileServer(http.Dir(logPath)))) http.Handle("/logs/stratux/", http.StripPrefix("/logs/stratux/", http.FileServer(http.Dir(logPath)))) http.HandleFunc("/view_logs/", viewLogs) http.HandleFunc("/status", func(w http.ResponseWriter, req *http.Request) { s := websocket.Server{ Handler: websocket.Handler(handleStatusWS)} s.ServeHTTP(w, req) }) http.HandleFunc("/situation", func(w http.ResponseWriter, req *http.Request) { s := websocket.Server{ Handler: websocket.Handler(handleSituationWS)} s.ServeHTTP(w, req) }) http.HandleFunc("/weather", func(w http.ResponseWriter, req *http.Request) { s := websocket.Server{ Handler: websocket.Handler(handleWeatherWS)} s.ServeHTTP(w, req) }) http.HandleFunc("/traffic", func(w http.ResponseWriter, req *http.Request) { s := websocket.Server{ Handler: websocket.Handler(handleTrafficWS)} s.ServeHTTP(w, req) }) http.HandleFunc("/getStatus", handleStatusRequest) http.HandleFunc("/getSituation", handleSituationRequest) http.HandleFunc("/getTowers", handleTowersRequest) http.HandleFunc("/getSatellites", handleSatellitesRequest) 
http.HandleFunc("/getSettings", handleSettingsGetRequest) http.HandleFunc("/setSettings", handleSettingsSetRequest) http.HandleFunc("/shutdown", handleShutdownRequest) http.HandleFunc("/reboot", handleRebootRequest) http.HandleFunc("/getClients", handleClientsGetRequest) http.HandleFunc("/updateUpload", handleUpdatePostRequest) http.HandleFunc("/roPartitionRebuild", handleroPartitionRebuild) http.HandleFunc("/flightlog/", handleFlightLogRequest) err := http.ListenAndServe(managementAddr, nil) if err != nil { log.Printf("managementInterface ListenAndServe: %s\n", err.Error()) } } Adding errors dependency /* Copyright (c) 2015-2016 Christopher Young Distributable under the terms of The "BSD New"" License that can be found in the LICENSE file, herein included as part of this header. managementinterface.go: Web interfaces (JSON and websocket), web server for web interface HTML. */ package main import ( "database/sql" "github.com/elgs/gosqljson" _ "github.com/mattn/go-sqlite3" "encoding/hex" "encoding/json" "fmt" humanize "github.com/dustin/go-humanize" "golang.org/x/net/websocket" "io" "io/ioutil" "log" "net/http" "os" "os/exec" "strings" "syscall" "text/template" "time" "errors" ) type SettingMessage struct { Setting string `json:"setting"` Value bool `json:"state"` } // Weather updates channel. var weatherUpdate *uibroadcaster var trafficUpdate *uibroadcaster // Tables in the database that can be queried var tables = map[string]string{ "flights": "startup", "status": "status", "uat": "messages", "es": "es_messages", "ownship": "mySituation", "traffic": "traffic"} /* The /weather websocket starts off by sending the current buffer of weather messages, then sends updates as they are received. */ func handleWeatherWS(conn *websocket.Conn) { // Subscribe the socket to receive updates. weatherUpdate.AddSocket(conn) // Connection closes when function returns. Since uibroadcast is writing and we don't need to read anything (for now), just keep it busy. 
for { buf := make([]byte, 1024) _, err := conn.Read(buf) if err != nil { break } if buf[0] != 0 { // Dummy. continue } time.Sleep(1 * time.Second) } } // Works just as weather updates do. func handleTrafficWS(conn *websocket.Conn) { trafficMutex.Lock() for _, traf := range traffic { if !traf.Position_valid { // Don't send unless a valid position exists. continue } trafficJSON, _ := json.Marshal(&traf) conn.Write(trafficJSON) } // Subscribe the socket to receive updates. trafficUpdate.AddSocket(conn) trafficMutex.Unlock() // Connection closes when function returns. Since uibroadcast is writing and we don't need to read anything (for now), just keep it busy. for { buf := make([]byte, 1024) _, err := conn.Read(buf) if err != nil { break } if buf[0] != 0 { // Dummy. continue } time.Sleep(1 * time.Second) } } func handleStatusWS(conn *websocket.Conn) { // log.Printf("Web client connected.\n") timer := time.NewTicker(1 * time.Second) for { // The below is not used, but should be if something needs to be streamed from the web client ever in the future. /* var msg SettingMessage err := websocket.JSON.Receive(conn, &msg) if err == io.EOF { break } else if err != nil { log.Printf("handleStatusWS: %s\n", err.Error()) } else { // Use 'msg'. } */ // Send status. <-timer.C update, _ := json.Marshal(&globalStatus) _, err := conn.Write(update) if err != nil { // log.Printf("Web client disconnected.\n") break } } } func handleSituationWS(conn *websocket.Conn) { timer := time.NewTicker(100 * time.Millisecond) for { <-timer.C situationJSON, _ := json.Marshal(&mySituation) _, err := conn.Write(situationJSON) if err != nil { break } } } // AJAX call - /getStatus. 
Responds with current global status // a webservice call for the same data available on the websocket but when only a single update is needed func handleStatusRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) statusJSON, _ := json.Marshal(&globalStatus) fmt.Fprintf(w, "%s\n", statusJSON) } // AJAX call - /getSituation. Responds with current situation (lat/lon/gdspeed/track/pitch/roll/heading/etc.) func handleSituationRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) situationJSON, _ := json.Marshal(&mySituation) fmt.Fprintf(w, "%s\n", situationJSON) } // AJAX call - /getTowers. Responds with all ADS-B ground towers that have sent messages that we were able to parse, along with its stats. func handleTowersRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) ADSBTowerMutex.Lock() towersJSON, err := json.Marshal(&ADSBTowers) if err != nil { log.Printf("Error sending tower JSON data: %s\n", err.Error()) } // for testing purposes, we can return a fixed reply // towersJSON = []byte(`{"(38.490880,-76.135554)":{"Lat":38.49087953567505,"Lng":-76.13555431365967,"Signal_strength_last_minute":100,"Signal_strength_max":67,"Messages_last_minute":1,"Messages_total":1059},"(38.978698,-76.309276)":{"Lat":38.97869825363159,"Lng":-76.30927562713623,"Signal_strength_last_minute":495,"Signal_strength_max":32,"Messages_last_minute":45,"Messages_total":83},"(39.179285,-76.668413)":{"Lat":39.17928457260132,"Lng":-76.66841268539429,"Signal_strength_last_minute":50,"Signal_strength_max":24,"Messages_last_minute":1,"Messages_total":16},"(39.666309,-74.315300)":{"Lat":39.66630935668945,"Lng":-74.31529998779297,"Signal_strength_last_minute":9884,"Signal_strength_max":35,"Messages_last_minute":4,"Messages_total":134}}`) fmt.Fprintf(w, "%s\n", towersJSON) ADSBTowerMutex.Unlock() } // AJAX call - /getSatellites. Responds with all GNSS satellites that are being tracked, along with status information. 
// handleSatellitesRequest serves /getSatellites: all GNSS satellites that are
// being tracked, along with status information, as JSON.
func handleSatellitesRequest(w http.ResponseWriter, r *http.Request) {
	setNoCache(w)
	setJSONHeaders(w)
	// Lock held across marshal and write so the satellite map is consistent.
	satelliteMutex.Lock()
	satellitesJSON, err := json.Marshal(&Satellites)
	if err != nil {
		log.Printf("Error sending GNSS satellite JSON data: %s\n", err.Error())
	}
	fmt.Fprintf(w, "%s\n", satellitesJSON)
	satelliteMutex.Unlock()
}

// AJAX call - /getSettings. Responds with all stratux.conf data.
func handleSettingsGetRequest(w http.ResponseWriter, r *http.Request) {
	setNoCache(w)
	setJSONHeaders(w)
	settingsJSON, _ := json.Marshal(&globalSettings)
	fmt.Fprintf(w, "%s\n", settingsJSON)
}

// AJAX call - /setSettings. receives via POST command, any/all stratux.conf data.
//
// NOTE(review): the type assertions below (val.(bool), val.(float64),
// val.(string)) panic if a client sends a mistyped value — confirm that is
// acceptable, or that a recovery wrapper exists upstream.
// NOTE(review): the settings echo at the bottom is only written for POST
// requests; OPTIONS (CORS preflight) intentionally gets headers only.
func handleSettingsSetRequest(w http.ResponseWriter, r *http.Request) {
	// define header in support of cross-domain AJAX
	setNoCache(w)
	setJSONHeaders(w)
	w.Header().Set("Access-Control-Allow-Method", "GET, POST, OPTIONS")
	w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
	// for an OPTION method request, we return header without processing.
	// this insures we are recognized as supporting cross-domain AJAX REST calls
	if r.Method == "POST" {
		// raw, _ := httputil.DumpRequest(r, true)
		// log.Printf("handleSettingsSetRequest:raw: %s\n", raw)
		decoder := json.NewDecoder(r.Body)
		for {
			var msg map[string]interface{} // support arbitrary JSON
			err := decoder.Decode(&msg)
			if err == io.EOF {
				break
			} else if err != nil {
				log.Printf("handleSettingsSetRequest:error: %s\n", err.Error())
			} else {
				// Apply each recognized key to the corresponding
				// globalSettings field; unknown keys are logged and ignored.
				for key, val := range msg {
					// log.Printf("handleSettingsSetRequest:json: testing for key:%s of type %s\n", key, reflect.TypeOf(val))
					switch key {
					case "UAT_Enabled":
						globalSettings.UAT_Enabled = val.(bool)
					case "ES_Enabled":
						globalSettings.ES_Enabled = val.(bool)
					case "Ping_Enabled":
						globalSettings.Ping_Enabled = val.(bool)
					case "GPS_Enabled":
						globalSettings.GPS_Enabled = val.(bool)
					case "AHRS_Enabled":
						globalSettings.AHRS_Enabled = val.(bool)
					case "DEBUG":
						globalSettings.DEBUG = val.(bool)
					case "DisplayTrafficSource":
						globalSettings.DisplayTrafficSource = val.(bool)
					case "ReplayLog":
						v := val.(bool)
						if v != globalSettings.ReplayLog { // Don't mark the files unless there is a change.
							globalSettings.ReplayLog = v
						}
					case "PPM":
						// JSON numbers arrive as float64.
						globalSettings.PPM = int(val.(float64))
					case "FlightLogLevel":
						globalSettings.FlightLogLevel = int(val.(float64))
					case "Baud":
						if serialOut, ok := globalSettings.SerialOutputs["/dev/serialout0"]; ok { //FIXME: Only one device for now.
							newBaud := int(val.(float64))
							if newBaud == serialOut.Baud { // Same baud rate. No change.
								continue
							}
							log.Printf("changing /dev/serialout0 baud rate from %d to %d.\n", serialOut.Baud, newBaud)
							serialOut.Baud = newBaud
							// Close the port if it is open.
							if serialOut.serialPort != nil {
								log.Printf("closing /dev/serialout0 for baud rate change.\n")
								serialOut.serialPort.Close()
								serialOut.serialPort = nil
							}
							globalSettings.SerialOutputs["/dev/serialout0"] = serialOut
						}
					case "WatchList":
						globalSettings.WatchList = val.(string)
					case "OwnshipModeS":
						// Expecting a hex string less than 6 characters (24 bits) long.
						if len(val.(string)) > 6 { // Too long.
							continue
						}
						// Pad string, must be 6 characters long.
						vals := strings.ToUpper(val.(string))
						for len(vals) < 6 {
							vals = "0" + vals
						}
						hexn, err := hex.DecodeString(vals)
						if err != nil { // Number not valid.
							log.Printf("handleSettingsSetRequest:OwnshipModeS: %s\n", err.Error())
							continue
						}
						globalSettings.OwnshipModeS = fmt.Sprintf("%02X%02X%02X", hexn[0], hexn[1], hexn[2])
					default:
						log.Printf("handleSettingsSetRequest:json: unrecognized key:%s\n", key)
					}
				}
				saveSettings()
			}
		}
		// while it may be redundant, we return the latest settings
		settingsJSON, _ := json.Marshal(&globalSettings)
		fmt.Fprintf(w, "%s\n", settingsJSON)
	}
}

// handleShutdownRequest powers the system off immediately.
func handleShutdownRequest(w http.ResponseWriter, r *http.Request) {
	syscall.Sync()
	syscall.Reboot(syscall.LINUX_REBOOT_CMD_POWER_OFF)
}

// doReboot flushes filesystem buffers and restarts the system.
func doReboot() {
	syscall.Sync()
	syscall.Reboot(syscall.LINUX_REBOOT_CMD_RESTART)
}

// handleRebootRequest schedules a reboot (via delayReboot) so the HTTP
// response has a chance to be delivered first.
func handleRebootRequest(w http.ResponseWriter, r *http.Request) {
	setNoCache(w)
	setJSONHeaders(w)
	w.Header().Set("Access-Control-Allow-Method", "GET, POST, OPTIONS")
	w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
	go delayReboot()
}

// AJAX call - /getClients. Responds with all connected clients.
func handleClientsGetRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) clientsJSON, _ := json.Marshal(&outSockets) fmt.Fprintf(w, "%s\n", clientsJSON) } func getSQL(url string) (string, error) { var sql string = "" path := strings.Split(url, "/") // everything starts with "/flightlog" if path[1] != "flightlog" { return "", errors.New("Error - missing required 'flightlog' prefix") } // have to at least specify a table if len(path) < 3 { return "", errors.New("Error - missing parameters") } // table name is the 3rd item in the path var table string = tables[path[2]] if table == "" { return "", errors.New("Error - invalid table name") } // unless this is a request for the list of flights, a flight ID is required if table != "startup" && len(path) < 4 { return "", errors.New("Error - request must include a flight ID parameter") } // Return everything for the selected table and flight (startup) if table == "startup" { sql = fmt.Sprintf("SELECT * FROM startup ORDER BY id ASC\n") } else { startup, _ := strconv.Atoi(path[3]) sql = fmt.Sprintf("SELECT * FROM %s WHERE startup_id = %d ORDER BY timestamp_id ASC\n", table, startup) } // Limit value (max number of records to return) if len(path) > 4 { limit, err := strconv.Atoi(path[4]) if (err != nil) { return "", errors.New("Error - limit value must be an integer") } sql = sql + "LIMIT " + strconv.Itoa(limit); } // Offset value (from 0) if len(path) > 5 { offset, err := strconv.Atoi(path[5]) if (err != nil) { return "", errors.New("Error - offset value must be an integer") } sql = sql + " OFFSET " + strconv.Itoa(offset); } // SELECT * FROM {table} // WHERE startup_id = {startup} ORDER BY timestamp_id ASC // LIMIT {count} OFFSET {offset} return sql, nil } func handleFlightLogRequest(w http.ResponseWriter, r *http.Request) { db, err := sql.Open("sqlite3", dataLogFilef) if err != nil { log.Printf("sql.Open(): %s\n", err.Error()) } defer func() { db.Close() dataLogStarted = false }() stmt, err := 
getSQL(r.URL.String()) if err != nil { fmt.Println(err) return } m, err := gosqljson.QueryDbToMapJSON(db, "any", stmt) if err != nil { fmt.Println(err) return } setNoCache(w) setJSONHeaders(w) fmt.Fprintf(w, "%s\n", m) } func delayReboot() { time.Sleep(1 * time.Second) doReboot() } // Upload an update file. func handleUpdatePostRequest(w http.ResponseWriter, r *http.Request) { setNoCache(w) setJSONHeaders(w) r.ParseMultipartForm(1024 * 1024 * 32) // ~32MB update. file, handler, err := r.FormFile("update_file") if err != nil { log.Printf("Update failed from %s (%s).\n", r.RemoteAddr, err.Error()) return } defer file.Close() // Special hardware builds. Don't allow an update unless the filename contains the hardware build name. if (len(globalStatus.HardwareBuild) > 0) && !strings.Contains(strings.ToLower(handler.Filename), strings.ToLower(globalStatus.HardwareBuild)) { w.WriteHeader(404) return } updateFile := fmt.Sprintf("/root/update-stratux-v.sh") f, err := os.OpenFile(updateFile, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { log.Printf("Update failed from %s (%s).\n", r.RemoteAddr, err.Error()) return } defer f.Close() io.Copy(f, file) log.Printf("%s uploaded %s for update.\n", r.RemoteAddr, updateFile) // Successful update upload. Now reboot. 
go delayReboot() } func setNoCache(w http.ResponseWriter) { w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Set("Pragma", "no-cache") w.Header().Set("Expires", "0") } func setJSONHeaders(w http.ResponseWriter) { w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") } func defaultServer(w http.ResponseWriter, r *http.Request) { // setNoCache(w) http.FileServer(http.Dir("/var/www")).ServeHTTP(w, r) } func handleroPartitionRebuild(w http.ResponseWriter, r *http.Request) { out, err := exec.Command("/usr/sbin/rebuild_ro_part.sh").Output() var ret_err error if err != nil { ret_err = fmt.Errorf("Rebuild RO Partition error: %s", err.Error()) } else { ret_err = fmt.Errorf("Rebuild RO Partition success: %s", out) } addSystemError(ret_err) } // https://gist.github.com/alexisrobert/982674. // Copyright (c) 2010-2014 Alexis ROBERT <alexis.robert@gmail.com>. const dirlisting_tpl = `<?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> <!-- Modified from lighttpd directory listing --> <head> <title>Index of {{.Name}}</title> <style type="text/css"> a, a:active {text-decoration: none; color: blue;} a:visited {color: #48468F;} a:hover, a:focus {text-decoration: underline; color: red;} body {background-color: #F5F5F5;} h2 {margin-bottom: 12px;} table {margin-left: 12px;} th, td { font: 90% monospace; text-align: left;} th { font-weight: bold; padding-right: 14px; padding-bottom: 3px;} td {padding-right: 14px;} td.s, th.s {text-align: right;} div.list { background-color: white; border-top: 1px solid #646464; border-bottom: 1px solid #646464; padding-top: 10px; padding-bottom: 14px;} div.foot { font: 90% monospace; color: #787878; padding-top: 4px;} </style> </head> <body> <h2>Index of {{.Name}}</h2> <div class="list"> <table summary="Directory Listing" 
cellpadding="0" cellspacing="0"> <thead><tr><th class="n">Name</th><th>Last Modified</th><th>Size (bytes)</th><th class="dl">Options</th></tr></thead> <tbody> {{range .Children_files}} <tr><td class="n"><a href="/logs/stratux/{{.Name}}">{{.Name}}</a></td><td>{{.Mtime}}</td><td>{{.Size}}</td><td class="dl"><a href="/logs/stratux/{{.Name}}">Download</a></td></tr> {{end}} </tbody> </table> </div> <div class="foot">{{.ServerUA}}</div> </body> </html>` type fileInfo struct { Name string Mtime string Size string } // Manages directory listings type dirlisting struct { Name string Children_files []fileInfo ServerUA string } //FIXME: This needs to be switched to show a "sessions log" from the sqlite database. func viewLogs(w http.ResponseWriter, r *http.Request) { var logPath string if _, err := os.Stat("/etc/FlightBox"); !os.IsNotExist(err) { logPath = "/root/log/" } else { // if not using the FlightBox config, use "normal" log file locations logPath = "/var/log/stratux/" } names, err := ioutil.ReadDir(logPath) if err != nil { return } fi := make([]fileInfo, 0) for _, val := range names { if val.Name()[0] == '.' 
{ continue } // Remove hidden files from listing if !val.IsDir() { mtime := val.ModTime().Format("2006-Jan-02 15:04:05") sz := humanize.Comma(val.Size()) fi = append(fi, fileInfo{Name: val.Name(), Mtime: mtime, Size: sz}) } } tpl, err := template.New("tpl").Parse(dirlisting_tpl) if err != nil { return } data := dirlisting{Name: r.URL.Path, ServerUA: "Stratux " + stratuxVersion + "/" + stratuxBuild, Children_files: fi} err = tpl.Execute(w, data) if err != nil { log.Printf("viewLogs() error: %s\n", err.Error()) } } func managementInterface() { weatherUpdate = NewUIBroadcaster() trafficUpdate = NewUIBroadcaster() http.HandleFunc("/", defaultServer) var logPath string if _, err := os.Stat("/etc/FlightBox"); !os.IsNotExist(err) { logPath = "/root/log" } else { // if not using the FlightBox config, use "normal" log file locations logPath = "/var/log" } http.Handle("/logs/", http.StripPrefix("/logs/", http.FileServer(http.Dir(logPath)))) http.Handle("/logs/stratux/", http.StripPrefix("/logs/stratux/", http.FileServer(http.Dir(logPath)))) http.HandleFunc("/view_logs/", viewLogs) http.HandleFunc("/status", func(w http.ResponseWriter, req *http.Request) { s := websocket.Server{ Handler: websocket.Handler(handleStatusWS)} s.ServeHTTP(w, req) }) http.HandleFunc("/situation", func(w http.ResponseWriter, req *http.Request) { s := websocket.Server{ Handler: websocket.Handler(handleSituationWS)} s.ServeHTTP(w, req) }) http.HandleFunc("/weather", func(w http.ResponseWriter, req *http.Request) { s := websocket.Server{ Handler: websocket.Handler(handleWeatherWS)} s.ServeHTTP(w, req) }) http.HandleFunc("/traffic", func(w http.ResponseWriter, req *http.Request) { s := websocket.Server{ Handler: websocket.Handler(handleTrafficWS)} s.ServeHTTP(w, req) }) http.HandleFunc("/getStatus", handleStatusRequest) http.HandleFunc("/getSituation", handleSituationRequest) http.HandleFunc("/getTowers", handleTowersRequest) http.HandleFunc("/getSatellites", handleSatellitesRequest) 
http.HandleFunc("/getSettings", handleSettingsGetRequest) http.HandleFunc("/setSettings", handleSettingsSetRequest) http.HandleFunc("/shutdown", handleShutdownRequest) http.HandleFunc("/reboot", handleRebootRequest) http.HandleFunc("/getClients", handleClientsGetRequest) http.HandleFunc("/updateUpload", handleUpdatePostRequest) http.HandleFunc("/roPartitionRebuild", handleroPartitionRebuild) http.HandleFunc("/flightlog/", handleFlightLogRequest) err := http.ListenAndServe(managementAddr, nil) if err != nil { log.Printf("managementInterface ListenAndServe: %s\n", err.Error()) } }
// Package operations does generic operations on filesystems and objects package operations import ( "bytes" "context" "encoding/base64" "encoding/csv" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "os" "path" "path/filepath" "sort" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/pkg/errors" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" "golang.org/x/sync/errgroup" ) // CheckHashes checks the two files to see if they have common // known hash types and compares them // // Returns // // equal - which is equality of the hashes // // hash - the HashType. This is HashNone if either of the hashes were // unset or a compatible hash couldn't be found. // // err - may return an error which will already have been logged // // If an error is returned it will return equal as false func CheckHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) { common := src.Fs().Hashes().Overlap(dst.Fs().Hashes()) // fs.Debugf(nil, "Shared hashes: %v", common) if common.Count() == 0 { return true, hash.None, nil } equal, ht, _, _, err = checkHashes(ctx, src, dst, common.GetOne()) return equal, ht, err } // checkHashes does the work of CheckHashes but takes a hash.Type and // returns the effective hash type used. 
func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.Type) (equal bool, htOut hash.Type, srcHash, dstHash string, err error) { // Calculate hashes in parallel g, ctx := errgroup.WithContext(ctx) g.Go(func() (err error) { srcHash, err = src.Hash(ctx, ht) if err != nil { err = fs.CountError(err) fs.Errorf(src, "Failed to calculate src hash: %v", err) } return err }) g.Go(func() (err error) { dstHash, err = dst.Hash(ctx, ht) if err != nil { err = fs.CountError(err) fs.Errorf(dst, "Failed to calculate dst hash: %v", err) } return err }) err = g.Wait() if err != nil { return false, ht, srcHash, dstHash, err } if srcHash == "" { return true, hash.None, srcHash, dstHash, nil } if dstHash == "" { return true, hash.None, srcHash, dstHash, nil } if srcHash != dstHash { fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs()) fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs()) } else { fs.Debugf(src, "%v = %s OK", ht, srcHash) } return srcHash == dstHash, ht, srcHash, dstHash, nil } // Equal checks to see if the src and dst objects are equal by looking at // size, mtime and hash // // If the src and dst size are different then it is considered to be // not equal. If --size-only is in effect then this is the only check // that is done. If --ignore-size is in effect then this check is // skipped and the files are considered the same size. // // If the size is the same and the mtime is the same then it is // considered to be equal. This check is skipped if using --checksum. // // If the size is the same and mtime is different, unreadable or // --checksum is set and the hash is the same then the file is // considered to be equal. In this case the mtime on the dst is // updated if --checksum is not set. // // Otherwise the file is considered to be not equal including if there // were errors reading info. 
func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool { return equal(ctx, src, dst, defaultEqualOpt()) } // sizeDiffers compare the size of src and dst taking into account the // various ways of ignoring sizes func sizeDiffers(src, dst fs.ObjectInfo) bool { if fs.Config.IgnoreSize || src.Size() < 0 || dst.Size() < 0 { return false } return src.Size() != dst.Size() } var checksumWarning sync.Once // options for equal function() type equalOpt struct { sizeOnly bool // if set only check size checkSum bool // if set check checksum+size instead of modtime+size updateModTime bool // if set update the modtime if hashes identical and checking with modtime+size forceModTimeMatch bool // if set assume modtimes match } // default set of options for equal() func defaultEqualOpt() equalOpt { return equalOpt{ sizeOnly: fs.Config.SizeOnly, checkSum: fs.Config.CheckSum, updateModTime: !fs.Config.NoUpdateModTime, forceModTimeMatch: false, } } var modTimeUploadOnce sync.Once // emit a log if we are about to upload a file to set its modification time func logModTimeUpload(dst fs.Object) { modTimeUploadOnce.Do(func() { fs.Logf(dst.Fs(), "Forced to upload files to set modification times on this backend.") }) } func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool { if sizeDiffers(src, dst) { fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size()) return false } if opt.sizeOnly { fs.Debugf(src, "Sizes identical") return true } // Assert: Size is equal or being ignored // If checking checksum and not modtime if opt.checkSum { // Check the hash same, ht, _ := CheckHashes(ctx, src, dst) if !same { fs.Debugf(src, "%v differ", ht) return false } if ht == hash.None { checksumWarning.Do(func() { fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only") }) fs.Debugf(src, "Size of src and dst objects identical") } else { fs.Debugf(src, "Size and %v of src and dst 
objects identical", ht) } return true } srcModTime := src.ModTime(ctx) if !opt.forceModTimeMatch { // Sizes the same so check the mtime modifyWindow := fs.GetModifyWindow(src.Fs(), dst.Fs()) if modifyWindow == fs.ModTimeNotSupported { fs.Debugf(src, "Sizes identical") return true } dstModTime := dst.ModTime(ctx) dt := dstModTime.Sub(srcModTime) if dt < modifyWindow && dt > -modifyWindow { fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow) return true } fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime) } // Check if the hashes are the same same, ht, _ := CheckHashes(ctx, src, dst) if !same { fs.Debugf(src, "%v differ", ht) return false } if ht == hash.None && !fs.Config.RefreshTimes { // if couldn't check hash, return that they differ return false } // mod time differs but hash is the same to reset mod time if required if opt.updateModTime { if !SkipDestructive(ctx, src, "update modification time") { // Size and hash the same but mtime different // Error if objects are treated as immutable if fs.Config.Immutable { fs.Errorf(dst, "StartedAt mismatch between immutable objects") return false } // Update the mtime of the dst object here err := dst.SetModTime(ctx, srcModTime) if err == fs.ErrorCantSetModTime { logModTimeUpload(dst) fs.Infof(dst, "src and dst identical but can't set mod time without re-uploading") return false } else if err == fs.ErrorCantSetModTimeWithoutDelete { logModTimeUpload(dst) fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading") // Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file // put in the BackupDir than deleted which is what will happen if we don't delete it. 
if fs.Config.BackupDir == "" { err = dst.Remove(ctx) if err != nil { fs.Errorf(dst, "failed to delete before re-upload: %v", err) } } return false } else if err != nil { err = fs.CountError(err) fs.Errorf(dst, "Failed to set modification time: %v", err) } else { fs.Infof(src, "Updated modification time in destination") } } } return true } // Used to remove a failed copy // // Returns whether the file was successfully removed or not func removeFailedCopy(ctx context.Context, dst fs.Object) bool { if dst == nil { return false } fs.Infof(dst, "Removing failed copy") removeErr := dst.Remove(ctx) if removeErr != nil { fs.Infof(dst, "Failed to remove failed copy: %s", removeErr) return false } return true } // OverrideRemote is a wrapper to override the Remote for an // ObjectInfo type OverrideRemote struct { fs.ObjectInfo remote string } // NewOverrideRemote returns an OverrideRemoteObject which will // return the remote specified func NewOverrideRemote(oi fs.ObjectInfo, remote string) *OverrideRemote { return &OverrideRemote{ ObjectInfo: oi, remote: remote, } } // Remote returns the overridden remote name func (o *OverrideRemote) Remote() string { return o.remote } // MimeType returns the mime type of the underlying object or "" if it // can't be worked out func (o *OverrideRemote) MimeType(ctx context.Context) string { if do, ok := o.ObjectInfo.(fs.MimeTyper); ok { return do.MimeType(ctx) } return "" } // ID returns the ID of the Object if known, or "" if not func (o *OverrideRemote) ID() string { if do, ok := o.ObjectInfo.(fs.IDer); ok { return do.ID() } return "" } // UnWrap returns the Object that this Object is wrapping or nil if it // isn't wrapping anything func (o *OverrideRemote) UnWrap() fs.Object { if o, ok := o.ObjectInfo.(fs.Object); ok { return o } return nil } // GetTier returns storage tier or class of the Object func (o *OverrideRemote) GetTier() string { if do, ok := o.ObjectInfo.(fs.GetTierer); ok { return do.GetTier() } return "" } // Check all 
optional interfaces satisfied var _ fs.FullObjectInfo = (*OverrideRemote)(nil) // CommonHash returns a single hash.Type and a HashOption with that // type which is in common between the two fs.Fs. func CommonHash(fa, fb fs.Info) (hash.Type, *fs.HashesOption) { // work out which hash to use - limit to 1 hash in common var common hash.Set hashType := hash.None if !fs.Config.IgnoreChecksum { common = fb.Hashes().Overlap(fa.Hashes()) if common.Count() > 0 { hashType = common.GetOne() common = hash.Set(hashType) } } return hashType, &fs.HashesOption{Hashes: common} } // Copy src object to dst or f if nil. If dst is nil then it uses // remote as the name of the new object. // // It returns the destination object if possible. Note that this may // be nil. func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { tr := accounting.Stats(ctx).NewTransfer(src) defer func() { tr.Done(err) }() newDst = dst if SkipDestructive(ctx, src, "copy") { return newDst, nil } maxTries := fs.Config.LowLevelRetries tries := 0 doUpdate := dst != nil hashType, hashOption := CommonHash(f, src.Fs()) var actionTaken string for { // Try server side copy first - if has optional interface and // is same underlying remote actionTaken = "Copied (server side copy)" if fs.Config.MaxTransfer >= 0 && (accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) || (fs.Config.CutoffMode == fs.CutoffModeCautious && accounting.Stats(ctx).GetBytesWithPending()+src.Size() >= int64(fs.Config.MaxTransfer))) { return nil, accounting.ErrorMaxTransferLimitReachedGraceful } if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) { in := tr.Account(ctx, nil) // account the transfer in.ServerSideCopyStart() newDst, err = doCopy(ctx, src, remote) if err == nil { dst = newDst in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server side transfer err = 
in.Close() } else { _ = in.Close() } if err == fs.ErrorCantCopy { tr.Reset() // skip incomplete accounting - will be overwritten by the manual copy below } } else { err = fs.ErrorCantCopy } // If can't server side copy, do it manually if err == fs.ErrorCantCopy { if doMultiThreadCopy(f, src) { // Number of streams proportional to size streams := src.Size() / int64(fs.Config.MultiThreadCutoff) // With maximum if streams > int64(fs.Config.MultiThreadStreams) { streams = int64(fs.Config.MultiThreadStreams) } if streams < 2 { streams = 2 } dst, err = multiThreadCopy(ctx, f, remote, src, int(streams), tr) if doUpdate { actionTaken = "Multi-thread Copied (replaced existing)" } else { actionTaken = "Multi-thread Copied (new)" } } else { var in0 io.ReadCloser options := []fs.OpenOption{hashOption} for _, option := range fs.Config.DownloadHeaders { options = append(options, option) } in0, err = NewReOpen(ctx, src, fs.Config.LowLevelRetries, options...) if err != nil { err = errors.Wrap(err, "failed to open source object") } else { if src.Size() == -1 { // -1 indicates unknown size. Use Rcat to handle both remotes supporting and not supporting PutStream. if doUpdate { actionTaken = "Copied (Rcat, replaced existing)" } else { actionTaken = "Copied (Rcat, new)" } // NB Rcat closes in0 dst, err = Rcat(ctx, f, remote, in0, src.ModTime(ctx)) newDst = dst } else { in := tr.Account(ctx, in0).WithBuffer() // account and buffer the transfer var wrappedSrc fs.ObjectInfo = src // We try to pass the original object if possible if src.Remote() != remote { wrappedSrc = NewOverrideRemote(src, remote) } options := []fs.OpenOption{hashOption} for _, option := range fs.Config.UploadHeaders { options = append(options, option) } if doUpdate { actionTaken = "Copied (replaced existing)" err = dst.Update(ctx, in, wrappedSrc, options...) } else { actionTaken = "Copied (new)" dst, err = f.Put(ctx, in, wrappedSrc, options...) 
} closeErr := in.Close() if err == nil { newDst = dst err = closeErr } } } } } tries++ if tries >= maxTries { break } // Retry if err returned a retry error if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) { fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries) tr.Reset() // skip incomplete accounting - will be overwritten by retry continue } // otherwise finish break } if err != nil { err = fs.CountError(err) fs.Errorf(src, "Failed to copy: %v", err) return newDst, err } // Verify sizes are the same after transfer if sizeDiffers(src, dst) { err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size()) fs.Errorf(dst, "%v", err) err = fs.CountError(err) removeFailedCopy(ctx, dst) return newDst, err } // Verify hashes are the same after transfer - ignoring blank hashes if hashType != hash.None { // checkHashes has logged and counted errors equal, _, srcSum, dstSum, _ := checkHashes(ctx, src, dst, hashType) if !equal { err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum) fs.Errorf(dst, "%v", err) err = fs.CountError(err) removeFailedCopy(ctx, dst) return newDst, err } } fs.Infof(src, actionTaken) return newDst, err } // SameObject returns true if src and dst could be pointing to the // same object. func SameObject(src, dst fs.Object) bool { if !SameConfig(src.Fs(), dst.Fs()) { return false } srcPath := path.Join(src.Fs().Root(), src.Remote()) dstPath := path.Join(dst.Fs().Root(), dst.Remote()) if dst.Fs().Features().CaseInsensitive { srcPath = strings.ToLower(srcPath) dstPath = strings.ToLower(dstPath) } return srcPath == dstPath } // Move src object to dst or fdst if nil. If dst is nil then it uses // remote as the name of the new object. // // Note that you must check the destination does not exist before // calling this and pass it as dst. If you pass dst=nil and the // destination does exist then this may create duplicates or return // errors. 
//
// It returns the destination object if possible.  Note that this may
// be nil.
func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
	defer func() {
		// Only count a rename once the whole move has succeeded.
		if err == nil {
			accounting.Stats(ctx).Renames(1)
		}
		tr.Done(err)
	}()
	newDst = dst
	if SkipDestructive(ctx, src, "move") {
		return newDst, nil
	}
	// See if we have Move available
	if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
		// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
		if dst != nil && !SameObject(src, dst) {
			err = DeleteFile(ctx, dst)
			if err != nil {
				return newDst, err
			}
		}
		// Move dst <- src
		newDst, err = doMove(ctx, src, remote)
		switch err {
		case nil:
			fs.Infof(src, "Moved (server side)")
			return newDst, nil
		case fs.ErrorCantMove:
			// Backend declined the server-side move: fall through to copy+delete.
			fs.Debugf(src, "Can't move, switching to copy")
		default:
			err = fs.CountError(err)
			fs.Errorf(src, "Couldn't move: %v", err)
			return newDst, err
		}
	}
	// Move not found or didn't work so copy dst <- src
	newDst, err = Copy(ctx, fdst, dst, remote, src)
	if err != nil {
		fs.Errorf(src, "Not deleting source as copy failed: %v", err)
		return newDst, err
	}
	// Delete src if no error on copy
	return newDst, DeleteFile(ctx, src)
}

// CanServerSideMove returns true if fdst support server side moves or
// server side copies
//
// Some remotes simulate rename by server-side copy and delete, so include
// remotes that implements either Mover or Copier.
func CanServerSideMove(fdst fs.Fs) bool { canMove := fdst.Features().Move != nil canCopy := fdst.Features().Copy != nil return canMove || canCopy } // SuffixName adds the current --suffix to the remote, obeying // --suffix-keep-extension if set func SuffixName(remote string) string { if fs.Config.Suffix == "" { return remote } if fs.Config.SuffixKeepExtension { ext := path.Ext(remote) base := remote[:len(remote)-len(ext)] return base + fs.Config.Suffix + ext } return remote + fs.Config.Suffix } // DeleteFileWithBackupDir deletes a single file respecting --dry-run // and accumulating stats and errors. // // If backupDir is set then it moves the file to there instead of // deleting func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) { tr := accounting.Stats(ctx).NewCheckingTransfer(dst) defer func() { tr.Done(err) }() numDeletes := accounting.Stats(ctx).Deletes(1) if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete { return fserrors.FatalError(errors.New("--max-delete threshold reached")) } action, actioned := "delete", "Deleted" if backupDir != nil { action, actioned = "move into backup dir", "Moved into backup dir" } skip := SkipDestructive(ctx, dst, action) if skip { // do nothing } else if backupDir != nil { err = MoveBackupDir(ctx, backupDir, dst) } else { err = dst.Remove(ctx) } if err != nil { fs.Errorf(dst, "Couldn't %s: %v", action, err) err = fs.CountError(err) } else if !skip { fs.Infof(dst, actioned) } return err } // DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors. // // If useBackupDir is set and --backup-dir is in effect then it moves // the file to there instead of deleting func DeleteFile(ctx context.Context, dst fs.Object) (err error) { return DeleteFileWithBackupDir(ctx, dst, nil) } // DeleteFilesWithBackupDir removes all the files passed in the // channel // // If backupDir is set the files will be placed into that directory // instead of being deleted. 
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error { var wg sync.WaitGroup wg.Add(fs.Config.Transfers) var errorCount int32 var fatalErrorCount int32 for i := 0; i < fs.Config.Transfers; i++ { go func() { defer wg.Done() for dst := range toBeDeleted { err := DeleteFileWithBackupDir(ctx, dst, backupDir) if err != nil { atomic.AddInt32(&errorCount, 1) if fserrors.IsFatalError(err) { fs.Errorf(nil, "Got fatal error on delete: %s", err) atomic.AddInt32(&fatalErrorCount, 1) return } } } }() } fs.Debugf(nil, "Waiting for deletions to finish") wg.Wait() if errorCount > 0 { err := errors.Errorf("failed to delete %d files", errorCount) if fatalErrorCount > 0 { return fserrors.FatalError(err) } return err } return nil } // DeleteFiles removes all the files passed in the channel func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error { return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil) } // SameRemoteType returns true if fdst and fsrc are the same type func SameRemoteType(fdst, fsrc fs.Info) bool { return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc) } // SameConfig returns true if fdst and fsrc are using the same config // file entry func SameConfig(fdst, fsrc fs.Info) bool { return fdst.Name() == fsrc.Name() } // Same returns true if fdst and fsrc point to the same underlying Fs func Same(fdst, fsrc fs.Info) bool { return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/") } // fixRoot returns the Root with a trailing / if not empty. It is // aware of case insensitive filesystems. func fixRoot(f fs.Info) string { s := strings.Trim(filepath.ToSlash(f.Root()), "/") if s != "" { s += "/" } if f.Features().CaseInsensitive { s = strings.ToLower(s) } return s } // Overlapping returns true if fdst and fsrc point to the same // underlying Fs and they overlap. 
func Overlapping(fdst, fsrc fs.Info) bool { if !SameConfig(fdst, fsrc) { return false } fdstRoot := fixRoot(fdst) fsrcRoot := fixRoot(fsrc) return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot) } // SameDir returns true if fdst and fsrc point to the same // underlying Fs and they are the same directory. func SameDir(fdst, fsrc fs.Info) bool { if !SameConfig(fdst, fsrc) { return false } fdstRoot := fixRoot(fdst) fsrcRoot := fixRoot(fsrc) return fdstRoot == fsrcRoot } // Retry runs fn up to maxTries times if it returns a retriable error func Retry(o interface{}, maxTries int, fn func() error) (err error) { for tries := 1; tries <= maxTries; tries++ { // Call the function which might error err = fn() if err == nil { break } // Retry if err returned a retry error if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) { fs.Debugf(o, "Received error: %v - low level retry %d/%d", err, tries, maxTries) continue } break } return err } // ListFn lists the Fs to the supplied function // // Lists in parallel which may get them out of order func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error { return walk.ListR(ctx, f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { entries.ForObject(fn) return nil }) } // mutex for synchronized output var outMutex sync.Mutex // Synchronized fmt.Fprintf // // Ignores errors from Fprintf func syncFprintf(w io.Writer, format string, a ...interface{}) { outMutex.Lock() defer outMutex.Unlock() _, _ = fmt.Fprintf(w, format, a...) 
} // List the Fs to the supplied writer // // Shows size and path - obeys includes and excludes // // Lists in parallel which may get them out of order func List(ctx context.Context, f fs.Fs, w io.Writer) error { return ListFn(ctx, f, func(o fs.Object) { syncFprintf(w, "%9d %s\n", o.Size(), o.Remote()) }) } // ListLong lists the Fs to the supplied writer // // Shows size, mod time and path - obeys includes and excludes // // Lists in parallel which may get them out of order func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error { return ListFn(ctx, f, func(o fs.Object) { tr := accounting.Stats(ctx).NewCheckingTransfer(o) defer func() { tr.Done(nil) }() modTime := o.ModTime(ctx) syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote()) }) } // Md5sum list the Fs to the supplied writer // // Produces the same output as the md5sum command - obeys includes and // excludes // // Lists in parallel which may get them out of order func Md5sum(ctx context.Context, f fs.Fs, w io.Writer) error { return HashLister(ctx, hash.MD5, f, w) } // Sha1sum list the Fs to the supplied writer // // Obeys includes and excludes // // Lists in parallel which may get them out of order func Sha1sum(ctx context.Context, f fs.Fs, w io.Writer) error { return HashLister(ctx, hash.SHA1, f, w) } // hashSum returns the human readable hash for ht passed in. This may // be UNSUPPORTED or ERROR. If it isn't returning a valid hash it will // return an error. 
func hashSum(ctx context.Context, ht hash.Type, o fs.Object) (string, error) {
	var err error
	tr := accounting.Stats(ctx).NewCheckingTransfer(o)
	defer func() {
		tr.Done(err)
	}()
	sum, err := o.Hash(ctx, ht)
	if err == hash.ErrUnsupported {
		sum = "UNSUPPORTED"
	} else if err != nil {
		fs.Debugf(o, "Failed to read %v: %v", ht, err)
		sum = "ERROR"
	}
	return sum, err
}

// HashLister does an md5sum equivalent for the hash type passed in
func HashLister(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
	return ListFn(ctx, f, func(o fs.Object) {
		// Errors are already mapped to UNSUPPORTED/ERROR placeholders by hashSum
		sum, _ := hashSum(ctx, ht, o)
		syncFprintf(w, "%*s %s\n", hash.Width(ht), sum, o.Remote())
	})
}

// HashListerBase64 does an md5sum equivalent for the hash type passed in with base64 encoded
func HashListerBase64(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
	return ListFn(ctx, f, func(o fs.Object) {
		sum, err := hashSum(ctx, ht, o)
		if err == nil {
			// Convert the hex digest to URL-safe base64
			hexBytes, _ := hex.DecodeString(sum)
			sum = base64.URLEncoding.EncodeToString(hexBytes)
		}
		// hash.Width is in hex chars, so /2 gives the raw byte count
		width := base64.URLEncoding.EncodedLen(hash.Width(ht) / 2)
		syncFprintf(w, "%*s %s\n", width, sum, o.Remote())
	})
}

// Count counts the objects and their sizes in the Fs
//
// Obeys includes and excludes
func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error) {
	err = ListFn(ctx, f, func(o fs.Object) {
		// Listing is parallel, so accumulate atomically
		atomic.AddInt64(&objects, 1)
		objectSize := o.Size()
		if objectSize > 0 {
			atomic.AddInt64(&size, objectSize)
		}
	})
	return
}

// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
func ConfigMaxDepth(recursive bool) int {
	depth := fs.Config.MaxDepth
	// Non-recursive listings default to one level when no depth was given
	if !recursive && depth < 0 {
		depth = 1
	}
	return depth
}

// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error {
	return walk.ListR(ctx, f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error {
		entries.ForDir(func(dir fs.Directory) {
			if dir != nil {
				syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
			}
		})
		return nil
	})
}

// Mkdir makes a destination directory or container
func Mkdir(ctx context.Context, f fs.Fs, dir string) error {
	if SkipDestructive(ctx, fs.LogDirName(f, dir), "make directory") {
		return nil
	}
	fs.Debugf(fs.LogDirName(f, dir), "Making directory")
	err := f.Mkdir(ctx, dir)
	if err != nil {
		err = fs.CountError(err)
		return err
	}
	return nil
}

// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
func TryRmdir(ctx context.Context, f fs.Fs, dir string) error {
	if SkipDestructive(ctx, fs.LogDirName(f, dir), "remove directory") {
		return nil
	}
	fs.Debugf(fs.LogDirName(f, dir), "Removing directory")
	return f.Rmdir(ctx, dir)
}

// Rmdir removes a container but not if not empty
func Rmdir(ctx context.Context, f fs.Fs, dir string) error {
	err := TryRmdir(ctx, f, dir)
	if err != nil {
		// Unlike TryRmdir this counts the error in the stats
		err = fs.CountError(err)
		return err
	}
	return err
}

// Purge removes a directory and all of its contents
func Purge(ctx context.Context, f fs.Fs, dir string) (err error) {
	doFallbackPurge := true
	if doPurge := f.Features().Purge; doPurge != nil {
		doFallbackPurge = false
		if SkipDestructive(ctx, fs.LogDirName(f, dir), "purge directory") {
			return nil
		}
		err = doPurge(ctx, dir)
		if err == fs.ErrorCantPurge {
			// Remote declined - fall back to delete + rmdirs below
			doFallbackPurge = true
		}
	}
	if doFallbackPurge {
		// DeleteFiles and Rmdir observe --dry-run
		err = DeleteFiles(ctx, listToChan(ctx, f, dir))
		if err != nil {
			return err
		}
		err = Rmdirs(ctx, f, dir, false)
	}
	if err != nil {
		err = fs.CountError(err)
		return err
	}
	return nil
}

// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
func Delete(ctx context.Context, f fs.Fs) error {
	delChan := make(fs.ObjectsChan, fs.Config.Transfers)
	delErr := make(chan error, 1)
	// Deletions run concurrently with the listing below
	go func() {
		delErr <- DeleteFiles(ctx, delChan)
	}()
	err := ListFn(ctx, f, func(o fs.Object) {
		delChan <- o
	})
	close(delChan)
	delError := <-delErr
	// Listing errors take precedence over deletion errors
	if err == nil {
		err = delError
	}
	return err
}

// listToChan will transfer all objects in the listing to the output
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
	o := make(fs.ObjectsChan, fs.Config.Checkers)
	go func() {
		defer close(o)
		err := walk.ListR(ctx, f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
			entries.ForObject(func(obj fs.Object) {
				o <- obj
			})
			return nil
		})
		if err != nil && err != fs.ErrorDirNotFound {
			err = errors.Wrap(err, "failed to list")
			err = fs.CountError(err)
			fs.Errorf(nil, "%v", err)
		}
	}()
	return o
}

// CleanUp removes the trash for the Fs
func CleanUp(ctx context.Context, f fs.Fs) error {
	doCleanUp := f.Features().CleanUp
	if doCleanUp == nil {
		return errors.Errorf("%v doesn't support cleanup", f)
	}
	if SkipDestructive(ctx, f, "clean up old files") {
		return nil
	}
	return doCleanUp(ctx)
}

// wrap a Reader and a Closer together into a ReadCloser
type readCloser struct {
	io.Reader
	io.Closer
}

// Cat any files to the io.Writer
//
// if offset == 0 it will be ignored
// if offset > 0 then the file will be seeked to that offset
// if offset < 0 then the file will be seeked that far from the end
//
// if count < 0 then it will be ignored
// if count >= 0 then only that many characters will be output
func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
	var mu sync.Mutex
	return ListFn(ctx, f, func(o fs.Object) {
		var err error
		tr := accounting.Stats(ctx).NewTransfer(o)
		defer func() {
			tr.Done(err)
		}()
		opt := fs.RangeOption{Start: offset, End: -1}
		size := o.Size()
		// A negative offset means that far from the end
		if opt.Start < 0 {
			opt.Start += size
		}
		if count >= 0 {
			opt.End = opt.Start + count - 1
		}
		var options []fs.OpenOption
		if opt.Start > 0 || opt.End >= 0 {
			options = append(options, &opt)
		}
		for _, option := range fs.Config.DownloadHeaders {
			options = append(options, option)
		}
		in, err := o.Open(ctx, options...)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(o, "Failed to open: %v", err)
			return
		}
		if count >= 0 {
			// Limit the output in case the range option was ignored
			in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in}
		}
		in = tr.Account(ctx, in).WithBuffer() // account and buffer the transfer
		// take the lock just before we output stuff, so at the last possible moment
		mu.Lock()
		defer mu.Unlock()
		_, err = io.Copy(w, in)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(o, "Failed to send to output: %v", err)
		}
	})
}

// Rcat reads data from the Reader until EOF and uploads it to a file on remote
func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
	tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1)
	defer func() {
		tr.Done(err)
	}()
	in = tr.Account(ctx, in).WithBuffer()

	readCounter := readers.NewCountingReader(in)
	var trackingIn io.Reader
	var hasher *hash.MultiHasher
	var options []fs.OpenOption
	if !fs.Config.IgnoreChecksum {
		hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash
		hashOption := &fs.HashesOption{Hashes: hashes}
		options = append(options, hashOption)
		hasher, err = hash.NewMultiHasherTypes(hashes)
		if err != nil {
			return nil, err
		}
		// Tee the stream through the hasher so we can verify afterwards
		trackingIn = io.TeeReader(readCounter, hasher)
	} else {
		trackingIn = readCounter
	}
	for _, option := range fs.Config.UploadHeaders {
		options = append(options, option)
	}

	// compare verifies the uploaded object against the hashed/counted input
	compare := func(dst fs.Object) error {
		var sums map[hash.Type]string
		if hasher != nil {
			sums = hasher.Sums()
		}
		src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, sums, fdst)
		if !Equal(ctx, src, dst) {
			err = errors.Errorf("corrupted on transfer")
			err = fs.CountError(err)
			fs.Errorf(dst, "%v", err)
			return err
		}
		return nil
	}

	// check if file small enough for direct upload
	buf := make([]byte, fs.Config.StreamingUploadCutoff)
	if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
		fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
		src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
		return Copy(ctx, fdst, nil, dstFileName, src)
	}

	// Make a new ReadCloser with the bits we've already read
	in = &readCloser{
		Reader: io.MultiReader(bytes.NewReader(buf), trackingIn),
		Closer: in,
	}

	fStreamTo := fdst
	canStream := fdst.Features().PutStream != nil
	if !canStream {
		fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
		tmpLocalFs, err := fs.TemporaryLocalFs()
		if err != nil {
			return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
		}
		// Clean the spool area up when we are done
		defer func() {
			err := Purge(ctx, tmpLocalFs, "")
			if err != nil {
				fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err)
			}
		}()
		fStreamTo = tmpLocalFs
	}

	if SkipDestructive(ctx, dstFileName, "upload from pipe") {
		// prevents "broken pipe" errors
		_, err = io.Copy(ioutil.Discard, in)
		return nil, err
	}

	objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil)
	if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, options...); err != nil {
		return dst, err
	}
	if err = compare(dst); err != nil {
		return dst, err
	}
	if !canStream {
		// copy dst (which is the local object we have just streamed to) to the remote
		return Copy(ctx, fdst, nil, dstFileName, dst)
	}
	return dst, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func PublicLink(ctx context.Context, f fs.Fs, remote string, expire fs.Duration, unlink bool) (string, error) {
	doPublicLink := f.Features().PublicLink
	if doPublicLink == nil {
		return "", errors.Errorf("%v doesn't support public links", f)
	}
	return doPublicLink(ctx, remote, expire, unlink)
}

// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
	// dirEmpty maps directory path -> believed-empty flag
	dirEmpty := make(map[string]bool)
	dirEmpty[dir] = !leaveRoot
	err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
			return nil
		}
		for _, entry := range entries {
			switch x := entry.(type) {
			case fs.Directory:
				// add a new directory as empty
				dir := x.Remote()
				_, found := dirEmpty[dir]
				if !found {
					dirEmpty[dir] = true
				}
			case fs.Object:
				// mark the parents of the file as being non-empty
				dir := x.Remote()
				for dir != "" {
					dir = path.Dir(dir)
					if dir == "." || dir == "/" {
						dir = ""
					}
					empty, found := dirEmpty[dir]
					// End if we reach a directory which is non-empty
					if found && !empty {
						break
					}
					dirEmpty[dir] = false
				}
			}
		}
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "failed to rmdirs")
	}
	// Now delete the empty directories, starting from the longest path
	var toDelete []string
	for dir, empty := range dirEmpty {
		if empty {
			toDelete = append(toDelete, dir)
		}
	}
	sort.Strings(toDelete)
	// Reverse sorted order removes children before their parents
	for i := len(toDelete) - 1; i >= 0; i-- {
		dir := toDelete[i]
		err := TryRmdir(ctx, f, dir)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(dir, "Failed to rmdir: %v", err)
			return err
		}
	}
	return nil
}

// GetCompareDest sets up --compare-dest
func GetCompareDest() (CompareDest fs.Fs, err error) {
	CompareDest, err = cache.Get(fs.Config.CompareDest)
	if err != nil {
		return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", fs.Config.CompareDest, err))
	}
	return CompareDest, nil
}

// compareDest checks --compare-dest to see if src needs to
// be copied
//
// Returns True if src is in --compare-dest
func compareDest(ctx context.Context, dst, src fs.Object, CompareDest fs.Fs) (NoNeedTransfer bool, err error) {
	var remote string
	if dst == nil {
		remote = src.Remote()
	} else {
		remote = dst.Remote()
	}
	CompareDestFile, err := CompareDest.NewObject(ctx, remote)
	switch err {
	case fs.ErrorObjectNotFound:
		return false, nil
	case nil:
		break
	default:
		return false, err
	}
	if Equal(ctx, src, CompareDestFile) {
		fs.Debugf(src, "Destination found in --compare-dest, skipping")
		return true, nil
	}
	return false, nil
}

// GetCopyDest sets up --copy-dest
func GetCopyDest(fdst fs.Fs) (CopyDest fs.Fs, err error) {
	CopyDest, err = cache.Get(fs.Config.CopyDest)
	if err != nil {
		return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", fs.Config.CopyDest, err))
	}
	if !SameConfig(fdst, CopyDest) {
		return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination"))
	}
	if CopyDest.Features().Copy == nil {
		return nil, fserrors.FatalError(errors.New("can't use --copy-dest on a remote which doesn't support server side copy"))
	}
	return CopyDest, nil
}

// copyDest checks --copy-dest to see if src needs to
// be copied
//
// Returns True if src was copied from --copy-dest
func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
	var remote string
	if dst == nil {
		remote = src.Remote()
	} else {
		remote = dst.Remote()
	}
	CopyDestFile, err := CopyDest.NewObject(ctx, remote)
	switch err {
	case fs.ErrorObjectNotFound:
		return false, nil
	case nil:
		break
	default:
		return false, err
	}
	opt := defaultEqualOpt()
	opt.updateModTime = false
	if equal(ctx, src, CopyDestFile, opt) {
		if dst == nil || !Equal(ctx, src, dst) {
			if dst != nil && backupDir != nil {
				err = MoveBackupDir(ctx, backupDir, dst)
				if err != nil {
					return false, errors.Wrap(err, "moving to --backup-dir failed")
				}
				// If successful zero out the dstObj as it is no longer there
				dst = nil
			}
			// NB the := here deliberately shadows the outer err
			_, err := Copy(ctx, fdst, dst, remote, CopyDestFile)
			if err != nil {
				fs.Errorf(src, "Destination found in --copy-dest, error copying")
				return false, nil
			}
			fs.Debugf(src, "Destination found in --copy-dest, using server side copy")
			return true, nil
		}
		fs.Debugf(src, "Unchanged skipping")
		return true, nil
	}
	fs.Debugf(src, "Destination not found in --copy-dest")
	return false, nil
}

// CompareOrCopyDest checks --compare-dest and --copy-dest to see if src
// does not need to be copied
//
// Returns True if src does not need to be copied
func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
	if fs.Config.CompareDest != "" {
		return compareDest(ctx, dst, src, CompareOrCopyDest)
	} else if fs.Config.CopyDest != "" {
		return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir)
	}
	return false, nil
}

// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
	if dst == nil {
		fs.Debugf(src, "Need to transfer - File not found at Destination")
		return true
	}
	// If we should ignore existing files, don't transfer
	if fs.Config.IgnoreExisting {
		fs.Debugf(src, "Destination exists, skipping")
		return false
	}
	// If we should upload unconditionally
	if fs.Config.IgnoreTimes {
		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
		return true
	}
	// If UpdateOlder is in effect, skip if dst is newer than src
	if fs.Config.UpdateOlder {
		srcModTime := src.ModTime(ctx)
		dstModTime := dst.ModTime(ctx)
		dt := dstModTime.Sub(srcModTime)
		// If have a mutually agreed precision then use that
		modifyWindow := fs.GetModifyWindow(dst.Fs(), src.Fs())
		if modifyWindow == fs.ModTimeNotSupported {
			// Otherwise use 1 second as a safe default as
			// the resolution of the time a file was
			// uploaded.
			modifyWindow = time.Second
		}
		switch {
		case dt >= modifyWindow:
			fs.Debugf(src, "Destination is newer than source, skipping")
			return false
		case dt <= -modifyWindow:
			// force --checksum on for the check and do update modtimes by default
			opt := defaultEqualOpt()
			opt.forceModTimeMatch = true
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Unchanged skipping")
				return false
			}
		default:
			// Do a size only compare unless --checksum is set
			opt := defaultEqualOpt()
			opt.sizeOnly = !fs.Config.CheckSum
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow)
				return false
			}
			fs.Debugf(src, "Destination mod time is within %v of source but files differ, transferring", modifyWindow)
		}
	} else {
		// Check to see if changed or not
		if Equal(ctx, src, dst) {
			fs.Debugf(src, "Unchanged skipping")
			return false
		}
	}
	return true
}

// RcatSize reads data from the Reader until EOF and uploads it to a file on remote.
// Pass in size >=0 if known, <0 if not known
func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) {
	var obj fs.Object

	if size >= 0 {
		// NB this err deliberately shadows the outer one for the deferred tr.Done
		var err error
		// Size known use Put
		tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size)
		defer func() {
			tr.Done(err)
		}()
		body := ioutil.NopCloser(in) // we let the server close the body
		in := tr.Account(ctx, body)  // account the transfer (no buffering)

		if SkipDestructive(ctx, dstFileName, "upload from pipe") {
			// prevents "broken pipe" errors
			_, err = io.Copy(ioutil.Discard, in)
			return nil, err
		}

		info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst)
		obj, err = fdst.Put(ctx, in, info)
		if err != nil {
			fs.Errorf(dstFileName, "Post request put error: %v", err)
			return nil, err
		}
	} else {
		// Size unknown use Rcat
		obj, err = Rcat(ctx, fdst, dstFileName, in, modTime)
		if err != nil {
			fs.Errorf(dstFileName, "Post request rcat error: %v", err)
			return nil, err
		}
	}

	return obj, nil
}

// copyURLFunc is called from CopyURLFn
type copyURLFunc func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error)

// copyURLFn copies the data from the url to the function supplied
func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameFromURL bool, fn copyURLFunc) (err error) {
	client := fshttp.NewClient(fs.Config)
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer fs.CheckClose(resp.Body, &err)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return errors.Errorf("CopyURL failed: %s", resp.Status)
	}
	// Fall back to the current time if the server sent no/invalid Last-Modified
	modTime, err := http.ParseTime(resp.Header.Get("Last-Modified"))
	if err != nil {
		modTime = time.Now()
	}
	if dstFileNameFromURL {
		dstFileName = path.Base(resp.Request.URL.Path)
		if dstFileName == "." || dstFileName == "/" {
			return errors.Errorf("CopyURL failed: file name wasn't found in url")
		}
	}
	return fn(ctx, dstFileName, resp.Body, resp.ContentLength, modTime)
}

// CopyURL copies the data from the url to (fdst, dstFileName)
func CopyURL(ctx context.Context, fdst fs.Fs, dstFileName string, url string, dstFileNameFromURL bool, noClobber bool) (dst fs.Object, err error) {
	err = copyURLFn(ctx, dstFileName, url, dstFileNameFromURL, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) {
		if noClobber {
			_, err = fdst.NewObject(ctx, dstFileName)
			if err == nil {
				return errors.New("CopyURL failed: file already exist")
			}
		}
		dst, err = RcatSize(ctx, fdst, dstFileName, in, size, modTime)
		return err
	})
	return dst, err
}

// CopyURLToWriter copies the data from the url to the io.Writer supplied
func CopyURLToWriter(ctx context.Context, url string, out io.Writer) (err error) {
	return copyURLFn(ctx, "", url, false, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) {
		_, err = io.Copy(out, in)
		return err
	})
}

// BackupDir returns the correctly configured --backup-dir
func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) {
	if fs.Config.BackupDir != "" {
		backupDir, err = cache.Get(fs.Config.BackupDir)
		if err != nil {
			return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err))
		}
		if !SameConfig(fdst, backupDir) {
			return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
		}
		if srcFileName == "" {
			// Whole-directory operation: backup dir may not overlap src or dst
			if Overlapping(fdst, backupDir) {
				return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
			}
			if Overlapping(fsrc, backupDir) {
				return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
			}
		} else {
			// Single-file operation: only forbid the exact same directory
			// (unless --suffix makes the backed-up name distinct)
			if fs.Config.Suffix == "" {
				if SameDir(fdst, backupDir) {
					return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
				}
				if SameDir(fsrc, backupDir) {
					return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same"))
				}
			}
		}
	} else if fs.Config.Suffix != "" {
		// --backup-dir is not set but --suffix is - use the destination as the backupDir
		backupDir = fdst
	} else {
		return nil, fserrors.FatalError(errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty"))
	}
	if !CanServerSideMove(backupDir) {
		return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
	}
	return backupDir, nil
}

// MoveBackupDir moves a file to the backup dir
func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) {
	remoteWithSuffix := SuffixName(dst.Remote())
	// Ignore the lookup error - a nil overwritten object is acceptable to Move
	overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix)
	_, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst)
	return err
}

// moveOrCopyFile moves or copies a single file possibly to a new name
func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
	dstFilePath := path.Join(fdst.Root(), dstFileName)
	srcFilePath := path.Join(fsrc.Root(), srcFileName)
	if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
		fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
		return nil
	}

	// Choose operations
	Op := Move
	if cp {
		Op = Copy
	}

	// Find src object
	srcObj, err := fsrc.NewObject(ctx, srcFileName)
	if err != nil {
		return err
	}

	// Find dst object if it exists
	var dstObj fs.Object
	if !fs.Config.NoCheckDest {
		dstObj, err = fdst.NewObject(ctx, dstFileName)
		if err == fs.ErrorObjectNotFound {
			dstObj = nil
		} else if err != nil {
			return err
		}
	}

	// Special case for changing case of a file on a case insensitive remote
	// This will move the file to a temporary name then
	// move it back to the intended destination. This is required
	// to avoid issues with certain remotes and avoid file deletion.
	if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) {
		// Create random name to temporarily move file to
		tmpObjName := dstFileName + "-rclone-move-" + random.String(8)
		_, err := fdst.NewObject(ctx, tmpObjName)
		if err != fs.ErrorObjectNotFound {
			if err == nil {
				return errors.New("found an already existing file with a randomly generated name. Try the operation again")
			}
			return errors.Wrap(err, "error while attempting to move file to a temporary location")
		}
		tr := accounting.Stats(ctx).NewTransfer(srcObj)
		defer func() {
			tr.Done(err)
		}()
		tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj)
		if err != nil {
			return errors.Wrap(err, "error while moving file to temporary location")
		}
		_, err = Op(ctx, fdst, nil, dstFileName, tmpObj)
		return err
	}

	var backupDir, copyDestDir fs.Fs
	if fs.Config.BackupDir != "" || fs.Config.Suffix != "" {
		backupDir, err = BackupDir(fdst, fsrc, srcFileName)
		if err != nil {
			return errors.Wrap(err, "creating Fs for --backup-dir failed")
		}
	}
	if fs.Config.CompareDest != "" {
		copyDestDir, err = GetCompareDest()
		if err != nil {
			return err
		}
	} else if fs.Config.CopyDest != "" {
		copyDestDir, err = GetCopyDest(fdst)
		if err != nil {
			return err
		}
	}
	NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
	if err != nil {
		return err
	}
	if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) {
		// If destination already exists, then we must move it into --backup-dir if required
		if dstObj != nil && backupDir != nil {
			err = MoveBackupDir(ctx, backupDir, dstObj)
			if err != nil {
				return errors.Wrap(err, "moving to --backup-dir failed")
			}
			// If successful zero out the dstObj as it is no longer there
			dstObj = nil
		}
		_, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
	} else {
		// Nothing to transfer - for a move, still delete the source
		tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
		if !cp {
			err = DeleteFile(ctx, srcObj)
		}
		tr.Done(err)
	}
	return err
}

// MoveFile moves a single file possibly to a new name
func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)
}

// CopyFile copies a single file possibly to a new name
func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true)
}
// SetTier changes tier of object in remote func SetTier(ctx context.Context, fsrc fs.Fs, tier string) error { return ListFn(ctx, fsrc, func(o fs.Object) { objImpl, ok := o.(fs.SetTierer) if !ok { fs.Errorf(fsrc, "Remote object does not implement SetTier") return } err := objImpl.SetTier(tier) if err != nil { fs.Errorf(fsrc, "Failed to do SetTier, %v", err) } }) } // ListFormat defines files information print format type ListFormat struct { separator string dirSlash bool absolute bool output []func(entry *ListJSONItem) string csv *csv.Writer buf bytes.Buffer } // SetSeparator changes separator in struct func (l *ListFormat) SetSeparator(separator string) { l.separator = separator } // SetDirSlash defines if slash should be printed func (l *ListFormat) SetDirSlash(dirSlash bool) { l.dirSlash = dirSlash } // SetAbsolute prints a leading slash in front of path names func (l *ListFormat) SetAbsolute(absolute bool) { l.absolute = absolute } // SetCSV defines if the output should be csv // // Note that you should call SetSeparator before this if you want a // custom separator func (l *ListFormat) SetCSV(useCSV bool) { if useCSV { l.csv = csv.NewWriter(&l.buf) if l.separator != "" { l.csv.Comma = []rune(l.separator)[0] } } else { l.csv = nil } } // SetOutput sets functions used to create files information func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) { l.output = output } // AddModTime adds file's Mod Time to output func (l *ListFormat) AddModTime() { l.AppendOutput(func(entry *ListJSONItem) string { return entry.ModTime.When.Local().Format("2006-01-02 15:04:05") }) } // AddSize adds file's size to output func (l *ListFormat) AddSize() { l.AppendOutput(func(entry *ListJSONItem) string { return strconv.FormatInt(entry.Size, 10) }) } // normalisePath makes sure the path has the correct slashes for the current mode func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string { if l.absolute && !strings.HasPrefix(remote, "/") { 
remote = "/" + remote } if entry.IsDir && l.dirSlash { remote += "/" } return remote } // AddPath adds path to file to output func (l *ListFormat) AddPath() { l.AppendOutput(func(entry *ListJSONItem) string { return l.normalisePath(entry, entry.Path) }) } // AddEncrypted adds the encrypted path to file to output func (l *ListFormat) AddEncrypted() { l.AppendOutput(func(entry *ListJSONItem) string { return l.normalisePath(entry, entry.Encrypted) }) } // AddHash adds the hash of the type given to the output func (l *ListFormat) AddHash(ht hash.Type) { hashName := ht.String() l.AppendOutput(func(entry *ListJSONItem) string { if entry.IsDir { return "" } return entry.Hashes[hashName] }) } // AddID adds file's ID to the output if known func (l *ListFormat) AddID() { l.AppendOutput(func(entry *ListJSONItem) string { return entry.ID }) } // AddOrigID adds file's Original ID to the output if known func (l *ListFormat) AddOrigID() { l.AppendOutput(func(entry *ListJSONItem) string { return entry.OrigID }) } // AddTier adds file's Tier to the output if known func (l *ListFormat) AddTier() { l.AppendOutput(func(entry *ListJSONItem) string { return entry.Tier }) } // AddMimeType adds file's MimeType to the output if known func (l *ListFormat) AddMimeType() { l.AppendOutput(func(entry *ListJSONItem) string { return entry.MimeType }) } // AppendOutput adds string generated by specific function to printed output func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) { l.output = append(l.output, functionToAppend) } // Format prints information about the DirEntry in the format defined func (l *ListFormat) Format(entry *ListJSONItem) (result string) { var out []string for _, fun := range l.output { out = append(out, fun(entry)) } if l.csv != nil { l.buf.Reset() _ = l.csv.Write(out) // can't fail writing to bytes.Buffer l.csv.Flush() result = strings.TrimRight(l.buf.String(), "\n") } else { result = strings.Join(out, l.separator) } return result } // 
DirMove renames srcRemote to dstRemote // // It does this by loading the directory tree into memory (using ListR // if available) and doing renames in parallel. func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) { // Use DirMove if possible if doDirMove := f.Features().DirMove; doDirMove != nil { err = doDirMove(ctx, f, srcRemote, dstRemote) if err == nil { accounting.Stats(ctx).Renames(1) } return err } // Load the directory tree into memory tree, err := walk.NewDirTree(ctx, f, srcRemote, true, -1) if err != nil { return errors.Wrap(err, "RenameDir tree walk") } // Get the directories in sorted order dirs := tree.Dirs() // Make the destination directories - must be done in order not in parallel for _, dir := range dirs { dstPath := dstRemote + dir[len(srcRemote):] err := f.Mkdir(ctx, dstPath) if err != nil { return errors.Wrap(err, "RenameDir mkdir") } } // Rename the files in parallel type rename struct { o fs.Object newPath string } renames := make(chan rename, fs.Config.Transfers) g, gCtx := errgroup.WithContext(context.Background()) for i := 0; i < fs.Config.Transfers; i++ { g.Go(func() error { for job := range renames { dstOverwritten, _ := f.NewObject(gCtx, job.newPath) _, err := Move(gCtx, f, dstOverwritten, job.newPath, job.o) if err != nil { return err } select { case <-gCtx.Done(): return gCtx.Err() default: } } return nil }) } for dir, entries := range tree { dstPath := dstRemote + dir[len(srcRemote):] for _, entry := range entries { if o, ok := entry.(fs.Object); ok { renames <- rename{o, path.Join(dstPath, path.Base(o.Remote()))} } } } close(renames) err = g.Wait() if err != nil { return errors.Wrap(err, "RenameDir renames") } // Remove the source directories in reverse order for i := len(dirs) - 1; i >= 0; i-- { err := f.Rmdir(ctx, dirs[i]) if err != nil { return errors.Wrap(err, "RenameDir rmdir") } } return nil } // FsInfo provides information about a remote type FsInfo struct { // Name of the remote (as passed into 
NewFs) Name string // Root of the remote (as passed into NewFs) Root string // String returns a description of the FS String string // Precision of the ModTimes in this Fs in Nanoseconds Precision time.Duration // Returns the supported hash types of the filesystem Hashes []string // Features returns the optional features of this Fs Features map[string]bool } // GetFsInfo gets the information (FsInfo) about a given Fs func GetFsInfo(f fs.Fs) *FsInfo { info := &FsInfo{ Name: f.Name(), Root: f.Root(), String: f.String(), Precision: f.Precision(), Hashes: make([]string, 0, 4), Features: f.Features().Enabled(), } for _, hashType := range f.Hashes().Array() { info.Hashes = append(info.Hashes, hashType.String()) } return info } var ( interactiveMu sync.Mutex skipped = map[string]bool{} ) // skipDestructiveChoose asks the user which action to take // // Call with interactiveMu held func skipDestructiveChoose(ctx context.Context, subject interface{}, action string) (skip bool) { fmt.Printf("rclone: %s \"%v\"?\n", action, subject) switch i := config.CommandDefault([]string{ "yYes, this is OK", "nNo, skip this", fmt.Sprintf("sSkip all %s operations with no more questions", action), fmt.Sprintf("!Do all %s operations with no more questions", action), "qExit rclone now.", }, 0); i { case 'y': skip = false case 'n': skip = true case 's': skip = true skipped[action] = true fs.Logf(nil, "Skipping all %s operations from now on without asking", action) case '!': skip = false skipped[action] = false fs.Logf(nil, "Doing all %s operations from now on without asking", action) case 'q': fs.Logf(nil, "Quitting rclone now") atexit.Run() os.Exit(0) default: skip = true fs.Errorf(nil, "Bad choice %c", i) } return skip } // SkipDestructive should be called whenever rclone is about to do an destructive operation. // // It will check the --dry-run flag and it will ask the user if the --interactive flag is set. 
// // subject should be the object or directory in use // // action should be a descriptive word or short phrase // // Together they should make sense in this sentence: "Rclone is about // to action subject". func SkipDestructive(ctx context.Context, subject interface{}, action string) (skip bool) { var flag string switch { case fs.Config.DryRun: flag = "--dry-run" skip = true case fs.Config.Interactive: flag = "--interactive" interactiveMu.Lock() defer interactiveMu.Unlock() var found bool skip, found = skipped[action] if !found { skip = skipDestructiveChoose(ctx, subject, action) } default: return false } if skip { fs.Logf(subject, "Skipped %s as %s is set", action, flag) } return skip } operations: fix spurious "--checksum is in use but the source and destination have no hashes in common" Before this change rclone would emit the message --checksum is in use but the source and destination have no hashes in common; falling back to --size-only When the source or destination hash was missing as well as when the source and destination had no hashes in common. This first case is very confusing for users when the source and destination do have a hash in common. This change fixes that and makes sure the error message is not emitted on missing hashes even when there is a hash in common. 
See: https://forum.rclone.org/t/source-and-destination-have-no-hashes-in-common-for-unencrypted-drive-to-local-sync/19531 // Package operations does generic operations on filesystems and objects package operations import ( "bytes" "context" "encoding/base64" "encoding/csv" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "os" "path" "path/filepath" "sort" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/pkg/errors" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" "golang.org/x/sync/errgroup" ) // CheckHashes checks the two files to see if they have common // known hash types and compares them // // Returns // // equal - which is equality of the hashes // // hash - the HashType. This is HashNone if either of the hashes were // unset or a compatible hash couldn't be found. // // err - may return an error which will already have been logged // // If an error is returned it will return equal as false func CheckHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) { common := src.Fs().Hashes().Overlap(dst.Fs().Hashes()) // fs.Debugf(nil, "Shared hashes: %v", common) if common.Count() == 0 { return true, hash.None, nil } equal, ht, _, _, err = checkHashes(ctx, src, dst, common.GetOne()) return equal, ht, err } // checkHashes does the work of CheckHashes but takes a hash.Type and // returns the effective hash type used. 
func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.Type) (equal bool, htOut hash.Type, srcHash, dstHash string, err error) { // Calculate hashes in parallel g, ctx := errgroup.WithContext(ctx) g.Go(func() (err error) { srcHash, err = src.Hash(ctx, ht) if err != nil { err = fs.CountError(err) fs.Errorf(src, "Failed to calculate src hash: %v", err) } return err }) g.Go(func() (err error) { dstHash, err = dst.Hash(ctx, ht) if err != nil { err = fs.CountError(err) fs.Errorf(dst, "Failed to calculate dst hash: %v", err) } return err }) err = g.Wait() if err != nil { return false, ht, srcHash, dstHash, err } if srcHash == "" { return true, hash.None, srcHash, dstHash, nil } if dstHash == "" { return true, hash.None, srcHash, dstHash, nil } if srcHash != dstHash { fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs()) fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs()) } else { fs.Debugf(src, "%v = %s OK", ht, srcHash) } return srcHash == dstHash, ht, srcHash, dstHash, nil } // Equal checks to see if the src and dst objects are equal by looking at // size, mtime and hash // // If the src and dst size are different then it is considered to be // not equal. If --size-only is in effect then this is the only check // that is done. If --ignore-size is in effect then this check is // skipped and the files are considered the same size. // // If the size is the same and the mtime is the same then it is // considered to be equal. This check is skipped if using --checksum. // // If the size is the same and mtime is different, unreadable or // --checksum is set and the hash is the same then the file is // considered to be equal. In this case the mtime on the dst is // updated if --checksum is not set. // // Otherwise the file is considered to be not equal including if there // were errors reading info. 
func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool { return equal(ctx, src, dst, defaultEqualOpt()) } // sizeDiffers compare the size of src and dst taking into account the // various ways of ignoring sizes func sizeDiffers(src, dst fs.ObjectInfo) bool { if fs.Config.IgnoreSize || src.Size() < 0 || dst.Size() < 0 { return false } return src.Size() != dst.Size() } var checksumWarning sync.Once // options for equal function() type equalOpt struct { sizeOnly bool // if set only check size checkSum bool // if set check checksum+size instead of modtime+size updateModTime bool // if set update the modtime if hashes identical and checking with modtime+size forceModTimeMatch bool // if set assume modtimes match } // default set of options for equal() func defaultEqualOpt() equalOpt { return equalOpt{ sizeOnly: fs.Config.SizeOnly, checkSum: fs.Config.CheckSum, updateModTime: !fs.Config.NoUpdateModTime, forceModTimeMatch: false, } } var modTimeUploadOnce sync.Once // emit a log if we are about to upload a file to set its modification time func logModTimeUpload(dst fs.Object) { modTimeUploadOnce.Do(func() { fs.Logf(dst.Fs(), "Forced to upload files to set modification times on this backend.") }) } func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool { if sizeDiffers(src, dst) { fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size()) return false } if opt.sizeOnly { fs.Debugf(src, "Sizes identical") return true } // Assert: Size is equal or being ignored // If checking checksum and not modtime if opt.checkSum { // Check the hash same, ht, _ := CheckHashes(ctx, src, dst) if !same { fs.Debugf(src, "%v differ", ht) return false } if ht == hash.None { common := src.Fs().Hashes().Overlap(dst.Fs().Hashes()) if common.Count() == 0 { checksumWarning.Do(func() { fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only") }) } fs.Debugf(src, "Size of 
src and dst objects identical") } else { fs.Debugf(src, "Size and %v of src and dst objects identical", ht) } return true } srcModTime := src.ModTime(ctx) if !opt.forceModTimeMatch { // Sizes the same so check the mtime modifyWindow := fs.GetModifyWindow(src.Fs(), dst.Fs()) if modifyWindow == fs.ModTimeNotSupported { fs.Debugf(src, "Sizes identical") return true } dstModTime := dst.ModTime(ctx) dt := dstModTime.Sub(srcModTime) if dt < modifyWindow && dt > -modifyWindow { fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow) return true } fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime) } // Check if the hashes are the same same, ht, _ := CheckHashes(ctx, src, dst) if !same { fs.Debugf(src, "%v differ", ht) return false } if ht == hash.None && !fs.Config.RefreshTimes { // if couldn't check hash, return that they differ return false } // mod time differs but hash is the same to reset mod time if required if opt.updateModTime { if !SkipDestructive(ctx, src, "update modification time") { // Size and hash the same but mtime different // Error if objects are treated as immutable if fs.Config.Immutable { fs.Errorf(dst, "StartedAt mismatch between immutable objects") return false } // Update the mtime of the dst object here err := dst.SetModTime(ctx, srcModTime) if err == fs.ErrorCantSetModTime { logModTimeUpload(dst) fs.Infof(dst, "src and dst identical but can't set mod time without re-uploading") return false } else if err == fs.ErrorCantSetModTimeWithoutDelete { logModTimeUpload(dst) fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading") // Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file // put in the BackupDir than deleted which is what will happen if we don't delete it. 
if fs.Config.BackupDir == "" { err = dst.Remove(ctx) if err != nil { fs.Errorf(dst, "failed to delete before re-upload: %v", err) } } return false } else if err != nil { err = fs.CountError(err) fs.Errorf(dst, "Failed to set modification time: %v", err) } else { fs.Infof(src, "Updated modification time in destination") } } } return true } // Used to remove a failed copy // // Returns whether the file was successfully removed or not func removeFailedCopy(ctx context.Context, dst fs.Object) bool { if dst == nil { return false } fs.Infof(dst, "Removing failed copy") removeErr := dst.Remove(ctx) if removeErr != nil { fs.Infof(dst, "Failed to remove failed copy: %s", removeErr) return false } return true } // OverrideRemote is a wrapper to override the Remote for an // ObjectInfo type OverrideRemote struct { fs.ObjectInfo remote string } // NewOverrideRemote returns an OverrideRemoteObject which will // return the remote specified func NewOverrideRemote(oi fs.ObjectInfo, remote string) *OverrideRemote { return &OverrideRemote{ ObjectInfo: oi, remote: remote, } } // Remote returns the overridden remote name func (o *OverrideRemote) Remote() string { return o.remote } // MimeType returns the mime type of the underlying object or "" if it // can't be worked out func (o *OverrideRemote) MimeType(ctx context.Context) string { if do, ok := o.ObjectInfo.(fs.MimeTyper); ok { return do.MimeType(ctx) } return "" } // ID returns the ID of the Object if known, or "" if not func (o *OverrideRemote) ID() string { if do, ok := o.ObjectInfo.(fs.IDer); ok { return do.ID() } return "" } // UnWrap returns the Object that this Object is wrapping or nil if it // isn't wrapping anything func (o *OverrideRemote) UnWrap() fs.Object { if o, ok := o.ObjectInfo.(fs.Object); ok { return o } return nil } // GetTier returns storage tier or class of the Object func (o *OverrideRemote) GetTier() string { if do, ok := o.ObjectInfo.(fs.GetTierer); ok { return do.GetTier() } return "" } // Check all 
optional interfaces satisfied var _ fs.FullObjectInfo = (*OverrideRemote)(nil) // CommonHash returns a single hash.Type and a HashOption with that // type which is in common between the two fs.Fs. func CommonHash(fa, fb fs.Info) (hash.Type, *fs.HashesOption) { // work out which hash to use - limit to 1 hash in common var common hash.Set hashType := hash.None if !fs.Config.IgnoreChecksum { common = fb.Hashes().Overlap(fa.Hashes()) if common.Count() > 0 { hashType = common.GetOne() common = hash.Set(hashType) } } return hashType, &fs.HashesOption{Hashes: common} } // Copy src object to dst or f if nil. If dst is nil then it uses // remote as the name of the new object. // // It returns the destination object if possible. Note that this may // be nil. func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { tr := accounting.Stats(ctx).NewTransfer(src) defer func() { tr.Done(err) }() newDst = dst if SkipDestructive(ctx, src, "copy") { return newDst, nil } maxTries := fs.Config.LowLevelRetries tries := 0 doUpdate := dst != nil hashType, hashOption := CommonHash(f, src.Fs()) var actionTaken string for { // Try server side copy first - if has optional interface and // is same underlying remote actionTaken = "Copied (server side copy)" if fs.Config.MaxTransfer >= 0 && (accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) || (fs.Config.CutoffMode == fs.CutoffModeCautious && accounting.Stats(ctx).GetBytesWithPending()+src.Size() >= int64(fs.Config.MaxTransfer))) { return nil, accounting.ErrorMaxTransferLimitReachedGraceful } if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) { in := tr.Account(ctx, nil) // account the transfer in.ServerSideCopyStart() newDst, err = doCopy(ctx, src, remote) if err == nil { dst = newDst in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server side transfer err = 
in.Close() } else { _ = in.Close() } if err == fs.ErrorCantCopy { tr.Reset() // skip incomplete accounting - will be overwritten by the manual copy below } } else { err = fs.ErrorCantCopy } // If can't server side copy, do it manually if err == fs.ErrorCantCopy { if doMultiThreadCopy(f, src) { // Number of streams proportional to size streams := src.Size() / int64(fs.Config.MultiThreadCutoff) // With maximum if streams > int64(fs.Config.MultiThreadStreams) { streams = int64(fs.Config.MultiThreadStreams) } if streams < 2 { streams = 2 } dst, err = multiThreadCopy(ctx, f, remote, src, int(streams), tr) if doUpdate { actionTaken = "Multi-thread Copied (replaced existing)" } else { actionTaken = "Multi-thread Copied (new)" } } else { var in0 io.ReadCloser options := []fs.OpenOption{hashOption} for _, option := range fs.Config.DownloadHeaders { options = append(options, option) } in0, err = NewReOpen(ctx, src, fs.Config.LowLevelRetries, options...) if err != nil { err = errors.Wrap(err, "failed to open source object") } else { if src.Size() == -1 { // -1 indicates unknown size. Use Rcat to handle both remotes supporting and not supporting PutStream. if doUpdate { actionTaken = "Copied (Rcat, replaced existing)" } else { actionTaken = "Copied (Rcat, new)" } // NB Rcat closes in0 dst, err = Rcat(ctx, f, remote, in0, src.ModTime(ctx)) newDst = dst } else { in := tr.Account(ctx, in0).WithBuffer() // account and buffer the transfer var wrappedSrc fs.ObjectInfo = src // We try to pass the original object if possible if src.Remote() != remote { wrappedSrc = NewOverrideRemote(src, remote) } options := []fs.OpenOption{hashOption} for _, option := range fs.Config.UploadHeaders { options = append(options, option) } if doUpdate { actionTaken = "Copied (replaced existing)" err = dst.Update(ctx, in, wrappedSrc, options...) } else { actionTaken = "Copied (new)" dst, err = f.Put(ctx, in, wrappedSrc, options...) 
} closeErr := in.Close() if err == nil { newDst = dst err = closeErr } } } } } tries++ if tries >= maxTries { break } // Retry if err returned a retry error if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) { fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries) tr.Reset() // skip incomplete accounting - will be overwritten by retry continue } // otherwise finish break } if err != nil { err = fs.CountError(err) fs.Errorf(src, "Failed to copy: %v", err) return newDst, err } // Verify sizes are the same after transfer if sizeDiffers(src, dst) { err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size()) fs.Errorf(dst, "%v", err) err = fs.CountError(err) removeFailedCopy(ctx, dst) return newDst, err } // Verify hashes are the same after transfer - ignoring blank hashes if hashType != hash.None { // checkHashes has logged and counted errors equal, _, srcSum, dstSum, _ := checkHashes(ctx, src, dst, hashType) if !equal { err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum) fs.Errorf(dst, "%v", err) err = fs.CountError(err) removeFailedCopy(ctx, dst) return newDst, err } } fs.Infof(src, actionTaken) return newDst, err } // SameObject returns true if src and dst could be pointing to the // same object. func SameObject(src, dst fs.Object) bool { if !SameConfig(src.Fs(), dst.Fs()) { return false } srcPath := path.Join(src.Fs().Root(), src.Remote()) dstPath := path.Join(dst.Fs().Root(), dst.Remote()) if dst.Fs().Features().CaseInsensitive { srcPath = strings.ToLower(srcPath) dstPath = strings.ToLower(dstPath) } return srcPath == dstPath } // Move src object to dst or fdst if nil. If dst is nil then it uses // remote as the name of the new object. // // Note that you must check the destination does not exist before // calling this and pass it as dst. If you pass dst=nil and the // destination does exist then this may create duplicates or return // errors. 
// // It returns the destination object if possible. Note that this may // be nil. func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { tr := accounting.Stats(ctx).NewCheckingTransfer(src) defer func() { if err == nil { accounting.Stats(ctx).Renames(1) } tr.Done(err) }() newDst = dst if SkipDestructive(ctx, src, "move") { return newDst, nil } // See if we have Move available if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) { // Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive) if dst != nil && !SameObject(src, dst) { err = DeleteFile(ctx, dst) if err != nil { return newDst, err } } // Move dst <- src newDst, err = doMove(ctx, src, remote) switch err { case nil: fs.Infof(src, "Moved (server side)") return newDst, nil case fs.ErrorCantMove: fs.Debugf(src, "Can't move, switching to copy") default: err = fs.CountError(err) fs.Errorf(src, "Couldn't move: %v", err) return newDst, err } } // Move not found or didn't work so copy dst <- src newDst, err = Copy(ctx, fdst, dst, remote, src) if err != nil { fs.Errorf(src, "Not deleting source as copy failed: %v", err) return newDst, err } // Delete src if no error on copy return newDst, DeleteFile(ctx, src) } // CanServerSideMove returns true if fdst support server side moves or // server side copies // // Some remotes simulate rename by server-side copy and delete, so include // remotes that implements either Mover or Copier. 
func CanServerSideMove(fdst fs.Fs) bool { canMove := fdst.Features().Move != nil canCopy := fdst.Features().Copy != nil return canMove || canCopy } // SuffixName adds the current --suffix to the remote, obeying // --suffix-keep-extension if set func SuffixName(remote string) string { if fs.Config.Suffix == "" { return remote } if fs.Config.SuffixKeepExtension { ext := path.Ext(remote) base := remote[:len(remote)-len(ext)] return base + fs.Config.Suffix + ext } return remote + fs.Config.Suffix } // DeleteFileWithBackupDir deletes a single file respecting --dry-run // and accumulating stats and errors. // // If backupDir is set then it moves the file to there instead of // deleting func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) { tr := accounting.Stats(ctx).NewCheckingTransfer(dst) defer func() { tr.Done(err) }() numDeletes := accounting.Stats(ctx).Deletes(1) if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete { return fserrors.FatalError(errors.New("--max-delete threshold reached")) } action, actioned := "delete", "Deleted" if backupDir != nil { action, actioned = "move into backup dir", "Moved into backup dir" } skip := SkipDestructive(ctx, dst, action) if skip { // do nothing } else if backupDir != nil { err = MoveBackupDir(ctx, backupDir, dst) } else { err = dst.Remove(ctx) } if err != nil { fs.Errorf(dst, "Couldn't %s: %v", action, err) err = fs.CountError(err) } else if !skip { fs.Infof(dst, actioned) } return err } // DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors. // // If useBackupDir is set and --backup-dir is in effect then it moves // the file to there instead of deleting func DeleteFile(ctx context.Context, dst fs.Object) (err error) { return DeleteFileWithBackupDir(ctx, dst, nil) } // DeleteFilesWithBackupDir removes all the files passed in the // channel // // If backupDir is set the files will be placed into that directory // instead of being deleted. 
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error { var wg sync.WaitGroup wg.Add(fs.Config.Transfers) var errorCount int32 var fatalErrorCount int32 for i := 0; i < fs.Config.Transfers; i++ { go func() { defer wg.Done() for dst := range toBeDeleted { err := DeleteFileWithBackupDir(ctx, dst, backupDir) if err != nil { atomic.AddInt32(&errorCount, 1) if fserrors.IsFatalError(err) { fs.Errorf(nil, "Got fatal error on delete: %s", err) atomic.AddInt32(&fatalErrorCount, 1) return } } } }() } fs.Debugf(nil, "Waiting for deletions to finish") wg.Wait() if errorCount > 0 { err := errors.Errorf("failed to delete %d files", errorCount) if fatalErrorCount > 0 { return fserrors.FatalError(err) } return err } return nil } // DeleteFiles removes all the files passed in the channel func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error { return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil) } // SameRemoteType returns true if fdst and fsrc are the same type func SameRemoteType(fdst, fsrc fs.Info) bool { return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc) } // SameConfig returns true if fdst and fsrc are using the same config // file entry func SameConfig(fdst, fsrc fs.Info) bool { return fdst.Name() == fsrc.Name() } // Same returns true if fdst and fsrc point to the same underlying Fs func Same(fdst, fsrc fs.Info) bool { return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/") } // fixRoot returns the Root with a trailing / if not empty. It is // aware of case insensitive filesystems. func fixRoot(f fs.Info) string { s := strings.Trim(filepath.ToSlash(f.Root()), "/") if s != "" { s += "/" } if f.Features().CaseInsensitive { s = strings.ToLower(s) } return s } // Overlapping returns true if fdst and fsrc point to the same // underlying Fs and they overlap. 
func Overlapping(fdst, fsrc fs.Info) bool { if !SameConfig(fdst, fsrc) { return false } fdstRoot := fixRoot(fdst) fsrcRoot := fixRoot(fsrc) return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot) } // SameDir returns true if fdst and fsrc point to the same // underlying Fs and they are the same directory. func SameDir(fdst, fsrc fs.Info) bool { if !SameConfig(fdst, fsrc) { return false } fdstRoot := fixRoot(fdst) fsrcRoot := fixRoot(fsrc) return fdstRoot == fsrcRoot } // Retry runs fn up to maxTries times if it returns a retriable error func Retry(o interface{}, maxTries int, fn func() error) (err error) { for tries := 1; tries <= maxTries; tries++ { // Call the function which might error err = fn() if err == nil { break } // Retry if err returned a retry error if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) { fs.Debugf(o, "Received error: %v - low level retry %d/%d", err, tries, maxTries) continue } break } return err } // ListFn lists the Fs to the supplied function // // Lists in parallel which may get them out of order func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error { return walk.ListR(ctx, f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { entries.ForObject(fn) return nil }) } // mutex for synchronized output var outMutex sync.Mutex // Synchronized fmt.Fprintf // // Ignores errors from Fprintf func syncFprintf(w io.Writer, format string, a ...interface{}) { outMutex.Lock() defer outMutex.Unlock() _, _ = fmt.Fprintf(w, format, a...) 
}

// List the Fs to the supplied writer
//
// Shows size and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func List(ctx context.Context, f fs.Fs, w io.Writer) error {
	printObject := func(o fs.Object) {
		syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
	}
	return ListFn(ctx, f, printObject)
}

// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
	return ListFn(ctx, f, func(o fs.Object) {
		tr := accounting.Stats(ctx).NewCheckingTransfer(o)
		defer tr.Done(nil)
		// Reading the mod time may be expensive, hence the accounting above
		when := o.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000")
		syncFprintf(w, "%9d %s %s\n", o.Size(), when, o.Remote())
	})
}

// Md5sum list the Fs to the supplied writer
//
// Produces the same output as the md5sum command - obeys includes and
// excludes
//
// Lists in parallel which may get them out of order
func Md5sum(ctx context.Context, f fs.Fs, w io.Writer) error {
	return HashLister(ctx, hash.MD5, f, w)
}

// Sha1sum list the Fs to the supplied writer
//
// Obeys includes and excludes
//
// Lists in parallel which may get them out of order
func Sha1sum(ctx context.Context, f fs.Fs, w io.Writer) error {
	return HashLister(ctx, hash.SHA1, f, w)
}

// hashSum returns the human readable hash for ht passed in. This may
// be UNSUPPORTED or ERROR. If it isn't returning a valid hash it will
// return an error.
func hashSum(ctx context.Context, ht hash.Type, o fs.Object) (string, error) {
	var err error
	// Reading the hash is accounted as a checking transfer; the deferred
	// Done picks up the final value of err.
	tr := accounting.Stats(ctx).NewCheckingTransfer(o)
	defer func() {
		tr.Done(err)
	}()
	sum, err := o.Hash(ctx, ht)
	if err == hash.ErrUnsupported {
		sum = "UNSUPPORTED"
	} else if err != nil {
		fs.Debugf(o, "Failed to read %v: %v", ht, err)
		sum = "ERROR"
	}
	return sum, err
}

// HashLister does an md5sum equivalent for the hash type passed in
func HashLister(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
	return ListFn(ctx, f, func(o fs.Object) {
		// Errors are reported via the placeholder sum (UNSUPPORTED/ERROR)
		sum, _ := hashSum(ctx, ht, o)
		syncFprintf(w, "%*s %s\n", hash.Width(ht), sum, o.Remote())
	})
}

// HashListerBase64 does an md5sum equivalent for the hash type passed in with base64 encoded
func HashListerBase64(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
	return ListFn(ctx, f, func(o fs.Object) {
		sum, err := hashSum(ctx, ht, o)
		if err == nil {
			// Re-encode the hex digest as URL-safe base64; on error sum
			// stays as the UNSUPPORTED/ERROR placeholder text.
			hexBytes, _ := hex.DecodeString(sum)
			sum = base64.URLEncoding.EncodeToString(hexBytes)
		}
		// hash.Width is in hex digits so /2 gives the byte count
		width := base64.URLEncoding.EncodedLen(hash.Width(ht) / 2)
		syncFprintf(w, "%*s %s\n", width, sum, o.Remote())
	})
}

// Count counts the objects and their sizes in the Fs
//
// Obeys includes and excludes
func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error) {
	err = ListFn(ctx, f, func(o fs.Object) {
		// The listing callback runs in parallel so update totals atomically
		atomic.AddInt64(&objects, 1)
		objectSize := o.Size()
		if objectSize > 0 {
			atomic.AddInt64(&size, objectSize)
		}
	})
	return
}

// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
func ConfigMaxDepth(recursive bool) int {
	depth := fs.Config.MaxDepth
	if !recursive && depth < 0 {
		// Unlimited depth (-1) with a non recursive listing means one level only
		depth = 1
	}
	return depth
}

// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error {
	return walk.ListR(ctx, f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error {
		entries.ForDir(func(dir fs.Directory) {
			if dir != nil {
				syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
			}
		})
		return nil
	})
}

// Mkdir makes a destination directory or container
func Mkdir(ctx context.Context, f fs.Fs, dir string) error {
	// Honour --dry-run / --interactive
	if SkipDestructive(ctx, fs.LogDirName(f, dir), "make directory") {
		return nil
	}
	fs.Debugf(fs.LogDirName(f, dir), "Making directory")
	err := f.Mkdir(ctx, dir)
	if err != nil {
		err = fs.CountError(err)
		return err
	}
	return nil
}

// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
func TryRmdir(ctx context.Context, f fs.Fs, dir string) error {
	// Honour --dry-run / --interactive
	if SkipDestructive(ctx, fs.LogDirName(f, dir), "remove directory") {
		return nil
	}
	fs.Debugf(fs.LogDirName(f, dir), "Removing directory")
	return f.Rmdir(ctx, dir)
}

// Rmdir removes a container but not if not empty
func Rmdir(ctx context.Context, f fs.Fs, dir string) error {
	err := TryRmdir(ctx, f, dir)
	if err != nil {
		// Unlike TryRmdir, failures here are counted in the stats
		err = fs.CountError(err)
		return err
	}
	return err
}

// Purge removes a directory and all of its contents
func Purge(ctx context.Context, f fs.Fs, dir string) (err error) {
	doFallbackPurge := true
	if doPurge := f.Features().Purge; doPurge != nil {
		doFallbackPurge = false
		if SkipDestructive(ctx, fs.LogDirName(f, dir), "purge directory") {
			return nil
		}
		err = doPurge(ctx, dir)
		if err == fs.ErrorCantPurge {
			// The remote declined this particular purge - fall back
			doFallbackPurge = true
		}
	}
	if doFallbackPurge {
		// DeleteFiles and Rmdir observe --dry-run
		err = DeleteFiles(ctx, listToChan(ctx, f, dir))
		if err != nil {
			return err
		}
		err = Rmdirs(ctx, f, dir, false)
	}
	if err != nil {
		err = fs.CountError(err)
		return err
	}
	return nil
}

// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
func Delete(ctx context.Context, f fs.Fs) error {
	delChan := make(fs.ObjectsChan, fs.Config.Transfers)
	delErr := make(chan error, 1)
	// Delete the objects in the background as the listing produces them
	go func() {
		delErr <- DeleteFiles(ctx, delChan)
	}()
	err := ListFn(ctx, f, func(o fs.Object) {
		delChan <- o
	})
	close(delChan)
	// Wait for the deleter to finish; prefer the listing error if both failed
	delError := <-delErr
	if err == nil {
		err = delError
	}
	return err
}

// listToChan will transfer all objects in the listing to the output
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
	o := make(fs.ObjectsChan, fs.Config.Checkers)
	go func() {
		defer close(o)
		err := walk.ListR(ctx, f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
			entries.ForObject(func(obj fs.Object) {
				o <- obj
			})
			return nil
		})
		if err != nil && err != fs.ErrorDirNotFound {
			err = errors.Wrap(err, "failed to list")
			err = fs.CountError(err)
			fs.Errorf(nil, "%v", err)
		}
	}()
	return o
}

// CleanUp removes the trash for the Fs
func CleanUp(ctx context.Context, f fs.Fs) error {
	doCleanUp := f.Features().CleanUp
	if doCleanUp == nil {
		return errors.Errorf("%v doesn't support cleanup", f)
	}
	if SkipDestructive(ctx, f, "clean up old files") {
		return nil
	}
	return doCleanUp(ctx)
}

// wrap a Reader and a Closer together into a ReadCloser
type readCloser struct {
	io.Reader
	io.Closer
}

// Cat any files to the io.Writer
//
// if offset == 0 it will be ignored
// if offset > 0 then the file will be seeked to that offset
// if offset < 0 then the file will be seeked that far from the end
//
// if count < 0 then it will be ignored
// if count >= 0 then only that many characters will be output
func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
	// mu serialises writes to w from the parallel listing callbacks
	var mu sync.Mutex
	return ListFn(ctx, f, func(o fs.Object) {
		var err error
		tr := accounting.Stats(ctx).NewTransfer(o)
		defer func() {
			tr.Done(err)
		}()
		opt := fs.RangeOption{Start: offset, End: -1}
		size := o.Size()
		if opt.Start < 0 {
			// Negative offset means that far from the end of the file
			opt.Start += size
		}
		if count >= 0 {
			opt.End = opt.Start + count - 1
		}
		var options []fs.OpenOption
		if opt.Start > 0 || opt.End >= 0 {
			options = append(options, &opt)
		}
		for _, option := range fs.Config.DownloadHeaders {
			options = append(options, option)
		}
		in, err := o.Open(ctx, options...)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(o, "Failed to open: %v", err)
			return
		}
		if count >= 0 {
			// Cap the read at count bytes but still close the underlying stream
			in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in}
		}
		in = tr.Account(ctx, in).WithBuffer() // account and buffer the transfer
		// take the lock just before we output stuff, so at the last possible moment
		mu.Lock()
		defer mu.Unlock()
		_, err = io.Copy(w, in)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(o, "Failed to send to output: %v", err)
		}
	})
}

// Rcat reads data from the Reader until EOF and uploads it to a file on remote
func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
	tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1)
	defer func() {
		tr.Done(err)
	}()
	in = tr.Account(ctx, in).WithBuffer()
	readCounter := readers.NewCountingReader(in)
	var trackingIn io.Reader
	var hasher *hash.MultiHasher
	var options []fs.OpenOption
	if !fs.Config.IgnoreChecksum {
		hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash
		hashOption := &fs.HashesOption{Hashes: hashes}
		options = append(options, hashOption)
		hasher, err = hash.NewMultiHasherTypes(hashes)
		if err != nil {
			return nil, err
		}
		// Tee the input through the hasher so the upload can be verified
		trackingIn = io.TeeReader(readCounter, hasher)
	} else {
		trackingIn = readCounter
	}
	for _, option := range fs.Config.UploadHeaders {
		options = append(options, option)
	}
	// compare checks the uploaded object against the byte count and hash
	// accumulated while reading
	compare := func(dst fs.Object) error {
		var sums map[hash.Type]string
		if hasher != nil {
			sums = hasher.Sums()
		}
		src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, sums, fdst)
		if !Equal(ctx, src, dst) {
			err = errors.Errorf("corrupted on transfer")
			err = fs.CountError(err)
			fs.Errorf(dst, "%v", err)
			return err
		}
		return nil
	}
	// check if file small enough for direct upload
	buf := make([]byte, fs.Config.StreamingUploadCutoff)
	if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
		fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
		src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
		return Copy(ctx, fdst, nil, dstFileName, src)
	}
	// Make a new ReadCloser with the bits we've already read
	in = &readCloser{
		Reader: io.MultiReader(bytes.NewReader(buf), trackingIn),
		Closer: in,
	}
	fStreamTo := fdst
	canStream := fdst.Features().PutStream != nil
	if !canStream {
		fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
		tmpLocalFs, err := fs.TemporaryLocalFs()
		if err != nil {
			return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
		}
		defer func() {
			err := Purge(ctx, tmpLocalFs, "")
			if err != nil {
				fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err)
			}
		}()
		fStreamTo = tmpLocalFs
	}
	if SkipDestructive(ctx, dstFileName, "upload from pipe") {
		// prevents "broken pipe" errors
		_, err = io.Copy(ioutil.Discard, in)
		return nil, err
	}
	objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil)
	if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, options...); err != nil {
		return dst, err
	}
	if err = compare(dst); err != nil {
		return dst, err
	}
	if !canStream {
		// copy dst (which is the local object we have just streamed to) to the remote
		return Copy(ctx, fdst, nil, dstFileName, dst)
	}
	return dst, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func PublicLink(ctx context.Context, f fs.Fs, remote string, expire fs.Duration, unlink bool) (string, error) {
	doPublicLink := f.Features().PublicLink
	if doPublicLink == nil {
		return "", errors.Errorf("%v doesn't support public links", f)
	}
	return doPublicLink(ctx, remote, expire, unlink)
}

// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
	// dirEmpty maps directory path -> believed-empty flag
	dirEmpty := make(map[string]bool)
	dirEmpty[dir] = !leaveRoot
	err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
			return nil
		}
		for _, entry := range entries {
			switch x := entry.(type) {
			case fs.Directory:
				// add a new directory as empty
				dir := x.Remote()
				_, found := dirEmpty[dir]
				if !found {
					dirEmpty[dir] = true
				}
			case fs.Object:
				// mark the parents of the file as being non-empty
				dir := x.Remote()
				for dir != "" {
					dir = path.Dir(dir)
					if dir == "." || dir == "/" {
						dir = ""
					}
					empty, found := dirEmpty[dir]
					// End if we reach a directory which is non-empty
					if found && !empty {
						break
					}
					dirEmpty[dir] = false
				}
			}
		}
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "failed to rmdirs")
	}
	// Now delete the empty directories, starting from the longest path
	var toDelete []string
	for dir, empty := range dirEmpty {
		if empty {
			toDelete = append(toDelete, dir)
		}
	}
	sort.Strings(toDelete)
	// Iterate in reverse so children are removed before their parents
	for i := len(toDelete) - 1; i >= 0; i-- {
		dir := toDelete[i]
		err := TryRmdir(ctx, f, dir)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(dir, "Failed to rmdir: %v", err)
			return err
		}
	}
	return nil
}

// GetCompareDest sets up --compare-dest
func GetCompareDest() (CompareDest fs.Fs, err error) {
	CompareDest, err = cache.Get(fs.Config.CompareDest)
	if err != nil {
		return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", fs.Config.CompareDest, err))
	}
	return CompareDest, nil
}

// compareDest checks --compare-dest to see if src needs to
// be copied
//
// Returns True if src is in --compare-dest
func compareDest(ctx context.Context, dst, src fs.Object, CompareDest fs.Fs) (NoNeedTransfer bool, err error) {
	var remote string
	if dst == nil {
		remote = src.Remote()
	} else {
		remote = dst.Remote()
	}
	CompareDestFile, err := CompareDest.NewObject(ctx, remote)
	switch err {
	case fs.ErrorObjectNotFound:
		// Not in --compare-dest so needs transferring
		return false, nil
	case nil:
		break
	default:
		return false, err
	}
	if Equal(ctx, src, CompareDestFile) {
		fs.Debugf(src, "Destination found in --compare-dest, skipping")
		return true, nil
	}
	return false, nil
}

// GetCopyDest sets up --copy-dest
func GetCopyDest(fdst fs.Fs) (CopyDest fs.Fs, err error) {
	CopyDest, err = cache.Get(fs.Config.CopyDest)
	if err != nil {
		return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", fs.Config.CopyDest, err))
	}
	if !SameConfig(fdst, CopyDest) {
		return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination"))
	}
	if CopyDest.Features().Copy == nil {
		return nil, fserrors.FatalError(errors.New("can't use --copy-dest on a remote which doesn't support server side copy"))
	}
	return CopyDest, nil
}

// copyDest checks --copy-dest to see if src needs to
// be copied
//
// Returns True if src was copied from --copy-dest
func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
	var remote string
	if dst == nil {
		remote = src.Remote()
	} else {
		remote = dst.Remote()
	}
	CopyDestFile, err := CopyDest.NewObject(ctx, remote)
	switch err {
	case fs.ErrorObjectNotFound:
		return false, nil
	case nil:
		break
	default:
		return false, err
	}
	// Compare against the --copy-dest copy without updating its mod time
	opt := defaultEqualOpt()
	opt.updateModTime = false
	if equal(ctx, src, CopyDestFile, opt) {
		if dst == nil || !Equal(ctx, src, dst) {
			if dst != nil && backupDir != nil {
				err = MoveBackupDir(ctx, backupDir, dst)
				if err != nil {
					return false, errors.Wrap(err, "moving to --backup-dir failed")
				}
				// If successful zero out the dstObj as it is no longer there
				dst = nil
			}
			_, err := Copy(ctx, fdst, dst, remote, CopyDestFile)
			if err != nil {
				fs.Errorf(src, "Destination found in --copy-dest, error copying")
				return false, nil
			}
			fs.Debugf(src, "Destination found in --copy-dest, using server side copy")
			return true, nil
		}
		fs.Debugf(src, "Unchanged skipping")
		return true, nil
	}
	fs.Debugf(src, "Destination not found in --copy-dest")
	return false, nil
}

// CompareOrCopyDest checks --compare-dest and --copy-dest to see if src
// does not need to be copied
//
// Returns True if src does not need to be copied
func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
	if fs.Config.CompareDest != "" {
		return compareDest(ctx, dst, src, CompareOrCopyDest)
	} else if fs.Config.CopyDest != "" {
		return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir)
	}
	return false, nil
}

// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
	if dst == nil {
		fs.Debugf(src, "Need to transfer - File not found at Destination")
		return true
	}
	// If we should ignore existing files, don't transfer
	if fs.Config.IgnoreExisting {
		fs.Debugf(src, "Destination exists, skipping")
		return false
	}
	// If we should upload unconditionally
	if fs.Config.IgnoreTimes {
		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
		return true
	}
	// If UpdateOlder is in effect, skip if dst is newer than src
	if fs.Config.UpdateOlder {
		srcModTime := src.ModTime(ctx)
		dstModTime := dst.ModTime(ctx)
		dt := dstModTime.Sub(srcModTime)
		// If have a mutually agreed precision then use that
		modifyWindow := fs.GetModifyWindow(dst.Fs(), src.Fs())
		if modifyWindow == fs.ModTimeNotSupported {
			// Otherwise use 1 second as a safe default as
			// the resolution of the time a file was
			// uploaded.
			modifyWindow = time.Second
		}
		switch {
		case dt >= modifyWindow:
			fs.Debugf(src, "Destination is newer than source, skipping")
			return false
		case dt <= -modifyWindow:
			// force --checksum on for the check and do update modtimes by default
			opt := defaultEqualOpt()
			opt.forceModTimeMatch = true
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Unchanged skipping")
				return false
			}
		default:
			// Do a size only compare unless --checksum is set
			opt := defaultEqualOpt()
			opt.sizeOnly = !fs.Config.CheckSum
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow)
				return false
			}
			fs.Debugf(src, "Destination mod time is within %v of source but files differ, transferring", modifyWindow)
		}
	} else {
		// Check to see if changed or not
		if Equal(ctx, src, dst) {
			fs.Debugf(src, "Unchanged skipping")
			return false
		}
	}
	return true
}

// RcatSize reads data from the Reader until EOF and uploads it to a file on remote.
// Pass in size >=0 if known, <0 if not known
func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) {
	var obj fs.Object
	if size >= 0 {
		var err error
		// Size known use Put
		tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size)
		defer func() {
			tr.Done(err)
		}()
		body := ioutil.NopCloser(in) // we let the server close the body
		in := tr.Account(ctx, body)  // account the transfer (no buffering)
		if SkipDestructive(ctx, dstFileName, "upload from pipe") {
			// prevents "broken pipe" errors
			_, err = io.Copy(ioutil.Discard, in)
			return nil, err
		}
		info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst)
		obj, err = fdst.Put(ctx, in, info)
		if err != nil {
			fs.Errorf(dstFileName, "Post request put error: %v", err)
			return nil, err
		}
	} else {
		// Size unknown use Rcat
		obj, err = Rcat(ctx, fdst, dstFileName, in, modTime)
		if err != nil {
			fs.Errorf(dstFileName, "Post request rcat error: %v", err)
			return nil, err
		}
	}
	return obj, nil
}

// copyURLFunc is called from CopyURLFn
type copyURLFunc func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error)

// copyURLFn copies the data from the url to the function supplied
func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameFromURL bool, fn copyURLFunc) (err error) {
	client := fshttp.NewClient(fs.Config)
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer fs.CheckClose(resp.Body, &err)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return errors.Errorf("CopyURL failed: %s", resp.Status)
	}
	// Fall back to "now" if the server didn't send a valid Last-Modified
	modTime, err := http.ParseTime(resp.Header.Get("Last-Modified"))
	if err != nil {
		modTime = time.Now()
	}
	if dstFileNameFromURL {
		dstFileName = path.Base(resp.Request.URL.Path)
		if dstFileName == "." || dstFileName == "/" {
			return errors.Errorf("CopyURL failed: file name wasn't found in url")
		}
	}
	return fn(ctx, dstFileName, resp.Body, resp.ContentLength, modTime)
}

// CopyURL copies the data from the url to (fdst, dstFileName)
func CopyURL(ctx context.Context, fdst fs.Fs, dstFileName string, url string, dstFileNameFromURL bool, noClobber bool) (dst fs.Object, err error) {
	err = copyURLFn(ctx, dstFileName, url, dstFileNameFromURL, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) {
		if noClobber {
			_, err = fdst.NewObject(ctx, dstFileName)
			if err == nil {
				return errors.New("CopyURL failed: file already exist")
			}
		}
		dst, err = RcatSize(ctx, fdst, dstFileName, in, size, modTime)
		return err
	})
	return dst, err
}

// CopyURLToWriter copies the data from the url to the io.Writer supplied
func CopyURLToWriter(ctx context.Context, url string, out io.Writer) (err error) {
	return copyURLFn(ctx, "", url, false, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) {
		_, err = io.Copy(out, in)
		return err
	})
}

// BackupDir returns the correctly configured --backup-dir
func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) { if fs.Config.BackupDir != "" { backupDir, err = cache.Get(fs.Config.BackupDir) if err != nil { return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err)) } if !SameConfig(fdst, backupDir) { return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination")) } if srcFileName == "" { if Overlapping(fdst, backupDir) { return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap")) } if Overlapping(fsrc, backupDir) { return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap")) } } else { if fs.Config.Suffix == "" { if SameDir(fdst, backupDir) { return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same")) } if SameDir(fsrc, backupDir) { return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same")) } } } } else if fs.Config.Suffix != "" { // --backup-dir is not set but --suffix is - use the destination as the backupDir backupDir = fdst } else { return nil, fserrors.FatalError(errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty")) } if !CanServerSideMove(backupDir) { return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy")) } return backupDir, nil } // MoveBackupDir moves a file to the backup dir func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) { remoteWithSuffix := SuffixName(dst.Remote()) overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix) _, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst) return err } // moveOrCopyFile moves or copies a single file possibly to a new name func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName 
string, srcFileName string, cp bool) (err error) { dstFilePath := path.Join(fdst.Root(), dstFileName) srcFilePath := path.Join(fsrc.Root(), srcFileName) if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath { fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName) return nil } // Choose operations Op := Move if cp { Op = Copy } // Find src object srcObj, err := fsrc.NewObject(ctx, srcFileName) if err != nil { return err } // Find dst object if it exists var dstObj fs.Object if !fs.Config.NoCheckDest { dstObj, err = fdst.NewObject(ctx, dstFileName) if err == fs.ErrorObjectNotFound { dstObj = nil } else if err != nil { return err } } // Special case for changing case of a file on a case insensitive remote // This will move the file to a temporary name then // move it back to the intended destination. This is required // to avoid issues with certain remotes and avoid file deletion. if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) { // Create random name to temporarily move file to tmpObjName := dstFileName + "-rclone-move-" + random.String(8) _, err := fdst.NewObject(ctx, tmpObjName) if err != fs.ErrorObjectNotFound { if err == nil { return errors.New("found an already existing file with a randomly generated name. 
Try the operation again") } return errors.Wrap(err, "error while attempting to move file to a temporary location") } tr := accounting.Stats(ctx).NewTransfer(srcObj) defer func() { tr.Done(err) }() tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj) if err != nil { return errors.Wrap(err, "error while moving file to temporary location") } _, err = Op(ctx, fdst, nil, dstFileName, tmpObj) return err } var backupDir, copyDestDir fs.Fs if fs.Config.BackupDir != "" || fs.Config.Suffix != "" { backupDir, err = BackupDir(fdst, fsrc, srcFileName) if err != nil { return errors.Wrap(err, "creating Fs for --backup-dir failed") } } if fs.Config.CompareDest != "" { copyDestDir, err = GetCompareDest() if err != nil { return err } } else if fs.Config.CopyDest != "" { copyDestDir, err = GetCopyDest(fdst) if err != nil { return err } } NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir) if err != nil { return err } if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) { // If destination already exists, then we must move it into --backup-dir if required if dstObj != nil && backupDir != nil { err = MoveBackupDir(ctx, backupDir, dstObj) if err != nil { return errors.Wrap(err, "moving to --backup-dir failed") } // If successful zero out the dstObj as it is no longer there dstObj = nil } _, err = Op(ctx, fdst, dstObj, dstFileName, srcObj) } else { tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj) if !cp { err = DeleteFile(ctx, srcObj) } tr.Done(err) } return err } // MoveFile moves a single file possibly to a new name func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false) } // CopyFile moves a single file possibly to a new name func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true) } 
// SetTier changes tier of object in remote
func SetTier(ctx context.Context, fsrc fs.Fs, tier string) error {
	return ListFn(ctx, fsrc, func(o fs.Object) {
		objImpl, ok := o.(fs.SetTierer)
		if !ok {
			fs.Errorf(fsrc, "Remote object does not implement SetTier")
			return
		}
		err := objImpl.SetTier(tier)
		if err != nil {
			fs.Errorf(fsrc, "Failed to do SetTier, %v", err)
		}
	})
}

// ListFormat defines files information print format
type ListFormat struct {
	separator string                              // column separator
	dirSlash  bool                                // append "/" to directory names
	absolute  bool                                // print a leading "/" on paths
	output    []func(entry *ListJSONItem) string  // one formatter per column
	csv       *csv.Writer                         // non-nil when CSV output is enabled
	buf       bytes.Buffer                        // scratch buffer for CSV encoding
}

// SetSeparator changes separator in struct
func (l *ListFormat) SetSeparator(separator string) {
	l.separator = separator
}

// SetDirSlash defines if slash should be printed
func (l *ListFormat) SetDirSlash(dirSlash bool) {
	l.dirSlash = dirSlash
}

// SetAbsolute prints a leading slash in front of path names
func (l *ListFormat) SetAbsolute(absolute bool) {
	l.absolute = absolute
}

// SetCSV defines if the output should be csv
//
// Note that you should call SetSeparator before this if you want a
// custom separator
func (l *ListFormat) SetCSV(useCSV bool) {
	if useCSV {
		l.csv = csv.NewWriter(&l.buf)
		if l.separator != "" {
			// csv only supports a single rune separator
			l.csv.Comma = []rune(l.separator)[0]
		}
	} else {
		l.csv = nil
	}
}

// SetOutput sets functions used to create files information
func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {
	l.output = output
}

// AddModTime adds file's Mod Time to output
func (l *ListFormat) AddModTime() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.ModTime.When.Local().Format("2006-01-02 15:04:05")
	})
}

// AddSize adds file's size to output
func (l *ListFormat) AddSize() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return strconv.FormatInt(entry.Size, 10)
	})
}

// normalisePath makes sure the path has the correct slashes for the current mode
func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string {
	if l.absolute && !strings.HasPrefix(remote, "/") {
		remote = "/" + remote
	}
	if entry.IsDir && l.dirSlash {
		remote += "/"
	}
	return remote
}

// AddPath adds path to file to output
func (l *ListFormat) AddPath() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return l.normalisePath(entry, entry.Path)
	})
}

// AddEncrypted adds the encrypted path to file to output
func (l *ListFormat) AddEncrypted() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return l.normalisePath(entry, entry.Encrypted)
	})
}

// AddHash adds the hash of the type given to the output
func (l *ListFormat) AddHash(ht hash.Type) {
	hashName := ht.String()
	l.AppendOutput(func(entry *ListJSONItem) string {
		if entry.IsDir {
			// Directories don't have hashes
			return ""
		}
		return entry.Hashes[hashName]
	})
}

// AddID adds file's ID to the output if known
func (l *ListFormat) AddID() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.ID
	})
}

// AddOrigID adds file's Original ID to the output if known
func (l *ListFormat) AddOrigID() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.OrigID
	})
}

// AddTier adds file's Tier to the output if known
func (l *ListFormat) AddTier() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.Tier
	})
}

// AddMimeType adds file's MimeType to the output if known
func (l *ListFormat) AddMimeType() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.MimeType
	})
}

// AppendOutput adds string generated by specific function to printed output
func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) {
	l.output = append(l.output, functionToAppend)
}

// Format prints information about the DirEntry in the format defined
func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
	var out []string
	for _, fun := range l.output {
		out = append(out, fun(entry))
	}
	if l.csv != nil {
		l.buf.Reset()
		_ = l.csv.Write(out) // can't fail writing to bytes.Buffer
		l.csv.Flush()
		// csv.Writer appends a newline - strip it off again
		result = strings.TrimRight(l.buf.String(), "\n")
	} else {
		result = strings.Join(out, l.separator)
	}
	return result
}

// DirMove renames srcRemote to dstRemote
//
// It does this by loading the directory tree into memory (using ListR
// if available) and doing renames in parallel.
func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) {
	// Use DirMove if possible
	if doDirMove := f.Features().DirMove; doDirMove != nil {
		err = doDirMove(ctx, f, srcRemote, dstRemote)
		if err == nil {
			accounting.Stats(ctx).Renames(1)
		}
		return err
	}

	// Load the directory tree into memory
	tree, err := walk.NewDirTree(ctx, f, srcRemote, true, -1)
	if err != nil {
		return errors.Wrap(err, "RenameDir tree walk")
	}

	// Get the directories in sorted order
	dirs := tree.Dirs()

	// Make the destination directories - must be done in order not in parallel
	for _, dir := range dirs {
		dstPath := dstRemote + dir[len(srcRemote):]
		err := f.Mkdir(ctx, dstPath)
		if err != nil {
			return errors.Wrap(err, "RenameDir mkdir")
		}
	}

	// Rename the files in parallel
	type rename struct {
		o       fs.Object
		newPath string
	}
	renames := make(chan rename, fs.Config.Transfers)
	g, gCtx := errgroup.WithContext(context.Background())
	for i := 0; i < fs.Config.Transfers; i++ {
		g.Go(func() error {
			for job := range renames {
				// Overwrite any existing object at the destination
				dstOverwritten, _ := f.NewObject(gCtx, job.newPath)
				_, err := Move(gCtx, f, dstOverwritten, job.newPath, job.o)
				if err != nil {
					return err
				}
				select {
				case <-gCtx.Done():
					return gCtx.Err()
				default:
				}
			}
			return nil
		})
	}
	for dir, entries := range tree {
		dstPath := dstRemote + dir[len(srcRemote):]
		for _, entry := range entries {
			if o, ok := entry.(fs.Object); ok {
				renames <- rename{o, path.Join(dstPath, path.Base(o.Remote()))}
			}
		}
	}
	close(renames)
	err = g.Wait()
	if err != nil {
		return errors.Wrap(err, "RenameDir renames")
	}

	// Remove the source directories in reverse order
	for i := len(dirs) - 1; i >= 0; i-- {
		err := f.Rmdir(ctx, dirs[i])
		if err != nil {
			return errors.Wrap(err, "RenameDir rmdir")
		}
	}

	return nil
}

// FsInfo provides information about a remote
type FsInfo struct {
	// Name of the remote (as passed into NewFs)
	Name string

	// Root of the remote (as passed into NewFs)
	Root string

	// String returns a description of the FS
	String string

	// Precision of the ModTimes in this Fs in Nanoseconds
	Precision time.Duration

	// Returns the supported hash types of the filesystem
	Hashes []string

	// Features returns the optional features of this Fs
	Features map[string]bool
}

// GetFsInfo gets the information (FsInfo) about a given Fs
func GetFsInfo(f fs.Fs) *FsInfo {
	info := &FsInfo{
		Name:      f.Name(),
		Root:      f.Root(),
		String:    f.String(),
		Precision: f.Precision(),
		Hashes:    make([]string, 0, 4),
		Features:  f.Features().Enabled(),
	}
	for _, hashType := range f.Hashes().Array() {
		info.Hashes = append(info.Hashes, hashType.String())
	}
	return info
}

var (
	// interactiveMu guards skipped and serialises the --interactive prompt
	interactiveMu sync.Mutex
	// skipped remembers per-action "skip all"/"do all" answers
	skipped = map[string]bool{}
)

// skipDestructiveChoose asks the user which action to take
//
// Call with interactiveMu held
func skipDestructiveChoose(ctx context.Context, subject interface{}, action string) (skip bool) {
	fmt.Printf("rclone: %s \"%v\"?\n", action, subject)
	switch i := config.CommandDefault([]string{
		"yYes, this is OK",
		"nNo, skip this",
		fmt.Sprintf("sSkip all %s operations with no more questions", action),
		fmt.Sprintf("!Do all %s operations with no more questions", action),
		"qExit rclone now.",
	}, 0); i {
	case 'y':
		skip = false
	case 'n':
		skip = true
	case 's':
		skip = true
		skipped[action] = true
		fs.Logf(nil, "Skipping all %s operations from now on without asking", action)
	case '!':
		skip = false
		skipped[action] = false
		fs.Logf(nil, "Doing all %s operations from now on without asking", action)
	case 'q':
		fs.Logf(nil, "Quitting rclone now")
		atexit.Run()
		os.Exit(0)
	default:
		// Shouldn't happen - fail safe by skipping
		skip = true
		fs.Errorf(nil, "Bad choice %c", i)
	}
	return skip
}

// SkipDestructive should be called whenever rclone is about to do an destructive operation.
//
// subject should be the object or directory in use
//
// action should be a descriptive word or short phrase
//
// Together they should make sense in this sentence: "Rclone is about
// to action subject".
func SkipDestructive(ctx context.Context, subject interface{}, action string) (skip bool) {
	var flag string
	switch {
	case fs.Config.DryRun:
		flag = "--dry-run"
		skip = true
	case fs.Config.Interactive:
		flag = "--interactive"
		interactiveMu.Lock()
		defer interactiveMu.Unlock()
		// Use any remembered "skip all"/"do all" answer before prompting
		var found bool
		skip, found = skipped[action]
		if !found {
			skip = skipDestructiveChoose(ctx, subject, action)
		}
	default:
		return false
	}
	if skip {
		fs.Logf(subject, "Skipped %s as %s is set", action, flag)
	}
	return skip
}
// NOTE(review): this region of the file contains TWO concatenated revisions of
// the same anteater package (0.03 and 0.03.1, identical apart from the version
// constant), separated by the stray text line "Ver 0.03.1". The duplication is
// preserved verbatim below; only comments have been added.
package anteater

import (
	"net/http"
	"fmt"
	"time"
	"sync"
	"log"
)

const (
	// version is the server release tag, embedded in serverSign.
	version    = "0.03"
	serverSign = "AE " + version
)

/**
 * Path to index file (relative name; prefixed with Conf.DataPath in MainInit)
 **/
var IndexPath string = "file.index"

/**
 * Path to data files (relative name; prefixed with Conf.DataPath in MainInit)
 **/
var DataPath string = "file.data"

/**
 * Config object
 */
var Conf *Config

/**
 * For Container.Id creation
 */
var ContainerLastId int32

/**
 * Map with container objects
 */
var FileContainers map[int32]*Container = make(map[int32]*Container)

/**
 * Mutex for allocate new files
 */
var GetFileLock *sync.Mutex = &sync.Mutex{}

/**
 * File info index
 */
var Index map[string]*FileInfo

/**
 * Lock for Index
 */
var IndexLock *sync.Mutex = &sync.Mutex{}

/**
 * Logger object
 */
var Log *AntLog

/**
 * Server start time
 */
var StartTime time.Time = time.Now()

/**
 * Time of last dump
 */
var LastDump time.Time = time.Now()

/**
 * Making dump time
 */
var LastDumpTime time.Duration

/**
 * Size of index file
 */
var IndexFileSize int64

/**
 * Metrics
 */
var HttpCn *StateHttpCounters = &StateHttpCounters{}
var AllocCn *StateAllocateCounters = &StateAllocateCounters{}

// MainInit loads the config and logger, rewrites IndexPath/DataPath under the
// configured data directory, loads (or creates) the on-disk state, and starts
// a background goroutine that runs Cleanup once a minute.
// NOTE(review): the ticker goroutine has no stop mechanism — presumably it is
// intended to live for the process lifetime; confirm before reuse.
func MainInit(config string) {
	// Init config
	var err error
	Conf, err = LoadConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	// Init logger
	Log, err = LogInit()
	if err != nil {
		log.Fatal(err)
	}
	// Set paths
	IndexPath = Conf.DataPath + "/" + IndexPath
	DataPath = Conf.DataPath + "/" + DataPath
	// Load data from index
	err = LoadData(IndexPath)
	if err != nil {
		// or create new
		Log.Debugln("Error while reading index file:", err)
		Log.Debugln("Try create conainer")
		_, err := NewContainer(DataPath)
		if err != nil {
			Log.Warnln("Can't create new container")
			Log.Fatal(err)
		}
		Cleanup()
	}
	go func() {
		ch := time.Tick(60 * time.Second)
		for _ = range ch {
			func() {
				Cleanup()
			}()
		}
	}()
	Log.Infoln("Start server with config", config)
}

// Start runs the HTTP server(s): a read-only server when the read and write
// addresses differ, and a read-write server (blocking) on the write address.
func Start() {
	if Conf.HttpReadAddr != Conf.HttpWriteAddr {
		go RunServer(http.HandlerFunc(HttpRead), Conf.HttpReadAddr)
	}
	RunServer(http.HandlerFunc(HttpReadWrite), Conf.HttpWriteAddr)
}

// Stop runs a final Cleanup and closes every container's backing file.
func Stop() {
	Log.Infoln("Server stopping..")
	fmt.Println("Server stopping now")
	Cleanup()
	for _, c := range(FileContainers) {
		c.F.Close()
	}
	fmt.Println("Bye")
}

// Cleanup compacts all containers, allocates a fresh container when the
// largest free block drops below Conf.MinEmptySpace, and dumps the index to
// disk if any container reported changes.
func Cleanup() {
	var maxSpace int64
	var hasChanges bool
	for _, c := range(FileContainers) {
		if c.HasChanges() {
			hasChanges = true
		}
		c.Clean()
		if c.MaxSpace() > maxSpace {
			maxSpace = c.MaxSpace()
		}
	}
	if maxSpace <= Conf.MinEmptySpace {
		_, err := NewContainer(DataPath)
		if err != nil {
			Log.Warnln(err)
		}
	}
	if hasChanges {
		err := DumpData(IndexPath)
		if err != nil {
			Log.Infoln("Dump error:", err)
		}
	}
}

Ver 0.03.1

// NOTE(review): second concatenated revision begins here; identical to the
// copy above except for the version constant.
package anteater

import (
	"net/http"
	"fmt"
	"time"
	"sync"
	"log"
)

const (
	// version is the server release tag, embedded in serverSign.
	version    = "0.03.1"
	serverSign = "AE " + version
)

/**
 * Path to index file (relative name; prefixed with Conf.DataPath in MainInit)
 **/
var IndexPath string = "file.index"

/**
 * Path to data files (relative name; prefixed with Conf.DataPath in MainInit)
 **/
var DataPath string = "file.data"

/**
 * Config object
 */
var Conf *Config

/**
 * For Container.Id creation
 */
var ContainerLastId int32

/**
 * Map with container objects
 */
var FileContainers map[int32]*Container = make(map[int32]*Container)

/**
 * Mutex for allocate new files
 */
var GetFileLock *sync.Mutex = &sync.Mutex{}

/**
 * File info index
 */
var Index map[string]*FileInfo

/**
 * Lock for Index
 */
var IndexLock *sync.Mutex = &sync.Mutex{}

/**
 * Logger object
 */
var Log *AntLog

/**
 * Server start time
 */
var StartTime time.Time = time.Now()

/**
 * Time of last dump
 */
var LastDump time.Time = time.Now()

/**
 * Making dump time
 */
var LastDumpTime time.Duration

/**
 * Size of index file
 */
var IndexFileSize int64

/**
 * Metrics
 */
var HttpCn *StateHttpCounters = &StateHttpCounters{}
var AllocCn *StateAllocateCounters = &StateAllocateCounters{}

// MainInit loads the config and logger, rewrites IndexPath/DataPath under the
// configured data directory, loads (or creates) the on-disk state, and starts
// a background goroutine that runs Cleanup once a minute.
func MainInit(config string) {
	// Init config
	var err error
	Conf, err = LoadConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	// Init logger
	Log, err = LogInit()
	if err != nil {
		log.Fatal(err)
	}
	// Set paths
	IndexPath = Conf.DataPath + "/" + IndexPath
	DataPath = Conf.DataPath + "/" + DataPath
	// Load data from index
	err = LoadData(IndexPath)
	if err != nil {
		// or create new
		Log.Debugln("Error while reading index file:", err)
		Log.Debugln("Try create conainer")
		_, err := NewContainer(DataPath)
		if err != nil {
			Log.Warnln("Can't create new container")
			Log.Fatal(err)
		}
		Cleanup()
	}
	go func() {
		ch := time.Tick(60 * time.Second)
		for _ = range ch {
			func() {
				Cleanup()
			}()
		}
	}()
	Log.Infoln("Start server with config", config)
}

// Start runs the HTTP server(s): a read-only server when the read and write
// addresses differ, and a read-write server (blocking) on the write address.
func Start() {
	if Conf.HttpReadAddr != Conf.HttpWriteAddr {
		go RunServer(http.HandlerFunc(HttpRead), Conf.HttpReadAddr)
	}
	RunServer(http.HandlerFunc(HttpReadWrite), Conf.HttpWriteAddr)
}

// Stop runs a final Cleanup and closes every container's backing file.
func Stop() {
	Log.Infoln("Server stopping..")
	fmt.Println("Server stopping now")
	Cleanup()
	for _, c := range(FileContainers) {
		c.F.Close()
	}
	fmt.Println("Bye")
}

// Cleanup compacts all containers, allocates a fresh container when the
// largest free block drops below Conf.MinEmptySpace, and dumps the index to
// disk if any container reported changes.
func Cleanup() {
	var maxSpace int64
	var hasChanges bool
	for _, c := range(FileContainers) {
		if c.HasChanges() {
			hasChanges = true
		}
		c.Clean()
		if c.MaxSpace() > maxSpace {
			maxSpace = c.MaxSpace()
		}
	}
	if maxSpace <= Conf.MinEmptySpace {
		_, err := NewContainer(DataPath)
		if err != nil {
			Log.Warnln(err)
		}
	}
	if hasChanges {
		err := DumpData(IndexPath)
		if err != nil {
			Log.Infoln("Dump error:", err)
		}
	}
}
package state

import (
	"errors"
	"fmt"
	"regexp"
	"sort"
	"strings"
	"sync"
	"time"

	eventbus "github.com/Dataman-Cloud/swan/src/event"
	"github.com/Dataman-Cloud/swan/src/manager/framework/connector"
	"github.com/Dataman-Cloud/swan/src/manager/framework/event"
	"github.com/Dataman-Cloud/swan/src/manager/framework/store"
	"github.com/Dataman-Cloud/swan/src/types"
	"github.com/Dataman-Cloud/swan/src/utils"

	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
)

// AppMode describes how an app places its tasks: "fixed" (static IPs) or
// "replicates" (bridge/host networking with port mappings).
type AppMode string

var (
	APP_MODE_FIXED      AppMode = "fixed"
	APP_MODE_REPLICATES AppMode = "replicates"
)

var persistentStore store.Store

// SetStore injects the persistence backend used by every App in this package.
func SetStore(newStore store.Store) {
	persistentStore = newStore
}

// App is one application managed by the framework, together with its version
// history, task slots, and a state machine driving lifecycle transitions.
type App struct {
	ID       string           `json:"id"`
	Name     string           `json:"name"`
	Versions []*types.Version `json:"versions"`

	slotsLock sync.Mutex
	Slots     map[int]*Slot `json:"slots"`

	// app run with CurrentVersion config
	CurrentVersion *types.Version `json:"current_version"`
	// use when app updated, ProposedVersion can either be commit or revert
	ProposedVersion *types.Version `json:"proposed_version"`

	Mode AppMode `json:"mode"` // fixed or repliactes

	Created time.Time
	Updated time.Time

	StateMachine *StateMachine
	ClusterID    string

	UserEventChan chan *event.UserEvent
}

// AppsByUpdated sorts apps by their Updated timestamp, most recent first.
type AppsByUpdated []*App

func (a AppsByUpdated) Len() int           { return len(a) }
func (a AppsByUpdated) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a AppsByUpdated) Less(i, j int) bool { return a[i].Updated.After(a[j].Updated) } // NOTE(xychu): Desc order

// NewApp validates the version, builds the App (ID is appName-runAs-clusterID),
// starts its state machine in the Creating state, and persists both the app
// and its initial version. Returns an error if an app with the same ID exists
// or the version fails validation.
func NewApp(version *types.Version, userEventChan chan *event.UserEvent) (*App, error) {
	appID := fmt.Sprintf("%s-%s-%s", version.AppName, version.RunAs, connector.Instance().ClusterID)

	existingApp, _ := persistentStore.GetApp(appID)
	if existingApp != nil {
		return nil, errors.New("app already exists")
	}

	err := validateAndFormatVersion(version)
	if err != nil {
		return nil, err
	}

	app := &App{
		Versions:       []*types.Version{},
		Slots:          make(map[int]*Slot),
		CurrentVersion: version,
		ID:             appID,
		Name:           version.AppName,
		ClusterID:      connector.Instance().ClusterID,
		Created:        time.Now(),
		Updated:        time.Now(),
		UserEventChan:  userEventChan,
	}

	if version.Mode == "fixed" {
		app.Mode = APP_MODE_FIXED
	} else { // if no mode specified, default should be replicates
		app.Mode = APP_MODE_REPLICATES
	}

	version.ID = fmt.Sprintf("%d", time.Now().Unix())
	if version.AppVersion == "" {
		version.AppVersion = version.ID
	}

	app.StateMachine = NewStateMachine()
	app.StateMachine.Start(NewStateCreating(app))

	app.create()
	app.SaveVersion(app.CurrentVersion)

	return app, nil
}

// ScaleUp adds newInstances tasks; fixed-mode apps must also supply exactly
// that many new IPs. also need user pass ip here
func (app *App) ScaleUp(newInstances int, newIps []string) error {
	if !app.StateMachine.CanTransitTo(APP_STATE_SCALE_UP) {
		return fmt.Errorf("state machine can not transit from state: %s to state: %s",
			app.StateMachine.ReadableState(), APP_STATE_SCALE_UP)
	}

	if newInstances <= 0 {
		return errors.New("specify instances num want to increase")
	}

	if app.IsFixed() && len(newIps) != newInstances {
		return fmt.Errorf("please provide %d unique ip", newInstances)
	}

	app.CurrentVersion.IP = append(app.CurrentVersion.IP, newIps...)
	app.CurrentVersion.Instances = int32(len(app.Slots) + newInstances)
	app.Updated = time.Now()

	app.Touch()
	return app.TransitTo(APP_STATE_SCALE_UP)
}

// ScaleDown removes removeInstances tasks; capped at the current slot count.
func (app *App) ScaleDown(removeInstances int) error {
	if !app.StateMachine.CanTransitTo(APP_STATE_SCALE_DOWN) {
		return fmt.Errorf("state machine can not transit from state: %s to state: %s",
			app.StateMachine.ReadableState(), APP_STATE_SCALE_DOWN)
	}

	if removeInstances <= 0 {
		return errors.New("please specify at least 1 task to scale-down")
	}

	if removeInstances > len(app.Slots) {
		return fmt.Errorf("no more than %d tasks can be shutdown", app.CurrentVersion.Instances)
	}

	app.CurrentVersion.Instances = int32(len(app.Slots) - removeInstances)
	app.Updated = time.Now()

	app.Touch()
	return app.TransitTo(APP_STATE_SCALE_DOWN)
}

// delete a application and all related objects: versions, tasks, slots, proxies, dns record
func (app *App) Delete() error {
	if !app.StateMachine.CanTransitTo(APP_STATE_DELETING) {
		return fmt.Errorf("state machine can not transit from state: %s to state: %s",
			app.StateMachine.ReadableState(), APP_STATE_DELETING)
	}

	return app.TransitTo(APP_STATE_DELETING)
}

// Update validates the proposed version against the current one, persists it,
// and starts a rolling update of one slot. Rejected while another update is
// already in flight (ProposedVersion != nil).
func (app *App) Update(version *types.Version, store store.Store) error {
	if !app.StateMachine.CanTransitTo(APP_STATE_UPDATING) || app.ProposedVersion != nil {
		return fmt.Errorf("state machine can not transit from state: %s to state: %s",
			app.StateMachine.ReadableState(), APP_STATE_UPDATING)
	}

	if err := validateAndFormatVersion(version); err != nil {
		return err
	}

	version.ID = fmt.Sprintf("%d", time.Now().Unix())
	if version.AppVersion == "" {
		version.AppVersion = version.ID
	}

	if err := app.checkProposedVersionValid(version); err != nil {
		return err
	}

	if app.CurrentVersion == nil {
		return errors.New("update failed: current version was losted")
	}

	app.ProposedVersion = version
	app.SaveVersion(app.ProposedVersion)
	app.Touch()

	return app.TransitTo(APP_STATE_UPDATING, 1)
}

// ProceedingRollingUpdate advances an in-flight update by `instances` more
// slots; -1 means "all remaining".
func (app *App) ProceedingRollingUpdate(instances int) error {
	if !app.StateMachine.CanTransitTo(APP_STATE_UPDATING) || app.ProposedVersion == nil {
		return fmt.Errorf("state machine can not transit from state: %s to state: %s",
			app.StateMachine.ReadableState(), APP_STATE_UPDATING)
	}

	// Count how many slots (in ID order) already run the proposed version.
	updatedCount := 0
	for index, slot := range app.GetSlots() {
		if slot.Version.ID == app.ProposedVersion.ID {
			updatedCount = index + 1
		}
	}

	// when instances is -1, indicates all remaining
	if instances == -1 {
		instances = len(app.Slots) - updatedCount
	}

	if instances < 1 {
		return errors.New("please specify how many instance want proceeding the update")
	}

	if updatedCount+instances > len(app.GetSlots()) {
		return fmt.Errorf("only %d tasks left need to be updated now", len(app.GetSlots())-updatedCount)
	}

	return app.TransitTo(APP_STATE_UPDATING, instances)
}

// CancelUpdate rolls an in-flight update back to CurrentVersion.
func (app *App) CancelUpdate() error {
	if !app.StateMachine.CanTransitTo(APP_STATE_CANCEL_UPDATE) || app.ProposedVersion == nil {
		return fmt.Errorf("state machine can not transit from state: %s to state: %s",
			app.StateMachine.ReadableState(), APP_STATE_CANCEL_UPDATE)
	}

	if app.CurrentVersion == nil {
		return errors.New("cancel update failed: current version was nil")
	}

	return app.TransitTo(APP_STATE_CANCEL_UPDATE)
}

// ServiceDiscoveryURL is the app ID with dashes replaced by dots, lowercased.
func (app *App) ServiceDiscoveryURL() string {
	return strings.ToLower(strings.Replace(app.ID, "-", ".", -1))
}

// IsReplicates reports whether the app runs in replicates mode.
func (app *App) IsReplicates() bool {
	return app.Mode == APP_MODE_REPLICATES
}

// IsFixed reports whether the app runs in fixed (static-IP) mode.
func (app *App) IsFixed() bool {
	return app.Mode == APP_MODE_FIXED
}

// EmitAppEvent publishes an app lifecycle event to the event bus; unknown
// states fall through with an empty event type.
func (app *App) EmitAppEvent(stateString string) {
	eventType := ""
	switch stateString {
	case APP_STATE_CREATING:
		eventType = eventbus.EventTypeAppStateCreating
	case APP_STATE_DELETING:
		eventType = eventbus.EventTypeAppStateDeletion
	case APP_STATE_NORMAL:
		eventType = eventbus.EventTypeAppStateNormal
	case APP_STATE_UPDATING:
		eventType = eventbus.EventTypeAppStateUpdating
	case APP_STATE_CANCEL_UPDATE:
		eventType = eventbus.EventTypeAppStateCancelUpdate
	case APP_STATE_SCALE_UP:
		eventType = eventbus.EventTypeAppStateScaleUp
	case APP_STATE_SCALE_DOWN:
		eventType = eventbus.EventTypeAppStateScaleDown
	default:
	}

	e := &eventbus.Event{Type: eventType}
	e.AppID = app.ID
	e.Payload = &types.AppInfoEvent{
		AppID:     app.ID,
		Name:      app.Name,
		ClusterID: app.ClusterID,
		RunAs:     app.CurrentVersion.RunAs,
	}
	eventbus.WriteEvent(e)
}

// StateIs reports whether the state machine is currently in the given state.
func (app *App) StateIs(state string) bool {
	return app.StateMachine.Is(state)
}

// TransitTo moves the state machine to targetState (built via stateFactory)
// and persists the app.
func (app *App) TransitTo(targetState string, args ...interface{}) error {
	err := app.StateMachine.TransitTo(app.stateFactory(targetState, args...))
	if err != nil {
		return err
	}

	app.Touch()
	return nil
}

// RemoveSlot deallocates, removes, and forgets the slot at index, if present.
func (app *App) RemoveSlot(index int) {
	if slot, found := app.GetSlot(index); found {
		OfferAllocatorInstance().RemoveSlotFromAllocator(slot)
		slot.Remove()

		app.slotsLock.Lock()
		delete(app.Slots, index)
		app.slotsLock.Unlock()

		app.Touch()
	}
}

// stateFactory builds the State object for a named state; for UPDATING, the
// first variadic arg is the slot count to update (defaults to 1).
func (app *App) stateFactory(stateName string, args ...interface{}) State {
	switch stateName {
	case APP_STATE_NORMAL:
		return NewStateNormal(app)
	case APP_STATE_CREATING:
		return NewStateCreating(app)
	case APP_STATE_DELETING:
		return NewStateDeleting(app)
	case APP_STATE_SCALE_UP:
		return NewStateScaleUp(app)
	case APP_STATE_SCALE_DOWN:
		return NewStateScaleDown(app)
	case APP_STATE_UPDATING:
		slotCountNeedUpdate, ok := args[0].(int)
		if !ok {
			slotCountNeedUpdate = 1
		}
		return NewStateUpdating(app, slotCountNeedUpdate)
	case APP_STATE_CANCEL_UPDATE:
		return NewStateCancelUpdate(app)
	default:
		panic(errors.New("unrecognized state"))
	}
}

// GetSlots returns all slots sorted by ID.
// NOTE(review): reads app.Slots without slotsLock while writers lock it —
// looks racy under concurrent use; confirm the caller's threading model.
func (app *App) GetSlots() []*Slot {
	slots := make([]*Slot, 0)
	for _, v := range app.Slots {
		slots = append(slots, v)
	}

	slotsById := SlotsById(slots)
	sort.Sort(slotsById)

	return slotsById
}

// GetSlot looks up the slot at index.
func (app *App) GetSlot(index int) (*Slot, bool) {
	slot, ok := app.Slots[index]
	return slot, ok
}

// SetSlot stores slot at index under slotsLock and persists the app.
func (app *App) SetSlot(index int, slot *Slot) {
	app.slotsLock.Lock()
	app.Slots[index] = slot
	app.slotsLock.Unlock()

	app.Touch()
}

// Step advances the state machine one step and persists the app.
func (app *App) Step() {
	app.StateMachine.Step()
	app.Touch()
}

// make sure proposed version is valid then applied it to field ProposedVersion
func (app *App) checkProposedVersionValid(version *types.Version) error {
	// mode can not change
	if version.Mode != app.CurrentVersion.Mode {
		return fmt.Errorf("mode can not change when update app, current version is %s", app.CurrentVersion.Mode)
	}
	// runAs can not change
	if version.RunAs != app.CurrentVersion.RunAs {
		return fmt.Errorf("runAs can not change when update app, current version is %s", app.CurrentVersion.RunAs)
	}
	// appVersion should not equal
	if version.AppVersion == app.CurrentVersion.AppVersion {
		return fmt.Errorf("app version %s exists, choose another one", version.AppVersion)
	}
	// app instances should same as current instances
	if version.Instances != app.CurrentVersion.Instances {
		return fmt.Errorf("instances can not change when update app, current version is %d", app.CurrentVersion.Instances)
	}
	// fixed app IP length should be same as current instances
	if app.IsFixed() && int32(len(version.IP)) != app.CurrentVersion.Instances {
		return fmt.Errorf("Fixed mode App IP length can not change when update app, current version is %d", app.CurrentVersion.Instances)
	}
	return nil
}

// validateAndFormatVersion checks a version payload for structural problems
// (container/docker present, sane name/instances, mode-specific networking and
// health-check rules, valid constraints) and normalizes AppName and Mode in
// place.
func validateAndFormatVersion(version *types.Version) error {
	if version.Container == nil {
		return errors.New("swan only support mesos docker containerization, no container found")
	}

	if version.Container.Docker == nil {
		return errors.New("swan only support mesos docker containerization, no container found")
	}

	if version.AppName == "" {
		return errors.New("invalid appName: appName was empty")
	}

	// FIX: was `== 0`, which let negative instance counts through even though
	// the message requires a value greater than 0.
	if version.Instances <= 0 {
		return errors.New("invalid instances: instances must be specified and should greater than 0")
	}

	version.AppName = strings.TrimSpace(version.AppName)

	// FIX: was `r, _ := regexp.Compile(...)` with the error discarded; the
	// pattern is a constant, so MustCompile is the right tool.
	r := regexp.MustCompile("([A-Z]+)|([\\-\\.\\$\\*\\+\\?\\{\\}\\(\\)\\[\\]\\|]+)")
	errMsg := errors.New(`must be lower case characters and should not contain following special characters "-.$*?{}()[]|"`)

	//validation of AppId
	match := r.MatchString(version.AppName)
	if match {
		return fmt.Errorf("invalid app id [%s]: %s", version.AppName, errMsg)
	}

	//validation of RunAs
	match = r.MatchString(version.RunAs)
	if match {
		return fmt.Errorf("invalid runAs [%s]: %s", version.RunAs, errMsg)
	}

	match = r.MatchString(version.Container.Docker.Network)
	if match {
		return fmt.Errorf("invalid network [%s]: %s", version.Container.Docker.Network, errMsg)
	}

	if len(version.RunAs) == 0 {
		return errors.New("runAs should not empty")
	}

	if len(version.Mode) == 0 {
		version.Mode = string(APP_MODE_REPLICATES)
	}

	if (version.Mode != string(APP_MODE_REPLICATES)) && (version.Mode != string(APP_MODE_FIXED)) {
		return fmt.Errorf("enrecognized app mode %s", version.Mode)
	}

	// validation for fixed mode application
	if version.Mode == string(APP_MODE_FIXED) {
		if len(version.IP) != int(version.Instances) {
			return fmt.Errorf("should provide exactly %d ip for FIXED type app", version.Instances)
		}

		if len(version.Container.Docker.PortMappings) > 0 {
			return errors.New("fixed mode application doesn't support portmapping")
		}
	}

	// validation for replicates mode app
	if version.Mode == string(APP_MODE_REPLICATES) {
		// the only network driver should be **bridge**
		if !utils.SliceContains([]string{"bridge", "host"}, strings.ToLower(version.Container.Docker.Network)) {
			return errors.New("replicates mode app suppose the only network driver should be bridge or host")
		}

		// portMapping.Name should be mandatory
		for _, portmapping := range version.Container.Docker.PortMappings {
			if strings.TrimSpace(portmapping.Name) == "" {
				return errors.New("each port mapping should have a uniquely identified name")
			}
		}

		if strings.ToLower(version.Container.Docker.Network) == "host" {
			// portMapping.Name should be mandatory
			for _, portmapping := range version.Container.Docker.PortMappings {
				if portmapping.ContainerPort != 0 {
					return errors.New("containerPort not recongnizable for docker host network, port is mandatory")
				}
			}
		}

		portNames := make([]string, 0)
		for _, portmapping := range version.Container.Docker.PortMappings {
			portNames = append(portNames, portmapping.Name)
		}

		// portName should be unique
		if !utils.SliceUnique(portNames) {
			return errors.New("each port mapping should have a uniquely identified name")
		}

		// portName for health check should mandatory
		if version.HealthCheck != nil {
			protocol, portName := version.HealthCheck.Protocol, version.HealthCheck.PortName
			// portName should present in dockers' portMappings definition
			// portName if manditory for non-cmd health checks
			if strings.ToLower(protocol) != "cmd" && !utils.SliceContains(portNames, portName) {
				return fmt.Errorf("portname in healthCheck section should match that defined in portMappings")
			}

			if !utils.SliceContains([]string{"tcp", "http", "TCP", "HTTP", "cmd", "CMD"}, protocol) {
				return fmt.Errorf("doesn't recoginized protocol %s for health check", protocol)
			}

			if strings.ToLower(protocol) == "http" {
				if len(version.HealthCheck.Path) == 0 {
					return fmt.Errorf("no path provided for health check with %s protocol", protocol)
				}
			}

			if strings.ToLower(protocol) == "cmd" {
				if len(version.HealthCheck.Value) == 0 {
					return fmt.Errorf("no value provided for health check with %s protocol", protocol)
				}
			}
		}
	} else {
		if version.HealthCheck != nil {
			protocol := version.HealthCheck.Protocol
			if !utils.SliceContains([]string{"cmd", "CMD"}, protocol) {
				return fmt.Errorf("doesn't recoginized protocol %s for health check for fixed type app", protocol)
			}

			if len(version.HealthCheck.Value) == 0 {
				return fmt.Errorf("no value provided for health check with %s", protocol)
			}
		}
	}

	// validate constraints are all valid
	if len(version.Constraints) > 0 {
		evalStatement, err := ParseConstraint(strings.ToLower(version.Constraints))
		if err != nil {
			return err
		}

		err = evalStatement.Valid()
		if err != nil {
			return err
		}
	}

	return nil
}

// SaveVersion appends version to the in-memory history and persists it.
func (app *App) SaveVersion(version *types.Version) {
	app.Versions = append(app.Versions, version)
	WithConvertVersion(context.TODO(), app.ID, version, nil, persistentStore.CreateVersion)
}
// 1, remove app from persisted storage // 2, other cleanup process func (app *App) Remove() { app.remove() } // storage related func (app *App) Touch() { app.update() } func (app *App) update() { logrus.Debugf("update app %s", app.ID) WithConvertApp(context.TODO(), app, nil, persistentStore.UpdateApp) } func (app *App) create() { logrus.Debugf("create app %s", app.ID) WithConvertApp(context.TODO(), app, nil, persistentStore.CreateApp) } func (app *App) remove() { logrus.Debugf("remove app %s", app.ID) persistentStore.DeleteApp(context.TODO(), app.ID, nil) } more validations on app payload package state import ( "errors" "fmt" "net" "regexp" "sort" "strings" "sync" "time" eventbus "github.com/Dataman-Cloud/swan/src/event" "github.com/Dataman-Cloud/swan/src/manager/framework/connector" "github.com/Dataman-Cloud/swan/src/manager/framework/event" "github.com/Dataman-Cloud/swan/src/manager/framework/store" "github.com/Dataman-Cloud/swan/src/types" "github.com/Dataman-Cloud/swan/src/utils" "github.com/Sirupsen/logrus" "golang.org/x/net/context" ) type AppMode string var ( APP_MODE_FIXED AppMode = "fixed" APP_MODE_REPLICATES AppMode = "replicates" ) var persistentStore store.Store func SetStore(newStore store.Store) { persistentStore = newStore } type App struct { ID string `json:"id"` Name string `json:"name"` Versions []*types.Version `json:"versions"` slotsLock sync.Mutex Slots map[int]*Slot `json:"slots"` // app run with CurrentVersion config CurrentVersion *types.Version `json:"current_version"` // use when app updated, ProposedVersion can either be commit or revert ProposedVersion *types.Version `json:"proposed_version"` Mode AppMode `json:"mode"` // fixed or repliactes Created time.Time Updated time.Time StateMachine *StateMachine ClusterID string UserEventChan chan *event.UserEvent } type AppsByUpdated []*App func (a AppsByUpdated) Len() int { return len(a) } func (a AppsByUpdated) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a AppsByUpdated) Less(i, j int) 
bool { return a[i].Updated.After(a[j].Updated) } // NOTE(xychu): Desc order func NewApp(version *types.Version, userEventChan chan *event.UserEvent) (*App, error) { appID := fmt.Sprintf("%s-%s-%s", version.AppName, version.RunAs, connector.Instance().ClusterID) existingApp, _ := persistentStore.GetApp(appID) if existingApp != nil { return nil, errors.New("app already exists") } err := validateAndFormatVersion(version) if err != nil { return nil, err } app := &App{ Versions: []*types.Version{}, Slots: make(map[int]*Slot), CurrentVersion: version, ID: appID, Name: version.AppName, ClusterID: connector.Instance().ClusterID, Created: time.Now(), Updated: time.Now(), UserEventChan: userEventChan, } if version.Mode == "fixed" { app.Mode = APP_MODE_FIXED } else { // if no mode specified, default should be replicates app.Mode = APP_MODE_REPLICATES } version.ID = fmt.Sprintf("%d", time.Now().Unix()) if version.AppVersion == "" { version.AppVersion = version.ID } app.StateMachine = NewStateMachine() app.StateMachine.Start(NewStateCreating(app)) app.create() app.SaveVersion(app.CurrentVersion) return app, nil } // also need user pass ip here func (app *App) ScaleUp(newInstances int, newIps []string) error { if !app.StateMachine.CanTransitTo(APP_STATE_SCALE_UP) { return errors.New(fmt.Sprintf("state machine can not transit from state: %s to state: %s", app.StateMachine.ReadableState(), APP_STATE_SCALE_UP)) } if newInstances <= 0 { return errors.New("specify instances num want to increase") } if app.IsFixed() && len(newIps) != newInstances { return fmt.Errorf("please provide %d unique ip", newInstances) } app.CurrentVersion.IP = append(app.CurrentVersion.IP, newIps...) 
app.CurrentVersion.Instances = int32(len(app.Slots) + newInstances) app.Updated = time.Now() app.Touch() return app.TransitTo(APP_STATE_SCALE_UP) } func (app *App) ScaleDown(removeInstances int) error { if !app.StateMachine.CanTransitTo(APP_STATE_SCALE_DOWN) { return errors.New(fmt.Sprintf("state machine can not transit from state: %s to state: %s", app.StateMachine.ReadableState(), APP_STATE_SCALE_DOWN)) } if removeInstances <= 0 { return errors.New("please specify at least 1 task to scale-down") } if removeInstances > len(app.Slots) { return fmt.Errorf("no more than %d tasks can be shutdown", app.CurrentVersion.Instances) } app.CurrentVersion.Instances = int32(len(app.Slots) - removeInstances) app.Updated = time.Now() app.Touch() return app.TransitTo(APP_STATE_SCALE_DOWN) } // delete a application and all related objects: versions, tasks, slots, proxies, dns record func (app *App) Delete() error { if !app.StateMachine.CanTransitTo(APP_STATE_DELETING) { return errors.New(fmt.Sprintf("state machine can not transit from state: %s to state: %s", app.StateMachine.ReadableState(), APP_STATE_DELETING)) } return app.TransitTo(APP_STATE_DELETING) } func (app *App) Update(version *types.Version, store store.Store) error { if !app.StateMachine.CanTransitTo(APP_STATE_UPDATING) || app.ProposedVersion != nil { return errors.New(fmt.Sprintf("state machine can not transit from state: %s to state: %s", app.StateMachine.ReadableState(), APP_STATE_UPDATING)) } if err := validateAndFormatVersion(version); err != nil { return err } version.ID = fmt.Sprintf("%d", time.Now().Unix()) if version.AppVersion == "" { version.AppVersion = version.ID } if err := app.checkProposedVersionValid(version); err != nil { return err } if app.CurrentVersion == nil { return errors.New("update failed: current version was losted") } app.ProposedVersion = version app.SaveVersion(app.ProposedVersion) app.Touch() return app.TransitTo(APP_STATE_UPDATING, 1) } func (app *App) ProceedingRollingUpdate(instances 
int) error { if !app.StateMachine.CanTransitTo(APP_STATE_UPDATING) || app.ProposedVersion == nil { return errors.New(fmt.Sprintf("state machine can not transit from state: %s to state: %s", app.StateMachine.ReadableState(), APP_STATE_UPDATING)) } updatedCount := 0 for index, slot := range app.GetSlots() { if slot.Version.ID == app.ProposedVersion.ID { updatedCount = index + 1 } } // when instances is -1, indicates all remaining if instances == -1 { instances = len(app.Slots) - updatedCount } if instances < 1 { return errors.New("please specify how many instance want proceeding the update") } if updatedCount+instances > len(app.GetSlots()) { return errors.New(fmt.Sprintf("only %d tasks left need to be updated now", len(app.GetSlots())-updatedCount)) } return app.TransitTo(APP_STATE_UPDATING, instances) } func (app *App) CancelUpdate() error { if !app.StateMachine.CanTransitTo(APP_STATE_CANCEL_UPDATE) || app.ProposedVersion == nil { return errors.New(fmt.Sprintf("state machine can not transit from state: %s to state: %s", app.StateMachine.ReadableState(), APP_STATE_CANCEL_UPDATE)) } if app.CurrentVersion == nil { return errors.New("cancel update failed: current version was nil") } return app.TransitTo(APP_STATE_CANCEL_UPDATE) } func (app *App) ServiceDiscoveryURL() string { return strings.ToLower(strings.Replace(app.ID, "-", ".", -1)) } func (app *App) IsReplicates() bool { return app.Mode == APP_MODE_REPLICATES } func (app *App) IsFixed() bool { return app.Mode == APP_MODE_FIXED } func (app *App) EmitAppEvent(stateString string) { eventType := "" switch stateString { case APP_STATE_CREATING: eventType = eventbus.EventTypeAppStateCreating case APP_STATE_DELETING: eventType = eventbus.EventTypeAppStateDeletion case APP_STATE_NORMAL: eventType = eventbus.EventTypeAppStateNormal case APP_STATE_UPDATING: eventType = eventbus.EventTypeAppStateUpdating case APP_STATE_CANCEL_UPDATE: eventType = eventbus.EventTypeAppStateCancelUpdate case APP_STATE_SCALE_UP: eventType = 
eventbus.EventTypeAppStateScaleUp case APP_STATE_SCALE_DOWN: eventType = eventbus.EventTypeAppStateScaleDown default: } e := &eventbus.Event{Type: eventType} e.AppID = app.ID e.Payload = &types.AppInfoEvent{ AppID: app.ID, Name: app.Name, ClusterID: app.ClusterID, RunAs: app.CurrentVersion.RunAs, } eventbus.WriteEvent(e) } func (app *App) StateIs(state string) bool { return app.StateMachine.Is(state) } func (app *App) TransitTo(targetState string, args ...interface{}) error { err := app.StateMachine.TransitTo(app.stateFactory(targetState, args...)) if err != nil { return err } app.Touch() return nil } func (app *App) RemoveSlot(index int) { if slot, found := app.GetSlot(index); found { OfferAllocatorInstance().RemoveSlotFromAllocator(slot) slot.Remove() app.slotsLock.Lock() delete(app.Slots, index) app.slotsLock.Unlock() app.Touch() } } func (app *App) stateFactory(stateName string, args ...interface{}) State { switch stateName { case APP_STATE_NORMAL: return NewStateNormal(app) case APP_STATE_CREATING: return NewStateCreating(app) case APP_STATE_DELETING: return NewStateDeleting(app) case APP_STATE_SCALE_UP: return NewStateScaleUp(app) case APP_STATE_SCALE_DOWN: return NewStateScaleDown(app) case APP_STATE_UPDATING: slotCountNeedUpdate, ok := args[0].(int) if !ok { slotCountNeedUpdate = 1 } return NewStateUpdating(app, slotCountNeedUpdate) case APP_STATE_CANCEL_UPDATE: return NewStateCancelUpdate(app) default: panic(errors.New("unrecognized state")) } } func (app *App) GetSlots() []*Slot { slots := make([]*Slot, 0) for _, v := range app.Slots { slots = append(slots, v) } slotsById := SlotsById(slots) sort.Sort(slotsById) return slotsById } func (app *App) GetSlot(index int) (*Slot, bool) { slot, ok := app.Slots[index] return slot, ok } func (app *App) SetSlot(index int, slot *Slot) { app.slotsLock.Lock() app.Slots[index] = slot app.slotsLock.Unlock() app.Touch() } func (app *App) Step() { app.StateMachine.Step() app.Touch() } // make sure proposed version is valid 
then applied it to field ProposedVersion func (app *App) checkProposedVersionValid(version *types.Version) error { // mode can not change if version.Mode != app.CurrentVersion.Mode { return fmt.Errorf("mode can not change when update app, current version is %s", app.CurrentVersion.Mode) } // runAs can not change if version.RunAs != app.CurrentVersion.RunAs { return fmt.Errorf("runAs can not change when update app, current version is %s", app.CurrentVersion.RunAs) } // appVersion should not equal if version.AppVersion == app.CurrentVersion.AppVersion { return fmt.Errorf("app version %s exists, choose another one", version.AppVersion) } // app instances should same as current instances if version.Instances != app.CurrentVersion.Instances { return fmt.Errorf("instances can not change when update app, current version is %d", app.CurrentVersion.Instances) } // fixed app IP length should be same as current instances if app.IsFixed() && int32(len(version.IP)) != app.CurrentVersion.Instances { return fmt.Errorf("Fixed mode App IP length can not change when update app, current version is %d", app.CurrentVersion.Instances) } return nil } func validateAndFormatVersion(version *types.Version) error { if version.Container == nil { return errors.New("swan only support mesos docker containerization, no container found") } if version.Container.Docker == nil { return errors.New("swan only support mesos docker containerization, no container found") } if version.Container.Docker.Image == "" { return errors.New("image field required") } if n := len(version.AppName); n == 0 || n > 48 { return errors.New("invalid appName: appName empty or too long") } if version.Instances <= 0 { return errors.New("invalid instances: instances must be specified and should greater than 0") } if version.CPUs < 0.01 { return errors.New("cpu should >= 0.01") } if version.Mem < 5 { return errors.New("mem should >= 5m") } version.AppName = strings.TrimSpace(version.AppName) r := 
regexp.MustCompile("([A-Z]+)|([\\-\\.\\$\\*\\+\\?\\{\\}\\(\\)\\[\\]\\|]+)") errMsg := errors.New(`must be lower case characters and should not contain following special characters "-.$*?{}()[]|"`) //validation of AppId match := r.MatchString(version.AppName) if match { return fmt.Errorf("invalid app id [%s]: %s", version.AppName, errMsg) } //validation of RunAs match = r.MatchString(version.RunAs) if match { return fmt.Errorf("invalid runAs [%s]: %s", version.RunAs, errMsg) } match = r.MatchString(version.Container.Docker.Network) if match { return fmt.Errorf("invalid network [%s]: %s", version.Container.Docker.Network, errMsg) } if len(version.RunAs) == 0 { return errors.New("runAs should not empty") } if len(version.Mode) == 0 { version.Mode = string(APP_MODE_REPLICATES) } switch version.Mode { case string(APP_MODE_FIXED): // validation for fixed mode application if len(version.IP) != int(version.Instances) { return fmt.Errorf("should provide exactly %d ip for FIXED type app", version.Instances) } if len(version.Container.Docker.PortMappings) > 0 { return errors.New("fixed mode application doesn't support portmapping") } for _, ip := range version.IP { if addr := net.ParseIP(ip); addr == nil || addr.IsLoopback() { return errors.New("invalid fix ip: " + ip) } } if version.HealthCheck != nil { protocol := version.HealthCheck.Protocol if !utils.SliceContains([]string{"cmd", "CMD"}, protocol) { return fmt.Errorf("doesn't recoginized protocol %s for health check for fixed type app", protocol) } if len(version.HealthCheck.Value) == 0 { return fmt.Errorf("no value provided for health check with %s", protocol) } } case string(APP_MODE_REPLICATES): // validation for replicates mode app // the only network driver should be **bridge** if !utils.SliceContains([]string{"bridge", "host"}, strings.ToLower(version.Container.Docker.Network)) { return errors.New("replicates mode app suppose the only network driver should be bridge or host") } // portMapping.Name should be 
mandatory for _, portmapping := range version.Container.Docker.PortMappings { if strings.TrimSpace(portmapping.Name) == "" { return errors.New("each port mapping should have a uniquely identified name") } } if strings.ToLower(version.Container.Docker.Network) == "host" { // portMapping.Name should be mandatory for _, portmapping := range version.Container.Docker.PortMappings { if portmapping.ContainerPort != 0 { return errors.New("containerPort not recongnizable for docker host network, port is mandatory") } } } portNames := make([]string, 0) for _, portmapping := range version.Container.Docker.PortMappings { portNames = append(portNames, portmapping.Name) } // portName should be unique if !utils.SliceUnique(portNames) { return errors.New("each port mapping should have a uniquely identified name") } // portName for health check should mandatory if version.HealthCheck != nil { protocol, portName := version.HealthCheck.Protocol, version.HealthCheck.PortName // portName should present in dockers' portMappings definition // portName if manditory for non-cmd health checks if strings.ToLower(protocol) != "cmd" && !utils.SliceContains(portNames, portName) { return fmt.Errorf("portname in healthCheck section should match that defined in portMappings") } if !utils.SliceContains([]string{"tcp", "http", "TCP", "HTTP", "cmd", "CMD"}, protocol) { return fmt.Errorf("doesn't recoginized protocol %s for health check", protocol) } if strings.ToLower(protocol) == "http" { if len(version.HealthCheck.Path) == 0 { return fmt.Errorf("no path provided for health check with %s protocol", protocol) } } if strings.ToLower(protocol) == "cmd" { if len(version.HealthCheck.Value) == 0 { return fmt.Errorf("no value provided for health check with %s protocol", protocol) } } } default: return fmt.Errorf("enrecognized app mode %s", version.Mode) } // validate constraints are all valid if len(version.Constraints) > 0 { evalStatement, err := ParseConstraint(strings.ToLower(version.Constraints)) if 
err != nil { return err } err = evalStatement.Valid() if err != nil { return err } } return nil } func (app *App) SaveVersion(version *types.Version) { app.Versions = append(app.Versions, version) WithConvertVersion(context.TODO(), app.ID, version, nil, persistentStore.CreateVersion) } // 1, remove app from persisted storage // 2, other cleanup process func (app *App) Remove() { app.remove() } // storage related func (app *App) Touch() { app.update() } func (app *App) update() { logrus.Debugf("update app %s", app.ID) WithConvertApp(context.TODO(), app, nil, persistentStore.UpdateApp) } func (app *App) create() { logrus.Debugf("create app %s", app.ID) WithConvertApp(context.TODO(), app, nil, persistentStore.CreateApp) } func (app *App) remove() { logrus.Debugf("remove app %s", app.ID) persistentStore.DeleteApp(context.TODO(), app.ID, nil) }
package lang import ( "fmt" "go/types" ) type Object interface { fmt.Stringer Value() interface{} isObject() } type ObjectNone struct{} func (o ObjectNone) Value() interface{} { return nil } func (o ObjectNone) String() string { return "<none>" } func (o ObjectNone) isObject() {} type ObjectInt struct { val int64 } func (o ObjectInt) Value() interface{} { return o.val } func (o ObjectInt) String() string { return fmt.Sprintf("%d", o.val) } func (o ObjectInt) isObject() {} type ObjectStr struct { val string } func (o ObjectStr) Value() interface{} { return o.val } func (o ObjectStr) String() string { return fmt.Sprintf("\"%s\"", o.val) } func (o ObjectStr) isObject() {} type ObjectBool struct { val bool } func (o ObjectBool) Value() interface{} { return o.val } func (o ObjectBool) String() string { return fmt.Sprintf("%t", o.val) } func (o ObjectBool) isObject() {} type ObjectBuiltin struct { typ types.Type val func(args []Object) (Object, error) } func (o ObjectBuiltin) Type() types.Type { return o.typ } func (o ObjectBuiltin) Value() interface{} { return o.val } func (o ObjectBuiltin) String() string { return "<builtin>" } func (o ObjectBuiltin) isObject() {} type ObjectFunction struct { params []*UniqueSymbol bytecode Bytecode } func (o ObjectFunction) Value() interface{} { return o.bytecode } func (o ObjectFunction) String() string { return "<function>" } func (o ObjectFunction) isObject() {} type ObjectClosure struct { // Env *Env params []*UniqueSymbol bytecode *Bytecode } func (o ObjectClosure) Value() interface{} { return o.bytecode } func (o ObjectClosure) String() string { return "<closure>" } func (o ObjectClosure) isObject() {} fix import reference package lang import ( "fmt" "plaid/lang/types" ) type Object interface { fmt.Stringer Value() interface{} isObject() } type ObjectNone struct{} func (o ObjectNone) Value() interface{} { return nil } func (o ObjectNone) String() string { return "<none>" } func (o ObjectNone) isObject() {} type ObjectInt struct 
{ val int64 } func (o ObjectInt) Value() interface{} { return o.val } func (o ObjectInt) String() string { return fmt.Sprintf("%d", o.val) } func (o ObjectInt) isObject() {} type ObjectStr struct { val string } func (o ObjectStr) Value() interface{} { return o.val } func (o ObjectStr) String() string { return fmt.Sprintf("\"%s\"", o.val) } func (o ObjectStr) isObject() {} type ObjectBool struct { val bool } func (o ObjectBool) Value() interface{} { return o.val } func (o ObjectBool) String() string { return fmt.Sprintf("%t", o.val) } func (o ObjectBool) isObject() {} type ObjectBuiltin struct { typ types.Type val func(args []Object) (Object, error) } func (o ObjectBuiltin) Type() types.Type { return o.typ } func (o ObjectBuiltin) Value() interface{} { return o.val } func (o ObjectBuiltin) String() string { return "<builtin>" } func (o ObjectBuiltin) isObject() {} type ObjectFunction struct { params []*UniqueSymbol bytecode Bytecode } func (o ObjectFunction) Value() interface{} { return o.bytecode } func (o ObjectFunction) String() string { return "<function>" } func (o ObjectFunction) isObject() {} type ObjectClosure struct { // Env *Env params []*UniqueSymbol bytecode *Bytecode } func (o ObjectClosure) Value() interface{} { return o.bytecode } func (o ObjectClosure) String() string { return "<closure>" } func (o ObjectClosure) isObject() {}
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package merge_test import ( "testing" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" . "sigs.k8s.io/structured-merge-diff/v4/internal/fixture" ) func TestExtractApply(t *testing.T) { tests := map[string]TestCase{ "apply_one_extract_apply_one_own_both": { Ops: []Operation{ Apply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, ExtractApply{ Manager: "default", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - a - b `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_from_beginning": { Ops: []Operation{ ExtractApply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, ExtractApply{ Manager: "default", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - a - b `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), }, }, "apply_after_extract_remove_fields": { Ops: []Operation{ ExtractApply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, Apply{ Manager: "default", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - b `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, 
"apply_one_controller_remove_extract_apply_one": { Ops: []Operation{ Apply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, Update{ Manager: "controller", Object: ` list: - b `, APIVersion: "v1", }, ExtractApply{ Manager: "default", Object: ` list: - c `, APIVersion: "v1", }, }, Object: ` list: - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("c")), ), "v1", false, ), "controller": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_retain_ownership_after_controller_update": { Ops: []Operation{ Apply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, Update{ Manager: "controller", Object: ` list: - a - b `, APIVersion: "v1", }, ExtractApply{ Manager: "default", Object: ` list: - c `, APIVersion: "v1", }, }, Object: ` list: - a - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("c")), ), "v1", false, ), "controller": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_share_ownership_after_another_apply": { Ops: []Operation{ Apply{ Manager: "apply-one", Object: ` list: - a `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - a - b `, APIVersion: "v1", }, ExtractApply{ Manager: "apply-one", Object: ` list: - c `, APIVersion: "v1", }, }, Object: ` list: - a - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("c")), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), }, }, "apply_two_cant_delete_object_also_owned_by_extract_apply": { Ops: []Operation{ Apply{ Manager: "apply-one", Object: ` list: - a `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - a - b `, APIVersion: "v1", }, ExtractApply{ Manager: 
"apply-one", Object: ` list: - c `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - a - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("c")), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_empty_structure": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` list: `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - a - b `, APIVersion: "v1", }, //ExtractApply{ // Manager: "apply-one", // Object: ` // list: // `, // APIVersion: "v1", //}, //Apply{ // Manager: "apply-two", // Object: ` // list: // - b // `, // APIVersion: "v1", //}, }, Object: ` list: - a - b `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("list"), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_empty_structure_map": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` map: `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` map: a: c b: d `, APIVersion: "v1", }, //ExtractApply{ // Manager: "apply-one", // Object: ` // map: // `, // APIVersion: "v1", //}, Apply{ Manager: "apply-two", Object: ` map: b: d `, APIVersion: "v1", }, }, Object: ` map: b: d `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("map"), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("map", "b"), ), "v1", false, ), }, }, "extract_apply_atomic_list": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` atomicList: - a - b - c `, APIVersion: "v1", }, }, Object: ` atomicList: - a - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("atomicList"), ), 
"v1", false, ), }, }, "extract_apply_atomic_map": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` atomicMap: a: c b: d `, APIVersion: "v1", }, }, Object: ` atomicMap: a: c b: d `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("atomicMap"), ), "v1", false, ), }, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { if err := test.Test(setFieldsParser); err != nil { t.Fatal(err) } }) } } complete empty structure tests /* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package merge_test import ( "testing" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" . 
"sigs.k8s.io/structured-merge-diff/v4/internal/fixture" ) func TestExtractApply(t *testing.T) { tests := map[string]TestCase{ "apply_one_extract_apply_one_own_both": { Ops: []Operation{ Apply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, ExtractApply{ Manager: "default", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - a - b `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_from_beginning": { Ops: []Operation{ ExtractApply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, ExtractApply{ Manager: "default", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - a - b `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), }, }, "apply_after_extract_remove_fields": { Ops: []Operation{ ExtractApply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, Apply{ Manager: "default", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - b `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "apply_one_controller_remove_extract_apply_one": { Ops: []Operation{ Apply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, Update{ Manager: "controller", Object: ` list: - b `, APIVersion: "v1", }, ExtractApply{ Manager: "default", Object: ` list: - c `, APIVersion: "v1", }, }, Object: ` list: - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("c")), ), "v1", false, ), "controller": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_retain_ownership_after_controller_update": { Ops: []Operation{ Apply{ Manager: "default", Object: ` list: - a `, APIVersion: "v1", }, 
Update{ Manager: "controller", Object: ` list: - a - b `, APIVersion: "v1", }, ExtractApply{ Manager: "default", Object: ` list: - c `, APIVersion: "v1", }, }, Object: ` list: - a - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "default": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("c")), ), "v1", false, ), "controller": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_share_ownership_after_another_apply": { Ops: []Operation{ Apply{ Manager: "apply-one", Object: ` list: - a `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - a - b `, APIVersion: "v1", }, ExtractApply{ Manager: "apply-one", Object: ` list: - c `, APIVersion: "v1", }, }, Object: ` list: - a - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("c")), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), }, }, "apply_two_cant_delete_object_also_owned_by_extract_apply": { Ops: []Operation{ Apply{ Manager: "apply-one", Object: ` list: - a `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - a - b `, APIVersion: "v1", }, ExtractApply{ Manager: "apply-one", Object: ` list: - c `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - a - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("c")), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_empty_structure_list": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` list: `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - a - b `, APIVersion: "v1", }, }, Object: ` list: - a - b `, APIVersion: "v1", Managed: 
fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("list"), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), }, }, // BROKEN "extract_apply_empty_structure_remove_list": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` list: `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - a - b `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - b `, APIVersion: "v1", }, }, // BROKEN: expected: //Object: ` // list: // - b //`, // but actually got: Object: ` list: - a - b `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( // BROKEN expected: //_NS( // _P("list"), //), // but actually got: _NS( _P("list"), _P("list", _V("a")), _P("list", _V("b")), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_empty_structure_add_later_list": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` list: `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - a - b `, APIVersion: "v1", }, ExtractApply{ Manager: "apply-one", Object: ` list: - c `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` list: - b `, APIVersion: "v1", }, }, Object: ` list: - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("list", _V("c")), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("list", _V("b")), ), "v1", false, ), }, }, "extract_apply_empty_structure_map": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` map: `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` map: a: c b: d `, APIVersion: "v1", }, }, Object: ` map: a: c b: d `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("map"), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( 
_P("map", "a"), _P("map", "b"), ), "v1", false, ), }, }, "extract_apply_empty_structure_remove_map": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` map: `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` map: a: c b: d `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` map: b: d `, APIVersion: "v1", }, }, Object: ` map: b: d `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("map"), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("map", "b"), ), "v1", false, ), }, }, "extract_apply_empty_structure_add_later_map": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` map: `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` map: a: c b: d `, APIVersion: "v1", }, ExtractApply{ Manager: "apply-one", Object: ` map: e: f `, APIVersion: "v1", }, Apply{ Manager: "apply-two", Object: ` map: b: d `, APIVersion: "v1", }, }, Object: ` map: b: d e: f `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("map", "e"), ), "v1", false, ), "apply-two": fieldpath.NewVersionedSet( _NS( _P("map", "b"), ), "v1", false, ), }, }, "extract_apply_atomic_list": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` atomicList: - a - b - c `, APIVersion: "v1", }, }, Object: ` atomicList: - a - b - c `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("atomicList"), ), "v1", false, ), }, }, "extract_apply_atomic_map": { Ops: []Operation{ ExtractApply{ Manager: "apply-one", Object: ` atomicMap: a: c b: d `, APIVersion: "v1", }, }, Object: ` atomicMap: a: c b: d `, APIVersion: "v1", Managed: fieldpath.ManagedFields{ "apply-one": fieldpath.NewVersionedSet( _NS( _P("atomicMap"), ), "v1", false, ), }, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { if err := test.Test(setFieldsParser); err != nil { t.Fatal(err) } }) } }
package conditionalaccess import ( "context" "errors" "fmt" "log" "net/http" "time" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/manicminer/hamilton/msgraph" "github.com/manicminer/hamilton/odata" "github.com/hashicorp/terraform-provider-azuread/internal/clients" "github.com/hashicorp/terraform-provider-azuread/internal/tf" "github.com/hashicorp/terraform-provider-azuread/internal/utils" "github.com/hashicorp/terraform-provider-azuread/internal/validate" ) func conditionalAccessPolicyResource() *schema.Resource { return &schema.Resource{ CreateContext: conditionalAccessPolicyResourceCreate, ReadContext: conditionalAccessPolicyResourceRead, UpdateContext: conditionalAccessPolicyResourceUpdate, DeleteContext: conditionalAccessPolicyResourceDelete, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Read: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(5 * time.Minute), Delete: schema.DefaultTimeout(5 * time.Minute), }, Importer: tf.ValidateResourceIDPriorToImport(func(id string) error { if _, err := uuid.ParseUUID(id); err != nil { return fmt.Errorf("specified ID (%q) is not valid: %s", id, err) } return nil }), Schema: map[string]*schema.Schema{ "display_name": { Type: schema.TypeString, Required: true, ValidateDiagFunc: validate.NoEmptyStrings, }, "state": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ msgraph.ConditionalAccessPolicyStateDisabled, msgraph.ConditionalAccessPolicyStateEnabled, msgraph.ConditionalAccessPolicyStateEnabledForReportingButNotEnforced, }, false), }, "conditions": { Type: schema.TypeList, Required: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "applications": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ 
Schema: map[string]*schema.Schema{ "included_applications": { Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_applications": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "included_user_actions": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, }, }, }, "users": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "included_users": { Type: schema.TypeList, Optional: true, AtLeastOneOf: []string{"conditions.0.users.0.included_groups", "conditions.0.users.0.included_roles", "conditions.0.users.0.included_users"}, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_users": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "included_groups": { Type: schema.TypeList, Optional: true, AtLeastOneOf: []string{"conditions.0.users.0.included_groups", "conditions.0.users.0.included_roles", "conditions.0.users.0.included_users"}, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_groups": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "included_roles": { Type: schema.TypeList, Optional: true, AtLeastOneOf: []string{"conditions.0.users.0.included_groups", "conditions.0.users.0.included_roles", "conditions.0.users.0.included_users"}, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_roles": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: 
validate.NoEmptyStrings, }, }, }, }, }, "client_app_types": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "all", "browser", "mobileAppsAndDesktopClients", "exchangeActiveSync", "easSupported", "other", }, false), }, }, "locations": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "included_locations": { Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_locations": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, }, }, }, "platforms": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "included_platforms": { Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "all", "android", "iOS", "macOS", "unknownFutureValue", "windows", "windowsPhone", }, false), }, }, "excluded_platforms": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "all", "android", "iOS", "macOS", "unknownFutureValue", "windows", "windowsPhone", }, false), }, }, }, }, }, "sign_in_risk_levels": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "hidden", "high", "low", "medium", "none", "unknownFutureValue", }, false), }, }, "user_risk_levels": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "hidden", "high", "low", "medium", "none", "unknownFutureValue", }, false), }, }, }, }, }, "grant_controls": { Type: schema.TypeList, Required: true, MaxItems: 1, Elem: 
&schema.Resource{ Schema: map[string]*schema.Schema{ "operator": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"AND", "OR"}, false), }, "built_in_controls": { Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "approvedApplication", "block", "compliantApplication", "compliantDevice", "domainJoinedDevice", "mfa", "passwordChange", "unknownFutureValue", }, false), }, }, "custom_authentication_factors": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "terms_of_use": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, }, }, }, "session_controls": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "application_enforced_restrictions_enabled": { Type: schema.TypeBool, Optional: true, }, "cloud_app_security_policy": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ "blockDownloads", "mcasConfigured", "monitorOnly", "unknownFutureValue", }, false), }, "persistent_browser_mode": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{"always", "never"}, false), }, "sign_in_frequency": { Type: schema.TypeInt, Optional: true, RequiredWith: []string{"session_controls.0.sign_in_frequency_period"}, ValidateFunc: validation.IntAtLeast(0), }, "sign_in_frequency_period": { Type: schema.TypeString, Optional: true, RequiredWith: []string{"session_controls.0.sign_in_frequency"}, ValidateFunc: validation.StringInSlice([]string{"days", "hours"}, false), }, }, }, }, }, } } func conditionalAccessPolicyResourceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := 
meta.(*clients.Client).ConditionalAccess.PoliciesClient properties := msgraph.ConditionalAccessPolicy{ DisplayName: utils.String(d.Get("display_name").(string)), State: utils.String(d.Get("state").(string)), Conditions: expandConditionalAccessConditionSet(d.Get("conditions").([]interface{})), GrantControls: expandConditionalAccessGrantControls(d.Get("grant_controls").([]interface{})), SessionControls: expandConditionalAccessSessionControls(d.Get("session_controls").([]interface{})), } policy, _, err := client.Create(ctx, properties) if err != nil { return tf.ErrorDiagF(err, "Could not create conditional access policy") } if policy.ID == nil || *policy.ID == "" { return tf.ErrorDiagF(errors.New("Bad API response"), "Object ID returned for conditional access policy is nil/empty") } d.SetId(*policy.ID) return conditionalAccessPolicyResourceRead(ctx, d, meta) } func conditionalAccessPolicyResourceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*clients.Client).ConditionalAccess.PoliciesClient properties := msgraph.ConditionalAccessPolicy{ ID: utils.String(d.Id()), DisplayName: utils.String(d.Get("display_name").(string)), State: utils.String(d.Get("state").(string)), Conditions: expandConditionalAccessConditionSet(d.Get("conditions").([]interface{})), GrantControls: expandConditionalAccessGrantControls(d.Get("grant_controls").([]interface{})), SessionControls: expandConditionalAccessSessionControls(d.Get("session_controls").([]interface{})), } if _, err := client.Update(ctx, properties); err != nil { return tf.ErrorDiagF(err, "Could not update conditional access policy with ID: %q", d.Id()) } return nil } func conditionalAccessPolicyResourceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*clients.Client).ConditionalAccess.PoliciesClient policy, status, err := client.Get(ctx, d.Id(), odata.Query{}) if err != nil { if status == http.StatusNotFound { 
// Tail of conditionalAccessPolicyResourceRead (a 404 is treated as "gone": the ID is
// cleared so Terraform removes the resource from state), then:
//   - conditionalAccessPolicyResourceDelete: Get first so a 404 is a no-op, then
//     Delete; the status from Delete is only used in the error message.
//     NOTE(review): deletion is not re-verified afterwards -- confirm whether eventual
//     consistency matters for this API.
//   - flattenConditionalAccess{ConditionSet,Applications,Users,Locations,Platforms,
//     GrantControls}: each maps a (possibly nil) msgraph struct pointer into the
//     single-element []interface{} shape the schema expects; nil input flattens to an
//     empty list, so these are nil-safe on their own input.
log.Printf("[DEBUG] Conditional Access Policy with Object ID %q was not found - removing from state", d.Id()) d.SetId("") return nil } return tf.ErrorDiagPathF(err, "id", "Retrieving Conditional Access Policy with object ID %q", d.Id()) } tf.Set(d, "display_name", policy.DisplayName) tf.Set(d, "state", policy.State) tf.Set(d, "conditions", flattenConditionalAccessConditionSet(policy.Conditions)) tf.Set(d, "grant_controls", flattenConditionalAccessGrantControls(policy.GrantControls)) tf.Set(d, "session_controls", flattenConditionalAccessSessionControls(policy.SessionControls)) return nil } func conditionalAccessPolicyResourceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*clients.Client).ConditionalAccess.PoliciesClient _, status, err := client.Get(ctx, d.Id(), odata.Query{}) if err != nil { if status == http.StatusNotFound { log.Printf("[DEBUG] Conditional Access Policy with ID %q already deleted", d.Id()) return nil } return tf.ErrorDiagPathF(err, "id", "Retrieving conditional access policy with ID %q", d.Id()) } status, err = client.Delete(ctx, d.Id()) if err != nil { return tf.ErrorDiagPathF(err, "id", "Deleting conditional access policy with ID %q, got status %d", d.Id(), status) } return nil } func flattenConditionalAccessConditionSet(in *msgraph.ConditionalAccessConditionSet) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "applications": flattenConditionalAccessApplications(in.Applications), "users": flattenConditionalAccessUsers(in.Users), "client_app_types": tf.FlattenStringSlicePtr(in.ClientAppTypes), "locations": flattenConditionalAccessLocations(in.Locations), "platforms": flattenConditionalAccessPlatforms(in.Platforms), "sign_in_risk_levels": tf.FlattenStringSlicePtr(in.SignInRiskLevels), "user_risk_levels": tf.FlattenStringSlicePtr(in.UserRiskLevels), }, } } func flattenConditionalAccessApplications(in *msgraph.ConditionalAccessApplications)
[]interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "included_applications": tf.FlattenStringSlicePtr(in.IncludeApplications), "excluded_applications": tf.FlattenStringSlicePtr(in.ExcludeApplications), "included_user_actions": tf.FlattenStringSlicePtr(in.IncludeUserActions), }, } } func flattenConditionalAccessUsers(in *msgraph.ConditionalAccessUsers) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "included_users": tf.FlattenStringSlicePtr(in.IncludeUsers), "excluded_users": tf.FlattenStringSlicePtr(in.ExcludeUsers), "included_groups": tf.FlattenStringSlicePtr(in.IncludeGroups), "excluded_groups": tf.FlattenStringSlicePtr(in.ExcludeGroups), "included_roles": tf.FlattenStringSlicePtr(in.IncludeRoles), "excluded_roles": tf.FlattenStringSlicePtr(in.ExcludeRoles), }, } } func flattenConditionalAccessLocations(in *msgraph.ConditionalAccessLocations) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "included_locations": tf.FlattenStringSlicePtr(in.IncludeLocations), "excluded_locations": tf.FlattenStringSlicePtr(in.ExcludeLocations), }, } } func flattenConditionalAccessPlatforms(in *msgraph.ConditionalAccessPlatforms) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "included_platforms": tf.FlattenStringSlicePtr(in.IncludePlatforms), "excluded_platforms": tf.FlattenStringSlicePtr(in.ExcludePlatforms), }, } } func flattenConditionalAccessGrantControls(in *msgraph.ConditionalAccessGrantControls) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "operator": in.Operator, "built_in_controls": tf.FlattenStringSlicePtr(in.BuiltInControls), "custom_authentication_factors": tf.FlattenStringSlicePtr(in.CustomAuthenticationFactors), "terms_of_use": tf.FlattenStringSlicePtr(in.TermsOfUse), }, } } func
// First-revision flatten/expand helpers.
// NOTE(review): flattenConditionalAccessSessionControls nil-checks only `in` itself,
// then dereferences in.ApplicationEnforcedRestrictions, in.CloudAppSecurity,
// in.PersistentBrowser and in.SignInFrequency. If the Graph API returns a
// sessionControls object with any of those members omitted this panics -- contrast
// the flatten helpers above, which all guard their own (pointer) input.
// NOTE(review): the expand helpers guard only len(in) == 0; an empty config block
// yields in[0] == nil and the in[0].(map[string]interface{}) assertion panics. The
// second revision below adds `|| in[0] == nil` to every guard -- evidence this was a
// real defect.
// NOTE(review): in expandConditionalAccessSessionControls the `config[key]; x != nil`
// checks are ineffective -- a schema-backed config map returns typed zero values, not
// nil, so every branch always runs; e.g. an unset cloud_app_security_policy still
// yields a CloudAppSecurityControl with IsEnabled=true and an empty type string. The
// second revision rewrites this to derive IsEnabled from the actual values.
flattenConditionalAccessSessionControls(in *msgraph.ConditionalAccessSessionControls) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "application_enforced_restrictions_enabled": in.ApplicationEnforcedRestrictions.IsEnabled, "cloud_app_security_policy": in.CloudAppSecurity.CloudAppSecurityType, "persistent_browser_mode": in.PersistentBrowser.Mode, "sign_in_frequency": in.SignInFrequency.Value, "sign_in_frequency_period": in.SignInFrequency.Type, }, } } func expandConditionalAccessConditionSet(in []interface{}) *msgraph.ConditionalAccessConditionSet { if len(in) == 0 { return nil } result := msgraph.ConditionalAccessConditionSet{} config := in[0].(map[string]interface{}) applications := config["applications"].([]interface{}) users := config["users"].([]interface{}) clientAppTypes := config["client_app_types"].([]interface{}) locations := config["locations"].([]interface{}) platforms := config["platforms"].([]interface{}) signInRiskLevels := config["sign_in_risk_levels"].([]interface{}) userRiskLevels := config["user_risk_levels"].([]interface{}) result.Applications = expandConditionalAccessApplications(applications) result.Users = expandConditionalAccessUsers(users) result.ClientAppTypes = tf.ExpandStringSlicePtr(clientAppTypes) result.Locations = expandConditionalAccessLocations(locations) result.Platforms = expandConditionalAccessPlatforms(platforms) result.SignInRiskLevels = tf.ExpandStringSlicePtr(signInRiskLevels) result.UserRiskLevels = tf.ExpandStringSlicePtr(userRiskLevels) return &result } func expandConditionalAccessApplications(in []interface{}) *msgraph.ConditionalAccessApplications { if len(in) == 0 { return nil } result := msgraph.ConditionalAccessApplications{} config := in[0].(map[string]interface{}) includeApplications := config["included_applications"].([]interface{}) excludeApplications := config["excluded_applications"].([]interface{}) includeUserActions :=
config["included_user_actions"].([]interface{}) result.IncludeApplications = tf.ExpandStringSlicePtr(includeApplications) result.ExcludeApplications = tf.ExpandStringSlicePtr(excludeApplications) result.IncludeUserActions = tf.ExpandStringSlicePtr(includeUserActions) return &result } func expandConditionalAccessUsers(in []interface{}) *msgraph.ConditionalAccessUsers { if len(in) == 0 { return nil } result := msgraph.ConditionalAccessUsers{} config := in[0].(map[string]interface{}) includeUsers := config["included_users"].([]interface{}) excludeUsers := config["excluded_users"].([]interface{}) includeGroups := config["included_groups"].([]interface{}) excludeGroups := config["excluded_groups"].([]interface{}) includeRoles := config["included_roles"].([]interface{}) excludeRoles := config["excluded_roles"].([]interface{}) result.IncludeUsers = tf.ExpandStringSlicePtr(includeUsers) result.ExcludeUsers = tf.ExpandStringSlicePtr(excludeUsers) result.IncludeGroups = tf.ExpandStringSlicePtr(includeGroups) result.ExcludeGroups = tf.ExpandStringSlicePtr(excludeGroups) result.IncludeRoles = tf.ExpandStringSlicePtr(includeRoles) result.ExcludeRoles = tf.ExpandStringSlicePtr(excludeRoles) return &result } func expandConditionalAccessPlatforms(in []interface{}) *msgraph.ConditionalAccessPlatforms { if len(in) == 0 { return nil } result := msgraph.ConditionalAccessPlatforms{} config := in[0].(map[string]interface{}) includePlatforms := config["included_platforms"].([]interface{}) excludePlatforms := config["excluded_platforms"].([]interface{}) result.IncludePlatforms = tf.ExpandStringSlicePtr(includePlatforms) result.ExcludePlatforms = tf.ExpandStringSlicePtr(excludePlatforms) return &result } func expandConditionalAccessLocations(in []interface{}) *msgraph.ConditionalAccessLocations { if len(in) == 0 { return nil } result := msgraph.ConditionalAccessLocations{} config := in[0].(map[string]interface{}) includeLocations := config["included_locations"].([]interface{})
excludeLocations := config["excluded_locations"].([]interface{}) result.IncludeLocations = tf.ExpandStringSlicePtr(includeLocations) result.ExcludeLocations = tf.ExpandStringSlicePtr(excludeLocations) return &result } func expandConditionalAccessGrantControls(in []interface{}) *msgraph.ConditionalAccessGrantControls { if len(in) == 0 { return nil } result := msgraph.ConditionalAccessGrantControls{} config := in[0].(map[string]interface{}) operator := config["operator"].(string) builtInControls := config["built_in_controls"].([]interface{}) customAuthenticationFactors := config["custom_authentication_factors"].([]interface{}) termsOfUse := config["terms_of_use"].([]interface{}) result.Operator = &operator result.BuiltInControls = tf.ExpandStringSlicePtr(builtInControls) result.CustomAuthenticationFactors = tf.ExpandStringSlicePtr(customAuthenticationFactors) result.TermsOfUse = tf.ExpandStringSlicePtr(termsOfUse) return &result } func expandConditionalAccessSessionControls(in []interface{}) *msgraph.ConditionalAccessSessionControls { if len(in) == 0 { return nil } result := msgraph.ConditionalAccessSessionControls{} config := in[0].(map[string]interface{}) if applicationEnforcedRestrictions := config["application_enforced_restrictions_enabled"]; applicationEnforcedRestrictions != nil { result.ApplicationEnforcedRestrictions = &msgraph.ApplicationEnforcedRestrictionsSessionControl{ IsEnabled: utils.Bool(applicationEnforcedRestrictions.(bool)), } } if cloudAppSecurity := config["cloud_app_security_policy"]; cloudAppSecurity != nil { result.CloudAppSecurity = &msgraph.CloudAppSecurityControl{ IsEnabled: utils.Bool(true), CloudAppSecurityType: utils.String(cloudAppSecurity.(string)), } } if persistentBrowser := config["persistent_browser_mode"]; persistentBrowser != nil { result.PersistentBrowser = &msgraph.PersistentBrowserSessionControl{ IsEnabled: utils.Bool(true), Mode: utils.String(persistentBrowser.(string)), } } if signInFrequency := config["sign_in_frequency"];
// Tail of the first revision's expandConditionalAccessSessionControls
// (sign_in_frequency branch), then a stray commit-message fragment
// ("Remove persistent browser") left behind by whatever produced this file, then the
// START of the second revision: package clause, imports, and
// conditionalAccessPolicyResource() -- the full Terraform resource schema.
// Visible difference from the first revision's schema: the
// "persistent_browser_mode" session control is gone; the rest
// (conditions/applications/users/locations/platforms, grant_controls, and
// session_controls with the sign_in_frequency <-> sign_in_frequency_period pairing
// enforced via RequiredWith) matches. The importer validates that the supplied ID
// parses as a UUID before import.
signInFrequency != nil { result.SignInFrequency = &msgraph.SignInFrequencySessionControl{ IsEnabled: utils.Bool(true), Type: utils.String(config["sign_in_frequency_period"].(string)), Value: utils.Int32(int32(signInFrequency.(int))), } } return &result } Remove persistent browser package conditionalaccess import ( "context" "errors" "fmt" "log" "net/http" "time" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/manicminer/hamilton/msgraph" "github.com/manicminer/hamilton/odata" "github.com/hashicorp/terraform-provider-azuread/internal/clients" "github.com/hashicorp/terraform-provider-azuread/internal/tf" "github.com/hashicorp/terraform-provider-azuread/internal/utils" "github.com/hashicorp/terraform-provider-azuread/internal/validate" ) func conditionalAccessPolicyResource() *schema.Resource { return &schema.Resource{ CreateContext: conditionalAccessPolicyResourceCreate, ReadContext: conditionalAccessPolicyResourceRead, UpdateContext: conditionalAccessPolicyResourceUpdate, DeleteContext: conditionalAccessPolicyResourceDelete, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Read: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(5 * time.Minute), Delete: schema.DefaultTimeout(5 * time.Minute), }, Importer: tf.ValidateResourceIDPriorToImport(func(id string) error { if _, err := uuid.ParseUUID(id); err != nil { return fmt.Errorf("specified ID (%q) is not valid: %s", id, err) } return nil }), Schema: map[string]*schema.Schema{ "display_name": { Type: schema.TypeString, Required: true, ValidateDiagFunc: validate.NoEmptyStrings, }, "state": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ msgraph.ConditionalAccessPolicyStateDisabled, msgraph.ConditionalAccessPolicyStateEnabled,
msgraph.ConditionalAccessPolicyStateEnabledForReportingButNotEnforced, }, false), }, "conditions": { Type: schema.TypeList, Required: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "applications": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "included_applications": { Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_applications": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "included_user_actions": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, }, }, }, "users": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "included_users": { Type: schema.TypeList, Optional: true, AtLeastOneOf: []string{"conditions.0.users.0.included_groups", "conditions.0.users.0.included_roles", "conditions.0.users.0.included_users"}, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_users": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "included_groups": { Type: schema.TypeList, Optional: true, AtLeastOneOf: []string{"conditions.0.users.0.included_groups", "conditions.0.users.0.included_roles", "conditions.0.users.0.included_users"}, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_groups": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "included_roles": { Type: schema.TypeList, Optional: true, AtLeastOneOf: []string{"conditions.0.users.0.included_groups",
"conditions.0.users.0.included_roles", "conditions.0.users.0.included_users"}, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_roles": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, }, }, }, "client_app_types": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "all", "browser", "mobileAppsAndDesktopClients", "exchangeActiveSync", "easSupported", "other", }, false), }, }, "locations": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "included_locations": { Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "excluded_locations": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, }, }, }, "platforms": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "included_platforms": { Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "all", "android", "iOS", "macOS", "unknownFutureValue", "windows", "windowsPhone", }, false), }, }, "excluded_platforms": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "all", "android", "iOS", "macOS", "unknownFutureValue", "windows", "windowsPhone", }, false), }, }, }, }, }, "sign_in_risk_levels": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "hidden", "high", "low", "medium", "none", "unknownFutureValue", }, false), }, }, "user_risk_levels": { Type:
schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "hidden", "high", "low", "medium", "none", "unknownFutureValue", }, false), }, }, }, }, }, "grant_controls": { Type: schema.TypeList, Required: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "operator": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"AND", "OR"}, false), }, "built_in_controls": { Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{ "approvedApplication", "block", "compliantApplication", "compliantDevice", "domainJoinedDevice", "mfa", "passwordChange", "unknownFutureValue", }, false), }, }, "custom_authentication_factors": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, "terms_of_use": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateDiagFunc: validate.NoEmptyStrings, }, }, }, }, }, "session_controls": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "application_enforced_restrictions_enabled": { Type: schema.TypeBool, Optional: true, }, "cloud_app_security_policy": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ "blockDownloads", "mcasConfigured", "monitorOnly", "unknownFutureValue", }, false), }, "sign_in_frequency": { Type: schema.TypeInt, Optional: true, RequiredWith: []string{"session_controls.0.sign_in_frequency_period"}, ValidateFunc: validation.IntAtLeast(0), }, "sign_in_frequency_period": { Type: schema.TypeString, Optional: true, RequiredWith: []string{"session_controls.0.sign_in_frequency"}, ValidateFunc: validation.StringInSlice([]string{"days", "hours"}, false), }, }, }, }, }, } } func
// Second-revision CRUD functions -- same logic as the first revision above (Create
// validates the returned ID then delegates to Read; Update returns nil without
// re-reading; Read clears state on 404 so Terraform drops the resource; Delete
// treats 404 as already-deleted), followed by the nil-safe flatten helpers for
// conditions, applications, users, locations, platforms and grant controls.
conditionalAccessPolicyResourceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*clients.Client).ConditionalAccess.PoliciesClient properties := msgraph.ConditionalAccessPolicy{ DisplayName: utils.String(d.Get("display_name").(string)), State: utils.String(d.Get("state").(string)), Conditions: expandConditionalAccessConditionSet(d.Get("conditions").([]interface{})), GrantControls: expandConditionalAccessGrantControls(d.Get("grant_controls").([]interface{})), SessionControls: expandConditionalAccessSessionControls(d.Get("session_controls").([]interface{})), } policy, _, err := client.Create(ctx, properties) if err != nil { return tf.ErrorDiagF(err, "Could not create conditional access policy") } if policy.ID == nil || *policy.ID == "" { return tf.ErrorDiagF(errors.New("Bad API response"), "Object ID returned for conditional access policy is nil/empty") } d.SetId(*policy.ID) return conditionalAccessPolicyResourceRead(ctx, d, meta) } func conditionalAccessPolicyResourceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*clients.Client).ConditionalAccess.PoliciesClient properties := msgraph.ConditionalAccessPolicy{ ID: utils.String(d.Id()), DisplayName: utils.String(d.Get("display_name").(string)), State: utils.String(d.Get("state").(string)), Conditions: expandConditionalAccessConditionSet(d.Get("conditions").([]interface{})), GrantControls: expandConditionalAccessGrantControls(d.Get("grant_controls").([]interface{})), SessionControls: expandConditionalAccessSessionControls(d.Get("session_controls").([]interface{})), } if _, err := client.Update(ctx, properties); err != nil { return tf.ErrorDiagF(err, "Could not update conditional access policy with ID: %q", d.Id()) } return nil } func conditionalAccessPolicyResourceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client :=
meta.(*clients.Client).ConditionalAccess.PoliciesClient policy, status, err := client.Get(ctx, d.Id(), odata.Query{}) if err != nil { if status == http.StatusNotFound { log.Printf("[DEBUG] Conditional Access Policy with Object ID %q was not found - removing from state", d.Id()) d.SetId("") return nil } return tf.ErrorDiagPathF(err, "id", "Retrieving Conditional Access Policy with object ID %q", d.Id()) } tf.Set(d, "display_name", policy.DisplayName) tf.Set(d, "state", policy.State) tf.Set(d, "conditions", flattenConditionalAccessConditionSet(policy.Conditions)) tf.Set(d, "grant_controls", flattenConditionalAccessGrantControls(policy.GrantControls)) tf.Set(d, "session_controls", flattenConditionalAccessSessionControls(policy.SessionControls)) return nil } func conditionalAccessPolicyResourceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*clients.Client).ConditionalAccess.PoliciesClient _, status, err := client.Get(ctx, d.Id(), odata.Query{}) if err != nil { if status == http.StatusNotFound { log.Printf("[DEBUG] Conditional Access Policy with ID %q already deleted", d.Id()) return nil } return tf.ErrorDiagPathF(err, "id", "Retrieving conditional access policy with ID %q", d.Id()) } status, err = client.Delete(ctx, d.Id()) if err != nil { return tf.ErrorDiagPathF(err, "id", "Deleting conditional access policy with ID %q, got status %d", d.Id(), status) } return nil } func flattenConditionalAccessConditionSet(in *msgraph.ConditionalAccessConditionSet) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "applications": flattenConditionalAccessApplications(in.Applications), "users": flattenConditionalAccessUsers(in.Users), "client_app_types": tf.FlattenStringSlicePtr(in.ClientAppTypes), "locations": flattenConditionalAccessLocations(in.Locations), "platforms": flattenConditionalAccessPlatforms(in.Platforms), "sign_in_risk_levels":
tf.FlattenStringSlicePtr(in.SignInRiskLevels), "user_risk_levels": tf.FlattenStringSlicePtr(in.UserRiskLevels), }, } } func flattenConditionalAccessApplications(in *msgraph.ConditionalAccessApplications) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "included_applications": tf.FlattenStringSlicePtr(in.IncludeApplications), "excluded_applications": tf.FlattenStringSlicePtr(in.ExcludeApplications), "included_user_actions": tf.FlattenStringSlicePtr(in.IncludeUserActions), }, } } func flattenConditionalAccessUsers(in *msgraph.ConditionalAccessUsers) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "included_users": tf.FlattenStringSlicePtr(in.IncludeUsers), "excluded_users": tf.FlattenStringSlicePtr(in.ExcludeUsers), "included_groups": tf.FlattenStringSlicePtr(in.IncludeGroups), "excluded_groups": tf.FlattenStringSlicePtr(in.ExcludeGroups), "included_roles": tf.FlattenStringSlicePtr(in.IncludeRoles), "excluded_roles": tf.FlattenStringSlicePtr(in.ExcludeRoles), }, } } func flattenConditionalAccessLocations(in *msgraph.ConditionalAccessLocations) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "included_locations": tf.FlattenStringSlicePtr(in.IncludeLocations), "excluded_locations": tf.FlattenStringSlicePtr(in.ExcludeLocations), }, } } func flattenConditionalAccessPlatforms(in *msgraph.ConditionalAccessPlatforms) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "included_platforms": tf.FlattenStringSlicePtr(in.IncludePlatforms), "excluded_platforms": tf.FlattenStringSlicePtr(in.ExcludePlatforms), }, } } func flattenConditionalAccessGrantControls(in *msgraph.ConditionalAccessGrantControls) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "operator": in.Operator, "built_in_controls":
// Second-revision session-control flatten plus the expand helpers.
// NOTE(review): flattenConditionalAccessSessionControls still dereferences
// in.ApplicationEnforcedRestrictions / in.CloudAppSecurity / in.SignInFrequency
// without nil checks (only `in` itself is guarded) -- the same panic risk as the
// first revision, minus the removed PersistentBrowser field. Worth guarding each
// nested pointer before use.
// Improvements over the first revision: every expand helper now also guards
// `in[0] == nil` (an empty config block no longer panics on the map assertion), and
// expandConditionalAccessSessionControls derives IsEnabled from the actual values
// (cloudAppSecurity != "", signInFrequency > 0) instead of hard-coding true,
// dropping the previously ineffective `!= nil` checks.
tf.FlattenStringSlicePtr(in.BuiltInControls), "custom_authentication_factors": tf.FlattenStringSlicePtr(in.CustomAuthenticationFactors), "terms_of_use": tf.FlattenStringSlicePtr(in.TermsOfUse), }, } } func flattenConditionalAccessSessionControls(in *msgraph.ConditionalAccessSessionControls) []interface{} { if in == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ "application_enforced_restrictions_enabled": in.ApplicationEnforcedRestrictions.IsEnabled, "cloud_app_security_policy": in.CloudAppSecurity.CloudAppSecurityType, "sign_in_frequency": in.SignInFrequency.Value, "sign_in_frequency_period": in.SignInFrequency.Type, }, } } func expandConditionalAccessConditionSet(in []interface{}) *msgraph.ConditionalAccessConditionSet { if len(in) == 0 || in[0] == nil { return nil } result := msgraph.ConditionalAccessConditionSet{} config := in[0].(map[string]interface{}) applications := config["applications"].([]interface{}) users := config["users"].([]interface{}) clientAppTypes := config["client_app_types"].([]interface{}) locations := config["locations"].([]interface{}) platforms := config["platforms"].([]interface{}) signInRiskLevels := config["sign_in_risk_levels"].([]interface{}) userRiskLevels := config["user_risk_levels"].([]interface{}) result.Applications = expandConditionalAccessApplications(applications) result.Users = expandConditionalAccessUsers(users) result.ClientAppTypes = tf.ExpandStringSlicePtr(clientAppTypes) result.Locations = expandConditionalAccessLocations(locations) result.Platforms = expandConditionalAccessPlatforms(platforms) result.SignInRiskLevels = tf.ExpandStringSlicePtr(signInRiskLevels) result.UserRiskLevels = tf.ExpandStringSlicePtr(userRiskLevels) return &result } func expandConditionalAccessApplications(in []interface{}) *msgraph.ConditionalAccessApplications { if len(in) == 0 || in[0] == nil { return nil } result := msgraph.ConditionalAccessApplications{} config := in[0].(map[string]interface{})
includeApplications := config["included_applications"].([]interface{}) excludeApplications := config["excluded_applications"].([]interface{}) includeUserActions := config["included_user_actions"].([]interface{}) result.IncludeApplications = tf.ExpandStringSlicePtr(includeApplications) result.ExcludeApplications = tf.ExpandStringSlicePtr(excludeApplications) result.IncludeUserActions = tf.ExpandStringSlicePtr(includeUserActions) return &result } func expandConditionalAccessUsers(in []interface{}) *msgraph.ConditionalAccessUsers { if len(in) == 0 || in[0] == nil { return nil } result := msgraph.ConditionalAccessUsers{} config := in[0].(map[string]interface{}) includeUsers := config["included_users"].([]interface{}) excludeUsers := config["excluded_users"].([]interface{}) includeGroups := config["included_groups"].([]interface{}) excludeGroups := config["excluded_groups"].([]interface{}) includeRoles := config["included_roles"].([]interface{}) excludeRoles := config["excluded_roles"].([]interface{}) result.IncludeUsers = tf.ExpandStringSlicePtr(includeUsers) result.ExcludeUsers = tf.ExpandStringSlicePtr(excludeUsers) result.IncludeGroups = tf.ExpandStringSlicePtr(includeGroups) result.ExcludeGroups = tf.ExpandStringSlicePtr(excludeGroups) result.IncludeRoles = tf.ExpandStringSlicePtr(includeRoles) result.ExcludeRoles = tf.ExpandStringSlicePtr(excludeRoles) return &result } func expandConditionalAccessPlatforms(in []interface{}) *msgraph.ConditionalAccessPlatforms { if len(in) == 0 || in[0] == nil { return nil } result := msgraph.ConditionalAccessPlatforms{} config := in[0].(map[string]interface{}) includePlatforms := config["included_platforms"].([]interface{}) excludePlatforms := config["excluded_platforms"].([]interface{}) result.IncludePlatforms = tf.ExpandStringSlicePtr(includePlatforms) result.ExcludePlatforms = tf.ExpandStringSlicePtr(excludePlatforms) return &result } func expandConditionalAccessLocations(in []interface{}) *msgraph.ConditionalAccessLocations {
if len(in) == 0 || in[0] == nil { return nil } result := msgraph.ConditionalAccessLocations{} config := in[0].(map[string]interface{}) includeLocations := config["included_locations"].([]interface{}) excludeLocations := config["excluded_locations"].([]interface{}) result.IncludeLocations = tf.ExpandStringSlicePtr(includeLocations) result.ExcludeLocations = tf.ExpandStringSlicePtr(excludeLocations) return &result } func expandConditionalAccessGrantControls(in []interface{}) *msgraph.ConditionalAccessGrantControls { if len(in) == 0 || in[0] == nil { return nil } result := msgraph.ConditionalAccessGrantControls{} config := in[0].(map[string]interface{}) operator := config["operator"].(string) builtInControls := config["built_in_controls"].([]interface{}) customAuthenticationFactors := config["custom_authentication_factors"].([]interface{}) termsOfUse := config["terms_of_use"].([]interface{}) result.Operator = &operator result.BuiltInControls = tf.ExpandStringSlicePtr(builtInControls) result.CustomAuthenticationFactors = tf.ExpandStringSlicePtr(customAuthenticationFactors) result.TermsOfUse = tf.ExpandStringSlicePtr(termsOfUse) return &result } func expandConditionalAccessSessionControls(in []interface{}) *msgraph.ConditionalAccessSessionControls { if len(in) == 0 || in[0] == nil { return nil } config := in[0].(map[string]interface{}) applicationEnforcedRestrictions := config["application_enforced_restrictions_enabled"].(bool) cloudAppSecurity := config["cloud_app_security_policy"].(string) signInFrequency := config["sign_in_frequency"].(int) result := msgraph.ConditionalAccessSessionControls{ ApplicationEnforcedRestrictions: &msgraph.ApplicationEnforcedRestrictionsSessionControl{ IsEnabled: utils.Bool(applicationEnforcedRestrictions), }, CloudAppSecurity: &msgraph.CloudAppSecurityControl{ IsEnabled: utils.Bool(cloudAppSecurity != ""), CloudAppSecurityType: utils.String(cloudAppSecurity), }, SignInFrequency: &msgraph.SignInFrequencySessionControl{ IsEnabled:
utils.Bool(signInFrequency > 0), Type: utils.String(config["sign_in_frequency_period"].(string)), Value: utils.Int32(int32(signInFrequency)), }, } return &result }
/* Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cel import ( "context" "strings" "testing" "time" genericfeatures "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration/framework" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" v1 "k8s.io/api/core/v1" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" apierrors "k8s.io/apimachinery/pkg/api/errors" ) // Test_ValidateNamespace_NoParams tests a ValidatingAdmissionPolicy that validates creation of a Namespace with no params. 
func Test_ValidateNamespace_NoParams(t *testing.T) {
	forbiddenReason := metav1.StatusReasonForbidden

	testcases := []struct {
		name          string
		policy        *admissionregistrationv1alpha1.ValidatingAdmissionPolicy
		policyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding
		namespace     *v1.Namespace
		err           string // expected error text from namespace creation; "" means success expected
		failureReason metav1.StatusReason
	}{
		{
			name: "namespace name contains suffix enforced by validating admission policy, using object metadata fields",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith('k8s')",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "namespace name does NOT contain suffix enforced by validating admission policyusing, object metadata fields",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith('k8s')",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-foobar",
				},
			},
			err:           "namespaces \"test-foobar\" is forbidden: ValidatingAdmissionPolicy 'validate-namespace-suffix' with binding 'validate-namespace-suffix-binding' denied request: failed expression: object.metadata.name.endsWith('k8s')",
			failureReason: metav1.StatusReasonInvalid,
		},
		{
			name: "namespace name does NOT contain suffix enforced by validating admission policy using object metadata fields, AND validating expression returns StatusReasonForbidden",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith('k8s')",
					// Overrides the default denial reason (Invalid) with Forbidden.
					Reason: &forbiddenReason,
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "forbidden-test-foobar",
				},
			},
			err:           "namespaces \"forbidden-test-foobar\" is forbidden: ValidatingAdmissionPolicy 'validate-namespace-suffix' with binding 'validate-namespace-suffix-binding' denied request: failed expression: object.metadata.name.endsWith('k8s')",
			failureReason: metav1.StatusReasonForbidden,
		},
		{
			name: "namespace name contains suffix enforced by validating admission policy, using request field",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "request.name.endsWith('k8s')",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			// NOTE(review): the case name says the namespace does NOT contain the
			// suffix, yet "test-k8s" DOES end with 'k8s' and no error is expected —
			// this looks like a copy-paste of the previous case; confirm the
			// intended fixture (e.g. "test-foo" with a denial expectation).
			name: "namespace name does NOT contains suffix enforced by validating admission policy, using request field",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "request.name.endsWith('k8s')",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "runtime error when validating namespace, but failurePolicy=Ignore",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.nonExistentProperty == 'someval'",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Ignore, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "runtime error when validating namespace, but failurePolicy=Fail",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.nonExistentProperty == 'someval'",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err:           "namespaces \"test-k8s\" is forbidden: ValidatingAdmissionPolicy 'validate-namespace-suffix' with binding 'validate-namespace-suffix-binding' denied request: expression 'object.nonExistentProperty == 'someval'' resulted in error: no such key: nonExistentProperty",
			failureReason: metav1.StatusReasonInvalid,
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			// Each subtest brings up its own apiserver with the alpha feature
			// gate and the ValidatingAdmissionPolicy plugin enabled.
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ValidatingAdmissionPolicy, true)()
			server, err := apiservertesting.StartTestServer(t, nil, []string{
				"--enable-admission-plugins", "ValidatingAdmissionPolicy",
			}, framework.SharedEtcd())
			if err != nil {
				t.Fatal(err)
			}
			defer server.TearDownFn()

			config := server.ClientConfig

			client, err := clientset.NewForConfig(config)
			if err != nil {
				t.Fatal(err)
			}

			// Install the policy plus the marker validation used by
			// createAndWaitReady to detect when the policy is enforced.
			policy := withWaitReadyConstraintAndExpression(testcase.policy)
			if _, err := client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Create(context.TODO(), policy, metav1.CreateOptions{}); err != nil {
				t.Fatal(err)
			}
			if err := createAndWaitReady(t, client, testcase.policyBinding, nil); err != nil {
				t.Fatal(err)
			}

			_, err = client.CoreV1().Namespaces().Create(context.TODO(), testcase.namespace, metav1.CreateOptions{})
			if err == nil && testcase.err == "" {
				return
			}
			if err == nil && testcase.err != "" {
				t.Logf("actual error: %v", err)
				t.Logf("expected error: %v", testcase.err)
				t.Fatal("got nil error but expected an error")
			}
			if err != nil && testcase.err == "" {
				t.Logf("actual error: %v", err)
				t.Logf("expected error: %v", testcase.err)
				t.Fatal("got error but expected none")
			}

			// Exact-match on the denial message; at this point err is non-nil.
			if err.Error() != testcase.err {
				t.Logf("actual validation error: %v", err)
				t.Logf("expected validation error: %v", testcase.err)
				t.Error("unexpected validation error")
			}

			checkFailureReason(t, err, testcase.failureReason)
		})
	}
}

// Test_ValidateNamespace_WithConfigMapParams tests a ValidatingAdmissionPolicy that validates creation of a Namespace,
// using ConfigMap as a param reference.
func Test_ValidateNamespace_WithConfigMapParams(t *testing.T) {
	testcases := []struct {
		name          string
		policy        *admissionregistrationv1alpha1.ValidatingAdmissionPolicy
		policyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding
		configMap     *v1.ConfigMap // params referenced by the binding
		namespace     *v1.Namespace
		err           string
		failureReason metav1.StatusReason
	}{
		{
			name: "namespace name contains suffix enforced by validating admission policy",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith(params.data.namespaceSuffix)",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withParams(configParamKind(), withNamespaceMatch(makePolicy("validate-namespace-suffix"))))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", "validate-namespace-suffix-param"),
			configMap: makeConfigParams("validate-namespace-suffix-param", map[string]string{
				"namespaceSuffix": "k8s",
			}),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "namespace name does NOT contain suffix enforced by validating admission policy",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith(params.data.namespaceSuffix)",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withParams(configParamKind(), withNamespaceMatch(makePolicy("validate-namespace-suffix"))))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", "validate-namespace-suffix-param"),
			configMap: makeConfigParams("validate-namespace-suffix-param", map[string]string{
				"namespaceSuffix": "k8s",
			}),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-foo",
				},
			},
			err:           "namespaces \"test-foo\" is forbidden: ValidatingAdmissionPolicy 'validate-namespace-suffix' with binding 'validate-namespace-suffix-binding' denied request: failed expression: object.metadata.name.endsWith(params.data.namespaceSuffix)",
			failureReason: metav1.StatusReasonInvalid,
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ValidatingAdmissionPolicy, true)()
			server, err := apiservertesting.StartTestServer(t, nil, []string{
				"--enable-admission-plugins", "ValidatingAdmissionPolicy",
			}, framework.SharedEtcd())
			if err != nil {
				t.Fatal(err)
			}
			defer server.TearDownFn()

			config := server.ClientConfig

			client, err := clientset.NewForConfig(config)
			if err != nil {
				t.Fatal(err)
			}

			// The params ConfigMap must exist before the binding that
			// references it is exercised.
			if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), testcase.configMap, metav1.CreateOptions{}); err != nil {
				t.Fatal(err)
			}

			policy := withWaitReadyConstraintAndExpression(testcase.policy)
			if _, err := client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Create(context.TODO(), policy, metav1.CreateOptions{}); err != nil {
				t.Fatal(err)
			}
			if err := createAndWaitReady(t, client, testcase.policyBinding, nil); err != nil {
				t.Fatal(err)
			}

			_, err = client.CoreV1().Namespaces().Create(context.TODO(), testcase.namespace, metav1.CreateOptions{})
			if err == nil && testcase.err == "" {
				return
			}
			if err == nil && testcase.err != "" {
				t.Logf("actual error: %v", err)
				t.Logf("expected error: %v", testcase.err)
				t.Fatal("got nil error but expected an error")
			}
			if err != nil && testcase.err == "" {
				t.Logf("actual error: %v", err)
				t.Logf("expected error: %v", testcase.err)
				t.Fatal("got error but expected none")
			}
			if err.Error() != testcase.err {
				t.Logf("actual validation error: %v", err)
				t.Logf("expected validation error: %v", testcase.err)
				t.Error("unexpected validation error")
			}

			checkFailureReason(t, err, testcase.failureReason)
		})
	}
}

// TestMultiplePolicyBindings exercises one policy bound three times with
// different ConfigMap params (always-fail, always-pass, conditional) and
// verifies that each binding's params drive the admission outcome for secrets
// selected by that binding's label selector.
func TestMultiplePolicyBindings(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ValidatingAdmissionPolicy, true)()

	server, err := apiservertesting.StartTestServer(t, nil, nil, framework.SharedEtcd())
	if err != nil {
		t.Fatal(err)
	}
	defer server.TearDownFn()

	config := server.ClientConfig
	client, err := clientset.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}

	paramKind := &admissionregistrationv1alpha1.ParamKind{
		APIVersion: "v1",
		Kind:       "ConfigMap",
	}
	policy := withPolicyExistsLabels([]string{"paramIdent"}, withParams(paramKind, withPolicyMatch("secrets", withFailurePolicy(admissionregistrationv1alpha1.Fail, makePolicy("test-policy")))))
	policy.Spec.Validations = []admissionregistrationv1alpha1.Validation{
		{
			Expression: "params.data.autofail != 'true' && (params.data.conditional == 'false' || object.metadata.name.startsWith(params.data.check))",
		},
	}
	policy = withWaitReadyConstraintAndExpression(policy)
	if _, err := client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Create(context.TODO(), policy, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	// Binding 1: params force the expression to fail for any object.
	autoFailParams := makeConfigParams("autofail-params", map[string]string{
		"autofail": "true",
	})
	if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), autoFailParams, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
	autofailBinding := withBindingExistsLabels([]string{"autofail-binding-label"}, policy, makeBinding("autofail-binding", "test-policy", "autofail-params"))
	if err := createAndWaitReady(t, client, autofailBinding, map[string]string{"paramIdent": "true", "autofail-binding-label": "true"}); err != nil {
		t.Fatal(err)
	}

	// Binding 2: params make the expression pass unconditionally.
	autoPassParams := makeConfigParams("autopass-params", map[string]string{
		"autofail":    "false",
		"conditional": "false",
	})
	if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), autoPassParams, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
	autopassBinding := withBindingExistsLabels([]string{"autopass-binding-label"}, policy, makeBinding("autopass-binding", "test-policy", "autopass-params"))
	if err := createAndWaitReady(t, client, autopassBinding, map[string]string{"paramIdent": "true", "autopass-binding-label": "true"}); err != nil {
		t.Fatal(err)
	}

	// Binding 3: params admit only names starting with "prefix-".
	condpassParams := makeConfigParams("condpass-params", map[string]string{
		"autofail":    "false",
		"conditional": "true",
		"check":       "prefix-",
	})
	if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), condpassParams, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
	condpassBinding := withBindingExistsLabels([]string{"condpass-binding-label"}, policy, makeBinding("condpass-binding", "test-policy", "condpass-params"))
	if err := createAndWaitReady(t, client, condpassBinding, map[string]string{"paramIdent": "true", "condpass-binding-label": "true"}); err != nil {
		t.Fatal(err)
	}

	autofailingSecret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "autofailing-secret",
			Labels: map[string]string{
				"paramIdent":             "someVal",
				"autofail-binding-label": "true",
			},
		},
	}
	_, err = client.CoreV1().Secrets("default").Create(context.TODO(), autofailingSecret, metav1.CreateOptions{})
	if err == nil {
		t.Fatal("expected secret creation to fail due to autofail-binding")
	}
	checkForFailedRule(t, err)
	checkFailureReason(t, err, metav1.StatusReasonInvalid)

	autopassingSecret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "autopassing-secret",
			Labels: map[string]string{
				"paramIdent":             "someVal",
				"autopass-binding-label": "true",
			},
		},
	}
	if _, err := client.CoreV1().Secrets("default").Create(context.TODO(), autopassingSecret, metav1.CreateOptions{}); err != nil {
		t.Fatalf("expected secret creation to succeed, got: %s", err)
	}

	condpassingSecret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "prefix-condpassing-secret",
			Labels: map[string]string{
				"paramIdent":             "someVal",
				"condpass-binding-label": "true",
			},
		},
	}
	if _, err := client.CoreV1().Secrets("default").Create(context.TODO(), condpassingSecret, metav1.CreateOptions{}); err != nil {
		t.Fatalf("expected secret creation to succeed, got: %s", err)
	}

	// Name lacks the required "prefix-", so the conditional binding denies it.
	condfailingSecret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "condfailing-secret",
			Labels: map[string]string{
				"paramIdent":             "someVal",
				"condpass-binding-label": "true",
			},
		},
	}
	_, err = client.CoreV1().Secrets("default").Create(context.TODO(), condfailingSecret, metav1.CreateOptions{})
	if err == nil {
		// NOTE(review): message mentions autofail-binding but this denial comes
		// from condpass-binding — presumably a copy-paste; confirm.
		t.Fatal("expected secret creation to fail due to autofail-binding")
	}
	checkForFailedRule(t, err)
	checkFailureReason(t, err, metav1.StatusReasonInvalid)
}

// withWaitReadyConstraintAndExpression returns a deep copy of the policy with
// an extra rule matching UPDATEs to the "test-marker" endpoints object and a
// prepended validation that always denies that marker. createAndWaitReady
// patches the marker until it sees this denial, proving the policy is active.
func withWaitReadyConstraintAndExpression(policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
	policy = policy.DeepCopy()
	policy.Spec.MatchConstraints.ResourceRules = append(policy.Spec.MatchConstraints.ResourceRules, admissionregistrationv1alpha1.NamedRuleWithOperations{
		ResourceNames: []string{"test-marker"},
		RuleWithOperations: admissionregistrationv1alpha1.RuleWithOperations{
			Operations: []admissionregistrationv1.OperationType{
				"UPDATE",
			},
			Rule: admissionregistrationv1.Rule{
				APIGroups: []string{
					"",
				},
				APIVersions: []string{
					"v1",
				},
				Resources: []string{
					"endpoints",
				},
			},
		},
	})
	// Prepended so the marker denial fires before any testcase validations.
	policy.Spec.Validations = append([]admissionregistrationv1alpha1.Validation{{
		Expression: "object.metadata.name != 'test-marker'",
		Message:    "marker denied; policy is ready",
	}}, policy.Spec.Validations...)
	return policy
}

// createAndWaitReady creates the binding, then repeatedly no-op-patches the
// marker endpoints object until the patch is denied with the marker message,
// which signals the policy/binding pair is enforced. The marker is deleted on
// return.
func createAndWaitReady(t *testing.T, client *clientset.Clientset, binding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, matchLabels map[string]string) error {
	marker := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "test-marker", Namespace: "default", Labels: matchLabels}}
	defer func() {
		err := client.CoreV1().Endpoints("default").Delete(context.TODO(), marker.Name, metav1.DeleteOptions{})
		if err != nil {
			t.Logf("error deleting marker: %v", err)
		}
	}()
	marker, err := client.CoreV1().Endpoints("default").Create(context.TODO(), marker, metav1.CreateOptions{})
	if err != nil {
		return err
	}

	_, err = client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
	if err != nil {
		return err
	}

	if waitErr := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) {
		// An empty JSON patch is a no-op UPDATE, enough to trigger admission.
		_, err := client.CoreV1().Endpoints("default").Patch(context.TODO(), marker.Name, types.JSONPatchType, []byte("[]"), metav1.PatchOptions{})
		if err != nil && strings.Contains(err.Error(), "marker denied; policy is ready") {
			return true, nil
		} else {
			// NOTE(review): returning a non-nil err here aborts the poll on the
			// first unexpected patch error instead of retrying — confirm this
			// fail-fast behavior is intended (returning false, nil would retry).
			t.Logf("waiting for policy to be ready. Marker: %v, Last marker patch response: %v", marker, err)
			return false, err
		}
	}); waitErr != nil {
		return waitErr
	}

	return nil
}

// makePolicy returns an empty ValidatingAdmissionPolicy with the given name.
func makePolicy(name string) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
	return &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	}
}

// withParams sets the policy's ParamKind (mutates and returns the policy).
func withParams(params *admissionregistrationv1alpha1.ParamKind, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
	policy.Spec.ParamKind = params
	return policy
}

// configParamKind returns the ParamKind for core/v1 ConfigMap params.
func configParamKind() *admissionregistrationv1alpha1.ParamKind {
	return &admissionregistrationv1alpha1.ParamKind{
		APIVersion: "v1",
		Kind:       "ConfigMap",
	}
}

// withFailurePolicy sets the policy's failure policy (mutates and returns).
func withFailurePolicy(failure admissionregistrationv1alpha1.FailurePolicyType, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
	policy.Spec.FailurePolicy = &failure
	return policy
}

// withNamespaceMatch matches the policy against CREATE of namespaces.
func withNamespaceMatch(policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
	return withPolicyMatch("namespaces", policy)
}

// withPolicyMatch matches the policy against CREATE of the given core resource
// across all API versions.
func withPolicyMatch(resource string, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
	policy.Spec.MatchConstraints = &admissionregistrationv1alpha1.MatchResources{
		ResourceRules: []admissionregistrationv1alpha1.NamedRuleWithOperations{
			{
				RuleWithOperations: admissionregistrationv1alpha1.RuleWithOperations{
					Operations: []admissionregistrationv1.OperationType{
						"CREATE",
					},
					Rule: admissionregistrationv1.Rule{
						APIGroups: []string{
							"",
						},
						APIVersions: []string{
							"*",
						},
						Resources: []string{
							resource,
						},
					},
				},
			},
		},
	}
	return policy
}

// withPolicyExistsLabels restricts the policy to objects carrying all of the
// given label keys (Exists selector), initializing MatchConstraints if needed.
func withPolicyExistsLabels(labels []string, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
	if policy.Spec.MatchConstraints == nil {
		policy.Spec.MatchConstraints = &admissionregistrationv1alpha1.MatchResources{}
	}
	matchExprs := buildExistsSelector(labels)
	policy.Spec.MatchConstraints.ObjectSelector = &metav1.LabelSelector{
		MatchExpressions: matchExprs,
	}
	return policy
}

// withValidations sets the policy's validations (mutates and returns).
func withValidations(validations []admissionregistrationv1alpha1.Validation, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
	policy.Spec.Validations = validations
	return policy
}

// makeBinding builds a binding to the named policy; a non-empty paramName adds
// a ParamRef into the "default" namespace.
func makeBinding(name, policyName, paramName string) *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding {
	var paramRef *admissionregistrationv1alpha1.ParamRef
	if paramName != "" {
		paramRef = &admissionregistrationv1alpha1.ParamRef{
			Name:      paramName,
			Namespace: "default",
		}
	}
	return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec{
			PolicyName: policyName,
			ParamRef:   paramRef,
		},
	}
}

// withBindingExistsLabels copies the policy's match constraints onto the
// binding (when a policy is given) and narrows them with an Exists selector on
// the given label keys.
func withBindingExistsLabels(labels []string, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, binding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding) *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding {
	if policy != nil {
		// shallow copy
		constraintsCopy := *policy.Spec.MatchConstraints
		binding.Spec.MatchResources = &constraintsCopy
	}
	matchExprs := buildExistsSelector(labels)
	binding.Spec.MatchResources.ObjectSelector = &metav1.LabelSelector{
		MatchExpressions: matchExprs,
	}
	return binding
}

// buildExistsSelector turns label keys into Exists label-selector requirements.
func buildExistsSelector(labels []string) []metav1.LabelSelectorRequirement {
	matchExprs := make([]metav1.LabelSelectorRequirement, len(labels))
	for i := 0; i < len(labels); i++ {
		matchExprs[i].Key = labels[i]
		matchExprs[i].Operator = metav1.LabelSelectorOpExists
	}
	return matchExprs
}

// makeConfigParams returns a ConfigMap used as policy params.
func makeConfigParams(name string, data map[string]string) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data:       data,
	}
}

// checkForFailedRule asserts the error is an expression denial (not a CEL
// evaluation error). Callers must pass a non-nil err.
func checkForFailedRule(t *testing.T, err error) {
	if !strings.Contains(err.Error(), "failed expression") {
		t.Fatalf("unexpected error (expected to find \"failed expression\"): %s", err)
	}
	if strings.Contains(err.Error(), "evaluation error") {
		t.Fatalf("CEL rule evaluation failed: %s", err)
	}
}

// checkFailureReason asserts the API status reason on the error matches the
// expected reason.
func checkFailureReason(t *testing.T, err error, expectedReason metav1.StatusReason) {
	reason := err.(apierrors.APIStatus).Status().Reason
	if reason != expectedReason {
		t.Logf("actual error reason: %v", reason)
		t.Logf("expected failure reason: %v", expectedReason)
		t.Error("unexpected error reason")
	}
}
test/integration/apiserver/cel: add Test_PolicyExemption Signed-off-by: Andrew Sy Kim <e6955f0a2995ee00d92594510dd67341c68bf52d@google.com> /* Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

// NOTE(review): everything below is an exact duplicate of the test file above
// (same fixtures, same typos) — this chunk appears to be a concatenation
// artifact containing two revisions of the same file. The trailing
// TestMultiplePolicyBindings is cut off at the end of this chunk.

package cel

import (
	"context"
	"strings"
	"testing"
	"time"

	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/framework"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"

	v1 "k8s.io/api/core/v1"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// Test_ValidateNamespace_NoParams tests a ValidatingAdmissionPolicy that validates creation of a Namespace with no params.
func Test_ValidateNamespace_NoParams(t *testing.T) {
	forbiddenReason := metav1.StatusReasonForbidden

	testcases := []struct {
		name          string
		policy        *admissionregistrationv1alpha1.ValidatingAdmissionPolicy
		policyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding
		namespace     *v1.Namespace
		err           string
		failureReason metav1.StatusReason
	}{
		{
			name: "namespace name contains suffix enforced by validating admission policy, using object metadata fields",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith('k8s')",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "namespace name does NOT contain suffix enforced by validating admission policyusing, object metadata fields",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith('k8s')",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-foobar",
				},
			},
			err:           "namespaces \"test-foobar\" is forbidden: ValidatingAdmissionPolicy 'validate-namespace-suffix' with binding 'validate-namespace-suffix-binding' denied request: failed expression: object.metadata.name.endsWith('k8s')",
			failureReason: metav1.StatusReasonInvalid,
		},
		{
			name: "namespace name does NOT contain suffix enforced by validating admission policy using object metadata fields, AND validating expression returns StatusReasonForbidden",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith('k8s')",
					Reason:     &forbiddenReason,
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "forbidden-test-foobar",
				},
			},
			err:           "namespaces \"forbidden-test-foobar\" is forbidden: ValidatingAdmissionPolicy 'validate-namespace-suffix' with binding 'validate-namespace-suffix-binding' denied request: failed expression: object.metadata.name.endsWith('k8s')",
			failureReason: metav1.StatusReasonForbidden,
		},
		{
			name: "namespace name contains suffix enforced by validating admission policy, using request field",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "request.name.endsWith('k8s')",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "namespace name does NOT contains suffix enforced by validating admission policy, using request field",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "request.name.endsWith('k8s')",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "runtime error when validating namespace, but failurePolicy=Ignore",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.nonExistentProperty == 'someval'",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Ignore, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "runtime error when validating namespace, but failurePolicy=Fail",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.nonExistentProperty == 'someval'",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withNamespaceMatch(makePolicy("validate-namespace-suffix")))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", ""),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err:           "namespaces \"test-k8s\" is forbidden: ValidatingAdmissionPolicy 'validate-namespace-suffix' with binding 'validate-namespace-suffix-binding' denied request: expression 'object.nonExistentProperty == 'someval'' resulted in error: no such key: nonExistentProperty",
			failureReason: metav1.StatusReasonInvalid,
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ValidatingAdmissionPolicy, true)()
			server, err := apiservertesting.StartTestServer(t, nil, []string{
				"--enable-admission-plugins", "ValidatingAdmissionPolicy",
			}, framework.SharedEtcd())
			if err != nil {
				t.Fatal(err)
			}
			defer server.TearDownFn()

			config := server.ClientConfig

			client, err := clientset.NewForConfig(config)
			if err != nil {
				t.Fatal(err)
			}

			policy := withWaitReadyConstraintAndExpression(testcase.policy)
			if _, err := client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Create(context.TODO(), policy, metav1.CreateOptions{}); err != nil {
				t.Fatal(err)
			}
			if err := createAndWaitReady(t, client, testcase.policyBinding, nil); err != nil {
				t.Fatal(err)
			}

			_, err = client.CoreV1().Namespaces().Create(context.TODO(), testcase.namespace, metav1.CreateOptions{})
			if err == nil && testcase.err == "" {
				return
			}
			if err == nil && testcase.err != "" {
				t.Logf("actual error: %v", err)
				t.Logf("expected error: %v", testcase.err)
				t.Fatal("got nil error but expected an error")
			}
			if err != nil && testcase.err == "" {
				t.Logf("actual error: %v", err)
				t.Logf("expected error: %v", testcase.err)
				t.Fatal("got error but expected none")
			}
			if err.Error() != testcase.err {
				t.Logf("actual validation error: %v", err)
				t.Logf("expected validation error: %v", testcase.err)
				t.Error("unexpected validation error")
			}

			checkFailureReason(t, err, testcase.failureReason)
		})
	}
}

// Test_ValidateNamespace_WithConfigMapParams tests a ValidatingAdmissionPolicy that validates creation of a Namespace,
// using ConfigMap as a param reference.
func Test_ValidateNamespace_WithConfigMapParams(t *testing.T) {
	testcases := []struct {
		name          string
		policy        *admissionregistrationv1alpha1.ValidatingAdmissionPolicy
		policyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding
		configMap     *v1.ConfigMap
		namespace     *v1.Namespace
		err           string
		failureReason metav1.StatusReason
	}{
		{
			name: "namespace name contains suffix enforced by validating admission policy",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith(params.data.namespaceSuffix)",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withParams(configParamKind(), withNamespaceMatch(makePolicy("validate-namespace-suffix"))))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", "validate-namespace-suffix-param"),
			configMap: makeConfigParams("validate-namespace-suffix-param", map[string]string{
				"namespaceSuffix": "k8s",
			}),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-k8s",
				},
			},
			err: "",
		},
		{
			name: "namespace name does NOT contain suffix enforced by validating admission policy",
			policy: withValidations([]admissionregistrationv1alpha1.Validation{
				{
					Expression: "object.metadata.name.endsWith(params.data.namespaceSuffix)",
				},
			}, withFailurePolicy(admissionregistrationv1alpha1.Fail, withParams(configParamKind(), withNamespaceMatch(makePolicy("validate-namespace-suffix"))))),
			policyBinding: makeBinding("validate-namespace-suffix-binding", "validate-namespace-suffix", "validate-namespace-suffix-param"),
			configMap: makeConfigParams("validate-namespace-suffix-param", map[string]string{
				"namespaceSuffix": "k8s",
			}),
			namespace: &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-foo",
				},
			},
			err:           "namespaces \"test-foo\" is forbidden: ValidatingAdmissionPolicy 'validate-namespace-suffix' with binding 'validate-namespace-suffix-binding' denied request: failed expression: object.metadata.name.endsWith(params.data.namespaceSuffix)",
			failureReason: metav1.StatusReasonInvalid,
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ValidatingAdmissionPolicy, true)()
			server, err := apiservertesting.StartTestServer(t, nil, []string{
				"--enable-admission-plugins", "ValidatingAdmissionPolicy",
			}, framework.SharedEtcd())
			if err != nil {
				t.Fatal(err)
			}
			defer server.TearDownFn()

			config := server.ClientConfig

			client, err := clientset.NewForConfig(config)
			if err != nil {
				t.Fatal(err)
			}

			if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), testcase.configMap, metav1.CreateOptions{}); err != nil {
				t.Fatal(err)
			}

			policy := withWaitReadyConstraintAndExpression(testcase.policy)
			if _, err := client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Create(context.TODO(), policy, metav1.CreateOptions{}); err != nil {
				t.Fatal(err)
			}
			if err := createAndWaitReady(t, client, testcase.policyBinding, nil); err != nil {
				t.Fatal(err)
			}

			_, err = client.CoreV1().Namespaces().Create(context.TODO(), testcase.namespace, metav1.CreateOptions{})
			if err == nil && testcase.err == "" {
				return
			}
			if err == nil && testcase.err != "" {
				t.Logf("actual error: %v", err)
				t.Logf("expected error: %v", testcase.err)
				t.Fatal("got nil error but expected an error")
			}
			if err != nil && testcase.err == "" {
				t.Logf("actual error: %v", err)
				t.Logf("expected error: %v", testcase.err)
				t.Fatal("got error but expected none")
			}
			if err.Error() != testcase.err {
				t.Logf("actual validation error: %v", err)
				t.Logf("expected validation error: %v", testcase.err)
				t.Error("unexpected validation error")
			}

			checkFailureReason(t, err, testcase.failureReason)
		})
	}
}

// NOTE(review): this definition continues beyond the end of this chunk.
func TestMultiplePolicyBindings(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ValidatingAdmissionPolicy, true)()
server, err := apiservertesting.StartTestServer(t, nil, nil, framework.SharedEtcd()) if err != nil { t.Fatal(err) } defer server.TearDownFn() config := server.ClientConfig client, err := clientset.NewForConfig(config) if err != nil { t.Fatal(err) } paramKind := &admissionregistrationv1alpha1.ParamKind{ APIVersion: "v1", Kind: "ConfigMap", } policy := withPolicyExistsLabels([]string{"paramIdent"}, withParams(paramKind, withPolicyMatch("secrets", withFailurePolicy(admissionregistrationv1alpha1.Fail, makePolicy("test-policy"))))) policy.Spec.Validations = []admissionregistrationv1alpha1.Validation{ { Expression: "params.data.autofail != 'true' && (params.data.conditional == 'false' || object.metadata.name.startsWith(params.data.check))", }, } policy = withWaitReadyConstraintAndExpression(policy) if _, err := client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Create(context.TODO(), policy, metav1.CreateOptions{}); err != nil { t.Fatal(err) } autoFailParams := makeConfigParams("autofail-params", map[string]string{ "autofail": "true", }) if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), autoFailParams, metav1.CreateOptions{}); err != nil { t.Fatal(err) } autofailBinding := withBindingExistsLabels([]string{"autofail-binding-label"}, policy, makeBinding("autofail-binding", "test-policy", "autofail-params")) if err := createAndWaitReady(t, client, autofailBinding, map[string]string{"paramIdent": "true", "autofail-binding-label": "true"}); err != nil { t.Fatal(err) } autoPassParams := makeConfigParams("autopass-params", map[string]string{ "autofail": "false", "conditional": "false", }) if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), autoPassParams, metav1.CreateOptions{}); err != nil { t.Fatal(err) } autopassBinding := withBindingExistsLabels([]string{"autopass-binding-label"}, policy, makeBinding("autopass-binding", "test-policy", "autopass-params")) if err := createAndWaitReady(t, client, autopassBinding, 
map[string]string{"paramIdent": "true", "autopass-binding-label": "true"}); err != nil { t.Fatal(err) } condpassParams := makeConfigParams("condpass-params", map[string]string{ "autofail": "false", "conditional": "true", "check": "prefix-", }) if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), condpassParams, metav1.CreateOptions{}); err != nil { t.Fatal(err) } condpassBinding := withBindingExistsLabels([]string{"condpass-binding-label"}, policy, makeBinding("condpass-binding", "test-policy", "condpass-params")) if err := createAndWaitReady(t, client, condpassBinding, map[string]string{"paramIdent": "true", "condpass-binding-label": "true"}); err != nil { t.Fatal(err) } autofailingSecret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "autofailing-secret", Labels: map[string]string{ "paramIdent": "someVal", "autofail-binding-label": "true", }, }, } _, err = client.CoreV1().Secrets("default").Create(context.TODO(), autofailingSecret, metav1.CreateOptions{}) if err == nil { t.Fatal("expected secret creation to fail due to autofail-binding") } checkForFailedRule(t, err) checkFailureReason(t, err, metav1.StatusReasonInvalid) autopassingSecret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "autopassing-secret", Labels: map[string]string{ "paramIdent": "someVal", "autopass-binding-label": "true", }, }, } if _, err := client.CoreV1().Secrets("default").Create(context.TODO(), autopassingSecret, metav1.CreateOptions{}); err != nil { t.Fatalf("expected secret creation to succeed, got: %s", err) } condpassingSecret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "prefix-condpassing-secret", Labels: map[string]string{ "paramIdent": "someVal", "condpass-binding-label": "true", }, }, } if _, err := client.CoreV1().Secrets("default").Create(context.TODO(), condpassingSecret, metav1.CreateOptions{}); err != nil { t.Fatalf("expected secret creation to succeed, got: %s", err) } condfailingSecret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: 
"condfailing-secret", Labels: map[string]string{ "paramIdent": "someVal", "condpass-binding-label": "true", }, }, } _, err = client.CoreV1().Secrets("default").Create(context.TODO(), condfailingSecret, metav1.CreateOptions{}) if err == nil { t.Fatal("expected secret creation to fail due to autofail-binding") } checkForFailedRule(t, err) checkFailureReason(t, err, metav1.StatusReasonInvalid) } // Test_PolicyExemption tests that ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding resources // are exempt from policy rules. func Test_PolicyExemption(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.CELValidatingAdmission, true)() server, err := apiservertesting.StartTestServer(t, nil, []string{ "--enable-admission-plugins", "ValidatingAdmissionPolicy", }, framework.SharedEtcd()) if err != nil { t.Fatal(err) } defer server.TearDownFn() config := server.ClientConfig client, err := clientset.NewForConfig(config) if err != nil { t.Fatal(err) } policy := makePolicy("test-policy") policy.Spec.MatchConstraints = &admissionregistrationv1alpha1.MatchResources{ ResourceRules: []admissionregistrationv1alpha1.NamedRuleWithOperations{ { RuleWithOperations: admissionregistrationv1alpha1.RuleWithOperations{ Operations: []admissionregistrationv1.OperationType{ "*", }, Rule: admissionregistrationv1.Rule{ APIGroups: []string{ "*", }, APIVersions: []string{ "*", }, Resources: []string{ "*", }, }, }, }, }, } policy.Spec.Validations = []admissionregistrationv1alpha1.Validation{{ Expression: "false", Message: "marker denied; policy is ready", }} policy, err = client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Create(context.TODO(), policy, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } policyBinding := makeBinding("test-policy-binding", "test-policy", "") if err := createAndWaitReady(t, client, policyBinding, nil); err != nil { t.Fatal(err) } // validate that operations to 
ValidatingAdmissionPolicy are exempt from an existing policy that catches all resources policyCopy := policy.DeepCopy() ignoreFailurePolicy := admissionregistrationv1alpha1.Ignore policyCopy.Spec.FailurePolicy = &ignoreFailurePolicy _, err = client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Update(context.TODO(), policyCopy, metav1.UpdateOptions{}) if err != nil { t.Error(err) } policyBinding, err = client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Get(context.TODO(), policyBinding.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } // validate that operations to ValidatingAdmissionPolicyBindings are exempt from an existing policy that catches all resources policyBindingCopy := policyBinding.DeepCopy() policyBindingCopy.Spec.PolicyName = "different-binding" _, err = client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Update(context.TODO(), policyBindingCopy, metav1.UpdateOptions{}) if err != nil { t.Error(err) } } func withWaitReadyConstraintAndExpression(policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { policy = policy.DeepCopy() policy.Spec.MatchConstraints.ResourceRules = append(policy.Spec.MatchConstraints.ResourceRules, admissionregistrationv1alpha1.NamedRuleWithOperations{ ResourceNames: []string{"test-marker"}, RuleWithOperations: admissionregistrationv1alpha1.RuleWithOperations{ Operations: []admissionregistrationv1.OperationType{ "UPDATE", }, Rule: admissionregistrationv1.Rule{ APIGroups: []string{ "", }, APIVersions: []string{ "v1", }, Resources: []string{ "endpoints", }, }, }, }) policy.Spec.Validations = append([]admissionregistrationv1alpha1.Validation{{ Expression: "object.metadata.name != 'test-marker'", Message: "marker denied; policy is ready", }}, policy.Spec.Validations...) 
return policy } func createAndWaitReady(t *testing.T, client *clientset.Clientset, binding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, matchLabels map[string]string) error { marker := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "test-marker", Namespace: "default", Labels: matchLabels}} defer func() { err := client.CoreV1().Endpoints("default").Delete(context.TODO(), marker.Name, metav1.DeleteOptions{}) if err != nil { t.Logf("error deleting marker: %v", err) } }() marker, err := client.CoreV1().Endpoints("default").Create(context.TODO(), marker, metav1.CreateOptions{}) if err != nil { return err } _, err = client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Create(context.TODO(), binding, metav1.CreateOptions{}) if err != nil { return err } if waitErr := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { _, err := client.CoreV1().Endpoints("default").Patch(context.TODO(), marker.Name, types.JSONPatchType, []byte("[]"), metav1.PatchOptions{}) if err != nil && strings.Contains(err.Error(), "marker denied; policy is ready") { return true, nil } else { t.Logf("waiting for policy to be ready. 
Marker: %v, Last marker patch response: %v", marker, err) return false, err } }); waitErr != nil { return waitErr } return nil } func makePolicy(name string) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { return &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{ ObjectMeta: metav1.ObjectMeta{Name: name}, } } func withParams(params *admissionregistrationv1alpha1.ParamKind, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { policy.Spec.ParamKind = params return policy } func configParamKind() *admissionregistrationv1alpha1.ParamKind { return &admissionregistrationv1alpha1.ParamKind{ APIVersion: "v1", Kind: "ConfigMap", } } func withFailurePolicy(failure admissionregistrationv1alpha1.FailurePolicyType, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { policy.Spec.FailurePolicy = &failure return policy } func withNamespaceMatch(policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { return withPolicyMatch("namespaces", policy) } func withPolicyMatch(resource string, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { policy.Spec.MatchConstraints = &admissionregistrationv1alpha1.MatchResources{ ResourceRules: []admissionregistrationv1alpha1.NamedRuleWithOperations{ { RuleWithOperations: admissionregistrationv1alpha1.RuleWithOperations{ Operations: []admissionregistrationv1.OperationType{ "CREATE", }, Rule: admissionregistrationv1.Rule{ APIGroups: []string{ "", }, APIVersions: []string{ "*", }, Resources: []string{ resource, }, }, }, }, }, } return policy } func withPolicyExistsLabels(labels []string, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { if policy.Spec.MatchConstraints == nil { 
policy.Spec.MatchConstraints = &admissionregistrationv1alpha1.MatchResources{} } matchExprs := buildExistsSelector(labels) policy.Spec.MatchConstraints.ObjectSelector = &metav1.LabelSelector{ MatchExpressions: matchExprs, } return policy } func withValidations(validations []admissionregistrationv1alpha1.Validation, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { policy.Spec.Validations = validations return policy } func makeBinding(name, policyName, paramName string) *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding { var paramRef *admissionregistrationv1alpha1.ParamRef if paramName != "" { paramRef = &admissionregistrationv1alpha1.ParamRef{ Name: paramName, Namespace: "default", } } return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{ ObjectMeta: metav1.ObjectMeta{Name: name}, Spec: admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec{ PolicyName: policyName, ParamRef: paramRef, }, } } func withBindingExistsLabels(labels []string, policy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, binding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding) *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding { if policy != nil { // shallow copy constraintsCopy := *policy.Spec.MatchConstraints binding.Spec.MatchResources = &constraintsCopy } matchExprs := buildExistsSelector(labels) binding.Spec.MatchResources.ObjectSelector = &metav1.LabelSelector{ MatchExpressions: matchExprs, } return binding } func buildExistsSelector(labels []string) []metav1.LabelSelectorRequirement { matchExprs := make([]metav1.LabelSelectorRequirement, len(labels)) for i := 0; i < len(labels); i++ { matchExprs[i].Key = labels[i] matchExprs[i].Operator = metav1.LabelSelectorOpExists } return matchExprs } func makeConfigParams(name string, data map[string]string) *v1.ConfigMap { return &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: name}, Data: 
data, } } func checkForFailedRule(t *testing.T, err error) { if !strings.Contains(err.Error(), "failed expression") { t.Fatalf("unexpected error (expected to find \"failed expression\"): %s", err) } if strings.Contains(err.Error(), "evaluation error") { t.Fatalf("CEL rule evaluation failed: %s", err) } } func checkFailureReason(t *testing.T, err error, expectedReason metav1.StatusReason) { reason := err.(apierrors.APIStatus).Status().Reason if reason != expectedReason { t.Logf("actual error reason: %v", reason) t.Logf("expected failure reason: %v", expectedReason) t.Error("unexpected error reason") } }
package engine

import "testing"

// idtest pairs an identify assertion string with whether running the Identify engine
// against it is expected to succeed.
type idtest struct {
	assertion string
	valid     bool
}

// idtests is the shared table for the identify tests below.
// These are the more specific assertions for max's very old local DB: compound
// assertions pin each social-proof account to the keybase user expected to own it.
// (This span previously contained two concatenated copies of this file around a bare
// commit-message line, which could not compile; only the newer copy is kept.)
var idtests = []idtest{
	{"t_alice", true},
	{"t_bob", true},
	{"t_charlie", true},
	{"t_doug", true},
	{"t_ellen", false},
	{"tacovontaco@twitter", false},
	{"t_alice+tacovontaco@twitter", true},
	{"t_alice+tacovontaco@twitter+kbtester2@github", true},
	{"tacovontaco@twitter+kbtester2@github", false},
	{"kbtester2@github+t_alice", true},
	{"kbtester1@github+t_bob", true},
	{"kbtester1@twitter+t_bob", true},
	{"t_charlie+tacovontaco@twitter", true},
	{"t_charlie+tacoplusplus@github", true},
	{"t_charlie+t_alice", false},
	{"t_charlie+kbtester2@github", false},
}

// TestIdentify runs the Identify engine (without tracking) over every table entry.
func TestIdentify(t *testing.T) {
	tc := SetupEngineTest(t, "Identify")
	defer tc.Cleanup()

	for _, x := range idtests {
		ctx := &Context{IdentifyUI: &FakeIdentifyUI{}}
		eng := NewIdentify(NewIdentifyArg(x.assertion, false))
		err := RunEngine(eng, ctx)
		if x.valid && err != nil {
			t.Errorf("assertion %q failed unexpectedly: %s", x.assertion, err)
		}
		if !x.valid && err == nil {
			t.Errorf("assertion %q passed unexpectedly", x.assertion)
		}
	}
}

// TestIdentifyWithTracking runs the same table with tracking enabled, as a signed-up
// fake user.
func TestIdentifyWithTracking(t *testing.T) {
	tc := SetupEngineTest(t, "Identify")
	defer tc.Cleanup()

	CreateAndSignupFakeUser(t, "login")

	for _, x := range idtests {
		ctx := &Context{IdentifyUI: &FakeIdentifyUI{}}
		eng := NewIdentify(NewIdentifyArg(x.assertion, true))
		err := RunEngine(eng, ctx)
		if x.valid && err != nil {
			t.Errorf("assertion %q failed unexpectedly: %s", x.assertion, err)
		}
		if !x.valid && err == nil {
			t.Errorf("assertion %q passed unexpectedly", x.assertion)
		}
	}
}

// TestIdentifySelf makes sure that you can identify yourself, via
// empty assertion or your username. It also tests that the
// withTracking flag can be set to true or false and both will
// work. This is so clients can identify w/ tracking if they
// don't know if the user is identifying themselves.
func TestIdentifySelf(t *testing.T) {
	tc := SetupEngineTest(t, "Identify")
	defer tc.Cleanup()

	u := CreateAndSignupFakeUser(t, "login")
	assertions := []string{"", u.Username}
	for _, a := range assertions {
		ctx := &Context{IdentifyUI: &FakeIdentifyUI{}}
		eng := NewIdentify(NewIdentifyArg(a, false))
		if err := RunEngine(eng, ctx); err != nil {
			t.Errorf("identify self (%q) (withTracking = false) failed: %s", a, err)
		}
		eng = NewIdentify(NewIdentifyArg(a, true))
		if err := RunEngine(eng, ctx); err != nil {
			t.Errorf("identify self (%q) (withTracking = true) failed: %s", a, err)
		}
	}
}
// Copyright 2012, Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tabletmanager import ( "fmt" "time" log "github.com/golang/glog" "github.com/youtube/vitess/go/rpcwrap" rpcproto "github.com/youtube/vitess/go/rpcwrap/proto" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/rpc" "github.com/youtube/vitess/go/vt/topo" ) // This file contains the RPC methods for the tablet manager. // rpcTimeout is used for timing out the queries on the server in a // reasonable amount of time. The actions are stored in the // topo.Server, and if the client goes away, it cleans up the action // node, and the server doesn't do the action. In the RPC case, if the // client goes away (while waiting on the action mutex), the server // won't know, and may still execute the RPC call at a later time. // To prevent that, if it takes more than rpcTimeout to take the action mutex, // we return an error to the caller. const rpcTimeout = time.Second * 30 // // Utility functions for RPC service // // we keep track of the agent so we can use its tabletAlias, ts, ... 
type TabletManager struct { agent *ActionAgent mysqld *mysqlctl.Mysqld } var TabletManagerRpcService *TabletManager func (agent *ActionAgent) RegisterQueryService(mysqld *mysqlctl.Mysqld) { if TabletManagerRpcService != nil { log.Warningf("RPC service already up %v", TabletManagerRpcService) return } TabletManagerRpcService = &TabletManager{agent, mysqld} rpcwrap.RegisterAuthenticated(TabletManagerRpcService) } func (tm *TabletManager) rpcWrapper(context *rpcproto.Context, name string, args, reply interface{}, f func() error, lock, runAfterAction, reloadSchema bool) (err error) { defer func() { if x := recover(); x != nil { log.Errorf("TabletManager.%v(%v) panic: %v", name, args, x) err = fmt.Errorf("caught panic during %v: %v", name, x) } }() if lock { beforeLock := time.Now() tm.agent.actionMutex.Lock() defer tm.agent.actionMutex.Unlock() if time.Now().Sub(beforeLock) > rpcTimeout { return fmt.Errorf("server timeout for " + name) } } if err = f(); err != nil { log.Warningf("TabletManager.%v(%v)(from %v) error: %v", name, args, context.RemoteAddr, err.Error()) return fmt.Errorf("TabletManager.%v on %v error: %v", name, tm.agent.tabletAlias, err) } log.Infof("TabletManager.%v(%v)(from %v): %v", name, args, context.RemoteAddr, reply) if runAfterAction { tm.agent.afterAction("RPC("+name+")", reloadSchema) } return } // There are multiple kinds of actions: // 1 - read-only actions that can be executed in parallel. // 2 - read-write actions that change something, and need to take the // action lock. // 3 - read-write actions that need to take the action lock, and also // need to reload the tablet state. // 4 - read-write actions that need to take the action lock, need to // reload the tablet state, and reload the schema afterwards. 
func (tm *TabletManager) type1Wrapper(context *rpcproto.Context, name string, args, reply interface{}, f func() error) error { return tm.rpcWrapper(context, name, args, reply, f, false /*lock*/, false /*runAfterAction*/, false /*reloadSchema*/) } func (tm *TabletManager) type2Wrapper(context *rpcproto.Context, name string, args, reply interface{}, f func() error) error { return tm.rpcWrapper(context, name, args, reply, f, true /*lock*/, false /*runAfterAction*/, false /*reloadSchema*/) } func (tm *TabletManager) type3Wrapper(context *rpcproto.Context, name string, args, reply interface{}, f func() error) error { return tm.rpcWrapper(context, name, args, reply, f, true /*lock*/, true /*runAfterAction*/, false /*reloadSchema*/) } func (tm *TabletManager) type4Wrapper(context *rpcproto.Context, name string, args, reply interface{}, f func() error) error { return tm.rpcWrapper(context, name, args, reply, f, true /*lock*/, true /*runAfterAction*/, true /*reloadSchema*/) } // // Various read-only methods // func (tm *TabletManager) Ping(context *rpcproto.Context, args, reply *string) error { return tm.type1Wrapper(context, TABLET_ACTION_PING, args, reply, func() error { *reply = *args return nil }) } func (tm *TabletManager) GetSchema(context *rpcproto.Context, args *GetSchemaArgs, reply *mysqlctl.SchemaDefinition) error { return tm.type1Wrapper(context, TABLET_ACTION_GET_SCHEMA, args, reply, func() error { // read the tablet to get the dbname tablet, err := tm.agent.ts.GetTablet(tm.agent.tabletAlias) if err != nil { return err } // and get the schema sd, err := tm.mysqld.GetSchema(tablet.DbName(), args.Tables, args.IncludeViews) if err == nil { *reply = *sd } return err }) } func (tm *TabletManager) GetPermissions(context *rpcproto.Context, args *rpc.UnusedRequest, reply *mysqlctl.Permissions) error { return tm.type1Wrapper(context, TABLET_ACTION_GET_PERMISSIONS, args, reply, func() error { p, err := tm.mysqld.GetPermissions() if err == nil { *reply = *p } return err }) 
} // // Various read-write methods // func (tm *TabletManager) ChangeType(context *rpcproto.Context, args *topo.TabletType, reply *rpc.UnusedResponse) error { return tm.type3Wrapper(context, TABLET_ACTION_CHANGE_TYPE, args, reply, func() error { return ChangeType(tm.agent.ts, tm.agent.tabletAlias, *args, true /*runHooks*/) }) } // // Replication related methods // func (tm *TabletManager) SlavePosition(context *rpcproto.Context, args *rpc.UnusedRequest, reply *mysqlctl.ReplicationPosition) error { return tm.type1Wrapper(context, TABLET_ACTION_SLAVE_POSITION, args, reply, func() error { position, err := tm.mysqld.SlaveStatus() if err == nil { *reply = *position } return err }) } func (tm *TabletManager) WaitSlavePosition(context *rpcproto.Context, args *SlavePositionReq, reply *mysqlctl.ReplicationPosition) error { return tm.type1Wrapper(context, TABLET_ACTION_WAIT_SLAVE_POSITION, args, reply, func() error { if err := tm.mysqld.WaitMasterPos(&args.ReplicationPosition, args.WaitTimeout); err != nil { return err } position, err := tm.mysqld.SlaveStatus() if err == nil { *reply = *position } return err }) } func (tm *TabletManager) MasterPosition(context *rpcproto.Context, args *rpc.UnusedRequest, reply *mysqlctl.ReplicationPosition) error { return tm.type1Wrapper(context, TABLET_ACTION_MASTER_POSITION, args, reply, func() error { position, err := tm.mysqld.MasterStatus() if err == nil { *reply = *position } return err }) } func (tm *TabletManager) StopSlave(context *rpcproto.Context, args *rpc.UnusedRequest, reply *rpc.UnusedResponse) error { return tm.type2Wrapper(context, TABLET_ACTION_STOP_SLAVE, args, reply, func() error { return tm.mysqld.StopSlave(map[string]string{"TABLET_ALIAS": tm.agent.tabletAlias.String()}) }) } func (tm *TabletManager) GetSlaves(context *rpcproto.Context, args *rpc.UnusedRequest, reply *SlaveList) error { return tm.type1Wrapper(context, TABLET_ACTION_GET_SLAVES, args, reply, func() error { var err error reply.Addrs, err = 
tm.mysqld.FindSlaves() return err }) } type WaitBlpPositionArgs struct { BlpPosition mysqlctl.BlpPosition WaitTimeout int } func (tm *TabletManager) WaitBlpPosition(context *rpcproto.Context, args *WaitBlpPositionArgs, reply *rpc.UnusedResponse) error { return tm.type1Wrapper(context, TABLET_ACTION_WAIT_BLP_POSITION, args, reply, func() error { return tm.mysqld.WaitBlpPos(&args.BlpPosition, args.WaitTimeout) }) } // // Reparenting related functions // func (tm *TabletManager) SlaveWasPromoted(context *rpcproto.Context, args *rpc.UnusedRequest, reply *rpc.UnusedResponse) error { return tm.type3Wrapper(context, TABLET_ACTION_SLAVE_WAS_PROMOTED, args, reply, func() error { return slaveWasPromoted(tm.agent.ts, tm.agent.tabletAlias) }) } func (tm *TabletManager) SlaveWasRestarted(context *rpcproto.Context, args *SlaveWasRestartedData, reply *rpc.UnusedResponse) error { return tm.type3Wrapper(context, TABLET_ACTION_SLAVE_WAS_RESTARTED, args, reply, func() error { return slaveWasRestarted(tm.agent.ts, tm.mysqld, tm.agent.tabletAlias, args) }) } Renaming functions to be clearer. // Copyright 2012, Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tabletmanager import ( "fmt" "time" log "github.com/golang/glog" "github.com/youtube/vitess/go/rpcwrap" rpcproto "github.com/youtube/vitess/go/rpcwrap/proto" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/rpc" "github.com/youtube/vitess/go/vt/topo" ) // This file contains the RPC methods for the tablet manager. // rpcTimeout is used for timing out the queries on the server in a // reasonable amount of time. The actions are stored in the // topo.Server, and if the client goes away, it cleans up the action // node, and the server doesn't do the action. In the RPC case, if the // client goes away (while waiting on the action mutex), the server // won't know, and may still execute the RPC call at a later time. 
// To prevent that, if it takes more than rpcTimeout to take the action mutex, // we return an error to the caller. const rpcTimeout = time.Second * 30 // // Utility functions for RPC service // // we keep track of the agent so we can use its tabletAlias, ts, ... type TabletManager struct { agent *ActionAgent mysqld *mysqlctl.Mysqld } var TabletManagerRpcService *TabletManager func (agent *ActionAgent) RegisterQueryService(mysqld *mysqlctl.Mysqld) { if TabletManagerRpcService != nil { log.Warningf("RPC service already up %v", TabletManagerRpcService) return } TabletManagerRpcService = &TabletManager{agent, mysqld} rpcwrap.RegisterAuthenticated(TabletManagerRpcService) } func (tm *TabletManager) rpcWrapper(context *rpcproto.Context, name string, args, reply interface{}, f func() error, lock, runAfterAction, reloadSchema bool) (err error) { defer func() { if x := recover(); x != nil { log.Errorf("TabletManager.%v(%v) panic: %v", name, args, x) err = fmt.Errorf("caught panic during %v: %v", name, x) } }() if lock { beforeLock := time.Now() tm.agent.actionMutex.Lock() defer tm.agent.actionMutex.Unlock() if time.Now().Sub(beforeLock) > rpcTimeout { return fmt.Errorf("server timeout for " + name) } } if err = f(); err != nil { log.Warningf("TabletManager.%v(%v)(from %v) error: %v", name, args, context.RemoteAddr, err.Error()) return fmt.Errorf("TabletManager.%v on %v error: %v", name, tm.agent.tabletAlias, err) } log.Infof("TabletManager.%v(%v)(from %v): %v", name, args, context.RemoteAddr, reply) if runAfterAction { tm.agent.afterAction("RPC("+name+")", reloadSchema) } return } // There are multiple kinds of actions: // 1 - read-only actions that can be executed in parallel. // 2 - read-write actions that change something, and need to take the // action lock. // 3 - read-write actions that need to take the action lock, and also // need to reload the tablet state. 
// 4 - read-write actions that need to take the action lock, need to
// reload the tablet state, and reload the schema afterwards.

// rpcWrap is for type 1 actions: no lock, no state reload, no schema reload.
func (tm *TabletManager) rpcWrap(context *rpcproto.Context, name string, args, reply interface{}, f func() error) error {
	return tm.rpcWrapper(context, name, args, reply, f,
		false /*lock*/, false /*runAfterAction*/, false /*reloadSchema*/)
}

// rpcWrapLock is for type 2 actions: takes the action lock only.
func (tm *TabletManager) rpcWrapLock(context *rpcproto.Context, name string, args, reply interface{}, f func() error) error {
	return tm.rpcWrapper(context, name, args, reply, f,
		true /*lock*/, false /*runAfterAction*/, false /*reloadSchema*/)
}

// rpcWrapLockAction is for type 3 actions: lock plus tablet-state reload.
func (tm *TabletManager) rpcWrapLockAction(context *rpcproto.Context, name string, args, reply interface{}, f func() error) error {
	return tm.rpcWrapper(context, name, args, reply, f,
		true /*lock*/, true /*runAfterAction*/, false /*reloadSchema*/)
}

// rpcWrapLockActionSchema is for type 4 actions: lock, state reload, and
// schema reload.
func (tm *TabletManager) rpcWrapLockActionSchema(context *rpcproto.Context, name string, args, reply interface{}, f func() error) error {
	return tm.rpcWrapper(context, name, args, reply, f,
		true /*lock*/, true /*runAfterAction*/, true /*reloadSchema*/)
}

//
// Various read-only methods
//

// Ping echoes the argument string back to the caller.
func (tm *TabletManager) Ping(context *rpcproto.Context, args, reply *string) error {
	return tm.rpcWrap(context, TABLET_ACTION_PING, args, reply, func() error {
		*reply = *args
		return nil
	})
}

// GetSchema returns the schema of the tablet's database, filtered by
// args.Tables and args.IncludeViews.
func (tm *TabletManager) GetSchema(context *rpcproto.Context, args *GetSchemaArgs, reply *mysqlctl.SchemaDefinition) error {
	return tm.rpcWrap(context, TABLET_ACTION_GET_SCHEMA, args, reply, func() error {
		// read the tablet to get the dbname
		tablet, err := tm.agent.ts.GetTablet(tm.agent.tabletAlias)
		if err != nil {
			return err
		}

		// and get the schema
		sd, err := tm.mysqld.GetSchema(tablet.DbName(), args.Tables, args.IncludeViews)
		if err == nil {
			*reply = *sd
		}
		return err
	})
}

// GetPermissions returns the mysql permission set of the tablet.
func (tm *TabletManager) GetPermissions(context *rpcproto.Context, args *rpc.UnusedRequest, reply *mysqlctl.Permissions) error {
	return tm.rpcWrap(context,
		TABLET_ACTION_GET_PERMISSIONS, args, reply, func() error {
			p, err := tm.mysqld.GetPermissions()
			if err == nil {
				*reply = *p
			}
			return err
		})
}

//
// Various read-write methods
//

// ChangeType changes the tablet to the requested type, running hooks.
func (tm *TabletManager) ChangeType(context *rpcproto.Context, args *topo.TabletType, reply *rpc.UnusedResponse) error {
	return tm.rpcWrapLockAction(context, TABLET_ACTION_CHANGE_TYPE, args, reply, func() error {
		return ChangeType(tm.agent.ts, tm.agent.tabletAlias, *args, true /*runHooks*/)
	})
}

//
// Replication related methods
//

// SlavePosition returns the current replication position of the slave.
func (tm *TabletManager) SlavePosition(context *rpcproto.Context, args *rpc.UnusedRequest, reply *mysqlctl.ReplicationPosition) error {
	return tm.rpcWrap(context, TABLET_ACTION_SLAVE_POSITION, args, reply, func() error {
		position, err := tm.mysqld.SlaveStatus()
		if err == nil {
			*reply = *position
		}
		return err
	})
}

// WaitSlavePosition blocks until the slave reaches the given master
// position (or args.WaitTimeout expires), then returns the slave status.
func (tm *TabletManager) WaitSlavePosition(context *rpcproto.Context, args *SlavePositionReq, reply *mysqlctl.ReplicationPosition) error {
	return tm.rpcWrap(context, TABLET_ACTION_WAIT_SLAVE_POSITION, args, reply, func() error {
		if err := tm.mysqld.WaitMasterPos(&args.ReplicationPosition, args.WaitTimeout); err != nil {
			return err
		}

		position, err := tm.mysqld.SlaveStatus()
		if err == nil {
			*reply = *position
		}
		return err
	})
}

// MasterPosition returns the current master replication position.
func (tm *TabletManager) MasterPosition(context *rpcproto.Context, args *rpc.UnusedRequest, reply *mysqlctl.ReplicationPosition) error {
	return tm.rpcWrap(context, TABLET_ACTION_MASTER_POSITION, args, reply, func() error {
		position, err := tm.mysqld.MasterStatus()
		if err == nil {
			*reply = *position
		}
		return err
	})
}

// StopSlave stops replication on this tablet. Takes the action lock.
func (tm *TabletManager) StopSlave(context *rpcproto.Context, args *rpc.UnusedRequest, reply *rpc.UnusedResponse) error {
	return tm.rpcWrapLock(context, TABLET_ACTION_STOP_SLAVE, args, reply, func() error {
		return tm.mysqld.StopSlave(map[string]string{"TABLET_ALIAS": tm.agent.tabletAlias.String()})
	})
}

// GetSlaves returns the addresses of the slaves replicating from this tablet.
func (tm *TabletManager) GetSlaves(context *rpcproto.Context, args *rpc.UnusedRequest, reply *SlaveList) error {
	return
	tm.rpcWrap(context, TABLET_ACTION_GET_SLAVES, args, reply, func() error {
			var err error
			reply.Addrs, err = tm.mysqld.FindSlaves()
			return err
		})
}

// WaitBlpPositionArgs is the request payload for WaitBlpPosition.
type WaitBlpPositionArgs struct {
	BlpPosition mysqlctl.BlpPosition
	WaitTimeout int
}

// WaitBlpPosition blocks until the binlog player reaches the given
// position, or args.WaitTimeout expires.
func (tm *TabletManager) WaitBlpPosition(context *rpcproto.Context, args *WaitBlpPositionArgs, reply *rpc.UnusedResponse) error {
	return tm.rpcWrap(context, TABLET_ACTION_WAIT_BLP_POSITION, args, reply, func() error {
		return tm.mysqld.WaitBlpPos(&args.BlpPosition, args.WaitTimeout)
	})
}

//
// Reparenting related functions
//

// SlaveWasPromoted updates topology after this slave was externally promoted
// to master. Takes the action lock and reloads tablet state.
func (tm *TabletManager) SlaveWasPromoted(context *rpcproto.Context, args *rpc.UnusedRequest, reply *rpc.UnusedResponse) error {
	return tm.rpcWrapLockAction(context, TABLET_ACTION_SLAVE_WAS_PROMOTED, args, reply, func() error {
		return slaveWasPromoted(tm.agent.ts, tm.agent.tabletAlias)
	})
}

// SlaveWasRestarted updates topology after this slave was restarted under a
// new master. Takes the action lock and reloads tablet state.
func (tm *TabletManager) SlaveWasRestarted(context *rpcproto.Context, args *SlaveWasRestartedData, reply *rpc.UnusedResponse) error {
	return tm.rpcWrapLockAction(context, TABLET_ACTION_SLAVE_WAS_RESTARTED, args, reply, func() error {
		return slaveWasRestarted(tm.agent.ts, tm.mysqld, tm.agent.tabletAlias, args)
	})
}
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package redirect provides hooks to register HTTP handlers that redirect old // godoc paths to their new equivalents and assist in accessing the issue // tracker, wiki, code review system, etc. package redirect import ( "net/http" "regexp" ) // Register registers HTTP handlers that redirect old godoc paths to their new // equivalents and assist in accessing the issue tracker, wiki, code review // system, etc. If mux is nil it uses http.DefaultServeMux. func Register(mux *http.ServeMux) { if mux == nil { mux = http.DefaultServeMux } handlePathRedirects(mux, pkgRedirects, "/pkg/") handlePathRedirects(mux, cmdRedirects, "/cmd/") for prefix, redirect := range prefixHelpers { p := "/" + prefix + "/" mux.Handle(p, PrefixHandler(p, redirect)) } for path, redirect := range redirects { mux.Handle(path, Handler(redirect)) } // NB: /src/pkg (sans trailing slash) is the index of packages. http.HandleFunc("/src/pkg/", srcPkgHandler) } func handlePathRedirects(mux *http.ServeMux, redirects map[string]string, prefix string) { for source, target := range redirects { h := Handler(prefix + target + "/") p := prefix + source mux.Handle(p, h) mux.Handle(p+"/", h) } } // Packages that were renamed between r60 and go1. 
var pkgRedirects = map[string]string{ "asn1": "encoding/asn1", "big": "math/big", "cmath": "math/cmplx", "csv": "encoding/csv", "exec": "os/exec", "exp/template/html": "html/template", "gob": "encoding/gob", "http": "net/http", "http/cgi": "net/http/cgi", "http/fcgi": "net/http/fcgi", "http/httptest": "net/http/httptest", "http/pprof": "net/http/pprof", "json": "encoding/json", "mail": "net/mail", "rand": "math/rand", "rpc": "net/rpc", "rpc/jsonrpc": "net/rpc/jsonrpc", "scanner": "text/scanner", "smtp": "net/smtp", "tabwriter": "text/tabwriter", "template": "text/template", "template/parse": "text/template/parse", "url": "net/url", "utf16": "unicode/utf16", "utf8": "unicode/utf8", "xml": "encoding/xml", } // Commands that were renamed between r60 and go1. var cmdRedirects = map[string]string{ "gofix": "fix", "goinstall": "go", "gopack": "pack", "gotest": "go", "govet": "vet", "goyacc": "yacc", } var redirects = map[string]string{ "/blog": "/blog/", "/build": "http://build.golang.org", "/change": "https://code.google.com/p/go/source/list", "/cl": "https://gocodereview.appspot.com/", "/cmd/godoc/": "http://godoc.org/golang.org/x/tools/cmd/godoc/", "/cmd/vet/": "http://godoc.org/golang.org/x/tools/cmd/vet/", "/issue": "https://code.google.com/p/go/issues", "/issue/new": "https://code.google.com/p/go/issues/entry", "/issues": "https://code.google.com/p/go/issues", "/play": "http://play.golang.org", // In Go 1.2 the references page is part of /doc/. "/ref": "/doc/#references", // This next rule clobbers /ref/spec and /ref/mem. // TODO(adg): figure out what to do here, if anything. // "/ref/": "/doc/#references", // Be nice to people who are looking in the wrong place. 
"/doc/mem": "/ref/mem", "/doc/spec": "/ref/spec", "/talks": "http://talks.golang.org", "/tour": "http://tour.golang.org", "/wiki": "https://code.google.com/p/go-wiki/w/list", "/doc/articles/c_go_cgo.html": "/blog/c-go-cgo", "/doc/articles/concurrency_patterns.html": "/blog/go-concurrency-patterns-timing-out-and", "/doc/articles/defer_panic_recover.html": "/blog/defer-panic-and-recover", "/doc/articles/error_handling.html": "/blog/error-handling-and-go", "/doc/articles/gobs_of_data.html": "/blog/gobs-of-data", "/doc/articles/godoc_documenting_go_code.html": "/blog/godoc-documenting-go-code", "/doc/articles/gos_declaration_syntax.html": "/blog/gos-declaration-syntax", "/doc/articles/image_draw.html": "/blog/go-imagedraw-package", "/doc/articles/image_package.html": "/blog/go-image-package", "/doc/articles/json_and_go.html": "/blog/json-and-go", "/doc/articles/json_rpc_tale_of_interfaces.html": "/blog/json-rpc-tale-of-interfaces", "/doc/articles/laws_of_reflection.html": "/blog/laws-of-reflection", "/doc/articles/slices_usage_and_internals.html": "/blog/go-slices-usage-and-internals", "/doc/go_for_cpp_programmers.html": "https://code.google.com/p/go-wiki/wiki/GoForCPPProgrammers", "/doc/go_tutorial.html": "http://tour.golang.org/", } var prefixHelpers = map[string]string{ "change": "https://code.google.com/p/go/source/detail?r=", "cl": "https://codereview.appspot.com/", "issue": "https://code.google.com/p/go/issues/detail?id=", "play": "http://play.golang.org/", "review": "https://go-review.googlesource.com/#/q/", "talks": "http://talks.golang.org/", "wiki": "https://code.google.com/p/go-wiki/wiki/", } func Handler(target string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, target, http.StatusMovedPermanently) }) } var validId = regexp.MustCompile(`^[A-Za-z0-9-]*$`) func PrefixHandler(prefix, baseURL string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if p := 
r.URL.Path; p == prefix { // redirect /prefix/ to /prefix http.Redirect(w, r, p[:len(p)-1], http.StatusFound) return } id := r.URL.Path[len(prefix):] if !validId.MatchString(id) { http.Error(w, "Not found", http.StatusNotFound) return } target := baseURL + id http.Redirect(w, r, target, http.StatusFound) }) } // Redirect requests from the old "/src/pkg/foo" to the new "/src/foo". // See http://golang.org/s/go14nopkg func srcPkgHandler(w http.ResponseWriter, r *http.Request) { r.URL.Path = "/src/" + r.URL.Path[len("/src/pkg/"):] http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently) } x/tools/godoc/redirect: handle Rietveld and Gerrit CLs at /cl/ LGTM=rsc, dsymonds R=rsc, dsymonds CC=golang-codereviews https://codereview.appspot.com/177260043 // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package redirect provides hooks to register HTTP handlers that redirect old // godoc paths to their new equivalents and assist in accessing the issue // tracker, wiki, code review system, etc. package redirect import ( "net/http" "regexp" "strings" ) // Register registers HTTP handlers that redirect old godoc paths to their new // equivalents and assist in accessing the issue tracker, wiki, code review // system, etc. If mux is nil it uses http.DefaultServeMux. func Register(mux *http.ServeMux) { if mux == nil { mux = http.DefaultServeMux } handlePathRedirects(mux, pkgRedirects, "/pkg/") handlePathRedirects(mux, cmdRedirects, "/cmd/") for prefix, redirect := range prefixHelpers { p := "/" + prefix + "/" mux.Handle(p, PrefixHandler(p, redirect)) } for path, redirect := range redirects { mux.Handle(path, Handler(redirect)) } // NB: /src/pkg (sans trailing slash) is the index of packages. 
mux.HandleFunc("/src/pkg/", srcPkgHandler) mux.HandleFunc("/cl/", clHandler) } func handlePathRedirects(mux *http.ServeMux, redirects map[string]string, prefix string) { for source, target := range redirects { h := Handler(prefix + target + "/") p := prefix + source mux.Handle(p, h) mux.Handle(p+"/", h) } } // Packages that were renamed between r60 and go1. var pkgRedirects = map[string]string{ "asn1": "encoding/asn1", "big": "math/big", "cmath": "math/cmplx", "csv": "encoding/csv", "exec": "os/exec", "exp/template/html": "html/template", "gob": "encoding/gob", "http": "net/http", "http/cgi": "net/http/cgi", "http/fcgi": "net/http/fcgi", "http/httptest": "net/http/httptest", "http/pprof": "net/http/pprof", "json": "encoding/json", "mail": "net/mail", "rand": "math/rand", "rpc": "net/rpc", "rpc/jsonrpc": "net/rpc/jsonrpc", "scanner": "text/scanner", "smtp": "net/smtp", "tabwriter": "text/tabwriter", "template": "text/template", "template/parse": "text/template/parse", "url": "net/url", "utf16": "unicode/utf16", "utf8": "unicode/utf8", "xml": "encoding/xml", } // Commands that were renamed between r60 and go1. var cmdRedirects = map[string]string{ "gofix": "fix", "goinstall": "go", "gopack": "pack", "gotest": "go", "govet": "vet", "goyacc": "yacc", } var redirects = map[string]string{ "/blog": "/blog/", "/build": "http://build.golang.org", "/change": "https://code.google.com/p/go/source/list", "/cl": "https://gocodereview.appspot.com/", "/cmd/godoc/": "http://godoc.org/golang.org/x/tools/cmd/godoc/", "/cmd/vet/": "http://godoc.org/golang.org/x/tools/cmd/vet/", "/issue": "https://code.google.com/p/go/issues", "/issue/new": "https://code.google.com/p/go/issues/entry", "/issues": "https://code.google.com/p/go/issues", "/play": "http://play.golang.org", // In Go 1.2 the references page is part of /doc/. "/ref": "/doc/#references", // This next rule clobbers /ref/spec and /ref/mem. // TODO(adg): figure out what to do here, if anything. 
// "/ref/": "/doc/#references", // Be nice to people who are looking in the wrong place. "/doc/mem": "/ref/mem", "/doc/spec": "/ref/spec", "/talks": "http://talks.golang.org", "/tour": "http://tour.golang.org", "/wiki": "https://code.google.com/p/go-wiki/w/list", "/doc/articles/c_go_cgo.html": "/blog/c-go-cgo", "/doc/articles/concurrency_patterns.html": "/blog/go-concurrency-patterns-timing-out-and", "/doc/articles/defer_panic_recover.html": "/blog/defer-panic-and-recover", "/doc/articles/error_handling.html": "/blog/error-handling-and-go", "/doc/articles/gobs_of_data.html": "/blog/gobs-of-data", "/doc/articles/godoc_documenting_go_code.html": "/blog/godoc-documenting-go-code", "/doc/articles/gos_declaration_syntax.html": "/blog/gos-declaration-syntax", "/doc/articles/image_draw.html": "/blog/go-imagedraw-package", "/doc/articles/image_package.html": "/blog/go-image-package", "/doc/articles/json_and_go.html": "/blog/json-and-go", "/doc/articles/json_rpc_tale_of_interfaces.html": "/blog/json-rpc-tale-of-interfaces", "/doc/articles/laws_of_reflection.html": "/blog/laws-of-reflection", "/doc/articles/slices_usage_and_internals.html": "/blog/go-slices-usage-and-internals", "/doc/go_for_cpp_programmers.html": "https://code.google.com/p/go-wiki/wiki/GoForCPPProgrammers", "/doc/go_tutorial.html": "http://tour.golang.org/", } var prefixHelpers = map[string]string{ "change": "https://code.google.com/p/go/source/detail?r=", "issue": "https://code.google.com/p/go/issues/detail?id=", "play": "http://play.golang.org/", "talks": "http://talks.golang.org/", "wiki": "https://code.google.com/p/go-wiki/wiki/", } func Handler(target string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, target, http.StatusMovedPermanently) }) } var validId = regexp.MustCompile(`^[A-Za-z0-9-]*$`) func PrefixHandler(prefix, baseURL string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if p := r.URL.Path; 
p == prefix { // redirect /prefix/ to /prefix http.Redirect(w, r, p[:len(p)-1], http.StatusFound) return } id := r.URL.Path[len(prefix):] if !validId.MatchString(id) { http.Error(w, "Not found", http.StatusNotFound) return } target := baseURL + id http.Redirect(w, r, target, http.StatusFound) }) } // Redirect requests from the old "/src/pkg/foo" to the new "/src/foo". // See http://golang.org/s/go14nopkg func srcPkgHandler(w http.ResponseWriter, r *http.Request) { r.URL.Path = "/src/" + r.URL.Path[len("/src/pkg/"):] http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently) } func clHandler(w http.ResponseWriter, r *http.Request) { const prefix = "/cl/" if p := r.URL.Path; p == prefix { // redirect /prefix/ to /prefix http.Redirect(w, r, p[:len(p)-1], http.StatusFound) return } id := r.URL.Path[len(prefix):] if !validId.MatchString(id) { http.Error(w, "Not found", http.StatusNotFound) return } target := "" if strings.HasPrefix(id, "I") { target = "https://go-review.googlesource.com/#/q/" + id } else { target = "https://codereview.appspot.com/" + id } http.Redirect(w, r, target, http.StatusFound) }
// ----------------------------------------------------------------------------
//
//     This file is copied here by Magic Modules. The code for building up a
//     compute instance object is copied from the manually implemented
//     provider file:
//     third_party/terraform/resources/resource_compute_instance.go
//
// ----------------------------------------------------------------------------

package google

import (
	"fmt"
	"log"
	"strings"

	computeBeta "google.golang.org/api/compute/v0.beta"
	"google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// GetComputeInstanceCaiObject builds the Cloud Asset Inventory Asset for a
// compute instance from the Terraform resource data.
func GetComputeInstanceCaiObject(d TerraformResourceData, config *Config) (Asset, error) {
	name, err := assetName(d, config, "//compute.googleapis.com/projects/{{project}}/zones/{{zone}}/instances/{{name}}")
	if err != nil {
		return Asset{}, err
	}
	if obj, err := GetComputeInstanceApiObject(d, config); err == nil {
		return Asset{
			Name: name,
			Type: "compute.googleapis.com/Instance",
			Resource: &AssetResource{
				Version:              "v1",
				DiscoveryDocumentURI: "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest",
				DiscoveryName:        "Instance",
				Data:                 obj,
			},
		}, nil
	} else {
		return Asset{}, err
	}
}

// GetComputeInstanceApiObject expands the Terraform resource data into the
// API representation of a compute instance, returned as a JSON-style map.
func GetComputeInstanceApiObject(d TerraformResourceData, config *Config) (map[string]interface{}, error) {
	project, err := getProject(d, config)
	if err != nil {
		return nil, err
	}

	// Get the zone
	z, err := getZone(d, config)
	if err != nil {
		return nil, err
	}
	log.Printf("[DEBUG] Loading zone: %s", z)
	// NOTE(review): this is a live compute API call to resolve the zone
	// object that expandComputeInstance needs.
	zone, err := config.clientCompute.Zones.Get(
		project, z).Do()
	if err != nil {
		return nil, fmt.Errorf("Error loading zone '%s': %s", z, err)
	}

	instance, err := expandComputeInstance(project, zone, d, config)
	if err != nil {
		return nil, err
	}

	return jsonMap(instance)
}

// expandComputeInstance builds the beta Instance object from resource data.
func expandComputeInstance(project string, zone *compute.Zone, d TerraformResourceData, config *Config) (*computeBeta.Instance, error) {
	// Get the machine type
	var machineTypeUrl string
	if mt, ok := d.GetOk("machine_type"); ok {
		log.Printf("[DEBUG] Loading machine type: %s",
mt.(string)) machineType, err := config.clientCompute.MachineTypes.Get( project, zone.Name, mt.(string)).Do() if err != nil { return nil, fmt.Errorf( "Error loading machine type: %s", err) } machineTypeUrl = machineType.SelfLink } // Build up the list of disks disks := []*computeBeta.AttachedDisk{} if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { bootDisk, err := expandBootDisk(d, config, zone, project) if err != nil { return nil, err } disks = append(disks, bootDisk) } if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { scratchDisks, err := expandScratchDisks(d, config, zone, project) if err != nil { return nil, err } disks = append(disks, scratchDisks...) } attachedDisksCount := d.Get("attached_disk.#").(int) for i := 0; i < attachedDisksCount; i++ { diskConfig := d.Get(fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) disk, err := expandAttachedDisk(diskConfig, d, config) if err != nil { return nil, err } disks = append(disks, disk) } sch := d.Get("scheduling").([]interface{}) var scheduling *computeBeta.Scheduling if len(sch) == 0 { // TF doesn't do anything about defaults inside of nested objects, so if // scheduling hasn't been set, then send it with its default values. 
scheduling = &computeBeta.Scheduling{ AutomaticRestart: googleapi.Bool(true), } } else { prefix := "scheduling.0" scheduling = &computeBeta.Scheduling{ AutomaticRestart: googleapi.Bool(d.Get(prefix + ".automatic_restart").(bool)), Preemptible: d.Get(prefix + ".preemptible").(bool), OnHostMaintenance: d.Get(prefix + ".on_host_maintenance").(string), ForceSendFields: []string{"AutomaticRestart", "Preemptible"}, } } metadata, err := resourceInstanceMetadata(d) if err != nil { return nil, fmt.Errorf("Error creating metadata: %s", err) } networkInterfaces, err := expandNetworkInterfaces(d, config) if err != nil { return nil, fmt.Errorf("Error creating network interfaces: %s", err) } accels, err := expandInstanceGuestAccelerators(d, config) if err != nil { return nil, fmt.Errorf("Error creating guest accelerators: %s", err) } // Create the instance information return &computeBeta.Instance{ CanIpForward: d.Get("can_ip_forward").(bool), Description: d.Get("description").(string), Disks: disks, MachineType: machineTypeUrl, Metadata: metadata, Name: d.Get("name").(string), NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), Labels: expandLabels(d), ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), GuestAccelerators: accels, MinCpuPlatform: d.Get("min_cpu_platform").(string), Scheduling: scheduling, DeletionProtection: d.Get("deletion_protection").(bool), Hostname: d.Get("hostname").(string), ForceSendFields: []string{"CanIpForward", "DeletionProtection"}, }, nil } func expandAttachedDisk(diskConfig map[string]interface{}, d TerraformResourceData, meta interface{}) (*computeBeta.AttachedDisk, error) { config := meta.(*Config) s := diskConfig["source"].(string) var sourceLink string if strings.Contains(s, "regions/") { source, err := ParseRegionDiskFieldValue(s, d, config) if err != nil { return nil, err } sourceLink = source.RelativeLink() } else { source, err := ParseDiskFieldValue(s, d, config) if err != nil { return nil, err 
} sourceLink = source.RelativeLink() } disk := &computeBeta.AttachedDisk{ Source: sourceLink, } if v, ok := diskConfig["mode"]; ok { disk.Mode = v.(string) } if v, ok := diskConfig["device_name"]; ok { disk.DeviceName = v.(string) } if v, ok := diskConfig["disk_encryption_key_raw"]; ok { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ RawKey: v.(string), } } return disk, nil } // See comment on expandInstanceTemplateGuestAccelerators regarding why this // code is duplicated. func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*computeBeta.AcceleratorConfig, error) { configs, ok := d.GetOk("guest_accelerator") if !ok { return nil, nil } accels := configs.([]interface{}) guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(accels)) for _, raw := range accels { data := raw.(map[string]interface{}) if data["count"].(int) == 0 { continue } at, err := ParseAcceleratorFieldValue(data["type"].(string), d, config) if err != nil { return nil, fmt.Errorf("cannot parse accelerator type: %v", err) } guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{ AcceleratorCount: int64(data["count"].(int)), AcceleratorType: at.RelativeLink(), }) } return guestAccelerators, nil } func expandBootDisk(d TerraformResourceData, config *Config, zone *compute.Zone, project string) (*computeBeta.AttachedDisk, error) { disk := &computeBeta.AttachedDisk{ AutoDelete: d.Get("boot_disk.0.auto_delete").(bool), Boot: true, } if v, ok := d.GetOk("boot_disk.0.device_name"); ok { disk.DeviceName = v.(string) } if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ RawKey: v.(string), } } if v, ok := d.GetOk("boot_disk.0.source"); ok { source, err := ParseDiskFieldValue(v.(string), d, config) if err != nil { return nil, err } disk.Source = source.RelativeLink() } if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok { disk.InitializeParams = 
&computeBeta.AttachedDiskInitializeParams{} if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok { disk.InitializeParams.DiskSizeGb = int64(v.(int)) } if v, ok := d.GetOk("boot_disk.0.initialize_params.0.type"); ok { diskTypeName := v.(string) diskType, err := readDiskType(config, d, diskTypeName) if err != nil { return nil, fmt.Errorf("Error loading disk type '%s': %s", diskTypeName, err) } disk.InitializeParams.DiskType = diskType.RelativeLink() } if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); ok { imageName := v.(string) imageUrl, err := resolveImage(config, project, imageName) if err != nil { return nil, fmt.Errorf("Error resolving image name '%s': %s", imageName, err) } disk.InitializeParams.SourceImage = imageUrl } } return disk, nil } func expandScratchDisks(d TerraformResourceData, config *Config, zone *compute.Zone, project string) ([]*computeBeta.AttachedDisk, error) { diskType, err := readDiskType(config, d, "local-ssd") if err != nil { return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err) } n := d.Get("scratch_disk.#").(int) scratchDisks := make([]*computeBeta.AttachedDisk, 0, n) for i := 0; i < n; i++ { scratchDisks = append(scratchDisks, &computeBeta.AttachedDisk{ AutoDelete: true, Type: "SCRATCH", Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string), InitializeParams: &computeBeta.AttachedDiskInitializeParams{ DiskType: diskType.RelativeLink(), }, }) } return scratchDisks, nil } remove compute API usage in third_party/validator/compute_instance.go (#162) Signed-off-by: Modular Magician <86ce7da8ad74b1c583667a2bebd347455fa06219@google.com> // ---------------------------------------------------------------------------- // // This file is copied here by Magic Modules. 
The code for building up a // compute instance object is copied from the manually implemented // provider file: // third_party/terraform/resources/resource_compute_instance.go // // ---------------------------------------------------------------------------- package google import ( "fmt" "strings" computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/googleapi" ) func GetComputeInstanceCaiObject(d TerraformResourceData, config *Config) (Asset, error) { name, err := assetName(d, config, "//compute.googleapis.com/projects/{{project}}/zones/{{zone}}/instances/{{name}}") if err != nil { return Asset{}, err } if obj, err := GetComputeInstanceApiObject(d, config); err == nil { return Asset{ Name: name, Type: "compute.googleapis.com/Instance", Resource: &AssetResource{ Version: "v1", DiscoveryDocumentURI: "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", DiscoveryName: "Instance", Data: obj, }, }, nil } else { return Asset{}, err } } func GetComputeInstanceApiObject(d TerraformResourceData, config *Config) (map[string]interface{}, error) { project, err := getProject(d, config) if err != nil { return nil, err } instance, err := expandComputeInstance(project, d, config) if err != nil { return nil, err } return jsonMap(instance) } func expandComputeInstance(project string, d TerraformResourceData, config *Config) (*computeBeta.Instance, error) { // Get the machine type var machineTypeUrl string if mt, ok := d.GetOk("machine_type"); ok { machineType, err := ParseMachineTypesFieldValue(mt.(string), d, config) if err != nil { return nil, fmt.Errorf( "Error loading machine type: %s", err) } machineTypeUrl = machineType.RelativeLink() } // Build up the list of disks disks := []*computeBeta.AttachedDisk{} if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { bootDisk, err := expandBootDisk(d, config, project) if err != nil { return nil, err } disks = append(disks, bootDisk) } if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { 
scratchDisks, err := expandScratchDisks(d, config, project) if err != nil { return nil, err } disks = append(disks, scratchDisks...) } attachedDisksCount := d.Get("attached_disk.#").(int) for i := 0; i < attachedDisksCount; i++ { diskConfig := d.Get(fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) disk, err := expandAttachedDisk(diskConfig, d, config) if err != nil { return nil, err } disks = append(disks, disk) } sch := d.Get("scheduling").([]interface{}) var scheduling *computeBeta.Scheduling if len(sch) == 0 { // TF doesn't do anything about defaults inside of nested objects, so if // scheduling hasn't been set, then send it with its default values. scheduling = &computeBeta.Scheduling{ AutomaticRestart: googleapi.Bool(true), } } else { prefix := "scheduling.0" scheduling = &computeBeta.Scheduling{ AutomaticRestart: googleapi.Bool(d.Get(prefix + ".automatic_restart").(bool)), Preemptible: d.Get(prefix + ".preemptible").(bool), OnHostMaintenance: d.Get(prefix + ".on_host_maintenance").(string), ForceSendFields: []string{"AutomaticRestart", "Preemptible"}, } } metadata, err := resourceInstanceMetadata(d) if err != nil { return nil, fmt.Errorf("Error creating metadata: %s", err) } networkInterfaces, err := expandNetworkInterfaces(d, config) if err != nil { return nil, fmt.Errorf("Error creating network interfaces: %s", err) } accels, err := expandInstanceGuestAccelerators(d, config) if err != nil { return nil, fmt.Errorf("Error creating guest accelerators: %s", err) } // Create the instance information return &computeBeta.Instance{ CanIpForward: d.Get("can_ip_forward").(bool), Description: d.Get("description").(string), Disks: disks, MachineType: machineTypeUrl, Metadata: metadata, Name: d.Get("name").(string), NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), Labels: expandLabels(d), ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), GuestAccelerators: accels, MinCpuPlatform: 
d.Get("min_cpu_platform").(string), Scheduling: scheduling, DeletionProtection: d.Get("deletion_protection").(bool), Hostname: d.Get("hostname").(string), ForceSendFields: []string{"CanIpForward", "DeletionProtection"}, }, nil } func expandAttachedDisk(diskConfig map[string]interface{}, d TerraformResourceData, meta interface{}) (*computeBeta.AttachedDisk, error) { config := meta.(*Config) s := diskConfig["source"].(string) var sourceLink string if strings.Contains(s, "regions/") { source, err := ParseRegionDiskFieldValue(s, d, config) if err != nil { return nil, err } sourceLink = source.RelativeLink() } else { source, err := ParseDiskFieldValue(s, d, config) if err != nil { return nil, err } sourceLink = source.RelativeLink() } disk := &computeBeta.AttachedDisk{ Source: sourceLink, } if v, ok := diskConfig["mode"]; ok { disk.Mode = v.(string) } if v, ok := diskConfig["device_name"]; ok { disk.DeviceName = v.(string) } if v, ok := diskConfig["disk_encryption_key_raw"]; ok { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ RawKey: v.(string), } } return disk, nil } // See comment on expandInstanceTemplateGuestAccelerators regarding why this // code is duplicated. 
func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*computeBeta.AcceleratorConfig, error) { configs, ok := d.GetOk("guest_accelerator") if !ok { return nil, nil } accels := configs.([]interface{}) guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(accels)) for _, raw := range accels { data := raw.(map[string]interface{}) if data["count"].(int) == 0 { continue } at, err := ParseAcceleratorFieldValue(data["type"].(string), d, config) if err != nil { return nil, fmt.Errorf("cannot parse accelerator type: %v", err) } guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{ AcceleratorCount: int64(data["count"].(int)), AcceleratorType: at.RelativeLink(), }) } return guestAccelerators, nil } func expandBootDisk(d TerraformResourceData, config *Config, project string) (*computeBeta.AttachedDisk, error) { disk := &computeBeta.AttachedDisk{ AutoDelete: d.Get("boot_disk.0.auto_delete").(bool), Boot: true, } if v, ok := d.GetOk("boot_disk.0.device_name"); ok { disk.DeviceName = v.(string) } if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ RawKey: v.(string), } } if v, ok := d.GetOk("boot_disk.0.source"); ok { source, err := ParseDiskFieldValue(v.(string), d, config) if err != nil { return nil, err } disk.Source = source.RelativeLink() } if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok { disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{} if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok { disk.InitializeParams.DiskSizeGb = int64(v.(int)) } if v, ok := d.GetOk("boot_disk.0.initialize_params.0.type"); ok { diskTypeName := v.(string) diskType, err := readDiskType(config, d, diskTypeName) if err != nil { return nil, fmt.Errorf("Error loading disk type '%s': %s", diskTypeName, err) } disk.InitializeParams.DiskType = diskType.RelativeLink() } if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); 
ok { imageName := v.(string) imageUrl, err := resolveImage(config, project, imageName) if err != nil { return nil, fmt.Errorf("Error resolving image name '%s': %s", imageName, err) } disk.InitializeParams.SourceImage = imageUrl } } return disk, nil } func expandScratchDisks(d TerraformResourceData, config *Config, project string) ([]*computeBeta.AttachedDisk, error) { diskType, err := readDiskType(config, d, "local-ssd") if err != nil { return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err) } n := d.Get("scratch_disk.#").(int) scratchDisks := make([]*computeBeta.AttachedDisk, 0, n) for i := 0; i < n; i++ { scratchDisks = append(scratchDisks, &computeBeta.AttachedDisk{ AutoDelete: true, Type: "SCRATCH", Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string), InitializeParams: &computeBeta.AttachedDiskInitializeParams{ DiskType: diskType.RelativeLink(), }, }) } return scratchDisks, nil }
// ---------------------------------------------------------------------------- // // This file is copied here by Magic Modules. The code for building up a // compute instance object is copied from the manually implemented // provider file: // third_party/terraform/resources/resource_compute_instance.go // // ---------------------------------------------------------------------------- package google import ( "fmt" "strings" computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/googleapi" ) func GetComputeInstanceCaiObject(d TerraformResourceData, config *Config) (Asset, error) { name, err := assetName(d, config, "//compute.googleapis.com/projects/{{project}}/zones/{{zone}}/instances/{{name}}") if err != nil { return Asset{}, err } if obj, err := GetComputeInstanceApiObject(d, config); err == nil { return Asset{ Name: name, Type: "compute.googleapis.com/Instance", Resource: &AssetResource{ Version: "v1", DiscoveryDocumentURI: "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", DiscoveryName: "Instance", Data: obj, }, }, nil } else { return Asset{}, err } } func GetComputeInstanceApiObject(d TerraformResourceData, config *Config) (map[string]interface{}, error) { project, err := getProject(d, config) if err != nil { return nil, err } instance, err := expandComputeInstance(project, d, config) if err != nil { return nil, err } return jsonMap(instance) } func expandComputeInstance(project string, d TerraformResourceData, config *Config) (*computeBeta.Instance, error) { // Get the machine type var machineTypeUrl string if mt, ok := d.GetOk("machine_type"); ok { machineType, err := ParseMachineTypesFieldValue(mt.(string), d, config) if err != nil { return nil, fmt.Errorf( "Error loading machine type: %s", err) } machineTypeUrl = machineType.RelativeLink() } // Build up the list of disks disks := []*computeBeta.AttachedDisk{} if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { bootDisk, err := expandBootDisk(d, config, project) if err 
!= nil { return nil, err } disks = append(disks, bootDisk) } if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { scratchDisks, err := expandScratchDisks(d, config, project) if err != nil { return nil, err } disks = append(disks, scratchDisks...) } attachedDisksCount := d.Get("attached_disk.#").(int) for i := 0; i < attachedDisksCount; i++ { diskConfig := d.Get(fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) disk, err := expandAttachedDisk(diskConfig, d, config) if err != nil { return nil, err } disks = append(disks, disk) } sch := d.Get("scheduling").([]interface{}) var scheduling *computeBeta.Scheduling if len(sch) == 0 { // TF doesn't do anything about defaults inside of nested objects, so if // scheduling hasn't been set, then send it with its default values. scheduling = &computeBeta.Scheduling{ AutomaticRestart: googleapi.Bool(true), } } else { prefix := "scheduling.0" scheduling = &computeBeta.Scheduling{ AutomaticRestart: googleapi.Bool(d.Get(prefix + ".automatic_restart").(bool)), Preemptible: d.Get(prefix + ".preemptible").(bool), OnHostMaintenance: d.Get(prefix + ".on_host_maintenance").(string), ForceSendFields: []string{"AutomaticRestart", "Preemptible"}, } } metadata, err := resourceInstanceMetadata(d) if err != nil { return nil, fmt.Errorf("Error creating metadata: %s", err) } networkInterfaces, err := expandNetworkInterfaces(d, config) if err != nil { return nil, fmt.Errorf("Error creating network interfaces: %s", err) } accels, err := expandInstanceGuestAccelerators(d, config) if err != nil { return nil, fmt.Errorf("Error creating guest accelerators: %s", err) } // Create the instance information return &computeBeta.Instance{ CanIpForward: d.Get("can_ip_forward").(bool), Description: d.Get("description").(string), Disks: disks, MachineType: machineTypeUrl, Metadata: metadata, Name: d.Get("name").(string), NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), Labels: expandLabels(d), ServiceAccounts: 
expandServiceAccounts(d.Get("service_account").([]interface{})), GuestAccelerators: accels, MinCpuPlatform: d.Get("min_cpu_platform").(string), Scheduling: scheduling, DeletionProtection: d.Get("deletion_protection").(bool), Hostname: d.Get("hostname").(string), ForceSendFields: []string{"CanIpForward", "DeletionProtection"}, }, nil } func expandAttachedDisk(diskConfig map[string]interface{}, d TerraformResourceData, meta interface{}) (*computeBeta.AttachedDisk, error) { config := meta.(*Config) s := diskConfig["source"].(string) var sourceLink string if strings.Contains(s, "regions/") { source, err := ParseRegionDiskFieldValue(s, d, config) if err != nil { return nil, err } sourceLink = source.RelativeLink() } else { source, err := ParseDiskFieldValue(s, d, config) if err != nil { return nil, err } sourceLink = source.RelativeLink() } disk := &computeBeta.AttachedDisk{ Source: sourceLink, } if v, ok := diskConfig["mode"]; ok { disk.Mode = v.(string) } if v, ok := diskConfig["device_name"]; ok { disk.DeviceName = v.(string) } if v, ok := diskConfig["disk_encryption_key_raw"]; ok { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ RawKey: v.(string), } } return disk, nil } // See comment on expandInstanceTemplateGuestAccelerators regarding why this // code is duplicated. 
func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*computeBeta.AcceleratorConfig, error) { configs, ok := d.GetOk("guest_accelerator") if !ok { return nil, nil } accels := configs.([]interface{}) guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(accels)) for _, raw := range accels { data := raw.(map[string]interface{}) if data["count"].(int) == 0 { continue } at, err := ParseAcceleratorFieldValue(data["type"].(string), d, config) if err != nil { return nil, fmt.Errorf("cannot parse accelerator type: %v", err) } guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{ AcceleratorCount: int64(data["count"].(int)), AcceleratorType: at.RelativeLink(), }) } return guestAccelerators, nil } func expandBootDisk(d TerraformResourceData, config *Config, project string) (*computeBeta.AttachedDisk, error) { disk := &computeBeta.AttachedDisk{ AutoDelete: d.Get("boot_disk.0.auto_delete").(bool), Boot: true, } if v, ok := d.GetOk("boot_disk.0.device_name"); ok { disk.DeviceName = v.(string) } if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ RawKey: v.(string), } } if v, ok := d.GetOk("boot_disk.0.source"); ok { source, err := ParseDiskFieldValue(v.(string), d, config) if err != nil { return nil, err } disk.Source = source.RelativeLink() } if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok { disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{} if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok { disk.InitializeParams.DiskSizeGb = int64(v.(int)) } if v, ok := d.GetOk("boot_disk.0.initialize_params.0.type"); ok { diskTypeName := v.(string) diskType, err := readDiskType(config, d, diskTypeName) if err != nil { return nil, fmt.Errorf("Error loading disk type '%s': %s", diskTypeName, err) } disk.InitializeParams.DiskType = diskType.RelativeLink() } if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); 
ok { imageName := v.(string) imageUrl, err := resolveImage(config, project, imageName) if err != nil { return nil, fmt.Errorf("Error resolving image name '%s': %s", imageName, err) } disk.InitializeParams.SourceImage = imageUrl } } return disk, nil } func expandScratchDisks(d TerraformResourceData, config *Config, project string) ([]*computeBeta.AttachedDisk, error) { diskType, err := readDiskType(config, d, "local-ssd") if err != nil { return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err) } n := d.Get("scratch_disk.#").(int) scratchDisks := make([]*computeBeta.AttachedDisk, 0, n) for i := 0; i < n; i++ { scratchDisks = append(scratchDisks, &computeBeta.AttachedDisk{ AutoDelete: true, Type: "SCRATCH", Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string), InitializeParams: &computeBeta.AttachedDiskInitializeParams{ DiskType: diskType.RelativeLink(), }, }) } return scratchDisks, nil } add mode option to google compute instance boot disk (#198) Signed-off-by: Modular Magician <86ce7da8ad74b1c583667a2bebd347455fa06219@google.com> // ---------------------------------------------------------------------------- // // This file is copied here by Magic Modules. 
The code for building up a // compute instance object is copied from the manually implemented // provider file: // third_party/terraform/resources/resource_compute_instance.go // // ---------------------------------------------------------------------------- package google import ( "fmt" "strings" computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/googleapi" ) func GetComputeInstanceCaiObject(d TerraformResourceData, config *Config) (Asset, error) { name, err := assetName(d, config, "//compute.googleapis.com/projects/{{project}}/zones/{{zone}}/instances/{{name}}") if err != nil { return Asset{}, err } if obj, err := GetComputeInstanceApiObject(d, config); err == nil { return Asset{ Name: name, Type: "compute.googleapis.com/Instance", Resource: &AssetResource{ Version: "v1", DiscoveryDocumentURI: "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", DiscoveryName: "Instance", Data: obj, }, }, nil } else { return Asset{}, err } } func GetComputeInstanceApiObject(d TerraformResourceData, config *Config) (map[string]interface{}, error) { project, err := getProject(d, config) if err != nil { return nil, err } instance, err := expandComputeInstance(project, d, config) if err != nil { return nil, err } return jsonMap(instance) } func expandComputeInstance(project string, d TerraformResourceData, config *Config) (*computeBeta.Instance, error) { // Get the machine type var machineTypeUrl string if mt, ok := d.GetOk("machine_type"); ok { machineType, err := ParseMachineTypesFieldValue(mt.(string), d, config) if err != nil { return nil, fmt.Errorf( "Error loading machine type: %s", err) } machineTypeUrl = machineType.RelativeLink() } // Build up the list of disks disks := []*computeBeta.AttachedDisk{} if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { bootDisk, err := expandBootDisk(d, config, project) if err != nil { return nil, err } disks = append(disks, bootDisk) } if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { 
scratchDisks, err := expandScratchDisks(d, config, project) if err != nil { return nil, err } disks = append(disks, scratchDisks...) } attachedDisksCount := d.Get("attached_disk.#").(int) for i := 0; i < attachedDisksCount; i++ { diskConfig := d.Get(fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) disk, err := expandAttachedDisk(diskConfig, d, config) if err != nil { return nil, err } disks = append(disks, disk) } sch := d.Get("scheduling").([]interface{}) var scheduling *computeBeta.Scheduling if len(sch) == 0 { // TF doesn't do anything about defaults inside of nested objects, so if // scheduling hasn't been set, then send it with its default values. scheduling = &computeBeta.Scheduling{ AutomaticRestart: googleapi.Bool(true), } } else { prefix := "scheduling.0" scheduling = &computeBeta.Scheduling{ AutomaticRestart: googleapi.Bool(d.Get(prefix + ".automatic_restart").(bool)), Preemptible: d.Get(prefix + ".preemptible").(bool), OnHostMaintenance: d.Get(prefix + ".on_host_maintenance").(string), ForceSendFields: []string{"AutomaticRestart", "Preemptible"}, } } metadata, err := resourceInstanceMetadata(d) if err != nil { return nil, fmt.Errorf("Error creating metadata: %s", err) } networkInterfaces, err := expandNetworkInterfaces(d, config) if err != nil { return nil, fmt.Errorf("Error creating network interfaces: %s", err) } accels, err := expandInstanceGuestAccelerators(d, config) if err != nil { return nil, fmt.Errorf("Error creating guest accelerators: %s", err) } // Create the instance information return &computeBeta.Instance{ CanIpForward: d.Get("can_ip_forward").(bool), Description: d.Get("description").(string), Disks: disks, MachineType: machineTypeUrl, Metadata: metadata, Name: d.Get("name").(string), NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), Labels: expandLabels(d), ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), GuestAccelerators: accels, MinCpuPlatform: 
d.Get("min_cpu_platform").(string), Scheduling: scheduling, DeletionProtection: d.Get("deletion_protection").(bool), Hostname: d.Get("hostname").(string), ForceSendFields: []string{"CanIpForward", "DeletionProtection"}, }, nil } func expandAttachedDisk(diskConfig map[string]interface{}, d TerraformResourceData, meta interface{}) (*computeBeta.AttachedDisk, error) { config := meta.(*Config) s := diskConfig["source"].(string) var sourceLink string if strings.Contains(s, "regions/") { source, err := ParseRegionDiskFieldValue(s, d, config) if err != nil { return nil, err } sourceLink = source.RelativeLink() } else { source, err := ParseDiskFieldValue(s, d, config) if err != nil { return nil, err } sourceLink = source.RelativeLink() } disk := &computeBeta.AttachedDisk{ Source: sourceLink, } if v, ok := diskConfig["mode"]; ok { disk.Mode = v.(string) } if v, ok := diskConfig["device_name"]; ok { disk.DeviceName = v.(string) } if v, ok := diskConfig["disk_encryption_key_raw"]; ok { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ RawKey: v.(string), } } return disk, nil } // See comment on expandInstanceTemplateGuestAccelerators regarding why this // code is duplicated. 
func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*computeBeta.AcceleratorConfig, error) { configs, ok := d.GetOk("guest_accelerator") if !ok { return nil, nil } accels := configs.([]interface{}) guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(accels)) for _, raw := range accels { data := raw.(map[string]interface{}) if data["count"].(int) == 0 { continue } at, err := ParseAcceleratorFieldValue(data["type"].(string), d, config) if err != nil { return nil, fmt.Errorf("cannot parse accelerator type: %v", err) } guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{ AcceleratorCount: int64(data["count"].(int)), AcceleratorType: at.RelativeLink(), }) } return guestAccelerators, nil } func expandBootDisk(d TerraformResourceData, config *Config, project string) (*computeBeta.AttachedDisk, error) { disk := &computeBeta.AttachedDisk{ AutoDelete: d.Get("boot_disk.0.auto_delete").(bool), Boot: true, } if v, ok := d.GetOk("boot_disk.0.device_name"); ok { disk.DeviceName = v.(string) } if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { if v != "" { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ RawKey: v.(string), } } } if v, ok := d.GetOk("boot_disk.0.kms_key_self_link"); ok { if v != "" { disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ KmsKeyName: v.(string), } } } if v, ok := d.GetOk("boot_disk.0.source"); ok { source, err := ParseDiskFieldValue(v.(string), d, config) if err != nil { return nil, err } disk.Source = source.RelativeLink() } if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok { disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{} if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok { disk.InitializeParams.DiskSizeGb = int64(v.(int)) } if v, ok := d.GetOk("boot_disk.0.initialize_params.0.type"); ok { diskTypeName := v.(string) diskType, err := readDiskType(config, d, diskTypeName) if err != nil { return nil, 
fmt.Errorf("Error loading disk type '%s': %s", diskTypeName, err) } disk.InitializeParams.DiskType = diskType.RelativeLink() } if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); ok { imageName := v.(string) imageUrl, err := resolveImage(config, project, imageName) if err != nil { return nil, fmt.Errorf("Error resolving image name '%s': %s", imageName, err) } disk.InitializeParams.SourceImage = imageUrl } if _, ok := d.GetOk("boot_disk.0.initialize_params.0.labels"); ok { disk.InitializeParams.Labels = expandStringMap(d, "boot_disk.0.initialize_params.0.labels") } } if v, ok := d.GetOk("boot_disk.0.mode"); ok { disk.Mode = v.(string) } return disk, nil } func expandScratchDisks(d TerraformResourceData, config *Config, project string) ([]*computeBeta.AttachedDisk, error) { diskType, err := readDiskType(config, d, "local-ssd") if err != nil { return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err) } n := d.Get("scratch_disk.#").(int) scratchDisks := make([]*computeBeta.AttachedDisk, 0, n) for i := 0; i < n; i++ { scratchDisks = append(scratchDisks, &computeBeta.AttachedDisk{ AutoDelete: true, Type: "SCRATCH", Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string), InitializeParams: &computeBeta.AttachedDiskInitializeParams{ DiskType: diskType.RelativeLink(), }, }) } return scratchDisks, nil }
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package state import ( "bytes" "errors" "fmt" "math/rand" "sync" "sync/atomic" "testing" "time" pb "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric/bccsp/factory" "github.com/hyperledger/fabric/common/configtx/test" errors2 "github.com/hyperledger/fabric/common/errors" "github.com/hyperledger/fabric/common/flogging/floggingtest" "github.com/hyperledger/fabric/common/util" "github.com/hyperledger/fabric/core/committer" "github.com/hyperledger/fabric/core/committer/txvalidator" "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/mocks/validator" "github.com/hyperledger/fabric/core/transientstore" "github.com/hyperledger/fabric/gossip/api" "github.com/hyperledger/fabric/gossip/comm" "github.com/hyperledger/fabric/gossip/common" "github.com/hyperledger/fabric/gossip/discovery" "github.com/hyperledger/fabric/gossip/gossip" "github.com/hyperledger/fabric/gossip/privdata" "github.com/hyperledger/fabric/gossip/state/mocks" gutil "github.com/hyperledger/fabric/gossip/util" pcomm "github.com/hyperledger/fabric/protos/common" proto "github.com/hyperledger/fabric/protos/gossip" "github.com/hyperledger/fabric/protos/ledger/rwset" transientstore2 "github.com/hyperledger/fabric/protos/transientstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) var ( portStartRange = 5610 orgID = []byte("ORG1") noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error { return nil } ) type peerIdentityAcceptor func(identity api.PeerIdentityType) error type joinChanMsg struct { } func init() { gutil.SetupTestLogging() factory.InitFactories(nil) } // SequenceNumber returns the sequence number of the block that the message // is derived from func (*joinChanMsg) SequenceNumber() uint64 { return uint64(time.Now().UnixNano()) } // Members returns the organizations of the channel func (jcm *joinChanMsg) Members() []api.OrgIdentityType { 
return []api.OrgIdentityType{orgID} } // AnchorPeersOf returns the anchor peers of the given organization func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer { return []api.AnchorPeer{} } type orgCryptoService struct { } // OrgByPeerIdentity returns the OrgIdentityType // of a given peer identity func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType { return orgID } // Verify verifies a JoinChannelMessage, returns nil on success, // and an error on failure func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error { return nil } type cryptoServiceMock struct { acceptor peerIdentityAcceptor } func (cryptoServiceMock) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) { return time.Now().Add(time.Hour), nil } // GetPKIidOfCert returns the PKI-ID of a peer's identity func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType { return common.PKIidType(peerIdentity) } // VerifyBlock returns nil if the block is properly signed, // else returns error func (*cryptoServiceMock) VerifyBlock(chainID common.ChainID, seqNum uint64, signedBlock []byte) error { return nil } // Sign signs msg with this peer's signing key and outputs // the signature if no error occurred. func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) { clone := make([]byte, len(msg)) copy(clone, msg) return clone, nil } // Verify checks that signature is a valid signature of message under a peer's verification key. // If the verification succeeded, Verify returns nil meaning no error occurred. // If peerCert is nil, then the signature is verified against this peer's verification key. 
func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error { equal := bytes.Equal(signature, message) if !equal { return fmt.Errorf("Wrong signature:%v, %v", signature, message) } return nil } // VerifyByChannel checks that signature is a valid signature of message // under a peer's verification key, but also in the context of a specific channel. // If the verification succeeded, Verify returns nil meaning no error occurred. // If peerIdentity is nil, then the signature is verified against this peer's verification key. func (cs *cryptoServiceMock) VerifyByChannel(chainID common.ChainID, peerIdentity api.PeerIdentityType, signature, message []byte) error { return cs.acceptor(peerIdentity) } func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error { return nil } func bootPeers(portPrefix int, ids ...int) []string { peers := []string{} for _, id := range ids { peers = append(peers, fmt.Sprintf("localhost:%d", id+portPrefix)) } return peers } // Simple presentation of peer which includes only // communication module, gossip and state transfer type peerNode struct { port int g gossip.Gossip s *GossipStateProviderImpl cs *cryptoServiceMock commit committer.Committer } // Shutting down all modules used func (node *peerNode) shutdown() { node.s.Stop() node.g.Stop() } type mockTransientStore struct { } func (*mockTransientStore) PurgeByHeight(maxBlockNumToRetain uint64) error { return nil } func (*mockTransientStore) Persist(txid string, blockHeight uint64, privateSimulationResults *rwset.TxPvtReadWriteSet) error { panic("implement me") } func (*mockTransientStore) PersistWithConfig(txid string, blockHeight uint64, privateSimulationResultsWithConfig *transientstore2.TxPvtReadWriteSetWithConfigInfo) error { panic("implement me") } func (mockTransientStore) GetTxPvtRWSetByTxid(txid string, filter ledger.PvtNsCollFilter) (transientstore.RWSetScanner, error) { panic("implement me") } func (*mockTransientStore) 
PurgeByTxids(txids []string) error { panic("implement me") } type mockCommitter struct { *mock.Mock sync.Mutex } func (mc *mockCommitter) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) { args := mc.Called() return args.Get(0).(ledger.ConfigHistoryRetriever), args.Error(1) } func (mc *mockCommitter) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) { args := mc.Called(blockNum, filter) return args.Get(0).([]*ledger.TxPvtData), args.Error(1) } func (mc *mockCommitter) CommitWithPvtData(blockAndPvtData *ledger.BlockAndPvtData) error { mc.Lock() m := mc.Mock mc.Unlock() m.Called(blockAndPvtData.Block) return nil } func (mc *mockCommitter) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) { args := mc.Called(seqNum) return args.Get(0).(*ledger.BlockAndPvtData), args.Error(1) } func (mc *mockCommitter) LedgerHeight() (uint64, error) { mc.Lock() m := mc.Mock mc.Unlock() args := m.Called() if args.Get(1) == nil { return args.Get(0).(uint64), nil } return args.Get(0).(uint64), args.Get(1).(error) } func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block { if mc.Called(blockSeqs).Get(0) == nil { return nil } return mc.Called(blockSeqs).Get(0).([]*pcomm.Block) } func (*mockCommitter) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) { panic("implement me") } func (*mockCommitter) CommitPvtDataOfOldBlocks(blockPvtData []*ledger.BlockPvtData) ([]*ledger.PvtdataHashMismatch, error) { panic("implement me") } func (*mockCommitter) Close() { } type ramLedger struct { ledger map[uint64]*ledger.BlockAndPvtData sync.RWMutex } func (mock *ramLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) { panic("implement me") } func (mock *ramLedger) CommitPvtDataOfOldBlocks(blockPvtData []*ledger.BlockPvtData) ([]*ledger.PvtdataHashMismatch, error) { panic("implement me") } func (mock *ramLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) 
{ panic("implement me") } func (mock *ramLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) { mock.RLock() defer mock.RUnlock() if block, ok := mock.ledger[blockNum]; !ok { return nil, errors.New(fmt.Sprintf("no block with seq = %d found", blockNum)) } else { return block, nil } } func (mock *ramLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) { panic("implement me") } func (mock *ramLedger) CommitWithPvtData(blockAndPvtdata *ledger.BlockAndPvtData) error { mock.Lock() defer mock.Unlock() if blockAndPvtdata != nil && blockAndPvtdata.Block != nil { mock.ledger[blockAndPvtdata.Block.Header.Number] = blockAndPvtdata return nil } return errors.New("invalid input parameters for block and private data param") } func (mock *ramLedger) GetBlockchainInfo() (*pcomm.BlockchainInfo, error) { mock.RLock() defer mock.RUnlock() currentBlock := mock.ledger[uint64(len(mock.ledger)-1)].Block return &pcomm.BlockchainInfo{ Height: currentBlock.Header.Number + 1, CurrentBlockHash: currentBlock.Header.Hash(), PreviousBlockHash: currentBlock.Header.PreviousHash, }, nil } func (mock *ramLedger) GetBlockByNumber(blockNumber uint64) (*pcomm.Block, error) { mock.RLock() defer mock.RUnlock() if blockAndPvtData, ok := mock.ledger[blockNumber]; !ok { return nil, errors.New(fmt.Sprintf("no block with seq = %d found", blockNumber)) } else { return blockAndPvtData.Block, nil } } func (mock *ramLedger) Close() { } // Default configuration to be used for gossip and communication modules func newGossipConfig(portPrefix, id int, boot ...int) *gossip.Config { port := id + portPrefix return &gossip.Config{ BindPort: port, BootstrapPeers: bootPeers(portPrefix, boot...), ID: fmt.Sprintf("p%d", id), MaxBlockCountToStore: 0, MaxPropagationBurstLatency: time.Duration(10) * time.Millisecond, MaxPropagationBurstSize: 10, PropagateIterations: 1, PropagatePeerNum: 3, PullInterval: time.Duration(4) * 
time.Second, PullPeerNum: 5, InternalEndpoint: fmt.Sprintf("localhost:%d", port), PublishCertPeriod: 10 * time.Second, RequestStateInfoInterval: 4 * time.Second, PublishStateInfoInterval: 4 * time.Second, TimeForMembershipTracker: 5 * time.Second, } } // Create gossip instance func newGossipInstance(config *gossip.Config, mcs api.MessageCryptoService) gossip.Gossip { id := api.PeerIdentityType(config.InternalEndpoint) return gossip.NewGossipServiceWithServer(config, &orgCryptoService{}, mcs, id, nil) } // Create new instance of KVLedger to be used for testing func newCommitter() committer.Committer { cb, _ := test.MakeGenesisBlock("testChain") ldgr := &ramLedger{ ledger: make(map[uint64]*ledger.BlockAndPvtData), } ldgr.CommitWithPvtData(&ledger.BlockAndPvtData{ Block: cb, }) return committer.NewLedgerCommitter(ldgr) } func newPeerNodeWithGossip(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor, g gossip.Gossip) *peerNode { return newPeerNodeWithGossipWithValidator(config, committer, acceptor, g, &validator.MockValidator{}) } // Constructing pseudo peer node, simulating only gossip and state transfer part func newPeerNodeWithGossipWithValidator(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor, g gossip.Gossip, v txvalidator.Validator) *peerNode { cs := &cryptoServiceMock{acceptor: acceptor} // Gossip component based on configuration provided and communication module if g == nil { g = newGossipInstance(config, &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}) } g.JoinChan(&joinChanMsg{}, common.ChainID(util.GetTestChainID())) // Initialize pseudo peer simulator, which has only three // basic parts servicesAdapater := &ServicesMediator{GossipAdapter: g, MCSAdapter: cs} coord := privdata.NewCoordinator(privdata.Support{ Validator: v, TransientStore: &mockTransientStore{}, Committer: committer, }, pcomm.SignedData{}) sp := NewGossipStateProvider(util.GetTestChainID(), servicesAdapater, coord) if 
sp == nil { return nil } return &peerNode{ port: config.BindPort, g: g, s: sp.(*GossipStateProviderImpl), commit: committer, cs: cs, } } // Constructing pseudo peer node, simulating only gossip and state transfer part func newPeerNode(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor) *peerNode { return newPeerNodeWithGossip(config, committer, acceptor, nil) } func TestNilDirectMsg(t *testing.T) { t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 50 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() p.s.handleStateRequest(nil) p.s.directMessage(nil) sMsg, _ := p.s.stateRequestMessage(uint64(10), uint64(8)).NoopSign() req := &comm.ReceivedMessageImpl{ SignedGossipMessage: sMsg, } p.s.directMessage(req) } func TestNilAddPayload(t *testing.T) { t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 100 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() err := p.s.AddPayload(nil) assert.Error(t, err) assert.Contains(t, err.Error(), "nil") } func TestAddPayloadLedgerUnavailable(t *testing.T) { t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, 
make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 150 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() // Simulate a problem in the ledger failedLedger := mock.Mock{} failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger")) mc.Lock() mc.Mock = &failedLedger mc.Unlock() rawblock := pcomm.NewBlock(uint64(1), []byte{}) b, _ := pb.Marshal(rawblock) err := p.s.AddPayload(&proto.Payload{ SeqNum: uint64(1), Data: b, }) assert.Error(t, err) assert.Contains(t, err.Error(), "Failed obtaining ledger height") assert.Contains(t, err.Error(), "cannot query ledger") } func TestLargeBlockGap(t *testing.T) { // Scenario: the peer knows of a peer who has a ledger height much higher // than itself (500 blocks higher). // The peer needs to ask blocks in a way such that the size of the payload buffer // never rises above a certain threshold. t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} blocksPassedToLedger := make(chan uint64, 200) mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) { blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number }) msgsFromPeer := make(chan proto.ReceivedMessage) mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} membership := []discovery.NetworkMember{ { PKIid: common.PKIidType("a"), Endpoint: "a", Properties: &proto.Properties{ LedgerHeight: 500, }, }} g.On("PeersOfChannel", mock.Anything).Return(membership) g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, msgsFromPeer) g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) { msg := arguments.Get(0).(*proto.GossipMessage) // The peer requested a state request req := msg.GetStateRequest() // Construct a skeleton for the response res := &proto.GossipMessage{ Nonce: msg.Nonce, Channel: []byte(util.GetTestChainID()), 
Content: &proto.GossipMessage_StateResponse{ StateResponse: &proto.RemoteStateResponse{}, }, } // Populate the response with payloads according to what the peer asked for seq := req.StartSeqNum; seq <= req.EndSeqNum; seq++ { rawblock := pcomm.NewBlock(seq, []byte{}) b, _ := pb.Marshal(rawblock) payload := &proto.Payload{ SeqNum: seq, Data: b, } res.GetStateResponse().Payloads = append(res.GetStateResponse().Payloads, payload) } // Finally, send the response down the channel the peer expects to receive it from sMsg, _ := res.NoopSign() msgsFromPeer <- &comm.ReceivedMessageImpl{ SignedGossipMessage: sMsg, } }) portPrefix := portStartRange + 200 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() // Process blocks at a speed of 20 Millisecond for each block. // The imaginative peer that responds to state // If the payload buffer expands above defMaxBlockDistance*2 + defAntiEntropyBatchSize blocks, fail the test blockProcessingTime := 20 * time.Millisecond // 10 seconds for total 500 blocks expectedSequence := 1 for expectedSequence < 500 { blockSeq := <-blocksPassedToLedger assert.Equal(t, expectedSequence, int(blockSeq)) // Ensure payload buffer isn't over-populated assert.True(t, p.s.payloads.Size() <= defMaxBlockDistance*2+defAntiEntropyBatchSize, "payload buffer size is %d", p.s.payloads.Size()) expectedSequence++ time.Sleep(blockProcessingTime) } } func TestOverPopulation(t *testing.T) { // Scenario: Add to the state provider blocks // with a gap in between, and ensure that the payload buffer // rejects blocks starting if the distance between the ledger height to the latest // block it contains is bigger than defMaxBlockDistance. 
t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} blocksPassedToLedger := make(chan uint64, 10) mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) { blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number }) mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 250 p := newPeerNode(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor) defer p.shutdown() // Add some blocks in a sequential manner and make sure it works for i := 1; i <= 4; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) b, _ := pb.Marshal(rawblock) assert.NoError(t, p.s.addPayload(&proto.Payload{ SeqNum: uint64(i), Data: b, }, nonBlocking)) } // Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9] // Should succeed for i := 10; i <= defMaxBlockDistance; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) b, _ := pb.Marshal(rawblock) assert.NoError(t, p.s.addPayload(&proto.Payload{ SeqNum: uint64(i), Data: b, }, nonBlocking)) } // Add payloads from defMaxBlockDistance + 2 to defMaxBlockDistance * 10 // Should fail. for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) b, _ := pb.Marshal(rawblock) assert.Error(t, p.s.addPayload(&proto.Payload{ SeqNum: uint64(i), Data: b, }, nonBlocking)) } // Ensure only blocks 1-4 were passed to the ledger close(blocksPassedToLedger) i := 1 for seq := range blocksPassedToLedger { assert.Equal(t, uint64(i), seq) i++ } assert.Equal(t, 5, i) // Ensure we don't store too many blocks in memory sp := p.s assert.True(t, sp.payloads.Size() < defMaxBlockDistance) } func TestBlockingEnqueue(t *testing.T) { // Scenario: In parallel, get blocks from gossip and from the orderer. 
// The blocks from the orderer we get are X2 times the amount of blocks from gossip. // The blocks we get from gossip are random indices, to maximize disruption. t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} blocksPassedToLedger := make(chan uint64, 10) mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) { blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number }) mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 300 p := newPeerNode(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor) defer p.shutdown() numBlocksReceived := 500 receivedBlockCount := 0 // Get a block from the orderer every 1ms go func() { for i := 1; i <= numBlocksReceived; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) b, _ := pb.Marshal(rawblock) block := &proto.Payload{ SeqNum: uint64(i), Data: b, } p.s.AddPayload(block) time.Sleep(time.Millisecond) } }() // Get a block from gossip every 1ms too go func() { rand.Seed(time.Now().UnixNano()) for i := 1; i <= numBlocksReceived/2; i++ { blockSeq := rand.Intn(numBlocksReceived) rawblock := pcomm.NewBlock(uint64(blockSeq), []byte{}) b, _ := pb.Marshal(rawblock) block := &proto.Payload{ SeqNum: uint64(blockSeq), Data: b, } p.s.addPayload(block, nonBlocking) time.Sleep(time.Millisecond) } }() for { receivedBlock := <-blocksPassedToLedger receivedBlockCount++ m := &mock.Mock{} m.On("LedgerHeight", mock.Anything).Return(receivedBlock, nil) m.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) { blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number }) mc.Lock() mc.Mock = m mc.Unlock() assert.Equal(t, receivedBlock, uint64(receivedBlockCount)) if int(receivedBlockCount) == numBlocksReceived { break } time.Sleep(time.Millisecond * 10) } } func 
TestHaltChainProcessing(t *testing.T) { gossipChannel := func(c chan *proto.GossipMessage) <-chan *proto.GossipMessage { return c } makeBlock := func(seq int) []byte { b := &pcomm.Block{ Header: &pcomm.BlockHeader{ Number: uint64(seq), }, Data: &pcomm.BlockData{ Data: [][]byte{}, }, Metadata: &pcomm.BlockMetadata{ Metadata: [][]byte{ {}, {}, {}, {}, }, }, } data, _ := pb.Marshal(b) return data } newBlockMsg := func(i int) *proto.GossipMessage { return &proto.GossipMessage{ Channel: []byte("testchainid"), Content: &proto.GossipMessage_DataMsg{ DataMsg: &proto.DataMessage{ Payload: &proto.Payload{ SeqNum: uint64(i), Data: makeBlock(i), }, }, }, } } l, recorder := floggingtest.NewTestLogger(t) logger = l mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("CommitWithPvtData", mock.Anything) mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} gossipMsgs := make(chan *proto.GossipMessage) g.On("Accept", mock.Anything, false).Return(gossipChannel(gossipMsgs), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{}) v := &validator.MockValidator{} v.On("Validate").Return(&errors2.VSCCExecutionFailureError{ Err: errors.New("foobar"), }).Once() portPrefix := portStartRange + 350 newPeerNodeWithGossipWithValidator(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g, v) gossipMsgs <- newBlockMsg(1) assertLogged(t, recorder, "Got error while committing") assertLogged(t, recorder, "Aborting chain processing") assertLogged(t, recorder, "foobar") } func TestFailures(t *testing.T) { t.Parallel() portPrefix := portStartRange + 400 mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) g.On("PeersOfChannel", 
mock.Anything).Return([]discovery.NetworkMember{}) assert.Panics(t, func() { newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) }) // Reprogram mock mc.Mock = &mock.Mock{} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger")) assert.Nil(t, newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g)) } func TestGossipReception(t *testing.T) { t.Parallel() signalChan := make(chan struct{}) rawblock := &pcomm.Block{ Header: &pcomm.BlockHeader{ Number: uint64(1), }, Data: &pcomm.BlockData{ Data: [][]byte{}, }, Metadata: &pcomm.BlockMetadata{ Metadata: [][]byte{ {}, {}, {}, {}, }, }, } b, _ := pb.Marshal(rawblock) newMsg := func(channel string) *proto.GossipMessage { { return &proto.GossipMessage{ Channel: []byte(channel), Content: &proto.GossipMessage_DataMsg{ DataMsg: &proto.DataMessage{ Payload: &proto.Payload{ SeqNum: 1, Data: b, }, }, }, } } } createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage { c := make(chan *proto.GossipMessage) go func(c chan *proto.GossipMessage) { // Wait for Accept() to be called <-signalChan // Simulate a message reception from the gossip component with an invalid channel c <- newMsg("AAA") // Simulate a message reception from the gossip component c <- newMsg(util.GetTestChainID()) }(c) return c } g := &mocks.GossipMock{} rmc := createChan(signalChan) g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) { signalChan <- struct{}{} }) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{}) mc := &mockCommitter{Mock: &mock.Mock{}} receivedChan := make(chan struct{}) mc.On("CommitWithPvtData", mock.Anything).Run(func(arguments mock.Arguments) { block := arguments.Get(0).(*pcomm.Block) assert.Equal(t, uint64(1), block.Header.Number) receivedChan <- struct{}{} }) mc.On("LedgerHeight", 
mock.Anything).Return(uint64(1), nil) portPrefix := portStartRange + 450 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() select { case <-receivedChan: case <-time.After(time.Second * 15): assert.Fail(t, "Didn't commit a block within a timely manner") } } func TestLedgerHeightFromProperties(t *testing.T) { // Scenario: For each test, spawn a peer and supply it // with a specific mock of PeersOfChannel from peers that // either set both metadata properly, or only the properties, or none, or both. // Ensure the logic handles all of the 4 possible cases as needed t.Parallel() // Returns whether the given networkMember was selected or not wasNetworkMemberSelected := func(t *testing.T, networkMember discovery.NetworkMember, wg *sync.WaitGroup) bool { var wasGivenNetworkMemberSelected int32 finChan := make(chan struct{}) g := &mocks.GossipMock{} g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) { defer wg.Done() msg := arguments.Get(0).(*proto.GossipMessage) assert.NotNil(t, msg.GetStateRequest()) peer := arguments.Get(1).([]*comm.RemotePeer)[0] if bytes.Equal(networkMember.PKIid, peer.PKIID) { atomic.StoreInt32(&wasGivenNetworkMemberSelected, 1) } finChan <- struct{}{} }) g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) defaultPeer := discovery.NetworkMember{ InternalEndpoint: "b", PKIid: common.PKIidType("b"), Properties: &proto.Properties{ LedgerHeight: 5, }, } g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{ defaultPeer, networkMember, }) mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) portPrefix := portStartRange + 500 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() select { case <-time.After(time.Second * 20): t.Fatal("Didn't 
send a request within a timely manner") case <-finChan: } return atomic.LoadInt32(&wasGivenNetworkMemberSelected) == 1 } peerWithProperties := discovery.NetworkMember{ PKIid: common.PKIidType("peerWithoutMetadata"), Properties: &proto.Properties{ LedgerHeight: 10, }, InternalEndpoint: "peerWithoutMetadata", } peerWithoutProperties := discovery.NetworkMember{ PKIid: common.PKIidType("peerWithoutProperties"), InternalEndpoint: "peerWithoutProperties", } tests := []struct { shouldGivenBeSelected bool member discovery.NetworkMember }{ {member: peerWithProperties, shouldGivenBeSelected: true}, {member: peerWithoutProperties, shouldGivenBeSelected: false}, } var wg sync.WaitGroup wg.Add(len(tests)) for _, tst := range tests { go func(shouldGivenBeSelected bool, member discovery.NetworkMember) { assert.Equal(t, shouldGivenBeSelected, wasNetworkMemberSelected(t, member, &wg)) }(tst.shouldGivenBeSelected, tst.member) } wg.Wait() } func TestAccessControl(t *testing.T) { t.Parallel() bootstrapSetSize := 5 bootstrapSet := make([]*peerNode, 0) authorizedPeers := map[string]struct{}{ "localhost:5610": {}, "localhost:5615": {}, "localhost:5618": {}, "localhost:5621": {}, } portPrefix := portStartRange + 600 blockPullPolicy := func(identity api.PeerIdentityType) error { if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized { return nil } return errors.New("Not authorized") } for i := 0; i < bootstrapSetSize; i++ { commit := newCommitter() bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(portPrefix, i), commit, blockPullPolicy)) } defer func() { for _, p := range bootstrapSet { p.shutdown() } }() msgCount := 5 for i := 1; i <= msgCount; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) if b, err := pb.Marshal(rawblock); err == nil { payload := &proto.Payload{ SeqNum: uint64(i), Data: b, } bootstrapSet[0].s.AddPayload(payload) } else { t.Fail() } } standardPeerSetSize := 10 peersSet := make([]*peerNode, 0) for i := 0; i < standardPeerSetSize; i++ { 
commit := newCommitter() peersSet = append(peersSet, newPeerNode(newGossipConfig(portPrefix, bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, blockPullPolicy)) } defer func() { for _, p := range peersSet { p.shutdown() } }() waitUntilTrueOrTimeout(t, func() bool { for _, p := range peersSet { if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeerSetSize-1 { t.Log("Peer discovery has not finished yet") return false } } t.Log("All peer discovered each other!!!") return true }, 30*time.Second) t.Log("Waiting for all blocks to arrive.") waitUntilTrueOrTimeout(t, func() bool { t.Log("Trying to see all authorized peers get all blocks, and all non-authorized didn't") for _, p := range peersSet { height, err := p.commit.LedgerHeight() id := fmt.Sprintf("localhost:%d", p.port) if _, isAuthorized := authorizedPeers[id]; isAuthorized { if height != uint64(msgCount+1) || err != nil { return false } } else { if err == nil && height > 1 { assert.Fail(t, "Peer", id, "got message but isn't authorized! 
Height:", height) } } } t.Log("All peers have same ledger height!!!") return true }, 60*time.Second) } func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) { t.Parallel() bootstrapSetSize := 5 bootstrapSet := make([]*peerNode, 0) portPrefix := portStartRange + 650 for i := 0; i < bootstrapSetSize; i++ { commit := newCommitter() bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(portPrefix, i, 0, 1, 2, 3, 4), commit, noopPeerIdentityAcceptor)) } defer func() { for _, p := range bootstrapSet { p.shutdown() } }() msgCount := 10 for i := 1; i <= msgCount; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) if b, err := pb.Marshal(rawblock); err == nil { payload := &proto.Payload{ SeqNum: uint64(i), Data: b, } bootstrapSet[0].s.AddPayload(payload) } else { t.Fail() } } standartPeersSize := 10 peersSet := make([]*peerNode, 0) for i := 0; i < standartPeersSize; i++ { commit := newCommitter() peersSet = append(peersSet, newPeerNode(newGossipConfig(portPrefix, bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, noopPeerIdentityAcceptor)) } defer func() { for _, p := range peersSet { p.shutdown() } }() waitUntilTrueOrTimeout(t, func() bool { for _, p := range peersSet { if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standartPeersSize-1 { t.Log("Peer discovery has not finished yet") return false } } t.Log("All peer discovered each other!!!") return true }, 30*time.Second) t.Log("Waiting for all blocks to arrive.") waitUntilTrueOrTimeout(t, func() bool { t.Log("Trying to see all peers get all blocks") for _, p := range peersSet { height, err := p.commit.LedgerHeight() if height != uint64(msgCount+1) || err != nil { return false } } t.Log("All peers have same ledger height!!!") return true }, 60*time.Second) } func TestGossipStateProvider_TestStateMessages(t *testing.T) { t.Parallel() portPrefix := portStartRange + 700 bootPeer := newPeerNode(newGossipConfig(portPrefix, 0), newCommitter(), noopPeerIdentityAcceptor) 
defer bootPeer.shutdown() peer := newPeerNode(newGossipConfig(portPrefix, 1, 0), newCommitter(), noopPeerIdentityAcceptor) defer peer.shutdown() naiveStateMsgPredicate := func(message interface{}) bool { return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage() } _, bootCh := bootPeer.g.Accept(naiveStateMsgPredicate, true) _, peerCh := peer.g.Accept(naiveStateMsgPredicate, true) wg := sync.WaitGroup{} wg.Add(2) go func() { msg := <-bootCh t.Log("Bootstrap node got message, ", msg) assert.True(t, msg.GetGossipMessage().GetStateRequest() != nil) msg.Respond(&proto.GossipMessage{ Content: &proto.GossipMessage_StateResponse{StateResponse: &proto.RemoteStateResponse{Payloads: nil}}, }) wg.Done() }() go func() { msg := <-peerCh t.Log("Peer node got an answer, ", msg) assert.True(t, msg.GetGossipMessage().GetStateResponse() != nil) wg.Done() }() readyCh := make(chan struct{}) go func() { wg.Wait() readyCh <- struct{}{} }() time.Sleep(time.Duration(5) * time.Second) t.Log("Sending gossip message with remote state request") chainID := common.ChainID(util.GetTestChainID()) peer.g.Send(&proto.GossipMessage{ Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{StartSeqNum: 0, EndSeqNum: 1}}, }, &comm.RemotePeer{Endpoint: peer.g.PeersOfChannel(chainID)[0].Endpoint, PKIID: peer.g.PeersOfChannel(chainID)[0].PKIid}) t.Log("Waiting until peers exchange messages") select { case <-readyCh: { t.Log("Done!!!") } case <-time.After(time.Duration(10) * time.Second): { t.Fail() } } } // Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into // local ledger, next spawning a new peer waiting for anti-entropy procedure to // complete missing blocks. Since state transfer messages now batched, it is expected // to see _exactly_ two messages with state transfer response. 
// TestNewGossipStateProvider_BatchingOfStateRequest starts one bootstrap peer
// and submits defAntiEntropyBatchSize + 5 blocks into its local ledger, then
// spawns a second peer that has to obtain the missing blocks via the
// anti-entropy procedure. Since state transfer requests are batched, the new
// peer is expected to receive exactly two state-response messages.
func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
	t.Parallel()
	portPrefix := portStartRange + 750
	bootPeer := newPeerNode(newGossipConfig(portPrefix, 0), newCommitter(), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	msgCount := defAntiEntropyBatchSize + 5
	expectedMessagesCnt := 2

	// Commit msgCount blocks into the bootstrap peer's ledger.
	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootPeer.s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	// Second peer starts with an empty ledger and must replicate from the bootstrap peer.
	peer := newPeerNode(newGossipConfig(portPrefix, 1, 0), newCommitter(), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	// Intercept only remote state transfer messages received by the new peer.
	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	messageCh := make(chan struct{})
	stopWaiting := make(chan struct{})

	// Number of submitted messages is defAntiEntropyBatchSize + 5, therefore
	// expected number of batches is expectedMessagesCnt = 2. Following go routine
	// makes sure it receives expected amount of messages and sends signal of success
	// to continue the test
	go func(expected int) {
		cnt := 0
		for cnt < expected {
			select {
			case <-peerCh:
				{
					cnt++
				}
			case <-stopWaiting:
				{
					return
				}
			}
		}
		messageCh <- struct{}{}
	}(expectedMessagesCnt)

	// Waits for message which indicates that expected number of message batches received
	// otherwise timeouts after 2 * defAntiEntropyInterval + 1 seconds
	select {
	case <-messageCh:
		{
			// Once we got message which indicate of two batches being received,
			// making sure messages indeed committed.
			waitUntilTrueOrTimeout(t, func() bool {
				if len(peer.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != 1 {
					t.Log("Peer discovery has not finished yet")
					return false
				}
				t.Log("All peer discovered each other!!!")
				return true
			}, 30*time.Second)

			t.Log("Waiting for all blocks to arrive.")
			waitUntilTrueOrTimeout(t, func() bool {
				t.Log("Trying to see all peers get all blocks")
				height, err := peer.commit.LedgerHeight()
				if height != uint64(msgCount+1) || err != nil {
					return false
				}
				t.Log("All peers have same ledger height!!!")
				return true
			}, 60*time.Second)
		}
	case <-time.After(defAntiEntropyInterval*2 + time.Second*1):
		{
			close(stopWaiting)
			t.Fatal("Expected to receive two batches with missing payloads")
		}
	}
}

// coordinatorMock mocking structure to capture mock interface for
// coord to simulate coord flow during the test
type coordinatorMock struct {
	committer.Committer
	mock.Mock
}

// GetPvtDataAndBlockByNum returns the canned block and private data collections
// registered for the given sequence number (the signed data argument is ignored).
func (mock *coordinatorMock) GetPvtDataAndBlockByNum(seqNum uint64, _ pcomm.SignedData) (*pcomm.Block, gutil.PvtDataCollections, error) {
	args := mock.Called(seqNum)
	return args.Get(0).(*pcomm.Block), args.Get(1).(gutil.PvtDataCollections), args.Error(2)
}

// GetBlockByNum returns the canned block registered for the given sequence number.
func (mock *coordinatorMock) GetBlockByNum(seqNum uint64) (*pcomm.Block, error) {
	args := mock.Called(seqNum)
	return args.Get(0).(*pcomm.Block), args.Error(1)
}

// StoreBlock records the call and returns the configured error
// (the second value of the mock's Return tuple).
func (mock *coordinatorMock) StoreBlock(block *pcomm.Block, data gutil.PvtDataCollections) error {
	args := mock.Called(block, data)
	return args.Error(1)
}

// LedgerHeight returns the configured ledger height and error.
func (mock *coordinatorMock) LedgerHeight() (uint64, error) {
	args := mock.Called()
	return args.Get(0).(uint64), args.Error(1)
}

// Close records the call; no other behavior.
func (mock *coordinatorMock) Close() {
	mock.Called()
}

// StorePvtData used to persist private data into transient store
func (mock *coordinatorMock) StorePvtData(txid string, privData *transientstore2.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
	return mock.Called().Error(0)
}

// receivedMessageMock mocks a received gossip message for state transfer tests.
type receivedMessageMock struct {
	mock.Mock
}

// Ack returns to the sender an acknowledgement for the message
func (mock *receivedMessageMock) Ack(err error) { } func (mock *receivedMessageMock) Respond(msg *proto.GossipMessage) { mock.Called(msg) } func (mock *receivedMessageMock) GetGossipMessage() *proto.SignedGossipMessage { args := mock.Called() return args.Get(0).(*proto.SignedGossipMessage) } func (mock *receivedMessageMock) GetSourceEnvelope() *proto.Envelope { args := mock.Called() return args.Get(0).(*proto.Envelope) } func (mock *receivedMessageMock) GetConnectionInfo() *proto.ConnectionInfo { args := mock.Called() return args.Get(0).(*proto.ConnectionInfo) } type testData struct { block *pcomm.Block pvtData gutil.PvtDataCollections } func TestTransferOfPrivateRWSet(t *testing.T) { t.Parallel() chainID := "testChainID" // First gossip instance g := &mocks.GossipMock{} coord1 := new(coordinatorMock) gossipChannel := make(chan *proto.GossipMessage) commChannel := make(chan proto.ReceivedMessage) gossipChannelFactory := func(ch chan *proto.GossipMessage) <-chan *proto.GossipMessage { return ch } g.On("Accept", mock.Anything, false).Return(gossipChannelFactory(gossipChannel), nil) g.On("Accept", mock.Anything, true).Return(nil, commChannel) g.On("UpdateChannelMetadata", mock.Anything, mock.Anything) g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{}) g.On("Close") coord1.On("LedgerHeight", mock.Anything).Return(uint64(5), nil) var data = map[uint64]*testData{ uint64(2): { block: &pcomm.Block{ Header: &pcomm.BlockHeader{ Number: 2, DataHash: []byte{0, 1, 1, 1}, PreviousHash: []byte{0, 0, 0, 1}, }, Data: &pcomm.BlockData{ Data: [][]byte{{1}, {2}, {3}}, }, }, pvtData: gutil.PvtDataCollections{ { SeqInBlock: uint64(0), WriteSet: &rwset.TxPvtReadWriteSet{ DataModel: rwset.TxReadWriteSet_KV, NsPvtRwset: []*rwset.NsPvtReadWriteSet{ { Namespace: "myCC:v1", CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{ { CollectionName: "mysecrectCollection", Rwset: []byte{1, 2, 3, 4, 5}, }, }, }, }, }, }, }, }, uint64(3): { block: &pcomm.Block{ Header: 
&pcomm.BlockHeader{ Number: 3, DataHash: []byte{1, 1, 1, 1}, PreviousHash: []byte{0, 1, 1, 1}, }, Data: &pcomm.BlockData{ Data: [][]byte{{4}, {5}, {6}}, }, }, pvtData: gutil.PvtDataCollections{ { SeqInBlock: uint64(2), WriteSet: &rwset.TxPvtReadWriteSet{ DataModel: rwset.TxReadWriteSet_KV, NsPvtRwset: []*rwset.NsPvtReadWriteSet{ { Namespace: "otherCC:v1", CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{ { CollectionName: "topClassified", Rwset: []byte{0, 0, 0, 4, 2}, }, }, }, }, }, }, }, }, } for seqNum, each := range data { coord1.On("GetPvtDataAndBlockByNum", seqNum).Return(each.block, each.pvtData, nil /* no error*/) } coord1.On("Close") servicesAdapater := &ServicesMediator{GossipAdapter: g, MCSAdapter: &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}} st := NewGossipStateProvider(chainID, servicesAdapater, coord1) defer st.Stop() // Mocked state request message requestMsg := new(receivedMessageMock) // Get state request message, blocks [2...3] requestGossipMsg := &proto.GossipMessage{ // Copy nonce field from the request, so it will be possible to match response Nonce: 1, Tag: proto.GossipMessage_CHAN_OR_ORG, Channel: []byte(chainID), Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{ StartSeqNum: 2, EndSeqNum: 3, }}, } msg, _ := requestGossipMsg.NoopSign() requestMsg.On("GetGossipMessage").Return(msg) requestMsg.On("GetConnectionInfo").Return(&proto.ConnectionInfo{ Auth: &proto.AuthInfo{}, }) // Channel to send responses back responseChannel := make(chan proto.ReceivedMessage) defer close(responseChannel) requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) { // Get gossip response to respond back on state request response := args.Get(0).(*proto.GossipMessage) // Wrap it up into received response receivedMsg := new(receivedMessageMock) // Create sign response msg, _ := response.NoopSign() // Mock to respond receivedMsg.On("GetGossipMessage").Return(msg) // Send response responseChannel <- 
receivedMsg }) // Send request message via communication channel into state transfer commChannel <- requestMsg // State transfer request should result in state response back response := <-responseChannel // Start the assertion section stateResponse := response.GetGossipMessage().GetStateResponse() assertion := assert.New(t) // Nonce should be equal to Nonce of the request assertion.Equal(response.GetGossipMessage().Nonce, uint64(1)) // Payload should not need be nil assertion.NotNil(stateResponse) assertion.NotNil(stateResponse.Payloads) // Exactly two messages expected assertion.Equal(len(stateResponse.Payloads), 2) // Assert we have all data and it's same as we expected it for _, each := range stateResponse.Payloads { block := &pcomm.Block{} err := pb.Unmarshal(each.Data, block) assertion.NoError(err) assertion.NotNil(block.Header) testBlock, ok := data[block.Header.Number] assertion.True(ok) for i, d := range testBlock.block.Data.Data { assertion.True(bytes.Equal(d, block.Data.Data[i])) } for i, p := range testBlock.pvtData { pvtDataPayload := &proto.PvtDataPayload{} err := pb.Unmarshal(each.PrivateData[i], pvtDataPayload) assertion.NoError(err) pvtRWSet := &rwset.TxPvtReadWriteSet{} err = pb.Unmarshal(pvtDataPayload.Payload, pvtRWSet) assertion.NoError(err) assertion.True(pb.Equal(p.WriteSet, pvtRWSet)) } } } type testPeer struct { *mocks.GossipMock id string gossipChannel chan *proto.GossipMessage commChannel chan proto.ReceivedMessage coord *coordinatorMock } func (t testPeer) Gossip() <-chan *proto.GossipMessage { return t.gossipChannel } func (t testPeer) Comm() chan proto.ReceivedMessage { return t.commChannel } var peers = map[string]testPeer{ "peer1": { id: "peer1", gossipChannel: make(chan *proto.GossipMessage), commChannel: make(chan proto.ReceivedMessage), GossipMock: &mocks.GossipMock{}, coord: new(coordinatorMock), }, "peer2": { id: "peer2", gossipChannel: make(chan *proto.GossipMessage), commChannel: make(chan proto.ReceivedMessage), GossipMock: 
&mocks.GossipMock{}, coord: new(coordinatorMock), }, } func TestTransferOfPvtDataBetweenPeers(t *testing.T) { /* This test covers pretty basic scenario, there are two peers: "peer1" and "peer2", while peer2 missing a few blocks in the ledger therefore asking to replicate those blocks from the first peers. Test going to check that block from one peer will be replicated into second one and have identical content. */ t.Parallel() chainID := "testChainID" // Initialize peer for _, peer := range peers { peer.On("Accept", mock.Anything, false).Return(peer.Gossip(), nil) peer.On("Accept", mock.Anything, true). Return(nil, peer.Comm()). Once(). On("Accept", mock.Anything, true). Return(nil, make(chan proto.ReceivedMessage)) peer.On("UpdateChannelMetadata", mock.Anything, mock.Anything) peer.coord.On("Close") peer.On("Close") } // First peer going to have more advanced ledger peers["peer1"].coord.On("LedgerHeight", mock.Anything).Return(uint64(3), nil) // Second peer has a gap of one block, hence it will have to replicate it from previous peers["peer2"].coord.On("LedgerHeight", mock.Anything).Return(uint64(2), nil) peers["peer1"].coord.On("GetPvtDataAndBlockByNum", uint64(2)).Return(&pcomm.Block{ Header: &pcomm.BlockHeader{ Number: 2, DataHash: []byte{0, 0, 0, 1}, PreviousHash: []byte{0, 1, 1, 1}, }, Data: &pcomm.BlockData{ Data: [][]byte{{4}, {5}, {6}}, }, }, gutil.PvtDataCollections{&ledger.TxPvtData{ SeqInBlock: uint64(1), WriteSet: &rwset.TxPvtReadWriteSet{ DataModel: rwset.TxReadWriteSet_KV, NsPvtRwset: []*rwset.NsPvtReadWriteSet{ { Namespace: "myCC:v1", CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{ { CollectionName: "mysecrectCollection", Rwset: []byte{1, 2, 3, 4, 5}, }, }, }, }, }, }}, nil) // Return membership of the peers member2 := discovery.NetworkMember{ PKIid: common.PKIidType([]byte{2}), Endpoint: "peer2:7051", InternalEndpoint: "peer2:7051", Properties: &proto.Properties{ LedgerHeight: 2, }, } member1 := discovery.NetworkMember{ PKIid: 
common.PKIidType([]byte{1}), Endpoint: "peer1:7051", InternalEndpoint: "peer1:7051", Properties: &proto.Properties{ LedgerHeight: 3, }, } peers["peer1"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member2}) peers["peer2"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member1}) peers["peer2"].On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { request := args.Get(0).(*proto.GossipMessage) requestMsg := new(receivedMessageMock) msg, _ := request.NoopSign() requestMsg.On("GetGossipMessage").Return(msg) requestMsg.On("GetConnectionInfo").Return(&proto.ConnectionInfo{ Auth: &proto.AuthInfo{}, }) requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) { response := args.Get(0).(*proto.GossipMessage) receivedMsg := new(receivedMessageMock) msg, _ := response.NoopSign() receivedMsg.On("GetGossipMessage").Return(msg) // Send response back to the peer peers["peer2"].commChannel <- receivedMsg }) peers["peer1"].commChannel <- requestMsg }) wg := sync.WaitGroup{} wg.Add(1) peers["peer2"].coord.On("StoreBlock", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { wg.Done() // Done once second peer hits commit of the block }).Return([]string{}, nil) // No pvt data to complete and no error cryptoService := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor} mediator := &ServicesMediator{GossipAdapter: peers["peer1"], MCSAdapter: cryptoService} peer1State := NewGossipStateProvider(chainID, mediator, peers["peer1"].coord) defer peer1State.Stop() mediator = &ServicesMediator{GossipAdapter: peers["peer2"], MCSAdapter: cryptoService} peer2State := NewGossipStateProvider(chainID, mediator, peers["peer2"].coord) defer peer2State.Stop() // Make sure state was replicated done := make(chan struct{}) go func() { wg.Wait() done <- struct{}{} }() select { case <-done: break case <-time.After(30 * time.Second): t.Fail() } } func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout 
time.Duration) { ch := make(chan struct{}) go func() { t.Log("Started to spin off, until predicate will be satisfied.") for !predicate() { time.Sleep(1 * time.Second) } ch <- struct{}{} t.Log("Done.") }() select { case <-ch: break case <-time.After(timeout): t.Fatal("Timeout has expired") break } t.Log("Stop waiting until timeout or true") } func assertLogged(t *testing.T, r *floggingtest.Recorder, msg string) { observed := func() bool { return len(r.MessagesContaining(msg)) > 0 } waitUntilTrueOrTimeout(t, observed, 30*time.Second) } [FAB-13366] fix gossip state flake This commit removes time.Sleep() and instead waiting for peer to get connected with bootstrap peer to build membership before sending state message. Change-Id: If132432a710e3006fc9de0e9ea051709e6edbde6 Signed-off-by: Artem Barger <6d127d713cb520aa658cf81ac2373d74ffc1958c@il.ibm.com> /* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package state import ( "bytes" "errors" "fmt" "math/rand" "sync" "sync/atomic" "testing" "time" pb "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric/bccsp/factory" "github.com/hyperledger/fabric/common/configtx/test" errors2 "github.com/hyperledger/fabric/common/errors" "github.com/hyperledger/fabric/common/flogging/floggingtest" "github.com/hyperledger/fabric/common/util" "github.com/hyperledger/fabric/core/committer" "github.com/hyperledger/fabric/core/committer/txvalidator" "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/mocks/validator" "github.com/hyperledger/fabric/core/transientstore" "github.com/hyperledger/fabric/gossip/api" "github.com/hyperledger/fabric/gossip/comm" "github.com/hyperledger/fabric/gossip/common" "github.com/hyperledger/fabric/gossip/discovery" "github.com/hyperledger/fabric/gossip/gossip" "github.com/hyperledger/fabric/gossip/privdata" "github.com/hyperledger/fabric/gossip/state/mocks" gutil "github.com/hyperledger/fabric/gossip/util" pcomm 
"github.com/hyperledger/fabric/protos/common" proto "github.com/hyperledger/fabric/protos/gossip" "github.com/hyperledger/fabric/protos/ledger/rwset" transientstore2 "github.com/hyperledger/fabric/protos/transientstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) var ( portStartRange = 5610 orgID = []byte("ORG1") noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error { return nil } ) type peerIdentityAcceptor func(identity api.PeerIdentityType) error type joinChanMsg struct { } func init() { gutil.SetupTestLogging() factory.InitFactories(nil) } // SequenceNumber returns the sequence number of the block that the message // is derived from func (*joinChanMsg) SequenceNumber() uint64 { return uint64(time.Now().UnixNano()) } // Members returns the organizations of the channel func (jcm *joinChanMsg) Members() []api.OrgIdentityType { return []api.OrgIdentityType{orgID} } // AnchorPeersOf returns the anchor peers of the given organization func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer { return []api.AnchorPeer{} } type orgCryptoService struct { } // OrgByPeerIdentity returns the OrgIdentityType // of a given peer identity func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType { return orgID } // Verify verifies a JoinChannelMessage, returns nil on success, // and an error on failure func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error { return nil } type cryptoServiceMock struct { acceptor peerIdentityAcceptor } func (cryptoServiceMock) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) { return time.Now().Add(time.Hour), nil } // GetPKIidOfCert returns the PKI-ID of a peer's identity func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType { return common.PKIidType(peerIdentity) } // VerifyBlock returns nil if the block is properly signed, // else returns error func (*cryptoServiceMock) 
VerifyBlock(chainID common.ChainID, seqNum uint64, signedBlock []byte) error {
	// Test stub: every block is treated as properly signed.
	return nil
}

// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred.
func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
	// The mock "signature" is simply a copy of the message; Verify below
	// relies on this by comparing signature and message for equality.
	clone := make([]byte, len(msg))
	copy(clone, msg)
	return clone, nil
}

// Verify checks that signature is a valid signature of message under a peer's verification key.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerCert is nil, then the signature is verified against this peer's verification key.
func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
	// Matches the Sign stub above: a valid signature is byte-equal to the message.
	equal := bytes.Equal(signature, message)
	if !equal {
		return fmt.Errorf("Wrong signature:%v, %v", signature, message)
	}
	return nil
}

// VerifyByChannel checks that signature is a valid signature of message
// under a peer's verification key, but also in the context of a specific channel.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerIdentity is nil, then the signature is verified against this peer's verification key.
func (cs *cryptoServiceMock) VerifyByChannel(chainID common.ChainID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
	// Delegate to the configurable acceptor so tests can simulate
	// authorized vs. unauthorized peers (see the block-pull policy tests).
	return cs.acceptor(peerIdentity)
}

func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	// Test stub: every identity is considered valid.
	return nil
}

// bootPeers builds "localhost:<portPrefix+id>" endpoints for the given ids,
// used as the bootstrap peer list of a gossip instance.
func bootPeers(portPrefix int, ids ...int) []string {
	peers := []string{}
	for _, id := range ids {
		peers = append(peers, fmt.Sprintf("localhost:%d", id+portPrefix))
	}
	return peers
}

// Simple presentation of peer which includes only
// communication module, gossip and state transfer
type peerNode struct {
	port   int
	g      gossip.Gossip
	s      *GossipStateProviderImpl
	cs     *cryptoServiceMock
	commit committer.Committer
}

// Shutting down all modules used
func (node *peerNode) shutdown() {
	node.s.Stop()
	node.g.Stop()
}

// mockTransientStore implements only the calls the tests exercise;
// the remaining methods panic to surface unexpected usage.
type mockTransientStore struct {
}

func (*mockTransientStore) PurgeByHeight(maxBlockNumToRetain uint64) error {
	return nil
}

func (*mockTransientStore) Persist(txid string, blockHeight uint64, privateSimulationResults *rwset.TxPvtReadWriteSet) error {
	panic("implement me")
}

func (*mockTransientStore) PersistWithConfig(txid string, blockHeight uint64, privateSimulationResultsWithConfig *transientstore2.TxPvtReadWriteSetWithConfigInfo) error {
	panic("implement me")
}

func (mockTransientStore) GetTxPvtRWSetByTxid(txid string, filter ledger.PvtNsCollFilter) (transientstore.RWSetScanner, error) {
	panic("implement me")
}

func (*mockTransientStore) PurgeByTxids(txids []string) error {
	panic("implement me")
}

// mockCommitter is a testify-backed committer; the embedded *mock.Mock is
// swapped by some tests at runtime, hence the mutex guarding access to it.
type mockCommitter struct {
	*mock.Mock
	sync.Mutex
}

func (mc *mockCommitter) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	args := mc.Called()
	return args.Get(0).(ledger.ConfigHistoryRetriever), args.Error(1)
}

func (mc *mockCommitter) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	args := mc.Called(blockNum, filter)
	return args.Get(0).([]*ledger.TxPvtData), args.Error(1)
}

func (mc *mockCommitter) 
CommitWithPvtData(blockAndPvtData *ledger.BlockAndPvtData) error { mc.Lock() m := mc.Mock mc.Unlock() m.Called(blockAndPvtData.Block) return nil } func (mc *mockCommitter) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) { args := mc.Called(seqNum) return args.Get(0).(*ledger.BlockAndPvtData), args.Error(1) } func (mc *mockCommitter) LedgerHeight() (uint64, error) { mc.Lock() m := mc.Mock mc.Unlock() args := m.Called() if args.Get(1) == nil { return args.Get(0).(uint64), nil } return args.Get(0).(uint64), args.Get(1).(error) } func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block { if mc.Called(blockSeqs).Get(0) == nil { return nil } return mc.Called(blockSeqs).Get(0).([]*pcomm.Block) } func (*mockCommitter) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) { panic("implement me") } func (*mockCommitter) CommitPvtDataOfOldBlocks(blockPvtData []*ledger.BlockPvtData) ([]*ledger.PvtdataHashMismatch, error) { panic("implement me") } func (*mockCommitter) Close() { } type ramLedger struct { ledger map[uint64]*ledger.BlockAndPvtData sync.RWMutex } func (mock *ramLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) { panic("implement me") } func (mock *ramLedger) CommitPvtDataOfOldBlocks(blockPvtData []*ledger.BlockPvtData) ([]*ledger.PvtdataHashMismatch, error) { panic("implement me") } func (mock *ramLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) { panic("implement me") } func (mock *ramLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) { mock.RLock() defer mock.RUnlock() if block, ok := mock.ledger[blockNum]; !ok { return nil, errors.New(fmt.Sprintf("no block with seq = %d found", blockNum)) } else { return block, nil } } func (mock *ramLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) { panic("implement me") } func (mock *ramLedger) 
CommitWithPvtData(blockAndPvtdata *ledger.BlockAndPvtData) error {
	mock.Lock()
	defer mock.Unlock()

	if blockAndPvtdata != nil && blockAndPvtdata.Block != nil {
		mock.ledger[blockAndPvtdata.Block.Header.Number] = blockAndPvtdata
		return nil
	}
	return errors.New("invalid input parameters for block and private data param")
}

// GetBlockchainInfo derives the chain info from the highest stored block.
// NOTE(review): it indexes the map with len(ledger)-1, which assumes block
// sequence numbers are contiguous starting at 0 — confirm committers never
// leave gaps.
func (mock *ramLedger) GetBlockchainInfo() (*pcomm.BlockchainInfo, error) {
	mock.RLock()
	defer mock.RUnlock()
	currentBlock := mock.ledger[uint64(len(mock.ledger)-1)].Block
	return &pcomm.BlockchainInfo{
		Height:            currentBlock.Header.Number + 1,
		CurrentBlockHash:  currentBlock.Header.Hash(),
		PreviousBlockHash: currentBlock.Header.PreviousHash,
	}, nil
}

// GetBlockByNumber returns the block stored under blockNumber, or an error
// if it was never committed.
func (mock *ramLedger) GetBlockByNumber(blockNumber uint64) (*pcomm.Block, error) {
	mock.RLock()
	defer mock.RUnlock()
	blockAndPvtData, ok := mock.ledger[blockNumber]
	if !ok {
		return nil, fmt.Errorf("no block with seq = %d found", blockNumber)
	}
	return blockAndPvtData.Block, nil
}

func (mock *ramLedger) Close() {
}

// Default configuration to be used for gossip and communication modules
func newGossipConfig(portPrefix, id int, boot ...int) *gossip.Config {
	port := id + portPrefix
	return &gossip.Config{
		BindPort:                   port,
		BootstrapPeers:             bootPeers(portPrefix, boot...),
		ID:                         fmt.Sprintf("p%d", id),
		MaxBlockCountToStore:       0,
		MaxPropagationBurstLatency: time.Duration(10) * time.Millisecond,
		MaxPropagationBurstSize:    10,
		PropagateIterations:        1,
		PropagatePeerNum:           3,
		PullInterval:               time.Duration(4) * time.Second,
		PullPeerNum:                5,
		InternalEndpoint:           fmt.Sprintf("localhost:%d", port),
		PublishCertPeriod:          10 * time.Second,
		RequestStateInfoInterval:   4 * time.Second,
		PublishStateInfoInterval:   4 * time.Second,
		TimeForMembershipTracker:   5 * time.Second,
	}
}

// Create gossip instance
func newGossipInstance(config *gossip.Config, mcs api.MessageCryptoService) gossip.Gossip {
	id := api.PeerIdentityType(config.InternalEndpoint)
	return gossip.NewGossipServiceWithServer(config, &orgCryptoService{}, mcs, id, nil)
}
// Create new instance of KVLedger to be used for testing
func newCommitter() committer.Committer {
	// Seed the in-memory ledger with a genesis block so LedgerHeight starts at 1.
	cb, _ := test.MakeGenesisBlock("testChain")
	ldgr := &ramLedger{
		ledger: make(map[uint64]*ledger.BlockAndPvtData),
	}
	ldgr.CommitWithPvtData(&ledger.BlockAndPvtData{
		Block: cb,
	})
	return committer.NewLedgerCommitter(ldgr)
}

// newPeerNodeWithGossip builds a peerNode with a no-op transaction validator.
func newPeerNodeWithGossip(config *gossip.Config, committer committer.Committer,
	acceptor peerIdentityAcceptor, g gossip.Gossip) *peerNode {
	return newPeerNodeWithGossipWithValidator(config, committer, acceptor, g, &validator.MockValidator{})
}

// Constructing pseudo peer node, simulating only gossip and state transfer part
func newPeerNodeWithGossipWithValidator(config *gossip.Config, committer committer.Committer,
	acceptor peerIdentityAcceptor, g gossip.Gossip, v txvalidator.Validator) *peerNode {
	cs := &cryptoServiceMock{acceptor: acceptor}
	// Gossip component based on configuration provided and communication module
	if g == nil {
		g = newGossipInstance(config, &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor})
	}

	g.JoinChan(&joinChanMsg{}, common.ChainID(util.GetTestChainID()))

	// Initialize pseudo peer simulator, which has only three
	// basic parts
	servicesAdapater := &ServicesMediator{GossipAdapter: g, MCSAdapter: cs}
	coord := privdata.NewCoordinator(privdata.Support{
		Validator:      v,
		TransientStore: &mockTransientStore{},
		Committer:      committer,
	}, pcomm.SignedData{})
	sp := NewGossipStateProvider(util.GetTestChainID(), servicesAdapater, coord)
	if sp == nil {
		// Provider construction failed (e.g. ledger height unavailable).
		return nil
	}

	return &peerNode{
		port:   config.BindPort,
		g:      g,
		s:      sp.(*GossipStateProviderImpl),
		commit: committer,
		cs:     cs,
	}
}

// Constructing pseudo peer node, simulating only gossip and state transfer part
func newPeerNode(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor) *peerNode {
	return newPeerNodeWithGossip(config, committer, acceptor, nil)
}

// TestNilDirectMsg verifies the state provider tolerates nil and malformed
// direct messages without panicking.
func TestNilDirectMsg(t *testing.T) {
	t.Parallel()
	mc := &mockCommitter{Mock: &mock.Mock{}}
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 50 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() p.s.handleStateRequest(nil) p.s.directMessage(nil) sMsg, _ := p.s.stateRequestMessage(uint64(10), uint64(8)).NoopSign() req := &comm.ReceivedMessageImpl{ SignedGossipMessage: sMsg, } p.s.directMessage(req) } func TestNilAddPayload(t *testing.T) { t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 100 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() err := p.s.AddPayload(nil) assert.Error(t, err) assert.Contains(t, err.Error(), "nil") } func TestAddPayloadLedgerUnavailable(t *testing.T) { t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 150 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() // Simulate a problem in the ledger failedLedger := mock.Mock{} failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger")) mc.Lock() mc.Mock = &failedLedger mc.Unlock() rawblock := pcomm.NewBlock(uint64(1), []byte{}) b, _ := pb.Marshal(rawblock) err := 
p.s.AddPayload(&proto.Payload{ SeqNum: uint64(1), Data: b, }) assert.Error(t, err) assert.Contains(t, err.Error(), "Failed obtaining ledger height") assert.Contains(t, err.Error(), "cannot query ledger") } func TestLargeBlockGap(t *testing.T) { // Scenario: the peer knows of a peer who has a ledger height much higher // than itself (500 blocks higher). // The peer needs to ask blocks in a way such that the size of the payload buffer // never rises above a certain threshold. t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} blocksPassedToLedger := make(chan uint64, 200) mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) { blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number }) msgsFromPeer := make(chan proto.ReceivedMessage) mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} membership := []discovery.NetworkMember{ { PKIid: common.PKIidType("a"), Endpoint: "a", Properties: &proto.Properties{ LedgerHeight: 500, }, }} g.On("PeersOfChannel", mock.Anything).Return(membership) g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, msgsFromPeer) g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) { msg := arguments.Get(0).(*proto.GossipMessage) // The peer requested a state request req := msg.GetStateRequest() // Construct a skeleton for the response res := &proto.GossipMessage{ Nonce: msg.Nonce, Channel: []byte(util.GetTestChainID()), Content: &proto.GossipMessage_StateResponse{ StateResponse: &proto.RemoteStateResponse{}, }, } // Populate the response with payloads according to what the peer asked for seq := req.StartSeqNum; seq <= req.EndSeqNum; seq++ { rawblock := pcomm.NewBlock(seq, []byte{}) b, _ := pb.Marshal(rawblock) payload := &proto.Payload{ SeqNum: seq, Data: b, } res.GetStateResponse().Payloads = append(res.GetStateResponse().Payloads, payload) } // Finally, send the response down the 
channel the peer expects to receive it from sMsg, _ := res.NoopSign() msgsFromPeer <- &comm.ReceivedMessageImpl{ SignedGossipMessage: sMsg, } }) portPrefix := portStartRange + 200 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() // Process blocks at a speed of 20 Millisecond for each block. // The imaginative peer that responds to state // If the payload buffer expands above defMaxBlockDistance*2 + defAntiEntropyBatchSize blocks, fail the test blockProcessingTime := 20 * time.Millisecond // 10 seconds for total 500 blocks expectedSequence := 1 for expectedSequence < 500 { blockSeq := <-blocksPassedToLedger assert.Equal(t, expectedSequence, int(blockSeq)) // Ensure payload buffer isn't over-populated assert.True(t, p.s.payloads.Size() <= defMaxBlockDistance*2+defAntiEntropyBatchSize, "payload buffer size is %d", p.s.payloads.Size()) expectedSequence++ time.Sleep(blockProcessingTime) } } func TestOverPopulation(t *testing.T) { // Scenario: Add to the state provider blocks // with a gap in between, and ensure that the payload buffer // rejects blocks starting if the distance between the ledger height to the latest // block it contains is bigger than defMaxBlockDistance. 
t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} blocksPassedToLedger := make(chan uint64, 10) mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) { blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number }) mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 250 p := newPeerNode(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor) defer p.shutdown() // Add some blocks in a sequential manner and make sure it works for i := 1; i <= 4; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) b, _ := pb.Marshal(rawblock) assert.NoError(t, p.s.addPayload(&proto.Payload{ SeqNum: uint64(i), Data: b, }, nonBlocking)) } // Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9] // Should succeed for i := 10; i <= defMaxBlockDistance; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) b, _ := pb.Marshal(rawblock) assert.NoError(t, p.s.addPayload(&proto.Payload{ SeqNum: uint64(i), Data: b, }, nonBlocking)) } // Add payloads from defMaxBlockDistance + 2 to defMaxBlockDistance * 10 // Should fail. for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) b, _ := pb.Marshal(rawblock) assert.Error(t, p.s.addPayload(&proto.Payload{ SeqNum: uint64(i), Data: b, }, nonBlocking)) } // Ensure only blocks 1-4 were passed to the ledger close(blocksPassedToLedger) i := 1 for seq := range blocksPassedToLedger { assert.Equal(t, uint64(i), seq) i++ } assert.Equal(t, 5, i) // Ensure we don't store too many blocks in memory sp := p.s assert.True(t, sp.payloads.Size() < defMaxBlockDistance) } func TestBlockingEnqueue(t *testing.T) { // Scenario: In parallel, get blocks from gossip and from the orderer. 
// The blocks from the orderer we get are X2 times the amount of blocks from gossip. // The blocks we get from gossip are random indices, to maximize disruption. t.Parallel() mc := &mockCommitter{Mock: &mock.Mock{}} blocksPassedToLedger := make(chan uint64, 10) mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) { blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number }) mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) portPrefix := portStartRange + 300 p := newPeerNode(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor) defer p.shutdown() numBlocksReceived := 500 receivedBlockCount := 0 // Get a block from the orderer every 1ms go func() { for i := 1; i <= numBlocksReceived; i++ { rawblock := pcomm.NewBlock(uint64(i), []byte{}) b, _ := pb.Marshal(rawblock) block := &proto.Payload{ SeqNum: uint64(i), Data: b, } p.s.AddPayload(block) time.Sleep(time.Millisecond) } }() // Get a block from gossip every 1ms too go func() { rand.Seed(time.Now().UnixNano()) for i := 1; i <= numBlocksReceived/2; i++ { blockSeq := rand.Intn(numBlocksReceived) rawblock := pcomm.NewBlock(uint64(blockSeq), []byte{}) b, _ := pb.Marshal(rawblock) block := &proto.Payload{ SeqNum: uint64(blockSeq), Data: b, } p.s.addPayload(block, nonBlocking) time.Sleep(time.Millisecond) } }() for { receivedBlock := <-blocksPassedToLedger receivedBlockCount++ m := &mock.Mock{} m.On("LedgerHeight", mock.Anything).Return(receivedBlock, nil) m.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) { blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number }) mc.Lock() mc.Mock = m mc.Unlock() assert.Equal(t, receivedBlock, uint64(receivedBlockCount)) if int(receivedBlockCount) == numBlocksReceived { break } time.Sleep(time.Millisecond * 10) } } func 
TestHaltChainProcessing(t *testing.T) { gossipChannel := func(c chan *proto.GossipMessage) <-chan *proto.GossipMessage { return c } makeBlock := func(seq int) []byte { b := &pcomm.Block{ Header: &pcomm.BlockHeader{ Number: uint64(seq), }, Data: &pcomm.BlockData{ Data: [][]byte{}, }, Metadata: &pcomm.BlockMetadata{ Metadata: [][]byte{ {}, {}, {}, {}, }, }, } data, _ := pb.Marshal(b) return data } newBlockMsg := func(i int) *proto.GossipMessage { return &proto.GossipMessage{ Channel: []byte("testchainid"), Content: &proto.GossipMessage_DataMsg{ DataMsg: &proto.DataMessage{ Payload: &proto.Payload{ SeqNum: uint64(i), Data: makeBlock(i), }, }, }, } } l, recorder := floggingtest.NewTestLogger(t) logger = l mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("CommitWithPvtData", mock.Anything) mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) g := &mocks.GossipMock{} gossipMsgs := make(chan *proto.GossipMessage) g.On("Accept", mock.Anything, false).Return(gossipChannel(gossipMsgs), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{}) v := &validator.MockValidator{} v.On("Validate").Return(&errors2.VSCCExecutionFailureError{ Err: errors.New("foobar"), }).Once() portPrefix := portStartRange + 350 newPeerNodeWithGossipWithValidator(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g, v) gossipMsgs <- newBlockMsg(1) assertLogged(t, recorder, "Got error while committing") assertLogged(t, recorder, "Aborting chain processing") assertLogged(t, recorder, "foobar") } func TestFailures(t *testing.T) { t.Parallel() portPrefix := portStartRange + 400 mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil) g := &mocks.GossipMock{} g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) g.On("PeersOfChannel", 
mock.Anything).Return([]discovery.NetworkMember{}) assert.Panics(t, func() { newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) }) // Reprogram mock mc.Mock = &mock.Mock{} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger")) assert.Nil(t, newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g)) } func TestGossipReception(t *testing.T) { t.Parallel() signalChan := make(chan struct{}) rawblock := &pcomm.Block{ Header: &pcomm.BlockHeader{ Number: uint64(1), }, Data: &pcomm.BlockData{ Data: [][]byte{}, }, Metadata: &pcomm.BlockMetadata{ Metadata: [][]byte{ {}, {}, {}, {}, }, }, } b, _ := pb.Marshal(rawblock) newMsg := func(channel string) *proto.GossipMessage { { return &proto.GossipMessage{ Channel: []byte(channel), Content: &proto.GossipMessage_DataMsg{ DataMsg: &proto.DataMessage{ Payload: &proto.Payload{ SeqNum: 1, Data: b, }, }, }, } } } createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage { c := make(chan *proto.GossipMessage) go func(c chan *proto.GossipMessage) { // Wait for Accept() to be called <-signalChan // Simulate a message reception from the gossip component with an invalid channel c <- newMsg("AAA") // Simulate a message reception from the gossip component c <- newMsg(util.GetTestChainID()) }(c) return c } g := &mocks.GossipMock{} rmc := createChan(signalChan) g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) { signalChan <- struct{}{} }) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{}) mc := &mockCommitter{Mock: &mock.Mock{}} receivedChan := make(chan struct{}) mc.On("CommitWithPvtData", mock.Anything).Run(func(arguments mock.Arguments) { block := arguments.Get(0).(*pcomm.Block) assert.Equal(t, uint64(1), block.Header.Number) receivedChan <- struct{}{} }) mc.On("LedgerHeight", 
mock.Anything).Return(uint64(1), nil) portPrefix := portStartRange + 450 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() select { case <-receivedChan: case <-time.After(time.Second * 15): assert.Fail(t, "Didn't commit a block within a timely manner") } } func TestLedgerHeightFromProperties(t *testing.T) { // Scenario: For each test, spawn a peer and supply it // with a specific mock of PeersOfChannel from peers that // either set both metadata properly, or only the properties, or none, or both. // Ensure the logic handles all of the 4 possible cases as needed t.Parallel() // Returns whether the given networkMember was selected or not wasNetworkMemberSelected := func(t *testing.T, networkMember discovery.NetworkMember, wg *sync.WaitGroup) bool { var wasGivenNetworkMemberSelected int32 finChan := make(chan struct{}) g := &mocks.GossipMock{} g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) { defer wg.Done() msg := arguments.Get(0).(*proto.GossipMessage) assert.NotNil(t, msg.GetStateRequest()) peer := arguments.Get(1).([]*comm.RemotePeer)[0] if bytes.Equal(networkMember.PKIid, peer.PKIID) { atomic.StoreInt32(&wasGivenNetworkMemberSelected, 1) } finChan <- struct{}{} }) g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) defaultPeer := discovery.NetworkMember{ InternalEndpoint: "b", PKIid: common.PKIidType("b"), Properties: &proto.Properties{ LedgerHeight: 5, }, } g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{ defaultPeer, networkMember, }) mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) portPrefix := portStartRange + 500 p := newPeerNodeWithGossip(newGossipConfig(portPrefix, 0), mc, noopPeerIdentityAcceptor, g) defer p.shutdown() select { case <-time.After(time.Second * 20): t.Fatal("Didn't 
send a request within a timely manner")
	case <-finChan:
	}
	return atomic.LoadInt32(&wasGivenNetworkMemberSelected) == 1
	}

	// Peer that advertises its ledger height via gossip Properties metadata.
	peerWithProperties := discovery.NetworkMember{
		PKIid: common.PKIidType("peerWithoutMetadata"),
		Properties: &proto.Properties{
			LedgerHeight: 10,
		},
		InternalEndpoint: "peerWithoutMetadata",
	}

	// Peer with no Properties at all — should never be selected as a source.
	peerWithoutProperties := discovery.NetworkMember{
		PKIid:            common.PKIidType("peerWithoutProperties"),
		InternalEndpoint: "peerWithoutProperties",
	}

	tests := []struct {
		shouldGivenBeSelected bool
		member                discovery.NetworkMember
	}{
		{member: peerWithProperties, shouldGivenBeSelected: true},
		{member: peerWithoutProperties, shouldGivenBeSelected: false},
	}

	var wg sync.WaitGroup
	wg.Add(len(tests))
	for _, tst := range tests {
		go func(shouldGivenBeSelected bool, member discovery.NetworkMember) {
			assert.Equal(t, shouldGivenBeSelected, wasNetworkMemberSelected(t, member, &wg))
		}(tst.shouldGivenBeSelected, tst.member)
	}
	wg.Wait()
}

// TestAccessControl spins up 5 bootstrap peers plus 10 standard peers, feeds 5
// blocks into the first bootstrap peer, and verifies that only the peers whose
// endpoint appears in authorizedPeers end up committing the blocks; peers that
// fail blockPullPolicy must stay at ledger height 1.
func TestAccessControl(t *testing.T) {
	t.Parallel()
	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	// Only these endpoints pass the block-pull authorization policy below.
	authorizedPeers := map[string]struct{}{
		"localhost:5610": {},
		"localhost:5615": {},
		"localhost:5618": {},
		"localhost:5621": {},
	}

	portPrefix := portStartRange + 600
	// Policy consulted on state transfer: peer identity must be one of the
	// authorized endpoints above, otherwise block pulling is rejected.
	blockPullPolicy := func(identity api.PeerIdentityType) error {
		if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
			return nil
		}
		return errors.New("Not authorized")
	}
	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter()
		bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(portPrefix, i), commit, blockPullPolicy))
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 5

	// Seed msgCount blocks into the first bootstrap peer only; all other peers
	// must obtain them via gossip/state transfer.
	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeerSetSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeerSetSize; i++ {
		commit := newCommitter()
		peersSet = append(peersSet, newPeerNode(newGossipConfig(portPrefix, bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, blockPullPolicy))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	// Wait for full membership discovery before checking ledger heights.
	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeerSetSize-1 {
				t.Log("Peer discovery has not finished yet")
				return false
			}
		}
		t.Log("All peer discovered each other!!!")
		return true
	}, 30*time.Second)

	t.Log("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		t.Log("Trying to see all authorized peers get all blocks, and all non-authorized didn't")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			id := fmt.Sprintf("localhost:%d", p.port)
			if _, isAuthorized := authorizedPeers[id]; isAuthorized {
				// Authorized peers must reach height msgCount+1 (genesis + msgCount blocks).
				if height != uint64(msgCount+1) || err != nil {
					return false
				}
			} else {
				// Unauthorized peers must not have advanced beyond genesis.
				if err == nil && height > 1 {
					assert.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height)
				}
			}
		}
		t.Log("All peers have same ledger height!!!")
		return true
	}, 60*time.Second)
}

// TestNewGossipStateProvider_SendingManyMessages seeds 10 blocks into one of 5
// bootstrap peers and checks that all 10 additional standard peers eventually
// converge to the same ledger height via gossip state transfer.
func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
	t.Parallel()
	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	portPrefix := portStartRange + 650
	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter()
		bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(portPrefix, i, 0, 1, 2, 3, 4), commit, noopPeerIdentityAcceptor))
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 10

	// Only the first bootstrap peer receives the payloads directly.
	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standartPeersSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standartPeersSize; i++ {
		commit := newCommitter()
		peersSet = append(peersSet, newPeerNode(newGossipConfig(portPrefix, bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, noopPeerIdentityAcceptor))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standartPeersSize-1 {
				t.Log("Peer discovery has not finished yet")
				return false
			}
		}
		t.Log("All peer discovered each other!!!")
		return true
	}, 30*time.Second)

	t.Log("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		t.Log("Trying to see all peers get all blocks")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			if height != uint64(msgCount+1) || err != nil {
				return false
			}
		}
		t.Log("All peers have same ledger height!!!")
		return true
	}, 60*time.Second)
}

// TestGossipStateProvider_TestStateMessages exercises the raw remote-state
// request/response exchange between two peers: the joining peer sends a
// RemoteStateRequest and the bootstrap peer answers it with a
// RemoteStateResponse; the test passes once both sides observe their message.
func TestGossipStateProvider_TestStateMessages(t *testing.T) {
	t.Parallel()
	portPrefix := portStartRange + 700
	bootPeer := newPeerNode(newGossipConfig(portPrefix, 0), newCommitter(), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	peer := newPeerNode(newGossipConfig(portPrefix, 1, 0), newCommitter(), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	// Accept only remote-state gossip messages on both peers.
	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}

	_, bootCh := bootPeer.g.Accept(naiveStateMsgPredicate, true)
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	wg := sync.WaitGroup{}
	wg.Add(2)

	// Bootstrap side: expect a state request and answer with an empty response.
	go func() {
		msg := <-bootCh
		t.Log("Bootstrap node got message, ", msg)
		assert.True(t, msg.GetGossipMessage().GetStateRequest() != nil)
		msg.Respond(&proto.GossipMessage{
			Content: &proto.GossipMessage_StateResponse{StateResponse: &proto.RemoteStateResponse{Payloads: nil}},
		})
		wg.Done()
	}()

	// Requesting side: expect the state response to come back.
	go func() {
		msg := <-peerCh
		t.Log("Peer node got an answer, ", msg)
		assert.True(t, msg.GetGossipMessage().GetStateResponse() != nil)
		wg.Done()
	}()

	readyCh := make(chan struct{})
	go func() {
		wg.Wait()
		readyCh <- struct{}{}
	}()

	chainID := common.ChainID(util.GetTestChainID())
	waitUntilTrueOrTimeout(t, func() bool {
		return len(peer.g.PeersOfChannel(chainID)) == 1
	}, 30*time.Second)

	t.Log("Sending gossip message with remote state request")
	peer.g.Send(&proto.GossipMessage{
		Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{StartSeqNum: 0, EndSeqNum: 1}},
	}, &comm.RemotePeer{Endpoint: peer.g.PeersOfChannel(chainID)[0].Endpoint, PKIID: peer.g.PeersOfChannel(chainID)[0].PKIid})
	t.Log("Waiting until peers exchange messages")

	select {
	case <-readyCh:
		{
			t.Log("Done!!!")
		}
	case <-time.After(time.Duration(10) * time.Second):
		{
			t.Fail()
		}
	}
}

// Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into
// local ledger, next spawning a new peer waiting for anti-entropy procedure to
// complete missing blocks. Since state transfer messages now batched, it is expected
// to see _exactly_ two messages with state transfer response.
// TestNewGossipStateProvider_BatchingOfStateRequest verifies that anti-entropy
// splits a gap of defAntiEntropyBatchSize+5 missing blocks into exactly two
// state-transfer response batches, and that the lagging peer then commits all
// of the missing blocks.
func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
	t.Parallel()
	portPrefix := portStartRange + 750
	bootPeer := newPeerNode(newGossipConfig(portPrefix, 0), newCommitter(), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	msgCount := defAntiEntropyBatchSize + 5
	expectedMessagesCnt := 2

	// Only the bootstrap peer holds the payloads; the second peer must pull
	// them all via anti-entropy.
	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootPeer.s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	peer := newPeerNode(newGossipConfig(portPrefix, 1, 0), newCommitter(), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	messageCh := make(chan struct{})
	stopWaiting := make(chan struct{})

	// Number of submitted messages is defAntiEntropyBatchSize + 5, therefore
	// expected number of batches is expectedMessagesCnt = 2. Following go routine
	// makes sure it receives expected amount of messages and sends signal of success
	// to continue the test
	go func(expected int) {
		cnt := 0
		for cnt < expected {
			select {
			case <-peerCh:
				{
					cnt++
				}
			case <-stopWaiting:
				{
					return
				}
			}
		}
		messageCh <- struct{}{}
	}(expectedMessagesCnt)

	// Waits for message which indicates that expected number of message batches received
	// otherwise timeouts after 2 * defAntiEntropyInterval + 1 seconds
	select {
	case <-messageCh:
		{
			// Once we got message which indicate of two batches being received,
			// making sure messages indeed committed.
			waitUntilTrueOrTimeout(t, func() bool {
				if len(peer.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != 1 {
					t.Log("Peer discovery has not finished yet")
					return false
				}
				t.Log("All peer discovered each other!!!")
				return true
			}, 30*time.Second)

			t.Log("Waiting for all blocks to arrive.")
			waitUntilTrueOrTimeout(t, func() bool {
				t.Log("Trying to see all peers get all blocks")
				height, err := peer.commit.LedgerHeight()
				if height != uint64(msgCount+1) || err != nil {
					return false
				}
				t.Log("All peers have same ledger height!!!")
				return true
			}, 60*time.Second)
		}
	case <-time.After(defAntiEntropyInterval*2 + time.Second*1):
		{
			// Unblock the counting goroutine before failing, so it does not leak.
			close(stopWaiting)
			t.Fatal("Expected to receive two batches with missing payloads")
		}
	}
}

// coordinatorMock mocking structure to capture mock interface for
// coord to simulate coord flow during the test
type coordinatorMock struct {
	committer.Committer
	mock.Mock
}

// GetPvtDataAndBlockByNum returns the canned block and private-data collections
// registered for seqNum. The SignedData argument is ignored by the mock.
func (mock *coordinatorMock) GetPvtDataAndBlockByNum(seqNum uint64, _ pcomm.SignedData) (*pcomm.Block, gutil.PvtDataCollections, error) {
	args := mock.Called(seqNum)
	return args.Get(0).(*pcomm.Block), args.Get(1).(gutil.PvtDataCollections), args.Error(2)
}

// GetBlockByNum returns the canned block registered for seqNum.
func (mock *coordinatorMock) GetBlockByNum(seqNum uint64) (*pcomm.Block, error) {
	args := mock.Called(seqNum)
	return args.Get(0).(*pcomm.Block), args.Error(1)
}

// StoreBlock records the call; the error comes from the second Return value
// (tests register Return(<pvt-data-to-complete>, <error>)).
func (mock *coordinatorMock) StoreBlock(block *pcomm.Block, data gutil.PvtDataCollections) error {
	args := mock.Called(block, data)
	return args.Error(1)
}

// LedgerHeight returns the canned ledger height.
func (mock *coordinatorMock) LedgerHeight() (uint64, error) {
	args := mock.Called()
	return args.Get(0).(uint64), args.Error(1)
}

// Close records the call; no cleanup is performed.
func (mock *coordinatorMock) Close() {
	mock.Called()
}

// StorePvtData used to persist private date into transient store
func (mock *coordinatorMock) StorePvtData(txid string, privData *transientstore2.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
	return mock.Called().Error(0)
}

// receivedMessageMock is a testify mock of proto.ReceivedMessage used to drive
// state-transfer request/response flows without a real network.
type receivedMessageMock struct {
	mock.Mock
}

// Ack returns to the sender an acknowledgement for the message
// Ack is deliberately a no-op in this mock: nothing in these tests observes
// acknowledgements, and the error is intentionally discarded.
func (mock *receivedMessageMock) Ack(err error) {

}

// Respond records the gossip message passed back by the state provider; the
// registered Run callback forwards it onto the test's response channel.
func (mock *receivedMessageMock) Respond(msg *proto.GossipMessage) {
	mock.Called(msg)
}

// GetGossipMessage returns the canned signed gossip message.
func (mock *receivedMessageMock) GetGossipMessage() *proto.SignedGossipMessage {
	args := mock.Called()
	return args.Get(0).(*proto.SignedGossipMessage)
}

// GetSourceEnvelope returns the canned source envelope.
func (mock *receivedMessageMock) GetSourceEnvelope() *proto.Envelope {
	args := mock.Called()
	return args.Get(0).(*proto.Envelope)
}

// GetConnectionInfo returns the canned connection info (tests supply one with
// a non-nil Auth so authentication checks pass).
func (mock *receivedMessageMock) GetConnectionInfo() *proto.ConnectionInfo {
	args := mock.Called()
	return args.Get(0).(*proto.ConnectionInfo)
}

// testData pairs a block with its expected private-data collections for
// table-driven state-response assertions.
type testData struct {
	block   *pcomm.Block
	pvtData gutil.PvtDataCollections
}

// TestTransferOfPrivateRWSet feeds a RemoteStateRequest for blocks [2..3] into
// a state provider backed by mocks and asserts that the RemoteStateResponse
// carries exactly those two blocks together with their private read-write
// sets, matching the canned data byte-for-byte.
func TestTransferOfPrivateRWSet(t *testing.T) {
	t.Parallel()
	chainID := "testChainID"

	// First gossip instance
	g := &mocks.GossipMock{}
	coord1 := new(coordinatorMock)

	gossipChannel := make(chan *proto.GossipMessage)
	commChannel := make(chan proto.ReceivedMessage)

	// Converts the bidirectional channel to the receive-only type Accept returns.
	gossipChannelFactory := func(ch chan *proto.GossipMessage) <-chan *proto.GossipMessage {
		return ch
	}

	g.On("Accept", mock.Anything, false).Return(gossipChannelFactory(gossipChannel), nil)
	g.On("Accept", mock.Anything, true).Return(nil, commChannel)

	g.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
	g.On("Close")

	coord1.On("LedgerHeight", mock.Anything).Return(uint64(5), nil)

	// Canned blocks 2 and 3 with their private rwsets; the state response must
	// reproduce exactly this content.
	var data = map[uint64]*testData{
		uint64(2): {
			block: &pcomm.Block{
				Header: &pcomm.BlockHeader{
					Number:       2,
					DataHash:     []byte{0, 1, 1, 1},
					PreviousHash: []byte{0, 0, 0, 1},
				},
				Data: &pcomm.BlockData{
					Data: [][]byte{{1}, {2}, {3}},
				},
			},
			pvtData: gutil.PvtDataCollections{
				{
					SeqInBlock: uint64(0),
					WriteSet: &rwset.TxPvtReadWriteSet{
						DataModel: rwset.TxReadWriteSet_KV,
						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
							{
								Namespace: "myCC:v1",
								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
									{
										CollectionName: "mysecrectCollection",
										Rwset:          []byte{1, 2, 3, 4, 5},
									},
								},
							},
						},
					},
				},
			},
		},

		uint64(3): {
			block: &pcomm.Block{
				Header: &pcomm.BlockHeader{
					Number:       3,
					DataHash:     []byte{1, 1, 1, 1},
					PreviousHash: []byte{0, 1, 1, 1},
				},
				Data: &pcomm.BlockData{
					Data: [][]byte{{4}, {5}, {6}},
				},
			},
			pvtData: gutil.PvtDataCollections{
				{
					SeqInBlock: uint64(2),
					WriteSet: &rwset.TxPvtReadWriteSet{
						DataModel: rwset.TxReadWriteSet_KV,
						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
							{
								Namespace: "otherCC:v1",
								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
									{
										CollectionName: "topClassified",
										Rwset:          []byte{0, 0, 0, 4, 2},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for seqNum, each := range data {
		coord1.On("GetPvtDataAndBlockByNum", seqNum).Return(each.block, each.pvtData, nil /* no error*/)
	}

	coord1.On("Close")

	servicesAdapater := &ServicesMediator{GossipAdapter: g, MCSAdapter: &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}}
	st := NewGossipStateProvider(chainID, servicesAdapater, coord1)
	defer st.Stop()

	// Mocked state request message
	requestMsg := new(receivedMessageMock)

	// Get state request message, blocks [2...3]
	requestGossipMsg := &proto.GossipMessage{
		// Copy nonce field from the request, so it will be possible to match response
		Nonce:   1,
		Tag:     proto.GossipMessage_CHAN_OR_ORG,
		Channel: []byte(chainID),
		Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{
			StartSeqNum: 2,
			EndSeqNum:   3,
		}},
	}

	msg, _ := requestGossipMsg.NoopSign()

	requestMsg.On("GetGossipMessage").Return(msg)
	requestMsg.On("GetConnectionInfo").Return(&proto.ConnectionInfo{
		Auth: &proto.AuthInfo{},
	})

	// Channel to send responses back
	responseChannel := make(chan proto.ReceivedMessage)
	defer close(responseChannel)

	requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
		// Get gossip response to respond back on state request
		response := args.Get(0).(*proto.GossipMessage)
		// Wrap it up into received response
		receivedMsg := new(receivedMessageMock)
		// Create sign response
		msg, _ := response.NoopSign()
		// Mock to respond
		receivedMsg.On("GetGossipMessage").Return(msg)
		// Send response
		responseChannel <- receivedMsg
	})

	// Send request message via communication channel into state transfer
	commChannel <- requestMsg

	// State transfer request should result in state response back
	response := <-responseChannel

	// Start the assertion section
	stateResponse := response.GetGossipMessage().GetStateResponse()

	assertion := assert.New(t)
	// Nonce should be equal to Nonce of the request
	assertion.Equal(response.GetGossipMessage().Nonce, uint64(1))
	// Payload should not need be nil
	assertion.NotNil(stateResponse)
	assertion.NotNil(stateResponse.Payloads)
	// Exactly two messages expected
	assertion.Equal(len(stateResponse.Payloads), 2)

	// Assert we have all data and it's same as we expected it
	for _, each := range stateResponse.Payloads {
		block := &pcomm.Block{}
		err := pb.Unmarshal(each.Data, block)
		assertion.NoError(err)

		assertion.NotNil(block.Header)

		testBlock, ok := data[block.Header.Number]
		assertion.True(ok)

		for i, d := range testBlock.block.Data.Data {
			assertion.True(bytes.Equal(d, block.Data.Data[i]))
		}

		for i, p := range testBlock.pvtData {
			pvtDataPayload := &proto.PvtDataPayload{}
			err := pb.Unmarshal(each.PrivateData[i], pvtDataPayload)
			assertion.NoError(err)
			pvtRWSet := &rwset.TxPvtReadWriteSet{}
			err = pb.Unmarshal(pvtDataPayload.Payload, pvtRWSet)
			assertion.NoError(err)
			assertion.True(pb.Equal(p.WriteSet, pvtRWSet))
		}
	}
}

// testPeer bundles a GossipMock with its channels and coordinator mock so a
// pair of in-process "peers" can exchange state-transfer traffic.
type testPeer struct {
	*mocks.GossipMock
	id            string
	gossipChannel chan *proto.GossipMessage
	commChannel   chan proto.ReceivedMessage
	coord         *coordinatorMock
}

// Gossip exposes the peer's gossip channel as receive-only.
func (t testPeer) Gossip() <-chan *proto.GossipMessage {
	return t.gossipChannel
}

// Comm exposes the peer's direct-communication channel.
func (t testPeer) Comm() chan proto.ReceivedMessage {
	return t.commChannel
}

// peers is the fixed two-peer topology used by TestTransferOfPvtDataBetweenPeers.
// NOTE(review): package-level mutable state shared by the test — fine while only
// one test uses it, but would break if reused by parallel tests.
var peers = map[string]testPeer{
	"peer1": {
		id:            "peer1",
		gossipChannel: make(chan *proto.GossipMessage),
		commChannel:   make(chan proto.ReceivedMessage),
		GossipMock:    &mocks.GossipMock{},
		coord:         new(coordinatorMock),
	},
	"peer2": {
		id:            "peer2",
		gossipChannel: make(chan *proto.GossipMessage),
		commChannel:   make(chan proto.ReceivedMessage),
		GossipMock:    &mocks.GossipMock{},
		coord:         new(coordinatorMock),
	},
}

func TestTransferOfPvtDataBetweenPeers(t *testing.T) {
	/*
	   This test covers pretty basic scenario, there are two peers: "peer1" and "peer2",
	   while peer2 missing a few blocks in the ledger therefore asking to replicate those
	   blocks from the first peers.

	   Test going to check that block from one peer will be replicated into second one and
	   have identical content.
	*/
	t.Parallel()
	chainID := "testChainID"

	// Initialize peer
	for _, peer := range peers {
		peer.On("Accept", mock.Anything, false).Return(peer.Gossip(), nil)

		// First Accept(..., true) hands out the real comm channel; subsequent
		// calls get a fresh, never-used channel.
		peer.On("Accept", mock.Anything, true).
			Return(nil, peer.Comm()).
			Once().
			On("Accept", mock.Anything, true).
			Return(nil, make(chan proto.ReceivedMessage))

		peer.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
		peer.coord.On("Close")
		peer.On("Close")
	}

	// First peer going to have more advanced ledger
	peers["peer1"].coord.On("LedgerHeight", mock.Anything).Return(uint64(3), nil)

	// Second peer has a gap of one block, hence it will have to replicate it from previous
	peers["peer2"].coord.On("LedgerHeight", mock.Anything).Return(uint64(2), nil)

	peers["peer1"].coord.On("GetPvtDataAndBlockByNum", uint64(2)).Return(&pcomm.Block{
		Header: &pcomm.BlockHeader{
			Number:       2,
			DataHash:     []byte{0, 0, 0, 1},
			PreviousHash: []byte{0, 1, 1, 1},
		},
		Data: &pcomm.BlockData{
			Data: [][]byte{{4}, {5}, {6}},
		},
	}, gutil.PvtDataCollections{&ledger.TxPvtData{
		SeqInBlock: uint64(1),
		WriteSet: &rwset.TxPvtReadWriteSet{
			DataModel: rwset.TxReadWriteSet_KV,
			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
				{
					Namespace: "myCC:v1",
					CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
						{
							CollectionName: "mysecrectCollection",
							Rwset:          []byte{1, 2, 3, 4, 5},
						},
					},
				},
			},
		},
	}}, nil)

	// Return membership of the peers
	member2 := discovery.NetworkMember{
		PKIid:            common.PKIidType([]byte{2}),
		Endpoint:         "peer2:7051",
		InternalEndpoint: "peer2:7051",
		Properties: &proto.Properties{
			LedgerHeight: 2,
		},
	}

	member1 := discovery.NetworkMember{
		PKIid:            common.PKIidType([]byte{1}),
		Endpoint:         "peer1:7051",
		InternalEndpoint: "peer1:7051",
		Properties: &proto.Properties{
			LedgerHeight: 3,
		},
	}

	peers["peer1"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member2})
	peers["peer2"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member1})

	// When peer2 sends its state request, re-route it into peer1's comm channel
	// and wire the eventual response back into peer2's comm channel.
	peers["peer2"].On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
		request := args.Get(0).(*proto.GossipMessage)
		requestMsg := new(receivedMessageMock)
		msg, _ := request.NoopSign()
		requestMsg.On("GetGossipMessage").Return(msg)
		requestMsg.On("GetConnectionInfo").Return(&proto.ConnectionInfo{
			Auth: &proto.AuthInfo{},
		})

		requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
			response := args.Get(0).(*proto.GossipMessage)
			receivedMsg := new(receivedMessageMock)
			msg, _ := response.NoopSign()
			receivedMsg.On("GetGossipMessage").Return(msg)
			// Send response back to the peer
			peers["peer2"].commChannel <- receivedMsg
		})

		peers["peer1"].commChannel <- requestMsg
	})

	wg := sync.WaitGroup{}
	wg.Add(1)
	peers["peer2"].coord.On("StoreBlock", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
		wg.Done() // Done once second peer hits commit of the block
	}).Return([]string{}, nil) // No pvt data to complete and no error

	cryptoService := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}

	mediator := &ServicesMediator{GossipAdapter: peers["peer1"], MCSAdapter: cryptoService}
	peer1State := NewGossipStateProvider(chainID, mediator, peers["peer1"].coord)
	defer peer1State.Stop()

	mediator = &ServicesMediator{GossipAdapter: peers["peer2"], MCSAdapter: cryptoService}
	peer2State := NewGossipStateProvider(chainID, mediator, peers["peer2"].coord)
	defer peer2State.Stop()

	// Make sure state was replicated
	done := make(chan struct{})
	go func() {
		wg.Wait()
		done <- struct{}{}
	}()

	select {
	case <-done:
		break
	case <-time.After(30 * time.Second):
		t.Fail()
	}
}

// waitUntilTrueOrTimeout polls predicate once per second until it returns true,
// failing the test fatally after timeout.
// NOTE(review): if the timeout fires, the polling goroutine keeps running (its
// send on ch blocks forever) — acceptable in tests since t.Fatal ends the test,
// but worth confirming this leak is intended.
func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
	ch := make(chan struct{})
	go func() {
		t.Log("Started to spin off, until predicate will be satisfied.")
		for !predicate() {
			time.Sleep(1 * time.Second)
		}
		ch <- struct{}{}
		t.Log("Done.")
	}()

	select {
	case <-ch:
		break
	case <-time.After(timeout):
		t.Fatal("Timeout has expired")
		break
	}
	t.Log("Stop waiting until timeout or true")
}

// assertLogged waits (up to 30s) until the log recorder has captured at least
// one message containing msg.
func assertLogged(t *testing.T, r *floggingtest.Recorder, msg string) {
	observed := func() bool { return len(r.MessagesContaining(msg)) > 0 }
	waitUntilTrueOrTimeout(t, observed, 30*time.Second)
}
package main

import (
	"os/user"
	"testing"

	"strconv"
	"syscall"

	"path"

	"os"
)

// TestUserClean checks UserConfig.clean: valid user resolves to the right uid,
// empty username is accepted, and an unknown username is rejected.
func TestUserClean(t *testing.T) {
	currentUser, _ := user.Current()
	userConfig := &UserConfig{
		UserName: currentUser.Username,
	}

	if err := userConfig.clean(nil); err != nil {
		t.Error("User config clean fails for valid user:", err)
	}

	currentUserId, _ := strconv.Atoi(currentUser.Uid)
	if userConfig.Uid != currentUserId {
		t.Error("Incorrect user id")
	}

	userConfig.UserName = ""
	if err := userConfig.clean(nil); err != nil {
		t.Error("UserConfig.clean fails for empty username:", err)
	}

	userConfig.UserName = "ThisShouldNotBeAValidUser"
	if err := userConfig.clean(nil); err == nil {
		t.Error("UserConfig.clean does not fail for invalid user.")
	}
}

// TestInternalPortsClean checks defaulting and validation of the internal port
// range: defaults applied on empty config; inverted and zero-sized ranges are
// rejected; a proper range passes.
func TestInternalPortsClean(t *testing.T) {
	internalPortsConfig := &InternalPortsConfig{}

	if err := internalPortsConfig.clean(nil); err != nil {
		t.Error("Internal ports config fails for empty setting:", err)
	}
	if internalPortsConfig.To != defaultPortTo || internalPortsConfig.From != defaultPortFrom {
		t.Error("Incorrect default values set")
	}

	internalPortsConfig.From = 6000
	internalPortsConfig.To = 5000
	if internalPortsConfig.clean(nil) != ErrInvalidPortRange {
		t.Error("Invalid port range does not fail clean")
	}

	internalPortsConfig.From = 5000
	internalPortsConfig.To = 5000
	if internalPortsConfig.clean(nil) != ErrInvalidPortRange {
		t.Error("Zero sized port range does not fail clean")
	}

	internalPortsConfig.From = 5000
	internalPortsConfig.To = 6000
	if err := internalPortsConfig.clean(nil); err != nil {
		t.Error("Valid port range fails clean:", err)
	}
}

// loggersEqual compares the value fields of two LoggerConfigs (used to verify
// a copy rather than pointer identity).
func loggersEqual(l1 *LoggerConfig, l2 *LoggerConfig) bool {
	return l1.LogDir == l2.LogDir &&
		l1.MaxLogSize == l2.MaxLogSize &&
		l1.MaxLogsKept == l2.MaxLogsKept &&
		l1.MaxLogAge == l2.MaxLogAge
}

// TestAppClean checks AppConfig.clean: defaults (stop signal, retries, hosts,
// port), deep-copy of the global Logger/User configs, required-field errors,
// and stop-signal name conversion/validation.
func TestAppClean(t *testing.T) {
	config := &Config{
		Logger: &LoggerConfig{
			LogDir:      "/tmp/log-test/",
			MaxLogSize:  100,
			MaxLogsKept: -1,
			MaxLogAge:   -1,
		},
		User: &UserConfig{
			UserName: "root",
		},
	}
	appConfig := &AppConfig{
		Name:    "demo",
		Command: "../demoapp/demoapp --port={port}",
	}

	if err := appConfig.clean(config); err != nil {
		t.Error("Minimal app config clean fails:", err)
	}
	if appConfig.StopSignal != syscall.SIGTERM {
		t.Error("Incorrect default stop signal set")
	}
	if appConfig.MaxRetries != defaultMaxRetries {
		t.Error("Incorrect default max retries set:", appConfig.MaxRetries)
	}
	if appConfig.InternalHost != defaultHost {
		t.Error("Incorrect default internal host set:", appConfig.InternalHost)
	}
	if appConfig.ExternalHost != defaultHost {
		t.Error("Incorrect default external host set:", appConfig.ExternalHost)
	}
	if appConfig.ExternalPort != defaultExternalPort {
		t.Error("Incorrect default external port set:", appConfig.ExternalPort)
	}
	if appConfig.Logger == config.Logger {
		t.Error("Logger should be new instance not pointer copy")
	}
	if !loggersEqual(appConfig.Logger, config.Logger) {
		t.Error("Logger values not copied correctly")
	}
	if appConfig.User == config.User {
		t.Error("User should be new instance not pointer copy")
	}
	if appConfig.User.UserName != config.User.UserName {
		t.Error("User should have username copied from global config")
	}

	appConfig.Name = ""
	if appConfig.clean(config) != ErrNameRequired {
		t.Error("AppConfig.clean should fail when no name")
	}

	appConfig.Name = "demo"
	appConfig.Command = ""
	if appConfig.clean(config) != ErrCommandRequired {
		t.Error("AppConfig.clean should fail when no command")
	}

	appConfig.Command = "../demoapp/demoapp"
	if appConfig.clean(config) != ErrPortBadgeRequired {
		t.Error("AppConfig.clean should fail when no {port} badge")
	}

	appConfig.Command = "../demoapp/demoapp --port={port}"
	appConfig.StopSignalName = "INT"
	if err := appConfig.clean(config); err != nil {
		t.Error("AppConfig.clean fails with custom signal name:", err)
	}
	if appConfig.StopSignal != syscall.SIGINT {
		t.Error("Incorrect stop signal name to signal conversion")
	}

	appConfig.StopSignalName = "ThisIsNotASignalName"
	if appConfig.clean(config) != ErrInvalidStopSignal {
		t.Error("AppConfig.clean should fail with invalid signal name")
	}
}

// TestAppHasPortBadge checks {port} badge detection in both the command line
// and the environment variables.
func TestAppHasPortBadge(t *testing.T) {
	appConfig := &AppConfig{
		Command: "../demoapp/demoapp --port={port}",
	}
	if !appConfig.hasPortBadge() {
		t.Error("AppConfig.hasPortBadge fails with port badge in command")
	}

	appConfig.Command = "../demoapp/demoapp"
	appConfig.Environment = []string{"PORT={port}"}
	if !appConfig.hasPortBadge() {
		t.Error("AppConfig.hasPortBadge fails with port badge in environment")
	}

	appConfig.Environment = []string{"PORT=80"}
	if appConfig.hasPortBadge() {
		t.Error("AppConfig.hasPortBadge should fail with no port badge")
	}
}

// TestRpcClean checks RpcConfig.clean defaults and that explicit host/port
// values survive cleaning.
func TestRpcClean(t *testing.T) {
	rpcConfig := &RpcConfig{}
	if err := rpcConfig.clean(nil); err != nil {
		t.Error("RpcConfig.clean fails with for empty setting:", err)
	}
	if rpcConfig.Host != defaultHost {
		t.Error("Incorrect default rpc host set:", rpcConfig.Host)
	}
	if rpcConfig.Port != defaultRpcPort {
		t.Error("Incorrect default rpc port set:", rpcConfig.Port)
	}

	rpcConfig.Host = "test.com"
	rpcConfig.Port = 123
	if err := rpcConfig.clean(nil); err != nil {
		t.Error("RpcConfig.clean fails with valid settings:", err)
	}
}

// TestLoggerGlobalClean checks LoggerConfig.globalClean defaults and that it
// creates the log directory for a custom log file path.
func TestLoggerGlobalClean(t *testing.T) {
	loggerConfig := &LoggerConfig{}
	loggerConfig.globalClean(nil)
	if loggerConfig.LogDir != defaultLogDir {
		t.Error("Incorrect default log dir set:", loggerConfig.LogDir)
	}
	if loggerConfig.LogFile != path.Join(defaultLogDir, defaultLogFileName) {
		t.Error("Incorrect default log file set:", loggerConfig.LogFile)
	}
	if loggerConfig.MaxLogSize != defaultMaxLogSize {
		t.Error("Incorrect default max log size set:", loggerConfig.MaxLogSize)
	}

	loggerConfig.LogFile = "/tmp/log-test/test.log"
	if err := loggerConfig.globalClean(nil); err != nil {
		t.Error("Global clean failed:", err)
	}
	if _, err := os.Stat("/tmp/log-test/"); err != nil {
		t.Error("LoggerConfig.globalClean did not create dir:", err)
	}
	os.Remove("/tmp/log-test/")
}

// TestLoggerAppClean checks LoggerConfig.appClean: values inherited from the
// global logger config, per-app stdout/stderr file naming, and directory
// creation.
func TestLoggerAppClean(t *testing.T) {
	config := &Config{
		Logger: &LoggerConfig{
			LogDir:      "/tmp/log-test/",
			MaxLogSize:  100,
			MaxLogsKept: -1,
			MaxLogAge:   -1,
		},
	}
	appConfig := &AppConfig{
		Name: "demo",
	}
	loggerConfig := &LoggerConfig{}
	if err := loggerConfig.appClean(config, appConfig); err != nil {
		t.Error("App clean failed:", err)
	}
	if loggerConfig.LogDir != config.Logger.LogDir {
		t.Error("LoggerConfig.LogDir not copied from global config")
	}
	if loggerConfig.MaxLogSize != config.Logger.MaxLogSize {
		t.Error("LoggerConfig.MaxLogSize not copied from global config")
	}
	if loggerConfig.MaxLogsKept != config.Logger.MaxLogsKept {
		t.Error("LoggerConfig.MaxLogsKept not copied from global config")
	}
	if loggerConfig.MaxLogAge != config.Logger.MaxLogAge {
		t.Error("LoggerConfig.MaxLogAge not copied from global config")
	}
	if loggerConfig.StdoutLogFile != "/tmp/log-test/app_demo.out" {
		t.Error("Incorrect default stdout log file set:", loggerConfig.StdoutLogFile)
	}
	if loggerConfig.StderrLogFile != "/tmp/log-test/app_demo.err" {
		t.Error("Incorrect default stderr log file set:", loggerConfig.StderrLogFile)
	}
	if _, err := os.Stat("/tmp/log-test/"); err != nil {
		t.Error("LoggerConfig.appClean did not create dir:", err)
	}
	os.Remove("/tmp/log-test/")
}

// TestConfigClean checks top-level Config.clean: sub-configs initialized on
// demand, and duplicate app names or duplicate external ports rejected.
func TestConfigClean(t *testing.T) {
	config := &Config{
		Logger: &LoggerConfig{
			LogDir: "/tmp/log-test/",
		},
		Apps: []*AppConfig{
			&AppConfig{
				Name:         "demo",
				Command:      "../demoapp/demoapp --port={port}",
				ExternalPort: 8000,
			},
			&AppConfig{
				Name:         "demo1",
				Command:      "../demoapp/demoapp --port={port}",
				ExternalPort: 8001,
			},
		},
	}

	if err := config.clean(nil); err != nil {
		t.Error("Config.clean fails with empty settings:", err)
	}
	if config.PortRange == nil {
		t.Error("Config.PortRange should be initialized")
	}
	if config.Rpc == nil {
		t.Error("config.Rpc should be initialized")
	}

	config.Apps = []*AppConfig{
		&AppConfig{
			Name:         "demo",
			Command:      "../demoapp/demoapp --port={port}",
			ExternalPort: 8000,
		},
		&AppConfig{
			Name:         "demo",
			Command:      "../demoapp/demoapp --port={port}",
			ExternalPort: 8001,
		},
	}
	if config.clean(nil) == nil {
		t.Error("Config.clean should fail with apps with same Name")
	}

	config.Apps = []*AppConfig{
		&AppConfig{
			Name:         "demo",
			Command:      "../demoapp/demoapp --port={port}",
			ExternalPort: 8000,
		},
		&AppConfig{
			Name:         "demo1",
			Command:      "../demoapp/demoapp --port={port}",
			ExternalPort: 8000,
		},
	}
	if config.clean(nil) == nil {
		t.Error("Config.clean should fail with apps with same ExternalPort")
	}
}

// TestConfigIncludeFile checks include-file filtering (gracevisor.yaml and
// non-.yaml files ignored), failure on invalid files, and successful inclusion
// of a sample app config.
func TestConfigIncludeFile(t *testing.T) {
	config := &Config{}

	if config.includeFile("/not/a/path/gracevisor.yaml") != nil {
		t.Error("Config.includeFile should ignore any file named gracevisor.yaml")
	}
	if config.includeFile("/not/a/path/notconfig.txt") != nil {
		t.Error("Config.includeFile should ignore any file not .yaml")
	}
	if config.includeFile("/not/a/path/invalid_file.yaml") == nil {
		t.Error("Config.includeFile should fail with invalid file.")
	}
	if err := config.includeFile("../conf/app.yaml"); err != nil {
		t.Error("Error including '../conf/app.yaml':", err)
	}
	if len(config.Apps) != 1 {
		t.Error("App not added to Config.Apps")
	}
}

// TestConfigInclude checks Config.include with both a directory and a single
// file argument, including failure on non-existent paths.
func TestConfigInclude(t *testing.T) {
	config := &Config{}

	if config.include("/not/a/path/") == nil {
		t.Error("Config.include should fail on folder that does not exist")
	}
	if config.include("/not/a/path/invalid_file.yaml") == nil {
		t.Error("Config.includeFile should fail on file that does not exist")
	}
	if err := config.include("../conf/"); err != nil {
		t.Error("Error including folder '../conf/':", err)
	}
	if len(config.Apps) != 1 {
		t.Error("App not added to Config.Apps")
	}

	config.Apps = []*AppConfig{}
	if err := config.include("../conf/app.yaml"); err != nil {
		t.Error("Error including file '../conf/app.yaml':", err)
	}
	if len(config.Apps) != 1 {
		t.Error("App not added to Config.Apps")
	}
}

// TestParseConfig checks end-to-end parsing of the sample config directory.
// NOTE(review): "ParseConfing" (sic) matches the function name declared
// elsewhere in this package — renaming it would be a separate refactor.
func TestParseConfig(t *testing.T) {
	config, err := ParseConfing("/not/a/path")
	if err == nil {
		t.Error("Parsing of invalid path should fail.")
	}

	config, err = ParseConfing("../conf")
	if err != nil {
		t.Error("Parsing of sample config failed:", err)
	}
	if len(config.Apps) != 3 {
		t.Error("Sample config should load 3 apps.")
	}
}

go fmt

Signed-off-by: Domen Ipavec <d11d22f7dbd5ce2cc259c953391f3b291ef56fb5@ipavec.net>

package main

import (
	"os"
	"os/user"
	"path"
	"strconv"
	"syscall"
	"testing"
)

// TestUserClean checks UserConfig.clean: valid user resolves to the right uid,
// empty username is accepted, and an unknown username is rejected.
func TestUserClean(t *testing.T) {
	currentUser, _ := user.Current()
	userConfig := &UserConfig{
		UserName: currentUser.Username,
	}

	if err := userConfig.clean(nil); err != nil {
		t.Error("User config clean fails for valid user:", err)
	}

	currentUserId, _ := strconv.Atoi(currentUser.Uid)
	if userConfig.Uid != currentUserId {
		t.Error("Incorrect user id")
	}

	userConfig.UserName = ""
	if err := userConfig.clean(nil); err != nil {
		t.Error("UserConfig.clean fails for empty username:", err)
	}

	userConfig.UserName = "ThisShouldNotBeAValidUser"
	if err := userConfig.clean(nil); err == nil {
		t.Error("UserConfig.clean does not fail for invalid user.")
	}
}

// TestInternalPortsClean checks defaulting and validation of the internal port
// range: defaults applied on empty config; inverted and zero-sized ranges are
// rejected; a proper range passes.
func TestInternalPortsClean(t *testing.T) {
	internalPortsConfig := &InternalPortsConfig{}

	if err := internalPortsConfig.clean(nil); err != nil {
		t.Error("Internal ports config fails for empty setting:", err)
	}
	if internalPortsConfig.To != defaultPortTo || internalPortsConfig.From != defaultPortFrom {
		t.Error("Incorrect default values set")
	}

	internalPortsConfig.From = 6000
	internalPortsConfig.To = 5000
	if internalPortsConfig.clean(nil) != ErrInvalidPortRange {
		t.Error("Invalid port range does not fail clean")
	}

	internalPortsConfig.From = 5000
	internalPortsConfig.To = 5000
	if internalPortsConfig.clean(nil) != ErrInvalidPortRange {
		t.Error("Zero sized port range does not fail clean")
	}

	internalPortsConfig.From = 5000
	internalPortsConfig.To = 6000
	if err := internalPortsConfig.clean(nil); err != nil {
		t.Error("Valid port range fails clean:", err)
	}
}

// loggersEqual compares the value fields of two LoggerConfigs (used to verify
// a copy rather than pointer identity).
func loggersEqual(l1 *LoggerConfig, l2 *LoggerConfig) bool {
	return l1.LogDir == l2.LogDir &&
		l1.MaxLogSize == l2.MaxLogSize &&
		l1.MaxLogsKept == l2.MaxLogsKept &&
		l1.MaxLogAge == l2.MaxLogAge
}

// TestAppClean checks AppConfig.clean: defaults (stop signal, retries, hosts,
// port), deep-copy of the global Logger/User configs, required-field errors,
// and stop-signal name conversion/validation.
func TestAppClean(t *testing.T) {
	config := &Config{
		Logger: &LoggerConfig{
			LogDir:
"/tmp/log-test/", MaxLogSize: 100, MaxLogsKept: -1, MaxLogAge: -1, }, User: &UserConfig{ UserName: "root", }, } appConfig := &AppConfig{ Name: "demo", Command: "../demoapp/demoapp --port={port}", } if err := appConfig.clean(config); err != nil { t.Error("Minimal app config clean fails:", err) } if appConfig.StopSignal != syscall.SIGTERM { t.Error("Incorrect default stop signal set") } if appConfig.MaxRetries != defaultMaxRetries { t.Error("Incorrect default max retries set:", appConfig.MaxRetries) } if appConfig.InternalHost != defaultHost { t.Error("Incorrect default internal host set:", appConfig.InternalHost) } if appConfig.ExternalHost != defaultHost { t.Error("Incorrect default external host set:", appConfig.ExternalHost) } if appConfig.ExternalPort != defaultExternalPort { t.Error("Incorrect default external port set:", appConfig.ExternalPort) } if appConfig.Logger == config.Logger { t.Error("Logger should be new instance not pointer copy") } if !loggersEqual(appConfig.Logger, config.Logger) { t.Error("Logger values not copied correctly") } if appConfig.User == config.User { t.Error("User should be new instance not pointer copy") } if appConfig.User.UserName != config.User.UserName { t.Error("User should have username copied from global config") } appConfig.Name = "" if appConfig.clean(config) != ErrNameRequired { t.Error("AppConfig.clean should fail when no name") } appConfig.Name = "demo" appConfig.Command = "" if appConfig.clean(config) != ErrCommandRequired { t.Error("AppConfig.clean should fail when no command") } appConfig.Command = "../demoapp/demoapp" if appConfig.clean(config) != ErrPortBadgeRequired { t.Error("AppConfig.clean should fail when no {port} badge") } appConfig.Command = "../demoapp/demoapp --port={port}" appConfig.StopSignalName = "INT" if err := appConfig.clean(config); err != nil { t.Error("AppConfig.clean fails with custom signal name:", err) } if appConfig.StopSignal != syscall.SIGINT { t.Error("Incorrect stop signal name to signal 
conversion") } appConfig.StopSignalName = "ThisIsNotASignalName" if appConfig.clean(config) != ErrInvalidStopSignal { t.Error("AppConfig.clean should fail with invalid signal name") } } func TestAppHasPortBadge(t *testing.T) { appConfig := &AppConfig{ Command: "../demoapp/demoapp --port={port}", } if !appConfig.hasPortBadge() { t.Error("AppConfig.hasPortBadge fails with port badge in command") } appConfig.Command = "../demoapp/demoapp" appConfig.Environment = []string{"PORT={port}"} if !appConfig.hasPortBadge() { t.Error("AppConfig.hasPortBadge fails with port badge in environment") } appConfig.Environment = []string{"PORT=80"} if appConfig.hasPortBadge() { t.Error("AppConfig.hasPortBadge should fail with no port badge") } } func TestRpcClean(t *testing.T) { rpcConfig := &RpcConfig{} if err := rpcConfig.clean(nil); err != nil { t.Error("RpcConfig.clean fails with for empty setting:", err) } if rpcConfig.Host != defaultHost { t.Error("Incorrect default rpc host set:", rpcConfig.Host) } if rpcConfig.Port != defaultRpcPort { t.Error("Incorrect default rpc port set:", rpcConfig.Port) } rpcConfig.Host = "test.com" rpcConfig.Port = 123 if err := rpcConfig.clean(nil); err != nil { t.Error("RpcConfig.clean fails with valid settings:", err) } } func TestLoggerGlobalClean(t *testing.T) { loggerConfig := &LoggerConfig{} loggerConfig.globalClean(nil) if loggerConfig.LogDir != defaultLogDir { t.Error("Incorrect default log dir set:", loggerConfig.LogDir) } if loggerConfig.LogFile != path.Join(defaultLogDir, defaultLogFileName) { t.Error("Incorrect default log file set:", loggerConfig.LogFile) } if loggerConfig.MaxLogSize != defaultMaxLogSize { t.Error("Incorrect default max log size set:", loggerConfig.MaxLogSize) } loggerConfig.LogFile = "/tmp/log-test/test.log" if err := loggerConfig.globalClean(nil); err != nil { t.Error("Global clean failed:", err) } if _, err := os.Stat("/tmp/log-test/"); err != nil { t.Error("LoggerConfig.globalClean did not create dir:", err) } 
os.Remove("/tmp/log-test/") } func TestLoggerAppClean(t *testing.T) { config := &Config{ Logger: &LoggerConfig{ LogDir: "/tmp/log-test/", MaxLogSize: 100, MaxLogsKept: -1, MaxLogAge: -1, }, } appConfig := &AppConfig{ Name: "demo", } loggerConfig := &LoggerConfig{} if err := loggerConfig.appClean(config, appConfig); err != nil { t.Error("App clean failed:", err) } if loggerConfig.LogDir != config.Logger.LogDir { t.Error("LoggerConfig.LogDir not copied from global config") } if loggerConfig.MaxLogSize != config.Logger.MaxLogSize { t.Error("LoggerConfig.MaxLogSize not copied from global config") } if loggerConfig.MaxLogsKept != config.Logger.MaxLogsKept { t.Error("LoggerConfig.MaxLogsKept not copied from global config") } if loggerConfig.MaxLogAge != config.Logger.MaxLogAge { t.Error("LoggerConfig.MaxLogAge not copied from global config") } if loggerConfig.StdoutLogFile != "/tmp/log-test/app_demo.out" { t.Error("Incorrect default stdout log file set:", loggerConfig.StdoutLogFile) } if loggerConfig.StderrLogFile != "/tmp/log-test/app_demo.err" { t.Error("Incorrect default stderr log file set:", loggerConfig.StderrLogFile) } if _, err := os.Stat("/tmp/log-test/"); err != nil { t.Error("LoggerConfig.appClean did not create dir:", err) } os.Remove("/tmp/log-test/") } func TestConfigClean(t *testing.T) { config := &Config{ Logger: &LoggerConfig{ LogDir: "/tmp/log-test/", }, Apps: []*AppConfig{ &AppConfig{ Name: "demo", Command: "../demoapp/demoapp --port={port}", ExternalPort: 8000, }, &AppConfig{ Name: "demo1", Command: "../demoapp/demoapp --port={port}", ExternalPort: 8001, }, }, } if err := config.clean(nil); err != nil { t.Error("Config.clean fails with empty settings:", err) } if config.PortRange == nil { t.Error("Config.PortRange should be initialized") } if config.Rpc == nil { t.Error("config.Rpc should be initialized") } config.Apps = []*AppConfig{ &AppConfig{ Name: "demo", Command: "../demoapp/demoapp --port={port}", ExternalPort: 8000, }, &AppConfig{ Name: 
"demo", Command: "../demoapp/demoapp --port={port}", ExternalPort: 8001, }, } if config.clean(nil) == nil { t.Error("Config.clean should fail with apps with same Name") } config.Apps = []*AppConfig{ &AppConfig{ Name: "demo", Command: "../demoapp/demoapp --port={port}", ExternalPort: 8000, }, &AppConfig{ Name: "demo1", Command: "../demoapp/demoapp --port={port}", ExternalPort: 8000, }, } if config.clean(nil) == nil { t.Error("Config.clean should fail with apps with same ExternalPort") } } func TestConfigIncludeFile(t *testing.T) { config := &Config{} if config.includeFile("/not/a/path/gracevisor.yaml") != nil { t.Error("Config.includeFile should ignore any file named gracevisor.yaml") } if config.includeFile("/not/a/path/notconfig.txt") != nil { t.Error("Config.includeFile should ignore any file not .yaml") } if config.includeFile("/not/a/path/invalid_file.yaml") == nil { t.Error("Config.includeFile should fail with invalid file.") } if err := config.includeFile("../conf/app.yaml"); err != nil { t.Error("Error including '../conf/app.yaml':", err) } if len(config.Apps) != 1 { t.Error("App not added to Config.Apps") } } func TestConfigInclude(t *testing.T) { config := &Config{} if config.include("/not/a/path/") == nil { t.Error("Config.include should fail on folder that does not exist") } if config.include("/not/a/path/invalid_file.yaml") == nil { t.Error("Config.includeFile should fail on file that does not exist") } if err := config.include("../conf/"); err != nil { t.Error("Error including folder '../conf/':", err) } if len(config.Apps) != 1 { t.Error("App not added to Config.Apps") } config.Apps = []*AppConfig{} if err := config.include("../conf/app.yaml"); err != nil { t.Error("Error including file '../conf/app.yaml':", err) } if len(config.Apps) != 1 { t.Error("App not added to Config.Apps") } } func TestParseConfig(t *testing.T) { config, err := ParseConfing("/not/a/path") if err == nil { t.Error("Parsing of invalid path should fail.") } config, err = 
ParseConfing("../conf") if err != nil { t.Error("Parsing of sample config failed:", err) } if len(config.Apps) != 3 { t.Error("Sample config should load 3 apps.") } }
// NOTE(review): this chunk contains two concatenated revisions of the same
// file, separated by the commit message "Enable pod priority for kubelet
// (#3743)"; the second revision adds the PodPriority feature gate.
package acsengine

import (
	"strconv"
	"strings"

	"github.com/Azure/acs-engine/pkg/api"
	"github.com/Azure/acs-engine/pkg/api/common"
	"github.com/Azure/acs-engine/pkg/helpers"
)

// setKubeletConfig assembles the kubelet command-line flags for the
// cluster: static (non-overridable) flags, defaults for any flag the user
// did not set, and per-master/per-agent-pool copies of the result.
func setKubeletConfig(cs *api.ContainerService) {
	o := cs.Properties.OrchestratorProfile
	cloudSpecConfig := getCloudSpecConfig(cs.Location)
	// Flags that users may not override on Linux nodes.
	staticLinuxKubeletConfig := map[string]string{
		"--address":                     "0.0.0.0",
		"--allow-privileged":            "true",
		"--anonymous-auth":              "false",
		"--authorization-mode":          "Webhook",
		"--client-ca-file":              "/etc/kubernetes/certs/ca.crt",
		"--pod-manifest-path":           "/etc/kubernetes/manifests",
		"--cluster-dns":                 o.KubernetesConfig.DNSServiceIP,
		"--cgroups-per-qos":             "true",
		"--enforce-node-allocatable":    "pods",
		"--kubeconfig":                  "/var/lib/kubelet/kubeconfig",
		"--keep-terminated-pod-volumes": "false",
	}

	// Windows static flags start from the Linux set and override
	// path-style and cgroup-related entries.
	staticWindowsKubeletConfig := make(map[string]string)
	for key, val := range staticLinuxKubeletConfig {
		staticWindowsKubeletConfig[key] = val
	}
	staticWindowsKubeletConfig["--azure-container-registry-config"] = "c:\\k\\azure.json"
	staticWindowsKubeletConfig["--pod-infra-container-image"] = "kubletwin/pause"
	staticWindowsKubeletConfig["--kubeconfig"] = "c:\\k\\config"
	staticWindowsKubeletConfig["--cloud-config"] = "c:\\k\\azure.json"
	staticWindowsKubeletConfig["--cgroups-per-qos"] = "false"
	staticWindowsKubeletConfig["--enforce-node-allocatable"] = "\"\""

	// Default Kubelet config
	defaultKubeletConfig := map[string]string{
		"--cluster-domain":                  "cluster.local",
		"--network-plugin":                  "cni",
		"--pod-infra-container-image":       cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase + KubeConfigs[o.OrchestratorVersion]["pause"],
		"--max-pods":                        strconv.Itoa(DefaultKubernetesMaxPods),
		"--eviction-hard":                   DefaultKubernetesHardEvictionThreshold,
		"--node-status-update-frequency":    KubeConfigs[o.OrchestratorVersion]["nodestatusfreq"],
		"--image-gc-high-threshold":         strconv.Itoa(DefaultKubernetesGCHighThreshold),
		"--image-gc-low-threshold":          strconv.Itoa(DefaultKubernetesGCLowThreshold),
		"--non-masquerade-cidr":             o.KubernetesConfig.ClusterSubnet,
		"--cloud-provider":                  "azure",
		"--cloud-config":                    "/etc/kubernetes/azure.json",
		"--azure-container-registry-config": "/etc/kubernetes/azure.json",
		"--event-qps":                       DefaultKubeletEventQPS,
		"--cadvisor-port":                   DefaultKubeletCadvisorPort,
		"--pod-max-pids":                    strconv.Itoa(DefaultKubeletPodMaxPIDs),
		"--image-pull-progress-deadline":    "30m",
	}

	// Apply Azure CNI-specific --max-pods value
	if o.KubernetesConfig.NetworkPlugin == NetworkPluginAzure {
		defaultKubeletConfig["--max-pods"] = strconv.Itoa(DefaultKubernetesMaxPodsVNETIntegrated)
	}

	// If no user-configurable kubelet config values exists, use the defaults
	setMissingKubeletValues(o.KubernetesConfig, defaultKubeletConfig)
	addDefaultFeatureGates(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "", "")

	// Override default cloud-provider?
	if helpers.IsTrueBoolPointer(o.KubernetesConfig.UseCloudControllerManager) {
		staticLinuxKubeletConfig["--cloud-provider"] = "external"
	}

	// Override default --network-plugin?
	if o.KubernetesConfig.NetworkPlugin == NetworkPluginKubenet {
		if o.KubernetesConfig.NetworkPolicy != NetworkPolicyCalico {
			o.KubernetesConfig.KubeletConfig["--network-plugin"] = NetworkPluginKubenet
		}
	}

	// We don't support user-configurable values for the following,
	// so any of the value assignments below will override user-provided values
	for key, val := range staticLinuxKubeletConfig {
		o.KubernetesConfig.KubeletConfig[key] = val
	}

	// Get rid of values not supported in v1.5 clusters
	if !common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.6.0") {
		for _, key := range []string{"--non-masquerade-cidr", "--cgroups-per-qos", "--enforce-node-allocatable"} {
			delete(o.KubernetesConfig.KubeletConfig, key)
		}
	}

	// Get rid of values not supported in v1.10 clusters
	if !common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.10.0") {
		for _, key := range []string{"--pod-max-pids"} {
			delete(o.KubernetesConfig.KubeletConfig, key)
		}
	}

	// Get rid of values not supported in v1.12 and up
	if common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.12.0-alpha.1") {
		for _, key := range []string{"--cadvisor-port"} {
			delete(o.KubernetesConfig.KubeletConfig, key)
		}
	}

	// Remove secure kubelet flags, if configured
	if !helpers.IsTrueBoolPointer(o.KubernetesConfig.EnableSecureKubelet) {
		for _, key := range []string{"--anonymous-auth", "--client-ca-file"} {
			delete(o.KubernetesConfig.KubeletConfig, key)
		}
	}

	// Master-specific kubelet config changes go here
	if cs.Properties.MasterProfile != nil {
		if cs.Properties.MasterProfile.KubernetesConfig == nil {
			cs.Properties.MasterProfile.KubernetesConfig = &api.KubernetesConfig{}
			cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig = copyMap(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig)
		}
		setMissingKubeletValues(cs.Properties.MasterProfile.KubernetesConfig, o.KubernetesConfig.KubeletConfig)
		addDefaultFeatureGates(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "", "")
	}

	// Agent-specific kubelet config changes go here
	for _, profile := range cs.Properties.AgentPoolProfiles {
		if profile.KubernetesConfig == nil {
			profile.KubernetesConfig = &api.KubernetesConfig{}
			profile.KubernetesConfig.KubeletConfig = copyMap(profile.KubernetesConfig.KubeletConfig)
			// NOTE(review): Windows static flags are only applied when the
			// pool had no KubernetesConfig at all — confirm this is intended.
			if profile.OSType == "Windows" {
				for key, val := range staticWindowsKubeletConfig {
					profile.KubernetesConfig.KubeletConfig[key] = val
				}
			}
		}
		setMissingKubeletValues(profile.KubernetesConfig, o.KubernetesConfig.KubeletConfig)

		// For N Series (GPU) VMs
		if strings.Contains(profile.VMSize, "Standard_N") {
			if !cs.Properties.IsNVIDIADevicePluginEnabled() && !common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.11.0") {
				// enabling accelerators for Kubernetes >= 1.6 to <= 1.9
				addDefaultFeatureGates(profile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "1.6.0", "Accelerators=true")
			}
		}
	}
}

// setMissingKubeletValues fills p.KubeletConfig with entries from d that
// the user has not set; a nil map adopts d wholesale (by reference).
func setMissingKubeletValues(p *api.KubernetesConfig, d map[string]string) {
	if p.KubeletConfig == nil {
		p.KubeletConfig = d
	} else {
		for key, val := range d {
			// If we don't have a user-configurable value for each option
			if _, ok := p.KubeletConfig[key]; !ok {
				// then assign the default value
				p.KubeletConfig[key] = val
			}
		}
	}
}

// copyMap returns a shallow copy of input.
func copyMap(input map[string]string) map[string]string {
	copy := map[string]string{}
	for key, value := range input {
		copy[key] = value
	}
	return copy
}
Enable pod priority for kubelet (#3743)
package acsengine

import (
	"strconv"
	"strings"

	"github.com/Azure/acs-engine/pkg/api"
	"github.com/Azure/acs-engine/pkg/api/common"
	"github.com/Azure/acs-engine/pkg/helpers"
)

// setKubeletConfig assembles the kubelet command-line flags for the
// cluster: static (non-overridable) flags, defaults for any flag the user
// did not set, and per-master/per-agent-pool copies of the result.
// This revision additionally enables the PodPriority feature gate for
// Kubernetes >= 1.8.0.
func setKubeletConfig(cs *api.ContainerService) {
	o := cs.Properties.OrchestratorProfile
	cloudSpecConfig := getCloudSpecConfig(cs.Location)
	// Flags that users may not override on Linux nodes.
	staticLinuxKubeletConfig := map[string]string{
		"--address":                     "0.0.0.0",
		"--allow-privileged":            "true",
		"--anonymous-auth":              "false",
		"--authorization-mode":          "Webhook",
		"--client-ca-file":              "/etc/kubernetes/certs/ca.crt",
		"--pod-manifest-path":           "/etc/kubernetes/manifests",
		"--cluster-dns":                 o.KubernetesConfig.DNSServiceIP,
		"--cgroups-per-qos":             "true",
		"--enforce-node-allocatable":    "pods",
		"--kubeconfig":                  "/var/lib/kubelet/kubeconfig",
		"--keep-terminated-pod-volumes": "false",
	}

	// Windows static flags start from the Linux set and override
	// path-style and cgroup-related entries.
	staticWindowsKubeletConfig := make(map[string]string)
	for key, val := range staticLinuxKubeletConfig {
		staticWindowsKubeletConfig[key] = val
	}
	staticWindowsKubeletConfig["--azure-container-registry-config"] = "c:\\k\\azure.json"
	staticWindowsKubeletConfig["--pod-infra-container-image"] = "kubletwin/pause"
	staticWindowsKubeletConfig["--kubeconfig"] = "c:\\k\\config"
	staticWindowsKubeletConfig["--cloud-config"] = "c:\\k\\azure.json"
	staticWindowsKubeletConfig["--cgroups-per-qos"] = "false"
	staticWindowsKubeletConfig["--enforce-node-allocatable"] = "\"\""

	// Default Kubelet config
	defaultKubeletConfig := map[string]string{
		"--cluster-domain":                  "cluster.local",
		"--network-plugin":                  "cni",
		"--pod-infra-container-image":       cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase + KubeConfigs[o.OrchestratorVersion]["pause"],
		"--max-pods":                        strconv.Itoa(DefaultKubernetesMaxPods),
		"--eviction-hard":                   DefaultKubernetesHardEvictionThreshold,
		"--node-status-update-frequency":    KubeConfigs[o.OrchestratorVersion]["nodestatusfreq"],
		"--image-gc-high-threshold":         strconv.Itoa(DefaultKubernetesGCHighThreshold),
		"--image-gc-low-threshold":          strconv.Itoa(DefaultKubernetesGCLowThreshold),
		"--non-masquerade-cidr":             o.KubernetesConfig.ClusterSubnet,
		"--cloud-provider":                  "azure",
		"--cloud-config":                    "/etc/kubernetes/azure.json",
		"--azure-container-registry-config": "/etc/kubernetes/azure.json",
		"--event-qps":                       DefaultKubeletEventQPS,
		"--cadvisor-port":                   DefaultKubeletCadvisorPort,
		"--pod-max-pids":                    strconv.Itoa(DefaultKubeletPodMaxPIDs),
		"--image-pull-progress-deadline":    "30m",
	}

	// Apply Azure CNI-specific --max-pods value
	if o.KubernetesConfig.NetworkPlugin == NetworkPluginAzure {
		defaultKubeletConfig["--max-pods"] = strconv.Itoa(DefaultKubernetesMaxPodsVNETIntegrated)
	}

	// If no user-configurable kubelet config values exists, use the defaults
	setMissingKubeletValues(o.KubernetesConfig, defaultKubeletConfig)
	addDefaultFeatureGates(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "", "")
	// Enable PodPriority on clusters new enough to support it (>= 1.8.0).
	addDefaultFeatureGates(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "1.8.0", "PodPriority=true")

	// Override default cloud-provider?
	if helpers.IsTrueBoolPointer(o.KubernetesConfig.UseCloudControllerManager) {
		staticLinuxKubeletConfig["--cloud-provider"] = "external"
	}

	// Override default --network-plugin?
	if o.KubernetesConfig.NetworkPlugin == NetworkPluginKubenet {
		if o.KubernetesConfig.NetworkPolicy != NetworkPolicyCalico {
			o.KubernetesConfig.KubeletConfig["--network-plugin"] = NetworkPluginKubenet
		}
	}

	// We don't support user-configurable values for the following,
	// so any of the value assignments below will override user-provided values
	for key, val := range staticLinuxKubeletConfig {
		o.KubernetesConfig.KubeletConfig[key] = val
	}

	// Get rid of values not supported in v1.5 clusters
	if !common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.6.0") {
		for _, key := range []string{"--non-masquerade-cidr", "--cgroups-per-qos", "--enforce-node-allocatable"} {
			delete(o.KubernetesConfig.KubeletConfig, key)
		}
	}

	// Get rid of values not supported in v1.10 clusters
	if !common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.10.0") {
		for _, key := range []string{"--pod-max-pids"} {
			delete(o.KubernetesConfig.KubeletConfig, key)
		}
	}

	// Get rid of values not supported in v1.12 and up
	if common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.12.0-alpha.1") {
		for _, key := range []string{"--cadvisor-port"} {
			delete(o.KubernetesConfig.KubeletConfig, key)
		}
	}

	// Remove secure kubelet flags, if configured
	if !helpers.IsTrueBoolPointer(o.KubernetesConfig.EnableSecureKubelet) {
		for _, key := range []string{"--anonymous-auth", "--client-ca-file"} {
			delete(o.KubernetesConfig.KubeletConfig, key)
		}
	}

	// Master-specific kubelet config changes go here
	if cs.Properties.MasterProfile != nil {
		if cs.Properties.MasterProfile.KubernetesConfig == nil {
			cs.Properties.MasterProfile.KubernetesConfig = &api.KubernetesConfig{}
			cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig = copyMap(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig)
		}
		setMissingKubeletValues(cs.Properties.MasterProfile.KubernetesConfig, o.KubernetesConfig.KubeletConfig)
		addDefaultFeatureGates(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "", "")
	}

	// Agent-specific kubelet config changes go here
	for _, profile := range cs.Properties.AgentPoolProfiles {
		if profile.KubernetesConfig == nil {
			profile.KubernetesConfig = &api.KubernetesConfig{}
			profile.KubernetesConfig.KubeletConfig = copyMap(profile.KubernetesConfig.KubeletConfig)
			// NOTE(review): Windows static flags are only applied when the
			// pool had no KubernetesConfig at all — confirm this is intended.
			if profile.OSType == "Windows" {
				for key, val := range staticWindowsKubeletConfig {
					profile.KubernetesConfig.KubeletConfig[key] = val
				}
			}
		}
		setMissingKubeletValues(profile.KubernetesConfig, o.KubernetesConfig.KubeletConfig)

		// For N Series (GPU) VMs
		if strings.Contains(profile.VMSize, "Standard_N") {
			if !cs.Properties.IsNVIDIADevicePluginEnabled() && !common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.11.0") {
				// enabling accelerators for Kubernetes >= 1.6 to <= 1.9
				addDefaultFeatureGates(profile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "1.6.0", "Accelerators=true")
			}
		}
	}
}

// setMissingKubeletValues fills p.KubeletConfig with entries from d that
// the user has not set; a nil map adopts d wholesale (by reference).
func setMissingKubeletValues(p *api.KubernetesConfig, d map[string]string) {
	if p.KubeletConfig == nil {
		p.KubeletConfig = d
	} else {
		for key, val := range d {
			// If we don't have a user-configurable value for each option
			if _, ok := p.KubeletConfig[key]; !ok {
				// then assign the default value
				p.KubeletConfig[key] = val
			}
		}
	}
}

// copyMap returns a shallow copy of input.
func copyMap(input map[string]string) map[string]string {
	copy := map[string]string{}
	for key, value := range input {
		copy[key] = value
	}
	return copy
}
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package componentconfig

import "k8s.io/kubernetes/pkg/api/unversioned"

// KubeProxyConfiguration holds the configurable settings of kube-proxy.
type KubeProxyConfiguration struct {
	unversioned.TypeMeta

	// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
	// for all interfaces)
	BindAddress string `json:"bindAddress"`
	// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
	// bridge traffic coming from outside of the cluster. If not provided,
	// no off-cluster bridging will be performed.
	ClusterCIDR string `json:"clusterCIDR"`
	// healthzBindAddress is the IP address for the health check server to serve on,
	// defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
	HealthzBindAddress string `json:"healthzBindAddress"`
	// healthzPort is the port to bind the health check server. Use 0 to disable.
	HealthzPort int32 `json:"healthzPort"`
	// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
	HostnameOverride string `json:"hostnameOverride"`
	// iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
	// the pure iptables proxy mode. Values must be within the range [0, 31].
	IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"`
	// iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
	// '2h22m'). Must be greater than 0.
	IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"`
	// kubeconfigPath is the path to the kubeconfig file with authorization information (the
	// master location is set by the master flag).
	KubeconfigPath string `json:"kubeconfigPath"`
	// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
	MasqueradeAll bool `json:"masqueradeAll"`
	// master is the address of the Kubernetes API server (overrides any value in kubeconfig)
	Master string `json:"master"`
	// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
	// the range [-1000, 1000]
	OOMScoreAdj *int32 `json:"oomScoreAdj"`
	// mode specifies which proxy mode to use.
	Mode ProxyMode `json:"mode"`
	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
	PortRange string `json:"portRange"`
	// resourceContainer is the absolute name of the resource-only container to create and run
	// the Kube-proxy in (Default: /kube-proxy).
	// NOTE(review): json tag "kubeletCgroups" looks like a copy/paste mismatch
	// for this field — confirm against the serialized format before changing.
	ResourceContainer string `json:"kubeletCgroups"`
	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
	// Must be greater than 0. Only applicable for proxyMode=userspace.
	UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
	// conntrackMax is the maximum number of NAT connections to track (0 to leave as-is)")
	ConntrackMax int32 `json:"conntrackMax"`
	// conntrackTCPEstablishedTimeout is how long an idle TCP connection in the
	// ESTABLISHED state will be kept open
	// (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxyMode is Userspace
	ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
}

// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the
// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the
// best-available proxy (currently iptables, but may change in future versions). If the
// iptables proxy is selected, regardless of how, but the system's kernel or iptables
// versions are insufficient, this always falls back to the userspace proxy.
type ProxyMode string

const (
	ProxyModeUserspace ProxyMode = "userspace"
	ProxyModeIPTables  ProxyMode = "iptables"
)

// HairpinMode denotes how the kubelet should configure networking to handle
// hairpin packets.
type HairpinMode string

// Enum settings for different ways to handle hairpin packets.
const (
	// Set the hairpin flag on the veth of containers in the respective
	// container runtime.
	HairpinVeth = "hairpin-veth"
	// Make the container bridge promiscuous. This will force it to accept
	// hairpin packets, even if the flag isn't set on ports of the bridge.
	PromiscuousBridge = "promiscuous-bridge"
	// Neither of the above. If the kubelet is started in this hairpin mode
	// and kube-proxy is running in iptables mode, hairpin packets will be
	// dropped by the container bridge.
HairpinNone = "none" ) // TODO: curate the ordering and structure of this config object type KubeletConfiguration struct { // config is the path to the config file or directory of files Config string `json:"config"` // syncFrequency is the max period between synchronizing running // containers and config SyncFrequency unversioned.Duration `json:"syncFrequency"` // fileCheckFrequency is the duration between checking config files for // new data FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"` // httpCheckFrequency is the duration between checking http for new data HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"` // manifestURL is the URL for accessing the container manifest ManifestURL string `json:"manifestURL"` // manifestURLHeader is the HTTP header to use when accessing the manifest // URL, with the key separated from the value with a ':', as in 'key:value' ManifestURLHeader string `json:"manifestURLHeader"` // enableServer enables the Kubelet's server EnableServer bool `json:"enableServer"` // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 // for all interfaces) Address string `json:"address"` // port is the port for the Kubelet to serve on. Port uint `json:"port"` // readOnlyPort is the read-only port for the Kubelet to serve on with // no authentication/authorization (set to 0 to disable) ReadOnlyPort uint `json:"readOnlyPort"` // tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert, // if any, concatenated after server cert). If tlsCertFile and // tlsPrivateKeyFile are not provided, a self-signed certificate // and key are generated for the public address and saved to the directory // passed to certDir. TLSCertFile string `json:"tLSCertFile"` // tLSPrivateKeyFile is the ile containing x509 private key matching // tlsCertFile. TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"` // certDirectory is the directory where the TLS certs are located (by // default /var/run/kubernetes). 
If tlsCertFile and tlsPrivateKeyFile // are provided, this flag will be ignored. CertDirectory string `json:"certDirectory"` // hostnameOverride is the hostname used to identify the kubelet instead // of the actual hostname. HostnameOverride string `json:"hostnameOverride"` // podInfraContainerImage is the image whose network/ipc namespaces // containers in each pod will use. PodInfraContainerImage string `json:"podInfraContainerImage"` // dockerEndpoint is the path to the docker endpoint to communicate with. DockerEndpoint string `json:"dockerEndpoint"` // rootDirectory is the directory path to place kubelet files (volume // mounts,etc). RootDirectory string `json:"rootDirectory"` // seccompProfileRoot is the directory path for seccomp profiles. SeccompProfileRoot string `json:"seccompProfileRoot"` // allowPrivileged enables containers to request privileged mode. // Defaults to false. AllowPrivileged bool `json:"allowPrivileged"` // hostNetworkSources is a comma-separated list of sources from which the // Kubelet allows pods to use of host network. Defaults to "*". HostNetworkSources string `json:"hostNetworkSources"` // hostPIDSources is a comma-separated list of sources from which the // Kubelet allows pods to use the host pid namespace. Defaults to "*". HostPIDSources string `json:"hostPIDSources"` // hostIPCSources is a comma-separated list of sources from which the // Kubelet allows pods to use the host ipc namespace. Defaults to "*". HostIPCSources string `json:"hostIPCSources"` // registryPullQPS is the limit of registry pulls per second. If 0, // unlimited. Set to 0 for no limit. Defaults to 5.0. RegistryPullQPS float64 `json:"registryPullQPS"` // registryBurst is the maximum size of a bursty pulls, temporarily allows // pulls to burst to this number, while still not exceeding registryQps. // Only used if registryQps > 0. RegistryBurst int32 `json:"registryBurst"` // eventRecordQPS is the maximum event creations per second. 
If 0, there // is no limit enforced. EventRecordQPS float32 `json:"eventRecordQPS"` // eventBurst is the maximum size of a bursty event records, temporarily // allows event records to burst to this number, while still not exceeding // event-qps. Only used if eventQps > 0 EventBurst int32 `json:"eventBurst"` // enableDebuggingHandlers enables server endpoints for log collection // and local running of containers and commands EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"` // minimumGCAge is the minimum age for a finished container before it is // garbage collected. MinimumGCAge unversioned.Duration `json:"minimumGCAge"` // maxPerPodContainerCount is the maximum number of old instances to // retain per container. Each container takes up some disk space. MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"` // maxContainerCount is the maximum number of old instances of containers // to retain globally. Each container takes up some disk space. MaxContainerCount int32 `json:"maxContainerCount"` // cAdvisorPort is the port of the localhost cAdvisor endpoint CAdvisorPort uint `json:"cAdvisorPort"` // healthzPort is the port of the localhost healthz endpoint HealthzPort int32 `json:"healthzPort"` // healthzBindAddress is the IP address for the healthz server to serve // on. HealthzBindAddress string `json:"healthzBindAddress"` // oomScoreAdj is The oom-score-adj value for kubelet process. Values // must be within the range [-1000, 1000]. OOMScoreAdj int32 `json:"oomScoreAdj"` // registerNode enables automatic registration with the apiserver. RegisterNode bool `json:"registerNode"` // clusterDomain is the DNS domain for this cluster. If set, kubelet will // configure all containers to search this domain in addition to the // host's search domains. ClusterDomain string `json:"clusterDomain"` // masterServiceNamespace is The namespace from which the kubernetes // master services should be injected into pods. 
MasterServiceNamespace string `json:"masterServiceNamespace"` // clusterDNS is the IP address for a cluster DNS server. If set, kubelet // will configure all containers to use this for DNS resolution in // addition to the host's DNS servers ClusterDNS string `json:"clusterDNS"` // streamingConnectionIdleTimeout is the maximum time a streaming connection // can be idle before the connection is automatically closed. StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"` // nodeStatusUpdateFrequency is the frequency that kubelet posts node // status to master. Note: be cautious when changing the constant, it // must work with nodeMonitorGracePeriod in nodecontroller. NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"` // minimumGCAge is the minimum age for a unused image before it is // garbage collected. ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"` // imageGCHighThresholdPercent is the percent of disk usage after which // image garbage collection is always run. ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"` // imageGCLowThresholdPercent is the percent of disk usage before which // image garbage collection is never run. Lowest disk usage to garbage // collect to. ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"` // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to // maintain. When disk space falls below this threshold, new pods would // be rejected. 
LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"` // How frequently to calculate and cache volume disk usage for all pods VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"` // networkPluginName is the name of the network plugin to be invoked for // various events in kubelet/pod lifecycle NetworkPluginName string `json:"networkPluginName"` // networkPluginDir is the full path of the directory in which to search // for network plugins NetworkPluginDir string `json:"networkPluginDir"` // volumePluginDir is the full path of the directory in which to search // for additional third party volume plugins VolumePluginDir string `json:"volumePluginDir"` // cloudProvider is the provider for cloud services. CloudProvider string `json:"cloudProvider,omitempty"` // cloudConfigFile is the path to the cloud provider configuration file. CloudConfigFile string `json:"cloudConfigFile,omitempty"` // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in. KubeletCgroups string `json:"kubeletCgroups,omitempty"` // Cgroups that container runtime is expected to be isolated in. RuntimeCgroups string `json:"runtimeCgroups,omitempty"` // SystemCgroups is absolute name of cgroups in which to place // all non-kernel processes that are not already in a container. Empty // for no container. Rolling back the flag requires a reboot. SystemCgroups string `json:"systemContainer,omitempty"` // cgroupRoot is the root cgroup to use for pods. This is handled by the // container runtime on a best effort basis. CgroupRoot string `json:"cgroupRoot,omitempty"` // containerRuntime is the container runtime to use. ContainerRuntime string `json:"containerRuntime"` // rktPath is the path of rkt binary. Leave empty to use the first rkt in // $PATH. RktPath string `json:"rktPath,omitempty"` // rktApiEndpoint is the endpoint of the rkt API service to communicate with. 
RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"` // rktStage1Image is the image to use as stage1. Local paths and // http/https URLs are supported. RktStage1Image string `json:"rktStage1Image,omitempty"` // lockFilePath is the path that kubelet will use to as a lock file. // It uses this file as a lock to synchronize with other kubelet processes // that may be running. LockFilePath string `json:"lockFilePath"` // ExitOnLockContention is a flag that signifies to the kubelet that it is running // in "bootstrap" mode. This requires that 'LockFilePath' has been set. // This will cause the kubelet to listen to inotify events on the lock file, // releasing it and exiting when another process tries to open that file. ExitOnLockContention bool `json:"exitOnLockContention"` // configureCBR0 enables the kublet to configure cbr0 based on // Node.Spec.PodCIDR. ConfigureCBR0 bool `json:"configureCbr0"` // How should the kubelet configure the container bridge for hairpin packets. // Setting this flag allows endpoints in a Service to loadbalance back to // themselves if they should try to access their own Service. Values: // "promiscuous-bridge": make the container bridge promiscuous. // "hairpin-veth": set the hairpin flag on container veth interfaces. // "none": do nothing. // Setting --configure-cbr0 to false implies that to achieve hairpin NAT // one must set --hairpin-mode=veth-flag, because bridge assumes the // existence of a container bridge named cbr0. HairpinMode string `json:"hairpinMode"` // The node has babysitter process monitoring docker and kubelet. BabysitDaemons bool `json:"babysitDaemons"` // maxPods is the number of pods that can run on this Kubelet. MaxPods int32 `json:"maxPods"` // nvidiaGPUs is the number of NVIDIA GPU devices on this node. NvidiaGPUs int32 `json:"nvidiaGPUs"` // dockerExecHandlerName is the handler to use when executing a command // in a container. Valid values are 'native' and 'nsenter'. Defaults to // 'native'. 
DockerExecHandlerName string `json:"dockerExecHandlerName"` // The CIDR to use for pod IP addresses, only used in standalone mode. // In cluster mode, this is obtained from the master. PodCIDR string `json:"podCIDR"` // ResolverConfig is the resolver configuration file used as the basis // for the container DNS resolution configuration."), [] ResolverConfig string `json:"resolvConf"` // cpuCFSQuota is Enable CPU CFS quota enforcement for containers that // specify CPU limits CPUCFSQuota bool `json:"cpuCFSQuota"` // containerized should be set to true if kubelet is running in a container. Containerized bool `json:"containerized"` // maxOpenFiles is Number of files that can be opened by Kubelet process. MaxOpenFiles uint64 `json:"maxOpenFiles"` // reconcileCIDR is Reconcile node CIDR with the CIDR specified by the // API server. No-op if register-node or configure-cbr0 is false. ReconcileCIDR bool `json:"reconcileCIDR"` // registerSchedulable tells the kubelet to register the node as // schedulable. No-op if register-node is false. RegisterSchedulable bool `json:"registerSchedulable"` // contentType is contentType of requests sent to apiserver. ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver KubeAPIQPS float32 `json:"kubeAPIQPS"` // kubeAPIBurst is the burst to allow while talking with kubernetes // apiserver KubeAPIBurst int32 `json:"kubeAPIBurst"` // serializeImagePulls when enabled, tells the Kubelet to pull images one // at a time. We recommend *not* changing the default value on nodes that // run docker daemon with version < 1.9 or an Aufs storage backend. // Issue #10959 has more details. SerializeImagePulls bool `json:"serializeImagePulls"` // experimentalFlannelOverlay enables experimental support for starting the // kubelet with the default overlay network (flannel). Assumes flanneld // is already running in client mode. 
	ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
	// outOfDiskTransitionFrequency is the duration for which the kubelet has to
	// wait before transitioning out of the out-of-disk node condition status.
	OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"`
	// nodeIP is the IP address of the node. If set, kubelet will use this IP
	// address for the node.
	NodeIP string `json:"nodeIP,omitempty"`
	// nodeLabels are labels to add when registering the node in the cluster.
	NodeLabels map[string]string `json:"nodeLabels"`
	// nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.
	NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"`
	// enableCustomMetrics enables gathering custom metrics.
	EnableCustomMetrics bool `json:"enableCustomMetrics"`
	// evictionHard is a comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'.
	EvictionHard string `json:"evictionHard,omitempty"`
	// evictionSoft is a comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'.
	EvictionSoft string `json:"evictionSoft,omitempty"`
	// evictionSoftGracePeriod is a comma-delimited list of grace periods for each soft eviction signal. For example, 'memory.available=30s'.
	EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"`
	// evictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
	EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"`
	// evictionMaxPodGracePeriod is the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
	EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"`
}

// KubeSchedulerConfiguration contains the configuration for the
// kube-scheduler component.
type KubeSchedulerConfiguration struct {
	unversioned.TypeMeta

	// port is the port that the scheduler's http service runs on.
	Port int32 `json:"port"`
	// address is the IP address to serve on.
	Address string `json:"address"`
	// algorithmProvider is the scheduling algorithm provider to use.
	AlgorithmProvider string `json:"algorithmProvider"`
	// policyConfigFile is the filepath to the scheduler policy configuration.
	PolicyConfigFile string `json:"policyConfigFile"`
	// enableProfiling enables profiling via web interface.
	EnableProfiling bool `json:"enableProfiling"`
	// contentType is the contentType of requests sent to apiserver.
	ContentType string `json:"contentType"`
	// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
	KubeAPIQPS float32 `json:"kubeAPIQPS"`
	// kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.
	KubeAPIBurst int32 `json:"kubeAPIBurst"`
	// schedulerName is the name of the scheduler, used to select which pods
	// will be processed by this scheduler, based on pod's annotation with
	// key 'scheduler.alpha.kubernetes.io/name'.
	SchedulerName string `json:"schedulerName"`
	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// HardPodAffinitySymmetricWeight represents the weight of the implicit PreferredDuringScheduling affinity rule, in the range 0-100.
	HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
	// failureDomains indicates the "all topologies" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
	FailureDomains string `json:"failureDomains"`
	// leaderElection defines the configuration of the leader election client.
	LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
}

// LeaderElectionConfiguration defines the configuration of leader election
// clients for components that can run with leader election enabled.
type LeaderElectionConfiguration struct {
	// leaderElect enables a leader election client to gain leadership
	// before executing the main loop. Enable this when running replicated
	// components for high availability.
	LeaderElect bool `json:"leaderElect"`
	// leaseDuration is the duration that non-leader candidates will wait
	// after observing a leadership renewal until attempting to acquire
	// leadership of a led but unrenewed leader slot. This is effectively the
	// maximum duration that a leader can be stopped before it is replaced
	// by another candidate. This is only applicable if leader election is
	// enabled.
	LeaseDuration unversioned.Duration `json:"leaseDuration"`
	// renewDeadline is the interval between attempts by the acting master to
	// renew a leadership slot before it stops leading. This must be less
	// than or equal to the lease duration. This is only applicable if leader
	// election is enabled.
	RenewDeadline unversioned.Duration `json:"renewDeadline"`
	// retryPeriod is the duration the clients should wait between attempting
	// acquisition and renewal of a leadership. This is only applicable if
	// leader election is enabled.
	RetryPeriod unversioned.Duration `json:"retryPeriod"`
}

// KubeControllerManagerConfiguration contains the configuration for the
// kube-controller-manager component.
type KubeControllerManagerConfiguration struct {
	unversioned.TypeMeta

	// port is the port that the controller-manager's http service runs on.
	Port int32 `json:"port"`
	// address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
	Address string `json:"address"`
	// cloudProvider is the provider for cloud services.
	CloudProvider string `json:"cloudProvider"`
	// cloudConfigFile is the path to the cloud provider configuration file.
	CloudConfigFile string `json:"cloudConfigFile"`
	// concurrentEndpointSyncs is the number of endpoint syncing operations
	// that will be done concurrently. Larger number = faster endpoint updating,
	// but more CPU (and network) load.
	ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"`
	// concurrentRSSyncs is the number of replica sets that are allowed to sync
	// concurrently. Larger number = more responsive replica management, but more
	// CPU (and network) load.
	ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"`
	// concurrentRCSyncs is the number of replication controllers that are
	// allowed to sync concurrently. Larger number = more responsive replica
	// management, but more CPU (and network) load.
	ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"`
	// concurrentResourceQuotaSyncs is the number of resource quotas that are
	// allowed to sync concurrently. Larger number = more responsive quota
	// management, but more CPU (and network) load.
	ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"`
	// concurrentDeploymentSyncs is the number of deployment objects that are
	// allowed to sync concurrently. Larger number = more responsive deployments,
	// but more CPU (and network) load.
	ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"`
	// concurrentDaemonSetSyncs is the number of daemonset objects that are
	// allowed to sync concurrently. Larger number = more responsive daemonsets,
	// but more CPU (and network) load.
	ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"`
	// concurrentJobSyncs is the number of job objects that are
	// allowed to sync concurrently. Larger number = more responsive jobs,
	// but more CPU (and network) load.
	ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"`
	// concurrentNamespaceSyncs is the number of namespace objects that are
	// allowed to sync concurrently.
	ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"`
	// lookupCacheSizeForRC is the size of the lookup cache for replication controllers.
	// Larger number = more responsive replica management, but more MEM load.
	LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"`
	// lookupCacheSizeForRS is the size of the lookup cache for replica sets.
	// Larger number = more responsive replica management, but more MEM load.
	LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"`
	// lookupCacheSizeForDaemonSet is the size of the lookup cache for daemonsets.
	// Larger number = more responsive daemonsets, but more MEM load.
	LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"`
	// serviceSyncPeriod is the period for syncing services with their external
	// load balancers.
	ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"`
	// nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
	// periods will result in fewer calls to the cloud provider, but may delay addition
	// of new nodes to the cluster.
	NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"`
	// resourceQuotaSyncPeriod is the period for syncing quota usage status
	// in the system.
	ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"`
	// namespaceSyncPeriod is the period for syncing namespace life-cycle
	// updates.
	NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"`
	// pvClaimBinderSyncPeriod is the period for syncing persistent volumes
	// and persistent volume claims.
	PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"`
	// minResyncPeriod is the resync period in reflectors; will be random between
	// minResyncPeriod and 2*minResyncPeriod.
	MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"`
	// terminatedPodGCThreshold is the number of terminated pods that can exist
	// before the terminated pod garbage collector starts deleting terminated pods.
	// If <= 0, the terminated pod garbage collector is disabled.
	TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"`
	// horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
	// pods in horizontal pod autoscaler.
	HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"`
	// deploymentControllerSyncPeriod is the period for syncing the deployments.
	DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"`
	// podEvictionTimeout is the grace period for deleting pods on failed nodes.
	PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"`
	// deletingPodsQps is the number of nodes per second on which pods are deleted in
	// case of node failure.
	DeletingPodsQps float32 `json:"deletingPodsQps"`
	// deletingPodsBurst is the number of nodes on which pods are deleted in bursts in
	// case of node failure. For more details look into RateLimiter.
	DeletingPodsBurst int32 `json:"deletingPodsBurst"`
	// nodeMonitorGracePeriod is the amount of time which we allow a running node to be
	// unresponsive before marking it unhealthy. Must be N times more than kubelet's
	// nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
	// to post node status.
	NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"`
	// registerRetryCount is the number of retries for initial node registration.
	// Retry interval equals node-sync-period.
	RegisterRetryCount int32 `json:"registerRetryCount"`
	// nodeStartupGracePeriod is the amount of time which we allow a starting node to
	// be unresponsive before marking it unhealthy.
	NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"`
	// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
	NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"`
	// serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
	// used to sign service account tokens.
	ServiceAccountKeyFile string `json:"serviceAccountKeyFile"`
	// enableProfiling enables profiling via the web interface host:port/debug/pprof/.
	EnableProfiling bool `json:"enableProfiling"`
	// clusterName is the instance prefix for the cluster.
	ClusterName string `json:"clusterName"`
	// clusterCIDR is the CIDR range for pods in the cluster.
	ClusterCIDR string `json:"clusterCIDR"`
	// serviceCIDR is the CIDR range for services in the cluster.
	ServiceCIDR string `json:"serviceCIDR"`
	// nodeCIDRMaskSize is the mask size for node cidrs in the cluster.
	NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"`
	// allocateNodeCIDRs enables CIDRs for pods to be allocated and set on the
	// cloud provider.
	AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"`
	// rootCAFile is the root certificate authority that will be included in the service
	// account's token secret. This must be a valid PEM-encoded CA bundle.
	RootCAFile string `json:"rootCAFile"`
	// contentType is the contentType of requests sent to apiserver.
	ContentType string `json:"contentType"`
	// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
	KubeAPIQPS float32 `json:"kubeAPIQPS"`
	// kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
	KubeAPIBurst int32 `json:"kubeAPIBurst"`
	// leaderElection defines the configuration of the leader election client.
	LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
	// volumeConfiguration holds configuration for volume-related features.
	VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"`
	// controllerStartInterval is how long to wait between starting controller managers.
	ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"`
}

// VolumeConfiguration contains *all* enumerated flags meant to configure all volume
// plugins. From this config, the controller-manager binary will create many instances of
// volume.VolumeConfig, each containing only the configuration needed for that plugin which
// are then passed to the appropriate plugin. The ControllerManager binary is the only part
// of the code which knows what plugins are supported and which flags correspond to each plugin.
type VolumeConfiguration struct {
	// enableHostPathProvisioning enables HostPath PV provisioning when running without a
	// cloud provider. This allows testing and development of provisioning features. HostPath
	// provisioning is not supported in any way, won't work in a multi-node cluster, and
	// should not be used for anything other than testing or development.
	EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"`
	// persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins.
	// NOTE(review): the json tag misspells "persistent"; it is kept as-is because changing
	// it would break previously serialized configurations — confirm upstream before fixing.
	PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"`
	// volumePluginDir is the full path of the directory in which the flex
	// volume plugin should search for additional third party volume plugins
	FlexVolumePluginDir string `json:"flexVolumePluginDir"`
}

// PersistentVolumeRecyclerConfiguration holds the flags that configure the
// persistent volume recycler.
type PersistentVolumeRecyclerConfiguration struct {
	// maximumRetry is the number of retries the PV recycler will execute on failure to recycle
	// a PV.
	MaximumRetry int32 `json:"maximumRetry"`
	// minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler
	// pod.
	MinimumTimeoutNFS int32 `json:"minimumTimeoutNFS"`
	// podTemplateFilePathNFS is the file path to a pod definition used as a template for
	// NFS persistent volume recycling.
	PodTemplateFilePathNFS string `json:"podTemplateFilePathNFS"`
	// incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds
	// for an NFS scrubber pod.
	IncrementTimeoutNFS int32 `json:"incrementTimeoutNFS"`
	// podTemplateFilePathHostPath is the file path to a pod definition used as a template for
	// HostPath persistent volume recycling. This is for development and testing only and
	// will not work in a multi-node cluster.
	PodTemplateFilePathHostPath string `json:"podTemplateFilePathHostPath"`
	// minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath
	// Recycler pod. This is for development and testing only and will not work in a multi-node
	// cluster.
	MinimumTimeoutHostPath int32 `json:"minimumTimeoutHostPath"`
	// incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds
	// for a HostPath scrubber pod. This is for development and testing only and will not work
	// in a multi-node cluster.
IncrementTimeoutHostPath int32 `json:"incrementTimeoutHostPath"` } kube-controller-manager: Add configure-cloud-routes option This allows kube-controller-manager to allocate CIDRs to nodes (with allocate-node-cidrs=true), but will not try to configure them on the cloud provider, even if the cloud provider supports Routes. The default is configure-cloud-routes=true, and it will only try to configure routes if allocate-node-cidrs is also configured, so the default behaviour is unchanged. This is useful because on AWS the cloud provider configures routes by setting up VPC routing table entries, but there is a limit of 50 entries. So setting configure-cloud-routes on AWS would allow us to continue to allocate node CIDRs as today, but replace the VPC route-table mechanism with something not limited to 50 nodes. We can't just turn off the cloud-provider entirely because it also controls other things - node discovery, load balancer creation etc. Fix #25602 Kubernetes-commit: b754393630362280ef13591813b1817eabc9f939 /* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package componentconfig import "k8s.io/kubernetes/pkg/api/unversioned" type KubeProxyConfiguration struct { unversioned.TypeMeta // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 // for all interfaces) BindAddress string `json:"bindAddress"` // clusterCIDR is the CIDR range of the pods in the cluster. 
	// It is used to
	// bridge traffic coming from outside of the cluster. If not provided,
	// no off-cluster bridging will be performed.
	ClusterCIDR string `json:"clusterCIDR"`
	// healthzBindAddress is the IP address for the health check server to serve on,
	// defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces).
	HealthzBindAddress string `json:"healthzBindAddress"`
	// healthzPort is the port to bind the health check server. Use 0 to disable.
	HealthzPort int32 `json:"healthzPort"`
	// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
	HostnameOverride string `json:"hostnameOverride"`
	// iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
	// the pure iptables proxy mode. Values must be within the range [0, 31].
	IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"`
	// iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
	// '2h22m'). Must be greater than 0.
	IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"`
	// kubeconfigPath is the path to the kubeconfig file with authorization information (the
	// master location is set by the master flag).
	KubeconfigPath string `json:"kubeconfigPath"`
	// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
	MasqueradeAll bool `json:"masqueradeAll"`
	// master is the address of the Kubernetes API server (overrides any value in kubeconfig).
	Master string `json:"master"`
	// oomScoreAdj is the oom-score-adj value for the kube-proxy process. Values must be within
	// the range [-1000, 1000].
	OOMScoreAdj *int32 `json:"oomScoreAdj"`
	// mode specifies which proxy mode to use.
	Mode ProxyMode `json:"mode"`
	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
	PortRange string `json:"portRange"`
	// resourceContainer is the absolute name of the resource-only container to create and run
	// the Kube-proxy in (Default: /kube-proxy).
	// NOTE(review): the json tag reads "kubeletCgroups", which looks like a copy/paste
	// mistake for "resourceContainer" — kept as-is because changing it would break
	// previously serialized configurations; confirm upstream.
	ResourceContainer string `json:"kubeletCgroups"`
	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
	// Must be greater than 0. Only applicable for proxyMode=userspace.
	UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
	// conntrackMax is the maximum number of NAT connections to track (0 to leave as-is).
	ConntrackMax int32 `json:"conntrackMax"`
	// conntrackTCPEstablishedTimeout is how long an idle connection in the TCP ESTABLISHED
	// state will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable
	// when proxyMode is Userspace.
	ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
}

// ProxyMode selects how kube-proxy forwards service traffic.
// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the
// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the
// best-available proxy (currently iptables, but may change in future versions). If the
// iptables proxy is selected, regardless of how, but the system's kernel or iptables
// versions are insufficient, this always falls back to the userspace proxy.
type ProxyMode string

const (
	ProxyModeUserspace ProxyMode = "userspace"
	ProxyModeIPTables  ProxyMode = "iptables"
)

// HairpinMode denotes how the kubelet should configure networking to handle
// hairpin packets.
type HairpinMode string

// Enum settings for different ways to handle hairpin packets.
const (
	// Set the hairpin flag on the veth of containers in the respective
	// container runtime.
	HairpinVeth = "hairpin-veth"
	// Make the container bridge promiscuous. This will force it to accept
	// hairpin packets, even if the flag isn't set on ports of the bridge.
PromiscuousBridge = "promiscuous-bridge" // Neither of the above. If the kubelet is started in this hairpin mode // and kube-proxy is running in iptables mode, hairpin packets will be // dropped by the container bridge. HairpinNone = "none" ) // TODO: curate the ordering and structure of this config object type KubeletConfiguration struct { // config is the path to the config file or directory of files Config string `json:"config"` // syncFrequency is the max period between synchronizing running // containers and config SyncFrequency unversioned.Duration `json:"syncFrequency"` // fileCheckFrequency is the duration between checking config files for // new data FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"` // httpCheckFrequency is the duration between checking http for new data HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"` // manifestURL is the URL for accessing the container manifest ManifestURL string `json:"manifestURL"` // manifestURLHeader is the HTTP header to use when accessing the manifest // URL, with the key separated from the value with a ':', as in 'key:value' ManifestURLHeader string `json:"manifestURLHeader"` // enableServer enables the Kubelet's server EnableServer bool `json:"enableServer"` // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 // for all interfaces) Address string `json:"address"` // port is the port for the Kubelet to serve on. Port uint `json:"port"` // readOnlyPort is the read-only port for the Kubelet to serve on with // no authentication/authorization (set to 0 to disable) ReadOnlyPort uint `json:"readOnlyPort"` // tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert, // if any, concatenated after server cert). If tlsCertFile and // tlsPrivateKeyFile are not provided, a self-signed certificate // and key are generated for the public address and saved to the directory // passed to certDir. 
TLSCertFile string `json:"tLSCertFile"` // tLSPrivateKeyFile is the ile containing x509 private key matching // tlsCertFile. TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"` // certDirectory is the directory where the TLS certs are located (by // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile // are provided, this flag will be ignored. CertDirectory string `json:"certDirectory"` // hostnameOverride is the hostname used to identify the kubelet instead // of the actual hostname. HostnameOverride string `json:"hostnameOverride"` // podInfraContainerImage is the image whose network/ipc namespaces // containers in each pod will use. PodInfraContainerImage string `json:"podInfraContainerImage"` // dockerEndpoint is the path to the docker endpoint to communicate with. DockerEndpoint string `json:"dockerEndpoint"` // rootDirectory is the directory path to place kubelet files (volume // mounts,etc). RootDirectory string `json:"rootDirectory"` // seccompProfileRoot is the directory path for seccomp profiles. SeccompProfileRoot string `json:"seccompProfileRoot"` // allowPrivileged enables containers to request privileged mode. // Defaults to false. AllowPrivileged bool `json:"allowPrivileged"` // hostNetworkSources is a comma-separated list of sources from which the // Kubelet allows pods to use of host network. Defaults to "*". HostNetworkSources string `json:"hostNetworkSources"` // hostPIDSources is a comma-separated list of sources from which the // Kubelet allows pods to use the host pid namespace. Defaults to "*". HostPIDSources string `json:"hostPIDSources"` // hostIPCSources is a comma-separated list of sources from which the // Kubelet allows pods to use the host ipc namespace. Defaults to "*". HostIPCSources string `json:"hostIPCSources"` // registryPullQPS is the limit of registry pulls per second. If 0, // unlimited. Set to 0 for no limit. Defaults to 5.0. 
RegistryPullQPS float64 `json:"registryPullQPS"` // registryBurst is the maximum size of a bursty pulls, temporarily allows // pulls to burst to this number, while still not exceeding registryQps. // Only used if registryQps > 0. RegistryBurst int32 `json:"registryBurst"` // eventRecordQPS is the maximum event creations per second. If 0, there // is no limit enforced. EventRecordQPS float32 `json:"eventRecordQPS"` // eventBurst is the maximum size of a bursty event records, temporarily // allows event records to burst to this number, while still not exceeding // event-qps. Only used if eventQps > 0 EventBurst int32 `json:"eventBurst"` // enableDebuggingHandlers enables server endpoints for log collection // and local running of containers and commands EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"` // minimumGCAge is the minimum age for a finished container before it is // garbage collected. MinimumGCAge unversioned.Duration `json:"minimumGCAge"` // maxPerPodContainerCount is the maximum number of old instances to // retain per container. Each container takes up some disk space. MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"` // maxContainerCount is the maximum number of old instances of containers // to retain globally. Each container takes up some disk space. MaxContainerCount int32 `json:"maxContainerCount"` // cAdvisorPort is the port of the localhost cAdvisor endpoint CAdvisorPort uint `json:"cAdvisorPort"` // healthzPort is the port of the localhost healthz endpoint HealthzPort int32 `json:"healthzPort"` // healthzBindAddress is the IP address for the healthz server to serve // on. HealthzBindAddress string `json:"healthzBindAddress"` // oomScoreAdj is The oom-score-adj value for kubelet process. Values // must be within the range [-1000, 1000]. OOMScoreAdj int32 `json:"oomScoreAdj"` // registerNode enables automatic registration with the apiserver. 
RegisterNode bool `json:"registerNode"` // clusterDomain is the DNS domain for this cluster. If set, kubelet will // configure all containers to search this domain in addition to the // host's search domains. ClusterDomain string `json:"clusterDomain"` // masterServiceNamespace is The namespace from which the kubernetes // master services should be injected into pods. MasterServiceNamespace string `json:"masterServiceNamespace"` // clusterDNS is the IP address for a cluster DNS server. If set, kubelet // will configure all containers to use this for DNS resolution in // addition to the host's DNS servers ClusterDNS string `json:"clusterDNS"` // streamingConnectionIdleTimeout is the maximum time a streaming connection // can be idle before the connection is automatically closed. StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"` // nodeStatusUpdateFrequency is the frequency that kubelet posts node // status to master. Note: be cautious when changing the constant, it // must work with nodeMonitorGracePeriod in nodecontroller. NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"` // minimumGCAge is the minimum age for a unused image before it is // garbage collected. ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"` // imageGCHighThresholdPercent is the percent of disk usage after which // image garbage collection is always run. ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"` // imageGCLowThresholdPercent is the percent of disk usage before which // image garbage collection is never run. Lowest disk usage to garbage // collect to. ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"` // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to // maintain. When disk space falls below this threshold, new pods would // be rejected. 
LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"` // How frequently to calculate and cache volume disk usage for all pods VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"` // networkPluginName is the name of the network plugin to be invoked for // various events in kubelet/pod lifecycle NetworkPluginName string `json:"networkPluginName"` // networkPluginDir is the full path of the directory in which to search // for network plugins NetworkPluginDir string `json:"networkPluginDir"` // volumePluginDir is the full path of the directory in which to search // for additional third party volume plugins VolumePluginDir string `json:"volumePluginDir"` // cloudProvider is the provider for cloud services. CloudProvider string `json:"cloudProvider,omitempty"` // cloudConfigFile is the path to the cloud provider configuration file. CloudConfigFile string `json:"cloudConfigFile,omitempty"` // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in. KubeletCgroups string `json:"kubeletCgroups,omitempty"` // Cgroups that container runtime is expected to be isolated in. RuntimeCgroups string `json:"runtimeCgroups,omitempty"` // SystemCgroups is absolute name of cgroups in which to place // all non-kernel processes that are not already in a container. Empty // for no container. Rolling back the flag requires a reboot. SystemCgroups string `json:"systemContainer,omitempty"` // cgroupRoot is the root cgroup to use for pods. This is handled by the // container runtime on a best effort basis. CgroupRoot string `json:"cgroupRoot,omitempty"` // containerRuntime is the container runtime to use. ContainerRuntime string `json:"containerRuntime"` // rktPath is the path of rkt binary. Leave empty to use the first rkt in // $PATH. RktPath string `json:"rktPath,omitempty"` // rktApiEndpoint is the endpoint of the rkt API service to communicate with. 
RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"` // rktStage1Image is the image to use as stage1. Local paths and // http/https URLs are supported. RktStage1Image string `json:"rktStage1Image,omitempty"` // lockFilePath is the path that kubelet will use to as a lock file. // It uses this file as a lock to synchronize with other kubelet processes // that may be running. LockFilePath string `json:"lockFilePath"` // ExitOnLockContention is a flag that signifies to the kubelet that it is running // in "bootstrap" mode. This requires that 'LockFilePath' has been set. // This will cause the kubelet to listen to inotify events on the lock file, // releasing it and exiting when another process tries to open that file. ExitOnLockContention bool `json:"exitOnLockContention"` // configureCBR0 enables the kublet to configure cbr0 based on // Node.Spec.PodCIDR. ConfigureCBR0 bool `json:"configureCbr0"` // How should the kubelet configure the container bridge for hairpin packets. // Setting this flag allows endpoints in a Service to loadbalance back to // themselves if they should try to access their own Service. Values: // "promiscuous-bridge": make the container bridge promiscuous. // "hairpin-veth": set the hairpin flag on container veth interfaces. // "none": do nothing. // Setting --configure-cbr0 to false implies that to achieve hairpin NAT // one must set --hairpin-mode=veth-flag, because bridge assumes the // existence of a container bridge named cbr0. HairpinMode string `json:"hairpinMode"` // The node has babysitter process monitoring docker and kubelet. BabysitDaemons bool `json:"babysitDaemons"` // maxPods is the number of pods that can run on this Kubelet. MaxPods int32 `json:"maxPods"` // nvidiaGPUs is the number of NVIDIA GPU devices on this node. NvidiaGPUs int32 `json:"nvidiaGPUs"` // dockerExecHandlerName is the handler to use when executing a command // in a container. Valid values are 'native' and 'nsenter'. Defaults to // 'native'. 
DockerExecHandlerName string `json:"dockerExecHandlerName"` // The CIDR to use for pod IP addresses, only used in standalone mode. // In cluster mode, this is obtained from the master. PodCIDR string `json:"podCIDR"` // ResolverConfig is the resolver configuration file used as the basis // for the container DNS resolution configuration."), [] ResolverConfig string `json:"resolvConf"` // cpuCFSQuota is Enable CPU CFS quota enforcement for containers that // specify CPU limits CPUCFSQuota bool `json:"cpuCFSQuota"` // containerized should be set to true if kubelet is running in a container. Containerized bool `json:"containerized"` // maxOpenFiles is Number of files that can be opened by Kubelet process. MaxOpenFiles uint64 `json:"maxOpenFiles"` // reconcileCIDR is Reconcile node CIDR with the CIDR specified by the // API server. No-op if register-node or configure-cbr0 is false. ReconcileCIDR bool `json:"reconcileCIDR"` // registerSchedulable tells the kubelet to register the node as // schedulable. No-op if register-node is false. RegisterSchedulable bool `json:"registerSchedulable"` // contentType is contentType of requests sent to apiserver. ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver KubeAPIQPS float32 `json:"kubeAPIQPS"` // kubeAPIBurst is the burst to allow while talking with kubernetes // apiserver KubeAPIBurst int32 `json:"kubeAPIBurst"` // serializeImagePulls when enabled, tells the Kubelet to pull images one // at a time. We recommend *not* changing the default value on nodes that // run docker daemon with version < 1.9 or an Aufs storage backend. // Issue #10959 has more details. SerializeImagePulls bool `json:"serializeImagePulls"` // experimentalFlannelOverlay enables experimental support for starting the // kubelet with the default overlay network (flannel). Assumes flanneld // is already running in client mode. 
ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"` // outOfDiskTransitionFrequency is duration for which the kubelet has to // wait before transitioning out of out-of-disk node condition status. OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"` // nodeIP is IP address of the node. If set, kubelet will use this IP // address for the node. NodeIP string `json:"nodeIP,omitempty"` // nodeLabels to add when registering the node in the cluster. NodeLabels map[string]string `json:"nodeLabels"` // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade. NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"` // enable gathering custom metrics. EnableCustomMetrics bool `json:"enableCustomMetrics"` // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'. EvictionHard string `json:"evictionHard,omitempty"` // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'. EvictionSoft string `json:"evictionSoft,omitempty"` // Comma-delimeted list of grace periods for each soft eviction signal. For example, 'memory.available=30s'. EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"` // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"` // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"` } type KubeSchedulerConfiguration struct { unversioned.TypeMeta // port is the port that the scheduler's http service runs on. Port int32 `json:"port"` // address is the IP address to serve on. Address string `json:"address"` // algorithmProvider is the scheduling algorithm provider to use. 
AlgorithmProvider string `json:"algorithmProvider"` // policyConfigFile is the filepath to the scheduler policy configuration. PolicyConfigFile string `json:"policyConfigFile"` // enableProfiling enables profiling via web interface. EnableProfiling bool `json:"enableProfiling"` // contentType is contentType of requests sent to apiserver. ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. KubeAPIQPS float32 `json:"kubeAPIQPS"` // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. KubeAPIBurst int32 `json:"kubeAPIBurst"` // schedulerName is name of the scheduler, used to select which pods // will be processed by this scheduler, based on pod's annotation with // key 'scheduler.alpha.kubernetes.io/name'. SchedulerName string `json:"schedulerName"` // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. FailureDomains string `json:"failureDomains"` // leaderElection defines the configuration of leader election client. LeaderElection LeaderElectionConfiguration `json:"leaderElection"` } // LeaderElectionConfiguration defines the configuration of leader election // clients for components that can run with leader election enabled. type LeaderElectionConfiguration struct { // leaderElect enables a leader election client to gain leadership // before executing the main loop. Enable this when running replicated // components for high availability. 
LeaderElect bool `json:"leaderElect"` // leaseDuration is the duration that non-leader candidates will wait // after observing a leadership renewal until attempting to acquire // leadership of a led but unrenewed leader slot. This is effectively the // maximum duration that a leader can be stopped before it is replaced // by another candidate. This is only applicable if leader election is // enabled. LeaseDuration unversioned.Duration `json:"leaseDuration"` // renewDeadline is the interval between attempts by the acting master to // renew a leadership slot before it stops leading. This must be less // than or equal to the lease duration. This is only applicable if leader // election is enabled. RenewDeadline unversioned.Duration `json:"renewDeadline"` // retryPeriod is the duration the clients should wait between attempting // acquisition and renewal of a leadership. This is only applicable if // leader election is enabled. RetryPeriod unversioned.Duration `json:"retryPeriod"` } type KubeControllerManagerConfiguration struct { unversioned.TypeMeta // port is the port that the controller-manager's http service runs on. Port int32 `json:"port"` // address is the IP address to serve on (set to 0.0.0.0 for all interfaces). Address string `json:"address"` // cloudProvider is the provider for cloud services. CloudProvider string `json:"cloudProvider"` // cloudConfigFile is the path to the cloud provider configuration file. CloudConfigFile string `json:"cloudConfigFile"` // concurrentEndpointSyncs is the number of endpoint syncing operations // that will be done concurrently. Larger number = faster endpoint updating, // but more CPU (and network) load. ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"` // concurrentRSSyncs is the number of replica sets that are allowed to sync // concurrently. Larger number = more responsive replica management, but more // CPU (and network) load. 
ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"` // concurrentRCSyncs is the number of replication controllers that are // allowed to sync concurrently. Larger number = more responsive replica // management, but more CPU (and network) load. ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"` // concurrentResourceQuotaSyncs is the number of resource quotas that are // allowed to sync concurrently. Larger number = more responsive quota // management, but more CPU (and network) load. ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"` // concurrentDeploymentSyncs is the number of deployment objects that are // allowed to sync concurrently. Larger number = more responsive deployments, // but more CPU (and network) load. ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"` // concurrentDaemonSetSyncs is the number of daemonset objects that are // allowed to sync concurrently. Larger number = more responsive daemonset, // but more CPU (and network) load. ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"` // concurrentJobSyncs is the number of job objects that are // allowed to sync concurrently. Larger number = more responsive jobs, // but more CPU (and network) load. ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"` // concurrentNamespaceSyncs is the number of namespace objects that are // allowed to sync concurrently. ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"` // lookupCacheSizeForRC is the size of lookup cache for replication controllers. // Larger number = more responsive replica management, but more MEM load. LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"` // lookupCacheSizeForRS is the size of lookup cache for replicatsets. // Larger number = more responsive replica management, but more MEM load. LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"` // lookupCacheSizeForDaemonSet is the size of lookup cache for daemonsets. 
// Larger number = more responsive daemonset, but more MEM load. LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"` // serviceSyncPeriod is the period for syncing services with their external // load balancers. ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"` // nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer // periods will result in fewer calls to cloud provider, but may delay addition // of new nodes to cluster. NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"` // resourceQuotaSyncPeriod is the period for syncing quota usage status // in the system. ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"` // namespaceSyncPeriod is the period for syncing namespace life-cycle // updates. NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"` // pvClaimBinderSyncPeriod is the period for syncing persistent volumes // and persistent volume claims. PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"` // minResyncPeriod is the resync period in reflectors; will be random between // minResyncPeriod and 2*minResyncPeriod. MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"` // terminatedPodGCThreshold is the number of terminated pods that can exist // before the terminated pod garbage collector starts deleting terminated pods. // If <= 0, the terminated pod garbage collector is disabled. TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"` // horizontalPodAutoscalerSyncPeriod is the period for syncing the number of // pods in horizontal pod autoscaler. HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"` // deploymentControllerSyncPeriod is the period for syncing the deployments. DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"` // podEvictionTimeout is the grace period for deleting pods on failed nodes. 
PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"` // deletingPodsQps is the number of nodes per second on which pods are deleted in // case of node failure. DeletingPodsQps float32 `json:"deletingPodsQps"` // deletingPodsBurst is the number of nodes on which pods are bursty deleted in // case of node failure. For more details look into RateLimiter. DeletingPodsBurst int32 `json:"deletingPodsBurst"` // nodeMontiorGracePeriod is the amount of time which we allow a running node to be // unresponsive before marking it unhealty. Must be N times more than kubelet's // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet // to post node status. NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"` // registerRetryCount is the number of retries for initial node registration. // Retry interval equals node-sync-period. RegisterRetryCount int32 `json:"registerRetryCount"` // nodeStartupGracePeriod is the amount of time which we allow starting a node to // be unresponsive before marking it unhealty. NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"` // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController. NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"` // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key // used to sign service account tokens. ServiceAccountKeyFile string `json:"serviceAccountKeyFile"` // enableProfiling enables profiling via web interface host:port/debug/pprof/ EnableProfiling bool `json:"enableProfiling"` // clusterName is the instance prefix for the cluster. ClusterName string `json:"clusterName"` // clusterCIDR is CIDR Range for Pods in cluster. ClusterCIDR string `json:"clusterCIDR"` // serviceCIDR is CIDR Range for Services in cluster. ServiceCIDR string `json:"serviceCIDR"` // NodeCIDRMaskSize is the mask size for node cidr in cluster. 
NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"` // allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if // ConfigureCloudRoutes is true, to be set on the cloud provider. AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"` // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs // to be configured on the cloud provider. ConfigureCloudRoutes bool `json:"configureCloudRoutes"` // rootCAFile is the root certificate authority will be included in service // account's token secret. This must be a valid PEM-encoded CA bundle. RootCAFile string `json:"rootCAFile"` // contentType is contentType of requests sent to apiserver. ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. KubeAPIQPS float32 `json:"kubeAPIQPS"` // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. KubeAPIBurst int32 `json:"kubeAPIBurst"` // leaderElection defines the configuration of leader election client. LeaderElection LeaderElectionConfiguration `json:"leaderElection"` // volumeConfiguration holds configuration for volume related features. VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"` // How long to wait between starting controller managers ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"` } // VolumeConfiguration contains *all* enumerated flags meant to configure all volume // plugins. From this config, the controller-manager binary will create many instances of // volume.VolumeConfig, each containing only the configuration needed for that plugin which // are then passed to the appropriate plugin. The ControllerManager binary is the only part // of the code which knows what plugins are supported and which flags correspond to each plugin. type VolumeConfiguration struct { // enableHostPathProvisioning enables HostPath PV provisioning when running without a // cloud provider. This allows testing and development of provisioning features. 
HostPath // provisioning is not supported in any way, won't work in a multi-node cluster, and // should not be used for anything other than testing or development. EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"` // persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins. PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"` // volumePluginDir is the full path of the directory in which the flex // volume plugin should search for additional third party volume plugins FlexVolumePluginDir string `json:"flexVolumePluginDir"` } type PersistentVolumeRecyclerConfiguration struct { // maximumRetry is number of retries the PV recycler will execute on failure to recycle // PV. MaximumRetry int32 `json:"maximumRetry"` // minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler // pod. MinimumTimeoutNFS int32 `json:"minimumTimeoutNFS"` // podTemplateFilePathNFS is the file path to a pod definition used as a template for // NFS persistent volume recycling PodTemplateFilePathNFS string `json:"podTemplateFilePathNFS"` // incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds // for an NFS scrubber pod. IncrementTimeoutNFS int32 `json:"incrementTimeoutNFS"` // podTemplateFilePathHostPath is the file path to a pod definition used as a template for // HostPath persistent volume recycling. This is for development and testing only and // will not work in a multi-node cluster. PodTemplateFilePathHostPath string `json:"podTemplateFilePathHostPath"` // minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath // Recycler pod. This is for development and testing only and will not work in a multi-node // cluster. MinimumTimeoutHostPath int32 `json:"minimumTimeoutHostPath"` // incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds // for a HostPath scrubber pod. 
This is for development and testing only and will not work // in a multi-node cluster. IncrementTimeoutHostPath int32 `json:"incrementTimeoutHostPath"` }
/* Copyright 2017 the Heptio Ark contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package backup import ( "archive/tar" "encoding/json" "fmt" "reflect" "testing" "time" "github.com/heptio/ark/pkg/apis/ark/v1" api "github.com/heptio/ark/pkg/apis/ark/v1" "github.com/heptio/ark/pkg/util/collections" arktest "github.com/heptio/ark/pkg/util/test" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) func TestBackupItemSkips(t *testing.T) { tests := []struct { testName string namespace string name string namespaces *collections.IncludesExcludes groupResource schema.GroupResource resources *collections.IncludesExcludes backedUpItems map[itemKey]struct{} }{ { testName: "namespace not in includes list", namespace: "ns", name: "foo", namespaces: collections.NewIncludesExcludes().Includes("a"), }, { testName: "namespace in excludes list", namespace: "ns", name: "foo", namespaces: collections.NewIncludesExcludes().Excludes("ns"), }, { testName: "resource not in includes list", namespace: "ns", name: "foo", groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, namespaces: collections.NewIncludesExcludes(), resources: collections.NewIncludesExcludes().Includes("a.b"), }, { 
testName: "resource in excludes list", namespace: "ns", name: "foo", groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, namespaces: collections.NewIncludesExcludes(), resources: collections.NewIncludesExcludes().Excludes("bar.foo"), }, { testName: "resource already backed up", namespace: "ns", name: "foo", groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, namespaces: collections.NewIncludesExcludes(), resources: collections.NewIncludesExcludes(), backedUpItems: map[itemKey]struct{}{ {resource: "bar.foo", namespace: "ns", name: "foo"}: {}, }, }, } for _, test := range tests { t.Run(test.testName, func(t *testing.T) { ib := &defaultItemBackupper{ namespaces: test.namespaces, resources: test.resources, backedUpItems: test.backedUpItems, } u := unstructuredOrDie(fmt.Sprintf(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"%s","name":"%s"}}`, test.namespace, test.name)) err := ib.backupItem(arktest.NewLogger(), u, test.groupResource) assert.NoError(t, err) }) } } func TestBackupItemSkipsClusterScopedResourceWhenIncludeClusterResourcesFalse(t *testing.T) { f := false ib := &defaultItemBackupper{ backup: &v1.Backup{ Spec: v1.BackupSpec{ IncludeClusterResources: &f, }, }, namespaces: collections.NewIncludesExcludes(), resources: collections.NewIncludesExcludes(), } u := unstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`) err := ib.backupItem(arktest.NewLogger(), u, schema.GroupResource{Group: "foo", Resource: "bar"}) assert.NoError(t, err) } func TestBackupItemNoSkips(t *testing.T) { tests := []struct { name string item string namespaceIncludesExcludes *collections.IncludesExcludes expectError bool expectExcluded bool expectedTarHeaderName string tarWriteError bool tarHeaderWriteError bool customAction bool expectedActionID string customActionAdditionalItemIdentifiers []ResourceIdentifier customActionAdditionalItems []runtime.Unstructured groupResource string snapshottableVolumes 
map[string]api.VolumeBackupInfo snapshotError error }{ { name: "explicit namespace include", item: `{"metadata":{"namespace":"foo","name":"bar"}}`, namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("foo"), expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json", }, { name: "* namespace include", item: `{"metadata":{"namespace":"foo","name":"bar"}}`, namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json", }, { name: "cluster-scoped", item: `{"metadata":{"name":"bar"}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/cluster/bar.json", }, { name: "tar header write error", item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, expectError: true, tarHeaderWriteError: true, }, { name: "tar write error", item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, expectError: true, tarWriteError: true, }, { name: "action invoked - cluster-scoped", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"metadata":{"name":"bar"}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/cluster/bar.json", customAction: true, expectedActionID: "bar", }, { name: "action invoked - namespaced", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"metadata":{"namespace": "myns", "name":"bar"}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json", customAction: true, expectedActionID: "myns/bar", }, { name: "action invoked - additional items", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"metadata":{"namespace": "myns", "name":"bar"}}`, expectError: false, 
expectExcluded: false, expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json", customAction: true, expectedActionID: "myns/bar", customActionAdditionalItemIdentifiers: []ResourceIdentifier{ { GroupResource: schema.GroupResource{Group: "g1", Resource: "r1"}, Namespace: "ns1", Name: "n1", }, { GroupResource: schema.GroupResource{Group: "g2", Resource: "r2"}, Namespace: "ns2", Name: "n2", }, }, customActionAdditionalItems: []runtime.Unstructured{ unstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`), unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`), }, }, { name: "takePVSnapshot is not invoked for PVs when snapshotService == nil", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json", groupResource: "persistentvolumes", }, { name: "takePVSnapshot is invoked for PVs when snapshotService != nil", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json", groupResource: "persistentvolumes", snapshottableVolumes: map[string]api.VolumeBackupInfo{ "vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"}, }, }, { name: "backup fails when takePVSnapshot fails", namespaceIncludesExcludes: 
collections.NewIncludesExcludes().Includes("*"), item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: true, groupResource: "persistentvolumes", snapshottableVolumes: map[string]api.VolumeBackupInfo{ "vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"}, }, snapshotError: fmt.Errorf("failure"), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var ( actions []resolvedAction action *fakeAction backup = &v1.Backup{} groupResource = schema.ParseGroupResource("resource.group") backedUpItems = make(map[itemKey]struct{}) resources = collections.NewIncludesExcludes() w = &fakeTarWriter{} ) if test.groupResource != "" { groupResource = schema.ParseGroupResource(test.groupResource) } item, err := getAsMap(test.item) if err != nil { t.Fatal(err) } namespaces := test.namespaceIncludesExcludes if namespaces == nil { namespaces = collections.NewIncludesExcludes() } if test.tarHeaderWriteError { w.writeHeaderError = errors.New("error") } if test.tarWriteError { w.writeError = errors.New("error") } if test.customAction { action = &fakeAction{ additionalItems: test.customActionAdditionalItemIdentifiers, } actions = []resolvedAction{ { ItemAction: action, namespaceIncludesExcludes: collections.NewIncludesExcludes(), resourceIncludesExcludes: collections.NewIncludesExcludes().Includes(groupResource.String()), selector: labels.Everything(), }, } } resourceHooks := []resourceHook{} podCommandExecutor := &mockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) dynamicFactory := &arktest.FakeDynamicFactory{} defer dynamicFactory.AssertExpectations(t) discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) b := (&defaultItemBackupperFactory{}).newItemBackupper( backup, namespaces, resources, backedUpItems, actions, podCommandExecutor, 
w, resourceHooks, dynamicFactory, discoveryHelper, nil, ).(*defaultItemBackupper) var snapshotService *arktest.FakeSnapshotService if test.snapshottableVolumes != nil { snapshotService = &arktest.FakeSnapshotService{ SnapshottableVolumes: test.snapshottableVolumes, VolumeID: "vol-abc123", Error: test.snapshotError, } b.snapshotService = snapshotService } // make sure the podCommandExecutor was set correctly in the real hook handler assert.Equal(t, podCommandExecutor, b.itemHookHandler.(*defaultItemHookHandler).podCommandExecutor) itemHookHandler := &mockItemHookHandler{} defer itemHookHandler.AssertExpectations(t) b.itemHookHandler = itemHookHandler additionalItemBackupper := &mockItemBackupper{} defer additionalItemBackupper.AssertExpectations(t) b.additionalItemBackupper = additionalItemBackupper obj := &unstructured.Unstructured{Object: item} itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, resourceHooks, hookPhasePre).Return(nil) if test.snapshotError == nil { // TODO: Remove if-clause when #511 is resolved. 
itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, resourceHooks, hookPhasePost).Return(nil) } for i, item := range test.customActionAdditionalItemIdentifiers { itemClient := &arktest.FakeDynamicClient{} defer itemClient.AssertExpectations(t) dynamicFactory.On("ClientForGroupVersionResource", item.GroupResource.WithVersion("").GroupVersion(), metav1.APIResource{Name: item.Resource}, item.Namespace).Return(itemClient, nil) itemClient.On("Get", item.Name, metav1.GetOptions{}).Return(test.customActionAdditionalItems[i], nil) additionalItemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), test.customActionAdditionalItems[i], item.GroupResource).Return(nil) } err = b.backupItem(arktest.NewLogger(), obj, groupResource) gotError := err != nil if e, a := test.expectError, gotError; e != a { t.Fatalf("error: expected %t, got %t: %v", e, a, err) } if test.expectError { return } if test.expectExcluded { if len(w.headers) > 0 { t.Errorf("unexpected header write") } if len(w.data) > 0 { t.Errorf("unexpected data write") } return } // Convert to JSON for comparing number of bytes to the tar header itemJSON, err := json.Marshal(&item) if err != nil { t.Fatal(err) } require.Equal(t, 1, len(w.headers), "headers") assert.Equal(t, test.expectedTarHeaderName, w.headers[0].Name, "header.name") assert.Equal(t, int64(len(itemJSON)), w.headers[0].Size, "header.size") assert.Equal(t, byte(tar.TypeReg), w.headers[0].Typeflag, "header.typeflag") assert.Equal(t, int64(0755), w.headers[0].Mode, "header.mode") assert.False(t, w.headers[0].ModTime.IsZero(), "header.modTime set") assert.Equal(t, 1, len(w.data), "# of data") actual, err := getAsMap(string(w.data[0])) if err != nil { t.Fatal(err) } if e, a := item, actual; !reflect.DeepEqual(e, a) { t.Errorf("data: expected %s, got %s", e, a) } if test.customAction { if len(action.ids) != 1 { t.Errorf("unexpected custom action ids: %v", action.ids) } else if e, a := test.expectedActionID, action.ids[0]; e != a { 
t.Errorf("action.ids[0]: expected %s, got %s", e, a) } require.Equal(t, 1, len(action.backups), "unexpected custom action backups: %#v", action.backups) assert.Equal(t, backup, &(action.backups[0]), "backup") } if test.snapshottableVolumes != nil { require.Equal(t, 1, len(snapshotService.SnapshotsTaken)) var expectedBackups []api.VolumeBackupInfo for _, vbi := range test.snapshottableVolumes { expectedBackups = append(expectedBackups, vbi) } var actualBackups []api.VolumeBackupInfo for _, vbi := range backup.Status.VolumeBackups { actualBackups = append(actualBackups, *vbi) } assert.Equal(t, expectedBackups, actualBackups) } }) } } func TestTakePVSnapshot(t *testing.T) { iops := int64(1000) tests := []struct { name string snapshotEnabled bool pv string ttl time.Duration expectError bool expectedVolumeID string expectedSnapshotsTaken int existingVolumeBackups map[string]*v1.VolumeBackupInfo volumeInfo map[string]v1.VolumeBackupInfo }{ { name: "snapshot disabled", pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}}`, snapshotEnabled: false, }, { name: "unsupported PV source type", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"unsupportedPVSource": {}}}`, expectError: false, }, { name: "without iops", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectedSnapshotsTaken: 1, expectedVolumeID: "vol-abc123", ttl: 5 * time.Minute, volumeInfo: map[string]v1.VolumeBackupInfo{ "vol-abc123": {Type: "gp", SnapshotID: "snap-1", AvailabilityZone: "us-east-1c"}, }, }, { name: "with iops", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": 
"us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectedSnapshotsTaken: 1, expectedVolumeID: "vol-abc123", ttl: 5 * time.Minute, volumeInfo: map[string]v1.VolumeBackupInfo{ "vol-abc123": {Type: "io1", Iops: &iops, SnapshotID: "snap-1", AvailabilityZone: "us-east-1c"}, }, }, { name: "preexisting volume backup info in backup status", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`, expectError: false, expectedSnapshotsTaken: 1, expectedVolumeID: "pd-abc123", ttl: 5 * time.Minute, existingVolumeBackups: map[string]*v1.VolumeBackupInfo{ "anotherpv": {SnapshotID: "anothersnap"}, }, volumeInfo: map[string]v1.VolumeBackupInfo{ "pd-abc123": {Type: "gp", SnapshotID: "snap-1"}, }, }, { name: "create snapshot error", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`, expectedVolumeID: "pd-abc123", expectError: true, }, { name: "PV with label metadata but no failureDomainZone", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/region": "us-east-1"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectedSnapshotsTaken: 1, expectedVolumeID: "vol-abc123", ttl: 5 * time.Minute, volumeInfo: map[string]v1.VolumeBackupInfo{ "vol-abc123": {Type: "gp", SnapshotID: "snap-1"}, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { backup := &v1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: v1.DefaultNamespace, Name: "mybackup", }, Spec: v1.BackupSpec{ SnapshotVolumes: &test.snapshotEnabled, TTL: metav1.Duration{Duration: test.ttl}, }, Status: v1.BackupStatus{ VolumeBackups: test.existingVolumeBackups, }, } 
snapshotService := &arktest.FakeSnapshotService{ SnapshottableVolumes: test.volumeInfo, VolumeID: test.expectedVolumeID, } ib := &defaultItemBackupper{snapshotService: snapshotService} pv, err := getAsMap(test.pv) if err != nil { t.Fatal(err) } // method under test err = ib.takePVSnapshot(&unstructured.Unstructured{Object: pv}, backup, arktest.NewLogger()) gotErr := err != nil if e, a := test.expectError, gotErr; e != a { t.Errorf("error: expected %v, got %v", e, a) } if test.expectError { return } if !test.snapshotEnabled { // don't need to check anything else if snapshots are disabled return } expectedVolumeBackups := test.existingVolumeBackups if expectedVolumeBackups == nil { expectedVolumeBackups = make(map[string]*v1.VolumeBackupInfo) } // we should have one snapshot taken exactly require.Equal(t, test.expectedSnapshotsTaken, snapshotService.SnapshotsTaken.Len()) if test.expectedSnapshotsTaken > 0 { // the snapshotID should be the one in the entry in snapshotService.SnapshottableVolumes // for the volume we ran the test for snapshotID, _ := snapshotService.SnapshotsTaken.PopAny() expectedVolumeBackups["mypv"] = &v1.VolumeBackupInfo{ SnapshotID: snapshotID, Type: test.volumeInfo[test.expectedVolumeID].Type, Iops: test.volumeInfo[test.expectedVolumeID].Iops, AvailabilityZone: test.volumeInfo[test.expectedVolumeID].AvailabilityZone, } if e, a := expectedVolumeBackups, backup.Status.VolumeBackups; !reflect.DeepEqual(e, a) { t.Errorf("backup.status.VolumeBackups: expected %v, got %v", e, a) } } }) } } type fakeTarWriter struct { closeCalled bool headers []*tar.Header data [][]byte writeHeaderError error writeError error } func (w *fakeTarWriter) Close() error { return nil } func (w *fakeTarWriter) Write(data []byte) (int, error) { w.data = append(w.data, data) return 0, w.writeError } func (w *fakeTarWriter) WriteHeader(header *tar.Header) error { w.headers = append(w.headers, header) return w.writeHeaderError } type mockItemBackupper struct { mock.Mock } func (ib 
*mockItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error { args := ib.Called(logger, obj, groupResource) return args.Error(0) } Add additional test Signed-off-by: Calle Pettersson <a2ac79ca63d845ddce26b76d15222599470c42b8@gmail.com> /* Copyright 2017 the Heptio Ark contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package backup import ( "archive/tar" "encoding/json" "fmt" "reflect" "testing" "time" "github.com/heptio/ark/pkg/apis/ark/v1" api "github.com/heptio/ark/pkg/apis/ark/v1" "github.com/heptio/ark/pkg/util/collections" arktest "github.com/heptio/ark/pkg/util/test" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) func TestBackupItemSkips(t *testing.T) { tests := []struct { testName string namespace string name string namespaces *collections.IncludesExcludes groupResource schema.GroupResource resources *collections.IncludesExcludes backedUpItems map[itemKey]struct{} }{ { testName: "namespace not in includes list", namespace: "ns", name: "foo", namespaces: collections.NewIncludesExcludes().Includes("a"), }, { testName: "namespace in excludes list", namespace: "ns", name: "foo", namespaces: 
collections.NewIncludesExcludes().Excludes("ns"), }, { testName: "resource not in includes list", namespace: "ns", name: "foo", groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, namespaces: collections.NewIncludesExcludes(), resources: collections.NewIncludesExcludes().Includes("a.b"), }, { testName: "resource in excludes list", namespace: "ns", name: "foo", groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, namespaces: collections.NewIncludesExcludes(), resources: collections.NewIncludesExcludes().Excludes("bar.foo"), }, { testName: "resource already backed up", namespace: "ns", name: "foo", groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, namespaces: collections.NewIncludesExcludes(), resources: collections.NewIncludesExcludes(), backedUpItems: map[itemKey]struct{}{ {resource: "bar.foo", namespace: "ns", name: "foo"}: {}, }, }, } for _, test := range tests { t.Run(test.testName, func(t *testing.T) { ib := &defaultItemBackupper{ namespaces: test.namespaces, resources: test.resources, backedUpItems: test.backedUpItems, } u := unstructuredOrDie(fmt.Sprintf(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"%s","name":"%s"}}`, test.namespace, test.name)) err := ib.backupItem(arktest.NewLogger(), u, test.groupResource) assert.NoError(t, err) }) } } func TestBackupItemSkipsClusterScopedResourceWhenIncludeClusterResourcesFalse(t *testing.T) { f := false ib := &defaultItemBackupper{ backup: &v1.Backup{ Spec: v1.BackupSpec{ IncludeClusterResources: &f, }, }, namespaces: collections.NewIncludesExcludes(), resources: collections.NewIncludesExcludes(), } u := unstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`) err := ib.backupItem(arktest.NewLogger(), u, schema.GroupResource{Group: "foo", Resource: "bar"}) assert.NoError(t, err) } func TestBackupItemNoSkips(t *testing.T) { tests := []struct { name string item string namespaceIncludesExcludes *collections.IncludesExcludes expectError bool 
expectExcluded bool expectedTarHeaderName string tarWriteError bool tarHeaderWriteError bool customAction bool expectedActionID string customActionAdditionalItemIdentifiers []ResourceIdentifier customActionAdditionalItems []runtime.Unstructured groupResource string snapshottableVolumes map[string]api.VolumeBackupInfo snapshotError error additionalItemError error }{ { name: "explicit namespace include", item: `{"metadata":{"namespace":"foo","name":"bar"}}`, namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("foo"), expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json", }, { name: "* namespace include", item: `{"metadata":{"namespace":"foo","name":"bar"}}`, namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json", }, { name: "cluster-scoped", item: `{"metadata":{"name":"bar"}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/cluster/bar.json", }, { name: "tar header write error", item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, expectError: true, tarHeaderWriteError: true, }, { name: "tar write error", item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, expectError: true, tarWriteError: true, }, { name: "action invoked - cluster-scoped", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"metadata":{"name":"bar"}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/cluster/bar.json", customAction: true, expectedActionID: "bar", }, { name: "action invoked - namespaced", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"metadata":{"namespace": "myns", "name":"bar"}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: 
"resources/resource.group/namespaces/myns/bar.json", customAction: true, expectedActionID: "myns/bar", }, { name: "action invoked - additional items", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"metadata":{"namespace": "myns", "name":"bar"}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json", customAction: true, expectedActionID: "myns/bar", customActionAdditionalItemIdentifiers: []ResourceIdentifier{ { GroupResource: schema.GroupResource{Group: "g1", Resource: "r1"}, Namespace: "ns1", Name: "n1", }, { GroupResource: schema.GroupResource{Group: "g2", Resource: "r2"}, Namespace: "ns2", Name: "n2", }, }, customActionAdditionalItems: []runtime.Unstructured{ unstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`), unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`), }, }, { name: "action invoked - additional items - error", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"metadata":{"namespace": "myns", "name":"bar"}}`, expectError: true, expectExcluded: false, expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json", customAction: true, expectedActionID: "myns/bar", customActionAdditionalItemIdentifiers: []ResourceIdentifier{ { GroupResource: schema.GroupResource{Group: "g1", Resource: "r1"}, Namespace: "ns1", Name: "n1", }, { GroupResource: schema.GroupResource{Group: "g2", Resource: "r2"}, Namespace: "ns2", Name: "n2", }, }, customActionAdditionalItems: []runtime.Unstructured{ unstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`), unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`), }, additionalItemError: errors.New("foo"), }, { name: "takePVSnapshot is not invoked for PVs when snapshotService == nil", 
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json", groupResource: "persistentvolumes", }, { name: "takePVSnapshot is invoked for PVs when snapshotService != nil", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectExcluded: false, expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json", groupResource: "persistentvolumes", snapshottableVolumes: map[string]api.VolumeBackupInfo{ "vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"}, }, }, { name: "backup fails when takePVSnapshot fails", namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: true, groupResource: "persistentvolumes", snapshottableVolumes: map[string]api.VolumeBackupInfo{ "vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"}, }, snapshotError: fmt.Errorf("failure"), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var ( actions []resolvedAction action *fakeAction backup = &v1.Backup{} groupResource = schema.ParseGroupResource("resource.group") backedUpItems = make(map[itemKey]struct{}) resources = 
collections.NewIncludesExcludes() w = &fakeTarWriter{} ) if test.groupResource != "" { groupResource = schema.ParseGroupResource(test.groupResource) } item, err := getAsMap(test.item) if err != nil { t.Fatal(err) } namespaces := test.namespaceIncludesExcludes if namespaces == nil { namespaces = collections.NewIncludesExcludes() } if test.tarHeaderWriteError { w.writeHeaderError = errors.New("error") } if test.tarWriteError { w.writeError = errors.New("error") } if test.customAction { action = &fakeAction{ additionalItems: test.customActionAdditionalItemIdentifiers, } actions = []resolvedAction{ { ItemAction: action, namespaceIncludesExcludes: collections.NewIncludesExcludes(), resourceIncludesExcludes: collections.NewIncludesExcludes().Includes(groupResource.String()), selector: labels.Everything(), }, } } resourceHooks := []resourceHook{} podCommandExecutor := &mockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) dynamicFactory := &arktest.FakeDynamicFactory{} defer dynamicFactory.AssertExpectations(t) discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) b := (&defaultItemBackupperFactory{}).newItemBackupper( backup, namespaces, resources, backedUpItems, actions, podCommandExecutor, w, resourceHooks, dynamicFactory, discoveryHelper, nil, ).(*defaultItemBackupper) var snapshotService *arktest.FakeSnapshotService if test.snapshottableVolumes != nil { snapshotService = &arktest.FakeSnapshotService{ SnapshottableVolumes: test.snapshottableVolumes, VolumeID: "vol-abc123", Error: test.snapshotError, } b.snapshotService = snapshotService } // make sure the podCommandExecutor was set correctly in the real hook handler assert.Equal(t, podCommandExecutor, b.itemHookHandler.(*defaultItemHookHandler).podCommandExecutor) itemHookHandler := &mockItemHookHandler{} defer itemHookHandler.AssertExpectations(t) b.itemHookHandler = itemHookHandler additionalItemBackupper := &mockItemBackupper{} defer additionalItemBackupper.AssertExpectations(t) 
b.additionalItemBackupper = additionalItemBackupper obj := &unstructured.Unstructured{Object: item} itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, resourceHooks, hookPhasePre).Return(nil) if test.snapshotError == nil && test.additionalItemError == nil { // TODO: Remove if-clause when #511 is resolved. itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, resourceHooks, hookPhasePost).Return(nil) } for i, item := range test.customActionAdditionalItemIdentifiers { if test.additionalItemError != nil && i > 0 { break } itemClient := &arktest.FakeDynamicClient{} defer itemClient.AssertExpectations(t) dynamicFactory.On("ClientForGroupVersionResource", item.GroupResource.WithVersion("").GroupVersion(), metav1.APIResource{Name: item.Resource}, item.Namespace).Return(itemClient, nil) itemClient.On("Get", item.Name, metav1.GetOptions{}).Return(test.customActionAdditionalItems[i], nil) additionalItemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), test.customActionAdditionalItems[i], item.GroupResource).Return(test.additionalItemError) } err = b.backupItem(arktest.NewLogger(), obj, groupResource) gotError := err != nil if e, a := test.expectError, gotError; e != a { t.Fatalf("error: expected %t, got %t: %v", e, a, err) } if test.expectError { return } if test.expectExcluded { if len(w.headers) > 0 { t.Errorf("unexpected header write") } if len(w.data) > 0 { t.Errorf("unexpected data write") } return } // Convert to JSON for comparing number of bytes to the tar header itemJSON, err := json.Marshal(&item) if err != nil { t.Fatal(err) } require.Equal(t, 1, len(w.headers), "headers") assert.Equal(t, test.expectedTarHeaderName, w.headers[0].Name, "header.name") assert.Equal(t, int64(len(itemJSON)), w.headers[0].Size, "header.size") assert.Equal(t, byte(tar.TypeReg), w.headers[0].Typeflag, "header.typeflag") assert.Equal(t, int64(0755), w.headers[0].Mode, "header.mode") assert.False(t, w.headers[0].ModTime.IsZero(), 
"header.modTime set") assert.Equal(t, 1, len(w.data), "# of data") actual, err := getAsMap(string(w.data[0])) if err != nil { t.Fatal(err) } if e, a := item, actual; !reflect.DeepEqual(e, a) { t.Errorf("data: expected %s, got %s", e, a) } if test.customAction { if len(action.ids) != 1 { t.Errorf("unexpected custom action ids: %v", action.ids) } else if e, a := test.expectedActionID, action.ids[0]; e != a { t.Errorf("action.ids[0]: expected %s, got %s", e, a) } require.Equal(t, 1, len(action.backups), "unexpected custom action backups: %#v", action.backups) assert.Equal(t, backup, &(action.backups[0]), "backup") } if test.snapshottableVolumes != nil { require.Equal(t, 1, len(snapshotService.SnapshotsTaken)) var expectedBackups []api.VolumeBackupInfo for _, vbi := range test.snapshottableVolumes { expectedBackups = append(expectedBackups, vbi) } var actualBackups []api.VolumeBackupInfo for _, vbi := range backup.Status.VolumeBackups { actualBackups = append(actualBackups, *vbi) } assert.Equal(t, expectedBackups, actualBackups) } }) } } func TestTakePVSnapshot(t *testing.T) { iops := int64(1000) tests := []struct { name string snapshotEnabled bool pv string ttl time.Duration expectError bool expectedVolumeID string expectedSnapshotsTaken int existingVolumeBackups map[string]*v1.VolumeBackupInfo volumeInfo map[string]v1.VolumeBackupInfo }{ { name: "snapshot disabled", pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}}`, snapshotEnabled: false, }, { name: "unsupported PV source type", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"unsupportedPVSource": {}}}`, expectError: false, }, { name: "without iops", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: 
false, expectedSnapshotsTaken: 1, expectedVolumeID: "vol-abc123", ttl: 5 * time.Minute, volumeInfo: map[string]v1.VolumeBackupInfo{ "vol-abc123": {Type: "gp", SnapshotID: "snap-1", AvailabilityZone: "us-east-1c"}, }, }, { name: "with iops", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectedSnapshotsTaken: 1, expectedVolumeID: "vol-abc123", ttl: 5 * time.Minute, volumeInfo: map[string]v1.VolumeBackupInfo{ "vol-abc123": {Type: "io1", Iops: &iops, SnapshotID: "snap-1", AvailabilityZone: "us-east-1c"}, }, }, { name: "preexisting volume backup info in backup status", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`, expectError: false, expectedSnapshotsTaken: 1, expectedVolumeID: "pd-abc123", ttl: 5 * time.Minute, existingVolumeBackups: map[string]*v1.VolumeBackupInfo{ "anotherpv": {SnapshotID: "anothersnap"}, }, volumeInfo: map[string]v1.VolumeBackupInfo{ "pd-abc123": {Type: "gp", SnapshotID: "snap-1"}, }, }, { name: "create snapshot error", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`, expectedVolumeID: "pd-abc123", expectError: true, }, { name: "PV with label metadata but no failureDomainZone", snapshotEnabled: true, pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/region": "us-east-1"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: false, expectedSnapshotsTaken: 1, expectedVolumeID: "vol-abc123", ttl: 5 * time.Minute, volumeInfo: map[string]v1.VolumeBackupInfo{ "vol-abc123": 
{Type: "gp", SnapshotID: "snap-1"}, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { backup := &v1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: v1.DefaultNamespace, Name: "mybackup", }, Spec: v1.BackupSpec{ SnapshotVolumes: &test.snapshotEnabled, TTL: metav1.Duration{Duration: test.ttl}, }, Status: v1.BackupStatus{ VolumeBackups: test.existingVolumeBackups, }, } snapshotService := &arktest.FakeSnapshotService{ SnapshottableVolumes: test.volumeInfo, VolumeID: test.expectedVolumeID, } ib := &defaultItemBackupper{snapshotService: snapshotService} pv, err := getAsMap(test.pv) if err != nil { t.Fatal(err) } // method under test err = ib.takePVSnapshot(&unstructured.Unstructured{Object: pv}, backup, arktest.NewLogger()) gotErr := err != nil if e, a := test.expectError, gotErr; e != a { t.Errorf("error: expected %v, got %v", e, a) } if test.expectError { return } if !test.snapshotEnabled { // don't need to check anything else if snapshots are disabled return } expectedVolumeBackups := test.existingVolumeBackups if expectedVolumeBackups == nil { expectedVolumeBackups = make(map[string]*v1.VolumeBackupInfo) } // we should have one snapshot taken exactly require.Equal(t, test.expectedSnapshotsTaken, snapshotService.SnapshotsTaken.Len()) if test.expectedSnapshotsTaken > 0 { // the snapshotID should be the one in the entry in snapshotService.SnapshottableVolumes // for the volume we ran the test for snapshotID, _ := snapshotService.SnapshotsTaken.PopAny() expectedVolumeBackups["mypv"] = &v1.VolumeBackupInfo{ SnapshotID: snapshotID, Type: test.volumeInfo[test.expectedVolumeID].Type, Iops: test.volumeInfo[test.expectedVolumeID].Iops, AvailabilityZone: test.volumeInfo[test.expectedVolumeID].AvailabilityZone, } if e, a := expectedVolumeBackups, backup.Status.VolumeBackups; !reflect.DeepEqual(e, a) { t.Errorf("backup.status.VolumeBackups: expected %v, got %v", e, a) } } }) } } type fakeTarWriter struct { closeCalled bool headers []*tar.Header data 
[][]byte
	writeHeaderError error
	writeError       error
}

// Close implements the tar writer interface; the fake never fails on close.
// (closeCalled is not updated here — it is unused by these tests.)
func (w *fakeTarWriter) Close() error { return nil }

// Write records each data chunk for later inspection and returns the injected
// writeError, if any. Note it always reports 0 bytes written regardless of
// len(data); callers in these tests only check the error.
func (w *fakeTarWriter) Write(data []byte) (int, error) {
	w.data = append(w.data, data)
	return 0, w.writeError
}

// WriteHeader records the tar header for later inspection and returns the
// injected writeHeaderError, if any.
func (w *fakeTarWriter) WriteHeader(header *tar.Header) error {
	w.headers = append(w.headers, header)
	return w.writeHeaderError
}

// mockItemBackupper is a testify mock of the itemBackupper interface, used to
// verify that additional items returned by custom actions are backed up.
type mockItemBackupper struct {
	mock.Mock
}

// backupItem forwards the call to the testify mock and returns the error the
// test configured via On("backupItem", ...).Return(...).
func (ib *mockItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
	args := ib.Called(logger, obj, groupResource)
	return args.Error(0)
}
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package args import ( "fmt" "path" "github.com/spf13/pflag" "k8s.io/gengo/args" "k8s.io/gengo/types" codegenutil "k8s.io/code-generator/pkg/util" ) // CustomArgs is a wrapper for arguments to applyconfiguration-gen. type CustomArgs struct { // ExternalApplyConfigurations provides the locations of externally generated // apply configuration types for types referenced by the go structs provided as input. // Locations are provided as a comma separated list of <package>.<typeName>:<applyconfiguration-package> // entries. // // E.g. if a type references appsv1.Deployment, the location of its apply configuration should // be provided: // k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1 // // meta/v1 types (TypeMeta and ObjectMeta) are always included and do not need to be passed in. ExternalApplyConfigurations map[types.Name]string OpenAPISchemaFilePath string } // NewDefaults returns default arguments for the generator. func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { genericArgs := args.Default().WithoutDefaultFlagParsing() customArgs := &CustomArgs{ ExternalApplyConfigurations: map[types.Name]string{ // Always include TypeMeta and ObjectMeta. They are sufficient for the vast majority of use cases. 
{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "TypeMeta"}: "k8s.io/client-go/applyconfigurations/meta/v1", {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ObjectMeta"}: "k8s.io/client-go/applyconfigurations/meta/v1", }, } genericArgs.CustomArgs = customArgs if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/applyconfigurations") } return genericArgs, customArgs } func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) { pflag.Var(NewExternalApplyConfigurationValue(&ca.ExternalApplyConfigurations, nil), "external-applyconfigurations", "list of comma separated external apply configurations locations in <type-package>.<type-name>:<applyconfiguration-package> form."+ "For example: k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1") pflag.StringVar(&ca.OpenAPISchemaFilePath, "openapi-schema", "", "path to the openapi schema containing all the types that apply configurations will be generated for") } // Validate checks the given arguments. func Validate(genericArgs *args.GeneratorArgs) error { if len(genericArgs.OutputPackagePath) == 0 { return fmt.Errorf("output package cannot be empty") } return nil } add metav1.OwnerReference to the default external configurations to ease generation /* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

// Package args defines the command line arguments accepted by
// applyconfiguration-gen and their defaults.
package args

import (
	"fmt"
	"path"

	"github.com/spf13/pflag"
	"k8s.io/gengo/args"
	"k8s.io/gengo/types"

	codegenutil "k8s.io/code-generator/pkg/util"
)

// CustomArgs is a wrapper for arguments to applyconfiguration-gen.
type CustomArgs struct {
	// ExternalApplyConfigurations provides the locations of externally generated
	// apply configuration types for types referenced by the go structs provided as input.
	// Locations are provided as a comma separated list of <package>.<typeName>:<applyconfiguration-package>
	// entries.
	//
	// E.g. if a type references appsv1.Deployment, the location of its apply configuration should
	// be provided:
	//   k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1
	//
	// meta/v1 types (TypeMeta and ObjectMeta) are always included and do not need to be passed in.
	ExternalApplyConfigurations map[types.Name]string

	// OpenAPISchemaFilePath is the path to an openapi schema containing all the
	// types that apply configurations will be generated for.
	OpenAPISchemaFilePath string
}

// NewDefaults returns default arguments for the generator.
func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
	genericArgs := args.Default().WithoutDefaultFlagParsing()
	customArgs := &CustomArgs{
		ExternalApplyConfigurations: map[types.Name]string{
			// Always include TypeMeta, ObjectMeta and OwnerReference. They are
			// sufficient for the vast majority of use cases (OwnerReference is
			// referenced by ObjectMeta's apply configuration).
			{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "TypeMeta"}:       "k8s.io/client-go/applyconfigurations/meta/v1",
			{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ObjectMeta"}:     "k8s.io/client-go/applyconfigurations/meta/v1",
			{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "OwnerReference"}: "k8s.io/client-go/applyconfigurations/meta/v1",
		},
	}
	genericArgs.CustomArgs = customArgs

	// Default the output package relative to the current package, when resolvable.
	if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 {
		genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/applyconfigurations")
	}

	return genericArgs, customArgs
}

// AddFlags registers the generator's command line flags.
// NOTE(review): flags are registered on the global pflag.CommandLine set, not
// on the fs parameter, which is unused here — confirm this is intentional.
func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) {
	pflag.Var(NewExternalApplyConfigurationValue(&ca.ExternalApplyConfigurations, nil), "external-applyconfigurations",
		"list of comma separated external apply configurations locations in <type-package>.<type-name>:<applyconfiguration-package> form."+
			"For example: k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1")
	pflag.StringVar(&ca.OpenAPISchemaFilePath, "openapi-schema", "",
		"path to the openapi schema containing all the types that apply configurations will be generated for")
}

// Validate checks the given arguments.
func Validate(genericArgs *args.GeneratorArgs) error {
	if len(genericArgs.OutputPackagePath) == 0 {
		return fmt.Errorf("output package cannot be empty")
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0 // Copyright Authors of Hubble package tcp import ( "context" "github.com/prometheus/client_golang/prometheus" flowpb "github.com/cilium/cilium/api/v1/flow" "github.com/cilium/cilium/pkg/hubble/metrics/api" ) type tcpHandler struct { tcpFlags *prometheus.CounterVec context *api.ContextOptions } func (h *tcpHandler) Init(registry *prometheus.Registry, options api.Options) error { c, err := api.ParseContextOptions(options) if err != nil { return err } h.context = c contextLabels := h.context.GetLabelNames() labels := append(contextLabels, "flag", "family") h.tcpFlags = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: api.DefaultPrometheusNamespace, Name: "tcp_flags_total", Help: "TCP flag occurrences", }, labels) registry.MustRegister(h.tcpFlags) return nil } func (h *tcpHandler) Status() string { return h.context.Status() } func (h *tcpHandler) ProcessFlow(ctx context.Context, flow *flowpb.Flow) error { if (flow.GetVerdict() != flowpb.Verdict_FORWARDED && flow.GetVerdict() != flowpb.Verdict_REDIRECTED) || flow.GetL4() == nil { return nil } ip := flow.GetIP() tcp := flow.GetL4().GetTCP() if ip == nil || tcp == nil || tcp.Flags == nil { return nil } contextLabels, err := h.context.GetLabelValues(flow) if err != nil { return err } labels := append(contextLabels, "", ip.IpVersion.String()) if tcp.Flags.FIN { labels[0] = "FIN" h.tcpFlags.WithLabelValues(labels...).Inc() } if tcp.Flags.SYN { if tcp.Flags.ACK { labels[0] = "SYN-ACK" h.tcpFlags.WithLabelValues(labels...).Inc() } else { labels[0] = "SYN" h.tcpFlags.WithLabelValues(labels...).Inc() } } if tcp.Flags.RST { labels[0] = "RST" h.tcpFlags.WithLabelValues(labels...).Inc() } return nil } hubble/metrics: Fix label ordering in Hubble TCP metrics The code setting the flag label value assumes that it's the first label in the slice. 
If context options are enabled, then it's not true, so one of the context labels incorrectly gets the flag value, and the flag label gets discarded. Fixes: d4d73681026b ("hubble/metrics: Replace panic in contextLabels with error log") Signed-off-by: Anna Kapuscinska <2bc1ecb410e142bce83bce6f212b41e1781536dc@isovalent.com> // SPDX-License-Identifier: Apache-2.0 // Copyright Authors of Hubble package tcp import ( "context" "github.com/prometheus/client_golang/prometheus" flowpb "github.com/cilium/cilium/api/v1/flow" "github.com/cilium/cilium/pkg/hubble/metrics/api" ) type tcpHandler struct { tcpFlags *prometheus.CounterVec context *api.ContextOptions } func (h *tcpHandler) Init(registry *prometheus.Registry, options api.Options) error { c, err := api.ParseContextOptions(options) if err != nil { return err } h.context = c labels := []string{"flag", "family"} labels = append(labels, h.context.GetLabelNames()...) h.tcpFlags = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: api.DefaultPrometheusNamespace, Name: "tcp_flags_total", Help: "TCP flag occurrences", }, labels) registry.MustRegister(h.tcpFlags) return nil } func (h *tcpHandler) Status() string { return h.context.Status() } func (h *tcpHandler) ProcessFlow(ctx context.Context, flow *flowpb.Flow) error { if (flow.GetVerdict() != flowpb.Verdict_FORWARDED && flow.GetVerdict() != flowpb.Verdict_REDIRECTED) || flow.GetL4() == nil { return nil } ip := flow.GetIP() tcp := flow.GetL4().GetTCP() if ip == nil || tcp == nil || tcp.Flags == nil { return nil } contextLabels, err := h.context.GetLabelValues(flow) if err != nil { return err } labels := append([]string{"", ip.IpVersion.String()}, contextLabels...) 
if tcp.Flags.FIN { labels[0] = "FIN" h.tcpFlags.WithLabelValues(labels...).Inc() } if tcp.Flags.SYN { if tcp.Flags.ACK { labels[0] = "SYN-ACK" h.tcpFlags.WithLabelValues(labels...).Inc() } else { labels[0] = "SYN" h.tcpFlags.WithLabelValues(labels...).Inc() } } if tcp.Flags.RST { labels[0] = "RST" h.tcpFlags.WithLabelValues(labels...).Inc() } return nil }
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"sort"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/unversioned/record"
	"k8s.io/kubernetes/pkg/conversion"
	"k8s.io/kubernetes/pkg/kubelet"
	"k8s.io/kubernetes/pkg/securitycontext"
	"k8s.io/kubernetes/pkg/types"
)

const (
	NoneSource = ""
	TestSource = "test"
)

// expectEmptyChannel fails the test if an update is already queued on ch.
func expectEmptyChannel(t *testing.T, ch <-chan interface{}) {
	select {
	case update := <-ch:
		t.Errorf("Expected no update in channel, Got %v", update)
	default:
	}
}

// sortedPods orders pods by namespace for deterministic comparison.
type sortedPods []*api.Pod

func (s sortedPods) Len() int {
	return len(s)
}
func (s sortedPods) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s sortedPods) Less(i, j int) bool {
	return s[i].Namespace < s[j].Namespace
}

// CreateValidPod returns a minimal pod that passes kubelet validation.
// NOTE(review): the source parameter is never read — candidate for removal.
func CreateValidPod(name, namespace, source string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       types.UID(name), // for the purpose of testing, this is unique enough
			Name:      name,
			Namespace: namespace,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyAlways,
			DNSPolicy:     api.DNSClusterFirst,
			Containers: []api.Container{
				{
					Name:            "ctr",
					Image:           "image",
					ImagePullPolicy: "IfNotPresent",
					SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
				},
			},
		},
	}
}

// CreatePodUpdate wraps pods in a kubelet.PodUpdate with the given op and source.
func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...*api.Pod) kubelet.PodUpdate {
	return kubelet.PodUpdate{Pods: pods, Op: op, Source: source}
}

// createPodConfigTester builds a PodConfig in the given notification mode and
// returns its test-source input channel, the merged updates channel, and the config.
func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) {
	eventBroadcaster := record.NewBroadcaster()
	config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
	channel := config.Channel(TestSource)
	ch := config.Updates()
	return channel, ch, config
}

// expectPodUpdate asserts that exactly the expected updates arrive on ch, in
// order, and that no extra update follows.
func expectPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate, expected ...kubelet.PodUpdate) {
	for i := range expected {
		update := <-ch
		sort.Sort(sortedPods(update.Pods))
		// Clear the annotation field before the comparison.
		// TODO: consider mock out recordFirstSeen in config.go
		for _, pod := range update.Pods {
			delete(pod.Annotations, kubelet.ConfigFirstSeenAnnotationKey)
			delete(pod.Annotations, kubelet.ConfigSourceAnnotationKey)
		}
		for _, pod := range expected[i].Pods {
			delete(pod.Annotations, kubelet.ConfigFirstSeenAnnotationKey)
			delete(pod.Annotations, kubelet.ConfigSourceAnnotationKey)
		}
		if !api.Semantic.DeepEqual(expected[i], update) {
			t.Fatalf("Expected %#v, Got %#v", expected[i], update)
		}
	}
	expectNoPodUpdate(t, ch)
}

// expectNoPodUpdate fails the test if any update is queued on ch.
func expectNoPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate) {
	select {
	case update := <-ch:
		t.Errorf("Expected no update in channel, Got %#v", update)
	default:
	}
}

// TestNewPodAdded verifies an incremental ADD followed by a SET on Sync.
func TestNewPodAdded(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test")))
}

// TestNewPodAddedInvalidNamespace verifies a pod without a namespace is dropped.
func TestNewPodAddedInvalidNamespace(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "", ""))
	channel <- podUpdate
	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource))
}

// TestNewPodAddedDefaultNamespace verifies a pod in "default" is accepted.
func TestNewPodAddedDefaultNamespace(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default", "test")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "default", "test")))
}

// TestNewPodAddedDifferentNamespaces verifies same-name pods in different
// namespaces are tracked independently.
func TestNewPodAddedDifferentNamespaces(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default", "test")))

	// see an update in another namespace
	podUpdate = CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "default", "test"), CreateValidPod("foo", "new", "test")))
}

// TestInvalidPodFiltered verifies an invalid pod update produces no output.
func TestInvalidPodFiltered(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test")))

	// add an invalid update
	podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
	channel <- podUpdate
	expectNoPodUpdate(t, ch)
}

// TestNewPodAddedSnapshotAndUpdates verifies SnapshotAndUpdates mode emits a
// SET for new pods and an UPDATE for container changes.
func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshotAndUpdates)

	// see an set
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo", "new", "test")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test")))

	// container updates are separated as UPDATE
	pod := *podUpdate.Pods[0]
	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
	channel <- CreatePodUpdate(kubelet.ADD, NoneSource, &pod)
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, &pod))
}

// TestNewPodAddedSnapshot verifies Snapshot mode always emits full SETs.
func TestNewPodAddedSnapshot(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshot)

	// see an set
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo", "new", "test")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test")))

	// container updates are separated as UPDATE
	pod := *podUpdate.Pods[0]
	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
	channel <- CreatePodUpdate(kubelet.ADD, NoneSource, &pod)
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, &pod))
}

// TestNewPodAddedUpdatedRemoved walks a pod through ADD, UPDATE, and REMOVE.
func TestNewPodAddedUpdatedRemoved(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	// should register an add
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test")))

	// should ignore ADDs that are identical
	expectNoPodUpdate(t, ch)

	// an kubelet.ADD should be converted to kubelet.UPDATE
	pod := CreateValidPod("foo", "new", "test")
	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
	podUpdate = CreatePodUpdate(kubelet.ADD, NoneSource, pod)
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))

	podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.REMOVE, NoneSource, pod))
}

// TestNewPodAddedUpdatedSet verifies a SET is diffed into REMOVE/ADD/UPDATE.
func TestNewPodAddedUpdatedSet(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	// should register an add
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", ""), CreateValidPod("foo2", "new", ""), CreateValidPod("foo3", "new", ""))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test"), CreateValidPod("foo2", "new", "test"), CreateValidPod("foo3", "new", "test")))

	// should ignore ADDs that are identical
	expectNoPodUpdate(t, ch)

	// should be converted to an kubelet.ADD, kubelet.REMOVE, and kubelet.UPDATE
	pod := CreateValidPod("foo2", "new", "test")
	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
	podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, pod, CreateValidPod("foo3", "new", ""), CreateValidPod("foo4", "new", "test"))
	channel <- podUpdate
	expectPodUpdate(t, ch,
		CreatePodUpdate(kubelet.REMOVE, NoneSource, CreateValidPod("foo", "new", "test")),
		CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo4", "new", "test")),
		CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))
}

// TestPodUpdateAnnotations verifies annotation changes alone trigger UPDATEs.
func TestPodUpdateAnnotations(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	pod := CreateValidPod("foo2", "new", "test")
	pod.Annotations = make(map[string]string, 0)
	pod.Annotations["kubernetes.io/blah"] = "blah"

	clone, err := conversion.NewCloner().DeepCopy(pod)
	if err != nil {
		t.Fatalf("%v", err)
	}

	podUpdate := CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new", "test"), clone.(*api.Pod), CreateValidPod("foo3", "new", "test"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo1", "new", "test"), pod, CreateValidPod("foo3", "new", "test")))

	// NOTE(review): "kubenetes.io" looks like a typo for "kubernetes.io" —
	// the test still exercises an annotation change, but confirm intent.
	pod.Annotations["kubenetes.io/blah"] = "superblah"
	podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new", "test"), pod, CreateValidPod("foo3", "new", "test"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))

	pod.Annotations["kubernetes.io/otherblah"] = "doh"
	podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new", "test"), pod, CreateValidPod("foo3", "new", "test"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))

	delete(pod.Annotations, "kubernetes.io/blah")
	podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new", "test"), pod, CreateValidPod("foo3", "new", "test"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))
}

config_test: clean up unused function arguements

/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"sort"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/unversioned/record"
	"k8s.io/kubernetes/pkg/conversion"
	"k8s.io/kubernetes/pkg/kubelet"
	"k8s.io/kubernetes/pkg/securitycontext"
	"k8s.io/kubernetes/pkg/types"
)

const (
	NoneSource = ""
	TestSource = "test"
)

// expectEmptyChannel fails the test if an update is already queued on ch.
func expectEmptyChannel(t *testing.T, ch <-chan interface{}) {
	select {
	case update := <-ch:
		t.Errorf("Expected no update in channel, Got %v", update)
	default:
	}
}

// sortedPods orders pods by namespace for deterministic comparison.
type sortedPods []*api.Pod

func (s sortedPods) Len() int {
	return len(s)
}
func (s sortedPods) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s sortedPods) Less(i, j int) bool {
	return s[i].Namespace < s[j].Namespace
}

// CreateValidPod returns a minimal pod that passes kubelet validation.
func CreateValidPod(name, namespace string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       types.UID(name), // for the purpose of testing, this is unique enough
			Name:      name,
			Namespace: namespace,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyAlways,
			DNSPolicy:     api.DNSClusterFirst,
			Containers: []api.Container{
				{
					Name:            "ctr",
					Image:           "image",
					ImagePullPolicy: "IfNotPresent",
					SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
				},
			},
		},
	}
}

// CreatePodUpdate wraps pods in a kubelet.PodUpdate with the given op and source.
func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...*api.Pod) kubelet.PodUpdate {
	return kubelet.PodUpdate{Pods: pods, Op: op, Source: source}
}

// createPodConfigTester builds a PodConfig in the given notification mode and
// returns its test-source input channel, the merged updates channel, and the config.
func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) {
	eventBroadcaster := record.NewBroadcaster()
	config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
	channel := config.Channel(TestSource)
	ch := config.Updates()
	return channel, ch, config
}

// expectPodUpdate asserts that exactly the expected updates arrive on ch, in
// order, and that no extra update follows.
func expectPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate, expected ...kubelet.PodUpdate) {
	for i := range expected {
		update := <-ch
		sort.Sort(sortedPods(update.Pods))
		// Clear the annotation field before the comparison.
		// TODO: consider mock out recordFirstSeen in config.go
		for _, pod := range update.Pods {
			delete(pod.Annotations, kubelet.ConfigFirstSeenAnnotationKey)
			delete(pod.Annotations, kubelet.ConfigSourceAnnotationKey)
		}
		for _, pod := range expected[i].Pods {
			delete(pod.Annotations, kubelet.ConfigFirstSeenAnnotationKey)
			delete(pod.Annotations, kubelet.ConfigSourceAnnotationKey)
		}
		if !api.Semantic.DeepEqual(expected[i], update) {
			t.Fatalf("Expected %#v, Got %#v", expected[i], update)
		}
	}
	expectNoPodUpdate(t, ch)
}

// expectNoPodUpdate fails the test if any update is queued on ch.
func expectNoPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate) {
	select {
	case update := <-ch:
		t.Errorf("Expected no update in channel, Got %#v", update)
	default:
	}
}

// TestNewPodAdded verifies an incremental ADD followed by a SET on Sync.
func TestNewPodAdded(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new")))
}

// TestNewPodAddedInvalidNamespace verifies a pod without a namespace is dropped.
func TestNewPodAddedInvalidNamespace(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", ""))
	channel <- podUpdate
	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource))
}

// TestNewPodAddedDefaultNamespace verifies a pod in "default" is accepted.
func TestNewPodAddedDefaultNamespace(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "default")))
}

// TestNewPodAddedDifferentNamespaces verifies same-name pods in different
// namespaces are tracked independently.
func TestNewPodAddedDifferentNamespaces(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default")))

	// see an update in another namespace
	podUpdate = CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "default"), CreateValidPod("foo", "new")))
}

// TestInvalidPodFiltered verifies an invalid pod update produces no output.
func TestInvalidPodFiltered(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	// see an update
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new")))

	// add an invalid update
	podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
	channel <- podUpdate
	expectNoPodUpdate(t, ch)
}

// TestNewPodAddedSnapshotAndUpdates verifies SnapshotAndUpdates mode emits a
// SET for new pods and an UPDATE for container changes.
func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshotAndUpdates)

	// see an set
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo", "new")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new")))

	// container updates are separated as UPDATE
	pod := *podUpdate.Pods[0]
	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
	channel <- CreatePodUpdate(kubelet.ADD, NoneSource, &pod)
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, &pod))
}

// TestNewPodAddedSnapshot verifies Snapshot mode always emits full SETs.
func TestNewPodAddedSnapshot(t *testing.T) {
	channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshot)

	// see an set
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo", "new")))

	config.Sync()
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new")))

	// container updates are separated as UPDATE
	pod := *podUpdate.Pods[0]
	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
	channel <- CreatePodUpdate(kubelet.ADD, NoneSource, &pod)
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, &pod))
}

// TestNewPodAddedUpdatedRemoved walks a pod through ADD, UPDATE, and REMOVE.
func TestNewPodAddedUpdatedRemoved(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	// should register an add
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new")))

	// should ignore ADDs that are identical
	expectNoPodUpdate(t, ch)

	// an kubelet.ADD should be converted to kubelet.UPDATE
	pod := CreateValidPod("foo", "new")
	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
	podUpdate = CreatePodUpdate(kubelet.ADD, NoneSource, pod)
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))

	podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.REMOVE, NoneSource, pod))
}

// TestNewPodAddedUpdatedSet verifies a SET is diffed into REMOVE/ADD/UPDATE.
func TestNewPodAddedUpdatedSet(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	// should register an add
	podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new")))

	// should ignore ADDs that are identical
	expectNoPodUpdate(t, ch)

	// should be converted to an kubelet.ADD, kubelet.REMOVE, and kubelet.UPDATE
	pod := CreateValidPod("foo2", "new")
	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
	podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, pod, CreateValidPod("foo3", "new"), CreateValidPod("foo4", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch,
		CreatePodUpdate(kubelet.REMOVE, NoneSource, CreateValidPod("foo", "new")),
		CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo4", "new")),
		CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))
}

// TestPodUpdateAnnotations verifies annotation changes alone trigger UPDATEs.
func TestPodUpdateAnnotations(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	pod := CreateValidPod("foo2", "new")
	pod.Annotations = make(map[string]string, 0)
	pod.Annotations["kubernetes.io/blah"] = "blah"

	clone, err := conversion.NewCloner().DeepCopy(pod)
	if err != nil {
		t.Fatalf("%v", err)
	}

	podUpdate := CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new"), clone.(*api.Pod), CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new")))

	// NOTE(review): "kubenetes.io" looks like a typo for "kubernetes.io" —
	// the test still exercises an annotation change, but confirm intent.
	pod.Annotations["kubenetes.io/blah"] = "superblah"
	podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))

	pod.Annotations["kubernetes.io/otherblah"] = "doh"
	podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))

	delete(pod.Annotations, "kubernetes.io/blah")
	podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))
}
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package stack

import (
	"fmt"
	"sync"
	"time"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

const (
	// immediateDuration is a duration of zero for scheduling work that needs to
	// be done immediately but asynchronously to avoid deadlock.
	immediateDuration time.Duration = 0
)

// NeighborEntry describes a neighboring device in the local network.
type NeighborEntry struct {
	Addr      tcpip.Address
	LinkAddr  tcpip.LinkAddress
	State     NeighborState
	UpdatedAt time.Time
}

// NeighborState defines the state of a NeighborEntry within the Neighbor
// Unreachability Detection state machine, as per RFC 4861 section 7.3.2 and
// RFC 7048.
type NeighborState uint8

const (
	// Unknown means reachability has not been verified yet. This is the initial
	// state of entries that have been created automatically by the Neighbor
	// Unreachability Detection state machine.
	Unknown NeighborState = iota
	// Incomplete means that there is an outstanding request to resolve the
	// address.
	Incomplete
	// Reachable means the path to the neighbor is functioning properly for both
	// receive and transmit paths.
	Reachable
	// Stale means reachability to the neighbor is unknown, but packets are still
	// able to be transmitted to the possibly stale link address.
	Stale
	// Delay means reachability to the neighbor is unknown and pending
	// confirmation from an upper-level protocol like TCP, but packets are still
	// able to be transmitted to the possibly stale link address.
	Delay
	// Probe means a reachability confirmation is actively being sought by
	// periodically retransmitting reachability probes until a reachability
	// confirmation is received, or until the maximum number of probes has been
	// sent.
	Probe
	// Static describes entries that have been explicitly added by the user. They
	// do not expire and are not deleted until explicitly removed.
	Static
	// Unreachable means reachability confirmation failed; the maximum number of
	// reachability probes has been sent and no replies have been received.
	//
	// TODO(gvisor.dev/issue/5472): Add the following sentence when we implement
	// RFC 7048: "Packets continue to be sent to the neighbor while
	// re-attempting to resolve the address."
	Unreachable
)

// timer pairs a scheduled tcpip.Timer with a cancellation flag shared with
// the timer's callback.
type timer struct {
	// done indicates to the timer that the timer was stopped.
	done *bool

	timer tcpip.Timer
}

// neighborEntry implements a neighbor entry's individual node behavior, as per
// RFC 4861 section 7.3.3. Neighbor Unreachability Detection operates in
// parallel with the sending of packets to a neighbor, necessitating the
// entry's lock to be acquired for all operations.
type neighborEntry struct {
	neighborEntryEntry

	cache *neighborCache

	// nudState points to the Neighbor Unreachability Detection configuration.
	nudState *NUDState

	mu struct {
		sync.RWMutex

		neigh NeighborEntry

		// done is closed when address resolution is complete. It is nil iff s is
		// incomplete and resolution is not yet in progress.
		done chan struct{}

		// onResolve is called with the result of address resolution.
		onResolve []func(LinkResolutionResult)

		isRouter bool

		timer timer
	}
}

// newNeighborEntry creates a neighbor cache entry starting at the default
// state, Unknown.
// Transition out of Unknown by calling either
// `handlePacketQueuedLocked` or `handleProbeLocked` on the newly created
// neighborEntry.
func newNeighborEntry(cache *neighborCache, remoteAddr tcpip.Address, nudState *NUDState) *neighborEntry {
	n := &neighborEntry{
		cache:    cache,
		nudState: nudState,
	}
	n.mu.Lock()
	n.mu.neigh = NeighborEntry{
		Addr:  remoteAddr,
		State: Unknown,
	}
	n.mu.Unlock()
	return n
}

// newStaticNeighborEntry creates a neighbor cache entry starting at the
// Static state. The entry can only transition out of Static by directly
// calling `setStateLocked`.
func newStaticNeighborEntry(cache *neighborCache, addr tcpip.Address, linkAddr tcpip.LinkAddress, state *NUDState) *neighborEntry {
	entry := NeighborEntry{
		Addr:      addr,
		LinkAddr:  linkAddr,
		State:     Static,
		UpdatedAt: cache.nic.stack.clock.Now(),
	}
	n := &neighborEntry{
		cache:    cache,
		nudState: state,
	}
	n.mu.Lock()
	n.mu.neigh = entry
	n.mu.Unlock()
	return n
}

// notifyCompletionLocked notifies those waiting for address resolution, with
// the link address if resolution completed successfully.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) notifyCompletionLocked(err tcpip.Error) {
	res := LinkResolutionResult{LinkAddress: e.mu.neigh.LinkAddr, Err: err}
	for _, callback := range e.mu.onResolve {
		callback(res)
	}
	e.mu.onResolve = nil
	if ch := e.mu.done; ch != nil {
		close(ch)
		e.mu.done = nil
		// Dequeue the pending packets asynchronously to not hold up the current
		// goroutine as writing packets may be a costly operation.
		//
		// At the time of writing, when writing packets, a neighbor's link address
		// is resolved (which ends up obtaining the entry's lock) while holding the
		// link resolution queue's lock. Dequeuing packets asynchronously avoids a
		// lock ordering violation.
		//
		// NB: this is equivalent to spawning a goroutine directly using the go
		// keyword but allows tests that use manual clocks to deterministically
		// wait for this work to complete.
		e.cache.nic.stack.clock.AfterFunc(0, func() {
			e.cache.nic.linkResQueue.dequeue(ch, e.mu.neigh.LinkAddr, err)
		})
	}
}

// dispatchAddEventLocked signals to stack's NUD Dispatcher that the entry has
// been added.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) dispatchAddEventLocked() {
	if nudDisp := e.cache.nic.stack.nudDisp; nudDisp != nil {
		nudDisp.OnNeighborAdded(e.cache.nic.id, e.mu.neigh)
	}
}

// dispatchChangeEventLocked signals to stack's NUD Dispatcher that the entry
// has changed state or link-layer address.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) dispatchChangeEventLocked() {
	if nudDisp := e.cache.nic.stack.nudDisp; nudDisp != nil {
		nudDisp.OnNeighborChanged(e.cache.nic.id, e.mu.neigh)
	}
}

// dispatchRemoveEventLocked signals to stack's NUD Dispatcher that the entry
// has been removed.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) dispatchRemoveEventLocked() {
	if nudDisp := e.cache.nic.stack.nudDisp; nudDisp != nil {
		nudDisp.OnNeighborRemoved(e.cache.nic.id, e.mu.neigh)
	}
}

// cancelTimerLocked cancels the currently scheduled action, if there is one.
// Entries in Unknown, Stale, or Static state do not have a scheduled action.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) cancelTimerLocked() {
	if e.mu.timer.timer != nil {
		e.mu.timer.timer.Stop()
		// Flag the callback so a timer that already fired but has not yet
		// acquired e.mu becomes a no-op.
		*e.mu.timer.done = true

		e.mu.timer = timer{}
	}
}

// removeLocked prepares the entry for removal.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) removeLocked() {
	e.mu.neigh.UpdatedAt = e.cache.nic.stack.clock.Now()
	e.dispatchRemoveEventLocked()
	// Cancel before notifying so a pending timer callback cannot act on the
	// removed entry.
	e.cancelTimerLocked()
	// TODO(https://gvisor.dev/issues/5583): test the case where this function is
	// called during resolution; that can happen in at least these scenarios:
	//
	// - manual address removal during resolution
	//
	// - neighbor cache eviction during resolution
	e.notifyCompletionLocked(&tcpip.ErrAborted{})
}

// setStateLocked transitions the entry to the specified state immediately.
//
// Follows the logic defined in RFC 4861 section 7.3.3.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) setStateLocked(next NeighborState) {
	e.cancelTimerLocked()
	prev := e.mu.neigh.State
	e.mu.neigh.State = next
	e.mu.neigh.UpdatedAt = e.cache.nic.stack.clock.Now()
	config := e.nudState.Config()
	switch next {
	case Incomplete:
		panic(fmt.Sprintf("should never transition to Incomplete with setStateLocked; neigh = %#v, prev state = %s", e.mu.neigh, prev))
	case Reachable:
		// Protected by e.mu.
		done := false
		e.mu.timer = timer{
			done: &done,
			timer: e.cache.nic.stack.Clock().AfterFunc(e.nudState.ReachableTime(), func() {
				e.mu.Lock()
				defer e.mu.Unlock()
				if done {
					// The timer was stopped because the entry changed state.
					return
				}
				e.setStateLocked(Stale)
				e.dispatchChangeEventLocked()
			}),
		}
	case Delay:
		// Protected by e.mu.
		done := false
		e.mu.timer = timer{
			done: &done,
			timer: e.cache.nic.stack.Clock().AfterFunc(config.DelayFirstProbeTime, func() {
				e.mu.Lock()
				defer e.mu.Unlock()
				if done {
					// The timer was stopped because the entry changed state.
					return
				}
				e.setStateLocked(Probe)
				e.dispatchChangeEventLocked()
			}),
		}
	case Probe:
		// Protected by e.mu.
		done := false
		remaining := config.MaxUnicastProbes
		addr := e.mu.neigh.Addr
		linkAddr := e.mu.neigh.LinkAddr
		// Send a probe in another goroutine to free this thread of execution
		// for finishing the state transition. This is necessary to escape the
		// currently held lock so we can send the probe message without holding
		// a shared lock.
		e.mu.timer = timer{
			done: &done,
			timer: e.cache.nic.stack.Clock().AfterFunc(immediateDuration, func() {
				var err tcpip.Error = &tcpip.ErrTimeout{}
				if remaining != 0 {
					err = e.cache.linkRes.LinkAddressRequest(addr, "" /* localAddr */, linkAddr)
				}
				e.mu.Lock()
				defer e.mu.Unlock()
				if done {
					// The timer was stopped because the entry changed state.
					return
				}
				if err != nil {
					e.setStateLocked(Unreachable)
					e.notifyCompletionLocked(err)
					e.dispatchChangeEventLocked()
					return
				}
				remaining--
				e.mu.timer.timer.Reset(config.RetransmitTimer)
			}),
		}
	case Unreachable:
	case Unknown, Stale, Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid state transition from %q to %q", prev, next))
	}
}

// handlePacketQueuedLocked advances the state machine according to a packet
// being queued for outgoing transmission.
//
// Follows the logic defined in RFC 4861 section 7.3.3.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) handlePacketQueuedLocked(localAddr tcpip.Address) {
	switch e.mu.neigh.State {
	case Unknown, Unreachable:
		prev := e.mu.neigh.State
		e.mu.neigh.State = Incomplete
		e.mu.neigh.UpdatedAt = e.cache.nic.stack.clock.Now()
		switch prev {
		case Unknown:
			e.dispatchAddEventLocked()
		case Unreachable:
			e.dispatchChangeEventLocked()
			e.cache.nic.stats.neighbor.unreachableEntryLookups.Increment()
		}
		config := e.nudState.Config()
		// Protected by e.mu.
		done := false
		remaining := config.MaxMulticastProbes
		addr := e.mu.neigh.Addr
		// Send a probe in another goroutine to free this thread of execution
		// for finishing the state transition. This is necessary to escape the
		// currently held lock so we can send the probe message without holding
		// a shared lock.
		e.mu.timer = timer{
			done: &done,
			timer: e.cache.nic.stack.Clock().AfterFunc(immediateDuration, func() {
				var err tcpip.Error = &tcpip.ErrTimeout{}
				if remaining != 0 {
					// As per RFC 4861 section 7.2.2:
					//
					//  If the source address of the packet prompting the solicitation is
					//  the same as one of the addresses assigned to the outgoing interface,
					//  that address SHOULD be placed in the IP Source Address of the
					//  outgoing solicitation.
					//
					err = e.cache.linkRes.LinkAddressRequest(addr, localAddr, "" /* linkAddr */)
				}
				e.mu.Lock()
				defer e.mu.Unlock()
				if done {
					// The timer was stopped because the entry changed state.
					return
				}
				if err != nil {
					e.setStateLocked(Unreachable)
					e.notifyCompletionLocked(err)
					e.dispatchChangeEventLocked()
					return
				}
				remaining--
				e.mu.timer.timer.Reset(config.RetransmitTimer)
			}),
		}
	case Stale:
		e.setStateLocked(Delay)
		e.dispatchChangeEventLocked()
	case Incomplete, Reachable, Delay, Probe, Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid cache entry state: %s", e.mu.neigh.State))
	}
}

// handleProbeLocked processes an incoming neighbor probe (e.g. ARP request or
// Neighbor Solicitation for ARP or NDP, respectively).
//
// Follows the logic defined in RFC 4861 section 7.2.3.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) handleProbeLocked(remoteLinkAddr tcpip.LinkAddress) {
	// Probes MUST be silently discarded if the target address is tentative, does
	// not exist, or not bound to the NIC as per RFC 4861 section 7.2.3. These
	// checks MUST be done by the NetworkEndpoint.
	switch e.mu.neigh.State {
	case Unknown:
		e.mu.neigh.LinkAddr = remoteLinkAddr
		e.setStateLocked(Stale)
		e.dispatchAddEventLocked()
	case Incomplete:
		// "If an entry already exists, and the cached link-layer address
		// differs from the one in the received Source Link-Layer option, the
		// cached address should be replaced by the received address, and the
		// entry's reachability state MUST be set to STALE."
		//   - RFC 4861 section 7.2.3
		e.mu.neigh.LinkAddr = remoteLinkAddr
		e.setStateLocked(Stale)
		e.notifyCompletionLocked(nil)
		e.dispatchChangeEventLocked()
	case Reachable, Delay, Probe:
		if e.mu.neigh.LinkAddr != remoteLinkAddr {
			e.mu.neigh.LinkAddr = remoteLinkAddr
			e.setStateLocked(Stale)
			e.dispatchChangeEventLocked()
		}
	case Stale:
		if e.mu.neigh.LinkAddr != remoteLinkAddr {
			e.mu.neigh.LinkAddr = remoteLinkAddr
			e.dispatchChangeEventLocked()
		}
	case Unreachable:
		// TODO(gvisor.dev/issue/5472): Do not change the entry if the link
		// address is the same, as per RFC 7048.
		e.mu.neigh.LinkAddr = remoteLinkAddr
		e.setStateLocked(Stale)
		e.dispatchChangeEventLocked()
	case Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid cache entry state: %s", e.mu.neigh.State))
	}
}

// handleConfirmationLocked processes an incoming neighbor confirmation
// (e.g. ARP reply or Neighbor Advertisement for ARP or NDP, respectively).
//
// Follows the state machine defined by RFC 4861 section 7.2.5.
//
// TODO(gvisor.dev/issue/2277): To protect against ARP poisoning and other
// attacks against NDP functions, Secure Neighbor Discovery (SEND) Protocol
// should be deployed where preventing access to the broadcast segment might
// not be possible. SEND uses RSA key pairs to produce Cryptographically
// Generated Addresses (CGA), as defined in RFC 3972. This ensures that the
// claimed source of an NDP message is the owner of the claimed address.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) handleConfirmationLocked(linkAddr tcpip.LinkAddress, flags ReachabilityConfirmationFlags) {
	switch e.mu.neigh.State {
	case Incomplete:
		if len(linkAddr) == 0 {
			// "If the link layer has addresses and no Target Link-Layer Address
			// option is included, the receiving node SHOULD silently discard the
			// received advertisement."
			//   - RFC 4861 section 7.2.5
			break
		}
		e.mu.neigh.LinkAddr = linkAddr
		if flags.Solicited {
			e.setStateLocked(Reachable)
		} else {
			e.setStateLocked(Stale)
		}
		e.dispatchChangeEventLocked()
		e.mu.isRouter = flags.IsRouter
		e.notifyCompletionLocked(nil)
		// "Note that the Override flag is ignored if the entry is in the
		// INCOMPLETE state." - RFC 4861 section 7.2.5
	case Reachable, Stale, Delay, Probe:
		isLinkAddrDifferent := len(linkAddr) != 0 && e.mu.neigh.LinkAddr != linkAddr
		if isLinkAddrDifferent {
			if !flags.Override {
				if e.mu.neigh.State == Reachable {
					e.setStateLocked(Stale)
					e.dispatchChangeEventLocked()
				}
				break
			}
			e.mu.neigh.LinkAddr = linkAddr
			if !flags.Solicited {
				if e.mu.neigh.State != Stale {
					e.setStateLocked(Stale)
					e.dispatchChangeEventLocked()
				} else {
					// Notify the LinkAddr change, even though NUD state hasn't changed.
					e.dispatchChangeEventLocked()
				}
				break
			}
		}
		if flags.Solicited && (flags.Override || !isLinkAddrDifferent) {
			wasReachable := e.mu.neigh.State == Reachable
			// Set state to Reachable again to refresh timers.
			e.setStateLocked(Reachable)
			e.notifyCompletionLocked(nil)
			if !wasReachable {
				e.dispatchChangeEventLocked()
			}
		}
		if e.mu.isRouter && !flags.IsRouter && header.IsV6UnicastAddress(e.mu.neigh.Addr) {
			// "In those cases where the IsRouter flag changes from TRUE to FALSE as
			// a result of this update, the node MUST remove that router from the
			// Default Router List and update the Destination Cache entries for all
			// destinations using that neighbor as a router as specified in Section
			// 7.3.3. This is needed to detect when a node that is used as a router
			// stops forwarding packets due to being configured as a host."
			//   - RFC 4861 section 7.2.5
			//
			// TODO(gvisor.dev/issue/4085): Remove the special casing we do for IPv6
			// here.
			ep, ok := e.cache.nic.networkEndpoints[header.IPv6ProtocolNumber]
			if !ok {
				panic(fmt.Sprintf("have a neighbor entry for an IPv6 router but no IPv6 network endpoint"))
			}
			if ndpEP, ok := ep.(NDPEndpoint); ok {
				ndpEP.InvalidateDefaultRouter(e.mu.neigh.Addr)
			}
		}
		e.mu.isRouter = flags.IsRouter
	case Unknown, Unreachable, Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid cache entry state: %s", e.mu.neigh.State))
	}
}

// handleUpperLevelConfirmationLocked processes an incoming upper-level protocol
// (e.g. TCP acknowledgements) reachability confirmation.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) handleUpperLevelConfirmationLocked() {
	switch e.mu.neigh.State {
	case Reachable, Stale, Delay, Probe:
		wasReachable := e.mu.neigh.State == Reachable
		// Set state to Reachable again to refresh timers.
		e.setStateLocked(Reachable)
		if !wasReachable {
			e.dispatchChangeEventLocked()
		}
	case Unknown, Incomplete, Unreachable, Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid cache entry state: %s", e.mu.neigh.State))
	}
}

Reduce timer churn in NUD

PiperOrigin-RevId: 438852403

// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package stack

import (
	"fmt"
	"sync"
	"time"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

const (
	// immediateDuration is a duration of zero for scheduling work that needs to
	// be done immediately but asynchronously to avoid deadlock.
	immediateDuration time.Duration = 0
)

// NeighborEntry describes a neighboring device in the local network.
type NeighborEntry struct {
	Addr      tcpip.Address
	LinkAddr  tcpip.LinkAddress
	State     NeighborState
	UpdatedAt time.Time
}

// NeighborState defines the state of a NeighborEntry within the Neighbor
// Unreachability Detection state machine, as per RFC 4861 section 7.3.2 and
// RFC 7048.
type NeighborState uint8

const (
	// Unknown means reachability has not been verified yet. This is the initial
	// state of entries that have been created automatically by the Neighbor
	// Unreachability Detection state machine.
	Unknown NeighborState = iota
	// Incomplete means that there is an outstanding request to resolve the
	// address.
	Incomplete
	// Reachable means the path to the neighbor is functioning properly for both
	// receive and transmit paths.
	Reachable
	// Stale means reachability to the neighbor is unknown, but packets are still
	// able to be transmitted to the possibly stale link address.
	Stale
	// Delay means reachability to the neighbor is unknown and pending
	// confirmation from an upper-level protocol like TCP, but packets are still
	// able to be transmitted to the possibly stale link address.
	Delay
	// Probe means a reachability confirmation is actively being sought by
	// periodically retransmitting reachability probes until a reachability
	// confirmation is received, or until the maximum number of probes has been
	// sent.
	Probe
	// Static describes entries that have been explicitly added by the user. They
	// do not expire and are not deleted until explicitly removed.
	Static
	// Unreachable means reachability confirmation failed; the maximum number of
	// reachability probes has been sent and no replies have been received.
	//
	// TODO(gvisor.dev/issue/5472): Add the following sentence when we implement
	// RFC 7048: "Packets continue to be sent to the neighbor while
	// re-attempting to resolve the address."
	Unreachable
)

type timer struct {
	// done indicates to the timer that the timer was stopped.
	done *bool

	timer tcpip.Timer
}

// neighborEntry implements a neighbor entry's individual node behavior, as per
// RFC 4861 section 7.3.3. Neighbor Unreachability Detection operates in
// parallel with the sending of packets to a neighbor, necessitating the
// entry's lock to be acquired for all operations.
type neighborEntry struct {
	neighborEntryEntry

	cache *neighborCache

	// nudState points to the Neighbor Unreachability Detection configuration.
	nudState *NUDState

	mu struct {
		sync.RWMutex

		neigh NeighborEntry

		// done is closed when address resolution is complete. It is nil iff s is
		// incomplete and resolution is not yet in progress.
		done chan struct{}

		// onResolve is called with the result of address resolution.
		onResolve []func(LinkResolutionResult)

		isRouter bool

		timer timer
	}
}

// newNeighborEntry creates a neighbor cache entry starting at the default
// state, Unknown. Transition out of Unknown by calling either
// `handlePacketQueuedLocked` or `handleProbeLocked` on the newly created
// neighborEntry.
func newNeighborEntry(cache *neighborCache, remoteAddr tcpip.Address, nudState *NUDState) *neighborEntry {
	n := &neighborEntry{
		cache:    cache,
		nudState: nudState,
	}
	n.mu.Lock()
	n.mu.neigh = NeighborEntry{
		Addr:  remoteAddr,
		State: Unknown,
	}
	n.mu.Unlock()
	return n
}

// newStaticNeighborEntry creates a neighbor cache entry starting at the
// Static state. The entry can only transition out of Static by directly
// calling `setStateLocked`.
func newStaticNeighborEntry(cache *neighborCache, addr tcpip.Address, linkAddr tcpip.LinkAddress, state *NUDState) *neighborEntry {
	entry := NeighborEntry{
		Addr:      addr,
		LinkAddr:  linkAddr,
		State:     Static,
		UpdatedAt: cache.nic.stack.clock.Now(),
	}
	n := &neighborEntry{
		cache:    cache,
		nudState: state,
	}
	n.mu.Lock()
	n.mu.neigh = entry
	n.mu.Unlock()
	return n
}

// notifyCompletionLocked notifies those waiting for address resolution, with
// the link address if resolution completed successfully.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) notifyCompletionLocked(err tcpip.Error) {
	res := LinkResolutionResult{LinkAddress: e.mu.neigh.LinkAddr, Err: err}
	for _, callback := range e.mu.onResolve {
		callback(res)
	}
	// Clear the callbacks so a later resolution cycle starts fresh.
	e.mu.onResolve = nil
	if ch := e.mu.done; ch != nil {
		close(ch)
		e.mu.done = nil
		// Dequeue the pending packets asynchronously to not hold up the current
		// goroutine as writing packets may be a costly operation.
		//
		// At the time of writing, when writing packets, a neighbor's link address
		// is resolved (which ends up obtaining the entry's lock) while holding the
		// link resolution queue's lock. Dequeuing packets asynchronously avoids a
		// lock ordering violation.
		//
		// NB: this is equivalent to spawning a goroutine directly using the go
		// keyword but allows tests that use manual clocks to deterministically
		// wait for this work to complete.
		e.cache.nic.stack.clock.AfterFunc(0, func() {
			e.cache.nic.linkResQueue.dequeue(ch, e.mu.neigh.LinkAddr, err)
		})
	}
}

// dispatchAddEventLocked signals to stack's NUD Dispatcher that the entry has
// been added.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) dispatchAddEventLocked() {
	if nudDisp := e.cache.nic.stack.nudDisp; nudDisp != nil {
		nudDisp.OnNeighborAdded(e.cache.nic.id, e.mu.neigh)
	}
}

// dispatchChangeEventLocked signals to stack's NUD Dispatcher that the entry
// has changed state or link-layer address.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) dispatchChangeEventLocked() {
	if nudDisp := e.cache.nic.stack.nudDisp; nudDisp != nil {
		nudDisp.OnNeighborChanged(e.cache.nic.id, e.mu.neigh)
	}
}

// dispatchRemoveEventLocked signals to stack's NUD Dispatcher that the entry
// has been removed.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) dispatchRemoveEventLocked() {
	if nudDisp := e.cache.nic.stack.nudDisp; nudDisp != nil {
		nudDisp.OnNeighborRemoved(e.cache.nic.id, e.mu.neigh)
	}
}

// cancelTimerLocked cancels the currently scheduled action, if there is one.
// Entries in Unknown, Stale, or Static state do not have a scheduled action.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) cancelTimerLocked() {
	if e.mu.timer.timer != nil {
		e.mu.timer.timer.Stop()
		// Flag the shared done bool so an already-fired timer callback that is
		// waiting on e.mu becomes a no-op once it acquires the lock.
		*e.mu.timer.done = true
		e.mu.timer = timer{}
	}
}

// removeLocked prepares the entry for removal.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) removeLocked() {
	e.mu.neigh.UpdatedAt = e.cache.nic.stack.clock.Now()
	e.dispatchRemoveEventLocked()
	// Cancel before notifying so a pending timer callback cannot act on the
	// removed entry.
	e.cancelTimerLocked()
	// TODO(https://gvisor.dev/issues/5583): test the case where this function is
	// called during resolution; that can happen in at least these scenarios:
	//
	// - manual address removal during resolution
	//
	// - neighbor cache eviction during resolution
	e.notifyCompletionLocked(&tcpip.ErrAborted{})
}

// setStateLocked transitions the entry to the specified state immediately.
//
// Follows the logic defined in RFC 4861 section 7.3.3.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) setStateLocked(next NeighborState) {
	e.cancelTimerLocked()
	prev := e.mu.neigh.State
	e.mu.neigh.State = next
	e.mu.neigh.UpdatedAt = e.cache.nic.stack.clock.Now()
	config := e.nudState.Config()
	switch next {
	case Incomplete:
		panic(fmt.Sprintf("should never transition to Incomplete with setStateLocked; neigh = %#v, prev state = %s", e.mu.neigh, prev))
	case Reachable:
		// Protected by e.mu.
		done := false
		e.mu.timer = timer{
			done: &done,
			timer: e.cache.nic.stack.Clock().AfterFunc(e.nudState.ReachableTime(), func() {
				e.mu.Lock()
				defer e.mu.Unlock()
				if done {
					// The timer was stopped because the entry changed state.
					return
				}
				e.setStateLocked(Stale)
				e.dispatchChangeEventLocked()
			}),
		}
	case Delay:
		// Protected by e.mu.
		done := false
		e.mu.timer = timer{
			done: &done,
			timer: e.cache.nic.stack.Clock().AfterFunc(config.DelayFirstProbeTime, func() {
				e.mu.Lock()
				defer e.mu.Unlock()
				if done {
					// The timer was stopped because the entry changed state.
					return
				}
				e.setStateLocked(Probe)
				e.dispatchChangeEventLocked()
			}),
		}
	case Probe:
		// Protected by e.mu.
		done := false
		remaining := config.MaxUnicastProbes
		addr := e.mu.neigh.Addr
		linkAddr := e.mu.neigh.LinkAddr
		// Send a probe in another goroutine to free this thread of execution
		// for finishing the state transition. This is necessary to escape the
		// currently held lock so we can send the probe message without holding
		// a shared lock.
		e.mu.timer = timer{
			done: &done,
			timer: e.cache.nic.stack.Clock().AfterFunc(immediateDuration, func() {
				var err tcpip.Error = &tcpip.ErrTimeout{}
				if remaining != 0 {
					err = e.cache.linkRes.LinkAddressRequest(addr, "" /* localAddr */, linkAddr)
				}
				e.mu.Lock()
				defer e.mu.Unlock()
				if done {
					// The timer was stopped because the entry changed state.
					return
				}
				if err != nil {
					e.setStateLocked(Unreachable)
					e.notifyCompletionLocked(err)
					e.dispatchChangeEventLocked()
					return
				}
				remaining--
				e.mu.timer.timer.Reset(config.RetransmitTimer)
			}),
		}
	case Unreachable:
	case Unknown, Stale, Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid state transition from %q to %q", prev, next))
	}
}

// handlePacketQueuedLocked advances the state machine according to a packet
// being queued for outgoing transmission.
//
// Follows the logic defined in RFC 4861 section 7.3.3.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) handlePacketQueuedLocked(localAddr tcpip.Address) {
	switch e.mu.neigh.State {
	case Unknown, Unreachable:
		prev := e.mu.neigh.State
		e.mu.neigh.State = Incomplete
		e.mu.neigh.UpdatedAt = e.cache.nic.stack.clock.Now()
		switch prev {
		case Unknown:
			e.dispatchAddEventLocked()
		case Unreachable:
			e.dispatchChangeEventLocked()
			e.cache.nic.stats.neighbor.unreachableEntryLookups.Increment()
		}
		config := e.nudState.Config()
		// Protected by e.mu.
		done := false
		remaining := config.MaxMulticastProbes
		addr := e.mu.neigh.Addr
		// Send a probe in another goroutine to free this thread of execution
		// for finishing the state transition. This is necessary to escape the
		// currently held lock so we can send the probe message without holding
		// a shared lock.
		e.mu.timer = timer{
			done: &done,
			timer: e.cache.nic.stack.Clock().AfterFunc(immediateDuration, func() {
				var err tcpip.Error = &tcpip.ErrTimeout{}
				if remaining != 0 {
					// As per RFC 4861 section 7.2.2:
					//
					//  If the source address of the packet prompting the solicitation is
					//  the same as one of the addresses assigned to the outgoing interface,
					//  that address SHOULD be placed in the IP Source Address of the
					//  outgoing solicitation.
					//
					err = e.cache.linkRes.LinkAddressRequest(addr, localAddr, "" /* linkAddr */)
				}
				e.mu.Lock()
				defer e.mu.Unlock()
				if done {
					// The timer was stopped because the entry changed state.
					return
				}
				if err != nil {
					e.setStateLocked(Unreachable)
					e.notifyCompletionLocked(err)
					e.dispatchChangeEventLocked()
					return
				}
				remaining--
				e.mu.timer.timer.Reset(config.RetransmitTimer)
			}),
		}
	case Stale:
		e.setStateLocked(Delay)
		e.dispatchChangeEventLocked()
	case Incomplete, Reachable, Delay, Probe, Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid cache entry state: %s", e.mu.neigh.State))
	}
}

// handleProbeLocked processes an incoming neighbor probe (e.g. ARP request or
// Neighbor Solicitation for ARP or NDP, respectively).
//
// Follows the logic defined in RFC 4861 section 7.2.3.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) handleProbeLocked(remoteLinkAddr tcpip.LinkAddress) {
	// Probes MUST be silently discarded if the target address is tentative, does
	// not exist, or not bound to the NIC as per RFC 4861 section 7.2.3. These
	// checks MUST be done by the NetworkEndpoint.
	switch e.mu.neigh.State {
	case Unknown:
		e.mu.neigh.LinkAddr = remoteLinkAddr
		e.setStateLocked(Stale)
		e.dispatchAddEventLocked()
	case Incomplete:
		// "If an entry already exists, and the cached link-layer address
		// differs from the one in the received Source Link-Layer option, the
		// cached address should be replaced by the received address, and the
		// entry's reachability state MUST be set to STALE."
		//   - RFC 4861 section 7.2.3
		e.mu.neigh.LinkAddr = remoteLinkAddr
		e.setStateLocked(Stale)
		e.notifyCompletionLocked(nil)
		e.dispatchChangeEventLocked()
	case Reachable, Delay, Probe:
		if e.mu.neigh.LinkAddr != remoteLinkAddr {
			e.mu.neigh.LinkAddr = remoteLinkAddr
			e.setStateLocked(Stale)
			e.dispatchChangeEventLocked()
		}
	case Stale:
		if e.mu.neigh.LinkAddr != remoteLinkAddr {
			e.mu.neigh.LinkAddr = remoteLinkAddr
			e.dispatchChangeEventLocked()
		}
	case Unreachable:
		// TODO(gvisor.dev/issue/5472): Do not change the entry if the link
		// address is the same, as per RFC 7048.
		e.mu.neigh.LinkAddr = remoteLinkAddr
		e.setStateLocked(Stale)
		e.dispatchChangeEventLocked()
	case Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid cache entry state: %s", e.mu.neigh.State))
	}
}

// handleConfirmationLocked processes an incoming neighbor confirmation
// (e.g. ARP reply or Neighbor Advertisement for ARP or NDP, respectively).
//
// Follows the state machine defined by RFC 4861 section 7.2.5.
//
// TODO(gvisor.dev/issue/2277): To protect against ARP poisoning and other
// attacks against NDP functions, Secure Neighbor Discovery (SEND) Protocol
// should be deployed where preventing access to the broadcast segment might
// not be possible.
// SEND uses RSA key pairs to produce Cryptographically
// Generated Addresses (CGA), as defined in RFC 3972. This ensures that the
// claimed source of an NDP message is the owner of the claimed address.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) handleConfirmationLocked(linkAddr tcpip.LinkAddress, flags ReachabilityConfirmationFlags) {
	switch e.mu.neigh.State {
	case Incomplete:
		if len(linkAddr) == 0 {
			// "If the link layer has addresses and no Target Link-Layer Address
			// option is included, the receiving node SHOULD silently discard the
			// received advertisement." - RFC 4861 section 7.2.5
			break
		}
		e.mu.neigh.LinkAddr = linkAddr
		if flags.Solicited {
			e.setStateLocked(Reachable)
		} else {
			e.setStateLocked(Stale)
		}
		e.dispatchChangeEventLocked()
		e.mu.isRouter = flags.IsRouter
		e.notifyCompletionLocked(nil)
		// "Note that the Override flag is ignored if the entry is in the
		// INCOMPLETE state." - RFC 4861 section 7.2.5
	case Reachable, Stale, Delay, Probe:
		isLinkAddrDifferent := len(linkAddr) != 0 && e.mu.neigh.LinkAddr != linkAddr
		if isLinkAddrDifferent {
			if !flags.Override {
				if e.mu.neigh.State == Reachable {
					e.setStateLocked(Stale)
					e.dispatchChangeEventLocked()
				}
				break
			}
			e.mu.neigh.LinkAddr = linkAddr
			if !flags.Solicited {
				if e.mu.neigh.State != Stale {
					e.setStateLocked(Stale)
					e.dispatchChangeEventLocked()
				} else {
					// Notify the LinkAddr change, even though NUD state hasn't changed.
					e.dispatchChangeEventLocked()
				}
				break
			}
		}
		if flags.Solicited && (flags.Override || !isLinkAddrDifferent) {
			wasReachable := e.mu.neigh.State == Reachable
			// Set state to Reachable again to refresh timers.
			e.setStateLocked(Reachable)
			e.notifyCompletionLocked(nil)
			if !wasReachable {
				e.dispatchChangeEventLocked()
			}
		}
		if e.mu.isRouter && !flags.IsRouter && header.IsV6UnicastAddress(e.mu.neigh.Addr) {
			// "In those cases where the IsRouter flag changes from TRUE to FALSE as
			// a result of this update, the node MUST remove that router from the
			// Default Router List and update the Destination Cache entries for all
			// destinations using that neighbor as a router as specified in Section
			// 7.3.3. This is needed to detect when a node that is used as a router
			// stops forwarding packets due to being configured as a host."
			//   - RFC 4861 section 7.2.5
			//
			// TODO(gvisor.dev/issue/4085): Remove the special casing we do for IPv6
			// here.
			ep, ok := e.cache.nic.networkEndpoints[header.IPv6ProtocolNumber]
			if !ok {
				panic(fmt.Sprintf("have a neighbor entry for an IPv6 router but no IPv6 network endpoint"))
			}
			if ndpEP, ok := ep.(NDPEndpoint); ok {
				ndpEP.InvalidateDefaultRouter(e.mu.neigh.Addr)
			}
		}
		e.mu.isRouter = flags.IsRouter
	case Unknown, Unreachable, Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid cache entry state: %s", e.mu.neigh.State))
	}
}

// handleUpperLevelConfirmationLocked processes an incoming upper-level protocol
// (e.g. TCP acknowledgements) reachability confirmation.
//
// Precondition: e.mu MUST be locked.
func (e *neighborEntry) handleUpperLevelConfirmationLocked() {
	switch e.mu.neigh.State {
	case Stale, Delay, Probe:
		e.setStateLocked(Reachable)
		e.dispatchChangeEventLocked()
	case Reachable:
		// Avoid setStateLocked; Timer.Reset is cheaper.
		e.mu.timer.timer.Reset(e.nudState.ReachableTime())
	case Unknown, Incomplete, Unreachable, Static:
		// Do nothing
	default:
		panic(fmt.Sprintf("Invalid cache entry state: %s", e.mu.neigh.State))
	}
}
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package deployment

import (
	"fmt"
	"strconv"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
	unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/integer"
	intstrutil "k8s.io/kubernetes/pkg/util/intstr"
	labelsutil "k8s.io/kubernetes/pkg/util/labels"
	podutil "k8s.io/kubernetes/pkg/util/pod"
	"k8s.io/kubernetes/pkg/util/wait"
)

const (
	// The revision annotation of a deployment's replica sets which records its rollout sequence
	RevisionAnnotation = "deployment.kubernetes.io/revision"

	// Here are the possible rollback event reasons
	RollbackRevisionNotFound  = "DeploymentRollbackRevisionNotFound"
	RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
	RollbackDone              = "DeploymentRollback"
)

// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
func GetOldReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
	return GetOldReplicaSetsFromLists(deployment, c,
		func(namespace string, options api.ListOptions) (*api.PodList, error) {
			return c.Core().Pods(namespace).List(options)
		},
		func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) {
			rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
			return rsList.Items, err
		})
}

// TODO: switch this to full namespacers
type rsListFunc func(string, api.ListOptions) ([]extensions.ReplicaSet, error)
type podListFunc func(string, api.ListOptions) (*api.PodList, error)

// GetOldReplicaSetsFromLists returns two sets of old replica sets targeted by the given Deployment; get PodList and ReplicaSetList with input functions.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
func GetOldReplicaSetsFromLists(deployment *extensions.Deployment, c clientset.Interface, getPodList podListFunc, getRSList rsListFunc) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
	// Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList.
	// All pods and replica sets are labeled with pod-template-hash to prevent overlapping
	// TODO: Right now we list all replica sets and then filter. We should add an API for this.
	oldRSs := map[string]extensions.ReplicaSet{}
	allOldRSs := map[string]extensions.ReplicaSet{}
	rsList, podList, err := rsAndPodsWithHashKeySynced(deployment, c, getRSList, getPodList)
	if err != nil {
		return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err)
	}
	newRSTemplate := GetNewReplicaSetTemplate(deployment)
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rs := range rsList {
			rsLabelsSelector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
			if err != nil {
				return nil, nil, fmt.Errorf("invalid label selector: %v", err)
			}
			// Filter out replica set that has the same pod template spec as the deployment - that is the new replica set.
			if api.Semantic.DeepEqual(rs.Spec.Template, &newRSTemplate) {
				continue
			}
			allOldRSs[rs.ObjectMeta.Name] = rs
			if rsLabelsSelector.Matches(podLabelsSelector) {
				oldRSs[rs.ObjectMeta.Name] = rs
			}
		}
	}
	// Copy each map value into a fresh local before taking its address; taking
	// the address of the map element directly is not possible in Go.
	requiredRSs := []*extensions.ReplicaSet{}
	for key := range oldRSs {
		value := oldRSs[key]
		requiredRSs = append(requiredRSs, &value)
	}
	allRSs := []*extensions.ReplicaSet{}
	for key := range allOldRSs {
		value := allOldRSs[key]
		allRSs = append(allRSs, &value)
	}
	return requiredRSs, allRSs, nil
}

// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
// Returns nil if the new replica set doesn't exist yet.
func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) { return GetNewReplicaSetFromList(deployment, c, func(namespace string, options api.ListOptions) (*api.PodList, error) { return c.Core().Pods(namespace).List(options) }, func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) { rsList, err := c.Extensions().ReplicaSets(namespace).List(options) return rsList.Items, err }) } // GetNewReplicaSetFromList returns a replica set that matches the intent of the given deployment; get ReplicaSetList with the input function. // Returns nil if the new replica set doesn't exist yet. func GetNewReplicaSetFromList(deployment *extensions.Deployment, c clientset.Interface, getPodList podListFunc, getRSList rsListFunc) (*extensions.ReplicaSet, error) { rsList, _, err := rsAndPodsWithHashKeySynced(deployment, c, getRSList, getPodList) if err != nil { return nil, fmt.Errorf("error listing ReplicaSets: %v", err) } newRSTemplate := GetNewReplicaSetTemplate(deployment) for i := range rsList { if api.Semantic.DeepEqual(rsList[i].Spec.Template, &newRSTemplate) { // This is the new ReplicaSet. return &rsList[i], nil } } // new ReplicaSet does not exist. return nil, nil } // rsAndPodsWithHashKeySynced returns the RSs and pods the given deployment targets, with pod-template-hash information synced. func rsAndPodsWithHashKeySynced(deployment *extensions.Deployment, c clientset.Interface, getRSList rsListFunc, getPodList podListFunc) ([]extensions.ReplicaSet, *api.PodList, error) { namespace := deployment.Namespace selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) if err != nil { return nil, nil, err } options := api.ListOptions{LabelSelector: selector} rsList, err := getRSList(namespace, options) if err != nil { return nil, nil, err } syncedRSList := []extensions.ReplicaSet{} for _, rs := range rsList { // Add pod-template-hash information if it's not in the RS. 
// Otherwise, new RS produced by Deployment will overlap with pre-existing ones // that aren't constrained by the pod-template-hash. syncedRS, err := addHashKeyToRSAndPods(deployment, c, rs, getPodList) if err != nil { return nil, nil, err } syncedRSList = append(syncedRSList, *syncedRS) } syncedPodList, err := getPodList(namespace, options) if err != nil { return nil, nil, err } return syncedRSList, syncedPodList, nil } // addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps: // 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created // 2. Add hash label to all pods this rs owns // 3. Add hash label to the rs's label and selector func addHashKeyToRSAndPods(deployment *extensions.Deployment, c clientset.Interface, rs extensions.ReplicaSet, getPodList podListFunc) (updatedRS *extensions.ReplicaSet, err error) { updatedRS = &rs // If the rs already has the new hash label in its selector, it's done syncing if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { return } namespace := deployment.Namespace meta := rs.Spec.Template.ObjectMeta meta.Labels = labelsutil.CloneAndRemoveLabel(meta.Labels, extensions.DefaultDeploymentUniqueLabelKey) hash := fmt.Sprintf("%d", podutil.GetPodTemplateSpecHash(api.PodTemplateSpec{ ObjectMeta: meta, Spec: rs.Spec.Template.Spec, })) // 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label. 
if len(updatedRS.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 { updatedRS, err = updateRSWithRetries(c.Extensions().ReplicaSets(namespace), updatedRS, func(updated *extensions.ReplicaSet) { updated.Spec.Template.Labels = labelsutil.AddLabel(updated.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) }) if err != nil { return nil, fmt.Errorf("error updating rs %s pod template label with template hash: %v", updatedRS.Name, err) } // Make sure rs pod template is updated so that it won't create pods without the new label (orphaned pods). if updatedRS.Generation > updatedRS.Status.ObservedGeneration { if err = waitForReplicaSetUpdated(c, updatedRS.Generation, namespace, updatedRS.Name); err != nil { return nil, fmt.Errorf("error waiting for rs %s generation %d observed by controller: %v", updatedRS.Name, updatedRS.Generation, err) } } glog.V(4).Infof("Observed the update of rs %s's pod template with hash %s.", rs.Name, hash) } // 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted. selector, err := unversioned.LabelSelectorAsSelector(updatedRS.Spec.Selector) if err != nil { return nil, err } options := api.ListOptions{LabelSelector: selector} podList, err := getPodList(namespace, options) if err != nil { return nil, err } if err = labelPodsWithHash(podList, c, namespace, hash); err != nil { return nil, err } glog.V(4).Infof("Labeled rs %s's pods with hash %s.", rs.Name, hash) // 3. 
Update rs label and selector to include the new hash label // Copy the old selector, so that we can scrub out any orphaned pods if updatedRS, err = updateRSWithRetries(c.Extensions().ReplicaSets(namespace), updatedRS, func(updated *extensions.ReplicaSet) { updated.Labels = labelsutil.AddLabel(updated.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) updated.Spec.Selector = labelsutil.AddLabelToSelector(updated.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, hash) }); err != nil { return nil, fmt.Errorf("error updating rs %s label and selector with template hash: %v", updatedRS.Name, err) } glog.V(4).Infof("Updated rs %s's selector and label with hash %s.", rs.Name, hash) // TODO: look for orphaned pods and label them in the background somewhere else periodically return updatedRS, nil } func waitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, namespace, name string) error { return wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { rs, err := c.Extensions().ReplicaSets(namespace).Get(name) if err != nil { return false, err } return rs.Status.ObservedGeneration >= desiredGeneration, nil }) } // labelPodsWithHash labels all pods in the given podList with the new hash label. 
func labelPodsWithHash(podList *api.PodList, c clientset.Interface, namespace, hash string) error { for _, pod := range podList.Items { // Only label the pod that doesn't already have the new hash if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { if _, err := updatePodWithRetries(c.Core().Pods(namespace), &pod, func(updated *api.Pod) { pod.Labels = labelsutil.AddLabel(pod.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) }); err != nil { return err } glog.V(4).Infof("Labeled pod %s with hash %s.", pod.Name, hash) } } return nil } // TODO: use client library instead when it starts to support update retries // see https://github.com/kubernetes/kubernetes/issues/21479 type updateRSFunc func(rs *extensions.ReplicaSet) func updateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) { var err error oldRs := rs err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { // Apply the update, then attempt to push it to the apiserver. applyUpdate(rs) if rs, err = rsClient.Update(rs); err == nil { // rs contains the latest controller post update return true, nil } // Update the controller with the latest resource version, if the update failed we // can't trust rs so use oldRs.Name. if rs, err = rsClient.Get(oldRs.Name); err != nil { // The Get failed: Value in rs cannot be trusted. rs = oldRs } // The Get passed: rs contains the latest controller, expect a poll for the update. return false, nil }) // If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned // controller contains the applied update. 
return rs, err } type updatePodFunc func(pod *api.Pod) func updatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) { var err error oldPod := pod err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { // Apply the update, then attempt to push it to the apiserver. applyUpdate(pod) if pod, err = podClient.Update(pod); err == nil { return true, nil } if pod, err = podClient.Get(oldPod.Name); err != nil { pod = oldPod } return false, nil }) return pod, err } // Returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet. func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplateSpec { // newRS will have the same template as in deployment spec, plus a unique label in some cases. newRSTemplate := api.PodTemplateSpec{ ObjectMeta: deployment.Spec.Template.ObjectMeta, Spec: deployment.Spec.Template.Spec, } newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel( deployment.Spec.Template.ObjectMeta.Labels, extensions.DefaultDeploymentUniqueLabelKey, podutil.GetPodTemplateSpecHash(newRSTemplate)) return newRSTemplate } // SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment { deployment.Spec.Template.ObjectMeta = template.ObjectMeta deployment.Spec.Template.Spec = template.Spec deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( deployment.Spec.Template.ObjectMeta.Labels, extensions.DefaultDeploymentUniqueLabelKey) return deployment } // Returns the sum of Replicas of the given replica sets. 
func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int { totalReplicaCount := 0 for _, rs := range replicaSets { if rs != nil { totalReplicaCount += rs.Spec.Replicas } } return totalReplicaCount } // GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int { totalReplicaCount := 0 for _, rs := range replicaSets { if rs != nil { totalReplicaCount += rs.Status.Replicas } } return totalReplicaCount } // Returns the number of available pods corresponding to the given replica sets. func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) (int, error) { allPods, err := GetPodsForReplicaSets(c, rss) if err != nil { return 0, err } return getReadyPodsCount(allPods, minReadySeconds), nil } func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int { readyPodCount := 0 for _, pod := range pods { if IsPodAvailable(&pod, minReadySeconds) { readyPodCount++ } } return readyPodCount } func IsPodAvailable(pod *api.Pod, minReadySeconds int) bool { // Check if we've passed minReadySeconds since LastTransitionTime // If so, this pod is ready for _, c := range pod.Status.Conditions { // we only care about pod ready conditions if c.Type == api.PodReady && c.Status == api.ConditionTrue { // 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is ready): // 1. minReadySeconds <= 0 // 2. 
LastTransitionTime (is set) + minReadySeconds (>0) < current time minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second if minReadySeconds <= 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(time.Now()) { return true } } } return false } func GetPodsForReplicaSets(c clientset.Interface, replicaSets []*extensions.ReplicaSet) ([]api.Pod, error) { allPods := map[string]api.Pod{} for _, rs := range replicaSets { if rs != nil { selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } options := api.ListOptions{LabelSelector: selector} podList, err := c.Core().Pods(rs.ObjectMeta.Namespace).List(options) if err != nil { return nil, fmt.Errorf("error listing pods: %v", err) } for _, pod := range podList.Items { allPods[pod.Name] = pod } } } requiredPods := []api.Pod{} for _, pod := range allPods { requiredPods = append(requiredPods, pod) } return requiredPods, nil } // Revision returns the revision number of the input replica set func Revision(rs *extensions.ReplicaSet) (int64, error) { v, ok := rs.Annotations[RevisionAnnotation] if !ok { return 0, nil } return strconv.ParseInt(v, 10, 64) } func IsRollingUpdate(deployment *extensions.Deployment) bool { return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType } // NewRSNewReplicas calculates the number of replicas a deployment's new RS should have. // When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. 
// 1) The new RS is saturated: newRS's replicas == deployment's replicas // 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int, error) { switch deployment.Spec.Strategy.Type { case extensions.RollingUpdateDeploymentStrategyType: // Check if we can scale up. maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Replicas, true) if err != nil { return 0, err } // Find the total number of pods currentPodCount := GetReplicaCountForReplicaSets(allRSs) maxTotalPods := deployment.Spec.Replicas + maxSurge if currentPodCount >= maxTotalPods { // Cannot scale up. return newRS.Spec.Replicas, nil } // Scale up. scaleUpCount := maxTotalPods - currentPodCount // Do not exceed the number of desired replicas. scaleUpCount = integer.IntMin(scaleUpCount, deployment.Spec.Replicas-newRS.Spec.Replicas) return newRS.Spec.Replicas + scaleUpCount, nil case extensions.RecreateDeploymentStrategyType: return deployment.Spec.Replicas, nil default: return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) } } // Polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. // Returns error if polling timesout. func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { // TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface. 
return wait.Poll(interval, timeout, func() (bool, error) { deployment, err := getDeploymentFunc() if err != nil { return false, err } return deployment.Status.ObservedGeneration >= desiredGeneration, nil }) } Improving deployment e2e failure error messages /* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package deployment import ( "fmt" "strconv" "time" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/extensions" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/integer" intstrutil "k8s.io/kubernetes/pkg/util/intstr" labelsutil "k8s.io/kubernetes/pkg/util/labels" podutil "k8s.io/kubernetes/pkg/util/pod" "k8s.io/kubernetes/pkg/util/wait" ) const ( // The revision annotation of a deployment's replica sets which records its rollout sequence RevisionAnnotation = "deployment.kubernetes.io/revision" // Here are the possible rollback event reasons RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound" RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged" RollbackDone = "DeploymentRollback" ) // GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and 
ReplicaSetList from client interface. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. func GetOldReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { return GetOldReplicaSetsFromLists(deployment, c, func(namespace string, options api.ListOptions) (*api.PodList, error) { return c.Core().Pods(namespace).List(options) }, func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) { rsList, err := c.Extensions().ReplicaSets(namespace).List(options) return rsList.Items, err }) } // TODO: switch this to full namespacers type rsListFunc func(string, api.ListOptions) ([]extensions.ReplicaSet, error) type podListFunc func(string, api.ListOptions) (*api.PodList, error) // GetOldReplicaSetsFromLists returns two sets of old replica sets targeted by the given Deployment; get PodList and ReplicaSetList with input functions. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. func GetOldReplicaSetsFromLists(deployment *extensions.Deployment, c clientset.Interface, getPodList podListFunc, getRSList rsListFunc) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { // Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList. // All pods and replica sets are labeled with pod-template-hash to prevent overlapping // TODO: Right now we list all replica sets and then filter. We should add an API for this. 
oldRSs := map[string]extensions.ReplicaSet{} allOldRSs := map[string]extensions.ReplicaSet{} rsList, podList, err := rsAndPodsWithHashKeySynced(deployment, c, getRSList, getPodList) if err != nil { return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err) } newRSTemplate := GetNewReplicaSetTemplate(deployment) for _, pod := range podList.Items { podLabelsSelector := labels.Set(pod.ObjectMeta.Labels) for _, rs := range rsList { rsLabelsSelector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) if err != nil { return nil, nil, fmt.Errorf("invalid label selector: %v", err) } // Filter out replica set that has the same pod template spec as the deployment - that is the new replica set. if api.Semantic.DeepEqual(rs.Spec.Template, &newRSTemplate) { continue } allOldRSs[rs.ObjectMeta.Name] = rs if rsLabelsSelector.Matches(podLabelsSelector) { oldRSs[rs.ObjectMeta.Name] = rs } } } requiredRSs := []*extensions.ReplicaSet{} for key := range oldRSs { value := oldRSs[key] requiredRSs = append(requiredRSs, &value) } allRSs := []*extensions.ReplicaSet{} for key := range allOldRSs { value := allOldRSs[key] allRSs = append(allRSs, &value) } return requiredRSs, allRSs, nil } // GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. // Returns nil if the new replica set doesn't exist yet. 
func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) { return GetNewReplicaSetFromList(deployment, c, func(namespace string, options api.ListOptions) (*api.PodList, error) { return c.Core().Pods(namespace).List(options) }, func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) { rsList, err := c.Extensions().ReplicaSets(namespace).List(options) return rsList.Items, err }) } // GetNewReplicaSetFromList returns a replica set that matches the intent of the given deployment; get ReplicaSetList with the input function. // Returns nil if the new replica set doesn't exist yet. func GetNewReplicaSetFromList(deployment *extensions.Deployment, c clientset.Interface, getPodList podListFunc, getRSList rsListFunc) (*extensions.ReplicaSet, error) { rsList, _, err := rsAndPodsWithHashKeySynced(deployment, c, getRSList, getPodList) if err != nil { return nil, fmt.Errorf("error listing ReplicaSets: %v", err) } newRSTemplate := GetNewReplicaSetTemplate(deployment) for i := range rsList { if api.Semantic.DeepEqual(rsList[i].Spec.Template, &newRSTemplate) { // This is the new ReplicaSet. return &rsList[i], nil } } // new ReplicaSet does not exist. return nil, nil } // rsAndPodsWithHashKeySynced returns the RSs and pods the given deployment targets, with pod-template-hash information synced. func rsAndPodsWithHashKeySynced(deployment *extensions.Deployment, c clientset.Interface, getRSList rsListFunc, getPodList podListFunc) ([]extensions.ReplicaSet, *api.PodList, error) { namespace := deployment.Namespace selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) if err != nil { return nil, nil, err } options := api.ListOptions{LabelSelector: selector} rsList, err := getRSList(namespace, options) if err != nil { return nil, nil, err } syncedRSList := []extensions.ReplicaSet{} for _, rs := range rsList { // Add pod-template-hash information if it's not in the RS. 
// Otherwise, new RS produced by Deployment will overlap with pre-existing ones // that aren't constrained by the pod-template-hash. syncedRS, err := addHashKeyToRSAndPods(deployment, c, rs, getPodList) if err != nil { return nil, nil, err } syncedRSList = append(syncedRSList, *syncedRS) } syncedPodList, err := getPodList(namespace, options) if err != nil { return nil, nil, err } return syncedRSList, syncedPodList, nil } // addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps: // 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created // 2. Add hash label to all pods this rs owns // 3. Add hash label to the rs's label and selector func addHashKeyToRSAndPods(deployment *extensions.Deployment, c clientset.Interface, rs extensions.ReplicaSet, getPodList podListFunc) (updatedRS *extensions.ReplicaSet, err error) { updatedRS = &rs // If the rs already has the new hash label in its selector, it's done syncing if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { return } namespace := deployment.Namespace meta := rs.Spec.Template.ObjectMeta meta.Labels = labelsutil.CloneAndRemoveLabel(meta.Labels, extensions.DefaultDeploymentUniqueLabelKey) hash := fmt.Sprintf("%d", podutil.GetPodTemplateSpecHash(api.PodTemplateSpec{ ObjectMeta: meta, Spec: rs.Spec.Template.Spec, })) // 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label. 
if len(updatedRS.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 { updatedRS, err = updateRSWithRetries(c.Extensions().ReplicaSets(namespace), updatedRS, func(updated *extensions.ReplicaSet) { updated.Spec.Template.Labels = labelsutil.AddLabel(updated.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) }) if err != nil { return nil, fmt.Errorf("error updating rs %s pod template label with template hash: %v", updatedRS.Name, err) } // Make sure rs pod template is updated so that it won't create pods without the new label (orphaned pods). if updatedRS.Generation > updatedRS.Status.ObservedGeneration { if err = waitForReplicaSetUpdated(c, updatedRS.Generation, namespace, updatedRS.Name); err != nil { return nil, fmt.Errorf("error waiting for rs %s generation %d observed by controller: %v", updatedRS.Name, updatedRS.Generation, err) } } glog.V(4).Infof("Observed the update of rs %s's pod template with hash %s.", rs.Name, hash) } // 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted. selector, err := unversioned.LabelSelectorAsSelector(updatedRS.Spec.Selector) if err != nil { return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err) } options := api.ListOptions{LabelSelector: selector} podList, err := getPodList(namespace, options) if err != nil { return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", namespace, options, err) } if err = labelPodsWithHash(podList, c, namespace, hash); err != nil { return nil, fmt.Errorf("error in adding template hash label %s to pods %+v: %s", hash, podList, err) } glog.V(4).Infof("Labeled rs %s's pods with hash %s.", rs.Name, hash) // 3. 
Update rs label and selector to include the new hash label // Copy the old selector, so that we can scrub out any orphaned pods if updatedRS, err = updateRSWithRetries(c.Extensions().ReplicaSets(namespace), updatedRS, func(updated *extensions.ReplicaSet) { updated.Labels = labelsutil.AddLabel(updated.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) updated.Spec.Selector = labelsutil.AddLabelToSelector(updated.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, hash) }); err != nil { return nil, fmt.Errorf("error updating rs %s label and selector with template hash: %v", updatedRS.Name, err) } glog.V(4).Infof("Updated rs %s's selector and label with hash %s.", rs.Name, hash) // TODO: look for orphaned pods and label them in the background somewhere else periodically return updatedRS, nil } func waitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, namespace, name string) error { return wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { rs, err := c.Extensions().ReplicaSets(namespace).Get(name) if err != nil { return false, err } return rs.Status.ObservedGeneration >= desiredGeneration, nil }) } // labelPodsWithHash labels all pods in the given podList with the new hash label. 
func labelPodsWithHash(podList *api.PodList, c clientset.Interface, namespace, hash string) error { for _, pod := range podList.Items { // Only label the pod that doesn't already have the new hash if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { if _, err := updatePodWithRetries(c.Core().Pods(namespace), &pod, func(updated *api.Pod) { pod.Labels = labelsutil.AddLabel(pod.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) }); err != nil { return fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err) } glog.V(4).Infof("Labeled pod %s with hash %s.", pod.Name, hash) } } return nil } // TODO: use client library instead when it starts to support update retries // see https://github.com/kubernetes/kubernetes/issues/21479 type updateRSFunc func(rs *extensions.ReplicaSet) func updateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) { var err error oldRs := rs err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { // Apply the update, then attempt to push it to the apiserver. applyUpdate(rs) if rs, err = rsClient.Update(rs); err == nil { // rs contains the latest controller post update return true, nil } // Update the controller with the latest resource version, if the update failed we // can't trust rs so use oldRs.Name. if rs, err = rsClient.Get(oldRs.Name); err != nil { // The Get failed: Value in rs cannot be trusted. rs = oldRs } // The Get passed: rs contains the latest controller, expect a poll for the update. return false, nil }) // If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned // controller contains the applied update. 
return rs, err } type updatePodFunc func(pod *api.Pod) func updatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) { var err error oldPod := pod err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { // Apply the update, then attempt to push it to the apiserver. applyUpdate(pod) if pod, err = podClient.Update(pod); err == nil { return true, nil } if pod, err = podClient.Get(oldPod.Name); err != nil { pod = oldPod } return false, nil }) if err == wait.ErrWaitTimeout { return nil, fmt.Errorf("timed out trying to update pod: %+v", oldPod) } return pod, err } // Returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet. func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplateSpec { // newRS will have the same template as in deployment spec, plus a unique label in some cases. newRSTemplate := api.PodTemplateSpec{ ObjectMeta: deployment.Spec.Template.ObjectMeta, Spec: deployment.Spec.Template.Spec, } newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel( deployment.Spec.Template.ObjectMeta.Labels, extensions.DefaultDeploymentUniqueLabelKey, podutil.GetPodTemplateSpecHash(newRSTemplate)) return newRSTemplate } // SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment { deployment.Spec.Template.ObjectMeta = template.ObjectMeta deployment.Spec.Template.Spec = template.Spec deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( deployment.Spec.Template.ObjectMeta.Labels, extensions.DefaultDeploymentUniqueLabelKey) return deployment } // Returns the sum of Replicas of the given replica sets. 
// Returns the sum of desired (spec) Replicas of the given replica sets,
// skipping nil entries.
func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int {
	totalReplicaCount := 0
	for _, rs := range replicaSets {
		if rs != nil {
			totalReplicaCount += rs.Spec.Replicas
		}
	}
	return totalReplicaCount
}

// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
// Unlike GetReplicaCountForReplicaSets this reads the observed Status.Replicas.
func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int {
	totalReplicaCount := 0
	for _, rs := range replicaSets {
		if rs != nil {
			totalReplicaCount += rs.Status.Replicas
		}
	}
	return totalReplicaCount
}

// Returns the number of available pods corresponding to the given replica sets.
// "Available" is defined by IsPodAvailable (ready for at least minReadySeconds).
func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) (int, error) {
	allPods, err := GetPodsForReplicaSets(c, rss)
	if err != nil {
		return 0, err
	}
	return getReadyPodsCount(allPods, minReadySeconds), nil
}

// getReadyPodsCount counts the pods that IsPodAvailable considers available.
func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int {
	readyPodCount := 0
	for _, pod := range pods {
		if IsPodAvailable(&pod, minReadySeconds) {
			readyPodCount++
		}
	}
	return readyPodCount
}

// IsPodAvailable reports whether the pod's Ready condition is true and,
// when minReadySeconds > 0, whether it has stayed true long enough.
func IsPodAvailable(pod *api.Pod, minReadySeconds int) bool {
	// Check if we've passed minReadySeconds since LastTransitionTime
	// If so, this pod is ready
	for _, c := range pod.Status.Conditions {
		// we only care about pod ready conditions
		if c.Type == api.PodReady && c.Status == api.ConditionTrue {
			// 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is ready):
			// 1. minReadySeconds <= 0
			// 2. LastTransitionTime (is set) + minReadySeconds (>0) < current time
			minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
			if minReadySeconds <= 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(time.Now()) {
				return true
			}
		}
	}
	return false
}

// GetPodsForReplicaSets lists the pods targeted by the given replica sets,
// de-duplicated by pod name (pods matched by several RSs appear once).
func GetPodsForReplicaSets(c clientset.Interface, replicaSets []*extensions.ReplicaSet) ([]api.Pod, error) {
	allPods := map[string]api.Pod{}
	for _, rs := range replicaSets {
		if rs != nil {
			selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
			if err != nil {
				return nil, fmt.Errorf("invalid label selector: %v", err)
			}
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Core().Pods(rs.ObjectMeta.Namespace).List(options)
			if err != nil {
				return nil, fmt.Errorf("error listing pods: %v", err)
			}
			for _, pod := range podList.Items {
				allPods[pod.Name] = pod
			}
		}
	}
	// NOTE(review): map iteration makes the returned order non-deterministic;
	// callers visible here only count/aggregate, so this appears benign.
	requiredPods := []api.Pod{}
	for _, pod := range allPods {
		requiredPods = append(requiredPods, pod)
	}
	return requiredPods, nil
}

// Revision returns the revision number of the input replica set
// (0 with no error when the annotation is absent).
func Revision(rs *extensions.ReplicaSet) (int64, error) {
	v, ok := rs.Annotations[RevisionAnnotation]
	if !ok {
		return 0, nil
	}
	return strconv.ParseInt(v, 10, 64)
}

// IsRollingUpdate reports whether the deployment uses the RollingUpdate strategy.
func IsRollingUpdate(deployment *extensions.Deployment) bool {
	return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType
}

// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have.
// When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it.
// 1) The new RS is saturated: newRS's replicas == deployment's replicas // 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int, error) { switch deployment.Spec.Strategy.Type { case extensions.RollingUpdateDeploymentStrategyType: // Check if we can scale up. maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Replicas, true) if err != nil { return 0, err } // Find the total number of pods currentPodCount := GetReplicaCountForReplicaSets(allRSs) maxTotalPods := deployment.Spec.Replicas + maxSurge if currentPodCount >= maxTotalPods { // Cannot scale up. return newRS.Spec.Replicas, nil } // Scale up. scaleUpCount := maxTotalPods - currentPodCount // Do not exceed the number of desired replicas. scaleUpCount = integer.IntMin(scaleUpCount, deployment.Spec.Replicas-newRS.Spec.Replicas) return newRS.Spec.Replicas + scaleUpCount, nil case extensions.RecreateDeploymentStrategyType: return deployment.Spec.Replicas, nil default: return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) } } // Polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. // Returns error if polling timesout. func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { // TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface. return wait.Poll(interval, timeout, func() (bool, error) { deployment, err := getDeploymentFunc() if err != nil { return false, err } return deployment.Status.ObservedGeneration >= desiredGeneration, nil }) }
package va

import (
	"crypto/sha256"
	"crypto/subtle"
	"crypto/tls"
	"crypto/x509"
	"encoding/asn1"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"os"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/jmhodges/clock"

	"github.com/letsencrypt/pebble/acme"
	"github.com/letsencrypt/pebble/core"
)

const (
	whitespaceCutset = "\n\r\t"
	userAgentBase    = "LetsEncrypt-Pebble-VA"

	// How long do valid authorizations last before expiring?
	validAuthzExpire = time.Hour

	// How many vaTasks can be in the channel before the WFE blocks on adding
	// another?
	taskQueueSize = 6

	// How many concurrent validations are performed?
	concurrentValidations = 3

	// noSleepEnvVar defines the environment variable name used to signal that the
	// VA should *not* sleep between validation attempts. Set this to 1 when you
	// invoke Pebble if you wish validation to be done at full speed, e.g.:
	//   PEBBLE_VA_NOSLEEP=1 pebble
	noSleepEnvVar = "PEBBLE_VA_NOSLEEP"

	// noValidateEnvVar defines the environment variable name used to signal that
	// the VA should *not* actually validate challenges. Set this to 1 when you
	// invoke Pebble if you wish validation to always succeed without actually
	// making any challenge requests, e.g.:
	//   PEBBLE_VA_ALWAYS_VALID=1 pebble
	noValidateEnvVar = "PEBBLE_VA_ALWAYS_VALID"
)

// IdPeAcmeIdentifierV1 is the OID of the acmeValidationV1 certificate
// extension used by the TLS-ALPN-01 challenge.
var IdPeAcmeIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1}

// userAgent returns the User-Agent header value sent with outbound validation
// requests, e.g. "LetsEncrypt-Pebble-VA (linux; amd64)".
func userAgent() string {
	return fmt.Sprintf(
		"%s (%s; %s)",
		userAgentBase, runtime.GOOS, runtime.GOARCH)
}

// certNames collects up all of a certificate's subject names (Subject CN and
// Subject Alternate Names) and reduces them to a comma joined string.
func certNames(cert *x509.Certificate) string {
	var names []string
	if cert.Subject.CommonName != "" {
		names = append(names, cert.Subject.CommonName)
	}
	names = append(names, cert.DNSNames...)
	return strings.Join(names, ", ")
}

// vaTask bundles everything needed to validate one challenge for one account.
type vaTask struct {
	Identifier string
	Challenge  *core.Challenge
	Account    *core.Account
}

// VAImpl performs challenge validations pulled from an internal task queue.
type VAImpl struct {
	log         *log.Logger
	clk         clock.Clock
	httpPort    int
	tlsPort     int
	tasks       chan *vaTask
	sleep       bool
	alwaysValid bool
}

// New constructs a VAImpl, applies the PEBBLE_VA_NOSLEEP and
// PEBBLE_VA_ALWAYS_VALID environment variables, and starts the background
// task-processing goroutine.
func New(
	log *log.Logger,
	clk clock.Clock,
	httpPort, tlsPort int) *VAImpl {
	va := &VAImpl{
		log:      log,
		clk:      clk,
		httpPort: httpPort,
		tlsPort:  tlsPort,
		tasks:    make(chan *vaTask, taskQueueSize),
		sleep:    true,
	}

	// Read the PEBBLE_VA_NOSLEEP environment variable string.
	noSleep := os.Getenv(noSleepEnvVar)
	// If it is set to something true-like, then the VA shouldn't sleep.
	switch noSleep {
	case "1", "true", "True", "TRUE":
		va.sleep = false
		va.log.Printf("Disabling random VA sleeps")
	}

	noValidate := os.Getenv(noValidateEnvVar)
	switch noValidate {
	case "1", "true", "True", "TRUE":
		va.alwaysValid = true
		va.log.Printf("Disabling VA challenge requests. VA always returns valid")
	}

	go va.processTasks()
	return va
}

// ValidateChallenge enqueues a validation task. It blocks when the task queue
// is full.
func (va VAImpl) ValidateChallenge(ident string, chal *core.Challenge, acct *core.Account) {
	task := &vaTask{
		Identifier: ident,
		Challenge:  chal,
		Account:    acct,
	}
	// Submit the task for validation
	va.tasks <- task
}

// processTasks drains the task channel, spawning one goroutine per task.
func (va VAImpl) processTasks() {
	for task := range va.tasks {
		go va.process(task)
	}
}

// firstError reads concurrentValidations results and returns the first error
// encountered, or nil if all validations succeeded.
func (va VAImpl) firstError(results chan *core.ValidationRecord) *acme.ProblemDetails {
	for i := 0; i < concurrentValidations; i++ {
		result := <-results
		if result.Error != nil {
			return result.Error
		}
	}
	return nil
}

// setAuthzValid updates an authorization and an associated challenge to be
// status valid. The authorization expiry is updated to now plus the configured
// `validAuthzExpire` duration.
func (va VAImpl) setAuthzValid(authz *core.Authorization, chal *core.Challenge) {
	authz.Lock()
	defer authz.Unlock()
	// Update the authz expiry for the new validity period
	now := va.clk.Now().UTC()
	authz.ExpiresDate = now.Add(validAuthzExpire)
	authz.Expires = authz.ExpiresDate.Format(time.RFC3339)
	// Update the authz status
	authz.Status = acme.StatusValid

	chal.Lock()
	defer chal.Unlock()
	// Update the challenge status
	chal.Status = acme.StatusValid
}

// setOrderError updates an order with an error from an authorization
// validation.
func (va VAImpl) setOrderError(order *core.Order, err *acme.ProblemDetails) {
	order.Lock()
	defer order.Unlock()
	order.Error = err
}

// setAuthzInvalid updates an authorization and an associated challenge to be
// status invalid. The challenge's error is set to the provided problem and both
// the challenge and the authorization have their status updated to invalid.
func (va VAImpl) setAuthzInvalid(
	authz *core.Authorization,
	chal *core.Challenge,
	err *acme.ProblemDetails) {
	authz.Lock()
	defer authz.Unlock()
	// Update the authz status
	authz.Status = acme.StatusInvalid

	// Lock the challenge for update
	chal.Lock()
	defer chal.Unlock()
	// Update the challenge error field
	chal.Error = err
	// Update the challenge status
	chal.Status = acme.StatusInvalid
}

// process runs concurrentValidations parallel validations for one task and
// records the outcome on the challenge, authorization, and order.
func (va VAImpl) process(task *vaTask) {
	va.log.Printf("Pulled a task from the Tasks queue: %#v", task)
	va.log.Printf("Starting %d validations.", concurrentValidations)

	chal := task.Challenge
	chal.Lock()
	// Update the validated date for the challenge
	now := va.clk.Now().UTC()
	chal.ValidatedDate = now
	chal.Validated = chal.ValidatedDate.Format(time.RFC3339)
	authz := chal.Authz
	chal.Unlock()

	results := make(chan *core.ValidationRecord, concurrentValidations)

	// Start a number of go routines to perform concurrent validations
	for i := 0; i < concurrentValidations; i++ {
		go va.performValidation(task, results)
	}

	err := va.firstError(results)
	// If one of the results was an error, the challenge fails
	if err != nil {
		va.setAuthzInvalid(authz, chal, err)
		va.log.Printf("authz %s set INVALID by completed challenge %s", authz.ID, chal.ID)
		va.setOrderError(authz.Order, err)
		va.log.Printf("order %s set INVALID by invalid authz %s", authz.Order.ID, authz.ID)
		return
	}

	// If there was no error, then the challenge succeeded and the authz is valid
	va.setAuthzValid(authz, chal)
	va.log.Printf("authz %s set VALID by completed challenge %s", authz.ID, chal.ID)
}

// performValidation dispatches a single validation attempt by challenge type
// and sends the resulting record to results.
func (va VAImpl) performValidation(task *vaTask, results chan<- *core.ValidationRecord) {
	if va.sleep {
		// Sleep for a random duration between 0-14s to simulate a slow
		// validation. (Renamed from `len`, which shadowed the builtin, and
		// fixed the log message which printed e.g. "5s seconds".)
		sleepDur := time.Duration(rand.Intn(15)) * time.Second
		va.log.Printf("Sleeping for %s before validating", sleepDur)
		va.clk.Sleep(sleepDur)
	}

	// If `alwaysValid` is true then return a validation record immediately
	// without actually making any validation requests.
	if va.alwaysValid {
		va.log.Printf("%s is enabled. Skipping real validation of challenge %s",
			noValidateEnvVar, task.Challenge.ID)
		// NOTE(@cpu): The validation record's URL will not match the value it would
		// have received in a real validation request. For simplicity when faking
		// validation we always set it to the task identifier regardless of challenge
		// type. For example comparison, a real DNS-01 validation would set
		// the URL to the `_acme-challenge` subdomain.
		results <- &core.ValidationRecord{
			URL:         task.Identifier,
			ValidatedAt: va.clk.Now(),
		}
		return
	}

	switch task.Challenge.Type {
	case acme.ChallengeHTTP01:
		results <- va.validateHTTP01(task)
	case acme.ChallengeTLSALPN01:
		results <- va.validateTLSALPN01(task)
	case acme.ChallengeDNS01:
		results <- va.validateDNS01(task)
	default:
		va.log.Printf("Error: performValidation(): Invalid challenge type: %q", task.Challenge.Type)
	}
}

// validateDNS01 looks up TXT records on the `_acme-challenge` subdomain and
// checks one of them matches the base64url SHA-256 of the key authorization.
func (va VAImpl) validateDNS01(task *vaTask) *core.ValidationRecord {
	const dns01Prefix = "_acme-challenge"
	challengeSubdomain := fmt.Sprintf("%s.%s", dns01Prefix, task.Identifier)

	result := &core.ValidationRecord{
		URL:         challengeSubdomain,
		ValidatedAt: va.clk.Now(),
	}

	txts, err := net.LookupTXT(challengeSubdomain)
	if err != nil {
		result.Error = acme.UnauthorizedProblem("Error retrieving TXT records for DNS challenge")
		return result
	}

	if len(txts) == 0 {
		result.Error = acme.UnauthorizedProblem("No TXT records found for DNS challenge")
		return result
	}

	task.Challenge.RLock()
	expectedKeyAuthorization := task.Challenge.ExpectedKeyAuthorization(task.Account.Key)
	h := sha256.Sum256([]byte(expectedKeyAuthorization))
	task.Challenge.RUnlock()
	authorizedKeysDigest := base64.RawURLEncoding.EncodeToString(h[:])

	for _, element := range txts {
		if subtle.ConstantTimeCompare([]byte(element), []byte(authorizedKeysDigest)) == 1 {
			return result
		}
	}

	result.Error = acme.UnauthorizedProblem("Correct value not found for DNS challenge")
	return result
}

// validateTLSALPN01 connects with the acme-tls/1 ALPN protocol and verifies
// the self-signed certificate carries the correct acmeValidationV1 extension.
func (va VAImpl) validateTLSALPN01(task *vaTask) *core.ValidationRecord {
	portString := strconv.Itoa(va.tlsPort)
	hostPort := net.JoinHostPort(task.Identifier, portString)

	result := &core.ValidationRecord{
		URL:         hostPort,
		ValidatedAt: va.clk.Now(),
	}

	cs, problem := va.fetchConnectionState(hostPort, &tls.Config{
		ServerName:         task.Identifier,
		NextProtos:         []string{acme.ACMETLS1Protocol},
		InsecureSkipVerify: true,
	})
	if problem != nil {
		result.Error = problem
		return result
	}

	if !cs.NegotiatedProtocolIsMutual || cs.NegotiatedProtocol != acme.ACMETLS1Protocol {
		result.Error = acme.UnauthorizedProblem(fmt.Sprintf(
			"Cannot negotiate ALPN protocol %q for %s challenge",
			acme.ACMETLS1Protocol,
			acme.ChallengeTLSALPN01,
		))
		return result
	}

	certs := cs.PeerCertificates
	if len(certs) == 0 {
		result.Error = acme.UnauthorizedProblem(fmt.Sprintf("No certs presented for %s challenge", acme.ChallengeTLSALPN01))
		return result
	}
	leafCert := certs[0]

	// Verify SNI - certificate returned must be issued only for the domain we are verifying.
	if len(leafCert.DNSNames) != 1 || !strings.EqualFold(leafCert.DNSNames[0], task.Identifier) {
		names := certNames(leafCert)
		errText := fmt.Sprintf(
			"Incorrect validation certificate for %s challenge. "+
				"Requested %s from %s. Received %d certificate(s), "+
				"first certificate had names %q",
			acme.ChallengeTLSALPN01, task.Identifier, hostPort, len(certs), names)
		result.Error = acme.UnauthorizedProblem(errText)
		return result
	}

	// Verify key authorization in acmeValidation extension
	expectedKeyAuthorization := task.Challenge.ExpectedKeyAuthorization(task.Account.Key)
	h := sha256.Sum256([]byte(expectedKeyAuthorization))
	for _, ext := range leafCert.Extensions {
		if IdPeAcmeIdentifierV1.Equal(ext.Id) && ext.Critical {
			// FIX: the acmeValidationV1 extension value is a DER-encoded OCTET
			// STRING wrapping the SHA-256 key-authorization digest, not the raw
			// digest bytes. Unwrap it before comparing. Previously we compared
			// h[:] against ext.Value directly, which can never match a
			// spec-conformant certificate.
			// Assume acmeValidation-v1 is wrapped OCTET STRING (#138).
			// See also letsencrypt/boulder#3752.
			var extValue []byte
			if _, err := asn1.Unmarshal(ext.Value, &extValue); err != nil {
				errText := fmt.Sprintf("Incorrect validation certificate for %s challenge. "+
					"Malformed acmeValidationV1 extension value.", acme.ChallengeTLSALPN01)
				result.Error = acme.UnauthorizedProblem(errText)
				return result
			}
			if subtle.ConstantTimeCompare(h[:], extValue) == 1 {
				return result
			}
			errText := fmt.Sprintf("Incorrect validation certificate for %s challenge. "+
				"Invalid acmeValidationV1 extension value.", acme.ChallengeTLSALPN01)
			result.Error = acme.UnauthorizedProblem(errText)
			return result
		}
	}

	errText := fmt.Sprintf(
		"Incorrect validation certificate for %s challenge. "+
			"Missing acmeValidationV1 extension.",
		acme.ChallengeTLSALPN01)
	result.Error = acme.UnauthorizedProblem(errText)
	return result
}

// fetchConnectionState dials hostPort with the given TLS config and returns
// the resulting connection state, or a problem on connection failure.
func (va VAImpl) fetchConnectionState(hostPort string, config *tls.Config) (*tls.ConnectionState, *acme.ProblemDetails) {
	conn, err := tls.DialWithDialer(&net.Dialer{Timeout: time.Second * 5}, "tcp", hostPort, config)
	if err != nil {
		// TODO(@cpu): Return better err - see parseHTTPConnError from boulder
		return nil, acme.UnauthorizedProblem(
			fmt.Sprintf("Failed to connect to %s for the %s challenge", hostPort, acme.ChallengeTLSALPN01))
	}

	// close errors are not important here
	defer func() {
		_ = conn.Close()
	}()

	cs := conn.ConnectionState()
	return &cs, nil
}

// validateHTTP01 fetches the key authorization file over HTTP and compares it
// (modulo trailing whitespace) with the expected key authorization.
func (va VAImpl) validateHTTP01(task *vaTask) *core.ValidationRecord {
	body, url, err := va.fetchHTTP(task.Identifier, task.Challenge.Token)

	result := &core.ValidationRecord{
		URL:         url,
		ValidatedAt: va.clk.Now(),
		Error:       err,
	}
	if result.Error != nil {
		return result
	}

	expectedKeyAuthorization := task.Challenge.ExpectedKeyAuthorization(task.Account.Key)
	// The server SHOULD ignore whitespace characters at the end of the body
	payload := strings.TrimRight(string(body), whitespaceCutset)
	if payload != expectedKeyAuthorization {
		result.Error = acme.UnauthorizedProblem(
			fmt.Sprintf("The key authorization file from the server did not match this challenge %q != %q",
				expectedKeyAuthorization, payload))
	}

	return result
}

// NOTE(@cpu): fetchHTTP only fetches the ACME HTTP-01 challenge path for
// a given challenge & identifier domain. It is not a challenge agnostic general
// purpose HTTP function
func (va VAImpl) fetchHTTP(identifier string, token string) ([]byte, string, *acme.ProblemDetails) {
	path := fmt.Sprintf("%s%s", acme.HTTP01BaseURL, token)

	url := &url.URL{
		Scheme: "http",
		Host:   fmt.Sprintf("%s:%d", identifier, va.httpPort),
		Path:   path,
	}

	va.log.Printf("Attempting to validate w/ HTTP: %s\n", url)
	httpRequest, err := http.NewRequest("GET", url.String(), nil)
	if err != nil {
		return nil, url.String(), acme.MalformedProblem(
			fmt.Sprintf("Invalid URL %q\n", url.String()))
	}
	httpRequest.Header.Set("User-Agent", userAgent())
	httpRequest.Header.Set("Accept", "*/*")

	transport := &http.Transport{
		// We don't expect to make multiple requests to a client, so close
		// connection immediately.
		DisableKeepAlives: true,
	}
	client := &http.Client{
		Transport: transport,
		Timeout:   time.Second * 5,
	}

	resp, err := client.Do(httpRequest)
	if err != nil {
		return nil, url.String(), acme.ConnectionProblem(err.Error())
	}

	// NOTE: This is *not* using a `io.LimitedReader` and isn't suitable for
	// production because a very large response will bog down the server. Don't
	// use Pebble anywhere that isn't a testing rig!!!
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, url.String(), acme.InternalErrorProblem(err.Error())
	}
	err = resp.Body.Close()
	if err != nil {
		return nil, url.String(), acme.InternalErrorProblem(err.Error())
	}

	if resp.StatusCode != 200 {
		return nil, url.String(), acme.UnauthorizedProblem(
			fmt.Sprintf("Non-200 status code from HTTP: %s returned %d",
				url.String(), resp.StatusCode))
	}

	return body, url.String(), nil
}
package va

import (
	"crypto/sha256"
	"crypto/subtle"
	"crypto/tls"
	"crypto/x509"
	"encoding/asn1"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"os"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/jmhodges/clock"

	"github.com/letsencrypt/pebble/acme"
	"github.com/letsencrypt/pebble/core"
)

const (
	whitespaceCutset = "\n\r\t"
	userAgentBase    = "LetsEncrypt-Pebble-VA"

	// How long do valid authorizations last before expiring?
	validAuthzExpire = time.Hour

	// How many vaTasks can be in the channel before the WFE blocks on adding
	// another?
	taskQueueSize = 6

	// How many concurrent validations are performed?
	concurrentValidations = 3

	// noSleepEnvVar defines the environment variable name used to signal that the
	// VA should *not* sleep between validation attempts. Set this to 1 when you
	// invoke Pebble if you wish validation to be done at full speed, e.g.:
	//   PEBBLE_VA_NOSLEEP=1 pebble
	noSleepEnvVar = "PEBBLE_VA_NOSLEEP"

	// noValidateEnvVar defines the environment variable name used to signal that
	// the VA should *not* actually validate challenges. Set this to 1 when you
	// invoke Pebble if you wish validation to always succeed without actually
	// making any challenge requests, e.g.:
	//   PEBBLE_VA_ALWAYS_VALID=1 pebble
	noValidateEnvVar = "PEBBLE_VA_ALWAYS_VALID"
)

// IdPeAcmeIdentifierV1 is the OID of the acmeValidationV1 certificate
// extension used by the TLS-ALPN-01 challenge.
var IdPeAcmeIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1}

// userAgent returns the User-Agent header value sent with outbound validation
// requests, e.g. "LetsEncrypt-Pebble-VA (linux; amd64)".
func userAgent() string {
	return fmt.Sprintf(
		"%s (%s; %s)",
		userAgentBase, runtime.GOOS, runtime.GOARCH)
}

// certNames collects up all of a certificate's subject names (Subject CN and
// Subject Alternate Names) and reduces them to a comma joined string.
func certNames(cert *x509.Certificate) string {
	var names []string
	if cert.Subject.CommonName != "" {
		names = append(names, cert.Subject.CommonName)
	}
	names = append(names, cert.DNSNames...)
	return strings.Join(names, ", ")
}

// vaTask bundles everything needed to validate one challenge for one account.
type vaTask struct {
	Identifier string
	Challenge  *core.Challenge
	Account    *core.Account
}

// VAImpl performs challenge validations pulled from an internal task queue.
type VAImpl struct {
	log         *log.Logger
	clk         clock.Clock
	httpPort    int
	tlsPort     int
	tasks       chan *vaTask
	sleep       bool
	alwaysValid bool
}

// New constructs a VAImpl, applies the PEBBLE_VA_NOSLEEP and
// PEBBLE_VA_ALWAYS_VALID environment variables, and starts the background
// task-processing goroutine.
func New(
	log *log.Logger,
	clk clock.Clock,
	httpPort, tlsPort int) *VAImpl {
	va := &VAImpl{
		log:      log,
		clk:      clk,
		httpPort: httpPort,
		tlsPort:  tlsPort,
		tasks:    make(chan *vaTask, taskQueueSize),
		sleep:    true,
	}

	// Read the PEBBLE_VA_NOSLEEP environment variable string.
	noSleep := os.Getenv(noSleepEnvVar)
	// If it is set to something true-like, then the VA shouldn't sleep.
	switch noSleep {
	case "1", "true", "True", "TRUE":
		va.sleep = false
		va.log.Printf("Disabling random VA sleeps")
	}

	noValidate := os.Getenv(noValidateEnvVar)
	switch noValidate {
	case "1", "true", "True", "TRUE":
		va.alwaysValid = true
		va.log.Printf("Disabling VA challenge requests. VA always returns valid")
	}

	go va.processTasks()
	return va
}

// ValidateChallenge enqueues a validation task. It blocks when the task queue
// is full.
func (va VAImpl) ValidateChallenge(ident string, chal *core.Challenge, acct *core.Account) {
	task := &vaTask{
		Identifier: ident,
		Challenge:  chal,
		Account:    acct,
	}
	// Submit the task for validation
	va.tasks <- task
}

// processTasks drains the task channel, spawning one goroutine per task.
func (va VAImpl) processTasks() {
	for task := range va.tasks {
		go va.process(task)
	}
}

// firstError reads concurrentValidations results and returns the first error
// encountered, or nil if all validations succeeded.
func (va VAImpl) firstError(results chan *core.ValidationRecord) *acme.ProblemDetails {
	for i := 0; i < concurrentValidations; i++ {
		result := <-results
		if result.Error != nil {
			return result.Error
		}
	}
	return nil
}

// setAuthzValid updates an authorization and an associated challenge to be
// status valid. The authorization expiry is updated to now plus the configured
// `validAuthzExpire` duration.
func (va VAImpl) setAuthzValid(authz *core.Authorization, chal *core.Challenge) {
	authz.Lock()
	defer authz.Unlock()
	// Update the authz expiry for the new validity period
	now := va.clk.Now().UTC()
	authz.ExpiresDate = now.Add(validAuthzExpire)
	authz.Expires = authz.ExpiresDate.Format(time.RFC3339)
	// Update the authz status
	authz.Status = acme.StatusValid

	chal.Lock()
	defer chal.Unlock()
	// Update the challenge status
	chal.Status = acme.StatusValid
}

// setOrderError updates an order with an error from an authorization
// validation.
func (va VAImpl) setOrderError(order *core.Order, err *acme.ProblemDetails) {
	order.Lock()
	defer order.Unlock()
	order.Error = err
}

// setAuthzInvalid updates an authorization and an associated challenge to be
// status invalid. The challenge's error is set to the provided problem and both
// the challenge and the authorization have their status updated to invalid.
func (va VAImpl) setAuthzInvalid(
	authz *core.Authorization,
	chal *core.Challenge,
	err *acme.ProblemDetails) {
	authz.Lock()
	defer authz.Unlock()
	// Update the authz status
	authz.Status = acme.StatusInvalid

	// Lock the challenge for update
	chal.Lock()
	defer chal.Unlock()
	// Update the challenge error field
	chal.Error = err
	// Update the challenge status
	chal.Status = acme.StatusInvalid
}

// process runs concurrentValidations parallel validations for one task and
// records the outcome on the challenge, authorization, and order.
func (va VAImpl) process(task *vaTask) {
	va.log.Printf("Pulled a task from the Tasks queue: %#v", task)
	va.log.Printf("Starting %d validations.", concurrentValidations)

	chal := task.Challenge
	chal.Lock()
	// Update the validated date for the challenge
	now := va.clk.Now().UTC()
	chal.ValidatedDate = now
	chal.Validated = chal.ValidatedDate.Format(time.RFC3339)
	authz := chal.Authz
	chal.Unlock()

	results := make(chan *core.ValidationRecord, concurrentValidations)

	// Start a number of go routines to perform concurrent validations
	for i := 0; i < concurrentValidations; i++ {
		go va.performValidation(task, results)
	}

	err := va.firstError(results)
	// If one of the results was an error, the challenge fails
	if err != nil {
		va.setAuthzInvalid(authz, chal, err)
		va.log.Printf("authz %s set INVALID by completed challenge %s", authz.ID, chal.ID)
		va.setOrderError(authz.Order, err)
		va.log.Printf("order %s set INVALID by invalid authz %s", authz.Order.ID, authz.ID)
		return
	}

	// If there was no error, then the challenge succeeded and the authz is valid
	va.setAuthzValid(authz, chal)
	va.log.Printf("authz %s set VALID by completed challenge %s", authz.ID, chal.ID)
}

// performValidation dispatches a single validation attempt by challenge type
// and sends the resulting record to results.
func (va VAImpl) performValidation(task *vaTask, results chan<- *core.ValidationRecord) {
	if va.sleep {
		// Sleep for a random duration between 0-14s to simulate a slow
		// validation. (Renamed from `len`, which shadowed the builtin, and
		// fixed the log message which printed e.g. "5s seconds".)
		sleepDur := time.Duration(rand.Intn(15)) * time.Second
		va.log.Printf("Sleeping for %s before validating", sleepDur)
		va.clk.Sleep(sleepDur)
	}

	// If `alwaysValid` is true then return a validation record immediately
	// without actually making any validation requests.
	if va.alwaysValid {
		va.log.Printf("%s is enabled. Skipping real validation of challenge %s",
			noValidateEnvVar, task.Challenge.ID)
		// NOTE(@cpu): The validation record's URL will not match the value it would
		// have received in a real validation request. For simplicity when faking
		// validation we always set it to the task identifier regardless of challenge
		// type. For example comparison, a real DNS-01 validation would set
		// the URL to the `_acme-challenge` subdomain.
		results <- &core.ValidationRecord{
			URL:         task.Identifier,
			ValidatedAt: va.clk.Now(),
		}
		return
	}

	switch task.Challenge.Type {
	case acme.ChallengeHTTP01:
		results <- va.validateHTTP01(task)
	case acme.ChallengeTLSALPN01:
		results <- va.validateTLSALPN01(task)
	case acme.ChallengeDNS01:
		results <- va.validateDNS01(task)
	default:
		va.log.Printf("Error: performValidation(): Invalid challenge type: %q", task.Challenge.Type)
	}
}

// validateDNS01 looks up TXT records on the `_acme-challenge` subdomain and
// checks one of them matches the base64url SHA-256 of the key authorization.
func (va VAImpl) validateDNS01(task *vaTask) *core.ValidationRecord {
	const dns01Prefix = "_acme-challenge"
	challengeSubdomain := fmt.Sprintf("%s.%s", dns01Prefix, task.Identifier)

	result := &core.ValidationRecord{
		URL:         challengeSubdomain,
		ValidatedAt: va.clk.Now(),
	}

	txts, err := net.LookupTXT(challengeSubdomain)
	if err != nil {
		result.Error = acme.UnauthorizedProblem("Error retrieving TXT records for DNS challenge")
		return result
	}

	if len(txts) == 0 {
		// fmt.Sprintf with no format arguments removed (staticcheck S1039).
		result.Error = acme.UnauthorizedProblem("No TXT records found for DNS challenge")
		return result
	}

	task.Challenge.RLock()
	expectedKeyAuthorization := task.Challenge.ExpectedKeyAuthorization(task.Account.Key)
	h := sha256.Sum256([]byte(expectedKeyAuthorization))
	task.Challenge.RUnlock()
	authorizedKeysDigest := base64.RawURLEncoding.EncodeToString(h[:])

	for _, element := range txts {
		if subtle.ConstantTimeCompare([]byte(element), []byte(authorizedKeysDigest)) == 1 {
			return result
		}
	}

	result.Error = acme.UnauthorizedProblem("Correct value not found for DNS challenge")
	return result
}

// validateTLSALPN01 connects with the acme-tls/1 ALPN protocol and verifies
// the self-signed certificate carries the correct acmeValidationV1 extension.
func (va VAImpl) validateTLSALPN01(task *vaTask) *core.ValidationRecord {
	portString := strconv.Itoa(va.tlsPort)
	hostPort := net.JoinHostPort(task.Identifier, portString)

	result := &core.ValidationRecord{
		URL:         hostPort,
		ValidatedAt: va.clk.Now(),
	}

	cs, problem := va.fetchConnectionState(hostPort, &tls.Config{
		ServerName:         task.Identifier,
		NextProtos:         []string{acme.ACMETLS1Protocol},
		InsecureSkipVerify: true,
	})
	if problem != nil {
		result.Error = problem
		return result
	}

	if !cs.NegotiatedProtocolIsMutual || cs.NegotiatedProtocol != acme.ACMETLS1Protocol {
		result.Error = acme.UnauthorizedProblem(fmt.Sprintf(
			"Cannot negotiate ALPN protocol %q for %s challenge",
			acme.ACMETLS1Protocol,
			acme.ChallengeTLSALPN01,
		))
		return result
	}

	certs := cs.PeerCertificates
	if len(certs) == 0 {
		result.Error = acme.UnauthorizedProblem(fmt.Sprintf("No certs presented for %s challenge", acme.ChallengeTLSALPN01))
		return result
	}
	leafCert := certs[0]

	// Verify SNI - certificate returned must be issued only for the domain we are verifying.
	if len(leafCert.DNSNames) != 1 || !strings.EqualFold(leafCert.DNSNames[0], task.Identifier) {
		names := certNames(leafCert)
		errText := fmt.Sprintf(
			"Incorrect validation certificate for %s challenge. "+
				"Requested %s from %s. Received %d certificate(s), "+
				"first certificate had names %q",
			acme.ChallengeTLSALPN01, task.Identifier, hostPort, len(certs), names)
		result.Error = acme.UnauthorizedProblem(errText)
		return result
	}

	// Verify key authorization in acmeValidation extension
	expectedKeyAuthorization := task.Challenge.ExpectedKeyAuthorization(task.Account.Key)
	h := sha256.Sum256([]byte(expectedKeyAuthorization))
	for _, ext := range leafCert.Extensions {
		if IdPeAcmeIdentifierV1.Equal(ext.Id) && ext.Critical {
			// The acmeValidationV1 extension value is a DER-encoded OCTET
			// STRING wrapping the SHA-256 key-authorization digest; unwrap
			// before comparing.
			var extValue []byte
			if _, err := asn1.Unmarshal(ext.Value, &extValue); err != nil {
				errText := fmt.Sprintf("Incorrect validation certificate for %s challenge. "+
					"Malformed acmeValidationV1 extension value.", acme.ChallengeTLSALPN01)
				result.Error = acme.UnauthorizedProblem(errText)
				return result
			}
			if subtle.ConstantTimeCompare(h[:], extValue) == 1 {
				return result
			}
			errText := fmt.Sprintf("Incorrect validation certificate for %s challenge. "+
				"Invalid acmeValidationV1 extension value.", acme.ChallengeTLSALPN01)
			result.Error = acme.UnauthorizedProblem(errText)
			return result
		}
	}

	errText := fmt.Sprintf(
		"Incorrect validation certificate for %s challenge. "+
			"Missing acmeValidationV1 extension.",
		acme.ChallengeTLSALPN01)
	result.Error = acme.UnauthorizedProblem(errText)
	return result
}

// fetchConnectionState dials hostPort with the given TLS config and returns
// the resulting connection state, or a problem on connection failure.
func (va VAImpl) fetchConnectionState(hostPort string, config *tls.Config) (*tls.ConnectionState, *acme.ProblemDetails) {
	conn, err := tls.DialWithDialer(&net.Dialer{Timeout: time.Second * 5}, "tcp", hostPort, config)
	if err != nil {
		// TODO(@cpu): Return better err - see parseHTTPConnError from boulder
		return nil, acme.UnauthorizedProblem(
			fmt.Sprintf("Failed to connect to %s for the %s challenge", hostPort, acme.ChallengeTLSALPN01))
	}

	// close errors are not important here
	defer func() {
		_ = conn.Close()
	}()

	cs := conn.ConnectionState()
	return &cs, nil
}

// validateHTTP01 fetches the key authorization file over HTTP and compares it
// (modulo trailing whitespace) with the expected key authorization.
func (va VAImpl) validateHTTP01(task *vaTask) *core.ValidationRecord {
	body, url, err := va.fetchHTTP(task.Identifier, task.Challenge.Token)

	result := &core.ValidationRecord{
		URL:         url,
		ValidatedAt: va.clk.Now(),
		Error:       err,
	}
	if result.Error != nil {
		return result
	}

	expectedKeyAuthorization := task.Challenge.ExpectedKeyAuthorization(task.Account.Key)
	// The server SHOULD ignore whitespace characters at the end of the body
	payload := strings.TrimRight(string(body), whitespaceCutset)
	if payload != expectedKeyAuthorization {
		result.Error = acme.UnauthorizedProblem(
			fmt.Sprintf("The key authorization file from the server did not match this challenge %q != %q",
				expectedKeyAuthorization, payload))
	}

	return result
}

// NOTE(@cpu): fetchHTTP only fetches the ACME HTTP-01 challenge path for
// a given challenge & identifier domain. It is not a challenge agnostic general
// purpose HTTP function
func (va VAImpl) fetchHTTP(identifier string, token string) ([]byte, string, *acme.ProblemDetails) {
	path := fmt.Sprintf("%s%s", acme.HTTP01BaseURL, token)

	url := &url.URL{
		Scheme: "http",
		Host:   fmt.Sprintf("%s:%d", identifier, va.httpPort),
		Path:   path,
	}

	va.log.Printf("Attempting to validate w/ HTTP: %s\n", url)
	httpRequest, err := http.NewRequest("GET", url.String(), nil)
	if err != nil {
		return nil, url.String(), acme.MalformedProblem(
			fmt.Sprintf("Invalid URL %q\n", url.String()))
	}
	httpRequest.Header.Set("User-Agent", userAgent())
	httpRequest.Header.Set("Accept", "*/*")

	transport := &http.Transport{
		// We don't expect to make multiple requests to a client, so close
		// connection immediately.
		DisableKeepAlives: true,
	}
	client := &http.Client{
		Transport: transport,
		Timeout:   time.Second * 5,
	}

	resp, err := client.Do(httpRequest)
	if err != nil {
		return nil, url.String(), acme.ConnectionProblem(err.Error())
	}

	// NOTE: This is *not* using a `io.LimitedReader` and isn't suitable for
	// production because a very large response will bog down the server. Don't
	// use Pebble anywhere that isn't a testing rig!!!
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, url.String(), acme.InternalErrorProblem(err.Error())
	}
	err = resp.Body.Close()
	if err != nil {
		return nil, url.String(), acme.InternalErrorProblem(err.Error())
	}

	if resp.StatusCode != 200 {
		return nil, url.String(), acme.UnauthorizedProblem(
			fmt.Sprintf("Non-200 status code from HTTP: %s returned %d",
				url.String(), resp.StatusCode))
	}

	return body, url.String(), nil
}
/* * This file is part of the KubeVirt project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Copyright 2017 Red Hat, Inc. * */ package virtlauncher import ( "flag" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "sync" "syscall" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "kubevirt.io/client-go/log" ) var fakeQEMUBinary string func init() { flag.StringVar(&fakeQEMUBinary, "fake-qemu-binary-path", "_out/cmd/fake-qemu-process/fake-qemu-process", "path to cirros test image") flag.Parse() fakeQEMUBinary = filepath.Join("../../", fakeQEMUBinary) } var _ = Describe("VirtLauncher", func() { var mon *monitor var cmd *exec.Cmd var cmdLock sync.Mutex uuid := "123-123-123-123" tmpDir, _ := ioutil.TempDir("", "monitortest") tmpNetworkDir, _ := ioutil.TempDir("", "monitortest-network") log.Log.SetIOWriter(GinkgoWriter) dir := os.Getenv("PWD") dir = strings.TrimSuffix(dir, "pkg/virt-launcher") processStarted := false StartProcess := func() { cmdLock.Lock() defer cmdLock.Unlock() cmd = exec.Command(fakeQEMUBinary, "--uuid", uuid) err := cmd.Start() Expect(err).ToNot(HaveOccurred()) currentPid := cmd.Process.Pid Expect(currentPid).ToNot(Equal(0)) processStarted = true } StopProcess := func() { cmdLock.Lock() defer cmdLock.Unlock() cmd.Process.Kill() processStarted = false } CleanupProcess := func() { cmdLock.Lock() defer cmdLock.Unlock() cmd.Wait() } VerifyProcessStarted := func() { Eventually(func() bool { mon.refresh() if mon.pid != 0 { return true } return false 
}).Should(BeTrue()) } VerifyProcessStopped := func() { Eventually(func() bool { mon.refresh() if mon.pid == 0 && mon.isDone == true { return true } return false }).Should(BeTrue()) } BeforeEach(func() { InitializeSharedDirectories(tmpDir, tmpNetworkDir) triggerFile := GracefulShutdownTriggerFromNamespaceName(tmpDir, "fakenamespace", "fakedomain") shutdownCallback := func(pid int) { syscall.Kill(pid, syscall.SIGTERM) } mon = &monitor{ cmdlineMatchStr: uuid, gracePeriod: 30, gracefulShutdownTriggerFile: triggerFile, shutdownCallback: shutdownCallback, } }) AfterEach(func() { os.RemoveAll(tmpDir) if processStarted == true { cmdLock.Lock() defer cmdLock.Unlock() cmd.Process.Kill() } processStarted = false }) Describe("VirtLauncher", func() { Context("process monitor", func() { It("verify pid detection works", func() { StartProcess() VerifyProcessStarted() go func() { CleanupProcess() }() StopProcess() VerifyProcessStopped() }) It("verify start timeout works", func() { stopChan := make(chan struct{}) done := make(chan string) go func() { mon.RunForever(time.Second, stopChan) done <- "exit" }() noExitCheck := time.After(3 * time.Second) exited := false select { case <-noExitCheck: case <-done: exited = true } Expect(exited).To(BeTrue()) }) It("verify monitor loop exits when signal arrives and no pid is present", func() { stopChan := make(chan struct{}) done := make(chan string) go func() { mon.monitorLoop(1*time.Second, stopChan) done <- "exit" }() time.Sleep(time.Second) close(stopChan) noExitCheck := time.After(5 * time.Second) exited := false select { case <-noExitCheck: case <-done: exited = true } Expect(exited).To(BeTrue()) }) It("verify graceful shutdown trigger works", func() { stopChan := make(chan struct{}) done := make(chan string) StartProcess() VerifyProcessStarted() go func() { CleanupProcess() }() go func() { mon.monitorLoop(1*time.Second, stopChan) done <- "exit" }() time.Sleep(time.Second) exists, err := hasGracefulShutdownTrigger(tmpDir, 
"fakenamespace", "fakedomain") Expect(err).ToNot(HaveOccurred()) Expect(exists).To(BeFalse()) close(stopChan) time.Sleep(time.Second) exists, err = hasGracefulShutdownTrigger(tmpDir, "fakenamespace", "fakedomain") Expect(err).ToNot(HaveOccurred()) Expect(exists).To(BeTrue()) }) It("verify grace period works", func() { stopChan := make(chan struct{}) done := make(chan string) StartProcess() VerifyProcessStarted() go func() { CleanupProcess() }() go func() { mon.gracePeriod = 1 mon.monitorLoop(1*time.Second, stopChan) done <- "exit" }() close(stopChan) noExitCheck := time.After(5 * time.Second) exited := false select { case <-noExitCheck: case <-done: exited = true } Expect(exited).To(BeTrue()) }) }) }) }) tests: remove /var/run/kubevirt-network/... on completion Signed-off-by: Ihar Hrachyshka <b339079bdd0c511d3934e046f7b02c5281dc2db1@redhat.com> /* * This file is part of the KubeVirt project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Copyright 2017 Red Hat, Inc. * */ package virtlauncher import ( "flag" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "sync" "syscall" "time" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "kubevirt.io/client-go/log" ) var fakeQEMUBinary string func init() { flag.StringVar(&fakeQEMUBinary, "fake-qemu-binary-path", "_out/cmd/fake-qemu-process/fake-qemu-process", "path to cirros test image") flag.Parse() fakeQEMUBinary = filepath.Join("../../", fakeQEMUBinary) } var _ = Describe("VirtLauncher", func() { var mon *monitor var cmd *exec.Cmd var cmdLock sync.Mutex uuid := "123-123-123-123" tmpDir, _ := ioutil.TempDir("", "monitortest") tmpNetworkDir, _ := ioutil.TempDir("", "monitortest-network") log.Log.SetIOWriter(GinkgoWriter) dir := os.Getenv("PWD") dir = strings.TrimSuffix(dir, "pkg/virt-launcher") processStarted := false StartProcess := func() { cmdLock.Lock() defer cmdLock.Unlock() cmd = exec.Command(fakeQEMUBinary, "--uuid", uuid) err := cmd.Start() Expect(err).ToNot(HaveOccurred()) currentPid := cmd.Process.Pid Expect(currentPid).ToNot(Equal(0)) processStarted = true } StopProcess := func() { cmdLock.Lock() defer cmdLock.Unlock() cmd.Process.Kill() processStarted = false } CleanupProcess := func() { cmdLock.Lock() defer cmdLock.Unlock() cmd.Wait() } VerifyProcessStarted := func() { Eventually(func() bool { mon.refresh() if mon.pid != 0 { return true } return false }).Should(BeTrue()) } VerifyProcessStopped := func() { Eventually(func() bool { mon.refresh() if mon.pid == 0 && mon.isDone == true { return true } return false }).Should(BeTrue()) } BeforeEach(func() { InitializeSharedDirectories(tmpDir, tmpNetworkDir) triggerFile := GracefulShutdownTriggerFromNamespaceName(tmpDir, "fakenamespace", "fakedomain") shutdownCallback := func(pid int) { syscall.Kill(pid, syscall.SIGTERM) } mon = &monitor{ cmdlineMatchStr: uuid, gracePeriod: 30, gracefulShutdownTriggerFile: triggerFile, shutdownCallback: shutdownCallback, } }) AfterEach(func() { os.RemoveAll(tmpDir) os.RemoveAll(tmpNetworkDir) if processStarted == true { cmdLock.Lock() defer cmdLock.Unlock() cmd.Process.Kill() } processStarted = false }) Describe("VirtLauncher", func() 
{ Context("process monitor", func() { It("verify pid detection works", func() { StartProcess() VerifyProcessStarted() go func() { CleanupProcess() }() StopProcess() VerifyProcessStopped() }) It("verify start timeout works", func() { stopChan := make(chan struct{}) done := make(chan string) go func() { mon.RunForever(time.Second, stopChan) done <- "exit" }() noExitCheck := time.After(3 * time.Second) exited := false select { case <-noExitCheck: case <-done: exited = true } Expect(exited).To(BeTrue()) }) It("verify monitor loop exits when signal arrives and no pid is present", func() { stopChan := make(chan struct{}) done := make(chan string) go func() { mon.monitorLoop(1*time.Second, stopChan) done <- "exit" }() time.Sleep(time.Second) close(stopChan) noExitCheck := time.After(5 * time.Second) exited := false select { case <-noExitCheck: case <-done: exited = true } Expect(exited).To(BeTrue()) }) It("verify graceful shutdown trigger works", func() { stopChan := make(chan struct{}) done := make(chan string) StartProcess() VerifyProcessStarted() go func() { CleanupProcess() }() go func() { mon.monitorLoop(1*time.Second, stopChan) done <- "exit" }() time.Sleep(time.Second) exists, err := hasGracefulShutdownTrigger(tmpDir, "fakenamespace", "fakedomain") Expect(err).ToNot(HaveOccurred()) Expect(exists).To(BeFalse()) close(stopChan) time.Sleep(time.Second) exists, err = hasGracefulShutdownTrigger(tmpDir, "fakenamespace", "fakedomain") Expect(err).ToNot(HaveOccurred()) Expect(exists).To(BeTrue()) }) It("verify grace period works", func() { stopChan := make(chan struct{}) done := make(chan string) StartProcess() VerifyProcessStarted() go func() { CleanupProcess() }() go func() { mon.gracePeriod = 1 mon.monitorLoop(1*time.Second, stopChan) done <- "exit" }() close(stopChan) noExitCheck := time.After(5 * time.Second) exited := false select { case <-noExitCheck: case <-done: exited = true } Expect(exited).To(BeTrue()) }) }) }) })
package cfutil import ( "errors" cfenv "github.com/cloudfoundry-community/go-cfenv" vault "github.com/hashicorp/vault/api" ) type VaultClient struct { vault.Client Endpoint string RoleID string SecretID string ServiceSecretPath string ServiceTransitPath string SpaceSecretPath string OrgSecretPath string Secret *vault.Secret } func (v *VaultClient) Login() (err error) { path := "auth/approle/login" options := map[string]interface{}{ "role_id": v.RoleID, "secret_id": v.SecretID, } v.Secret, err = v.Logical().Write(path, options) return err } func NewVaultClient(serviceName string) (*VaultClient, error) { appEnv, _ := Current() service := &cfenv.Service{} err := errors.New("") if serviceName != "" { service, err = serviceByName(appEnv, serviceName) } else { service, err = serviceByTag(appEnv, "Vault") } if err != nil { return nil, err } var vaultClient VaultClient if str, ok := service.Credentials["role_id"].(string); ok { vaultClient.RoleID = str } if str, ok := service.Credentials["secret_id"].(string); ok { vaultClient.SecretID = str } if str, ok := service.Credentials["org_secret_path"].(string); ok { vaultClient.OrgSecretPath = str } if str, ok := service.Credentials["service_secret_path"].(string); ok { vaultClient.ServiceSecretPath = str } if str, ok := service.Credentials["endpoint"].(string); ok { vaultClient.Endpoint = str } if str, ok := service.Credentials["space_secret_path"].(string); ok { vaultClient.SpaceSecretPath = str } if str, ok := service.Credentials["service_transit_path"].(string); ok { vaultClient.ServiceTransitPath = str } client, err := vault.NewClient(&vault.Config{ Address: vaultClient.Endpoint, }) if err != nil { return nil, err } vaultClient.Client = *client return &vaultClient, vaultClient.Login() } Set token in Login() package cfutil import ( "errors" cfenv "github.com/cloudfoundry-community/go-cfenv" vault "github.com/hashicorp/vault/api" ) type VaultClient struct { vault.Client Endpoint string RoleID string SecretID string 
ServiceSecretPath string ServiceTransitPath string SpaceSecretPath string OrgSecretPath string Secret *vault.Secret } func (v *VaultClient) Login() (err error) { path := "auth/approle/login" options := map[string]interface{}{ "role_id": v.RoleID, "secret_id": v.SecretID, } v.Secret, err = v.Logical().Write(path, options) v.SetToken(v.Secret.Auth.ClientToken) return err } func NewVaultClient(serviceName string) (*VaultClient, error) { appEnv, _ := Current() service := &cfenv.Service{} err := errors.New("") if serviceName != "" { service, err = serviceByName(appEnv, serviceName) } else { service, err = serviceByTag(appEnv, "Vault") } if err != nil { return nil, err } var vaultClient VaultClient if str, ok := service.Credentials["role_id"].(string); ok { vaultClient.RoleID = str } if str, ok := service.Credentials["secret_id"].(string); ok { vaultClient.SecretID = str } if str, ok := service.Credentials["org_secret_path"].(string); ok { vaultClient.OrgSecretPath = str } if str, ok := service.Credentials["service_secret_path"].(string); ok { vaultClient.ServiceSecretPath = str } if str, ok := service.Credentials["endpoint"].(string); ok { vaultClient.Endpoint = str } if str, ok := service.Credentials["space_secret_path"].(string); ok { vaultClient.SpaceSecretPath = str } if str, ok := service.Credentials["service_transit_path"].(string); ok { vaultClient.ServiceTransitPath = str } client, err := vault.NewClient(&vault.Config{ Address: vaultClient.Endpoint, }) if err != nil { return nil, err } vaultClient.Client = *client err = vaultClient.Login() if err != nil { return nil, err } return &vaultClient, vaultClient.Login() }
package main import ( "os" "os/signal" "sync" "syscall" ) func main() { var ( wg sync.WaitGroup signalCh = make(chan os.Signal, 1) opts = NewOptions() ) signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) sFlow := NewSFlow(opts) go func() { wg.Add(1) defer wg.Done() sFlow.run() }() <-signalCh sFlow.shutdown() wg.Wait() } minor change: user syscall int signal instead of os package main import ( "os" "os/signal" "sync" "syscall" ) func main() { var ( wg sync.WaitGroup signalCh = make(chan os.Signal, 1) opts = NewOptions() ) signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM) sFlow := NewSFlow(opts) go func() { wg.Add(1) defer wg.Done() sFlow.run() }() <-signalCh sFlow.shutdown() wg.Wait() }
package chat import ( "encoding/gob" "errors" "fmt" "os" "path/filepath" "sync" "time" "github.com/keybase/client/go/chat/signencrypt" "github.com/keybase/client/go/protocol/chat1" "github.com/keybase/client/go/protocol/keybase1" ) type AttachmentInfo struct { ObjectKey string // s3 destination EncKey signencrypt.SecretboxKey // encryption key SignKey signencrypt.SignKey // signing key VerifyKey signencrypt.VerifyKey // verification key Parts map[int]string // map of parts uploaded to S3, key == part number, value == hash of ciphertext StartedAt time.Time // when the upload started } type StashKey struct { PlaintextHash []byte ConversationID chat1.ConversationID UserID keybase1.UID } func (s StashKey) String() string { return fmt.Sprintf("%x:%x:%s", s.PlaintextHash, s.ConversationID, s.UserID) } func NewStashKey(plaintextHash []byte, cid chat1.ConversationID, uid keybase1.UID) StashKey { return StashKey{ PlaintextHash: plaintextHash, ConversationID: cid, UserID: uid, } } type AttachmentStash interface { Start(key StashKey, info AttachmentInfo) error Lookup(key StashKey) (AttachmentInfo, bool, error) RecordPart(key StashKey, partNumber int, hash string) error VerifyPart(key StashKey, partNumber int, hash string) (bool, error) Finish(key StashKey) error } var ErrPartNotFound = errors.New("part does not exist in stash") type FileStash struct { dir string sync.Mutex } func NewFileStash(dir string) *FileStash { return &FileStash{dir: dir} } func (f *FileStash) Start(key StashKey, info AttachmentInfo) error { f.Lock() defer f.Unlock() c, err := f.contents() if err != nil { return err } info.StartedAt = time.Now() c[key.String()] = info return f.serialize(c) } func (f *FileStash) Lookup(key StashKey) (AttachmentInfo, bool, error) { f.Lock() defer f.Unlock() return f.lookup(key) } func (f *FileStash) RecordPart(key StashKey, partNumber int, hash string) error { f.Lock() defer f.Unlock() c, err := f.contents() if err != nil { return err } info, found := c[key.String()] if 
!found { return ErrPartNotFound } if info.Parts == nil { info.Parts = make(map[int]string) } info.Parts[partNumber] = hash c[key.String()] = info return f.serialize(c) } func (f *FileStash) VerifyPart(key StashKey, partNumber int, hash string) (bool, error) { f.Lock() defer f.Unlock() info, found, err := f.lookup(key) if err != nil { return false, err } if !found { return false, ErrPartNotFound } rhash, pfound := info.Parts[partNumber] if !pfound { return false, ErrPartNotFound } return rhash == hash, nil } func (f *FileStash) Finish(key StashKey) error { f.Lock() defer f.Unlock() c, err := f.contents() if err != nil { return err } delete(c, key.String()) return f.serialize(c) } func (f *FileStash) filename() string { if f.dir == "" { panic("FileStash used with no directory") } return filepath.Join(f.dir, "chat_attachment_stash") } func (f *FileStash) contents() (map[string]AttachmentInfo, error) { x, err := os.Open(f.filename()) if err != nil { if os.IsNotExist(err) { return make(map[string]AttachmentInfo), nil } return nil, err } defer x.Close() v := make(map[string]AttachmentInfo) dec := gob.NewDecoder(x) if err := dec.Decode(&v); err != nil { return nil, err } return v, nil } func (f *FileStash) serialize(m map[string]AttachmentInfo) error { x, err := os.Create(f.filename()) if err != nil { return err } defer x.Close() enc := gob.NewEncoder(x) return enc.Encode(m) } func (f *FileStash) lookup(key StashKey) (AttachmentInfo, bool, error) { c, err := f.contents() if err != nil { return AttachmentInfo{}, false, err } info, found := c[key.String()] return info, found, nil } Remove VerifyPart, not used anymore package chat import ( "encoding/gob" "errors" "fmt" "os" "path/filepath" "sync" "time" "github.com/keybase/client/go/chat/signencrypt" "github.com/keybase/client/go/protocol/chat1" "github.com/keybase/client/go/protocol/keybase1" ) type AttachmentInfo struct { ObjectKey string // s3 destination EncKey signencrypt.SecretboxKey // encryption key SignKey 
signencrypt.SignKey // signing key VerifyKey signencrypt.VerifyKey // verification key Parts map[int]string // map of parts uploaded to S3, key == part number, value == hash of ciphertext StartedAt time.Time // when the upload started } type StashKey struct { PlaintextHash []byte ConversationID chat1.ConversationID UserID keybase1.UID } func (s StashKey) String() string { return fmt.Sprintf("%x:%x:%s", s.PlaintextHash, s.ConversationID, s.UserID) } func NewStashKey(plaintextHash []byte, cid chat1.ConversationID, uid keybase1.UID) StashKey { return StashKey{ PlaintextHash: plaintextHash, ConversationID: cid, UserID: uid, } } type AttachmentStash interface { Start(key StashKey, info AttachmentInfo) error Lookup(key StashKey) (AttachmentInfo, bool, error) RecordPart(key StashKey, partNumber int, hash string) error Finish(key StashKey) error } var ErrPartNotFound = errors.New("part does not exist in stash") type FileStash struct { dir string sync.Mutex } func NewFileStash(dir string) *FileStash { return &FileStash{dir: dir} } func (f *FileStash) Start(key StashKey, info AttachmentInfo) error { f.Lock() defer f.Unlock() c, err := f.contents() if err != nil { return err } info.StartedAt = time.Now() c[key.String()] = info return f.serialize(c) } func (f *FileStash) Lookup(key StashKey) (AttachmentInfo, bool, error) { f.Lock() defer f.Unlock() return f.lookup(key) } func (f *FileStash) RecordPart(key StashKey, partNumber int, hash string) error { f.Lock() defer f.Unlock() c, err := f.contents() if err != nil { return err } info, found := c[key.String()] if !found { return ErrPartNotFound } if info.Parts == nil { info.Parts = make(map[int]string) } info.Parts[partNumber] = hash c[key.String()] = info return f.serialize(c) } func (f *FileStash) Finish(key StashKey) error { f.Lock() defer f.Unlock() c, err := f.contents() if err != nil { return err } delete(c, key.String()) return f.serialize(c) } func (f *FileStash) filename() string { if f.dir == "" { panic("FileStash used 
with no directory") } return filepath.Join(f.dir, "chat_attachment_stash") } func (f *FileStash) contents() (map[string]AttachmentInfo, error) { x, err := os.Open(f.filename()) if err != nil { if os.IsNotExist(err) { return make(map[string]AttachmentInfo), nil } return nil, err } defer x.Close() v := make(map[string]AttachmentInfo) dec := gob.NewDecoder(x) if err := dec.Decode(&v); err != nil { return nil, err } return v, nil } func (f *FileStash) serialize(m map[string]AttachmentInfo) error { x, err := os.Create(f.filename()) if err != nil { return err } defer x.Close() enc := gob.NewEncoder(x) return enc.Encode(m) } func (f *FileStash) lookup(key StashKey) (AttachmentInfo, bool, error) { c, err := f.contents() if err != nil { return AttachmentInfo{}, false, err } info, found := c[key.String()] return info, found, nil }
package main import ( "bytes" "encoding/json" "flag" "fmt" "log" "net/http" "github.com/danmane/abalone/go/api" "github.com/danmane/abalone/go/game" ) var ( playAgainstHuman = flag.Bool("playAgainstHuman", false, "play against human on frontend rather than AI vs AI") humanPort = flag.String("humanPort", "1337", "port for javascript frontend") aiPort1 = flag.String("aiPort1", "3423", "port for first ai") aiPort2 = flag.String("aiPort2", "3424", "port for second ai (if present)") ) func main() { flag.Parse() if err := run(); err != nil { log.Fatal(err) } } func run() error { whiteAI := api.Player{} blackAI := api.Player{} whiteAgent := PlayerInstance{Player: whiteAI, Port: *aiPort1} blackAgent := PlayerInstance{Player: blackAI, Port: *aiPort2} start := game.Standard result := playAIGame(whiteAgent, blackAgent, start) fmt.Println(result) return nil } type PlayerInstance struct { Player api.Player Port string } func gameFromAI(port string, state *game.State) (error, *game.State) { // TODO reverse args var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(state); err != nil { return err, nil } resp, err := http.Post("http://localhost:"+port+"/move", "application/json", &buf) if err != nil { return err, nil } responseGame := &game.State{} if err := json.NewDecoder(resp.Body).Decode(responseGame); err != nil { return err, nil } resp.Body.Close() if !state.ValidFuture(responseGame) { return fmt.Errorf("game parsed correctly, but isn't a valid future"), nil } return nil, responseGame } func playAIGame(whiteAgent, blackAgent PlayerInstance, startState game.State) api.GameResult { states := []game.State{startState} currentGame := &startState victory := api.NoVictory outcome := game.NullOutcome for !currentGame.GameOver() { var nextAI PlayerInstance if currentGame.NextPlayer == game.White { nextAI = whiteAgent } else { nextAI = blackAgent } err, futureGame := gameFromAI(nextAI.Port, currentGame) if err != nil { fmt.Println(err) victory = api.InvalidResponse outcome = 
currentGame.NextPlayer.Loses() return api.GameResult{ White: whiteAgent.Player, Black: blackAgent.Player, Outcome: outcome, VictoryReason: victory, States: states, } } currentGame = futureGame states = append(states, *currentGame) } outcome = currentGame.Winner() if currentGame.MovesRemaining == 0 { // TODO win on last move = stones depleted victory = api.MovesDepleted fmt.Println("someone won by move depletion") } else { victory = api.StonesDepleted fmt.Println("someone won by stone depletion") } return api.GameResult{ White: whiteAgent.Player, Black: blackAgent.Player, Outcome: outcome, VictoryReason: victory, States: states, } } resolve TODO: use idiomatic return sequence package main import ( "bytes" "encoding/json" "flag" "fmt" "log" "net/http" "github.com/danmane/abalone/go/api" "github.com/danmane/abalone/go/game" ) var ( playAgainstHuman = flag.Bool("playAgainstHuman", false, "play against human on frontend rather than AI vs AI") humanPort = flag.String("humanPort", "1337", "port for javascript frontend") aiPort1 = flag.String("aiPort1", "3423", "port for first ai") aiPort2 = flag.String("aiPort2", "3424", "port for second ai (if present)") ) func main() { flag.Parse() if err := run(); err != nil { log.Fatal(err) } } func run() error { whiteAI := api.Player{} blackAI := api.Player{} whiteAgent := PlayerInstance{Player: whiteAI, Port: *aiPort1} blackAgent := PlayerInstance{Player: blackAI, Port: *aiPort2} start := game.Standard result := playAIGame(whiteAgent, blackAgent, start) fmt.Println(result) return nil } type PlayerInstance struct { Player api.Player Port string } func gameFromAI(state *game.State, port string) (*game.State, error) { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(state); err != nil { return nil, err } resp, err := http.Post("http://localhost:"+port+"/move", "application/json", &buf) if err != nil { return nil, err } responseGame := &game.State{} if err := json.NewDecoder(resp.Body).Decode(responseGame); err != nil { 
return nil, err } resp.Body.Close() if !state.ValidFuture(responseGame) { return fmt.Errorf("game parsed correctly, but isn't a valid future"), nil } return responseGame, nil } func playAIGame(whiteAgent, blackAgent PlayerInstance, startState game.State) api.GameResult { states := []game.State{startState} currentGame := &startState victory := api.NoVictory outcome := game.NullOutcome for !currentGame.GameOver() { var nextAI PlayerInstance if currentGame.NextPlayer == game.White { nextAI = whiteAgent } else { nextAI = blackAgent } futureGame, err := gameFromAI(nextAI.Port, currentGame) if err != nil { fmt.Println(err) victory = api.InvalidResponse outcome = currentGame.NextPlayer.Loses() return api.GameResult{ White: whiteAgent.Player, Black: blackAgent.Player, Outcome: outcome, VictoryReason: victory, States: states, } } currentGame = futureGame states = append(states, *currentGame) } outcome = currentGame.Winner() if currentGame.MovesRemaining == 0 { // TODO win on last move = stones depleted victory = api.MovesDepleted fmt.Println("someone won by move depletion") } else { victory = api.StonesDepleted fmt.Println("someone won by stone depletion") } return api.GameResult{ White: whiteAgent.Player, Black: blackAgent.Player, Outcome: outcome, VictoryReason: victory, States: states, } }
// Copyright (c) 2016 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rules import ( "github.com/projectcalico/felix/go/felix/hashutils" . "github.com/projectcalico/felix/go/felix/iptables" "github.com/projectcalico/felix/go/felix/proto" ) func (r *ruleRenderer) WorkloadDispatchChains(endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*Chain { toEndpointRules := make([]Rule, 0, len(endpoints)+1) fromEndpointRules := make([]Rule, 0, len(endpoints)+1) for _, endpoint := range endpoints { fromEndpointRules = append(fromEndpointRules, Rule{ Match: Match().InInterface(endpoint.Name), Action: GotoAction{ Target: EndpointChainName(WorkloadFromEndpointPfx, endpoint.Name), }, }) toEndpointRules = append(toEndpointRules, Rule{ Match: Match().OutInterface(endpoint.Name), Action: GotoAction{ Target: EndpointChainName(WorkloadToEndpointPfx, endpoint.Name), }, }) } fromEndpointRules = append(fromEndpointRules, Rule{ Action: DropAction{}, }) toEndpointRules = append(toEndpointRules, Rule{ Action: DropAction{}, }) fromEndpointDispatchChain := Chain{ Name: ChainFromWorkloadDispatch, Rules: fromEndpointRules, } toEndpointDispatchChain := Chain{ Name: ChainToWorkloadDispatch, Rules: toEndpointRules, } return []*Chain{&toEndpointDispatchChain, &fromEndpointDispatchChain} } func (r *ruleRenderer) WorkloadEndpointToIptablesChains(epID *proto.WorkloadEndpointID, endpoint *proto.WorkloadEndpoint) []*Chain { inRules := []Rule{} outRules 
:= []Rule{} // Start by ensuring that the accept mark bit is clear, policies set that bit to indicate // that they accepted the packet. inRules = append(inRules, Rule{ Action: ClearMarkAction{ Mark: r.IptablesMarkAccept, }, }) outRules = append(outRules, Rule{ Action: ClearMarkAction{ Mark: r.IptablesMarkAccept, }, }) // TODO(smc) Police the MAC? for _, tier := range endpoint.Tiers { // For each tier, clear the "accepted by tier" mark. inRules = append(inRules, Rule{ Comment: "Start of tier " + tier.Name, Action: ClearMarkAction{ Mark: r.IptablesMarkNextTier, }, }) outRules = append(outRules, Rule{ Comment: "Start of tier " + tier.Name, Action: ClearMarkAction{ Mark: r.IptablesMarkNextTier, }, }) // Then, jump to each policy in turn. for _, polID := range tier.Policies { inPolChainName := PolicyChainName( PolicyInboundPfx, &proto.PolicyID{Tier: tier.Name, Name: polID}, ) inRules = append(inRules, Rule{ Match: Match().MarkClear(r.IptablesMarkNextTier), Action: JumpAction{Target: inPolChainName}, }, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: "Return if policy accepted", }) outPolChainName := PolicyChainName( PolicyOutboundPfx, &proto.PolicyID{Tier: tier.Name, Name: polID}, ) outRules = append(outRules, Rule{ Match: Match().MarkClear(r.IptablesMarkNextTier), Action: JumpAction{Target: outPolChainName}, }, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: "Return if policy accepted", }) } // If no policy in the tier marked the packet as next-tier, drop the packet. inRules = append(inRules, r.DropRules(Match().MarkClear(r.IptablesMarkNextTier), "Drop if no policies passed packet")...) 
outRules = append(outRules, r.DropRules(Match().MarkClear(r.IptablesMarkNextTier), "Drop if no policies passed packet")...) } // Then, jump to each profile in turn. for _, profileID := range endpoint.ProfileIds { inProfChainName := ProfileChainName(PolicyInboundPfx, &proto.ProfileID{Name: profileID}) outProfChainName := ProfileChainName(PolicyOutboundPfx, &proto.ProfileID{Name: profileID}) inRules = append(inRules, Rule{Action: JumpAction{Target: inProfChainName}}, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: "Return if profile accepted", }) outRules = append(outRules, Rule{Action: JumpAction{Target: outProfChainName}}, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: "Return if profile accepted", }) } inRules = append(inRules, r.DropRules(Match(), "Drop if no profiles matched")...) outRules = append(outRules, r.DropRules(Match(), "Drop if no profiles matched")...) 
toEndpointChain := Chain{ Name: EndpointChainName(WorkloadToEndpointPfx, endpoint.Name), Rules: inRules, } fromEndpointChain := Chain{ Name: EndpointChainName(WorkloadFromEndpointPfx, endpoint.Name), Rules: outRules, } return []*Chain{&toEndpointChain, &fromEndpointChain} } func (r *ruleRenderer) HostDispatchChains(map[proto.HostEndpointID]*proto.HostEndpoint) []*Chain { panic("Not implemented") return nil } func (r *ruleRenderer) HostEndpointToIptablesChains(epID *proto.HostEndpointID, endpoint *proto.HostEndpoint) []*Chain { panic("Not implemented") // TODO(smc) Failsafe chains return nil } func EndpointChainName(prefix string, ifaceName string) string { return hashutils.GetLengthLimitedID( prefix, ifaceName, MaxChainNameLength, ) } Implement HostDispatchChains // Copyright (c) 2016 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rules import ( "github.com/projectcalico/felix/go/felix/hashutils" . "github.com/projectcalico/felix/go/felix/iptables" "github.com/projectcalico/felix/go/felix/proto" ) func (r *ruleRenderer) WorkloadDispatchChains(endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*Chain { // Extract endpoint names. 
names := make([]string, 0, len(endpoints)) for _, endpoint := range endpoints { names = append(names, endpoint.Name) } return dispatchChains( names, WorkloadFromEndpointPfx, WorkloadToEndpointPfx, ChainFromWorkloadDispatch, ChainToWorkloadDispatch, DropAction{}, ) } func dispatchChains( names []string, fromEndpointPfx, toEndpointPfx, dispatchFromEndpoint, dispatchToEndpoint string, unknownInterfaceAction Action, ) []*Chain { toEndpointRules := make([]Rule, 0, len(names)+1) fromEndpointRules := make([]Rule, 0, len(names)+1) for _, name := range names { fromEndpointRules = append(fromEndpointRules, Rule{ Match: Match().InInterface(name), Action: GotoAction{ Target: EndpointChainName(fromEndpointPfx, name), }, }) toEndpointRules = append(toEndpointRules, Rule{ Match: Match().OutInterface(name), Action: GotoAction{ Target: EndpointChainName(toEndpointPfx, name), }, }) } fromEndpointRules = append(fromEndpointRules, Rule{ Action: unknownInterfaceAction, Comment: "Unknown interface", }) toEndpointRules = append(toEndpointRules, Rule{ Action: unknownInterfaceAction, Comment: "Unknown interface", }) fromEndpointDispatchChain := Chain{ Name: dispatchFromEndpoint, Rules: fromEndpointRules, } toEndpointDispatchChain := Chain{ Name: dispatchToEndpoint, Rules: toEndpointRules, } return []*Chain{&toEndpointDispatchChain, &fromEndpointDispatchChain} } func (r *ruleRenderer) WorkloadEndpointToIptablesChains(epID *proto.WorkloadEndpointID, endpoint *proto.WorkloadEndpoint) []*Chain { inRules := []Rule{} outRules := []Rule{} // Start by ensuring that the accept mark bit is clear, policies set that bit to indicate // that they accepted the packet. inRules = append(inRules, Rule{ Action: ClearMarkAction{ Mark: r.IptablesMarkAccept, }, }) outRules = append(outRules, Rule{ Action: ClearMarkAction{ Mark: r.IptablesMarkAccept, }, }) // TODO(smc) Police the MAC? for _, tier := range endpoint.Tiers { // For each tier, clear the "accepted by tier" mark. 
inRules = append(inRules, Rule{ Comment: "Start of tier " + tier.Name, Action: ClearMarkAction{ Mark: r.IptablesMarkNextTier, }, }) outRules = append(outRules, Rule{ Comment: "Start of tier " + tier.Name, Action: ClearMarkAction{ Mark: r.IptablesMarkNextTier, }, }) // Then, jump to each policy in turn. for _, polID := range tier.Policies { inPolChainName := PolicyChainName( PolicyInboundPfx, &proto.PolicyID{Tier: tier.Name, Name: polID}, ) inRules = append(inRules, Rule{ Match: Match().MarkClear(r.IptablesMarkNextTier), Action: JumpAction{Target: inPolChainName}, }, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: "Return if policy accepted", }) outPolChainName := PolicyChainName( PolicyOutboundPfx, &proto.PolicyID{Tier: tier.Name, Name: polID}, ) outRules = append(outRules, Rule{ Match: Match().MarkClear(r.IptablesMarkNextTier), Action: JumpAction{Target: outPolChainName}, }, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: "Return if policy accepted", }) } // If no policy in the tier marked the packet as next-tier, drop the packet. inRules = append(inRules, r.DropRules(Match().MarkClear(r.IptablesMarkNextTier), "Drop if no policies passed packet")...) outRules = append(outRules, r.DropRules(Match().MarkClear(r.IptablesMarkNextTier), "Drop if no policies passed packet")...) } // Then, jump to each profile in turn. 
for _, profileID := range endpoint.ProfileIds { inProfChainName := ProfileChainName(PolicyInboundPfx, &proto.ProfileID{Name: profileID}) outProfChainName := ProfileChainName(PolicyOutboundPfx, &proto.ProfileID{Name: profileID}) inRules = append(inRules, Rule{Action: JumpAction{Target: inProfChainName}}, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: "Return if profile accepted", }) outRules = append(outRules, Rule{Action: JumpAction{Target: outProfChainName}}, // If policy marked packet as accepted, it returns, setting the // accept mark bit. If that is set, return from this chain. Rule{ Match: Match().MarkSet(r.IptablesMarkAccept), Action: ReturnAction{}, Comment: "Return if profile accepted", }) } inRules = append(inRules, r.DropRules(Match(), "Drop if no profiles matched")...) outRules = append(outRules, r.DropRules(Match(), "Drop if no profiles matched")...) toEndpointChain := Chain{ Name: EndpointChainName(WorkloadToEndpointPfx, endpoint.Name), Rules: inRules, } fromEndpointChain := Chain{ Name: EndpointChainName(WorkloadFromEndpointPfx, endpoint.Name), Rules: outRules, } return []*Chain{&toEndpointChain, &fromEndpointChain} } func (r *ruleRenderer) HostDispatchChains(endpoints map[proto.HostEndpointID]*proto.HostEndpoint) []*Chain { // Extract endpoint names. 
names := make([]string, 0, len(endpoints)) for _, endpoint := range endpoints { names = append(names, endpoint.Name) } return dispatchChains( names, HostFromEndpointPfx, HostToEndpointPfx, ChainDispatchFromHostEndpoint, ChainDispatchToHostEndpoint, ReturnAction{}, ) } func (r *ruleRenderer) HostEndpointToIptablesChains(epID *proto.HostEndpointID, endpoint *proto.HostEndpoint) []*Chain { panic("Not implemented") // TODO(smc) Failsafe chains return nil } func EndpointChainName(prefix string, ifaceName string) string { return hashutils.GetLengthLimitedID( prefix, ifaceName, MaxChainNameLength, ) }
package main

import (
	"crypto/x509"
	"log"

	"github.com/gopherjs/gopherjs/js"
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// PlatformKeysBackend adapts the chrome.platformKeys certificate store to an
// SSH agent key/signer backend.
type PlatformKeysBackend struct {
	pk *PlatformKeys
}

// NewPlatformKeysBackend binds the backend to the browser's
// chrome.platformKeys API object.
func NewPlatformKeysBackend() *PlatformKeysBackend {
	pk := js.Global.Get("chrome").Get("platformKeys")
	return &PlatformKeysBackend{
		pk: &PlatformKeys{pk},
	}
}

// List returns one agent.Key per available client certificate.
func (a *PlatformKeysBackend) List() ([]*agent.Key, error) {
	certs, err := a.listCertificates()
	if err != nil {
		return nil, err
	}

	log.Printf("Listing keys: count=%d", len(certs))

	keys := make([]*agent.Key, 0, len(certs))
	for _, cert := range certs {
		pubkey, err := ssh.NewPublicKey(cert.PublicKey)
		if err != nil {
			return nil, err
		}
		keys = append(keys, &agent.Key{
			Format:  pubkey.Type(),
			Blob:    pubkey.Marshal(),
			Comment: "",
		})
	}
	return keys, nil
}

// Signers returns an ssh.Signer per client certificate; each signer delegates
// the actual signing operation to chrome.platformKeys via NewPKSigner.
func (a *PlatformKeysBackend) Signers() (signers []ssh.Signer, err error) {
	certs, err := a.listCertificates()
	if err != nil {
		return nil, err
	}
	for _, cert := range certs {
		signer, err := ssh.NewSignerFromSigner(NewPKSigner(a.pk, cert))
		if err != nil {
			return nil, err
		}
		signers = append(signers, signer)
	}
	return
}

// listCertificates queries chrome.platformKeys for client certificates and
// parses the returned DER blobs.
func (a *PlatformKeysBackend) listCertificates() ([]*x509.Certificate, error) {
	req := js.M{
		"request": js.M{
			"certificateTypes":       []string{},
			"certificateAuthorities": js.S{},
		},
		// Fix: selectClientCertificates must be called with interactive set
		// to true; with false the call does not yield usable certificates
		// here (the same change is made in the later revision of this file:
		// "Make selectClientCertificates interactive: true").
		"interactive": true,
	}
	matches, err := a.pk.SelectClientCertificates(req)
	if err != nil {
		return nil, err
	}

	certs := make([]*x509.Certificate, 0, len(matches))
	for _, m := range matches {
		cert, err := x509.ParseCertificate(m)
		if err != nil {
			return nil, err
		}
		certs = append(certs, cert)
	}
	return certs, nil
}
package main

import (
	"crypto/x509"
	"log"

	"github.com/gopherjs/gopherjs/js"
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// PlatformKeysBackend bridges chrome.platformKeys client certificates into
// the SSH agent model.
type PlatformKeysBackend struct {
	pk *PlatformKeys
}

// NewPlatformKeysBackend constructs a backend bound to the browser's
// chrome.platformKeys API object.
func NewPlatformKeysBackend() *PlatformKeysBackend {
	api := js.Global.Get("chrome").Get("platformKeys")
	return &PlatformKeysBackend{pk: &PlatformKeys{api}}
}

// List enumerates the client certificates and wraps each public key as an
// agent.Key.
func (b *PlatformKeysBackend) List() ([]*agent.Key, error) {
	clientCerts, err := b.listCertificates()
	if err != nil {
		return nil, err
	}

	log.Printf("Listing keys: count=%d", len(clientCerts))

	agentKeys := make([]*agent.Key, 0, len(clientCerts))
	for _, clientCert := range clientCerts {
		sshPub, pubErr := ssh.NewPublicKey(clientCert.PublicKey)
		if pubErr != nil {
			return nil, pubErr
		}
		key := &agent.Key{
			Format:  sshPub.Type(),
			Blob:    sshPub.Marshal(),
			Comment: "",
		}
		agentKeys = append(agentKeys, key)
	}
	return agentKeys, nil
}

// Signers produces an ssh.Signer for every client certificate; signing is
// delegated to chrome.platformKeys via NewPKSigner.
func (b *PlatformKeysBackend) Signers() (signers []ssh.Signer, err error) {
	var clientCerts []*x509.Certificate
	clientCerts, err = b.listCertificates()
	if err != nil {
		return nil, err
	}
	for _, clientCert := range clientCerts {
		var s ssh.Signer
		s, err = ssh.NewSignerFromSigner(NewPKSigner(b.pk, clientCert))
		if err != nil {
			return nil, err
		}
		signers = append(signers, s)
	}
	return signers, nil
}

// listCertificates asks chrome.platformKeys for client certificates
// (interactively) and decodes each DER-encoded match.
func (b *PlatformKeysBackend) listCertificates() ([]*x509.Certificate, error) {
	selection := js.M{
		"request": js.M{
			"certificateTypes":       []string{},
			"certificateAuthorities": js.S{},
		},
		"interactive": true,
	}
	rawMatches, err := b.pk.SelectClientCertificates(selection)
	if err != nil {
		return nil, err
	}

	parsed := make([]*x509.Certificate, 0, len(rawMatches))
	for _, der := range rawMatches {
		c, parseErr := x509.ParseCertificate(der)
		if parseErr != nil {
			return nil, parseErr
		}
		parsed = append(parsed, c)
	}
	return parsed, nil
}
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// dataPrinter drains dataChan, logging message payloads (or errors) until the
// channel is closed.
func dataPrinter(dataChan <-chan *EndpointData) {
	for {
		data, ok := <-dataChan
		if !ok {
			// channel closed
			return
		}
		if data.Err == nil {
			log.Println("Message=" + string(data.Content))
		} else {
			log.Println(data.Err)
		}
	}
}

// main activates the pxGrid account, discovers the session and pubsub
// services, connects a websocket subscription, and streams session data.
func main() {
	config := NewConfig()
	control, err := NewControl(config)
	if err != nil {
		log.Fatal(err)
	}

	// AccountActivate: poll every 30s until the controller enables us.
	for {
		res, err := control.AccountActivate()
		if err != nil {
			log.Fatal(err)
		}
		if res.AccountState == "ENABLED" {
			break
		}
		time.Sleep(30 * time.Second)
	}

	// pxGrid ServiceLookup for Session Directory
	services, err := control.ServiceLookup("com.cisco.ise.session")
	if err != nil {
		log.Fatal(err)
	} else if len(services) == 0 {
		log.Fatal("Service unavailable")
	}

	// Use first service
	wsPubsubService := services[0].Properties["wsPubsubService"]
	sessionTopic := services[0].Properties["sessionTopic"]
	log.Println("wsPubsubService=", wsPubsubService, "sessionTopic=", sessionTopic)

	// pxGrid ServiceLookup for pubsub service
	pubsubServices, err := control.ServiceLookup(wsPubsubService)
	if err != nil {
		log.Fatal(err)
	} else if len(pubsubServices) == 0 {
		log.Fatal("Pubsub service unavailable")
	}

	// Use first pubsub service
	pubsubService := pubsubServices[0]
	pubsubNodeName := pubsubService.NodeName
	wsUrl := pubsubService.Properties["wsUrl"]
	log.Println("wsUrl=", wsUrl)

	// pxGrid AccessSecret with the pubsub node
	secret, err := control.GetAccessSecret(pubsubNodeName)
	if err != nil {
		log.Fatal(err)
	}

	// Setup WebSocket
	endpoint, err := NewEndpoint(config)
	if err != nil {
		log.Fatal(err)
	}
	err = endpoint.Connect(wsUrl, config.nodeName, secret)
	if err != nil {
		log.Fatal(err)
	}
	err = endpoint.Subscribe(sessionTopic)
	if err != nil {
		log.Fatal(err)
	}

	dataChan := make(chan *EndpointData)
	go dataPrinter(dataChan)
	go endpoint.Listener(dataChan)

	// Setup abort channel
	log.Println("Press <Ctrl-c> to disconnect...")
	// NOTE(review): signal.Notify documents that the channel should be
	// buffered (make(chan os.Signal, 1)); an unbuffered channel can miss a
	// signal delivered before <-abort runs — flagged, not changed here.
	abort := make(chan os.Signal)
	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
<-abort

	// Cleanup
	log.Printf("Disconnecting websocket connection...")
	endpoint.Disconnect()
	// NOTE(review): dataChan is closed here while endpoint.Listener may still
	// be sending; a send on a closed channel panics — confirm Listener has
	// stopped (Disconnect above presumably guarantees this) before close.
	close(dataChan)
	log.Printf("...Done")
}

Reverts the change about checking for dataChan

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// dataPrinter logs each EndpointData received until dataChan is closed
// (range exits on close).
func dataPrinter(dataChan <-chan *EndpointData) {
	for data := range dataChan {
		if data.Err == nil {
			log.Println("Message=" + string(data.Content))
		} else {
			log.Println(data.Err)
		}
	}
}

// main activates the pxGrid account, discovers the session and pubsub
// services, connects a websocket subscription, and streams session data.
func main() {
	config := NewConfig()
	control, err := NewControl(config)
	if err != nil {
		log.Fatal(err)
	}

	// AccountActivate: poll every 30s until the controller enables us.
	for {
		res, err := control.AccountActivate()
		if err != nil {
			log.Fatal(err)
		}
		if res.AccountState == "ENABLED" {
			break
		}
		time.Sleep(30 * time.Second)
	}

	// pxGrid ServiceLookup for Session Directory
	services, err := control.ServiceLookup("com.cisco.ise.session")
	if err != nil {
		log.Fatal(err)
	} else if len(services) == 0 {
		log.Fatal("Service unavailable")
	}

	// Use first service
	wsPubsubService := services[0].Properties["wsPubsubService"]
	sessionTopic := services[0].Properties["sessionTopic"]
	log.Println("wsPubsubService=", wsPubsubService, "sessionTopic=", sessionTopic)

	// pxGrid ServiceLookup for pubsub service
	pubsubServices, err := control.ServiceLookup(wsPubsubService)
	if err != nil {
		log.Fatal(err)
	} else if len(pubsubServices) == 0 {
		log.Fatal("Pubsub service unavailable")
	}

	// Use first pubsub service
	pubsubService := pubsubServices[0]
	pubsubNodeName := pubsubService.NodeName
	wsUrl := pubsubService.Properties["wsUrl"]
	log.Println("wsUrl=", wsUrl)

	// pxGrid AccessSecret with the pubsub node
	secret, err := control.GetAccessSecret(pubsubNodeName)
	if err != nil {
		log.Fatal(err)
	}

	// Setup WebSocket
	endpoint, err := NewEndpoint(config)
	if err != nil {
		log.Fatal(err)
	}
	err = endpoint.Connect(wsUrl, config.nodeName, secret)
	if err != nil {
		log.Fatal(err)
	}
	err = endpoint.Subscribe(sessionTopic)
	if err != nil {
		log.Fatal(err)
	}

	dataChan := make(chan *EndpointData)
	go dataPrinter(dataChan)
	go endpoint.Listener(dataChan)

	// Setup
abort channel log.Println("Press <Ctrl-c> to disconnect...") abort := make(chan os.Signal) signal.Notify(abort, os.Interrupt, syscall.SIGTERM) <-abort // Cleanup log.Printf("Disconnecting websocket connection...") endpoint.Disconnect() close(dataChan) log.Printf("...Done") }
package vimeo

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"net/http"
	"net/url"
	"reflect"

	"github.com/google/go-querystring/query"
)

const (
	libraryVersion   = "0.8.0"
	defaultBaseURL   = "https://api.vimeo.com/"
	defaultUserAgent = "go-vimeo/" + libraryVersion

	// Vimeo API version pinned via the Accept header on every request.
	mediaTypeVersion = "application/vnd.vimeo.*+json;version=3.2"
)

// Client manages communication with Vimeo API.
type Client struct {
	client *http.Client // underlying HTTP client used for all requests

	BaseURL *url.URL // base URL for API requests; relative paths resolve against it

	UserAgent string // sent as the User-Agent header when non-empty

	// Services used for communicating with the API
	Categories      *CategoriesService
	Channels        *ChannelsService
	ContentRatings  *ContentRatingsService
	CreativeCommons *CreativeCommonsService
	Groups          *GroupsService
	Languages       *LanguagesService
	Tags            *TagsService
	Videos          *VideosService
	Users           *UsersService
}

// service is embedded by each API service to share the single Client.
type service struct {
	client *Client
}

// NewClient returns a new Vimeo API client. If a nil httpClient is
// provided, http.DefaultClient will be used. To use API methods which require
// authentication, provide an http.Client that will perform the authentication
// for you (such as that provided by the golang.org/x/oauth2 library).
func NewClient(httpClient *http.Client) *Client {
	if httpClient == nil {
		httpClient = http.DefaultClient
	}

	baseURL, _ := url.Parse(defaultBaseURL)

	c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: defaultUserAgent}
	c.Categories = &CategoriesService{client: c}
	c.Channels = &ChannelsService{client: c}
	c.ContentRatings = &ContentRatingsService{client: c}
	c.CreativeCommons = &CreativeCommonsService{client: c}
	c.Groups = &GroupsService{client: c}
	c.Languages = &LanguagesService{client: c}
	c.Tags = &TagsService{client: c}
	c.Videos = &VideosService{client: c}
	c.Users = &UsersService{client: c}

	return c
}

// Client returns the HTTP client configured for this client.
func (c *Client) Client() *http.Client {
	return c.client
}

// NewRequest creates an API request.
func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) { rel, err := url.Parse(urlStr) if err != nil { return nil, err } u := c.BaseURL.ResolveReference(rel) var buf io.ReadWriter if body != nil { buf = new(bytes.Buffer) err = json.NewEncoder(buf).Encode(body) if err != nil { return nil, err } } req, err := http.NewRequest(method, u.String(), buf) if err != nil { return nil, err } if body != nil { req.Header.Set("Content-Type", "application/json") } req.Header.Set("Accept", mediaTypeVersion) if c.UserAgent != "" { req.Header.Set("User-Agent", c.UserAgent) } return req, nil } // NewUploadRequest creates an upload request. func (c *Client) NewUploadRequest(url string, reader io.Reader, name string) (*http.Request, error) { body := new(bytes.Buffer) writer := multipart.NewWriter(body) part, err := writer.CreateFormFile("file_data", name) if err != nil { return nil, err } content, err := ioutil.ReadAll(reader) if err != nil { return nil, err } part.Write(content) err = writer.Close() if err != nil { return nil, err } req, err := http.NewRequest("POST", url, body) req.Header.Set("Content-Type", writer.FormDataContentType()) if err != nil { return nil, err } return req, nil } // Do sends an API request and returns the API response. The API response is JSON decoded and stored in the value // pointed to by v, or returned as an error if an API error has occurred. If v implements the io.Writer interface, // the raw response will be written to v, without attempting to decode it. 
func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}

	defer func() {
		// Drain up to 512 bytes and close so the TCP connection can be reused.
		io.CopyN(ioutil.Discard, resp.Body, 512)
		resp.Body.Close()
	}()

	response := newResponse(resp)

	err = CheckResponse(resp)
	if err != nil {
		// Return the wrapped response alongside the API error for inspection.
		return response, err
	}

	if v != nil {
		if w, ok := v.(io.Writer); ok {
			_, err = io.Copy(w, resp.Body)
			if err != nil {
				// NOTE(review): this path returns a nil *Response, unlike the
				// decode path below which returns response — confirm intent.
				return nil, err
			}
		} else {
			err = json.NewDecoder(resp.Body).Decode(v)
			if err == io.EOF {
				// An empty body is not an error.
				err = nil
			}
		}
	}

	return response, err
}

// paginator abstracts the pagination fields shared by list responses.
type paginator interface {
	GetPage() int
	GetTotal() int
	GetPaging() (string, string, string, string)
}

// paging holds the relative pagination links returned by the API.
type paging struct {
	Next  string `json:"next,omitempty"`
	Prev  string `json:"previous,omitempty"`
	First string `json:"first,omitempty"`
	Last  string `json:"last,omitempty"`
}

// pagination is embedded in list responses to satisfy paginator.
type pagination struct {
	Total  int    `json:"total,omitempty"`
	Page   int    `json:"page,omitempty"`
	Paging paging `json:"paging,omitempty"`
}

// GetPage returns the current page number.
func (p pagination) GetPage() int {
	return p.Page
}

// GetTotal returns the total number of pages.
func (p pagination) GetTotal() int {
	return p.Total
}

// GetPaging returns the data pagination presented as relative references.
// In the following procedure: next, previous, first, last page.
func (p pagination) GetPaging() (string, string, string, string) {
	return p.Paging.Next, p.Paging.Prev, p.Paging.First, p.Paging.Last
}

// Response is a Vimeo response. This wraps the standard http.Response.
// Provides access pagination links.
type Response struct {
	*http.Response
	// Pagination
	Page       int
	TotalPages int
	NextPage   string
	PrevPage   string
	FirstPage  string
	LastPage   string
}

// setPaging copies pagination data from a decoded list body onto the Response.
func (r *Response) setPaging(p paginator) {
	r.Page = p.GetPage()
	r.TotalPages = p.GetTotal()
	r.NextPage, r.PrevPage, r.FirstPage, r.LastPage = p.GetPaging()
}

// ErrorResponse is a Vimeo error response. This wraps the standard http.Response.
// Provides access error message returned Vimeo.
type ErrorResponse struct { Response *http.Response Message string `json:"error"` } func (r *ErrorResponse) Error() string { return fmt.Sprintf("%v %v: %d %v", r.Response.Request.Method, sanitizeURL(r.Response.Request.URL), r.Response.StatusCode, r.Message) } func sanitizeURL(uri *url.URL) *url.URL { if uri == nil { return nil } params := uri.Query() if len(params.Get("client_secret")) > 0 { params.Set("client_secret", "REDACTED") uri.RawQuery = params.Encode() } return uri } func newResponse(r *http.Response) *Response { response := &Response{Response: r} return response } // CheckResponse checks the API response for errors, and returns them if // present. A response is considered an error if it has a status code outside // the 200 range. API error responses are expected to have either no response // body, or a JSON response body that maps to ErrorResponse. Any other // response body will be silently ignored. func CheckResponse(r *http.Response) error { if code := r.StatusCode; 200 <= code && code <= 299 { return nil } errorResponse := &ErrorResponse{Response: r} data, err := ioutil.ReadAll(r.Body) if err == nil && data != nil { json.Unmarshal(data, errorResponse) } return errorResponse } // ListOptions specifies the optional parameters to various List methods that // support pagination. 
type ListOptions struct {
	Page      int `url:"page,omitempty"`
	PerPage   int `url:"per_page,omitempty"`
	Sort      int `url:"sort,omitempty"`
	Direction int `url:"direction,omitempty"`
}

// addOptions encodes opt's `url`-tagged fields as the query string of s.
// A nil pointer opt leaves s unchanged.
func addOptions(s string, opt interface{}) (string, error) {
	v := reflect.ValueOf(opt)
	if v.Kind() == reflect.Ptr && v.IsNil() {
		return s, nil
	}

	u, err := url.Parse(s)
	if err != nil {
		return s, err
	}

	qs, err := query.Values(opt)
	if err != nil {
		return s, err
	}

	u.RawQuery = qs.Encode()
	return u.String(), nil
}

Prepare 1.0.0

package vimeo

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"net/http"
	"net/url"
	"reflect"

	"github.com/google/go-querystring/query"
)

const (
	libraryVersion   = "1.0.0"
	defaultBaseURL   = "https://api.vimeo.com/"
	defaultUserAgent = "go-vimeo/" + libraryVersion

	// Vimeo API version pinned via the Accept header on every request.
	mediaTypeVersion = "application/vnd.vimeo.*+json;version=3.2"
)

// Client manages communication with Vimeo API.
type Client struct {
	client *http.Client // underlying HTTP client used for all requests

	BaseURL *url.URL // base URL for API requests; relative paths resolve against it

	UserAgent string // sent as the User-Agent header when non-empty

	// Services used for communicating with the API
	Categories      *CategoriesService
	Channels        *ChannelsService
	ContentRatings  *ContentRatingsService
	CreativeCommons *CreativeCommonsService
	Groups          *GroupsService
	Languages       *LanguagesService
	Tags            *TagsService
	Videos          *VideosService
	Users           *UsersService
}

// service is embedded by each API service to share the single Client.
type service struct {
	client *Client
}

// NewClient returns a new Vimeo API client. If a nil httpClient is
// provided, http.DefaultClient will be used. To use API methods which require
// authentication, provide an http.Client that will perform the authentication
// for you (such as that provided by the golang.org/x/oauth2 library).
func NewClient(httpClient *http.Client) *Client {
	if httpClient == nil {
		httpClient = http.DefaultClient
	}

	baseURL, _ := url.Parse(defaultBaseURL)

	c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: defaultUserAgent}
	c.Categories = &CategoriesService{client: c}
	c.Channels = &ChannelsService{client: c}
	c.ContentRatings = &ContentRatingsService{client: c}
	c.CreativeCommons = &CreativeCommonsService{client: c}
	c.Groups = &GroupsService{client: c}
	c.Languages = &LanguagesService{client: c}
	c.Tags = &TagsService{client: c}
	c.Videos = &VideosService{client: c}
	c.Users = &UsersService{client: c}

	return c
}

// Client returns the HTTP client configured for this client.
func (c *Client) Client() *http.Client {
	return c.client
}

// NewRequest creates an API request.
func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {
	rel, err := url.Parse(urlStr)
	if err != nil {
		return nil, err
	}

	// Resolve relative API paths against the configured base URL.
	u := c.BaseURL.ResolveReference(rel)

	var buf io.ReadWriter
	if body != nil {
		buf = new(bytes.Buffer)
		err = json.NewEncoder(buf).Encode(body)
		if err != nil {
			return nil, err
		}
	}

	req, err := http.NewRequest(method, u.String(), buf)
	if err != nil {
		return nil, err
	}

	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	req.Header.Set("Accept", mediaTypeVersion)
	if c.UserAgent != "" {
		req.Header.Set("User-Agent", c.UserAgent)
	}

	return req, nil
}

// NewUploadRequest creates an upload request.
func (c *Client) NewUploadRequest(url string, reader io.Reader, name string) (*http.Request, error) { body := new(bytes.Buffer) writer := multipart.NewWriter(body) part, err := writer.CreateFormFile("file_data", name) if err != nil { return nil, err } content, err := ioutil.ReadAll(reader) if err != nil { return nil, err } part.Write(content) err = writer.Close() if err != nil { return nil, err } req, err := http.NewRequest("POST", url, body) req.Header.Set("Content-Type", writer.FormDataContentType()) if err != nil { return nil, err } return req, nil } // Do sends an API request and returns the API response. The API response is JSON decoded and stored in the value // pointed to by v, or returned as an error if an API error has occurred. If v implements the io.Writer interface, // the raw response will be written to v, without attempting to decode it. func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { resp, err := c.client.Do(req) if err != nil { return nil, err } defer func() { io.CopyN(ioutil.Discard, resp.Body, 512) resp.Body.Close() }() response := newResponse(resp) err = CheckResponse(resp) if err != nil { return response, err } if v != nil { if w, ok := v.(io.Writer); ok { _, err = io.Copy(w, resp.Body) if err != nil { return nil, err } } else { err = json.NewDecoder(resp.Body).Decode(v) if err == io.EOF { err = nil } } } return response, err } type paginator interface { GetPage() int GetTotal() int GetPaging() (string, string, string, string) } type paging struct { Next string `json:"next,omitempty"` Prev string `json:"previous,omitempty"` First string `json:"first,omitempty"` Last string `json:"last,omitempty"` } type pagination struct { Total int `json:"total,omitempty"` Page int `json:"page,omitempty"` Paging paging `json:"paging,omitempty"` } // GetPage returns the current page number. func (p pagination) GetPage() int { return p.Page } // GetTotal returns the total number of pages. 
func (p pagination) GetTotal() int {
	return p.Total
}

// GetPaging returns the data pagination presented as relative references.
// In the following procedure: next, previous, first, last page.
func (p pagination) GetPaging() (string, string, string, string) {
	return p.Paging.Next, p.Paging.Prev, p.Paging.First, p.Paging.Last
}

// Response is a Vimeo response. This wraps the standard http.Response.
// Provides access pagination links.
type Response struct {
	*http.Response
	// Pagination
	Page       int
	TotalPages int
	NextPage   string
	PrevPage   string
	FirstPage  string
	LastPage   string
}

// setPaging copies pagination data from a decoded list body onto the Response.
func (r *Response) setPaging(p paginator) {
	r.Page = p.GetPage()
	r.TotalPages = p.GetTotal()
	r.NextPage, r.PrevPage, r.FirstPage, r.LastPage = p.GetPaging()
}

// ErrorResponse is a Vimeo error response. This wraps the standard http.Response.
// Provides access error message returned Vimeo.
type ErrorResponse struct {
	Response *http.Response
	Message  string `json:"error"`
}

// Error renders the failed request with its status code and API message;
// the URL is sanitized first so secrets do not leak into logs.
func (r *ErrorResponse) Error() string {
	return fmt.Sprintf("%v %v: %d %v",
		r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
		r.Response.StatusCode, r.Message)
}

// sanitizeURL masks the client_secret query parameter, if present.
func sanitizeURL(uri *url.URL) *url.URL {
	if uri == nil {
		return nil
	}
	params := uri.Query()
	if len(params.Get("client_secret")) > 0 {
		params.Set("client_secret", "REDACTED")
		uri.RawQuery = params.Encode()
	}
	return uri
}

// newResponse wraps a raw *http.Response in a vimeo Response.
func newResponse(r *http.Response) *Response {
	response := &Response{Response: r}
	return response
}

// CheckResponse checks the API response for errors, and returns them if
// present. A response is considered an error if it has a status code outside
// the 200 range. API error responses are expected to have either no response
// body, or a JSON response body that maps to ErrorResponse. Any other
// response body will be silently ignored.
func CheckResponse(r *http.Response) error { if code := r.StatusCode; 200 <= code && code <= 299 { return nil } errorResponse := &ErrorResponse{Response: r} data, err := ioutil.ReadAll(r.Body) if err == nil && data != nil { json.Unmarshal(data, errorResponse) } return errorResponse } // ListOptions specifies the optional parameters to various List methods that // support pagination. type ListOptions struct { Page int `url:"page,omitempty"` PerPage int `url:"per_page,omitempty"` Sort int `url:"sort,omitempty"` Direction int `url:"direction,omitempty"` } func addOptions(s string, opt interface{}) (string, error) { v := reflect.ValueOf(opt) if v.Kind() == reflect.Ptr && v.IsNil() { return s, nil } u, err := url.Parse(s) if err != nil { return s, err } qs, err := query.Values(opt) if err != nil { return s, err } u.RawQuery = qs.Encode() return u.String(), nil }
// Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.

// Package vm provides a compiler and virtual machine environment for executing
// mtail programs.
package vm

import (
	"bytes"
	"fmt"
	"math"
	"regexp"
	"runtime/debug"
	"strconv"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/golang/glog"
	"github.com/google/mtail/metrics"
	"github.com/google/mtail/metrics/datum"
)

// opcode identifies a single VM instruction.
type opcode int

const (
	match opcode = iota // Match a regular expression against input, and set the match register.
	cmp                 // Compare two values on the stack and set the match register.
	jnm                 // Jump if no match.
	jm                  // Jump if match.
	jmp                 // Unconditional jump
	inc                 // Increment a variable value
	strptime            // Parse into the timestamp register
	timestamp           // Return value of timestamp register onto TOS.
	settime             // Set timestamp register to value at TOS.
	push                // Push operand onto stack
	capref              // Push capture group reference at operand onto stack
	str                 // Push string constant at operand onto stack
	iset                // Set a variable value
	iadd                // Add top values on stack and push to stack
	isub                // Subtract top value from second top value on stack, and push to stack.
	imul                // Multiply top values on stack and push to stack
	idiv                // Divide top value into second top on stack, and push
	imod                // Integer divide top value into second top on stack, and push remainder
	ipow                // Put second TOS to power of TOS, and push.
	and                 // Bitwise AND the 2 at top of stack, and push result
	or                  // Bitwise OR the 2 at top of stack, and push result
	xor                 // Bitwise XOR the 2 at top of stack, and push result
	not                 // Bitwise NOT the top of stack, and push result
	shl                 // Shift TOS left, push result
	shr                 // Shift TOS right, push result
	mload               // Load metric at operand onto top of stack
	dload               // Pop `operand` keys and metric off stack, and push datum at metric[key,...] onto stack.
	tolower             // Convert the string at the top of the stack to lowercase.
	length              // Compute the length of a string.
strtol     // Convert a string to a number, given a base.
	setmatched // Set "matched" flag
	otherwise  // Only match if "matched" flag is false.
	del        // Pop `operand` keys and metric off stack, and remove the datum at metric[key,...] from memory

	// Floating point ops
	fadd
	fsub
	fmul
	fdiv
	fmod
	fpow
	fset // Floating point assignment
)

// opNames maps opcodes to their disassembly mnemonics.
var opNames = map[opcode]string{
	match:      "match",
	cmp:        "cmp",
	jnm:        "jnm",
	jm:         "jm",
	jmp:        "jmp",
	inc:        "inc",
	strptime:   "strptime",
	timestamp:  "timestamp",
	settime:    "settime",
	push:       "push",
	capref:     "capref",
	str:        "str",
	iset:       "iset",
	iadd:       "iadd",
	isub:       "isub",
	imul:       "imul",
	idiv:       "idiv",
	imod:       "imod",
	ipow:       "ipow",
	shl:        "shl",
	shr:        "shr",
	and:        "and",
	or:         "or",
	xor:        "xor",
	not:        "not",
	mload:      "mload",
	dload:      "dload",
	tolower:    "tolower",
	length:     "length",
	strtol:     "strtol",
	setmatched: "setmatched",
	otherwise:  "otherwise",
	fadd:       "fadd",
	fsub:       "fsub",
	fmul:       "fmul",
	fdiv:       "fdiv",
	fmod:       "fmod",
	fpow:       "fpow",
	fset:       "fset",
}

// builtin maps mtail builtin function names to their opcodes.
var builtin = map[string]opcode{
	"timestamp": timestamp,
	"len":       length,
	"settime":   settime,
	"strptime":  strptime,
	"strtol":    strtol,
	"tolower":   tolower,
}

// instr is a single instruction: an opcode plus its (possibly nil) operand.
type instr struct {
	op   opcode
	opnd interface{}
}

// debug print for instructions
func (i instr) String() string {
	return fmt.Sprintf("{%s %v}", opNames[i.op], i.opnd)
}

// thread is the mutable execution state for one run over one input line.
type thread struct {
	pc      int              // Program counter.
	match   bool             // Match register.
	matched bool             // Flag set if any match has been found.
	matches map[int][]string // Match result variables.
	time    time.Time        // Time register.
	stack   []interface{}    // Data stack.
}

// VM describes the virtual machine for each program. It contains virtual
// segments of the executable bytecode, constant data (string and regular
// expressions), mutable state (metrics), and a stack for the current thread of
// execution.
type VM struct {
	name string
	prog []instr

	re  []*regexp.Regexp  // Regular expression constants
	str []string          // String constants
	m   []*metrics.Metric // Metrics accessible to this program.
timeMemos map[string]time.Time // memo of time string parse results

	t *thread // Current thread of execution

	input string // Log line input to this round of execution.

	terminate bool // Flag to stop the VM program.

	syslogUseCurrentYear bool // Overwrite zero years with the current year in a strptime.
}

// Push a value onto the stack
func (t *thread) Push(value interface{}) {
	t.stack = append(t.stack, value)
}

// Pop a value off the stack
func (t *thread) Pop() (value interface{}) {
	last := len(t.stack) - 1
	value = t.stack[last]
	t.stack = t.stack[:last]
	return
}

// Log a runtime error and terminate the program
func (v *VM) errorf(format string, args ...interface{}) {
	glog.Infof(v.name+": Runtime error: "+format+"\n", args...)
	glog.Infof("VM stack:\n%s", debug.Stack())
	glog.Infof("Dumping vm state")
	glog.Infof("Name: %s", v.name)
	glog.Infof("Input: %q", v.input)
	glog.Infof("Thread:")
	glog.Infof(" PC %v", v.t.pc-1)
	glog.Infof(" Match %v", v.t.match)
	glog.Infof(" Matched %v", v.t.matched)
	glog.Infof(" Matches %v", v.t.matches)
	glog.Infof(" Timestamp %v", v.t.time)
	glog.Infof(" Stack %v", v.t.stack)
	glog.Infof(v.DumpByteCode(v.name))
	v.terminate = true
}

// PopInt pops the top of stack and coerces it to an int64 where possible.
func (t *thread) PopInt() (int64, error) {
	val := t.Pop()
	switch n := val.(type) {
	case int64:
		return n, nil
	case int:
		return int64(n), nil
	case string:
		r, err := strconv.ParseInt(n, 10, 64)
		if err != nil {
			return 0, fmt.Errorf("conversion of %q to int failed: %s", val, err)
		}
		return r, nil
	case time.Time:
		return n.Unix(), nil
	case datum.Datum:
		return datum.GetInt(n), nil
	}
	return 0, fmt.Errorf("unexpected int type %T %q", val, val)
}

// PopFloat pops the top of stack and coerces it to a float64 where possible.
func (t *thread) PopFloat() (float64, error) {
	val := t.Pop()
	switch n := val.(type) {
	case float64:
		return n, nil
	case int:
		return float64(n), nil
	case string:
		r, err := strconv.ParseFloat(n, 64)
		if err != nil {
			return 0, fmt.Errorf("conversion of %q to float failed: %s", val, err)
		}
		return r, nil
	case datum.Datum:
		return datum.GetFloat(n), nil
	}
	return 0, fmt.Errorf("unexpected float type %T %q", val, val)
}

// Execute performs an instruction cycle in the VM -- acting on the current
// instruction, and returns a boolean indicating if the current thread should
// terminate.
func (v *VM) execute(t *thread, i instr) {
	switch i.op {
	case match:
		// match regex and store success
		// Store the results in the operandth element of the stack,
		// where i.opnd == the matched re index
		index := i.opnd.(int)
		t.matches[index] = v.re[index].FindStringSubmatch(v.input)
		t.match = t.matches[index] != nil
	case cmp:
		// Compare two elements on the stack.
		// Set the match register based on the truthiness of the comparison.
		// Operand contains the expected result.
		b, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		a, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		switch i.opnd {
		case -1:
			t.match = a < b
		case 0:
			t.match = a == b
		case 1:
			t.match = a > b
		}
	case jnm:
		if !t.match {
			t.pc = i.opnd.(int)
		}
	case jm:
		if t.match {
			t.pc = i.opnd.(int)
		}
	case jmp:
		t.pc = i.opnd.(int)
	case inc:
		// Increment a datum
		var delta int64 = 1
		// If opnd is non-nil, the delta is on the stack.
if i.opnd != nil {
			var err error
			delta, err = t.PopInt()
			if err != nil {
				v.errorf("%s", err)
			}
		}
		// TODO(jaq): the stack should only have the datum, not the offset
		switch n := t.Pop().(type) {
		case datum.Datum:
			datum.IncIntBy(n, delta, t.time)
		case int: // offset into metric
			m := v.m[n]
			d, err := m.GetDatum()
			if err != nil {
				v.errorf("GetDatum failed: %s", err)
			}
			datum.IncIntBy(d, delta, t.time)
		default:
			v.errorf("Unexpected type to increment: %T %q", n, n)
		}
	case iset:
		// Set a datum
		value, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		// TODO(jaq): the stack should only have the datum, not the offset
		switch n := t.Pop().(type) {
		case datum.Datum:
			datum.SetInt(n, value, t.time)
		case int: // offset into metric
			m := v.m[n]
			d, err := m.GetDatum()
			if err != nil {
				v.errorf("GetDatum failed: %s", err)
			}
			datum.SetInt(d, value, t.time)
		default:
			v.errorf("Unexpected type to set: %T %q", n, n)
		}
	case fset:
		// Set a datum
		value, err := t.PopFloat()
		if err != nil {
			v.errorf("%s", err)
		}
		// TODO(jaq): the stack should only have the datum, not the offset, unfortunately used by test
		switch n := t.Pop().(type) {
		case datum.Datum:
			datum.SetFloat(n, value, t.time)
		case int: // offset into metric
			m := v.m[n]
			d, err := m.GetDatum()
			if err != nil {
				v.errorf("GetDatum failed: %s", err)
			}
			datum.SetFloat(d, value, t.time)
		default:
			v.errorf("Unexpected type to set: %T %q", n, n)
		}
	case strptime:
		// Parse a time string into the time register
		layout := t.Pop().(string)
		var ts string
		switch s := t.Pop().(type) {
		case string:
			ts = s
		case int: /* capref */
			// First find the match storage index on the stack
			re := t.Pop().(int)
			// Store the result from the re'th index at the s'th index
			ts = t.matches[re][s]
		}
		// Parse results are memoized per time string to avoid reparsing.
		if tm, ok := v.timeMemos[ts]; !ok {
			tm, err := time.Parse(layout, ts)
			if err != nil {
				v.errorf("time.Parse(%s, %s) failed: %s", layout, ts, err)
			}
			// Hack for yearless syslog.
			if tm.Year() == 0 && v.syslogUseCurrentYear {
				// No .UTC() as we use local time to match the local log.
tm = tm.AddDate(time.Now().Year(), 0, 0)
			}
			v.timeMemos[ts] = tm
			t.time = tm
		} else {
			t.time = tm
		}
	case timestamp:
		// Put the time register onto the stack
		t.Push(t.time.Unix())
	case settime:
		// Pop TOS and store in time register
		t.time = time.Unix(t.Pop().(int64), 0).UTC()
	case capref:
		// Put a capture group reference onto the stack.
		// First find the match storage index on the stack,
		re := t.Pop().(int)
		// Push the result from the re'th match at operandth index
		t.Push(t.matches[re][i.opnd.(int)])
	case str:
		// Put a string constant onto the stack
		t.Push(v.str[i.opnd.(int)])
	case push:
		// Push a value onto the stack
		t.Push(i.opnd)
	case fadd, fsub, fmul, fdiv, fmod, fpow:
		// Binary float op: b is TOS, a is second-of-stack.
		b, err := t.PopFloat()
		if err != nil {
			v.errorf("%s", err)
		}
		a, err := t.PopFloat()
		if err != nil {
			v.errorf("%s", err)
		}
		switch i.op {
		case fadd:
			t.Push(a + b)
		case fsub:
			t.Push(a - b)
		case fmul:
			t.Push(a * b)
		case fdiv:
			t.Push(a / b)
		case fmod:
			t.Push(math.Mod(a, b))
		case fpow:
			t.Push(math.Pow(a, b))
		}
	case iadd, isub, imul, idiv, imod, ipow, shl, shr, and, or, xor:
		// Op two values at TOS, and push result onto stack
		b, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		a, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		switch i.op {
		case iadd:
			t.Push(a + b)
		case isub:
			t.Push(a - b)
		case imul:
			t.Push(a * b)
		case idiv:
			// Integer division
			t.Push(a / b)
		case imod:
			t.Push(a % b)
		case ipow:
			// TODO(jaq): replace with type coercion
			t.Push(int64(math.Pow(float64(a), float64(b))))
		case shl:
			t.Push(a << uint(b))
		case shr:
			t.Push(a >> uint(b))
		case and:
			t.Push(a & b)
		case or:
			t.Push(a | b)
		case xor:
			t.Push(a ^ b)
		}
	case not:
		a, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		t.Push(^a)
	case mload:
		// Load a metric at operand onto stack
		t.Push(v.m[i.opnd.(int)])
	case dload:
		// Load a datum from metric at TOS onto stack
		//fmt.Printf("Stack: %v\n", t.stack)
		m := t.Pop().(*metrics.Metric)
		//fmt.Printf("Metric: %v\n", m)
		index := i.opnd.(int)
		keys := make([]string, index)
//fmt.Printf("keys: %v\n", keys) for a := 0; a < index; a++ { s := t.Pop().(string) //fmt.Printf("s: %v\n", s) keys[a] = s //fmt.Printf("Keys: %v\n", keys) } //fmt.Printf("Keys: %v\n", keys) d, err := m.GetDatum(keys...) if err != nil { v.errorf("dload (GetDatum) failed: %s", err) } //fmt.Printf("Found %v\n", d) t.Push(d) case del: m := t.Pop().(*metrics.Metric) index := i.opnd.(int) keys := make([]string, index) for j := 0; j < index; j++ { s := t.Pop().(string) keys[j] = s } err := m.RemoveDatum(keys...) if err != nil { v.errorf("del (RemoveDatum) failed: %s", err) } case tolower: // Lowercase a string from TOS, and push result back. s := t.Pop().(string) t.Push(strings.ToLower(s)) case length: // Compute the length of a string from TOS, and push result back. s := t.Pop().(string) t.Push(len(s)) case strtol: base, err := t.PopInt() if err != nil { v.errorf("%s", err) } str := t.Pop().(string) i, err := strconv.ParseInt(str, int(base), 64) if err != nil { v.errorf("%s", err) } t.Push(i) case setmatched: t.matched = i.opnd.(bool) case otherwise: // Only match if the matched flag is false. t.match = !t.matched default: v.errorf("illegal instruction: %d", i.op) } } // processLine handles the incoming lines from the input channel, by running a // fetch-execute cycle on the VM bytecode with the line as input to the // program, until termination. func (v *VM) processLine(input string) { t := new(thread) t.matched = false v.t = t v.input = input t.stack = make([]interface{}, 0) t.matches = make(map[int][]string, 0) for { if t.pc >= len(v.prog) { return } i := v.prog[t.pc] t.pc++ v.execute(t, i) if v.terminate { // Terminate only stops this invocation on this line of input; reset the terminate flag. v.terminate = false return } } } // Run executes the virtual machine on each line of input received. When the // input closes, it signals to the loader that it has terminated by closing the // shutdown channel. 
func (v *VM) Run(_ uint32, lines <-chan string, shutdown chan<- struct{}) { glog.Infof("Starting program %s", v.name) defer close(shutdown) for line := range lines { v.processLine(line) } glog.Infof("Stopping program %s", v.name) } // New creates a new virtual machine with the given name, and compiler // artifacts for executable and data segments. func New(name string, obj *object, syslogUseCurrentYear bool) *VM { return &VM{ name: name, re: obj.re, str: obj.str, m: obj.m, prog: obj.prog, timeMemos: make(map[string]time.Time, 0), syslogUseCurrentYear: syslogUseCurrentYear, } } // DumpByteCode emits the program disassembly and program objects to string. func (v *VM) DumpByteCode(name string) string { b := new(bytes.Buffer) fmt.Fprintf(b, "Prog: %s\n", name) fmt.Fprintln(b, "Metrics") for i, m := range v.m { if m.Program == v.name { fmt.Fprintf(b, " %8d %s\n", i, m) } } fmt.Fprintln(b, "Regexps") for i, re := range v.re { fmt.Fprintf(b, " %8d /%s/\n", i, re) } fmt.Fprintln(b, "Strings") for i, str := range v.str { fmt.Fprintf(b, " %8d \"%s\"\n", i, str) } w := new(tabwriter.Writer) w.Init(b, 0, 0, 1, ' ', tabwriter.AlignRight) fmt.Fprintln(w, "disasm\tl\top\topnd\t") for n, i := range v.prog { fmt.Fprintf(w, "\t%d\t%s\t%v\t\n", n, opNames[i.op], i.opnd) } w.Flush() return b.String() } preallocate the size of the matches list to the number of regular expressions in the VM. // Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. // Package vm provides a compiler and virtual machine environment for executing // mtail programs. package vm import ( "bytes" "fmt" "math" "regexp" "runtime/debug" "strconv" "strings" "text/tabwriter" "time" "github.com/golang/glog" "github.com/google/mtail/metrics" "github.com/google/mtail/metrics/datum" ) type opcode int const ( match opcode = iota // Match a regular expression against input, and set the match register. cmp // Compare two values on the stack and set the match register. 
jnm // Jump if no match. jm // Jump if match. jmp // Unconditional jump inc // Increment a variable value strptime // Parse into the timestamp register timestamp // Return value of timestamp register onto TOS. settime // Set timestamp register to value at TOS. push // Push operand onto stack capref // Push capture group reference at operand onto stack str // Push string constant at operand onto stack iset // Set a variable value iadd // Add top values on stack and push to stack isub // Subtract top value from second top value on stack, and push to stack. imul // Multiply top values on stack and push to stack idiv // Divide top value into second top on stack, and push imod // Integer divide top value into second top on stack, and push remainder ipow // Put second TOS to power of TOS, and push. and // Bitwise AND the 2 at top of stack, and push result or // Bitwise OR the 2 at top of stack, and push result xor // Bitwise XOR the 2 at top of stack, and push result not // Bitwise NOT the top of stack, and push result shl // Shift TOS left, push result shr // Shift TOS right, push result mload // Load metric at operand onto top of stack dload // Pop `operand` keys and metric off stack, and push datum at metric[key,...] onto stack. tolower // Convert the string at the top of the stack to lowercase. length // Compute the length of a string. strtol // Convert a string to a number, given a base. setmatched // Set "matched" flag otherwise // Only match if "matched" flag is false. del // Pop `operand` keys and metric off stack, and remove the datum at metric[key,...] 
from memory // Floating point ops fadd fsub fmul fdiv fmod fpow fset // Floating point assignment ) var opNames = map[opcode]string{ match: "match", cmp: "cmp", jnm: "jnm", jm: "jm", jmp: "jmp", inc: "inc", strptime: "strptime", timestamp: "timestamp", settime: "settime", push: "push", capref: "capref", str: "str", iset: "iset", iadd: "iadd", isub: "isub", imul: "imul", idiv: "idiv", imod: "imod", ipow: "ipow", shl: "shl", shr: "shr", and: "and", or: "or", xor: "xor", not: "not", mload: "mload", dload: "dload", tolower: "tolower", length: "length", strtol: "strtol", setmatched: "setmatched", otherwise: "otherwise", fadd: "fadd", fsub: "fsub", fmul: "fmul", fdiv: "fdiv", fmod: "fmod", fpow: "fpow", fset: "fset", } var builtin = map[string]opcode{ "timestamp": timestamp, "len": length, "settime": settime, "strptime": strptime, "strtol": strtol, "tolower": tolower, } type instr struct { op opcode opnd interface{} } // debug print for instructions func (i instr) String() string { return fmt.Sprintf("{%s %v}", opNames[i.op], i.opnd) } type thread struct { pc int // Program counter. match bool // Match register. matched bool // Flag set if any match has been found. matches map[int][]string // Match result variables. time time.Time // Time register. stack []interface{} // Data stack. } // VM describes the virtual machine for each program. It contains virtual // segments of the executable bytecode, constant data (string and regular // expressions), mutable state (metrics), and a stack for the current thread of // execution. type VM struct { name string prog []instr re []*regexp.Regexp // Regular expression constants str []string // String constants m []*metrics.Metric // Metrics accessible to this program. timeMemos map[string]time.Time // memo of time string parse results t *thread // Current thread of execution input string // Log line input to this round of execution. terminate bool // Flag to stop the VM program. 
syslogUseCurrentYear bool // Overwrite zero years with the current year in a strptime. } // Push a value onto the stack func (t *thread) Push(value interface{}) { t.stack = append(t.stack, value) } // Pop a value off the stack func (t *thread) Pop() (value interface{}) { last := len(t.stack) - 1 value = t.stack[last] t.stack = t.stack[:last] return } // Log a runtime error and terminate the program func (v *VM) errorf(format string, args ...interface{}) { glog.Infof(v.name+": Runtime error: "+format+"\n", args...) glog.Infof("VM stack:\n%s", debug.Stack()) glog.Infof("Dumping vm state") glog.Infof("Name: %s", v.name) glog.Infof("Input: %q", v.input) glog.Infof("Thread:") glog.Infof(" PC %v", v.t.pc-1) glog.Infof(" Match %v", v.t.match) glog.Infof(" Matched %v", v.t.matched) glog.Infof(" Matches %v", v.t.matches) glog.Infof(" Timestamp %v", v.t.time) glog.Infof(" Stack %v", v.t.stack) glog.Infof(v.DumpByteCode(v.name)) v.terminate = true } func (t *thread) PopInt() (int64, error) { val := t.Pop() switch n := val.(type) { case int64: return n, nil case int: return int64(n), nil case string: r, err := strconv.ParseInt(n, 10, 64) if err != nil { return 0, fmt.Errorf("conversion of %q to int failed: %s", val, err) } return r, nil case time.Time: return n.Unix(), nil case datum.Datum: return datum.GetInt(n), nil } return 0, fmt.Errorf("unexpected int type %T %q", val, val) } func (t *thread) PopFloat() (float64, error) { val := t.Pop() switch n := val.(type) { case float64: return n, nil case int: return float64(n), nil case string: r, err := strconv.ParseFloat(n, 64) if err != nil { return 0, fmt.Errorf("conversion of %q to float failed: %s", val, err) } return r, nil case datum.Datum: return datum.GetFloat(n), nil } return 0, fmt.Errorf("unexpected float type %T %q", val, val) } // Execute performs an instruction cycle in the VM -- acting on the current // instruction, and returns a boolean indicating if the current thread should // terminate. 
func (v *VM) execute(t *thread, i instr) {
	// NOTE(review): v.errorf only logs and sets v.terminate; it does not
	// abort the current instruction, so a case body keeps running after a
	// reported error until the fetch loop in processLine sees the flag.
	switch i.op {
	case match:
		// match regex and store success
		// Store the results in the operandth element of the stack,
		// where i.opnd == the matched re index
		index := i.opnd.(int)
		t.matches[index] = v.re[index].FindStringSubmatch(v.input)
		t.match = t.matches[index] != nil
	case cmp:
		// Compare two elements on the stack.
		// Set the match register based on the truthiness of the comparison.
		// Operand contains the expected result.
		b, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		a, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		switch i.opnd {
		case -1:
			t.match = a < b
		case 0:
			t.match = a == b
		case 1:
			t.match = a > b
		}
	case jnm:
		// Jump if no match.
		if !t.match {
			t.pc = i.opnd.(int)
		}
	case jm:
		// Jump if match.
		if t.match {
			t.pc = i.opnd.(int)
		}
	case jmp:
		// Unconditional jump.
		t.pc = i.opnd.(int)
	case inc:
		// Increment a datum
		var delta int64 = 1
		// If opnd is non-nil, the delta is on the stack.
		if i.opnd != nil {
			var err error
			delta, err = t.PopInt()
			if err != nil {
				v.errorf("%s", err)
			}
		}
		// TODO(jaq): the stack should only have the datum, not the offset
		switch n := t.Pop().(type) {
		case datum.Datum:
			datum.IncIntBy(n, delta, t.time)
		case int: // offset into metric
			m := v.m[n]
			d, err := m.GetDatum()
			if err != nil {
				v.errorf("GetDatum failed: %s", err)
			}
			datum.IncIntBy(d, delta, t.time)
		default:
			v.errorf("Unexpected type to increment: %T %q", n, n)
		}
	case iset:
		// Set a datum
		value, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		// TODO(jaq): the stack should only have the datum, not the offset
		switch n := t.Pop().(type) {
		case datum.Datum:
			datum.SetInt(n, value, t.time)
		case int: // offset into metric
			m := v.m[n]
			d, err := m.GetDatum()
			if err != nil {
				v.errorf("GetDatum failed: %s", err)
			}
			datum.SetInt(d, value, t.time)
		default:
			v.errorf("Unexpected type to set: %T %q", n, n)
		}
	case fset:
		// Set a datum
		value, err := t.PopFloat()
		if err != nil {
			v.errorf("%s", err)
		}
		// TODO(jaq): the stack should only have the datum, not the offset, unfortunately used by test
		switch n := t.Pop().(type) {
		case datum.Datum:
			datum.SetFloat(n, value, t.time)
		case int: // offset into metric
			m := v.m[n]
			d, err := m.GetDatum()
			if err != nil {
				v.errorf("GetDatum failed: %s", err)
			}
			datum.SetFloat(d, value, t.time)
		default:
			v.errorf("Unexpected type to set: %T %q", n, n)
		}
	case strptime:
		// Parse a time string into the time register
		layout := t.Pop().(string)
		var ts string
		switch s := t.Pop().(type) {
		case string:
			ts = s
		case int: /* capref */
			// First find the match storage index on the stack
			re := t.Pop().(int)
			// Store the result from the re'th index at the s'th index
			ts = t.matches[re][s]
		}
		// Parse results are memoized per input string to avoid re-parsing
		// identical timestamps on subsequent lines.
		if tm, ok := v.timeMemos[ts]; !ok {
			tm, err := time.Parse(layout, ts)
			if err != nil {
				v.errorf("time.Parse(%s, %s) failed: %s", layout, ts, err)
			}
			// Hack for yearless syslog.
			if tm.Year() == 0 && v.syslogUseCurrentYear {
				// No .UTC() as we use local time to match the local log.
				tm = tm.AddDate(time.Now().Year(), 0, 0)
			}
			v.timeMemos[ts] = tm
			t.time = tm
		} else {
			t.time = tm
		}
	case timestamp:
		// Put the time register onto the stack
		t.Push(t.time.Unix())
	case settime:
		// Pop TOS and store in time register
		t.time = time.Unix(t.Pop().(int64), 0).UTC()
	case capref:
		// Put a capture group reference onto the stack.
		// First find the match storage index on the stack,
		re := t.Pop().(int)
		// Push the result from the re'th match at operandth index
		t.Push(t.matches[re][i.opnd.(int)])
	case str:
		// Put a string constant onto the stack
		t.Push(v.str[i.opnd.(int)])
	case push:
		// Push a value onto the stack
		t.Push(i.opnd)
	case fadd, fsub, fmul, fdiv, fmod, fpow:
		// Floating-point binary ops: operands popped in b-then-a order.
		b, err := t.PopFloat()
		if err != nil {
			v.errorf("%s", err)
		}
		a, err := t.PopFloat()
		if err != nil {
			v.errorf("%s", err)
		}
		switch i.op {
		case fadd:
			t.Push(a + b)
		case fsub:
			t.Push(a - b)
		case fmul:
			t.Push(a * b)
		case fdiv:
			t.Push(a / b)
		case fmod:
			t.Push(math.Mod(a, b))
		case fpow:
			t.Push(math.Pow(a, b))
		}
	case iadd, isub, imul, idiv, imod, ipow, shl, shr, and, or, xor:
		// Op two values at TOS, and push result onto stack
		b, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		a, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		switch i.op {
		case iadd:
			t.Push(a + b)
		case isub:
			t.Push(a - b)
		case imul:
			t.Push(a * b)
		case idiv:
			// Integer division
			t.Push(a / b)
		case imod:
			t.Push(a % b)
		case ipow:
			// TODO(jaq): replace with type coercion
			t.Push(int64(math.Pow(float64(a), float64(b))))
		case shl:
			t.Push(a << uint(b))
		case shr:
			t.Push(a >> uint(b))
		case and:
			t.Push(a & b)
		case or:
			t.Push(a | b)
		case xor:
			t.Push(a ^ b)
		}
	case not:
		a, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		t.Push(^a)
	case mload:
		// Load a metric at operand onto stack
		t.Push(v.m[i.opnd.(int)])
	case dload:
		// Load a datum from metric at TOS onto stack
		//fmt.Printf("Stack: %v\n", t.stack)
		m := t.Pop().(*metrics.Metric)
		//fmt.Printf("Metric: %v\n", m)
		index := i.opnd.(int)
		keys := make([]string, index)
		//fmt.Printf("keys: %v\n", keys)
		for a := 0; a < index; a++ {
			s := t.Pop().(string)
			//fmt.Printf("s: %v\n", s)
			keys[a] = s
			//fmt.Printf("Keys: %v\n", keys)
		}
		//fmt.Printf("Keys: %v\n", keys)
		d, err := m.GetDatum(keys...)
		if err != nil {
			v.errorf("dload (GetDatum) failed: %s", err)
		}
		//fmt.Printf("Found %v\n", d)
		t.Push(d)
	case del:
		// Pop `operand` keys and a metric, and remove that datum.
		m := t.Pop().(*metrics.Metric)
		index := i.opnd.(int)
		keys := make([]string, index)
		for j := 0; j < index; j++ {
			s := t.Pop().(string)
			keys[j] = s
		}
		err := m.RemoveDatum(keys...)
		if err != nil {
			v.errorf("del (RemoveDatum) failed: %s", err)
		}
	case tolower:
		// Lowercase a string from TOS, and push result back.
		s := t.Pop().(string)
		t.Push(strings.ToLower(s))
	case length:
		// Compute the length of a string from TOS, and push result back.
		s := t.Pop().(string)
		t.Push(len(s))
	case strtol:
		// Convert a string to an integer in the given base.
		base, err := t.PopInt()
		if err != nil {
			v.errorf("%s", err)
		}
		str := t.Pop().(string)
		i, err := strconv.ParseInt(str, int(base), 64)
		if err != nil {
			v.errorf("%s", err)
		}
		t.Push(i)
	case setmatched:
		t.matched = i.opnd.(bool)
	case otherwise:
		// Only match if the matched flag is false.
		t.match = !t.matched
	default:
		v.errorf("illegal instruction: %d", i.op)
	}
}

// processLine handles the incoming lines from the input channel, by running a
// fetch-execute cycle on the VM bytecode with the line as input to the
// program, until termination.
func (v *VM) processLine(input string) {
	t := new(thread)
	t.matched = false
	v.t = t
	v.input = input
	t.stack = make([]interface{}, 0)
	// Preallocate the match table to the number of regexp constants: at
	// most one entry per compiled regexp can be stored by `match`.
	t.matches = make(map[int][]string, len(v.re))
	for {
		if t.pc >= len(v.prog) {
			return
		}
		i := v.prog[t.pc]
		t.pc++
		v.execute(t, i)
		if v.terminate {
			// Terminate only stops this invocation on this line of input; reset the terminate flag.
			v.terminate = false
			return
		}
	}
}

// Run executes the virtual machine on each line of input received. When the
// input closes, it signals to the loader that it has terminated by closing the
// shutdown channel.
func (v *VM) Run(_ uint32, lines <-chan string, shutdown chan<- struct{}) { glog.Infof("Starting program %s", v.name) defer close(shutdown) for line := range lines { v.processLine(line) } glog.Infof("Stopping program %s", v.name) } // New creates a new virtual machine with the given name, and compiler // artifacts for executable and data segments. func New(name string, obj *object, syslogUseCurrentYear bool) *VM { return &VM{ name: name, re: obj.re, str: obj.str, m: obj.m, prog: obj.prog, timeMemos: make(map[string]time.Time, 0), syslogUseCurrentYear: syslogUseCurrentYear, } } // DumpByteCode emits the program disassembly and program objects to string. func (v *VM) DumpByteCode(name string) string { b := new(bytes.Buffer) fmt.Fprintf(b, "Prog: %s\n", name) fmt.Fprintln(b, "Metrics") for i, m := range v.m { if m.Program == v.name { fmt.Fprintf(b, " %8d %s\n", i, m) } } fmt.Fprintln(b, "Regexps") for i, re := range v.re { fmt.Fprintf(b, " %8d /%s/\n", i, re) } fmt.Fprintln(b, "Strings") for i, str := range v.str { fmt.Fprintf(b, " %8d \"%s\"\n", i, str) } w := new(tabwriter.Writer) w.Init(b, 0, 0, 1, ' ', tabwriter.AlignRight) fmt.Fprintln(w, "disasm\tl\top\topnd\t") for n, i := range v.prog { fmt.Fprintf(w, "\t%d\t%s\t%v\t\n", n, opNames[i.op], i.opnd) } w.Flush() return b.String() }
// Copyright 2017 The go-vm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package vmware import ( "bytes" "fmt" "os/exec" "syscall" ) var vmrunPath = vmwareCmd("vmrun") // VMRun run the vmrun command with the app name and args. // Return the stdout result and cmd error. // // Usage: vmrun [AUTHENTICATION-FLAGS] COMMAND [PARAMETERS] // // AUTHENTICATION-FLAGS // -------------------- // These must appear before the command and any command parameters. // // -h <hostName> (not needed for Fusion) // -P <hostPort> (not needed for Fusion) // -T <hostType> (ws|fusion) // -u <userName in host OS> (not needed for Fusion) // -p <password in host OS> (not needed for Fusion) // -vp <password for encrypted virtual machine> // -gu <userName in guest OS> // -gp <password in guest OS> func VMRun(app string, arg ...string) (string, error) { // vmrun with nogui on VMware Fusion through at least 8.0.1 doesn't work right // if the umask is set to not allow world-readable permissions _ = syscall.Umask(022) cmd := exec.Command(vmrunPath, "-T", app) cmd.Args = append(cmd.Args, arg...) var stdout bytes.Buffer cmd.Stdout = &stdout if runErr := cmd.Run(); runErr != nil { if err := runErr.(*exec.ExitError); err != nil { return "", fmt.Errorf(stdout.String()) } return "", runErr } return stdout.String(), nil } vmrun: remove unnecessary comment and rename error variables Signed-off-by: Koichi Shiraishi <2e5bdfebde234ed3509bcfc18121c70b6631e207@gmail.com> // Copyright 2017 The go-vm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package vmware import ( "bytes" "fmt" "os/exec" "syscall" ) var vmrunPath = vmwareCmd("vmrun") // VMRun run the vmrun command with the app name and args. // Return the stdout result and cmd error. 
func VMRun(app string, arg ...string) (string, error) { // vmrun with nogui on VMware Fusion through at least 8.0.1 doesn't work right // if the umask is set to not allow world-readable permissions _ = syscall.Umask(022) cmd := exec.Command(vmrunPath, "-T", app) cmd.Args = append(cmd.Args, arg...) var stdout bytes.Buffer cmd.Stdout = &stdout err := cmd.Run() if err != nil { if runErr := err.(*exec.ExitError); runErr != nil { return "", fmt.Errorf(stdout.String()) } } return stdout.String(), err }
package main

import (
	"fmt"
)

func main() {
	code := "1 2 3 + eq? 'Hello World' \"Hello Josh\" [ dup inc ] 5 times"
	fmt.Println(Parse(code))
	BareEval("1 2 3 + *")
}

Added simple REPL

package main

import (
	"bufio"
	"fmt"
	"os"
)

// toString converts a byte slice (as returned by bufio.Reader.ReadLine) to a
// string.
func toString(bytes []uint8) string {
	// A direct conversion copies the bytes verbatim. The previous per-byte
	// string(byte(c)) loop was quadratic and re-encoded every byte >= 0x80
	// as a two-byte rune, mangling UTF-8 input.
	return string(bytes)
}

// promptLine prints prompt and reads one line from standard input, returning
// it without the trailing newline. On EOF or a read error an empty string is
// returned.
func promptLine(prompt string) string {
	fmt.Print(prompt)
	// NOTE(review): constructing a fresh bufio.Reader on every call can drop
	// input already buffered by a previous call — consider hoisting the
	// reader out of the loop; kept per-call here to preserve behavior.
	r := bufio.NewReader(os.Stdin)
	l, _, err := r.ReadLine()
	if err != nil {
		// l is nil on EOF/error, so the original code also yielded "".
		return ""
	}
	return toString(l)
}

// main runs a simple read-eval-print loop over the Vodka evaluator until the
// user types "quit".
func main() {
	fmt.Println("Vodka REPL, CTRL+C or type 'quit' to quit")
	stk := NewStack()
	tbl := BootedTable()
	for {
		line := promptLine(">> ")
		if line == "quit" {
			break
		}
		stk, tbl = Eval(line, stk, tbl)
		fmt.Printf("=> %s\n", stk.String())
	}
}
package wipro

import (
	"errors"
	"io"
)

var (
	ErrPrefix        = "proto: "
	ErrUnexpectedEOF = errors.New(ErrPrefix + "unexpected EOF")
	ErrConn          = errors.New(ErrPrefix + "network connection error")
)

// M is a message that can serialize itself to a Writer and deserialize
// itself from a Reader.
type M interface {
	Marshal(*Writer)
	Unmarshal(*Reader)
}

// Send marshals m and writes the entire buffer to conn.
func Send(m M, conn io.Writer) error {
	var w Writer
	m.Marshal(&w)
	if _, err := conn.Write(w.B); err != nil {
		return ErrConn
	}
	return nil
}

// Receive reads one size-prefixed message from conn and unmarshals it into m.
func Receive(conn io.Reader, m M) error {
	r := Reader{B: make([]byte, 4)}
	// io.ReadFull instead of a bare conn.Read: a single Read on a stream may
	// legally return fewer than 4 bytes, leaving the size prefix truncated.
	if _, err := io.ReadFull(conn, r.B); err != nil {
		return ErrConn
	}
	size := int(r.ReadInt32())
	// A corrupt negative size would make Grow's allocation panic.
	if size < 0 {
		return ErrUnexpectedEOF
	}
	r.Grow(size)
	if _, err := io.ReadAtLeast(conn, r.B[4:], size); err != nil {
		return ErrConn
	}
	r.Reset()
	m.Unmarshal(&r)
	return r.Err
}

// Writer accumulates big-endian encoded fields in B.
type Writer struct {
	B []byte
}

// Reader decodes big-endian fields from B; the first decode error is latched
// in Err and turns all subsequent reads into no-ops.
type Reader struct {
	B      []byte
	Offset int
	Err    error
}

func (w *Writer) WriteInt8(i int8) { w.B = append(w.B, byte(i)) }

func (r *Reader) ReadInt8() int8 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+1 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset++
	return int8(r.B[i])
}

func (w *Writer) WriteInt16(i int16) { w.B = append(w.B, byte(i>>8), byte(i)) }

func (r *Reader) ReadInt16() int16 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+2 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset += 2
	return int16(r.B[i])<<8 | int16(r.B[i+1])
}

func (w *Writer) WriteInt32(i int32) {
	w.B = append(w.B, byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
}

func (w *Writer) WriteUint32(i uint32) {
	w.B = append(w.B, byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
}

func (r *Reader) ReadInt32() int32 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+4 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset += 4
	return int32(r.B[i])<<24 | int32(r.B[i+1])<<16 | int32(r.B[i+2])<<8 | int32(r.B[i+3])
}

func (r *Reader) ReadUint32() uint32 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+4 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset += 4
	return uint32(r.B[i])<<24 | uint32(r.B[i+1])<<16 | uint32(r.B[i+2])<<8 | uint32(r.B[i+3])
}

func (w *Writer) WriteInt64(i int64) {
	w.B = append(w.B, byte(i>>56), byte(i>>48), byte(i>>40), byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
}

func (r *Reader) ReadInt64() int64 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+8 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset += 8
	return int64(r.B[i])<<56 | int64(r.B[i+1])<<48 | int64(r.B[i+2])<<40 | int64(r.B[i+3])<<32 |
		int64(r.B[i+4])<<24 | int64(r.B[i+5])<<16 | int64(r.B[i+6])<<8 | int64(r.B[i+7])
}

// WriteString writes a 16-bit length prefix followed by the raw bytes of s.
func (w *Writer) WriteString(s string) {
	w.WriteInt16(int16(len(s)))
	w.B = append(w.B, s...)
}

func (r *Reader) ReadString() string {
	l := int(r.ReadInt16())
	if r.Err != nil {
		return ""
	}
	if l < 0 {
		r.Err = ErrUnexpectedEOF
		return ""
	}
	i := r.Offset
	if i+l > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return ""
	}
	r.Offset += l
	return string(r.B[i : i+l])
}

// WriteBytes writes a 32-bit length prefix followed by bs.
func (w *Writer) WriteBytes(bs []byte) {
	w.WriteInt32(int32(len(bs)))
	w.B = append(w.B, bs...)
}

func (r *Reader) ReadBytes() []byte {
	l := int(r.ReadInt32())
	if r.Err != nil {
		return nil
	}
	if l < 0 {
		r.Err = ErrUnexpectedEOF
		return nil
	}
	i := r.Offset
	if i+l > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return nil
	}
	r.Offset += l
	return r.B[i : i+l]
}

// SetInt32 overwrites 4 bytes at offset with the big-endian encoding of i
// (used to back-patch size prefixes).
func (w *Writer) SetInt32(offset int, i int32) {
	w.B[offset] = byte(i >> 24)
	w.B[offset+1] = byte(i >> 16)
	w.B[offset+2] = byte(i >> 8)
	w.B[offset+3] = byte(i)
}

func (w *Writer) SetUint32(offset int, i uint32) {
	w.B[offset] = byte(i >> 24)
	w.B[offset+1] = byte(i >> 16)
	w.B[offset+2] = byte(i >> 8)
	w.B[offset+3] = byte(i)
}

// Grow extends the buffer by n bytes, preserving existing content.
func (r *Reader) Grow(n int) {
	b := make([]byte, len(r.B)+n)
	copy(b, r.B)
	r.B = b
}

// Reset rewinds the read position to the start of the buffer.
func (r *Reader) Reset() { r.Offset = 0 }

negative size is allowed in compressed message set

package wipro

import (
	"errors"
	"io"
)

var (
	ErrPrefix        = "proto: "
	ErrUnexpectedEOF = errors.New(ErrPrefix + "unexpected EOF")
	ErrConn          = errors.New(ErrPrefix + "network connection error")
)

// M is a message that can serialize itself to a Writer and deserialize
// itself from a Reader.
type M interface {
	Marshal(*Writer)
	Unmarshal(*Reader)
}

// Send marshals m and writes the entire buffer to conn.
func Send(m M, conn io.Writer) error {
	var w Writer
	m.Marshal(&w)
	if _, err := conn.Write(w.B); err != nil {
		return ErrConn
	}
	return nil
}

// Receive reads one size-prefixed message from conn and unmarshals it into m.
func Receive(conn io.Reader, m M) error {
	r := Reader{B: make([]byte, 4)}
	// io.ReadFull instead of a bare conn.Read: a single Read on a stream may
	// legally return fewer than 4 bytes, leaving the size prefix truncated.
	if _, err := io.ReadFull(conn, r.B); err != nil {
		return ErrConn
	}
	size := int(r.ReadInt32())
	// A corrupt negative size would make Grow's allocation panic.
	if size < 0 {
		return ErrUnexpectedEOF
	}
	r.Grow(size)
	if _, err := io.ReadAtLeast(conn, r.B[4:], size); err != nil {
		return ErrConn
	}
	r.Reset()
	m.Unmarshal(&r)
	return r.Err
}

// Writer accumulates big-endian encoded fields in B.
type Writer struct {
	B []byte
}

// Reader decodes big-endian fields from B; the first decode error is latched
// in Err and turns all subsequent reads into no-ops.
type Reader struct {
	B      []byte
	Offset int
	Err    error
}

func (w *Writer) WriteInt8(i int8) { w.B = append(w.B, byte(i)) }

func (r *Reader) ReadInt8() int8 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+1 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset++
	return int8(r.B[i])
}

func (w *Writer) WriteInt16(i int16) { w.B = append(w.B, byte(i>>8), byte(i)) }

func (r *Reader) ReadInt16() int16 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+2 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset += 2
	return int16(r.B[i])<<8 | int16(r.B[i+1])
}

func (w *Writer) WriteInt32(i int32) {
	w.B = append(w.B, byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
}

func (w *Writer) WriteUint32(i uint32) {
	w.B = append(w.B, byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
}

func (r *Reader) ReadInt32() int32 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+4 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset += 4
	return int32(r.B[i])<<24 | int32(r.B[i+1])<<16 | int32(r.B[i+2])<<8 | int32(r.B[i+3])
}

func (r *Reader) ReadUint32() uint32 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+4 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset += 4
	return uint32(r.B[i])<<24 | uint32(r.B[i+1])<<16 | uint32(r.B[i+2])<<8 | uint32(r.B[i+3])
}

func (w *Writer) WriteInt64(i int64) {
	w.B = append(w.B, byte(i>>56), byte(i>>48), byte(i>>40), byte(i>>32), byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
}

func (r *Reader) ReadInt64() int64 {
	if r.Err != nil {
		return 0
	}
	i := r.Offset
	if i+8 > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return 0
	}
	r.Offset += 8
	return int64(r.B[i])<<56 | int64(r.B[i+1])<<48 | int64(r.B[i+2])<<40 | int64(r.B[i+3])<<32 |
		int64(r.B[i+4])<<24 | int64(r.B[i+5])<<16 | int64(r.B[i+6])<<8 | int64(r.B[i+7])
}

// WriteString writes a 16-bit length prefix followed by the raw bytes of s.
func (w *Writer) WriteString(s string) {
	w.WriteInt16(int16(len(s)))
	w.B = append(w.B, s...)
}

func (r *Reader) ReadString() string {
	if r.Err != nil {
		return ""
	}
	l := int(r.ReadInt16())
	// A non-positive length is valid (e.g. compressed message sets) and
	// yields an empty string without flagging an error.
	if l <= 0 {
		return ""
	}
	i := r.Offset
	if i+l > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return ""
	}
	r.Offset += l
	return string(r.B[i : i+l])
}

// WriteBytes writes a 32-bit length prefix followed by bs.
func (w *Writer) WriteBytes(bs []byte) {
	w.WriteInt32(int32(len(bs)))
	w.B = append(w.B, bs...)
}

func (r *Reader) ReadBytes() []byte {
	if r.Err != nil {
		return nil
	}
	l := int(r.ReadInt32())
	// A non-positive length is valid (e.g. compressed message sets) and
	// yields nil without flagging an error.
	if l <= 0 {
		return nil
	}
	i := r.Offset
	if i+l > len(r.B) {
		r.Err = ErrUnexpectedEOF
		return nil
	}
	r.Offset += l
	return r.B[i : i+l]
}

// SetInt32 overwrites 4 bytes at offset with the big-endian encoding of i
// (used to back-patch size prefixes).
func (w *Writer) SetInt32(offset int, i int32) {
	w.B[offset] = byte(i >> 24)
	w.B[offset+1] = byte(i >> 16)
	w.B[offset+2] = byte(i >> 8)
	w.B[offset+3] = byte(i)
}

func (w *Writer) SetUint32(offset int, i uint32) {
	w.B[offset] = byte(i >> 24)
	w.B[offset+1] = byte(i >> 16)
	w.B[offset+2] = byte(i >> 8)
	w.B[offset+3] = byte(i)
}

// Grow extends the buffer by n bytes, preserving existing content.
func (r *Reader) Grow(n int) {
	b := make([]byte, len(r.B)+n)
	copy(b, r.B)
	r.B = b
}

// Reset rewinds the read position to the start of the buffer.
func (r *Reader) Reset() { r.Offset = 0 }
package japanese

import "fmt"

/* === WORDS === */

// Word is a vocabulary entry: its kanji spelling, kana reading, and an
// English gloss.
type Word struct {
	kanji   string
	kana    string
	english string
}

func (w *Word) Print() {
	fmt.Println(w.kana, w.kanji)
}

func (w *Word) GetWord() Word {
	return *w
}

func (w *Word) GetRoot() string {
	return w.kana
}

func (w *Word) GetLastKana() string {
	// get the last kana character in a word
	kanaRune := []rune(w.kana)
	return string(kanaRune[len(kanaRune)-1:])
}

func (w *Word) GetAllButLast() (kanji, kana string) {
	// get all but the last character of a word
	kanjiRune := []rune(w.kanji)
	kanaRune := []rune(w.kana)
	return string(kanjiRune[:len(kanjiRune)-1]), string(kanaRune[:len(kanaRune)-1])
}

/* === VERBS === */

type Verb struct {
	Word
}

type RuVerb struct {
	Verb
}

func (v *RuVerb) Negative() Word {
	// one exception for case "ある":
	if v.kanji == "ある" {
		return Word{"ない", "ない", "not " + v.english}
	}
	// drop the る and attach ない
	restOfKanji, restOfKana := v.GetAllButLast()
	return Word{restOfKanji + "ない", restOfKana + "ない", "not " + v.english}
}

func (v *RuVerb) Past() Word {
	// drop the る and attach た
	restOfKanji, restOfKana := v.GetAllButLast()
	return Word{restOfKanji + "た", restOfKana + "た", "did " + v.english}
}

type UVerb struct {
	Verb
}

func (v *UVerb) Negative() Word {
	lastCharacter := v.GetLastKana()
	restOfKanji, restOfKana := v.GetAllButLast()
	// if verb ends in う, replace う with わない
	if lastCharacter == "う" {
		extra := "わない"
		return Word{restOfKanji + extra, restOfKana + extra, "not " + v.english}
		// otherwise replace with the -a equivalent
	} else {
		original := []string{"つ", "く", "ゅ", "す", "ぬ", "ふ", "む", "ゆ", "ぐ", "ず", "づ", "ぶ", "ぷ", "る"}
		replace := []string{"た", "か", "ゃ", "さ", "な", "は", "ま", "や", "が", "ざ", "ざ", "ば", "ぱ", "ら"}
		for i, o := range original {
			if o == lastCharacter {
				extra := replace[i] + "ない"
				return Word{restOfKanji + extra, restOfKana + extra, "not " + v.english}
			}
		}
	}
	// return original word if all else fails
	return v.GetWord()
}

func (v *UVerb) Past() Word {
	/* Get the past-tense form of an U-verb */
	lastCharacter := v.GetLastKana()
	restOfKanji, restOfKana := v.GetAllButLast()
	// 行く is only an exception for this rule
	if v.kanji == "行く" {
		// FIX: the gloss read "did" + english with no separating space,
		// unlike every other past-tense branch below.
		return Word{"行った", "いった", "did " + v.english}
	}
	switch lastCharacter {
	case "す":
		return Word{restOfKanji + "した", restOfKana + "した", "did " + v.english}
	case "く", "ぐ":
		return Word{restOfKanji + "いた", restOfKana + "いた", "did " + v.english}
	case "む", "ぶ", "ぬ":
		return Word{restOfKanji + "んだ", restOfKana + "んだ", "did " + v.english}
	case "る", "う", "つ":
		return Word{restOfKanji + "った", restOfKana + "った", "did " + v.english}
	}
	// otherwise we just return the same word, because we don't know what to do:
	return v.GetWord()
}

type ExceptionVerb struct {
	Verb
}

func (v *ExceptionVerb) Negative() Word {
	switch v.kanji {
	case "する":
		return Word{"しない", "しない", "not " + v.english}
	case "くる":
		return Word{"こない", "こない", "not " + v.english}
	}
	return v.GetWord()
}

func (v *ExceptionVerb) Past() Word {
	if v.kanji == "する" {
		return Word{"した", "した", "did " + v.english}
	}
	if v.kanji == "くる" {
		return Word{"きた", "きた", "did " + v.english}
	}
	return v.GetWord()
}

/* === ADJECTIVES === */

type Adjective struct {
	Word
}

type NaAdjective struct {
	Adjective
}

Fix aru negative exception case

package japanese

import "fmt"

/* === WORDS === */

// Word is a vocabulary entry: its kanji spelling, kana reading, and an
// English gloss.
type Word struct {
	kanji   string
	kana    string
	english string
}

func (w *Word) Print() {
	fmt.Println(w.kana, w.kanji)
}

func (w *Word) GetWord() Word {
	return *w
}

func (w *Word) GetRoot() string {
	return w.kana
}

func (w *Word) GetLastKana() string {
	// get the last kana character in a word
	kanaRune := []rune(w.kana)
	return string(kanaRune[len(kanaRune)-1:])
}

func (w *Word) GetAllButLast() (kanji, kana string) {
	// get all but the last character of a word
	kanjiRune := []rune(w.kanji)
	kanaRune := []rune(w.kana)
	return string(kanjiRune[:len(kanjiRune)-1]), string(kanaRune[:len(kanaRune)-1])
}

/* === VERBS === */

type Verb struct {
	Word
}

type RuVerb struct {
	Verb
}

func (v *RuVerb) Negative() Word {
	// drop the る and attach ない
	restOfKanji, restOfKana := v.GetAllButLast()
	return Word{restOfKanji + "ない", restOfKana + "ない", "not " + v.english}
}

func (v *RuVerb) Past() Word {
	// drop the る and attach た
	restOfKanji, restOfKana := v.GetAllButLast()
	return Word{restOfKanji + "た", restOfKana + "た", "did " + v.english}
}

type UVerb struct {
	Verb
}

func (v *UVerb) Negative() Word {
	lastCharacter := v.GetLastKana()
	restOfKanji, restOfKana := v.GetAllButLast()
	// one exception for case "ある":
	if v.kanji == "ある" {
		return Word{"ない", "ない", "not " + v.english}
	}
	// if verb ends in う, replace う with わない
	if lastCharacter == "う" {
		extra := "わない"
		return Word{restOfKanji + extra, restOfKana + extra, "not " + v.english}
		// otherwise replace with the -a equivalent
	} else {
		original := []string{"つ", "く", "ゅ", "す", "ぬ", "ふ", "む", "ゆ", "ぐ", "ず", "づ", "ぶ", "ぷ", "る"}
		replace := []string{"た", "か", "ゃ", "さ", "な", "は", "ま", "や", "が", "ざ", "ざ", "ば", "ぱ", "ら"}
		for i, o := range original {
			if o == lastCharacter {
				extra := replace[i] + "ない"
				return Word{restOfKanji + extra, restOfKana + extra, "not " + v.english}
			}
		}
	}
	// return original word if all else fails
	return v.GetWord()
}

func (v *UVerb) Past() Word {
	/* Get the past-tense form of an U-verb */
	lastCharacter := v.GetLastKana()
	restOfKanji, restOfKana := v.GetAllButLast()
	// 行く is only an exception for this rule
	if v.kanji == "行く" {
		// FIX: the gloss read "did" + english with no separating space,
		// unlike every other past-tense branch below.
		return Word{"行った", "いった", "did " + v.english}
	}
	switch lastCharacter {
	case "す":
		return Word{restOfKanji + "した", restOfKana + "した", "did " + v.english}
	case "く", "ぐ":
		return Word{restOfKanji + "いた", restOfKana + "いた", "did " + v.english}
	case "む", "ぶ", "ぬ":
		return Word{restOfKanji + "んだ", restOfKana + "んだ", "did " + v.english}
	case "る", "う", "つ":
		return Word{restOfKanji + "った", restOfKana + "った", "did " + v.english}
	}
	// otherwise we just return the same word, because we don't know what to do:
	return v.GetWord()
}

type ExceptionVerb struct {
	Verb
}

func (v *ExceptionVerb) Negative() Word {
	switch v.kanji {
	case "する":
		return Word{"しない", "しない", "not " + v.english}
	case "くる":
		return Word{"こない", "こない", "not " + v.english}
	}
	return v.GetWord()
}

func (v *ExceptionVerb) Past() Word {
	if v.kanji == "する" {
		return Word{"した", "した", "did " + v.english}
	}
	if v.kanji == "くる" {
		return Word{"きた", "きた", "did " + v.english}
	}
	return v.GetWord()
}

/* === ADJECTIVES === */

type Adjective struct {
	Word
}

type NaAdjective struct {
	Adjective
}
// Discordgo - Discord bindings for Go // Available at https://github.com/bwmarrin/discordgo // Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file contains low level functions for interacting with the Discord // data websocket interface. package discordgo import ( "bytes" "compress/zlib" "encoding/json" "errors" "fmt" "io" "log" "net/http" "reflect" "runtime" "time" "github.com/gorilla/websocket" ) var GATEWAY_VERSION int = 4 type handshakeProperties struct { OS string `json:"$os"` Browser string `json:"$browser"` Device string `json:"$device"` Referer string `json:"$referer"` ReferringDomain string `json:"$referring_domain"` } type handshakeData struct { Token string `json:"token"` Properties handshakeProperties `json:"properties"` LargeThreshold int `json:"large_threshold"` Compress bool `json:"compress"` } type handshakeOp struct { Op int `json:"op"` Data handshakeData `json:"d"` } // Open opens a websocket connection to Discord. func (s *Session) Open() (err error) { s.Lock() defer func() { if err != nil { s.Unlock() } }() s.VoiceConnections = make(map[string]*VoiceConnection) if s.wsConn != nil { err = errors.New("Web socket already opened.") return } // Get the gateway to use for the Websocket connection g, err := s.Gateway() if err != nil { return } // Add the version and encoding to the URL g = g + fmt.Sprintf("?v=%v&encoding=json", GATEWAY_VERSION) header := http.Header{} header.Add("accept-encoding", "zlib") // TODO: See if there's a use for the http response. 
// conn, response, err := websocket.DefaultDialer.Dial(session.Gateway, nil) s.wsConn, _, err = websocket.DefaultDialer.Dial(g, header) if err != nil { return } err = s.wsConn.WriteJSON(handshakeOp{2, handshakeData{s.Token, handshakeProperties{runtime.GOOS, "Discordgo v" + VERSION, "", "", ""}, 250, s.Compress}}) if err != nil { return } // Create listening outside of listen, as it needs to happen inside the mutex // lock. s.listening = make(chan interface{}) go s.listen(s.wsConn, s.listening) s.Unlock() s.initialize() s.handle(&Connect{}) return } // Close closes a websocket and stops all listening/heartbeat goroutines. // TODO: Add support for Voice WS/UDP connections func (s *Session) Close() (err error) { s.Lock() s.DataReady = false if s.listening != nil { close(s.listening) s.listening = nil } if s.wsConn != nil { err = s.wsConn.Close() s.wsConn = nil } s.Unlock() s.handle(&Disconnect{}) return } // listen polls the websocket connection for events, it will stop when // the listening channel is closed, or an error occurs. func (s *Session) listen(wsConn *websocket.Conn, listening <-chan interface{}) { for { messageType, message, err := wsConn.ReadMessage() if err != nil { // Detect if we have been closed manually. If a Close() has already // happened, the websocket we are listening on will be different to the // current session. s.RLock() sameConnection := s.wsConn == wsConn s.RUnlock() if sameConnection { // There has been an error reading, Close() the websocket so that // OnDisconnect is fired. err := s.Close() if err != nil { log.Println("error closing session connection: ", err) } // Attempt to reconnect, with expenonential backoff up to 10 minutes. 
if s.ShouldReconnectOnError { wait := time.Duration(1) for { if s.Open() == nil { return } <-time.After(wait * time.Second) wait *= 2 if wait > 600 { wait = 600 } } } } return } select { case <-listening: return default: go s.event(messageType, message) } } } type heartbeatOp struct { Op int `json:"op"` Data int `json:"d"` } // heartbeat sends regular heartbeats to Discord so it knows the client // is still connected. If you do not send these heartbeats Discord will // disconnect the websocket connection after a few seconds. func (s *Session) heartbeat(wsConn *websocket.Conn, listening <-chan interface{}, i time.Duration) { if listening == nil || wsConn == nil { return } s.Lock() s.DataReady = true s.Unlock() var err error ticker := time.NewTicker(i * time.Millisecond) for { err = wsConn.WriteJSON(heartbeatOp{1, int(time.Now().Unix())}) if err != nil { log.Println("Error sending heartbeat:", err) return } select { case <-ticker.C: // continue loop and send heartbeat case <-listening: return } } } type updateStatusGame struct { Name string `json:"name"` } type updateStatusData struct { IdleSince *int `json:"idle_since"` Game *updateStatusGame `json:"game"` } type updateStatusOp struct { Op int `json:"op"` Data updateStatusData `json:"d"` } // UpdateStatus is used to update the authenticated user's status. // If idle>0 then set status to idle. If game>0 then set game. // if otherwise, set status to active, and no game. func (s *Session) UpdateStatus(idle int, game string) (err error) { s.RLock() defer s.RUnlock() if s.wsConn == nil { return errors.New("No websocket connection exists.") } var usd updateStatusData if idle > 0 { usd.IdleSince = &idle } if game != "" { usd.Game = &updateStatusGame{game} } err = s.wsConn.WriteJSON(updateStatusOp{3, usd}) return } // Front line handler for all Websocket Events. Determines the // event type and passes the message along to the next handler. // event is the front line handler for all events. 
This needs to be // broken up into smaller functions to be more idiomatic Go. // Events will be handled by any implemented handler in Session. // All unhandled events will then be handled by OnEvent. func (s *Session) event(messageType int, message []byte) { var err error var reader io.Reader reader = bytes.NewBuffer(message) if messageType == 2 { z, err1 := zlib.NewReader(reader) if err1 != nil { log.Println(fmt.Sprintf("Error uncompressing message type %d: %s", messageType, err1)) return } defer func() { err := z.Close() if err != nil { log.Println("error closing zlib:", err) } }() reader = z } var e *Event decoder := json.NewDecoder(reader) if err = decoder.Decode(&e); err != nil { log.Println(fmt.Sprintf("Error decoding message type %d: %s", messageType, err)) return } if s.Debug { printEvent(e) } i := eventToInterface[e.Type] if i != nil { // Create a new instance of the event type. i = reflect.New(reflect.TypeOf(i)).Interface() // Attempt to unmarshal our event. // If there is an error we should handle the event itself. if err = json.Unmarshal(e.RawData, i); err != nil { log.Printf("error unmarshalling %s event, %s\n", e.Type, err) // Ready events must fire, even if they are empty. if e.Type != "READY" { i = nil } } } else { log.Println("Unknown event.") i = nil } if i != nil { s.handle(i) } e.Struct = i s.handle(e) return } // ------------------------------------------------------------------------------------------------ // Code related to voice connections that initiate over the data websocket // ------------------------------------------------------------------------------------------------ // A VoiceServerUpdate stores the data received during the Voice Server Update // data websocket event. This data is used during the initial Voice Channel // join handshaking. 
type VoiceServerUpdate struct { Token string `json:"token"` GuildID string `json:"guild_id"` Endpoint string `json:"endpoint"` } type voiceChannelJoinData struct { GuildID *string `json:"guild_id"` ChannelID *string `json:"channel_id"` SelfMute bool `json:"self_mute"` SelfDeaf bool `json:"self_deaf"` } type voiceChannelJoinOp struct { Op int `json:"op"` Data voiceChannelJoinData `json:"d"` } // ChannelVoiceJoin joins the session user to a voice channel. // // gID : Guild ID of the channel to join. // cID : Channel ID of the channel to join. // mute : If true, you will be set to muted upon joining. // deaf : If true, you will be set to deafened upon joining. func (s *Session) ChannelVoiceJoin(gID, cID string, mute, deaf bool) (voice *VoiceConnection, err error) { // If a voice connection already exists for this guild then // return that connection. If the channel differs, also change channels. var ok bool if voice, ok = s.VoiceConnections[gID]; ok && voice.GuildID != "" { //TODO: consider a better variable than GuildID in the above check // to verify if this connection is valid or not. if voice.ChannelID != cID { err = voice.ChangeChannel(cID, mute, deaf) } return } // Create a new voice session // TODO review what all these things are for.... voice = &VoiceConnection{ GuildID: gID, ChannelID: cID, deaf: deaf, mute: mute, session: s, } // Store voice in VoiceConnections map for this GuildID s.VoiceConnections[gID] = voice // Send the request to Discord that we want to join the voice channel data := voiceChannelJoinOp{4, voiceChannelJoinData{&gID, &cID, mute, deaf}} err = s.wsConn.WriteJSON(data) if err != nil { s.log(LogInformational, "Deleting VoiceConnection %s", gID) delete(s.VoiceConnections, gID) return } // doesn't exactly work perfect yet.. 
TODO err = voice.waitUntilConnected() if err != nil { voice.Close() s.log(LogInformational, "Deleting VoiceConnection %s", gID) delete(s.VoiceConnections, gID) return } return } // onVoiceStateUpdate handles Voice State Update events on the data websocket. func (s *Session) onVoiceStateUpdate(se *Session, st *VoiceStateUpdate) { // If we don't have a connection for the channel, don't bother if st.ChannelID == "" { return } // Check if we have a voice connection to update voice, exists := s.VoiceConnections[st.GuildID] if !exists { return } // Need to have this happen at login and store it in the Session // TODO : This should be done upon connecting to Discord, or // be moved to a small helper function self, err := s.User("@me") // TODO: move to Login/New if err != nil { log.Println(err) return } // We only care about events that are about us if st.UserID != self.ID { return } // Store the SessionID for later use. voice.UserID = self.ID // TODO: Review voice.sessionID = st.SessionID } // onVoiceServerUpdate handles the Voice Server Update data websocket event. // // This is also fired if the Guild's voice region changes while connected // to a voice channel. In that case, need to re-establish connection to // the new region endpoint. func (s *Session) onVoiceServerUpdate(se *Session, st *VoiceServerUpdate) { voice, exists := s.VoiceConnections[st.GuildID] // If no VoiceConnection exists, just skip this if !exists { return } // If currently connected to voice ws/udp, then disconnect. // Has no effect if not connected. voice.Close() // Store values for later use voice.token = st.Token voice.endpoint = st.Endpoint voice.GuildID = st.GuildID // Open a conenction to the voice server err := voice.open() if err != nil { s.log(LogError, "onVoiceServerUpdate voice.open, ", err) } } Clean up Gateway API Event handling. // Discordgo - Discord bindings for Go // Available at https://github.com/bwmarrin/discordgo // Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. 
All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file contains low level functions for interacting with the Discord // data websocket interface. package discordgo import ( "bytes" "compress/zlib" "encoding/json" "errors" "fmt" "io" "log" "net/http" "reflect" "runtime" "time" "github.com/gorilla/websocket" ) var GATEWAY_VERSION int = 4 type handshakeProperties struct { OS string `json:"$os"` Browser string `json:"$browser"` Device string `json:"$device"` Referer string `json:"$referer"` ReferringDomain string `json:"$referring_domain"` } type handshakeData struct { Token string `json:"token"` Properties handshakeProperties `json:"properties"` LargeThreshold int `json:"large_threshold"` Compress bool `json:"compress"` } type handshakeOp struct { Op int `json:"op"` Data handshakeData `json:"d"` } // Open opens a websocket connection to Discord. func (s *Session) Open() (err error) { s.Lock() defer func() { if err != nil { s.Unlock() } }() s.VoiceConnections = make(map[string]*VoiceConnection) if s.wsConn != nil { err = errors.New("Web socket already opened.") return } // Get the gateway to use for the Websocket connection g, err := s.Gateway() if err != nil { return } // Add the version and encoding to the URL g = g + fmt.Sprintf("?v=%v&encoding=json", GATEWAY_VERSION) header := http.Header{} header.Add("accept-encoding", "zlib") // TODO: See if there's a use for the http response. // conn, response, err := websocket.DefaultDialer.Dial(session.Gateway, nil) s.wsConn, _, err = websocket.DefaultDialer.Dial(g, header) if err != nil { return } err = s.wsConn.WriteJSON(handshakeOp{2, handshakeData{s.Token, handshakeProperties{runtime.GOOS, "Discordgo v" + VERSION, "", "", ""}, 250, s.Compress}}) if err != nil { return } // Create listening outside of listen, as it needs to happen inside the mutex // lock. 
s.listening = make(chan interface{}) go s.listen(s.wsConn, s.listening) s.Unlock() s.initialize() s.handle(&Connect{}) return } // Close closes a websocket and stops all listening/heartbeat goroutines. // TODO: Add support for Voice WS/UDP connections func (s *Session) Close() (err error) { s.Lock() s.DataReady = false if s.listening != nil { close(s.listening) s.listening = nil } if s.wsConn != nil { err = s.wsConn.Close() s.wsConn = nil } s.Unlock() s.handle(&Disconnect{}) return } // listen polls the websocket connection for events, it will stop when // the listening channel is closed, or an error occurs. func (s *Session) listen(wsConn *websocket.Conn, listening <-chan interface{}) { for { messageType, message, err := wsConn.ReadMessage() if err != nil { // Detect if we have been closed manually. If a Close() has already // happened, the websocket we are listening on will be different to the // current session. s.RLock() sameConnection := s.wsConn == wsConn s.RUnlock() if sameConnection { // There has been an error reading, Close() the websocket so that // OnDisconnect is fired. err := s.Close() if err != nil { log.Println("error closing session connection: ", err) } // Attempt to reconnect, with expenonential backoff up to 10 minutes. if s.ShouldReconnectOnError { wait := time.Duration(1) for { if s.Open() == nil { return } <-time.After(wait * time.Second) wait *= 2 if wait > 600 { wait = 600 } } } } return } select { case <-listening: return default: // TODO make s.event a variable that points to a function // this way it will be possible for an end-user to write // a completely custom event handler if needed. go s.onEvent(messageType, message) } } } type heartbeatOp struct { Op int `json:"op"` Data int `json:"d"` } // heartbeat sends regular heartbeats to Discord so it knows the client // is still connected. If you do not send these heartbeats Discord will // disconnect the websocket connection after a few seconds. 
func (s *Session) heartbeat(wsConn *websocket.Conn, listening <-chan interface{}, i time.Duration) { if listening == nil || wsConn == nil { return } s.Lock() s.DataReady = true s.Unlock() var err error ticker := time.NewTicker(i * time.Millisecond) for { err = wsConn.WriteJSON(heartbeatOp{1, int(time.Now().Unix())}) if err != nil { log.Println("Error sending heartbeat:", err) return } select { case <-ticker.C: // continue loop and send heartbeat case <-listening: return } } } type updateStatusGame struct { Name string `json:"name"` } type updateStatusData struct { IdleSince *int `json:"idle_since"` Game *updateStatusGame `json:"game"` } type updateStatusOp struct { Op int `json:"op"` Data updateStatusData `json:"d"` } // UpdateStatus is used to update the authenticated user's status. // If idle>0 then set status to idle. If game>0 then set game. // if otherwise, set status to active, and no game. func (s *Session) UpdateStatus(idle int, game string) (err error) { s.RLock() defer s.RUnlock() if s.wsConn == nil { return errors.New("No websocket connection exists.") } var usd updateStatusData if idle > 0 { usd.IdleSince = &idle } if game != "" { usd.Game = &updateStatusGame{game} } err = s.wsConn.WriteJSON(updateStatusOp{3, usd}) return } // onEvent is the "event handler" for all messages received on the // Discord Gateway API websocket connection. // // If you use the AddHandler() function to register a handler for a // specific event this function will pass the event along to that handler. // // If you use the AddHandler() function to register a handler for the // "OnEvent" event then all events will be passed to that handler. // // TODO: You may also register a custom event handler entirely using... func (s *Session) onEvent(messageType int, message []byte) { var err error var reader io.Reader reader = bytes.NewBuffer(message) // If this is a compressed message, uncompress it. 
if messageType == 2 { z, err := zlib.NewReader(reader) if err != nil { s.log(LogError, "error uncompressing websocket message, %s", err) return } defer func() { err := z.Close() if err != nil { s.log(LogWarning, "error closing zlib, %s", err) } }() reader = z } // Decode the event into an Event struct. var e *Event decoder := json.NewDecoder(reader) if err = decoder.Decode(&e); err != nil { s.log(LogError, "error decoding websocket message, %s", err) return } if s.Debug { // TODO: refactor using s.log() printEvent(e) } // Map event to registered event handlers and pass it along // to any registered functions i := eventToInterface[e.Type] if i != nil { // Create a new instance of the event type. i = reflect.New(reflect.TypeOf(i)).Interface() // Attempt to unmarshal our event. if err = json.Unmarshal(e.RawData, i); err != nil { s.log(LogError, "error unmarshalling %s event, %s", e.Type, err) } // Send event to any registered event handlers for it's type. // Because the above doesn't cancel this, in case of an error // the struct could be partially populated or at default values. // However, most errors are due to a single field and I feel // it's better to pass along what we received than nothing at all. // TODO: Think about that decision :) // Either way, READY events must fire, even with errors. s.handle(i) } else { s.log(LogWarning, "unknown event type %s", e.Type) printEvent(e) } // Emit event to the OnEvent handler e.Struct = i s.handle(e) } // ------------------------------------------------------------------------------------------------ // Code related to voice connections that initiate over the data websocket // ------------------------------------------------------------------------------------------------ // A VoiceServerUpdate stores the data received during the Voice Server Update // data websocket event. This data is used during the initial Voice Channel // join handshaking. 
type VoiceServerUpdate struct { Token string `json:"token"` GuildID string `json:"guild_id"` Endpoint string `json:"endpoint"` } type voiceChannelJoinData struct { GuildID *string `json:"guild_id"` ChannelID *string `json:"channel_id"` SelfMute bool `json:"self_mute"` SelfDeaf bool `json:"self_deaf"` } type voiceChannelJoinOp struct { Op int `json:"op"` Data voiceChannelJoinData `json:"d"` } // ChannelVoiceJoin joins the session user to a voice channel. // // gID : Guild ID of the channel to join. // cID : Channel ID of the channel to join. // mute : If true, you will be set to muted upon joining. // deaf : If true, you will be set to deafened upon joining. func (s *Session) ChannelVoiceJoin(gID, cID string, mute, deaf bool) (voice *VoiceConnection, err error) { // If a voice connection already exists for this guild then // return that connection. If the channel differs, also change channels. var ok bool if voice, ok = s.VoiceConnections[gID]; ok && voice.GuildID != "" { //TODO: consider a better variable than GuildID in the above check // to verify if this connection is valid or not. if voice.ChannelID != cID { err = voice.ChangeChannel(cID, mute, deaf) } return } // Create a new voice session // TODO review what all these things are for.... voice = &VoiceConnection{ GuildID: gID, ChannelID: cID, deaf: deaf, mute: mute, session: s, } // Store voice in VoiceConnections map for this GuildID s.VoiceConnections[gID] = voice // Send the request to Discord that we want to join the voice channel data := voiceChannelJoinOp{4, voiceChannelJoinData{&gID, &cID, mute, deaf}} err = s.wsConn.WriteJSON(data) if err != nil { s.log(LogInformational, "Deleting VoiceConnection %s", gID) delete(s.VoiceConnections, gID) return } // doesn't exactly work perfect yet.. 
TODO err = voice.waitUntilConnected() if err != nil { voice.Close() s.log(LogInformational, "Deleting VoiceConnection %s", gID) delete(s.VoiceConnections, gID) return } return } // onVoiceStateUpdate handles Voice State Update events on the data websocket. func (s *Session) onVoiceStateUpdate(se *Session, st *VoiceStateUpdate) { // If we don't have a connection for the channel, don't bother if st.ChannelID == "" { return } // Check if we have a voice connection to update voice, exists := s.VoiceConnections[st.GuildID] if !exists { return } // Need to have this happen at login and store it in the Session // TODO : This should be done upon connecting to Discord, or // be moved to a small helper function self, err := s.User("@me") // TODO: move to Login/New if err != nil { log.Println(err) return } // We only care about events that are about us if st.UserID != self.ID { return } // Store the SessionID for later use. voice.UserID = self.ID // TODO: Review voice.sessionID = st.SessionID } // onVoiceServerUpdate handles the Voice Server Update data websocket event. // // This is also fired if the Guild's voice region changes while connected // to a voice channel. In that case, need to re-establish connection to // the new region endpoint. func (s *Session) onVoiceServerUpdate(se *Session, st *VoiceServerUpdate) { voice, exists := s.VoiceConnections[st.GuildID] // If no VoiceConnection exists, just skip this if !exists { return } // If currently connected to voice ws/udp, then disconnect. // Has no effect if not connected. voice.Close() // Store values for later use voice.token = st.Token voice.endpoint = st.Endpoint voice.GuildID = st.GuildID // Open a conenction to the voice server err := voice.open() if err != nil { s.log(LogError, "onVoiceServerUpdate voice.open, ", err) } }
package terminal import ( "fmt" "strings" ) type Table interface { Print(rows [][]string) } type PrintableTable struct { ui UI header []string headerPrinted bool maxSizes []int } func NewTable(ui UI, header []string) Table { return &PrintableTable{ ui: ui, header: header, maxSizes: make([]int, len(header)), } } func (t *PrintableTable) Print(rows [][]string) { for _, row := range append(rows, t.header) { t.calculateMaxSize(row) } if t.headerPrinted == false { t.printHeader() t.headerPrinted = true } for _, line := range rows { t.printRow(line) } } func (t *PrintableTable) calculateMaxSize(row []string) { for index, value := range row { cellLength := len(decolorize(value)) if t.maxSizes[index] < cellLength { t.maxSizes[index] = cellLength } } } func (t *PrintableTable) printHeader() { output := "" for col, value := range t.header { output = output + t.cellValue(col, HeaderColor(value)) } t.ui.Say(output) } func (t *PrintableTable) printRow(row []string) { output := "" for col, value := range row { if col == 0 { value = TableContentHeaderColor(value) } else { value = TableContentColor(value) } output = output + t.cellValue(col, value) } t.ui.Say(output) } func (t *PrintableTable) cellValue(col int, value string) string { padding := strings.Repeat(" ", t.maxSizes[col]-len(decolorize(value))) return fmt.Sprintf("%s%s ", value, padding) } trim padding from last table column [finishes #61352360] package terminal import ( "fmt" "strings" ) type Table interface { Print(rows [][]string) } type PrintableTable struct { ui UI header []string headerPrinted bool maxSizes []int } func NewTable(ui UI, header []string) Table { return &PrintableTable{ ui: ui, header: header, maxSizes: make([]int, len(header)), } } func (t *PrintableTable) Print(rows [][]string) { for _, row := range append(rows, t.header) { t.calculateMaxSize(row) } if t.headerPrinted == false { t.printHeader() t.headerPrinted = true } for _, line := range rows { t.printRow(line) } } func (t *PrintableTable) 
calculateMaxSize(row []string) {
	for index, value := range row {
		// measure the cell without ANSI color codes so colored cells
		// don't inflate the column width
		cellLength := len(decolorize(value))
		if t.maxSizes[index] < cellLength {
			t.maxSizes[index] = cellLength
		}
	}
}

// printHeader prints each header cell using the header color.
func (t *PrintableTable) printHeader() {
	output := ""
	for col, value := range t.header {
		output = output + t.cellValue(col, HeaderColor(value))
	}
	t.ui.Say(output)
}

// printRow prints one row; the first column uses the content-header
// color, the remaining columns use the regular content color.
func (t *PrintableTable) printRow(row []string) {
	output := ""
	for col, value := range row {
		if col == 0 {
			value = TableContentHeaderColor(value)
		} else {
			value = TableContentColor(value)
		}
		output = output + t.cellValue(col, value)
	}
	t.ui.Say(output)
}

// cellValue pads value out to the column's maximum width; the last
// column is left unpadded so printed lines carry no trailing whitespace.
func (t *PrintableTable) cellValue(col int, value string) string {
	padding := ""
	if col < len(t.header)-1 {
		padding = strings.Repeat(" ", t.maxSizes[col]-len(decolorize(value)))
	}
	return fmt.Sprintf("%s%s ", value, padding)
}
// Package xattr provides a simple interface to user extended attributes on Linux and OSX. // Support for xattrs is filesystem dependant, so not a given even if you are running one of those operating systems. // // On Linux you have to edit /etc/fstab to include "user_xattr". Also, Linux extended attributes have a manditory // prefix of "user.". This is prepended transparently for Get/Set/Remove and hidden in List. package xattr // XAttrError records an error and the operation, file path and attribute that caused it. type XAttrError struct { Op string Path string Attr string Err error } func (e *XAttrError) Error() string { return e.Op + " " + e.Path + " " + e.Attr + ": " + e.Err.Error() } // Returns whether the error is known to report that a extended attribute does not exist. func IsNotExist(err error) bool { e, ok := err.(*XAttrError) if ok { return isNotExist(e) } return false } // Converts an array of NUL terminated UTF-8 strings // to a []string. func nullTermToStrings(buf []byte) (result []string) { offset := 0 for index, b := range buf { if b == 0 { result = append(result, string(buf[offset:index])) offset = index + 1 } } return } // Retrieves extended attribute data associated with path. func Get(path, attr string) ([]byte, error) { attr = prefix + attr // find size size, err := get(path, attr, nil) if err != nil { return nil, &XAttrError{"getxattr", path, attr, err} } if size == 0 { return []byte{}, nil } // read into buffer of that size buf := make([]byte, size) size, err = get(path, attr, buf) if err != nil { return nil, &XAttrError{"getxattr", path, attr, err} } return buf[:size], nil } // Retrieves a list of names of extended attributes associated with path. 
func List(path string) ([]string, error) { // find size size, err := list(path, nil) if err != nil { return nil, &XAttrError{"listxattr", path, "", err} } if size == 0 { return []string{}, nil } // read into buffer of that size buf := make([]byte, size) size, err = list(path, buf) if err != nil { return nil, &XAttrError{"listxattr", path, "", err} } return stripPrefix(nullTermToStrings(buf[:size])), nil } // Associates data as an extended attribute of path. func Set(path, attr string, data []byte) error { attr = prefix + attr if err := set(path, attr, data, 0); err != nil { return &XAttrError{"setxattr", path, attr, err} } return nil } // Removes the extended attribute. func Remove(path, attr string) error { attr = prefix + attr if err := remove(path, attr); err != nil { return &XAttrError{"removexattr", path, attr, err} } return nil } add GetTo: efficient variant of Get // Package xattr provides a simple interface to user extended attributes on Linux and OSX. // Support for xattrs is filesystem dependant, so not a given even if you are running one of those operating systems. // // On Linux you have to edit /etc/fstab to include "user_xattr". Also, Linux extended attributes have a manditory // prefix of "user.". This is prepended transparently for Get/Set/Remove and hidden in List. package xattr // XAttrError records an error and the operation, file path and attribute that caused it. type XAttrError struct { Op string Path string Attr string Err error } func (e *XAttrError) Error() string { return e.Op + " " + e.Path + " " + e.Attr + ": " + e.Err.Error() } // Returns whether the error is known to report that a extended attribute does not exist. func IsNotExist(err error) bool { e, ok := err.(*XAttrError) if ok { return isNotExist(e) } return false } // Converts an array of NUL terminated UTF-8 strings // to a []string. 
func nullTermToStrings(buf []byte) (result []string) {
	offset := 0
	for index, b := range buf {
		if b == 0 {
			result = append(result, string(buf[offset:index]))
			offset = index + 1
		}
	}
	return
}

// Get retrieves extended attribute data associated with path.
func Get(path, attr string) ([]byte, error) {
	attr = prefix + attr
	// find size
	size, err := get(path, attr, nil)
	if err != nil {
		return nil, &XAttrError{"getxattr", path, attr, err}
	}
	if size == 0 {
		return []byte{}, nil
	}
	// read into buffer of that size
	buf := make([]byte, size)
	size, err = get(path, attr, buf)
	if err != nil {
		return nil, &XAttrError{"getxattr", path, attr, err}
	}
	return buf[:size], nil
}

// GetTo retrieves extended attribute data associated with path into dest. It
// returns the number of bytes retrieved or a non-nil error.
//
// If the attribute size is unknown the caller should call GetTo with an empty
// buffer or guess it. If the buffer is too short for the value, GetTo returns
// an error.
//
// GetTo is similar to Get but more efficient, because it issues one
// getxattr(2) syscall per call, doesn't allocate memory for attribute data and
// allows buffer reuse.
func GetTo(path, attr string, dest []byte) (n int, err error) {
	attr = prefix + attr
	return get(path, attr, dest)
}

// List retrieves a list of names of extended attributes associated with path.
func List(path string) ([]string, error) {
	// find size
	size, err := list(path, nil)
	if err != nil {
		return nil, &XAttrError{"listxattr", path, "", err}
	}
	if size == 0 {
		return []string{}, nil
	}
	// read into buffer of that size
	buf := make([]byte, size)
	size, err = list(path, buf)
	if err != nil {
		return nil, &XAttrError{"listxattr", path, "", err}
	}
	return stripPrefix(nullTermToStrings(buf[:size])), nil
}

// Set associates data as an extended attribute of path.
func Set(path, attr string, data []byte) error {
	attr = prefix + attr
	if err := set(path, attr, data, 0); err != nil {
		return &XAttrError{"setxattr", path, attr, err}
	}
	return nil
}

// Remove removes the extended attribute.
func Remove(path, attr string) error {
	attr = prefix + attr
	if err := remove(path, attr); err != nil {
		return &XAttrError{"removexattr", path, attr, err}
	}
	return nil
}
package main

import (
	"fmt"
	"os"

	"model"
	"runner"
	"runner/sshrunner"
	"util"

	"github.com/codegangsta/cli"
)

var (
	// ErrCmdRequired is returned when the -c/--cmd option is missing.
	ErrCmdRequired = fmt.Errorf("option -c/--cmd is required")
	// ErrNoNodeToExec is returned when the group/node filter matches nothing.
	ErrNoNodeToExec = fmt.Errorf("found no node to execute")
)

// execParams holds the parsed command-line options of the exec subcommand.
type execParams struct {
	GroupName string
	NodeName  string
	User      string
	Cmd       string
	Yes       bool
}

// initExecSubCmd registers the "exec" subcommand (with bash completion for
// the -g and -n options) on app.
func initExecSubCmd(app *cli.App) {
	execSubCmd := cli.Command{
		Name:        "exec",
		Usage:       "exec <options>",
		Description: "exec command on groups or nodes",
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:  "g,group",
				Value: "*",
				Usage: "exec command on group",
			},
			cli.StringFlag{
				Name:  "n,node",
				Value: "",
				Usage: "exec command on node",
			},
			cli.StringFlag{
				Name:  "u,user",
				Value: "root",
				Usage: "user who exec the command",
			},
			cli.StringFlag{
				Name:  "c,cmd",
				Value: "",
				Usage: "command for exec",
			},
			cli.BoolFlag{
				Name:  "y,yes",
				Usage: "confirm before executing command?",
			},
		},
		BashComplete: func(c *cli.Context) {
			bashComplete(c)
		},
		Action: func(c *cli.Context) {
			// When invoked with --generate-bash-completion, only emit
			// completion candidates; do not run the default action.
			if os.Args[len(os.Args)-1] == "--generate-bash-completion" {
				bashComplete(c)
				return
			}

			var ep, err = checkExecParams(c)
			if err != nil {
				fmt.Println(util.FgRed(err))
				cli.ShowCommandHelp(c, "exec")
				return
			}

			if err = execCmd(ep); err != nil {
				fmt.Println(util.FgRed(err))
			}
		},
	}

	if app.Commands == nil {
		app.Commands = cli.Commands{execSubCmd}
	} else {
		app.Commands = append(app.Commands, execSubCmd)
	}
}

// checkExecParams extracts execParams from the cli context and validates
// that the required command option is present.
func checkExecParams(c *cli.Context) (execParams, error) {
	var ep = execParams{
		GroupName: c.String("group"),
		NodeName:  c.String("node"),
		User:      c.String("user"),
		Cmd:       c.String("cmd"),
		Yes:       c.Bool("yes"),
	}

	if ep.Cmd == "" {
		return ep, ErrCmdRequired
	}

	return ep, nil
}

// execCmd runs ep.Cmd over SSH on every node matched by ep.GroupName and
// ep.NodeName, asking for confirmation first unless ep.Yes is set.
func execCmd(ep execParams) error {
	// TODO should use sshrunner from config

	// get node info for exec
	repo := GetRepo()
	var nodes, err = repo.FilterNodes(ep.GroupName, ep.NodeName)
	if err != nil {
		return err
	}

	if len(nodes) == 0 {
		return ErrNoNodeToExec
	}

	if !ep.Yes && !confirmExec(nodes, ep.User, ep.Cmd) {
		return nil
	}

	// exec cmd on each node
	for _, n := range nodes {
		fmt.Printf("Start to execute \"%s\" on %s(%s):\n",
			util.FgBoldGreen(ep.Cmd), util.FgBoldGreen(n.Name), util.FgBoldGreen(n.Host))
		var runCmd = sshrunner.New(n.User, n.Password, n.KeyPath, n.Host, n.Port)
		var input = runner.Input{
			ExecHost: n.Host,
			ExecUser: ep.User,
			Command:  ep.Cmd,
		}

		// display result
		output, err := runCmd.SyncExec(input)
		displayExecResult(output, err)
	}
	return nil
}

// completeGroups prints all group names, one per line, for bash completion.
func completeGroups() {
	repo := GetRepo()
	groups, _ := repo.FilterNodeGroups("*")
	for _, g := range groups {
		fmt.Println(g.Name)
	}
}

// completeNodes prints the names of all nodes in group gName, one per line,
// for bash completion.
func completeNodes(gName string) {
	repo := GetRepo()
	nodes, _ := repo.FilterNodes(gName, "*")
	for _, n := range nodes {
		fmt.Println(n.Name)
	}
}

// bashComplete emits completion candidates for whichever of -g/-n is
// currently being completed.
func bashComplete(c *cli.Context) {
	if isAutoComplete(c.String("group")) {
		completeGroups()
	}

	if isAutoComplete(c.String("node")) {
		completeNodes(c.String("group"))
	}
}

// isAutoComplete reports whether curStr is the cli completion sentinel,
// i.e. whether the option is the one being tab-completed.
func isAutoComplete(curStr string) bool {
	// --generate-bash-completion is a global option added by cli
	return curStr == "--generate-bash-completion"
}

// displayExecResult prints the stdout/stderr and timing of one command run,
// or the error if the run failed.
func displayExecResult(output *runner.Output, err error) {
	if err != nil {
		fmt.Printf("Command exec failed: %s\n", util.FgRed(err))
	}

	if output != nil {
		fmt.Printf(">>>>>>>>>>>>>>>>>>>> STDOUT >>>>>>>>>>>>>>>>>>>>\n%s\n", output.StdOutput)
		if output.StdError != "" {
			fmt.Printf(">>>>>>>>>>>>>>>>>>>> STDERR >>>>>>>>>>>>>>>>>>>>\n%s\n", output.StdError)
		}
		fmt.Printf("time costs: %v\n", output.ExecEnd.Sub(output.ExecStart))
	}
	fmt.Println(util.FgBoldBlue("==========================================================\n"))
}

// confirmExec lists the target nodes and asks the user to confirm running
// cmd as user on them. It returns true if the user agrees.
func confirmExec(nodes []model.Node, user, cmd string) bool {
	fmt.Printf("%-3s\t%-10s\t%-10s\n", "No.", "Name", "IP")
	fmt.Println("----------------------------------------------------------------------")
	for index, n := range nodes {
		fmt.Printf("%-3d\t%-10s\t%-10s\n", index+1, n.Name, n.Host)
	}
	fmt.Println()

	return util.Confirm(fmt.Sprintf("You want to exec COMMAND(%s) by USER(%s) at the above nodes, yes/no(y/n) ?",
		util.FgBoldRed(cmd), util.FgBoldRed(user)))
}
package handlers

import (
	"bytes"
	"fmt"
	"net/http"

	"code.cloudfoundry.org/bbs/events"
	"code.cloudfoundry.org/bbs/models"
	"code.cloudfoundry.org/lager"
)

// EventController exposes the versioned subscribe endpoints.
type EventController interface {
	Subscribe_r0(logger lager.Logger, w http.ResponseWriter, req *http.Request)
	Subscribe_r1(logger lager.Logger, w http.ResponseWriter, req *http.Request)
}

// DEPRECATED
type LRPGroupEventsHandler struct {
	desiredHub events.Hub
	actualHub  events.Hub
}

type TaskEventHandler struct {
	taskHub events.Hub
}

type LRPInstanceEventHandler struct {
	desiredHub     events.Hub
	lrpInstanceHub events.Hub
}

// DEPRECATED
func NewLRPGroupEventsHandler(desiredHub, actualHub events.Hub) *LRPGroupEventsHandler {
	return &LRPGroupEventsHandler{
		desiredHub: desiredHub,
		actualHub:  actualHub,
	}
}

// NewTaskEventHandler returns a handler that streams task events from taskHub.
func NewTaskEventHandler(taskHub events.Hub) *TaskEventHandler {
	return &TaskEventHandler{
		taskHub: taskHub,
	}
}

// NewLRPInstanceEventHandler returns a handler that streams desired-LRP and
// LRP-instance events.
func NewLRPInstanceEventHandler(desiredHub, lrpInstanceHub events.Hub) *LRPInstanceEventHandler {
	return &LRPInstanceEventHandler{
		desiredHub:     desiredHub,
		lrpInstanceHub: lrpInstanceHub,
	}
}

// streamEventsToResponse writes events from eventChan to the client as a
// chunked server-sent-event stream until an error arrives on errorChan or the
// client disconnects.
//
// The connection is hijacked so the server can be shut down even while a
// write is blocked on a slow consumer. Because we write to the raw
// connection, we must frame each event ourselves using chunked
// transfer-encoding: a hex size line ("size\r\n"), the SSE payload, then
// "\r\n"; the stream is terminated with a zero-length chunk ("0\r\n\r\n")
// before the connection is closed.
func streamEventsToResponse(logger lager.Logger, w http.ResponseWriter, eventChan <-chan models.Event, errorChan <-chan error) {
	w.Header().Add("Content-Type", "text/event-stream; charset=utf-8")
	w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Add("Connection", "keep-alive")

	w.WriteHeader(http.StatusOK)

	conn, rw, err := w.(http.Hijacker).Hijack()
	if err != nil {
		return
	}

	defer func() {
		// End the chunked stream with a zero-length chunk before closing.
		fmt.Fprintf(conn, "0\r\n\r\n")
		err := conn.Close()
		if err != nil {
			logger.Error("failed-to-close-connection", err)
		}
	}()

	// Flush the buffered status line and headers to the raw connection.
	if err := rw.Flush(); err != nil {
		logger.Error("failed-to-flush", err)
		return
	}

	var event models.Event
	eventID := 0
	closeNotifier := w.(http.CloseNotifier).CloseNotify()

	for {
		select {
		case event = <-eventChan:
		case err := <-errorChan:
			logger.Error("failed-to-get-next-event", err)
			return
		case <-closeNotifier:
			logger.Debug("received-close-notify")
			return
		}

		sseEvent, err := events.NewEventFromModelEvent(eventID, event)
		if err != nil {
			logger.Error("failed-to-marshal-event", err)
			return
		}

		// Render the SSE event to a buffer first so we know the chunk size.
		buf := new(bytes.Buffer)
		err = sseEvent.Write(buf)
		if err != nil {
			logger.Error("failed-to-write-event", err)
			return
		}

		// Chunk framing: size in hex (";" starts an empty chunk extension),
		// then the data followed by CRLF.
		fmt.Fprintf(conn, "%x;\r\n", buf.Len())
		fmt.Fprintf(conn, "%s\r\n", buf.String())

		eventID++
	}
}

// EventFetcher produces the next event for a stream, blocking until one is
// available or an error occurs.
type EventFetcher func() (models.Event, error)

// streamSource pumps events from fetchEvent into eventChan (or the first
// error into errorChan) until closeChan is closed.
func streamSource(eventChan chan<- models.Event, errorChan chan<- error, closeChan chan struct{}, fetchEvent EventFetcher) {
	for {
		event, err := fetchEvent()
		if err != nil {
			select {
			case errorChan <- err:
			case <-closeChan:
			}
			return
		}
		select {
		case eventChan <- event:
		case <-closeChan:
			return
		}
	}
}
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package healthz import ( "bytes" "fmt" "net/http" "strings" "sync" "sync/atomic" "time" "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" ) // HealthzChecker is a named healthz checker. type HealthzChecker interface { Name() string Check(req *http.Request) error } var defaultHealthz = sync.Once{} // DefaultHealthz installs the default healthz check to the http.DefaultServeMux. func DefaultHealthz(checks ...HealthzChecker) { defaultHealthz.Do(func() { InstallHandler(http.DefaultServeMux, checks...) }) } // PingHealthz returns true automatically when checked var PingHealthz HealthzChecker = ping{} // ping implements the simplest possible healthz checker. type ping struct{} func (ping) Name() string { return "ping" } // PingHealthz is a health check that returns true. 
func (ping) Check(_ *http.Request) error { return nil } // LogHealthz returns true if logging is not blocked var LogHealthz HealthzChecker = &log{} type log struct { startOnce sync.Once lastVerified atomic.Value } func (l *log) Name() string { return "log" } func (l *log) Check(_ *http.Request) error { l.startOnce.Do(func() { l.lastVerified.Store(time.Now()) go wait.Forever(func() { klog.Flush() l.lastVerified.Store(time.Now()) }, time.Minute) }) lastVerified := l.lastVerified.Load().(time.Time) if time.Since(lastVerified) < (2 * time.Minute) { return nil } return fmt.Errorf("logging blocked") } // NamedCheck returns a healthz checker for the given name and function. func NamedCheck(name string, check func(r *http.Request) error) HealthzChecker { return &healthzCheck{name, check} } // InstallHandler registers handlers for health checking on the path // "/healthz" to mux. *All handlers* for mux must be specified in // exactly one call to InstallHandler. Calling InstallHandler more // than once for the same mux will result in a panic. func InstallHandler(mux mux, checks ...HealthzChecker) { InstallPathHandler(mux, "/healthz", checks...) } // InstallPathHandler registers handlers for health checking on // a specific path to mux. *All handlers* for the path must be // specified in exactly one call to InstallPathHandler. Calling // InstallPathHandler more than once for the same path and mux will // result in a panic. func InstallPathHandler(mux mux, path string, checks ...HealthzChecker) { if len(checks) == 0 { klog.V(5).Info("No default health checks specified. Installing the ping handler.") checks = []HealthzChecker{PingHealthz} } klog.V(5).Info("Installing healthz checkers:", formatQuoted(checkerNames(checks...)...)) mux.Handle(path, handleRootHealthz(checks...)) for _, check := range checks { mux.Handle(fmt.Sprintf("%s/%v", path, check.Name()), adaptCheckToHandler(check.Check)) } } // mux is an interface describing the methods InstallHandler requires. 
type mux interface { Handle(pattern string, handler http.Handler) } // healthzCheck implements HealthzChecker on an arbitrary name and check function. type healthzCheck struct { name string check func(r *http.Request) error } var _ HealthzChecker = &healthzCheck{} func (c *healthzCheck) Name() string { return c.name } func (c *healthzCheck) Check(r *http.Request) error { return c.check(r) } // getExcludedChecks extracts the health check names to be excluded from the query param func getExcludedChecks(r *http.Request) sets.String { checks, found := r.URL.Query()["exclude"] if found { return sets.NewString(checks...) } return sets.NewString() } // handleRootHealthz returns an http.HandlerFunc that serves the provided checks. func handleRootHealthz(checks ...HealthzChecker) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { failed := false excluded := getExcludedChecks(r) var verboseOut bytes.Buffer for _, check := range checks { // no-op the check if we've specified we want to exclude the check if excluded.Has(check.Name()) { excluded.Delete(check.Name()) fmt.Fprintf(&verboseOut, "[+]%v excluded: ok\n", check.Name()) continue } if err := check.Check(r); err != nil { // don't include the error since this endpoint is public. If someone wants more detail // they should have explicit permission to the detailed checks. 
klog.V(4).Infof("healthz check %v failed: %v", check.Name(), err) fmt.Fprintf(&verboseOut, "[-]%v failed: reason withheld\n", check.Name()) failed = true } else { fmt.Fprintf(&verboseOut, "[+]%v ok\n", check.Name()) } } if excluded.Len() > 0 { fmt.Fprintf(&verboseOut, "warn: some health checks cannot be excluded: no matches for %v\n", formatQuoted(excluded.List()...)) klog.Warningf("cannot exclude some health checks, no health checks are installed matching %v", formatQuoted(excluded.List()...)) } // always be verbose on failure if failed { http.Error(w, fmt.Sprintf("%vhealthz check failed", verboseOut.String()), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") if _, found := r.URL.Query()["verbose"]; !found { fmt.Fprint(w, "ok") return } verboseOut.WriteTo(w) fmt.Fprint(w, "healthz check passed\n") }) } // adaptCheckToHandler returns an http.HandlerFunc that serves the provided checks. func adaptCheckToHandler(c func(r *http.Request) error) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := c(r) if err != nil { http.Error(w, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) } else { fmt.Fprint(w, "ok") } }) } // checkerNames returns the names of the checks in the same order as passed in. func checkerNames(checks ...HealthzChecker) []string { // accumulate the names of checks for printing them out. checkerNames := make([]string, 0, len(checks)) for _, check := range checks { checkerNames = append(checkerNames, check.Name()) } return checkerNames } // formatQuoted returns a formatted string of the health check names, // preserving the order passed in. 
func formatQuoted(names ...string) string { quoted := make([]string, 0, len(names)) for _, name := range names { quoted = append(quoted, fmt.Sprintf("%q", name)) } return strings.Join(quoted, ",") } provide verbose output when health check fails /* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package healthz import ( "bytes" "fmt" "net/http" "strings" "sync" "sync/atomic" "time" "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" ) // HealthzChecker is a named healthz checker. type HealthzChecker interface { Name() string Check(req *http.Request) error } var defaultHealthz = sync.Once{} // DefaultHealthz installs the default healthz check to the http.DefaultServeMux. func DefaultHealthz(checks ...HealthzChecker) { defaultHealthz.Do(func() { InstallHandler(http.DefaultServeMux, checks...) }) } // PingHealthz returns true automatically when checked var PingHealthz HealthzChecker = ping{} // ping implements the simplest possible healthz checker. type ping struct{} func (ping) Name() string { return "ping" } // PingHealthz is a health check that returns true. 
func (ping) Check(_ *http.Request) error { return nil } // LogHealthz returns true if logging is not blocked var LogHealthz HealthzChecker = &log{} type log struct { startOnce sync.Once lastVerified atomic.Value } func (l *log) Name() string { return "log" } func (l *log) Check(_ *http.Request) error { l.startOnce.Do(func() { l.lastVerified.Store(time.Now()) go wait.Forever(func() { klog.Flush() l.lastVerified.Store(time.Now()) }, time.Minute) }) lastVerified := l.lastVerified.Load().(time.Time) if time.Since(lastVerified) < (2 * time.Minute) { return nil } return fmt.Errorf("logging blocked") } // NamedCheck returns a healthz checker for the given name and function. func NamedCheck(name string, check func(r *http.Request) error) HealthzChecker { return &healthzCheck{name, check} } // InstallHandler registers handlers for health checking on the path // "/healthz" to mux. *All handlers* for mux must be specified in // exactly one call to InstallHandler. Calling InstallHandler more // than once for the same mux will result in a panic. func InstallHandler(mux mux, checks ...HealthzChecker) { InstallPathHandler(mux, "/healthz", checks...) } // InstallPathHandler registers handlers for health checking on // a specific path to mux. *All handlers* for the path must be // specified in exactly one call to InstallPathHandler. Calling // InstallPathHandler more than once for the same path and mux will // result in a panic. func InstallPathHandler(mux mux, path string, checks ...HealthzChecker) { if len(checks) == 0 { klog.V(5).Info("No default health checks specified. Installing the ping handler.") checks = []HealthzChecker{PingHealthz} } klog.V(5).Info("Installing healthz checkers:", formatQuoted(checkerNames(checks...)...)) mux.Handle(path, handleRootHealthz(checks...)) for _, check := range checks { mux.Handle(fmt.Sprintf("%s/%v", path, check.Name()), adaptCheckToHandler(check.Check)) } } // mux is an interface describing the methods InstallHandler requires. 
type mux interface { Handle(pattern string, handler http.Handler) } // healthzCheck implements HealthzChecker on an arbitrary name and check function. type healthzCheck struct { name string check func(r *http.Request) error } var _ HealthzChecker = &healthzCheck{} func (c *healthzCheck) Name() string { return c.name } func (c *healthzCheck) Check(r *http.Request) error { return c.check(r) } // getExcludedChecks extracts the health check names to be excluded from the query param func getExcludedChecks(r *http.Request) sets.String { checks, found := r.URL.Query()["exclude"] if found { return sets.NewString(checks...) } return sets.NewString() } // handleRootHealthz returns an http.HandlerFunc that serves the provided checks. func handleRootHealthz(checks ...HealthzChecker) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { failed := false excluded := getExcludedChecks(r) var verboseOut bytes.Buffer for _, check := range checks { // no-op the check if we've specified we want to exclude the check if excluded.Has(check.Name()) { excluded.Delete(check.Name()) fmt.Fprintf(&verboseOut, "[+]%v excluded: ok\n", check.Name()) continue } if err := check.Check(r); err != nil { // don't include the error since this endpoint is public. If someone wants more detail // they should have explicit permission to the detailed checks. 
klog.V(4).Infof("healthz check %v failed: %v", check.Name(), err) fmt.Fprintf(&verboseOut, "[-]%v failed: reason withheld\n", check.Name()) failed = true } else { fmt.Fprintf(&verboseOut, "[+]%v ok\n", check.Name()) } } if excluded.Len() > 0 { fmt.Fprintf(&verboseOut, "warn: some health checks cannot be excluded: no matches for %v\n", formatQuoted(excluded.List()...)) klog.Warningf("cannot exclude some health checks, no health checks are installed matching %v", formatQuoted(excluded.List()...)) } // always be verbose on failure if failed { klog.V(2).Infof("%vhealthz check failed", verboseOut.String()) http.Error(w, fmt.Sprintf("%vhealthz check failed", verboseOut.String()), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") if _, found := r.URL.Query()["verbose"]; !found { fmt.Fprint(w, "ok") return } verboseOut.WriteTo(w) fmt.Fprint(w, "healthz check passed\n") }) } // adaptCheckToHandler returns an http.HandlerFunc that serves the provided checks. func adaptCheckToHandler(c func(r *http.Request) error) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := c(r) if err != nil { http.Error(w, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) } else { fmt.Fprint(w, "ok") } }) } // checkerNames returns the names of the checks in the same order as passed in. func checkerNames(checks ...HealthzChecker) []string { // accumulate the names of checks for printing them out. checkerNames := make([]string, 0, len(checks)) for _, check := range checks { checkerNames = append(checkerNames, check.Name()) } return checkerNames } // formatQuoted returns a formatted string of the health check names, // preserving the order passed in. 
func formatQuoted(names ...string) string { quoted := make([]string, 0, len(names)) for _, name := range names { quoted = append(quoted, fmt.Sprintf("%q", name)) } return strings.Join(quoted, ",") }
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clientcmd

import (
	"errors"
	"fmt"
	"os"
	"reflect"
	"strings"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/validation"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

var (
	ErrNoContext = errors.New("no context chosen")
	// message is for consistency with old behavior
	ErrEmptyConfig = NewEmptyConfigError("no configuration has been provided, try setting KUBERNETES_MASTER environment variable")
	ErrEmptyCluster = errors.New("cluster has no server defined")
)

// NewEmptyConfigError returns an error wrapping the given message which IsEmptyConfig() will recognize as an empty config error
func NewEmptyConfigError(message string) error {
	return &errEmptyConfig{message}
}

// errEmptyConfig marks an error as "the config was empty"; detected by IsEmptyConfig.
type errEmptyConfig struct {
	message string
}

func (e *errEmptyConfig) Error() string {
	return e.message
}

// errContextNotFound reports that the named context does not exist in the config.
type errContextNotFound struct {
	ContextName string
}

func (e *errContextNotFound) Error() string {
	return fmt.Sprintf("context was not found for specified context: %v", e.ContextName)
}

// IsContextNotFound returns a boolean indicating whether the error is known to
// report that a context was not found
func IsContextNotFound(err error) bool {
	if err == nil {
		return false
	}
	if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext {
		return true
	}
	// fall back to string matching for errors wrapped or produced elsewhere
	return strings.Contains(err.Error(), "context was not found for specified context")
}

// IsEmptyConfig returns true if the provided error indicates the provided
// configuration is empty.
func IsEmptyConfig(err error) bool {
	switch t := err.(type) {
	case errConfigurationInvalid:
		// an aggregate counts as "empty" only when it wraps exactly one empty-config error
		if len(t) != 1 {
			return false
		}
		_, ok := t[0].(*errEmptyConfig)
		return ok
	}
	_, ok := err.(*errEmptyConfig)
	return ok
}

// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
type errConfigurationInvalid []error

// errConfigurationInvalid implements error and Aggregate
var _ error = errConfigurationInvalid{}
var _ utilerrors.Aggregate = errConfigurationInvalid{}

// newErrConfigurationInvalid wraps errs in an errConfigurationInvalid,
// returning nil when there are no errors.
func newErrConfigurationInvalid(errs []error) error {
	switch len(errs) {
	case 0:
		return nil
	default:
		return errConfigurationInvalid(errs)
	}
}

// Error implements the error interface
func (e errConfigurationInvalid) Error() string {
	return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error())
}

// Errors implements the utilerrors.Aggregate interface
func (e errConfigurationInvalid) Errors() []error {
	return e
}

// Is implements the utilerrors.Aggregate interface
func (e errConfigurationInvalid) Is(target error) bool {
	return e.visit(func(err error) bool {
		return errors.Is(err, target)
	})
}

// visit applies f to every leaf error in e (recursing through nested
// aggregates) and returns true as soon as f matches one.
func (e errConfigurationInvalid) visit(f func(err error) bool) bool {
	for _, err := range e {
		switch err := err.(type) {
		case errConfigurationInvalid:
			if match := err.visit(f); match {
				return match
			}
		case utilerrors.Aggregate:
			for _, nestedErr := range err.Errors() {
				if match := f(nestedErr); match {
					return match
				}
			}
		default:
			if match := f(err); match {
				return match
			}
		}
	}

	return false
}

// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid.
func IsConfigurationInvalid(err error) bool {
	switch err.(type) {
	case *errContextNotFound, errConfigurationInvalid:
		return true
	}
	return IsContextNotFound(err)
}

// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible.
func Validate(config clientcmdapi.Config) error { validationErrors := make([]error, 0) if clientcmdapi.IsConfigEmpty(&config) { return newErrConfigurationInvalid([]error{ErrEmptyConfig}) } if len(config.CurrentContext) != 0 { if _, exists := config.Contexts[config.CurrentContext]; !exists { validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) } } for contextName, context := range config.Contexts { validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) } for authInfoName, authInfo := range config.AuthInfos { validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) } for clusterName, clusterInfo := range config.Clusters { validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) } return newErrConfigurationInvalid(validationErrors) } // ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, // but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { validationErrors := make([]error, 0) if clientcmdapi.IsConfigEmpty(&config) { return newErrConfigurationInvalid([]error{ErrEmptyConfig}) } var contextName string if len(passedContextName) != 0 { contextName = passedContextName } else { contextName = config.CurrentContext } if len(contextName) == 0 { return ErrNoContext } context, exists := config.Contexts[contextName] if !exists { validationErrors = append(validationErrors, &errContextNotFound{contextName}) } if exists { validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) 
validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) } return newErrConfigurationInvalid(validationErrors) } // validateClusterInfo looks for conflicts and errors in the cluster info func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { validationErrors := make([]error, 0) emptyCluster := clientcmdapi.NewCluster() if reflect.DeepEqual(*emptyCluster, clusterInfo) { return []error{ErrEmptyCluster} } if len(clusterInfo.Server) == 0 { if len(clusterName) == 0 { validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) } else { validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) } } if proxyURL := clusterInfo.ProxyURL; proxyURL != "" { if _, err := parseProxyURL(proxyURL); err != nil { validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %v", proxyURL, clusterName, err)) } } // Make sure CA data and CA file aren't both specified if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override.", clusterName)) } if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) } else { defer clientCertCA.Close() } } return validationErrors } // validateAuthInfo looks for conflicts and errors in the auth info func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { validationErrors := make([]error, 0) usingAuthPath := false methods := make([]string, 0, 3) if len(authInfo.Token) != 0 { methods = append(methods, "token") } if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { methods = append(methods, "basicAuth") } if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { // Make sure cert data and file aren't both specified if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override.", authInfoName)) } // Make sure key data and file aren't both specified if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) } // Make sure a key is specified if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) } if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) } else { defer clientCertFile.Close() } } if len(authInfo.ClientKey) != 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) } else { defer clientKeyFile.Close() } } } if authInfo.Exec != nil { if authInfo.AuthProvider != nil { validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName)) } if len(authInfo.Exec.Command) == 0 { validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName)) } if len(authInfo.Exec.APIVersion) == 0 { validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName)) } for _, v := range authInfo.Exec.Env { if len(v.Name) == 0 { validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) } } 
switch authInfo.Exec.InteractiveMode { case "": validationErrors = append(validationErrors, fmt.Errorf("interactiveMode must be specified for %v to use exec authentication plugin", authInfoName)) case clientcmdapi.NeverExecInteractiveMode, clientcmdapi.IfAvailableExecInteractiveMode, clientcmdapi.AlwaysExecInteractiveMode: // These are valid default: validationErrors = append(validationErrors, fmt.Errorf("invalid interactiveMode for %v: %q", authInfoName, authInfo.Exec.InteractiveMode)) } } // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case if (len(methods) > 1) && (!usingAuthPath) { validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) } // ImpersonateGroups or ImpersonateUserExtra should be requested with a user if (len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { validationErrors = append(validationErrors, fmt.Errorf("requesting groups or user-extra for %v without impersonating a user", authInfoName)) } return validationErrors } // validateContext looks for errors in the context. 
It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { validationErrors := make([]error, 0) if len(contextName) == 0 { validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context)) } if len(context.AuthInfo) == 0 { validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) } if len(context.Cluster) == 0 { validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) } else if _, exists := config.Clusters[context.Cluster]; !exists { validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) } if len(context.Namespace) != 0 { if len(validation.IsDNS1123Label(context.Namespace)) != 0 { validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) } } return validationErrors } fix: wrap errors correct when validating kubeconfig This allows to check for specific errors using `errors.Is`. Signed-off-by: Andrey Smirnov <54dcec8d1c657583c267c6004c4ac3a9ac532148@talos-systems.com> /* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd import ( "errors" "fmt" "os" "reflect" "strings" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) var ( ErrNoContext = errors.New("no context chosen") ErrEmptyConfig = NewEmptyConfigError("no configuration has been provided, try setting KUBERNETES_MASTER environment variable") // message is for consistency with old behavior ErrEmptyCluster = errors.New("cluster has no server defined") ) // NewEmptyConfigError returns an error wrapping the given message which IsEmptyConfig() will recognize as an empty config error func NewEmptyConfigError(message string) error { return &errEmptyConfig{message} } type errEmptyConfig struct { message string } func (e *errEmptyConfig) Error() string { return e.message } type errContextNotFound struct { ContextName string } func (e *errContextNotFound) Error() string { return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) } // IsContextNotFound returns a boolean indicating whether the error is known to // report that a context was not found func IsContextNotFound(err error) bool { if err == nil { return false } if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { return true } return strings.Contains(err.Error(), "context was not found for specified context") } // IsEmptyConfig returns true if the provided error indicates the provided configuration // is empty. 
func IsEmptyConfig(err error) bool { switch t := err.(type) { case errConfigurationInvalid: if len(t) != 1 { return false } _, ok := t[0].(*errEmptyConfig) return ok } _, ok := err.(*errEmptyConfig) return ok } // errConfigurationInvalid is a set of errors indicating the configuration is invalid. type errConfigurationInvalid []error // errConfigurationInvalid implements error and Aggregate var _ error = errConfigurationInvalid{} var _ utilerrors.Aggregate = errConfigurationInvalid{} func newErrConfigurationInvalid(errs []error) error { switch len(errs) { case 0: return nil default: return errConfigurationInvalid(errs) } } // Error implements the error interface func (e errConfigurationInvalid) Error() string { return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) } // Errors implements the utilerrors.Aggregate interface func (e errConfigurationInvalid) Errors() []error { return e } // Is implements the utilerrors.Aggregate interface func (e errConfigurationInvalid) Is(target error) bool { return e.visit(func(err error) bool { return errors.Is(err, target) }) } func (e errConfigurationInvalid) visit(f func(err error) bool) bool { for _, err := range e { switch err := err.(type) { case errConfigurationInvalid: if match := err.visit(f); match { return match } case utilerrors.Aggregate: for _, nestedErr := range err.Errors() { if match := f(nestedErr); match { return match } } default: if match := f(err); match { return match } } } return false } // IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. func IsConfigurationInvalid(err error) bool { switch err.(type) { case *errContextNotFound, errConfigurationInvalid: return true } return IsContextNotFound(err) } // Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. 
func Validate(config clientcmdapi.Config) error { validationErrors := make([]error, 0) if clientcmdapi.IsConfigEmpty(&config) { return newErrConfigurationInvalid([]error{ErrEmptyConfig}) } if len(config.CurrentContext) != 0 { if _, exists := config.Contexts[config.CurrentContext]; !exists { validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) } } for contextName, context := range config.Contexts { validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) } for authInfoName, authInfo := range config.AuthInfos { validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) } for clusterName, clusterInfo := range config.Clusters { validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) } return newErrConfigurationInvalid(validationErrors) } // ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, // but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { validationErrors := make([]error, 0) if clientcmdapi.IsConfigEmpty(&config) { return newErrConfigurationInvalid([]error{ErrEmptyConfig}) } var contextName string if len(passedContextName) != 0 { contextName = passedContextName } else { contextName = config.CurrentContext } if len(contextName) == 0 { return ErrNoContext } context, exists := config.Contexts[contextName] if !exists { validationErrors = append(validationErrors, &errContextNotFound{contextName}) } if exists { validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) 
validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) } return newErrConfigurationInvalid(validationErrors) } // validateClusterInfo looks for conflicts and errors in the cluster info func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { validationErrors := make([]error, 0) emptyCluster := clientcmdapi.NewCluster() if reflect.DeepEqual(*emptyCluster, clusterInfo) { return []error{ErrEmptyCluster} } if len(clusterInfo.Server) == 0 { if len(clusterName) == 0 { validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) } else { validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) } } if proxyURL := clusterInfo.ProxyURL; proxyURL != "" { if _, err := parseProxyURL(proxyURL); err != nil { validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %w", proxyURL, clusterName, err)) } } // Make sure CA data and CA file aren't both specified if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override.", clusterName)) } if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err)) } else { defer clientCertCA.Close() } } return validationErrors } // validateAuthInfo looks for conflicts and errors in the auth info func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { validationErrors := make([]error, 0) usingAuthPath := false methods := make([]string, 0, 3) if len(authInfo.Token) != 0 { methods = append(methods, "token") } if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { methods = append(methods, "basicAuth") } if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { // Make sure cert data and file aren't both specified if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override.", authInfoName)) } // Make sure key data and file aren't both specified if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) } // Make sure a key is specified if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) } if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err)) } else { defer clientCertFile.Close() } } if len(authInfo.ClientKey) != 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err)) } else { defer clientKeyFile.Close() } } } if authInfo.Exec != nil { if authInfo.AuthProvider != nil { validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName)) } if len(authInfo.Exec.Command) == 0 { validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName)) } if len(authInfo.Exec.APIVersion) == 0 { validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName)) } for _, v := range authInfo.Exec.Env { if len(v.Name) == 0 { validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) } } 
switch authInfo.Exec.InteractiveMode { case "": validationErrors = append(validationErrors, fmt.Errorf("interactiveMode must be specified for %v to use exec authentication plugin", authInfoName)) case clientcmdapi.NeverExecInteractiveMode, clientcmdapi.IfAvailableExecInteractiveMode, clientcmdapi.AlwaysExecInteractiveMode: // These are valid default: validationErrors = append(validationErrors, fmt.Errorf("invalid interactiveMode for %v: %q", authInfoName, authInfo.Exec.InteractiveMode)) } } // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case if (len(methods) > 1) && (!usingAuthPath) { validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) } // ImpersonateGroups or ImpersonateUserExtra should be requested with a user if (len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { validationErrors = append(validationErrors, fmt.Errorf("requesting groups or user-extra for %v without impersonating a user", authInfoName)) } return validationErrors } // validateContext looks for errors in the context. 
It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { validationErrors := make([]error, 0) if len(contextName) == 0 { validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context)) } if len(context.AuthInfo) == 0 { validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) } if len(context.Cluster) == 0 { validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) } else if _, exists := config.Clusters[context.Cluster]; !exists { validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) } if len(context.Namespace) != 0 { if len(validation.IsDNS1123Label(context.Namespace)) != 0 { validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) } } return validationErrors }
package deprecatedapirequest import "k8s.io/apimachinery/pkg/runtime/schema" var deprecatedApiRemovedRelease = map[schema.GroupVersionResource]string{ // Kubernetes APIs {Group: "apps", Version: "v1beta1", Resource: "controllerrevisions"}: "1.16", {Group: "apps", Version: "v1beta1", Resource: "deploymentrollbacks"}: "1.16", {Group: "apps", Version: "v1beta1", Resource: "deployments"}: "1.16", {Group: "apps", Version: "v1beta1", Resource: "scales"}: "1.16", {Group: "apps", Version: "v1beta1", Resource: "statefulsets"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "controllerrevisions"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "daemonsets"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "deployments"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "replicasets"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "scales"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "statefulsets"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "deploymentrollbacks"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "deployments"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "networkpolicies"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "podsecuritypolicies"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "replicasets"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "scales"}: "1.16", {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "flowschemas"}: "1.21", {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "prioritylevelconfigurations"}: "1.21", {Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "mutatingwebhookconfigurations"}: "1.22", {Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "validatingwebhookconfigurations"}: "1.22", {Group: "apiextensions.k8s.io", Version: "v1beta1", Resource: 
"customresourcedefinitions"}: "1.22", {Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}: "1.22", {Group: "extensions", Version: "v1beta1", Resource: "ingresses"}: "1.22", {Group: "networking.k8s.io", Version: "v1beta1", Resource: "ingresses"}: "1.22", {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterrolebindings"}: "1.22", {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterroles"}: "1.22", {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "rolebindings"}: "1.22", {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"}: "1.22", {Group: "scheduling.k8s.io", Version: "v1beta1", Resource: "priorityclasses"}: "1.22", {Group: "storage.k8s.io", Version: "v1beta1", Resource: "csinodes"}: "1.22", {Group: "batch", Version: "v1beta1", Resource: "cronjobs"}: "1.25", {Group: "discovery.k8s.io", Version: "v1beta1", Resource: "endpointslices"}: "1.25", {Group: "events.k8s.io", Version: "v1beta1", Resource: "events"}: "1.25", {Group: "autoscaling", Version: "v2beta1", Resource: "horizontalpodautoscalers"}: "1.25", {Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}: "1.25", {Group: "policy", Version: "v1beta1", Resource: "podsecuritypolicies"}: "1.25", {Group: "node.k8s.io", Version: "v1beta1", Resource: "runtimeclasses"}: "1.25", {Group: "autoscaling", Version: "v2beta2", Resource: "horizontalpodautoscalers"}: "1.26", // OpenShift APIs {Group: "operator.openshift.io", Version: "v1beta1", Resource: "kubedeschedulers"}: "1.22", } // removedRelease of a specified resource.version.group. func removedRelease(resource schema.GroupVersionResource) string { return deprecatedApiRemovedRelease[resource] } UPSTREAM: <carry>: update list of deprecated apis Update the list of deprecated APIs marked for removal base on the latest [Deprecated API Migration Guide](https://kubernetes.io/docs/reference/using-api/deprecation-guide). 
package deprecatedapirequest import "k8s.io/apimachinery/pkg/runtime/schema" var deprecatedApiRemovedRelease = map[schema.GroupVersionResource]string{ // Kubernetes APIs {Group: "apps", Version: "v1beta1", Resource: "controllerrevisions"}: "1.16", {Group: "apps", Version: "v1beta1", Resource: "deploymentrollbacks"}: "1.16", {Group: "apps", Version: "v1beta1", Resource: "deployments"}: "1.16", {Group: "apps", Version: "v1beta1", Resource: "scales"}: "1.16", {Group: "apps", Version: "v1beta1", Resource: "statefulsets"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "controllerrevisions"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "daemonsets"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "deployments"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "replicasets"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "scales"}: "1.16", {Group: "apps", Version: "v1beta2", Resource: "statefulsets"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "deploymentrollbacks"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "deployments"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "networkpolicies"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "podsecuritypolicies"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "replicasets"}: "1.16", {Group: "extensions", Version: "v1beta1", Resource: "scales"}: "1.16", {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "flowschemas"}: "1.21", {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Resource: "prioritylevelconfigurations"}: "1.21", {Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "mutatingwebhookconfigurations"}: "1.22", {Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "validatingwebhookconfigurations"}: "1.22", {Group: "apiextensions.k8s.io", Version: "v1beta1", Resource: 
"customresourcedefinitions"}: "1.22", {Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}: "1.22", {Group: "extensions", Version: "v1beta1", Resource: "ingresses"}: "1.22", {Group: "networking.k8s.io", Version: "v1beta1", Resource: "ingresses"}: "1.22", {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterrolebindings"}: "1.22", {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterroles"}: "1.22", {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "rolebindings"}: "1.22", {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"}: "1.22", {Group: "scheduling.k8s.io", Version: "v1beta1", Resource: "priorityclasses"}: "1.22", {Group: "storage.k8s.io", Version: "v1beta1", Resource: "csinodes"}: "1.22", {Group: "batch", Version: "v1beta1", Resource: "cronjobs"}: "1.25", {Group: "discovery.k8s.io", Version: "v1beta1", Resource: "endpointslices"}: "1.25", {Group: "events.k8s.io", Version: "v1beta1", Resource: "events"}: "1.25", {Group: "autoscaling", Version: "v2beta1", Resource: "horizontalpodautoscalers"}: "1.25", {Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}: "1.25", {Group: "policy", Version: "v1beta1", Resource: "podsecuritypolicies"}: "1.25", {Group: "node.k8s.io", Version: "v1beta1", Resource: "runtimeclasses"}: "1.25", {Group: "autoscaling", Version: "v2beta2", Resource: "horizontalpodautoscalers"}: "1.26", {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "flowschemas"}: "1.26", {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "prioritylevelconfigurations"}: "1.26", // OpenShift APIs {Group: "operator.openshift.io", Version: "v1beta1", Resource: "kubedeschedulers"}: "1.22", } // removedRelease of a specified resource.version.group. func removedRelease(resource schema.GroupVersionResource) string { return deprecatedApiRemovedRelease[resource] }
// Copyright 2010 The GoGo Authors. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.

// This file holds the basic scanning routines that separate a source file
// into the various tokens

package main

import "./libgogo/_obj/libgogo"

// Token struct holding the relevant data of a parsed token.
type Token struct {
    id uint64; // The id. Is one of TOKEN_*
    intValue uint64; // value storing the integer value if the token is TOKEN_INTEGER
    strValue string; // Value storing the token string if the token is TOKEN_STRING or TOKEN_IDENTIFIER
    nextChar byte; // Sometime the next char is already read. It is stored here to be re-assigned in the next GetNextToken() round
};

// GetNextTokenRaw scans the next raw token from file descriptor fd into tok.
// It first skips whitespace and both comment styles, then classifies the next
// lexeme (identifier, string, quoted char, integer, or operator/punctuation).
// A one-byte lookahead is parked in tok.nextChar for the following call.
func GetNextTokenRaw(fd uint64, tok *Token) {
    var singleChar byte; // Byte holding the last read value
    // Flag indicating whether we are in a comment.
    // 0 for no comment
    // 1 for a single line comment
    // 2 for a multi line comment
    var inComment uint64;
    var done uint64; // Flag indicating whether a cycle (Token) is finsihed
    var spaceDone uint64; // Flag indicating whether an abolishment cycle is finished

    // Initialize variables
    done = 0;
    spaceDone = 0;
    inComment = 0;
    tok.strValue = "";

    // If the previous cycle had to read the next char (and stored it), it is
    // now used as first read
    if tok.nextChar == 0 {
        singleChar = libgogo.GetChar(fd)
    } else {
        singleChar = tok.nextChar;
        tok.nextChar = 0;
    }

    // check if it is a valid read, or an EOF
    if singleChar == 0 {
        tok.id = TOKEN_EOS;
        done = 1;
        spaceDone = 1;
    }

    //
    // Cleaning Tasks
    // The next part strips out spaces, newlines, tabs, and comments
    // Comments can either be single line with double slashes (//) or multiline
    // using C++ syntax /* */
    //
    for ; spaceDone != 1; {
        // check whether a comment is starting
        if singleChar == '/' {
            // if we are in a comment skip the rest, get the next char otherwise
            if inComment == 0 {
                singleChar = libgogo.GetChar(fd);
                if singleChar == '/' {
                    // we are in a single line comment (until newline is found)
                    inComment = 1;
                } else {
                    if singleChar == '*' {
                        // we are in a multiline comment (until ending is found)
                        inComment = 2;
                    } else {
                        libgogo.ExitError(">> Scanner: Unkown character combination for comments. Exiting.",1);
                    }
                }
            }
        }

        // check whether a multi-line comment is ending
        if singleChar == '*' {
            singleChar = libgogo.GetChar(fd);
            if singleChar == '/' {
                if inComment == 2 {
                    inComment = 0;
                    singleChar = libgogo.GetChar(fd);
                }
            }
        }

        // if character is a newline:
        // *) if in a singleline comment, exit the comment
        // *) skip otherwise
        if singleChar == 10 {
            if inComment == 1 {
                inComment = 0;
            }
        }

        // handle everything that is not a space,tab,newline
        if singleChar != ' ' && singleChar != 9 && singleChar != 10 {
            // if not in a comment we have our current valid char
            if inComment == 0 {
                spaceDone = 1;
            }
            // check if GetChar() returned EOF while skipping
            if singleChar == 0 {
                tok.id = TOKEN_EOS;
                spaceDone = 1;
                done = 1;
            }
        }

        // if we are not done until now, get a new character and start another abolishing cycle
        if spaceDone == 0 {
            singleChar=libgogo.GetChar(fd);
        }
    }

    //
    // Actual scanning part starts here
    //

    // Catch identifiers
    // identifier = letter { letter | digit }.
    if (done != 1) && (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' { // check for letter or _
        tok.id = TOKEN_IDENTIFIER;
        // preceding characters may be letter,_, or a number
        for ; (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' || (singleChar >= '0' && singleChar <= '9'); singleChar = libgogo.GetChar(fd) {
            tmp_TokAppendStr(tok,singleChar);
        }
        // save the last read character for the next GetNextToken() cycle
        tok.nextChar = singleChar;
        done = 1;
    }

    // string "..."
    if (done != 1) && singleChar == '"' {
        tok.id = TOKEN_STRING;
        // accept printable ASCII (32..126) up to the closing quote
        for singleChar = libgogo.GetChar(fd); singleChar != '"' &&singleChar > 31 && singleChar < 127;singleChar = libgogo.GetChar(fd) {
            tmp_TokAppendStr(tok,singleChar);
        }
        if singleChar != '"' {
            libgogo.ExitError(">> Scanner: String not closing. Exiting.",1);
        }
        done = 1;
    }

    // Single Quoted Character (39 is the ASCII code of a single quote)
    if (done != 1) && singleChar == 39 {
        singleChar = libgogo.GetChar(fd);
        if singleChar != 39 && singleChar > 31 && singleChar < 127 {
            tok.id = TOKEN_INTEGER;
            tok.intValue = libgogo.ToIntFromByte(singleChar);
        } else {
            libgogo.ExitError(">> Scanner: Unknown character. Exiting.",1);
        }
        singleChar = libgogo.GetChar(fd);
        if singleChar != 39 {
            libgogo.ExitError(">> Scanner: Only single characters allowed. Use corresponding integer for special characters. Exiting.",1);
        }
        done = 1;
    }

    // left brace (
    if (done != 1) && singleChar == '(' {
        tok.id = TOKEN_LBRAC;
        done = 1;
    }

    // right brace )
    if (done != 1) && singleChar == ')' {
        tok.id = TOKEN_RBRAC;
        done = 1;
    }

    // left square bracket [
    if (done != 1) && singleChar == '[' {
        tok.id = TOKEN_LSBRAC;
        done = 1;
    }

    // right square bracket ]
    if (done != 1) && singleChar == ']' {
        tok.id = TOKEN_RSBRAC;
        done = 1;
    }

    // integer (ASCII 48..57 are the digits '0'..'9')
    if (done != 1) && singleChar > 47 && singleChar < 58 {
        var byteBuf [255]byte;
        var i uint64;
        for i = 0; singleChar > 47 && singleChar < 58 ; singleChar = libgogo.GetChar(fd) {
            byteBuf[i] = singleChar;
            i = i +1;
        }
        tok.nextChar = singleChar;
        tok.id = TOKEN_INTEGER;
        tok.intValue = libgogo.ByteBufToInt(byteBuf,i);
        done = 1;
    }

    // Left curly bracket '{'
    if (done != 1) && singleChar == '{' {
        tok.id = TOKEN_LCBRAC;
        done = 1;
    }

    // Right curly bracket '}'
    if (done != 1) && singleChar == '}' {
        tok.id = TOKEN_RCBRAC;
        done = 1;
    }

    // Point '.'
    if (done != 1) && singleChar == '.' {
        tok.id = TOKEN_PT;
        done = 1;
    }

    // Not ('!') or Not Equal ('!=')
    if (done != 1) && singleChar == '!' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '=' {
            tok.id = TOKEN_NOTEQUAL;
        } else {
            tok.id = TOKEN_NOT;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // Semicolon ';'
    if (done != 1) && singleChar == ';' {
        tok.id = TOKEN_SEMICOLON;
        done = 1;
    }

    // Colon ','
    if (done != 1) && singleChar == ',' {
        tok.id = TOKEN_COLON;
        done = 1;
    }

    // Assignment '=' or Equals comparison '=='
    if (done != 1) && singleChar == '=' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '=' {
            tok.id = TOKEN_EQUALS;
        } else {
            tok.id = TOKEN_ASSIGN;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // AND Relation '&&' (a single '&' is the address-of operator)
    if (done != 1) && singleChar == '&' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '&' {
            tok.id = TOKEN_REL_AND;
        } else {
            tok.id = TOKEN_OP_ADR;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // OR Relation '||'
    if (done != 1) && singleChar == '|' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '|' {
            tok.id = TOKEN_REL_OR;
        } else {
            libgogo.ExitError(">> Scanner: No binary OR (|) supported. Only ||.",1);
        }
        done = 1;
    }

    // Greater and Greater-Than relation
    if (done != 1) && singleChar == '>' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '=' {
            tok.id = TOKEN_REL_GTOE;
        } else {
            tok.id = TOKEN_REL_GT;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // Less and Less-Than relation
    if (done != 1) && singleChar == '<' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '=' {
            tok.id = TOKEN_REL_LTOE;
        } else {
            tok.id = TOKEN_REL_LT;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // Arithmetic operators. Note '*' and '/' only reach here when they did not
    // start a comment or close one in the skipping phase above.
    if (done != 1) && singleChar == '+' {
        tok.id = TOKEN_ARITH_PLUS;
        done = 1;
    }
    if (done != 1) && singleChar == '-' {
        tok.id = TOKEN_ARITH_MINUS;
        done = 1;
    }
    if (done != 1) && singleChar == '*' {
        tok.id = TOKEN_ARITH_MUL;
        done = 1;
    }
    if (done != 1) && singleChar == '/' {
        tok.id = TOKEN_ARITH_DIV;
        done = 1;
    }

    if done != 1 {
        libgogo.PrintString(">> Scanner: Unkown char '");
        libgogo.PrintChar(singleChar);
        libgogo.PrintString("'. ");
        libgogo.ExitError("Exiting.",1);
    }
}

//
// GetNextToken should be called by the parser. It bascially fetches the next
// token by calling GetNextTokenRaw() and filters the identifiers for known
// keywords.
//
func GetNextToken(fd uint64, tok *Token) {
    GetNextTokenRaw(fd,tok)

    // Convert identifier to keyworded tokens
    // (libgogo.StringCompare returns 0 on equality here)
    if tok.id == TOKEN_IDENTIFIER {
        if libgogo.StringCompare("if",tok.strValue) == 0 {
            tok.id = TOKEN_IF;
        }
        if libgogo.StringCompare("for",tok.strValue) == 0 {
            tok.id = TOKEN_FOR;
        }
        if libgogo.StringCompare("type",tok.strValue) == 0 {
            tok.id = TOKEN_TYPE;
        }
        if libgogo.StringCompare("const",tok.strValue) == 0 {
            tok.id = TOKEN_CONST;
        }
        if libgogo.StringCompare("var",tok.strValue) == 0 {
            tok.id = TOKEN_VAR;
        }
        if libgogo.StringCompare("struct", tok.strValue) == 0 {
            tok.id = TOKEN_STRUCT;
        }
        if libgogo.StringCompare("return", tok.strValue) == 0 {
            tok.id = TOKEN_RETURN;
        }
        if libgogo.StringCompare("func", tok.strValue) == 0 {
            tok.id = TOKEN_FUNC;
        }
        if libgogo.StringCompare("import", tok.strValue) == 0 {
            tok.id = TOKEN_IMPORT;
        }
        if libgogo.StringCompare("package", tok.strValue) == 0 {
            tok.id = TOKEN_PACKAGE;
        }
    }
}

//
// Debugging and temporary functions
//

// debugToken prints the id and payload (string or integer) of tok.
func debugToken(tok *Token) {
    libgogo.PrintString("---------------------\n");
    libgogo.PrintString("Token Id: ");
    libgogo.PrintNumber(tok.id);
    libgogo.PrintString("\n");
    if tok.id == TOKEN_IDENTIFIER || tok.id == TOKEN_STRING {
        libgogo.PrintString("Stored string: ");
        libgogo.PrintString(tok.strValue);
        libgogo.PrintString("\n");
    }
    if tok.id == TOKEN_INTEGER {
        libgogo.PrintString("Stored integer: ");
        libgogo.PrintNumber(tok.intValue);
        libgogo.PrintString("\n");
    }
}

// Temporary test function: dumps every token until end-of-stream.
func ScannerTest(fd uint64) {
    var tok Token;
    tok.id = 0;
    tok.nextChar = 0;
    for GetNextToken(fd,&tok); tok.id != TOKEN_EOS; GetNextToken(fd,&tok) {
        debugToken(&tok);
    }
}

// tmp_TokAppendStr appends one byte to the token's stored string.
func tmp_TokAppendStr(tok *Token, b byte) {
    tok.strValue += string(b);
}

// NOTE(review): the following stray text is a commit message from a history
// concatenation, preserved as a comment so it cannot affect compilation:
// scanner: StringCompare fixes. Printing still bugged.
// Copyright 2010 The GoGo Authors. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.

// This file holds the basic scanning routines that separate a source file
// into the various tokens

package main

import "./libgogo/_obj/libgogo"
import "fmt"

// Token struct holding the relevant data of a parsed token.
type Token struct {
    id uint64; // The id. Is one of TOKEN_*
    intValue uint64; // value storing the integer value if the token is TOKEN_INTEGER
    strValue string; // Value storing the token string if the token is TOKEN_STRING or TOKEN_IDENTIFIER
    nextChar byte; // Sometime the next char is already read. It is stored here to be re-assigned in the next GetNextToken() round
};

// GetNextTokenRaw scans the next raw token from file descriptor fd into tok.
// It skips whitespace and both comment styles, then classifies the next
// lexeme (identifier, string, quoted char, integer, or operator/punctuation).
// A one-byte lookahead is parked in tok.nextChar for the following call.
func GetNextTokenRaw(fd uint64, tok *Token) {
    var singleChar byte; // Byte holding the last read value
    // Flag indicating whether we are in a comment.
    // 0 for no comment
    // 1 for a single line comment
    // 2 for a multi line comment
    var inComment uint64;
    var done uint64; // Flag indicating whether a cycle (Token) is finsihed
    var spaceDone uint64; // Flag indicating whether an abolishment cycle is finished

    // Initialize variables
    done = 0;
    spaceDone = 0;
    inComment = 0;
    tok.strValue = "";

    // If the previous cycle had to read the next char (and stored it), it is
    // now used as first read
    if tok.nextChar == 0 {
        singleChar = libgogo.GetChar(fd)
    } else {
        singleChar = tok.nextChar;
        tok.nextChar = 0;
    }

    // check if it is a valid read, or an EOF
    if singleChar == 0 {
        tok.id = TOKEN_EOS;
        done = 1;
        spaceDone = 1;
    }

    //
    // Cleaning Tasks
    // The next part strips out spaces, newlines, tabs, and comments
    // Comments can either be single line with double slashes (//) or multiline
    // using C++ syntax /* */
    //
    for ; spaceDone != 1; {
        // check whether a comment is starting
        if singleChar == '/' {
            // if we are in a comment skip the rest, get the next char otherwise
            if inComment == 0 {
                singleChar = libgogo.GetChar(fd);
                if singleChar == '/' {
                    // we are in a single line comment (until newline is found)
                    inComment = 1;
                } else {
                    if singleChar == '*' {
                        // we are in a multiline comment (until ending is found)
                        inComment = 2;
                    } else {
                        libgogo.ExitError(">> Scanner: Unkown character combination for comments. Exiting.",1);
                    }
                }
            }
        }

        // check whether a multi-line comment is ending
        if singleChar == '*' {
            singleChar = libgogo.GetChar(fd);
            if singleChar == '/' {
                if inComment == 2 {
                    inComment = 0;
                    singleChar = libgogo.GetChar(fd);
                }
            }
        }

        // if character is a newline:
        // *) if in a singleline comment, exit the comment
        // *) skip otherwise
        if singleChar == 10 {
            if inComment == 1 {
                inComment = 0;
            }
        }

        // handle everything that is not a space,tab,newline
        if singleChar != ' ' && singleChar != 9 && singleChar != 10 {
            // if not in a comment we have our current valid char
            if inComment == 0 {
                spaceDone = 1;
            }
            // check if GetChar() returned EOF while skipping
            if singleChar == 0 {
                tok.id = TOKEN_EOS;
                spaceDone = 1;
                done = 1;
            }
        }

        // if we are not done until now, get a new character and start another abolishing cycle
        if spaceDone == 0 {
            singleChar=libgogo.GetChar(fd);
        }
    }

    //
    // Actual scanning part starts here
    //

    // Catch identifiers
    // identifier = letter { letter | digit }.
    if (done != 1) && (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' { // check for letter or _
        tok.id = TOKEN_IDENTIFIER;
        // preceding characters may be letter,_, or a number
        for ; (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' || (singleChar >= '0' && singleChar <= '9'); singleChar = libgogo.GetChar(fd) {
            tmp_TokAppendStr(tok,singleChar);
        }
        // save the last read character for the next GetNextToken() cycle
        tok.nextChar = singleChar;
        done = 1;
    }

    // string "..."
    if (done != 1) && singleChar == '"' {
        tok.id = TOKEN_STRING;
        // accept printable ASCII (32..126) up to the closing quote
        for singleChar = libgogo.GetChar(fd); singleChar != '"' &&singleChar > 31 && singleChar < 127;singleChar = libgogo.GetChar(fd) {
            tmp_TokAppendStr(tok,singleChar);
        }
        if singleChar != '"' {
            libgogo.ExitError(">> Scanner: String not closing. Exiting.",1);
        }
        done = 1;
    }

    // Single Quoted Character (39 is the ASCII code of a single quote)
    if (done != 1) && singleChar == 39 {
        singleChar = libgogo.GetChar(fd);
        if singleChar != 39 && singleChar > 31 && singleChar < 127 {
            tok.id = TOKEN_INTEGER;
            tok.intValue = libgogo.ToIntFromByte(singleChar);
        } else {
            libgogo.ExitError(">> Scanner: Unknown character. Exiting.",1);
        }
        singleChar = libgogo.GetChar(fd);
        if singleChar != 39 {
            libgogo.ExitError(">> Scanner: Only single characters allowed. Use corresponding integer for special characters. Exiting.",1);
        }
        done = 1;
    }

    // left brace (
    if (done != 1) && singleChar == '(' {
        tok.id = TOKEN_LBRAC;
        done = 1;
    }

    // right brace )
    if (done != 1) && singleChar == ')' {
        tok.id = TOKEN_RBRAC;
        done = 1;
    }

    // left square bracket [
    if (done != 1) && singleChar == '[' {
        tok.id = TOKEN_LSBRAC;
        done = 1;
    }

    // right square bracket ]
    if (done != 1) && singleChar == ']' {
        tok.id = TOKEN_RSBRAC;
        done = 1;
    }

    // integer (ASCII 48..57 are the digits '0'..'9')
    if (done != 1) && singleChar > 47 && singleChar < 58 {
        var byteBuf [255]byte;
        var i uint64;
        for i = 0; singleChar > 47 && singleChar < 58 ; singleChar = libgogo.GetChar(fd) {
            byteBuf[i] = singleChar;
            i = i +1;
        }
        tok.nextChar = singleChar;
        tok.id = TOKEN_INTEGER;
        tok.intValue = libgogo.ByteBufToInt(byteBuf,i);
        done = 1;
    }

    // Left curly bracket '{'
    if (done != 1) && singleChar == '{' {
        tok.id = TOKEN_LCBRAC;
        done = 1;
    }

    // Right curly bracket '}'
    if (done != 1) && singleChar == '}' {
        tok.id = TOKEN_RCBRAC;
        done = 1;
    }

    // Point '.'
    if (done != 1) && singleChar == '.' {
        tok.id = TOKEN_PT;
        done = 1;
    }

    // Not ('!') or Not Equal ('!=')
    if (done != 1) && singleChar == '!' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '=' {
            tok.id = TOKEN_NOTEQUAL;
        } else {
            tok.id = TOKEN_NOT;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // Semicolon ';'
    if (done != 1) && singleChar == ';' {
        tok.id = TOKEN_SEMICOLON;
        done = 1;
    }

    // Colon ','
    if (done != 1) && singleChar == ',' {
        tok.id = TOKEN_COLON;
        done = 1;
    }

    // Assignment '=' or Equals comparison '=='
    if (done != 1) && singleChar == '=' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '=' {
            tok.id = TOKEN_EQUALS;
        } else {
            tok.id = TOKEN_ASSIGN;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // AND Relation '&&' (a single '&' is the address-of operator)
    if (done != 1) && singleChar == '&' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '&' {
            tok.id = TOKEN_REL_AND;
        } else {
            tok.id = TOKEN_OP_ADR;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // OR Relation '||'
    if (done != 1) && singleChar == '|' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '|' {
            tok.id = TOKEN_REL_OR;
        } else {
            libgogo.ExitError(">> Scanner: No binary OR (|) supported. Only ||.",1);
        }
        done = 1;
    }

    // Greater and Greater-Than relation
    if (done != 1) && singleChar == '>' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '=' {
            tok.id = TOKEN_REL_GTOE;
        } else {
            tok.id = TOKEN_REL_GT;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // Less and Less-Than relation
    if (done != 1) && singleChar == '<' {
        singleChar = libgogo.GetChar(fd);
        if singleChar == '=' {
            tok.id = TOKEN_REL_LTOE;
        } else {
            tok.id = TOKEN_REL_LT;
            tok.nextChar = singleChar;
        }
        done = 1;
    }

    // Arithmetic operators. Note '*' and '/' only reach here when they did not
    // start a comment or close one in the skipping phase above.
    if (done != 1) && singleChar == '+' {
        tok.id = TOKEN_ARITH_PLUS;
        done = 1;
    }
    if (done != 1) && singleChar == '-' {
        tok.id = TOKEN_ARITH_MINUS;
        done = 1;
    }
    if (done != 1) && singleChar == '*' {
        tok.id = TOKEN_ARITH_MUL;
        done = 1;
    }
    if (done != 1) && singleChar == '/' {
        tok.id = TOKEN_ARITH_DIV;
        done = 1;
    }

    if done != 1 {
        libgogo.PrintString(">> Scanner: Unkown char '");
        libgogo.PrintChar(singleChar);
        libgogo.PrintString("'. ");
        libgogo.ExitError("Exiting.",1);
    }
}

//
// GetNextToken should be called by the parser.
It bascially fetches the next // token by calling GetNextTokenRaw() and filters the identifiers for known // keywords. // func GetNextToken(fd uint64, tok *Token) { GetNextTokenRaw(fd,tok) // Convert identifier to keyworded tokens if tok.id == TOKEN_IDENTIFIER { if libgogo.StringCompare("if",tok.strValue) != 0 { tok.id = TOKEN_IF; } if libgogo.StringCompare("for",tok.strValue) != 0 { tok.id = TOKEN_FOR; } if libgogo.StringCompare("type",tok.strValue) != 0 { tok.id = TOKEN_TYPE; } if libgogo.StringCompare("const",tok.strValue) != 0 { tok.id = TOKEN_CONST; } if libgogo.StringCompare("var",tok.strValue) != 0 { tok.id = TOKEN_VAR; } if libgogo.StringCompare("struct", tok.strValue) != 0 { tok.id = TOKEN_STRUCT; } if libgogo.StringCompare("return", tok.strValue) != 0 { tok.id = TOKEN_RETURN; } if libgogo.StringCompare("func", tok.strValue) != 0 { tok.id = TOKEN_FUNC; } if libgogo.StringCompare("import", tok.strValue) != 0 { tok.id = TOKEN_IMPORT; } if libgogo.StringCompare("package", tok.strValue) != 0 { tok.id = TOKEN_PACKAGE; } } } // // Debugging and temporary functions // func debugToken(tok *Token) { libgogo.PrintString("---------------------\n"); libgogo.PrintString("Token Id: "); libgogo.PrintNumber(tok.id); libgogo.PrintString("\n"); if tok.id == TOKEN_IDENTIFIER || tok.id == TOKEN_STRING { libgogo.PrintString("Stored string: "); fmt.Printf(tok.strValue); libgogo.PrintString("\n"); } if tok.id == TOKEN_INTEGER { libgogo.PrintString("Stored integer: "); libgogo.PrintNumber(tok.intValue); libgogo.PrintString("\n"); } } // Temporary test function func ScannerTest(fd uint64) { var tok Token; var tmpCnt uint64; tok.id = 0; tok.nextChar = 0; tmpCnt = 0; for GetNextToken(fd,&tok); tok.id != TOKEN_EOS; GetNextToken(fd,&tok) { debugToken(&tok); if tmpCnt == 5 { libgogo.Exit(0); } tmpCnt = tmpCnt + 1; } } func tmp_TokAppendStr(tok *Token, b byte) { tok.strValue += string(b); }
// Copyright 2010 The GoGo Authors. All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package main import "./libgogo/_obj/libgogo" import "fmt" // // Token struct holding the relevant data of a parsed token. // type Token struct { id uint64; // The id. Is one of TOKEN_* /* value storing the integer value if the token is TOKEN_INTEGER */ intValue uint64; /* Value that should be used instead of byte arrays */ newValue string; nextChar byte; // Sometime the next char is already read. It is stored here to be re-assigned in the next GetNextToken() round }; func GetNextTokenRaw(fd uint64, tok *Token) { var singleChar byte; // Byte holding the last read value /* * Flag indicating whether we are in a comment. * 0 for no comment * 1 for a single line comment * 2 for a multi line comment */ var inComment uint64; var done uint64; // Flag indicating whether a cycle (Token) is finsihed var spaceDone uint64; // Flag indicating whether an abolishment cycle is finished // Initialize variables done = 0; spaceDone = 0; inComment = 0; // If the old Token had to read the next char (and stored it), we can now // get it back if tok.nextChar == 0 { singleChar=libgogo.GetChar(fd) } else { singleChar = tok.nextChar; tok.nextChar = 0; } // check if it is a valid read, or an EOF if singleChar == 0 { tok.id = TOKEN_EOS; done = 1; spaceDone = 1; } // // Cleaning Tasks // The next part strips out spaces, newlines, tabs, and comments // Comments can either be single line with double slashes (//) or multiline // using C++ syntax /* */ // for ; spaceDone != 1; { // check whether a comment is starting if singleChar == '/' { // if we are in a comment skip the rest, get the next char otherwise if inComment == 0 { singleChar = libgogo.GetChar(fd); if singleChar == '/' { // we are in a single line comment (until newline is found) inComment = 1; } else { if singleChar == '*' { // we are in a multiline comment (until ending is found) 
inComment = 2; } else { libgogo.ExitError(">> Scanner: Unkown character combination for comments. Exiting.",1); } } } } // check whether a multi-line comment is ending if singleChar == '*' { singleChar = libgogo.GetChar(fd); if singleChar == '/' { if inComment == 2 { inComment = 0; singleChar = libgogo.GetChar(fd); } } } // if character is a newline: // *) if in a singleline comment, exit the comment // *) skip otherwise if singleChar == 10 { if inComment == 1 { inComment = 0; } } // handle everything that is not a space,tab,newline if singleChar != ' ' && singleChar != 9 && singleChar != 10 { // if not in a comment we have our current valid char if inComment == 0 { spaceDone = 1; } // check if GetChar() returned EOF while skipping if singleChar == 0 { tok.id = TOKEN_EOS; spaceDone = 1; done = 1; } } // if we are not done until now, get a new character and start another abolishing cycle if spaceDone == 0 { singleChar=libgogo.GetChar(fd); } } // // Actual scanning part starts here // // Catch identifiers // identifier = letter { letter | digit }. if (done != 1) && (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' { // check for letter or _ tok.id = TOKEN_IDENTIFIER; // preceding characters may be letter,_, or a number for ; (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' || (singleChar >= '0' && singleChar <= '9'); singleChar = libgogo.GetChar(fd) { tmp_StrAppend(tok.newValue,singleChar); } // save the last read character for the next GetNextToken() cycle tok.nextChar = singleChar; done = 1; } // string "..." if (done != 1) && singleChar == '"' { tok.id = TOKEN_STRING; for singleChar = libgogo.GetChar(fd); singleChar != '"' &&singleChar > 31 && singleChar < 127;singleChar = libgogo.GetChar(fd) { tmp_StrAppend(tok.newValue,singleChar); } if singleChar != '"' { libgogo.ExitError(">> Scanner: String not closing. 
Exiting.",1); } done = 1; } // Single Quoted Character if (done != 1) && singleChar == 39 { singleChar = libgogo.GetChar(fd); if singleChar != 39 && singleChar > 31 && singleChar < 127 { tok.id = TOKEN_INTEGER; tok.intValue = tmp_toInt(singleChar); } else { libgogo.ExitError(">> Scanner: Unknown character. Exiting.",1); } singleChar = libgogo.GetChar(fd); if singleChar != 39 { libgogo.ExitError(">> Scanner: Only single characters allowed. Use corresponding integer for special characters. Exiting.",1); } done = 1; } // left brace ( if (done != 1) && singleChar == '(' { tok.id = TOKEN_LBRAC; done = 1; } // right brace ) if (done != 1) && singleChar == ')' { tok.id = TOKEN_RBRAC; done = 1; } // left square bracket [ if (done != 1) && singleChar == '[' { tok.id = TOKEN_LSBRAC; done = 1; } // right square bracket ] if (done != 1) && singleChar == ']' { tok.id = TOKEN_RSBRAC; done = 1; } // integer if (done != 1) && singleChar > 47 && singleChar < 58 { var byteBuf [255]byte; var i uint64; for i = 0; singleChar > 47 && singleChar < 58 ; singleChar = libgogo.GetChar(fd) { byteBuf[i] = singleChar; i = i +1; } tok.nextChar = singleChar; tok.id = TOKEN_INTEGER; tok.intValue = libgogo.ByteBufToInt(byteBuf,i); done = 1; } // Left curly bracket '{' if (done != 1) && singleChar == '{' { tok.id = TOKEN_LCBRAC; done = 1; } // Right curly bracket '}' if (done != 1) && singleChar == '}' { tok.id = TOKEN_RCBRAC; done = 1; } // Point '.' if (done != 1) && singleChar == '.' { tok.id = TOKEN_PT; done = 1; } // Not ('!') or Not Equal ('!=') if (done != 1) && singleChar == '!' 
{ singleChar = libgogo.GetChar(fd); if singleChar == '=' { tok.id = TOKEN_NOTEQUAL; } else { tok.id = TOKEN_NOT; tok.nextChar = singleChar; } done = 1; } // Semicolon ';' if (done != 1) && singleChar == ';' { tok.id = TOKEN_SEMICOLON; done = 1; } // Colon ',' if (done != 1) && singleChar == ',' { tok.id = TOKEN_COLON; done = 1; } // Assignment '=' or Equals comparison '==' if (done != 1) && singleChar == '=' { singleChar = libgogo.GetChar(fd); if singleChar == '=' { tok.id = TOKEN_EQUALS; } else { tok.id = TOKEN_ASSIGN; tok.nextChar = singleChar; } done = 1; } // AND Relation '&&' if (done != 1) && singleChar == '&' { singleChar = libgogo.GetChar(fd); if singleChar == '&' { tok.id = TOKEN_REL_AND; } else { tok.id = TOKEN_OP_ADR; tok.nextChar = singleChar; } done = 1; } // OR Relation '||' if (done != 1) && singleChar == '|' { singleChar = libgogo.GetChar(fd); if singleChar == '|' { tok.id = TOKEN_REL_OR; } else { libgogo.ExitError(">> Scanner: No binary OR (|) supported. Only ||.",1); } done = 1; } // Greater and Greater-Than relation if (done != 1) && singleChar == '>' { singleChar = libgogo.GetChar(fd); if singleChar == '=' { tok.id = TOKEN_REL_GTOE; } else { tok.id = TOKEN_REL_GT; tok.nextChar = singleChar; } done = 1; } // Less and Less-Than relation if (done != 1) && singleChar == '<' { singleChar = libgogo.GetChar(fd); if singleChar == '=' { tok.id = TOKEN_REL_LTOE; } else { tok.id = TOKEN_REL_LT; tok.nextChar = singleChar; } done = 1; } if (done != 1) && singleChar == '+' { tok.id = TOKEN_ARITH_PLUS; done = 1; } if (done != 1) && singleChar == '-' { tok.id = TOKEN_ARITH_MINUS; done = 1; } if (done != 1) && singleChar == '*' { tok.id = TOKEN_ARITH_MUL; done = 1; } if (done != 1) && singleChar == '/' { tok.id = TOKEN_ARITH_DIV; done = 1; } if done != 1 { libgogo.PrintString(">> Scanner: Unkown char '"); libgogo.PrintChar(singleChar); libgogo.PrintString("'. 
"); libgogo.ExitError("Exiting.",1); } } func GetNextToken(fd uint64, tok *Token) { GetNextTokenRaw(fd,tok) // Convert identifier to keyworded tokens if tok.id == TOKEN_IDENTIFIER { if tmp_StrCmp("if",tok.newValue) == 0 { tok.id = TOKEN_IF; } if tmp_StrCmp("for",tok.newValue) == 0 { tok.id = TOKEN_FOR; } if tmp_StrCmp("type",tok.newValue) == 0 { tok.id = TOKEN_TYPE; } if tmp_StrCmp("const",tok.newValue) == 0 { tok.id = TOKEN_CONST; } if tmp_StrCmp("var",tok.newValue) == 0 { tok.id = TOKEN_VAR; } if tmp_StrCmp("struct", tok.newValue) == 0 { tok.id = TOKEN_STRUCT; } if tmp_StrCmp("return", tok.newValue) == 0 { tok.id = TOKEN_RETURN; } if tmp_StrCmp("func", tok.newValue) == 0 { tok.id = TOKEN_FUNC; } if tmp_StrCmp("import", tok.newValue) == 0 { tok.id = TOKEN_IMPORT; } if tmp_StrCmp("package", tok.newValue) == 0 { tok.id = TOKEN_PACKAGE; } } } func debugToken (tok Token) { /* libgogo.PrintString("Token Id: "); libgogo.PrintNumber(tok.id); libgogo.PrintString("\n"); if tok.id == TOKEN_IDENTIFIER || tok.id == TOKEN_STRING { libgogo.PrintString("Identifier/String value: "); libgogo.PrintByteBuf(tok.value); libgogo.PrintString("\n"); }*/ } // Temporary test function func ScannerTest(fd uint64) { var tok Token; tok.id = 0; tok.nextChar = 0; for GetNextToken(fd,&tok); tok.id != TOKEN_EOS; GetNextToken(fd,&tok) { fmt.Printf("%d\n",tok.id); } } // libgogo ... func tmp_StrAppend(str string, b byte) { str += string(b); } // libgogo ... func tmp_StrLen(str string) int { return len(str); } // libgogo ... func tmp_StrCmp(str1 string, str2 string) uint64 { var ret uint64; if str1 == str2 { ret = 0; } else { ret = 1; } return ret; } // libgogo ... func tmp_toInt(b byte) uint64 { return uint64(b); } scanner: Add comments and remove useless functions (cleanup) // Copyright 2010 The GoGo Authors. All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. 
// This file holds the basic scanning routines that separate a source file // into the various tokens package main import "./libgogo/_obj/libgogo" import "fmt" // Token struct holding the relevant data of a parsed token. type Token struct { id uint64; // The id. Is one of TOKEN_* intValue uint64; // value storing the integer value if the token is TOKEN_INTEGER newValue string; // Value that should be used instead of byte arrays nextChar byte; // Sometime the next char is already read. It is stored here to be re-assigned in the next GetNextToken() round }; func GetNextTokenRaw(fd uint64, tok *Token) { var singleChar byte; // Byte holding the last read value // Flag indicating whether we are in a comment. // 0 for no comment // 1 for a single line comment // 2 for a multi line comment var inComment uint64; var done uint64; // Flag indicating whether a cycle (Token) is finsihed var spaceDone uint64; // Flag indicating whether an abolishment cycle is finished // Initialize variables done = 0; spaceDone = 0; inComment = 0; // If the previous cycle had to read the next char (and stored it), it is // now used as first read if tok.nextChar == 0 { singleChar = libgogo.GetChar(fd) } else { singleChar = tok.nextChar; tok.nextChar = 0; } // check if it is a valid read, or an EOF if singleChar == 0 { tok.id = TOKEN_EOS; done = 1; spaceDone = 1; } // // Cleaning Tasks // The next part strips out spaces, newlines, tabs, and comments // Comments can either be single line with double slashes (//) or multiline // using C++ syntax /* */ // for ; spaceDone != 1; { // check whether a comment is starting if singleChar == '/' { // if we are in a comment skip the rest, get the next char otherwise if inComment == 0 { singleChar = libgogo.GetChar(fd); if singleChar == '/' { // we are in a single line comment (until newline is found) inComment = 1; } else { if singleChar == '*' { // we are in a multiline comment (until ending is found) inComment = 2; } else { libgogo.ExitError(">> Scanner: 
Unkown character combination for comments. Exiting.",1); } } } } // check whether a multi-line comment is ending if singleChar == '*' { singleChar = libgogo.GetChar(fd); if singleChar == '/' { if inComment == 2 { inComment = 0; singleChar = libgogo.GetChar(fd); } } } // if character is a newline: // *) if in a singleline comment, exit the comment // *) skip otherwise if singleChar == 10 { if inComment == 1 { inComment = 0; } } // handle everything that is not a space,tab,newline if singleChar != ' ' && singleChar != 9 && singleChar != 10 { // if not in a comment we have our current valid char if inComment == 0 { spaceDone = 1; } // check if GetChar() returned EOF while skipping if singleChar == 0 { tok.id = TOKEN_EOS; spaceDone = 1; done = 1; } } // if we are not done until now, get a new character and start another abolishing cycle if spaceDone == 0 { singleChar=libgogo.GetChar(fd); } } // // Actual scanning part starts here // // Catch identifiers // identifier = letter { letter | digit }. if (done != 1) && (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' { // check for letter or _ tok.id = TOKEN_IDENTIFIER; // preceding characters may be letter,_, or a number for ; (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' || (singleChar >= '0' && singleChar <= '9'); singleChar = libgogo.GetChar(fd) { tmp_StrAppend(tok.newValue,singleChar); } // save the last read character for the next GetNextToken() cycle tok.nextChar = singleChar; done = 1; } // string "..." if (done != 1) && singleChar == '"' { tok.id = TOKEN_STRING; for singleChar = libgogo.GetChar(fd); singleChar != '"' &&singleChar > 31 && singleChar < 127;singleChar = libgogo.GetChar(fd) { tmp_StrAppend(tok.newValue,singleChar); } if singleChar != '"' { libgogo.ExitError(">> Scanner: String not closing. 
Exiting.",1); } done = 1; } // Single Quoted Character if (done != 1) && singleChar == 39 { singleChar = libgogo.GetChar(fd); if singleChar != 39 && singleChar > 31 && singleChar < 127 { tok.id = TOKEN_INTEGER; tok.intValue = tmp_toInt(singleChar); } else { libgogo.ExitError(">> Scanner: Unknown character. Exiting.",1); } singleChar = libgogo.GetChar(fd); if singleChar != 39 { libgogo.ExitError(">> Scanner: Only single characters allowed. Use corresponding integer for special characters. Exiting.",1); } done = 1; } // left brace ( if (done != 1) && singleChar == '(' { tok.id = TOKEN_LBRAC; done = 1; } // right brace ) if (done != 1) && singleChar == ')' { tok.id = TOKEN_RBRAC; done = 1; } // left square bracket [ if (done != 1) && singleChar == '[' { tok.id = TOKEN_LSBRAC; done = 1; } // right square bracket ] if (done != 1) && singleChar == ']' { tok.id = TOKEN_RSBRAC; done = 1; } // integer if (done != 1) && singleChar > 47 && singleChar < 58 { var byteBuf [255]byte; var i uint64; for i = 0; singleChar > 47 && singleChar < 58 ; singleChar = libgogo.GetChar(fd) { byteBuf[i] = singleChar; i = i +1; } tok.nextChar = singleChar; tok.id = TOKEN_INTEGER; tok.intValue = libgogo.ByteBufToInt(byteBuf,i); done = 1; } // Left curly bracket '{' if (done != 1) && singleChar == '{' { tok.id = TOKEN_LCBRAC; done = 1; } // Right curly bracket '}' if (done != 1) && singleChar == '}' { tok.id = TOKEN_RCBRAC; done = 1; } // Point '.' if (done != 1) && singleChar == '.' { tok.id = TOKEN_PT; done = 1; } // Not ('!') or Not Equal ('!=') if (done != 1) && singleChar == '!' 
{ singleChar = libgogo.GetChar(fd); if singleChar == '=' { tok.id = TOKEN_NOTEQUAL; } else { tok.id = TOKEN_NOT; tok.nextChar = singleChar; } done = 1; } // Semicolon ';' if (done != 1) && singleChar == ';' { tok.id = TOKEN_SEMICOLON; done = 1; } // Colon ',' if (done != 1) && singleChar == ',' { tok.id = TOKEN_COLON; done = 1; } // Assignment '=' or Equals comparison '==' if (done != 1) && singleChar == '=' { singleChar = libgogo.GetChar(fd); if singleChar == '=' { tok.id = TOKEN_EQUALS; } else { tok.id = TOKEN_ASSIGN; tok.nextChar = singleChar; } done = 1; } // AND Relation '&&' if (done != 1) && singleChar == '&' { singleChar = libgogo.GetChar(fd); if singleChar == '&' { tok.id = TOKEN_REL_AND; } else { tok.id = TOKEN_OP_ADR; tok.nextChar = singleChar; } done = 1; } // OR Relation '||' if (done != 1) && singleChar == '|' { singleChar = libgogo.GetChar(fd); if singleChar == '|' { tok.id = TOKEN_REL_OR; } else { libgogo.ExitError(">> Scanner: No binary OR (|) supported. Only ||.",1); } done = 1; } // Greater and Greater-Than relation if (done != 1) && singleChar == '>' { singleChar = libgogo.GetChar(fd); if singleChar == '=' { tok.id = TOKEN_REL_GTOE; } else { tok.id = TOKEN_REL_GT; tok.nextChar = singleChar; } done = 1; } // Less and Less-Than relation if (done != 1) && singleChar == '<' { singleChar = libgogo.GetChar(fd); if singleChar == '=' { tok.id = TOKEN_REL_LTOE; } else { tok.id = TOKEN_REL_LT; tok.nextChar = singleChar; } done = 1; } if (done != 1) && singleChar == '+' { tok.id = TOKEN_ARITH_PLUS; done = 1; } if (done != 1) && singleChar == '-' { tok.id = TOKEN_ARITH_MINUS; done = 1; } if (done != 1) && singleChar == '*' { tok.id = TOKEN_ARITH_MUL; done = 1; } if (done != 1) && singleChar == '/' { tok.id = TOKEN_ARITH_DIV; done = 1; } if done != 1 { libgogo.PrintString(">> Scanner: Unkown char '"); libgogo.PrintChar(singleChar); libgogo.PrintString("'. "); libgogo.ExitError("Exiting.",1); } } // // GetNextToken should be called by the parser. 
It bascially fetches the next // token by calling GetNextTokenRaw() and filters the identifiers for known // keywords. // func GetNextToken(fd uint64, tok *Token) { GetNextTokenRaw(fd,tok) // Convert identifier to keyworded tokens if tok.id == TOKEN_IDENTIFIER { if tmp_StrCmp("if",tok.newValue) == 0 { tok.id = TOKEN_IF; } if tmp_StrCmp("for",tok.newValue) == 0 { tok.id = TOKEN_FOR; } if tmp_StrCmp("type",tok.newValue) == 0 { tok.id = TOKEN_TYPE; } if tmp_StrCmp("const",tok.newValue) == 0 { tok.id = TOKEN_CONST; } if tmp_StrCmp("var",tok.newValue) == 0 { tok.id = TOKEN_VAR; } if tmp_StrCmp("struct", tok.newValue) == 0 { tok.id = TOKEN_STRUCT; } if tmp_StrCmp("return", tok.newValue) == 0 { tok.id = TOKEN_RETURN; } if tmp_StrCmp("func", tok.newValue) == 0 { tok.id = TOKEN_FUNC; } if tmp_StrCmp("import", tok.newValue) == 0 { tok.id = TOKEN_IMPORT; } if tmp_StrCmp("package", tok.newValue) == 0 { tok.id = TOKEN_PACKAGE; } } } // // Debugging and temporary functions // // Temporary test function func ScannerTest(fd uint64) { var tok Token; tok.id = 0; tok.nextChar = 0; for GetNextToken(fd,&tok); tok.id != TOKEN_EOS; GetNextToken(fd,&tok) { fmt.Printf("%d\n",tok.id); } } // libgogo ... func tmp_StrAppend(str string, b byte) { str += string(b); } // libgogo ... func tmp_StrLen(str string) int { return len(str); } // libgogo ... func tmp_StrCmp(str1 string, str2 string) uint64 { var ret uint64; if str1 == str2 { ret = 0; } else { ret = 1; } return ret; } // libgogo ... func tmp_toInt(b byte) uint64 { return uint64(b); }
package server

import (
	"context"
	"strconv"

	client "github.com/pachyderm/pachyderm/src/client"
	"github.com/pachyderm/pachyderm/src/client/enterprise"
	"github.com/pachyderm/pachyderm/src/client/pps"
	"github.com/pachyderm/pachyderm/src/client/version"
	"github.com/pachyderm/pachyderm/src/server/pkg/deploy/assets"
	"github.com/pachyderm/pachyderm/src/server/pkg/ppsutil"
	"github.com/pachyderm/pachyderm/src/server/worker"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Parameters used when creating the kubernetes replication controller in charge
// of a job or pipeline's workers
type workerOptions struct {
	rcName           string                    // Name of the replication controller managing workers
	userImage        string                    // The user's pipeline/job image
	labels           map[string]string         // k8s labels attached to the RC and workers
	annotations      map[string]string         // k8s annotations attached to the RC and workers
	parallelism      int32                     // Number of replicas the RC maintains
	cacheSize        string                    // Size of cache that sidecar uses
	resourceRequests *v1.ResourceList          // Resources requested by pipeline/job pods
	resourceLimits   *v1.ResourceList          // Resource limits applied to pipeline/job pods
	workerEnv        []v1.EnvVar               // Environment vars set in the user container
	volumes          []v1.Volume               // Volumes that we expose to the user container
	volumeMounts     []v1.VolumeMount          // Paths where we mount each volume in 'volumes'
	etcdPrefix       string                    // the prefix in etcd to use
	// Secrets that we mount in the worker container (e.g. for reading/writing to
	// s3)
	imagePullSecrets []v1.LocalObjectReference
	service          *pps.Service              // Optional user-facing service exposed by createWorkerRc
}

// workerPodSpec builds the PodSpec for a pipeline/job worker pod: an init
// container that installs the worker binary, the user container running the
// pipeline image, and a storage sidecar. Env vars, volumes and resource
// requests are derived from options and the server's storage configuration.
func (a *apiServer) workerPodSpec(options *workerOptions) (v1.PodSpec, error) {
	pullPolicy := a.workerImagePullPolicy
	if pullPolicy == "" {
		pullPolicy = "IfNotPresent"
	}
	sidecarEnv := []v1.EnvVar{{
		Name:  "BLOCK_CACHE_BYTES",
		Value: options.cacheSize,
	}, {
		Name:  "PFS_CACHE_SIZE",
		Value: "16",
	}, {
		Name:  "PACH_ROOT",
		Value: a.storageRoot,
	}, {
		Name:  "STORAGE_BACKEND",
		Value: a.storageBackend,
	}}
	sidecarEnv = append(sidecarEnv, assets.GetSecretEnvVars(a.storageBackend)...)
	workerEnv := options.workerEnv
	workerEnv = append(options.workerEnv, v1.EnvVar{Name: "PACH_ROOT", Value: a.storageRoot})
	workerEnv = append(workerEnv, assets.GetSecretEnvVars(a.storageBackend)...)
	// This only happens in local deployment. We want the workers to be
	// able to read from/write to the hostpath volume as well.
	storageVolumeName := "pach-disk"
	var sidecarVolumeMounts []v1.VolumeMount
	// NOTE(review): this assignment copies only the slice header, so the
	// appends below can write into options.volumeMounts' backing array —
	// fixed in the next revision of this file with make+copy.
	userVolumeMounts := options.volumeMounts
	if a.storageHostPath != "" {
		options.volumes = append(options.volumes, v1.Volume{
			Name: storageVolumeName,
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: a.storageHostPath,
				},
			},
		})
		storageMount := v1.VolumeMount{
			Name:      storageVolumeName,
			MountPath: a.storageRoot,
		}
		sidecarVolumeMounts = append(sidecarVolumeMounts, storageMount)
		userVolumeMounts = append(userVolumeMounts, storageMount)
	}
	secretVolume, secretMount := assets.GetBackendSecretVolumeAndMount(a.storageBackend)
	options.volumes = append(options.volumes, secretVolume)
	options.volumeMounts = append(options.volumeMounts, secretMount)
	sidecarVolumeMounts = append(sidecarVolumeMounts, secretMount)
	userVolumeMounts = append(userVolumeMounts, secretMount)
	// Explicitly set CPU and MEM requests to zero because some cloud
	// providers set their own defaults which are usually not what we want.
	cpuZeroQuantity := resource.MustParse("0")
	memZeroQuantity := resource.MustParse("0M")
	memSidecarQuantity := resource.MustParse(options.cacheSize)
	if !a.noExposeDockerSocket {
		options.volumes = append(options.volumes, v1.Volume{
			Name: "docker",
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: "/var/run/docker.sock",
				},
			},
		})
		userVolumeMounts = append(userVolumeMounts, v1.VolumeMount{
			Name:      "docker",
			MountPath: "/var/run/docker.sock",
		})
	}
	zeroVal := int64(0)
	workerImage := a.workerImage
	resp, err := a.getPachClient().Enterprise.GetState(context.Background(), &enterprise.GetStateRequest{})
	if err != nil {
		return v1.PodSpec{}, err
	}
	if resp.State != enterprise.State_ACTIVE {
		workerImage = assets.AddRegistry("", workerImage)
	}
	podSpec := v1.PodSpec{
		InitContainers: []v1.Container{
			{
				Name:            "init",
				Image:           workerImage,
				Command:         []string{"/pach/worker.sh"},
				ImagePullPolicy: v1.PullPolicy(pullPolicy),
				Env:             options.workerEnv,
				VolumeMounts:    options.volumeMounts,
			},
		},
		Containers: []v1.Container{
			{
				Name:            client.PPSWorkerUserContainerName,
				Image:           options.userImage,
				Command:         []string{"/pach-bin/worker"},
				ImagePullPolicy: v1.PullPolicy(pullPolicy),
				Env:             workerEnv,
				Resources: v1.ResourceRequirements{
					Requests: map[v1.ResourceName]resource.Quantity{
						v1.ResourceCPU:    cpuZeroQuantity,
						v1.ResourceMemory: memZeroQuantity,
					},
				},
				VolumeMounts: userVolumeMounts,
			},
			{
				Name:            client.PPSWorkerSidecarContainerName,
				Image:           a.workerSidecarImage,
				Command:         []string{"/pachd", "--mode", "sidecar"},
				ImagePullPolicy: v1.PullPolicy(pullPolicy),
				Env:             sidecarEnv,
				VolumeMounts:    sidecarVolumeMounts,
				Resources: v1.ResourceRequirements{
					Requests: map[v1.ResourceName]resource.Quantity{
						v1.ResourceCPU:    cpuZeroQuantity,
						v1.ResourceMemory: memSidecarQuantity,
					},
				},
			},
		},
		RestartPolicy:                 "Always",
		Volumes:                       options.volumes,
		ImagePullSecrets:              options.imagePullSecrets,
		TerminationGracePeriodSeconds: &zeroVal,
		SecurityContext:               &v1.PodSecurityContext{RunAsUser: &zeroVal},
		ServiceAccountName:            assets.ServiceAccountName,
	}
	// Apply caller-supplied resource requests/limits to the user container
	// (Containers[0]), overriding the zero defaults set above.
	resourceRequirements := v1.ResourceRequirements{}
	if options.resourceRequests != nil {
		resourceRequirements.Requests = *options.resourceRequests
	}
	if options.resourceLimits != nil {
		resourceRequirements.Limits = *options.resourceLimits
	}
	podSpec.Containers[0].Resources = resourceRequirements
	return podSpec, nil
}

// getWorkerOptions assembles the workerOptions (labels, env vars, volumes,
// secrets, pull secrets, annotations) for a pipeline's worker RC from the
// pipeline's transform and the server configuration.
func (a *apiServer) getWorkerOptions(pipelineName string, pipelineVersion uint64, parallelism int32, resourceRequests *v1.ResourceList, resourceLimits *v1.ResourceList, transform *pps.Transform, cacheSize string, service *pps.Service, specCommitID string) *workerOptions {
	rcName := ppsutil.PipelineRcName(pipelineName, pipelineVersion)
	labels := labels(rcName)
	labels["version"] = version.PrettyVersion()
	labels["pipelineName"] = pipelineName
	userImage := transform.Image
	if userImage == "" {
		userImage = DefaultUserImage
	}
	var workerEnv []v1.EnvVar
	for name, value := range transform.Env {
		workerEnv = append(
			workerEnv,
			v1.EnvVar{
				Name:  name,
				Value: value,
			},
		)
	}
	// We use Kubernetes' "Downward API" so the workers know their IP
	// addresses, which they will then post on etcd so the job managers
	// can discover the workers.
	workerEnv = append(workerEnv, v1.EnvVar{
		Name: client.PPSWorkerIPEnv,
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{
				APIVersion: "v1",
				FieldPath:  "status.podIP",
			},
		},
	})
	workerEnv = append(workerEnv, v1.EnvVar{
		Name: client.PPSPodNameEnv,
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{
				APIVersion: "v1",
				FieldPath:  "metadata.name",
			},
		},
	})
	// Set the etcd prefix env
	workerEnv = append(workerEnv, v1.EnvVar{
		Name:  client.PPSEtcdPrefixEnv,
		Value: a.etcdPrefix,
	})
	// Pass along the namespace
	workerEnv = append(workerEnv, v1.EnvVar{
		Name:  client.PPSNamespaceEnv,
		Value: a.namespace,
	})
	workerEnv = append(workerEnv, v1.EnvVar{
		Name:  client.PPSSpecCommitEnv,
		Value: specCommitID,
	})
	// Mount each pipeline secret as a volume and/or expose it as an env var,
	// depending on which fields the secret spec sets.
	var volumes []v1.Volume
	var volumeMounts []v1.VolumeMount
	for _, secret := range transform.Secrets {
		if secret.MountPath != "" {
			volumes = append(volumes, v1.Volume{
				Name: secret.Name,
				VolumeSource: v1.VolumeSource{
					Secret: &v1.SecretVolumeSource{
						SecretName: secret.Name,
					},
				},
			})
			volumeMounts = append(volumeMounts, v1.VolumeMount{
				Name:      secret.Name,
				MountPath: secret.MountPath,
			})
		}
		if secret.EnvVar != "" {
			workerEnv = append(workerEnv, v1.EnvVar{
				Name: secret.EnvVar,
				ValueFrom: &v1.EnvVarSource{
					SecretKeyRef: &v1.SecretKeySelector{
						LocalObjectReference: v1.LocalObjectReference{
							Name: secret.Name,
						},
						Key: secret.Key,
					},
				},
			})
		}
	}
	volumes = append(volumes, v1.Volume{
		Name: "pach-bin",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	})
	volumeMounts = append(volumeMounts, v1.VolumeMount{
		Name:      "pach-bin",
		MountPath: "/pach-bin",
	})
	volumes = append(volumes, v1.Volume{
		Name: client.PPSWorkerVolume,
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	})
	volumeMounts = append(volumeMounts, v1.VolumeMount{
		Name:      client.PPSWorkerVolume,
		MountPath: client.PPSInputPrefix,
	})
	// When GPUs are requested, expose the host's /usr/lib so the container
	// can load the host's GPU driver libraries.
	if resourceLimits != nil && resourceLimits.NvidiaGPU() != nil && !resourceLimits.NvidiaGPU().IsZero() {
		volumes = append(volumes, v1.Volume{
			Name: "root-lib",
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: "/usr/lib",
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      "root-lib",
			MountPath: "/rootfs/usr/lib",
		})
	}
	var imagePullSecrets []v1.LocalObjectReference
	for _, secret := range transform.ImagePullSecrets {
		imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: secret})
	}
	if a.imagePullSecret != "" {
		imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: a.imagePullSecret})
	}
	annotations := map[string]string{"pipelineName": pipelineName}
	if a.iamRole != "" {
		annotations["iam.amazonaws.com/role"] = a.iamRole
	}
	return &workerOptions{
		rcName:           rcName,
		labels:           labels,
		annotations:      annotations,
		parallelism:      int32(parallelism),
		resourceRequests: resourceRequests,
		resourceLimits:   resourceLimits,
		userImage:        userImage,
		workerEnv:        workerEnv,
		volumes:          volumes,
		volumeMounts:     volumeMounts,
		imagePullSecrets: imagePullSecrets,
		cacheSize:        cacheSize,
		service:          service,
	}
}

// createWorkerRc creates the worker ReplicationController, its grpc/metrics
// Service, and (if options.service is set) a NodePort Service exposing the
// pipeline's user-facing port. "Already exists" errors are tolerated so the
// call is idempotent.
func (a *apiServer) createWorkerRc(options *workerOptions) error {
	podSpec, err := a.workerPodSpec(options)
	if err != nil {
		return err
	}
	rc := &v1.ReplicationController{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ReplicationController",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        options.rcName,
			Labels:      options.labels,
			Annotations: options.annotations,
		},
		Spec: v1.ReplicationControllerSpec{
			Selector: options.labels,
			Replicas: &options.parallelism,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name:        options.rcName,
					Labels:      options.labels,
					Annotations: options.annotations,
				},
				Spec: podSpec,
			},
		},
	}
	if _, err := a.kubeClient.CoreV1().ReplicationControllers(a.namespace).Create(rc); err != nil {
		if !isAlreadyExistsErr(err) {
			return err
		}
	}
	serviceAnnotations := map[string]string{
		"prometheus.io/scrape": "true",
		"prometheus.io/port":   strconv.Itoa(worker.PrometheusPort),
	}
	service := &v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        options.rcName,
			Labels:      options.labels,
			Annotations: serviceAnnotations,
		},
		Spec: v1.ServiceSpec{
			Selector: options.labels,
			Ports: []v1.ServicePort{
				{
					Port: client.PPSWorkerPort,
					Name: "grpc-port",
				},
				{
					Port: worker.PrometheusPort,
					Name: "prometheus-metrics",
				},
			},
		},
	}
	if _, err := a.kubeClient.CoreV1().Services(a.namespace).Create(service); err != nil {
		if !isAlreadyExistsErr(err) {
			return err
		}
	}
	if options.service != nil {
		service := &v1.Service{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Service",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:   options.rcName + "-user",
				Labels: options.labels,
			},
			Spec: v1.ServiceSpec{
				Selector: options.labels,
				Type:     v1.ServiceTypeNodePort,
				Ports: []v1.ServicePort{
					{
						Port:       options.service.ExternalPort,
						TargetPort: intstr.FromInt(int(options.service.InternalPort)),
						Name:       "user-port",
						NodePort:   options.service.ExternalPort,
					},
				},
			},
		}
		if _, err := a.kubeClient.CoreV1().Services(a.namespace).Create(service); err != nil {
			if !isAlreadyExistsErr(err) {
				return err
			}
		}
	}
	return nil
}

Fix issue with volume mounts (was copying slice structure rather than actual slice)

package server

import (
	"context"
	"strconv"

	client "github.com/pachyderm/pachyderm/src/client"
	"github.com/pachyderm/pachyderm/src/client/enterprise"
	"github.com/pachyderm/pachyderm/src/client/pps"
	"github.com/pachyderm/pachyderm/src/client/version"
	"github.com/pachyderm/pachyderm/src/server/pkg/deploy/assets"
	"github.com/pachyderm/pachyderm/src/server/pkg/ppsutil"
	"github.com/pachyderm/pachyderm/src/server/worker"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Parameters used when creating the kubernetes replication controller in charge
// of a job or pipeline's workers
type workerOptions struct {
	rcName           string                    // Name of the replication controller managing workers
	userImage        string                    // The user's pipeline/job image
	labels           map[string]string         // k8s labels attached to the RC and workers
	annotations      map[string]string         // k8s annotations attached to the RC and workers
	parallelism      int32                     // Number of replicas the RC maintains
	cacheSize        string                    // Size of cache that sidecar uses
	resourceRequests *v1.ResourceList          // Resources requested by pipeline/job pods
	resourceLimits   *v1.ResourceList          // Resource limits applied to pipeline/job pods
	workerEnv        []v1.EnvVar               // Environment vars set in the user container
	volumes          []v1.Volume               // Volumes that we expose to the user container
	volumeMounts     []v1.VolumeMount          // Paths where we mount each volume in 'volumes'
	etcdPrefix       string                    // the prefix in etcd to use
	// Secrets that we mount in the worker container (e.g. for reading/writing to
	// s3)
	imagePullSecrets []v1.LocalObjectReference
	service          *pps.Service              // Optional user-facing service exposed by createWorkerRc
}

// workerPodSpec builds the PodSpec for a pipeline/job worker pod: an init
// container that installs the worker binary, the user container running the
// pipeline image, and a storage sidecar. Env vars, volumes and resource
// requests are derived from options and the server's storage configuration.
func (a *apiServer) workerPodSpec(options *workerOptions) (v1.PodSpec, error) {
	pullPolicy := a.workerImagePullPolicy
	if pullPolicy == "" {
		pullPolicy = "IfNotPresent"
	}
	sidecarEnv := []v1.EnvVar{{
		Name:  "BLOCK_CACHE_BYTES",
		Value: options.cacheSize,
	}, {
		Name:  "PFS_CACHE_SIZE",
		Value: "16",
	}, {
		Name:  "PACH_ROOT",
		Value: a.storageRoot,
	}, {
		Name:  "STORAGE_BACKEND",
		Value: a.storageBackend,
	}}
	sidecarEnv = append(sidecarEnv, assets.GetSecretEnvVars(a.storageBackend)...)
	workerEnv := options.workerEnv
	workerEnv = append(options.workerEnv, v1.EnvVar{Name: "PACH_ROOT", Value: a.storageRoot})
	workerEnv = append(workerEnv, assets.GetSecretEnvVars(a.storageBackend)...)
	// This only happens in local deployment. We want the workers to be
	// able to read from/write to the hostpath volume as well.
	storageVolumeName := "pach-disk"
	var sidecarVolumeMounts []v1.VolumeMount
	// Clone the mounts so the appends below cannot write into
	// options.volumeMounts' backing array (the bug this revision fixes).
	userVolumeMounts := make([]v1.VolumeMount, len(options.volumeMounts))
	copy(userVolumeMounts, options.volumeMounts)
	if a.storageHostPath != "" {
		options.volumes = append(options.volumes, v1.Volume{
			Name: storageVolumeName,
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: a.storageHostPath,
				},
			},
		})
		storageMount := v1.VolumeMount{
			Name:      storageVolumeName,
			MountPath: a.storageRoot,
		}
		sidecarVolumeMounts = append(sidecarVolumeMounts, storageMount)
		userVolumeMounts = append(userVolumeMounts, storageMount)
	}
	secretVolume, secretMount := assets.GetBackendSecretVolumeAndMount(a.storageBackend)
	options.volumes = append(options.volumes, secretVolume)
	options.volumeMounts = append(options.volumeMounts, secretMount)
	sidecarVolumeMounts = append(sidecarVolumeMounts, secretMount)
	userVolumeMounts = append(userVolumeMounts, secretMount)
	// Explicitly set CPU and MEM requests to zero because some cloud
	// providers set their own defaults which are usually not what we want.
	cpuZeroQuantity := resource.MustParse("0")
	memZeroQuantity := resource.MustParse("0M")
	memSidecarQuantity := resource.MustParse(options.cacheSize)
	if !a.noExposeDockerSocket {
		options.volumes = append(options.volumes, v1.Volume{
			Name: "docker",
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: "/var/run/docker.sock",
				},
			},
		})
		userVolumeMounts = append(userVolumeMounts, v1.VolumeMount{
			Name:      "docker",
			MountPath: "/var/run/docker.sock",
		})
	}
	zeroVal := int64(0)
	workerImage := a.workerImage
	resp, err := a.getPachClient().Enterprise.GetState(context.Background(), &enterprise.GetStateRequest{})
	if err != nil {
		return v1.PodSpec{}, err
	}
	if resp.State != enterprise.State_ACTIVE {
		workerImage = assets.AddRegistry("", workerImage)
	}
	podSpec := v1.PodSpec{
		InitContainers: []v1.Container{
			{
				Name:            "init",
				Image:           workerImage,
				Command:         []string{"/pach/worker.sh"},
				ImagePullPolicy: v1.PullPolicy(pullPolicy),
				Env:             options.workerEnv,
				VolumeMounts:    options.volumeMounts,
			},
		},
		Containers: []v1.Container{
			{
				Name:            client.PPSWorkerUserContainerName,
				Image:           options.userImage,
				Command:         []string{"/pach-bin/worker"},
				ImagePullPolicy: v1.PullPolicy(pullPolicy),
				Env:             workerEnv,
				Resources: v1.ResourceRequirements{
					Requests: map[v1.ResourceName]resource.Quantity{
						v1.ResourceCPU:    cpuZeroQuantity,
						v1.ResourceMemory: memZeroQuantity,
					},
				},
				VolumeMounts: userVolumeMounts,
			},
			{
				Name:            client.PPSWorkerSidecarContainerName,
				Image:           a.workerSidecarImage,
				Command:         []string{"/pachd", "--mode", "sidecar"},
				ImagePullPolicy: v1.PullPolicy(pullPolicy),
				Env:             sidecarEnv,
				VolumeMounts:    sidecarVolumeMounts,
				Resources: v1.ResourceRequirements{
					Requests: map[v1.ResourceName]resource.Quantity{
						v1.ResourceCPU:    cpuZeroQuantity,
						v1.ResourceMemory: memSidecarQuantity,
					},
				},
			},
		},
		RestartPolicy:                 "Always",
		Volumes:                       options.volumes,
		ImagePullSecrets:              options.imagePullSecrets,
		TerminationGracePeriodSeconds: &zeroVal,
		SecurityContext:               &v1.PodSecurityContext{RunAsUser: &zeroVal},
		ServiceAccountName:            assets.ServiceAccountName,
	}
	// Apply caller-supplied resource requests/limits to the user container
	// (Containers[0]), overriding the zero defaults set above.
	resourceRequirements := v1.ResourceRequirements{}
	if options.resourceRequests != nil {
		resourceRequirements.Requests = *options.resourceRequests
	}
	if options.resourceLimits != nil {
		resourceRequirements.Limits = *options.resourceLimits
	}
	podSpec.Containers[0].Resources = resourceRequirements
	return podSpec, nil
}

// getWorkerOptions assembles the workerOptions (labels, env vars, volumes,
// secrets, pull secrets, annotations) for a pipeline's worker RC from the
// pipeline's transform and the server configuration.
func (a *apiServer) getWorkerOptions(pipelineName string, pipelineVersion uint64, parallelism int32, resourceRequests *v1.ResourceList, resourceLimits *v1.ResourceList, transform *pps.Transform, cacheSize string, service *pps.Service, specCommitID string) *workerOptions {
	rcName := ppsutil.PipelineRcName(pipelineName, pipelineVersion)
	labels := labels(rcName)
	labels["version"] = version.PrettyVersion()
	labels["pipelineName"] = pipelineName
	userImage := transform.Image
	if userImage == "" {
		userImage = DefaultUserImage
	}
	var workerEnv []v1.EnvVar
	for name, value := range transform.Env {
		workerEnv = append(
			workerEnv,
			v1.EnvVar{
				Name:  name,
				Value: value,
			},
		)
	}
	// We use Kubernetes' "Downward API" so the workers know their IP
	// addresses, which they will then post on etcd so the job managers
	// can discover the workers.
	workerEnv = append(workerEnv, v1.EnvVar{
		Name: client.PPSWorkerIPEnv,
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{
				APIVersion: "v1",
				FieldPath:  "status.podIP",
			},
		},
	})
	workerEnv = append(workerEnv, v1.EnvVar{
		Name: client.PPSPodNameEnv,
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{
				APIVersion: "v1",
				FieldPath:  "metadata.name",
			},
		},
	})
	// Set the etcd prefix env
	workerEnv = append(workerEnv, v1.EnvVar{
		Name:  client.PPSEtcdPrefixEnv,
		Value: a.etcdPrefix,
	})
	// Pass along the namespace
	workerEnv = append(workerEnv, v1.EnvVar{
		Name:  client.PPSNamespaceEnv,
		Value: a.namespace,
	})
	workerEnv = append(workerEnv, v1.EnvVar{
		Name:  client.PPSSpecCommitEnv,
		Value: specCommitID,
	})
	// Mount each pipeline secret as a volume and/or expose it as an env var,
	// depending on which fields the secret spec sets.
	var volumes []v1.Volume
	var volumeMounts []v1.VolumeMount
	for _, secret := range transform.Secrets {
		if secret.MountPath != "" {
			volumes = append(volumes, v1.Volume{
				Name: secret.Name,
				VolumeSource: v1.VolumeSource{
					Secret: &v1.SecretVolumeSource{
						SecretName: secret.Name,
					},
				},
			})
			volumeMounts = append(volumeMounts, v1.VolumeMount{
				Name:      secret.Name,
				MountPath: secret.MountPath,
			})
		}
		if secret.EnvVar != "" {
			workerEnv = append(workerEnv, v1.EnvVar{
				Name: secret.EnvVar,
				ValueFrom: &v1.EnvVarSource{
					SecretKeyRef: &v1.SecretKeySelector{
						LocalObjectReference: v1.LocalObjectReference{
							Name: secret.Name,
						},
						Key: secret.Key,
					},
				},
			})
		}
	}
	volumes = append(volumes, v1.Volume{
		Name: "pach-bin",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	})
	volumeMounts = append(volumeMounts, v1.VolumeMount{
		Name:      "pach-bin",
		MountPath: "/pach-bin",
	})
	volumes = append(volumes, v1.Volume{
		Name: client.PPSWorkerVolume,
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	})
	volumeMounts = append(volumeMounts, v1.VolumeMount{
		Name:      client.PPSWorkerVolume,
		MountPath: client.PPSInputPrefix,
	})
	// When GPUs are requested, expose the host's /usr/lib so the container
	// can load the host's GPU driver libraries.
	if resourceLimits != nil && resourceLimits.NvidiaGPU() != nil && !resourceLimits.NvidiaGPU().IsZero() {
		volumes = append(volumes, v1.Volume{
			Name: "root-lib",
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: "/usr/lib",
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      "root-lib",
			MountPath: "/rootfs/usr/lib",
		})
	}
	var imagePullSecrets []v1.LocalObjectReference
	for _, secret := range transform.ImagePullSecrets {
		imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: secret})
	}
	if a.imagePullSecret != "" {
		imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: a.imagePullSecret})
	}
	annotations := map[string]string{"pipelineName": pipelineName}
	if a.iamRole != "" {
		annotations["iam.amazonaws.com/role"] = a.iamRole
	}
	return &workerOptions{
		rcName:           rcName,
		labels:           labels,
		annotations:      annotations,
		parallelism:      int32(parallelism),
		resourceRequests: resourceRequests,
		resourceLimits:   resourceLimits,
		userImage:        userImage,
		workerEnv:        workerEnv,
		volumes:          volumes,
		volumeMounts:     volumeMounts,
		imagePullSecrets: imagePullSecrets,
		cacheSize:        cacheSize,
		service:          service,
	}
}

// createWorkerRc creates the worker ReplicationController, its grpc/metrics
// Service, and (if options.service is set) a NodePort Service exposing the
// pipeline's user-facing port. "Already exists" errors are tolerated so the
// call is idempotent.
func (a *apiServer) createWorkerRc(options *workerOptions) error {
	podSpec, err := a.workerPodSpec(options)
	if err != nil {
		return err
	}
	rc := &v1.ReplicationController{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ReplicationController",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        options.rcName,
			Labels:      options.labels,
			Annotations: options.annotations,
		},
		Spec: v1.ReplicationControllerSpec{
			Selector: options.labels,
			Replicas: &options.parallelism,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name:        options.rcName,
					Labels:      options.labels,
					Annotations: options.annotations,
				},
				Spec: podSpec,
			},
		},
	}
	if _, err := a.kubeClient.CoreV1().ReplicationControllers(a.namespace).Create(rc); err != nil {
		if !isAlreadyExistsErr(err) {
			return err
		}
	}
	serviceAnnotations := map[string]string{
		"prometheus.io/scrape": "true",
		"prometheus.io/port":   strconv.Itoa(worker.PrometheusPort),
	}
	service := &v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        options.rcName,
			Labels:      options.labels,
			Annotations: serviceAnnotations,
		},
		Spec: v1.ServiceSpec{
			Selector: options.labels,
			Ports: []v1.ServicePort{
				{
					Port: client.PPSWorkerPort,
					Name: "grpc-port",
				},
				{
					Port: worker.PrometheusPort,
					Name: "prometheus-metrics",
				},
			},
		},
	}
	if _, err := a.kubeClient.CoreV1().Services(a.namespace).Create(service); err != nil {
		if !isAlreadyExistsErr(err) {
			return err
		}
	}
	if options.service != nil {
		service := &v1.Service{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Service",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:   options.rcName + "-user",
				Labels: options.labels,
			},
			Spec: v1.ServiceSpec{
				Selector: options.labels,
				Type:     v1.ServiceTypeNodePort,
				Ports: []v1.ServicePort{
					{
						Port:       options.service.ExternalPort,
						TargetPort: intstr.FromInt(int(options.service.InternalPort)),
						Name:       "user-port",
						NodePort:   options.service.ExternalPort,
					},
				},
			},
		}
		if _, err := a.kubeClient.CoreV1().Services(a.namespace).Create(service); err != nil {
			if !isAlreadyExistsErr(err) {
				return err
			}
		}
	}
	return nil
}