text
stringlengths
11
4.05M
package version

import (
	"encoding/json"
	"log"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
	_ "github.com/wangfmD/rvs/log"
	"github.com/wangfmD/rvs/models"
)

// QueryTagsHandle returns every stored version tag as a JSON payload.
func QueryTagsHandle(c *gin.Context) {
	var tag = models.VersionTag{}
	tags := tag.GetAll()
	c.JSON(http.StatusOK, gin.H{
		"status":  "success",
		"result":  tags,
		"message": "query",
	})
}

// AddTagsHandle parses the posted "tags" form field (one "key=value" pair
// per line), attaches them to the posted "versionid" and persists them.
// Each error path writes exactly one JSON response and returns.
func AddTagsHandle(c *gin.Context) {
	tags := c.PostForm("tags")
	versionid := c.PostForm("versionid")
	log.Println("tags is :", tags)
	log.Println("versionid is :", versionid)

	m := analysisString(tags)
	bData, err := json.Marshal(m)
	if err != nil {
		// err.Error(): a bare error value does not serialize usefully to JSON.
		c.JSON(http.StatusOK, gin.H{
			"status":  "failed",
			"message": err.Error(),
		})
		return
	}

	log.Printf("tags is : %s\n", bData)
	tag := models.VersionTag{}
	if err := json.Unmarshal(bData, &tag); err != nil {
		c.JSON(http.StatusOK, gin.H{
			"status":  "failed",
			"message": err.Error(),
		})
		// Return so we don't fall through and write a second response.
		return
	}
	tag.Versionid = versionid

	num, err := tag.Add()
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"status":  "failed",
			"message": err.Error(),
		})
		return
	}
	log.Println("add tags total:", num)
	c.JSON(http.StatusOK, gin.H{
		"status":  "success",
		"message": m,
	})
}

// analysisString parses newline-separated "key=value" pairs into a map.
// Malformed lines (no '=') are skipped instead of panicking with an
// index-out-of-range on ss1[1].
func analysisString(s string) map[string]string {
	m := make(map[string]string)
	s1 := strings.TrimSpace(s)
	ss := strings.Split(s1, "\n")
	for _, value := range ss {
		log.Println("line split: ", value)
		tag := strings.TrimSpace(value)
		ss1 := strings.Split(tag, "=")
		log.Println("= split: ", ss1)
		if len(ss1) < 2 {
			continue // no '=' separator on this line; ignore it
		}
		m[strings.TrimSpace(ss1[0])] = strings.TrimSpace(ss1[1])
	}
	return m
}

// DeleteHandle deletes every tag whose id appears in the JSON array posted
// as "tagsId", then echoes the raw payload back.
func DeleteHandle(c *gin.Context) {
	m := c.PostForm("tagsId")
	log.Println("delete versionid:", m)
	// Unmarshal sizes the slice itself; no need to pre-fill it.
	var s []string
	err := json.Unmarshal([]byte(m), &s)
	if err != nil {
		log.Println(err)
	} else {
		for _, value := range s {
			tag := models.VersionTag{}
			tag.DeleteOne(value)
		}
	}
	c.JSON(http.StatusOK, m)
}

// GetTagsHandle returns the list of known version ids.
func GetTagsHandle(c *gin.Context) {
	tags := make([]string, 0)
	tagsM := models.GetVersionIds()
	for _, value := range tagsM {
		tags = append(tags, value.Versionid)
	}
	log.Println(tags)
	c.JSON(http.StatusOK, gin.H{
		"tags": tags,
	})
}
package AddressRecover

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
)

// GetAddress returns the caller's public IP address as reported by the
// https://ipinfo.io/json service. It panics if the service cannot be
// reached or returns an unexpected payload (unchanged behavior: callers
// rely on panic-on-failure rather than an error return).
func GetAddress() string {
	resp, err := http.Get("https://ipinfo.io/json")
	if err != nil {
		panic("Something went wrong!")
	}
	// Close the body so the underlying connection is not leaked.
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	var dat map[string]interface{}
	// body is already []byte; no string round-trip needed.
	if err := json.Unmarshal(body, &dat); err != nil {
		panic(err)
	}
	// Comma-ok assertion: a missing or non-string "ip" field yields a
	// descriptive panic instead of a bare type-assertion failure.
	ip, ok := dat["ip"].(string)
	if !ok {
		panic("ipinfo.io response did not contain an \"ip\" string field")
	}
	return ip
}
package mempool

import (
	"strconv"
	"sync/atomic"
	"time"

	"github.com/meshplus/bitxhub-model/pb"
	raftproto "github.com/meshplus/bitxhub/pkg/order/etcdraft/proto"
	cmap "github.com/orcaman/concurrent-map"
)

// getBatchSeqNo atomically reads the current batch sequence number.
func (mpi *mempoolImpl) getBatchSeqNo() uint64 {
	return atomic.LoadUint64(&mpi.batchSeqNo)
}

// increaseBatchSeqNo atomically increments the batch sequence number.
func (mpi *mempoolImpl) increaseBatchSeqNo() {
	atomic.AddUint64(&mpi.batchSeqNo, 1)
}

// msgToConsensusPbMsg wraps raw payload data in a raft message of the given
// type (stamped with this node's id) and embeds it in a consensus
// pb.Message. Returns nil if the raft message cannot be marshaled.
func (mpi *mempoolImpl) msgToConsensusPbMsg(data []byte, tyr raftproto.RaftMessage_Type) *pb.Message {
	rm := &raftproto.RaftMessage{
		Type:   tyr,
		FromId: mpi.localID,
		Data:   data,
	}
	cmData, err := rm.Marshal()
	if err != nil {
		return nil
	}
	msg := &pb.Message{
		Type: pb.Message_CONSENSUS,
		Data: cmData,
	}
	return msg
}

// newSubscribe creates the set of unbuffered channels used to deliver
// mempool events to the processing loop.
func newSubscribe() *subscribeEvent {
	return &subscribeEvent{
		txForwardC:           make(chan *TxSlice),
		localMissingTxnEvent: make(chan *LocalMissingTxnEvent),
		fetchTxnRequestC:     make(chan *FetchTxnRequest),
		updateLeaderC:        make(chan uint64),
		fetchTxnResponseC:    make(chan *FetchTxnResponse),
		commitTxnC:           make(chan *raftproto.Ready),
		getBlockC:            make(chan *constructBatchEvent),
		pendingNonceC:        make(chan *getNonceRequest),
	}
}

// newNonceCache creates an empty nonce cache.
// TODO (YH): restore commitNonce and pendingNonce from db.
func newNonceCache() *nonceCache {
	return &nonceCache{
		commitNonces:  make(map[string]uint64),
		pendingNonces: make(map[string]uint64),
	}
}

// poolIsFull reports whether the tx store has reached DefaultPoolSize.
func (mpi *mempoolImpl) poolIsFull() bool {
	return atomic.LoadInt32(&mpi.txStore.poolSize) >= DefaultPoolSize
}

// isLeader reports whether this node currently considers itself the leader.
func (mpi *mempoolImpl) isLeader() bool {
	return mpi.leader == mpi.localID
}

// isBatchTimerActive reports whether a batch timer is pending; the
// concurrent map holds one key per live timer.
func (mpi *mempoolImpl) isBatchTimerActive() bool {
	return !mpi.batchTimerMgr.isActive.IsEmpty()
}

// startBatchTimer starts the batch timer and reset the batchTimerActive to true.
func (mpi *mempoolImpl) startBatchTimer(reason string) {
	// stop old timer
	mpi.stopBatchTimer(StopReason3)
	mpi.logger.Debugf("Start batch timer, reason: %s", reason)
	// The nanosecond timestamp string is the timer's identity in the map.
	timestamp := time.Now().UnixNano()
	key := strconv.FormatInt(timestamp, 10)
	mpi.batchTimerMgr.isActive.Set(key, true)

	time.AfterFunc(mpi.batchTimerMgr.timeout, func() {
		// Fire only if this timer was not stopped in the meantime:
		// stopBatchTimer swaps in a fresh map, which drops this key.
		if mpi.batchTimerMgr.isActive.Has(key) {
			mpi.batchTimerMgr.timeoutEventC <- true
		}
	})
}

// stopBatchTimer stops the batch timer and reset the batchTimerActive to false.
func (mpi *mempoolImpl) stopBatchTimer(reason string) {
	if mpi.batchTimerMgr.isActive.IsEmpty() {
		return
	}
	mpi.logger.Debugf("Stop batch timer, reason: %s", reason)
	// Replacing the map invalidates every outstanding AfterFunc callback.
	mpi.batchTimerMgr.isActive = cmap.New()
}

// newTimer news a timer with default timeout.
func newTimer(d time.Duration) *timerManager {
	return &timerManager{
		timeout:       d,
		isActive:      cmap.New(),
		timeoutEventC: make(chan bool),
	}
}
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"os"
	"os/exec"
	"strings"

	"github.com/coreos/go-systemd/unit"
	"github.com/urfave/cli"
)

const (
	// Destination directories and file-name templates (%s is the interface
	// or profile name) for the generated systemd network unit files.
	networkBase     = "/etc/systemd/network"
	apConfigBase    = "/etc/"
	ethernetService = "fconf-wired-%s.network"
	fourgService    = "fconf-4g-%s.network"
	threeGService   = "fconf-wvdial.conf"
	wirelessService = "fconf-wireless-%s.network"
	apConfigFile    = "create_ap-%s.conf"

	// CLI flag names.
	enableFlag  = "enable"
	disableFlag = "disable"
	removeFlag  = "remove"
	configFlag  = "config"

	fconfConfigDir = "/etc/fconf"
)

//Ethernet is the ehternet configuration.
type Ethernet struct {
	Network
}

//ToSystemdUnit implement UnitFile interface.
//Defaults the interface to "eth0" when none was configured.
func (e Ethernet) ToSystemdUnit() ([]*unit.UnitOption, error) {
	if e.Interface == "" {
		e.Interface = "eth0"
	}
	return e.Network.ToSystemdUnit()
}

//Wifi is the wifi configuration.
type Wifi struct {
	Network
	Username string `json:"ssid"`
	Password string `json:"passphrase"`
}

//UnitFile is an interface for systemd uni file
type UnitFile interface {
	ToSystemdUnit() ([]*unit.UnitOption, error)
}

//ToSystemdUnit implement UnitFile interface.
//Defaults the interface to "wlan0" when none was configured.
func (w Wifi) ToSystemdUnit() ([]*unit.UnitOption, error) {
	if w.Interface == "" {
		w.Interface = "wlan0"
	}
	return w.Network.ToSystemdUnit()
}

//CreateSystemdFile creates a file that has systemd unit file content.
//When an optional writer is supplied the serialized unit is written there
//instead of to the named file (used for dry runs / tests).
func CreateSystemdFile(u UnitFile, filename string, mode os.FileMode, out ...io.Writer) error {
	x, err := u.ToSystemdUnit()
	if err != nil {
		return err
	}
	r := unit.Serialize(x)
	if len(out) > 0 {
		_, err := io.Copy(out[0], r)
		return err
	}
	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()
	_, err = io.Copy(f, r)
	return err
}

// Checks if the directory exists. If the directory doesnt exist, this function
// will create the directory with permission 0755.
//
// The directory created will recursively create subdirectory. It will behave
// something like mkdir -p /dir/subdir.
func checkDir(dir string) error { _, err := os.Stat(dir) if os.IsNotExist(err) { err = os.MkdirAll(dir, 07755) if err != nil { return err } } return nil } func restartService(name string) error { return systemdCMD("restart", name) } func startService(name string) error { return systemdCMD("start", name) } func enableService(name string) error { return systemdCMD("enable", name) } func disableService(name string) error { return systemdCMD("disable", name) } func stopService(name string) error { return systemdCMD("stop", name) } func systemdCMD(name, service string) error { fmt.Printf("%s %s ...", name, service) _, err := exec.Command("systemctl", name, service).Output() if err != nil { fmt.Println("done with error") return err } fmt.Println("done without error") return nil } func ReadFromStdin() ([]byte, error) { r := bufio.NewReader(os.Stdin) return r.ReadBytes('\n') } func FlushInterface(i string) error { return exec.Command("ip", "addr", "flush", "dev", i).Run() } func ListInterface(ctx *cli.Context) error { i, err := net.Interfaces() if err != nil { return err } var r []map[string]interface{} for _, v := range i { o := make(map[string]interface{}) o["Name"] = v.Name o["MTU"] = v.MTU o["HardwareAddr"] = v.HardwareAddr o["Flags"] = getFlags(v.Flags) r = append(r, o) } b, err := json.Marshal(r) if err != nil { return err } fmt.Println(string(b)) return nil } func getFlags(f net.Flags) []string { return strings.Split(f.String(), "|") }
package main

import (
	"fmt"
)

// Result is a single row produced by solutionQuery: the id and name of a
// goods record that carries every tag.
type Result struct {
	GoodsId   int
	GoodsName string
}

// solutionQuery selects all goods linked to every tag: it counts tags per
// goods and keeps those whose count equals the total number of tags.
const solutionQuery = ` WITH all_tags AS ( SELECT COUNT(1) AS count FROM tags ) SELECT goods.id, goods.name FROM ( SELECT goods_id, COUNT(tags_goods.tag_id) AS tags_count FROM tags_goods, all_tags GROUP BY goods_id HAVING tags_count = all_tags.count ) AS goods_with_all_tags INNER JOIN goods ON goods.id = goods_with_all_tags.goods_id `

// Solution runs solutionQuery against a fresh DB handle and returns the
// matching rows.
func Solution() ([]Result, error) {
	db, err := NewDb()
	if err != nil {
		return nil, fmt.Errorf("NewDb(): %w", err)
	}
	rows, err := db.Query(solutionQuery)
	if err != nil {
		return nil, fmt.Errorf("db.Query(\"%v\"): %w", solutionQuery, err)
	}
	defer rows.Close()

	results := make([]Result, 0, 10)
	for rows.Next() {
		var row Result
		if err := rows.Scan(&row.GoodsId, &row.GoodsName); err != nil {
			return nil, fmt.Errorf("rows.Scan(): %w", err)
		}
		results = append(results, row)
	}
	// The original loop ignored iteration errors, so a partially-read
	// result set could be mistaken for success.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("rows.Err(): %w", err)
	}
	return results, nil
}
package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/firestore"
	"google.golang.org/genproto/googleapis/type/latlng"
)

// Person mirrors a Firestore person document. Id is overwritten from the
// document reference id in NewPerson after loading.
type Person struct {
	Id        string         `firestore:"id"`
	Firstname string         `firestore:"firstname"`
	Lastname  string         `firestore:"lastname"`
	Dob       time.Time      `firestore:"dob"`
	CreatedAt time.Time      `firestore:"createdAt"`
	Postcode  string         `firestore:"postcode"`
	Location  *latlng.LatLng `firestore:"location"`
}

// Print writes a human-readable summary of the person to stdout.
func (p *Person) Print() {
	fmt.Printf(
		"Person added at %v:\n ID: %s\n Name: %s %s\n DOB: %v\n Postcode: %v\n Location: %v\n",
		p.CreatedAt, p.Id, p.Firstname, p.Lastname, p.Dob, p.Postcode, p.Location,
	)
}

// NewPerson loads the document behind docRef and decodes it into a Person.
// Errors are wrapped with %w so callers can inspect the underlying
// Firestore error with errors.Is/errors.As.
func NewPerson(ctx context.Context, docRef *firestore.DocumentRef) (Person, error) {
	docsnap, err := docRef.Get(ctx)
	if err != nil {
		return Person{}, fmt.Errorf("error reading data: %w", err)
	}
	if !docsnap.Exists() {
		return Person{}, fmt.Errorf("person doesn't exist")
	}
	var person Person
	if err := docsnap.DataTo(&person); err != nil {
		return Person{}, fmt.Errorf("error marshalling data: %w", err)
	}
	// The document ID is authoritative; overwrite whatever was stored.
	person.Id = docRef.ID
	return person, nil
}
package service

import (
	"testing"
)

// TestInitializationTables exercises table creation against the
// package-level test database configuration.
func TestInitializationTables(t *testing.T) {
	// "svc" avoids shadowing the package name "service".
	svc := DatabaseInitializerService{&dbProperties}
	svc.InitializeDBTables()
}
package informer

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"

	"github.com/argoproj/argo/pkg/apis/workflow"
	extwfv1 "github.com/argoproj/argo/pkg/client/informers/externalversions/workflow/v1alpha1"
	"github.com/argoproj/argo/pkg/client/listers/workflow/v1alpha1"
)

// tolerantClusterWorkflowTemplateInformer delegates to a dynamic
// (unstructured) informer rather than a typed one.
type tolerantClusterWorkflowTemplateInformer struct {
	delegate informers.GenericInformer
}

// a drop-in replacement for `extwfv1.ClusterWorkflowTemplateInformer` that ignores malformed resources
func NewTolerantClusterWorkflowTemplateInformer(dynamicInterface dynamic.Interface, defaultResync time.Duration) extwfv1.ClusterWorkflowTemplateInformer {
	// Build a dynamic informer for the cluster-scoped (namespace "")
	// ClusterWorkflowTemplate resource; no extra list-option tweaks.
	return &tolerantClusterWorkflowTemplateInformer{delegate: dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicInterface, defaultResync, "", func(options *metav1.ListOptions) {}).
		ForResource(schema.GroupVersionResource{Group: workflow.Group, Version: workflow.Version, Resource: workflow.ClusterWorkflowTemplatePlural})}
}

// Informer exposes the delegate's shared index informer unchanged.
func (t *tolerantClusterWorkflowTemplateInformer) Informer() cache.SharedIndexInformer {
	return t.delegate.Informer()
}

// Lister wraps the delegate's generic lister in the tolerant typed lister.
func (t *tolerantClusterWorkflowTemplateInformer) Lister() v1alpha1.ClusterWorkflowTemplateLister {
	return &tolerantClusterWorkflowTemplateLister{delegate: t.delegate.Lister()}
}
package flow

import (
	"sync"
	"testing"
	"time"
)

// A component that doubles its int input
type doubler struct {
	Component
	In  <-chan int
	Out chan<- int
}

// Doubles the input and sends it to output
func (d *doubler) OnIn(i int) {
	d.Out <- i * 2
}

// A constructor that can be used by component registry/factory
func newDoubler() interface{} {
	return new(doubler)
}

func init() {
	Register("doubler", newDoubler)
}

// Tests a component with single input and single output
func TestSingleInput(t *testing.T) {
	d := new(doubler)
	in := make(chan int, 10)
	out := make(chan int, 10)
	d.In = in
	d.Out = out
	RunProc(d)
	for i := 0; i < 10; i++ {
		in <- i
		i2 := <-out
		ix2 := i * 2
		if i2 != ix2 {
			t.Errorf("%d != %d", i2, ix2)
		}
	}
	// Shutdown the component
	close(in)
}

// A component that locks to preserve concurrent modification of its state
type locker struct {
	Component
	In  <-chan int
	Out chan<- int

	StateLock *sync.Mutex

	counter int
	sum     int
}

// Creates a locker instance. This is required because StateLock must be a pointer
func newLocker() *locker {
	l := new(locker)
	l.counter = 0
	l.sum = 0
	l.StateLock = new(sync.Mutex)
	return l
}

// A constructor that can be used by component registry/factory
func newLockerConstructor() interface{} {
	return newLocker()
}

func init() {
	Register("locker", newLockerConstructor)
}

// Simulates long processing and read/write access
func (l *locker) OnIn(i int) {
	l.counter++
	// Half of the calls will wait to simulate long processing
	// (untyped 1000 is a time.Duration in nanoseconds, i.e. 1µs).
	if l.counter%2 == 0 {
		time.Sleep(1000)
	}
	// Parallel write data race danger is here
	l.sum += i
}

func (l *locker) Shutdown() {
	// Emit the result and don't close the outport
	l.Out <- l.sum
}

// Tests internal state locking feature.
// Run with GOMAXPROCS > 1.
func TestStateLock(t *testing.T) {
	l := newLocker()
	in := make(chan int, 10)
	out := make(chan int, 10)
	l.In = in
	l.Out = out
	RunProc(l)
	// Simulate parallel writing and count the sum
	sum := 0
	for i := 1; i <= 1000; i++ {
		in <- i
		sum += i
	}
	// Send the close signal
	close(in)
	// Get the result and check if it is consistent
	sum2 := <-out
	if sum2 != sum {
		t.Errorf("%d != %d", sum2, sum)
	}
}

// Similar to locker, but intended to test ComponentModeSync
type syncLocker struct {
	Component
	In  <-chan int
	Out chan<- int

	counter int
	sum     int
}

// Creates a syncLocker instance
func newSyncLocker() *syncLocker {
	l := new(syncLocker)
	l.counter = 0
	l.sum = 0
	l.Component.Mode = ComponentModeSync // Change this to ComponentModeAsync and the test will fail
	return l
}

// A constructor that can be used by component registry/factory
func newSyncLockerConstructor() interface{} {
	return newSyncLocker()
}

func init() {
	Register("syncLocker", newSyncLockerConstructor)
}

// Simulates long processing and read/write access
func (l *syncLocker) OnIn(i int) {
	l.counter++
	// Half of the calls will wait to simulate long processing
	if l.counter%2 == 0 {
		time.Sleep(1000)
	}
	// Parallel write data race danger is here; sync mode serializes calls
	l.sum += i
}

func (l *syncLocker) Shutdown() {
	// Emit the result and don't close the outport
	l.Out <- l.sum
}

// Tests synchronous process execution feature.
// Run with GOMAXPROCS > 1.
func TestSyncLock(t *testing.T) {
	l := newSyncLocker()
	in := make(chan int, 10)
	out := make(chan int, 10)
	l.In = in
	l.Out = out
	RunProc(l)
	// Simulate parallel writing and count the sum
	sum := 0
	for i := 1; i <= 1000; i++ {
		in <- i
		sum += i
	}
	// Send the close signal
	close(in)
	// Get the result and check if it is consistent
	sum2 := <-out
	if sum2 != sum {
		t.Errorf("%d != %d", sum2, sum)
	}
}

// An external variable
var testInitFinFlag int

// Simple component
type initfin struct {
	Component
	In  <-chan int
	Out chan<- int
}

// Echo input; doubles the value when Init() has set the global flag
func (i *initfin) OnIn(n int) {
	// Dependent behavior
	if testInitFinFlag == 123 {
		i.Out <- n * 2
	} else {
		i.Out <- n
	}
}

// Initialization code, affects a global var
func (i *initfin) Init() {
	testInitFinFlag = 123
}

// Finalization code
func (i *initfin) Finish() {
	testInitFinFlag = 456
}

// Tests user initialization and finalization functions
func TestInitFinish(t *testing.T) {
	// Create and run the component
	i := new(initfin)
	i.Net = new(Graph)
	i.Net.InitGraphState()
	i.Net.waitGrp.Add(1)
	in := make(chan int)
	out := make(chan int)
	i.In = in
	i.Out = out
	RunProc(i)
	// Pass a value, the result must be affected by flag state
	in <- 2
	n2 := <-out
	if n2 != 4 {
		t.Errorf("%d != %d", n2, 4)
	}
	// Shut the component down and wait for Finish() code
	close(in)
	i.Net.waitGrp.Wait()
	if testInitFinFlag != 456 {
		t.Errorf("%d != %d", testInitFinFlag, 456)
	}
}

// A flag to test OnClose
var closeTestFlag int

// A component to test OnClose handlers
type closeTest struct {
	Component
	In <-chan int
}

// In channel close event handler
func (c *closeTest) OnInClose() {
	closeTestFlag = 789
}

// Tests close handler of input ports
func TestClose(t *testing.T) {
	c := new(closeTest)
	c.Net = new(Graph)
	c.Net.InitGraphState()
	c.Net.waitGrp.Add(1)
	in := make(chan int)
	c.In = in
	RunProc(c)
	in <- 1
	close(in)
	c.Net.waitGrp.Wait()
	if closeTestFlag != 789 {
		t.Errorf("%d != %d", closeTestFlag, 789)
	}
}

// A flag to test Shutdown
var shutdownTestFlag int

// A component to test custom Shutdown handlers
type shutdownTest struct {
	Component
	In <-chan int
}

// Records the received value in the global flag
func (s *shutdownTest) OnIn(i int) {
	shutdownTestFlag = i
}

// Custom shutdown handler
func (s *shutdownTest) Shutdown() {
	shutdownTestFlag = 789
}

// Tests that the custom Shutdown handler runs after the input closes
func TestShutdown(t *testing.T) {
	s := new(shutdownTest)
	s.Net = new(Graph)
	s.Net.InitGraphState()
	s.Net.waitGrp.Add(1)
	in := make(chan int)
	s.In = in
	RunProc(s)
	in <- 1
	close(in)
	s.Net.waitGrp.Wait()
	if shutdownTestFlag != 789 {
		t.Errorf("%d != %d", shutdownTestFlag, 789)
	}
}

// Tests running a component as a pool of 4 workers
func TestPoolMode(t *testing.T) {
	d := new(doubler)
	d.Component.Mode = ComponentModePool
	d.Component.PoolSize = 4
	in := make(chan int, 20)
	out := make(chan int, 20)
	d.In = in
	d.Out = out
	RunProc(d)
	for i := 0; i < 10; i++ {
		in <- i
	}
	// Pool workers may reorder results, so only sanity-check each value
	for i := 0; i < 10; i++ {
		i2 := <-out
		if i2 < 0 {
			t.Errorf("%d < 0", i2)
		}
	}
	// Shutdown the component
	close(in)
}

// A component to test manual termination
type stopMe struct {
	Component
	In  <-chan int
	Out chan<- int
}

func (s *stopMe) OnIn(i int) {
	s.Out <- i * 2
}

// Emits a sentinel so the test can observe that Finish() ran
func (s *stopMe) Finish() {
	s.Out <- 909
}

// Tests manual termination via StopProc()
func TestStopProc(t *testing.T) {
	s := new(stopMe)
	in := make(chan int, 20)
	out := make(chan int, 20)
	s.In = in
	s.Out = out
	// Test normal mode first
	RunProc(s)
	for i := 0; i < 10; i++ {
		in <- i
	}
	for i := 0; i < 10; i++ {
		i2 := <-out
		if i2 < 0 {
			t.Errorf("%d < 0", i2)
		}
	}
	// Stop without closing chans
	StopProc(s)
	// Wait for finish signal
	fin := <-out
	if fin != 909 {
		t.Errorf("Invalid final signal: %d", fin)
	}
	// Run again in Pool mode
	s.Component.Mode = ComponentModePool
	s.Component.PoolSize = 4
	RunProc(s)
	for i := 0; i < 10; i++ {
		in <- i
	}
	for i := 0; i < 10; i++ {
		i2 := <-out
		if i2 < 0 {
			t.Errorf("%d < 0", i2)
		}
	}
	// Stop without closing chans
	StopProc(s)
	// Wait for finish signal
	fin = <-out
	if fin != 909 {
		t.Errorf("Invalid final signal: %d", fin)
	}
}
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package executor

import (
	"context"

	"github.com/pingcap/errors"
	"github.com/pingcap/tidb/executor/aggfuncs"
	"github.com/pingcap/tidb/executor/internal/exec"
	"github.com/pingcap/tidb/executor/internal/vecgroupchecker"
	"github.com/pingcap/tidb/expression"
	"github.com/pingcap/tidb/parser/ast"
	"github.com/pingcap/tidb/planner/core"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/mathutil"
)

// WindowExec is the executor for window functions.
type WindowExec struct {
	exec.BaseExecutor

	groupChecker *vecgroupchecker.VecGroupChecker
	// childResult stores the child chunk
	childResult *chunk.Chunk
	// executed indicates the child executor is drained or something unexpected happened.
	executed bool
	// resultChunks stores the chunks to return
	resultChunks []*chunk.Chunk
	// remainingRowsInChunk indicates how many rows the resultChunks[i] is not prepared.
	remainingRowsInChunk []int

	numWindowFuncs int
	processor      windowProcessor
}

// Close implements the Executor Close interface.
func (e *WindowExec) Close() error {
	return errors.Trace(e.BaseExecutor.Close())
}

// Next implements the Executor Next interface.
func (e *WindowExec) Next(ctx context.Context, chk *chunk.Chunk) error {
	chk.Reset()
	// Keep consuming partition groups until at least one result chunk is
	// fully computed or the child is drained.
	for !e.executed && !e.preparedChunkAvailable() {
		err := e.consumeOneGroup(ctx)
		if err != nil {
			e.executed = true
			return err
		}
	}
	if len(e.resultChunks) > 0 {
		chk.SwapColumns(e.resultChunks[0])
		e.resultChunks[0] = nil // GC it. TODO: Reuse it.
		e.resultChunks = e.resultChunks[1:]
		e.remainingRowsInChunk = e.remainingRowsInChunk[1:]
	}
	return nil
}

// preparedChunkAvailable reports whether the head result chunk has had all
// of its rows filled in and can be returned to the caller.
func (e *WindowExec) preparedChunkAvailable() bool {
	return len(e.resultChunks) > 0 && e.remainingRowsInChunk[0] == 0
}

// consumeOneGroup gathers all rows of the next partition group — fetching
// additional child chunks while the group continues across chunk
// boundaries — and feeds them to consumeGroupRows.
func (e *WindowExec) consumeOneGroup(ctx context.Context) error {
	var groupRows []chunk.Row
	if e.groupChecker.IsExhausted() {
		eof, err := e.fetchChild(ctx)
		if err != nil {
			return errors.Trace(err)
		}
		if eof {
			e.executed = true
			return e.consumeGroupRows(groupRows)
		}
		_, err = e.groupChecker.SplitIntoGroups(e.childResult)
		if err != nil {
			return errors.Trace(err)
		}
	}
	begin, end := e.groupChecker.GetNextGroup()
	for i := begin; i < end; i++ {
		groupRows = append(groupRows, e.childResult.GetRow(i))
	}

	// The group may continue in the next child chunk: keep fetching while
	// the current group runs to the end of the chunk.
	for meetLastGroup := end == e.childResult.NumRows(); meetLastGroup; {
		meetLastGroup = false
		eof, err := e.fetchChild(ctx)
		if err != nil {
			return errors.Trace(err)
		}
		if eof {
			e.executed = true
			return e.consumeGroupRows(groupRows)
		}

		isFirstGroupSameAsPrev, err := e.groupChecker.SplitIntoGroups(e.childResult)
		if err != nil {
			return errors.Trace(err)
		}

		if isFirstGroupSameAsPrev {
			begin, end = e.groupChecker.GetNextGroup()
			for i := begin; i < end; i++ {
				groupRows = append(groupRows, e.childResult.GetRow(i))
			}
			meetLastGroup = end == e.childResult.NumRows()
		}
	}
	return e.consumeGroupRows(groupRows)
}

// consumeGroupRows runs the window processor over one complete partition
// group and spreads the produced results over the pending result chunks.
func (e *WindowExec) consumeGroupRows(groupRows []chunk.Row) (err error) {
	remainingRowsInGroup := len(groupRows)
	if remainingRowsInGroup == 0 {
		return nil
	}
	for i := 0; i < len(e.resultChunks); i++ {
		remained := mathutil.Min(e.remainingRowsInChunk[i], remainingRowsInGroup)
		e.remainingRowsInChunk[i] -= remained
		remainingRowsInGroup -= remained

		// TODO: Combine these three methods.
		// The old implementation needs the processor has these three methods
		// but now it does not have to.
		groupRows, err = e.processor.consumeGroupRows(e.Ctx(), groupRows)
		if err != nil {
			return errors.Trace(err)
		}

		_, err = e.processor.appendResult2Chunk(e.Ctx(), groupRows, e.resultChunks[i], remained)
		if err != nil {
			return errors.Trace(err)
		}

		if remainingRowsInGroup == 0 {
			e.processor.resetPartialResult()
			break
		}
	}
	return nil
}

// fetchChild pulls the next chunk from the child executor. It also
// allocates a matching result chunk (child columns referenced, window
// columns to be filled later) and records how many rows it still needs.
func (e *WindowExec) fetchChild(ctx context.Context) (eof bool, err error) {
	childResult := tryNewCacheChunk(e.Children(0))
	err = Next(ctx, e.Children(0), childResult)
	if err != nil {
		return false, errors.Trace(err)
	}
	// No more data.
	numRows := childResult.NumRows()
	if numRows == 0 {
		return true, nil
	}

	resultChk := e.Ctx().GetSessionVars().GetNewChunkWithCapacity(e.RetFieldTypes(), 0, numRows, e.AllocPool)
	err = e.copyChk(childResult, resultChk)
	if err != nil {
		return false, err
	}
	e.resultChunks = append(e.resultChunks, resultChk)
	e.remainingRowsInChunk = append(e.remainingRowsInChunk, numRows)

	e.childResult = childResult
	return false, nil
}

// copyChk makes dst's non-window columns reference src's columns; only the
// trailing numWindowFuncs columns are produced by this executor.
func (e *WindowExec) copyChk(src, dst *chunk.Chunk) error {
	columns := e.Schema().Columns[:len(e.Schema().Columns)-e.numWindowFuncs]
	for i, col := range columns {
		if err := dst.MakeRefTo(i, src, col.Index); err != nil {
			return err
		}
	}
	return nil
}

// windowProcessor is the interface for processing different kinds of windows.
type windowProcessor interface {
	// consumeGroupRows updates the result for an window function using the input rows
	// which belong to the same partition.
	consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error)
	// appendResult2Chunk appends the final results to chunk.
	// It is called when there are no more rows in current partition.
	appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error)
	// resetPartialResult resets the partial result to the original state for a specific window function.
	resetPartialResult()
}

// aggWindowProcessor handles whole-partition windows (no frame clause):
// every row of the partition receives the same aggregate value.
type aggWindowProcessor struct {
	windowFuncs    []aggfuncs.AggFunc
	partialResults []aggfuncs.PartialResult
}

func (p *aggWindowProcessor) consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
	for i, windowFunc := range p.windowFuncs {
		// @todo Add memory trace
		_, err := windowFunc.UpdatePartialResult(ctx, rows, p.partialResults[i])
		if err != nil {
			return nil, err
		}
	}
	rows = rows[:0]
	return rows, nil
}

func (p *aggWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error) {
	for remained > 0 {
		for i, windowFunc := range p.windowFuncs {
			// TODO: We can extend the agg func interface to avoid the `for` loop here.
			err := windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
			if err != nil {
				return nil, err
			}
		}
		remained--
	}
	return rows, nil
}

func (p *aggWindowProcessor) resetPartialResult() {
	for i, windowFunc := range p.windowFuncs {
		windowFunc.ResetPartialResult(p.partialResults[i])
	}
}

// rowFrameWindowProcessor handles ROWS frames: the frame bounds are fixed
// row offsets relative to the current row index.
type rowFrameWindowProcessor struct {
	windowFuncs    []aggfuncs.AggFunc
	partialResults []aggfuncs.PartialResult
	start          *core.FrameBound
	end            *core.FrameBound
	curRowIdx      uint64
}

// getStartOffset returns the (clamped) index of the first row in the
// current row's frame.
func (p *rowFrameWindowProcessor) getStartOffset(numRows uint64) uint64 {
	if p.start.UnBounded {
		return 0
	}
	switch p.start.Type {
	case ast.Preceding:
		if p.curRowIdx >= p.start.Num {
			return p.curRowIdx - p.start.Num
		}
		return 0
	case ast.Following:
		offset := p.curRowIdx + p.start.Num
		if offset >= numRows {
			return numRows
		}
		return offset
	case ast.CurrentRow:
		return p.curRowIdx
	}
	// It will never reach here.
	return 0
}

// getEndOffset returns the (clamped, exclusive) index just past the last
// row in the current row's frame.
func (p *rowFrameWindowProcessor) getEndOffset(numRows uint64) uint64 {
	if p.end.UnBounded {
		return numRows
	}
	switch p.end.Type {
	case ast.Preceding:
		if p.curRowIdx >= p.end.Num {
			return p.curRowIdx - p.end.Num + 1
		}
		return 0
	case ast.Following:
		offset := p.curRowIdx + p.end.Num
		if offset >= numRows {
			return numRows
		}
		return offset + 1
	case ast.CurrentRow:
		return p.curRowIdx + 1
	}
	// It will never reach here.
	return 0
}

func (*rowFrameWindowProcessor) consumeGroupRows(_ sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
	return rows, nil
}

func (p *rowFrameWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error) {
	numRows := uint64(len(rows))
	var (
		err                      error
		initializedSlidingWindow bool
		start                    uint64
		end                      uint64
		lastStart                uint64
		lastEnd                  uint64
		shiftStart               uint64
		shiftEnd                 uint64
	)
	// Cache which functions support sliding-window updates so the type
	// assertion is done once per function, not once per row.
	slidingWindowAggFuncs := make([]aggfuncs.SlidingWindowAggFunc, len(p.windowFuncs))
	for i, windowFunc := range p.windowFuncs {
		if slidingWindowAggFunc, ok := windowFunc.(aggfuncs.SlidingWindowAggFunc); ok {
			slidingWindowAggFuncs[i] = slidingWindowAggFunc
		}
	}
	for ; remained > 0; lastStart, lastEnd = start, end {
		start = p.getStartOffset(numRows)
		end = p.getEndOffset(numRows)
		p.curRowIdx++
		remained--
		shiftStart = start - lastStart
		shiftEnd = end - lastEnd
		// Empty frame: slide (if supported) and emit the current partial
		// result without consuming any rows.
		if start >= end {
			for i, windowFunc := range p.windowFuncs {
				slidingWindowAggFunc := slidingWindowAggFuncs[i]
				if slidingWindowAggFunc != nil && initializedSlidingWindow {
					err = slidingWindowAggFunc.Slide(ctx, func(u uint64) chunk.Row {
						return rows[u]
					}, lastStart, lastEnd, shiftStart, shiftEnd, p.partialResults[i])
					if err != nil {
						return nil, err
					}
				}
				err = windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
				if err != nil {
					return nil, err
				}
			}
			continue
		}

		for i, windowFunc := range p.windowFuncs {
			slidingWindowAggFunc := slidingWindowAggFuncs[i]
			if slidingWindowAggFunc != nil && initializedSlidingWindow {
				err = slidingWindowAggFunc.Slide(ctx, func(u uint64) chunk.Row {
					return rows[u]
				}, lastStart, lastEnd, shiftStart, shiftEnd, p.partialResults[i])
			} else {
				// For MinMaxSlidingWindowAggFuncs, it needs the absolute value of each start of window, to compare
				// whether elements inside deque are out of current window.
				if minMaxSlidingWindowAggFunc, ok := windowFunc.(aggfuncs.MaxMinSlidingWindowAggFunc); ok {
					// Store start inside MaxMinSlidingWindowAggFunc.windowInfo
					minMaxSlidingWindowAggFunc.SetWindowStart(start)
				}
				_, err = windowFunc.UpdatePartialResult(ctx, rows[start:end], p.partialResults[i])
			}
			if err != nil {
				return nil, err
			}
			err = windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
			if err != nil {
				return nil, err
			}
			// Non-sliding functions recompute from scratch per row.
			if slidingWindowAggFunc == nil {
				windowFunc.ResetPartialResult(p.partialResults[i])
			}
		}
		if !initializedSlidingWindow {
			initializedSlidingWindow = true
		}
	}
	for i, windowFunc := range p.windowFuncs {
		windowFunc.ResetPartialResult(p.partialResults[i])
	}
	return rows, nil
}

func (p *rowFrameWindowProcessor) resetPartialResult() {
	p.curRowIdx = 0
}

// rangeFrameWindowProcessor handles RANGE frames: the frame bounds are
// found by comparing order-by column values, so the start/end offsets are
// searched (monotonically) rather than computed.
type rangeFrameWindowProcessor struct {
	windowFuncs     []aggfuncs.AggFunc
	partialResults  []aggfuncs.PartialResult
	start           *core.FrameBound
	end             *core.FrameBound
	curRowIdx       uint64
	lastStartOffset uint64
	lastEndOffset   uint64
	orderByCols     []*expression.Column
	// expectedCmpResult is used to decide if one value is included in the frame.
	expectedCmpResult int64
}

// getStartOffset advances lastStartOffset to the first row inside the
// current row's frame. Offsets only move forward across calls.
func (p *rangeFrameWindowProcessor) getStartOffset(ctx sessionctx.Context, rows []chunk.Row) (uint64, error) {
	if p.start.UnBounded {
		return 0, nil
	}
	numRows := uint64(len(rows))
	for ; p.lastStartOffset < numRows; p.lastStartOffset++ {
		var res int64
		var err error
		for i := range p.orderByCols {
			res, _, err = p.start.CmpFuncs[i](ctx, p.orderByCols[i], p.start.CalcFuncs[i], rows[p.lastStartOffset], rows[p.curRowIdx])
			if err != nil {
				return 0, err
			}
			if res != 0 {
				break
			}
		}
		// For asc, break when the current value is greater or equal to the calculated result;
		// For desc, break when the current value is less or equal to the calculated result.
		if res != p.expectedCmpResult {
			break
		}
	}
	return p.lastStartOffset, nil
}

// getEndOffset advances lastEndOffset just past the last row inside the
// current row's frame. Offsets only move forward across calls.
func (p *rangeFrameWindowProcessor) getEndOffset(ctx sessionctx.Context, rows []chunk.Row) (uint64, error) {
	numRows := uint64(len(rows))
	if p.end.UnBounded {
		return numRows, nil
	}
	for ; p.lastEndOffset < numRows; p.lastEndOffset++ {
		var res int64
		var err error
		for i := range p.orderByCols {
			res, _, err = p.end.CmpFuncs[i](ctx, p.end.CalcFuncs[i], p.orderByCols[i], rows[p.curRowIdx], rows[p.lastEndOffset])
			if err != nil {
				return 0, err
			}
			if res != 0 {
				break
			}
		}
		// For asc, break when the calculated result is greater than the current value.
		// For desc, break when the calculated result is less than the current value.
		if res == p.expectedCmpResult {
			break
		}
	}
	return p.lastEndOffset, nil
}

func (p *rangeFrameWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, rows []chunk.Row, chk *chunk.Chunk, remained int) ([]chunk.Row, error) {
	var (
		err                      error
		initializedSlidingWindow bool
		start                    uint64
		end                      uint64
		lastStart                uint64
		lastEnd                  uint64
		shiftStart               uint64
		shiftEnd                 uint64
	)
	// Same sliding-window caching strategy as the ROWS-frame processor.
	slidingWindowAggFuncs := make([]aggfuncs.SlidingWindowAggFunc, len(p.windowFuncs))
	for i, windowFunc := range p.windowFuncs {
		if slidingWindowAggFunc, ok := windowFunc.(aggfuncs.SlidingWindowAggFunc); ok {
			slidingWindowAggFuncs[i] = slidingWindowAggFunc
		}
	}
	for ; remained > 0; lastStart, lastEnd = start, end {
		start, err = p.getStartOffset(ctx, rows)
		if err != nil {
			return nil, err
		}
		end, err = p.getEndOffset(ctx, rows)
		if err != nil {
			return nil, err
		}
		p.curRowIdx++
		remained--
		shiftStart = start - lastStart
		shiftEnd = end - lastEnd
		// Empty frame: slide (if supported) and emit the current partial
		// result without consuming any rows.
		if start >= end {
			for i, windowFunc := range p.windowFuncs {
				slidingWindowAggFunc := slidingWindowAggFuncs[i]
				if slidingWindowAggFunc != nil && initializedSlidingWindow {
					err = slidingWindowAggFunc.Slide(ctx, func(u uint64) chunk.Row {
						return rows[u]
					}, lastStart, lastEnd, shiftStart, shiftEnd, p.partialResults[i])
					if err != nil {
						return nil, err
					}
				}
				err = windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
				if err != nil {
					return nil, err
				}
			}
			continue
		}

		for i, windowFunc := range p.windowFuncs {
			slidingWindowAggFunc := slidingWindowAggFuncs[i]
			if slidingWindowAggFunc != nil && initializedSlidingWindow {
				err = slidingWindowAggFunc.Slide(ctx, func(u uint64) chunk.Row {
					return rows[u]
				}, lastStart, lastEnd, shiftStart, shiftEnd, p.partialResults[i])
			} else {
				if minMaxSlidingWindowAggFunc, ok := windowFunc.(aggfuncs.MaxMinSlidingWindowAggFunc); ok {
					minMaxSlidingWindowAggFunc.SetWindowStart(start)
				}
				_, err = windowFunc.UpdatePartialResult(ctx, rows[start:end], p.partialResults[i])
			}
			if err != nil {
				return nil, err
			}
			err = windowFunc.AppendFinalResult2Chunk(ctx, p.partialResults[i], chk)
			if err != nil {
				return nil, err
			}
			// Non-sliding functions recompute from scratch per row.
			if slidingWindowAggFunc == nil {
				windowFunc.ResetPartialResult(p.partialResults[i])
			}
		}
		if !initializedSlidingWindow {
			initializedSlidingWindow = true
		}
	}
	for i, windowFunc := range p.windowFuncs {
		windowFunc.ResetPartialResult(p.partialResults[i])
	}
	return rows, nil
}

func (*rangeFrameWindowProcessor) consumeGroupRows(_ sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
	return rows, nil
}

func (p *rangeFrameWindowProcessor) resetPartialResult() {
	p.curRowIdx = 0
	p.lastStartOffset = 0
	p.lastEndOffset = 0
}
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"

	cidutil "gx/ipfs/QmdPQx9fvN5ExVwMhRmh7YpCQJzJrFhd1AjVBwJmRMFJeX/go-cidutil"

	c "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"

	mb "gx/ipfs/QmekxXDhCxCJRNuzmHreuaT3BsuJcsjcXWNrtV9C8DRHtd/go-multibase"
)

// usage prints command-line help to stderr and exits with status 2.
func usage() {
	fmt.Fprintf(os.Stderr, "usage: %s [-b multibase-code] [-v cid-version] [--filter] <fmt-str> <cid> ...\n", os.Args[0])
	fmt.Fprintf(os.Stderr, "--filter will read from stdin and convert anything that looks like a <cid>\n")
	fmt.Fprintf(os.Stderr, " -- including any non-cids that are valid Multihashes).\n")
	fmt.Fprintf(os.Stderr, "<fmt-str> is either 'prefix' or a printf style format string:\n%s", cidutil.FormatRef)
	os.Exit(2)
}

// main parses flags by hand (so flags may appear before the format
// string), then either filters stdin for cid-like tokens or formats each
// cid given on the command line.
func main() {
	if len(os.Args) < 2 {
		usage()
	}
	newBase := mb.Encoding(-1) // -1 means "keep each cid's own base"
	var verConv func(cid c.Cid) (c.Cid, error)
	args := os.Args[1:]
	filter := false
outer:
	for len(args) > 0 {
		switch args[0] {
		case "-b":
			if len(args) < 2 {
				usage()
			}
			encoder, err := mb.EncoderByName(args[1])
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
				os.Exit(2)
			}
			newBase = encoder.Encoding()
			args = args[2:]
		case "-v":
			if len(args) < 2 {
				usage()
			}
			switch args[1] {
			case "0":
				verConv = toCidV0
			case "1":
				verConv = toCidV1
			default:
				fmt.Fprintf(os.Stderr, "Error: Invalid cid version: %s\n", args[1])
				os.Exit(2)
			}
			args = args[2:]
		case "--filter":
			filter = true
			args = args[1:]
		default:
			// First non-flag argument: stop flag parsing.
			break outer
		}
	}
	if len(args) < 1 {
		usage()
	}
	fmtStr := args[0]
	switch fmtStr {
	case "prefix":
		fmtStr = "%P"
	default:
		if strings.IndexByte(fmtStr, '%') == -1 {
			fmt.Fprintf(os.Stderr, "Error: Invalid format string: %s\n", fmtStr)
			os.Exit(2)
		}
	}
	// format renders one cid, applying the optional base override and
	// version conversion.
	format := func(cid c.Cid, cidStr string) (string, error) {
		base := newBase
		if base == -1 {
			// No override: reuse the base the input string was written in.
			base, _ = c.ExtractEncoding(cidStr)
		}
		var err error
		if verConv != nil {
			cid, err = verConv(cid)
			if err != nil {
				return "", err
			}
		}
		return cidutil.Format(fmtStr, base, cid)
	}
	if filter {
		scanner := bufio.NewScanner(os.Stdin)
		for scanner.Scan() {
			buf := scanner.Bytes()
			for {
				// Scan forward for the next cid-like token; i is where it
				// starts, j is where it ends.
				i, j, cid, cidStr := cidutil.ScanForCid(buf)
				os.Stdout.Write(buf[0:i])
				if i == len(buf) {
					os.Stdout.Write([]byte{'\n'})
					break
				}
				str, err := format(cid, cidStr)
				// NOTE: case order in a type switch does not affect
				// matching; only a bad format string is fatal.
				switch err.(type) {
				case cidutil.FormatStringError:
					fmt.Fprintf(os.Stderr, "Error: %v\n", err)
					os.Exit(2)
				default:
					// just use the original string on non-fatal error
					str = cidStr
				case nil:
				}
				io.WriteString(os.Stdout, str)
				buf = buf[j:]
			}
		}
		if err := scanner.Err(); err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(2)
		}
	} else {
		for _, cidStr := range args[1:] {
			cid, err := c.Decode(cidStr)
			if err != nil {
				fmt.Fprintf(os.Stdout, "!INVALID_CID!\n")
				errorMsg("%s: %v", cidStr, err)
				// Don't abort on a bad cid
				continue
			}
			str, err := format(cid, cidStr)
			switch err.(type) {
			case cidutil.FormatStringError:
				fmt.Fprintf(os.Stderr, "Error: %v\n", err)
				os.Exit(2)
			default:
				fmt.Fprintf(os.Stdout, "!ERROR!\n")
				errorMsg("%s: %v", cidStr, err)
				// Don't abort on cid specific errors
				continue
			case nil:
				// no error
			}
			fmt.Fprintf(os.Stdout, "%s\n", str)
		}
	}
	os.Exit(exitCode)
}

// exitCode is the final process status; errorMsg flips it to 1 on any
// non-fatal per-cid error.
var exitCode = 0

// errorMsg reports a non-fatal error to stderr and marks the run failed.
func errorMsg(fmtStr string, a ...interface{}) {
	fmt.Fprintf(os.Stderr, "Error: ")
	fmt.Fprintf(os.Stderr, fmtStr, a...)
	fmt.Fprintf(os.Stderr, "\n")
	exitCode = 1
}

// toCidV0 downgrades a cid to version 0; only protobuf-typed cids have a
// v0 representation.
func toCidV0(cid c.Cid) (c.Cid, error) {
	if cid.Type() != c.DagProtobuf {
		return c.Cid{}, fmt.Errorf("can't convert non-protobuf nodes to cidv0")
	}
	return c.NewCidV0(cid.Hash()), nil
}

// toCidV1 upgrades any cid to version 1, preserving its codec and hash.
func toCidV1(cid c.Cid) (c.Cid, error) {
	return c.NewCidV1(cid.Type(), cid.Hash()), nil
}
package common var l *Logger var c *Config var d *Db func init() { l = NewLogger() c = NewConfig() d = NewDb() }
package main

import (
	"testing"
	"time"
)

// Person mirrors the document layout written to the "person" collection.
type Person struct {
	Name        string
	PhoneNumber string
	Date        time.Time
}

// TestConnectMongo is an integration check: it dials a local MongoDB
// instance and inserts one sample document into foo.person.
func TestConnectMongo(t *testing.T) {
	connectMongo("mongodb://localhost:27017")

	sample := Person{
		Name:        "Tom",
		PhoneNumber: "1662777",
		Date:        time.Now().Local(),
	}
	insertMongoDocument("foo", "person", sample)
}
package yaml

import (
	"fmt"
	"io"
	"strings"
)

// Error wraps an underlying error so it can be rendered by Output.
type Error struct {
	Err error
}

// HasErr reports whether the wrapper carries an error; always true for
// this type.
func (le Error) HasErr() bool {
	return true
}

// node is the shape of chainable errors that know their source location;
// Previous walks toward the root cause.
type node interface {
	Previous() error
	Info() string
	GetName() (fileName, packageName, functionName string)
	GetLine() (line int)
}

// Output writes the wrapped error chain to w as an indented list.
// Entries implementing node are printed with their package/file/line and
// unwrapped via Previous; plain errors terminate the walk. The first
// written entry gets a bare "- " marker, later ones a newline-prefixed
// one — linePrefix doubles as the "already printed something" flag.
func (le Error) Output(context interface{}, prefix string, w io.Writer) {
	var linePrefix []byte
	for e := le.Err; e != nil; {
		var info string
		if x, ok := e.(node); ok {
			fileName, packageName, functionName := x.GetName()
			if linePrefix == nil {
				linePrefix = []byte("\n" + prefix + "- ")
				_, _ = w.Write([]byte("- "))
			} else {
				_, _ = w.Write(linePrefix)
			}
			_, _ = fmt.Fprintf(w, "[%v, %v, %v, %v()]", packageName, fileName, x.GetLine(), functionName)
			info, e = strings.TrimSpace(x.Info()), x.Previous()
		} else {
			// Plain error: print its message and stop after this pass.
			info, e = strings.TrimSpace(e.Error()), nil
		}
		if len(info) > 0 {
			if linePrefix == nil {
				linePrefix = []byte("\n" + prefix + "- ")
				_, _ = w.Write([]byte("- "))
			} else {
				_, _ = w.Write(linePrefix)
			}
			OutputText(prefix, info, w)
		}
	}
}

// NewError wraps value in an Error and pairs it with key as a Line.
func NewError(key interface{}, value error) Line {
	return Line{key, Error{value}}
}
// provides fast matching algorithms // TODO: aho-corasic on substring matching package sieve import ( "github.com/ActiveState/log" "regexp" "strings" ) // MultiRegexpMatch allows matching a string against multiple regular // expressions along with substrings for a fast fail-early matching. type MultiRegexpMatcher struct { substrings map[string]string // substring to name regexps map[string]*regexp.Regexp // name to regexp substringsRegexp *regexp.Regexp // substring regex combined } func NewMultiRegexpMatcher() *MultiRegexpMatcher { return &MultiRegexpMatcher{ make(map[string]string), make(map[string]*regexp.Regexp), nil} } func (m *MultiRegexpMatcher) MustAdd(name string, substring string, re string) { if oldName, ok := m.substrings[substring]; ok { log.Fatalf( "substring %s already added under %s; being added again by %s", substring, oldName, name) } if _, ok := m.regexps[name]; ok { log.Fatal("already in regexps") } m.substrings[substring] = name m.regexps[name] = regexp.MustCompile(re) } func (m *MultiRegexpMatcher) Build() { escaped := make([]string, 0, len(m.substrings)) for substring, _ := range m.substrings { escaped = append(escaped, regexp.QuoteMeta(substring)) } m.substringsRegexp = regexp.MustCompile(strings.Join(escaped, "|")) } // Match tries to match the text against one of the substring/regexp // as efficiently as possible. func (m *MultiRegexpMatcher) Match(text string) (string, []string) { // TODO: use aho-corasick instead of regexp to match the substrings. substring := m.substringsRegexp.FindString(text) if substring == "" { // fail return early so we don't have to waste time on futile regex // matching (below) return "", nil } if name, ok := m.substrings[substring]; ok { if re, ok := m.regexps[name]; ok { // TODO: if this regex fails, should we try the next // matching substring? return name, re.FindStringSubmatch(text) } } panic("not reachable") }
package easypost_test

import (
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/EasyPost/easypost-go/v3"
)

// TestApiError tests that a bad API request returns an InvalidRequestError (a subclass of APIError), and that the
// error is parsed and pretty-printed correctly.
func (c *ClientTests) TestApiError() {
	client := c.TestClient()
	assert, require := c.Assert(), c.Require()

	// An empty shipment is guaranteed to be rejected by the API.
	_, err := client.CreateShipment(&easypost.Shipment{})
	require.Error(err)

	if invalidErr, isInvalid := err.(*easypost.InvalidRequestError); isInvalid {
		assert.Equal(422, invalidErr.StatusCode)
		assert.Equal("PARAMETER.REQUIRED", invalidErr.Code)
		assert.Equal("Missing required parameter.", invalidErr.Message)
		assert.Equal(1, len(invalidErr.Errors))

		fieldErr := invalidErr.Errors[0]
		assert.Equal("shipment", fieldErr.Field)
		assert.Equal("cannot be blank", fieldErr.Message)
	}

	// Assert that the pretty printed error is the same
	assert.Equal("PARAMETER.REQUIRED Missing required parameter.", err.Error())
}

// TestApiErrorStatusCodes tests that the correct API error type is determined for each HTTP status code.
func (c *ClientTests) TestApiErrorStatusCodes() { assert := c.Assert() res := &http.Response{ StatusCode: 0, Body: ioutil.NopCloser(strings.NewReader("")), } res.StatusCode = 0 err := easypost.BuildErrorFromResponse(res) _, ok := err.(*easypost.ConnectionError) assert.True(ok) res.StatusCode = 100 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RetryError) assert.True(ok) res.StatusCode = 101 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RetryError) assert.True(ok) res.StatusCode = 102 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RetryError) assert.True(ok) res.StatusCode = 103 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RetryError) assert.True(ok) res.StatusCode = 300 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 301 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 302 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 303 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 304 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 305 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 306 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 307 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 308 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RedirectError) assert.True(ok) res.StatusCode = 400 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.BadRequestError) assert.True(ok) res.StatusCode = 401 err = easypost.BuildErrorFromResponse(res) _, ok = 
err.(*easypost.UnauthorizedError) assert.True(ok) res.StatusCode = 402 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.PaymentError) assert.True(ok) res.StatusCode = 403 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.ForbiddenError) assert.True(ok) res.StatusCode = 404 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.NotFoundError) assert.True(ok) res.StatusCode = 405 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.MethodNotAllowedError) assert.True(ok) res.StatusCode = 407 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.ProxyError) assert.True(ok) res.StatusCode = 408 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.TimeoutError) assert.True(ok) res.StatusCode = 422 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.InvalidRequestError) assert.True(ok) res.StatusCode = 429 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.RateLimitError) assert.True(ok) res.StatusCode = 500 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.InternalServerError) assert.True(ok) res.StatusCode = 503 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.ServiceUnavailableError) assert.True(ok) res.StatusCode = 504 err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.GatewayTimeoutError) assert.True(ok) res.StatusCode = 7000 // unaccounted for status code err = easypost.BuildErrorFromResponse(res) _, ok = err.(*easypost.UnknownHttpError) assert.True(ok) } // TestApiErrorMessageParseArray tests that the internal API error parsing works correctly when the error message is an array. 
func (c *ClientTests) TestApiErrorMessageParseArray() { assert := c.Assert() fakeErrorResponse := `{ "error": { "code": "UNPROCESSABLE_ENTITY", "message": ["Bad format", "Bad format 2"], "errors": [] } }` res := &http.Response{ StatusCode: 422, Body: ioutil.NopCloser(strings.NewReader(fakeErrorResponse)), } err := easypost.BuildErrorFromResponse(res) errorMessage := err.Error() assert.Equal("UNPROCESSABLE_ENTITY Bad format, Bad format 2", errorMessage) } // TestErrorMessageParseMap tests that the internal API error parsing works correctly when the error message is a map. func (c *ClientTests) TestErrorMessageParseMap() { assert := c.Assert() fakeErrorResponse := `{ "error": { "code": "UNPROCESSABLE_ENTITY", "message": { "errors": [ "Bad format", "Bad format 2" ] }, "errors": [] } }` res := &http.Response{ StatusCode: 422, Body: ioutil.NopCloser(strings.NewReader(fakeErrorResponse)), } err := easypost.BuildErrorFromResponse(res) errorMessage := err.Error() assert.Equal("UNPROCESSABLE_ENTITY Bad format, Bad format 2", errorMessage) } // TestErrorMessageParseExtreme tests that the internal API error parsing works correctly when the error message is a combination of maps and arrays. func (c *ClientTests) TestErrorMessageParseExtreme() { assert := c.Assert() fakeErrorResponse := `{ "error": { "code": "UNPROCESSABLE_ENTITY", "message": { "errors": [ { "message1": "message1", "errors": ["message2", "message3"], "errors2": { "key": { "key2": "message4" } } }, "message5", { "message6": "message6" } ] }, "errors": [] } }` res := &http.Response{ StatusCode: 422, Body: ioutil.NopCloser(strings.NewReader(fakeErrorResponse)), } err := easypost.BuildErrorFromResponse(res) errorMessage := err.Error() messages := []string{"message1", "message2", "message3", "message4", "message5", "message6"} for _, message := range messages { assert.Contains(errorMessage, message) } }
package worker

import (
	"Edwardz43/tgbot/message/from"
	"time"
)

// Worker executes a unit of work supplied as a variadic-args callback
// that may fail.
type Worker interface {
	Do(func(args ...interface{}) error)
}

// Job records the lifecycle of one delivery task.
type Job struct {
	ID          int64        `json:"id"`
	DeliverDate *time.Time   `json:"deliver_date"` // presumably nil until dispatched — TODO confirm
	FinishDate  *time.Time   `json:"finish_date"`  // presumably nil until completed — TODO confirm
	Done        bool         `json:"done"`
	Result      *from.Result `json:"result"`
}
package main

// cake is a stack of layers, bottom (index 0) to top.
type cake []*layer

// String renders the cake top-down, one layer per line.
func (c *cake) String() string {
	s := ""
	for i := len(*c) - 1; i >= 0; i-- {
		s = s + (*c)[i].String() + "\n"
	}
	return s
}

// bottom returns the lowest layer, or nil for an empty cake.
func (c *cake) bottom() *layer {
	if len(*c) == 0 {
		return nil
	}
	return (*c)[0]
}

// top returns the highest layer, or nil for an empty cake.
func (c *cake) top() *layer {
	if len(*c) == 0 {
		return nil
	}
	return (*c)[len(*c)-1]
}

// copy returns a new cake holding the same layers.
// BUG FIX: the original returned an empty cake in BOTH branches, so the
// "copy" always came back empty; it now actually duplicates the slice
// (a shallow copy — layer pointers are shared).
func (c *cake) copy() *cake {
	dup := make(cake, len(*c))
	copy(dup, *c)
	return &dup
}

// layer is one horizontal slice of the cake, showing at most two
// characters of the original input at indices i and j.
type layer struct {
	i     int // left index
	j     int // right index
	chars []byte
	left  byte
	right byte
	up    *layer
	down  *layer
}

// push links top above l, wiring both up/down pointers; safe on nil l.
func (l *layer) push(top *layer) {
	if l != nil {
		l.up = top
		top.down = l
	}
}

// String renders the layer as underscores with the layer's two source
// characters shown at positions i and j.
func (l layer) String() string {
	bb := make([]byte, len(l.chars))
	spaceFill(bb)
	bb[l.i] = l.chars[l.i]
	bb[l.j] = l.chars[l.j]
	return string(bb)
}

// val is a placeholder; it currently always returns the empty string.
func (l layer) val() string {
	return ""
}

// bake builds a cake from chars by pairing characters inward from both
// ends; returns nil for empty input.
func bake(chars []byte) *cake {
	if len(chars) == 0 {
		return nil
	}
	// Work on a private copy so the caller's slice is never modified.
	c := make([]byte, len(chars))
	copy(c, chars)
	i := 0
	j := len(c) - 1
	layers := make([]*layer, 0)
	for i <= j {
		layers, i, j = updateLayers(layers, c, i, j)
	}
	if len(layers) == 0 {
		return nil
	}
	ck := cake(layers)
	return &ck
}

// spaceFill paints every byte of bb with the filler character '_'.
func spaceFill(bb []byte) {
	for i := range bb {
		bb[i] = '_'
	}
}

// updateLayers consumes the character pair at indices i and j and
// appends the resulting layer(s) to layers.
//
// layers: all lower layers, lowest to highest
// chars: original input
// i: left index of this layer
// j: right index of this layer
// On an asymetric layer, i.e. chars[i] != chars[j], this pushes the left
// character onto layers first, then the right character.
func updateLayers(layers []*layer, chars []byte, i, j int) ([]*layer, int, int) {
	var top *layer
	if len(layers) > 0 {
		top = layers[len(layers)-1]
	}
	asymetric := chars[i] != chars[j]
	if asymetric {
		lay1 := &layer{
			i:     i,
			j:     i,
			chars: chars,
		}
		lay2 := &layer{
			i:     j,
			j:     j,
			chars: chars,
		}
		top.push(lay1)
		lay1.push(lay2)
		layers = append(layers, lay1, lay2)
	} else {
		lay1 := &layer{
			i:     i,
			j:     j,
			chars: chars,
		}
		top.push(lay1)
		layers = append(layers, lay1)
	}
	i++
	j--
	return layers, i, j
}
package fakes import "github.com/cloudfoundry-incubator/notifications/models" type UnsubscribesRepo struct { Unsubscribes map[string]models.Unsubscribe } func NewUnsubscribesRepo() *UnsubscribesRepo { return &UnsubscribesRepo{ Unsubscribes: map[string]models.Unsubscribe{}, } } func (fake *UnsubscribesRepo) Create(conn models.ConnectionInterface, unsubscribe models.Unsubscribe) (models.Unsubscribe, error) { key := unsubscribe.ClientID + unsubscribe.KindID + unsubscribe.UserID if _, ok := fake.Unsubscribes[key]; ok { return unsubscribe, models.ErrDuplicateRecord{} } fake.Unsubscribes[key] = unsubscribe return unsubscribe, nil } func (fake *UnsubscribesRepo) Upsert(conn models.ConnectionInterface, unsubscribe models.Unsubscribe) (models.Unsubscribe, error) { key := unsubscribe.ClientID + unsubscribe.KindID + unsubscribe.UserID fake.Unsubscribes[key] = unsubscribe return unsubscribe, nil } func (fake *UnsubscribesRepo) Find(conn models.ConnectionInterface, clientID string, kindID string, userID string) (models.Unsubscribe, error) { key := clientID + kindID + userID if unsubscribe, ok := fake.Unsubscribes[key]; ok { return unsubscribe, nil } return models.Unsubscribe{}, models.ErrRecordNotFound{} } func (fake *UnsubscribesRepo) Destroy(conn models.ConnectionInterface, unsubscribe models.Unsubscribe) (int, error) { key := unsubscribe.ClientID + unsubscribe.KindID + unsubscribe.UserID delete(fake.Unsubscribes, key) return 0, nil }
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package oteltest // import "go.opentelemetry.io/otel/oteltest"

import (
	"testing"

	"go.opentelemetry.io/otel/internal/internaltest"
)

// Harness is a testing harness used to test implementations of the
// OpenTelemetry API.
//
// It is a thin alias over the internal implementation, kept only for
// backwards compatibility.
//
// Deprecated: this will be removed in the next major release.
type Harness = internaltest.Harness

// NewHarness returns an instantiated *Harness using t.
//
// Deprecated: this will be removed in the next major release.
func NewHarness(t *testing.T) *Harness {
	return internaltest.NewHarness(t)
}
package main import . "./topic" import . "./message" func main(){ msg := Message{} msg.CreateMessage("mensagem sobre o assunto",7) topic := Topic{} topic.CreateTopic("assunto") topic.AddMessage(msg) println(topic.Messages.Pop()) topic.AddSubscriber("fulana") println(topic.Subscribed[0]) }
package bslib

import (
	"strings"
	"testing"
)

// nonExistingCypher is an ID that no encryptor implementation registers.
const nonExistingCypher = "blah-blah"

// TestInitNonExistingCypher verifies Init rejects unknown cypher IDs
// with the dedicated error code.
func TestInitNonExistingCypher(t *testing.T) {
	enc := new(bsEncryptor)
	err := enc.Init(nonExistingCypher)
	switch {
	case err == nil:
		t.Error("Should return error, cypher does not exist")
	case !strings.Contains(err.Error(), BSERR00004EncCypherNotExist):
		t.Error("Wrong error message, cypher does not exist")
	}
}

// TestCypherNames verifies at least one cypher name is reported.
func TestCypherNames(t *testing.T) {
	enc := new(bsEncryptor)
	if names := enc.getCypherNames(); len(names) == 0 {
		t.Error("Cannot retrieve cypher's names")
	}
}

// TestEncryptWithoutInit verifies Encrypt fails before Init was called.
func TestEncryptWithoutInit(t *testing.T) {
	enc := new(bsEncryptor)
	if _, err := enc.Encrypt(nonExistingCypher); err == nil {
		t.Error("Should return error, cypher is not initialized")
	}
}

const cCryptId = "8GA63DMN" // AES-256

// TestEncryptEmptyString verifies encrypting "" is rejected even after a
// successful Init.
func TestEncryptEmptyString(t *testing.T) {
	enc := new(bsEncryptor)
	if err := enc.Init(cCryptId); err != nil {
		t.Error(err.Error())
		return
	}
	if _, err := enc.Encrypt(""); err == nil {
		t.Error("Should return error, cypher is not initialized")
	}
}

// TestCheckNonExistingCypher verifies name-to-ID lookup fails for a fake
// cypher name.
func TestCheckNonExistingCypher(t *testing.T) {
	enc := new(bsEncryptor)
	if _, err := enc.getCryptIDbyName(nonExistingCypher); err == nil {
		t.Error("Should return error, cypher is fake")
	}
}
package search

import "log"

// Result is one matched fragment: the field it came from and its text.
type Result struct {
	Field   string
	Content string
}

// Matcher is implemented by anything that can scan a feed for a term.
type Matcher interface {
	Search(feed *Feed, searchTerm string) ([]*Result, error)
}

// Match runs the matcher against a single feed and streams every hit
// into results. The parameter type `chan<- *Result` makes the channel
// send-only inside this function (reading with <-results would not
// compile); the receive-only counterpart would be declared `<-chan
// *Result`.
func Match(matcher Matcher, feed *Feed, searchTerm string, results chan<- *Result) {
	found, err := matcher.Search(feed, searchTerm)
	if err != nil {
		log.Println("err:", err)
		return
	}
	for _, hit := range found {
		results <- hit
	}
}

// Display drains the results channel, logging each hit, until the
// channel is closed by the producer.
func Display(results chan *Result) {
	for result := range results {
		log.Printf("%s:\n %s\n", result.Field, result.Content)
	}
}
package sudoku

import (
	"math/rand"
)

//GenerationOptions provides configuration options for generating a sudoku puzzle.
type GenerationOptions struct {
	//Symmetry and SymmetryPercentage control the aesthetics of the generated
	//grid: SymmetryPercentage controls roughly what percentage of cells will
	//have a filled partner across the provided plane of symmetry.
	Symmetry           SymmetryType
	SymmetryPercentage float64
	//The minimum number of cells to leave filled in the puzzle. The generated
	//puzzle might have more filled cells. A value of DIM * DIM - 1, for
	//example, would return an extremely trivial puzzle.
	MinFilledCells int
}

//DefaultGenerationOptions returns a GenerationOptions object configured to
//have reasonable defaults.
func DefaultGenerationOptions() *GenerationOptions {
	return &GenerationOptions{
		Symmetry:           SYMMETRY_VERTICAL,
		SymmetryPercentage: 0.7,
		MinFilledCells:     0,
	}
}

//Fill mutates the grid into a random, fully solved state, reporting whether
//a solution was found.
func (self *mutableGridImpl) Fill() bool {
	solutions := nOrFewerSolutions(self, 1)
	if len(solutions) == 0 {
		return false
	}
	//We use Load instead of loadSDK because we are just incidentally using it
	//to load state.
	self.Load(solutions[0].DataString())
	return true
}

//GenerateGrid returns a new sudoku puzzle with a single unique solution and
//many of its cells unfilled--a puzzle that is appropriate (and hopefully fun)
//for humans to solve. Puzzles returned will have filled cells locked (see
//cell.Lock for more on locking). GenerateGrid first finds a random full
//filling of the grid, then iteratively removes cells until just before the
//grid begins having multiple solutions. The result is a grid that has a
//single valid solution but many of its cells unfilled. Pass nil for options
//to use reasonable defaults. GenerateGrid doesn't currently give any way to
//define the desired difficulty; the best option is to repeatedly generate
//puzzles until you find one that matches your desired difficulty. cmd/dokugen
//applies this technique.
func GenerateGrid(options *GenerationOptions) MutableGrid {
	if options == nil {
		options = DefaultGenerationOptions()
	}

	grid := NewGrid()

	//Do a random fill of the grid
	grid.Fill()

	//Copy the percentage locally so we never mutate the caller's options.
	symmetryPercentage := options.SymmetryPercentage

	//Make sure symmetry percentage is within the legal range.
	if symmetryPercentage < 0.0 {
		symmetryPercentage = 0.0
	}
	if symmetryPercentage > 1.0 {
		symmetryPercentage = 1.0
	}

	//Visit cells in a random order so the removals are unbiased.
	originalCells := grid.MutableCells()
	cells := make(MutableCellSlice, len(originalCells))
	for i, j := range rand.Perm(len(cells)) {
		cells[i] = originalCells[j]
	}

	for _, cell := range cells {
		num := cell.Number()
		if num == 0 {
			//Already unfilled (as someone else's symmetric partner).
			continue
		}
		var otherNum int
		var otherCell MutableCell
		if rand.Float64() < symmetryPercentage {
			//Pick a symmetrical partner for symmetryPercentage number of cells.
			otherCell = cell.MutableSymmetricalPartner(options.Symmetry)
			if otherCell != nil {
				if otherCell.Number() == 0 {
					//We must have already un-filled it as a primary cell.
					//If we were to unfill this, we could get in a weird state where
					//we get multiple solutions without noticing (which caused bug #134).
					//So pretend like we didn't draw one.
					otherCell = nil
				} else {
					otherNum = otherCell.Number()
				}
			}
		}

		numCellsToFillThisStep := 1
		if otherCell != nil {
			numCellsToFillThisStep = 2
		}
		if grid.numFilledCells()-numCellsToFillThisStep < options.MinFilledCells {
			//Doing this step would leave us with too few cells filled. Finish.
			break
		}

		//Unfill it.
		cell.SetNumber(0)
		if otherCell != nil {
			otherCell.SetNumber(0)
		}

		if grid.HasMultipleSolutions() {
			//Put it back in: removing this cell (pair) broke uniqueness.
			cell.SetNumber(num)
			if otherCell != nil {
				otherCell.SetNumber(otherNum)
			}
		}
	}

	grid.LockFilledCells()

	return grid
}
package initialize import "go.uber.org/zap" func Logger() { // 定义全局 logger, _ := zap.NewDevelopment() zap.ReplaceGlobals(logger) }
package upload

import (
	"io/ioutil"
	"net/http"
	"testing"
)

// TestGetURL uploads a fixture file, fetches it back over HTTP via the
// URL returned by GetURL, and verifies the served bytes match the
// uploaded content.
func TestGetURL(t *testing.T) {
	filename := UploadTestFile(t)

	url := GetURL(filename)
	res, err := http.Get(url)
	if err != nil {
		// BUG FIX: the original used t.Error and kept going, which
		// panicked on the nil res at the deferred Body.Close below.
		t.Fatal(err)
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		// Nothing meaningful to compare if the body couldn't be read.
		t.Fatal(err)
	}

	if string(body) != TEST_FILE_DATA {
		t.Errorf("HTTP response %s is not %s", string(body), TEST_FILE_DATA)
	}
}
package main

// BlockService abstracts storage of raw block payloads.
type BlockService interface {
	// Upload persists one block of bytes. No error is returned;
	// presumably implementations handle failures internally — TODO
	// confirm with implementers.
	Upload([]byte)
}
package models

import "time"

// Well-known template names for each notification body and subject
// variant.
const (
	UserBodyTemplateName         = "user_body"
	SpaceBodyTemplateName        = "space_body"
	EmailBodyTemplateName        = "email_body"
	OrganizationBodyTemplateName = "organization_body"
	SubjectMissingTemplateName   = "subject.missing"
	SubjectProvidedTemplateName  = "subject.provided"
)

// TemplateNames lists every known template name, e.g. for iteration or
// validation.
var TemplateNames = []string{
	UserBodyTemplateName,
	SpaceBodyTemplateName,
	EmailBodyTemplateName,
	OrganizationBodyTemplateName,
	SubjectMissingTemplateName,
	SubjectProvidedTemplateName,
}

// Template is a database-backed notification template record.
type Template struct {
	Primary    int       `db:"primary"` // primary key
	Name       string    `db:"name"`
	Text       string    `db:"text"` // plain-text body
	HTML       string    `db:"html"` // HTML body
	Overridden bool      `db:"-"`    // computed at runtime, not persisted
	CreatedAt  time.Time `db:"created_at"`
}
package server

import "github.com/sirupsen/logrus"

var (
	// log is the package-scoped logger, tagged with this package's name.
	log = logrus.WithField("pkg", "server")

	// Channel buffer sizes for per-game fan-out plumbing.
	gameSubscriberBuffer = 10
	gameChatBuffer       = 10
	gameMoveBuffer       = 2
)
package database

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

// MysqlVersion connects to the given MySQL server and returns the value
// of `select version()`. It logs a short diagnostic and returns the
// underlying error when opening, pinging, or querying fails.
func MysqlVersion(host string, port int, database, username, password string) (string, error) {
	connString := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", username, password, host, port, database)

	conn, err := sql.Open("mysql", connString)
	if err != nil {
		log.Println("Parameter error.")
		return "", err
	}
	defer conn.Close()

	// sql.Open is lazy; Ping forces a real connection attempt.
	if err = conn.Ping(); err != nil {
		log.Println("Connection failed.")
		return "", err
	}

	var version string
	// BUG FIX: the original tested `err == nil` here, so success returned
	// ("", nil-error path inverted) and failures fell through to return
	// the zero version with a nil error.
	if err = conn.QueryRow(`select version()`).Scan(&version); err != nil {
		log.Println("Get version failed.")
		return "", err
	}
	return version, nil
}
package web

import (
	"net/http"

	"github.com/steam-authority/steam-authority/db"
	"github.com/steam-authority/steam-authority/logging"
)

// StatsTagsHandler renders the tags stats page: every tag plus the
// timestamp the tag list was last refreshed.
func StatsTagsHandler(w http.ResponseWriter, r *http.Request) {

	// Get config (last-updated timestamp). On failure the error is only
	// logged and the date renders as the zero value, matching the
	// original best-effort behavior.
	config, err := db.GetConfig(db.ConfTagsUpdated)
	logging.Error(err)

	// Get tags
	tags, err := db.GetAllTags()
	if err != nil {
		logging.Error(err)
		returnErrorTemplate(w, r, 500, "Error getting tags")
		return
	}

	// Template
	t := statsTagsTemplate{}
	t.Fill(w, r, "Tags")
	t.Tags = tags
	t.Date = config.Value

	// NOTE: the original ended with a redundant bare `return`
	// (staticcheck S1023); removed.
	returnTemplate(w, r, "tags", t)
}

// statsTagsTemplate is the view model for the tags page.
type statsTagsTemplate struct {
	GlobalTemplate
	Tags []db.Tag
	Date string
}
// Package manifests deals with creating manifests for all manifests to be installed for the cluster
package manifests

import (
	"bytes"
	"encoding/base64"
	"path/filepath"
	"strings"
	"text/template"

	"github.com/pkg/errors"
	"sigs.k8s.io/yaml"

	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/installconfig"
	"github.com/openshift/installer/pkg/asset/templates/content/bootkube"
	"github.com/openshift/installer/pkg/asset/tls"
	"github.com/openshift/installer/pkg/types"
	"github.com/openshift/installer/pkg/types/vsphere"
)

const (
	// manifestDir is the directory all generated manifests are written into.
	manifestDir = "manifests"
)

var (
	// kubeSysConfigPath is where the cluster-config-v1 configmap lands.
	kubeSysConfigPath = filepath.Join(manifestDir, "cluster-config.yaml")

	// Compile-time check that Manifests satisfies WritableAsset.
	_ asset.WritableAsset = (*Manifests)(nil)

	// customTmplFuncs are helpers available to the bootkube templates.
	customTmplFuncs = template.FuncMap{
		"indent": indent,
		"add": func(i, j int) int {
			return i + j
		},
	}
)

// Manifests generates the dependent operator config.yaml files
type Manifests struct {
	KubeSysConfig *configurationObject
	FileList      []*asset.File
}

// genericData is a loose string-to-string payload for configmaps.
type genericData map[string]string

// Name returns a human friendly name for the operator
func (m *Manifests) Name() string {
	return "Common Manifests"
}

// Dependencies returns all of the dependencies directly needed by a
// Manifests asset.
func (m *Manifests) Dependencies() []asset.Asset {
	return []asset.Asset{
		&installconfig.ClusterID{},
		&installconfig.InstallConfig{},
		&Ingress{},
		&DNS{},
		&Infrastructure{},
		&Networking{},
		&Proxy{},
		&Scheduler{},
		&ImageContentSourcePolicy{},
		&ClusterCSIDriverConfig{},
		&ImageDigestMirrorSet{},
		&tls.RootCA{},
		&tls.MCSCertKey{},
		&bootkube.CVOOverrides{},
		&bootkube.KubeCloudConfig{},
		&bootkube.KubeSystemConfigmapRootCA{},
		&bootkube.MachineConfigServerTLSSecret{},
		&bootkube.OpenshiftConfigSecretPullSecret{},
	}
}

// Generate generates the respective operator config.yml files
func (m *Manifests) Generate(dependencies asset.Parents) error {
	ingress := &Ingress{}
	dns := &DNS{}
	network := &Networking{}
	infra := &Infrastructure{}
	installConfig := &installconfig.InstallConfig{}
	proxy := &Proxy{}
	scheduler := &Scheduler{}
	imageContentSourcePolicy := &ImageContentSourcePolicy{}
	clusterCSIDriverConfig := &ClusterCSIDriverConfig{}
	imageDigestMirrorSet := &ImageDigestMirrorSet{}
	dependencies.Get(installConfig, ingress, dns, network, infra, proxy, scheduler, imageContentSourcePolicy, imageDigestMirrorSet, clusterCSIDriverConfig)

	// Strip credentials and secrets from the install-config before it is
	// stored in the cluster.
	redactedConfig, err := redactedInstallConfig(*installConfig.Config)
	if err != nil {
		return errors.Wrap(err, "failed to redact install-config")
	}
	// The redacted install-config goes into the kube-system
	// cluster-config-v1 configmap.
	m.KubeSysConfig = configMap("kube-system", "cluster-config-v1", genericData{
		"install-config": string(redactedConfig),
	})
	if m.KubeSysConfig.Metadata.Annotations == nil {
		m.KubeSysConfig.Metadata.Annotations = make(map[string]string, 1)
	}
	m.KubeSysConfig.Metadata.Annotations["kubernetes.io/description"] = "The install-config content used to create the cluster. The cluster configuration may have evolved since installation, so check cluster configuration resources directly if you are interested in the current cluster state."
	kubeSysConfigData, err := yaml.Marshal(m.KubeSysConfig)
	if err != nil {
		return errors.Wrap(err, "failed to create kube-system/cluster-config-v1 configmap")
	}
	m.FileList = []*asset.File{
		{
			Filename: kubeSysConfigPath,
			Data:     kubeSysConfigData,
		},
	}
	// Collect the files produced by every dependent asset into one sorted list.
	m.FileList = append(m.FileList, m.generateBootKubeManifests(dependencies)...)
	m.FileList = append(m.FileList, ingress.Files()...)
	m.FileList = append(m.FileList, dns.Files()...)
	m.FileList = append(m.FileList, network.Files()...)
	m.FileList = append(m.FileList, infra.Files()...)
	m.FileList = append(m.FileList, proxy.Files()...)
	m.FileList = append(m.FileList, scheduler.Files()...)
	m.FileList = append(m.FileList, imageContentSourcePolicy.Files()...)
	m.FileList = append(m.FileList, clusterCSIDriverConfig.Files()...)
	m.FileList = append(m.FileList, imageDigestMirrorSet.Files()...)

	asset.SortFiles(m.FileList)

	return nil
}

// Files returns the files generated by the asset.
func (m *Manifests) Files() []*asset.File {
	return m.FileList
}

// generateBootKubeManifests renders each bootkube template asset with
// cluster-specific data (cluster ID, certs, pull secret) and rewrites the
// file names into the manifests directory without the ".template" suffix.
func (m *Manifests) generateBootKubeManifests(dependencies asset.Parents) []*asset.File {
	clusterID := &installconfig.ClusterID{}
	installConfig := &installconfig.InstallConfig{}
	mcsCertKey := &tls.MCSCertKey{}
	rootCA := &tls.RootCA{}
	dependencies.Get(
		clusterID,
		installConfig,
		mcsCertKey,
		rootCA,
	)
	templateData := &bootkubeTemplateData{
		CVOCapabilities:  installConfig.Config.Capabilities,
		CVOClusterID:     clusterID.UUID,
		McsTLSCert:       base64.StdEncoding.EncodeToString(mcsCertKey.Cert()),
		McsTLSKey:        base64.StdEncoding.EncodeToString(mcsCertKey.Key()),
		PullSecretBase64: base64.StdEncoding.EncodeToString([]byte(installConfig.Config.PullSecret)),
		RootCaCert:       string(rootCA.Cert()),
		IsFCOS:           installConfig.Config.IsFCOS(),
		IsSCOS:           installConfig.Config.IsSCOS(),
		IsOKD:            installConfig.Config.IsOKD(),
	}
	files := []*asset.File{}
	for _, a := range []asset.WritableAsset{
		&bootkube.CVOOverrides{},
		&bootkube.KubeCloudConfig{},
		&bootkube.KubeSystemConfigmapRootCA{},
		&bootkube.MachineConfigServerTLSSecret{},
		&bootkube.OpenshiftConfigSecretPullSecret{},
	} {
		dependencies.Get(a)
		for _, f := range a.Files() {
			files = append(files, &asset.File{
				Filename: filepath.Join(manifestDir, strings.TrimSuffix(filepath.Base(f.Filename), ".template")),
				Data:     applyTemplateData(f.Data, templateData),
			})
		}
	}
	return files
}

// applyTemplateData executes data as a text/template (with customTmplFuncs
// available) against templateData. Template errors are treated as
// programmer errors and panic.
func applyTemplateData(data []byte, templateData interface{}) []byte {
	template := template.Must(template.New("template").Funcs(customTmplFuncs).Parse(string(data)))
	buf := &bytes.Buffer{}
	if err := template.Execute(buf, templateData); err != nil {
		panic(err)
	}
	return buf.Bytes()
}

// Load returns the manifests asset from disk.
func (m *Manifests) Load(f asset.FileFetcher) (bool, error) {
	yamlFileList, err := f.FetchByPattern(filepath.Join(manifestDir, "*.yaml"))
	if err != nil {
		return false, errors.Wrap(err, "failed to load *.yaml files")
	}
	ymlFileList, err := f.FetchByPattern(filepath.Join(manifestDir, "*.yml"))
	if err != nil {
		return false, errors.Wrap(err, "failed to load *.yml files")
	}
	jsonFileList, err := f.FetchByPattern(filepath.Join(manifestDir, "*.json"))
	if err != nil {
		return false, errors.Wrap(err, "failed to load *.json files")
	}
	fileList := append(yamlFileList, ymlFileList...)
	fileList = append(fileList, jsonFileList...)
	if len(fileList) == 0 {
		return false, nil
	}
	// The asset counts as present on disk only if the cluster-config-v1
	// manifest is among the fetched files.
	kubeSysConfig := &configurationObject{}
	var found bool
	for _, file := range fileList {
		if file.Filename == kubeSysConfigPath {
			if err := yaml.Unmarshal(file.Data, kubeSysConfig); err != nil {
				return false, errors.Wrapf(err, "failed to unmarshal %s", kubeSysConfigPath)
			}
			found = true
		}
	}
	if !found {
		return false, nil
	}
	m.FileList, m.KubeSysConfig = fileList, kubeSysConfig
	asset.SortFiles(m.FileList)
	return true, nil
}

// redactedInstallConfig returns the YAML for config with all secrets
// removed: the pull secret is cleared and, for vSphere, credentials are
// dropped from the platform section (only non-secret fields are copied).
func redactedInstallConfig(config types.InstallConfig) ([]byte, error) {
	newConfig := config
	newConfig.PullSecret = ""
	if newConfig.Platform.VSphere != nil {
		p := config.VSphere
		// Copy vCenters but keep only server and datacenter names — no
		// usernames or passwords.
		newVCenters := make([]vsphere.VCenter, len(p.VCenters))
		for i, v := range p.VCenters {
			newVCenters[i].Server = v.Server
			newVCenters[i].Datacenters = v.Datacenters
		}
		newVSpherePlatform := vsphere.Platform{
			DeprecatedVCenter:          p.DeprecatedVCenter,
			DeprecatedUsername:         "",
			DeprecatedPassword:         "",
			DeprecatedDatacenter:       p.DeprecatedDatacenter,
			DeprecatedDefaultDatastore: p.DeprecatedDefaultDatastore,
			DeprecatedFolder:           p.DeprecatedFolder,
			DeprecatedCluster:          p.DeprecatedCluster,
			DeprecatedResourcePool:     p.DeprecatedResourcePool,
			ClusterOSImage:             p.ClusterOSImage,
			DeprecatedAPIVIP:           p.DeprecatedAPIVIP,
			APIVIPs:                    p.APIVIPs,
			DeprecatedIngressVIP:       p.DeprecatedIngressVIP,
			IngressVIPs:                p.IngressVIPs,
			DefaultMachinePlatform:     p.DefaultMachinePlatform,
			DeprecatedNetwork:          p.DeprecatedNetwork,
			DiskType:                   p.DiskType,
			VCenters:                   newVCenters,
			FailureDomains:             p.FailureDomains,
		}
		newConfig.Platform.VSphere = &newVSpherePlatform
	}
	return yaml.Marshal(newConfig)
}

// indent prefixes every line after the first in v with the given number of
// spaces (template helper).
func indent(indention int, v string) string {
	newline := "\n" + strings.Repeat(" ", indention)
	return strings.Replace(v, "\n", newline, -1)
}
package database import ( "database/sql" "log" ) func Query(q string) *sql.Rows { /* Return rows from query to DB Error handling is handled here in the factory */ // prepare query stmt, err := DB.Prepare(q) if err != nil { log.Fatal(err) return nil } defer stmt.Close() rows, err := stmt.Query() if err != nil { panic(err) } return rows } func QueryOne(q string) *sql.Row { stmt, err := DB.Prepare(q) if err != nil { log.Fatal(err) return nil } defer stmt.Close() row := stmt.QueryRow() return row } func Insert(q string) error { stmt, err := DB.Prepare(q) if err != nil { log.Fatal(err) } defer stmt.Close() // TODO: handle error _, err = stmt.Exec() if err != nil { return err } return nil }
package game_map import ( "github.com/faiface/pixel/pixelgl" ) type SBEvent interface { Update(dt float64) IsBlocking() bool IsFinished() bool Render(win *pixelgl.Window) } type WaitEvent struct { Seconds float64 } func WaitEventCreate(seconds float64) *WaitEvent { return &WaitEvent{ Seconds: seconds, } } func (e *WaitEvent) Update(dt float64) { e.Seconds = e.Seconds - dt } func (e WaitEvent) IsBlocking() bool { return true } func (e WaitEvent) IsFinished() bool { return e.Seconds <= 0 } func (e WaitEvent) Render(win *pixelgl.Window) { }
// Insert and search for numbers in a binary tree. // left child <= parent, right chile > parent // For example, if we had a node containing the data 4, and we added the // data 2, our tree would look like this: // 4 // / // 2 // If we then added 6, it would look like this: // 4 // / \ // 2 6 package binarysearchtree import () const testVersion = 1 type SearchTreeData struct { left *SearchTreeData data int right *SearchTreeData } // create a search tree with data n func Bst(n int) *SearchTreeData { return &SearchTreeData{data: n} } // Insert an element into the tree func (s *SearchTreeData) Insert(n int) { for s != nil { if n > s.data { if s.right != nil { s = s.right } else { s.right = &SearchTreeData{data: n} return } } else { // n <= s.data if s.left != nil { s = s.left } else { s.left = &SearchTreeData{data: n} return } } } } // Return sorted array after applying the function to the elements func (s *SearchTreeData) MapString(f func(int) string) (ret []string) { if s == nil { return nil } if s.left != nil { left := s.left.MapString(f) ret = append(ret, left...) } ret = append(ret, f(s.data)) if s.right != nil { right := s.right.MapString(f) ret = append(ret, right...) } return ret } // Return sorted array after applying the function to the elements func (s *SearchTreeData) MapInt(f func(int) int) (ret []int) { if s == nil { return nil } if s.left != nil { left := s.left.MapInt(f) ret = append(ret, left...) } ret = append(ret, f(s.data)) if s.right != nil { right := s.right.MapInt(f) ret = append(ret, right...) } return ret }
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"main/hosts/anonfiles"
	"main/hosts/catbox"
	"main/hosts/fileio"
	"main/hosts/filemail"
	"main/hosts/ftp"
	"main/hosts/gofile"
	"main/hosts/krakenfiles"
	"main/hosts/letsupload"
	"main/hosts/megaup"
	"main/hosts/mixdrop"
	"main/hosts/pixeldrain"
	"main/hosts/racaty"
	"main/hosts/transfersh"
	"main/hosts/uguu"
	"main/hosts/wetransfer"
	"main/hosts/workupload"
	"main/hosts/zippyshare"
	"main/utils"
	"os"
	"path/filepath"
	"strings"
	"text/template"

	"github.com/alexflint/go-arg"
	"github.com/dustin/go-humanize"
)

// megabyte is the number of bytes per MB (SI) used for the upload speed limit.
const megabyte = 1000000

var (
	// funcMap maps a lowercased host name to that host's upload function.
	funcMap = map[string]func(*utils.Args, string) (string, error){
		"anonfiles":   anonfiles.Run,
		"catbox":      catbox.Run,
		"fileio":      fileio.Run,
		"filemail":    filemail.Run,
		"ftp":         ftp.Run,
		"gofile":      gofile.Run,
		"krakenfiles": krakenfiles.Run,
		"letsupload":  letsupload.Run,
		"megaup":      megaup.Run,
		"mixdrop":     mixdrop.Run,
		"pixeldrain":  pixeldrain.Run,
		"racaty":      racaty.Run,
		"transfersh":  transfersh.Run,
		"uguu":        uguu.Run,
		"wetransfer":  wetransfer.Run,
		"zippyshare":  zippyshare.Run,
		"workupload":  workupload.Run,
	}
	// templateEscPairs maps literal escape sequences in the user template
	// (backslash-n, backslash-t) to the real control characters.
	templateEscPairs = []utils.TemplateEscPair{
		// Newline
		{From: []byte{'\x5C', '\x6E'}, To: []byte{'\x0A'}},
		// Tab
		{From: []byte{'\x5C', '\x74'}, To: []byte{'\x09'}},
	}
)

// populateDirs returns the paths of regular files directly inside path
// (non-recursive; subdirectories are skipped, not descended into).
func populateDirs(path string) ([]string, error) {
	var paths []string
	files, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}
	for _, f := range files {
		if !f.IsDir() {
			filePath := filepath.Join(path, f.Name())
			paths = append(paths, filePath)
		}
	}
	return paths, nil
}

// populateDirsRec returns the paths of all regular files under srcPath,
// recursively.
//
// NOTE(review): the walk callback ignores its err argument and immediately
// dereferences f, which is nil when Walk reports an error for a path — this
// looks like it can panic on unreadable entries; confirm and guard.
func populateDirsRec(srcPath string) ([]string, error) {
	var dirs []string
	err := filepath.Walk(srcPath,
		func(path string, f os.FileInfo, err error) error {
			if !f.IsDir() {
				dirs = append(dirs, path)
			}
			return nil
		})
	return dirs, err
}

// checkExists reports whether path exists as the wanted kind: a directory
// when isDir is true, a non-directory otherwise. A missing path yields
// (false, nil); any other stat failure is returned as an error.
func checkExists(path string, isDir bool) (bool, error) {
	f, err := os.Stat(path)
	if err == nil {
		if isDir {
			return f.IsDir(), nil
		} else {
			return !f.IsDir(), nil
		}
	} else if os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}

// processDirs expands every directory in args.Directories into file paths
// (recursively when args.Recursive is set) and appends them to args.Files.
// Duplicate and non-existent directories are skipped with a console message.
func processDirs(args *utils.Args) error {
	var (
		allDirs  []string
		popPaths []string
	)
	for _, dir := range args.Directories {
		exists, err := checkExists(dir, true)
		if err != nil {
			return err
		}
		if exists {
			if !foldContains(allDirs, dir) {
				allDirs = append(allDirs, dir)
				if args.Recursive {
					popPaths, err = populateDirsRec(dir)
				} else {
					popPaths, err = populateDirs(dir)
				}
				if err != nil {
					return err
				}
				args.Files = append(args.Files, popPaths...)
			} else {
				fmt.Println("Filtered duplicate directory:", dir)
			}
		} else {
			fmt.Println("Filtered non-existent directory:", dir)
		}
	}
	return nil
}

// foldContains reports whether arr contains value, compared
// case-insensitively.
func foldContains(arr []string, value string) bool {
	for _, item := range arr {
		if strings.EqualFold(item, value) {
			return true
		}
	}
	return false
}

// filterHosts removes case-insensitive duplicates from hosts, keeping the
// first occurrence and the original order.
func filterHosts(hosts []string) []string {
	var filteredHosts []string
	for _, host := range hosts {
		if !foldContains(filteredHosts, host) {
			filteredHosts = append(filteredHosts, host)
		}
	}
	return filteredHosts
}

// filterPaths resolves each path against the working directory, then drops
// duplicates and non-existent files, reporting each skip on stdout.
func filterPaths(paths []string) ([]string, error) {
	var filteredPaths []string
	wd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	for _, path := range paths {
		if !filepath.IsAbs(path) {
			path = filepath.Join(wd, path)
		}
		exists, err := checkExists(path, false)
		if err != nil {
			return nil, err
		}
		if exists {
			if !foldContains(filteredPaths, path) {
				filteredPaths = append(filteredPaths, path)
			} else {
				fmt.Println("Filtered duplicate file:", path)
			}
		} else {
			fmt.Println("Filtered non-existent file:", path)
		}
	}
	return filteredPaths, nil
}

// parseArgs parses the command line, validates the speed limit, expands
// directories, filters files and hosts, and returns the normalized args.
func parseArgs() (*utils.Args, error) {
	var args utils.Args
	arg.MustParse(&args)
	if args.SpeedLimit != -1 && args.SpeedLimit <= 0 {
		return nil, errors.New("Invalid speed limit.")
	}
	if len(args.Files) == 0 && len(args.Directories) == 0 {
		return nil, errors.New("File path and/or directory required.")
	}
	// -1 means "no limit"; ByteLimit is only meaningful otherwise.
	args.ByteLimit = int64(megabyte * args.SpeedLimit)
	if args.SpeedLimit != -1 {
		fmt.Printf("Upload speed limiting is active, limit: %s/s.\n", humanize.Bytes(uint64(args.ByteLimit)))
	}
	if len(args.Directories) > 0 {
		err := processDirs(&args)
		if err != nil {
			return nil, err
		}
	}
	paths, err := filterPaths(args.Files)
	if err != nil {
		errString := fmt.Sprintf("Failed to filter paths.\n%s", err)
		return nil, errors.New(errString)
	}
	if len(paths) == 0 {
		return nil, errors.New("All files were filtered.")
	}
	hosts := filterHosts(args.Hosts)
	args.Hosts = hosts
	args.Files = paths
	return &args, nil
}

// escapeTemplate replaces each literal escape sequence in templateEscPairs
// with its control character, applying the pairs one after another.
func escapeTemplate(template []byte) []byte {
	var escaped []byte
	for i, pair := range templateEscPairs {
		if i != 0 {
			// Feed the previous pass's output into the next replacement.
			template = escaped
		}
		escaped = bytes.ReplaceAll(template, pair.From, pair.To)
	}
	return escaped
}

// parseTemplate renders templateText with meta; on an execution failure it
// falls back to a built-in default template and retries.
//
// NOTE(review): template.Must panics on a *parse* error, so the fallback
// loop below only covers Execute errors — confirm this is intended.
func parseTemplate(templateText string, meta map[string]string) []byte {
	var buffer bytes.Buffer
	for {
		err := template.Must(template.New("").Parse(templateText)).Execute(&buffer, meta)
		if err == nil {
			break
		}
		fmt.Println("Failed to parse template. Default will be used instead.")
		templateText = "# {{.filename}}\n{{.fileUrl}}\n"
		buffer.Reset()
	}
	return escapeTemplate(buffer.Bytes())
}

// writeTxt appends one rendered template entry for an uploaded file to the
// output text file at path.
func writeTxt(path, filePath, fileUrl, templateText string) error {
	f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0755)
	if err != nil {
		return err
	}
	meta := map[string]string{
		"filename": filepath.Base(filePath),
		"filePath": filePath,
		"fileUrl":  fileUrl,
	}
	parsed := parseTemplate(templateText, meta)
	_, err = f.Write(parsed)
	f.Close()
	return err
}

// outSetup creates the output text file if needed and, when wipe is set,
// truncates any existing contents.
func outSetup(path string, wipe bool) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDONLY, 0755)
	if err != nil {
		return err
	}
	defer f.Close()
	if wipe {
		err = f.Truncate(0)
		if err != nil {
			return err
		}
	}
	return nil
}

// outSetupJob (re)creates the JSON job file containing an empty jobs array.
func outSetupJob(path string) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		return err
	}
	defer f.Close()
	jobs := &utils.UploadJobs{
		Jobs: []utils.UploadJob{},
	}
	m, err := json.MarshalIndent(&jobs, "", "\t")
	if err != nil {
		return err
	}
	_, err = f.Write(m)
	return err
}

// writeJob records the outcome of a single upload (success or failure) in
// the JSON job file by reading it, appending the new job, and rewriting it.
func writeJob(jobPath, _url, host, filePath string, jobErr error) error {
	var (
		ok      = true
		errText string
	)
	if jobErr != nil {
		ok = false
		errText = jobErr.Error()
	}
	job := &utils.UploadJob{
		URL:       _url,
		Host:      host,
		Filename:  filepath.Base(filePath),
		FilePath:  filePath,
		Ok:        ok,
		ErrorText: errText,
	}
	data, err := ioutil.ReadFile(jobPath)
	if err != nil {
		return err
	}
	var jobs utils.UploadJobs
	err = json.Unmarshal(data, &jobs)
	if err != nil {
		return err
	}
	jobs.Jobs = append(jobs.Jobs, *job)
	m, err := json.MarshalIndent(&jobs, "", "\t")
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(jobPath, m, 0755)
	return err
}

// main drives the tool: parse arguments, prepare the optional text and JSON
// output files, then upload every file to every requested host in order.
func main() {
	args, err := parseArgs()
	if err != nil {
		panic(err)
	}
	outPath := args.OutPath
	if outPath != "" {
		err := outSetup(outPath, args.Wipe)
		if err != nil {
			panic(err)
		}
	}
	if args.JobOutPath != "" {
		err := outSetupJob(args.JobOutPath)
		if err != nil {
			panic(err)
		}
	}
	for i, host := range args.Hosts {
		lowerHost := strings.ToLower(host)
		hostFunc, ok := funcMap[lowerHost]
		if !ok {
			fmt.Println("Invalid host:", host)
			continue
		}
		if i != 0 {
			fmt.Println("")
		}
		fmt.Println("--" + lowerHost + "--")
		pathTotal := len(args.Files)
		for num, path := range args.Files {
			fmt.Printf("File %d of %d:\n", num+1, pathTotal)
			fmt.Println(path)
			fileUrl, err := hostFunc(args, path)
			if args.JobOutPath != "" {
				// Failures are recorded as jobs too; a failure to record is fatal.
				jobErr := writeJob(args.JobOutPath, fileUrl, host, path, err)
				if jobErr != nil {
					// Intentional.
					panic(jobErr)
				}
			}
			if err != nil {
				fmt.Println("Upload failed.\n" + err.Error())
				continue
			}
			fmt.Println(fileUrl)
			if outPath != "" {
				err = writeTxt(outPath, path, fileUrl, args.Template)
				if err != nil {
					fmt.Println("Failed to write to output text file.\n" + err.Error())
				}
			}
		}
	}
}
//
//  Copyright 2020 The AVFS authors
//
//  Licensed under the Apache License, Version 2.0 (the "License");
//  you may not use this file except in compliance with the License.
//  You may obtain a copy of the License at
//
//  	http://www.apache.org/licenses/LICENSE-2.0
//
//  Unless required by applicable law or agreed to in writing, software
//  distributed under the License is distributed on an "AS IS" BASIS,
//  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//  See the License for the specific language governing permissions and
//  limitations under the License.
//
// +build !datarace

package dummyidm_test

import (
	"testing"

	"github.com/avfs/avfs"
	"github.com/avfs/avfs/idm/dummyidm"
	"github.com/avfs/avfs/test"
)

// Compile-time interface conformance checks.
var (
	// DummyIdm implements avfs.IdentityMgr interface.
	_ avfs.IdentityMgr = &dummyidm.DummyIdm{}

	// DummyIdm.User struct implements avfs.UserReader interface.
	_ avfs.UserReader = &dummyidm.User{}

	// DummyIdm.Group struct implements avfs.GroupReader interface.
	_ avfs.GroupReader = &dummyidm.Group{}
)

// TestDummyIdm runs the shared identity-manager test suite against a fresh
// dummy identity manager.
func TestDummyIdm(t *testing.T) {
	idm := dummyidm.New()

	t.Logf("Idm = %v", idm.Type())

	sIdm := test.NewSuiteIdm(t, idm)
	sIdm.TestAll(t)
}

// TestDummyIdmFeatures verifies the dummy identity manager advertises no
// optional features.
func TestDummyIdmFeatures(t *testing.T) {
	idm := dummyidm.New()

	if idm.Features() != 0 {
		t.Errorf("Features : want Features to be 0, got %d", idm.Features())
	}
}
package log

import "os"

// Recognized run-mode values for the log mode setting.
const (
	productionMode  = "production"
	developmentMode = "development"
)

// Environment variable names and their defaults.
const (
	envLogLevel = "BE_LOG_LEVEL"
	envLogMode  = "BE_LOG_MODE"

	defLogLevel = "debug"
	defLogMode  = developmentMode
)

var (
	cfgLogLevel = EnvStr(envLogLevel, defLogLevel)
	// BUG FIX: this previously read EnvStr(defLogMode, defLogMode), i.e. it
	// looked up the environment variable named "development" instead of
	// BE_LOG_MODE — so the mode could never be configured and IsProduction
	// always returned false.
	cfgLogMode = EnvStr(envLogMode, defLogMode)
)

// IsProduction return true if it is on production environment
func IsProduction() bool {
	return cfgLogMode == productionMode
}

// EnvStr returns the value of the environment variable aKey, or aDef when
// the variable is unset.
func EnvStr(aKey string, aDef string) string {
	if _val, _ok := os.LookupEnv(aKey); _ok {
		return _val
	}
	return aDef
}
package domain import ( "encoding/gob" ) type Product struct { ProductId string CategoryId string Name string Description string } func (p *Product) String() string { return p.ProductId } // 序列化注册 product,用于 session 存储 func init() { gob.Register(&Product{}) }
// Package report Amazon Seller Utilities API Responses package report import ( "encoding/json" "log" "net/http" ) // DownloadReportError An error response from the API func DownloadReportError(w http.ResponseWriter, version int, code int, err error) error { apiResponse := DownloadReportAPIResponse{ Version: version, Success: true, Status: code, // Results: DownloadReportResponse{}, Error: err.Error(), } apiResponseJSON, err := json.Marshal(apiResponse) if err != nil { log.Panic(err) panic(err) } log.Output(1, string(apiResponseJSON)) return json.NewEncoder(w).Encode(apiResponse) }
package main

import "fmt"

// main builds a slice of 49 ones and prints it.
func main() {
	const size = 49

	ones := make([]int, size)
	for i := range ones {
		ones[i] = 1
	}

	fmt.Println(ones)
}
package main

import (
	"bufio"
	"fmt"
	"net"
	"strings"
)

// main listens on :8080 and serves each TCP connection in its own goroutine.
func main() {
	l, err := net.Listen("tcp", ":8080")
	if err != nil {
		// BUG FIX: the original only printed the error and then called
		// l.Accept() on a nil listener, which panics. Exit instead.
		fmt.Println(err)
		return
	}
	for {
		// Accept blocks until a client connects.
		c, err := l.Accept()
		if err != nil {
			fmt.Println(err)
			continue
		}
		go handleconn(c)
	}
}

// handleconn reads the client's request, writes a fixed response, and
// closes the connection.
func handleconn(c net.Conn) {
	defer c.Close()
	request(c)
	respond(c)
}

// request reads the HTTP request head line by line, echoing each line to
// stdout and logging the method and URI from the request line. Reading
// stops at the blank line that terminates the headers.
func request(c net.Conn) {
	var i int
	scanner := bufio.NewScanner(c)
	for scanner.Scan() {
		data := scanner.Text()
		fmt.Println(data)
		if i == 0 {
			// First line is the request line, e.g. "GET /path HTTP/1.1".
			// FIX: guard against a malformed line with fewer than two
			// fields, which previously caused an index-out-of-range panic.
			fields := strings.Fields(data)
			if len(fields) >= 2 {
				fmt.Println("Method : ", fields[0], "URI :", fields[1])
			}
		}
		if data == "" {
			break
		}
		i++
	}
	// Reached when the request head ends or the client closes the connection.
	fmt.Println("End Of Prog")
}

// respond writes a minimal HTTP/1.1 200 response with a fixed HTML body.
func respond(c net.Conn) {
	// FIX: the meta tag previously read charet="UTF-8" (typo for charset).
	body := `<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8"><title></title></head><body><strong>Hello GoLang Prog</strong></body></html>`
	fmt.Fprint(c, "HTTP/1.1 200 OK\r\n")
	fmt.Fprintf(c, "Content-Length: %d\r\n", len(body))
	fmt.Fprint(c, "Content-Type: text/html\r\n")
	fmt.Fprint(c, "\r\n")
	fmt.Fprint(c, body)
}
// Copyright 2023 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "context" "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/beta/compute_beta_go_proto" emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto" "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta" ) // PacketMirroringServer implements the gRPC interface for PacketMirroring. type PacketMirroringServer struct{} // ProtoToPacketMirroringFilterDirectionEnum converts a PacketMirroringFilterDirectionEnum enum from its proto representation. func ProtoToComputeBetaPacketMirroringFilterDirectionEnum(e betapb.ComputeBetaPacketMirroringFilterDirectionEnum) *beta.PacketMirroringFilterDirectionEnum { if e == 0 { return nil } if n, ok := betapb.ComputeBetaPacketMirroringFilterDirectionEnum_name[int32(e)]; ok { e := beta.PacketMirroringFilterDirectionEnum(n[len("ComputeBetaPacketMirroringFilterDirectionEnum"):]) return &e } return nil } // ProtoToPacketMirroringEnableEnum converts a PacketMirroringEnableEnum enum from its proto representation. 
func ProtoToComputeBetaPacketMirroringEnableEnum(e betapb.ComputeBetaPacketMirroringEnableEnum) *beta.PacketMirroringEnableEnum { if e == 0 { return nil } if n, ok := betapb.ComputeBetaPacketMirroringEnableEnum_name[int32(e)]; ok { e := beta.PacketMirroringEnableEnum(n[len("ComputeBetaPacketMirroringEnableEnum"):]) return &e } return nil } // ProtoToPacketMirroringNetwork converts a PacketMirroringNetwork object from its proto representation. func ProtoToComputeBetaPacketMirroringNetwork(p *betapb.ComputeBetaPacketMirroringNetwork) *beta.PacketMirroringNetwork { if p == nil { return nil } obj := &beta.PacketMirroringNetwork{ Url: dcl.StringOrNil(p.GetUrl()), CanonicalUrl: dcl.StringOrNil(p.GetCanonicalUrl()), } return obj } // ProtoToPacketMirroringCollectorIlb converts a PacketMirroringCollectorIlb object from its proto representation. func ProtoToComputeBetaPacketMirroringCollectorIlb(p *betapb.ComputeBetaPacketMirroringCollectorIlb) *beta.PacketMirroringCollectorIlb { if p == nil { return nil } obj := &beta.PacketMirroringCollectorIlb{ Url: dcl.StringOrNil(p.GetUrl()), CanonicalUrl: dcl.StringOrNil(p.GetCanonicalUrl()), } return obj } // ProtoToPacketMirroringMirroredResources converts a PacketMirroringMirroredResources object from its proto representation. 
func ProtoToComputeBetaPacketMirroringMirroredResources(p *betapb.ComputeBetaPacketMirroringMirroredResources) *beta.PacketMirroringMirroredResources { if p == nil { return nil } obj := &beta.PacketMirroringMirroredResources{} for _, r := range p.GetSubnetworks() { obj.Subnetworks = append(obj.Subnetworks, *ProtoToComputeBetaPacketMirroringMirroredResourcesSubnetworks(r)) } for _, r := range p.GetInstances() { obj.Instances = append(obj.Instances, *ProtoToComputeBetaPacketMirroringMirroredResourcesInstances(r)) } for _, r := range p.GetTags() { obj.Tags = append(obj.Tags, r) } return obj } // ProtoToPacketMirroringMirroredResourcesSubnetworks converts a PacketMirroringMirroredResourcesSubnetworks object from its proto representation. func ProtoToComputeBetaPacketMirroringMirroredResourcesSubnetworks(p *betapb.ComputeBetaPacketMirroringMirroredResourcesSubnetworks) *beta.PacketMirroringMirroredResourcesSubnetworks { if p == nil { return nil } obj := &beta.PacketMirroringMirroredResourcesSubnetworks{ Url: dcl.StringOrNil(p.GetUrl()), CanonicalUrl: dcl.StringOrNil(p.GetCanonicalUrl()), } return obj } // ProtoToPacketMirroringMirroredResourcesInstances converts a PacketMirroringMirroredResourcesInstances object from its proto representation. func ProtoToComputeBetaPacketMirroringMirroredResourcesInstances(p *betapb.ComputeBetaPacketMirroringMirroredResourcesInstances) *beta.PacketMirroringMirroredResourcesInstances { if p == nil { return nil } obj := &beta.PacketMirroringMirroredResourcesInstances{ Url: dcl.StringOrNil(p.GetUrl()), CanonicalUrl: dcl.StringOrNil(p.GetCanonicalUrl()), } return obj } // ProtoToPacketMirroringFilter converts a PacketMirroringFilter object from its proto representation. 
func ProtoToComputeBetaPacketMirroringFilter(p *betapb.ComputeBetaPacketMirroringFilter) *beta.PacketMirroringFilter { if p == nil { return nil } obj := &beta.PacketMirroringFilter{ Direction: ProtoToComputeBetaPacketMirroringFilterDirectionEnum(p.GetDirection()), } for _, r := range p.GetCidrRanges() { obj.CidrRanges = append(obj.CidrRanges, r) } for _, r := range p.GetIpProtocols() { obj.IPProtocols = append(obj.IPProtocols, r) } return obj } // ProtoToPacketMirroring converts a PacketMirroring resource from its proto representation. func ProtoToPacketMirroring(p *betapb.ComputeBetaPacketMirroring) *beta.PacketMirroring { obj := &beta.PacketMirroring{ Id: dcl.Int64OrNil(p.GetId()), SelfLink: dcl.StringOrNil(p.GetSelfLink()), Name: dcl.StringOrNil(p.GetName()), Description: dcl.StringOrNil(p.GetDescription()), Region: dcl.StringOrNil(p.GetRegion()), Network: ProtoToComputeBetaPacketMirroringNetwork(p.GetNetwork()), Priority: dcl.Int64OrNil(p.GetPriority()), CollectorIlb: ProtoToComputeBetaPacketMirroringCollectorIlb(p.GetCollectorIlb()), MirroredResources: ProtoToComputeBetaPacketMirroringMirroredResources(p.GetMirroredResources()), Filter: ProtoToComputeBetaPacketMirroringFilter(p.GetFilter()), Enable: ProtoToComputeBetaPacketMirroringEnableEnum(p.GetEnable()), Project: dcl.StringOrNil(p.GetProject()), Location: dcl.StringOrNil(p.GetLocation()), } return obj } // PacketMirroringFilterDirectionEnumToProto converts a PacketMirroringFilterDirectionEnum enum to its proto representation. 
func ComputeBetaPacketMirroringFilterDirectionEnumToProto(e *beta.PacketMirroringFilterDirectionEnum) betapb.ComputeBetaPacketMirroringFilterDirectionEnum { if e == nil { return betapb.ComputeBetaPacketMirroringFilterDirectionEnum(0) } if v, ok := betapb.ComputeBetaPacketMirroringFilterDirectionEnum_value["PacketMirroringFilterDirectionEnum"+string(*e)]; ok { return betapb.ComputeBetaPacketMirroringFilterDirectionEnum(v) } return betapb.ComputeBetaPacketMirroringFilterDirectionEnum(0) } // PacketMirroringEnableEnumToProto converts a PacketMirroringEnableEnum enum to its proto representation. func ComputeBetaPacketMirroringEnableEnumToProto(e *beta.PacketMirroringEnableEnum) betapb.ComputeBetaPacketMirroringEnableEnum { if e == nil { return betapb.ComputeBetaPacketMirroringEnableEnum(0) } if v, ok := betapb.ComputeBetaPacketMirroringEnableEnum_value["PacketMirroringEnableEnum"+string(*e)]; ok { return betapb.ComputeBetaPacketMirroringEnableEnum(v) } return betapb.ComputeBetaPacketMirroringEnableEnum(0) } // PacketMirroringNetworkToProto converts a PacketMirroringNetwork object to its proto representation. func ComputeBetaPacketMirroringNetworkToProto(o *beta.PacketMirroringNetwork) *betapb.ComputeBetaPacketMirroringNetwork { if o == nil { return nil } p := &betapb.ComputeBetaPacketMirroringNetwork{} p.SetUrl(dcl.ValueOrEmptyString(o.Url)) p.SetCanonicalUrl(dcl.ValueOrEmptyString(o.CanonicalUrl)) return p } // PacketMirroringCollectorIlbToProto converts a PacketMirroringCollectorIlb object to its proto representation. func ComputeBetaPacketMirroringCollectorIlbToProto(o *beta.PacketMirroringCollectorIlb) *betapb.ComputeBetaPacketMirroringCollectorIlb { if o == nil { return nil } p := &betapb.ComputeBetaPacketMirroringCollectorIlb{} p.SetUrl(dcl.ValueOrEmptyString(o.Url)) p.SetCanonicalUrl(dcl.ValueOrEmptyString(o.CanonicalUrl)) return p } // PacketMirroringMirroredResourcesToProto converts a PacketMirroringMirroredResources object to its proto representation. 
func ComputeBetaPacketMirroringMirroredResourcesToProto(o *beta.PacketMirroringMirroredResources) *betapb.ComputeBetaPacketMirroringMirroredResources { if o == nil { return nil } p := &betapb.ComputeBetaPacketMirroringMirroredResources{} sSubnetworks := make([]*betapb.ComputeBetaPacketMirroringMirroredResourcesSubnetworks, len(o.Subnetworks)) for i, r := range o.Subnetworks { sSubnetworks[i] = ComputeBetaPacketMirroringMirroredResourcesSubnetworksToProto(&r) } p.SetSubnetworks(sSubnetworks) sInstances := make([]*betapb.ComputeBetaPacketMirroringMirroredResourcesInstances, len(o.Instances)) for i, r := range o.Instances { sInstances[i] = ComputeBetaPacketMirroringMirroredResourcesInstancesToProto(&r) } p.SetInstances(sInstances) sTags := make([]string, len(o.Tags)) for i, r := range o.Tags { sTags[i] = r } p.SetTags(sTags) return p } // PacketMirroringMirroredResourcesSubnetworksToProto converts a PacketMirroringMirroredResourcesSubnetworks object to its proto representation. func ComputeBetaPacketMirroringMirroredResourcesSubnetworksToProto(o *beta.PacketMirroringMirroredResourcesSubnetworks) *betapb.ComputeBetaPacketMirroringMirroredResourcesSubnetworks { if o == nil { return nil } p := &betapb.ComputeBetaPacketMirroringMirroredResourcesSubnetworks{} p.SetUrl(dcl.ValueOrEmptyString(o.Url)) p.SetCanonicalUrl(dcl.ValueOrEmptyString(o.CanonicalUrl)) return p } // PacketMirroringMirroredResourcesInstancesToProto converts a PacketMirroringMirroredResourcesInstances object to its proto representation. 
func ComputeBetaPacketMirroringMirroredResourcesInstancesToProto(o *beta.PacketMirroringMirroredResourcesInstances) *betapb.ComputeBetaPacketMirroringMirroredResourcesInstances { if o == nil { return nil } p := &betapb.ComputeBetaPacketMirroringMirroredResourcesInstances{} p.SetUrl(dcl.ValueOrEmptyString(o.Url)) p.SetCanonicalUrl(dcl.ValueOrEmptyString(o.CanonicalUrl)) return p } // PacketMirroringFilterToProto converts a PacketMirroringFilter object to its proto representation. func ComputeBetaPacketMirroringFilterToProto(o *beta.PacketMirroringFilter) *betapb.ComputeBetaPacketMirroringFilter { if o == nil { return nil } p := &betapb.ComputeBetaPacketMirroringFilter{} p.SetDirection(ComputeBetaPacketMirroringFilterDirectionEnumToProto(o.Direction)) sCidrRanges := make([]string, len(o.CidrRanges)) for i, r := range o.CidrRanges { sCidrRanges[i] = r } p.SetCidrRanges(sCidrRanges) sIPProtocols := make([]string, len(o.IPProtocols)) for i, r := range o.IPProtocols { sIPProtocols[i] = r } p.SetIpProtocols(sIPProtocols) return p } // PacketMirroringToProto converts a PacketMirroring resource to its proto representation. 
func PacketMirroringToProto(resource *beta.PacketMirroring) *betapb.ComputeBetaPacketMirroring {
	p := &betapb.ComputeBetaPacketMirroring{}
	p.SetId(dcl.ValueOrEmptyInt64(resource.Id))
	p.SetSelfLink(dcl.ValueOrEmptyString(resource.SelfLink))
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetRegion(dcl.ValueOrEmptyString(resource.Region))
	p.SetNetwork(ComputeBetaPacketMirroringNetworkToProto(resource.Network))
	p.SetPriority(dcl.ValueOrEmptyInt64(resource.Priority))
	p.SetCollectorIlb(ComputeBetaPacketMirroringCollectorIlbToProto(resource.CollectorIlb))
	p.SetMirroredResources(ComputeBetaPacketMirroringMirroredResourcesToProto(resource.MirroredResources))
	p.SetFilter(ComputeBetaPacketMirroringFilterToProto(resource.Filter))
	p.SetEnable(ComputeBetaPacketMirroringEnableEnumToProto(resource.Enable))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	return p
}

// applyPacketMirroring handles the gRPC request by passing it to the underlying PacketMirroring Apply() method.
func (s *PacketMirroringServer) applyPacketMirroring(ctx context.Context, c *beta.Client, request *betapb.ApplyComputeBetaPacketMirroringRequest) (*betapb.ComputeBetaPacketMirroring, error) {
	p := ProtoToPacketMirroring(request.GetResource())
	res, err := c.ApplyPacketMirroring(ctx, p)
	if err != nil {
		return nil, err
	}
	r := PacketMirroringToProto(res)
	return r, nil
}

// ApplyComputeBetaPacketMirroring handles the gRPC request by passing it to the underlying PacketMirroring Apply() method.
func (s *PacketMirroringServer) ApplyComputeBetaPacketMirroring(ctx context.Context, request *betapb.ApplyComputeBetaPacketMirroringRequest) (*betapb.ComputeBetaPacketMirroring, error) {
	cl, err := createConfigPacketMirroring(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyPacketMirroring(ctx, cl, request)
}

// DeleteComputeBetaPacketMirroring handles the gRPC request by passing it to the underlying PacketMirroring Delete() method.
func (s *PacketMirroringServer) DeleteComputeBetaPacketMirroring(ctx context.Context, request *betapb.DeleteComputeBetaPacketMirroringRequest) (*emptypb.Empty, error) {
	cl, err := createConfigPacketMirroring(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeletePacketMirroring(ctx, ProtoToPacketMirroring(request.GetResource()))
}

// ListComputeBetaPacketMirroring handles the gRPC request by passing it to the underlying PacketMirroringList() method.
func (s *PacketMirroringServer) ListComputeBetaPacketMirroring(ctx context.Context, request *betapb.ListComputeBetaPacketMirroringRequest) (*betapb.ListComputeBetaPacketMirroringResponse, error) {
	cl, err := createConfigPacketMirroring(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListPacketMirroring(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	var protos []*betapb.ComputeBetaPacketMirroring
	for _, r := range resources.Items {
		rp := PacketMirroringToProto(r)
		protos = append(protos, rp)
	}
	p := &betapb.ListComputeBetaPacketMirroringResponse{}
	p.SetItems(protos)
	return p, nil
}

// createConfigPacketMirroring builds a beta compute client authenticated with the given service account file.
func createConfigPacketMirroring(ctx context.Context, service_account_file string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return beta.NewClient(conf), nil
}
package main

import (
	"encoding/json"
	"io/ioutil"

	"lucastetreault/did-tangaroa/pkg/linkeddata"
)

// clusterDdoc caches the cluster DID document after the first successful load.
var clusterDdoc *linkeddata.DidDocument

// loadClusterDdoc lazily populates clusterDdoc from ./cluster.json.
// It is a no-op once the document has been loaded; any read or decode
// failure panics.
func loadClusterDdoc() {
	if clusterDdoc != nil {
		return
	}
	raw, err := ioutil.ReadFile("./cluster.json")
	if err != nil {
		panic(err.Error())
	}
	doc := linkeddata.DidDocument{}
	if err = json.Unmarshal(raw, &doc); err != nil {
		panic(err.Error())
	}
	clusterDdoc = &doc
}
/*****************************************************************
 * Copyright©,2020-2022, email: 279197148@qq.com
 * Version: 1.0.0
 * @Author: yangtxiang
 * @Date: 2020-08-20 16:02
 * Description:
 *****************************************************************/

package pdl

import (
	"github.com/go-xe2/x/os/xfile"
	"io"
	"strings"
)

// TFileExportYaml is a FileExport implementation that emits one .yaml file
// per PDL source file, mirroring the namespace as a directory hierarchy
// under root.
type TFileExportYaml struct {
	root    string                    // output directory root
	mgr     FileIOManager             // creates/closes the underlying output files
	fileMap map[*FileNamespace]string // namespace -> full path of the file opened for it
}

// Compile-time check that *TFileExportYaml satisfies FileExport.
var _ FileExport = (*TFileExportYaml)(nil)

// NewFileExportYaml creates a yaml exporter that writes under root, using
// mgr for all file IO.
func NewFileExportYaml(root string, mgr FileIOManager) FileExport {
	return &TFileExportYaml{
		root:    root,
		mgr:     mgr,
		fileMap: make(map[*FileNamespace]string),
	}
}

// BeginProjectWrite is a no-op for the yaml exporter.
func (p *TFileExportYaml) BeginProjectWrite() error {
	return nil
}

// EndProjectWrite is a no-op for the yaml exporter.
func (p *TFileExportYaml) EndProjectWrite() {
}

// BeginNamespace is a no-op for the yaml exporter.
func (p *TFileExportYaml) BeginNamespace(ns string) error {
	return nil
}

// EndNamespace is a no-op for the yaml exporter.
func (p *TFileExportYaml) EndNamespace(ns string) {
}

// BeginFileWrite opens <root>/<namespace-as-path>/<fileName>.yaml (creating
// directories as needed), records the path for EndFileWrite, and returns the
// writer plus a *tYamlWriter context used by the other Write* methods.
// NOTE(review): if WriteBegin fails, the file created via p.mgr.Create is
// not closed here — confirm the manager cleans up on error.
func (p *TFileExportYaml) BeginFileWrite(ns *FileNamespace, fileName string) (w io.Writer, cxt interface{}, err error) {
	// Dots in the namespace become path separators.
	path := strings.Replace(ns.Namespace, ".", xfile.Separator, -1)
	realPath := xfile.Join(p.root, path)
	if !xfile.Exists(realPath) {
		if err := xfile.Mkdir(realPath); err != nil {
			return nil, nil, err
		}
	}
	file := xfile.Join(realPath, fileName+".yaml")
	w, err = p.mgr.Create(ns.Namespace, file)
	if err != nil {
		return nil, nil, err
	}
	p.fileMap[ns] = file
	iw := newYamlWriter()
	if err := iw.WriteBegin(); err != nil {
		return nil, nil, err
	}
	return w, iw, nil
}

// WriteNamespace emits the namespace header followed by the full basic-type
// block (ProtoBasicTypes) into the yaml buffer.
func (TFileExportYaml) WriteNamespace(w io.Writer, cxt interface{}, namespace string) error {
	iw := cxt.(*tYamlWriter)
	if err := iw.WriteNamespace(namespace); err != nil {
		return err
	}
	if err := iw.WriteBasicBegin(); err != nil {
		return err
	}
	if err := iw.WriteBasic(ProtoBasicTypes); err != nil {
		return err
	}
	return iw.WriteBasicEnd()
}

// WriteImports emits the import list into the yaml buffer.
func (TFileExportYaml) WriteImports(w io.Writer, cxt interface{}, im []string) error {
	iw := cxt.(*tYamlWriter)
	return iw.WriteImports(im)
}

// WriteTypedefs emits the typedef section into the yaml buffer.
func (TFileExportYaml) WriteTypedefs(w io.Writer, cxt interface{}, defs map[string]*FileTypeDef) error {
	iw := cxt.(*tYamlWriter)
	if err := iw.WriteTypeDefBegin(); err != nil {
		return err
	}
	if err := iw.WriteTypeDefs(defs); err != nil {
		return err
	}
	return iw.WriteTypeDefEnd()
}

// WriteTypes emits the struct/type section into the yaml buffer.
func (TFileExportYaml) WriteTypes(w io.Writer, cxt interface{}, types map[string]*FileStruct) error {
	iw := cxt.(*tYamlWriter)
	if err := iw.WriteTypesBegin(); err != nil {
		return err
	}
	if err := iw.WriteTypes(types); err != nil {
		return err
	}
	return iw.WriteTypesEnd()
}

// WriteServices emits the service/interface section into the yaml buffer.
func (TFileExportYaml) WriteServices(w io.Writer, cxt interface{}, ss map[string]*FileService) error {
	iw := cxt.(*tYamlWriter)
	if err := iw.WriteInterfacesBegin(); err != nil {
		return err
	}
	if err := iw.WriteInterfaces(ss); err != nil {
		return err
	}
	return iw.WriteInterfacesEnd()
}

// Flush finalizes the yaml document and copies the buffered bytes to w.
func (TFileExportYaml) Flush(w io.Writer, cxt interface{}) error {
	iw := cxt.(*tYamlWriter)
	if err := iw.WriteEnd(); err != nil {
		return err
	}
	if _, err := w.Write(iw.Data()); err != nil {
		return err
	}
	return nil
}

// EndFileWrite closes the file previously opened in BeginFileWrite for ns.
// NOTE(review): the fileMap entry is never deleted, so entries accumulate
// for the exporter's lifetime.
func (p *TFileExportYaml) EndFileWrite(w io.Writer, ns *FileNamespace, fileName string) {
	if f, ok := p.fileMap[ns]; ok {
		p.mgr.Close(ns.Namespace, f)
	}
}
package setup

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"runtime"
	"strings"

	"github.com/jrperritt/rack/internal/github.com/codegangsta/cli"
	"github.com/jrperritt/rack/util"
)

// rackBashAutocomplete is the bash completion script that Init writes to
// the user's rack directory and sources from their bash startup file. It
// delegates completion of subcommands and flags to
// `rack --generate-bash-completion`.
var rackBashAutocomplete = `
#! /bin/bash

_cli_bash_autocomplete() {
  local cur prev opts
  COMPREPLY=()
  cur="${COMP_WORDS[COMP_CWORD]}"
  prev="${COMP_WORDS[COMP_CWORD-1]}"

  # The first 5 words should always be completed by rack
  if [[ ${#COMP_WORDS[@]} -lt 5 ]]; then
    opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
    COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
  # All flags should be completed by rack
  elif [[ ${cur} == -* ]]; then
    opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
    COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
  # If the previous word wasn't a flag, then the next on has to be, given the 2 conditions above
  elif [[ ${prev} != -* ]]; then
    opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
    COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
  fi
  return 0
}

complete -o default -F _cli_bash_autocomplete rack
`

// Init runs logic for setting up amenities such as command completion.
func Init(c *cli.Context) { w := c.App.Writer switch runtime.GOOS { case "linux", "darwin": rackDir, err := util.RackDir() if err != nil { fmt.Fprintf(w, "Error running `rack init`: %s\n", err) return } rackCompletionPath := path.Join(rackDir, "bash_autocomplete") rackCompletionFile, err := os.Create(rackCompletionPath) if err != nil { fmt.Fprintf(w, "Error creating `rack` bash completion file: %s\n", err) return } _, err = rackCompletionFile.WriteString(rackBashAutocomplete) if err != nil { fmt.Fprintf(w, "Error writing to `rack` bash completion file: %s\n", err) return } rackCompletionFile.Close() var bashName string if runtime.GOOS == "linux" { bashName = ".bashrc" } else { bashName = ".bash_profile" } homeDir, err := util.HomeDir() if err != nil { fmt.Fprintf(w, "Unable to access home directory: %s\n", err) } bashPath := path.Join(homeDir, bashName) fmt.Fprintf(w, "Looking for %s in %s\n", bashName, bashPath) if _, err := os.Stat(bashPath); os.IsNotExist(err) { fmt.Fprintf(w, "%s doesn't exist. You should create it and/or install your operating system's `bash_completion` package.", bashPath) } else { bashFile, err := os.OpenFile(bashPath, os.O_RDWR|os.O_APPEND, 0644) if err != nil { fmt.Fprintf(w, "Error opening %s: %s\n", bashPath, err) return } defer bashFile.Close() sourceContent := fmt.Sprintf("source %s\n", rackCompletionPath) bashContentsBytes, err := ioutil.ReadAll(bashFile) if strings.Contains(string(bashContentsBytes), sourceContent) { fmt.Fprintf(w, "Command completion enabled in %s\n", bashPath) return } _, err = bashFile.WriteString(sourceContent) if err != nil { fmt.Fprintf(w, "Error writing to %s: %s\n", bashPath, err) return } _, err = exec.Command("/bin/bash", bashPath).Output() if err != nil { fmt.Fprintf(w, "Error sourcing %s: %s\n", bashPath, err) return } fmt.Fprintf(w, "Command completion enabled in %s\n", bashPath) return } default: fmt.Fprintf(w, "Command completion is not currently available for %s\n", runtime.GOOS) return } }
package main

import (
	"fmt"
	"log"

	"github.com/jonmorehouse/gatekeeper/gatekeeper"
	metric_plugin "github.com/jonmorehouse/gatekeeper/plugin/metric"
)

// maxIdx returns the index of the largest value in vals (0 for an empty
// slice). It is used to locate the longest recent GC pause inside the
// circular MemStats.PauseNs / PauseEnd buffers.
// Bug fix: the previous implementation was a stub that always returned 0.
func maxIdx(vals []uint64) int {
	max := 0
	for i, v := range vals {
		if v > vals[max] {
			max = i
		}
	}
	return max
}

// plugin implements the metric_plugin.Plugin interface by logging every
// metric it receives with log.
type plugin struct{}

func (*plugin) Start() error                           { return nil }
func (*plugin) Stop() error                            { return nil }
func (*plugin) Configure(map[string]interface{}) error { return nil }

// Heartbeat logs a liveness line each time the host pings the plugin.
func (*plugin) Heartbeat() error {
	log.Println("metric-logger heartbeat ...")
	return nil
}

// EventMetric logs an event metric together with its Extra key/values.
func (*plugin) EventMetric(metric *gatekeeper.EventMetric) error {
	msg := fmt.Sprintf("metric.event.%s ", metric.Event.String())
	for k, v := range metric.Extra {
		msg += fmt.Sprintf("extra.%s=%s ", k, v)
	}
	log.Println(msg)
	return nil
}

// ProfilingMetric logs one line per runtime.MemStats field of interest.
func (*plugin) ProfilingMetric(metric *gatekeeper.ProfilingMetric) error {
	// write out general profiling statistics
	log.Printf("metric.profiling.memstats.alloc bytes=%v", metric.MemStats.Alloc)
	log.Printf("metric.profiling.memstats.total_alloc bytes=%v", metric.MemStats.TotalAlloc)
	log.Printf("metric.profiling.memstats.sys bytes=%v", metric.MemStats.Sys)
	log.Printf("metric.profiling.memstats.lookups count=%v", metric.MemStats.Lookups)
	log.Printf("metric.profiling.memstats.mallocs count=%v", metric.MemStats.Mallocs)
	log.Printf("metric.profiling.memstats.frees count=%v", metric.MemStats.Frees)

	// heap allocation statistics
	log.Printf("metric.profiling.memstats.heap_alloc bytes=%v", metric.MemStats.HeapAlloc)
	log.Printf("metric.profiling.memstats.heap_sys bytes=%v", metric.MemStats.HeapSys)
	log.Printf("metric.profiling.memstats.heap_idle bytes=%v", metric.MemStats.HeapIdle)
	log.Printf("metric.profiling.memstats.heap_inuse bytes=%v", metric.MemStats.HeapInuse)
	// Bug fix: format was "bytes%v=" (misplaced '=').
	log.Printf("metric.profiling.memstats.heap_released bytes=%v", metric.MemStats.HeapReleased)
	log.Printf("metric.profiling.memstats.heap_objects count=%v", metric.MemStats.HeapObjects)

	// low level structure allocation statistics
	log.Printf("metric.profiling.memstats.stack_inuse bytes=%v", metric.MemStats.StackInuse)
	log.Printf("metric.profiling.memstats.stack_sys bytes=%v", metric.MemStats.StackSys)
	log.Printf("metric.profiling.memstats.mspan_inuse bytes=%v", metric.MemStats.MSpanInuse)
	log.Printf("metric.profiling.memstats.mspan_sys bytes=%v", metric.MemStats.MSpanSys)
	log.Printf("metric.profiling.memstats.mcache_inuse bytes=%v", metric.MemStats.MCacheInuse)
	log.Printf("metric.profiling.memstats.mcache_sys bytes=%v", metric.MemStats.MCacheSys)
	log.Printf("metric.profiling.memstats.buck_hash_sys bytes=%v", metric.MemStats.BuckHashSys)
	log.Printf("metric.profiling.memstats.gc_sys bytes=%v", metric.MemStats.GCSys)
	log.Printf("metric.profiling.memstats.other_sys bytes=%v", metric.MemStats.OtherSys)

	// garbage collector statistics
	log.Printf("metric.profiling.memstats.next_gc count=%v", metric.MemStats.NextGC)
	log.Printf("metric.profiling.memstats.last_gc ts=%v", metric.MemStats.LastGC)
	log.Printf("metric.profiling.memstats.pause_total_ns ns=%v", metric.MemStats.PauseTotalNs)

	// PauseNs/PauseEnd are parallel circular buffers; the same index points
	// at the longest recent pause and its end timestamp.
	idx := maxIdx(metric.MemStats.PauseNs[:])
	// Bug fix: formats were "ns%v=" / "ts%v=" (misplaced '=').
	log.Printf("metric.profiling.memstats.longest_recent_pause ns=%v", metric.MemStats.PauseNs[idx])
	log.Printf("metric.profiling.memstats.longest_recent_pause_end ts=%v", metric.MemStats.PauseEnd[idx])
	log.Printf("metric.profiling.memstats.num_gc count=%v", metric.MemStats.NumGC)
	log.Printf("metric.profiling.memstats.gc_cpu_fraction percent=%v", metric.MemStats.GCCPUFraction*100)
	return nil
}

// PluginMetric logs the latency of a single plugin RPC.
func (*plugin) PluginMetric(metric *gatekeeper.PluginMetric) error {
	log.Printf("metric.plugin.%s.%s.%s latency=%s", metric.PluginType, metric.PluginName, metric.MethodName, metric.Latency)
	return nil
}

// RequestMetric logs one line per request/response attribute, each decorated
// with the upstream (and, when present, backend) identifiers.
func (*plugin) RequestMetric(metric *gatekeeper.RequestMetric) error {
	// logLine decorates a line with upstream/backend identifiers and writes
	// it. Renamed from `log` so it no longer shadows the stdlib log package
	// at the call sites below.
	logLine := func(msg string) {
		// Bug fix: msg was previously dropped from both Sprintf calls
		// (three verbs, two arguments), and the backend block ran when
		// Backend was nil, dereferencing a nil pointer.
		msg = fmt.Sprintf("%s upstream.name=%s upstream.id=%s", msg, metric.Upstream.Name, metric.Upstream.ID)
		if metric.Backend != nil {
			msg = fmt.Sprintf("%s backend.id=%s backend.address=%s", msg, metric.Backend.ID, metric.Backend.Address)
		}
		log.Println(msg)
	}

	// print out request metrics
	logLine(fmt.Sprintf("metric.request.remote_addr value=%s", metric.Request.RemoteAddr))
	logLine(fmt.Sprintf("metric.request.method value=%s", metric.Request.Method))
	logLine(fmt.Sprintf("metric.request.host value=%s", metric.Request.Host))
	logLine(fmt.Sprintf("metric.request.prefix value=%s", metric.Request.Prefix))
	logLine(fmt.Sprintf("metric.request.path value=%s", metric.Request.Path))
	logLine(fmt.Sprintf("metric.request.upstream_match_type value=%s", metric.Request.UpstreamMatchType.String()))
	for k, vs := range metric.Request.Header {
		for _, v := range vs {
			logLine(fmt.Sprintf("metric.request.header %s=%s", k, v))
		}
	}

	// print out response metrics
	logLine(fmt.Sprintf("metric.response.status value=%d", metric.Response.StatusCode))
	logLine(fmt.Sprintf("metric.response.proto value=%s", metric.Response.Proto))
	logLine(fmt.Sprintf("metric.response.content_length value=%d", metric.Response.ContentLength))
	logLine(fmt.Sprintf("metric.response.transfer_encoding value=%s", metric.Response.TransferEncoding))
	for k, vs := range metric.Response.Header {
		for _, v := range vs {
			logLine(fmt.Sprintf("metric.response.header %s=%s", k, v))
		}
	}

	// internal latencies
	logLine(fmt.Sprintf("metric.request.start_ts value=%d", metric.RequestStartTS.Unix()))
	logLine(fmt.Sprintf("metric.request.end_ts value=%d", metric.RequestEndTS.Unix()))
	logLine(fmt.Sprintf("metric.request.latency value=%d", metric.Latency))
	logLine(fmt.Sprintf("metric.request.internal_latency value=%d", metric.InternalLatency))
	logLine(fmt.Sprintf("metric.request.dns_lookup_latency value=%d", metric.DNSLookupLatency))
	logLine(fmt.Sprintf("metric.request.tcp_connect_latency value=%d", metric.TCPConnectLatency))
	logLine(fmt.Sprintf("metric.request.proxy_latency value=%d", metric.ProxyLatency))

	// connection meta information
	// Bug fix: %b is not a valid verb for these values; %v prints them
	// correctly.
	logLine(fmt.Sprintf("metric.request.dns_lookup value=%v", metric.DNSLookup))
	logLine(fmt.Sprintf("metric.request.conn_reused value=%v", metric.ConnReused))
	// NOTE(review): this line logs ConnReused, not a ConnWasIdle field —
	// looks like a copy/paste slip; confirm the intended field and switch
	// if one exists.
	logLine(fmt.Sprintf("metric.request.conn_was_idle value=%v", metric.ConnReused))
	logLine(fmt.Sprintf("metric.request.conn_idle_time value=%v", metric.ConnIdleTime))

	// plugin latencies
	logLine(fmt.Sprintf("metric.request.router_latency value=%s", metric.RouterLatency))
	logLine(fmt.Sprintf("metric.request.loadbalancer_latency value=%s", metric.LoadBalancerLatency))
	logLine(fmt.Sprintf("metric.request.response_modifier_latency value=%s", metric.ResponseModifierLatency))
	logLine(fmt.Sprintf("metric.request.request_modifier_latency value=%s", metric.RequestModifierLatency))
	if metric.Error != nil {
		logLine(fmt.Sprintf("metric.request.error value=%s", metric.Error))
		logLine(fmt.Sprintf("metric.request.error_response_modifier value=%s", metric.ErrorResponseModifierLatency))
	}
	return nil
}

// UpstreamMetric logs an upstream lifecycle event, including backend
// details when a backend is attached.
func (*plugin) UpstreamMetric(metric *gatekeeper.UpstreamMetric) error {
	msg := fmt.Sprintf("metric.upstream.%s upstream.name=%s upstream.id=%s", metric.Event.String(), metric.Upstream.Name, metric.Upstream.ID)
	if metric.Backend != nil {
		msg += fmt.Sprintf(" backend.ID=%s backend.Address=%s", metric.Backend.ID, metric.Backend.Address)
	}
	log.Println(msg)
	return nil
}

func main() {
	if err := metric_plugin.RunPlugin("metric-logger", &plugin{}); err != nil {
		log.Fatal(err)
	}
}
package cachetable

import (
	"strconv"
	"testing"
)

// TestNewCacheTableInit checks constructor validation: zero buckets or zero
// capacity must error; otherwise the table reports the requested dimensions.
func TestNewCacheTableInit(t *testing.T) {
	cases := []struct {
		ina, inb int
	}{
		{10, 10},
		{10, 2},
		{0, 0},
		{1, 0},
		{1, 1},
	}
	mytest := func(prealloc bool) {
		for _, c := range cases {
			h, err := NewCacheTable(c.ina, c.inb, prealloc)
			if c.ina == 0 || c.inb == 0 {
				if err == nil {
					t.Errorf("Expected error, didn't get it")
				}
			} else {
				if h == nil || err != nil {
					t.Errorf("NewCacheTable(%d,%d) threw unexpected error: %s", c.ina, c.inb, err)
				}
				if h.numbuckets != c.ina || h.bucketcapacity != c.inb {
					t.Errorf("NewCacheTable(%d,%d) == %d,%d", c.ina, c.inb, h.numbuckets, h.bucketcapacity)
				}
			}
		}
	}
	mytest(false) //don't preallocate
	mytest(true)  //preallocated
}

// TestNewCacheTableCapacity checks Capacity() == numbuckets * bucketcapacity.
func TestNewCacheTableCapacity(t *testing.T) {
	cases := []struct {
		ina, inb int
	}{
		{10, 10},
		{10, 2},
		{0, 0},
		{1, 0},
		{1, 1},
	}
	mytest := func(prealloc bool) {
		for _, c := range cases {
			h, err := NewCacheTable(c.ina, c.inb, prealloc)
			if c.ina == 0 || c.inb == 0 {
				if err == nil {
					t.Errorf("Expected error, didn't get it")
				}
			} else {
				if h.Capacity() != c.ina*c.inb {
					t.Errorf("NewCacheTable(%q,%q).Capacity() == %q, want %q", c.ina, c.inb, h.Capacity(), c.ina*c.inb)
				}
			}
		}
	}
	mytest(false) //don't preallocate
	mytest(true)  //preallocated
}

// TestLenAndLoad fills a table to 10% and checks Len and the Load ratio.
func TestLenAndLoad(t *testing.T) {
	cases := []struct {
		ina, inb, want int
	}{
		{10, 1, 10},
		{10, 2, 20},
	}
	for _, c := range cases {
		h, _ := NewCacheTable(c.ina, c.inb*10, true)
		for i := 1; i <= c.ina*c.inb; i++ {
			key := strconv.Itoa(i)
			h.Set(key, i)
		}
		got := h.Len()
		if got != c.want {
			t.Errorf("Len(%d,%d) == %d, want %d", c.ina, c.inb, got, c.want)
		}
		load := h.Load()
		want := float32(c.ina*c.inb) / float32(c.ina*c.inb*10)
		if load != want {
			t.Errorf("Load(%d) == %f, want %f", c.ina, load, want)
		}
	}
}

// TestGetAndSet round-trips ints, strings and a slice reference, and checks
// that a stored slice is shared (mutating via the returned value mutates
// the original).
func TestGetAndSet(t *testing.T) {
	h, _ := NewCacheTable(10, 10, true)
	keys := []string{"alpha5", "beta4", "charlie7", "gamma_6", "delta__8"}
	// testing primitives
	for _, key := range keys {
		h.Set(key, len(key))
	}
	for _, key := range keys {
		got, _ := h.Get(key)
		want := len(key)
		if got.Value.(int) != len(key) {
			t.Errorf("want: %q, got: %q", want, got)
		}
	}
	// testing strings
	for _, key := range keys {
		h.Set(key, key+key)
	}
	for _, key := range keys {
		got, _ := h.Get(key)
		want := key + key
		if got.Value.(string) != want {
			t.Errorf("want: %q, got: %q", want, got)
		}
	}
	// testing references to compound types
	arr := []int{2, 3, 4}
	h.Set("myArray", arr)
	a, _ := h.Get("myArray")
	k := a.Value.([]int)
	k[0] = 100
	if k[0] != arr[0] {
		t.Errorf("Reference has not been mutated")
	}
}

// TestCollisionsAndMemoryConstrain inserts more keys than capacity and
// checks the table never exceeds capacity and retains exactly capacity
// items.
func TestCollisionsAndMemoryConstrain(t *testing.T) {
	// a small cachetable that is bound to have collisions
	numbuckets := 2
	bucketcap := 2
	h, _ := NewCacheTable(numbuckets, bucketcap, true)
	keys := []string{"alpha5", "beta4", "charlie7", "gamma_6", "delta__8"}
	if h.Capacity() != numbuckets*bucketcap {
		t.Errorf("Wrong Capacity")
	}
	for _, key := range keys {
		h.Set(key, len(key))
		if h.Len() > numbuckets*bucketcap {
			t.Errorf("Constraint did not work, buckets magically increased")
		}
	}
	number_of_items_recovered := 0
	for _, key := range keys {
		got, inmap := h.Get(key)
		want := len(key)
		if inmap && got.Value.(int) == want {
			number_of_items_recovered++
		}
	}
	if number_of_items_recovered != numbuckets*bucketcap {
		t.Errorf("expected to recover %q elements but got %q", numbuckets*bucketcap, number_of_items_recovered)
	}
}

// TestOverwrite uses a 1x1 table so a second Set must evict the first entry.
func TestOverwrite(t *testing.T) {
	// a cachetable with just one elem
	h, _ := NewCacheTable(1, 1, true)
	keya := "alpha"
	keyb := "beta"
	vala := 10
	valb := 20
	// verify Stats
	if h.Len() != 0 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Len(), 0)
	}
	if h.Capacity() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Capacity(), 1*1)
	}
	h.Set(keya, vala)
	if h.Len() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Len(), 1)
	}
	if h.Capacity() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Capacity(), 1*1)
	}
	// verify alpha is there
	got, inmap := h.Get(keya)
	if inmap == false || got.Value.(int) != vala {
		t.Errorf("Element just added not found!")
	}
	// add beta now
	h.Set(keyb, valb)
	if h.Len() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Len(), 1)
	}
	if h.Capacity() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Capacity(), 1*1)
	}
	// verify beta is there
	got, inmap = h.Get(keyb)
	if inmap == false || got.Value.(int) != valb {
		t.Errorf("Element just added not found!")
	}
	// verify alpha is gone
	got, inmap = h.Get(keya)
	if inmap == true || got != nil {
		t.Errorf("Found element we should have overwritten! h.len: %d", h.Len())
	}
}

// TestDelete checks Delete removes an existing key, updates Len, and
// reports false for a missing key.
func TestDelete(t *testing.T) {
	// a cachetable with just one elem
	h, _ := NewCacheTable(1, 1, true)
	var status bool
	keya := "alpha"
	keyb := "beta"
	vala := 10
	valb := 20
	if h.Len() != 0 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Len(), 0)
	}
	if h.Capacity() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Capacity(), 1*1)
	}
	h.Set(keya, vala)
	if h.Len() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Len(), 1)
	}
	if h.Capacity() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Capacity(), 1*1)
	}
	// verify it's there
	got, inmap := h.Get(keya)
	if inmap == false || got.Value.(int) != vala {
		t.Errorf("Element just added not found!")
	}
	// lets delete it
	_, status = h.Delete(keya)
	if !status {
		t.Errorf("Unable to delete")
	}
	// verify it's gone
	got, inmap = h.Get(keya)
	if inmap == true || got != nil {
		t.Errorf("Found element we just deleted!")
	}
	if h.Len() != 0 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Len(), 0)
	}
	if h.Capacity() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Capacity(), 1*1)
	}
	// add beta now
	h.Set(keyb, valb)
	// lastly, lets delete a non-existent element
	_, status = h.Delete("gamma")
	if status {
		t.Errorf("Deleted a missing key")
	}
	if h.Len() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Len(), 1)
	}
	if h.Capacity() != 1 {
		t.Errorf("Len incorrect. Got %d, Want %d", h.Capacity(), 1*1)
	}
}

// TestAging overfills a small table and checks that only the newest
// (capacity) keys survive — i.e. the oldest entries were evicted.
func TestAging(t *testing.T) {
	// a small cachetable that is bound to have collisions
	numbuckets := 1
	bucketcap := 3
	h, _ := NewCacheTable(numbuckets, bucketcap, true)
	keys := []string{"alpha5", "beta4", "charlie7", "gamma_6", "delta__8", "tst3"}
	if len(keys) <= numbuckets*bucketcap {
		t.Error("The test does not work that way, we need more keys than capacity")
	}
	if h.Capacity() != numbuckets*bucketcap {
		t.Errorf("Wrong Capacity")
	}
	for idx, key := range keys {
		h.Set(key, len(key))
		//check if correct number of nodes in cachetable
		if (h.Len() < h.Capacity() && h.Len() != idx+1) || h.Len() > h.Capacity() || (idx > h.Len() && h.Len() != h.Capacity()) {
			t.Errorf("Something is wrong here: idx: %d, len: %d, capacity: %d", idx, h.Len(), h.Capacity())
		}
	}
	//verify the oldest elements are now missing
	for idx := 0; idx < len(keys)-numbuckets*bucketcap; idx++ {
		got, inmap := h.Get(keys[idx])
		if inmap || got != nil {
			t.Errorf("Found Element %d:%s in map even though it should have been overwritten", idx, keys[idx])
		}
	}
	//check only the newest elements are in cachetable
	for idx := len(keys) - numbuckets*bucketcap; idx < len(keys); idx++ {
		got, inmap := h.Get(keys[idx])
		want := len(keys[idx])
		if !inmap || got.Value.(int) != want {
			t.Errorf("Could not find Element %d:%s in map even though it should be there", idx, keys[idx])
		}
	}
}

// TestUintRolloverSafety checks findElementOrOldestIndex still finds the
// oldest slot when create_time counters have wrapped around MaxUint.
func TestUintRolloverSafety(t *testing.T) {
	type RolloverTest struct {
		current_time uint
		times        []uint
		want_index   int
	}
	//test
	rotest := func(test RolloverTest) {
		chain := make([]Node, len(test.times))
		for i, time := range test.times {
			chain[i].create_time = time
		}
		if index_found, _ := findElementOrOldestIndex(chain, test.current_time, "a key not of this chain"); index_found != test.want_index {
			t.Errorf("Failed to find oldest index after rollover. index_found: %d != want:%d, current_time:%d, times:%+v", index_found, test.want_index, test.current_time, test.times)
		}
	}
	//just some examples that test go behaves as expected
	rotest(RolloverTest{
		current_time: 4,
		times:        []uint{0, 1, 2, 3, 4, MaxUint - 1, MaxUint - 2, MaxUint - 3, MaxUint - 4},
		want_index:   4,
	})
	rotest(RolloverTest{
		current_time: MaxUint / 2,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 1},
		want_index:   0,
	})
	rotest(RolloverTest{
		current_time: MaxUint / 2,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 + 1},
		want_index:   5,
	})
	rotest(RolloverTest{
		current_time: MaxUint / 2,
		times:        []uint{0, 1, 2, 3, 4, MaxUint / 2, MaxUint/2 + 1},
		want_index:   5,
	})
	rotest(RolloverTest{
		current_time: 0,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 10},
		want_index:   0,
	})
	rotest(RolloverTest{
		current_time: MaxUint,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 10, MaxUint - 1},
		want_index:   0,
	})
	rotest(RolloverTest{
		current_time: MaxUint,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 10, MaxUint - 1, MaxUint},
		want_index:   7,
	})
	//verify <=
	rotest(RolloverTest{
		current_time: 3,
		times:        []uint{2, 2},
		want_index:   1,
	})
	//verify <= and direction of range
	rotest(RolloverTest{
		current_time: 0,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 10, MaxUint - 1, MaxUint, 0},
		want_index:   8,
	})
}

// TestElementIsFoundRatherThanOldest checks a key match wins over the
// oldest-slot fallback in findElementOrOldestIndex.
func TestElementIsFoundRatherThanOldest(t *testing.T) {
	type FindTest struct {
		current_time uint
		times        []uint
		want_index   int
	}
	//test
	rotest := func(test FindTest) {
		testkey := "TestKey"
		chain := make([]Node, len(test.times))
		for i, time := range test.times {
			chain[i].create_time = time
		}
		chain[test.want_index].key = testkey
		if index_found, inchain := findElementOrOldestIndex(chain, test.current_time, testkey); inchain == false || index_found != test.want_index {
			t.Errorf("Failed to find element at expected index: inchain:%+v, index_found: %d != want:%d, current_time:%d, times:%+v", inchain, index_found, test.want_index, test.current_time, test.times)
		}
	}
	//just some examples that test go behaves as expected
	rotest(FindTest{
		current_time: 4,
		times:        []uint{0, 1, 2, 3, 4, MaxUint - 1, MaxUint - 2, MaxUint - 3, MaxUint - 4},
		want_index:   5,
	})
	rotest(FindTest{
		current_time: MaxUint / 2,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 1},
		want_index:   2,
	})
	rotest(FindTest{
		current_time: MaxUint / 2,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 + 1},
		want_index:   2,
	})
	rotest(FindTest{
		current_time: MaxUint / 2,
		times:        []uint{0, 1, 2, 3, 4, MaxUint / 2, MaxUint/2 + 1},
		want_index:   3,
	})
	rotest(FindTest{
		current_time: 0,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 10},
		want_index:   1,
	})
	rotest(FindTest{
		current_time: MaxUint,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 10, MaxUint - 1},
		want_index:   3,
	})
	rotest(FindTest{
		current_time: MaxUint,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 10, MaxUint - 1, MaxUint},
		want_index:   2,
	})
	rotest(FindTest{
		current_time: 3,
		times:        []uint{2, 2},
		want_index:   0,
	})
	rotest(FindTest{
		current_time: 0,
		times:        []uint{0, 1, 2, 3, 4, MaxUint/2 - 10, MaxUint - 1, MaxUint, 0},
		want_index:   1,
	})
}
package rpc

// CommandConfig describes how a single cli command maps onto the werifyd
// RPC interface.
type CommandConfig struct {
	// Order is the listing order on cli help
	Order int
	// NumArgs is the # of arguments the command expects
	NumArgs int
	// Description is the cli help string
	Description string
	// RpcMethod is the method to call
	RpcMethod string
}

// RunOperationRpcCommand is the name of the Run Operation RPC command
const RunOperationRpcCommand = "RunOperation"

// Commands is the map of all cli commands. Key is the command name in cli.
var Commands = map[string]CommandConfig{
	"add":          {Order: 1, NumArgs: 1, Description: "Adds a host to werifyd", RpcMethod: "AddHost"},
	"del":          {Order: 2, NumArgs: 1, Description: "Removes a host from werifyd", RpcMethod: "RemoveHost"},
	"list":         {Order: 3, NumArgs: 0, Description: "Lists hosts in werifyd", RpcMethod: "ListHost"},
	"listactive":   {Order: 4, NumArgs: 0, Description: "Lists active hosts in werifyd", RpcMethod: "ListHost"},
	"listinactive": {Order: 5, NumArgs: 0, Description: "Lists inactive hosts in werifyd", RpcMethod: "ListHost"},
	"operation":    {Order: 6, NumArgs: 1, Description: "Runs operations from file on werifyd", RpcMethod: RunOperationRpcCommand},
	"get":          {Order: 7, NumArgs: 1, Description: "Get status of operation with handle", RpcMethod: "OperationStatusCheck"},
	"refresh":      {Order: 8, NumArgs: 0, Description: "Start health check on all hosts", RpcMethod: "Refresh"},
}
package container

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/criyle/go-sandbox/pkg/unixsocket"
)

// bufferSize is the fixed receive buffer size (16 KiB); each incoming
// message is decoded from a single read into this buffer.
const bufferSize = 16 << 10

// socket wraps a unixsocket.Socket with persistent gob encoder/decoder
// state and reusable buffers, so per-message allocations are avoided.
// NOTE(review): the shared buffers mean a socket is not safe for
// concurrent RecvMsg/SendMsg calls — confirm callers serialize access.
type socket struct {
	*unixsocket.Socket

	buff []byte // scratch buffer every RecvMsg reads into

	decoder  *gob.Decoder  // long-lived decoder reading from recvBuff
	recvBuff bufferRotater // swappable view over the bytes of the last read

	encoder  *gob.Encoder // long-lived encoder writing into sendBuff
	sendBuff bytes.Buffer // reset before each SendMsg
}

// bufferRotater replace the underlying Buffers to avoid allocation
type bufferRotater struct {
	*bytes.Buffer
}

// Rotate points the rotater at a new underlying buffer; the decoder built
// on top of it keeps reading from whatever buffer is current.
func (b *bufferRotater) Rotate(buffer *bytes.Buffer) {
	b.Buffer = buffer
}

// newSocket wraps s with message-oriented gob send/receive helpers. The
// encoder and decoder are created once so gob type information is only
// exchanged on first use, not per message.
func newSocket(s *unixsocket.Socket) *socket {
	soc := socket{
		Socket: s,
	}
	soc.buff = make([]byte, bufferSize)
	soc.decoder = gob.NewDecoder(&soc.recvBuff)
	soc.encoder = gob.NewEncoder(&soc.sendBuff)
	return &soc
}

// RecvMsg reads one message from the socket into the scratch buffer and
// gob-decodes it into e, returning the out-of-band unixsocket.Msg.
func (s *socket) RecvMsg(e interface{}) (msg unixsocket.Msg, err error) {
	n, msg, err := s.Socket.RecvMsg(s.buff)
	if err != nil {
		return msg, fmt.Errorf("RecvMsg: %v", err)
	}
	// Point the decoder's input at exactly the bytes just received.
	s.recvBuff.Rotate(bytes.NewBuffer(s.buff[:n]))

	if err := s.decoder.Decode(e); err != nil {
		return msg, fmt.Errorf("RecvMsg: failed to decode %v", err)
	}
	return msg, nil
}

// SendMsg gob-encodes e into the reusable send buffer and writes it to the
// socket together with the out-of-band msg.
func (s *socket) SendMsg(e interface{}, msg unixsocket.Msg) error {
	s.sendBuff.Reset()
	if err := s.encoder.Encode(e); err != nil {
		return fmt.Errorf("SendMsg: failed to encode %v", err)
	}
	if err := s.Socket.SendMsg(s.sendBuff.Bytes(), msg); err != nil {
		return fmt.Errorf("SendMsg: failed to SendMsg %v", err)
	}
	return nil
}
package models

import (
	"database/sql"

	_ "github.com/go-sql-driver/mysql"
)

// DB Variables
var UserDB string
var PassDB string
var DatabaseDB string
var HostDB string
var PortDB string

type JsonListClustersMap []jsonListClusters
type JsonApps []jsonApps
type JsonAppsByClustersMap map[string][]jsonAppsByClusters
type DescriptionMap map[string][]DescriptionStruct

type jsonListClusters struct {
	Aws         AWS       `json:"aws"`
	ClusterName string    `json:"clusterName"`
	K8SVersion  string    `json:"k8sVersion"`
	Instances   Instances `json:"instances"`
}

type Instances struct {
	TotalInstances int            `json:"totalInstances"`
	Description    DescriptionMap `json:"description"`
}

type DescriptionStruct struct {
	Description
}

type Description struct {
	Type               string `json:"type"`
	TotalTypeInstances int    `json:"totalTypeInstances"`
}

type AWS struct {
	Account     int64  `json:"account"`
	Region      string `json:"region"`
	Environment string `json:"environment"`
}

type jsonApps struct {
	ClusterName  string `json:"clusterName"`
	Name         string `json:"name"`
	Namespace    string `json:"namespace"`
	Type         string `json:"type"`
	HpaEnabled   bool   `json:"hpaEnabled"`
	VaultEnabled bool   `json:"vaultEnabled"`
	Helm         Helm   `json:"helm"`
}

type Helm struct {
	Version    string `json:"version"`
	Chart      string `json:"chart"`
	APPVersion string `json:"appVersion"`
}

type jsonAppsByClusters struct {
	Name         string `json:"name"`
	Namespace    string `json:"namespace"`
	Type         string `json:"type"`
	HpaEnabled   bool   `json:"hpaEnabled"`
	VaultEnabled bool   `json:"vaultEnabled"`
	Helm         Helm   `json:"helm"`
}

// ListAllClusters - List all Clusters
//
// Appends one entry per cluster (with its per-node-type instance breakdown)
// to response and returns it. Any database error panics via checkErr, as in
// the rest of this package.
func ListAllClusters(response *JsonListClustersMap) *JsonListClustersMap {
	var SIDCluster int
	var SName string
	var SAWSAccount int64
	var SAWSRegion string
	var SAWSEnvironment string
	var SK8sVersion string
	var SNodeType string
	var SNodeInstance string
	var STotalInstances int
	var totalInstances int
	description := make(DescriptionMap)

	db, err := sql.Open("mysql", UserDB+":"+PassDB+"@tcp("+HostDB+":"+PortDB+")/"+DatabaseDB+"?charset=utf8")
	checkErr(err)
	defer db.Close()

	rows, err := db.Query("SELECT id_cluster, nome, aws_account, aws_region, aws_env, k8s_version FROM clusters ORDER BY nome")
	checkErr(err)
	// Close the result set so its connection is returned to the pool
	// (previously leaked).
	defer rows.Close()

	for rows.Next() {
		err = rows.Scan(&SIDCluster, &SName, &SAWSAccount, &SAWSRegion, &SAWSEnvironment, &SK8sVersion)
		checkErr(err)

		// Fresh accumulators for each cluster.
		description = DescriptionMap{}
		totalInstances = 0

		rows1, err := db.Query("SELECT node_type, node_instance, total_instances FROM nodes WHERE id_cluster=?", SIDCluster)
		checkErr(err)
		for rows1.Next() {
			err = rows1.Scan(&SNodeType, &SNodeInstance, &STotalInstances)
			checkErr(err)
			description[SNodeType] = append(
				description[SNodeType],
				DescriptionStruct{
					Description{
						Type:               SNodeInstance,
						TotalTypeInstances: STotalInstances,
					},
				},
			)
			totalInstances = totalInstances + STotalInstances
		}
		// Close explicitly inside the loop; a defer here would pile up one
		// open result set per cluster until the function returned.
		checkErr(rows1.Err())
		rows1.Close()

		*response = append(
			*response,
			jsonListClusters{
				ClusterName: SName,
				Aws: AWS{
					Account:     SAWSAccount,
					Region:      SAWSRegion,
					Environment: SAWSEnvironment,
				},
				K8SVersion: SK8sVersion,
				Instances: Instances{
					TotalInstances: totalInstances,
					Description:    description,
				},
			},
		)
	}
	checkErr(rows.Err())
	return response
}

// ListApps - List details apps in clusters
//
// Returns one entry per (cluster, namespace, app), with helm details when
// present (LEFT JOIN, so helm columns default to "").
func ListApps() JsonApps {
	var SClusterName string
	var SNamespace string
	var SAppName string
	var SAppType string
	var SHelmVersion string
	var SHelmChart string
	var SHelmAPPVersion string
	var SHpaEnabled bool
	var SVaultEnabled bool
	var response JsonApps

	db, err := sql.Open("mysql", UserDB+":"+PassDB+"@tcp("+HostDB+":"+PortDB+")/"+DatabaseDB+"?charset=utf8")
	checkErr(err)
	defer db.Close()

	rows, err := db.Query("SELECT clusters.nome, apps.namespace, apps.app, apps.type, IFNULL(helm.helm_version, \"\"), IFNULL(helm.chart, \"\"), IFNULL(helm.app_version, \"\"), apps.hpa_enabled, apps.vault_enabled FROM apps INNER JOIN clusters ON (apps.id_cluster=clusters.id_cluster) LEFT JOIN helm ON (apps.app=helm.app AND apps.namespace=helm.namespace AND apps.id_cluster=helm.id_cluster) ORDER BY clusters.nome,apps.namespace,apps.app")
	checkErr(err)
	defer rows.Close() // release the connection (previously leaked)

	for rows.Next() {
		err = rows.Scan(&SClusterName, &SNamespace, &SAppName, &SAppType, &SHelmVersion, &SHelmChart, &SHelmAPPVersion, &SHpaEnabled, &SVaultEnabled)
		checkErr(err)
		response = append(
			response,
			jsonApps{
				ClusterName:  SClusterName,
				Name:         SAppName,
				Namespace:    SNamespace,
				Type:         SAppType,
				HpaEnabled:   SHpaEnabled,
				VaultEnabled: SVaultEnabled,
				Helm: Helm{
					Version:    SHelmVersion,
					Chart:      SHelmChart,
					APPVersion: SHelmAPPVersion,
				},
			},
		)
	}
	checkErr(rows.Err())
	return response
}

// ListAppsByClusters - List details apps by clusters
//
// Same data as ListApps but grouped into a map keyed by cluster name.
func ListAppsByClusters() JsonAppsByClustersMap {
	var SClusterName string
	var SNamespace string
	var SAppName string
	var SAppType string
	var SHelmVersion string
	var SHelmChart string
	var SHelmAPPVersion string
	var SHpaEnabled bool
	var SVaultEnabled bool
	response := make(JsonAppsByClustersMap)

	db, err := sql.Open("mysql", UserDB+":"+PassDB+"@tcp("+HostDB+":"+PortDB+")/"+DatabaseDB+"?charset=utf8")
	checkErr(err)
	defer db.Close()

	rows, err := db.Query("SELECT clusters.nome, apps.namespace, apps.app, apps.type, IFNULL(helm.helm_version, \"\"), IFNULL(helm.chart, \"\"), IFNULL(helm.app_version, \"\"), apps.hpa_enabled, apps.vault_enabled FROM apps INNER JOIN clusters ON (apps.id_cluster=clusters.id_cluster) LEFT JOIN helm ON (apps.app=helm.app AND apps.namespace=helm.namespace AND apps.id_cluster=helm.id_cluster) ORDER BY apps.namespace,apps.app")
	checkErr(err)
	defer rows.Close() // release the connection (previously leaked)

	for rows.Next() {
		err = rows.Scan(&SClusterName, &SNamespace, &SAppName, &SAppType, &SHelmVersion, &SHelmChart, &SHelmAPPVersion, &SHpaEnabled, &SVaultEnabled)
		checkErr(err)
		response[SClusterName] = append(
			response[SClusterName],
			jsonAppsByClusters{
				Name:         SAppName,
				Namespace:    SNamespace,
				Type:         SAppType,
				HpaEnabled:   SHpaEnabled,
				VaultEnabled: SVaultEnabled,
				Helm: Helm{
					Version:    SHelmVersion,
					Chart:      SHelmChart,
					APPVersion: SHelmAPPVersion,
				},
			},
		)
	}
	checkErr(rows.Err())
	return response
}

// checkErr panics on any non-nil error; this package treats DB failures as
// fatal programming/configuration errors.
func checkErr(err error) {
	if err != nil {
		panic(err)
	}
}
package gcalc

import (
	. "github.com/Focinfi/gtester"
	"testing"
)

// TestCalculator evaluates a mixed expression with negatives, parentheses
// and floats; it simplifies to 0.
func TestCalculator(t *testing.T) {
	const expr = " 1*(-1)* (5.00 + (-2)) * 3-(1.0+ 2)* 3 *(-1)"
	AssertEqual(t, Compute(expr), float64(0))
}
package service

import (
	"context"
	"fmt"
	"io"

	pbAS "github.com/go-ocf/cloud/authorization/pb"
	pbCQRS "github.com/go-ocf/cloud/resource-aggregate/pb"
	pbDD "github.com/go-ocf/cloud/resource-directory/pb/device-directory"
	pbRD "github.com/go-ocf/cloud/resource-directory/pb/resource-directory"
	pbRS "github.com/go-ocf/cloud/resource-directory/pb/resource-shadow"
	"github.com/go-ocf/kit/log"
	kitNetGrpc "github.com/go-ocf/kit/net/grpc"
	grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

//RequestHandler for handling incoming request
type RequestHandler struct {
	authClient pbAS.AuthorizationServiceClient
	projection *Projection
}

//NewRequestHandler factory for new RequestHandler
func NewRequestHandler(authClient pbAS.AuthorizationServiceClient, projection *Projection) *RequestHandler {
	return &RequestHandler{
		authClient: authClient,
		projection: projection,
	}
}

// logAndReturnError logs err and returns it unchanged, so handlers can log
// and propagate in one expression.
func logAndReturnError(err error) error {
	log.Errorf("%v", err)
	return err
}

// GetUsersDevices returns the ids of the devices owned by the user in
// authCtx, optionally restricted to deviceIdsFilter. It forwards the caller's
// bearer token to the authorization service.
func (r *RequestHandler) GetUsersDevices(ctx context.Context, authCtx *pbCQRS.AuthorizationContext, deviceIdsFilter []string) ([]string, error) {
	userIdsFilter := []string(nil)
	if authCtx.GetUserId() != "" {
		userIdsFilter = []string{authCtx.GetUserId()}
	}
	token, err := grpc_auth.AuthFromMD(ctx, "bearer")
	if err != nil {
		return nil, status.Errorf(codes.Unauthenticated, "cannot get users devices: %v", err)
	}
	getUserDevicesClient, err := r.authClient.GetUserDevices(kitNetGrpc.CtxWithToken(ctx, token), &pbAS.GetUserDevicesRequest{
		UserIdsFilter:   userIdsFilter,
		DeviceIdsFilter: deviceIdsFilter,
	})
	if err != nil {
		return nil, status.Errorf(status.Convert(err).Code(), "cannot get users devices: %v", err)
	}
	userDevices := make([]string, 0, 32)
	for {
		userDevice, err := getUserDevicesClient.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, status.Errorf(status.Convert(err).Code(), "cannot get users devices: %v", err)
		}
		if userDevice == nil {
			continue
		}
		userDevices = append(userDevices, userDevice.DeviceId)
	}
	return userDevices, nil
}

// RetrieveResourcesValues streams the resource values of the user's devices
// back to the client.
func (r *RequestHandler) RetrieveResourcesValues(req *pbRS.RetrieveResourcesValuesRequest, srv pbRS.ResourceShadow_RetrieveResourcesValuesServer) error {
	deviceIds, err := r.GetUsersDevices(srv.Context(), req.GetAuthorizationContext(), req.DeviceIdsFilter)
	if err != nil {
		return logAndReturnError(status.Errorf(status.Convert(err).Code(), "cannot retrieve resources values: %v", err))
	}
	if len(deviceIds) == 0 {
		return logAndReturnError(status.Errorf(codes.NotFound, "cannot retrieve resources values: not found"))
	}
	rd := NewResourceShadow(r.projection, deviceIds)
	statusCode, err := rd.RetrieveResourcesValues(srv.Context(), req, func(resourceLink *pbRS.ResourceValue) error {
		err := srv.Send(resourceLink)
		if err != nil {
			return fmt.Errorf("cannot send resource value to client: %v", err)
		}
		return nil
	})
	if err != nil {
		return logAndReturnError(status.Errorf(statusCode, "cannot retrieve resources values: %v", err))
	}
	return nil
}

// GetResourceLinks streams the resource links of the user's devices back to
// the client.
func (r *RequestHandler) GetResourceLinks(req *pbRD.GetResourceLinksRequest, srv pbRD.ResourceDirectory_GetResourceLinksServer) error {
	deviceIds, err := r.GetUsersDevices(srv.Context(), req.GetAuthorizationContext(), req.DeviceIdsFilter)
	if err != nil {
		// Message fixed: it previously said "cannot retrieve resources
		// values" — copy-pasted from RetrieveResourcesValues.
		return logAndReturnError(status.Errorf(status.Convert(err).Code(), "cannot get resource links: %v", err))
	}
	if len(deviceIds) == 0 {
		return logAndReturnError(status.Errorf(codes.NotFound, "cannot get resource links: not found"))
	}
	rd := NewResourceDirectory(r.projection, deviceIds)
	code, err := rd.GetResourceLinks(srv.Context(), req, func(resourceLink *pbRD.ResourceLink) error {
		err := srv.Send(resourceLink)
		if err != nil {
			return fmt.Errorf("cannot send resource link to client: %v", err)
		}
		return nil
	})
	if err != nil {
		return logAndReturnError(status.Errorf(code, "cannot get resource links: %v", err))
	}
	return nil
}

// GetDevices streams the contents of the user's devices back to the client.
// Unlike the other handlers it performs no empty-deviceIds short-circuit;
// NOTE(review): confirm whether a NotFound check was intentionally omitted.
func (r *RequestHandler) GetDevices(req *pbDD.GetDevicesRequest, srv pbDD.DeviceDirectory_GetDevicesServer) error {
	deviceIds, err := r.GetUsersDevices(srv.Context(), req.GetAuthorizationContext(), req.DeviceIdsFilter)
	if err != nil {
		return logAndReturnError(status.Errorf(status.Convert(err).Code(), "cannot get devices contents: %v", err))
	}
	rd := NewDeviceDirectory(r.projection, deviceIds)
	code, err := rd.GetDevices(srv.Context(), req, func(device *pbDD.Device) error {
		err := srv.Send(device)
		if err != nil {
			return fmt.Errorf("cannot send device to client: %v", err)
		}
		return nil
	})
	if err != nil {
		return logAndReturnError(status.Errorf(code, "cannot get devices contents: %v", err))
	}
	// err is always nil here; return nil explicitly (was `return err`).
	return nil
}
package main

import (
	"encoding/json"
	"fmt"
)

// Animal is a simple JSON-serializable struct.
type Animal struct {
	Name string `json:"name"`
}

// GetName returns the animal's name.
func (a *Animal) GetName() string {
	return a.Name
}

// Dog embeds *Animal, so it inherits GetName and its JSON fields are
// flattened alongside Owner when marshalled.
type Dog struct {
	*Animal
	Owner string `json:"owner"`
}

func main() {
	animal := &Animal{Name: "this is an animal"}
	out, _ := json.Marshal(animal)
	fmt.Println(string(out))

	dog := &Dog{animal, "this is the owner name of this dog"}
	out, _ = json.Marshal(dog)
	fmt.Println(string(out))

	// Marshalling nil yields the JSON literal "null".
	out, _ = json.Marshal(nil)
	fmt.Println(string(out))

	// GetName is promoted from the embedded *Animal.
	fmt.Println(dog.GetName())
}
package main

import (
	"fmt"
)

func main() {
	// The leading zero-width `_ struct{}` field forces callers either to use
	// keyed literals or to spell out the placeholder explicitly, discouraging
	// fragile positional initialization.
	type NamedParamsInit struct {
		_    struct{}
		name string
		age  int
	}

	keyed := NamedParamsInit{name: "fan", age: 10}
	// A fully positional literal like NamedParamsInit{"", "dong", 1} fails
	// to compile; the blank field must be given as struct{}{}:
	positional := NamedParamsInit{struct{}{}, "dong", 1}
	fmt.Println(keyed, positional)
}
package cp

import (
	"io"
	"net/http"
	"os"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
	"golang.org/x/xerrors"
)

// Source is a readable, closeable input (local file or HTTP response body)
// of known size, optionally wrapped by a progress bar.
type Source struct {
	r    io.ReadCloser
	pb   func() *mpb.Bar
	size int64
	path string
}

// Read delegates to the underlying reader.
func (s *Source) Read(p []byte) (n int, err error) {
	return s.r.Read(p)
}

// Close delegates to the underlying reader.
func (s *Source) Close() error {
	return s.r.Close()
}

// Create Source struct from file
func File(path string) (*Source, error) {
	fp, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, xerrors.Errorf("failed to open source file: %s\nerror-> %w", path, err)
	}
	// Stat error was previously ignored, which could yield a bogus size.
	fi, err := fp.Stat()
	if err != nil {
		fp.Close()
		return nil, xerrors.Errorf("failed to stat source file: %s\nerror-> %w", path, err)
	}
	return &Source{r: fp, size: fi.Size(), path: path}, nil
}

// Create Source struct from http.get
func HttpGet(url string) (*Source, error) {
	res, err := http.Get(url)
	if err != nil {
		// res is nil when err != nil; dereferencing res.Body here used to
		// panic. Return the error instead.
		return nil, xerrors.Errorf("failed to http get: %s\nerror-> %w", url, err)
	}
	return &Source{r: res.Body, size: res.ContentLength, path: url}, nil
}

// WithProgressBar lazily attaches a progress bar from parent; the bar is
// created (and s.r wrapped in a proxy reader) only when s.pb is invoked.
func (s *Source) WithProgressBar(parent *mpb.Progress) {
	s.pb = func() *mpb.Bar {
		p := parent.AddBar(s.size,
			mpb.BarStyle("[=>-|"),
			mpb.PrependDecorators(
				decor.CountersKibiByte("% .2f / % .2f"),
			),
			mpb.AppendDecorators(
				decor.EwmaETA(decor.ET_STYLE_GO, 90),
				decor.Name(" ] "),
				decor.EwmaSpeed(decor.UnitKiB, "% .2f", 60),
			))
		s.r = p.ProxyReader(s.r)
		return p
	}
}
package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"text/template"

	"github.com/pkg/errors"
)

// main parses the -store and -password flags and renders the template read
// from stdin to stdout.
func main() {
	store := flag.String("store", "", "directory of the store")
	password := flag.String("password", "", "password used to decrypt")
	flag.Parse()

	if err := execute(os.Stdin, os.Stdout, *store, *password); err != nil {
		log.Fatalln(err)
	}
}

// execute reads a template from in and renders it to out with three helper
// functions: resolve (join a filename onto store), sh (run a shell command),
// and secret (openssl-decrypt a store file using password).
func execute(in io.Reader, out io.Writer, store, password string) error {
	funcMap := template.FuncMap{
		"resolve": func(filename string) string {
			return filepath.Join(store, filename)
		},
		"sh": sh,
		"secret": func(filename string) (string, error) {
			return sh("openssl aes-256-cbc -a -d -in %s -out - -pass pass:%s",
				filepath.Join(store, filename), password)
		},
	}

	content, err := ioutil.ReadAll(in)
	if err != nil {
		return errors.Wrap(err, "could not read from input")
	}

	tmpl, err := template.New("template").Funcs(funcMap).Parse(string(content))
	if err != nil {
		return errors.Wrap(err, "could not parse template file")
	}

	return errors.Wrap(tmpl.Execute(out, nil), "could not execute")
}

// sh runs the formatted command line via `sh -c`, passing the child's stderr
// through, and returns its whitespace-trimmed stdout.
func sh(cmdformat string, a ...interface{}) (string, error) {
	shCmd := fmt.Sprintf(cmdformat, a...)
	cmd := exec.Command("sh", "-c", shCmd)
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	if err != nil {
		return "", errors.Wrapf(err, "could not run sh command: %s", shCmd)
	}
	return strings.TrimSpace(string(out)), nil
}
package glman var ( //progTest *Program progTexFont *Program progTexFontEdge *Program progSimpleDraw *Program //progSimpleTex *Program //progColorDraw *Program ) // UseProgTexFont load and use the tex font program func UseProgTexFont(edge bool) (p *Program) { if edge { if progTexFontEdge == nil { progTexFontEdge = MustLoadProgram("texfont.vert", "texfont-edge.frag") } p = progTexFontEdge } else { if progTexFont == nil { progTexFont = MustLoadProgram("texfont.vert", "texfont.frag") } p = progTexFont } p.UseProgram() DbgCheckError() p.LoadMVPStack() p.LoadClip2DStack() return p } // UseProgSimpleDraw load and use the simple draw program func UseProgSimpleDraw() (p *Program) { if progSimpleDraw == nil { progSimpleDraw = MustLoadProgram("simple-draw.vert", "simple-draw.frag") } p = progSimpleDraw p.UseProgram() p.LoadMVPStack() p.LoadClip2DStack() return p }
package vkupload_tests

import (
	"strings"
	"testing"
)

// expectSimpleUpload posts body with the given Content-Disposition header
// and asserts a successful JSON upload result echoing the body. Extracted to
// remove the copy-pasted request pipeline from the six happy-path tests.
func expectSimpleUpload(t *testing.T, disposition, body string) {
	t.Helper()
	uploadTest.Post("/upload").
		SetHeader("Content-Disposition", disposition).
		BodyString(body).
		Expect(t).
		Status(200).
		Type("json").
		AssertFunc(assertUploadResultFields(body)).
		Done()
}

func TestSimpleSimple(t *testing.T) {
	expectSimpleUpload(t, `form-data; name="fieldname"; filename="filename.jpg"`, simpleFileContent)
}

func TestSimpleAttachment(t *testing.T) {
	expectSimpleUpload(t, `attachment; name="fieldname"; filename="filename.jpg"`, simpleFileContent)
}

func TestSimpleWithoutName(t *testing.T) {
	expectSimpleUpload(t, `form-data; filename="filename.jpg"`, simpleFileContent)
}

func TestSimpleWithoutFilename(t *testing.T) {
	expectSimpleUpload(t, `attachment; name="fieldname";`, simpleFileContent)
}

func TestSimpleWithoutNameAndFilename(t *testing.T) {
	expectSimpleUpload(t, `form-data;`, simpleFileContent)
}

func TestSimpleWithoutNameAndFilename2(t *testing.T) {
	expectSimpleUpload(t, `form-data`, simpleFileContent)
}

func TestSimpleBig(t *testing.T) {
	// Just under the 1 MiB limit: must still succeed.
	str := strings.Repeat("a", 1024*1024*1-1024)
	expectSimpleUpload(t, `form-data; name="fieldname"; filename="filename.jpg"`, str)
}

func TestSimpleToBig(t *testing.T) {
	// Over the limit: the server must reject with 413 Payload Too Large.
	str := strings.Repeat("a", 1024*1024*2)
	uploadTest.Post("/upload").
		SetHeader("Content-Disposition", `form-data; name="fieldname"; filename="filename.jpg"`).
		BodyString(str).
		Expect(t).
		Status(413).
		Done()
}
/* progress.go WJ118
 * written by Walter de Jong <walter@heiho.net>
 * This is free and unencumbered software released into the public domain. Please refer to http://unlicense.org/
 */

// Package progress renders simple terminal progress meters (bar, spinner,
// percentage) that redraw in place using backspace characters.
package progress

import (
	"fmt"
	"strings"
	"time"
)

const (
	refresh     = 250 * time.Millisecond // minimum interval between redraws
	spinnerText = "|/-\\"                // animation frames for Spinner
	barWidth    = 10                     // default Bar width in characters
)

// Meter is the shared base for all progress displays. It tracks the current
// and maximum values, an optional label, and the last rendered line so
// concrete meters can skip no-op redraws.
type Meter struct {
	Value, MaxValue int
	Label           string
	Timestamp       time.Time // set by Show; consulted by shouldRefresh
	visible         bool      // true between Show and Finish
	line            string    // last rendered text
}

// Bar renders progress as a fixed-width "|====    |" style bar.
type Bar struct {
	Meter
	Width int // bar width in characters; <= 0 means use barWidth
}

// Spinner renders an animated |/-\ character; Value is the frame index.
type Spinner struct {
	Meter
}

// Percent renders progress as a right-aligned "NNN%".
type Percent struct {
	Meter
}

// Progresser is the common interface implemented by Bar, Spinner, Percent.
type Progresser interface {
	Show()
	Update(value int)
	Finish()
}

func (m *Meter) shouldRefresh() bool {
	// returns true if it's time to re-display the meter
	// NOTE(review): Timestamp is only written in Show, never on refresh, so
	// after the first interval every Update redraws — confirm if intended.
	t := time.Now()
	elapsed := t.Sub(m.Timestamp)
	return elapsed >= refresh
}

// render is a placeholder; each concrete meter overrides it to set m.line.
func (m *Meter) render() {
	/* m.line = ... */
}

// Show prints the label (if any) once and marks the meter visible.
func (m *Meter) Show() {
	m.Timestamp = time.Now()

	if m.visible {
		return
	}
	if m.Label != "" {
		fmt.Printf("%s ", m.Label)
	}
	m.visible = true
}

// Update is intentionally a no-op on the base Meter; the commented pseudo-code
// documents the redraw-only-on-change pattern the concrete meters implement.
func (m *Meter) Update(value int) {
	/*
		m.Value = value
		if ! m.shouldRefresh() {
			return
		}
		line_copy := m.line
		m.render()
		if line_copy != m.line {
			print m.line
		}
	*/
}

// Finish terminates the meter's output line and resets display state.
func (m *Meter) Finish() {
	fmt.Println("")
	m.visible = false
	m.line = ""
}

// render draws the bar into b.line, clamping Value to MaxValue.
func (b *Bar) render() {
	if b.Width <= 0 {
		b.Width = barWidth
	}
	one_unit := float32(b.Width) / float32(b.MaxValue)
	value := b.Value
	if value > b.MaxValue {
		value = b.MaxValue
	}
	units := int(float32(value)*one_unit + 0.5)
	b.line = "|" + strings.Repeat("=", units) + strings.Repeat(" ", b.Width-units) + "|"
}

// Show displays the label and the initial bar.
func (b *Bar) Show() {
	b.Meter.Show()
	b.render()
	fmt.Printf("%s ", b.line)
}

// Update redraws the bar in place (backspace-erase) when the rendered text
// changed and the refresh interval has elapsed.
func (b *Bar) Update(value int) {
	b.Value = value
	if !b.shouldRefresh() {
		return
	}
	line_copy := b.line
	b.render()
	if line_copy == b.line {
		return
	}
	// erase the two '|' delimiters, Width cells, and the trailing space
	erase := strings.Repeat("\b", b.Width+3)
	fmt.Printf("%s%s ", erase, b.line)
}

// Finish forces the bar to 100% and terminates the line.
func (b *Bar) Finish() {
	// show 100%
	b.Value = b.MaxValue
	b.Update(b.Value)
	b.Meter.Finish()
}

// render advances the spinner one frame; Value cycles through 0..3.
func (s *Spinner) render() {
	s.Value++
	if s.Value >= 4 {
		s.Value = 0
	}
	s.line = fmt.Sprintf("%c", spinnerText[s.Value])
}
// Show displays the label and the first spinner frame.
func (s *Spinner) Show() {
	s.Meter.Show()
	s.render()
	fmt.Printf("%s ", s.line)
}

// Update advances the spinner at most once per refresh interval. The value
// argument is deliberately ignored (see commented line): a spinner has no
// progress scale, only animation frames.
func (s *Spinner) Update(value int) {
	//	s.Value = value
	if !s.shouldRefresh() {
		return
	}
	// back up over the previous frame and its trailing space, then redraw
	fmt.Printf("\b\b")
	s.render()
	fmt.Printf("%s ", s.line)
}

// Finish erases the spinner from the line and resets display state.
func (s *Spinner) Finish() {
	fmt.Printf("\b\b \b\b")
	s.Meter.Finish()
}

// render formats the clamped percentage into p.line as "NNN%".
func (p *Percent) render() {
	one_percent := 100.0 / float32(p.MaxValue)
	value := p.Value
	if value > p.MaxValue {
		value = p.MaxValue
	}
	percent := int32(float32(value)*one_percent + 0.5)
	if percent > 100 {
		percent = 100
	}
	p.line = fmt.Sprintf("%3d%%", percent)
}

// Show displays the label and the initial percentage.
func (p *Percent) Show() {
	p.Meter.Show()
	p.render()
	fmt.Printf("%s ", p.line)
}

// Update redraws the percentage in place when it changed and the refresh
// interval has elapsed.
func (p *Percent) Update(value int) {
	p.Value = value
	if !p.shouldRefresh() {
		return
	}
	line_copy := p.line
	p.render()
	if line_copy == p.line {
		return
	}
	// erase the 4-character "NNN%" plus its trailing space, then redraw
	fmt.Printf("\b\b\b\b\b%s ", p.line)
}

// Finish forces 100% and terminates the line.
func (p *Percent) Finish() {
	// show 100%
	p.Value = p.MaxValue
	p.Update(p.Value)
	p.Meter.Finish()
}

// EOB
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/jnewmano/advent2020/input"
	"github.com/jnewmano/advent2020/output"
)

// Rule is one inclusive [start, stop] range for a ticket field.
type Rule struct {
	start int
	stop  int
}

// ticketRules maps a field name to its set of valid ranges.
var ticketRules = make(map[string][]Rule)

func main() {
	//input.SetRaw(raw)
	// var things = input.Load()
	// var things = input.LoadSliceSliceString("")
	var things = input.LoadSliceString("")
	// var list = make([]int)

	// The rules section ends at the first blank line; parse everything
	// before it and keep the remainder for the ticket sections.
	for i, v := range things {
		if v == "" {
			things = things[i:]
			break
		}
		processRule(v)
	}

	// Skip the "your ticket" header/value and the "nearby tickets" header
	// (5 lines including the leading blank), then scan nearby tickets.
	invalid := []int{}
	for _, v := range things[5:] {
		invalid = append(invalid, getInvalidNumbers(v)...)
	}

	fmt.Println("invalids", invalid)
	fmt.Println(output.Sum(invalid))
}

// inRange reports whether n falls inside any of the given ranges.
func inRange(n int, rules []Rule) bool {
	for _, r := range rules {
		if n >= r.start && n <= r.stop {
			return true
		}
	}
	return false
}

// getInvalidNumbers returns the fields of ticket t that satisfy no rule at
// all. Each field is parsed once and checked directly against the ranges
// (previously every rule re-split and re-parsed the whole ticket line).
// Unparseable fields are treated as 0, best-effort.
func getInvalidNumbers(t string) []int {
	invalid := []int{}
	for _, field := range strings.Split(t, ",") {
		n, _ := strconv.Atoi(field)
		valid := false
		for _, rules := range ticketRules {
			if inRange(n, rules) {
				valid = true
				break
			}
		}
		if !valid {
			invalid = append(invalid, n)
		}
	}
	return invalid
}

// checkTicketField reports whether field idx of ticket t satisfies rule.
// Panics if that field is not numeric (unchanged behavior).
func checkTicketField(t string, idx int, rule []Rule) bool {
	parts := strings.Split(t, ",")
	n, err := strconv.Atoi(parts[idx])
	if err != nil {
		panic(err)
	}
	return inRange(n, rule)
}

// processRule parses a line like "class: 1-3 or 5-7" into ticketRules.
func processRule(s string) {
	parts := strings.Split(s, ": ")
	name := parts[0]
	ranges := strings.Split(parts[1], " or ")
	rules := []Rule{}
	for _, v := range ranges {
		p := strings.Split(v, "-")
		start, err := strconv.Atoi(p[0])
		if err != nil {
			panic(err)
		}
		end, err := strconv.Atoi(p[1])
		if err != nil {
			panic(err)
		}
		rules = append(rules, Rule{start: start, stop: end})
	}
	ticketRules[name] = rules
}

var raw = `class: 1-3 or 5-7
row: 6-11 or 33-44
seat: 13-40 or 45-50

your ticket:
7,1,14

nearby tickets:
7,3,47
40,4,50
55,2,20
38,6,12`
package services

import (
	"context"
	"sync"

	"github.com/golang/protobuf/ptypes/empty"
	"github.com/tppgit/we_service/core"
)

var (
	// Server is the process-wide WeService singleton.
	Server *WeService
	// loadOnceServer guards the one-time construction of Server.
	loadOnceServer sync.Once
)

// NewServer returns the WeService singleton, constructing it on first call.
func NewServer() *WeService {
	loadOnceServer.Do(func() {
		Server = &WeService{}
	})
	return Server
}

// WeService aggregates the individual gRPC service definitions; each embedded
// definition is wired up by the injector via its `inject` tag.
type WeService struct {
	UserServiceDefinition         `inject:"user_grpc"`
	ServiceServiceDefinition      `inject:"service_grpc"`
	OrderServiceDefinition        `inject:"order_grpc"`
	ResidentServiceDefinition     `inject:"resident_grpc"`
	CloudMessageServiceDefinition `inject:"cloud_message_grpc"`
	PaymentServiceDefinition      `inject:"payment_grpc"`
}

// Version reports the service's version string.
func (WeService) Version(ctx context.Context, in *empty.Empty) (*core.AppVersion, error) {
	return &core.AppVersion{Value: "0.1"}, nil
}
package pkg

// GemMetadata holds metadata extracted from a Ruby gem's gemspec. The
// mapstructure tags drive decoding from the parsed gemspec data; the json
// tags drive serialized output.
type GemMetadata struct {
	Name     string   `mapstructure:"name" json:"name"`         // gem name
	Version  string   `mapstructure:"version" json:"version"`   // gem version string
	Files    []string `mapstructure:"files" json:"files"`       // files shipped with the gem
	Authors  []string `mapstructure:"authors" json:"authors"`   // author names
	Licenses []string `mapstructure:"licenses" json:"licenses"` // license identifiers
	Homepage string   `mapstructure:"homepage" json:"homepage"` // project homepage URL
}
package client

import (
	"reflect"
	"testing"

	"github.com/naruta/terraform-provider-kintone/kintone/raw_client"
)

// TestFieldPropertyMapper round-trips raw field properties through
// PropertyToField/FieldToProperty and checks that unknown types error out.
func TestFieldPropertyMapper(t *testing.T) {
	cases := []struct {
		title         string
		property      raw_client.FieldProperty
		shouldBeError bool
	}{
		{
			title: "SINGLE_LINE_TEXT",
			property: raw_client.FieldProperty{
				Type:  "SINGLE_LINE_TEXT",
				Code:  "text-1",
				Label: "🍣🍺",
			},
		},
		{
			title: "MULTI_LINE_TEXT",
			property: raw_client.FieldProperty{
				Type:  "MULTI_LINE_TEXT",
				Code:  "text-2",
				Label: "🍣🍺",
			},
		},
		{
			title: "NUMBER",
			property: raw_client.FieldProperty{
				Type:  "NUMBER",
				Code:  "number-1",
				Label: "🍣🍺",
			},
		},
		{
			title: "Unknown type",
			property: raw_client.FieldProperty{
				Type:  "ABCDEFG",
				Code:  "xxx-1",
				Label: "🍣🍺",
			},
			shouldBeError: true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.title, func(t *testing.T) {
			mapper := fieldPropertyMapper{}
			field, err := mapper.PropertyToField(&tc.property)
			if tc.shouldBeError {
				if err == nil {
					t.Fatalf("expected: error, actual: no errors")
				}
				return
			}
			if err != nil {
				t.Fatalf("error: %+v", err)
			}
			// The round trip must reproduce the original property exactly.
			roundTripped := mapper.FieldToProperty(field)
			if !reflect.DeepEqual(roundTripped, tc.property) {
				t.Fatalf("property != tt.property: property=%+v, tt.property=%+v", roundTripped, tc.property)
			}
		})
	}
}
package main

import (
	"bufio"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"regexp"
	"strings"

	"gopkg.in/yaml.v2"
)

var shortenKind *bool

// findLowerCase matches runs of lowercase letters; stripping them shortens
// a Kind like "ConfigMap" to "CM".
var findLowerCase = regexp.MustCompile("[a-z]*")

func init() {
	shortenKind = flag.Bool("shorten", false, "Shorten the Kind used in the filename")
	flag.Parse()
}

// main splits the YAML stream on "---" document separators and writes each
// manifest to <kind>-<name>.yaml. Errors were previously silently ignored;
// they are now logged (parse failures skip the manifest).
func main() {
	reader := bufio.NewScanner(os.Stdin)
	reader.Split(splitOnDashes)

	for reader.Scan() {
		manifest := reader.Text()
		filename, err := buildFilename(manifest)
		if err != nil {
			log.Printf("skipping manifest: %v", err)
			continue
		}
		if err := ioutil.WriteFile(filename, []byte(manifest), 0644); err != nil {
			log.Printf("writing %s: %v", filename, err)
		}
	}
	if err := reader.Err(); err != nil {
		log.Printf("reading stdin: %v", err)
	}
}

// buildFilename derives "<kind>-<name>.yaml" (lowercased) from a manifest's
// kind and metadata.name, returning any YAML parse error.
func buildFilename(manifest string) (string, error) {
	obj := manifestStub{}
	if err := yaml.Unmarshal([]byte(manifest), &obj); err != nil {
		return "", err
	}

	kind := obj.Kind
	if *shortenKind {
		kind = findLowerCase.ReplaceAllString(kind, "")
	}

	return strings.ToLower(fmt.Sprintf("%s-%s.yaml", kind, obj.Metadata.Name)), nil
}

// splitOnDashes is a bufio.SplitFunc that yields chunks separated by "---".
func splitOnDashes(data []byte, atEOF bool) (int, []byte, error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := strings.Index(string(data), "---"); i >= 0 {
		// Consume the separator but return only the chunk before it.
		return i + 3, data[0:i], nil
	}
	if atEOF {
		return len(data), data, nil
	}
	return 0, nil, nil
}

// manifestStub captures only the manifest fields needed to name the file.
type manifestStub struct {
	Kind     string
	Metadata manifestMetadata
}

type manifestMetadata struct {
	Name string
}
package dynamic func max(a, b int) int { if a > b { return a } return b } func min(a, b int) int { if a < b { return a } return b } func maxByte(a, b byte) byte { if a > b { return a } return b } func minByte(a, b byte) byte { if a < b { return a } return b } func minArray(a ...int) int { tmp := a[0] for i := 1; i < len(a); i ++ { tmp = min(tmp, a[i]) } return tmp } func maxArray(a ...int) int { tmp := a[0] for i := 1; i < len(a); i ++ { tmp = max(tmp, a[i]) } return tmp }
package main

/*
Given a positive integer, output its complement: flip every bit of its binary
representation. For example, 5 is 101 in binary, so its complement is 010,
which is 2.
*/

// findComplement (solution 1) simulates taking the complement: it walks the
// bits of num from least significant upward, flips each one, and assembles
// the result.
func findComplement(num int) int {
	ans := 0
	count := uint8(0)
	for num != 0 {
		ans = ans | (((num & 1) ^ 1) << count)
		num >>= 1
		count++
	}
	return ans
}

// findComplementByMask (solution 2) builds a mask of the same bit-length as
// num and XORs it with num. Renamed from findComplement: the original file
// declared two functions with the same name in one package, which does not
// compile.
func findComplementByMask(num int) int {
	mark := 0
	tmp := num
	for tmp != 0 {
		tmp >>= 1
		mark = (mark << 1) | 1
	}
	return mark ^ num
}

/*
Problem link:
https://leetcode-cn.com/problems/number-complement/submissions/
Number complement
*/
//go:build e2e_test // +build e2e_test // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package e2e import ( "context" "crypto/rand" "fmt" "net" "os" "strconv" "syscall" "testing" "time" "github.com/aws/amazon-vpc-cni-plugins/network/netns" "github.com/aws/amazon-vpc-cni-plugins/network/vpc" "github.com/containernetworking/cni/pkg/invoke" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vishvananda/netlink" ) const ( testENIName1 = "vpc-eni-test-1" // expected to exist on the host testENIName2 = "vpc-eni-test-2" // expected to exist on the host testENIName3 = "vpc-eni-test-3" // expected to exist on the host ifName = "eni-test-eth0" containerID = "contain-er" netConfFormat = ` { "type":"vpc-eni", "cniVersion":"1.0.0", "name":"eni-test", "eniName":"%s", "eniMACAddress":"%s", "eniIPAddresses":["%s", "%s"], "gatewayIPAddresses":["%s"], "useExistingNetwork":false, "blockInstanceMetadata":%s, "opState":true }` imdsEndpointIPv4 = "169.254.169.254/32" imdsEndpointIPv6 = "fd00:ec2::254/128" eniIPAddress1 = "166.0.0.2/16" eniIPAddress2 = "167.0.0.2/16" eniGatewayAddress = "166.0.0.1" ) type config struct { region string subnet string index int64 instanceID string securityGroups []string vpc string } // Tests Add and Del commands for vpc-eni plugin. 
func TestAddDel(t *testing.T) { testCases := []struct { name string testENIName string shouldPopulateENIName bool shouldBlockIMDS bool }{ {"without eni name", testENIName1, false, true}, {"with eni name", testENIName2, true, true}, {"allow imds", testENIName3, false, false}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { eniPluginPath := ensureCNIPluginExists(t) testLogDir := createTestLogsDir(t) defer cleanupLogsIfNeeded(t, testLogDir) logFileEnvVar := setLogFileEnvVar(t, testLogDir) defer os.Unsetenv(logFileEnvVar) logLevelEnvVar := setLogLevelEnvVar() defer os.Unsetenv(logLevelEnvVar) targetNS := createTestTargetNS(t) defer targetNS.Close() testENI := createTestInterface(t, tc.testENIName) defer deleteTestInterface(t, testENI.Attrs().HardwareAddr) testENILink, err := netlink.LinkByName(tc.testENIName) require.NoError(t, err, "test ENI not found: "+tc.testENIName) testENIMACAddress := testENILink.Attrs().HardwareAddr // Construct args to invoke the CNI plugin with execInvokeArgs := &invoke.Args{ Command: "ADD", ContainerID: containerID, NetNS: targetNS.GetPath(), IfName: ifName, Path: os.Getenv("CNI_PATH"), } netConfENIName := "" if tc.shouldPopulateENIName { netConfENIName = tc.testENIName } blockInstanceMetadata := "false" if tc.shouldBlockIMDS { blockInstanceMetadata = "true" } netConf := []byte(fmt.Sprintf(netConfFormat, netConfENIName, testENIMACAddress, eniIPAddress1, eniIPAddress2, eniGatewayAddress, blockInstanceMetadata)) t.Logf("Using config: %s", string(netConf)) // Invoke ADD command on the plugin err = invoke.ExecPluginWithoutResult(context.Background(), eniPluginPath, netConf, execInvokeArgs, nil) require.NoError(t, err) // Validate the target NetNS targetNS.Run(func() error { requireLinksCount(t, 2) // expecting lo and ENI requireInterface(t, ifName, testENIMACAddress) requireIPAddresses(t, testENIMACAddress, []string{eniIPAddress1, eniIPAddress2}) assertGatewayRoute(t, eniGatewayAddress) assertIMDSV4(t, tc.shouldBlockIMDS) 
assertIMDSV6(t, tc.shouldBlockIMDS) return nil }) // Invoke DEL command on the plugin execInvokeArgs.Command = "DEL" err = invoke.ExecPluginWithoutResult(context.Background(), eniPluginPath, netConf, execInvokeArgs, nil) require.NoError(t, err, "Unable to execute DEL command for vpc-eni plugin") // Validate the target NetNS targetNS.Run(func() error { // Validate that the ENI is no longer in the target netns _, err := netlink.LinkByName(ifName) assert.EqualError(t, err, "Link not found") return nil }) }) } } // Asserts that a gateway route exists and matces expected gateway address. func assertGatewayRoute(t *testing.T, expectedGatewayAddr string) { routes, err := netlink.RouteList(nil, netlink.FAMILY_V4) require.NoError(t, err, "Unable to list routes") var gatewayRoute netlink.Route for _, route := range routes { if route.Gw != nil && route.Dst == nil { gatewayRoute = route } } assert.Equal(t, gatewayRoute.Gw.String(), expectedGatewayAddr) } // Assertions for IMDS via IPv4 func assertIMDSV4(t *testing.T, shouldBeBlocked bool) { routes, err := netlink.RouteList(nil, netlink.FAMILY_V4) require.NoError(t, err, "Unable to list routes") var imdsRoute *netlink.Route for _, route := range routes { if route.Dst.String() == imdsEndpointIPv4 { imdsRoute = &route break } } if shouldBeBlocked { require.NotNil(t, imdsRoute, "IMDS v4 block route not found") assert.Equal(t, syscall.RTN_BLACKHOLE, imdsRoute.Type, "IMDS IPv4 route is not blocked") } else { assert.Nil(t, imdsRoute, "No route is expected for IMDS if it shouldn't be blocked") } } // Assertions for IMDS via IPv6 func assertIMDSV6(t *testing.T, shouldBeBlocked bool) { routes, err := netlink.RouteList(nil, netlink.FAMILY_V6) require.NoError(t, err, "Unable to list routes") var imdsRoute *netlink.Route for _, route := range routes { if route.Dst.String() == imdsEndpointIPv6 { imdsRoute = &route break } } if shouldBeBlocked { require.NotNil(t, imdsRoute, "IMDS v6 block route not found") assert.Equal(t, 
syscall.RTN_BLACKHOLE, imdsRoute.Type, "IMDS IPv6 route is not blocked") } else { assert.Nil(t, imdsRoute, "No route is expected for IMDS if it shouldn't be blocked") } } // Ensures that vpc-eni plugin executable is available. func ensureCNIPluginExists(t *testing.T) string { eniPluginPath, err := invoke.FindInPath("vpc-eni", []string{os.Getenv("CNI_PATH")}) require.NoError(t, err, "Unable to find eni plugin in path") return eniPluginPath } // Creates a temporary directory for storing plugin logs. func createTestLogsDir(t *testing.T) string { testLogDir, err := os.MkdirTemp("", "vpc-eni-cni-e2eTests-test-") err = os.Chmod(testLogDir, 0755) require.NoError(t, err, "Unable to create directory for storing test logs") return testLogDir } // Sets VPC_CNI_LOG_FILE environment variable to make plugin logs go to the // provided test log directory. func setLogFileEnvVar(t *testing.T, testLogDir string) string { varName := "VPC_CNI_LOG_FILE" os.Setenv(varName, fmt.Sprintf("%s/vpc-eni.log", testLogDir)) t.Logf("Using %s for test logs", testLogDir) return varName } // Sets VPC_CNI_LOG_LEVEL environment variable to debug so that debug logs are generated // by the plugin. func setLogLevelEnvVar() string { varName := "VPC_CNI_LOG_LEVEL" os.Setenv(varName, "debug") return varName } // Cleans up log files generated by the test unless ECS_PRESERVE_E2E_TEST_LOGS environment // variable is set to true. 
// cleanupLogsIfNeeded removes testLogDir unless the test failed or the
// ECS_PRESERVE_E2E_TEST_LOGS env var parses as true.
func cleanupLogsIfNeeded(t *testing.T, testLogDir string) {
	preserve, err := strconv.ParseBool(getEnvOrDefault("ECS_PRESERVE_E2E_TEST_LOGS", "false"))
	assert.NoError(t, err, "Unable to parse ECS_PRESERVE_E2E_TEST_LOGS env var")
	if !t.Failed() && !preserve {
		t.Logf("Removing test logs at %s", testLogDir)
		os.RemoveAll(testLogDir)
	} else {
		t.Logf("Preserving test logs at %s", testLogDir)
	}
}

// Creates a target netns for testing.
// The name carries a millisecond timestamp to avoid collisions across runs.
func createTestTargetNS(t *testing.T) netns.NetNS {
	targetNS, err := netns.NewNetNS(fmt.Sprintf("eni-test-ns-%d", time.Now().UnixMilli()))
	require.NoError(t, err, "Unable to create a target netns for testing")
	return targetNS
}

// getEnvOrDefault gets the value of an env var. It returns the fallback value
// if the env var is not set (or is set to the empty string).
func getEnvOrDefault(name string, fallback string) string {
	val := os.Getenv(name)
	if val == "" {
		return fallback
	}
	return val
}

// Requires that a given number of links are found in this netns.
func requireLinksCount(t *testing.T, count int) {
	links, err := netlink.LinkList()
	require.NoError(t, err, "Unable to list devices in target network namespace")
	assert.Len(t, links, count, "Incorrect number of devices discovered in taget network namespace")
}

// Requires that an interface of a provided Name and MAC Address exists in this netns.
func requireInterface(t *testing.T, ifName string, macAddress net.HardwareAddr) {
	eniLink, err := netlink.LinkByName(ifName)
	require.NoError(t, err, "ENI not found in target netns: "+ifName)
	require.Equal(t, macAddress.String(), eniLink.Attrs().HardwareAddr.String())
}

// Requires that IP addresses of the interface with the provided MAC Address match the
// provided expected IP addresses.
func requireIPAddresses(t *testing.T, macAddress net.HardwareAddr, expectedAddrs []string) {
	interfaces, err := net.Interfaces()
	require.NoError(t, err, "Failed to get interfaces")
	// NOTE(review): getInterfaceByMACAddress returns nil when no interface
	// matches; iface.Addrs() would then panic — confirm callers guarantee a match.
	iface := getInterfaceByMACAddress(macAddress, interfaces)
	addrs, err := iface.Addrs()
	require.NoError(t, err, "Failed to get addresses of interface: "+iface.Name)
	actualAddrs := []string{}
	for _, ip := range addrs {
		actualAddrs = append(actualAddrs, ip.String())
	}
	// Only containment is asserted: extra addresses on the interface do not fail.
	for _, ip := range expectedAddrs {
		assert.Contains(t, actualAddrs, ip)
	}
}

// getInterfaceByMACAddress returns the interface with the specified MAC address.
// Returns nil when no interface matches.
func getInterfaceByMACAddress(macAddress net.HardwareAddr, interfaces []net.Interface) *net.Interface {
	var chosenInterface *net.Interface
	// If there are multiple matches, pick the one with the shortest name.
	for i := 0; i < len(interfaces); i++ {
		iface := &interfaces[i]
		if vpc.CompareMACAddress(iface.HardwareAddr, macAddress) {
			if chosenInterface == nil || len(chosenInterface.Name) > len(iface.Name) {
				chosenInterface = iface
			}
		}
	}
	return chosenInterface
}

// Creates a test (dummy) network interface with a freshly generated random MAC.
func createTestInterface(t *testing.T, linkName string) netlink.Link {
	t.Log("Creating test ENI", linkName)
	macAddr, err := generateMACAddress()
	require.NoError(t, err, "Failed to generate MAC Address for test ENI")
	la := netlink.NewLinkAttrs()
	la.Name = linkName
	la.HardwareAddr = macAddr
	err = netlink.LinkAdd(&netlink.Dummy{LinkAttrs: la})
	require.NoError(t, err, "Failed to create test ENI")
	link, err := netlink.LinkByName(linkName)
	require.NoError(t, err, "Failed to find test ENI by name after creation")
	t.Log("Created test ENI", link)
	return link
}

// generateMACAddress generates a random MAC address.
func generateMACAddress() (net.HardwareAddr, error) {
	buf := make([]byte, 6)
	var mac net.HardwareAddr
	_, err := rand.Read(buf)
	if err != nil {
		return mac, err
	}
	// Set locally administered addresses bit and reset multicast bit
	buf[0] = (buf[0] | 0x02) & 0xfe
	mac = append(mac, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5])
	return mac, nil
}

// Deletes test ENI, looked up by its MAC address.
func deleteTestInterface(t *testing.T, macAddress net.HardwareAddr) {
	// Find the interface by MAC address
	t.Log("Looking up test ENI with mac address", macAddress)
	interfaces, err := net.Interfaces()
	require.NoError(t, err, "Failed to get interfaces")
	iface := getInterfaceByMACAddress(macAddress, interfaces)
	require.NotNil(t, iface, fmt.Sprintf(
		"An interface with mac address %s was not found: %v", macAddress, interfaces))
	t.Log("Found test ENI for deletion", iface.Name)
	// Delete the interface
	link, err := netlink.LinkByName(iface.Name)
	require.NoError(t, err, "Failed to find link to delete", iface.Name)
	err = netlink.LinkDel(link)
	require.NoError(t, err, "Failed to delete test ENI", iface.Name)
	t.Log("Deleted test ENI", link.Attrs().Name)
}
package movie

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	gomdb "github.com/eefret/go-imdb"
	"github.com/google/uuid"
	"github.com/ido50/sqlz"
	"github.com/jmoiron/sqlx"
	"github.com/labstack/gommon/log"
	"github.com/movieManagement/gen/models"
	"github.com/movieManagement/gen/restapi/operations/movie"
	ini "github.com/movieManagement/init"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const (
	// MovieTable is the movies table name with its query alias.
	MovieTable = "public.moviestbl as mv"
)

// movieReturnFields are the columns selected/returned for a movie row,
// COALESCE'd to safe defaults and aliased to SQLMovies field names.
var movieReturnFields = []string{
	"COALESCE(mv.createddate, '2019-01-01') as CreatedAt",
	"COALESCE(mv.title, '') as Title",
	"COALESCE(mv.rating, '') as Rating",
	"COALESCE(mv.releasedYear, '') as ReleasedYear",
	"COALESCE(mv.genres, '') as Genres",
	"COALESCE(mv.lastmodifieddate, '2019-01-01') as LastModifiedAt",
	"COALESCE(mv.sfid, '') as ID",
}

// Repository interface includes a list of supported repository operations
type Repository interface {
	CreateMovie(ctx context.Context, params *movie.CreateMovieParams) (*models.Movie, error)
	SearchMovies(ctx context.Context, params *movie.SearchMoviesParams) ([]*models.Movie, int64, error)
}

type repository struct {
	db *sqlx.DB
}

// NewRepository creates a new repository from the specified DB reference
func NewRepository(db *sqlx.DB) Repository {
	return &repository{
		db: db,
	}
}

// GetDB returns a reference to the underlying database connection
func (repo *repository) GetDB() *sqlx.DB {
	return repo.db
}

// CreateMovie inserts a movie row with a generated sfid and returns the
// created record. Returns a wrapped error when the insert fails.
func (repo *repository) CreateMovie(ctx context.Context, params *movie.CreateMovieParams) (*models.Movie, error) {
	logrus.Debugf("CreateMovie repo")
	sqlMovies := SQLMovies{}
	// Renamed from "uuid" to avoid shadowing the imported uuid package.
	newID := uuid.New().String()
	createMap := insertFields(params, repo)
	createMap["lastmodifieddate"] = sqlz.Indirect("now()::timestamp")
	createMap["createddate"] = sqlz.Indirect("now()::timestamp")
	createMap["sfid"] = newID
	err := sqlz.Newx(repo.db).
		InsertInto(MovieTable).
		ValueMap(createMap).
		Returning(movieReturnFields...).
		GetRow(&sqlMovies)
	if err != nil {
		// Bug fix: the error was previously logged (with a copy-pasted
		// "membership" message) and then swallowed, returning a zero-valued
		// movie. Propagate it to the caller instead.
		logrus.Errorf("error creating movie %v", err)
		return nil, errors.Wrap(err, "CreateMovie.Exec")
	}
	return sqlMovies.toMovie(), nil
}

// insertFields builds the column->value map for a movie insert from the
// request parameters, skipping empty/nil values.
func insertFields(params *movie.CreateMovieParams, repo *repository) map[string]interface{} {
	var genresDetails *string
	insertMap := make(map[string]interface{})
	addIfNotEmpty(insertMap, "title", params.Movie.Title)
	addIfNotEmpty(insertMap, "releasedYear", params.Movie.ReleasedYear)
	addIfNotEmpty(insertMap, "rating", params.Movie.Rating)
	if len(params.Movie.Genres) > 0 {
		// Genres are stored as a single comma-separated column.
		val := strings.Join(params.Movie.Genres[:], ",")
		genresDetails = &val
	}
	addIfNotNil(insertMap, "genres", genresDetails)
	return insertMap
}

// addIfNotEmpty simply adds the key/value pair if the key and value is not empty
func addIfNotEmpty(m map[string]interface{}, key string, value string) {
	if key != "" && value != "" {
		m[key] = value
	}
}

/*
// addIfNotNil simply adds the key/value pair if the key and value is not nil
func addIfNotNilBool(m map[string]interface{}, key string, value *bool) {
	if key != "" && value != nil {
		m[key] = *value
	}
}
*/

// addIfNotNil simply adds the key/value pair if the key and value is not nil
func addIfNotNil(m map[string]interface{}, key string, value *string) {
	if key != "" && value != nil {
		m[key] = *value
	}
}

// SearchMovies returns a list of movies based on the input
// parameters and security permissions
func (repo *repository) SearchMovies(ctx context.Context, params *movie.SearchMoviesParams) ([]*models.Movie, int64, error) {
	log.Debugf("entered function SearchMovies")
	code := "SearchMovies"
	movies, count, err := getMovies(ctx, params, repo, code)
	if err != nil {
		log.Error(err)
		return nil, 0, errors.Wrap(err, "SearchMovies.getMovies")
	}
	return movies, count, nil
}

// getMovies runs the filtered movie query. When no rows match and a title was
// supplied, it falls back to looking the title up on IMDB and persisting the
// result locally before returning it.
func getMovies(ctx context.Context, params *movie.SearchMoviesParams, repo *repository, code string) ([]*models.Movie, int64, error) {
	// NOTE(review): PageSize and Offset are dereferenced without nil checks;
	// presumably the generated swagger layer supplies defaults — confirm.
	pageSize, err := strconv.Atoi(*params.PageSize)
	if err != nil {
		log.Error(err)
		return nil, 0, errors.Wrap(err, fmt.Sprintf("%s.%s", code, "convertPageSize"))
	}
	offset, err := strconv.Atoi(*params.Offset)
	if err != nil {
		log.Error(err)
		return nil, 0, errors.Wrap(err, fmt.Sprintf("%s.%s", code, "convertOffset"))
	}

	// Normalize nil filters to "%" (meaning "no filter").
	var id, title, rating, year string
	if params.ID == nil {
		id = "%"
	} else {
		id = *params.ID
	}
	if params.Title == nil {
		title = "%"
	} else {
		title = *params.Title
	}
	if params.Rating == nil {
		rating = "%"
	} else {
		rating = *params.Rating
	}
	if params.Year == nil {
		year = "%"
	} else {
		year = *params.Year
	}

	var movieArray []*models.Movie
	sqlMovies := []SQLMovies{}
	conditions := []sqlz.WhereCondition{}
	if id != "%" {
		conditions = append(conditions, sqlz.Eq("mv.sfid", id))
	}
	if title != "%" {
		conditions = append(conditions, sqlz.Eq("mv.title", title))
	}
	if rating != "%" {
		conditions = append(conditions, sqlz.Eq("mv.rating", rating))
	}
	if year != "%" {
		conditions = append(conditions, sqlz.Eq("mv.releasedYear", year))
	}
	if len(params.Genres) != 0 {
		var genresList []interface{}
		for _, val := range params.Genres {
			genresList = append(genresList, val)
		}
		conditions = append(conditions, sqlz.In("mv.genres", genresList...))
	}

	query := sqlz.Newx(repo.GetDB()).
		Select(movieReturnFields...).
		From(MovieTable).
		Where(conditions...).
		Limit(int64(pageSize)).
		Offset(int64(offset))
	sql, b := query.ToSQL(true)
	log.Info(sql, b)

	count, errCount := query.GetCount()
	if errCount != nil {
		// Bug fix: previously this branch logged and wrapped the stale (nil)
		// "err" variable, which made errors.Wrap return nil and silently
		// swallowed the count failure.
		log.Error(errCount)
		return nil, 0, errors.Wrap(errCount, fmt.Sprintf("%s.%s", code, "GetCount"))
	}
	err = query.GetAll(&sqlMovies)
	if err != nil {
		log.Error(err)
		return nil, 0, errors.Wrap(err, fmt.Sprintf("%s.%s", code, "SelectQuery"))
	}
	for _, sqlMovie := range sqlMovies {
		var movieData = sqlMovie.toMovie()
		movieArray = append(movieArray, movieData)
	}

	// IMDB fallback: only when nothing was found locally AND a title was
	// actually supplied (guards the *params.Title dereference below).
	if len(movieArray) == 0 && params.Title != nil {
		imdb := ini.GetImdbInit()
		movieObject, err := imdb.MovieByTitle(&gomdb.QueryData{Title: *params.Title})
		if err != nil {
			log.Error(err)
			return nil, 0, errors.Wrap(err, fmt.Sprintf("%s.%s", code, "MovieByTitle"))
		}
		log.Debugf("movieObject %s", movieObject)
		if movieObject != nil {
			var in movie.CreateMovieParams
			in.Movie.Title = movieObject.Title
			in.Movie.Rating = movieObject.ImdbRating
			in.Movie.ReleasedYear = movieObject.Released
			in.Movie.Genres = []string{movieObject.Genre}
			createdMovie, err := repo.CreateMovie(ctx, &in)
			if err != nil {
				log.Error(err)
				return nil, 0, errors.Wrap(err, fmt.Sprintf("%s.%s", code, "CreateMovie"))
			}
			// Bug fix: the created movie was previously appended
			// unconditionally outside this block, so a nil entry was appended
			// whenever the fallback did not run.
			if createdMovie != nil {
				count = count + 1
				movieArray = append(movieArray, createdMovie)
			}
		}
	}
	return movieArray, count, nil
}
package main import ( "context" "flag" "net/http" "github.com/golang/glog" "github.com/grpc-ecosystem/grpc-gateway/runtime" "software/simple_test/pb_gen" ) func run() error { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() // Register gRPC server endpoint // Note: Make sure the gRPC server is running properly and accessible mux := runtime.NewServeMux() err := pb_gen.RegisterYourServiceHandlerServer(ctx, mux, newEchoServer()) if err != nil { return err } // Start HTTP server (and proxy calls to gRPC server endpoint) return http.ListenAndServe(":8081", mux) } func main() { flag.Parse() defer glog.Flush() if err := run(); err != nil { glog.Fatal(err) } }
package refImpl

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestInit exercises Init with an invalid (zero) config and a valid config
// whose signer cert/key are written to a temporary directory.
func TestInit(t *testing.T) {
	dir, err := ioutil.TempDir("", "gotesttmp")
	require.NoError(t, err)
	defer os.RemoveAll(dir)

	var cfg Config
	cfg = testSignerCerts(t, dir, cfg)

	type args struct {
		config Config
	}
	tests := []struct {
		name       string
		args       args
		wantServer bool
		wantErr    bool
	}{
		{
			// Zero-valued Config must be rejected.
			name:    "invalid",
			wantErr: true,
		},
		{
			name: "valid",
			args: args{
				config: Config{
					SignerCertificate:   cfg.SignerCertificate,
					SignerPrivateKey:    cfg.SignerPrivateKey,
					SignerValidDuration: time.Hour * 10,
					Addr:                ":1234",
				},
			},
			wantServer: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := Init(tt.args.config)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			if tt.wantServer {
				assert.NotEmpty(t, got)
			} else {
				assert.Empty(t, got)
			}
		})
	}
}

// testSignerCerts writes the test CA certificate and private key into dir and
// returns cfg with SignerCertificate/SignerPrivateKey pointing at those files.
func testSignerCerts(t *testing.T, dir string, cfg Config) Config {
	crt := filepath.Join(dir, "cert.crt")
	// Bug fix: failures were previously funneled through
	// "if err := ...; err != nil { assert.NoError(t, err) }", which records the
	// failure but lets the test continue with missing files. Use require so a
	// write failure stops the test immediately.
	err := ioutil.WriteFile(crt, IdentityIntermediateCA, 0600)
	require.NoError(t, err)

	crtKey := filepath.Join(dir, "cert.key")
	err = ioutil.WriteFile(crtKey, IdentityIntermediateCAKey, 0600)
	require.NoError(t, err)

	cfg.SignerCertificate = crt
	cfg.SignerPrivateKey = crtKey
	return cfg
}

// Test fixtures: a self-contained intermediate CA certificate and its EC
// private key (test-only material, safe to embed).
var (
	IdentityIntermediateCA = []byte(`-----BEGIN CERTIFICATE-----
MIIBczCCARmgAwIBAgIRANntjEpzu9krzL0EG6fcqqgwCgYIKoZIzj0EAwIwETEP
MA0GA1UEAxMGUm9vdENBMCAXDTE5MDcxOTIwMzczOVoYDzIxMTkwNjI1MjAzNzM5
WjAZMRcwFQYDVQQDEw5JbnRlcm1lZGlhdGVDQTBZMBMGByqGSM49AgEGCCqGSM49
AwEHA0IABKw1/6WHFcWtw67hH5DzoZvHgA0suC6IYLKms4IP/pds9wU320eDaENo
5860TOyKrGn7vW/cj/OVe2Dzr4KSFVijSDBGMA4GA1UdDwEB/wQEAwIBBjATBgNV
HSUEDDAKBggrBgEFBQcDATASBgNVHRMBAf8ECDAGAQH/AgEAMAsGA1UdEQQEMAKC
ADAKBggqhkjOPQQDAgNIADBFAiEAgPtnYpgwxmPhN0Mo8VX582RORnhcdSHMzFjh
P/li1WwCIFVVWBOrfBnTt7A6UfjP3ljAyHrJERlMauQR+tkD/aqm
-----END CERTIFICATE-----
`)
	IdentityIntermediateCAKey = []byte(`-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIPF4DPvFeiRL1G0ROd6MosoUGnvIG/2YxH0CbHwnLKxqoAoGCCqGSM49
AwEHoUQDQgAErDX/pYcVxa3DruEfkPOhm8eADSy4Lohgsqazgg/+l2z3BTfbR4No
Q2jnzrRM7Iqsafu9b9yP85V7YPOvgpIVWA==
-----END EC PRIVATE KEY-----
`)
)
package controller

import (
	"encoding/json"
	"errors"
	"fmt"
	"github.com/allentom/youcomic-api/auth"
	appconfig "github.com/allentom/youcomic-api/config"
	ApiError "github.com/allentom/youcomic-api/error"
	ApplicationError "github.com/allentom/youcomic-api/error"
	"github.com/allentom/youcomic-api/model"
	"github.com/allentom/youcomic-api/permission"
	"github.com/allentom/youcomic-api/serializer"
	"github.com/allentom/youcomic-api/services"
	"github.com/allentom/youcomic-api/utils"
	"github.com/allentom/youcomic-api/validate"
	"github.com/gin-gonic/gin"
	"github.com/jinzhu/copier"
	"github.com/jinzhu/gorm"
	"github.com/sirupsen/logrus"
	"mime/multipart"
	"net/http"
	"path"
	"path/filepath"
	"regexp"
	"strconv"
)

// CreateBookRequestBody is the payload for creating a book.
type CreateBookRequestBody struct {
	Name    string `form:"name" json:"name" xml:"name" binding:"required"`
	Library int    `form:"library" json:"library" xml:"library"`
}

// create book handler
//
// path: /books
//
// method: post
var CreateBookHandler gin.HandlerFunc = func(context *gin.Context) {
	var requestBody CreateBookRequestBody
	// DecodeJsonBody raises the API error itself on failure, so we just return.
	err := DecodeJsonBody(context, &requestBody)
	if err != nil {
		return
	}
	claims, err := auth.ParseAuthHeader(context)
	if err != nil {
		ApiError.RaiseApiError(context, ApplicationError.UserAuthFailError, nil)
		return
	}
	if hasPermission := permission.CheckPermissionAndServerError(context,
		&permission.StandardPermissionChecker{PermissionName: permission.CreateBookPermissionName, UserId: claims.UserId},
	); !hasPermission {
		return
	}
	// Book names must be unique.
	if isValidate := validate.RunValidatorsAndRaiseApiError(context,
		&validate.UniqBookNameValidator{Value: requestBody.Name},
	); !isValidate {
		return
	}
	err, book := services.CreateBook(requestBody.Name, uint(requestBody.Library))
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	//serializer response
	template := serializer.BaseBookTemplate{}
	RenderTemplate(context, &template, *book)
	context.JSON(http.StatusCreated, template)
}

// UpdateBookRequestBody is the payload for updating a book's name.
type UpdateBookRequestBody struct {
	Id   int
	Name string `form:"name" json:"name" xml:"name" binding:"required"`
}

// update book handler
//
// path: /book/:id
//
// method: patch
var UpdateBookHandler gin.HandlerFunc = func(context *gin.Context) {
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.RequestPathError, nil)
		return
	}
	claims, err := auth.ParseAuthHeader(context)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
		return
	}
	//check permission
	if hasPermission := permission.CheckPermissionAndServerError(context,
		&permission.StandardPermissionChecker{PermissionName: permission.UpdateBookPermissionName, UserId: claims.UserId},
	); !hasPermission {
		return
	}
	requestBody := UpdateBookRequestBody{}
	err = DecodeJsonBody(context, &requestBody)
	if err != nil {
		return
	}
	//validate
	if isValidate := validate.RunValidatorsAndRaiseApiError(context,
		&validate.StringLengthValidator{Value: requestBody.Name, LessThan: 256, GreaterThan: 0, FieldName: "BookName"},
	); !isValidate {
		return
	}
	book := &model.Book{}
	err = AssignUpdateModel(&requestBody, book)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	book.ID = uint(id)
	// Only the Name column is persisted by this handler.
	err = services.UpdateBook(book, "Name")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	// Re-read the book so the response reflects the stored state.
	err = services.GetBook(book)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	template := &serializer.BaseBookTemplate{}
	RenderTemplate(context, template, *book)
	context.JSON(http.StatusOK, template)
}

// get book list handler
//
// path: /books
//
// method: get
var BookListHandler gin.HandlerFunc = func(context *gin.Context) {
	//get page
	pagination := DefaultPagination{}
	pagination.Read(context)
	//get filter
	var books []model.Book
	queryBuilder := services.BooksQueryBuilder{}
	queryBuilder.SetPageFilter(pagination.Page, pagination.PageSize)
	// Each mapping routes a query parameter to a named filter method on the
	// query builder; Many indicates the parameter can repeat.
	filterMapping := []FilterMapping{
		{
			Lookup: "id",
			Method: "InId",
			Many:   true,
		},
		{
			Lookup: "name",
			Method: "SetNameFilter",
			Many:   true,
		},
		{
			Lookup: "order",
			Method: "SetOrderFilter",
			Many:   false,
		},
		{
			Lookup: "collection",
			Method: "SetCollectionQueryFilter",
			Many:   true,
		},
		{
			Lookup: "tag",
			Method: "SetTagQueryFilter",
			Many:   true,
		},
		{
			Lookup: "startTime",
			Method: "SetStartTimeQueryFilter",
			Many:   false,
		},
		{
			Lookup: "endTime",
			Method: "SetEndTimeQueryFilter",
			Many:   false,
		},
		{
			Lookup: "nameSearch",
			Method: "SetNameSearchQueryFilter",
			Many:   false,
		},
		{
			Lookup: "library",
			Method: "SetLibraryQueryFilter",
			Many:   true,
		},
	}
	for _, filter := range filterMapping {
		utils.FilterByParam(context, filter.Lookup, &queryBuilder, filter.Method, filter.Many)
	}
	count, err := queryBuilder.ReadModels(&books)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	with := context.GetStringSlice("with")
	result := serializer.SerializeMultipleTemplate(books, &serializer.BaseBookTemplate{}, map[string]interface{}{"with": with})
	responseBody := serializer.DefaultListContainer{}
	responseBody.SerializeList(result, map[string]interface{}{
		"page":     pagination.Page,
		"pageSize": pagination.PageSize,
		"count":    count,
		"url":      context.Request.URL,
	})
	context.JSON(http.StatusOK, responseBody)
}

// delete book handler
//
// path: /book/:id
//
// method: delete
var DeleteBookHandler gin.HandlerFunc = func(context *gin.Context) {
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.RequestPathError, nil)
		return
	}
	claims, err := auth.ParseAuthHeader(context)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
		return
	}
	//check permission
	if hasPermission := permission.CheckPermissionAndServerError(context,
		&permission.StandardPermissionChecker{PermissionName: permission.DeleteBookPermissionName, UserId: claims.UserId},
	); !hasPermission {
		return
	}
	//permanently delete permission check
	permanently := context.Query("permanently") == "true"
	if permanently {
		if hasPermission := permission.CheckPermissionAndServerError(context,
			&permission.StandardPermissionChecker{PermissionName: permission.PermanentlyDeleteBookPermissionName, UserId: claims.UserId},
		); !hasPermission {
			return
		}
	}
	book := &model.Book{}
	book.ID = uint(id)
	// NOTE(review): book is already a *model.Book, so &book passes a
	// **model.Book — confirm services.DeleteById expects an interface{} and
	// handles the double pointer.
	err = services.DeleteById(&book)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	if permanently {
		err = services.DeleteBookFile(uint(id))
		if err != nil {
			ApiError.RaiseApiError(context, err, nil)
			return
		}
	}
	ServerSuccessResponse(context)
}

// BatchRequestBody bundles create/update/delete operations for books.
type BatchRequestBody struct {
	Create []*CreateBookRequestBody `json:"create"`
	Update []*UpdateBookRequestBody `json:"update"`
	Delete []int                    `json:"delete"`
}

// books action handler
//
// path: /books/batch
//
// method: post
var BookBatchHandler gin.HandlerFunc = func(context *gin.Context) {
	requestBody := BatchRequestBody{}
	err := DecodeJsonBody(context, &requestBody)
	if err != nil {
		return
	}
	//create action
	claims, err := auth.ParseAuthHeader(context)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
		return
	}
	if hasPermission := permission.CheckPermissionAndServerError(context,
		&permission.StandardPermissionChecker{PermissionName: permission.CreateBookPermissionName, UserId: claims.UserId},
	); !hasPermission {
		return
	}
	booksToCreate := make([]model.Book, 0)
	for _, requestBook := range requestBody.Create {
		book := model.Book{}
		err = copier.Copy(&book, &requestBook)
		if err != nil {
			ApiError.RaiseApiError(context, err, nil)
			return
		}
		booksToCreate = append(booksToCreate, book)
	}
	err = services.CreateBooks(booksToCreate)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	//update
	if hasPermission := permission.CheckPermissionAndServerError(context,
		&permission.StandardPermissionChecker{PermissionName: permission.UpdateBookPermissionName, UserId: claims.UserId},
	); !hasPermission {
		return
	}
	booksToUpdate := make([]model.Book, 0)
	for _, updateBook := range requestBody.Update {
		book := model.Book{}
		err = AssignUpdateModel(&updateBook, &book)
		book.ID = uint(updateBook.Id)
		if err != nil {
			ApiError.RaiseApiError(context, err, nil)
			return
		}
		booksToUpdate = append(booksToUpdate, book)
	}
	err = services.UpdateBooks(booksToUpdate, "Name")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	//delete
	if hasPermission := permission.CheckPermissionAndServerError(context,
		&permission.StandardPermissionChecker{PermissionName: permission.DeleteBookPermissionName, UserId: claims.UserId},
	); !hasPermission {
		return
	}
	err = services.DeleteBooks(requestBody.Delete...)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	ServerSuccessResponse(context)
}

// AddTagToBookRequestBody lists tag IDs to attach to a book.
type AddTagToBookRequestBody struct {
	Tags []int `json:"tags"`
}

// BookTagBatch attaches a batch of existing tags to the book in the path.
var BookTagBatch gin.HandlerFunc = func(context *gin.Context) {
	var err error
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	requestBody := AddTagToBookRequestBody{}
	err = context.ShouldBindJSON(&requestBody)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	err = services.AddTagToBook(id, requestBody.Tags...)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	ServerSuccessResponse(context)
}

// SaveCover stores an uploaded cover image as "cover<ext>" inside the book's
// storage directory, returning the error (if any) and the saved file path.
func SaveCover(context *gin.Context, book model.Book, file *multipart.FileHeader) (error, string) {
	err, storePath := services.GetBookPath(book.Path, book.LibraryId)
	if err != nil {
		return err, ""
	}
	fileExt := filepath.Ext(file.Filename)
	coverImageFilePath := filepath.Join(storePath, fmt.Sprintf("cover%s", fileExt))
	err = context.SaveUploadedFile(file, coverImageFilePath)
	if err != nil {
		return err, ""
	}
	return nil, coverImageFilePath
}

// AddBookCover saves an uploaded "image" form file as the book's cover,
// generates a thumbnail, and updates the book record.
var AddBookCover gin.HandlerFunc = func(context *gin.Context) {
	var err error
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	form, err := context.MultipartForm()
	if form == nil {
		ApiError.RaiseApiError(context, errors.New("form not found"), nil)
		return
	}
	if _, isFileExistInForm := form.File["image"]; !isFileExistInForm {
		ApiError.RaiseApiError(context, errors.New("no such file in form"), nil)
		return
	}
	//update database
	book := model.Book{Model: gorm.Model{ID: uint(id)}}
	err = services.GetBook(&book)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	//get file from form
	fileHeader := form.File["image"][0]
	//save cover and generate thumbnail
	err, coverImageFilePath := SaveCover(context, book, fileHeader)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	coverThumbnailStorePath := filepath.Join(appconfig.Config.Store.Root, "generate", fmt.Sprintf("%d", book.ID))
	// NOTE(review): the error from GenerateCoverThumbnail is assigned to err
	// but never checked before being overwritten below — a failed thumbnail
	// generation is silently ignored; confirm whether that is intentional.
	_, err = services.GenerateCoverThumbnail(coverImageFilePath, coverThumbnailStorePath)
	// update cover
	book.Cover = filepath.Base(coverImageFilePath)
	err = services.UpdateModel(&book, "Cover")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	// render response
	template := &serializer.BaseBookTemplate{}
	RenderTemplate(context, template, book)
	context.JSON(http.StatusOK, template)
}

// AddBookPages stores uploaded form files named "page_<n>" as ordered pages of
// the book in the path and responds with the created page records.
var AddBookPages gin.HandlerFunc = func(context *gin.Context) {
	var err error
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	form, err := context.MultipartForm()
	if form == nil {
		context.JSON(http.StatusOK, "template")
		return
	}
	// Field names must match page_<order>.
	re, err := regexp.Compile(`^page_(\d+)$`)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	book, err := services.GetBookById(uint(id))
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	err, storePath := services.GetBookPath(book.Path, book.LibraryId)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	createPages := make([]model.Page, 0)
	for fileField, file := range form.File {
		if re.MatchString(fileField) {
			matchGroups := re.FindAllStringSubmatch(fileField, 1)
			if len(matchGroups) > 0 && len(matchGroups[0]) > 1 {
				// The captured digits give the page order.
				orderStr := matchGroups[0][1]
				order, err := strconv.Atoi(orderStr)
				if err != nil {
					ApiError.RaiseApiError(context, err, nil)
					return
				}
				//store
				storeFileHeader := file[0]
				fileExt := path.Ext(storeFileHeader.Filename)
				storeFileName := fmt.Sprintf("page_%d%s", order, fileExt)
				err = context.SaveUploadedFile(storeFileHeader, fmt.Sprintf("%s/%s", storePath, storeFileName))
				if err != nil {
					ApiError.RaiseApiError(context, err, nil)
					return
				}
				page := &model.Page{Path: storeFileName, Order: order, BookId: id}
				err = services.CreateModel(page)
				if err != nil {
					ApiError.RaiseApiError(context, err, nil)
					return
				}
				createPages = append(createPages, *page)
			}
		}
	}
	result := serializer.SerializeMultipleTemplate(createPages, &serializer.BasePageTemplate{}, nil)
	responseBody := serializer.DefaultListContainer{}
	responseBody.SerializeList(result, map[string]interface{}{
		"page":     1,
		"pageSize": len(createPages),
		"count":    len(createPages),
		"url":      context.Request.URL,
	})
	context.JSON(http.StatusOK, responseBody)
}

// GetBookTags responds with all tags attached to the book in the path.
var GetBookTags gin.HandlerFunc = func(context *gin.Context) {
	var err error
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	tags, err := services.GetBookTag(uint(id))
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	result := serializer.SerializeMultipleTemplate(tags, &serializer.BaseTagTemplate{}, nil)
	responseBody := serializer.DefaultListContainer{}
	responseBody.SerializeList(result, map[string]interface{}{
		"page":     1,
		"pageSize": len(tags),
		"count":    len(tags),
		"url":      context.Request.URL,
	})
	context.JSON(http.StatusOK, responseBody)
}

// DeleteBookTag detaches the tag in the path from the book in the path.
var DeleteBookTag gin.HandlerFunc = func(context *gin.Context) {
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	tagId, err := GetLookUpId(context, "tag")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	err = services.RemoveTagFromBook(uint(id), uint(tagId))
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	ServerSuccessResponse(context)
}

// UploadBookRequestBody is the multipart form payload for uploading a whole
// book; Tags and Pages carry JSON-encoded arrays as strings.
type UploadBookRequestBody struct {
	Name    string `form:"name"`
	Library string `form:"library"`
	Tags    string `form:"tags"`
	Pages   string `form:"pages"`
	Cover   string `form:"cover"`
}

// CreateBook handles a full book upload: creates the record, attaches tags,
// stores page images, and sets the cover from the uploaded files.
var CreateBook gin.HandlerFunc = func(context *gin.Context) {
	var requestBody UploadBookRequestBody
	err := context.ShouldBind(&requestBody)
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	libraryId, err := strconv.Atoi(requestBody.Library)
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	err, book := services.CreateBook(requestBody.Name, uint(libraryId))
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	// Tags arrive as a JSON-encoded array in the "tags" form field.
	tagToAdd := make([]*model.Tag, 0)
	err = json.Unmarshal([]byte(requestBody.Tags), &tagToAdd)
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	err = services.AddOrCreateTagToBook(book, tagToAdd)
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	//handle with pages
	// NOTE(review): the MultipartForm error is discarded and form is used
	// without a nil check — a non-multipart request would panic here; confirm
	// upstream middleware guarantees a multipart body.
	form, _ := context.MultipartForm()
	files := form.File["image"]
	pageFilenames := make([]string, 0)
	err = json.Unmarshal([]byte(requestBody.Pages), &pageFilenames)
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	for _, pageFilename := range pageFilenames {
		for pageIdx, file := range files {
			if pageFilename == file.Filename {
				storePath, err := SavePageFile(context, file, int(book.ID), pageIdx)
				if err != nil {
					logrus.Error(err)
					ApiError.RaiseApiError(context, err, nil)
					return
				}
				err = services.CreatePage(&model.Page{Order: pageIdx, Path: filepath.Base(storePath), BookId: int(book.ID)})
				if err != nil {
					logrus.Error(err)
					ApiError.RaiseApiError(context, err, nil)
					return
				}
			}
		}
	}
	for _, file := range files {
		if file.Filename == requestBody.Cover {
			//save cover
			err, coverPath := SaveCover(context, *book, file)
			if err != nil {
				logrus.Error(err)
				ApiError.RaiseApiError(context, err, nil)
				return
			}
			book.Cover = filepath.Base(coverPath)
			err = services.UpdateBook(book, "Cover")
			if err != nil {
				logrus.Error(err)
				ApiError.RaiseApiError(context, err, nil)
				return
			}
		}
	}
	template := &serializer.BaseBookTemplate{}
	RenderTemplate(context, template, *book)
	context.JSON(http.StatusOK, template)
}

// GetBook responds with a single book; when the "history" query parameter is
// the literal "True" (note the capital T) and the request carries a claim, a
// read-history record is added for the user.
var GetBook gin.HandlerFunc = func(context *gin.Context) {
	var err error
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.RequestPathError, nil)
		return
	}
	book := &model.Book{Model: gorm.Model{ID: uint(id)}}
	err = services.GetBook(book)
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	// add query history
	if context.Query("history") == "True" {
		userClaimsInterface, exist := context.Get("claim")
		if exist {
			claims := userClaimsInterface.(*auth.UserClaims)
			err = services.AddBookHistory(claims.UserId, uint(id))
			if err != nil {
				logrus.Error(err)
				ApiError.RaiseApiError(context, err, nil)
				return
			}
		}
	}
	template := &serializer.BaseBookTemplate{}
	RenderTemplate(context, template, *book)
	context.JSON(http.StatusOK, template)
}

// ImportLibraryRequestBody carries the filesystem path of a library to import.
type ImportLibraryRequestBody struct {
	LibraryPath string `form:"library_path" json:"library_path" xml:"library_path" binding:"required"`
}

// ImportLibraryHandler imports an on-disk library directory into the app.
var ImportLibraryHandler gin.HandlerFunc = func(context *gin.Context) {
	var requestBody ImportLibraryRequestBody
	err := DecodeJsonBody(context, &requestBody)
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	err = services.ImportLibrary(requestBody.LibraryPath)
	if err != nil {
		logrus.Error(err)
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	ServerSuccessResponse(context)
}
package main import ( "fmt" "time" ) func checkChannel(c chan bool) { select { case value := <-c: fmt.Println("received", value) return default: fmt.Println("waiting...") } } func main() { c := make(chan bool, 10) for i := 1; i <= 5; i++ { fmt.Println(i) time.Sleep(time.Second) go checkChannel(c) if i == 3 { fmt.Println("sending to channel...") c <- true } } }
package cli import ( "crypto/sha256" "encoding/base64" "encoding/hex" "fmt" rpcclient "github.com/tendermint/tendermint/rpc/client" "strings" "testing" ) func TestTxSearch(t *testing.T) { txStr := "nALZHnawCn4qlySsCjsKFDvFEHUEs4I/M1dfXMv4UZ0X4TtOEiMKCWlyaXMtYXR0bxIWMjA0MDkyODExMDAwMDAwMDAwMDAwMBI7ChR2IdpSv6fMUpP8LXkvm5zUrM7hgRIjCglpcmlzLWF0dG8SFjIwNDA5MjgxMTAwMDAwMDAwMDAwMDASJAofCglpcmlzLWF0dG8SEjEwMDAwMDAwMDAwMDAwMDAwMBCQThpwCibrWumHIQIANLPdNHAbKaKDVPNPhHSiH4jYKLHBio1R9VmYmWq0oBJAh2F4M4o512LL1ktxrYT/3bzgTQpJDaMtHojOsNTw3ygFo4R3joSH7NxGXxXD+m1brNZnqk9DYFvYEFsG5hPYphjAPiCDDw==" txBz, _ := base64.StdEncoding.DecodeString(txStr) txHashBz := sha256.Sum256(txBz) txHash := strings.ToUpper(hex.EncodeToString(txHashBz[:])) fmt.Println(txHash) rpc := rpcclient.NewHTTP("tcp://irisnet-rpc.rainbow.one:26657", "/websocket") rpc.Start() defer rpc.Stop() tx, err := rpc.Tx(txHashBz[:], false) if err != nil { fmt.Println(err.Error()) } fmt.Println(fmt.Sprintf("%v", tx)) }
package main

import (
	"flag"
	"github.com/zero-boilerplate/go-api-helpers/service"
	"log"
)

var (
	// serviceName is the OS-service name this script-wrapper registers under.
	serviceName = flag.String("name", "", "Name of this script-wrapper service")
)

func main() {
	// Turn any panic below into a fatal log entry so service managers see a
	// clean non-zero exit rather than a raw stack trace.
	defer func() {
		if r := recover(); r != nil {
			log.Fatalf("Service ERROR: %s", getStringFromRecovery(r))
		}
	}()

	a := &app{}

	// NOTE(review): flag.Parse() is never called in this file; presumably the
	// service package parses the command line itself (it owns ServiceFlag) —
	// confirm, otherwise *serviceName and flag.Args() are always empty.
	args := []string{}
	// Only validate inputs when a service sub-command (install/remove/...)
	// was requested via the service package's flag.
	if len(*service.ServiceFlag) > 0 {
		args = flag.Args()
		if *serviceName == "" {
			panic("The service name is required")
		}
		if *service.ServiceFlag == "install" {
			if len(args) == 0 {
				panic("The list of arguments cannot be empty")
			}
		}
	}

	// Forward our own name plus any trailing arguments to the managed
	// process so the installed service re-launches with the same identity.
	combinedArgs := []string{
		"-name", *serviceName,
	}
	combinedArgs = append(combinedArgs, args...)

	service.NewServiceRunnerBuilder(*serviceName, a).
		WithAdditionalArguments(combinedArgs...).
		WithOnStopHandler(a).
		WithServiceUserName_AsCurrentUser().
		Run()
}
package main

import "fmt"

// sendCh writes the constant 4 into a send-only view of a channel.
func sendCh(ch chan<- int) {
	ch <- 4
}

// recvCh takes one value from a receive-only view of a channel and prints it.
func recvCh(ch <-chan int) {
	n := <-ch
	fmt.Println("读取到:", n)
}

func main() {
	// A bidirectional channel; the helpers see it through one-way types.
	// Note: a send-only channel cannot be received from, and a receive-only
	// channel cannot be sent to — both are compile-time errors.
	ch := make(chan int)

	go sendCh(ch)
	recvCh(ch)
}
// Copyright 2016 IBM Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "io/ioutil" "os" "strings" "time" "fmt" "encoding/json" "github.com/Sirupsen/logrus" "github.com/amalgam8/registry/client" "github.com/amalgam8/sidecar/config" "github.com/amalgam8/sidecar/register" "github.com/amalgam8/sidecar/router/checker" "github.com/amalgam8/sidecar/router/clients" "github.com/amalgam8/sidecar/router/nginx" "github.com/amalgam8/sidecar/supervisor" "github.com/codegangsta/cli" ) func main() { // Initial logging until we parse the user provided log_level arg logrus.SetLevel(logrus.DebugLevel) logrus.SetOutput(os.Stderr) app := cli.NewApp() app.Name = "sidecar" app.Usage = "Amalgam8 Sidecar" app.Version = "0.1" app.Flags = config.TenantFlags app.Action = sidecarCommand err := app.Run(os.Args) if err != nil { logrus.WithError(err).Error("Failure running main") } } func sidecarCommand(context *cli.Context) { conf := config.New(context) if err := sidecarMain(*conf); err != nil { logrus.WithError(err).Error("Setup failed") } } func sidecarMain(conf config.Config) error { var err error logrus.SetLevel(conf.LogLevel) if err = conf.Validate(false); err != nil { logrus.WithError(err).Error("Validation of config failed") return err } if conf.Log { //Replace the LOGSTASH_REPLACEME string in filebeat.yml with //the value provided by the user //TODO: Make this configurable filebeatConf := "/etc/filebeat/filebeat.yml" filebeat, err := 
ioutil.ReadFile(filebeatConf) if err != nil { logrus.WithError(err).Error("Could not read filebeat conf") return err } fileContents := strings.Replace(string(filebeat), "LOGSTASH_REPLACEME", conf.LogstashServer, -1) err = ioutil.WriteFile("/tmp/filebeat.yml", []byte(fileContents), 0) if err != nil { logrus.WithError(err).Error("Could not write filebeat conf") return err } // TODO: Log failure? go supervisor.DoLogManagement("/tmp/filebeat.yml") } if conf.Proxy { if err = startProxy(&conf); err != nil { logrus.WithError(err).Error("Could not start proxy") } } if conf.Register { if err = conf.Validate(true); err != nil { logrus.WithError(err).Error("Validation of config failed") return err } logrus.Info("Registering") registryClient, err := client.New(client.Config{ URL: conf.Registry.URL, AuthToken: conf.Registry.Token, }) if err != nil { logrus.WithError(err).Error("Could not create registry client") return err } address := fmt.Sprintf("%v:%v", conf.EndpointHost, conf.EndpointPort) serviceInstance := &client.ServiceInstance{ ServiceName: conf.ServiceName, Endpoint: client.ServiceEndpoint{ Type: conf.EndpointType, Value: address, }, TTL: 60, } if conf.ServiceVersion != "" { data, err := json.Marshal(map[string]string{"version": conf.ServiceVersion}) if err == nil { serviceInstance.Metadata = data } else { logrus.WithError(err).Warn("Could not marshal service version metadata") } } agent, err := register.NewRegistrationAgent(register.RegistrationConfig{ Client: registryClient, ServiceInstance: serviceInstance, }) if err != nil { logrus.WithError(err).Error("Could not create registry agent") return err } agent.Start() } if conf.Supervise { supervisor.DoAppSupervision(conf.AppArgs) } else { select {} } return nil } func startProxy(conf *config.Config) error { var err error configBytes, err := ioutil.ReadFile("/etc/nginx/amalgam8.conf") if err != nil { logrus.WithError(err).Error("Missing /etc/nginx/amalgam8.conf") return err } configStr := string(configBytes) configStr 
= strings.Replace(configStr, "__SERVICE_NAME__", conf.ServiceName, -1) output, err := os.OpenFile("/etc/nginx/amalgam8.conf", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { logrus.WithError(err).Error("Couldn't open /etc/nginx/amalgam8.conf file for editing") return err } // Write the config fmt.Fprintf(output, configStr) output.Close() rc := clients.NewController(conf) nc := clients.NewNGINXClient("http://localhost:5813") nginx, err := nginx.NewNginx( nginx.Conf{ ServiceName: conf.ServiceName, Service: nginx.NewService(), Config: nginx.NewConfig(), Path: "/usr/bin/nginx.conf.tmpl", NGINXClient: nc, }, ) if err != nil { logrus.WithError(err).Error("Failed to initialize NGINX object") return err } err = checkIn(rc, conf) if err != nil { logrus.WithError(err).Error("Check in failed") return err } // for Kafka enabled tenants we should do both polling and listening if len(conf.Kafka.Brokers) != 0 { go func() { time.Sleep(time.Second * 10) logrus.Info("Attempting to connect to Kafka") var consumer checker.Consumer for { consumer, err = checker.NewConsumer(checker.ConsumerConfig{ Brokers: conf.Kafka.Brokers, Username: conf.Kafka.Username, Password: conf.Kafka.Password, ClientID: conf.Kafka.APIKey, Topic: "A8_NewRules", SASLEnabled: conf.Kafka.SASL, }) if err != nil { logrus.WithError(err).Error("Could not connect to Kafka, trying again . . .") time.Sleep(time.Second * 5) // TODO: exponential falloff? 
} else { break } } logrus.Info("Successfully connected to Kafka") listener := checker.NewListener(conf, consumer, rc, nginx) // listen to Kafka indefinitely if err := listener.Start(); err != nil { logrus.WithError(err).Error("Could not listen to Kafka") } }() } poller := checker.NewPoller(conf, rc, nginx) go func() { if err = poller.Start(); err != nil { logrus.WithError(err).Error("Could not poll Controller") } }() return nil } func getCredentials(controller clients.Controller) (clients.TenantCredentials, error) { for { creds, err := controller.GetCredentials() if err != nil { if isRetryable(err) { time.Sleep(time.Second * 5) continue } else { return creds, err } } return creds, err } } func registerWithProxy(controller clients.Controller, confNotValidErr error) error { if confNotValidErr != nil { // Config not valid, can't register logrus.WithError(confNotValidErr).Error("Validation of config failed") return confNotValidErr } for { err := controller.Register() if err != nil { if isRetryable(err) { time.Sleep(time.Second * 5) continue } else { return err } } return err } } func checkIn(controller clients.Controller, conf *config.Config) error { confNotValidErr := conf.Validate(true) creds, err := getCredentials(controller) if err != nil { // if id not found error if _, ok := err.(*clients.TenantNotFoundError); ok { logrus.Info("ID not found, registering with controller") err = registerWithProxy(controller, confNotValidErr) if err != nil { // tenant already exists, possible race condition in container group if _, ok = err.(*clients.ConflictError); ok { logrus.Warn("Possible race condition occurred during register") return nil } // unrecoverable error occurred registering with controller logrus.WithError(err).Error("Could not register with Controller") return err } // register succeeded return nil } // unrecoverable error occurred getting credentials from controller logrus.WithError(err).Error("Could not retrieve credentials") return err } if conf.ForceUpdate { // 
TODO } // if sidecar already has valid config do not need to set anything if confNotValidErr != nil { logrus.Info("Updating credentials with those from controller") conf.Kafka.APIKey = creds.Kafka.APIKey conf.Kafka.Brokers = creds.Kafka.Brokers conf.Kafka.Password = creds.Kafka.Password conf.Kafka.RestURL = creds.Kafka.RestURL conf.Kafka.SASL = creds.Kafka.SASL conf.Kafka.Username = creds.Kafka.User conf.Registry.Token = creds.Registry.Token conf.Registry.URL = creds.Registry.URL } return nil } func isRetryable(err error) bool { if _, ok := err.(*clients.ConnectionError); ok { return true } if _, ok := err.(*clients.NetworkError); ok { return true } if _, ok := err.(*clients.ServiceUnavailable); ok { return true } return false }
package bosh import ( "fmt" "github.com/cloudfoundry/bosh-bootloader/storage" yaml "gopkg.in/yaml.v2" ) type SSHKeyDeleter struct { } func NewSSHKeyDeleter() SSHKeyDeleter { return SSHKeyDeleter{} } func (SSHKeyDeleter) Delete(state storage.State) (storage.State, error) { var err error state.Jumpbox.Variables, err = deleteJumpboxSSHKey(state.Jumpbox.Variables) if err != nil { return storage.State{}, fmt.Errorf("Jumpbox variables: %s", err) } state.BOSH.Variables, err = deleteJumpboxSSHKey(state.BOSH.Variables) if err != nil { return storage.State{}, fmt.Errorf("BOSH variables: %s", err) } return state, nil } func deleteJumpboxSSHKey(varsString string) (string, error) { vars := make(map[string]interface{}) err := yaml.Unmarshal([]byte(varsString), &vars) if err != nil { return "", err } delete(vars, "jumpbox_ssh") newVars, err := yaml.Marshal(vars) if err != nil { return "", err // not tested } return string(newVars), nil }
package collectors import ( "encoding/json" "fmt" "os" "path/filepath" "strconv" "strings" "time" cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric" ) // running average power limit (RAPL) monitoring attributes for a zone type RAPLZoneInfo struct { // tags describing the RAPL zone: // * zone_name, subzone_name: e.g. psys, dram, core, uncore, package-0 // * zone_id: e.g. 0:1 (zone 0 sub zone 1) tags map[string]string energyFilepath string // path to a file containing the zones current energy counter in micro joules energy int64 // current reading of the energy counter in micro joules energyTimestamp time.Time // timestamp when energy counter was read maxEnergyRange int64 // Range of the above energy counter in micro-joules } type RAPLCollector struct { metricCollector config struct { // Exclude IDs for RAPL zones, e.g. // * 0 for zone 0 // * 0:1 for zone 0 subzone 1 ExcludeByID []string `json:"exclude_device_by_id,omitempty"` // Exclude names for RAPL zones, e.g. 
psys, dram, core, uncore, package-0 ExcludeByName []string `json:"exclude_device_by_name,omitempty"` } RAPLZoneInfo []RAPLZoneInfo meta map[string]string // default meta information } // Init initializes the running average power limit (RAPL) collector func (m *RAPLCollector) Init(config json.RawMessage) error { // Check if already initialized if m.init { return nil } var err error = nil m.name = "RAPLCollector" m.setup() m.parallel = true m.meta = map[string]string{ "source": m.name, "group": "energy", "unit": "Watt", } // Read in the JSON configuration if len(config) > 0 { err = json.Unmarshal(config, &m.config) if err != nil { cclog.ComponentError(m.name, "Error reading config:", err.Error()) return err } } // Configure excluded RAPL zones isIDExcluded := make(map[string]bool) if m.config.ExcludeByID != nil { for _, ID := range m.config.ExcludeByID { isIDExcluded[ID] = true } } isNameExcluded := make(map[string]bool) if m.config.ExcludeByName != nil { for _, name := range m.config.ExcludeByName { isNameExcluded[name] = true } } // readZoneInfo reads RAPL monitoring attributes for a zone given by zonePath // See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes readZoneInfo := func(zonePath string) (z struct { name string // zones name e.g. psys, dram, core, uncore, package-0 energyFilepath string // path to a file containing the zones current energy counter in micro joules energy int64 // current reading of the energy counter in micro joules energyTimestamp time.Time // timestamp when energy counter was read maxEnergyRange int64 // Range of the above energy counter in micro-joules ok bool // Are all information available? }) { // zones name e.g. 
psys, dram, core, uncore, package-0 foundName := false if v, err := os.ReadFile( filepath.Join(zonePath, "name")); err == nil { foundName = true z.name = strings.TrimSpace(string(v)) } // path to a file containing the zones current energy counter in micro joules z.energyFilepath = filepath.Join(zonePath, "energy_uj") // current reading of the energy counter in micro joules foundEnergy := false if v, err := os.ReadFile(z.energyFilepath); err == nil { // timestamp when energy counter was read z.energyTimestamp = time.Now() if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil { foundEnergy = true z.energy = i } } // Range of the above energy counter in micro-joules foundMaxEnergyRange := false if v, err := os.ReadFile( filepath.Join(zonePath, "max_energy_range_uj")); err == nil { if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil { foundMaxEnergyRange = true z.maxEnergyRange = i } } // Are all information available? z.ok = foundName && foundEnergy && foundMaxEnergyRange return } powerCapPrefix := "/sys/devices/virtual/powercap" controlType := "intel-rapl" controlTypePath := filepath.Join(powerCapPrefix, controlType) // Find all RAPL zones zonePrefix := filepath.Join(controlTypePath, controlType+":") zonesPath, err := filepath.Glob(zonePrefix + "*") if err != nil || zonesPath == nil { return fmt.Errorf("unable to find any zones under %s", controlTypePath) } for _, zonePath := range zonesPath { zoneID := strings.TrimPrefix(zonePath, zonePrefix) z := readZoneInfo(zonePath) if z.ok && !isIDExcluded[zoneID] && !isNameExcluded[z.name] { // Add RAPL monitoring attributes for a zone m.RAPLZoneInfo = append( m.RAPLZoneInfo, RAPLZoneInfo{ tags: map[string]string{ "id": zoneID, "zone_name": z.name, }, energyFilepath: z.energyFilepath, energy: z.energy, energyTimestamp: z.energyTimestamp, maxEnergyRange: z.maxEnergyRange, }) } // find all sub zones for the given zone subZonePrefix := filepath.Join(zonePath, 
controlType+":"+zoneID+":") subZonesPath, err := filepath.Glob(subZonePrefix + "*") if err != nil || subZonesPath == nil { continue } for _, subZonePath := range subZonesPath { subZoneID := strings.TrimPrefix(subZonePath, subZonePrefix) sz := readZoneInfo(subZonePath) if len(zoneID) > 0 && len(z.name) > 0 && sz.ok && !isIDExcluded[zoneID+":"+subZoneID] && !isNameExcluded[sz.name] { m.RAPLZoneInfo = append( m.RAPLZoneInfo, RAPLZoneInfo{ tags: map[string]string{ "id": zoneID + ":" + subZoneID, "zone_name": z.name, "sub_zone_name": sz.name, }, energyFilepath: sz.energyFilepath, energy: sz.energy, energyTimestamp: sz.energyTimestamp, maxEnergyRange: sz.maxEnergyRange, }) } } } if m.RAPLZoneInfo == nil { return fmt.Errorf("no running average power limit (RAPL) device found in %s", controlTypePath) } // Initialized cclog.ComponentDebug( m.name, "initialized", len(m.RAPLZoneInfo), "zones with running average power limit (RAPL) monitoring attributes") m.init = true return err } // Read reads running average power limit (RAPL) monitoring attributes for all initialized zones // See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes func (m *RAPLCollector) Read(interval time.Duration, output chan lp.CCMetric) { for i := range m.RAPLZoneInfo { p := &m.RAPLZoneInfo[i] // Read current value of the energy counter in micro joules if v, err := os.ReadFile(p.energyFilepath); err == nil { energyTimestamp := time.Now() if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil { energy := i // Compute average power (Δ energy / Δ time) energyDiff := energy - p.energy if energyDiff < 0 { // Handle overflow: // ( p.maxEnergyRange - p.energy ) + energy // = p.maxEnergyRange + ( energy - p.energy ) // = p.maxEnergyRange + diffEnergy energyDiff += p.maxEnergyRange } timeDiff := energyTimestamp.Sub(p.energyTimestamp) averagePower := float64(energyDiff) / float64(timeDiff.Microseconds()) y, err := lp.New( "rapl_average_power", 
p.tags, m.meta, map[string]interface{}{"value": averagePower}, energyTimestamp) if err == nil { output <- y } // Save current energy counter state p.energy = energy p.energyTimestamp = energyTimestamp } } } } // Close closes running average power limit (RAPL) metric collector func (m *RAPLCollector) Close() { // Unset flag m.init = false }
// Leap stub file // The package name is expected by the test program. package leap // testVersion should match the targetTestVersion in the test file. const testVersion = 3 // given a year and return whether that year is leap year // 1> if not century years, leap year is divisible by 4 // 2> if century years, leap year is divisible by 400 func IsLeapYear(year int) bool { if year%4 == 0 { if year%100 != 0 { return true } else if year%400 == 0 { return true } } return false }
package helper_test import ( "testing" "ms/sun/shared/helper" "fmt" ) func BenchmarkSqlManyDollars(b *testing.B) { s:=helper.SqlManyDollars(4, 10000, false) fmt.Println(s) for i := 0; i < b.N; i++ { helper.SqlManyDollars(4, 10000, true) } }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ddl import ( "fmt" "github.com/pingcap/errors" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/util/dbterror" ) // columnGenerationInDDL is a struct for validating generated columns in DDL. type columnGenerationInDDL struct { position int generated bool dependences map[string]struct{} } // verifyColumnGeneration is for CREATE TABLE, because we need verify all columns in the table. func verifyColumnGeneration(colName2Generation map[string]columnGenerationInDDL, colName string) error { attribute := colName2Generation[colName] if attribute.generated { for depCol := range attribute.dependences { attr, ok := colName2Generation[depCol] if !ok { err := dbterror.ErrBadField.GenWithStackByArgs(depCol, "generated column function") return errors.Trace(err) } if attr.generated && attribute.position <= attr.position { // A generated column definition can refer to other // generated columns occurring earlier in the table. err := dbterror.ErrGeneratedColumnNonPrior.GenWithStackByArgs() return errors.Trace(err) } } } return nil } // verifyColumnGenerationSingle is for ADD GENERATED COLUMN, we just need verify one column itself. 
func verifyColumnGenerationSingle(dependColNames map[string]struct{}, cols []*table.Column, position *ast.ColumnPosition) error { // Since the added column does not exist yet, we should derive it's offset from ColumnPosition. pos, err := findPositionRelativeColumn(cols, position) if err != nil { return errors.Trace(err) } // should check unknown column first, then the prior ones. for _, col := range cols { if _, ok := dependColNames[col.Name.L]; ok { if col.IsGenerated() && col.Offset >= pos { // Generated column can refer only to generated columns defined prior to it. return dbterror.ErrGeneratedColumnNonPrior.GenWithStackByArgs() } } } return nil } // checkDependedColExist ensure all depended columns exist and not hidden. // NOTE: this will MODIFY parameter `dependCols`. func checkDependedColExist(dependCols map[string]struct{}, cols []*table.Column) error { for _, col := range cols { if !col.Hidden { delete(dependCols, col.Name.L) } } if len(dependCols) != 0 { for arbitraryCol := range dependCols { return dbterror.ErrBadField.GenWithStackByArgs(arbitraryCol, "generated column function") } } return nil } // findPositionRelativeColumn returns a pos relative to added generated column position. func findPositionRelativeColumn(cols []*table.Column, pos *ast.ColumnPosition) (int, error) { position := len(cols) // Get the column position, default is cols's length means appending. // For "alter table ... add column(...)", the position will be nil. // For "alter table ... add column ... ", the position will be default one. if pos == nil { return position, nil } if pos.Tp == ast.ColumnPositionFirst { position = 0 } else if pos.Tp == ast.ColumnPositionAfter { var col *table.Column for _, c := range cols { if c.Name.L == pos.RelativeColumn.Name.L { col = c break } } if col == nil { return -1, dbterror.ErrBadField.GenWithStackByArgs(pos.RelativeColumn, "generated column function") } // Inserted position is after the mentioned column. 
position = col.Offset + 1 } return position, nil } // findDependedColumnNames returns a set of string, which indicates // the names of the columns that are depended by colDef. func findDependedColumnNames(schemaName model.CIStr, tableName model.CIStr, colDef *ast.ColumnDef) (generated bool, colsMap map[string]struct{}, err error) { colsMap = make(map[string]struct{}) for _, option := range colDef.Options { if option.Tp == ast.ColumnOptionGenerated { generated = true colNames := FindColumnNamesInExpr(option.Expr) for _, depCol := range colNames { if depCol.Schema.L != "" && schemaName.L != "" && depCol.Schema.L != schemaName.L { return false, nil, dbterror.ErrWrongDBName.GenWithStackByArgs(depCol.Schema.O) } if depCol.Table.L != "" && tableName.L != "" && depCol.Table.L != tableName.L { return false, nil, dbterror.ErrWrongTableName.GenWithStackByArgs(depCol.Table.O) } colsMap[depCol.Name.L] = struct{}{} } break } } return } // FindColumnNamesInExpr returns a slice of ast.ColumnName which is referred in expr. func FindColumnNamesInExpr(expr ast.ExprNode) []*ast.ColumnName { var c generatedColumnChecker expr.Accept(&c) return c.cols } // hasDependentByGeneratedColumn checks whether there are other columns depend on this column or not. func hasDependentByGeneratedColumn(tblInfo *model.TableInfo, colName model.CIStr) (bool, string, bool) { for _, col := range tblInfo.Columns { for dep := range col.Dependences { if dep == colName.L { return true, dep, col.Hidden } } } return false, "", false } func isGeneratedRelatedColumn(tblInfo *model.TableInfo, newCol, col *model.ColumnInfo) error { if newCol.IsGenerated() || col.IsGenerated() { // TODO: Make it compatible with MySQL error. 
msg := fmt.Sprintf("newCol IsGenerated %v, oldCol IsGenerated %v", newCol.IsGenerated(), col.IsGenerated()) return dbterror.ErrUnsupportedModifyColumn.GenWithStackByArgs(msg) } if ok, dep, _ := hasDependentByGeneratedColumn(tblInfo, col.Name); ok { msg := fmt.Sprintf("oldCol is a dependent column '%s' for generated column", dep) return dbterror.ErrUnsupportedModifyColumn.GenWithStackByArgs(msg) } return nil } type generatedColumnChecker struct { cols []*ast.ColumnName } func (*generatedColumnChecker) Enter(inNode ast.Node) (outNode ast.Node, skipChildren bool) { return inNode, false } func (c *generatedColumnChecker) Leave(inNode ast.Node) (node ast.Node, ok bool) { if x, ok := inNode.(*ast.ColumnName); ok { c.cols = append(c.cols, x) } return inNode, true } // checkModifyGeneratedColumn checks the modification between // old and new is valid or not by such rules: // 1. the modification can't change stored status; // 2. if the new is generated, check its refer rules. // 3. check if the modified expr contains non-deterministic functions // 4. check whether new column refers to any auto-increment columns. // 5. check if the new column is indexed or stored func checkModifyGeneratedColumn(sctx sessionctx.Context, schemaName model.CIStr, tbl table.Table, oldCol, newCol *table.Column, newColDef *ast.ColumnDef, pos *ast.ColumnPosition) error { // rule 1. oldColIsStored := !oldCol.IsGenerated() || oldCol.GeneratedStored newColIsStored := !newCol.IsGenerated() || newCol.GeneratedStored if oldColIsStored != newColIsStored { return dbterror.ErrUnsupportedOnGeneratedColumn.GenWithStackByArgs("Changing the STORED status") } // rule 2. originCols := tbl.Cols() var err error var colName2Generation = make(map[string]columnGenerationInDDL, len(originCols)) for i, column := range originCols { // We can compare the pointers simply. 
if column == oldCol { if pos != nil && pos.Tp != ast.ColumnPositionNone { i, err = findPositionRelativeColumn(originCols, pos) if err != nil { return errors.Trace(err) } } colName2Generation[newCol.Name.L] = columnGenerationInDDL{ position: i, generated: newCol.IsGenerated(), dependences: newCol.Dependences, } } else if !column.IsGenerated() { colName2Generation[column.Name.L] = columnGenerationInDDL{ position: i, generated: false, } } else { colName2Generation[column.Name.L] = columnGenerationInDDL{ position: i, generated: true, dependences: column.Dependences, } } } // We always need test all columns, even if it's not changed // because other can depend on it so its name can't be changed. for _, column := range originCols { var colName string if column == oldCol { colName = newCol.Name.L } else { colName = column.Name.L } if err := verifyColumnGeneration(colName2Generation, colName); err != nil { return errors.Trace(err) } } if newCol.IsGenerated() { // rule 3. if err := checkIllegalFn4Generated(newCol.Name.L, typeColumn, newCol.GeneratedExpr); err != nil { return errors.Trace(err) } // rule 4. _, dependColNames, err := findDependedColumnNames(schemaName, tbl.Meta().Name, newColDef) if err != nil { return errors.Trace(err) } if !sctx.GetSessionVars().EnableAutoIncrementInGenerated { if err := checkAutoIncrementRef(newColDef.Name.Name.L, dependColNames, tbl.Meta()); err != nil { return errors.Trace(err) } } // rule 5. 
if err := checkIndexOrStored(tbl, oldCol, newCol); err != nil { return errors.Trace(err) } } return nil } type illegalFunctionChecker struct { hasIllegalFunc bool hasAggFunc bool hasRowVal bool // hasRowVal checks whether the functional index refers to a row value hasWindowFunc bool hasNotGAFunc4ExprIdx bool hasCastArrayFunc bool disallowCastArrayFunc bool otherErr error } func (c *illegalFunctionChecker) Enter(inNode ast.Node) (outNode ast.Node, skipChildren bool) { switch node := inNode.(type) { case *ast.FuncCallExpr: // Blocked functions & non-builtin functions is not allowed _, isFunctionBlocked := expression.IllegalFunctions4GeneratedColumns[node.FnName.L] if isFunctionBlocked || !expression.IsFunctionSupported(node.FnName.L) { c.hasIllegalFunc = true return inNode, true } err := expression.VerifyArgsWrapper(node.FnName.L, len(node.Args)) if err != nil { c.otherErr = err return inNode, true } _, isFuncGA := variable.GAFunction4ExpressionIndex[node.FnName.L] if !isFuncGA { c.hasNotGAFunc4ExprIdx = true } case *ast.SubqueryExpr, *ast.ValuesExpr, *ast.VariableExpr: // Subquery & `values(x)` & variable is not allowed c.hasIllegalFunc = true return inNode, true case *ast.AggregateFuncExpr: // Aggregate function is not allowed c.hasAggFunc = true return inNode, true case *ast.RowExpr: c.hasRowVal = true return inNode, true case *ast.WindowFuncExpr: c.hasWindowFunc = true return inNode, true case *ast.FuncCastExpr: c.hasCastArrayFunc = c.hasCastArrayFunc || node.Tp.IsArray() if c.disallowCastArrayFunc && node.Tp.IsArray() { c.otherErr = expression.ErrNotSupportedYet.GenWithStackByArgs("Use of CAST( .. AS .. 
ARRAY) outside of functional index in CREATE(non-SELECT)/ALTER TABLE or in general expressions") return inNode, true } case *ast.ParenthesesExpr: return inNode, false } c.disallowCastArrayFunc = true return inNode, false } func (*illegalFunctionChecker) Leave(inNode ast.Node) (node ast.Node, ok bool) { return inNode, true } const ( typeColumn = iota typeIndex ) func checkIllegalFn4Generated(name string, genType int, expr ast.ExprNode) error { if expr == nil { return nil } var c illegalFunctionChecker expr.Accept(&c) if c.hasIllegalFunc { switch genType { case typeColumn: return dbterror.ErrGeneratedColumnFunctionIsNotAllowed.GenWithStackByArgs(name) case typeIndex: return dbterror.ErrFunctionalIndexFunctionIsNotAllowed.GenWithStackByArgs(name) } } if c.hasAggFunc { return dbterror.ErrInvalidGroupFuncUse } if c.hasRowVal { switch genType { case typeColumn: return dbterror.ErrGeneratedColumnRowValueIsNotAllowed.GenWithStackByArgs(name) case typeIndex: return dbterror.ErrFunctionalIndexRowValueIsNotAllowed.GenWithStackByArgs(name) } } if c.hasWindowFunc { return dbterror.ErrWindowInvalidWindowFuncUse.GenWithStackByArgs(name) } if c.otherErr != nil { return c.otherErr } if genType == typeIndex && c.hasNotGAFunc4ExprIdx && !config.GetGlobalConfig().Experimental.AllowsExpressionIndex { return dbterror.ErrUnsupportedExpressionIndex } if genType == typeColumn && c.hasCastArrayFunc { return expression.ErrNotSupportedYet.GenWithStackByArgs("Use of CAST( .. AS .. 
ARRAY) outside of functional index in CREATE(non-SELECT)/ALTER TABLE or in general expressions") } return nil } func checkIndexOrStored(tbl table.Table, oldCol, newCol *table.Column) error { if oldCol.GeneratedExprString == newCol.GeneratedExprString { return nil } if newCol.GeneratedStored { return dbterror.ErrUnsupportedOnGeneratedColumn.GenWithStackByArgs("modifying a stored column") } for _, idx := range tbl.Indices() { for _, col := range idx.Meta().Columns { if col.Name.L == newCol.Name.L { return dbterror.ErrUnsupportedOnGeneratedColumn.GenWithStackByArgs("modifying an indexed column") } } } return nil } // checkAutoIncrementRef checks if an generated column depends on an auto-increment column and raises an error if so. // See https://dev.mysql.com/doc/refman/5.7/en/create-table-generated-columns.html for details. func checkAutoIncrementRef(name string, dependencies map[string]struct{}, tbInfo *model.TableInfo) error { exists, autoIncrementColumn := infoschema.HasAutoIncrementColumn(tbInfo) if exists { if _, found := dependencies[autoIncrementColumn]; found { return dbterror.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs(name) } } return nil } // checkExpressionIndexAutoIncrement checks if an generated column depends on an auto-increment column and raises an error if so. func checkExpressionIndexAutoIncrement(name string, dependencies map[string]struct{}, tbInfo *model.TableInfo) error { exists, autoIncrementColumn := infoschema.HasAutoIncrementColumn(tbInfo) if exists { if _, found := dependencies[autoIncrementColumn]; found { return dbterror.ErrExpressionIndexCanNotRefer.GenWithStackByArgs(name) } } return nil }
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	mysql "github.com/pingcap/tidb/errno"
	parser_types "github.com/pingcap/tidb/parser/types"
	"github.com/pingcap/tidb/util/dbterror"
)

// const strings for ErrWrongValue
const (
	DateTimeStr  = "datetime"
	DateStr      = "date"
	TimeStr      = "time"
	TimestampStr = "timestamp"
)

var (
	// ErrInvalidDefault is returned when meeting an invalid default value.
	ErrInvalidDefault = parser_types.ErrInvalidDefault
	// ErrDataTooLong is returned when converting a string value that is longer than the field type length.
	ErrDataTooLong = dbterror.ClassTypes.NewStd(mysql.ErrDataTooLong)
	// ErrIllegalValueForType is returned when a value of a type is illegal.
	ErrIllegalValueForType = dbterror.ClassTypes.NewStd(mysql.ErrIllegalValueForType)
	// ErrTruncated is returned when data has been truncated during conversion.
	ErrTruncated = dbterror.ClassTypes.NewStd(mysql.WarnDataTruncated)
	// ErrOverflow is returned when data is out of range for a field type.
	ErrOverflow = dbterror.ClassTypes.NewStd(mysql.ErrDataOutOfRange)
	// ErrDivByZero is returned when a division by 0 occurs.
	ErrDivByZero = dbterror.ClassTypes.NewStd(mysql.ErrDivisionByZero)
	// ErrTooBigDisplayWidth is returned when the display width is out of range for a column.
	ErrTooBigDisplayWidth = dbterror.ClassTypes.NewStd(mysql.ErrTooBigDisplaywidth)
	// ErrTooBigFieldLength is returned when the column length is too big for a column.
	ErrTooBigFieldLength = dbterror.ClassTypes.NewStd(mysql.ErrTooBigFieldlength)
	// ErrTooBigSet is returned when there are too many strings for a column.
	ErrTooBigSet = dbterror.ClassTypes.NewStd(mysql.ErrTooBigSet)
	// ErrTooBigScale is returned when type DECIMAL/NUMERIC scale is bigger than mysql.MaxDecimalScale.
	ErrTooBigScale = dbterror.ClassTypes.NewStd(mysql.ErrTooBigScale)
	// ErrTooBigPrecision is returned when type DECIMAL/NUMERIC or DATETIME precision is bigger than mysql.MaxDecimalWidth or types.MaxFsp
	ErrTooBigPrecision = dbterror.ClassTypes.NewStd(mysql.ErrTooBigPrecision)
	// ErrBadNumber is returned when parsing an invalid binary decimal number.
	ErrBadNumber = dbterror.ClassTypes.NewStd(mysql.ErrBadNumber)
	// ErrInvalidFieldSize is returned when the precision of a column is out of range.
	ErrInvalidFieldSize = dbterror.ClassTypes.NewStd(mysql.ErrInvalidFieldSize)
	// ErrMBiggerThanD is returned when the precision is less than the scale.
	ErrMBiggerThanD = dbterror.ClassTypes.NewStd(mysql.ErrMBiggerThanD)
	// ErrWarnDataOutOfRange is returned when the value in a numeric column is outside the permissible range of the column data type.
	// See https://dev.mysql.com/doc/refman/5.5/en/out-of-range-and-overflow.html for details
	ErrWarnDataOutOfRange = dbterror.ClassTypes.NewStd(mysql.ErrWarnDataOutOfRange)
	// ErrDuplicatedValueInType is returned when an enum column has a duplicated value.
	ErrDuplicatedValueInType = dbterror.ClassTypes.NewStd(mysql.ErrDuplicatedValueInType)
	// ErrDatetimeFunctionOverflow is returned when a calculation in a datetime function causes overflow.
	ErrDatetimeFunctionOverflow = dbterror.ClassTypes.NewStd(mysql.ErrDatetimeFunctionOverflow)
	// ErrCastAsSignedOverflow is returned when a positive out-of-range integer is converted to its negative complement.
	ErrCastAsSignedOverflow = dbterror.ClassTypes.NewStd(mysql.ErrCastAsSignedOverflow)
	// ErrCastNegIntAsUnsigned is returned when a negative integer is cast to an unsigned int.
	ErrCastNegIntAsUnsigned = dbterror.ClassTypes.NewStd(mysql.ErrCastNegIntAsUnsigned)
	// ErrInvalidYearFormat is returned when the input is not a valid year format.
	ErrInvalidYearFormat = dbterror.ClassTypes.NewStd(mysql.ErrInvalidYearFormat)
	// ErrInvalidYear is returned when the input value is not a valid year.
	ErrInvalidYear = dbterror.ClassTypes.NewStd(mysql.ErrInvalidYear)
	// ErrTruncatedWrongVal is returned when data has been truncated during conversion.
	ErrTruncatedWrongVal = dbterror.ClassTypes.NewStd(mysql.ErrTruncatedWrongValue)
	// ErrInvalidWeekModeFormat is returned when the week mode is wrong.
	ErrInvalidWeekModeFormat = dbterror.ClassTypes.NewStd(mysql.ErrInvalidWeekModeFormat)
	// ErrWrongFieldSpec is returned when the column specifier is incorrect.
	ErrWrongFieldSpec = dbterror.ClassTypes.NewStd(mysql.ErrWrongFieldSpec)
	// ErrSyntax is returned when the syntax is not allowed.
	ErrSyntax = dbterror.ClassTypes.NewStdErr(mysql.ErrParse, mysql.MySQLErrName[mysql.ErrSyntax])
	// ErrWrongValue is returned when the input value is in a wrong format.
	ErrWrongValue = dbterror.ClassTypes.NewStdErr(mysql.ErrTruncatedWrongValue, mysql.MySQLErrName[mysql.ErrWrongValue])
	// ErrWrongValue2 is returned when the input value is in a wrong format.
	ErrWrongValue2 = dbterror.ClassTypes.NewStdErr(mysql.ErrWrongValue, mysql.MySQLErrName[mysql.ErrWrongValue])
	// ErrWrongValueForType is returned when the input value is in a wrong format for the function.
	ErrWrongValueForType = dbterror.ClassTypes.NewStdErr(mysql.ErrWrongValueForType, mysql.MySQLErrName[mysql.ErrWrongValueForType])
	// ErrPartitionStatsMissing is returned when the partition-level stats is missing and the build of global-level stats fails.
	// Putting this error here prevents `import cycle not allowed`.
	ErrPartitionStatsMissing = dbterror.ClassTypes.NewStd(mysql.ErrPartitionStatsMissing)
	// ErrPartitionColumnStatsMissing is returned when the partition-level column stats is missing and the build of global-level stats fails.
	// Putting this error here prevents `import cycle not allowed`.
	ErrPartitionColumnStatsMissing = dbterror.ClassTypes.NewStd(mysql.ErrPartitionColumnStatsMissing)
	// ErrIncorrectDatetimeValue is returned when the input value is in a wrong format for datetime.
	ErrIncorrectDatetimeValue = dbterror.ClassTypes.NewStd(mysql.ErrIncorrectDatetimeValue)
)
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package apply import ( "encoding/json" "fmt" "io" "time" "github.com/jonboulle/clockwork" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/jsonmergepatch" "k8s.io/apimachinery/pkg/util/mergepatch" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/cli-runtime/pkg/resource" oapi "k8s.io/kube-openapi/pkg/util/proto" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/scheme" "k8s.io/kubectl/pkg/util" "k8s.io/kubectl/pkg/util/openapi" ) const ( // maxPatchRetry is the maximum number of conflicts retry for during a patch operation before returning failure maxPatchRetry = 5 // backOffPeriod is the period to back off when apply patch results in error. backOffPeriod = 1 * time.Second // how many times we can retry before back off triesBeforeBackOff = 1 ) // Patcher defines options to patch OpenAPI objects. 
type Patcher struct {
	// Mapping and Helper identify and talk to the target resource on the server.
	Mapping *meta.RESTMapping
	Helper  *resource.Helper

	// Overwrite resolves conflicts between the modified and live configuration in favor of the modified one.
	Overwrite bool
	// BackOff is the clock used to sleep between conflict retries (injectable for tests).
	BackOff clockwork.Clock

	// Force deletes and re-creates the object when patching fails with a conflict/invalid error.
	Force       bool
	Cascade     bool
	Timeout     time.Duration
	GracePeriod int

	// If set, forces the patch against a specific resourceVersion
	ResourceVersion *string

	// Number of retries to make if the patch fails with conflict
	Retries int

	// OpenapiSchema, when non-nil, is preferred for computing strategic merge patch metadata.
	OpenapiSchema openapi.Resources
}

// newPatcher builds a Patcher from apply options for a single resource.
// The OpenAPI schema is only attached when the OpenAPI-patch feature is enabled.
func newPatcher(o *ApplyOptions, info *resource.Info, helper *resource.Helper) (*Patcher, error) {
	var openapiSchema openapi.Resources
	if o.OpenAPIPatch {
		openapiSchema = o.OpenAPISchema
	}

	return &Patcher{
		Mapping:       info.Mapping,
		Helper:        helper,
		Overwrite:     o.Overwrite,
		BackOff:       clockwork.NewRealClock(),
		Force:         o.DeleteOptions.ForceDeletion,
		Cascade:       o.DeleteOptions.Cascade,
		Timeout:       o.DeleteOptions.Timeout,
		GracePeriod:   o.DeleteOptions.GracePeriod,
		OpenapiSchema: openapiSchema,
		Retries:       maxPatchRetry,
	}, nil
}

// delete removes the named object using the patcher's cascade/grace-period settings.
func (p *Patcher) delete(namespace, name string) error {
	options := asDeleteOptions(p.Cascade, p.GracePeriod)
	_, err := p.Helper.DeleteWithOptions(namespace, name, &options)
	return err
}

// patchSimple computes a three-way patch (original annotation vs modified vs
// live object) and sends it to the server once. It returns the patch bytes and
// the patched object; conflict retry is handled by the caller (Patch).
func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) {
	// Serialize the current configuration of the object from the server.
	current, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
	if err != nil {
		return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf("serializing current configuration from:\n%v\nfor:", obj), source, err)
	}

	// Retrieve the original configuration of the object from the annotation.
	original, err := util.GetOriginalConfiguration(obj)
	if err != nil {
		return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf("retrieving original configuration from:\n%v\nfor:", obj), source, err)
	}

	var patchType types.PatchType
	var patch []byte
	var lookupPatchMeta strategicpatch.LookupPatchMeta
	var schema oapi.Schema
	createPatchErrFormat := "creating patch with:\noriginal:\n%s\nmodified:\n%s\ncurrent:\n%s\nfor:"

	// Create the versioned struct from the type defined in the restmapping
	// (which is the API version we'll be submitting the patch to)
	versionedObject, err := scheme.Scheme.New(p.Mapping.GroupVersionKind)
	switch {
	case runtime.IsNotRegisteredError(err):
		// Unregistered type (e.g. CRD): fall back to generic JSON merge patch,
		// guarded by preconditions on identity fields.
		patchType = types.MergePatchType
		preconditions := []mergepatch.PreconditionFunc{mergepatch.RequireKeyUnchanged("apiVersion"), mergepatch.RequireKeyUnchanged("kind"), mergepatch.RequireMetadataKeyUnchanged("name")}
		patch, err = jsonmergepatch.CreateThreeWayJSONMergePatch(original, modified, current, preconditions...)
		if err != nil {
			if mergepatch.IsPreconditionFailed(err) {
				return nil, nil, fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed")
			}
			return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err)
		}
	case err != nil:
		return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf("getting instance of versioned object for %v:", p.Mapping.GroupVersionKind), source, err)
	case err == nil:
		// Compute a three way strategic merge patch to send to server.
		patchType = types.StrategicMergePatchType

		// Try to use openapi first if the openapi spec is available and can successfully calculate the patch.
		// Otherwise, fall back to baked-in types.
		if p.OpenapiSchema != nil {
			if schema = p.OpenapiSchema.LookupResource(p.Mapping.GroupVersionKind); schema != nil {
				lookupPatchMeta = strategicpatch.PatchMetaFromOpenAPI{Schema: schema}
				if openapiPatch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite); err != nil {
					// Non-fatal: warn and fall through to the struct-based patch below.
					fmt.Fprintf(errOut, "warning: error calculating patch from openapi spec: %v\n", err)
				} else {
					patchType = types.StrategicMergePatchType
					patch = openapiPatch
				}
			}
		}

		if patch == nil {
			lookupPatchMeta, err = strategicpatch.NewPatchMetaFromStruct(versionedObject)
			if err != nil {
				return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err)
			}
			patch, err = strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite)
			if err != nil {
				return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err)
			}
		}
	}

	// Empty patch: nothing to change, return the live object unmodified.
	if string(patch) == "{}" {
		return patch, obj, nil
	}

	if p.ResourceVersion != nil {
		patch, err = addResourceVersion(patch, *p.ResourceVersion)
		if err != nil {
			return nil, nil, cmdutil.AddSourceToErr("Failed to insert resourceVersion in patch", source, err)
		}
	}

	patchedObj, err := p.Helper.Patch(namespace, name, patchType, patch, nil)
	return patch, patchedObj, err
}

// Patch tries to patch an OpenAPI resource. On success, returns the merge patch as well
// the final patched object. On failure, returns an error.
func (p *Patcher) Patch(current runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) {
	var getErr error
	patchBytes, patchObject, err := p.patchSimple(current, modified, source, namespace, name, errOut)
	if p.Retries == 0 {
		p.Retries = maxPatchRetry
	}
	// Retry only on conflicts, re-fetching the live object each time; start
	// sleeping between attempts once triesBeforeBackOff is exceeded.
	for i := 1; i <= p.Retries && errors.IsConflict(err); i++ {
		if i > triesBeforeBackOff {
			p.BackOff.Sleep(backOffPeriod)
		}
		current, getErr = p.Helper.Get(namespace, name, false)
		if getErr != nil {
			return nil, nil, getErr
		}
		patchBytes, patchObject, err = p.patchSimple(current, modified, source, namespace, name, errOut)
	}
	// Last resort when forcing: delete the object and create it from the
	// modified configuration.
	if err != nil && (errors.IsConflict(err) || errors.IsInvalid(err)) && p.Force {
		patchBytes, patchObject, err = p.deleteAndCreate(current, modified, namespace, name)
	}
	return patchBytes, patchObject, err
}

// deleteAndCreate force-replaces the object: it deletes it, waits for the
// deletion to complete, then creates it from the modified bytes. If creation
// fails it attempts to restore the original object, propagating both errors.
func (p *Patcher) deleteAndCreate(original runtime.Object, modified []byte, namespace, name string) ([]byte, runtime.Object, error) {
	if err := p.delete(namespace, name); err != nil {
		return modified, nil, err
	}
	// TODO: use wait
	if err := wait.PollImmediate(1*time.Second, p.Timeout, func() (bool, error) {
		if _, err := p.Helper.Get(namespace, name, false); !errors.IsNotFound(err) {
			return false, err
		}
		return true, nil
	}); err != nil {
		return modified, nil, err
	}
	versionedObject, _, err := unstructured.UnstructuredJSONScheme.Decode(modified, nil, nil)
	if err != nil {
		return modified, nil, err
	}
	createdObject, err := p.Helper.Create(namespace, true, versionedObject)
	if err != nil {
		// restore the original object if we fail to create the new one
		// but still propagate and advertise error to user
		recreated, recreateErr := p.Helper.Create(namespace, true, original)
		if recreateErr != nil {
			err = fmt.Errorf("An error occurred force-replacing the existing object with the newly provided one:\n\n%v.\n\nAdditionally, an error occurred attempting to restore the original object:\n\n%v", err, recreateErr)
		} else {
			createdObject = recreated
		}
	}
	return modified, createdObject, err
}

// addResourceVersion injects rv into the patch's metadata.resourceVersion so
// the server rejects the patch if the object has moved on.
func addResourceVersion(patch []byte, rv string) ([]byte, error) {
	var patchMap map[string]interface{}
	err := json.Unmarshal(patch, &patchMap)
	if err != nil {
		return nil, err
	}
	u := unstructured.Unstructured{Object: patchMap}
	a, err := meta.Accessor(&u)
	if err != nil {
		return nil, err
	}
	a.SetResourceVersion(rv)

	return json.Marshal(patchMap)
}
package controller

import (
	"fmt"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/volume"
)

// VolumeStats couples filesystem usage numbers with the pod/PVC they were
// collected for. Name holds the pod name (see parsePodVolumeStats).
type VolumeStats struct {
	FsStats
	Name      string
	PVCName   string
	Namespace string
}

// FsStats contains data about filesystem usage.
type FsStats struct {
	// The time at which these stats were updated.
	Time metav1.Time `json:"time"`
	// AvailableBytes represents the storage space available (bytes) for the filesystem.
	// +optional
	AvailableBytes *uint64 `json:"availableBytes,omitempty"`
	// CapacityBytes represents the total capacity (bytes) of the filesystems underlying storage.
	// +optional
	CapacityBytes *uint64 `json:"capacityBytes,omitempty"`
	// UsedBytes represents the bytes used for a specific task on the filesystem.
	// This may differ from the total bytes used on the filesystem and may not equal CapacityBytes - AvailableBytes.
	// e.g. For ContainerStats.Rootfs this is the bytes used by the container rootfs on the filesystem.
	// +optional
	UsedBytes *uint64 `json:"usedBytes,omitempty"`
	// InodesFree represents the free inodes in the filesystem.
	// +optional
	InodesFree *uint64 `json:"inodesFree,omitempty"`
	// Inodes represents the total inodes in the filesystem.
	// +optional
	Inodes *uint64 `json:"inodes,omitempty"`
	// InodesUsed represents the inodes used by the filesystem
	// This may not equal Inodes - InodesFree because this filesystem may share inodes with other "filesystems"
	// e.g. For ContainerStats.Rootfs, this is the inodes used only by that container, and does not count inodes used by other containers.
	InodesUsed *uint64 `json:"inodesUsed,omitempty"`
}

// volumesMetricProvider holds one statfs-based MetricsProvider per PVC mounted
// by a pod, keyed by PVC name.
type volumesMetricProvider struct {
	pod       *v1.Pod
	providers map[string]volume.MetricsProvider
}

// volumeStatCalculator periodically samples volume metrics for one pod and
// caches the most recent result in `latest` (an atomic.Value holding []VolumeStats).
type volumeStatCalculator struct {
	provider     *volumesMetricProvider
	jitterPeriod time.Duration
	pod          *v1.Pod
	stopChannel  chan struct{}
	startO       sync.Once
	stopO        sync.Once
	latest       atomic.Value
}

// newVolumesMetricProvider builds a metrics provider for every PVC referenced
// by the pod. It returns PVCNotFound / MountPointNotReady (sentinel errors
// defined elsewhere in this package) when a claim cannot be resolved or its
// mount path does not exist yet.
func newVolumesMetricProvider(cli *kubernetes.Clientset, pod *v1.Pod) (*volumesMetricProvider, error) {
	providers := make(map[string]volume.MetricsProvider)
	for _, vol := range pod.Spec.Volumes {
		if claim := vol.VolumeSource.PersistentVolumeClaim; claim != nil {
			klog.Infof("new pvc [%s] found for pod [%s/%s]", claim.ClaimName, pod.Namespace, pod.Name)
			pvc, err := cli.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(claim.ClaimName, metav1.GetOptions{})
			if err != nil {
				klog.Errorf("find pvc info from apiserver failed, err: %v", err)
				return nil, PVCNotFound
			}
			path := getPath(pod, pvc)
			// The kubelet may not have mounted the volume yet.
			if _, err := os.Stat(path); os.IsNotExist(err) {
				klog.Errorf("pod [%s/%s] is watched, but mountpoint for pvc [%s] is not created, which is %s", pod.Namespace, pod.Name, pvc.Name, path)
				return nil, MountPointNotReady
			}
			providers[pvc.Name] = volume.NewMetricsStatFS(path)
		}
	}
	p := &volumesMetricProvider{
		pod:       pod,
		providers: providers,
	}
	return p, nil
}

// newVolumeStatCalculator wires a calculator for the pod; call StartOnce to begin sampling.
func newVolumeStatCalculator(provider *volumesMetricProvider, jitterPeriod time.Duration, pod *v1.Pod) *volumeStatCalculator {
	return &volumeStatCalculator{
		provider:     provider,
		jitterPeriod: jitterPeriod,
		pod:          pod,
		stopChannel:  make(chan struct{}),
	}
}

// StartOnce starts pod volume calc that will occur periodically in the background until s.StopOnce is called
func (s *volumeStatCalculator) StartOnce() *volumeStatCalculator {
	s.startO.Do(func() {
		go wait.JitterUntil(func() {
			s.calcAndStoreStats()
		}, s.jitterPeriod, 1.0, true, s.stopChannel)
	})
	return s
}

// StopOnce stops background pod volume calculation. Will not stop a currently executing calculations until
// they complete their current iteration.
func (s *volumeStatCalculator) StopOnce() *volumeStatCalculator {
	s.stopO.Do(func() {
		close(s.stopChannel)
	})
	return s
}

// GetLatest returns the most recent cached stats; false when no sample has completed yet.
func (s *volumeStatCalculator) GetLatest() ([]VolumeStats, bool) {
	if result := s.latest.Load(); result == nil {
		return []VolumeStats{}, false
	} else {
		return result.([]VolumeStats), true
	}
}

// calcAndStoreStats calculates PodVolumeStats for a given pod and writes the result to the s.latest cache.
// If the pod references PVCs, the prometheus metrics for those are updated with the result.
func (s *volumeStatCalculator) calcAndStoreStats() {
	// Call GetMetrics on each Volume and copy the result to a new VolumeStats.FsStats
	volumesStats := make([]VolumeStats, 0)
	for pvcname, provider := range s.provider.providers {
		metric, err := provider.GetMetrics()
		if err != nil {
			// Skip this PVC this round; other volumes are still sampled.
			klog.Errorf("get metrics pvc [%s] of pod [%s/%s] failed, err: %s", pvcname, s.pod.Namespace, s.pod.Name, err)
			continue
		}
		volumeStats := s.parsePodVolumeStats(s.pod.Name, pvcname, s.pod.Namespace, metric)
		volumesStats = append(volumesStats, volumeStats)
	}
	// Store the new stats
	s.latest.Store(volumesStats)
}

// parsePodVolumeStats converts (internal) volume.Metrics to (external) stats.VolumeStats structures
func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcName string, namespace string, metric *volume.Metrics) VolumeStats {
	available := uint64(metric.Available.Value())
	capacity := uint64(metric.Capacity.Value())
	used := uint64(metric.Used.Value())
	inodes := uint64(metric.Inodes.Value())
	inodesFree := uint64(metric.InodesFree.Value())
	inodesUsed := uint64(metric.InodesUsed.Value())
	return VolumeStats{
		Name:      podName,
		PVCName:   pvcName,
		Namespace: namespace,
		FsStats: FsStats{Time: metric.Time, AvailableBytes: &available, CapacityBytes: &capacity,
			UsedBytes: &used, Inodes: &inodes, InodesFree: &inodesFree, InodesUsed: &inodesUsed},
	}
}

// getPath builds the kubelet CSI mount path for the pod/PVC pair.
// NOTE(review): assumes the default kubelet root dir and a CSI volume plugin — confirm for this deployment.
func getPath(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) string {
	return fmt.Sprintf("/var/lib/kubelet/pods/%s/volumes/kubernetes.io~csi/%s/mount", pod.UID, pvc.Spec.VolumeName)
}
package main import ( "fmt" "io/ioutil" "log" ) func printTasks() { lc := lastestLeetCode() makeTasksFile(lc.Problems) } func makeTasksFile(problems problems) { content := "" for _, p := range problems { if !p.IsAccepted && p.IsAvailable { content = fmt.Sprintf("%d - #%d分 - %s - %s \n", p.ID, p.Difficulty, p.PassRate, p.Title) + content } } // 保存 taskFile 文件 err := ioutil.WriteFile(tasksFile, []byte(content), 0755) if err != nil { log.Fatal(err) } }
package main import ( "fmt" ) func getMessage() string { return "Go World" } // go run main.go func main() { var intro = "Message:" msg := getMessage() fmt.Println(intro, msg) }
package middleware import ( "github.com/alexliesenfeld/health" "net/http" ) // BasicAuth is a middleware that removes check details (such as service names, error messages, etc.) from the // HTTP response on authentication failure. Authentication is performed based on basic access authentication // (https://en.wikipedia.org/wiki/Basic_access_authentication). // // This is useful if you want to allow the aggregated // result to be visible to all clients, but provide details only to fully authenticated senders. // // Attention: To be able to prevent access altogether if authentication fails, consider using an // HTTP basic auth middleware instead. You can can easily use most such middleware implementations with the // Handler (e.g., https://github.com/99designs/basicauth-go). This libraries middleware (health.Middleware) // is only for pre- and post-processing results but not to deal with the HTTP request and response objects. func BasicAuth(username, password string) health.Middleware { return CustomAuth(func(r *http.Request) bool { reqUser, reqPassword, ok := r.BasicAuth() return ok && username == reqUser && password == reqPassword }) } // CustomAuth is a middleware that removes check details (such as service names, error messages, etc.) from the // HTTP response on authentication failure. To find out if authentication was successful, the provided function will be // executed (provided in argument 'authFunc'). // // This middleware is useful if you want to allow the aggregated result to be visible to all clients, but provide // details only to fully authenticated senders. // // Attention: To be able to prevent access altogether if authentication fails, consider using an // HTTP basic auth middleware instead. You can can easily use most such middleware implementations with the // Handler (e.g., https://github.com/99designs/basicauth-go). 
This libraries middleware (health.Middleware) // is only for pre- and post-processing results but not to deal with the HTTP request and response objects. func CustomAuth(authFunc func(r *http.Request) bool) health.Middleware { return func(next health.MiddlewareFunc) health.MiddlewareFunc { return func(r *http.Request) health.CheckerResult { authSuccess := authFunc(r) result := next(r) if !authSuccess { result.Details = nil } return result } } }
package files import ( "errors" "github.com/dgrijalva/jwt-go" "github.com/julienschmidt/httprouter" "github.com/sanato/sanato-lib/auth" "github.com/sanato/sanato-lib/config" "github.com/sanato/sanato-lib/storage" "net/http" "strings" ) func NewAPI(router *httprouter.Router, cp *config.ConfigProvider, ap *auth.AuthProvider, sp *storage.StorageProvider) (*API, error) { return &API{router, cp, ap, sp}, nil } type API struct { router *httprouter.Router configProvider *config.ConfigProvider authProvider *auth.AuthProvider storageProvider *storage.StorageProvider } func (api *API) Start() { api.router.Handle("GET", "/files_get/*path", api.get) api.router.Handle("PUT", "/files_put/*path", api.put) api.router.Handle("POST", "/files_delete/*path", api.delete) api.router.Handle("POST", "/files_mkcol/*path", api.mkcol) api.router.Handle("GET", "/files_stat/*path", api.stat) api.router.Handle("POST", "/files_rename", api.move) } func (api *API) basicAuth(r *http.Request) (*auth.AuthResource, error) { username, password, ok := r.BasicAuth() if !ok { return nil, errors.New("no basic auth provided") } authRes, err := api.authProvider.Authenticate(username, password) if err != nil { return nil, err } return authRes, err } // tokenAuth validate a token, if inHeader is true it picks the token from the Authorization header. 
// If not, it picks the token from the query parameter token func (api *API) tokenAuth(r *http.Request, inHeader bool) (*auth.AuthResource, error) { config, err := api.configProvider.Parse() if err != nil { return nil, err } if inHeader == true { tokenHeader := strings.Split(r.Header.Get("Authorization"), " ") if len(tokenHeader) < 2 { return nil, errors.New("no token auth header") } token, err := jwt.Parse(string(tokenHeader[1]), func(token *jwt.Token) (key interface{}, err error) { return []byte(config.TokenSecret), nil }) if err != nil { return nil, err } authRes := &auth.AuthResource{} authRes.Username = token.Claims["username"].(string) authRes.DisplayName = token.Claims["displayName"].(string) authRes.Email = token.Claims["email"].(string) return authRes, nil } else { tokenParam := r.URL.Query().Get("token") token, err := jwt.Parse(string(tokenParam), func(token *jwt.Token) (key interface{}, err error) { return []byte(config.TokenSecret), nil }) if err != nil { return nil, err } authRes := &auth.AuthResource{} authRes.Username = token.Claims["username"].(string) authRes.DisplayName = token.Claims["displayName"].(string) authRes.Email = token.Claims["email"].(string) return authRes, nil } }
package driver // RunningInSwarm expect to support running on swarm func RunningInSwarm() { }
package token type TokenType string type Token struct{ Type TokenType Value string } const ( EOF = "EOF" ILLEGAL ="ILLEGAL"//illegal token SEMICOLON = ";" LBRACKET ="["//left bracket RBRACKET ="]" LBRACE = "{" RBRACE = "}" LPAREN = "(" RPAREN = ")" //OPERATORS EQ = "==" //equal sign PL_EQ = "+=" //plus equal MI_EQ = "-= "//minus equal PP = "++" //plus plus MM = "--" //minus minus NE = "!=" //not equal MULT_EQ = "*=" DEVIDE_EQ = "/=" POWER_EQ = "*=" //logical operators AND = "&& "// and && OR = "||" //or || //conditional operators IF = "if"//if ELSE = "else"//else //for iteration FOR = "for"//for WHILE = "while"//while FOREACH = "foreach"//for each //trulability TRUE = "true" FALSE = "false" FUNCTION = "FUNCTION"//function LET = "LET"//a useless keyword which I saw in tutorial and it fucking impressed me)) //Operatos ASIGN = "=" PLUS = "+" MINUS = "-" MULTIPLY = "*" DIVISION = "/" NOT = "!" POWER = "^" //Identifiers IDENT = "IDENT" INT = "INT" STRING = "STRING" RETURN = "RETURN" COMMA = "," DOT = "." //comparators SMALLER = "<" BIGGER = ">" SMALLEREQUAL = "<=" BIGGEREQUAL = ">=" ) var keywords = map[string]TokenType{ "fn": FUNCTION, "let":LET, "if":IF, "else":ELSE, "while":WHILE, "for":FOR, "foreach":FOREACH, "true":TRUE, "false":FALSE, "return":RETURN} //check where a keyword is a token or return identifier if token not present func LookUpIdent(ident string) TokenType{ if tok, ok := keywords[ident];ok{ return tok } return IDENT }
package handler import ( "context" "encoding/json" "fmt" pb "github.com/jlb0906/micro-movie/aria2-srv/proto/aria2" aria2srv "github.com/jlb0906/micro-movie/aria2-srv/service/aria2" "github.com/jlb0906/micro-movie/basic/common" "github.com/jlb0906/micro-movie/movie-srv/proto/movie" "github.com/micro/go-micro/v2/client" "github.com/micro/go-micro/v2/errors" "github.com/micro/go-micro/v2/logger" "github.com/zyxar/argo/rpc" "sync" ) var ( m sync.RWMutex inited bool aria2Cli rpc.Client movieSrv movie.MovieService ) type Aria2 struct{} func (e *Aria2) AddURI(ctx context.Context, req *pb.AddURIReq, rsp *pb.AddURIRsp) error { logger.Info("Received Aria2.AddURI request") gid, err := aria2Cli.AddURI(req.Uri) if err != nil { logger.Error(err) msg := fmt.Sprintf("错误的请求 %v", err) rsp.Err = &pb.Error{ Code: 400, Detail: msg, } return errors.BadRequest(common.Aria2Srv, msg) } logger.Infof("添加了下载任务:%v", gid) rsp.Gid = gid return nil } func (e *Aria2) Remove(ctx context.Context, req *pb.RemoveReq, rsp *pb.RemoveRsp) error { logger.Info("Received Aria2.Remove request") gid, err := aria2Cli.Remove(req.Gid) if err != nil { logger.Error(err) msg := fmt.Sprintf("错误的请求 %v", err) rsp.Err = &pb.Error{ Code: 400, Detail: msg, } return errors.BadRequest(common.Aria2Srv, msg) } logger.Infof("删除了下载任务:%v", gid) rsp.Gid = "gid" return nil } func (e *Aria2) Pause(ctx context.Context, req *pb.PauseReq, rsp *pb.PauseRsp) error { logger.Info("Received Aria2.Pause request") gid, err := aria2Cli.Pause(req.Gid) if err != nil { logger.Error(err) msg := fmt.Sprintf("错误的请求 %v", err) rsp.Err = &pb.Error{ Code: 400, Detail: msg, } return errors.BadRequest(common.Aria2Srv, msg) } logger.Infof("暂停了下载任务:%v", gid) rsp.Gid = "gid" return nil } func (e *Aria2) TellStatus(ctx context.Context, req *pb.TellStatusReq, rsp *pb.TellStatusRsp) error { logger.Info("Received Aria2.TellStatus request") info, err := aria2Cli.TellStatus(req.Gid, req.Keys...) 
if err != nil { logger.Error(err) msg := fmt.Sprintf("错误的请求 %v", err) rsp.Err = &pb.Error{ Code: 400, Detail: msg, } return errors.BadRequest(common.Aria2Srv, msg) } logger.Infof("下载任务的状态:%v", info) data, _ := json.Marshal(info) rsp.Info = &pb.StatusInfo{} err = json.Unmarshal(data, rsp.Info) if err != nil { logger.Error(err) } return nil } func Init() { m.Lock() defer m.Unlock() if inited { logger.Warn(fmt.Sprint("[Init] handler 已经初始化过")) return } aria2Cli = aria2srv.GetAria2() movie.NewMovieService(common.MovieSrv, client.DefaultClient) inited = true }
package shortenertest import ( "testing" "github.com/toms1441/urlsh/internal/repo/plain" "github.com/toms1441/urlsh/internal/shortener" ) var ss shortener.Service var sr shortener.Repository var modelid, modelurl string func TestNewService(t *testing.T) { var err error ss, err = shortener.NewService(nil, shortener.Config{}) if err == nil { t.Fatalf("shortener.NewService == nil - should return 'repository is invalid'") } ss, err = shortener.NewService(sr, shortener.Config{}) if err == nil { t.Fatalf("shortener.NewService == nil - should return 'config is invalid'") } sr, err = plain.NewShortenerRepository() if err != nil { t.Fatalf("plain.NewShortenerRepository: %v", err) } ss, err = shortener.NewService(sr, validconfig) if err != nil { t.Fatalf("shortener.NewService: %v", err) } } func TestServiceNewShortener(t *testing.T) { model, err := ss.NewShortener("") if err == nil { t.Fatalf("ss.NewShortener == nil - should return validate.Struct error") } model, err = ss.NewShortener("invalid") if err == nil { t.Fatalf("ss.NewShortener == nil - should return invalid url") } model, err = ss.NewShortener("https://www.google.com") if err != nil { t.Fatalf("ss.NewShortener: %v", err) } modelid = model.ID modelurl = model.URLString } func TestServiceGetShortener(t *testing.T) { tempurl, err := ss.GetShortener(modelid) if err != nil { t.Fatalf("ss.GetShortener: %v", err) } if tempurl != modelurl { t.Fatal("tempurl != modelurl") } } func TestServiceRemoveShortener(t *testing.T) { err := ss.RemoveShortener(modelid) if err != nil { t.Fatalf("ss.RemoveShortener: %v", err) } _, err = ss.GetShortener(modelid) if err == nil { t.Fatalf("ss.GetShortener == nil, should return err") } }
package dcp // ServiceID is a single byte. type ServiceID byte // Known ids. const ( Get ServiceID = 1 Set ServiceID = 4 Identify ServiceID = 5 ) // ServiceType is a single byte. type ServiceType byte // Known types. const ( Request ServiceType = 0 Response ServiceType = 1 )
package cobra

// main is an empty placeholder.
// NOTE(review): this file declares package cobra, not package main, so this
// function is never a program entry point and is effectively dead code —
// confirm whether it can be removed or whether tooling generated it.
func main() {
}
package cors import ( "github.com/serverless/event-gateway/internal/zap" "github.com/serverless/event-gateway/metadata" "go.uber.org/zap/zapcore" ) // ID uniquely identifies a CORS configuration. type ID string // CORS is used to configure CORS on HTTP subscriptions. type CORS struct { Space string `json:"space" validate:"required,min=3,space"` ID ID `json:"corsId"` Method string `json:"method" validate:"eq=GET|eq=POST|eq=DELETE|eq=PUT|eq=PATCH|eq=HEAD|eq=OPTIONS"` Path string `json:"path" validate:"path"` AllowedOrigins []string `json:"allowedOrigins" validate:"min=1"` AllowedMethods []string `json:"allowedMethods" validate:"min=1"` AllowedHeaders []string `json:"allowedHeaders" validate:"min=1"` AllowCredentials bool `json:"allowCredentials"` Metadata metadata.Metadata `json:"metadata,omitempty"` } // CORSes is an array of CORS configurations. type CORSes []*CORS // MarshalLogObject is a part of zapcore.ObjectMarshaler interface func (c CORS) MarshalLogObject(enc zapcore.ObjectEncoder) error { enc.AddString("space", string(c.Space)) enc.AddString("corsId", string(c.ID)) enc.AddString("method", string(c.Method)) enc.AddString("path", string(c.Path)) enc.AddArray("allowedOrigins", zap.Strings(c.AllowedOrigins)) enc.AddArray("allowedMethods", zap.Strings(c.AllowedMethods)) enc.AddArray("allowedHeaders", zap.Strings(c.AllowedHeaders)) enc.AddBool("allowCredentials", c.AllowCredentials) return nil }
// DRUNKWATER TEMPLATE(add description and prototypes) // Question Title and Description on leetcode.com // Function Declaration and Function Prototypes on leetcode.com //150. Evaluate Reverse Polish Notation //Evaluate the value of an arithmetic expression in Reverse Polish Notation. //Valid operators are +, -, *, /. Each operand may be an integer or another expression. //Note: //Division between two integers should truncate toward zero. //The given RPN expression is always valid. That means the expression would always evaluate to a result and there won't be any divide by zero operation. //Example 1: //Input: ["2", "1", "+", "3", "*"] //Output: 9 //Explanation: ((2 + 1) * 3) = 9 //Example 2: //Input: ["4", "13", "5", "/", "+"] //Output: 6 //Explanation: (4 + (13 / 5)) = 6 //Example 3: //Input: ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"] //Output: 22 //Explanation: // ((10 * (6 / ((9 + 3) * -11))) + 17) + 5 //= ((10 * (6 / (12 * -11))) + 17) + 5 //= ((10 * (6 / -132)) + 17) + 5 //= ((10 * 0) + 17) + 5 //= (0 + 17) + 5 //= 17 + 5 //= 22 //func evalRPN(tokens []string) int { //} // Time Is Money
package bf

import (
	"bytes"
	"io"
	"reflect"
	"runtime"
	"testing"

	"github.com/rasky/gojit/amd64"
)

// helloWorld is the classic brainfuck program that prints "Hello World!\n".
var helloWorld = "++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++."

// dbfi is a brainfuck self-interpreter: it reads a program and its input
// (separated by '!') from its own input stream.
// http://esolangs.org/wiki/Dbfi
var dbfi = `
>>>+[[-]>>[-]++>+>+++++++[<++++>>++<-]++>>+>+>+++++[>++>++++++<<-]+>>>,<++[[>[
->>]<[>>]<<-]<[<]<+>>[>]>[<+>-[[<+>-]>]<[[[-]<]++<-[<+++++++++>[<->-]>>]>>]]<<
]<]<[[<]>[[>]>>[>>]+[<<]<[<]<+>>-]>[>]+[->>]<<<<[[<<]<[<]+<<[+>+<<-[>-->+<<-[>
+<[>>+<<-]]]>[<+>-]<]++>>-->[>]>>[>>]]<<[>>+<[[<]<]>[[<<]<[<]+[-<+>>-[<<+>++>-
[<->[<<+>>-]]]<[>+<-]>]>[>]>]>[>>]>>]<<[>>+>>+>>]<<[->>>>>>>>]<<[>.>>>>>>>]<<[
>->>>>>]<<[>,>>>]<<[>+>]<<[+<<]<]`

func TestCompile(t *testing.T) { testImplementation(t, Compile) }

func TestInterpret(t *testing.T) { testImplementation(t, Interpret) }

// testImplementation runs a table of brainfuck programs through the given
// implementation (Compile or Interpret), checking final memory, consumed
// input, and produced output.
func testImplementation(t *testing.T, prepare func([]byte, io.Reader, io.Writer) (func([]byte), error)) {
	cases := []struct {
		prog   string
		mem    []byte // expected leading memory cells (nil = don't check)
		rd, wr []byte // program input / expected output (nil = don't check)
	}{
		{"++", []byte{2, 0}, nil, nil},
		{"++---", []byte{0xff, 0}, nil, nil},
		{"+>+>+", []byte{1, 1, 1, 0}, nil, nil},
		{".", nil, nil, []byte{0}},
		{"++++.>++.>.", []byte{4, 2, 0}, nil, []byte{4, 2, 0}},
		{",", []byte{55}, []byte{55}, nil},
		{",>,>,>,,", []byte{55, 44, 33, 11}, []byte{55, 44, 33, 22, 11}, nil},
		{"+,", []byte{0}, []byte{}, nil},
		{"++++[-]", []byte{0}, nil, nil},
		{"++++[>+++<-]", []byte{0, 12}, nil, nil},
		{"+++[>+++[>+++<-]<-]", []byte{0, 0, 27}, nil, nil},
		{">+>+[<]", []byte{0, 1, 1}, nil, nil},
		{helloWorld, nil, nil, []byte("Hello World!\n")},
		{dbfi, nil, []byte(helloWorld + "!"), []byte("Hello World!\n")},
	}

	for _, tc := range cases {
		var rd io.Reader = bytes.NewBuffer(tc.rd)
		var wr io.Writer = &bytes.Buffer{}
		f, e := prepare([]byte(tc.prog), rd, wr)
		if e != nil {
			t.Errorf("Compile(%v): %s", tc.prog, e.Error())
			continue
		}
		// Collect garbage before running so a GC cycle cannot invalidate
		// the freshly generated code while it executes.
		runtime.GC()
		mem := make([]byte, 4096)
		f(mem)
		if tc.mem != nil && !bytes.Equal(mem[:len(tc.mem)], tc.mem) {
			t.Errorf("Compile(%s): %v != %v (expected)", tc.prog, mem, tc.mem)
		}
		if tc.wr != nil && !bytes.Equal(tc.wr, wr.(*bytes.Buffer).Bytes()) {
			t.Errorf("Compile(%s): output %v != %v (expected)",
				tc.prog, wr.(*bytes.Buffer).Bytes(), tc.wr)
		}
	}
}

// TestOptimize checks run-length coalescing of repeated opcodes; characters
// that are not brainfuck commands are dropped.
func TestOptimize(t *testing.T) {
	cases := []struct {
		prog string
		ops  []opcode
	}{
		{"+", []opcode{{'+', 1}}},
		{"+++++", []opcode{{'+', 5}}},
		{"++XX+++--<>+",
			[]opcode{{'+', 5}, {'-', 2}, {'<', 1}, {'>', 1}, {'+', 1}}},
	}
	for _, tc := range cases {
		got, _ := optimize([]byte(tc.prog))
		if !reflect.DeepEqual(got, tc.ops) {
			t.Errorf("Optimize(%s): got %v, expect %v", tc.prog, got, tc.ops)
		}
	}
}

// TestGC ensures compiled code remains callable across many GC cycles.
func TestGC(t *testing.T) {
	var rw bytes.Buffer
	prog, e := Compile([]byte(helloWorld), &rw, &rw)
	if e != nil {
		t.Fatalf("Compile: %s", e.Error())
	}
	var m runtime.MemStats
	for i := 0; i < 1000; i++ {
		runtime.GC()
		runtime.ReadMemStats(&m)
		mem := make([]byte, 2048)
		prog(mem)
	}
}

func BenchmarkCompileHello(b *testing.B) {
	var rw bytes.Buffer
	for i := 0; i < b.N; i++ {
		Compile([]byte(helloWorld), &rw, &rw)
	}
}

// benchmark compiles/prepares code once, then repeatedly runs it against a
// zeroed memory array, re-feeding the input each iteration.
func benchmark(b *testing.B, prepare func([]byte, io.Reader, io.Writer) (func([]byte), error), code, in []byte) {
	var r bytes.Buffer
	var w bytes.Buffer
	// BUG FIX: the writer was previously &r (input and output shared one
	// buffer); w was declared and Reset() each iteration but never used.
	prog, e := prepare(code, &r, &w)
	if e != nil {
		b.Fatalf("Compile: %s", e.Error())
	}
	mem := make([]byte, 4096)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := range mem { // was `for j, _ := range` — blank index dropped
			mem[j] = 0
		}
		r.Reset()
		if in != nil {
			r.Write(in)
		}
		w.Reset()
		prog(mem)
	}
}

// use_goabi / reset_abi switch the calling convention used by the compiler
// between benchmark variants; abi, amd64.GoABI and amd64.CgoABI come from
// the bf package internals.
func use_goabi() {
	abi = amd64.GoABI
}

func reset_abi() {
	abi = amd64.CgoABI
}

func BenchmarkCompiledHello(b *testing.B) {
	use_goabi()
	defer reset_abi()
	benchmark(b, Compile, []byte(helloWorld), nil)
}

func BenchmarkCompiledHelloCgo(b *testing.B) {
	benchmark(b, Compile, []byte(helloWorld), nil)
}

func BenchmarkInterpretHello(b *testing.B) {
	benchmark(b, Interpret, []byte(helloWorld), nil)
}

func BenchmarkCompiledDbfiHello(b *testing.B) {
	use_goabi()
	defer reset_abi()
	benchmark(b, Compile, []byte(dbfi), []byte(helloWorld+"!"))
}

func BenchmarkCompiledDbfiHelloCgo(b *testing.B) {
	benchmark(b, Compile, []byte(dbfi), []byte(helloWorld+"!"))
}

func BenchmarkInterpretDbfiHello(b *testing.B) {
	benchmark(b, Interpret, []byte(dbfi), []byte(helloWorld+"!"))
}
package v1alpha5 // HasInstanceType returns whether some node in the group fulfils the type check func HasInstanceType(nodeGroup *NodeGroup, hasType func(string) bool) bool { if hasType(nodeGroup.InstanceType) { return true } if nodeGroup.InstancesDistribution != nil { for _, instanceType := range nodeGroup.InstancesDistribution.InstanceTypes { if hasType(instanceType) { return true } } } return false } // HasNodegroup returns true if this clusterConfig contains a managed or un-managed nodegroup with the given name func (c *ClusterConfig) FindNodegroup(name string) *NodeGroup { for _, ng := range c.NodeGroups { if name == ng.NameString() { return ng } } return nil } // GetAllNodeGroupNames collects and returns names for both managed and unmanaged nodegroups func (c *ClusterConfig) GetAllNodeGroupNames() []string { var ngNames []string for _, ng := range c.NodeGroups { ngNames = append(ngNames, ng.NameString()) } for _, ng := range c.ManagedNodeGroups { ngNames = append(ngNames, ng.NameString()) } return ngNames }
package main

import (
	"fmt"
	"os"

	"github.com/jnewmano/advent2020/input"
)

// main solves AoC 2020 day 20 part 1: align all image tiles, then multiply
// the IDs of the four corner tiles.
func main() {
	// input.SetRaw(raw)

	var rawImages = input.LoadSliceString("\n\n")

	var images = make([]*Image, len(rawImages))
	for i, v := range rawImages {
		images[i] = parseImage(v)
	}

	// assume the first image is in the desired transformation and position
	// say rotate:0, flip: 0, (dy,dx) = (0,0)
	// once a transformation and position are set, they are fixed

	currentImage := 0
	c := images[0]
	c.Aligned = true

	// referenceImages records which images have already served as the
	// alignment reference, so the loop is guaranteed to terminate.
	referenceImages := make(map[int]bool)

	for {
		// find and set orientation for all images that line up with where
		// the image at position currentImage currently is
		for i, img := range images {
			if img.Aligned || i == currentImage {
				continue
			}
			current := images[currentImage]
			if alignImage(current, img) {
				img.Aligned = true
			}
		}

		referenceImages[currentImage] = true

		// BUG FIX: isDone was initialized to false and never set, so this
		// early exit could never fire; the loop only ended via the
		// referenceImages size check below. Now it exits as soon as every
		// image is aligned.
		isDone := true
		for _, v := range images {
			if !v.Aligned {
				isDone = false
				break
			}
		}
		if isDone {
			break
		}

		// advance to the next already-aligned image to use as a reference
		for i := 0; i < len(images); i++ {
			next := (currentImage + i) % len(images)
			if next == currentImage {
				continue
			}
			if images[next].Aligned {
				currentImage = next
				break
			}
		}

		if len(referenceImages) >= len(images) {
			break
		}
	}

	for _, v := range images {
		if !v.Aligned {
			fmt.Printf("ID: %d is not aligned\n", v.ID)
			os.Exit(1)
		}
	}

	// find min x, max x, min y, max y
	minX, maxX, minY, maxY := findCorners(images)

	{
		a := imageAtPosition(images, minX, minY)
		b := imageAtPosition(images, minX, maxY)
		c := imageAtPosition(images, maxX, minY)
		d := imageAtPosition(images, maxX, maxY)
		fmt.Println(a.ID * b.ID * c.ID * d.ID)
	}
}