text
stringlengths
11
4.05M
package chess type SquareSet struct { mask uint64 } func NewSquareSet(mask uint64) *SquareSet { return &SquareSet{mask} } func (s *SquareSet) Iter() <-chan int { ch := make(chan int) go func() { square := bitScan(s.mask, 0) for square != -1 { ch <- square square = bitScan(s.mask, square+1) } }() return ch }
package gopbox

import (
	"net/url"
	"unicode"

	"github.com/polluxxx/goauth2"
)

// Dropbox OAuth2 and API endpoint URLs.
const (
	AuthUrl    = "https://www.dropbox.com/1/oauth2/authorize"
	TokenUrl   = "https://api.dropbox.com/1/oauth2/token"
	MetaUrl    = "https://api.dropbox.com/1/"
	ContentUrl = "https://api-content.dropbox.com/1/"
)

// DropboxApi wraps an OAuth2 client configured for the Dropbox API.
type DropboxApi struct {
	Client *goauth2.Client
	Scope  string
}

// NewDropboxApi builds a DropboxApi for the given OAuth2 application
// credentials and redirect URL.
func NewDropboxApi(id, secret, redirectUrl string) (*DropboxApi, error) {
	a := goauth2.Api{
		AuthUrl:  AuthUrl,
		TokenUrl: TokenUrl,
	}
	c := goauth2.NewClient(id, secret, redirectUrl, &a)
	da := DropboxApi{
		Client: c,
	}
	return &da, nil
}

// GetAuthUrl returns the authorization URL the user should visit, with
// the given opaque state parameter attached.
func (dropbox *DropboxApi) GetAuthUrl(state string) (uri *url.URL, err error) {
	m := make(map[string]string)
	m["state"] = state
	return dropbox.Client.GetAuthUrl(m)
}

// FinalizeAuth exchanges the authorization code for an access token and
// constructs an Account from it.
func (dropbox *DropboxApi) FinalizeAuth(code string) (account *Account, err error) {
	token, err := dropbox.Client.Exchange(code)
	if err != nil {
		return nil, err
	}
	// Dropbox lowercases token_type ("bearer"); capitalize the first rune
	// so downstream OAuth2 code sees "Bearer".
	// Fix: guard against an empty Type, which previously panicked with an
	// index out of range on a[0].
	if token.Type != "" {
		a := []rune(token.Type)
		a[0] = unicode.ToUpper(a[0])
		token.Type = string(a)
	}
	account, err = NewAccount(token)
	if err != nil {
		return nil, err
	}
	return account, nil
}
package main

import "fmt"

func main() {
	x := []int{1, 2, 3, 4, 5}
	// Note: ranging over x[2:] restarts the index at 0 for the sub-slice,
	// so this prints (0,3) (1,4) (2,5).
	for i, v := range x[2:] {
		fmt.Println(i, v)
	}
}

// minArray returns the smallest value in numbers.
// It returns 0 for an empty slice.
// Fix: the previous version contained a stray `con` token (compile
// error), pointlessly reassigned the loop variable, and always
// returned 0 instead of the minimum.
func minArray(numbers []int) int {
	if len(numbers) == 0 {
		return 0
	}
	min := numbers[0]
	for _, v := range numbers[1:] {
		if v < min {
			min = v
		}
	}
	return min
}
package object import ( "ganymede/vector" ) // NewRectangleObject creates a new rectangle func NewRectangleObject(w float64, h float64, mass float64, position vector.Vector) Rectangle { return Rectangle{ vector.NewVector(w, h), NewGenericObject(mass, position, collisionBoundingBox), } } // Rectangle is an object with physical implementation for a 2D rectangle type Rectangle struct { dimensions vector.Vector GenericObject } // GetDimensions returns a dimensions tuple func (r Rectangle) GetDimensions() vector.Vector { return r.dimensions }
package tree

import (
	"fmt"
	"math/rand"
	"testing"
)

// TestCreateTree builds an AVL tree from a fixed pseudo-random
// permutation of 0..29 and prints it in several traversal orders.
func TestCreateTree(t *testing.T) {
	// Fix: use only the fixed seed so failures are reproducible. The
	// previous version immediately re-seeded with time.Now(), which
	// defeated NewSource(10) and made the test nondeterministic.
	r := rand.New(rand.NewSource(10))
	a := r.Perm(30)
	fmt.Println("create a slice:", a)
	var tree *AvlNode
	for _, v := range a {
		tree = Insert(tree, ElementType(v))
	}
	fmt.Println("tree created\nmidl read:")
	MidPrintTree(tree)
	fmt.Println("\nleve read:")
	LevePrintTree(tree)
	fmt.Println("\ndraw:")
	DrawTree1(tree)
	fmt.Printf("\nmin: %v, max:%v\n", tree.FinMin().Elem, tree.FinMax().Elem)
}
// Copyright 2020 Insolar Network Ltd. // All rights reserved. // This material is licensed under the Insolar License version 1.0, // available at https://github.com/insolar/block-explorer/blob/master/LICENSE.md. package load import ( "github.com/skudasov/loadgen" ) func CheckFromName(name string) loadgen.RuntimeCheckFunc { return nil }
package main

import (
	"bufio"
	"bytes"
	"flag"
	"os"
	"path/filepath"
	"testing"
)

var update = flag.Bool("update", false, "update .golden files")

// TestToJSON is a golden-file test: it renders ToJSON output into a
// buffer and compares it byte-for-byte against
// testdata/<test-name>.golden. Run with -update to regenerate the
// golden files instead of failing on mismatch.
func TestToJSON(t *testing.T) {
	testtable := []struct {
		tname string
	}{
		{
			tname: "ok",
		},
	}
	for _, tc := range testtable {
		t.Run(tc.tname, func(t *testing.T) {
			var b bytes.Buffer
			w := bufio.NewWriter(&b)
			if err := ToJSON(w); err != nil {
				t.Fatalf("failed writing json: %s", err)
			}
			// Flush the bufio layer so b holds everything ToJSON wrote.
			// Fix: the Flush error was previously ignored.
			if err := w.Flush(); err != nil {
				t.Fatalf("failed flushing writer: %s", err)
			}

			gp := filepath.Join("testdata", filepath.FromSlash(t.Name())+".golden")
			if *update {
				t.Log("update golden file")
				// os.WriteFile replaces the deprecated ioutil.WriteFile.
				if err := os.WriteFile(gp, b.Bytes(), 0644); err != nil {
					t.Fatalf("failed to update golden file: %s", err)
				}
			}
			g, err := os.ReadFile(gp)
			if err != nil {
				t.Fatalf("failed reading .golden: %s", err)
			}
			t.Log(b.String())
			if !bytes.Equal(b.Bytes(), g) {
				t.Errorf("bytes do not match .golden file")
			}
		})
	}
}
// Package log contains simple leveled logging implementation on top of stdlib logger. // NOTE: without "only stdlib" constraint I would use github.com/uber-go/zap for logging. package log import ( "encoding/json" "errors" "fmt" "io" "log" "os" "strconv" "strings" ) // Logger interface is subset of github.com/uber-common/bark.Logger methods. type Logger interface { Debug(args ...interface{}) Debugf(format string, args ...interface{}) Info(args ...interface{}) Infof(format string, args ...interface{}) Warn(args ...interface{}) Warnf(format string, args ...interface{}) Error(args ...interface{}) Errorf(format string, args ...interface{}) Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) Panic(args ...interface{}) Panicf(format string, args ...interface{}) WithFields(keyValues LogFields) Logger Fields() Fields } type LogFields interface { Fields() map[string]interface{} } type Fields map[string]interface{} func (f Fields) Fields() map[string]interface{} { return f } type Level int const ( DebugLevel Level = iota InfoLevel WarnLevel ErrorLevel FatalLevel ) func (l Level) String() string { switch l { case DebugLevel: return "DEBUG" case InfoLevel: return "INFO" case WarnLevel: return "WARN" case ErrorLevel: return "ERROR" case FatalLevel: return "FATAL" } panic("unexpected level: " + strconv.Itoa(int(l))) } var stringToLevel = func() map[string]Level { var levels = []Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, FatalLevel} res := make(map[string]Level, len(levels)) for _, l := range levels { res[l.String()] = l } return res }() func LevelFromString(s string) (l Level, err error) { var ok bool l, ok = stringToLevel[strings.ToUpper(s)] if !ok { err = errors.New("invalid level " + s) } return } const stdLoggerFlags = log.LstdFlags | log.Lmicroseconds | log.Lshortfile func NewLogger(l Level, w io.Writer) Logger { return NewLoggerSink(l, log.New(w, "", stdLoggerFlags)) } func NewLoggerSink(l Level, s Sink) Logger { return &logger{ sink: s, level: 
l, } } // logger is primitive stdlib log.Logger wrapper for more common interface. type logger struct { sink Sink level Level depth int fields Fields } func (l *logger) Fields() Fields { return l.fields } func (l *logger) WithFields(keyValues LogFields) Logger { copy := *l extraFields := keyValues.Fields() if copy.fields == nil { copy.fields = extraFields } else { copy.fields = make(Fields, len(l.fields)+len(extraFields)) for k, v := range l.fields { copy.fields[k] = v } for k, v := range extraFields { copy.fields[k] = v } } return &copy } func (l *logger) Debug(args ...interface{}) { l.log(DebugLevel, args...) } func (l *logger) Debugf(format string, args ...interface{}) { l.logf(DebugLevel, format, args...) } func (l *logger) Info(args ...interface{}) { l.log(InfoLevel, args...) } func (l *logger) Infof(format string, args ...interface{}) { l.logf(InfoLevel, format, args...) } func (l *logger) Warn(args ...interface{}) { l.log(WarnLevel, args...) } func (l *logger) Warnf(format string, args ...interface{}) { l.logf(WarnLevel, format, args...) } func (l *logger) Error(args ...interface{}) { l.log(ErrorLevel, args...) } func (l *logger) Errorf(format string, args ...interface{}) { l.logf(ErrorLevel, format, args...) } func (l *logger) Panic(args ...interface{}) { msg := fmt.Sprint(args...) l.log(ErrorLevel, msg) panic(msg) } func (l *logger) Panicf(format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) l.log(ErrorLevel, msg) panic(msg) } func (l *logger) Fatal(args ...interface{}) { l.log(FatalLevel, args...) os.Exit(1) } func (l *logger) Fatalf(format string, args ...interface{}) { l.logf(FatalLevel, format, args...) 
os.Exit(1) } type Sink interface { Output(callDepth int, s string) error } const initialLoggerCallDepth = 3 func (l *logger) log(lvl Level, args ...interface{}) { if lvl >= l.level { s := render(lvl, l.fields, fmt.Sprint(args...)) l.sink.Output(l.depth+initialLoggerCallDepth, s) } } func (l *logger) logf(lvl Level, format string, args ...interface{}) { if lvl >= l.level { s := render(lvl, l.fields, fmt.Sprintf(format, args...)) l.sink.Output(l.depth+initialLoggerCallDepth, s) } } func render(l Level, f Fields, msg string) string { if len(f) == 0 { return l.String() + ": " + msg } fBytes, err := json.Marshal(f) if err != nil { panic(err) } return fmt.Sprintf("%s: %s %s", l.String(), fBytes, msg) }
package controller import ( "api/models" "api/utils/inject" ) var BaseModel = &models.BaseModels{} type Controllers struct { Hello *HelloController `auto:"helloController"` Test *TestController `auto:"testController"` } func (ctx *Controllers) New() { BaseModel.New() inject.Register("baseController", ctx) inject.Inject() }
package main

import (
	"fmt"

	"./lexer"
	"./parser"
)

// main lexes and parses a (currently trivial) inline program, then
// prints either the parser's errors or the resulting AST.
// Earlier REPL/file-input experiments that lived here as commented-out
// code have been trimmed; recover them from version control if needed.
func main() {
	lex := lexer.New(` `)
	pars := parser.New(lex)
	program := pars.ParseProgram()
	if pars.HasErrors() {
		pars.PrintErrors()
	} else {
		fmt.Println(program.String(0))
	}
}
package repository

import (
	"encoding/csv"
	"errors"
	"fmt"
	"io"
	"log"
	"time"

	"github.com/jesus-mata/academy-go-q12021/application/repository"
	"github.com/jesus-mata/academy-go-q12021/domain"
	"github.com/jesus-mata/academy-go-q12021/infrastructure"
	"github.com/jesus-mata/academy-go-q12021/infrastructure/newsapi"
	"github.com/jesus-mata/academy-go-q12021/utils"
)

// newsRepository is a NewsArticleRepository backed by a CSV source and
// refreshed from a remote news API.
type newsRepository struct {
	csvSource infrastructure.CsvSource
	newsApi   newsapi.NewsApiClient
	logger    *log.Logger
}

// NewNewsArticleRepository builds a repository over the given CSV
// source and news API client.
func NewNewsArticleRepository(csv infrastructure.CsvSource, newsApi newsapi.NewsApiClient, logger *log.Logger) repository.NewsArticleRepository {
	return &newsRepository{csv, newsApi, logger}
}

// FindByID scans the CSV source for the article with the given ID.
func (r *newsRepository) FindByID(id string) (*domain.NewsArticle, error) {
	r.logger.Println("Finding News Article by ID", id)
	records, err := r.csvSource.GetAllLines()
	if err != nil {
		return nil, err
	}
	for _, record := range records {
		newsArticle, err := mapFromCSVRecord(record)
		if err != nil {
			return nil, err
		}
		if newsArticle.Id == id {
			return newsArticle, nil
		}
	}
	return nil, fmt.Errorf("News Article with ID %v does not exist.", id)
}

// FindAll loads and converts every row of the CSV source.
func (r *newsRepository) FindAll() ([]*domain.NewsArticle, error) {
	r.logger.Println("Retriving all News Articles")
	records, err := r.csvSource.GetAllLines()
	if err != nil {
		return nil, err
	}

	newsArticles := make([]*domain.NewsArticle, 0, len(records))
	for _, record := range records {
		newsArticle, err := mapFromCSVRecord(record)
		if err != nil {
			return nil, err
		}
		newsArticles = append(newsArticles, newsArticle)
	}
	return newsArticles, nil
}

// GetIterator returns a streaming iterator over the CSV source.
func (r *newsRepository) GetIterator() (domain.NewsIterator, error) {
	r.logger.Println("Retriving all News Articles")
	reader, err := r.csvSource.NewReader()
	if err != nil {
		// Fix: don't hand back an iterator over a failed reader.
		return nil, err
	}
	return NewNewsIteratorImpl(reader), nil
}

// FetchCurrent pulls the latest articles from the news API and appends
// them to the CSV source.
func (r *newsRepository) FetchCurrent() error {
	r.logger.Println("Fetching all News Articles from API")
	newsItems, err := r.newsApi.GetCurrentNews()
	if err != nil {
		return err
	}
	r.logger.Printf("News Found %v \n", len(newsItems))

	if err := r.csvSource.WriteLines(newsItems); err != nil {
		return err
	}
	return nil
}

// mapFromCSVRecord converts one CSV row into a NewsArticle.
// A row must have 9 columns: id, title, description, url, author,
// image, language, category, published-date.
func mapFromCSVRecord(record []string) (*domain.NewsArticle, error) {
	// Fix: indices up to [8] are read below, so require 9 columns; the
	// previous `< 4` check allowed an index-out-of-range panic on rows
	// with 4-8 columns.
	if len(record) < 9 {
		return nil, errors.New("CSV file has not valid News data")
	}
	id := record[0]
	title := record[1]
	description := record[2]
	url := record[3]
	author := record[4]
	image := record[5]
	language := record[6]
	category := record[7]
	publishedDate, err := time.Parse(utils.LayoutDateTimeIDOWithTZ, record[8])
	if err != nil {
		// Fix: report the date column (index 8), not record[3] (the URL).
		return nil, fmt.Errorf("CSV has invalid data. The Published Date '%v' is not a valid date.", record[8])
	}

	return domain.CreateNewsArticle(id, title, description, url, author, image, language, category, publishedDate)
}

// NewsIteratorImpl streams articles one row at a time from a csv.Reader.
type NewsIteratorImpl struct {
	data   *domain.NewsArticle // last successfully decoded article
	reader *csv.Reader
}

// NewNewsIteratorImpl wraps a csv.Reader in a NewsIterator.
func NewNewsIteratorImpl(reader *csv.Reader) domain.NewsIterator {
	return &NewsIteratorImpl{reader: reader}
}

// HasNext advances to the next row, returning false at EOF. On success
// the decoded article is retrievable via GetNext.
func (i *NewsIteratorImpl) HasNext() (bool, error) {
	record, err := i.reader.Read()
	if err == io.EOF {
		return false, nil
	}
	if perr, ok := err.(*csv.ParseError); ok {
		return false, fmt.Errorf("Cannot parse CSV: %s", perr.Error())
	}
	if err != nil {
		return false, err
	}

	data, err := mapFromCSVRecord(record)
	if err != nil {
		return false, err
	}
	i.data = data
	return true, nil
}

// GetNext returns the article decoded by the last successful HasNext.
func (i *NewsIteratorImpl) GetNext() *domain.NewsArticle {
	return i.data
}
// Copyright (C) 2019 Storj Labs, Inc. // See LICENSE for copying information. package fpath import ( "os" "path/filepath" "github.com/zeebo/errs" ) // AtomicWriteFile is a helper to atomically write the data to the outfile. func AtomicWriteFile(outfile string, data []byte, _ os.FileMode) (err error) { // TODO: provide better atomicity guarantees, like fsyncing the parent // directory and, on windows, using MoveFileEx with MOVEFILE_WRITE_THROUGH. fh, err := os.CreateTemp(filepath.Dir(outfile), filepath.Base(outfile)) if err != nil { return errs.Wrap(err) } needsClose, needsRemove := true, true defer func() { if needsClose { err = errs.Combine(err, errs.Wrap(fh.Close())) } if needsRemove { err = errs.Combine(err, errs.Wrap(os.Remove(fh.Name()))) } }() if _, err := fh.Write(data); err != nil { return errs.Wrap(err) } needsClose = false if err := fh.Close(); err != nil { return errs.Wrap(err) } if err := os.Rename(fh.Name(), outfile); err != nil { return errs.Wrap(err) } needsRemove = false return nil }
package logger_test

import (
	"context"
	"testing"

	"github.com/jonbodner/proteus/logger"
)

// TestLogging smoke-tests Log with a context carrying a DEBUG level and
// key/value pairs covering string, int, bool, float, and struct values
// (including characters that need JSON-style escaping).
func TestLogging(t *testing.T) {
	ctx := logger.WithLevel(context.Background(), logger.DEBUG)
	pairs := []logger.Pair{
		{"Foo", "Bar"},
		{"int", 1},
		{"bool", true},
		{"float", 3.14},
		{"struct", struct {
			A int
			B string
		}{1, "he\"ll:{},o"}},
	}
	logger.Log(ctx, logger.DEBUG, "this is a message", pairs...)
}
package main

import (
	"fmt"
	"os"

	"github.com/smartcontractkit/substrate-adapter/adapter"
)

// main reads the adapter configuration from SA_* environment variables,
// constructs the Substrate adapter client, and serves its handler.
func main() {
	fmt.Println("Starting Substrate adapter")

	var (
		privkey  = os.Getenv("SA_PRIVATE_KEY")
		txType   = os.Getenv("SA_TX_TYPE")
		endpoint = os.Getenv("SA_ENDPOINT")
		port     = os.Getenv("SA_PORT")
	)

	client, err := adapter.NewSubstrateAdapter(privkey, txType, endpoint)
	if err != nil {
		fmt.Println("Failed starting Substrate adapter:", err)
		return
	}
	adapter.RunWebserver(client.Handle, port)
}
package leetcode func lengthOfLongestSubstring(s string) int { length := len(s) var norepeat int for i := range s { smap := make(map[byte]bool) var tmp int for j := i; j < length; j++ { _, ok := smap[s[j]] if ok { tmp = j - i break } else { smap[s[j]] = true } tmp = j + 1 - i } if norepeat < tmp { norepeat = tmp } } return norepeat } // sliding window algorithm func lengthOfLongestSubstring(s string) int { length := len(s) var norepeat int smap := make(map[byte]int) right, norepeat := -1, 0 for i := 0; i < length; i++ { var cur int if i != 0 { delete(smap, s[i-1]) } for right+1 < length && smap[s[right+1]] == 0 { smap[s[right+1]]++ right++ } cur = right - i + 1 if cur > norepeat { norepeat = cur } } return norepeat }
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

// Note that there's also a lease_test.go, in package sql_test.

package lease

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/skip"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/logtags"
)

// TestTableSet drives descriptorSet through a scripted sequence of
// insert/remove operations, checking String() after each mutation and
// findNewest/findVersion after each `newest` probe.
func TestTableSet(t *testing.T) {
	defer leaktest.AfterTest(t)()

	type data struct {
		version    descpb.DescriptorVersion
		expiration int64
	}
	type insert data
	type remove data

	type newest struct {
		version descpb.DescriptorVersion
	}

	// expected is the set rendered as "version:expiration" pairs for
	// insert/remove steps, or the findNewest/findVersion result for
	// newest steps (version 0 means "newest of any version").
	testData := []struct {
		op       interface{}
		expected string
	}{
		{newest{0}, "<nil>"},
		{insert{2, 3}, "2:3"},
		{newest{0}, "2:3"},
		{newest{2}, "2:3"},
		{newest{3}, "<nil>"},
		{remove{2, 3}, ""},
		{insert{2, 4}, "2:4"},
		{newest{0}, "2:4"},
		{newest{2}, "2:4"},
		{newest{3}, "<nil>"},
		{insert{3, 1}, "2:4 3:1"},
		{newest{0}, "3:1"},
		{newest{1}, "<nil>"},
		{newest{2}, "2:4"},
		{newest{3}, "3:1"},
		{newest{4}, "<nil>"},
		{insert{1, 1}, "1:1 2:4 3:1"},
		{newest{0}, "3:1"},
		{newest{1}, "1:1"},
		{newest{2}, "2:4"},
		{newest{3}, "3:1"},
		{newest{4}, "<nil>"},
		{remove{3, 1}, "1:1 2:4"},
		{remove{1, 1}, "2:4"},
		{remove{2, 4}, ""},
	}
	set := &descriptorSet{}
	for i, d := range testData {
		switch op := d.op.(type) {
		case insert:
			s := &descriptorVersionState{
				Descriptor: tabledesc.NewBuilder(&descpb.TableDescriptor{Version: op.version}).BuildImmutable(),
			}
			s.expiration = hlc.Timestamp{WallTime: op.expiration}
			set.insert(s)

		case remove:
			s := &descriptorVersionState{
				Descriptor: tabledesc.NewBuilder(&descpb.TableDescriptor{Version: op.version}).BuildImmutable(),
			}
			s.expiration = hlc.Timestamp{WallTime: op.expiration}
			set.remove(s)

		case newest:
			n := set.findNewest()
			if op.version != 0 {
				n = set.findVersion(op.version)
			}
			s := "<nil>"
			if n != nil {
				s = fmt.Sprintf("%d:%d", n.GetVersion(), n.expiration.WallTime)
			}
			if d.expected != s {
				t.Fatalf("%d: expected %s, but found %s", i, d.expected, s)
			}
			// newest probes don't mutate the set; skip the String() check.
			continue
		}
		if s := set.String(); d.expected != s {
			t.Fatalf("%d: expected %s, but found %s", i, d.expected, s)
		}
	}
}

// getNumVersions returns how many descriptor versions the state holds,
// under the state's lock.
func getNumVersions(ds *descriptorState) int {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	return len(ds.mu.active.data)
}

// TestPurgeOldVersions verifies that purgeOldVersions drops all but the
// newest leased version, both when the old versions are leased and when
// an unleased old version has been inserted manually.
func TestPurgeOldVersions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// We're going to block gossip so it doesn't come randomly and clear up the
	// leases we're artificially setting up.
	gossipSem := make(chan struct{}, 1)
	serverParams := base.TestServerArgs{
		Knobs: base.TestingKnobs{
			SQLLeaseManager: &ManagerTestingKnobs{
				TestingDescriptorUpdateEvent: func(_ *descpb.Descriptor) error {
					gossipSem <- struct{}{}
					<-gossipSem
					return nil
				},
			},
		},
	}
	s, db, kvDB := serverutils.StartServer(t, serverParams)
	defer s.Stopper().Stop(context.Background())
	leaseManager := s.LeaseManager().(*Manager)

	// Block gossip.
	gossipSem <- struct{}{}
	defer func() {
		// Unblock gossip.
		<-gossipSem
	}()

	if _, err := db.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test")

	var tables []catalog.TableDescriptor
	var expiration hlc.Timestamp
	// getLeases acquires (and immediately releases) the freshest lease
	// three times, recording each descriptor and the last expiration.
	getLeases := func() {
		for i := 0; i < 3; i++ {
			if err := leaseManager.AcquireFreshestFromStore(context.Background(), tableDesc.GetID()); err != nil {
				t.Fatal(err)
			}
			table, exp, err := leaseManager.Acquire(context.Background(), s.Clock().Now(), tableDesc.GetID())
			if err != nil {
				t.Fatal(err)
			}
			tables = append(tables, table.(catalog.TableDescriptor))
			expiration = exp
			if err := leaseManager.Release(table); err != nil {
				t.Fatal(err)
			}
		}
	}
	getLeases()
	ts := leaseManager.findDescriptorState(tableDesc.GetID(), false)
	if numLeases := getNumVersions(ts); numLeases != 1 {
		t.Fatalf("found %d versions instead of 1", numLeases)
	}

	// Publish a new version for the table
	if _, err := leaseManager.Publish(context.Background(), tableDesc.GetID(), func(catalog.MutableDescriptor) error {
		return nil
	}, nil); err != nil {
		t.Fatal(err)
	}
	getLeases()
	ts = leaseManager.findDescriptorState(tableDesc.GetID(), false)
	if numLeases := getNumVersions(ts); numLeases != 2 {
		t.Fatalf("found %d versions instead of 2", numLeases)
	}
	if err := purgeOldVersions(
		context.Background(), kvDB, tableDesc.GetID(), false, 2 /* minVersion */, leaseManager); err != nil {
		t.Fatal(err)
	}
	if numLeases := getNumVersions(ts); numLeases != 1 {
		t.Fatalf("found %d versions instead of 1", numLeases)
	}
	// tables[5] is the last acquisition, i.e. the newest version.
	ts.mu.Lock()
	correctLease := ts.mu.active.data[0].GetID() == tables[5].GetID() &&
		ts.mu.active.data[0].GetVersion() == tables[5].GetVersion()
	correctExpiration := ts.mu.active.data[0].expiration == expiration
	ts.mu.Unlock()
	if !correctLease {
		t.Fatalf("wrong lease survived purge")
	}
	if !correctExpiration {
		t.Fatalf("wrong lease expiration survived purge")
	}

	// Test that purgeOldVersions correctly removes a table version
	// without a lease.
	ts.mu.Lock()
	tableVersion := &descriptorVersionState{
		Descriptor: tables[0],
		expiration: tables[5].GetModificationTime(),
	}
	ts.mu.active.insert(tableVersion)
	ts.mu.Unlock()
	if numLeases := getNumVersions(ts); numLeases != 2 {
		t.Fatalf("found %d versions instead of 2", numLeases)
	}
	if err := purgeOldVersions(
		context.Background(), kvDB, tableDesc.GetID(), false, 2 /* minVersion */, leaseManager); err != nil {
		t.Fatal(err)
	}
	if numLeases := getNumVersions(ts); numLeases != 1 {
		t.Fatalf("found %d versions instead of 1", numLeases)
	}
}

// TestPurgeOldVersionsRetainsDescriptorWithFutureModificationTime tests the
// behavior of purgeOldVersions when the descriptorSet contains a descriptor
// version with a modification time in advance of the current HLC clock, as can
// be the case if the descriptor was updated in a transaction that wrote to a
// global_reads range. In such cases, the descriptor with the newest
// modification time should still be retained.
func TestPurgeOldVersionsRetainsDescriptorWithFutureModificationTime(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// We're going to block gossip so it doesn't come randomly and clear up the
	// leases we're artificially setting up.
	gossipSem := make(chan struct{}, 1)
	serverParams := base.TestServerArgs{
		Knobs: base.TestingKnobs{
			SQLLeaseManager: &ManagerTestingKnobs{
				TestingDescriptorUpdateEvent: func(_ *descpb.Descriptor) error {
					gossipSem <- struct{}{}
					<-gossipSem
					return nil
				},
			},
		},
	}
	ctx := context.Background()
	s, db, kvDB := serverutils.StartServer(t, serverParams)
	defer s.Stopper().Stop(ctx)
	leaseManager := s.LeaseManager().(*Manager)

	// Block gossip.
	gossipSem <- struct{}{}
	defer func() {
		// Unblock gossip.
		<-gossipSem
	}()

	if _, err := db.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}
	tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test")
	// A synthetic timestamp 500ms ahead of the server clock.
	futureTime := s.Clock().Now().Add(500*time.Millisecond.Nanoseconds(), 0).WithSynthetic(true)

	getLatestDesc := func() catalog.TableDescriptor {
		if err := leaseManager.AcquireFreshestFromStore(ctx, tableDesc.GetID()); err != nil {
			t.Fatal(err)
		}
		table, _, err := leaseManager.Acquire(ctx, futureTime, tableDesc.GetID())
		if err != nil {
			t.Fatal(err)
		}
		latestDesc := table.(catalog.TableDescriptor)
		if err := leaseManager.Release(table); err != nil {
			t.Fatal(err)
		}
		return latestDesc
	}
	origDesc := getLatestDesc()
	ts := leaseManager.findDescriptorState(tableDesc.GetID(), false)
	if numLeases := getNumVersions(ts); numLeases != 1 {
		t.Fatalf("found %d versions instead of 1", numLeases)
	}

	// Publish a new version for the table with a modification time slightly in
	// the future of present time. We dictate this modification time by creating
	// a read-write conflict that forces the publishing transaction to bump its
	// commit timestamp.
	update := func(catalog.MutableDescriptor) error { return nil }
	logEvent := func(txn *kv.Txn) error {
		txn2 := kvDB.NewTxn(ctx, "future-read")
		txn2.SetFixedTimestamp(ctx, futureTime.Prev())
		if _, err := txn2.Get(ctx, "key"); err != nil {
			return errors.Wrap(err, "read from other txn in future")
		}
		return txn.Put(ctx, "key", "value")
	}
	if _, err := leaseManager.Publish(ctx, tableDesc.GetID(), update, logEvent); err != nil {
		t.Fatal(err)
	}

	// The leaseManager should be able to acquire the new version.
	latestDesc := getLatestDesc()
	if latestDesc.GetVersion() <= origDesc.GetVersion() {
		t.Fatalf("expected new version, found %v after %v", latestDesc, origDesc)
	}
	ts = leaseManager.findDescriptorState(tableDesc.GetID(), false)
	if numLeases := getNumVersions(ts); numLeases != 2 {
		t.Fatalf("found %d versions instead of 2", numLeases)
	}

	// Purge old versions and make sure that the newest lease survives the
	// purge.
	if err := purgeOldVersions(ctx, kvDB, tableDesc.GetID(), false, 2 /* minVersion */, leaseManager); err != nil {
		t.Fatal(err)
	}
	if numLeases := getNumVersions(ts); numLeases != 1 {
		t.Fatalf("found %d versions instead of 1", numLeases)
	}
	ts.mu.Lock()
	correctLease := ts.mu.active.data[0].GetID() == latestDesc.GetID() &&
		ts.mu.active.data[0].GetVersion() == latestDesc.GetVersion()
	ts.mu.Unlock()
	if !correctLease {
		t.Fatalf("wrong lease survived purge")
	}
}

// Test that a database with conflicting table names under different schemas
// do not cause issues.
func TestNameCacheDBConflictingTableNames(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())
	leaseManager := s.LeaseManager().(*Manager)

	if _, err := db.Exec(`SET experimental_enable_temp_tables = true`); err != nil {
		t.Fatal(err)
	}
	// Create pairs of tables with identical names in public and pg_temp.
	if _, err := db.Exec(`
CREATE TABLE t (public int);
CREATE TEMP TABLE t (temp int);
CREATE TABLE t2 (public int);
CREATE TEMP TABLE t2 (temp int);
`); err != nil {
		t.Fatal(err)
	}

	// Select in different orders, and make sure the right one is returned.
	if _, err := db.Exec("SELECT * FROM pg_temp.t;"); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec("SELECT * FROM public.t;"); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec("SELECT * FROM public.t2;"); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec("SELECT * FROM pg_temp.t2;"); err != nil {
		t.Fatal(err)
	}

	for _, tableName := range []string{"t", "t2"} {
		tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "defaultdb", tableName)
		lease := leaseManager.names.get(
			tableDesc.GetParentID(),
			descpb.ID(keys.PublicSchemaID),
			tableName,
			s.Clock().Now(),
		)
		if lease.GetID() != tableDesc.GetID() {
			t.Fatalf("lease has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.GetID())
		}
	}
}

// Test that changing a descriptor's name updates the name cache.
func TestNameCacheIsUpdated(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())
	leaseManager := s.LeaseManager().(*Manager)

	if _, err := db.Exec(`
CREATE DATABASE t;
CREATE DATABASE t1;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	// Populate the name cache.
	if _, err := db.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}

	tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test")

	// Rename.
	if _, err := db.Exec("ALTER TABLE t.test RENAME TO t.test2;"); err != nil {
		t.Fatal(err)
	}

	// Check that the cache has been updated.
	if leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test", s.Clock().Now()) != nil {
		t.Fatalf("old name still in cache")
	}

	lease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test2", s.Clock().Now())
	if lease == nil {
		t.Fatalf("new name not found in cache")
	}
	if lease.GetID() != tableDesc.GetID() {
		t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.GetID())
	}
	if err := leaseManager.Release(lease.Descriptor); err != nil {
		t.Fatal(err)
	}

	// Rename to a different database.
	if _, err := db.Exec("ALTER TABLE t.test2 RENAME TO t1.test2;"); err != nil {
		t.Fatal(err)
	}

	// Re-read the descriptor, to get the new ParentID.
	newTableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t1", "test2")
	if tableDesc.GetParentID() == newTableDesc.GetParentID() {
		t.Fatalf("database didn't change")
	}

	// Check that the cache has been updated.
	if leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test2", s.Clock().Now()) != nil {
		t.Fatalf("old name still in cache")
	}

	lease = leaseManager.names.get(newTableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test2", s.Clock().Now())
	if lease == nil {
		t.Fatalf("new name not found in cache")
	}
	if lease.GetID() != tableDesc.GetID() {
		t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.GetID())
	}
	if err := leaseManager.Release(lease.Descriptor); err != nil {
		t.Fatal(err)
	}
}

// Tests that a name cache entry with by an expired lease is not returned.
func TestNameCacheEntryDoesntReturnExpiredLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())
	leaseManager := s.LeaseManager().(*Manager)

	const tableName = "test"

	if _, err := db.Exec(fmt.Sprintf(`
CREATE DATABASE t;
CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR);
`, tableName)); err != nil {
		t.Fatal(err)
	}

	// Populate the name cache.
	if _, err := db.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}

	tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", tableName)

	// Check the assumptions this tests makes: that there is a cache entry
	// (with a valid lease).
	if lease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()); lease == nil {
		t.Fatalf("name cache has no unexpired entry for (%d, %s)", tableDesc.GetParentID(), tableName)
	} else {
		if err := leaseManager.Release(lease.Descriptor); err != nil {
			t.Fatal(err)
		}
	}

	// Force every lease to be considered expired.
	leaseManager.ExpireLeases(s.Clock())

	// Check the name no longer resolves.
	if lease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()); lease != nil {
		t.Fatalf("name cache has unexpired entry for (%d, %s): %s", tableDesc.GetParentID(), tableName, lease)
	}
}

// Tests that a name cache entry always exists for the latest lease and
// the lease expiration time is monotonically increasing.
func TestNameCacheContainsLatestLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	removalTracker := NewLeaseRemovalTracker()
	testingKnobs := base.TestingKnobs{
		SQLLeaseManager: &ManagerTestingKnobs{
			LeaseStoreTestingKnobs: StorageTestingKnobs{
				LeaseReleasedEvent: removalTracker.LeaseRemovedNotification,
			},
		},
	}
	s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{Knobs: testingKnobs})
	defer s.Stopper().Stop(context.Background())
	leaseManager := s.LeaseManager().(*Manager)

	const tableName = "test"

	if _, err := db.Exec(fmt.Sprintf(`
CREATE DATABASE t;
CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR);
`, tableName)); err != nil {
		t.Fatal(err)
	}

	tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", tableName)

	// Populate the name cache.
	if _, err := db.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}

	// There is a cache entry.
	lease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), tableName, s.Clock().Now())
	if lease == nil {
		t.Fatalf("name cache has no unexpired entry for (%d, %s)", tableDesc.GetParentID(), tableName)
	}

	tracker := removalTracker.TrackRemoval(lease.Descriptor)

	// Acquire another lease.
	if _, err := acquireNodeLease(context.Background(), leaseManager, tableDesc.GetID()); err != nil {
		t.Fatal(err)
	}

	// Check the name resolves to the new lease.
	newLease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), tableName, s.Clock().Now())
	if newLease == nil {
		t.Fatalf("name cache doesn't contain entry for (%d, %s)", tableDesc.GetParentID(), tableName)
	}
	if newLease == lease {
		t.Fatalf("same lease %s", newLease.expiration.GoTime())
	}

	if err := leaseManager.Release(lease.Descriptor); err != nil {
		t.Fatal(err)
	}

	// The first lease acquisition was released.
	if err := tracker.WaitForRemoval(); err != nil {
		t.Fatal(err)
	}

	// NOTE(review): this releases `lease` a second time; upstream versions of
	// this test release `newLease.Descriptor` here instead — verify which is
	// intended before relying on this code path.
	if err := leaseManager.Release(lease.Descriptor); err != nil {
		t.Fatal(err)
	}
}

// Test that table names are treated as case sensitive by the name cache.
func TestTableNameCaseSensitive(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())
	leaseManager := s.LeaseManager().(*Manager)

	if _, err := db.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	// Populate the name cache.
	if _, err := db.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}
	tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test")

	// Check that we cannot get the table by a different name.
	if leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "tEsT", s.Clock().Now()) != nil {
		t.Fatalf("lease manager incorrectly found table with different case")
	}
}

// Test that there's no deadlock between AcquireByName and Release.
// We used to have one due to lock inversion between the nameCache lock and
// the descriptorVersionState lock, triggered when the same lease was Release()d after the
// table had been dropped (which means it's removed from the nameCache) and
// AcquireByName()d at the same time.
func TestReleaseAcquireByNameDeadlock(t *testing.T) { defer leaktest.AfterTest(t)() removalTracker := NewLeaseRemovalTracker() testingKnobs := base.TestingKnobs{ SQLLeaseManager: &ManagerTestingKnobs{ LeaseStoreTestingKnobs: StorageTestingKnobs{ LeaseReleasedEvent: removalTracker.LeaseRemovedNotification, RemoveOnceDereferenced: true, }, }, } s, sqlDB, kvDB := serverutils.StartServer( t, base.TestServerArgs{Knobs: testingKnobs}) defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*Manager) if _, err := sqlDB.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); `); err != nil { t.Fatal(err) } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Populate the name cache. ctx := context.Background() table, _, err := leaseManager.AcquireByName( ctx, leaseManager.storage.clock.Now(), tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test", ) if err != nil { t.Fatal(err) } if err := leaseManager.Release(table); err != nil { t.Fatal(err) } // Try to trigger the race repeatedly: race an AcquireByName against a // Release. // tableChan acts as a barrier, synchronizing the two routines at every // iteration. tableChan := make(chan catalog.TableDescriptor) errChan := make(chan error) go func() { for table := range tableChan { // Move errors to the main goroutine. errChan <- leaseManager.Release(table) } }() for i := 0; i < 50; i++ { timestamp := leaseManager.storage.clock.Now() ctx := context.Background() desc, _, err := leaseManager.AcquireByName( ctx, timestamp, tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test", ) if err != nil { t.Fatal(err) } table := desc.(catalog.TableDescriptor) // This test will need to wait until leases are removed from the store // before creating new leases because the jitter used in the leases' // expiration causes duplicate key errors when trying to create new // leases. 
This is not a problem in production, since leases are not // removed from the store until they expire, and the jitter is small // compared to their lifetime, but it is a problem in this test because // we churn through leases quickly. tracker := removalTracker.TrackRemoval(table) // Start the race: signal the other guy to release, and we do another // acquire at the same time. tableChan <- table tableByName, _, err := leaseManager.AcquireByName( ctx, timestamp, tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test", ) if err != nil { t.Fatal(err) } // See if there was an error releasing lease. err = <-errChan if err != nil { t.Fatal(err) } // Release the lease for the last time. if err := leaseManager.Release(tableByName); err != nil { t.Fatal(err) } // There are 2 possible results of the race above: Either we acquired before // releasing (causing us to acquire the same lease, incrementing and then // decrementing the refCount), or we released before acquiring (causing the // lease to be removed before another new lease is acquired). In the latter // case, there are actually two different lease removals, but we still only // track one. // // An earlier version of this test tracked both lease removals, but it used // reference equality to determine whether we were reacquiring the same // lease, which is no longer feasible after the 20.2 descriptor interface // changes. This is mostly fine because async lease removal doesn't require // the descriptorState lock anyway, so it's not that relevant to this test. if err := tracker.WaitForRemoval(); err != nil { t.Fatal(err) } } close(tableChan) } // TestAcquireFreshestFromStoreRaces runs // Manager.acquireFreshestFromStore() in parallel to test for races. 
func TestAcquireFreshestFromStoreRaces(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*Manager) if _, err := db.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); `); err != nil { t.Fatal(err) } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var wg sync.WaitGroup numRoutines := 10 wg.Add(numRoutines) for i := 0; i < numRoutines; i++ { go func() { defer wg.Done() if err := leaseManager.AcquireFreshestFromStore(context.Background(), tableDesc.GetID()); err != nil { t.Error(err) } table, _, err := leaseManager.Acquire(context.Background(), s.Clock().Now(), tableDesc.GetID()) if err != nil { t.Error(err) } if err := leaseManager.Release(table); err != nil { t.Error(err) } }() } wg.Wait() } // This test checks that multiple threads can simultaneously acquire the // latest table version with a lease. When multiple threads // wait on a particular thread acquiring a lease for the latest table version, // they are able to check after waiting that the lease they were waiting on // is still valid. They are able to reacquire a lease if needed. func TestParallelLeaseAcquireWithImmediateRelease(t *testing.T) { defer leaktest.AfterTest(t)() testingKnobs := base.TestingKnobs{ SQLLeaseManager: &ManagerTestingKnobs{ LeaseStoreTestingKnobs: StorageTestingKnobs{ // Immediate remove descriptorVersionState and release its // lease when it is dereferenced. This forces threads // waiting on a lease to reacquire the lease. 
RemoveOnceDereferenced: true, }, }, } s, sqlDB, kvDB := serverutils.StartServer( t, base.TestServerArgs{Knobs: testingKnobs}) defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*Manager) if _, err := sqlDB.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); `); err != nil { t.Fatal(err) } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var wg sync.WaitGroup numRoutines := 10 now := s.Clock().Now() wg.Add(numRoutines) for i := 0; i < numRoutines; i++ { go func() { defer wg.Done() table, _, err := leaseManager.Acquire(context.Background(), now, tableDesc.GetID()) if err != nil { t.Error(err) } if err := leaseManager.Release(table); err != nil { t.Error(err) } }() } wg.Wait() } // Test one possible outcome of a race between a lease acquisition (the first // case through descriptorState.acquire(), the second through // descriptorState.acquireFreshestFromStore()) and a release of the lease that was // just acquired. Precisely: // 1. Thread 1 calls either acquireFreshestFromStore() or acquire(). // 2. Thread 1 releases the lock on descriptorState and starts acquisition of a lease // from the store, blocking until it's finished. // 3. Thread 2 calls acquire(). The lease has not been acquired yet, so it // also enters the acquisition code path (calling DoChan). // 4. Thread 2 proceeds to release the lock on descriptorState waiting for the // in-flight acquisition. // 4. The lease is acquired from the store and the waiting routines are // unblocked. // 5. Thread 2 unblocks first, and releases the new lease, for whatever reason. // 5. Thread 1 wakes up. At this point, a naive implementation would use the // newly acquired lease, which would be incorrect. The test checks that // acquireFreshestFromStore() or acquire() notices, after re-acquiring the // descriptorState lock, that the new lease has been released and acquires a new // one. 
func TestLeaseAcquireAndReleaseConcurrently(t *testing.T) { defer leaktest.AfterTest(t)() skip.WithIssue(t, 51798, "fails in the presence of migrations requiring backfill, "+ "but cannot import sqlmigrations") // Result is a struct for moving results to the main result routine. type Result struct { table catalog.TableDescriptor exp hlc.Timestamp err error } descID := descpb.ID(keys.LeaseTableID) // acquireBlock calls Acquire. acquireBlock := func( ctx context.Context, m *Manager, acquireChan chan Result, ) { table, e, err := m.Acquire(ctx, m.storage.clock.Now(), descID) acquireChan <- Result{err: err, exp: e, table: table.(catalog.TableDescriptor)} } testCases := []struct { name string // Whether the second routine is a call to Manager.acquireFreshest or // not. This determines which channel we unblock. isSecondCallAcquireFreshest bool }{ // Checks what happens when the race between between acquire() and // lease release occurs. { name: "CallAcquireConcurrently", isSecondCallAcquireFreshest: false, }, // Checks what happens when the race between // acquireFreshestFromStore() and lease release occurs. { name: "CallAcquireFreshestAndAcquireConcurrently", isSecondCallAcquireFreshest: true, }, } for _, test := range testCases { ctx := logtags.AddTag(context.Background(), "test: Lease", nil) t.Run(test.name, func(t *testing.T) { // blockChan and freshestBlockChan is used to set up the race condition. blockChan := make(chan struct{}) freshestBlockChan := make(chan struct{}) // acquisitionBlock is used to prevent acquireNodeLease from // completing, to force a lease to delay its acquisition. acquisitionBlock := make(chan struct{}) // preblock is used for the main routine to wait for all acquisition // routines to catch up. var preblock sync.WaitGroup // acquireArrivals and acquireFreshestArrivals tracks how many times // we've arrived at the knob codepath for the corresponding functions. 
// This is needed because the fix to the race condition hits the knob more // than once in a single routine, so we need to ignore any extra passes. var acquireArrivals int32 var acquireFreshestArrivals int32 // leasesAcquiredCount counts how many leases were acquired in total. var leasesAcquiredCount int32 removalTracker := NewLeaseRemovalTracker() testingKnobs := base.TestingKnobs{ SQLLeaseManager: &ManagerTestingKnobs{ LeaseStoreTestingKnobs: StorageTestingKnobs{ RemoveOnceDereferenced: true, LeaseReleasedEvent: removalTracker.LeaseRemovedNotification, LeaseAcquireResultBlockEvent: func(leaseBlockType AcquireBlockType) { if leaseBlockType == AcquireBlock { if count := atomic.LoadInt32(&acquireArrivals); (count < 1 && test.isSecondCallAcquireFreshest) || (count < 2 && !test.isSecondCallAcquireFreshest) { atomic.AddInt32(&acquireArrivals, 1) preblock.Done() <-blockChan } } else if leaseBlockType == AcquireFreshestBlock { if atomic.LoadInt32(&acquireFreshestArrivals) < 1 { atomic.AddInt32(&acquireFreshestArrivals, 1) preblock.Done() <-freshestBlockChan } } }, LeaseAcquiredEvent: func(_ catalog.Descriptor, _ error) { atomic.AddInt32(&leasesAcquiredCount, 1) <-acquisitionBlock }, }, }, } serverArgs := base.TestServerArgs{Knobs: testingKnobs} serverArgs.LeaseManagerConfig = base.NewLeaseManagerConfig() // The LeaseJitterFraction is zero so leases will have // monotonically increasing expiration. This prevents two leases // from having the same expiration due to randomness, as the // leases are checked for having a different expiration. serverArgs.LeaseManagerConfig.DescriptorLeaseJitterFraction = 0.0 s, _, _ := serverutils.StartServer( t, serverArgs) defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*Manager) acquireResultChan := make(chan Result) // Start two routines to acquire and release. 
preblock.Add(2) go acquireBlock(ctx, leaseManager, acquireResultChan) if test.isSecondCallAcquireFreshest { go func(ctx context.Context, m *Manager, acquireChan chan Result) { if err := m.AcquireFreshestFromStore(ctx, descID); err != nil { acquireChan <- Result{err: err, exp: hlc.Timestamp{}, table: nil} return } table, e, err := m.Acquire(ctx, s.Clock().Now(), descID) acquireChan <- Result{err: err, exp: e, table: table.(catalog.TableDescriptor)} }(ctx, leaseManager, acquireResultChan) } else { go acquireBlock(ctx, leaseManager, acquireResultChan) } // Wait until both routines arrive. preblock.Wait() // Allow the acquisition to finish. By delaying it until now, we guarantee // both routines will receive the same lease. acquisitionBlock <- struct{}{} // Allow the first routine to finish acquisition. In the case where both // routines are calling Acquire(), first refers to whichever routine // continues, order does not matter. blockChan <- struct{}{} // Wait for the first routine's results. result1 := <-acquireResultChan if result1.err != nil { t.Fatal(result1.err) } // Release the lease. This also causes it to get removed as the // knob RemoveOnceDereferenced is set. tracker := removalTracker.TrackRemoval(result1.table) if err := leaseManager.Release(result1.table); err != nil { t.Fatal(err) } // Wait until the lease is removed. if err := tracker.WaitForRemoval(); err != nil { t.Fatal(err) } // Allow the second routine to proceed. if test.isSecondCallAcquireFreshest { freshestBlockChan <- struct{}{} } else { blockChan <- struct{}{} } // Allow all future acquisitions to complete. close(acquisitionBlock) // Get the acquisition results of the second routine. result2 := <-acquireResultChan if result2.err != nil { t.Fatal(result2.err) } if result1.table == result2.table && result1.exp == result2.exp { t.Fatalf("Expected the leases to be different. 
TableDescriptor pointers are equal and both the same expiration") } if count := atomic.LoadInt32(&leasesAcquiredCount); count != 2 { t.Fatalf("Expected to acquire 2 leases, instead got %d", count) } }) } }
package users

import (
	"RBStask/app/models/entity"
	"RBStask/app/models/mappers"
	"database/sql"
	"fmt"
	// "RBStask/app/controllers"
	// _ "github.com/lib/pq"
)

// UserProvider wires request handling to the user mapper backed by a
// PostgreSQL connection pool.
type UserProvider struct {
	db    *sql.DB
	users *mappers.UserMapper
}

// Init opens the database connection pool and initializes the user mapper.
// NOTE(review): connection credentials are hard-coded here; consider moving
// them to configuration.
func (p *UserProvider) Init() error {
	connStr := "host=localhost port=5432 user=postgres password=123 dbname=RBStask sslmode=disable"
	db, err := sql.Open("postgres", connStr)
	if err != nil {
		return fmt.Errorf("ошибка инициализации модели : %v", err)
	}
	p.db = db

	p.users = new(mappers.UserMapper)
	if err = p.users.Init(p.db); err != nil {
		return fmt.Errorf("ошибка инициализации : %v", err)
	}
	return nil
}

// Login reports whether the received credentials are accepted by the mapper.
//
// Fixes over the previous version:
//   - no longer defers p.db.Close(): closing the shared *sql.DB pool after
//     the first login attempt broke every subsequent query;
//   - the mapper result is a bool (it was compared with == true), so it is
//     returned directly instead of being misnamed err;
//   - logging uses Printf (fmt.Println does not interpret format verbs).
func (p *UserProvider) Login(receivedUser *entity.User) bool {
	ok := p.users.Login(receivedUser)
	fmt.Printf("login result: %v\n", ok)
	return ok
}

// func (p *UserProvider) SetCook() error {
// 	x := randToken()
// 	new_cookie := &http.Cookie{Name: "fooddd", Value: "randToken()"}
// 	p.SetCookie(new_cookie)
// }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package log // NoLogV returns a verbosity level that will not result in VEvents and // VErrEvents being logged. func NoLogV() Level { return logging.vmoduleConfig.verbosity.get() + 1 }
// Copyright 2021 The CUE Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import "fmt" // goModResolver resolves a CUE version of "go.mod" and uses the Go module // context within which the CUE module is found to resolve a CUE version. cue // is then built within a .unity-bin directory at the Go module root type goModResolver struct { cp *commonPathResolver } func newGoModResolver(c resolverConfig) (resolver, error) { res := &goModResolver{ cp: c.commonPathResolver, } return res, nil } func (a *goModResolver) resolve(version, dir, workingDir, target string) (string, error) { if version != "go.mod" { return "", errNoMatch } commit, err := a.cp.resolve(dir, target) if err != nil { return "", err } return fmt.Sprintf("%s (%s)", version, commit), nil }
// Package aggregation contains the GalleryAggregator for Google+ event pictures. package aggregation import ( "encoding/base64" "encoding/json" "fmt" "net/http" "net/http/httptest" "strings" timepkg "time" "github.com/PuerkitoBio/goquery" "github.com/sourcegraph/webloop" "code.google.com/p/goauth2/oauth" "code.google.com/p/google-api-go-client/taskqueue/v1beta2" "socialvibes/model" "socialvibes/config" ) // The GalleryAggregator takes a Google+ event ID, // renders the event page and aggregates all event picture URLs. func GalleryAggregator(eventId string) { // Create a webloop renderer for the given event renderer := &webloop.StaticRenderer{ TargetBaseURL: "https://plus.google.com/events/gallery/" + eventId + "?sort=1", WaitTimeout: timepkg.Second * 5, ReturnUnfinishedPages: true, } // Create a http.ResponseWriter, so that GoQuery can work with it w := httptest.NewRecorder() // Create an empty http.Request for the given event r, err := http.NewRequest("GET", "", nil) if err != nil { fmt.Printf("Error creating request: %v\n", err) } renderer.ServeHTTP(w, r) // Transform the response into a parsable document var document *goquery.Document document, err = goquery.NewDocumentFromReader(w.Body) if err != nil { fmt.Printf("Error creating request: %v\n", err) } // Parse all event picture URLs from the response document var pictures []model.Picture document.Find(".Bea.VLb").Each(func(i int, s *goquery.Selection) { picUrl, _ := s.Attr("src") pictures = append(pictures, model.Picture{picUrl}) }) // Request current API Access Token authResp, err := http.Get("http://metadata/computeMetadata/v1beta1/instance/service-accounts/default/token") if err != nil { fmt.Printf("Error getting authorization information: %v\n", err) } defer authResp.Body.Close() authDecoder := json.NewDecoder(authResp.Body) authData := new(model.AuthorizationResponse) authDecoder.Decode(&authData) // Create a new authorized API client transport := &oauth.Transport{ Config: config.OAuthConfig, Token: 
&oauth.Token{ AccessToken: authData.Access_token, }, } taskapi, err := taskqueue.New(transport.Client()) if err != nil { fmt.Printf("Error generating task queue service: %v\n", err) } // Encode the event pictures object which will be passed to the task as payload pictures_json, err := json.Marshal(pictures) if err != nil { fmt.Printf("Error encoding event to json: %v\n", err) } pictures_base64 := base64.StdEncoding.EncodeToString(pictures_json) // Insert the task _, err = taskapi.Tasks.Insert("s~gcdc2013-socialvibes", "picture", &taskqueue.Task{ PayloadBase64: pictures_base64, QueueName: "picture", }).Do() if err != nil { fmt.Printf("Error inserting task: %v\n", err) } // Notify the task consumer in the App Engine via RPC timepkg.Sleep(3 * timepkg.Second) response := `{"method":"EventService.PullTask","params":[{"PullType":"picture", "EventId":"` + eventId + `"}], "id":"1"}` _, err = http.Post("http://gcdc2013-socialvibes.appspot.com/rpc", "application/json", strings.NewReader(response)) if err != nil { fmt.Printf("Error notifying the task consumer in the app engine via rpc: %v\n", err) } }
package main

import (
	"fmt"
)

// Reads an integer target from stdin and prints the largest step count
// whose running sum of triangular numbers does not exceed the target
// (printing the exact step when the sum hits the target exactly).
func main() {
	var target int
	fmt.Scan(&target)

	if target == 1 {
		fmt.Println("1")
		return
	}

	answer := 0
	triangular, total := 0, 0
	for step := 1; step <= target+1; step++ {
		triangular += step // step-th triangular number
		total += triangular
		if total == target {
			answer = step
		}
		if total > target {
			answer = step - 1
			break
		}
	}
	fmt.Println(answer)
}
package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"net/http"
)

// stuff is the raw payload, out its hex encoding, and out2 a small constant
// payload. All three are initialized once in main before the server starts
// and are only read afterwards.
var stuff []byte
var out []byte
var out2 []byte

// handler1 serves the large hex-encoded payload.
func handler1(w http.ResponseWriter, r *http.Request) {
	n, err := w.Write(out)
	if n != len(out) {
		fmt.Println("Short write!")
	}
	if err != nil {
		fmt.Println(err)
	}
}

// handler2 serves the small constant payload.
func handler2(w http.ResponseWriter, r *http.Request) {
	n, err := w.Write(out2)
	if n != len(out2) {
		fmt.Println("Short write!")
	}
	if err != nil {
		fmt.Println(err)
	}
}

func main() {
	stuff = make([]byte, 32*1024)
	out = make([]byte, len(stuff)*2) // hex encoding doubles the size
	out2 = []byte("hello world")
	for i := 0; i < len(stuff); i++ {
		stuff[i] = byte(i % 256)
	}
	hex.Encode(out, stuff)

	http.HandleFunc("/test1", handler1)
	http.HandleFunc("/test2", handler2)
	// Fix: the error from ListenAndServe (e.g. port already in use) was
	// silently discarded, leaving the process alive with no server.
	log.Fatal(http.ListenAndServe(":20002", nil))
}
package main

import (
	"encoding/json"
	"flag"
	"log"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/wilsonfv/todolist/app/controller"
	"github.com/wilsonfv/todolist/app/dao"
	"github.com/wilsonfv/todolist/app/model"
)

// td is the shared mongo-backed task DAO, configured in init.
var td = dao.TaskDao{}

// ListTask handles GET /task and returns every task as JSON.
func ListTask(w http.ResponseWriter, r *http.Request) {
	log.Println("ListTask")
	tasks, err := controller.ListAll(&td)
	if err != nil {
		replyWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	replyWithJson(w, http.StatusOK, tasks)
}

// AddTask handles POST /task, decoding the task to create from the body.
func AddTask(w http.ResponseWriter, r *http.Request) {
	log.Println("AddTask")
	defer r.Body.Close()
	var task model.Task
	if err := json.NewDecoder(r.Body).Decode(&task); err != nil {
		replyWithError(w, http.StatusInternalServerError, "Invalid request")
		return
	}
	if err := controller.AddTask(&td, task); err != nil {
		replyWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	replyWithJson(w, http.StatusCreated, task)
}

// DeleteTask handles DELETE /task, decoding the task to remove from the body.
func DeleteTask(w http.ResponseWriter, r *http.Request) {
	// Consistency fix: ListTask and AddTask log their entry; DeleteTask now
	// does too.
	log.Println("DeleteTask")
	defer r.Body.Close()
	var task model.Task
	if err := json.NewDecoder(r.Body).Decode(&task); err != nil {
		replyWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	if err := controller.DeleteTask(&td, task); err != nil {
		replyWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	replyWithJson(w, http.StatusOK, map[string]string{"result": "deleted"})
}

// replyWithError wraps msg in a JSON error envelope.
func replyWithError(w http.ResponseWriter, code int, msg string) {
	replyWithJson(w, code, map[string]string{"error": msg})
}

// replyWithJson marshals payload and writes it with the given status code.
func replyWithJson(w http.ResponseWriter, code int, payload interface{}) {
	response, err := json.Marshal(payload)
	if err != nil {
		// Fix: the marshal error used to be silently discarded, which could
		// send an empty body with a success status code.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(`{"error":"failed to encode response"}`))
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	w.Write(response)
}

// init parses flags and connects the DAO to mongodb before main runs.
func init() {
	var mongodbUrl string
	flag.StringVar(&mongodbUrl, "mongodbUrl", "localhost:27017", "url to connect to mongodb")
	flag.Parse()

	td.Server = mongodbUrl
	td.Database = "task_db"
	td.Collection = "tasks"
	log.Print("connecting to mongodb at ", mongodbUrl)
	td.Connect()
}

// main wires the routes and starts the HTTP server.
func main() {
	router := mux.NewRouter()
	router.HandleFunc("/task", ListTask).Methods("GET")
	router.HandleFunc("/task", AddTask).Methods("POST")
	router.HandleFunc("/task", DeleteTask).Methods("DELETE")

	log.Println("App Server starting at localhost:8181/task")
	if err := http.ListenAndServe(":8181", router); err != nil {
		log.Fatal(err)
	}
}
package main

import (
	"fmt"
)

func test() {
	// var x float64 = 3.4
	// var x = 3.4
	// var y string = "abc"
	// var y = "abc"
	// var a uint8 = 10
	// var b uint16 = 10
	// var c int = 100
	// var c = 100
	// fmt.Println("x's type:", reflect.TypeOf(x))
	// fmt.Println("y's type:", reflect.TypeOf(y))
	// fmt.Println("a's type:", reflect.TypeOf(a))
	// fmt.Println("b's type:", reflect.TypeOf(b))
	// fmt.Println("c's type:", reflect.TypeOf(c))

	// 1. var v_name v_type: explicit type; without an initializer the
	// zero value (0) is used.
	var a int
	fmt.Printf("a = %d\n", a)
	a = 10
	fmt.Printf("a = %d\n", a)

	// 2. var v_name [v_type] = value: declare with an initial value; the
	// type may be omitted and is then inferred by the compiler.
	var b int = 101
	var c = 3.14
	fmt.Printf("b = %d, c = %f\n", b, c)

	// 3. v_name := value: omits var; the names on the left of := must not
	// already be declared, otherwise it fails to compile.
	str := "yekai"

	// Pass by reference:
	// & takes a variable's address; * dereferences an address.
	// y points at x's storage, so writing through y is visible when x is
	// read again.
	x := 99
	fmt.Printf("str = %s, x=%d\n", str, x)
	y := &x  // take a pointer to x
	*y = 100 // writing through the pointer changes every reader of that address
	fmt.Printf("str = %s, x=%d\n", str, x)
	// str = yekai, x=99
	// str = yekai, x=100
}

// var ( // mostly used for package-level (global) declarations
// 	a1 int
// 	b1 bool
// )
var x1, y1 int
var c1, d1 int = 3, 4
var e1, f1 = "yekai", 3.14

// h, g := 123, "can only be declared inside a function"
func main1() {
	h1, g1 := 123, "只能在函数内部声明"
	fmt.Println(x1, y1, c1, d1, e1, f1, h1, g1)
	// _ is the blank identifier: the first right-hand value is discarded,
	// and fmt.Println(_) cannot read it.
	_, value := 3, 5
	fmt.Println(value)
}
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/bketelsen/microclass/module4/userservice/proto/account"
	"github.com/micro/go-micro/client"
	"github.com/micro/profile-srv/proto/record"
)

// main logs in as the demo user, looks the user up via the search API and
// fetches the matching profile record, printing each step.
func main() {
	cl := account.NewAccountClient("go.micro.srv.user", client.DefaultClient)

	req := &account.LoginRequest{
		Username: "gopher",
		Password: "password1",
	}

	var rsp *account.LoginResponse
	var err error
	// One shared one-second deadline covers all three RPCs below.
	ctx, cxl := context.WithTimeout(context.Background(), 1*time.Second)
	defer cxl()

	// Call service
	if rsp, err = cl.Login(ctx, req); err != nil {
		fmt.Println("call err: ", err, rsp)
		return
	}
	fmt.Println("Found account:", rsp.Session.GetUsername())

	sreq := &account.SearchRequest{
		Username: "gopher",
		Limit:    10,
	}
	var srsp *account.SearchResponse
	if srsp, err = cl.Search(ctx, sreq); err != nil {
		fmt.Println("call err:", err, srsp)
		// Fix: execution previously fell through here and dereferenced the
		// nil srsp below.
		return
	}

	var foundUser *account.User
	if srsp.Users != nil {
		if len(srsp.Users) > 0 {
			foundUser = srsp.Users[0]
		} else {
			fmt.Println("no user found")
			return
		}
	} else {
		fmt.Println("Got no users")
		return
	}

	profcl := record.NewRecordClient("go.micro.srv.profile", client.DefaultClient)
	profreq := &record.ReadRequest{
		Id: foundUser.GetId(),
	}
	var profrsp *record.ReadResponse
	if profrsp, err = profcl.Read(ctx, profreq); err != nil {
		fmt.Println("call err:", err, profrsp)
		return
	}
	fmt.Println(profrsp.Profile)
}
package main

import (
	"fmt"
	"os"

	"github.com/ray-g/dnsproxy/dnsproxy"
	"github.com/ray-g/dnsproxy/utils"
)

// main starts the DNS proxy with the configuration path given as the first
// command-line argument, then blocks until a termination signal arrives.
func main() {
	// Fix: os.Args[1] was indexed unconditionally and panicked when the
	// argument was missing; print usage and exit instead.
	if len(os.Args) < 2 {
		fmt.Fprintf(os.Stderr, "usage: %s <config>\n", os.Args[0])
		os.Exit(1)
	}
	dnsproxy.Serve(os.Args[1])
	utils.WaitSysSignal()
}
package day02

import (
	"testing"

	"github.com/wistler/aoc-2020/internal/io"
)

// TestWithSampleData checks both parts against the worked example from the
// puzzle statement.
func TestWithSampleData(t *testing.T) {
	input := []string{
		"1-3 a: abcde",
		"1-3 b: cdefg",
		"2-9 c: ccccccccc",
	}
	part1Ans := 2
	part2Ans := 1

	got, err := part1(input)
	check(err)
	if got != part1Ans {
		t.Fatalf(`Part 1: got %v, but want %v`, got, part1Ans)
	}

	got, err = part2(input)
	check(err)
	if got != part2Ans {
		// Fix: the failure message previously printed part1Ans as the
		// expected value for Part 2.
		t.Fatalf(`Part 2: got %v, but want %v`, got, part2Ans)
	}
}

// TestWithRealData ensures both parts run cleanly on the real puzzle input.
func TestWithRealData(t *testing.T) {
	input := io.ReadInputFile("./input.txt")
	_, err := part1(input)
	check(err)
	_, err = part2(input)
	check(err)
}
package nan

import (
	"log"
	"math"
)

// START OMIT

// Log returns the natural logarithm of x. Rather than letting math.Log
// produce NaN/-Inf for non-positive x, it panics via log.Panicf.
func Log(x float64) float64 {
	if x <= 0.0 {
		log.Panicf("x (%v) <= 0", x)
	}
	result := math.Log(x)
	return result
}

// END OMIT
// Common methods for Expiry packages
package expiry

import (
	"fmt"
	"time"
)

// clock abstracts time.Now so tests can substitute a fake time source.
type clock interface {
	Now() time.Time
}

// realClock is the production clock backed by the system time.
type realClock struct{}

func (r realClock) Now() time.Time {
	return time.Now()
}

// Handles tracking of expiration times.
// This is intentionally _not_ an interface
// The way the Expiry packages implement this is unique
type Expiry struct {
	// Clock supplies the current time; injectable for tests.
	Clock clock
	// compactorFunc is invoked periodically to purge expired entries.
	compactorFunc func()
	// closer signals the compactor goroutine to stop.
	closer chan bool
	// compactorInterval is the delay between compaction runs.
	compactorInterval time.Duration
	// running tracks whether the compactor goroutine has been started.
	// NOTE(review): read and written without synchronization; confirm that
	// callers never invoke Start/Stop/IsRunning from concurrent goroutines.
	running bool
}

// Options configures a new Expiry.
type Options struct {
	Clock             clock
	CompactorFunc     func()
	CompactorInterval time.Duration
}

// DefaultOptions uses the real clock and a five-minute compaction interval.
// Note it carries no CompactorFunc, which NewExpiry requires.
var DefaultOptions = &Options{
	Clock:             realClock{},
	CompactorInterval: time.Duration(5) * time.Minute,
}

// NewExpiry validates options and builds an Expiry. The compactor goroutine
// is not started until Start is called. Returns an error when no
// CompactorFunc is supplied.
func NewExpiry(options *Options) (*Expiry, error) {
	var e = &Expiry{closer: make(chan bool)}
	e.Clock = options.Clock
	e.compactorFunc = options.CompactorFunc
	e.compactorInterval = options.CompactorInterval
	if e.compactorFunc == nil {
		// If function is missing, then throw an error
		return nil, fmt.Errorf("missing Expiry Compactor function")
	}
	return e, nil
}

// IsRunning can access the running state readonly
func (e *Expiry) IsRunning() bool {
	return e.running
}

// Start launches the background compactor goroutine; a no-op when it is
// already running.
func (e *Expiry) Start() {
	if !e.IsRunning() {
		go e.runCompactor()
		e.running = true
	}
}

// Stop signals the compactor goroutine to exit; a no-op when not running.
// Note the send on closer blocks until runCompactor receives it.
func (e *Expiry) Stop() {
	if e.IsRunning() {
		// Signal to the runCompactor it should stop ticking
		e.closer <- true
		// Setting to false so an in-progress compaction can stop
		e.running = false
	}
}

// runCompactor is in its own goroutine and thus needs the closer to stop
func (e *Expiry) runCompactor() {
	// Run immediately
	e.compactorFunc()

	ticker := time.NewTicker(e.compactorInterval)
COMPACT:
	for {
		select {
		case <-e.closer:
			break COMPACT
		case <-ticker.C:
			e.compactorFunc()
		}
	}
	ticker.Stop()
}

// NewExpiryRecordTTL builds a TTL-based expiry record bound to this
// Expiry's clock.
func (e *Expiry) NewExpiryRecordTTL(key string, ttl time.Duration) ExpiryRecord {
	return NewExpiryRecordTTL(key, e.Clock, ttl)
}
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// main reads a JSON document from stdin and pretty-prints it to stdout,
// exiting with status 1 on malformed input or an encoding failure.
func main() {
	var doc interface{}

	if err := json.NewDecoder(os.Stdin).Decode(&doc); err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing json: %v\n", err)
		os.Exit(1)
	}

	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", " ")
	if err := enc.Encode(doc); err != nil {
		fmt.Fprintf(os.Stderr, "Error formatting json: %v\n", err)
		os.Exit(1)
	}
}
/* Copyright © 2021 Author : mehtaarn000 Email : arnavm834@gmail.com */ package utils import ( "os" "path/filepath" "strings" ) // Create creates files and directories recursively (and returns a write object) func Create(p string) (*os.File, error) { if err := os.MkdirAll(filepath.Dir(p), 777); err != nil { return nil, err } return os.Create(p) } // GetFiles is used to get all file names (not full paths) in cwd func GetFiles() []string { var files []string err := filepath.Walk("./", func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } if strings.Contains(path, ".ssc") { return nil } files = append(files, path) return nil }) if err != nil { Exit(err) } return files } func RemoveEverything() { err := filepath.Walk("./", func(path string, info os.FileInfo, err error) error { if strings.Contains(path, ".ssc") { return nil } os.Remove(path) return nil }) if err != nil { Exit(err) } } // FileExists it used to check if file exists in cwd func FileExists(filename string) bool { info, err := os.Stat(filename) if os.IsNotExist(err) { return false } return !info.IsDir() }
package acmetool

import kingpin "gopkg.in/alecthomas/kingpin.v2"

// App bundles the kingpin command-line application with the set of
// subcommand handlers registered on it.
type App struct {
	// CommandLine is the root kingpin application definition.
	CommandLine *kingpin.Application
	// Commands maps a subcommand name to its handler.
	// Ctx is declared elsewhere in this package.
	Commands map[string]func(Ctx)
}
package todotxt

import (
	"fmt"
	"sort"
)

// TaskSegmentType represents type of segment in task string.
//go:generate stringer -type TaskSegmentType -trimprefix Segment -output segment_type.go
type TaskSegmentType uint8

// Flags for indicating type of segment in task string.
const (
	SegmentIsCompleted TaskSegmentType = iota + 1
	SegmentCompletedDate
	SegmentPriority
	SegmentCreatedDate
	SegmentTodoText
	SegmentContext
	SegmentProject
	SegmentTag
	SegmentDueDate
)

// TaskSegment represents a segment in task string.
type TaskSegment struct {
	Type TaskSegmentType
	// Originals holds the raw token(s); tags carry two entries (key, value).
	Originals []string
	// Display is the token as rendered in todo.txt format (e.g. "(A)", "@ctx").
	Display string
}

// Segments returns a segmented task string in todo.txt format. The order of segments is the same as String().
func (task *Task) Segments() []*TaskSegment {
	var segs []*TaskSegment
	// Segment whose display text equals its single original token.
	newBasicTaskSeg := func(t TaskSegmentType, s string) *TaskSegment {
		return &TaskSegment{
			Type:      t,
			Originals: []string{s},
			Display:   s,
		}
	}
	// Segment whose display text (sd) differs from the original token (so).
	newTaskSeg := func(t TaskSegmentType, so, sd string) *TaskSegment {
		return &TaskSegment{
			Type:      t,
			Originals: []string{so},
			Display:   sd,
		}
	}
	if task.Completed {
		segs = append(segs, newBasicTaskSeg(SegmentIsCompleted, "x"))
		if task.HasCompletedDate() {
			segs = append(segs, newBasicTaskSeg(SegmentCompletedDate, task.CompletedDate.Format(DateLayout)))
		}
	}
	// Priority is suppressed on completed tasks when the package-level
	// RemoveCompletedPriority flag is set.
	if task.HasPriority() && (!task.Completed || !RemoveCompletedPriority) {
		segs = append(segs, newTaskSeg(SegmentPriority, task.Priority, fmt.Sprintf("(%s)", task.Priority)))
	}
	if task.HasCreatedDate() {
		segs = append(segs, newBasicTaskSeg(SegmentCreatedDate, task.CreatedDate.Format(DateLayout)))
	}
	segs = append(segs, newBasicTaskSeg(SegmentTodoText, task.Todo))
	if task.HasContexts() {
		// NOTE: sort.Strings sorts in place, so calling Segments mutates the
		// order of task.Contexts (and task.Projects below).
		sort.Strings(task.Contexts)
		for _, context := range task.Contexts {
			segs = append(segs, newTaskSeg(SegmentContext, context, fmt.Sprintf("@%s", context)))
		}
	}
	if task.HasProjects() {
		sort.Strings(task.Projects)
		for _, project := range task.Projects {
			segs = append(segs, newTaskSeg(SegmentProject, project, fmt.Sprintf("+%s", project)))
		}
	}
	if task.HasAdditionalTags() {
		// Sort map alphabetically by keys
		keys := make([]string, 0, len(task.AdditionalTags))
		for key := range task.AdditionalTags {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		for _, key := range keys {
			val := task.AdditionalTags[key]
			segs = append(segs, &TaskSegment{
				Type:      SegmentTag,
				Originals: []string{key, val},
				Display:   fmt.Sprintf("%s:%s", key, val),
			})
		}
	}
	if task.HasDueDate() {
		segs = append(segs, newBasicTaskSeg(SegmentDueDate, fmt.Sprintf("due:%s", task.DueDate.Format(DateLayout))))
	}
	return segs
}
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package appsplatform

import (
	"context"
	"time"

	"chromiumos/tast/ctxutil"
	"chromiumos/tast/local/chrome/uiauto/faillog"
	"chromiumos/tast/local/playbilling/dgapi2"
	"chromiumos/tast/testing"
)

func init() {
	testing.AddTest(&testing.Test{
		Func:         Dgapi2OneTime,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verify it is possible to go through a one-time purchase flow in the DGAPI2 test app",
		Contacts: []string{
			"jshikaram@chromium.org",
			"ashpakov@google.com", // until Sept 2022
			"chromeos-apps-foundation-team@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Fixture:      "playBillingDgapi2Fixture",
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
		}},
		Timeout: 5 * time.Minute,
	})
}

// Dgapi2OneTime Checks DGAPI2 test app allows to purchase a onetime sku.
func Dgapi2OneTime(ctx context.Context, s *testing.State) {
	p := s.FixtValue().(*dgapi2.FixtDgapiData)
	cr := p.Chrome
	testApp := p.TestApp

	// Shorten the working ctx so the faillog dump below still has time to run
	// against the original (cleanup) context if the test deadline is hit.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "Dgapi2OneTime")

	// As the tested app is stateful, we might observe purchases from the previously failed test runs.
	// Need to consume them, if available, before we proceed with the test.
	if err := testApp.TryConsumeOneTime(ctx); err != nil {
		s.Fatal("Failed to consume a onetime sku: ", err)
	}

	if err := testApp.PurchaseOneTime(ctx); err != nil {
		s.Fatal("Failed to purchase a onetime sku: ", err)
	}
}
package fileUtil

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// Tar archives the listed regular files (directories and special files are
// skipped) into a gzip-compressed tar file at targetPath.
//
// Parameters:
//   sourceList: paths of the files to archive
//   targetPath: path of the .tar.gz file to create
// Returns an error if any file cannot be read or the archive written.
func Tar(sourceList []string, targetPath string) error {
	targetFile, err := os.Create(targetPath)
	if err != nil {
		return err
	}
	// Writers must be closed innermost-first (tar, then gzip, then the file)
	// so buffered data is flushed. The original closed the file FIRST, which
	// discarded the gzip/tar trailers and produced truncated archives.
	// Deferred calls run LIFO, so declaring the defers in open order is correct.
	defer targetFile.Close()
	gzipWriter := gzip.NewWriter(targetFile)
	defer gzipWriter.Close()
	newWriter := tar.NewWriter(gzipWriter)
	defer newWriter.Close()

	// Walk the sources; addFileToTar closes each os.File per iteration
	// instead of deferring every close until Tar returns.
	for _, item := range sourceList {
		if err := addFileToTar(newWriter, item); err != nil {
			// TODO: consider recording the failing path and continuing instead
			// of abandoning the remaining files (noted in the original code).
			return err
		}
	}
	return nil
}

// addFileToTar writes one regular file into w. Directories and non-regular
// files (sockets, devices, ...) are skipped — the original returned nil from
// Tar on the first non-regular file, silently dropping everything after it.
func addFileToTar(w *tar.Writer, item string) error {
	fileInfo, err := os.Stat(item)
	if err != nil {
		return err
	}
	if fileInfo.IsDir() {
		return nil
	}
	if !fileInfo.Mode().IsRegular() {
		return nil
	}
	header, err := tar.FileInfoHeader(fileInfo, fileInfo.Name())
	if err != nil {
		return err
	}
	// Store only the base name, matching the original flat-archive layout.
	header.Name = filepath.Base(item)
	if err := w.WriteHeader(header); err != nil {
		return err
	}
	file, err := os.Open(item)
	if err != nil {
		return err
	}
	defer file.Close()
	_, err = io.Copy(w, file)
	return err
}

// UnTar extracts a gzip-compressed tar archive.
//
// Parameters:
//   sourceFilePath: path of the .tar.gz archive
//   tarFilePath:    directory to extract into (created when missing)
// Returns an error on any read/write failure or unsafe entry path.
func UnTar(sourceFilePath, tarFilePath string) error {
	// Create the destination directory when it does not exist.
	// (DirExists is declared elsewhere in this package.)
	if !DirExists(tarFilePath) {
		// os.ModeTemporary is a file-type bit, not a permission, and was a
		// meaningless addition in the original; plain ModePerm is intended.
		err := os.MkdirAll(tarFilePath, os.ModePerm)
		if err != nil {
			return err
		}
	}

	sourceFile, err := os.Open(sourceFilePath)
	if err != nil {
		return err
	}
	defer sourceFile.Close()
	newGzip, err := gzip.NewReader(sourceFile)
	if err != nil {
		return err
	}
	// Reader closes are order-insensitive, but keeping gzip before the file
	// close (LIFO) mirrors the open order.
	defer newGzip.Close()

	newTar := tar.NewReader(newGzip)
	cleanDest := filepath.Clean(tarFilePath)
	for {
		header, err := newTar.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		targetFilePath := filepath.Join(tarFilePath, header.Name)
		// Reject entries that would escape the destination ("zip slip"):
		// header.Name comes from untrusted archive data and may contain "..".
		if targetFilePath != cleanDest &&
			!strings.HasPrefix(targetFilePath, cleanDest+string(os.PathSeparator)) {
			return fmt.Errorf("illegal file path in archive: %s", header.Name)
		}
		info := header.FileInfo()
		if info.IsDir() {
			if err = os.MkdirAll(targetFilePath, info.Mode()); err != nil {
				return err
			}
			continue
		}
		// Write each entry via a helper so the file is closed per entry;
		// the original deferred every Close until UnTar returned, leaking
		// descriptors on large archives.
		if err := writeTarEntry(newTar, targetFilePath, info.Mode()); err != nil {
			return err
		}
	}
	return nil
}

// writeTarEntry copies the current archive entry into a freshly created file
// at path with the given mode, closing the file before returning.
func writeTarEntry(r io.Reader, path string, mode os.FileMode) error {
	file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, mode)
	if err != nil {
		return err
	}
	defer file.Close()
	_, err = io.Copy(file, r)
	return err
}
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tests

import (
	"math"
	"testing"
)

// Representative values for every integer type, including the extremes.
var (
	allBasicInts = []int{-1, 0, 1}
	allInt8s     = []int8{math.MinInt8, -1, 0, 1, math.MaxInt8}
	allInt16s    = []int16{math.MinInt16, -1, 0, 1, math.MaxInt16}
	allInt32s    = []int32{math.MinInt32, -1, 0, 1, math.MaxInt32}
	allInt64s    = []int64{math.MinInt64, -1, 0, 1, math.MaxInt64}

	allBasicUints = []uint{0, 1}
	allUintptrs   = []uintptr{0, 1, ^uintptr(0)}
	allUint8s     = []uint8{0, 1, math.MaxUint8}
	allUint16s    = []uint16{0, 1, math.MaxUint16}
	allUint32s    = []uint32{0, 1, math.MaxUint32}
	allUint64s    = []uint64{0, 1, math.MaxUint64}
)

// allInts flattens every signed case into one slice; flatten, pointersTo,
// interfacesTo and runTestCases are declared elsewhere in this package.
var allInts = flatten(
	allBasicInts,
	allInt8s,
	allInt16s,
	allInt32s,
	allInt64s,
)

var allUints = flatten(
	allBasicUints,
	allUintptrs,
	allUint8s,
	allUint16s,
	allUint32s,
	allUint64s,
)

// TestInt exercises all signed values directly, behind pointers, behind
// interfaces, and behind interfaces-to-pointers.
func TestInt(t *testing.T) {
	runTestCases(t, false, "plain", allInts)
	runTestCases(t, false, "pointers", pointersTo(allInts))
	runTestCases(t, false, "interfaces", interfacesTo(allInts))
	runTestCases(t, false, "interfacesTo", interfacesTo(pointersTo(allInts)))
}

// TestIntTruncation feeds out-of-range and in-range values to the
// truncatingIntN wrapper types (declared elsewhere).
// NOTE(review): the bool argument appears to be an expect-failure flag —
// the out-of-range group passes true; confirm against runTestCases.
func TestIntTruncation(t *testing.T) {
	runTestCases(t, true, "pass", []any{
		truncatingInt8{save: math.MinInt8 - 1},
		truncatingInt16{save: math.MinInt16 - 1},
		truncatingInt32{save: math.MinInt32 - 1},
		truncatingInt8{save: math.MaxInt8 + 1},
		truncatingInt16{save: math.MaxInt16 + 1},
		truncatingInt32{save: math.MaxInt32 + 1},
	})
	runTestCases(t, false, "fail", []any{
		truncatingInt8{save: 1},
		truncatingInt16{save: 1},
		truncatingInt32{save: 1},
	})
}

// TestUint mirrors TestInt for the unsigned types.
func TestUint(t *testing.T) {
	runTestCases(t, false, "plain", allUints)
	runTestCases(t, false, "pointers", pointersTo(allUints))
	runTestCases(t, false, "interfaces", interfacesTo(allUints))
	runTestCases(t, false, "interfacesTo", interfacesTo(pointersTo(allUints)))
}

// TestUintTruncation mirrors TestIntTruncation for the unsigned wrappers.
func TestUintTruncation(t *testing.T) {
	runTestCases(t, true, "pass", []any{
		truncatingUint8{save: math.MaxUint8 + 1},
		truncatingUint16{save: math.MaxUint16 + 1},
		truncatingUint32{save: math.MaxUint32 + 1},
	})
	runTestCases(t, false, "fail", []any{
		truncatingUint8{save: 1},
		truncatingUint16{save: 1},
		truncatingUint32{save: 1},
	})
}
package retry

import (
	"context"
	"testing"
	"time"
)

// immediateTimeAfter is a Clock.After stub that fires immediately so the
// tests never actually sleep. Clock and BackoffHandler are declared
// elsewhere in this package.
func immediateTimeAfter(time.Duration) <-chan time.Time {
	c := make(chan time.Time, 1)
	c <- time.Now()
	return c
}

// TestBackoffRetries verifies Backoff allows exactly MaxRetries attempts.
func TestBackoffRetries(t *testing.T) {
	// make backoff return immediately
	Clock.After = immediateTimeAfter
	ctx := context.Background()
	backoff := BackoffHandler{MaxRetries: 3}
	if !backoff.Backoff(ctx) {
		t.Fatalf("backoff failed immediately")
	}
	if !backoff.Backoff(ctx) {
		t.Fatalf("backoff failed after 1 retry")
	}
	if !backoff.Backoff(ctx) {
		t.Fatalf("backoff failed after 2 retry")
	}
	if backoff.Backoff(ctx) {
		t.Fatalf("backoff allowed after 3 (max) retries")
	}
}

// TestBackoffCancel verifies a cancelled context refuses further backoff.
func TestBackoffCancel(t *testing.T) {
	// prevent backoff from returning normally
	Clock.After = func(time.Duration) <-chan time.Time { return make(chan time.Time) }
	ctx, cancelFunc := context.WithCancel(context.Background())
	backoff := BackoffHandler{MaxRetries: 3}
	cancelFunc()
	if backoff.Backoff(ctx) {
		t.Fatalf("backoff allowed after cancel")
	}
	if _, ok := backoff.GetMaxBackoffDuration(ctx); ok {
		t.Fatalf("backoff allowed after cancel")
	}
}

// TestBackoffGracePeriod verifies the retry counter resets once the grace
// period has elapsed, and that the grace period is ignored afterwards.
func TestBackoffGracePeriod(t *testing.T) {
	currentTime := time.Now()
	// make Clock.Now return whatever we like
	Clock.Now = func() time.Time { return currentTime }
	// make backoff return immediately
	Clock.After = immediateTimeAfter
	ctx := context.Background()
	backoff := BackoffHandler{MaxRetries: 1}
	if !backoff.Backoff(ctx) {
		t.Fatalf("backoff failed immediately")
	}
	// the next call to Backoff would fail unless it's after the grace period
	gracePeriod := backoff.SetGracePeriod()
	// advance time to after the grace period, which at most will be 8 seconds, but we will advance +1 second.
	currentTime = currentTime.Add(gracePeriod + time.Second)
	if !backoff.Backoff(ctx) {
		t.Fatalf("backoff failed after the grace period expired")
	}
	// confirm we ignore grace period after backoff
	if backoff.Backoff(ctx) {
		t.Fatalf("backoff allowed after 1 (max) retry")
	}
}

// TestGetMaxBackoffDurationRetries verifies the ok result tracks the retry
// budget in lockstep with Backoff.
func TestGetMaxBackoffDurationRetries(t *testing.T) {
	// make backoff return immediately
	Clock.After = immediateTimeAfter
	ctx := context.Background()
	backoff := BackoffHandler{MaxRetries: 3}
	if _, ok := backoff.GetMaxBackoffDuration(ctx); !ok {
		t.Fatalf("backoff failed immediately")
	}
	backoff.Backoff(ctx) // noop
	if _, ok := backoff.GetMaxBackoffDuration(ctx); !ok {
		t.Fatalf("backoff failed after 1 retry")
	}
	backoff.Backoff(ctx) // noop
	if _, ok := backoff.GetMaxBackoffDuration(ctx); !ok {
		t.Fatalf("backoff failed after 2 retry")
	}
	backoff.Backoff(ctx) // noop
	if _, ok := backoff.GetMaxBackoffDuration(ctx); ok {
		t.Fatalf("backoff allowed after 3 (max) retries")
	}
	if backoff.Backoff(ctx) {
		t.Fatalf("backoff allowed after 3 (max) retries")
	}
}

// TestGetMaxBackoffDuration verifies the exponential 2/4/8 second ceiling.
func TestGetMaxBackoffDuration(t *testing.T) {
	// make backoff return immediately
	Clock.After = immediateTimeAfter
	ctx := context.Background()
	backoff := BackoffHandler{MaxRetries: 3}
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*2 {
		t.Fatalf("backoff (%s) didn't return < 2 seconds on first retry", duration)
	}
	backoff.Backoff(ctx) // noop
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*4 {
		t.Fatalf("backoff (%s) didn't return < 4 seconds on second retry", duration)
	}
	backoff.Backoff(ctx) // noop
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*8 {
		t.Fatalf("backoff (%s) didn't return < 8 seconds on third retry", duration)
	}
	backoff.Backoff(ctx) // noop
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); ok || duration != 0 {
		t.Fatalf("backoff (%s) didn't return 0 seconds on fourth retry (exceeding limit)", duration)
	}
}

// TestBackoffRetryForever verifies RetryForever keeps allowing retries past
// MaxRetries while the backoff duration stays capped.
func TestBackoffRetryForever(t *testing.T) {
	// make backoff return immediately
	Clock.After = immediateTimeAfter
	ctx := context.Background()
	backoff := BackoffHandler{MaxRetries: 3, RetryForever: true}
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*2 {
		t.Fatalf("backoff (%s) didn't return < 2 seconds on first retry", duration)
	}
	backoff.Backoff(ctx) // noop
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*4 {
		t.Fatalf("backoff (%s) didn't return < 4 seconds on second retry", duration)
	}
	backoff.Backoff(ctx) // noop
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*8 {
		t.Fatalf("backoff (%s) didn't return < 8 seconds on third retry", duration)
	}
	// "despite" (was "despire") and the messages below now state the 16-second
	// bound actually asserted, instead of claiming "8 seconds".
	if !backoff.Backoff(ctx) {
		t.Fatalf("backoff refused on fourth retry despite RetryForever")
	}
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*16 {
		t.Fatalf("backoff (%s) didn't return < 16 seconds on fourth retry", duration)
	}
	if !backoff.Backoff(ctx) {
		t.Fatalf("backoff refused on fifth retry despite RetryForever")
	}
	if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*16 {
		t.Fatalf("backoff (%s) didn't return < 16 seconds on fifth retry", duration)
	}
}
package services import ( "github.com/devmaufh/golang-api-rest/models" ) //ModuleServiceInteface build and interface for access to service implementation type ModuleServiceInteface interface { Save(models.Module) models.Module FindAll() []models.Module } type moduleService struct { modules []models.Module } //New creates new ModuleServiceInteface func New() ModuleServiceInteface { return &moduleService{ modules: []models.Module{}, } } func (service *moduleService) Save(module models.Module) models.Module { service.modules = append(service.modules, module) return module } func (service *moduleService) FindAll() []models.Module { return service.modules }
// +build linux

package smnet

import (
	"bufio"
	"fmt"
	"github.com/safchain/ethtool"
	"log"
	"net"
	"os/exec"
	"regexp"
	"strconv"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/inputs/system"
)

// SMNetIOStats is a telegraf input plugin collecting per-interface network
// metrics. (original author tag: zhaojyun smnet)
type SMNetIOStats struct {
	filter filter.Filter // compiled from Interfaces on first Gather
	ps     system.PS     // system stats provider

	skipChecks          bool
	IgnoreProtocolStats bool
	Interfaces          []string // explicit interface list; empty means "all up, non-loopback"
}

// SMIORunStatus holds an interface's link state and maximum speed.
// (translated: interface max speed and run status — zhaojianyun)
type SMIORunStatus struct {
	RunStatus uint32
	Speed     uint64
}

// SMIODiyInfo bundles address/configuration details for one interface.
type SMIODiyInfo struct {
	ip          string
	mask        string
	gateway     string
	adminStatus uint32
}

func (_ *SMNetIOStats) Description() string {
	return "Read metrics about network interface usage"
}

var smNetSampleConfig = `
  ## By default, telegraf gathers stats from any up interface (excluding loopback)
  ## Setting interfaces will tell it to gather these explicit interfaces,
  ## regardless of status.
  ##
  # interfaces = ["eth0"]
  ##
  ## On linux systems telegraf also collects protocol stats.
  ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
  ##
  # ignore_protocol_stats = false
  ##
`

func (_ *SMNetIOStats) SampleConfig() string {
	return smNetSampleConfig
}

// Gather collects one round of per-interface counters plus address, gateway
// and link information, and adds them to acc as the "smnet" measurement.
func (s *SMNetIOStats) Gather(acc telegraf.Accumulator) error {
	netio, err := s.ps.NetIO()
	if err != nil {
		return fmt.Errorf("error getting net io info: %s", err)
	}

	// Compile the interface-name filter once, on first use.
	if s.filter == nil {
		if s.filter, err = filter.Compile(s.Interfaces); err != nil {
			return fmt.Errorf("error compiling filter: %s", err)
		}
	}

	interfaces, err := net.Interfaces()
	if err != nil {
		return fmt.Errorf("error getting list of interfaces: %s", err)
	}
	// Index interfaces by name (translated from: 网络接口map).
	interfacesByName := map[string]net.Interface{}
	for _, iface := range interfaces {
		interfacesByName[iface.Name] = iface
	}
	// Gateway per interface name (translated from: 获取网关信息).
	gateways := ReadGateways()

	for _, io := range netio {
		if len(s.Interfaces) != 0 {
			// Explicit list configured: keep only matching names.
			var found bool
			if s.filter.Match(io.Name) {
				found = true
			}
			if !found {
				continue
			}
		} else if !s.skipChecks {
			// Default mode: skip unknown, loopback and down interfaces.
			iface, ok := interfacesByName[io.Name]
			if !ok {
				continue
			}
			if iface.Flags&net.FlagLoopback == net.FlagLoopback {
				continue
			}
			if iface.Flags&net.FlagUp == 0 {
				continue
			}
		}
		tags := map[string]string{
			"interface": io.Name,
		}
		// NOTE(review): with an explicit interface list, io.Name may be absent
		// from interfacesByName and tiface is then the zero net.Interface —
		// confirm this is acceptable for the fields derived from it below.
		tiface, _ := interfacesByName[io.Name]
		// First IPv4 address and dotted-quad netmask.
		ip, mask, _ := ParseIPMask(tiface)
		// Administrative status: 1 when the interface is configured up
		// (net.FlagUp is bit 0, so masking yields exactly 1 when set).
		var adminStatus uint32
		if tiface.Flags&net.FlagUp == 1 {
			adminStatus = 1
		}
		// Gateway for this interface; "---" when none is known.
		gateway, ok := gateways[io.Name]
		if !ok {
			gateway = "---"
		}
		var ioDiyInfo SMIODiyInfo
		ioDiyInfo.ip = ip
		ioDiyInfo.mask = mask
		ioDiyInfo.gateway = gateway
		ioDiyInfo.adminStatus = adminStatus
		// Link state and negotiated speed via ethtool.
		instates := ReadRunStatus(io.Name)

		fields := map[string]interface{}{
			"index":        tiface.Index,
			"mtu":          tiface.MTU,
			"speed":        instates.Speed,
			"ip":           ioDiyInfo.ip,
			"net_mask":     ioDiyInfo.mask,
			"gateway":      ioDiyInfo.gateway,
			"mac":          tiface.HardwareAddr.String(),
			"admin_status": ioDiyInfo.adminStatus,
			"run_status":   instates.RunStatus,
			"bytes_sent":   io.BytesSent,
			"bytes_recv":   io.BytesRecv,
			"packets_sent": io.PacketsSent,
			"packets_recv": io.PacketsRecv,
			"err_in":       io.Errin,
			"err_out":      io.Errout,
			"drop_in":      io.Dropin,
			"drop_out":     io.Dropout,
		}
		acc.AddCounter("smnet", fields, tags)
	}

	return nil
}

// init registers this plugin with telegraf under the name "smnet".
func init() {
	inputs.Add("smnet", func() telegraf.Input {
		return &SMNetIOStats{ps: system.NewSystemPS()}
	})
}

/*
 * ParseIPMask derives the first IPv4 address and its dotted-quad netmask
 * from the interface's address list.
 * Returns: IP address and mask ("--"/"--" when no IPv4 address is present).
 */
func ParseIPMask(iface net.Interface) (string, string, error) {
	adds, err := iface.Addrs()
	if err != nil {
		// NOTE(review): log.Fatal exits the process, making the return below
		// unreachable — this was probably meant to be a log + return err.
		log.Fatal("get network addr failed: ", err)
		return "", "", err
	}
	ipv4 := "--"
	mask := "--"
	for _, ip := range adds {
		// Only dotted (IPv4) addresses are considered.
		if strings.Contains(ip.String(), ".") {
			_, ipNet, err := net.ParseCIDR(ip.String())
			if err != nil {
				fmt.Println(err)
			}
			// Render the mask bytes as dotted decimal.
			val := make([]byte, len(ipNet.Mask))
			copy(val, ipNet.Mask)
			var s []string
			for _, i := range val[:] {
				s = append(s, strconv.Itoa(int(i)))
			}
			// Strip the "/prefix" suffix from the CIDR form.
			ipv4 = ip.String()[:strings.Index(ip.String(), "/")]
			mask = strings.Join(s, ".")
			break
		}
	}
	return ipv4, mask, nil
}

/*
 * deleteExtraSpace collapses whitespace: tabs become spaces, then any run of
 * two or more spaces is reduced to a single space.
 * Param:  s — original string.
 * Return: the string with extra whitespace removed.
 * (translated from the original Chinese header comment)
 */
func deleteExtraSpace(s string) string {
	// Replace tabs with spaces so the regex below only deals with spaces.
	s1 := strings.Replace(s, "\t", " ", -1)
	regstr := "\\s{2,}"              // two or more whitespace characters
	reg, _ := regexp.Compile(regstr) // compile the pattern
	s2 := make([]byte, len(s1))      // working byte slice
	copy(s2, s1)                     // copy the string into the slice
	spc_index := reg.FindStringIndex(string(s2)) // locate the first run
	for len(spc_index) > 0 {
		// Keep one space of the run, drop the rest, then search again.
		s2 = append(s2[:spc_index[0]+1], s2[spc_index[1]:]...)
		spc_index = reg.FindStringIndex(string(s2))
	}
	return string(s2)
}

/*
 * ReadGateways reads the kernel routing table via `route -n`.
 * Returns: map[interface name]gateway address (first route per interface wins).
 */
func ReadGateways() map[string]string {
	cmd := exec.Command("route", "-n")
	// Obtain the command's stdout pipe.
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		fmt.Printf("Error:can not obtain stdout pipe for command:%s\n", err)
	}
	// Start the command.
	if err := cmd.Start(); err != nil {
		fmt.Println("Error:The command is err,", err)
	}
	// Buffered reader over the pipe.
	outputBuf := bufio.NewReader(stdout)
	var gateways map[string]string
	/* allocate the result map */
	gateways = make(map[string]string)
	var i int
	for {
		// Read one line; the middle result (line-truncation flag) is ignored.
		output, _, err := outputBuf.ReadLine()
		if err != nil {
			// NOTE(review): EOF detected by string comparison; errors.Is /
			// err == io.EOF would be more robust.
			if err.Error() != "EOF" {
				fmt.Printf("Error :%s\n", err)
			}
			break
		}
		// Skip the two header lines printed by `route -n`.
		if i < 2 {
			i++
			continue
		}
		tempgate := strings.Split(deleteExtraSpace(string(output)), " ")
		// A full route line has 8 columns; column 7 is the interface,
		// column 1 the gateway. Keep only the first route per interface.
		if len(tempgate) == 8 {
			_, ok := gateways[tempgate[7]]
			if ok {
				continue
			}
			gateways[tempgate[7]] = tempgate[1]
		}
	}
	// Wait blocks until the command has fully exited.
	if err := cmd.Wait(); err != nil {
		fmt.Println("wait:", err.Error())
	}
	return gateways
}

/*
 * ReadRunStatus returns the link state and speed of ifacename via ethtool.
 * Returns: SMIORunStatus (zero fields when a query fails).
 */
func ReadRunStatus(ifacename string) SMIORunStatus {
	// Open an ethtool handle.
	ethHandle, err := ethtool.NewEthtool()
	if err != nil {
		// NOTE(review): panicking here takes down the whole agent — consider
		// returning a zero SMIORunStatus instead.
		panic(err.Error())
	}
	defer ethHandle.Close()
	var instates SMIORunStatus
	// Link (carrier) state; left at zero on error.
	stats, err := ethHandle.LinkState(ifacename)
	if err == nil {
		instates.RunStatus = stats
	}
	// Speed; 4294967295 (^uint32(0)) is the driver's "unknown" marker.
	result, err := ethHandle.CmdGetMapped(ifacename)
	if err == nil {
		speed, ok := result["speed"]
		if ok && speed != 4294967295 {
			instates.Speed = speed
		}
	}
	return instates
}
package cli

import (
	"fmt"
	"os"
	"runtime"
	"strings"
	"testing"

	"github.com/alessio/shellescape"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/model"
	"github.com/tilt-dev/wmclient/pkg/analytics"
)

// TestArgsClear verifies `args --clear` empties the Tiltfile args and
// records a "clear" analytics event. newServerFixture and newArgsCmd are
// declared elsewhere in this package.
func TestArgsClear(t *testing.T) {
	f := newServerFixture(t)

	createTiltfile(f, []string{"foo", "bar"})

	cmd := newArgsCmd(genericclioptions.NewTestIOStreamsDiscard())
	c := cmd.register()
	err := c.Flags().Parse([]string{"--clear"})
	require.NoError(t, err)
	err = cmd.run(f.ctx, c.Flags().Args())
	require.NoError(t, err)

	require.Equal(t, 0, len(getTiltfile(f).Spec.Args))

	require.Equal(t, []analytics.CountEvent{
		{Name: "cmd.args", Tags: map[string]string{"clear": "true"}, N: 1},
	}, f.analytics.Counts)
}

// TestArgsNewValue verifies positional args after "--" replace the stored
// args and record a "set" analytics event.
func TestArgsNewValue(t *testing.T) {
	f := newServerFixture(t)

	createTiltfile(f, []string{"foo", "bar"})

	cmd := newArgsCmd(genericclioptions.NewTestIOStreamsDiscard())
	c := cmd.register()
	err := c.Flags().Parse([]string{"--", "--foo", "bar"})
	require.NoError(t, err)
	err = cmd.run(f.ctx, c.Flags().Args())
	require.NoError(t, err)

	require.Equal(t, []string{"--foo", "bar"}, getTiltfile(f).Spec.Args)

	require.Equal(t, []analytics.CountEvent{
		{Name: "cmd.args", Tags: map[string]string{"set": "true"}, N: 1},
	}, f.analytics.Counts)
}

// TestArgsClearAndNewValue verifies --clear combined with new values is
// rejected.
func TestArgsClearAndNewValue(t *testing.T) {
	f := newServerFixture(t)

	createTiltfile(f, []string{"foo", "bar"})

	cmd := newArgsCmd(genericclioptions.NewTestIOStreamsDiscard())
	c := cmd.register()
	err := c.Flags().Parse([]string{"--clear", "--", "--foo", "bar"})
	require.NoError(t, err)
	err = cmd.run(f.ctx, c.Flags().Args())
	require.Error(t, err)
	require.Contains(t, err.Error(), "--clear cannot be specified with other values")
}

// TestArgsNoChange verifies that passing the already-current args is a no-op
// that warns on stderr.
func TestArgsNoChange(t *testing.T) {
	f := newServerFixture(t)

	createTiltfile(f, []string{"foo", "bar"})

	streams, _, _, errOut := genericclioptions.NewTestIOStreams()
	cmd := newArgsCmd(streams)
	c := cmd.register()
	err := c.Flags().Parse([]string{"foo", "bar"})
	require.NoError(t, err)
	err = cmd.run(f.ctx, c.Flags().Args())
	require.NoError(t, err)

	require.Contains(t, errOut.String(), "no action taken")
}

// TestArgsEdit runs the interactive edit flow by pointing $EDITOR at a
// script that writes a fixed payload, then checks how the edited contents
// are parsed into args (or rejected).
func TestArgsEdit(t *testing.T) {
	// editorForString returns an EDITOR command line that replaces the edited
	// file's contents with `contents`.
	editorForString := func(contents string) string {
		switch runtime.GOOS {
		case "windows":
			// This is trying to minimize windows weirdness:
			// 1. If EDITOR includes a ` ` and a `\`, then the editor library will prepend a cmd /c,
			//    but then pass the whole $EDITOR as a single element of argv, while cmd /c
			//    seems to want everything as separate argvs. Since we're on Windows, any paths
			//    we get will have a `\`.
			// 2. Windows' echo gave surprising quoting behavior that I didn't take the time to understand.
			// So: generate one txt file that contains the desired contents and one bat file that
			// simply writes the txt file to the first arg, so that the EDITOR we pass to the editor library
			// has no spaces or quotes.
			argFile, err := os.CreateTemp(t.TempDir(), "newargs*.txt")
			require.NoError(t, err)
			_, err = argFile.WriteString(contents)
			require.NoError(t, err)
			require.NoError(t, argFile.Close())
			f, err := os.CreateTemp(t.TempDir(), "writeargs*.bat")
			require.NoError(t, err)
			_, err = f.WriteString(fmt.Sprintf(`type %s > %%1`, argFile.Name()))
			require.NoError(t, err)
			err = f.Close()
			require.NoError(t, err)
			return f.Name()
		default:
			return fmt.Sprintf("echo %s >", shellescape.Quote(contents))
		}
	}

	for _, tc := range []struct {
		name          string
		contents      string
		expectedArgs  []string
		expectedError string
	}{
		{"simple", "baz quu", []string{"baz", "quu"}, ""},
		{"quotes", "baz 'quu quz'", []string{"baz", "quu quz"}, ""},
		{"comments ignored", " # test comment\n1 2\n # second test comment", []string{"1", "2"}, ""},
		{"parse error", "baz 'quu", nil, "Unterminated single-quoted string"},
		{"only comments", "# these are the tilt args", nil, "must have exactly one non-comment line, found zero. If you want to clear the args, use `tilt args --clear`"},
		{"multiple lines", "foo\nbar\n", nil, "cannot have multiple non-comment lines"},
		{"empty lines ignored", "1 2\n\n\n", []string{"1", "2"}, ""},
		{"dashes", "--foo --bar", []string{"--foo", "--bar"}, ""},
		{"quoted hash", "1 '2 # not a comment'", []string{"1", "2 # not a comment"}, ""},
		// TODO - fix comment parsing so the below passes
		// {"mid-line comment", "1 2 # comment", []string{"1", "2"}, ""},
	} {
		t.Run(tc.name, func(t *testing.T) {
			f := newServerFixture(t)

			// Swap in the scripted EDITOR and restore the original afterwards.
			origEditor := os.Getenv("EDITOR")
			contents := tc.contents
			if runtime.GOOS == "windows" {
				contents = strings.ReplaceAll(contents, "\n", "\r\n")
			}
			err := os.Setenv("EDITOR", editorForString(contents))
			require.NoError(t, err)
			defer func() {
				err := os.Setenv("EDITOR", origEditor)
				require.NoError(t, err)
			}()

			originalArgs := []string{"foo", "bar"}
			createTiltfile(f, originalArgs)

			cmd := newArgsCmd(genericclioptions.NewTestIOStreamsDiscard())
			c := cmd.register()
			err = c.Flags().Parse(nil)
			require.NoError(t, err)
			err = cmd.run(f.ctx, c.Flags().Args())
			if tc.expectedError != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.expectedError)
			} else {
				require.NoError(t, err)
			}

			// On error the original args must be untouched.
			expectedArgs := originalArgs
			if tc.expectedArgs != nil {
				expectedArgs = tc.expectedArgs
			}
			require.Equal(t, expectedArgs, getTiltfile(f).Spec.Args)

			// An "edit" analytics event is recorded only on success.
			var expectedCounts []analytics.CountEvent
			if tc.expectedError == "" {
				expectedCounts = []analytics.CountEvent{
					{Name: "cmd.args", Tags: map[string]string{"edit": "true"}, N: 1},
				}
			}
			require.Equal(t, expectedCounts, f.analytics.Counts)
		})
	}
}

// createTiltfile stores a main Tiltfile object with the given args in the
// fixture's API server.
func createTiltfile(f *serverFixture, args []string) {
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: model.MainTiltfileManifestName.String(),
		},
		Spec:   v1alpha1.TiltfileSpec{Args: args},
		Status: v1alpha1.TiltfileStatus{},
	}
	err := f.client.Create(f.ctx, &tf)
	require.NoError(f.T(), err)
}

// getTiltfile fetches the main Tiltfile object back from the API server.
func getTiltfile(f *serverFixture) *v1alpha1.Tiltfile {
	var tf v1alpha1.Tiltfile
	err := f.client.Get(f.ctx, types.NamespacedName{Name: model.MainTiltfileManifestName.String()}, &tf)
	require.NoError(f.T(), err)
	return &tf
}
package main

import (
	"fmt"
	"log"
	"os"
)

// main writes a small index.jsp greeting page to the current directory,
// interpolating the "name" environment variable into the HTML.
func main() {
	file, err := os.Create("index.jsp")
	if err != nil {
		// ": " separator so the error is not fused onto the message text.
		log.Fatal("Cannot create file: ", err)
	}
	defer file.Close()

	v := os.Getenv("name")
	s := `<html>
<body>
<h2>Hello ` + v + ` the Server x2 is running!</h2>
<h1>Test to put the artifact on Bitbucket repository </h1>
</body>
</html>`
	// Fprint, not Fprintf: s is data, not a format string. With Fprintf, a
	// "%" in the name env var would be misread as a formatting verb
	// (flagged by `go vet`'s printf check).
	fmt.Fprint(file, s)
}
package app

import (
	"fmt"

	"go.uber.org/dig"
)

type (
	// @ctor
	SomeStruct struct{}

	// @ctor
	SomeInterface interface{}
)

// Start the application which invoked from main() function in cmd package.
func Start(di *dig.Container, text string) {
	// "text" is provided by dependency-injection
	fmt.Println(text)

	// Learn more: https://godoc.org/go.uber.org/dig#hdr-Named_Values
	type parameter struct {
		dig.In
		Text string `name:"typical"`
	}

	// Invoke another function. The error was previously discarded, which
	// silently hid missing-dependency failures from dig.
	if err := di.Invoke(func(p parameter) {
		fmt.Println(p.Text)
	}); err != nil {
		fmt.Println("invoke failed:", err)
	}
}

// Stop the application which invoked gracefully when the application stop or received exit signal
func Stop() {
	fmt.Println("Bye")
}

// HelloWorld text
// @ctor
func HelloWorld() string {
	return "Hello World"
}

// HelloTypical text
// @ctor (name:"typical")
func HelloTypical() string {
	return "Hello Typical"
}
package template_validator import ( "fmt" admission "k8s.io/api/admissionregistration/v1" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" lifecycleapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api" "sigs.k8s.io/controller-runtime/pkg/client" "kubevirt.io/ssp-operator/internal/common" "kubevirt.io/ssp-operator/internal/operands" ) // Define RBAC rules needed by this operand: // +kubebuilder:rbac:groups=core,resources=services;serviceaccounts,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=validatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete // RBAC for created roles // +kubebuilder:rbac:groups=template.openshift.io,resources=templates,verbs=get;list;watch // +kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch type templateValidator struct{} func (t *templateValidator) Name() string { return operandName } func (t *templateValidator) AddWatchTypesToScheme(*runtime.Scheme) error { return nil } func (t *templateValidator) WatchTypes() []client.Object { return []client.Object{ &v1.ServiceAccount{}, &v1.Service{}, &apps.Deployment{}, } } func (t *templateValidator) WatchClusterTypes() []client.Object { return []client.Object{ &rbac.ClusterRole{}, &rbac.ClusterRoleBinding{}, &admission.ValidatingWebhookConfiguration{}, } } func (t *templateValidator) Reconcile(request *common.Request) ([]common.ResourceStatus, error) { return common.CollectResourceStatus(request, reconcileClusterRole, reconcileServiceAccount, reconcileClusterRoleBinding, reconcileService, reconcileDeployment, 
reconcileValidatingWebhook, ) } func (t *templateValidator) Cleanup(request *common.Request) error { for _, obj := range []client.Object{ newClusterRole(), newClusterRoleBinding(request.Namespace), newValidatingWebhook(request.Namespace), } { err := request.Client.Delete(request.Context, obj) if err != nil && !errors.IsNotFound(err) { request.Logger.Error(err, fmt.Sprintf("Error deleting \"%s\": %s", obj.GetName(), err)) return err } } return nil } var _ operands.Operand = &templateValidator{} func GetOperand() operands.Operand { return &templateValidator{} } const ( operandName = "template-validator" operandComponent = common.AppComponentTemplating ) func reconcileClusterRole(request *common.Request) (common.ResourceStatus, error) { return common.CreateOrUpdate(request). ClusterResource(newClusterRole()). WithAppLabels(operandName, operandComponent). UpdateFunc(func(newRes, foundRes client.Object) { foundRes.(*rbac.ClusterRole).Rules = newRes.(*rbac.ClusterRole).Rules }). Reconcile() } func reconcileServiceAccount(request *common.Request) (common.ResourceStatus, error) { return common.CreateOrUpdate(request). NamespacedResource(newServiceAccount(request.Namespace)). WithAppLabels(operandName, operandComponent). Reconcile() } func reconcileClusterRoleBinding(request *common.Request) (common.ResourceStatus, error) { return common.CreateOrUpdate(request). ClusterResource(newClusterRoleBinding(request.Namespace)). WithAppLabels(operandName, operandComponent). UpdateFunc(func(newRes, foundRes client.Object) { newBinding := newRes.(*rbac.ClusterRoleBinding) foundBinding := foundRes.(*rbac.ClusterRoleBinding) foundBinding.RoleRef = newBinding.RoleRef foundBinding.Subjects = newBinding.Subjects }). Reconcile() } func reconcileService(request *common.Request) (common.ResourceStatus, error) { return common.CreateOrUpdate(request). NamespacedResource(newService(request.Namespace)). WithAppLabels(operandName, operandComponent). 
UpdateFunc(func(newRes, foundRes client.Object) { newService := newRes.(*v1.Service) foundService := foundRes.(*v1.Service) // ClusterIP should not be updated newService.Spec.ClusterIP = foundService.Spec.ClusterIP foundService.Spec = newService.Spec }). Reconcile() } func reconcileDeployment(request *common.Request) (common.ResourceStatus, error) { validatorSpec := request.Instance.Spec.TemplateValidator image := getTemplateValidatorImage() if image == "" { panic("Cannot reconcile without valid image name") } deployment := newDeployment(request.Namespace, *validatorSpec.Replicas, image) addPlacementFields(deployment, validatorSpec.Placement) return common.CreateOrUpdate(request). NamespacedResource(deployment). WithAppLabels(operandName, operandComponent). UpdateFunc(func(newRes, foundRes client.Object) { foundRes.(*apps.Deployment).Spec = newRes.(*apps.Deployment).Spec }). StatusFunc(func(res client.Object) common.ResourceStatus { dep := res.(*apps.Deployment) status := common.ResourceStatus{} if *validatorSpec.Replicas > 0 && dep.Status.AvailableReplicas == 0 { msg := fmt.Sprintf("No validator pods are running. Expected: %d", dep.Status.Replicas) status.NotAvailable = &msg } if dep.Status.AvailableReplicas != *validatorSpec.Replicas { msg := fmt.Sprintf( "Not all template validator pods are running. Expected: %d, running: %d", *validatorSpec.Replicas, dep.Status.AvailableReplicas, ) status.Progressing = &msg status.Degraded = &msg } return status }). Reconcile() } func addPlacementFields(deployment *apps.Deployment, nodePlacement *lifecycleapi.NodePlacement) { if nodePlacement == nil { return } podSpec := &deployment.Spec.Template.Spec podSpec.Affinity = nodePlacement.Affinity podSpec.NodeSelector = nodePlacement.NodeSelector podSpec.Tolerations = nodePlacement.Tolerations } func reconcileValidatingWebhook(request *common.Request) (common.ResourceStatus, error) { return common.CreateOrUpdate(request). ClusterResource(newValidatingWebhook(request.Namespace)). 
WithAppLabels(operandName, operandComponent). UpdateFunc(func(newRes, foundRes client.Object) { newWebhookConf := newRes.(*admission.ValidatingWebhookConfiguration) foundWebhookConf := foundRes.(*admission.ValidatingWebhookConfiguration) // Copy CA Bundle from the found webhook, // so it will not be overwritten copyFoundCaBundles(newWebhookConf.Webhooks, foundWebhookConf.Webhooks) foundWebhookConf.Webhooks = newWebhookConf.Webhooks }). Reconcile() } func copyFoundCaBundles(newWebhooks []admission.ValidatingWebhook, foundWebhooks []admission.ValidatingWebhook) { for i := range newWebhooks { newWebhook := &newWebhooks[i] for j := range foundWebhooks { foundWebhook := &foundWebhooks[j] if newWebhook.Name == foundWebhook.Name { newWebhook.ClientConfig.CABundle = foundWebhook.ClientConfig.CABundle break } } } }
package main

import (
	"encoding/gob"
	"fmt"
	"net"
	"os"
)

// Person is the value round-tripped with the server over gob.
type Person struct {
	Name  Name
	Email []Email
}

// Name holds a family and a given name.
type Name struct {
	Family string
	Last   string
}

// Email is one labelled address (e.g. "home", "work").
type Email struct {
	Kind    string
	Address string
}

// String renders the person as "Last Family" followed by one line per address.
func (p Person) String() string {
	s := p.Name.Last + " " + p.Name.Family
	for _, v := range p.Email {
		s += "\n" + v.Kind + ":" + v.Address
	}
	return s
}

// main connects to localhost:1200, sends a Person over gob ten times,
// and prints each echoed copy.
func main() {
	person := Person{
		Name: Name{
			Family: "Smith",
			Last:   "John",
		},
		Email: []Email{
			{
				Kind:    "home",
				Address: "john@home.org",
			},
			{
				Kind:    "work",
				Address: "john@gmail.com",
			},
		},
	}
	svc := "localhost:1200"
	conn, err := net.Dial("tcp", svc)
	checkErr(err)

	encoder := gob.NewEncoder(conn)
	decoder := gob.NewDecoder(conn)
	for n := 0; n < 10; n++ {
		// BUG FIX: encode/decode errors were silently ignored, making a
		// dropped connection look like an endless stream of empty Persons.
		checkErr(encoder.Encode(person))
		var newPerson Person
		checkErr(decoder.Decode(&newPerson))
		fmt.Println(n, " ", newPerson.String())
	}
	// BUG FIX: the success path previously called os.Exit(1), reporting
	// failure to the shell; returning normally exits with status 0.
}

// checkErr prints err and exits with a failure status when err is non-nil.
func checkErr(err error) {
	if err != nil {
		fmt.Print("Fatal error ", err.Error())
		os.Exit(1)
	}
}
package stack // simpleStack implements a non-thread safe Stack. type simpleStack[T any] []T // newSimpleStack returns a new non-thread safe Stack. func newSimpleStack[T any]() *simpleStack[T] { return new(simpleStack[T]) } // Push pushes an element onto the top of this Stack. func (s *simpleStack[T]) Push(element T) { *s = append(*s, element) } // Pop removes and returns the top element of this Stack. func (s *simpleStack[T]) Pop() (value T, exists bool) { if s.IsEmpty() { return value, false } index := len(*s) - 1 element := (*s)[index] *s = (*s)[:index] return element, true } // Peek returns the top element of this Stack without removing it. func (s *simpleStack[T]) Peek() (value T, exists bool) { if (*s).IsEmpty() { return value, false } return (*s)[len(*s)-1], true } // Clear removes all elements from this Stack. func (s *simpleStack[T]) Clear() { *s = (*s)[:0] } // Size returns the amount of elements in this Stack. func (s *simpleStack[T]) Size() int { return len(*s) } // IsEmpty checks if this Stack is empty. func (s *simpleStack[T]) IsEmpty() bool { return len(*s) == 0 } // code contract - make sure the type implements the interface. var _ Stack[int] = &simpleStack[int]{}
package main

import (
	"strings"
	"testing"
)

// testData is the sample coordinate list from the puzzle description.
const testData = `1, 1
1, 6
8, 3
3, 4
5, 5
8, 9`

// TestTask1 verifies the largest finite area for the sample input.
func TestTask1(t *testing.T) {
	lines := strings.Split(testData, "\n")
	if got := calculateLargestArea(loadData(lines)); got != 17 {
		t.Fail()
	}
}

// TestTask2 verifies the size of the region within total distance 32.
func TestTask2(t *testing.T) {
	lines := strings.Split(testData, "\n")
	if got := calculateAreaWithMaxConcentration(loadData(lines), 32); got != 16 {
		t.Fail()
	}
}
package balancer

import (
	"github.com/fufuok/load-balancer/internal/doublejump"
	"github.com/fufuok/load-balancer/utils"
)

// JumpConsistentHash
// consistentHash picks an item by hashing the caller's key onto a
// doublejump (jump consistent hash) structure built from the choices.
type consistentHash struct {
	count int              // number of choices currently registered
	h     *doublejump.Hash // hash structure holding *Choice entries
}

// NewConsistentHash builds a consistentHash balancer pre-populated with choices.
func NewConsistentHash(choices ...*Choice) (lb *consistentHash) {
	lb = &consistentHash{}
	lb.Update(choices)
	return
}

// Select returns the item owned by the choice the key hashes to, or nil
// when no choices are registered.
// NOTE(review): assumes every node in h is a *Choice; the type assertion
// panics otherwise — confirm h is only ever populated via Update.
func (b *consistentHash) Select(key ...string) (item interface{}) {
	if b.count == 0 {
		return
	}
	hash := utils.HashString(key...)
	return b.h.Get(hash).(*Choice).Item
}

// Name identifies this balancing strategy.
func (b *consistentHash) Name() string {
	return "ConsistentHash"
}

// Update replaces the hash contents with choices and always reports success.
func (b *consistentHash) Update(choices []*Choice) bool {
	b.count = len(choices)
	b.h = doublejump.NewHash()
	for i := range choices {
		b.h.Add(choices[i])
	}
	return true
}
package main

import "fmt"

// Extreme int values used as the initial (unbounded) BST limits.
const INT_MAX = int(^uint(0) >> 1)
const INT_MIN = ^INT_MAX

// isValid reports whether the whole tree satisfies the BST invariant:
// every node lies within the bounds imposed by its ancestors.
func (t *BST) isValid() bool {
	if t.root != nil {
		return t.root.left._isValid(INT_MIN, t.root.data) && t.root.right._isValid(t.root.data, INT_MAX)
	}
	// empty tree is valid, right?
	return true
}

// _isValid recursively checks that every node under n lies in [min, max],
// tightening the bound on the side it descends.
func (n *Node) _isValid(min, max int) bool {
	if n != nil {
		fmt.Println("isValid called on", n.data, "min", min, "max", max)
		// check within boundary
		if n.data < min || n.data > max {
			fmt.Println("!!! illegal node", n.data, "!!!")
			return false
		}
		// set new min max at current node
		return n.left._isValid(min, n.data) && n.right._isValid(n.data, max)
	}
	// reached end of tree branch and no premature returns
	// means this branch is valid
	return true
}

// helper func for making invalid BSTs
// get returns the node holding data, or nil when absent.
func (t *BST) get(data int) *Node {
	return t.root._get(data)
}

// _get walks the tree using BST ordering to locate data.
func (n *Node) _get(data int) *Node {
	if n != nil {
		if n.data == data {
			return n
		} else if data > n.data {
			return n.right._get(data)
		} else if data < n.data {
			return n.left._get(data)
		}
	}
	return nil
}

// main builds several valid trees, then corrupts copies of them by grafting
// out-of-range nodes directly, and prints each validity verdict.
func main() {
	var valid BST
	valid.insert(10)
	valid.insert(5)
	valid.insert(20)
	valid.insert(15)
	valid.print()
	fmt.Println(valid.isValid())

	// Corrupt a copy: 15 under 5 violates the upper bound inherited from 10.
	invalid := valid
	invalid.get(20).left = nil
	invalid.get(5).right = &Node{data: 15}
	invalid.print()
	fmt.Println(invalid.isValid())

	var valid2 BST
	valid2.insert(10)
	valid2.insert(0)
	valid2.insert(12)
	valid2.insert(-1)
	valid2.insert(4)
	valid2.insert(11)
	valid2.insert(20)
	valid2.insert(-20)
	valid2.print()
	fmt.Println(valid2.isValid())

	// 9 under 11 violates the lower bound inherited from 10.
	invalid2 := valid2
	invalid2.get(11).left = &Node{data: 9}
	invalid2.print()
	fmt.Println(invalid2.isValid())

	// 3 grafted under 5 violates the bound chain 4 < x < 5.
	invalid3 := valid2
	invalid3.insert(7)
	invalid3.insert(5)
	invalid3.get(5).left = &Node{data: 3}
	invalid3.print()
	fmt.Println(invalid3.isValid())
}
package storage

import (
	"fmt"
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/sqlite"
	"keysiron/config"
)

// connectMySQL opens a gorm MySQL connection built from the global config.
// NOTE(review): only the sqlite dialect is imported above; gorm.Open("mysql", ...)
// will fail at runtime unless the mysql dialect is registered elsewhere — confirm.
func connectMySQL() (db *gorm.DB, err error) {
	var connStr = fmt.Sprintf(
		"%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=True&loc=Local",
		config.KeysironConfigVar.Database.User,
		config.KeysironConfigVar.Database.Password,
		config.KeysironConfigVar.Database.Host,
		config.KeysironConfigVar.Database.Port,
		config.KeysironConfigVar.Database.Name,
	)
	return gorm.Open("mysql", connStr)
}

// connectSqlite opens (or creates) the sqlite database file named in the config.
func connectSqlite() (db *gorm.DB, err error) {
	return gorm.Open("sqlite3", config.KeysironConfigVar.Database.Name)
}

// Connect : make a connection
// The backend is chosen by the configured database type; anything other
// than "sqlite3" falls through to MySQL.
func Connect() (db *gorm.DB, err error) {
	var dbType = config.KeysironConfigVar.Database.Type
	if dbType == "sqlite3" {
		return connectSqlite()
	}
	return connectMySQL()
}
package main import ( "github.com/g3force/go-blink1" "github.com/g3force/qaBlink/config" "github.com/g3force/qaBlink/watcher" "log" "os" "sort" "strings" "time" ) var CONFIG_LOCATIONS = []string{"config.json", os.Getenv("HOME") + "/.qaBlink.json"} type QaBlinkSlot struct { Id string Jobs []watcher.QaBlinkJob } type QaBlink struct { Slots []QaBlinkSlot blink1Devices []*blink1.Device Config *config.QaBlinkConfig } func NewQaBlink(config *config.QaBlinkConfig) *QaBlink { qaBlink := new(QaBlink) qaBlink.Config = config for _, slot := range config.Slots { var qaSlot QaBlinkSlot qaSlot.Id = slot.Id for _, refId := range slot.RefId { var jenkinsJob = watcher.NewJenkinsJob(config.Jenkins, refId) if jenkinsJob != nil { qaSlot.Jobs = append(qaSlot.Jobs, jenkinsJob) } else { var sonarJob = watcher.NewSonarJob(config.Sonar, refId) if sonarJob != nil { qaSlot.Jobs = append(qaSlot.Jobs, sonarJob) } else { var sonarJob = watcher.NewSonar51Job(config.Sonar, refId) if sonarJob != nil { qaSlot.Jobs = append(qaSlot.Jobs, sonarJob) } else { log.Printf("Could not find a job for refId %v in slot %v", refId, slot) } } } } qaBlink.Slots = append(qaBlink.Slots, qaSlot) } return qaBlink } func (qaBlink *QaBlink) UpdateStatus() { log.Printf("Updating %d slots\n", len(qaBlink.Slots)) for _, slot := range qaBlink.Slots { for jobId, job := range slot.Jobs { job.Update() log.Printf("%40s(job:%d): %8v [pending: %5v,score: %3v]", job.Id(), jobId, job.State(), job.State().Pending, job.State().Score) } } } func toState(state watcher.QaBlinkState) blink1.State { if state.Pending { return blink1.State{Red: 0, Green: 0, Blue: 255} } switch state.StatusCode { case watcher.STABLE: return blink1.State{Red: 0, Green: 255, Blue: 0} case watcher.UNSTABLE: return blink1.State{Red: 255, Green: 255, Blue: 0} case watcher.FAILED: return blink1.State{Red: 255, Green: 0, Blue: 0} case watcher.UNKNOWN: return blink1.State{Red: 0, Green: 0, Blue: 0} case watcher.DISABLED: return blink1.State{Red: 255, Green: 0, 
Blue: 255} } return blink1.State{} } func (qaBlink *QaBlink) UpdateBlink() { perSlotDuration := time.Duration(qaBlink.Config.PerSlotDuration) * time.Millisecond for _, slot := range qaBlink.Slots { slotId := 0 for _, device := range qaBlink.blink1Devices { for ledId := 0; ledId < 2; ledId++ { var state blink1.State if slotId < len(slot.Jobs) { job := slot.Jobs[slotId] state = toState(job.State()) } else { state = blink1.State{} } state.FadeTime = time.Duration(qaBlink.Config.FadeTime) * time.Millisecond switch ledId { case 0: state.LED = blink1.LED1 case 1: state.LED = blink1.LED2 default: continue } device.SetState(state) slotId++ } } time.Sleep(perSlotDuration) } } func (qaBlink *QaBlink) UpdateDevices() { newDevices := 0 for i := 0; ; i++ { device, err := blink1.OpenNextDevice() if device == nil { break } if err != nil { log.Print(err) break } device.SetState(blink1.State{Red: 255, Blue: 255}) newDevices++ } qaBlink.blink1Devices = blink1.OpenDevices() // sort devices, so that they are always used in the same order sort.Slice(qaBlink.blink1Devices, func(i, j int) bool { s1 := qaBlink.blink1Devices[i].Device.Bus + qaBlink.blink1Devices[i].Device.Device s2 := qaBlink.blink1Devices[j].Device.Bus + qaBlink.blink1Devices[j].Device.Device return strings.Compare(s1, s2) > 0 }) if newDevices > 0 { log.Printf("Found %d new blink1 devices, %d now.\n", newDevices, len(qaBlink.blink1Devices)) } } func repeat(f func(), duration time.Duration) { for { time.Sleep(duration) f() } } func main() { chosenConfig := "" for _, configLocation := range CONFIG_LOCATIONS { if _, err := os.Stat(configLocation); !os.IsNotExist(err) { chosenConfig = configLocation break } } blinkConfig := config.NewQaBlinkConfig(chosenConfig) qaBlink := NewQaBlink(blinkConfig) statusUpdateInterval := time.Duration(qaBlink.Config.UpdateInterval) * time.Millisecond deviceUpdateInterval := statusUpdateInterval go qaBlink.UpdateDevices() qaBlink.UpdateStatus() go repeat(qaBlink.UpdateStatus, 
statusUpdateInterval) go repeat(qaBlink.UpdateBlink, 0) go repeat(qaBlink.UpdateDevices, deviceUpdateInterval) for { time.Sleep(time.Hour) } }
package parser

import (
	"github.com/antlr/antlr4/runtime/Go/antlr"
	"github.com/colinking/go-sqlite3-native/internal/parser/generated"
	"github.com/colinking/go-sqlite3-native/internal/vm"
)

//go:generate antlr -Dlanguage=Go -o generated -package generated SQL.g4

// Parse compiles a SQL query string into a slice of VM instructions by
// lexing, parsing and then walking the antlr parse tree with a listener.
func Parse(query string) ([]vm.Instruction, error) {
	// This parser is based on the antlr language and uses the official Go antlr runtime.
	// For more information on how this works, see: https://blog.gopheracademy.com/advent-2017/parsing-with-antlr4-and-go/
	// Further inspiration was taken from the unofficial SQLite antlr grammar: https://github.com/antlr/grammars-v4/blob/master/sql/sqlite/SQLite.g4
	// Along with the official, but Lemon-based, SQLite grammar: https://github.com/sqlite/sqlite/blob/master/src/parse.y
	is := antlr.NewInputStream(query)

	// Create a lexer which can take arbitrary user-supplied strings and convert them
	// into tokens that we can produce a parse tree on.
	lexer := generated.NewSQLLexer(is)
	stream := antlr.NewCommonTokenStream(lexer, antlr.LexerDefaultTokenChannel)

	// Create a parser that can consume the list of tokens and produce a parse tree that we can walk:
	parser := generated.NewSQLParser(stream)

	// Create a listener that we will use to hook into antlr's runtime as it walks through
	// the parse tree.
	l := listener{
		program: []vm.Instruction{},
	}

	// Walk through the parse tree. This walk will invoke methods on the listener
	// which we can catch in order to produce our bytecode program.
	antlr.ParseTreeWalkerDefault.Walk(&l, parser.Start())

	return l.program, nil
}

// listener accumulates VM instructions as the walker visits parse-tree
// nodes; embedding BaseSQLListener makes unimplemented callbacks no-ops.
type listener struct {
	*generated.BaseSQLListener

	program []vm.Instruction
}

// code contract: listener must satisfy the generated listener interface.
var _ generated.SQLListener = &listener{}

// EnterStart is called when production start is entered.
func (s *listener) EnterStart(ctx *generated.StartContext) {
	// TODO: add the basic structure
}

// ExitStart is called when production start is exited.
func (s *listener) ExitStart(ctx *generated.StartContext) {}
/*
Copyright 2019 The Skaffold Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiversion

import (
	"testing"

	"github.com/blang/semver"

	"github.com/GoogleContainerTools/skaffold/v2/testutil"
)

// TestParseVersion checks that Parse extracts a semver version from strings
// of the form "skaffold/v<version>" and rejects other API groups.
func TestParseVersion(t *testing.T) {
	tests := []struct {
		description string
		version     string
		want        semver.Version
		shouldErr   bool
	}{
		{
			// Major version plus a pre-release tag ("alpha" followed by 3).
			description: "full",
			version:     "skaffold/v7alpha3",
			want: semver.Version{
				Major: 7,
				Pre: []semver.PRVersion{
					{
						VersionStr: "alpha",
					},
					{
						VersionNum: 3,
						IsNum:      true,
					},
				},
			},
		},
		{
			description: "ga",
			version:     "skaffold/v4",
			want: semver.Version{
				Major: 4,
			},
		},
		{
			// Non-skaffold API groups must fail to parse.
			description: "incorrect",
			version:     "apps/v1",
			shouldErr:   true,
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			got, err := Parse(test.version)

			t.CheckErrorAndDeepEqual(test.shouldErr, err, test.want, got)
		})
	}
}
package main

import "github.com/alidevjimmy/go-echo-train/app"

// main boots the HTTP application listening on port 8080.
// NOTE(review): "StartApplocation" looks like a typo for "StartApplication",
// but the name is declared in the app package, so any rename must happen there.
func main() {
	app.StartApplocation(":8080")
}
package main

import (
	"strings"
	"testing"
)

// TestPangram checks pangram against one full pangram and one sentence with
// known missing letters.
func TestPangram(t *testing.T) {
	for k, v := range map[string]string{
		"A quick brown fox jumps over the lazy dog":        "NULL",
		"A slow Yellow fox crawls Under the proactive dog": "bjkmqz"} {
		if r := pangram(k); r != v {
			t.Errorf("failed: pangram %s is %s, got %s", k, v, r)
		}
	}
}

// BenchmarkPangram feeds pangram short lowercase strings derived from the
// loop counter (base-26 digits mapped onto 'a'..'z').
func BenchmarkPangram(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var buf []byte
		for j := i; j > 0; j /= 26 {
			buf = append(buf, byte(j%26+97))
		}
		pangram(string(buf))
	}
}

// pangram returns the letters a-z missing from q (case-insensitive), or
// "NULL" when q contains the whole alphabet.
func pangram(q string) string {
	lowered := strings.ToLower(q)
	var missing []byte
	for c := byte('a'); c <= 'z'; c++ {
		if strings.IndexByte(lowered, c) < 0 {
			missing = append(missing, c)
		}
	}
	if len(missing) == 0 {
		return "NULL"
	}
	return string(missing)
}
package config

import (
	"fmt"
	"path/filepath"
	"strings"

	log "github.com/sirupsen/logrus"

	homedir "github.com/mitchellh/go-homedir"
	"github.com/spf13/viper"
)

// DSN stores all the database connection and driver information
type DSN struct {
	Driver             string
	Host               string
	Port               string
	Name               string
	Username           string
	Password           string
	ParseTime          string
	MaxOpenConnections int
	MaxIdleConnections int
}

// getHomePath returns the user's home directory ("" if it cannot be resolved).
func getHomePath() string {
	home, _ := homedir.Dir()
	return home
}

// getProjectPath walks upward from the working directory until it reaches a
// directory ending in "go-boilerplate" (or the filesystem root), so the
// config file can be found regardless of where the binary was started.
func getProjectPath() string {
	dir, err := filepath.Abs(filepath.Dir("."))
	if err != nil {
		log.Warn("Warning, cannot get current path")
		return ""
	}
	// Traverse back from current directory until service base dir is reached and add to config path
	for !strings.HasSuffix(dir, "go-boilerplate") && dir != "/" {
		dir, err = filepath.Abs(dir + "/..")
		if err != nil {
			break
		}
	}
	return dir
}

// Init loads up the application config file
// Search order: $HOME, the project root, then the configPath setting
// (env-overridable via the GO-BOILERPLATE prefix).
func Init() {
	// Find home directory.
	viper.SetEnvPrefix("go-boilerplate")
	viper.BindEnv("configFile")
	viper.BindEnv("configPath")
	viper.SetDefault("configFile", "config")
	viper.SetDefault("configPath", "/tmp")
	viper.AddConfigPath(getHomePath())
	viper.AddConfigPath(getProjectPath())
	viper.AddConfigPath(viper.GetString("configPath"))
	viper.SetConfigName(viper.GetString("configFile"))
	viper.SetDefault("logging.level", "DEBUG")
	viper.SetDefault("logging.errorLogFile", "error.log")

	// If a config file is found, read it in.
	if err := viper.ReadInConfig(); err != nil {
		log.Errorln("Error using config file:", viper.ConfigFileUsed())
		log.Errorln(err.Error())
	}
}

// ConnectionString returns the correctly formatted connection string for connecting to the database
// NOTE(review): only Name is used (a sqlite-style "./<name>.db" path); the
// other DSN fields are ignored here — confirm that is intentional.
func (d DSN) ConnectionString() string {
	return fmt.Sprintf("./%s.db", d.Name)
}

// GetDSN : returns the database dsn from viper config.
func GetDSN() DSN {
	dsn := DSN{Driver: viper.GetString("store.db.driver"),
		Username:           viper.GetString("store.db.username"),
		Password:           viper.GetString("store.db.password"),
		Host:               viper.GetString("store.db.host"),
		Port:               viper.GetString("store.db.port"),
		Name:               viper.GetString("store.db.name"),
		ParseTime:          viper.GetString("store.db.parseTime"),
		MaxOpenConnections: viper.GetInt("store.db.maxOpenConnections"),
		MaxIdleConnections: viper.GetInt("store.db.maxIdleConnections")}
	return dsn
}
package main

import (
	"time"
)

// FooArgs is a slide example of a function burdened with many positional
// arguments; the OMIT/HL comments are directives for the `present` tool.
// START1 OMIT
func FooArgs(
	strict bool, // HL
	message string, // HL
	delay time.Duration, // HL
	onSuccess func(), // HL
	onError func(error), // HL
) error {
	// Do the work ...
	return nil
}

// END1 OMIT

// main calls FooArgs once and panics on a non-nil error.
func main() {
	// START2 OMIT
	err := FooArgs(false, "some text", time.Minute, nil, nil)
	// END2 OMIT
	if err != nil {
		panic(err)
	}
}
package main

import (
	"fmt"
	"myRPC/limit/base"
	"time"
)

// main exercises the "counter" rate limiter: it asks for permission 1000
// times, 3ms apart, records each verdict, then prints all the results.
func main() {
	limitBase.InitLimit()
	// NOTE(review): the error from NewLimiter is discarded; a nil limiter
	// would panic at Allow() below — confirm the constructor cannot fail here.
	limiter, _ := limitBase.GetLimitMgr().NewLimiter("counter", map[interface{}]interface{}{})

	// m[i] records whether request i was allowed.
	m := make(map[int]bool)
	for i := 0; i < 1000; i++ {
		allow := limiter.Allow()
		if allow {
			m[i] = true
		} else {
			m[i] = false
		}
		time.Sleep(time.Millisecond * 3)
	}
	for i := 0; i < 1000; i++ {
		fmt.Printf("i=%d allow=%v\n", i, m[i])
	}
}
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"gopkg.in/yaml.v2"
)

// Config mirrors config.yml: a list of hosts, each with connection
// credentials and the commands to run against it.
type Config struct {
	Hosts []struct {
		HostName   string `yaml:"name"`
		Connection string `yaml:"connection"`
		UserName   string `yaml:"username"`
		PassWord   string `yaml:"password"`
		Commands   []struct {
			Name       string `yaml:"name"`
			String     string `yaml:"string"`
			UserInput  bool   `yaml:"userinput"`
			WhiteSpace bool   `yaml:"whitespace"`
		} `yaml:"commands"`
	} `yaml:"hosts"`
}

// GetConfig loads ./config/config.yml into the package-level `config`
// variable, falling back to the embedded copy exposed by Asset (go-bindata)
// when the file is absent on disk.
// NOTE(review): `config` and `Asset` are declared elsewhere in this package.
func GetConfig() {
	if _, err := os.Stat("./config/config.yml"); err == nil { // check if config file exists
		yamlFile, err := ioutil.ReadFile("./config/config.yml")
		if err != nil {
			panic(err)
		}
		err = yaml.Unmarshal(yamlFile, &config)
		if err != nil {
			panic(err)
		}
	} else if os.IsNotExist(err) { // config file not included, use embedded config
		yamlFile, err := Asset("config/config.yml")
		if err != nil {
			panic(err)
		}
		err = yaml.Unmarshal(yamlFile, &config)
		if err != nil {
			panic(err)
		}
	} else {
		// Stat failed for a reason other than non-existence (e.g. permissions).
		fmt.Println("Schrodinger: file may or may not exist. See err for details.")
		// panic(err)
	}
}
package core import ( "github.com/cadmium-im/zirconium-go/core/models" "github.com/cadmium-im/zirconium-go/core/utils" "github.com/fatih/structs" "github.com/google/logger" ) type Router struct { appContext *AppContext handlers map[string][]C2SMessageHandler } type C2SMessageHandler interface { HandleMessage(s *Session, message models.BaseMessage) IsAuthorizationRequired() bool HandlingType() string } func NewRouter(ctx *AppContext) (*Router, error) { r := &Router{ appContext: ctx, handlers: map[string][]C2SMessageHandler{}, } return r, nil } func (r *Router) RouteMessage(origin *Session, message models.BaseMessage) { handlers := r.handlers[message.MessageType] if handlers != nil { for _, v := range handlers { if v.IsAuthorizationRequired() { if origin.Claims == nil { logger.Warningf("Connection %s isn't authorized", origin.connID) msg := utils.PrepareMessageUnauthorized(message, message.To) // fixme: domain _ = origin.Send(msg) } } go v.HandleMessage(origin, message) } } else { protocolError := models.ProtocolError{ ErrCode: "unhandled", ErrText: "Server doesn't implement message type " + message.MessageType, ErrPayload: make(map[string]interface{}), } errMsg := models.NewBaseMessage(message.ID, message.MessageType, message.To, message.From, false, structs.Map(protocolError)) logger.Infof("Drop message with type %s because server hasn't proper handlers", message.MessageType) _ = origin.Send(errMsg) } } func (r *Router) RegisterC2SHandler(c C2SMessageHandler) { r.handlers[c.HandlingType()] = append(r.handlers[c.HandlingType()], c) }
package h2mux import ( "testing" "time" "github.com/stretchr/testify/assert" ) func assertEmpty(t *testing.T, rl *ReadyList) { select { case <-rl.ReadyChannel(): t.Fatal("Spurious wakeup") default: } } func assertClosed(t *testing.T, rl *ReadyList) { select { case _, ok := <-rl.ReadyChannel(): assert.False(t, ok, "ReadyChannel was not closed") case <-time.After(100 * time.Millisecond): t.Fatalf("Timeout") } } func receiveWithTimeout(t *testing.T, rl *ReadyList) uint32 { select { case i := <-rl.ReadyChannel(): return i case <-time.After(100 * time.Millisecond): t.Fatalf("Timeout") return 0 } } func TestReadyListEmpty(t *testing.T) { rl := NewReadyList() // no signals, receive should fail assertEmpty(t, rl) } func TestReadyListSignal(t *testing.T) { rl := NewReadyList() assertEmpty(t, rl) rl.Signal(0) if receiveWithTimeout(t, rl) != 0 { t.Fatalf("Received wrong ID of signalled event") } assertEmpty(t, rl) } func TestReadyListMultipleSignals(t *testing.T) { rl := NewReadyList() assertEmpty(t, rl) // Signals should not block; // Duplicate unhandled signals should not cause multiple wakeups signalled := [5]bool{} for i := range signalled { rl.Signal(uint32(i)) rl.Signal(uint32(i)) } // All signals should be received once (in any order) for range signalled { i := receiveWithTimeout(t, rl) if signalled[i] { t.Fatalf("Received signal %d more than once", i) } signalled[i] = true } for i := range signalled { if !signalled[i] { t.Fatalf("Never received signal %d", i) } } assertEmpty(t, rl) } func TestReadyListClose(t *testing.T) { rl := NewReadyList() rl.Close() // readyList.run() occurs in a separate goroutine, // so there's no way to directly check that run() has terminated. // Perform an indirect check: is the ready channel closed? 
assertClosed(t, rl) // a second rl.Close() shouldn't cause a panic rl.Close() // Signal shouldn't block after Close() done := make(chan struct{}) go func() { for i := 0; i < 5; i++ { rl.Signal(uint32(i)) } close(done) }() select { case <-done: case <-time.After(100 * time.Millisecond): t.Fatal("Test timed out") } } func TestReadyDescriptorQueue(t *testing.T) { var queue readyDescriptorQueue items := [4]readyDescriptor{} for i := range items { items[i].ID = uint32(i) } if !queue.Empty() { t.Fatalf("nil queue should be empty") } queue.Enqueue(&items[3]) queue.Enqueue(&items[1]) queue.Enqueue(&items[0]) queue.Enqueue(&items[2]) if queue.Empty() { t.Fatalf("Empty should be false after enqueue") } i := queue.Dequeue().ID if i != 3 { t.Fatalf("item 3 should have been dequeued, got %d instead", i) } i = queue.Dequeue().ID if i != 1 { t.Fatalf("item 1 should have been dequeued, got %d instead", i) } i = queue.Dequeue().ID if i != 0 { t.Fatalf("item 0 should have been dequeued, got %d instead", i) } i = queue.Dequeue().ID if i != 2 { t.Fatalf("item 2 should have been dequeued, got %d instead", i) } if !queue.Empty() { t.Fatal("queue should be empty after dequeuing all items") } if queue.Dequeue() != nil { t.Fatal("dequeue on empty queue should return nil") } } func TestReadyDescriptorMap(t *testing.T) { m := newReadyDescriptorMap() m.Delete(42) // (delete of missing key should be a noop) x := m.SetIfMissing(42) if x == nil { t.Fatal("SetIfMissing for new key returned nil") } if m.SetIfMissing(42) != nil { t.Fatal("SetIfMissing for existing key returned non-nil") } // this delete has effect m.Delete(42) // the next set should reuse the old object y := m.SetIfMissing(666) if y == nil { t.Fatal("SetIfMissing for new key returned nil") } if x != y { t.Fatal("SetIfMissing didn't reuse freed object") } }
package api

// Route path prefixes shared by the record and review endpoints.
const (
	RecordPrefix = "/record"
	ReviewPrefix = "/review"
)
package service

import (
	"strconv"
	"time"

	"github.com/dgrijalva/jwt-go"
	meetupmanager "github.com/lucas-dev-it/62252aee-9d11-4149-a0ea-de587cbcd233"
	"github.com/lucas-dev-it/62252aee-9d11-4149-a0ea-de587cbcd233/business"
	"github.com/lucas-dev-it/62252aee-9d11-4149-a0ea-de587cbcd233/business/model"
	"github.com/lucas-dev-it/62252aee-9d11-4149-a0ea-de587cbcd233/internal"
	uuid "github.com/satori/go.uuid"
	"golang.org/x/crypto/bcrypt"
)

// Token configuration, overridable via environment variables.
var (
	expirationTime = internal.GetEnv("TOKEN_EXPIRATION_TIME", "1") // lifetime in hours, kept as a string
	signingString  = internal.GetEnv("TOKEN_SIGNING_KEY", "testSigningString")
)

// userRepository abstracts user lookup for this service.
type userRepository interface {
	FindUserByUsername(username string) (*model.User, error)
}

// userService issues JWT access tokens for authenticated users.
type userService struct {
	userRepository userRepository
}

// NewUserService wires a userService to the given repository.
func NewUserService(userRepository userRepository) *userService {
	return &userService{userRepository: userRepository}
}

// TokenIssue gets a new access token for a particular user
func (us *userService) TokenIssue(ti *business.TokenIssue) (*business.AccessToken, error) {
	user, err := us.userRepository.FindUserByUsername(ti.Username)
	if err != nil {
		return nil, err
	}

	// One generic error for a bad password keeps callers from telling a
	// wrong password apart from an unknown user.
	if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(ti.Password)); err != nil {
		return nil, meetupmanager.CustomError{
			Cause:   err,
			Type:    meetupmanager.ErrNotFound,
			Message: "username/password combination is invalid",
		}
	}

	now := time.Now()
	uuid := uuid.NewV4()

	et, err := strconv.Atoi(expirationTime)
	if err != nil {
		return nil, err
	}

	expiresAt := now.Add(time.Hour * time.Duration(et)).Unix()
	// NOTE(review): "jti" is set to a uuid.UUID value rather than a string —
	// confirm downstream consumers expect that encoding.
	cm := jwt.MapClaims{
		"iss": "auth-service",
		"nbf": now.Unix(),
		"exp": expiresAt,
		"jti": uuid,
	}

	// Embed the user's scope names when any are assigned.
	if len(user.Scopes) > 0 {
		sNames := make([]model.ScopeName, len(user.Scopes))
		for i, s := range user.Scopes {
			sNames[i] = s.Name
		}
		cm["scopes"] = sNames
	}

	at := jwt.NewWithClaims(jwt.SigningMethodHS256, cm)
	tokenString, err := at.SignedString([]byte(signingString))
	if err != nil {
		return nil, err
	}

	return &business.AccessToken{
		AccessToken: tokenString,
		ExpiresAt:   expiresAt,
	}, nil
}
package main import ( "html/template" "github.com/yuriizinets/go-ssc" ) type PageIndex struct { ComponentHttpbinUUID ssc.Component ComponentCounter ssc.Component ComponentSampleBinding ssc.Component ComponentSampleParent ssc.Component } func (*PageIndex) Template() *template.Template { return template.Must(template.New("page.index.html").Funcs(funcmap()).ParseGlob("*.html")) } func (p *PageIndex) Init() { p.ComponentHttpbinUUID = ssc.RegC(p, &ComponentHttpbinUUID{}) p.ComponentCounter = ssc.RegC(p, &ComponentCounter{}) p.ComponentSampleBinding = ssc.RegC(p, &ComponentSampleBinding{}) p.ComponentSampleParent = ssc.RegC(p, &ComponentSampleParent{}) } func (*PageIndex) Meta() ssc.Meta { return ssc.Meta{ Title: "SSC Example", } }
package repositories import ( "context" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" "headless-todo-tasks-service/internal/entities" "headless-todo-tasks-service/internal/services/repositories" ) const TasksCollection = "tasks" type TasksRepositoryMongo struct { db *mongo.Database } func NewTasksRepositoryMongo(db *mongo.Database) repositories.TasksRepository { return &TasksRepositoryMongo{db} } func (r *TasksRepositoryMongo) Create(ctx context.Context, name, description, userId string) (*entities.Task, error) { task := entities.NewTask(name, description, userId) result, err := r.db.Collection(TasksCollection).InsertOne(ctx, bson.M{"name": task.Name, "userId": task.UserId, "description": task.Description, "created": task.Created}) if err != nil { return nil, err } task.ID = result.InsertedID.(primitive.ObjectID) return &task, nil }
package model

import (
	"github.com/jinzhu/gorm"
)

// TypeInfo carries a book type/category name.
type TypeInfo struct {
	Type string
}

// BookInLocal is the gorm model for a book stored locally.
type BookInLocal struct {
	gorm.Model
	Title string // book title
	Type  string // book type/category
}
// Package reader contains various meteo.Reader implementations using different sensors. package reader
package serverFunctionality import ( "context" "database/sql" "fmt" "log" "time" _ "github.com/denisenkom/go-mssqldb" models "github.com/mtapp/MeetingTrackingApp/model" ) // Replace with your own connection parameters var server = "BWEBDB01" //"localhost" var port = 1433 var user = "sa" var password = "EUAS.2023!" var database = "MeetingAppointment" func CloseConnectionDb() { // Close the database connection pool after program executes defer models.DB.Close() } func ConnectDb() { //CloseConnectionDb() var err error log.Println("ServerFunctionalityt baris", nil) // Create connection string connString := fmt.Sprintf("server=%s;user id=%s;password=%s;port=%d;database=%s;", server, user, password, port, database) // Create connection pool models.DB, err = sql.Open("sqlserver", connString) if err != nil { log.Fatal("Error creating connection pool: " + err.Error()) } log.Printf("Connected!\n") } // Gets and prints SQL Server version func SelectVersion() { // Use background context ctx := context.Background() // Ping database to see if it's still alive. // Important for handling network issues and long queries. 
err := models.DB.PingContext(ctx) if err != nil { log.Fatal("Error pinging database: " + err.Error()) } var result string // Run query and scan for result err = models.DB.QueryRowContext(ctx, "SELECT @@version").Scan(&result) if err != nil { log.Fatal("Scan failed:", err.Error()) } fmt.Printf("%s\n", result) log.Printf("%s\n", result) } func GetAllMeetingAppoinmentRecords() ([]models.AppointmentRequest, error) { fmt.Println("Endpoint Hit: homePage") var allAppointmentItemArray []models.AppointmentRequest rows, err := models.DB.Query("SELECT * FROM APPOINTMENT_REQUEST") if err != nil { return nil, err } defer rows.Close() for rows.Next() { var item models.AppointmentRequest rows.Scan(&item.ID, &item.REQUESTER_NAME, &item.REQUESTER_MAIL, &item.MEETING_ROOM, &item.ARRANGE_TIME_START, &item.ARRANGE_TIME_END, &item.APPROVMENT_STATUS, &item.DESCRIPTION, &item.TITLE) allAppointmentItemArray = append(allAppointmentItemArray, item) } return allAppointmentItemArray, nil } // HTTP response listing all appointment records. func InsertAppointment(item models.AppointmentRequest) (lastInsertedId int, errText error) { var lastInsertId int startDate, err1 := time.Parse(time.RFC3339, item.ARRANGE_TIME_START) endDate, err2 := time.Parse(time.RFC3339, item.ARRANGE_TIME_END) if err1 != nil || err2 != nil { fmt.Println("Error while parsing date :", err1) return 0, err1 } err := models.DB.QueryRow(` INSERT INTO APPOINTMENT_REQUEST(REQUESTER_NAME, REQUESTER_MAIL, MEETING_ROOM, ARRANGE_TIME_START, ARRANGE_TIME_END, APPROVMENT_STATUS, DESCRIPTION, TITLE) VALUES(@p1, @p2, @p3, @p4, @p5, @p6, @p7, @p8) SELECT SCOPE_IDENTITY()`, item.REQUESTER_NAME, item.REQUESTER_MAIL, item.MEETING_ROOM, startDate, endDate, item.APPROVMENT_STATUS, item.DESCRIPTION, item.TITLE).Scan(&lastInsertId) if err != nil { return 0, err } else { return lastInsertId, nil } }
// Copyright 2020 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package firmware import ( "context" "strings" "chromiumos/tast/local/firmware" "chromiumos/tast/testing" "chromiumos/tast/testing/hwdep" ) func init() { testing.AddTest(&testing.Test{ Func: FpUpdaterSucceeded, Desc: "Checks that the fingerprint firmware updater did not fail at boot", Contacts: []string{ "tomhughes@chromium.org", "chromeos-fingerprint@google.com", }, Attr: []string{"group:mainline", "group:fingerprint-cq"}, SoftwareDeps: []string{"biometrics_daemon"}, HardwareDeps: hwdep.D(hwdep.Fingerprint()), }) } const ( successString = "The update was successful." // Differentiate between RO and RW failures, instead of using regex. roFailureString = "Failed to update RO image, aborting." rwFailureString = "Failed to update RW image, aborting." noUpdateString = "Update was not necessary." noFirmwareFileString = "No firmware file on rootfs, exiting." ) func FpUpdaterSucceeded(ctx context.Context, s *testing.State) { latest, prev, err := firmware.ReadFpUpdaterLogs() if err != nil { s.Fatal("Failed to read logs: ", err) } // After a successful update there's a reboot, so latest log should say no update needed. if strings.Contains(latest, noUpdateString) && strings.Contains(prev, successString) { return } // If both latest and previous log says no update, count as success. if strings.Contains(latest, noUpdateString) && strings.Contains(prev, noUpdateString) { return } // If latest log says no update and previous log does not exist, count as success. if strings.Contains(latest, noUpdateString) && prev == "" { return } // Everything else counts as failure. 
if strings.Contains(latest, roFailureString) || strings.Contains(prev, roFailureString) { s.Fatal("Failed to update RO") } if strings.Contains(latest, rwFailureString) || strings.Contains(prev, rwFailureString) { s.Fatal("Failed to update RW") } if strings.Contains(latest, noFirmwareFileString) || strings.Contains(prev, noFirmwareFileString) { s.Fatal("Failed to find firmware file on rootfs") } s.Fatalf("Updater result unknown: latest=%q prev=%q", latest, prev) }
package main import ( . "fmt" // 调用函数,无序通过报名 operatorSystem "os" // 给包起别名 _ "wovert/09_func/other" // _ 表示仅调用包的init 函数 ) func main() { // 接受用户的参数,字符串方式传递 list := operatorSystem.Args n := len(list) Println("n=", n) for i := 0; i < n; i++ { Printf("%d=%s\n", i, list[i]) } }
/* Copyright 2015 All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "io" "log" "net/http" "net/url" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( e2eCorsUpstreamListener = "127.0.0.1:12345" e2eCorsProxyListener = "127.0.0.1:54321" e2eCorsOauthListener = "127.0.0.1:23456" e2eCorsUpstreamURL = "/upstream" ) func testBuildCorsConfig() *Config { config := newDefaultConfig() config.Listen = e2eCorsProxyListener config.DiscoveryURL = testDiscoveryURL(e2eCorsOauthListener, "master") config.Upstream = "http://" + e2eCorsUpstreamListener config.CorsOrigins = []string{"*"} config.Verbose = false config.EnableLogging = false config.DisableAllLogging = true config.Resources = []*Resource{ { URL: e2eCorsUpstreamURL, Methods: []string{http.MethodGet}, WhiteListed: true, }, } return config } func TestCorsWithUpstream(t *testing.T) { log.SetOutput(io.Discard) config := testBuildCorsConfig() // launch fake upstream resource server _ = runTestUpstream(t, e2eCorsUpstreamListener, e2eCorsUpstreamURL) // launch fake oauth OIDC server _ = runTestAuth(t, e2eCorsOauthListener, "master") // launch keycloak-gatekeeper proxy _ = runTestGatekeeper(t, config) // ok now exercise the ensemble with a CORS-enabled request client := http.Client{} u, _ := url.Parse("http://" + e2eCorsProxyListener + e2eCorsUpstreamURL) h := make(http.Header, 1) h.Set("Content-Type", "application/json") h.Add("Origin", "myorigin.com") resp, err := 
client.Do(&http.Request{ Method: http.MethodGet, URL: u, Header: h, }) require.NoError(t, err) defer func() { _ = resp.Body.Close() }() buf, erb := io.ReadAll(resp.Body) assert.NoError(t, erb) assert.Contains(t, string(buf), `"message": "test"`) // check this is our test resource if assert.NotEmpty(t, resp.Header) && assert.Contains(t, resp.Header, "Access-Control-Allow-Origin") { // check the returned upstream response after proxying contains CORS headers assert.Equal(t, []string{"*"}, resp.Header["Access-Control-Allow-Origin"]) } }
package cncscraper

import (
	"time"
)

// Topic is a scraped forum topic as stored in MongoDB (bson tags give the
// persisted field names).
type Topic struct {
	CrawlDate   time.Time    `bson:"crawl_datetime"` // when the scraper crawled this topic
	CreatedDate time.Time    `bson:"topic_datetime"` // when the topic was created
	ForumId     int          `bson:"forum_id"`
	Id          int          `bson:"topic_id"`
	IsArchived  bool         `bson:"is_archived"`
	PollOptions []PollOption `bson:"polls"`
	Replies     int          `bson:"replies"`
	ReplyDate   time.Time    `bson:"reply_datetime"` // time of the latest reply
	Subject     string       `bson:"subject"`
	Title       string       `bson:"title"`
	User        User         `bson:"user"` // topic author
	Views       int          `bson:"views"`
	Votes       int          `bson:"votes"`
}
package column_test

import (
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/vahid-sohrabloo/chconn/v2"
	"github.com/vahid-sohrabloo/chconn/v2/column"
	"github.com/vahid-sohrabloo/chconn/v2/types"
)

// TestTuples round-trips Tuple(Int64...) columns of arity 1-5 (plus their
// Array variants) through ClickHouse: create table, insert twice, then read
// back both column-wise (Read) and row-wise (Row) and compare with the
// inserted data.
func TestTuples(t *testing.T) {
	tableName := "tuples"
	t.Parallel()

	// Connection string comes from the test environment.
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")

	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)

	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),
	)
	require.NoError(t, err)
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "true",
		},
	}
	err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (
		%[1]s1 Tuple(Int64),
		%[1]s1_array Array(Tuple(Int64)),
		%[1]s2 Tuple(Int64, Int64),
		%[1]s2_array Array(Tuple(Int64, Int64)),
		%[1]s3 Tuple(Int64, Int64, Int64),
		%[1]s3_array Array(Tuple(Int64, Int64, Int64)),
		%[1]s4 Tuple(Int64, Int64, Int64, Int64),
		%[1]s4_array Array(Tuple(Int64, Int64, Int64, Int64)),
		%[1]s5 Tuple(Int64, Int64, Int64, Int64, Int64),
		%[1]s5_array Array(Tuple(Int64, Int64, Int64, Int64, Int64))
	) Engine=Memory`, tableName), &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)

	// Writer columns for each tuple arity and its Array variant.
	col1 := column.NewTuple1[int64](column.New[int64]())
	col1Array := column.NewTuple1[int64](column.New[int64]()).Array()
	type Tuple2 types.Tuple2[int64, int64]
	col2 := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]())
	col2Array := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]()).Array()
	type Tuple3 types.Tuple3[int64, int64, int64]
	col3 := column.NewTuple3[Tuple3, int64, int64, int64](column.New[int64](), column.New[int64](), column.New[int64]())
	col3Array := column.NewTuple3[Tuple3, int64, int64, int64](column.New[int64](), column.New[int64](), column.New[int64]()).Array()
	type Tuple4 types.Tuple4[int64, int64, int64, int64]
	col4 := column.NewTuple4[
		Tuple4,
		int64, int64, int64, int64](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	)
	col4Array := column.NewTuple4[
		Tuple4,
		int64,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	).Array()
	type Tuple5 types.Tuple5[
		int64,
		int64,
		int64,
		int64,
		int64,
	]
	col5 := column.NewTuple5[
		Tuple5,
		int64,
		int64,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	)
	col5Array := column.NewTuple5[
		Tuple5,
		int64,
		int64,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	).Array()

	// Shadow slices recording exactly what was appended, used for the
	// equality assertions after reading back.
	var col1Insert []int64
	var col1ArrayInsert [][]int64
	var col2Insert []Tuple2
	var col2ArrayInsert [][]Tuple2
	var col3Insert []Tuple3
	var col3ArrayInsert [][]Tuple3
	var col4Insert []Tuple4
	var col4ArrayInsert [][]Tuple4
	var col5Insert []Tuple5
	var col5ArrayInsert [][]Tuple5

	// Insert the same 10-row pattern twice (two separate INSERT round-trips).
	for insertN := 0; insertN < 2; insertN++ {
		rows := 10
		for i := 0; i < rows; i++ {
			col1.Append(int64(i))
			col1Insert = append(col1Insert, int64(i))
			col1Array.Append([]int64{int64(i), int64(i + 1)})
			col1ArrayInsert = append(col1ArrayInsert, []int64{int64(i), int64(i + 1)})
			col2.Append(Tuple2{int64(i), int64(i + 1)})
			col2Insert = append(col2Insert, Tuple2{int64(i), int64(i + 1)})
			col2Array.Append([]Tuple2{{int64(i), int64(i + 1)}, {int64(i + 2), int64(i + 3)}})
			col2ArrayInsert = append(col2ArrayInsert, []Tuple2{{int64(i), int64(i + 1)}, {int64(i + 2), int64(i + 3)}})
			col3.Append(Tuple3{int64(i), int64(i + 1), int64(i + 2)})
			col3Insert = append(col3Insert, Tuple3{int64(i), int64(i + 1), int64(i + 2)})
			col3Array.Append([]Tuple3{
				{int64(i), int64(i + 1), int64(i + 2)},
				{int64(i + 3), int64(i + 4), int64(i + 5)},
			})
			col3ArrayInsert = append(col3ArrayInsert, []Tuple3{
				{int64(i), int64(i + 1), int64(i + 2)},
				{int64(i + 3), int64(i + 4), int64(i + 5)},
			})
			col4.Append(Tuple4{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)})
			col4Insert = append(col4Insert, Tuple4{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)})
			col4Array.Append([]Tuple4{
				{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)},
				{int64(i + 4), int64(i + 5), int64(i + 6), int64(i + 7)},
			})
			col4ArrayInsert = append(col4ArrayInsert, []Tuple4{
				{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)},
				{int64(i + 4), int64(i + 5), int64(i + 6), int64(i + 7)},
			})
			col5.Append(Tuple5{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)})
			col5Insert = append(col5Insert, Tuple5{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)})
			col5Array.Append([]Tuple5{
				{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)},
				{int64(i + 5), int64(i + 6), int64(i + 7), int64(i + 8), int64(i + 9)},
			})
			col5ArrayInsert = append(col5ArrayInsert, []Tuple5{
				{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)},
				{int64(i + 5), int64(i + 6), int64(i + 7), int64(i + 8), int64(i + 9)},
			})
		}

		err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s (
			%[1]s1,
			%[1]s1_array,
			%[1]s2,
			%[1]s2_array,
			%[1]s3,
			%[1]s3_array,
			%[1]s4,
			%[1]s4_array,
			%[1]s5,
			%[1]s5_array
		) VALUES`, tableName),
			col1,
			col1Array,
			col2,
			col2Array,
			col3,
			col3Array,
			col4,
			col4Array,
			col5,
			col5Array,
		)
		require.NoError(t, err)
	}

	// example read all
	col1Read := column.NewTuple1[int64](column.New[int64]())
	col1ArrayRead := column.NewTuple1[int64](column.New[int64]()).Array()
	col2Read := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]())
	col2ArrayRead := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]()).Array()
	col3Read := column.NewTuple3[
		Tuple3,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	)
	col3ArrayRead := column.NewTuple3[
		Tuple3,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	).Array()
	col4Read := column.NewTuple4[
		Tuple4,
		int64,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	)
	col4ArrayRead := column.NewTuple4[
		Tuple4,
		int64,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	).Array()
	col5Read := column.NewTuple5[
		Tuple5,
		int64,
		int64,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	)
	col5ArrayRead := column.NewTuple5[
		Tuple5,
		int64,
		int64,
		int64,
		int64,
		int64,
	](
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
		column.New[int64](),
	).Array()

	selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s1,
		%[1]s1_array,
		%[1]s2,
		%[1]s2_array,
		%[1]s3,
		%[1]s3_array,
		%[1]s4,
		%[1]s4_array,
		%[1]s5,
		%[1]s5_array
	FROM test_%[1]s`, tableName),
		col1Read,
		col1ArrayRead,
		col2Read,
		col2ArrayRead,
		col3Read,
		col3ArrayRead,
		col4Read,
		col4ArrayRead,
		col5Read,
		col5ArrayRead)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())

	var col1ReadData []int64
	var col1ArrayReadData [][]int64
	var col2ReadData []Tuple2
	var col2ArrayReadData [][]Tuple2
	var col3ReadData []Tuple3
	var col3ArrayReadData [][]Tuple3
	var col4ReadData []Tuple4
	var col4ArrayReadData [][]Tuple4
	var col5ReadData []Tuple5
	var col5ArrayReadData [][]Tuple5

	// Drain all blocks via the bulk Read API.
	for selectStmt.Next() {
		col1ReadData = col1Read.Read(col1ReadData)
		col1ArrayReadData = col1ArrayRead.Read(col1ArrayReadData)
		col2ReadData = col2Read.Read(col2ReadData)
		col2ArrayReadData = col2ArrayRead.Read(col2ArrayReadData)
		col3ReadData = col3Read.Read(col3ReadData)
		col3ArrayReadData = col3ArrayRead.Read(col3ArrayReadData)
		col4ReadData = col4Read.Read(col4ReadData)
		col4ArrayReadData = col4ArrayRead.Read(col4ArrayReadData)
		col5ReadData = col5Read.Read(col5ReadData)
		col5ArrayReadData = col5ArrayRead.Read(col5ArrayReadData)
	}

	require.NoError(t, selectStmt.Err())
	selectStmt.Close()

	assert.Equal(t, col1Insert, col1ReadData)
	assert.Equal(t, col1ArrayInsert, col1ArrayReadData)
	assert.Equal(t, col2Insert, col2ReadData)
	assert.Equal(t, col2ArrayInsert, col2ArrayReadData)
	assert.Equal(t, col3Insert, col3ReadData)
	assert.Equal(t, col3ArrayInsert, col3ArrayReadData)
	assert.Equal(t, col4Insert, col4ReadData)
	assert.Equal(t, col4ArrayInsert, col4ArrayReadData)
	assert.Equal(t, col5Insert, col5ReadData)
	assert.Equal(t, col5ArrayInsert, col5ArrayReadData)

	// example read row
	selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s1,
		%[1]s1_array,
		%[1]s2,
		%[1]s2_array,
		%[1]s3,
		%[1]s3_array,
		%[1]s4,
		%[1]s4_array,
		%[1]s5,
		%[1]s5_array
	FROM test_%[1]s`, tableName),
		col1Read,
		col1ArrayRead,
		col2Read,
		col2ArrayRead,
		col3Read,
		col3ArrayRead,
		col4Read,
		col4ArrayRead,
		col5Read,
		col5ArrayRead)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())

	// Reset accumulators (keep capacity) before the row-wise pass.
	col1ReadData = col1ReadData[:0]
	col1ArrayReadData = col1ArrayReadData[:0]
	col2ReadData = col2ReadData[:0]
	col2ArrayReadData = col2ArrayReadData[:0]
	col3ReadData = col3ReadData[:0]
	col3ArrayReadData = col3ArrayReadData[:0]
	col4ReadData = col4ReadData[:0]
	col4ArrayReadData = col4ArrayReadData[:0]
	col5ReadData = col5ReadData[:0]
	col5ArrayReadData = col5ArrayReadData[:0]

	// Same data, this time via the per-row Row(i) API.
	for selectStmt.Next() {
		for i := 0; i < selectStmt.RowsInBlock(); i++ {
			col1ReadData = append(col1ReadData, col1Read.Row(i))
			col1ArrayReadData = append(col1ArrayReadData, col1ArrayRead.Row(i))
			col2ReadData = append(col2ReadData, col2Read.Row(i))
			col2ArrayReadData = append(col2ArrayReadData, col2ArrayRead.Row(i))
			col3ReadData = append(col3ReadData, col3Read.Row(i))
			col3ArrayReadData = append(col3ArrayReadData, col3ArrayRead.Row(i))
			col4ReadData = append(col4ReadData, col4Read.Row(i))
			col4ArrayReadData = append(col4ArrayReadData, col4ArrayRead.Row(i))
			col5ReadData = append(col5ReadData, col5Read.Row(i))
			col5ArrayReadData = append(col5ArrayReadData, col5ArrayRead.Row(i))
		}
	}

	require.NoError(t, selectStmt.Err())
	selectStmt.Close()

	assert.Equal(t, col1Insert, col1ReadData)
	assert.Equal(t, col1ArrayInsert, col1ArrayReadData)
	assert.Equal(t, col2Insert, col2ReadData)
	assert.Equal(t, col2ArrayInsert, col2ArrayReadData)
	assert.Equal(t, col3Insert, col3ReadData)
	assert.Equal(t, col3ArrayInsert, col3ArrayReadData)
	assert.Equal(t, col4Insert, col4ReadData)
	assert.Equal(t, col4ArrayInsert, col4ArrayReadData)
	assert.Equal(t, col5Insert, col5ReadData)
	assert.Equal(t, col5ArrayInsert, col5ArrayReadData)
}
package testflow

import (
	tmv1beta1 "github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"
	"github.com/gardener/test-infra/pkg/testmachinery/config"
	"github.com/gardener/test-infra/pkg/testmachinery/locations"
	"github.com/gardener/test-infra/pkg/testmachinery/testflow/node"
	"github.com/gardener/test-infra/pkg/testmachinery/testdefinition"
)

// preprocessTestflow takes a TestFlow and creates a map which maps the unique step name
// to the step pointer, alongside the sets of testdefinitions and locations that the flow uses.
func preprocessTestflow(flowID FlowIdentifier, root *node.Node, tf tmv1beta1.TestFlow, loc locations.Locations, globalConfig []*config.Element) (map[string]*Step, map[*testdefinition.TestDefinition]interface{}, map[testdefinition.Location]interface{}, error) {
	stepMap := make(map[string]*Step)
	testdefinitions := make(map[*testdefinition.TestDefinition]interface{})
	usedLocations := make(map[testdefinition.Location]interface{})
	for _, step := range tf {
		// todo(schrodit): add validation
		nodes, err := node.CreateNodesFromStep(step, loc, globalConfig, string(flowID))
		if err != nil {
			return nil, nil, nil, err
		}

		// Record every test definition and its location that the step's nodes reference.
		for n := range nodes.Iterate() {
			testdefinitions[n.TestDefinition] = nil
			usedLocations[n.TestDefinition.Location] = nil
		}

		stepMap[step.Name] = &Step{
			Info:  step,
			Nodes: nodes,
		}
	}

	return stepMap, testdefinitions, usedLocations, nil
}

// CreateInitialDAG creates a DAG by evaluating the dependsOn steps.
func CreateInitialDAG(steps map[string]*Step, root *node.Node) {
	for _, step := range steps {
		if step.Info.DependsOn == nil || len(step.Info.DependsOn) == 0 {
			// add the root node as parent
			step.Nodes.AddParents(root)
			root.AddChildren(step.Nodes.List()...)
			continue
		}

		// go through the list of dependent steps and add them as parent
		addDependentStepsAsParent(steps, step)
	}
}

// addDependentStepsAsParent wires every step named in DependsOn as a parent of the given step.
func addDependentStepsAsParent(steps map[string]*Step, step *Step) {
	for _, dependentStepName := range step.Info.DependsOn {
		dependentStep := steps[dependentStepName]

		step.Nodes.AddParents(dependentStep.Nodes.List()...)
		dependentStep.Nodes.AddChildren(step.Nodes.List()...)
	}
}

// ReorderChildrenOfNodes recursively reorders all children of a nodelist so that serial steps run in serial after parallel nodes.
// Returns nil if successful.
func ReorderChildrenOfNodes(list *node.Set) *node.Set {
	children := node.NewSet()
	for item := range list.Iterate() {
		// use k8s sets
		children.Add(reorderChildrenOfNode(item).List()...)
	}
	if children.Len() == 0 {
		return nil
	}
	return ReorderChildrenOfNodes(children)
}

// reorderChildrenOfNode reorders all children of a node so that serial steps run in serial after parallel nodes.
// The function returns the new children.
func reorderChildrenOfNode(root *node.Node) *node.Set {
	// directly return if there are no nodes or only one node in the pool
	if root.Children.Len() <= 1 {
		// todo: write test for special case
		return root.Children
	}

	// Partition the children by serial/disruptive behavior vs. parallel.
	serialNodes := node.NewSet()
	parallelNodes := node.NewSet()
	for item := range root.Children.Iterate() {
		if item.TestDefinition.HasBehavior(tmv1beta1.SerialBehavior) || item.TestDefinition.HasBehavior(tmv1beta1.DisruptiveBehavior) {
			serialNodes.Add(item)
		} else {
			parallelNodes.Add(item)
		}
	}

	// directly return if there are no serial steps
	if serialNodes.Len() == 0 {
		return root.Children
	}

	previousChildren := parallelNodes.GetChildren()
	// remove children from parallel node
	parallelNodes.Remove(previousChildren.List()...)

	// set the root as parent for the parallel nodes
	root.ClearChildren().AddChildren(parallelNodes.List()...)
	parallelNodes.ClearParents().AddParents(root)

	// Chain the serial nodes one after another, behind the parallel group.
	for i, serialNode := range serialNodes.List() {
		// remove the current node from the list of previous children
		// and add all children of the current node
		previousChildren.Remove(serialNode).Add(serialNode.Children.List()...)
		// remove the serial as possible parent from all previous children
		previousChildren.RemoveParents(serialNode)
		serialNode.ClearChildren()
		if i == 0 {
			parallelNodes.ClearChildren().AddChildren(serialNode)
			// only remove the root parent as otherwise other parent information will get lost
			serialNode.RemoveParent(root).AddParents(parallelNodes.List()...)
		} else {
			prevNode := serialNodes.List()[i-1]
			//prevNode.ClearChildren()
			prevNode.AddChildren(serialNode)
			// only remove the root parent as otherwise other parent information will get lost
			serialNode.RemoveParent(root).AddParents(prevNode)
		}

		// last node
		if i == serialNodes.Len()-1 {
			serialNode.ClearChildren().AddChildren(previousChildren.List()...)
			previousChildren.AddParents(serialNode)
		}
	}

	// return last node
	// the list cannot be empty as this case is already checked above.
	return node.NewSet(serialNodes.Last())
}

// ApplyOutputScope defines the artifact scopes for outputs.
// This is done by getting the last serial step and setting is as the current nodes artifact source.
func ApplyOutputScope(steps map[string]*Step) error {
	for _, step := range steps {
		for n := range step.Nodes.Iterate() {
			var outputSourceNode *node.Node
			if step.Info.ArtifactsFrom != "" {
				// An explicit artifact source takes precedence.
				outputSourceNode = steps[step.Info.ArtifactsFrom].Nodes.List()[0]
			} else {
				// Otherwise walk up to the nearest serial parent that neither
				// continues on error nor is untrusted.
				outputSourceNode = getNextSerialParent(n, func(node *node.Node) bool {
					if node.Step() == nil {
						return true
					}
					return !node.Step().Definition.ContinueOnError
				}, func(node *node.Node) bool {
					if node.Step() == nil {
						return true
					}
					return !node.Step().Definition.Untrusted
				})
			}

			if outputSourceNode != nil {
				outputSourceNode.EnableOutput()
				n.SetInputSource(outputSourceNode)
			}
		}
	}
	return nil
}

// ApplyConfigScope calculates the artifacts from all serial parent nodes and merges them.
// Whereas the nearer parent's configs overwrites the config when collisions occur.
func ApplyConfigScope(steps map[string]*Step) {
	for _, step := range steps {
		for n := range step.Nodes.Iterate() {
			nextNode := n
			configs := config.NewSet(config.New(n.Step().Definition.Config, config.LevelStep)...)

			// Walk the chain of serial parents and merge their shared configs.
			for nextNode != nil && nextNode.Parents.Len() != 0 {
				nextNode = getNextSerialParent(nextNode)
				if nextNode != nil && nextNode.Step() != nil {
					cfgs := config.New(nextNode.Step().Definition.Config, config.LevelShared)
					for _, element := range cfgs {
						// Private configs are not inherited.
						if element.Info.Private == nil || !*element.Info.Private {
							configs.Add(element)
						}
					}
				}
			}

			n.TestDefinition.AddConfig(configs.List())
		}
	}
}

// SetSerialNodes evaluates real serial steps and marks them as serial.
// A node is considered serial if all children of the root node point to one child.
func SetSerialNodes(root *node.Node) {
	child := root
	for child != nil {
		child = getNextSerialChild(child)
		if child != nil {
			child.SetSerial()
		}
	}
}

// nodeFilterFunc decides whether a candidate node is acceptable.
type nodeFilterFunc = func(node *node.Node) bool

// getNextSerialParent walks upwards and returns the nearest parent through which
// every path from the node passes (the "joint" parent) that satisfies all filters.
func getNextSerialParent(n *node.Node, filters ...nodeFilterFunc) *node.Node {
	if n.Parents.Len() == 0 {
		return nil
	}
	if n.Parents.Len() == 1 {
		parentsList := n.Parents.List()
		if checkFilter(parentsList[len(parentsList)-1], filters...) {
			return n.Parents.List()[0]
		}
	}
	var (
		parent      *node.Node
		lastParents = n.Parents.List()
		branches    = make([]*node.Set, len(lastParents))
	)
	for i := range branches {
		branches[i] = node.NewSet()
	}
	for !emptyNodeList(lastParents) {
		parent, lastParents = getJointNodes(lastParents, branches, getNextSerialParent)
		if parent != nil && checkFilter(parent, filters...) {
			return parent
		}
	}

	return nil
}

// getNextSerialChild is the downward counterpart of getNextSerialParent: it returns
// the nearest child through which every path from the node passes, subject to filters.
func getNextSerialChild(n *node.Node, filters ...nodeFilterFunc) *node.Node {
	if n == nil || n.Children.Len() == 0 {
		return nil
	}
	if n.Children.Len() == 1 {
		childrenList := n.Children.List()
		if checkFilter(childrenList[len(childrenList)-1], filters...) {
			return n.Children.List()[0]
		}
	}
	var (
		child        *node.Node
		lastChildren = n.Children.List()
		branches     = make([]*node.Set, len(lastChildren))
	)
	for i := range branches {
		branches[i] = node.NewSet()
	}
	for !emptyNodeList(lastChildren) {
		child, lastChildren = getJointNodes(lastChildren, branches, getNextSerialChild)
		if child != nil && checkFilter(child, filters...) {
			return child
		}
	}

	return nil
}

// getJointNodes advances every branch by one step (via getNext) and reports a node
// that is present in all branches, if one exists.
func getJointNodes(nodes []*node.Node, branches []*node.Set, getNext func(*node.Node, ...nodeFilterFunc) *node.Node) (*node.Node, []*node.Node) {
	lastNodes := make([]*node.Node, len(nodes))
	for i, n := range nodes {
		if n == nil {
			continue
		}
		if nextNode := getNext(n); nextNode != nil {
			lastNodes[i] = nextNode
			branches[i].Add(nextNode)
		}
	}
	if n := findJointNode(branches); n != nil {
		return n, lastNodes
	}
	return nil, lastNodes
}

// findJointNode returns the first node that exists in all given node sets.
// Note the order of the node sets are essential.
func findJointNode(nodeSets []*node.Set) *node.Node {
	if len(nodeSets) == 1 {
		if nodeSets[0].Len() == 0 {
			return nil
		}
		nodeList := nodeSets[0].List()
		return nodeList[len(nodeList)-1]
	}
	// contains nodes that are already validated that they are not the joint node.
	alreadyCheckedNodes := node.NewSet()
	for _, set := range nodeSets {
		for n := range set.IterateInverse() {
			if alreadyCheckedNodes.Has(n) {
				continue
			}
			if nodeSetsHave(nodeSets, n) {
				return n
			} else {
				alreadyCheckedNodes.Add(n)
			}
		}
	}
	return nil
}

// nodeSetsHave reports whether the node is a member of every given set.
func nodeSetsHave(nodeSets []*node.Set, n *node.Node) bool {
	for _, set := range nodeSets {
		if !set.Has(n) {
			return false
		}
	}
	return true
}

// emptyNodeList checks if all nodes of a node list are nil
func emptyNodeList(nodes []*node.Node) bool {
	for _, n := range nodes {
		if n != nil {
			return false
		}
	}
	return true
}

// checkFilter reports whether the node passes every supplied filter.
func checkFilter(node *node.Node, filters ...nodeFilterFunc) bool {
	for _, filter := range filters {
		if !filter(node) {
			return false
		}
	}
	return true
}
package models import( "encoding/json" ) /** * Type definition for AlertSeverityListEnum enum */ type AlertSeverityListEnum int /** * Value collection for AlertSeverityListEnum enum */ const ( AlertSeverityList_KCRITICAL AlertSeverityListEnum = 1 + iota AlertSeverityList_KWARNING AlertSeverityList_KINFO ) func (r AlertSeverityListEnum) MarshalJSON() ([]byte, error) { s := AlertSeverityListEnumToValue(r) return json.Marshal(s) } func (r *AlertSeverityListEnum) UnmarshalJSON(data []byte) error { var s string json.Unmarshal(data, &s) v := AlertSeverityListEnumFromValue(s) *r = v return nil } /** * Converts AlertSeverityListEnum to its string representation */ func AlertSeverityListEnumToValue(alertSeverityListEnum AlertSeverityListEnum) string { switch alertSeverityListEnum { case AlertSeverityList_KCRITICAL: return "kCritical" case AlertSeverityList_KWARNING: return "kWarning" case AlertSeverityList_KINFO: return "kInfo" default: return "kCritical" } } /** * Converts AlertSeverityListEnum Array to its string Array representation */ func AlertSeverityListEnumArrayToValue(alertSeverityListEnum []AlertSeverityListEnum) []string { convArray := make([]string,len( alertSeverityListEnum)) for i:=0; i<len(alertSeverityListEnum);i++ { convArray[i] = AlertSeverityListEnumToValue(alertSeverityListEnum[i]) } return convArray } /** * Converts given value to its enum representation */ func AlertSeverityListEnumFromValue(value string) AlertSeverityListEnum { switch value { case "kCritical": return AlertSeverityList_KCRITICAL case "kWarning": return AlertSeverityList_KWARNING case "kInfo": return AlertSeverityList_KINFO default: return AlertSeverityList_KCRITICAL } }
package task

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"strings"
	"syscall"
	"time"

	"github.com/radovskyb/watcher"

	"github.com/go-task/task/v3/errors"
	"github.com/go-task/task/v3/internal/fingerprint"
	"github.com/go-task/task/v3/internal/logger"
	"github.com/go-task/task/v3/taskfile"
)

// defaultWatchInterval is used when neither the CLI (e.Interval) nor the
// Taskfile configures a polling interval.
const defaultWatchInterval = 5 * time.Second

// watchTasks start watching the given tasks.
// It runs every task once, then re-runs the full set (with a fresh context
// and a reset compiler cache) whenever the file watcher reports a change.
// It blocks in w.Start until the watcher is closed (interrupt or error).
func (e *Executor) watchTasks(calls ...taskfile.Call) error {
	tasks := make([]string, len(calls))
	for i, c := range calls {
		tasks[i] = c.Task
	}
	e.Logger.Errf(logger.Green, "task: Started watching for tasks: %s\n", strings.Join(tasks, ", "))

	// Initial run of every requested task; context errors are expected on
	// cancellation and are not reported.
	ctx, cancel := context.WithCancel(context.Background())
	for _, c := range calls {
		c := c // copy for the closure (pre-Go 1.22 loop-variable capture)
		go func() {
			if err := e.RunTask(ctx, c); err != nil && !isContextError(err) {
				e.Logger.Errf(logger.Red, "%v\n", err)
			}
		}()
	}

	// Interval precedence: CLI flag > Taskfile setting > default.
	var watchInterval time.Duration
	switch {
	case e.Interval != 0:
		watchInterval = e.Interval
	case e.Taskfile.Interval != 0:
		watchInterval = e.Taskfile.Interval
	default:
		watchInterval = defaultWatchInterval
	}

	e.Logger.VerboseOutf(logger.Green, "task: Watching for changes every %v\n", watchInterval)

	w := watcher.New()
	defer w.Close()
	w.SetMaxEvents(1) // coalesce bursts of filesystem events into one
	closeOnInterrupt(w)

	// Event loop: on every change, cancel the running tasks, start a fresh
	// context, drop the compiler cache and re-run everything.
	go func() {
		for {
			select {
			case event := <-w.Event:
				e.Logger.VerboseErrf(logger.Magenta, "task: received watch event: %v\n", event)

				cancel()
				ctx, cancel = context.WithCancel(context.Background())

				e.Compiler.ResetCache()

				for _, c := range calls {
					c := c // copy for the closure
					go func() {
						if err := e.RunTask(ctx, c); err != nil && !isContextError(err) {
							e.Logger.Errf(logger.Red, "%v\n", err)
						}
					}()
				}
			case err := <-w.Error:
				switch err {
				case watcher.ErrWatchedFileDeleted:
					// Deletions are expected during rebuilds; ignore.
				default:
					e.Logger.Errf(logger.Red, "%v\n", err)
				}
			case <-w.Closed:
				cancel()
				return
			}
		}
	}()

	go func() {
		// re-register every 5 seconds because we can have new files, but this process is expensive to run
		for {
			if err := e.registerWatchedFiles(w, calls...); err != nil {
				e.Logger.Errf(logger.Red, "%v\n", err)
			}
			time.Sleep(watchInterval)
		}
	}()

	return w.Start(watchInterval)
}

// isContextError reports whether err is (or wraps, via TaskRunError) a
// context cancellation or deadline error.
func isContextError(err error) bool {
	if taskRunErr, ok := err.(*errors.TaskRunError); ok {
		err = taskRunErr.Err
	}
	return err == context.Canceled || err == context.DeadlineExceeded
}

// closeOnInterrupt shuts the watcher down when the process receives
// SIGINT or SIGTERM, which unblocks watchTasks' w.Start call.
func closeOnInterrupt(w *watcher.Watcher) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-ch
		w.Close()
	}()
}

// registerWatchedFiles walks each called task (including its deps and
// sub-task commands, recursively) and adds every not-yet-watched source
// file to the watcher. Files matched by shouldIgnoreFile are skipped.
func (e *Executor) registerWatchedFiles(w *watcher.Watcher, calls ...taskfile.Call) error {
	watchedFiles := w.WatchedFiles()

	var registerTaskFiles func(taskfile.Call) error
	registerTaskFiles = func(c taskfile.Call) error {
		task, err := e.CompiledTask(c)
		if err != nil {
			return err
		}

		// Recurse into dependencies first.
		for _, d := range task.Deps {
			if err := registerTaskFiles(taskfile.Call{Task: d.Task, Vars: d.Vars}); err != nil {
				return err
			}
		}
		// Commands that invoke other tasks are recursed into as well.
		for _, c := range task.Cmds {
			if c.Task != "" {
				if err := registerTaskFiles(taskfile.Call{Task: c.Task, Vars: c.Vars}); err != nil {
					return err
				}
			}
		}

		for _, s := range task.Sources {
			files, err := fingerprint.Glob(task.Dir, s)
			if err != nil {
				return fmt.Errorf("task: %s: %w", s, err)
			}
			for _, f := range files {
				absFile, err := filepath.Abs(f)
				if err != nil {
					return err
				}
				if shouldIgnoreFile(absFile) {
					continue
				}
				if _, ok := watchedFiles[absFile]; ok {
					continue // already registered
				}
				if err := w.Add(absFile); err != nil {
					return err
				}
				e.Logger.VerboseOutf(logger.Green, "task: watching new file: %v\n", absFile)
			}
		}
		return nil
	}

	for _, c := range calls {
		if err := registerTaskFiles(c); err != nil {
			return err
		}
	}
	return nil
}

// shouldIgnoreFile reports whether a path lies inside a directory that must
// never be watched (VCS metadata, task cache, node_modules).
func shouldIgnoreFile(path string) bool {
	return strings.Contains(path, "/.git") || strings.Contains(path, "/.hg") || strings.Contains(path, "/.task") || strings.Contains(path, "/node_modules")
}
package rtda

// Frame is a JVM stack frame. It holds the local variable table and the
// operand stack for a single method invocation. Frames link to one another
// through lower, forming the linked list that implements the JVM stack.
type Frame struct {
	lower        *Frame        // the frame below this one (linked-list stack implementation)
	localVars    LocalVars     // local variable table
	operandStack *OperandStack // operand stack
}

// NewFrame allocates a frame with the given local-variable-table size and
// operand-stack depth.
func NewFrame(maxLocals, maxStack uint) *Frame {
	return &Frame{
		localVars:    newLocalVars(maxLocals),
		operandStack: newOperandStack(maxStack),
		// The local-variable-table size and the operand-stack depth a method
		// needs are precomputed by the compiler and stored in the Code
		// attribute of the method_info structure in the class file.
	}
}

// getters

// LocalVars returns the frame's local variable table.
func (self *Frame) LocalVars() LocalVars {
	return self.localVars
}

// OperandStack returns the frame's operand stack.
func (self *Frame) OperandStack() *OperandStack {
	return self.operandStack
}
// a cancellation context can be seen as a convenience without which a data
// sink routine often ends up having to store away what comes out of a
// channel input as a result of a negative close poll
//
// after introducing a cancellation context it becomes possible for the
// source to block on a buffered channel if the sink is cancelled prematurely
//
// the situation is easily demonstrated with an infinitely fast source and
// a slow blocking sink, meant to represent a real setting where the source
// channel depth is insufficient to spread burstiness of incoming data evenly
//
// one solution is close & drain to complete the shutdown cleanly
//
// try removing the drain loop: the source is likely to end up blocked and
// the waitgroup will detect the deadlock at runtime
package main

import (
	"context"
	"log"
	"sync"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	wg := new(sync.WaitGroup)
	// Depth 1 keeps the buffer small enough that the fast source blocks.
	c := make(chan int, 1)

	// Source: produces integers as fast as possible until cancelled, then
	// closes c so the sink's drain loop can terminate.
	wg.Add(1)
	go func() {
		defer wg.Done()
		n := 0
		for {
			select {
			case <-ctx.Done():
				close(c)
				log.Print("source exit")
				return
			default:
				c <- n // may block here once the sink stops receiving
				log.Print("<-", n)
				n += 1
			}
		}
	}()

	// Sink: slow consumer; on cancellation it drains c so the source is
	// never left blocked on a send.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				// NOTE(review): after close(c), the `case n := <-c` arm can
				// still fire with zero values until this Done arm is chosen;
				// select picks randomly among ready cases.
				for range c {
					// drain
				}
				log.Print("sink exit")
				return
			case n := <-c:
				log.Print(n, "<-")
				time.Sleep(1 * time.Second) // work
			}
		}
	}()

	time.Sleep(1 * time.Second)
	log.Print("initiate shutdown")
	cancel()
	wg.Wait()
}
package daemonset

import (
	"testing"

	dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	// NOTE(review): the alias "corev1" actually refers to apimachinery's
	// meta/v1 package (where ObjectMeta lives); the conventional alias is
	// metav1. Left unchanged here.
	corev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestPrepareVolumes verifies which pod volumes prepareVolumes derives from
// a DynaKube spec: root, tenant secret, trusted-CA certificate, ActiveGate
// TLS and CSI storage volumes.
func TestPrepareVolumes(t *testing.T) {
	t.Run("has defaults if instance is nil", func(t *testing.T) {
		volumes := prepareVolumes(nil)

		assert.Contains(t, volumes, getRootVolume())
	})
	t.Run(`has root volume`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
			},
		}
		volumes := prepareVolumes(instance)

		assert.Contains(t, volumes, getRootVolume())
		// No TrustedCAs configured, so no certificate volume is expected.
		assert.NotContains(t, volumes, getCertificateVolume(instance))
	})
	t.Run(`has tenant secret volume`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			ObjectMeta: corev1.ObjectMeta{
				Name: testName,
			},
		}
		volumes := prepareVolumes(instance)

		assert.Contains(t, volumes, getOneAgentSecretVolume(instance))
	})
	t.Run(`has certificate volume`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				TrustedCAs: testName,
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
			},
		}
		volumes := prepareVolumes(instance)

		assert.Contains(t, volumes, getRootVolume())
		assert.Contains(t, volumes, getCertificateVolume(instance))
	})
	t.Run(`has tls volume`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				TrustedCAs: testName,
				ActiveGate: dynatracev1beta1.ActiveGateSpec{
					Capabilities: []dynatracev1beta1.CapabilityDisplayName{
						dynatracev1beta1.KubeMonCapability.DisplayName,
					},
					TlsSecretName: "testing",
				},
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
			},
		}
		volumes := prepareVolumes(instance)

		assert.Contains(t, volumes, getActiveGateCaCertVolume(instance))
	})
	t.Run(`doesn't have csi volume`, func(t *testing.T) {
		// Disabling read-only OneAgent should drop the CSI storage volume.
		instance := &dynatracev1beta1.DynaKube{
			ObjectMeta: corev1.ObjectMeta{
				Annotations: map[string]string{
					dynatracev1beta1.AnnotationFeatureDisableReadOnlyOneAgent: "true",
				},
			},
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
			},
		}
		volumes := prepareVolumes(instance)

		assert.NotContains(t, volumes, getCSIStorageVolume(instance))
	})
	t.Run(`csi volume not supported on classicFullStack`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					ClassicFullStack: &dynatracev1beta1.HostInjectSpec{},
				},
			},
		}
		volumes := prepareVolumes(instance)

		assert.NotContains(t, volumes, getCSIStorageVolume(instance))
	})
	t.Run(`has all volumes`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				TrustedCAs: testName,
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
				ActiveGate: dynatracev1beta1.ActiveGateSpec{
					Capabilities: []dynatracev1beta1.CapabilityDisplayName{
						dynatracev1beta1.KubeMonCapability.DisplayName,
					},
					TlsSecretName: "testing",
				},
			},
		}
		// Build the full daemonset to check the volumes reach the pod spec.
		dsInfo := HostMonitoring{
			builderInfo{
				dynakube:       instance,
				hostInjectSpec: instance.Spec.OneAgent.HostMonitoring,
				clusterID:      "",
			},
		}
		ds, err := dsInfo.BuildDaemonSet()
		require.NoError(t, err)

		volumes := ds.Spec.Template.Spec.Volumes

		assert.Contains(t, volumes, getRootVolume())
		assert.Contains(t, volumes, getCertificateVolume(instance))
		assert.Contains(t, volumes, getActiveGateCaCertVolume(instance))
		assert.Contains(t, volumes, getCSIStorageVolume(instance))
	})
}

// TestPrepareVolumeMounts verifies the container volume mounts derived from
// a DynaKube spec: (read-only) root, cluster CA, ActiveGate CA and CSI
// storage mounts.
func TestPrepareVolumeMounts(t *testing.T) {
	t.Run("has defaults if instance is nil", func(t *testing.T) {
		volumeMounts := prepareVolumeMounts(nil)

		assert.Contains(t, volumeMounts, getRootMount())
	})
	t.Run(`has root volume mount`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
			},
		}
		volumeMounts := prepareVolumeMounts(instance)

		assert.Contains(t, volumeMounts, getReadOnlyRootMount())
		assert.NotContains(t, volumeMounts, getActiveGateCaCertVolumeMount())
	})
	t.Run(`has cluster certificate volume mount`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
				TrustedCAs: testName,
			},
		}
		volumeMounts := prepareVolumeMounts(instance)

		assert.Contains(t, volumeMounts, getReadOnlyRootMount())
		assert.Contains(t, volumeMounts, getClusterCaCertVolumeMount())
		assert.NotContains(t, volumeMounts, getActiveGateCaCertVolumeMount())
	})
	t.Run(`has ActiveGate CA volume mount`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
				TrustedCAs: testName,
				ActiveGate: dynatracev1beta1.ActiveGateSpec{
					Capabilities: []dynatracev1beta1.CapabilityDisplayName{
						dynatracev1beta1.KubeMonCapability.DisplayName,
					},
					TlsSecretName: "testing",
				},
			},
		}
		volumeMounts := prepareVolumeMounts(instance)

		assert.Contains(t, volumeMounts, getReadOnlyRootMount())
		assert.Contains(t, volumeMounts, getActiveGateCaCertVolumeMount())
	})
	t.Run(`doesn't have readonly volume mounts`, func(t *testing.T) {
		// Read-only mode disabled via annotation: plain root mount, no CSI.
		instance := &dynatracev1beta1.DynaKube{
			ObjectMeta: corev1.ObjectMeta{
				Annotations: map[string]string{
					dynatracev1beta1.AnnotationFeatureDisableReadOnlyOneAgent: "true",
				},
			},
			Spec: dynatracev1beta1.DynaKubeSpec{
				ActiveGate: dynatracev1beta1.ActiveGateSpec{
					TlsSecretName: testName,
					Capabilities: []dynatracev1beta1.CapabilityDisplayName{
						dynatracev1beta1.RoutingCapability.DisplayName,
					},
				},
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
			},
		}
		volumeMounts := prepareVolumeMounts(instance)

		assert.Contains(t, volumeMounts, getRootMount())
		assert.Contains(t, volumeMounts, getActiveGateCaCertVolumeMount())
		assert.NotContains(t, volumeMounts, getCSIStorageMount())
	})
	t.Run(`readonly volume not supported on classicFullStack`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					ClassicFullStack: &dynatracev1beta1.HostInjectSpec{},
				},
			},
		}
		volumeMounts := prepareVolumeMounts(instance)

		assert.Contains(t, volumeMounts, getRootMount())
		assert.NotContains(t, volumeMounts, getCSIStorageMount())
	})
	t.Run(`has all volume mounts`, func(t *testing.T) {
		instance := &dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				TrustedCAs: testName,
				OneAgent: dynatracev1beta1.OneAgentSpec{
					HostMonitoring: &dynatracev1beta1.HostInjectSpec{},
				},
				ActiveGate: dynatracev1beta1.ActiveGateSpec{
					Capabilities: []dynatracev1beta1.CapabilityDisplayName{
						dynatracev1beta1.KubeMonCapability.DisplayName,
					},
					TlsSecretName: "testing",
				},
			},
		}
		dsInfo := HostMonitoring{
			builderInfo{
				dynakube:       instance,
				hostInjectSpec: instance.Spec.OneAgent.HostMonitoring,
				clusterID:      "",
			},
		}
		// Mounts are read off the first (OneAgent) container of the pod spec.
		volumeMounts := dsInfo.podSpec().Containers[0].VolumeMounts

		assert.Contains(t, volumeMounts, getReadOnlyRootMount())
		assert.Contains(t, volumeMounts, getActiveGateCaCertVolumeMount())
		assert.Contains(t, volumeMounts, getClusterCaCertVolumeMount())
		assert.Contains(t, volumeMounts, getCSIStorageMount())
	})
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package task_test

import (
	"testing"

	"github.com/google/gapid/core/assert"
	"github.com/google/gapid/core/event/task"
	"github.com/google/gapid/core/log"
)

// TestBatonYield checks that two goroutines handing a baton back and forth
// interleave in the strict A, B, C, D order. ExpectNonBlocking /
// ExpectBlocking are timeout constants declared elsewhere in this package.
// NOTE(review): got is appended from both goroutines; this relies on the
// baton handoff establishing a happens-before edge — confirm in the task
// package's Baton implementation.
func TestBatonYield(t *testing.T) {
	ctx := log.Testing(t)
	baton := task.NewBaton()
	expect := []string{"A", "B", "C", "D"}
	got := []string{}
	signal, done := task.NewSignal()
	go func() {
		got = append(got, "A")
		baton.Yield(nil)
		got = append(got, "C")
		baton.Release(nil)
	}()
	go func() {
		baton.Acquire()
		got = append(got, "B")
		baton.Yield(nil)
		got = append(got, "D")
		done(ctx)
	}()
	assert.For(ctx, "Interlock complete").That(signal.TryWait(ctx, ExpectNonBlocking)).Equals(true)
	assert.For(ctx, "Interlock order").That(got).DeepEquals(expect)
}

// TestBatonRelay checks that Relay unblocks a goroutine waiting in Yield.
func TestBatonRelay(t *testing.T) {
	ctx := log.Testing(t)
	baton := task.NewBaton()
	expect := []string{"A", "B"}
	got := []string{}
	signal, done := task.NewSignal()
	go func() {
		got = append(got, "A")
		baton.Yield(nil)
		got = append(got, "B")
		done(ctx)
	}()
	go baton.Relay()
	assert.For(ctx, "Replay complete").That(signal.TryWait(ctx, ExpectNonBlocking)).Equals(true)
	assert.For(ctx, "Replay order").That(got).DeepEquals(expect)
}

// TestBatonTryRelease: TryRelease succeeds when another goroutine is
// acquiring.
func TestBatonTryRelease(t *testing.T) {
	assert := assert.To(t)
	baton := task.NewBaton()
	go baton.Acquire()
	assert.For("Baton TryRelease").That(baton.TryRelease(nil, ExpectNonBlocking)).Equals(true)
}

// TestBatonTryReleaseBlocks: TryRelease times out when nobody acquires.
func TestBatonTryReleaseBlocks(t *testing.T) {
	assert := assert.To(t)
	baton := task.NewBaton()
	assert.For("Baton TryRelease").That(baton.TryRelease(nil, ExpectNonBlocking)).Equals(false)
}

// TestBatonTryAcquire: TryAcquire succeeds and carries the released value.
func TestBatonTryAcquire(t *testing.T) {
	assert := assert.To(t)
	baton := task.NewBaton()
	expect := 1
	go baton.Release(expect)
	got, ok := baton.TryAcquire(ExpectNonBlocking)
	assert.For("Baton TryAcquire").That(ok).Equals(true)
	assert.For("Baton value").That(got).Equals(expect)
}

// TestBatonTryAcquireBlocks: TryAcquire times out when nobody releases.
func TestBatonTryAcquireBlocks(t *testing.T) {
	assert := assert.To(t)
	baton := task.NewBaton()
	_, ok := baton.TryAcquire(ExpectBlocking)
	assert.For("Baton TryAcquire").That(ok).Equals(false)
}
/* Copyright 2022 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package manifest import ( "errors" "testing" sErrors "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/errors" "github.com/GoogleContainerTools/skaffold/v2/proto/enums" "github.com/GoogleContainerTools/skaffold/v2/testutil" ) func TestReplaceImageErr(t *testing.T) { testutil.Run(t, "TestReplaceImageErr", func(t *testutil.T) { err := replaceImageErr(errors.New("")) t.CheckDeepEqual(err.(*sErrors.ErrDef).StatusCode(), enums.StatusCode_RENDER_REPLACE_IMAGE_ERR) }) } func TestTransformManifestErr(t *testing.T) { testutil.Run(t, "TestReplaceImageErr", func(t *testutil.T) { err := transformManifestErr(errors.New("")) t.CheckDeepEqual(err.(*sErrors.ErrDef).StatusCode(), enums.StatusCode_RENDER_TRANSFORM_MANIFEST_ERR) }) } func TestLabelSettingErr(t *testing.T) { testutil.Run(t, "TestLabelSettingErr", func(t *testutil.T) { err := labelSettingErr(errors.New("")) t.CheckDeepEqual(err.(*sErrors.ErrDef).StatusCode(), enums.StatusCode_RENDER_SET_LABEL_ERR) }) } func TestParseImagesInManifestErr(t *testing.T) { testutil.Run(t, "TestParseImagesInManifestErr", func(t *testutil.T) { err := parseImagesInManifestErr(errors.New("")) t.CheckDeepEqual(err.(*sErrors.ErrDef).StatusCode(), enums.StatusCode_RENDER_PARSE_MANIFEST_IMAGES_ERR) }) } func TestWriteErr(t *testing.T) { testutil.Run(t, "TestWriteErr", func(t *testutil.T) { err := writeErr(errors.New("")) t.CheckDeepEqual(err.(*sErrors.ErrDef).StatusCode(), 
enums.StatusCode_RENDER_MANIFEST_WRITE_ERR) }) } func TestNSSettingErr(t *testing.T) { testutil.Run(t, "TestNSSettingErr", func(t *testutil.T) { err := nsSettingErr(errors.New("")) t.CheckDeepEqual(err.(*sErrors.ErrDef).StatusCode(), enums.StatusCode_RENDER_SET_NAMESPACE_ERR) }) }
package catalog

// ActionScope declares at which level of a repository an action operates.
type ActionScope string

const (
	// ActionScopeProject runs the action once per project.
	ActionScopeProject ActionScope = "project"
	// ActionScopeModule runs the action once per module.
	ActionScopeModule ActionScope = "module"
)

// ActionType identifies how an action is executed.
type ActionType string

const (
	// ActionTypeContainer executes the action inside a container image.
	ActionTypeContainer ActionType = "container"
	// ActionTypeGitHubAction executes a GitHub Action.
	ActionTypeGitHubAction ActionType = "githubaction"
)

// ActionAccess lists what an action is allowed to access.
type ActionAccess struct {
	// Env lists the environment variables exposed to the action.
	Env []string `yaml:"env"`
}

// ContainerAction holds the configuration for container-based actions.
type ContainerAction struct {
	Image   string       `yaml:"image"`   // Image is the full image reference including the registry
	Command string       `yaml:"command"` // Command is the command that should be executed in the container image to start the action.
	Certs   []ImageCerts `yaml:"certs,omitempty"`
}

// Action describes one catalog entry: what it is, how it runs, and when
// (via Rules) it applies. Field defaults/requirements come from the
// struct tags consumed by the configuration loader.
type Action struct {
	Repository  string          `yaml:"repository,omitempty"`
	Name        string          `required:"true" yaml:"name"`
	Enabled     bool            `default:"true" yaml:"enabled,omitempty"`
	Type        ActionType      `required:"true" yaml:"type"`
	Container   ContainerAction `yaml:"container,omitempty"` // Container contains the configuration for containerized actions
	Description string          `yaml:"description,omitempty"`
	Version     string          `yaml:"version,omitempty"`
	Scope       ActionScope     `required:"true" yaml:"scope"`
	Rules       []WorkflowRule  `yaml:"rules,omitempty"`
	Access      ActionAccess    `yaml:"access,omitempty"`
}
package main

// Tests for the Catapult (Bandwidth) API wrapper. They run an HTTP test
// server (startMockCatapultServer, defined elsewhere in this package) that
// replays canned responses and checks the requests the wrapper makes.
//
// NOTE(review): these tests reset package-level caches (applicationIDs,
// domainID, domainName) by hand, so they are order-sensitive and not safe
// to run in parallel.

import (
	"net/http"
	"os"
	"testing"

	"io/ioutil"

	"github.com/bandwidthcom/go-bandwidth"
	"github.com/gin-gonic/gin"
	"github.com/stretchr/testify/assert"
)

// newCatapultAPI reads its credentials from the environment.
func TestNewCatapultApi(t *testing.T) {
	os.Setenv("CATAPULT_USER_ID", "UserID")
	os.Setenv("CATAPULT_API_TOKEN", "Token")
	os.Setenv("CATAPULT_API_SECRET", "Secret")
	api, _ := newCatapultAPI(nil)
	assert.Equal(t, "UserID", api.client.UserID)
	assert.Equal(t, "Token", api.client.APIToken)
	assert.Equal(t, "Secret", api.client.APISecret)
}

// Missing credentials must produce an error.
func TestGetCatapultApiFail(t *testing.T) {
	os.Unsetenv("CATAPULT_USER_ID")
	os.Unsetenv("CATAPULT_API_TOKEN")
	os.Unsetenv("CATAPULT_API_SECRET")
	_, err := newCatapultAPI(nil)
	assert.Error(t, err)
}

// Empty application list -> the wrapper creates a new application.
func TestGetApplicationIDWithNewApplication(t *testing.T) {
	applicationIDs = map[string]string{"localhost": ""}
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/applications?size=1000",
			Method:        http.MethodGet,
			ContentToSend: `[]`,
		},
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/applications",
			Method:           http.MethodPost,
			EstimatedContent: `{"name":"GolangVoiceReferenceApp on localhost","incomingCallUrl":"http://localhost/callCallback","callbackHttpMethod":"POST","autoAnswer":true}`,
			HeadersToSend:    map[string]string{"Location": "/v1/users/userID/applications/123"},
		},
	})
	defer server.Close()
	id, _ := api.GetApplicationID()
	assert.Equal(t, "123", id)
}

// Matching application already exists -> its id is reused.
func TestGetApplicationIDWithExistingApplication(t *testing.T) {
	applicationIDs = map[string]string{"localhost": ""}
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/applications?size=1000",
			Method:        http.MethodGet,
			ContentToSend: `[{"name": "GolangVoiceReferenceApp on localhost", "id": "0123"}]`,
		},
	})
	defer server.Close()
	id, _ := api.GetApplicationID()
	assert.Equal(t, "0123", id)
}

// Subsequent calls are served from the cache (server closed after 1st call).
func TestGetApplicationIDRepeating(t *testing.T) {
	applicationIDs = map[string]string{"localhost": ""}
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/applications?size=1000",
			Method:        http.MethodGet,
			ContentToSend: `[{"name": "GolangVoiceReferenceApp on localhost", "id": "1234"}]`,
		},
	})
	id, _ := api.GetApplicationID()
	server.Close()
	assert.Equal(t, "1234", id)
	id, _ = api.GetApplicationID()
	assert.Equal(t, "1234", id)
	id, _ = api.GetApplicationID()
	assert.Equal(t, "1234", id)
}

func TestGetApplicationIDFail(t *testing.T) {
	applicationIDs = map[string]string{"localhost": ""}
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/applications?size=1000",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.GetApplicationID()
	assert.Error(t, err)
}

// No domain yet -> one is created with a (mocked) random name.
func TestGetDomainWithNewDomain(t *testing.T) {
	domainID = ""
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/domains?size=100",
			Method:        http.MethodGet,
			ContentToSend: `[]`,
		},
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/domains",
			Method:           http.MethodPost,
			EstimatedContent: `{"name":"random","description":"GolangVoiceReferenceApp's domain"}`,
			HeadersToSend:    map[string]string{"Location": "/v1/users/userID/domains/123"},
		},
	})
	useMockRandomString()
	defer restoreRandomString()
	defer server.Close()
	id, name, _ := api.GetDomain()
	assert.Equal(t, "123", id)
	assert.Equal(t, "random", name)
}

func TestGetDomainWithExistingDomain(t *testing.T) {
	domainID = ""
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/domains?size=100",
			Method:        http.MethodGet,
			ContentToSend: `[{"name": "domain", "id": "0123", "description": "GolangVoiceReferenceApp's domain"}]`,
		},
	})
	defer server.Close()
	id, name, _ := api.GetDomain()
	assert.Equal(t, "0123", id)
	assert.Equal(t, "domain", name)
}

// Like application ids, the domain is cached after the first lookup.
func TestGetDomainRepeating(t *testing.T) {
	domainID = ""
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/domains?size=100",
			Method:        http.MethodGet,
			ContentToSend: `[{"name": "domain1", "id": "1234", "description": "GolangVoiceReferenceApp's domain"}]`,
		},
	})
	id, _, _ := api.GetDomain()
	server.Close()
	assert.Equal(t, "1234", id)
	id, _, _ = api.GetDomain()
	assert.Equal(t, "1234", id)
	id, name, _ := api.GetDomain()
	assert.Equal(t, "1234", id)
	assert.Equal(t, "domain1", name)
}

func TestGetDomainFail(t *testing.T) {
	domainID = ""
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/domains?size=100",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, _, err := api.GetDomain()
	assert.Error(t, err)
}

// Reserves an available number and binds it to the cached application.
func TestCreatePhoneNumber(t *testing.T) {
	applicationIDs = map[string]string{"localhost": "123"}
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/availableNumbers/local?areaCode=910&quantity=1",
			Method:        http.MethodPost,
			ContentToSend: `[{"number": "+1234567890", "location": "/v1/users/userID/phoneNumbers/1234"}]`,
		},
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/phoneNumbers/1234",
			Method:           http.MethodPost,
			EstimatedContent: `{"applicationId":"123"}`,
		},
	})
	defer server.Close()
	phoneNumber, _ := api.CreatePhoneNumber("910")
	assert.Equal(t, "+1234567890", phoneNumber)
}

// Failure while resolving the application id.
func TestCreatePhoneNumberFail(t *testing.T) {
	applicationIDs = map[string]string{"localhost": ""}
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/applications?size=1000",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreatePhoneNumber("910")
	assert.Error(t, err)
}

// Failure while searching for an available number.
func TestCreatePhoneNumberFail2(t *testing.T) {
	applicationIDs = map[string]string{"localhost": "123"}
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/availableNumbers/local?areaCode=910&quantity=1",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreatePhoneNumber("910")
	assert.Error(t, err)
}

// Failure while binding the reserved number to the application.
func TestCreatePhoneNumberFail3(t *testing.T) {
	applicationIDs = map[string]string{"localhost": "123"}
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/availableNumbers/local?areaCode=910&quantity=1",
			Method:        http.MethodPost,
			ContentToSend: `[{"number": "+1234567890", "location": "/v1/users/userID/phoneNumbers/1234"}]`,
		},
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/phoneNumbers/1234",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreatePhoneNumber("910")
	assert.Error(t, err)
}

// Creates a SIP endpoint in the cached domain; name/password come from the
// mocked random-string generator.
func TestCreateSIPAccount(t *testing.T) {
	applicationIDs = map[string]string{"localhost": "123"}
	domainID = "456"
	domainName = "domain1"
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/domains/456/endpoints",
			Method:           http.MethodPost,
			EstimatedContent: `{"name":"random","description":"GolangVoiceReferenceApp's SIP Account","domainId":"456","applicationId":"123","credentials":{"password":"random"}}`,
			HeadersToSend:    map[string]string{"Location": "/v1/users/userID/domains/456/endpoints/567"},
		},
	})
	useMockRandomString()
	defer server.Close()
	defer restoreRandomString()
	account, _ := api.CreateSIPAccount()
	assert.EqualValues(t, &sipAccount{
		EndpointID: "567",
		URI:        "sip:random@domain1.bwapp.bwsip.io",
		Password:   "random",
	}, account)
}

func TestCreateSIPAccountFail(t *testing.T) {
	applicationIDs = map[string]string{"localhost": "123"}
	domainID = "456"
	domainName = "domain2"
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/domains/456/endpoints",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	useMockRandomString()
	defer server.Close()
	defer restoreRandomString()
	_, err := api.CreateSIPAccount()
	assert.Error(t, err)
}

// Failure while resolving the domain.
func TestCreateSIPAccountFail2(t *testing.T) {
	applicationIDs = map[string]string{"localhost": "123"}
	domainID = ""
	domainName = ""
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/domains",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreateSIPAccount()
	assert.Error(t, err)
}

// Failure while resolving the application.
func TestCreateSIPAccountFail3(t *testing.T) {
	applicationIDs = map[string]string{"localhost": ""}
	domainID = ""
	domainName = ""
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/applications",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreateSIPAccount()
	assert.Error(t, err)
}

func TestCreateSIPAuthToken(t *testing.T) {
	domainID = "123"
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/domains/123/endpoints/456/tokens",
			Method:        http.MethodPost,
			ContentToSend: `{"token": "token"}`,
		},
	})
	defer server.Close()
	token, _ := api.CreateSIPAuthToken("456")
	assert.Equal(t, "token", token.Token)
}

func TestCreateSIPAuthTokenFail(t *testing.T) {
	domainID = "123"
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/domains/123/endpoints/456/tokens",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreateSIPAuthToken("456")
	assert.Error(t, err)
}

func TestCreateSIPAuthTokenFail2(t *testing.T) {
	domainID = ""
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/domains",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreateSIPAuthToken("456")
	assert.Error(t, err)
}

// UpdateCall returns the id parsed from the Location header.
func TestUpdateCall(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123",
			Method:           http.MethodPost,
			EstimatedContent: `{"state":"transfering"}`,
			HeadersToSend:    map[string]string{"Location": "/v1/users/userID/calls/567"},
		},
	})
	defer server.Close()
	id, _ := api.UpdateCall("123", &bandwidth.UpdateCallData{
		State: "transfering",
	})
	assert.Equal(t, "567", id)
}

func TestUpdateCallFail(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.UpdateCall("123", &bandwidth.UpdateCallData{
		State: "transfering",
	})
	assert.Error(t, err)
}

func TestGetCall(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/calls/123",
			Method:        http.MethodGet,
			ContentToSend: `{"id": "123", "state":"transfering"}`,
		},
	})
	defer server.Close()
	call, _ := api.GetCall("123")
	assert.Equal(t, "123", call.ID)
}

func TestGetCallFail(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.GetCall("123")
	assert.Error(t, err)
}

func TestPlayAudioToCall(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123/audio",
			Method:           http.MethodPost,
			EstimatedContent: `{"fileUrl":"url"}`,
		},
	})
	defer server.Close()
	err := api.PlayAudioToCall("123", "url")
	assert.NoError(t, err)
}

func TestSpeakPlayAudioFail(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123/audio",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	err := api.PlayAudioToCall("123", "url")
	assert.Error(t, err)
}

// SpeakSentenceToCall hard-codes the female en_US "julie" voice.
func TestSpeakSentenceToCall(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123/audio",
			Method:           http.MethodPost,
			EstimatedContent: `{"sentence":"text","gender":"female","locale":"en_US","voice":"julie"}`,
		},
	})
	defer server.Close()
	err := api.SpeakSentenceToCall("123", "text")
	assert.NoError(t, err)
}

func TestSpeakSentenceToCallFail(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123/audio",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	err := api.SpeakSentenceToCall("123", "test")
	assert.Error(t, err)
}

func TestCreateGather(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123/gather",
			Method:           http.MethodPost,
			EstimatedContent: `{"maxDigits":"1"}`,
			HeadersToSend:    map[string]string{"Location": "/v1/users/userID/calls/123/gather/456"},
		},
	})
	defer server.Close()
	id, err := api.CreateGather("123", &bandwidth.CreateGatherData{
		MaxDigits: 1,
	})
	assert.NoError(t, err)
	assert.Equal(t, "456", id)
}

func TestCreateGatherFail(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls/123/gather",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreateGather("123", &bandwidth.CreateGatherData{
		MaxDigits: 1,
	})
	assert.Error(t, err)
}

func TestGetRecording(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/recordings/456",
			Method:        http.MethodGet,
			ContentToSend: `{"id": "456"}`,
		},
	})
	defer server.Close()
	r, err := api.GetRecording("456")
	assert.NoError(t, err)
	assert.Equal(t, "456", r.ID)
}

func TestGetRecordingFail(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/recordings/456",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.GetRecording("456")
	assert.Error(t, err)
}

func TestCreateCall(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls",
			Method:           http.MethodPost,
			EstimatedContent: `{"from":"111","to":"222"}`,
			HeadersToSend:    map[string]string{"Location": "/v1/users/userID/calls/123"},
		},
	})
	defer server.Close()
	id, err := api.CreateCall(&bandwidth.CreateCallData{
		From: "111",
		To:   "222",
	})
	assert.NoError(t, err)
	assert.Equal(t, "123", id)
}

func TestCreateCallFail(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/calls",
			Method:           http.MethodPost,
			StatusCodeToSend: http.StatusBadRequest,
		},
	})
	defer server.Close()
	_, err := api.CreateCall(&bandwidth.CreateCallData{
		From: "111",
		To:   "222",
	})
	assert.Error(t, err)
}

func TestDownloadMediaFile(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:  "/v1/users/userID/media/test",
			Method:        http.MethodGet,
			ContentToSend: `123`,
			HeadersToSend: map[string]string{"Content-Type": "text/plain"},
		},
	})
	defer server.Close()
	r, contentType, err := api.DownloadMediaFile("test")
	// NOTE(review): r.Close is deferred before the error check; if err were
	// non-nil and r nil this would panic. Check err before deferring.
	defer r.Close()
	assert.NoError(t, err)
	assert.Equal(t, "text/plain", contentType)
	// NOTE(review): ioutil.ReadAll is deprecated; io.ReadAll is the modern
	// equivalent.
	b, _ := ioutil.ReadAll(r)
	assert.Equal(t, "123\n", string(b))
}

func TestDownloadMediaFileFail(t *testing.T) {
	server, api := startMockCatapultServer(t, []RequestHandler{
		RequestHandler{
			PathAndQuery:     "/v1/users/userID/media/test",
			Method:           http.MethodGet,
			StatusCodeToSend: http.StatusNotFound,
		},
	})
	defer server.Close()
	_, _, err := api.DownloadMediaFile("test")
	assert.Error(t, err)
}

// The middleware stores a catapultAPI instance in the gin context.
func TestCatapultMiddleware(t *testing.T) {
	os.Setenv("CATAPULT_USER_ID", "UserID")
	os.Setenv("CATAPULT_API_TOKEN", "Token")
	os.Setenv("CATAPULT_API_SECRET", "Secret")
	context := createFakeGinContext()
	catapultMiddleware(context)
	instance := context.MustGet("catapultAPI")
	assert.NotNil(t, instance)
	assert.NotNil(t, instance.(catapultAPIInterface))
}

// Without credentials the middleware panics and stores nothing.
func TestCatapultMiddlewareFail(t *testing.T) {
	os.Unsetenv("CATAPULT_USER_ID")
	os.Unsetenv("CATAPULT_API_TOKEN")
	os.Unsetenv("CATAPULT_API_SECRET")
	context := createFakeGinContext()
	gin.SetMode(gin.TestMode)
	defer func() {
		_, ok := context.Get("catapultAPI")
		assert.False(t, ok)
		r := recover()
		assert.NotNil(t, r)
	}()
	catapultMiddleware(context)
}

func TestRandomString(t *testing.T) {
	assert.Equal(t, 10, len(randomString(10)))
	assert.Equal(t, 16, len(randomString(16)))
	assert.NotEqual(t, randomString(32), randomString(32))
}

// originalRandomString keeps the real generator so tests can restore it.
var originalRandomString = randomString

// useMockRandomString makes randomString deterministic ("random") so request
// bodies containing generated names/passwords can be asserted exactly.
func useMockRandomString() {
	randomString = func(length int) string {
		return "random"
	}
}

// restoreRandomString reinstates the real generator after a mocked test.
func restoreRandomString() {
	randomString = originalRandomString
}
package sass

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/thatguystone/acrylic/internal"
	"github.com/thatguystone/acrylic/internal/testutil"
	"github.com/thatguystone/acrylic/watch"
	"github.com/thatguystone/cog/check"
)

// hit performs a GET / against the handler and returns the recorded response.
func hit(h http.Handler) *httptest.ResponseRecorder {
	req := httptest.NewRequest("GET", "/", nil)
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)
	return rr
}

// TestSassBasic compiles a root file that @imports from two extra include
// paths and checks both imported rules appear in the output.
func TestSassBasic(t *testing.T) {
	c := check.New(t)
	tmp := testutil.NewTmpDir(c, map[string]string{
		"all.scss":          `@import "sub"; @import "sub2";`,
		"more/_sub.scss":    `.sub {color: #000;}`,
		"more2/_sub2.scss":  `.sub2 {color: #fff;}`,
	})
	defer tmp.Remove()
	sass := New(
		tmp.Path("all.scss"),
		IncludePaths(
			tmp.Path("more"),
			tmp.Path("more2")),
		LogTo(internal.NewLogger("test", c.Logf)))
	rr := hit(sass)
	c.Equal(rr.Code, http.StatusOK)
	body := rr.Body.String()
	c.Contains(body, `.sub {`)
	c.Contains(body, `.sub2 {`)
}

// TestSassChange rewrites the source file and polls until the watcher-driven
// recompile makes the new rule appear in the served CSS.
func TestSassChange(t *testing.T) {
	c := check.New(t)
	tmp := testutil.NewTmpDir(c, map[string]string{
		"all.scss": `.all {color: #000;}`,
	})
	defer tmp.Remove()
	w := watch.New(tmp.Path("."))
	defer w.Stop()
	sass := New(
		tmp.Path("all.scss"),
		LogTo(internal.NewLogger("test", c.Logf)),
		Watch(w))
	rr := hit(sass)
	c.Equal(rr.Code, http.StatusOK)
	tmp.WriteFile("all.scss", `.some {color: #000;}`)
	// The rebuild is asynchronous; retry for up to 500 iterations with a
	// short sleep between attempts.
	c.Until(500, func() bool {
		rr := hit(sass)
		c.Equal(rr.Code, http.StatusOK)
		if !strings.Contains(rr.Body.String(), ".some {") {
			time.Sleep(2 * time.Millisecond)
			return false
		}
		return true
	})
}

// TestSassErrors feeds an unterminated @import and expects a 500 response.
func TestSassErrors(t *testing.T) {
	c := check.New(t)
	tmp := testutil.NewTmpDir(c, map[string]string{
		"all.scss": `@import "`,
	})
	defer tmp.Remove()
	sass := New(
		tmp.Path("all.scss"),
		LogTo(internal.NewLogger("test", c.Logf)))
	rr := hit(sass)
	c.Equal(rr.Code, http.StatusInternalServerError)
}
// dockerns は Docker コンテナーへの接続な名前解決を行う HTTP / SOCKS v5 プロキシーサーバー及び DNS サーバー。 // // ルーティングに関する設定は etcd 上に保存して使用する。 // // # 「ホスト名が ^.*\.my-service\.com$ の正規表現に一致したら my_container_name へ接続する」というルーティング情報を master アカウントに追加する。 // # 0.regexp_name の 0 は優先順位で、複数のルーティング情報がある場合に値が大きいほど優先される。regexp_name は管理上の設定名なので何でも構わない。 // curl -L http://172.17.42.1:4001/v2/keys/proxy/master/my_container_name/0.regexp_name -X PUT -d value='^.*\.my-service\.com$' // // 上記のようなルーティング情報が保存されている状態で、以下のようにして dockerns を起動する。 // // # Docker Remote API は /var/run/docker.sock 経由でアクセス // # etcd は http://172.17.42.1:4001 経由でアクセス // # HTTP は 80 番ポート、SOCKS は 1080 番ポート、DNS は 53 番ポートで待ち受け // dockerns -docker=unix:///var/run/docker.sock: -etcd=http://172.17.42.1:4001 -http=:80 -socks=:1080 -dns=:53 // // この状態で HTTP や SOCKS v5 プロトコルでアクセスすると、プロキシーのユーザー名とパスワードを要求される。 // // ユーザー名(アカウント名) // master // パスワード // 任意の文字列 (dockerns 起動時に -password オプションで指定した場合はその文字列) // // 接続しようとした先が正規表現に一致している場合は本来の接続先ではなく指定されたコンテナーへの接続としてすり替えられる。 // // dockerns の起動中に Docker のコンテナーが起動/終了されたり etcd のルーティング情報が変化した場合には随時設定が再構築される。 // // 有効なオプションは以下の通り。 // // -d // デバッグモード。 // -reverse // HTTP サーバーでリバースプロキシーモードを有効にする。 // 有効にするためには -account オプションで有効なアカウント名を指定する必要がある。 // -account="" // アカウント名。 // 常に特定のアカウントを使用する場合はここでアカウント名を指定するとユーザー認証が不要になる。 // -realm="Proxy" // HTTP プロキシーで使用されるレルム。 // -password="" // HTTP / SOCKS v5 プロキシーで使用するパスワード。 // 省略した場合は任意の文字列を入力すれば通過できる。 // -docker="" // Docker Remote API にアクセスするためのアドレスを指定する。 // 省略した場合は Docker Remote API は使用せずに起動する。 // 例: 'http://172.17.42.1:4243', 'unix:///path/to/docker.sock:' // -etcd="http://172.17.42.1:4001" // etcd にアクセスするためのアドレスを指定する。 // -routes="/proxy" // プロキシールーティング情報が etcd 上のどこを基点に保存されているのかを指定する。 // -http="" // HTTP プロキシーが待ち受けるアドレスを :80 のような形で指定する。省略した場合は待ち受けない。 // -socks="" // SOCKS v5 プロキシーが待ち受けるアドレスを :1080 のような形で指定する。省略した場合は待ち受けない。 // -dns="" // DNS サーバが待ち受けるアドレスを :53 のような形で指定する。省略した場合は待ち受けない。 // 使用するためには -account でアカウント名を適切に渡す必要がある。 // -ns="8.8.8.8:53" // DNS 
サーバが自分自身で解決できなかったリクエストを転送する先のネームサーバー。 // -fakemx="" // -ns で指定されたサーバーからの応答を返す前に MX レコードの内容を書き換える場合に指定する。 package main import ( "flag" "log" "os" "os/signal" "time" "github.com/mimoto-xxxxxx/dockerns/accounts" "github.com/mimoto-xxxxxx/dockerns/dns" "github.com/mimoto-xxxxxx/dockerns/proxy" ) func main() { var ( debug = flag.Bool("d", false, "debug mode") reverse = flag.Bool("reverse", false, "enable reverse http proxy mode") account = flag.String("account", "", "account") realm = flag.String("realm", "Proxy", "realm for proxy server") proxyPassword = flag.String("password", "", "password for proxy server") dockerAddress = flag.String("docker", "", "docker remote api address") etcdAddress = flag.String("etcd", "http://172.17.42.1:4001", "etcd address") etcdRoot = flag.String("routes", "/proxy", "etcd routes information root") httpService = flag.String("http", "", "HTTP service address (e.g., ':80')") socksService = flag.String("socks", "", "SOCKSv5 service address (e.g., ':1080')") dnsService = flag.String("dns", "", "DNS service address (e.g., ':53')") nameServer = flag.String("ns", "8.8.8.8:53", "secondary name server (e.g., '8.8.8.8:53')") fakeMX = flag.String("fakemx", "", "enable mx record poisoning(e.g., 'localhost.localdomain.')") ) flag.Parse() ac := accounts.New(*dockerAddress, *etcdAddress, *etcdRoot) ac.Verbose = *debug end := make(chan struct{}) c := make(chan os.Signal) signal.Notify(c, os.Interrupt) go func() { for _ = range c { end <- struct{}{} } }() go func() { // 初回の設定読み込みが成功するまで待機する log.Println("building routing table") err := ac.Reload() for err != nil { time.Sleep(time.Second) log.Println("wait...") err = ac.Reload() } go ac.Watch() if *httpService != "" { go func() { if *reverse && *account != "" { s := proxy.NewRevHTTP(ac, *account) if err := s.ListenAndServe(*httpService); err != nil { log.Println("ListenAndServe(RevHTTP):", err) } } else { s := proxy.NewHTTP(ac) s.AccountName = *account s.Password = *proxyPassword s.Realm = *realm if err := 
s.ListenAndServe(*httpService); err != nil { log.Println("ListenAndServe(HTTP):", err) } } end <- struct{}{} }() } if *socksService != "" { go func() { s := proxy.NewSOCKS(ac) s.AccountName = *account if err := s.ListenAndServe(*socksService); err != nil { log.Println("ListenAndServe(SOCKS):", err) } end <- struct{}{} }() } if *dnsService != "" { go func() { s := dns.New(ac) s.AccountName = *account s.NameServer = *nameServer s.FakeMX = *fakeMX if err := s.ListenAndServe(*dnsService); err != nil { log.Println("ListenAndServe(DNS):", err) } end <- struct{}{} }() } }() <-end }
// Copyright 2018 SixUnDeuxZero
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package anchor

import (
	"context"
	"crypto/ecdsa"
	"fmt"
	"math/big"
	"time"

	log "github.com/sirupsen/logrus"

	"github.com/Magicking/faktur-daemon/internal/db"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"

	cmn "github.com/Magicking/faktur-daemon/common"
)

// Anchor sends merkle roots to an Ethereum contract and tracks their
// confirmation state in the database. It keeps a local nonce counter so
// transactions can be submitted back-to-back without waiting for the chain.
// NOTE(review): lastNonce is mutated from both Run and runWatchDog goroutines
// without synchronization — presumably safe only because of how updateNonce
// is timed; verify.
type Anchor struct {
	key       *ecdsa.PrivateKey // signing key
	from      common.Address    // address derived from key
	lastNonce uint64            // next nonce to use for outgoing transactions
	chainId   *big.Int          // EIP-155 chain id used when signing
}

// NewAnchor builds an Anchor for the given key, starting nonce and chain id.
// The sending address is derived from the key's public half.
func NewAnchor(_key *ecdsa.PrivateKey, _lastNonce uint64, _chainId *big.Int) *Anchor {
	_from := crypto.PubkeyToAddress(_key.PublicKey)
	a := Anchor{
		key:       _key,
		from:      _from,
		lastNonce: _lastNonce,
		chainId:   _chainId,
	}
	return &a
}

// SendWithValueMessage signs and broadcasts a transaction carrying data (and
// an optional value) to the given address, using the suggested gas price and
// an estimated gas limit. On success it increments the local nonce and
// returns the transaction hash.
func (a *Anchor) SendWithValueMessage(ctx context.Context, to common.Address, value *big.Int, data []byte) (common.Hash, error) {
	nc := cmn.ClientFromContext(ctx)
	auth := bind.NewKeyedTransactor(a.key)
	if value == nil {
		value = new(big.Int) // treat nil as zero value
	}
	gasPrice, err := nc.SuggestGasPrice(ctx)
	if err != nil {
		return common.Hash{}, fmt.Errorf("failed to suggest gas price: %v", err)
	}
	_to := to // local copy so we can take its address for CallMsg
	gasLimit, err := nc.EstimateGas(ctx, ethereum.CallMsg{
		From:     a.from,
		To:       &_to,
		Gas:      0,
		GasPrice: gasPrice,
		Value:    value,
		Data:     data,
	})
	if err != nil {
		return common.Hash{}, fmt.Errorf("Could not estimate gas: %v", err)
	}
	rawTx := types.NewTransaction(a.lastNonce, to, value, gasLimit, gasPrice, data)
	signedTx, err := auth.Signer(types.NewEIP155Signer(a.chainId), a.from, rawTx)
	if err != nil {
		return common.Hash{}, fmt.Errorf("SendWithValueMessage: %v", err)
	}
	err = nc.SendTransaction(ctx, signedTx)
	if err != nil {
		return common.Hash{}, fmt.Errorf("SendTransaction: %v", err)
	}
	// Only advance the nonce once the node accepted the transaction.
	a.lastNonce++
	return signedTx.Hash(), nil
}

// updateNonce refreshes lastNonce from the chain, but never lowers it below
// the locally tracked value (local may be ahead of the node's view when
// transactions are still pending).
func (a *Anchor) updateNonce(ctx context.Context) error {
	nc := cmn.ClientFromContext(ctx)
	nonce, err := nc.NonceAt(ctx, a.from, nil)
	if err != nil {
		return err
	}
	/*
		val, err := nc.BalanceAt(ctx, a.from, nil)
		if err != nil {
			return err
		}
		log.Printf("Balance at %s: %s\n", a.from.Hex(), val.String())
	*/
	if a.lastNonce >= nonce {
		return nil
	}
	log.Println("Nonce updated to", nonce)
	a.lastNonce = nonce
	return nil
}

// updateWaiting scans WAITING_CONFIRMATION entries and marks those whose
// transaction has a receipt as SENT. Entries without a receipt are left as-is
// (timeout/retry handling is still a TODO, see comments below).
func (a *Anchor) updateWaiting(ctx context.Context) {
	nc := cmn.ClientFromContext(ctx)
	roots, err := db.FilterByState(ctx, db.WAITING_CONFIRMATION)
	if err != nil {
		log.Printf("Could not query database: %v", err)
		return
	}
	for _, entry := range roots {
		txHash := common.HexToHash(entry.TransactionHash)
		root := common.HexToHash(entry.MerkleRoot)
		rcpt, err := nc.TransactionReceipt(ctx, txHash)
		if err != nil {
			log.Printf("TransactionReceipt(%v): %v", txHash.Hex(), err)
			continue
		}
		if rcpt != nil {
			// Receipt exists: the transaction is mined, record it as SENT.
			if err = db.UpdateTx(ctx, root, nil, db.SENT); err != nil {
				log.Printf("TODO Could not save merkle root %v: %v", txHash.Hex(), err)
				continue
			}
			log.Printf("Confirmed: Root: %v; txHash: %v", root.Hex(), txHash.Hex())
			continue
		}
		// Check if timeout too old
		log.Println(entry)
		// Set to retry if necessary
	}
}

// updateRetry re-submits every merkle root in RETRY state and moves the
// successful ones to WAITING_CONFIRMATION.
func (a *Anchor) updateRetry(ctx context.Context, contractAddress common.Address) {
	roots, err := db.FilterByState(ctx, db.RETRY)
	if err != nil {
		log.Printf("Could not query database: %v", err)
		return
	}
	for _, entry := range roots {
		root := common.HexToHash(entry.MerkleRoot)
		txHash, err := a.SendWithValueMessage(ctx, contractAddress, new(big.Int), root.Bytes())
		if err != nil {
			log.Printf("Could not sent transaction for hash %v: %v", root.Hex(), err)
			continue
		}
		log.Printf("Transaction sent: %v", txHash.Hex())
		// Save merkleroot to database with state WAITING CONFIRMATION
		if err = db.UpdateTx(ctx, root, &txHash, db.WAITING_CONFIRMATION); err != nil {
			log.Printf("TODO Could not save merkle root %v: %v", root.Hex(), err)
			continue
		}
	}
}

// runWatchDog periodically (every 10s) retries failed submissions, promotes
// confirmed ones and refreshes the nonce. It never returns.
// NOTE(review): the ticker is never stopped and ctx cancellation is not
// observed — acceptable only because this goroutine lives for the whole
// process; confirm.
func (a *Anchor) runWatchDog(ctx context.Context, contractAddress common.Address) {
	ticker := time.NewTicker(time.Duration(10 * time.Second))
	a.updateNonce(ctx)
	for {
		select {
		case <-ticker.C:
			a.updateRetry(ctx, contractAddress)
			a.updateWaiting(ctx)
			a.updateNonce(ctx)
		}
		//SEND
	}
}

// Run consumes merkle roots from c and anchors each one on-chain. Failed
// submissions are stored as RETRY so the watchdog can re-send them;
// successful ones are stored as WAITING_CONFIRMATION.
func (a *Anchor) Run(ctx context.Context, contractAddress common.Address, c chan common.Hash) {
	if a.lastNonce == 0 {
		if err := a.updateNonce(ctx); err != nil {
			log.Fatalf("Could not obtain fresh nonce: %v", err)
		}
	}
	go a.runWatchDog(ctx, contractAddress)
	// Get NOT_SENT
	// re-emit every TXs in not SENT_STATE
	for root := range c {
		txHash, err := a.SendWithValueMessage(ctx, contractAddress, new(big.Int), root.Bytes())
		if err != nil {
			log.WithFields(log.Fields{
				"hash": root.Hex(),
			}).Warn(err)
			// Save in database even if it might already be present
			// This allow upstream to forget about the tx if need <= TODO CHECK THAT
			err = db.UpdateTx(ctx, root, nil, db.RETRY)
			if err != nil {
				log.WithFields(log.Fields{
					"hash":       "",
					"merkleRoot": root.Hex(),
				}).Warn(err)
			}
			continue
		}
		log.Printf("Transaction sent: %v", txHash.Hex())
		// Save merkleroot to database with state WAITING_CONFIRMATION
		err = db.UpdateTx(ctx, root, &txHash, db.WAITING_CONFIRMATION)
		if err != nil {
			log.WithFields(log.Fields{
				"hash":       txHash.Hex(),
				"merkleRoot": root.Hex(),
			}).Warn(err)
		}
	}
}
// +build !debug package cache func (q *queue) checkInvariants() {} func (c *lru) checkInvariants() {}
package controller

import (
	"antalk-go/internal/common"
	proto "antalk-go/internal/proto/pb"
	"antalk-go/internal/push/service"
	"context"
)

// Controller dispatches incoming RPC commands to the push service.
type Controller struct {
	// push handles message-notify commands.
	// NOTE(review): push is never assigned in New — c.push.MsgNotify in Cmd
	// runs on a nil *service.Push; confirm that the method tolerates a nil
	// receiver or that callers wire push in elsewhere.
	push *service.Push
}

// New creates a Controller.
// NOTE(review): the config c is currently unused — TODO confirm this is
// intentional.
func New(c *common.Config) (*Controller, error) {
	s := &Controller{}
	return s, nil
}

// Cmd handles a generic command request. For CMD_MSG_NOTIFY it unmarshals the
// payload, forwards it to the push service and marshals the response; any
// failure is reported through resp.Meta.ErrorCode rather than a Go error
// (the returned error is always nil).
func (c *Controller) Cmd(ctx context.Context, req *proto.CmdReq, resp *proto.CmdResp) error {
	if req.Meta.CmdType == proto.CmdType_CMD_MSG_NOTIFY {
		meta := &proto.CmdMeta{}
		notifyReq := &proto.MsgNotifyReq{}
		if err := notifyReq.Unmarshal(req.Data); err != nil {
			meta.ErrorCode = proto.ErrorCode_ERROR_INTERNAL
			resp.Meta = meta
			//TODO messi: log
			return nil
		}
		//TODO messi: validate request parameters
		notifyResp := &proto.MsgNotifyResp{}
		errCode := c.push.MsgNotify(ctx, notifyReq, notifyResp)
		if errCode != proto.ErrorCode_ERROR_NONE {
			meta.ErrorCode = errCode
			resp.Meta = meta
			return nil
		}
		data, err := notifyResp.Marshal()
		if err != nil {
			meta.ErrorCode = proto.ErrorCode_ERROR_INTERNAL
			resp.Meta = meta
			return nil
		}
		//success
		meta.ErrorCode = proto.ErrorCode_ERROR_NONE
		resp.Meta = meta
		resp.Data = data
		return nil
	}
	return nil
}
package odoo

import (
	"fmt"
)

// IrActionsClient represents ir.actions.client model.
// NOTE(review): the struct tags spell "omptempty" rather than "omitempty"
// throughout — verify against the xmlrpc marshaller whether the option is
// actually honored or silently ignored.
type IrActionsClient struct {
	LastUpdate     *Time      `xmlrpc:"__last_update,omptempty"`
	BindingModelId *Many2One  `xmlrpc:"binding_model_id,omptempty"`
	BindingType    *Selection `xmlrpc:"binding_type,omptempty"`
	Context        *String    `xmlrpc:"context,omptempty"`
	CreateDate     *Time      `xmlrpc:"create_date,omptempty"`
	CreateUid      *Many2One  `xmlrpc:"create_uid,omptempty"`
	DisplayName    *String    `xmlrpc:"display_name,omptempty"`
	Help           *String    `xmlrpc:"help,omptempty"`
	Id             *Int       `xmlrpc:"id,omptempty"`
	Name           *String    `xmlrpc:"name,omptempty"`
	Params         *String    `xmlrpc:"params,omptempty"`
	ParamsStore    *String    `xmlrpc:"params_store,omptempty"`
	ResModel       *String    `xmlrpc:"res_model,omptempty"`
	Tag            *String    `xmlrpc:"tag,omptempty"`
	Target         *Selection `xmlrpc:"target,omptempty"`
	Type           *String    `xmlrpc:"type,omptempty"`
	WriteDate      *Time      `xmlrpc:"write_date,omptempty"`
	WriteUid       *Many2One  `xmlrpc:"write_uid,omptempty"`
	XmlId          *String    `xmlrpc:"xml_id,omptempty"`
}

// IrActionsClients represents array of ir.actions.client model.
type IrActionsClients []IrActionsClient

// IrActionsClientModel is the odoo model name.
const IrActionsClientModel = "ir.actions.client"

// Many2One convert IrActionsClient to *Many2One.
func (iac *IrActionsClient) Many2One() *Many2One {
	return NewMany2One(iac.Id.Get(), "")
}

// CreateIrActionsClient creates a new ir.actions.client model and returns its id.
func (c *Client) CreateIrActionsClient(iac *IrActionsClient) (int64, error) {
	ids, err := c.CreateIrActionsClients([]*IrActionsClient{iac})
	if err != nil {
		return -1, err
	}
	// The create call can succeed without returning an id; callers must treat
	// -1 with a nil error as "no id available".
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}

// CreateIrActionsClients creates new ir.actions.client models and returns their ids.
func (c *Client) CreateIrActionsClients(iacs []*IrActionsClient) ([]int64, error) {
	// Repack into []interface{} as required by the generic Create call.
	var vv []interface{}
	for _, v := range iacs {
		vv = append(vv, v)
	}
	return c.Create(IrActionsClientModel, vv)
}

// UpdateIrActionsClient updates an existing ir.actions.client record.
func (c *Client) UpdateIrActionsClient(iac *IrActionsClient) error {
	return c.UpdateIrActionsClients([]int64{iac.Id.Get()}, iac)
}

// UpdateIrActionsClients updates existing ir.actions.client records.
// All records (represented by ids) will be updated by iac values.
func (c *Client) UpdateIrActionsClients(ids []int64, iac *IrActionsClient) error {
	return c.Update(IrActionsClientModel, ids, iac)
}

// DeleteIrActionsClient deletes an existing ir.actions.client record.
func (c *Client) DeleteIrActionsClient(id int64) error {
	return c.DeleteIrActionsClients([]int64{id})
}

// DeleteIrActionsClients deletes existing ir.actions.client records.
func (c *Client) DeleteIrActionsClients(ids []int64) error {
	return c.Delete(IrActionsClientModel, ids)
}

// GetIrActionsClient gets ir.actions.client existing record. It returns an
// error when the id does not exist.
func (c *Client) GetIrActionsClient(id int64) (*IrActionsClient, error) {
	iacs, err := c.GetIrActionsClients([]int64{id})
	if err != nil {
		return nil, err
	}
	if iacs != nil && len(*iacs) > 0 {
		return &((*iacs)[0]), nil
	}
	return nil, fmt.Errorf("id %v of ir.actions.client not found", id)
}

// GetIrActionsClients gets ir.actions.client existing records. Unknown ids
// are silently absent from the result.
func (c *Client) GetIrActionsClients(ids []int64) (*IrActionsClients, error) {
	iacs := &IrActionsClients{}
	if err := c.Read(IrActionsClientModel, ids, nil, iacs); err != nil {
		return nil, err
	}
	return iacs, nil
}

// FindIrActionsClient finds ir.actions.client record by querying it with criteria.
func (c *Client) FindIrActionsClient(criteria *Criteria) (*IrActionsClient, error) { iacs := &IrActionsClients{} if err := c.SearchRead(IrActionsClientModel, criteria, NewOptions().Limit(1), iacs); err != nil { return nil, err } if iacs != nil && len(*iacs) > 0 { return &((*iacs)[0]), nil } return nil, fmt.Errorf("ir.actions.client was not found with criteria %v", criteria) } // FindIrActionsClients finds ir.actions.client records by querying it // and filtering it with criteria and options. func (c *Client) FindIrActionsClients(criteria *Criteria, options *Options) (*IrActionsClients, error) { iacs := &IrActionsClients{} if err := c.SearchRead(IrActionsClientModel, criteria, options, iacs); err != nil { return nil, err } return iacs, nil } // FindIrActionsClientIds finds records ids by querying it // and filtering it with criteria and options. func (c *Client) FindIrActionsClientIds(criteria *Criteria, options *Options) ([]int64, error) { ids, err := c.Search(IrActionsClientModel, criteria, options) if err != nil { return []int64{}, err } return ids, nil } // FindIrActionsClientId finds record id by querying it with criteria. func (c *Client) FindIrActionsClientId(criteria *Criteria, options *Options) (int64, error) { ids, err := c.Search(IrActionsClientModel, criteria, options) if err != nil { return -1, err } if len(ids) > 0 { return ids[0], nil } return -1, fmt.Errorf("ir.actions.client was not found with criteria %v and options %v", criteria, options) }
package main

// Pomodoro!
// References:
// - [Wikipedia: Pomodoro Technique](https://en.wikipedia.org/wiki/Pomodoro_Technique)
// - [List of colors for prompt](https://wiki.archlinux.org/index.php/Color_Bash_Prompt#List_of_colors_for_prompt_and_Bash)
// - [CMD in Python and Go](http://www.darkcoding.net/software/pretty-command-line-console-output-on-unix-in-python-and-go-lang/)
// - [time.Tick](http://golang.org/pkg/time/#example_Tick)
// - [strconv.Itoa](http://golang.org/pkg/strconv/#Itoa)
// - [Capture spaced user input](http://stackoverflow.com/questions/7452641/capture-spaced-user-input)
//
// It'd be cool to make it with ncurses:
// - [ncurses with go](https://code.google.com/p/goncurses/source/browse/ncurses.go)

import (
	"bufio"
	"os"
	"strconv"
	"time"
)

// wait counts down the given number of minutes, redrawing a zero-padded
// mm:ss display on the current line once per second. start and end are
// printed verbatim around the timer (used for ANSI color codes).
func wait(start string, minutes int, end string) {
	// time.Tick would leak one ticker per call since wait runs repeatedly;
	// use an explicit ticker and stop it when the countdown finishes.
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	seconds := 0
	for range ticker.C {
		mins := strconv.Itoa(minutes)
		if minutes < 10 {
			mins = "0" + mins
		}
		secs := strconv.Itoa(seconds)
		if seconds < 10 {
			secs = "0" + secs
		}
		print("\r"+start, mins+":"+secs, end)
		if seconds == 0 {
			if minutes == 0 {
				break
			}
			// Borrow a minute; the decrement below lands on :59.
			minutes--
			seconds = 60
		}
		seconds--
	}
}

// delim terminates each line of interactive input.
const delim = '\n'

// main runs an endless pomodoro loop: prompt for a task, count down 25
// minutes, then take a short or long break chosen by the user.
func main() {
	var breakTime int

	// ANSI escape sequences for terminal colors.
	bblack := "\033[1;30m"
	bwhite := "\033[1;37m"
	white := "\033[0;37m"
	bred := "\033[1;31m"
	red := "\033[0;31m"
	bgreen := "\033[1;32m"
	green := "\033[0;32m"
	end := "\033[0m"

	taskCount := 1
	r := bufio.NewReader(os.Stdin)
	for {
		println("\n"+bwhite+"Pomodoro! "+white+"#"+strconv.Itoa(taskCount), end)
		print("Name your task: ")
		taskName, _ := r.ReadString(delim)
		taskName = taskName[:len(taskName)-1] // strip trailing newline
		wait(red+"Waiting ", 25, " minutes for you to complete \""+bred+taskName+red+"\" :|"+end)
		print("\x07") // ASCII code 7 (BEL), or "\a"
		print("\r" + green + "You just finished \"" + bgreen + taskName + green + "\" :) \033[K\n" + end)

		// Re-prompt until a valid break choice is entered (replaces goto).
		// NOTE(review): durations look unusual for pomodoro (short=3, long=5,
		// with 15/30 on every 4th task) but are preserved as written.
		for {
			print("Short (s) or long (l) break?")
			whichBreak, _ := r.ReadString(delim)
			whichBreak = whichBreak[:len(whichBreak)-1]
			if whichBreak == "s" {
				if taskCount%4 == 0 {
					breakTime = 15
				} else {
					breakTime = 3
				}
				break
			}
			if whichBreak == "l" {
				if taskCount%4 == 0 {
					breakTime = 30
				} else {
					breakTime = 5
				}
				break
			}
		}
		wait(bblack+"Take a break of ", breakTime, " minutes..."+end)
		taskCount++
	}
}
package middleware

import (
	"net/http"

	"github.com/winded/tyomaa/backend/util"
)

// AccessControl adds an Access-Control-Allow-Origin header to every response,
// taking the allowed origin from the ALLOW_ORIGIN environment variable and
// defaulting to "*".
func AccessControl(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		origin := util.EnvOrDefault("ALLOW_ORIGIN", "*")
		w.Header().Set("Access-Control-Allow-Origin", origin)
		next.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}

// Json middleware sets the response's content type to application/json
func Json(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		next.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
package controller

import (
	"encoding/json"
	"net/http"
	"strconv"
	"time"

	"github.com/simplejia/clog/api"
	"github.com/simplejia/namesrv/model"
)

// AddNameStatFunc asynchronously accumulates per-name hit counters and
// flushes them to the stat model. Each (name, value) pair is queued on a
// large buffered channel; a background goroutine batches counts per name for
// ~10 seconds before persisting via IncNumDay. When the channel is full the
// enqueue is dropped (non-blocking send), trading accuracy for never
// blocking the request path.
var AddNameStatFunc = func() func(string, string) error {
	fun := "AddNameStatFunc"
	ch := make(chan [2]string, 1e6)
	go func() {
		// m holds each key's flush deadline; n holds its accumulated count.
		m := make(map[string]time.Time)
		n := make(map[string]int)
		tick := time.Tick(time.Second)
		for {
			select {
			case <-tick:
				// Flush every key whose deadline has passed.
				for key, expire := range m {
					if time.Since(expire) < 0 {
						continue // deadline not reached yet
					}
					num := n[key]
					delete(m, key)
					delete(n, key)
					statModel := model.NewStat()
					statModel.Name = key
					err := statModel.IncNumDay(num)
					if err != nil {
						clog.Error("%s stat.IncNumDay err: %v, req: %v", fun, err, statModel)
					}
				}
			case kv := <-ch:
				key, value := kv[0], kv[1]
				num, _ := strconv.Atoi(value)
				// NOTE(review): the parsed value is incremented by one —
				// presumably the request itself counts as one hit on top of
				// the reported number; confirm with callers.
				num++
				if _, ok := n[key]; ok {
					// Key already pending: just add to the running total
					// (break leaves the select, not the for loop).
					n[key] += num
					break
				}
				// First sighting: schedule a flush 10 seconds from now.
				m[key] = time.Now().Add(time.Second * 10)
				n[key] = num
			}
		}
	}()
	return func(key, value string) (err error) {
		// Non-blocking enqueue; drops the sample if the buffer is full.
		select {
		case ch <- [2]string{key, value}:
		default:
		}
		return
	}
}()

// @postfilter("Boss")
func (relation *Relation) GetsFromName(w http.ResponseWriter, r *http.Request) {
	fun := "relation.GetsFromName"

	name := r.FormValue("name")
	cc := r.FormValue("cc")
	num := r.FormValue("num")

	// Record the lookup for usage statistics (fire-and-forget).
	AddNameStatFunc(name, num)

	relationModel := model.NewRelation()
	relationModel.Name = name
	rels, err := relationModel.GetsFromName()
	if err != nil {
		clog.Error("%s gets error: %v, req: %v", fun, err, relationModel)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// cc is the client's cached check code; reply 304 when nothing changed.
	ccNew := model.Relations(rels).CheckCode()
	if cc == ccNew {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	json.NewEncoder(w).Encode(map[string]interface{}{
		"rels": rels,
		"cc":   ccNew,
	})
	return
}
// Copyright 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package limits import ( "testing" "golang.org/x/sys/unix" ) func TestSet(t *testing.T) { testCases := []struct { limit Limit privileged bool expectedErr error }{ {limit: Limit{Cur: 50, Max: 50}, privileged: false, expectedErr: nil}, {limit: Limit{Cur: 20, Max: 50}, privileged: false, expectedErr: nil}, {limit: Limit{Cur: 20, Max: 60}, privileged: false, expectedErr: unix.EPERM}, {limit: Limit{Cur: 60, Max: 50}, privileged: false, expectedErr: unix.EINVAL}, {limit: Limit{Cur: 11, Max: 10}, privileged: false, expectedErr: unix.EINVAL}, {limit: Limit{Cur: 20, Max: 60}, privileged: true, expectedErr: nil}, } ls := NewLimitSet() for _, tc := range testCases { if _, err := ls.Set(1, tc.limit, tc.privileged); err != tc.expectedErr { t.Fatalf("Tried to set Limit to %+v and privilege %t: got %v, wanted %v", tc.limit, tc.privileged, err, tc.expectedErr) } } }
package database

import (
	"os"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// newSQLite returns a GORM dialector for the SQLite database file named by
// the SQLITE_FILENAME environment variable. If the variable is unset, the
// filename is empty and opening the connection will fail downstream.
func newSQLite() gorm.Dialector {
	fn := os.Getenv("SQLITE_FILENAME")
	return sqlite.Open(fn)
}
package provider

import (
	"bytes"
	"chaplapp/core"
	"encoding/json"
	"os"
	"testing"
	"time"
)

// TestProvidesChairmanListCorrectly round-trips a chairman list through JSON
// and checks the provider yields the same set of names.
// Fix: the *testing.T parameter was named "testing", shadowing the testing
// package; renamed to the conventional t.
func TestProvidesChairmanListCorrectly(t *testing.T) {
	chairmen := Chairmen{[]string{"John Doe", "Jane Doe", "Foobius Bar"}}
	src, _ := json.Marshal(chairmen)
	os.Stdout.Write(src) // debug: echo the generated fixture
	reader := bytes.NewReader(src)

	result, _ := NewJsonChairmanProvider(reader).Chairmen()

	if resultCount, originalCount := len(result), len(chairmen.Chairmen); resultCount != originalCount {
		t.Error("Result contains different number of chairmen (", resultCount, ") than original JSON (", originalCount, ").")
	}
	for _, chairman := range result {
		if !containsChairman(chairmen.Chairmen, chairman) {
			t.Error("Chairman with name: [", chairman.Name(), "] was not in original JSON")
		}
	}
}

// containsChairman reports whether chairman's name appears in the name list.
func containsChairman(chairmen []string, chairman core.Chairman) bool {
	for _, candidate := range chairmen {
		if chairman.Name() == candidate {
			return true
		}
	}
	return false
}

// TestProvidesMeetingListCorrectly round-trips a meeting-date list through
// JSON and checks the provider yields the same set of dates.
func TestProvidesMeetingListCorrectly(t *testing.T) {
	meetings := Meetings{[]string{"01/01/06", "02/01/06", "03/04/07"}}
	src, _ := json.Marshal(meetings)
	os.Stdout.Write(src) // debug: echo the generated fixture
	reader := bytes.NewReader(src)

	result, _ := NewJsonMeetingProvider(reader).Meetings()

	if len(result) != len(meetings.Meetings) {
		t.Error("Result contains different number of meetings (", len(result), ") than original JSON (", len(meetings.Meetings), ").")
	}
	for _, meeting := range result {
		if !contains(meetings.Meetings, meeting) {
			t.Error("Meeting with date: [", meeting.Date().Format(time.UnixDate), "] was not in original JSON")
		}
	}
}

// contains reports whether meeting's formatted date appears in the date list.
func contains(meetings []string, meeting core.Meeting) bool {
	for _, candidate := range meetings {
		if meeting.Date().Format(core.MeetingDateFormat) == candidate {
			return true
		}
	}
	return false
}
package r30_test

import (
	"testing"

	"go.lukeharris.dev/r30"
	"go.lukeharris.dev/testUtils"
)

// TestStep checks single rule-30 steps against hand-computed bit patterns,
// covering both a two-byte and a one-byte state.
func TestStep(t *testing.T) {
	utils := testUtils.Setup(t)

	cases := []struct {
		in   []byte
		want []byte
	}{
		{
			in:   []byte{0b00000001, 0b01000000},
			want: []byte{0b00000011, 0b01100000},
		},
		{
			in:   []byte{0b00000001},
			want: []byte{0b10000011},
		},
	}
	for _, c := range cases {
		utils.BytesEq(r30.Step(c.in), c.want)
	}
}
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package types

import "github.com/attic-labs/noms/go/d"

// listLeafSequence is a leaf node of a List's prolly tree: it holds the
// values directly rather than refs to subtrees.
type listLeafSequence struct {
	leafSequence
	values []Value
}

// newListLeafSequence builds a leaf sequence over v. The ValueReadWriter is
// required (panics on nil).
func newListLeafSequence(vrw ValueReadWriter, v ...Value) sequence {
	d.PanicIfTrue(vrw == nil)
	return listLeafSequence{leafSequence{vrw, len(v), ListKind}, v}
}

// sequence interface

// getCompareFn returns a function comparing an item of this sequence with an
// item of other by value equality. other must also be a listLeafSequence.
func (ll listLeafSequence) getCompareFn(other sequence) compareFn {
	oll := other.(listLeafSequence)
	return func(idx, otherIdx int) bool {
		return ll.values[idx].Equals(oll.values[otherIdx])
	}
}

// getItem returns the value stored at idx.
func (ll listLeafSequence) getItem(idx int) sequenceItem {
	return ll.values[idx]
}

// WalkRefs invokes cb for every ref reachable from the stored values.
func (ll listLeafSequence) WalkRefs(cb RefCallback) {
	for _, v := range ll.values {
		v.WalkRefs(cb)
	}
}

// typeOf describes this leaf as List<union of the element types>.
func (ll listLeafSequence) typeOf() *Type {
	ts := make([]*Type, len(ll.values))
	for i, v := range ll.values {
		ts[i] = v.typeOf()
	}
	return makeCompoundType(ListKind, makeCompoundType(UnionKind, ts...))
}
package main

// 634. Find the Derangement of An Array
//
// A derangement is a permutation in which no element appears in its original
// position. Given the array [1..n] in ascending order, count its
// derangements modulo 1e9+7. n ranges over [1, 1e6].
//
// Example: n = 3 -> 2 ([2,3,1] and [3,1,2]).
//
// Dynamic programming: when placing element 1 at some position x (n-1
// choices), either x and 1 swap (leaving a subproblem of size n-2) or they
// don't (subproblem of size n-1), giving
//
//	D(n) = (n-1) * (D(n-1) + D(n-2)),  D(0) = 1, D(1) = 0.
//
// Bug fix: the original loop ran `i < n` (never computing the final term)
// and wrote dp[n] from dp[n-1]/dp[n-2] — which were never filled — instead
// of dp[i] from dp[i-1]/dp[i-2], so every n >= 2 returned 0.
func findDerangement(n int) int {
	const mod = 1000000007
	if n == 0 {
		return 1
	}
	if n == 1 {
		return 0
	}
	// Rolling variables replace the O(n) slice: prev2 = D(i-2), prev1 = D(i-1).
	prev2, prev1 := 1, 0
	res := 0
	for i := 2; i <= n; i++ {
		// (i-1) <= 1e6 and each D < 1e9+7, so the product fits in int64.
		res = ((i - 1) * (prev1 + prev2)) % mod
		prev2, prev1 = prev1, res
	}
	return res
}
package main

import (
	"BeegoDemo/blockchain"
	"BeegoDemo/db_mysql"
	"BeegoDemo/models"
	_ "BeegoDemo/routers"
	"encoding/json"
	"encoding/xml"
	"fmt"
	"github.com/astaxie/beego"
)

// main currently runs a serialization + genesis-block demo and returns early;
// the Beego web-server setup below the return is unreachable.
func main() {
	user1 := models.User{
		Id:       1,
		Phone:    "",
		Password: "",
	}
	fmt.Println("内存中的数据User1:", user1)
	// JSON serialization example; result shape:
	// {"Id":1 , "phone": "131" , " Password": "fasdfa"}
	_, _ = json.Marshal(user1)
	xmlBytes, _ := xml.Marshal(user1)
	fmt.Println(string(xmlBytes))
	var user2 models.User
	xml.Unmarshal(xmlBytes, &user2)
	fmt.Println("反序列化的User2:", user2)

	// 1. Create the genesis block (previous hash is 32 zero bytes).
	block := blockchain.NewBlock(0, []byte{}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
	fmt.Println(block)
	fmt.Printf("区块的Hash值:%x\n", block.Hash)
	fmt.Printf("区块的nonce值:%d\n", block.Nonce)
	return

	// NOTE(review): everything below is dead code because of the return
	// above — presumably left while debugging the blockchain demo. Remove
	// the return to restore the web server.

	// 1. Connect to the database.
	db_mysql.ConnectDB()
	// 2. Static resource paths.
	beego.SetStaticPath("/js", "./static/js")
	beego.SetStaticPath("/css", "./static/css")
	beego.SetStaticPath("/img", "./static/img")
	// 3. Start listening (blocks).
	beego.Run()
}
package libbpf

// Packet is a raw packet payload; presumably as exchanged with an AF_XDP
// socket — TODO confirm framing.
type Packet []byte

// libbpfAfxdpRunner abstracts a packet-processing loop. The method names
// mirror XDP-style verdicts (pass / drop), but the exact semantics of each
// call are defined by the implementation — NOTE(review): confirm against the
// concrete runner.
type libbpfAfxdpRunner interface {
	// Read returns a channel delivering incoming packets.
	Read() <-chan Packet
	// Pass forwards the given packet (pass verdict).
	Pass(data Packet)
	// New submits a new packet.
	New(data Packet)
	// Drop discards the current packet (drop verdict).
	Drop()
	// Close releases the runner's resources.
	Close()
}
package main

import (
	"cloud.google.com/go/profiler"
	"github.com/chidakiyo/benkyo/go-memleak-check/lib"
	"github.com/gin-gonic/gin"
	"log"
	"net/http"
	"os"
)

// main starts the Stackdriver profiler, mounts a gin router with the
// datastore leak-check endpoints and serves on :8080.
func main() {
	StartProfiler("leak-01", "0.0.2")

	route := gin.Default()
	http.Handle("/", route)

	// Registering routes after http.Handle is fine: the mux holds a
	// reference to the (mutable) gin engine.
	route.GET("ds", lib.MercariDatastoreCreate)
	route.GET("ods", lib.OfficialDatastore)
	route.GET("ads", lib.AppengineDatastore)

	log.Fatal(http.ListenAndServe(":8080", nil))
	//appengine.Main()
}

// StartProfiler starts the Google Cloud profiler agent for the given service
// name and version. It panics if the agent cannot start; PROJECT_ID must be
// set when running outside GCP.
func StartProfiler(service, version string) {
	if err := profiler.Start(profiler.Config{
		Service:        service,
		ServiceVersion: version,
		// ProjectID must be set if not running on GCP.
		ProjectID:    os.Getenv("PROJECT_ID"),
		DebugLogging: true,
	}); err != nil {
		panic(err)
	}
}