text
stringlengths
11
4.05M
// Tests for the linked-list implementation in package list.
package list

import (
	"testing"
	"fmt"
)

// TestNew checks that a freshly created list can be constructed and logged.
func TestNew(t *testing.T) {
	list := New()
	t.Log(list)
}

// item is a simple key/value payload used as element data in these tests.
type item struct {
	key   string
	value string
}

// String renders the item as "key=..., value=..." for readable test logs.
func (i *item) String() string {
	return fmt.Sprintf("key=%s, value=%s", i.key, i.value)
}

// Init populates l with five items via PushBack, PushFront and InsertAfter
// so that the resulting order (front to back) is keys "1".."5".
func Init(l *List) *List {
	e := l.PushBack(&item{"3", "33333"})
	l.PushFront(&item{"2", "22222"})
	l.PushBack(&item{"5", "55555"})
	l.PushFront(&item{"1", "11111"})
	// Insert "4" directly after the element holding "3".
	l.InsertAfter(&item{"4", "44444"}, e)
	return l
}

// TestList_Front logs the value at the front of the initialized list.
// NOTE(review): Front() appears to expose a .value field directly —
// presumably it returns the element payload; confirm against List's API.
func TestList_Front(t *testing.T) {
	list := New()
	list = Init(list)
	t.Log(list.Front().value)
}

// TestList_Back logs the value at the back of the initialized list.
func TestList_Back(t *testing.T) {
	list := New()
	list = Init(list)
	t.Log(list.Back().value)
}

// printList dumps the list length, head/tail pointers and every element
// value, walking forward from head via Next().
func printList(t *testing.T, l *List) {
	t.Log("l.len=", l.Len())
	t.Log("head=", l.head, "tail=", l.tail)
	for e := l.head; e != nil; e = e.Next() {
		t.Log(e.value)
	}
}

// TestList_Remove removes the front element, then the second element,
// then the back element, dumping the list after each removal.
func TestList_Remove(t *testing.T) {
	list := New()
	list = Init(list)

	e := list.Front()
	e1 := list.Remove(e)
	t.Log("delete e1:", e1)
	printList(t, list)

	e = list.Front()
	e2 := list.Remove(e.Next())
	t.Log("delete e2:", e2)
	printList(t, list)

	e = list.Back()
	e3 := list.Remove(e)
	t.Log("delete e3:", e3)
	printList(t, list)
}

// TestList_Reverse dumps the list, reverses it, and dumps it again.
func TestList_Reverse(t *testing.T) {
	list := New()
	list = Init(list)
	printList(t, list)
	list = list.Reverse()
	printList(t, list)
}
package main

import "fmt"

// nextint returns a counter closure. Each invocation of the returned
// function yields the next positive integer, starting from 1; the
// state lives in the captured local variable.
func nextint() func() int {
	counter := 0
	return func() int {
		counter++
		return counter
	}
}

// main demonstrates the closure by printing 1, 2 and 3, one per line.
func main() {
	next := nextint()
	for range [3]struct{}{} {
		fmt.Println(next())
	}
}
// Package parameters declares the request parameter types for the item
// API endpoints. Every request embeds RootRequest, which carries the
// shared access-token requirement and validation logic.
package parameters

type (
	// GetItemRequest carries parameters for fetching a single item.
	GetItemRequest struct {
		RootRequest
	}

	// GetItemListRequest carries parameters for listing items,
	// optionally filtered by tag and capped by Limit.
	GetItemListRequest struct {
		RootRequest
		TagID uint64 `json:"tag_id" mapstructure:"tag_id"`
		Limit int    `json:"limit" mapstructure:"limit"`
	}

	// GetItemFavoriteListRequest carries parameters for listing the
	// caller's favorite items, capped by Limit.
	GetItemFavoriteListRequest struct {
		RootRequest
		Limit int `json:"limit" mapstructure:"limit"`
	}

	// CreateItemFavoriteRequest carries parameters for marking an
	// item as a favorite.
	CreateItemFavoriteRequest struct {
		RootRequest
	}

	// CreateItemRequest carries parameters for creating an item from
	// a GitHub repository URL.
	CreateItemRequest struct {
		RootRequest
		GithubURL string `json:"github_url" mapstructure:"github_url"`
	}
)

// NewGetItemRequest builds a GetItemRequest; no access token required.
func NewGetItemRequest() GetItemRequest {
	return GetItemRequest{
		RootRequest: RootRequest{
			needAccessToken: false,
		},
	}
}

// Validate delegates to the embedded RootRequest's validation.
func (p GetItemRequest) Validate() error {
	return p.RootRequest.validate()
}

// NewGetItemListRequest builds a GetItemListRequest; no access token required.
func NewGetItemListRequest() GetItemListRequest {
	return GetItemListRequest{
		RootRequest: RootRequest{
			needAccessToken: false,
		},
	}
}

// Validate delegates to the embedded RootRequest's validation.
func (p GetItemListRequest) Validate() error {
	return p.RootRequest.validate()
}

// NewGetItemFavoriteListRequest builds a GetItemFavoriteListRequest;
// an access token is required.
func NewGetItemFavoriteListRequest() GetItemFavoriteListRequest {
	return GetItemFavoriteListRequest{
		RootRequest: RootRequest{
			needAccessToken: true,
		},
	}
}

// Validate delegates to the embedded RootRequest's validation.
func (p GetItemFavoriteListRequest) Validate() error {
	return p.RootRequest.validate()
}

// NewCreateItemFavoriteRequest builds a CreateItemFavoriteRequest;
// an access token is required.
func NewCreateItemFavoriteRequest() CreateItemFavoriteRequest {
	return CreateItemFavoriteRequest{
		RootRequest: RootRequest{
			needAccessToken: true,
		},
	}
}

// Validate delegates to the embedded RootRequest's validation.
func (p CreateItemFavoriteRequest) Validate() error {
	return p.RootRequest.validate()
}

// NewCreateItemRequest builds a CreateItemRequest; an access token is required.
func NewCreateItemRequest() CreateItemRequest {
	return CreateItemRequest{
		RootRequest: RootRequest{
			needAccessToken: true,
		},
	}
}

// Validate delegates to the embedded RootRequest's validation.
func (p CreateItemRequest) Validate() error {
	return p.RootRequest.validate()
}
package main

import "fmt"

// main plays a binary-search guessing game: the user thinks of a number
// in [min, max] and the program narrows the range with at most 20
// yes/no questions (2^20 > 1,000,000, so 20 questions always suffice
// for an honest player).
func main() {
	min := 1
	max := 1000000
	fmt.Printf("Think of an integer between %d and %d.\n", min, max)
	fmt.Println("Now I'll try to guess it, using 20 questions or less.")

	// Block until the user types "yes".
	// NOTE(review): repeated fmt.Scanf("%s") may stumble on the newline
	// left in the input buffer on some platforms — fmt.Scan would be the
	// safer choice here; confirm before relying on this loop.
	var answer string
	for answer != "yes" {
		fmt.Print("Ready? ")
		fmt.Scanf("%s", &answer)
	}

	for i := 0; i < 20; i++ {
		if min == max {
			// Range narrowed to a single candidate: confirm it.
			fmt.Printf("Is it %d? ", min)
			var answer string
			fmt.Scanf("%s", &answer)
			if answer == "yes" {
				fmt.Println("Yay, I win!")
				return
			}
		} else {
			// Standard bisection step: halve the candidate range.
			middle := (min + max) / 2
			fmt.Printf("Is it greater than %d? ", middle)
			var answer string
			fmt.Scanf("%s", &answer)
			if answer == "yes" {
				min = middle + 1
			} else {
				max = middle
			}
		}
	}

	// 20 questions used up without a confirmed guess.
	if min == max {
		fmt.Printf("Your number must be %d.\n", min)
	} else {
		fmt.Printf("I don't know what it could be. Min is %d, max is %d.\n", min, max)
	}
}
// ˅ package main // ˄ type Display struct { // ˅ // ˄ impl DisplayImpl // ˅ // ˄ } func NewDisplay(impl DisplayImpl) *Display { // ˅ return &Display{impl} // ˄ } func (self *Display) Output() { // ˅ self.Open() self.Write() self.Close() // ˄ } func (self *Display) Open() { // ˅ self.impl.ImplOpen() // ˄ } func (self *Display) Write() { // ˅ self.impl.ImplWrite() // ˄ } func (self *Display) Close() { // ˅ self.impl.ImplClose() // ˄ } // ˅ // ˄
package main

import (
	"strings"
	"time"

	"github.com/kelseyhightower/envconfig"
)

// Config holds the newsfetch service configuration, populated from
// NEWSFETCH_* environment variables via envconfig. SiteCodes and the
// two Duration fields are not read directly from the environment:
// they are derived in ParseConfig from SiteCodesStr / BrvtyTimeoutMs.
type Config struct {
	MongoURI           string `envconfig:"mongo_uri"`
	GannettAPIKey      string `envconfig:"gannett_search_api_key"`
	GannettAssetAPIKey string `envconfig:"gannett_asset_api_key"`
	SiteCodes          []string // derived from SiteCodesStr
	SummaryVEnv        string `envconfig:"summary_v_env"`
	GNAPIDomain        string `envconfig:"gnapi_domain"`

	BrvtyURL     string `envconfig:"brvty_url"`
	BrvtyAPIKey  string `envconfig:"brvty_api_key"`
	BrvtyTimeout time.Duration // derived from BrvtyTimeoutMs

	SiteCodesStr   string `envconfig:"site_codes"`   // comma-separated list
	BrvtyTimeoutMs int    `envconfig:"brvty_timeout"` // milliseconds

	LoopInterval time.Duration
}

// ParseConfig reads the environment (prefix "newsfetch"), then derives
// the SiteCodes slice and the Brvty timeout Duration from their raw
// string/int counterparts. Returns the partially filled config along
// with any envconfig error.
func ParseConfig() (Config, error) {
	var config Config
	err := envconfig.Process("newsfetch", &config)
	if err != nil {
		return config, err
	}

	config.SiteCodes = strings.Split(config.SiteCodesStr, ",")
	config.BrvtyTimeout = time.Duration(config.BrvtyTimeoutMs) * time.Millisecond

	return config, err
}
// Package elasticsearch implements the gocrud search.Engine interface
// backed by an ElasticSearch cluster (olivere/elastic v2 client).
package elasticsearch

import (
	"errors"
	"reflect"

	"github.com/manishrjain/gocrud/search"
	"github.com/manishrjain/gocrud/x"
	"gopkg.in/olivere/elastic.v2"
)

var log = x.Log("elasticsearch")

// Elastic encapsulates elastic search client, and implements methods declared
// by search.Engine.
type Elastic struct {
	client *elastic.Client
}

// ElasticQuery implements methods declared by search.Query.
type ElasticQuery struct {
	client     *elastic.Client
	sort       string
	from       int
	limit      int
	kind       string
	filter     *ElasticFilter
	filterType int // 0 = no filter, 1 = AND, 2 = OR
}

// ElasticFilter accumulates filter clauses that are later combined
// with AND or OR depending on ElasticQuery.filterType.
type ElasticFilter struct {
	filters []elastic.Filter
}

// Init initializes connection to Elastic Search instance, checks for
// existence of "gocrud" index and creates it, if missing. Note that
// Init does NOT do mapping necessary to do exact-value term matching
// for strings etc. That needs to be done externally.
func (es *Elastic) Init(args ...string) {
	// Exactly one argument is expected: the cluster URL.
	if len(args) != 1 {
		log.WithField("args", args).Fatal("Invalid arguments")
		return
	}
	url := args[0]
	log.Debug("Initializing connection to ElaticSearch")

	var opts []elastic.ClientOptionFunc
	opts = append(opts, elastic.SetURL(url))
	// Sniffing is disabled so the client talks only to the given URL.
	opts = append(opts, elastic.SetSniff(false))
	client, err := elastic.NewClient(opts...)
	if err != nil {
		x.LogErr(log, err).Fatal("While creating connection with ElaticSearch.")
		return
	}
	version, err := client.ElasticsearchVersion(url)
	if err != nil {
		x.LogErr(log, err).Fatal("Unable to query version")
		return
	}
	log.WithField("version", version).Debug("ElasticSearch version")

	// Use the IndexExists service to check if a specified index exists.
	exists, err := client.IndexExists("gocrud").Do()
	if err != nil {
		x.LogErr(log, err).Fatal("Unable to query index existence.")
		return
	}
	if !exists {
		// Create a new index.
		createIndex, err := client.CreateIndex("gocrud").Do()
		if err != nil {
			x.LogErr(log, err).Fatal("Unable to create index.")
			return
		}
		if !createIndex.Acknowledged {
			// Not acknowledged; log and continue anyway.
			log.Errorf("Create index not acknowledged. Not sure what that means...")
		}
	}

	es.client = client
	log.Debug("Connected with ElasticSearch")
}

// DropIndex is useful for testing purposes.
func (es *Elastic) DropIndex() error {
	_, err := es.client.DeleteIndex("gocrud").Do()
	return err
}

// Update validates the given document and indexes it, relying on
// external versioning driven by the document's timestamp (NanoTs) so
// that stale writes are rejected by ElasticSearch.
func (es *Elastic) Update(doc x.Doc) error {
	if doc.Id == "" || doc.Kind == "" || doc.NanoTs == 0 {
		return errors.New("Invalid document")
	}

	result, err := es.client.Index().Index("gocrud").Type(doc.Kind).Id(doc.Id).
		VersionType("external").Version(doc.NanoTs).BodyJson(doc).Do()
	if err != nil {
		x.LogErr(log, err).WithField("doc", doc).Error("While indexing doc")
		return err
	}
	log.Debug("index_result", result)
	return nil
}

// NewAndFilter installs a fresh filter whose clauses will be ANDed.
func (eq *ElasticQuery) NewAndFilter() search.FilterQuery {
	eq.filter = new(ElasticFilter)
	eq.filterType = 1
	return eq.filter
}

// NewOrFilter installs a fresh filter whose clauses will be ORed.
func (eq *ElasticQuery) NewOrFilter() search.FilterQuery {
	eq.filter = new(ElasticFilter)
	eq.filterType = 2
	return eq.filter
}

// AddExact implemented by ElasticSearch uses the 'term' directive.
// Note that with strings, this might not return exact match results,
// if the index is set to pre-process strings, which it does by default.
// In other words, for string term-exact matches to work, you need to
// set the mapping to "index": "not_analyzed".
// https://www.elastic.co/guide/en/elasticsearch/guide/current/mapping-intro.html
func (ef *ElasticFilter) AddExact(field string, value interface{}) search.FilterQuery {
	tq := elastic.NewTermFilter(field, value)
	ef.filters = append(ef.filters, tq)
	return ef
}

// AddRegex uses regex filter directive.
func (ef *ElasticFilter) AddRegex(field string, value string) search.FilterQuery {
	wq := elastic.NewRegexpFilter(field, value)
	ef.filters = append(ef.filters, wq)
	return ef
}

// Order sorts the results for the given field.
func (eq *ElasticQuery) Order(field string) search.Query {
	eq.sort = field
	return eq
}

// From sets the offset.
func (eq *ElasticQuery) From(num int) search.Query {
	eq.from = num
	return eq
}

// Limit limits the number of results to num.
func (eq *ElasticQuery) Limit(num int) search.Query {
	eq.limit = num
	return eq
}

// generateQuery wraps a match-all query with the accumulated filter,
// combining the clauses with AND (filterType 1) or OR (filterType 2).
// Callers invoke this only when eq.filter is non-nil, so filterType 0
// here means the filter object exists but was never configured.
func (eq *ElasticQuery) generateQuery() (rq elastic.FilteredQuery, rerr error) {
	rq = elastic.NewFilteredQuery(elastic.NewMatchAllQuery())
	if eq.filterType == 0 {
		return rq, errors.New("Filter present, but not set")

	} else if eq.filterType == 1 {
		af := elastic.NewAndFilter(eq.filter.filters...)
		rq = rq.Filter(af)

	} else if eq.filterType == 2 {
		of := elastic.NewOrFilter(eq.filter.filters...)
		rq = rq.Filter(of)

	} else {
		return rq, errors.New("Invalid filter type")
	}
	return rq, nil
}

// Run runs the query and returns results and error, if any.
// A sort field prefixed with "-" sorts descending; otherwise ascending.
func (eq *ElasticQuery) Run() (docs []x.Doc, rerr error) {
	ss := eq.client.Search("gocrud").Type(eq.kind)
	if len(eq.sort) > 0 {
		if eq.sort[:1] == "-" {
			ss = ss.Sort(eq.sort[1:], false)
		} else {
			ss = ss.Sort(eq.sort, true)
		}
	}
	if eq.from > 0 {
		ss = ss.From(eq.from)
	}
	if eq.limit > 0 {
		ss = ss.Size(eq.limit)
	}
	if eq.filter != nil {
		q, err := eq.generateQuery()
		if err != nil {
			return docs, err
		}
		ss = ss.Query(q)
	}

	result, err := ss.Do()
	if err != nil {
		x.LogErr(log, err).Error("While running query")
		return docs, err
	}
	if result.Hits == nil {
		log.Debug("No results found")
		return docs, nil
	}

	// Deserialize each hit back into an x.Doc via reflection.
	var d x.Doc
	for _, item := range result.Each(reflect.TypeOf(d)) {
		d := item.(x.Doc)
		docs = append(docs, d)
	}
	return docs, nil
}

// Count returns the number of documents matching the query's filter.
func (eq *ElasticQuery) Count() (rcount int64, rerr error) {
	cs := eq.client.Count("gocrud").Type(eq.kind)
	if eq.filter != nil {
		q, err := eq.generateQuery()
		if err != nil {
			return 0, err
		}
		cs = cs.Query(q)
	}
	return cs.Do()
}

// NewQuery creates a new query object, to return results of type kind.
func (es *Elastic) NewQuery(kind string) search.Query {
	eq := new(ElasticQuery)
	eq.client = es.client
	eq.kind = kind
	return eq
}

// init registers this engine with the search package under the name
// "elasticsearch" so callers can select it by name.
func init() {
	log.Info("Initing elasticsearch")
	search.Register("elasticsearch", new(Elastic))
}
// Copyright (c) 2019 bketelsen
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT

// Command devlx: all CLI behavior is implemented in the cmd package.
package main

import "github.com/bketelsen/devlx/cmd"

// main simply delegates to the command tree's Execute entry point.
func main() {
	cmd.Execute()
}
package doom import ( "fmt" "strconv" ) type Mode string const ( DM Mode = "dm" CTF = "ctf" TDM = "tdm" ) type args []string func (a args) Add(key, value string) args { a = append(a, key, value) return a } type Config struct { Name string `toml:"name"` Hostname string `toml:"hostname"` Mode Mode `toml:"mode"` Port int16 `toml:"port"` WADs []string `toml:"wads"` DMFlags uint64 `toml:"dmflags"` DMFlags2 uint64 `toml:"dmflags2"` ZADmflags uint64 `toml:"zadmflags"` Compatflags uint64 `toml:"compatflags"` Compatflags2 uint64 `toml:"compatflags2"` ZACompatFlags uint64 `toml:"zacompatflags"` Disabled bool `toml:"disabled"` } func (c Config) Args() []string { args := make(args, 0, 20). Add("+sv_hostname", c.Hostname). Add("-port", strconv.Itoa(int(c.Port))). Add("+skill", "4"). Add("+sv_maxplayers", "16"). Add("+dmflags", fmt.Sprintf("%v", c.DMFlags)). Add("+dmflags2", fmt.Sprintf("%v", c.DMFlags2)). Add("+zadmflags", fmt.Sprintf("%v", c.ZADmflags)). Add("+compatflags", fmt.Sprintf("%v", c.Compatflags)). Add("+compatflags2", fmt.Sprintf("%v", c.Compatflags2)). Add("+zacompatflags", fmt.Sprintf("%v", c.ZADmflags)). Add("+sv_updatemaster", "false"). Add(fmt.Sprintf("+%s", c.Mode), "1") for _, wad := range c.WADs { args = args.Add("-file", wad) } return args }
// Package models: WeChat payment payload models.
package models

import (
	"encoding/json"
	"strconv"

	"github.com/empirefox/esecend/front"
)

// WxGoodsDetail is one line item in the WeChat order-detail payload.
type WxGoodsDetail struct {
	ID    string `json:"goods_id"`   // Product.ID
	Name  string `json:"goods_name"` // Product.Name
	Num   uint   `json:"goods_num"`
	Price uint   `json:"price"`
}

// WxOrderDetail is the top-level WeChat order-detail payload.
type WxOrderDetail struct {
	Goods []WxGoodsDetail `json:"goods_detail"`
}

// MarshalWxOrderDetail converts order items into the WeChat
// order-detail JSON document; item IDs are rendered as decimal strings.
func MarshalWxOrderDetail(items []*front.OrderItem) ([]byte, error) {
	var goods []WxGoodsDetail
	for i := range items {
		goods = append(goods, WxGoodsDetail{
			ID:    strconv.FormatUint(uint64(items[i].ID), 10),
			Name:  items[i].Name,
			Num:   items[i].Quantity,
			Price: items[i].Price,
		})
	}
	return json.Marshal(&WxOrderDetail{goods})
}

// UnifiedOrderAttach is attached to a WeChat unified-order request to
// record how the order was paid.
//
// BUG FIX: the tags were previously written as `,omitempty`, which is
// not the key:"value" form required by reflect.StructTag, so
// encoding/json silently ignored them (fields were always emitted
// under their Go names). They now read `json:",omitempty"` — same JSON
// keys, but zero values are omitted as originally intended.
type UnifiedOrderAttach struct {
	UserID      uint
	CashPaid    uint `json:",omitempty"`
	PointsPaid  uint `json:",omitempty"`
	PreCashID   uint `json:",omitempty"`
	PrePointsID uint `json:",omitempty"`
}
package main

import (
	"testing"

	"github.com/shanghuiyang/rpi-devices/app/car/car"
	"github.com/stretchr/testify/assert"
)

// TestStart verifies that a car built from an empty config, and the
// server wrapping it, can both be constructed without being nil.
func TestStart(t *testing.T) {
	car := car.New(&car.Config{})
	assert.NotNil(t, car)

	s := newServer(car)
	assert.NotNil(t, s)
}
package main

import "net"

// main runs a minimal TCP echo server on port 8080. Connections are
// accepted and served one at a time on the main goroutine: each chunk
// read is written straight back until a read/write error (including
// the peer closing) ends the session.
//
// NOTE(review): because there is no per-connection goroutine, one slow
// client blocks all others; and any Accept error panics the whole
// server rather than continuing.
func main() {
	li, err := net.Listen("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	defer li.Close()
	for {
		conn, err := li.Accept()
		if err != nil {
			panic(err)
		}
		for {
			// Fresh 1 KiB scratch buffer for each read.
			bs := make([]byte, 1024)
			n, err := conn.Read(bs)
			if err != nil {
				break
			}
			// Echo back exactly the bytes just read.
			_, err = conn.Write(bs[:n])
			if err != nil {
				break
			}
		}
		conn.Close()
	}
}
// Tests for the seven tetromino shapes: each test compares the 4x4
// bitmask rendering of a piece against its expected ASCII picture and
// checks the piece's reported bounding-box size.
package main

import (
	"fmt"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestITetrominoShape: the I piece is a 1x4 vertical bar.
func TestITetrominoShape(t *testing.T) {
	out := `
0001
0001
0001
0001`
	s := NewITetromino()
	out2 := shapeToString(s.Shape())
	assert.Equal(t, strings.TrimSpace(out), out2)
	assert.Equal(t, &Size{w: 1, h: 4}, s.Size())
}

// TestOTetrominoShape: the O piece is a 2x2 square.
func TestOTetrominoShape(t *testing.T) {
	out := `
0011
0011
0000
0000
`
	s := NewOTetromino()
	out2 := shapeToString(s.Shape())
	assert.Equal(t, strings.TrimSpace(out), out2)
	assert.Equal(t, &Size{w: 2, h: 2}, s.Size())
}

// TestTTetrominoShape: the T piece is 3 wide with a center stub.
func TestTTetrominoShape(t *testing.T) {
	out := `
0010
0111
0000
0000
`
	s := NewTTetromino()
	out2 := shapeToString(s.Shape())
	assert.Equal(t, strings.TrimSpace(out), out2)
	assert.Equal(t, &Size{w: 3, h: 2}, s.Size())
}

// TestSTetrominoShape: the S piece zig-zags right-to-left.
func TestSTetrominoShape(t *testing.T) {
	out := `
0011
0110
0000
0000
`
	s := NewSTetromino()
	out2 := shapeToString(s.Shape())
	assert.Equal(t, strings.TrimSpace(out), out2)
	assert.Equal(t, &Size{w: 3, h: 2}, s.Size())
}

// TestZTetrominoShape: the Z piece zig-zags left-to-right.
func TestZTetrominoShape(t *testing.T) {
	out := `
0110
0011
0000
0000
`
	s := NewZTetromino()
	out2 := shapeToString(s.Shape())
	assert.Equal(t, strings.TrimSpace(out), out2)
	assert.Equal(t, &Size{w: 3, h: 2}, s.Size())
}

// TestJTetrominoShape: the J piece hooks at the right end.
func TestJTetrominoShape(t *testing.T) {
	out := `
0001
0111
0000
0000
`
	s := NewJTetromino()
	out2 := shapeToString(s.Shape())
	assert.Equal(t, strings.TrimSpace(out), out2)
	assert.Equal(t, &Size{w: 3, h: 2}, s.Size())
}

// TestLTetrominoShape: the L piece hooks at the left end.
func TestLTetrominoShape(t *testing.T) {
	out := `
0100
0111
0000
0000
`
	s := NewLTetromino()
	out2 := shapeToString(s.Shape())
	assert.Equal(t, strings.TrimSpace(out), out2)
	assert.Equal(t, &Size{w: 3, h: 2}, s.Size())
}

// shapeToString renders each row of the shape definition as a 4-digit
// binary string and joins the rows with newlines, matching the ASCII
// pictures above.
func shapeToString(s *Shape) string {
	out := make([]string, len(s.Definition))
	for i := 0; i < len(s.Definition); i++ {
		out[i] = fmt.Sprintf("%04b", s.Definition[i])
	}
	return strings.Join(out, "\n")
}
// Package structs defines the XML envelope types for the ClientUpdate
// operation (request and response).
package structs

import "encoding/xml"

// ClientUpdateRequest is the XML body sent to update a client record,
// including registered and actual (fact) addresses plus various
// compliance flags (PDL, FATCA, CRS, ...).
type ClientUpdateRequest struct {
	XMLName       xml.Name `xml:"ClientUpdateRequest"`
	Text          string   `xml:",chardata"`
	Xsd           string   `xml:"xsd,attr"`
	Xsi           string   `xml:"xsi,attr"`
	BranchCode    string   `xml:"BranchCode"`
	Requester     string   `xml:"Requester"`
	InstitutionId string   `xml:"InstitutionId"`
	// RegAddress is the client's registered address.
	RegAddress struct {
		Text      string `xml:",chardata"`
		AddrMode  string `xml:"AddrMode"`
		PostIndex string `xml:"PostIndex"`
		Region    string `xml:"Region"`
		Area      string `xml:"Area"`
		Town      string `xml:"Town"`
		Street    string `xml:"Street"`
		House     string `xml:"House"`
		Flat      string `xml:"Flat"`
	} `xml:"RegAddress"`
	// FactAddress is the client's actual residence address.
	// NOTE(review): unlike RegAddress it has no Flat element — confirm
	// against the service schema whether that is intentional.
	FactAddress struct {
		Text      string `xml:",chardata"`
		AddrMode  string `xml:"AddrMode"`
		PostIndex string `xml:"PostIndex"`
		Region    string `xml:"Region"`
		Area      string `xml:"Area"`
		Town      string `xml:"Town"`
		Street    string `xml:"Street"`
		House     string `xml:"House"`
	} `xml:"FactAddress"`
	ChannelRep     string `xml:"ChannelRep"`
	Pdl            string `xml:"Pdl"`
	Fatca          string `xml:"Fatca"`
	Benf           string `xml:"Benf"`
	RealUser       string `xml:"RealUser"`
	Crs            string `xml:"Crs"`
	CrsCountryCode string `xml:"CrsCountryCode"`
}

// ClientUpdateResponse is the XML body returned by the ClientUpdate
// operation; ReturnCode carries the service's status code.
type ClientUpdateResponse struct {
	XMLName    xml.Name `xml:"ClientUpdateResponse"`
	Text       string   `xml:",chardata"`
	Xsd        string   `xml:"xsd,attr"`
	Xsi        string   `xml:"xsi,attr"`
	ReturnCode int      `xml:"ReturnCode"`
}
package types // TypeString maps an unknown type to a string indicating its type. func TypeString(t Type) (string, bool) { switch v := t.(type) { case *Value: if v.Const { return "const", true } return "var", true case *Interface: return "interface", true case *Function: return "function", true default: return "", false } } // Compare compares arguments for the purposes of sorting. // Types are first ordered by type, then by string comapring their names. func Compare(a, b Type) bool { aVal := extractTypeSortValue(a) bVal := extractTypeSortValue(b) if aVal != bVal { return aVal < bVal } aName, aOk := extractName(a) bName, bOk := extractName(b) if aOk && !bOk { return true } if !aOk && bOk { return false } return aName < bName } func extractName(t Type) (string, bool) { switch v := t.(type) { case *Value: return v.Name, true case *Interface: return v.Name, true case *Function: return v.Name, true default: return "", false } } func extractTypeSortValue(t Type) int { switch v := t.(type) { case *Value: if v.Const { return 0 } return 1 case *Interface: return 2 case *Function: return 3 default: return 999 } }
package _559_Maximum_Depth_of_N_ary_Tree type Node struct { Val int Children []*Node } func maxDepth(root *Node) int { return maxDepthRecursively(root) } // 递归解法 func maxDepthRecursively(root *Node) int { if root == nil { return 0 } var ( result int ) for _, child := range root.Children { tr := maxDepthRecursively(child) if tr > result { result = tr } } return result + 1 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dbterror import ( "fmt" mysql "github.com/pingcap/tidb/errno" parser_mysql "github.com/pingcap/tidb/parser/mysql" ) var ( // ErrInvalidWorker means the worker is invalid. ErrInvalidWorker = ClassDDL.NewStd(mysql.ErrInvalidDDLWorker) // ErrNotOwner means we are not owner and can't handle DDL jobs. ErrNotOwner = ClassDDL.NewStd(mysql.ErrNotOwner) // ErrCantDecodeRecord means we can't decode the record. ErrCantDecodeRecord = ClassDDL.NewStd(mysql.ErrCantDecodeRecord) // ErrInvalidDDLJob means the DDL job is invalid. ErrInvalidDDLJob = ClassDDL.NewStd(mysql.ErrInvalidDDLJob) // ErrCancelledDDLJob means the DDL job is cancelled. ErrCancelledDDLJob = ClassDDL.NewStd(mysql.ErrCancelledDDLJob) // ErrPausedDDLJob returns when the DDL job cannot be paused. ErrPausedDDLJob = ClassDDL.NewStd(mysql.ErrPausedDDLJob) // ErrRunMultiSchemaChanges means we run multi schema changes. ErrRunMultiSchemaChanges = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "multi schema change for %s"), nil)) // ErrOperateSameColumn means we change the same columns multiple times in a DDL. 
ErrOperateSameColumn = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "operate same column '%s'"), nil)) // ErrOperateSameIndex means we change the same indexes multiple times in a DDL. ErrOperateSameIndex = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "operate same index '%s'"), nil)) // ErrWaitReorgTimeout means we wait for reorganization timeout. ErrWaitReorgTimeout = ClassDDL.NewStdErr(mysql.ErrLockWaitTimeout, mysql.MySQLErrName[mysql.ErrWaitReorgTimeout]) // ErrInvalidStoreVer means invalid store version. ErrInvalidStoreVer = ClassDDL.NewStd(mysql.ErrInvalidStoreVersion) // ErrRepairTableFail is used to repair tableInfo in repair mode. ErrRepairTableFail = ClassDDL.NewStd(mysql.ErrRepairTable) // ErrCantDropColWithIndex means can't drop the column with index. We don't support dropping column with index covered now. 
ErrCantDropColWithIndex = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "drop column with index"), nil)) // ErrCantDropColWithAutoInc means can't drop column with auto_increment ErrCantDropColWithAutoInc = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "can't remove column with auto_increment when @@tidb_allow_remove_auto_inc disabled"), nil)) // ErrCantDropColWithCheckConstraint means can't drop column with check constraint ErrCantDropColWithCheckConstraint = ClassDDL.NewStd(mysql.ErrDependentByCheckConstraint) // ErrUnsupportedAddColumn means add columns is unsupported ErrUnsupportedAddColumn = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "add column"), nil)) // ErrUnsupportedModifyColumn means modify columns is unsupoorted ErrUnsupportedModifyColumn = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "modify column: %s"), nil)) // ErrUnsupportedModifyCharset means modify charset is unsupoorted ErrUnsupportedModifyCharset = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "modify %s"), nil)) // ErrUnsupportedModifyCollation means modify collation is unsupoorted ErrUnsupportedModifyCollation = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "modifying collation from %s to %s"), nil)) // ErrUnsupportedPKHandle is used to indicate that we can't support this PK handle. 
ErrUnsupportedPKHandle = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "drop integer primary key"), nil)) // ErrUnsupportedCharset means we don't support the charset. ErrUnsupportedCharset = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "charset %s and collate %s"), nil)) // ErrUnsupportedShardRowIDBits means we don't support the shard_row_id_bits. ErrUnsupportedShardRowIDBits = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "shard_row_id_bits for table with primary key as row id"), nil)) // ErrUnsupportedAlterTableWithValidation means we don't support the alter table with validation. ErrUnsupportedAlterTableWithValidation = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("ALTER TABLE WITH VALIDATION is currently unsupported", nil)) // ErrUnsupportedAlterTableWithoutValidation means we don't support the alter table without validation. ErrUnsupportedAlterTableWithoutValidation = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("ALTER TABLE WITHOUT VALIDATION is currently unsupported", nil)) // ErrUnsupportedAlterTableOption means we don't support the alter table option. ErrUnsupportedAlterTableOption = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("This type of ALTER TABLE is currently unsupported", nil)) // ErrUnsupportedAlterCacheForSysTable means we don't support the alter cache for system table. ErrUnsupportedAlterCacheForSysTable = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("ALTER table cache for tables in system database is currently unsupported", nil)) // ErrBlobKeyWithoutLength is used when BLOB is used as key but without a length. 
ErrBlobKeyWithoutLength = ClassDDL.NewStd(mysql.ErrBlobKeyWithoutLength) // ErrKeyPart0 is used when key parts length is 0. ErrKeyPart0 = ClassDDL.NewStd(mysql.ErrKeyPart0) // ErrIncorrectPrefixKey is used when the prefix length is incorrect for a string key. ErrIncorrectPrefixKey = ClassDDL.NewStd(mysql.ErrWrongSubKey) // ErrTooLongKey is used when the column key is too long. ErrTooLongKey = ClassDDL.NewStd(mysql.ErrTooLongKey) // ErrKeyColumnDoesNotExits is used when the key column doesn't exist. ErrKeyColumnDoesNotExits = ClassDDL.NewStd(mysql.ErrKeyColumnDoesNotExits) // ErrInvalidDDLJobVersion is used when the DDL job version is invalid. ErrInvalidDDLJobVersion = ClassDDL.NewStd(mysql.ErrInvalidDDLJobVersion) // ErrInvalidUseOfNull is used when the column is not null. ErrInvalidUseOfNull = ClassDDL.NewStd(mysql.ErrInvalidUseOfNull) // ErrTooManyFields is used when too many columns are used in a select statement. ErrTooManyFields = ClassDDL.NewStd(mysql.ErrTooManyFields) // ErrTooManyKeys is used when too many keys used. ErrTooManyKeys = ClassDDL.NewStd(mysql.ErrTooManyKeys) // ErrInvalidSplitRegionRanges is used when split region ranges is invalid. ErrInvalidSplitRegionRanges = ClassDDL.NewStd(mysql.ErrInvalidSplitRegionRanges) // ErrReorgPanic is used when reorg process is panic. ErrReorgPanic = ClassDDL.NewStd(mysql.ErrReorgPanic) // ErrFkColumnCannotDrop is used when foreign key column can't be dropped. ErrFkColumnCannotDrop = ClassDDL.NewStd(mysql.ErrFkColumnCannotDrop) // ErrFkColumnCannotDropChild is used when foreign key column can't be dropped. ErrFkColumnCannotDropChild = ClassDDL.NewStd(mysql.ErrFkColumnCannotDropChild) // ErrFKIncompatibleColumns is used when foreign key column type is incompatible. ErrFKIncompatibleColumns = ClassDDL.NewStd(mysql.ErrFKIncompatibleColumns) // ErrOnlyOnRangeListPartition is used when the partition type is range list. 
ErrOnlyOnRangeListPartition = ClassDDL.NewStd(mysql.ErrOnlyOnRangeListPartition) // ErrWrongKeyColumn is for table column cannot be indexed. ErrWrongKeyColumn = ClassDDL.NewStd(mysql.ErrWrongKeyColumn) // ErrWrongKeyColumnFunctionalIndex is for expression cannot be indexed. ErrWrongKeyColumnFunctionalIndex = ClassDDL.NewStd(mysql.ErrWrongKeyColumnFunctionalIndex) // ErrWrongFKOptionForGeneratedColumn is for wrong foreign key reference option on generated columns. ErrWrongFKOptionForGeneratedColumn = ClassDDL.NewStd(mysql.ErrWrongFKOptionForGeneratedColumn) // ErrUnsupportedOnGeneratedColumn is for unsupported actions on generated columns. ErrUnsupportedOnGeneratedColumn = ClassDDL.NewStd(mysql.ErrUnsupportedOnGeneratedColumn) // ErrGeneratedColumnNonPrior forbids to refer generated column non prior to it. ErrGeneratedColumnNonPrior = ClassDDL.NewStd(mysql.ErrGeneratedColumnNonPrior) // ErrDependentByGeneratedColumn forbids to delete columns which are dependent by generated columns. ErrDependentByGeneratedColumn = ClassDDL.NewStd(mysql.ErrDependentByGeneratedColumn) // ErrJSONUsedAsKey forbids to use JSON as key or index. ErrJSONUsedAsKey = ClassDDL.NewStd(mysql.ErrJSONUsedAsKey) // ErrBlobCantHaveDefault forbids to give not null default value to TEXT/BLOB/JSON. ErrBlobCantHaveDefault = ClassDDL.NewStd(mysql.ErrBlobCantHaveDefault) // ErrTooLongIndexComment means the comment for index is too long. ErrTooLongIndexComment = ClassDDL.NewStd(mysql.ErrTooLongIndexComment) // ErrTooLongTableComment means the comment for table is too long. ErrTooLongTableComment = ClassDDL.NewStd(mysql.ErrTooLongTableComment) // ErrTooLongFieldComment means the comment for field/column is too long. ErrTooLongFieldComment = ClassDDL.NewStd(mysql.ErrTooLongFieldComment) // ErrTooLongTablePartitionComment means the comment for table partition is too long. 
ErrTooLongTablePartitionComment = ClassDDL.NewStd(mysql.ErrTooLongTablePartitionComment) // ErrInvalidDefaultValue returns for invalid default value for columns. ErrInvalidDefaultValue = ClassDDL.NewStd(mysql.ErrInvalidDefault) // ErrDefValGeneratedNamedFunctionIsNotAllowed returns for disallowed function as default value expression of column. ErrDefValGeneratedNamedFunctionIsNotAllowed = ClassDDL.NewStd(mysql.ErrDefValGeneratedNamedFunctionIsNotAllowed) // ErrGeneratedColumnRefAutoInc forbids to refer generated columns to auto-increment columns . ErrGeneratedColumnRefAutoInc = ClassDDL.NewStd(mysql.ErrGeneratedColumnRefAutoInc) // ErrExpressionIndexCanNotRefer forbids to refer expression index to auto-increment column. ErrExpressionIndexCanNotRefer = ClassDDL.NewStd(mysql.ErrFunctionalIndexRefAutoIncrement) // ErrUnsupportedAddPartition returns for does not support add partitions. ErrUnsupportedAddPartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "add partitions"), nil)) // ErrUnsupportedCoalescePartition returns for does not support coalesce partitions. ErrUnsupportedCoalescePartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "coalesce partitions"), nil)) // ErrUnsupportedReorganizePartition returns for does not support reorganize partitions. ErrUnsupportedReorganizePartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "reorganize partition"), nil)) // ErrUnsupportedCheckPartition returns for does not support check partitions. 
ErrUnsupportedCheckPartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "check partition"), nil)) // ErrUnsupportedOptimizePartition returns for does not support optimize partitions. ErrUnsupportedOptimizePartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "optimize partition"), nil)) // ErrUnsupportedRebuildPartition returns for does not support rebuild partitions. ErrUnsupportedRebuildPartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "rebuild partition"), nil)) // ErrUnsupportedRemovePartition returns for does not support remove partitions. ErrUnsupportedRemovePartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "remove partitioning"), nil)) // ErrUnsupportedRepairPartition returns for does not support repair partitions. ErrUnsupportedRepairPartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "repair partition"), nil)) // ErrGeneratedColumnFunctionIsNotAllowed returns for unsupported functions for generated columns. ErrGeneratedColumnFunctionIsNotAllowed = ClassDDL.NewStd(mysql.ErrGeneratedColumnFunctionIsNotAllowed) // ErrGeneratedColumnRowValueIsNotAllowed returns for generated columns referring to row values. ErrGeneratedColumnRowValueIsNotAllowed = ClassDDL.NewStd(mysql.ErrGeneratedColumnRowValueIsNotAllowed) // ErrUnsupportedPartitionByRangeColumns returns for does unsupported partition by range columns. 
ErrUnsupportedPartitionByRangeColumns = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "partition by range columns"), nil)) // ErrFunctionalIndexFunctionIsNotAllowed returns for unsupported functions for functional index. ErrFunctionalIndexFunctionIsNotAllowed = ClassDDL.NewStd(mysql.ErrFunctionalIndexFunctionIsNotAllowed) // ErrFunctionalIndexRowValueIsNotAllowed returns for functional index referring to row values. ErrFunctionalIndexRowValueIsNotAllowed = ClassDDL.NewStd(mysql.ErrFunctionalIndexRowValueIsNotAllowed) // ErrUnsupportedCreatePartition returns for does not support create partitions. ErrUnsupportedCreatePartition = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "partition type, treat as normal table"), nil)) // ErrTablePartitionDisabled returns for table partition is disabled. ErrTablePartitionDisabled = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("Partitions are ignored because Table Partition is disabled, please set 'tidb_enable_table_partition' if you need to need to enable it", nil)) // ErrUnsupportedIndexType returns for unsupported index type. ErrUnsupportedIndexType = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "index type"), nil)) // ErrWindowInvalidWindowFuncUse returns for invalid window function use. ErrWindowInvalidWindowFuncUse = ClassDDL.NewStd(mysql.ErrWindowInvalidWindowFuncUse) // ErrDupKeyName returns for duplicated key name. ErrDupKeyName = ClassDDL.NewStd(mysql.ErrDupKeyName) // ErrFkDupName returns for duplicated FK name. ErrFkDupName = ClassDDL.NewStd(mysql.ErrFkDupName) // ErrInvalidDDLState returns for invalid ddl model object state. 
ErrInvalidDDLState = ClassDDL.NewStdErr(mysql.ErrInvalidDDLState, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrInvalidDDLState].Raw), nil)) // ErrUnsupportedModifyPrimaryKey returns an error when add or drop the primary key. // It's exported for testing. ErrUnsupportedModifyPrimaryKey = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "%s primary key"), nil)) // ErrPKIndexCantBeInvisible return an error when primary key is invisible index ErrPKIndexCantBeInvisible = ClassDDL.NewStd(mysql.ErrPKIndexCantBeInvisible) // ErrColumnBadNull returns for a bad null value. ErrColumnBadNull = ClassDDL.NewStd(mysql.ErrBadNull) // ErrBadField forbids to refer to unknown column. ErrBadField = ClassDDL.NewStd(mysql.ErrBadField) // ErrCantRemoveAllFields returns for deleting all columns. ErrCantRemoveAllFields = ClassDDL.NewStd(mysql.ErrCantRemoveAllFields) // ErrCantDropFieldOrKey returns for dropping a non-existent field or key. ErrCantDropFieldOrKey = ClassDDL.NewStd(mysql.ErrCantDropFieldOrKey) // ErrInvalidOnUpdate returns for invalid ON UPDATE clause. ErrInvalidOnUpdate = ClassDDL.NewStd(mysql.ErrInvalidOnUpdate) // ErrTooLongIdent returns for too long name of database/table/column/index. ErrTooLongIdent = ClassDDL.NewStd(mysql.ErrTooLongIdent) // ErrWrongDBName returns for wrong database name. ErrWrongDBName = ClassDDL.NewStd(mysql.ErrWrongDBName) // ErrWrongTableName returns for wrong table name. ErrWrongTableName = ClassDDL.NewStd(mysql.ErrWrongTableName) // ErrWrongColumnName returns for wrong column name. ErrWrongColumnName = ClassDDL.NewStd(mysql.ErrWrongColumnName) // ErrWrongPartitionName returns for wrong partition name. ErrWrongPartitionName = ClassDDL.NewStd(mysql.ErrWrongPartitionName) // ErrWrongUsage returns for wrong ddl syntax usage. 
ErrWrongUsage = ClassDDL.NewStd(mysql.ErrWrongUsage) // ErrInvalidGroupFuncUse returns for using invalid group functions. ErrInvalidGroupFuncUse = ClassDDL.NewStd(mysql.ErrInvalidGroupFuncUse) // ErrTableMustHaveColumns returns for missing column when creating a table. ErrTableMustHaveColumns = ClassDDL.NewStd(mysql.ErrTableMustHaveColumns) // ErrWrongNameForIndex returns for wrong index name. ErrWrongNameForIndex = ClassDDL.NewStd(mysql.ErrWrongNameForIndex) // ErrUnknownCharacterSet returns unknown character set. ErrUnknownCharacterSet = ClassDDL.NewStd(mysql.ErrUnknownCharacterSet) // ErrUnknownCollation returns unknown collation. ErrUnknownCollation = ClassDDL.NewStd(mysql.ErrUnknownCollation) // ErrCollationCharsetMismatch returns when collation not match the charset. ErrCollationCharsetMismatch = ClassDDL.NewStd(mysql.ErrCollationCharsetMismatch) // ErrConflictingDeclarations return conflict declarations. ErrConflictingDeclarations = ClassDDL.NewStdErr(mysql.ErrConflictingDeclarations, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrConflictingDeclarations].Raw, "CHARACTER SET ", "%s", "CHARACTER SET ", "%s"), nil)) // ErrPrimaryCantHaveNull returns All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead ErrPrimaryCantHaveNull = ClassDDL.NewStd(mysql.ErrPrimaryCantHaveNull) // ErrErrorOnRename returns error for wrong database name in alter table rename ErrErrorOnRename = ClassDDL.NewStd(mysql.ErrErrorOnRename) // ErrViewSelectClause returns error for create view with select into clause ErrViewSelectClause = ClassDDL.NewStd(mysql.ErrViewSelectClause) // ErrNotAllowedTypeInPartition returns not allowed type error when creating table partition with unsupported expression type. ErrNotAllowedTypeInPartition = ClassDDL.NewStd(mysql.ErrFieldTypeNotAllowedAsPartitionField) // ErrPartitionMgmtOnNonpartitioned returns it's not a partition table. 
ErrPartitionMgmtOnNonpartitioned = ClassDDL.NewStd(mysql.ErrPartitionMgmtOnNonpartitioned) // ErrDropPartitionNonExistent returns error in list of partition. ErrDropPartitionNonExistent = ClassDDL.NewStd(mysql.ErrDropPartitionNonExistent) // ErrSameNamePartition returns duplicate partition name. ErrSameNamePartition = ClassDDL.NewStd(mysql.ErrSameNamePartition) // ErrSameNamePartitionField returns duplicate partition field. ErrSameNamePartitionField = ClassDDL.NewStd(mysql.ErrSameNamePartitionField) // ErrRangeNotIncreasing returns values less than value must be strictly increasing for each partition. ErrRangeNotIncreasing = ClassDDL.NewStd(mysql.ErrRangeNotIncreasing) // ErrPartitionMaxvalue returns maxvalue can only be used in last partition definition. ErrPartitionMaxvalue = ClassDDL.NewStd(mysql.ErrPartitionMaxvalue) // ErrMaxvalueInValuesIn returns maxvalue cannot be used in values in. ErrMaxvalueInValuesIn = ClassDDL.NewStd(mysql.ErrMaxvalueInValuesIn) // ErrDropLastPartition returns cannot remove all partitions, use drop table instead. ErrDropLastPartition = ClassDDL.NewStd(mysql.ErrDropLastPartition) // ErrTooManyPartitions returns too many partitions were defined. ErrTooManyPartitions = ClassDDL.NewStd(mysql.ErrTooManyPartitions) // ErrPartitionConstDomain returns partition constant is out of partition function domain. ErrPartitionConstDomain = ClassDDL.NewStd(mysql.ErrPartitionConstDomain) // ErrPartitionFunctionIsNotAllowed returns this partition function is not allowed. ErrPartitionFunctionIsNotAllowed = ClassDDL.NewStd(mysql.ErrPartitionFunctionIsNotAllowed) // ErrPartitionFuncNotAllowed returns partition function returns the wrong type. ErrPartitionFuncNotAllowed = ClassDDL.NewStd(mysql.ErrPartitionFuncNotAllowed) // ErrUniqueKeyNeedAllFieldsInPf returns must include all columns in the table's partitioning function. 
ErrUniqueKeyNeedAllFieldsInPf = ClassDDL.NewStd(mysql.ErrUniqueKeyNeedAllFieldsInPf) // ErrWrongExprInPartitionFunc Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed. ErrWrongExprInPartitionFunc = ClassDDL.NewStd(mysql.ErrWrongExprInPartitionFunc) // ErrWarnDataTruncated returns data truncated error. ErrWarnDataTruncated = ClassDDL.NewStd(mysql.WarnDataTruncated) // ErrCoalesceOnlyOnHashPartition returns coalesce partition can only be used on hash/key partitions. ErrCoalesceOnlyOnHashPartition = ClassDDL.NewStd(mysql.ErrCoalesceOnlyOnHashPartition) // ErrViewWrongList returns create view must include all columns in the select clause ErrViewWrongList = ClassDDL.NewStd(mysql.ErrViewWrongList) // ErrAlterOperationNotSupported returns when alter operations is not supported. ErrAlterOperationNotSupported = ClassDDL.NewStd(mysql.ErrAlterOperationNotSupportedReason) // ErrWrongObject returns for wrong object. ErrWrongObject = ClassDDL.NewStd(mysql.ErrWrongObject) // ErrTableCantHandleFt returns FULLTEXT keys are not supported by table type ErrTableCantHandleFt = ClassDDL.NewStd(mysql.ErrTableCantHandleFt) // ErrFieldNotFoundPart returns an error when 'partition by columns' are not found in table columns. ErrFieldNotFoundPart = ClassDDL.NewStd(mysql.ErrFieldNotFoundPart) // ErrWrongTypeColumnValue returns 'Partition column values of incorrect type' ErrWrongTypeColumnValue = ClassDDL.NewStd(mysql.ErrWrongTypeColumnValue) // ErrValuesIsNotIntType returns 'VALUES value for partition '%-.64s' must have type INT' ErrValuesIsNotIntType = ClassDDL.NewStd(mysql.ErrValuesIsNotIntType) // ErrFunctionalIndexPrimaryKey returns 'The primary key cannot be a functional index' ErrFunctionalIndexPrimaryKey = ClassDDL.NewStd(mysql.ErrFunctionalIndexPrimaryKey) // ErrFunctionalIndexOnField returns 'Functional index on a column is not supported. 
Consider using a regular index instead' ErrFunctionalIndexOnField = ClassDDL.NewStd(mysql.ErrFunctionalIndexOnField) // ErrInvalidAutoRandom returns when auto_random is used incorrectly. ErrInvalidAutoRandom = ClassDDL.NewStd(mysql.ErrInvalidAutoRandom) // ErrUnsupportedConstraintCheck returns when use ADD CONSTRAINT CHECK ErrUnsupportedConstraintCheck = ClassDDL.NewStd(mysql.ErrUnsupportedConstraintCheck) // ErrDerivedMustHaveAlias returns when a sub select statement does not have a table alias. ErrDerivedMustHaveAlias = ClassDDL.NewStd(mysql.ErrDerivedMustHaveAlias) // ErrSequenceRunOut returns when the sequence has been run out. ErrSequenceRunOut = ClassDDL.NewStd(mysql.ErrSequenceRunOut) // ErrSequenceInvalidData returns when sequence values are conflicting. ErrSequenceInvalidData = ClassDDL.NewStd(mysql.ErrSequenceInvalidData) // ErrSequenceAccessFail returns when sequences are not able to access. ErrSequenceAccessFail = ClassDDL.NewStd(mysql.ErrSequenceAccessFail) // ErrNotSequence returns when object is not a sequence. ErrNotSequence = ClassDDL.NewStd(mysql.ErrNotSequence) // ErrUnknownSequence returns when drop / alter unknown sequence. ErrUnknownSequence = ClassDDL.NewStd(mysql.ErrUnknownSequence) // ErrSequenceUnsupportedTableOption returns when unsupported table option exists in sequence. ErrSequenceUnsupportedTableOption = ClassDDL.NewStd(mysql.ErrSequenceUnsupportedTableOption) // ErrColumnTypeUnsupportedNextValue is returned when sequence next value is assigned to unsupported column type. ErrColumnTypeUnsupportedNextValue = ClassDDL.NewStd(mysql.ErrColumnTypeUnsupportedNextValue) // ErrAddColumnWithSequenceAsDefault is returned when the new added column with sequence's nextval as it's default value. ErrAddColumnWithSequenceAsDefault = ClassDDL.NewStd(mysql.ErrAddColumnWithSequenceAsDefault) // ErrUnsupportedExpressionIndex is returned when create an expression index without allow-expression-index. 
ErrUnsupportedExpressionIndex = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "creating expression index containing unsafe functions without allow-expression-index in config"), nil)) // ErrPartitionExchangePartTable is returned when exchange table partition with another table is partitioned. ErrPartitionExchangePartTable = ClassDDL.NewStd(mysql.ErrPartitionExchangePartTable) // ErrPartitionExchangeTempTable is returned when exchange table partition with a temporary table ErrPartitionExchangeTempTable = ClassDDL.NewStd(mysql.ErrPartitionExchangeTempTable) // ErrTablesDifferentMetadata is returned when exchanges tables is not compatible. ErrTablesDifferentMetadata = ClassDDL.NewStd(mysql.ErrTablesDifferentMetadata) // ErrRowDoesNotMatchPartition is returned when the row record of exchange table does not match the partition rule. ErrRowDoesNotMatchPartition = ClassDDL.NewStd(mysql.ErrRowDoesNotMatchPartition) // ErrPartitionExchangeForeignKey is returned when exchanged normal table has foreign keys. ErrPartitionExchangeForeignKey = ClassDDL.NewStd(mysql.ErrPartitionExchangeForeignKey) // ErrCheckNoSuchTable is returned when exchanged normal table is view or sequence. ErrCheckNoSuchTable = ClassDDL.NewStd(mysql.ErrCheckNoSuchTable) // ErrUnsupportedPartitionType is returned when exchange table partition type is not supported. ErrUnsupportedPartitionType = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "partition type of table %s when exchanging partition"), nil)) // ErrPartitionExchangeDifferentOption is returned when attribute does not match between partition table and normal table. ErrPartitionExchangeDifferentOption = ClassDDL.NewStd(mysql.ErrPartitionExchangeDifferentOption) // ErrTableOptionUnionUnsupported is returned when create/alter table with union option. 
ErrTableOptionUnionUnsupported = ClassDDL.NewStd(mysql.ErrTableOptionUnionUnsupported) // ErrTableOptionInsertMethodUnsupported is returned when create/alter table with insert method option. ErrTableOptionInsertMethodUnsupported = ClassDDL.NewStd(mysql.ErrTableOptionInsertMethodUnsupported) // ErrInvalidPlacementPolicyCheck is returned when txn_scope and commit data changing do not meet the placement policy ErrInvalidPlacementPolicyCheck = ClassDDL.NewStd(mysql.ErrPlacementPolicyCheck) // ErrPlacementPolicyWithDirectOption is returned when create/alter table with both placement policy and placement options existed. ErrPlacementPolicyWithDirectOption = ClassDDL.NewStd(mysql.ErrPlacementPolicyWithDirectOption) // ErrPlacementPolicyInUse is returned when placement policy is in use in drop/alter. ErrPlacementPolicyInUse = ClassDDL.NewStd(mysql.ErrPlacementPolicyInUse) // ErrMultipleDefConstInListPart returns multiple definition of same constant in list partitioning. ErrMultipleDefConstInListPart = ClassDDL.NewStd(mysql.ErrMultipleDefConstInListPart) // ErrTruncatedWrongValue is returned when data has been truncated during conversion. ErrTruncatedWrongValue = ClassDDL.NewStd(mysql.ErrTruncatedWrongValue) // ErrWarnDataOutOfRange is returned when the value in a numeric column that is outside the permissible range of the column data type. // See https://dev.mysql.com/doc/refman/5.5/en/out-of-range-and-overflow.html for details ErrWarnDataOutOfRange = ClassDDL.NewStd(mysql.ErrWarnDataOutOfRange) // ErrTooLongValueForType is returned when the individual enum element length is too long. ErrTooLongValueForType = ClassDDL.NewStd(mysql.ErrTooLongValueForType) // ErrUnknownEngine is returned when the table engine is unknown. ErrUnknownEngine = ClassDDL.NewStd(mysql.ErrUnknownStorageEngine) // ErrExchangePartitionDisabled is returned when exchange partition is disabled. 
ErrExchangePartitionDisabled = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("Exchange Partition is disabled, please set 'tidb_enable_exchange_partition' if you need to need to enable it", nil)) // ErrPartitionNoTemporary returns when partition at temporary mode ErrPartitionNoTemporary = ClassDDL.NewStd(mysql.ErrPartitionNoTemporary) // ErrOptOnTemporaryTable returns when exec unsupported opt at temporary mode ErrOptOnTemporaryTable = ClassDDL.NewStd(mysql.ErrOptOnTemporaryTable) // ErrOptOnCacheTable returns when exec unsupported opt at cache mode ErrOptOnCacheTable = ClassDDL.NewStd(mysql.ErrOptOnCacheTable) // ErrUnsupportedOnCommitPreserve returns when exec unsupported opt on commit preserve ErrUnsupportedOnCommitPreserve = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("TiDB doesn't support ON COMMIT PRESERVE ROWS for now", nil)) // ErrUnsupportedClusteredSecondaryKey returns when exec unsupported clustered secondary key ErrUnsupportedClusteredSecondaryKey = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("CLUSTERED/NONCLUSTERED keyword is only supported for primary key", nil)) // ErrUnsupportedLocalTempTableDDL returns when ddl operation unsupported for local temporary table ErrUnsupportedLocalTempTableDDL = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message("TiDB doesn't support %s for local temporary table", nil)) // ErrInvalidAttributesSpec is returned when meeting invalid attributes. ErrInvalidAttributesSpec = ClassDDL.NewStd(mysql.ErrInvalidAttributesSpec) // ErrFunctionalIndexOnJSONOrGeometryFunction returns when creating expression index and the type of the expression is JSON. ErrFunctionalIndexOnJSONOrGeometryFunction = ClassDDL.NewStd(mysql.ErrFunctionalIndexOnJSONOrGeometryFunction) // ErrDependentByFunctionalIndex returns when the dropped column depends by expression index. 
ErrDependentByFunctionalIndex = ClassDDL.NewStd(mysql.ErrDependentByFunctionalIndex) // ErrFunctionalIndexOnBlob when the expression of expression index returns blob or text. ErrFunctionalIndexOnBlob = ClassDDL.NewStd(mysql.ErrFunctionalIndexOnBlob) // ErrDependentByPartitionFunctional returns when the dropped column depends by expression partition. ErrDependentByPartitionFunctional = ClassDDL.NewStd(mysql.ErrDependentByPartitionFunctional) // ErrUnsupportedAlterTableSpec means we don't support this alter table specification (i.e. unknown) ErrUnsupportedAlterTableSpec = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "Unsupported/unknown ALTER TABLE specification"), nil)) // ErrGeneralUnsupportedDDL as a generic error to customise by argument ErrGeneralUnsupportedDDL = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "%s"), nil)) // ErrAutoConvert when auto convert happens ErrAutoConvert = ClassDDL.NewStd(mysql.ErrAutoConvert) // ErrWrongStringLength when UserName or HostName is too long ErrWrongStringLength = ClassDDL.NewStd(mysql.ErrWrongStringLength) // ErrBinlogUnsafeSystemFunction when use a system function that may return a different value on the slave. ErrBinlogUnsafeSystemFunction = ClassDDL.NewStd(mysql.ErrBinlogUnsafeSystemFunction) // ErrDDLJobNotFound indicates the job id was not found. ErrDDLJobNotFound = ClassDDL.NewStd(mysql.ErrDDLJobNotFound) // ErrCancelFinishedDDLJob returns when cancel a finished ddl job. ErrCancelFinishedDDLJob = ClassDDL.NewStd(mysql.ErrCancelFinishedDDLJob) // ErrCannotCancelDDLJob returns when cancel a almost finished ddl job, because cancel in now may cause data inconsistency. ErrCannotCancelDDLJob = ClassDDL.NewStd(mysql.ErrCannotCancelDDLJob) // ErrCannotPauseDDLJob returns when the State is not qualified to be paused. 
ErrCannotPauseDDLJob = ClassDDL.NewStd(mysql.ErrCannotPauseDDLJob) // ErrCannotResumeDDLJob returns when the State is not qualified to be resumed. ErrCannotResumeDDLJob = ClassDDL.NewStd(mysql.ErrCannotResumeDDLJob) // ErrDDLSetting returns when failing to enable/disable DDL. ErrDDLSetting = ClassDDL.NewStd(mysql.ErrDDLSetting) // ErrIngestFailed returns when the DDL ingest job is failed. ErrIngestFailed = ClassDDL.NewStd(mysql.ErrIngestFailed) // ErrIngestCheckEnvFailed returns when the DDL ingest env is failed to init. ErrIngestCheckEnvFailed = ClassDDL.NewStd(mysql.ErrIngestCheckEnvFailed) // ErrColumnInChange indicates there is modification on the column in parallel. ErrColumnInChange = ClassDDL.NewStd(mysql.ErrColumnInChange) // ErrAlterTiFlashModeForTableWithoutTiFlashReplica returns when set tiflash mode on table whose tiflash_replica is null or tiflash_replica_count = 0 ErrAlterTiFlashModeForTableWithoutTiFlashReplica = ClassDDL.NewStdErr(0, parser_mysql.Message("TiFlash mode will take effect after at least one TiFlash replica is set for the table", nil)) // ErrUnsupportedTiFlashOperationForSysOrMemTable means we don't support the alter tiflash related action(e.g. set tiflash mode, set tiflash replica) for system table. ErrUnsupportedTiFlashOperationForSysOrMemTable = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "ALTER TiFlash settings for system table and memory table"), nil)) // ErrUnsupportedTiFlashOperationForUnsupportedCharsetTable is used when alter alter tiflash related action(e.g. set tiflash mode, set tiflash replica) with unsupported charset. 
ErrUnsupportedTiFlashOperationForUnsupportedCharsetTable = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "ALTER TiFlash settings for tables not supported by TiFlash: table contains %s charset"), nil)) // ErrDropIndexNeededInForeignKey returns when drop index which is needed in foreign key. ErrDropIndexNeededInForeignKey = ClassDDL.NewStd(mysql.ErrDropIndexNeededInForeignKey) // ErrForeignKeyCannotDropParent returns when drop table which has foreign key referred. ErrForeignKeyCannotDropParent = ClassDDL.NewStd(mysql.ErrForeignKeyCannotDropParent) // ErrTruncateIllegalForeignKey returns when truncate table which has foreign key referred. ErrTruncateIllegalForeignKey = ClassDDL.NewStd(mysql.ErrTruncateIllegalForeignKey) // ErrForeignKeyColumnCannotChange returns when change column which used by foreign key. ErrForeignKeyColumnCannotChange = ClassDDL.NewStd(mysql.ErrForeignKeyColumnCannotChange) // ErrForeignKeyColumnCannotChangeChild returns when change child table's column which used by foreign key. ErrForeignKeyColumnCannotChangeChild = ClassDDL.NewStd(mysql.ErrForeignKeyColumnCannotChangeChild) // ErrNoReferencedRow2 returns when there are rows in child table don't have related foreign key value in refer table. 
ErrNoReferencedRow2 = ClassDDL.NewStd(mysql.ErrNoReferencedRow2) // ErrUnsupportedColumnInTTLConfig returns when a column type is not expected in TTL config ErrUnsupportedColumnInTTLConfig = ClassDDL.NewStd(mysql.ErrUnsupportedColumnInTTLConfig) // ErrTTLColumnCannotDrop returns when a column is dropped while referenced by TTL config ErrTTLColumnCannotDrop = ClassDDL.NewStd(mysql.ErrTTLColumnCannotDrop) // ErrSetTTLOptionForNonTTLTable returns when the `TTL_ENABLE` or `TTL_JOB_INTERVAL` option is set on a non-TTL table ErrSetTTLOptionForNonTTLTable = ClassDDL.NewStd(mysql.ErrSetTTLOptionForNonTTLTable) // ErrTempTableNotAllowedWithTTL returns when setting TTL config for a temp table ErrTempTableNotAllowedWithTTL = ClassDDL.NewStd(mysql.ErrTempTableNotAllowedWithTTL) // ErrUnsupportedTTLReferencedByFK returns when the TTL config is set for a table referenced by foreign key ErrUnsupportedTTLReferencedByFK = ClassDDL.NewStd(mysql.ErrUnsupportedTTLReferencedByFK) // ErrUnsupportedPrimaryKeyTypeWithTTL returns when create or alter a table with TTL options but the primary key is not supported ErrUnsupportedPrimaryKeyTypeWithTTL = ClassDDL.NewStd(mysql.ErrUnsupportedPrimaryKeyTypeWithTTL) // ErrNotSupportedYet returns when tidb does not support this feature. ErrNotSupportedYet = ClassDDL.NewStd(mysql.ErrNotSupportedYet) // ErrColumnCheckConstraintReferOther is returned when create column check constraint referring other column. ErrColumnCheckConstraintReferOther = ClassDDL.NewStd(mysql.ErrColumnCheckConstraintReferencesOtherColumn) // ErrTableCheckConstraintReferUnknown is returned when create table check constraint referring non-existing column. ErrTableCheckConstraintReferUnknown = ClassDDL.NewStd(mysql.ErrTableCheckConstraintReferUnknown) // ErrConstraintNotFound is returned for dropping a non-existent constraint. ErrConstraintNotFound = ClassDDL.NewStd(mysql.ErrConstraintNotFound) // ErrCheckConstraintIsViolated is returned for violating an existent check constraint. 
ErrCheckConstraintIsViolated = ClassDDL.NewStd(mysql.ErrCheckConstraintViolated) // ErrCheckConstraintNamedFuncIsNotAllowed is returned for not allowed function with name. ErrCheckConstraintNamedFuncIsNotAllowed = ClassDDL.NewStd(mysql.ErrCheckConstraintNamedFunctionIsNotAllowed) // ErrCheckConstraintFuncIsNotAllowed is returned for not allowed function. ErrCheckConstraintFuncIsNotAllowed = ClassDDL.NewStd(mysql.ErrCheckConstraintFunctionIsNotAllowed) // ErrCheckConstraintVariables is returned for referring user or system variables. ErrCheckConstraintVariables = ClassDDL.NewStd(mysql.ErrCheckConstraintVariables) // ErrCheckConstraintRefersAutoIncrementColumn is returned for referring auto-increment columns. ErrCheckConstraintRefersAutoIncrementColumn = ClassDDL.NewStd(mysql.ErrCheckConstraintRefersAutoIncrementColumn) // ErrCheckConstraintUsingFKReferActionColumn is returned for referring foreign key columns. ErrCheckConstraintUsingFKReferActionColumn = ClassDDL.NewStd(mysql.ErrCheckConstraintClauseUsingFKReferActionColumn) // ErrNonBooleanExprForCheckConstraint is returned for non bool expression. ErrNonBooleanExprForCheckConstraint = ClassDDL.NewStd(mysql.ErrNonBooleanExprForCheckConstraint) ) // ReorgRetryableErrCodes is the error codes that are retryable for reorganization. var ReorgRetryableErrCodes = map[uint16]struct{}{ mysql.ErrPDServerTimeout: {}, mysql.ErrTiKVServerTimeout: {}, mysql.ErrTiKVServerBusy: {}, mysql.ErrResolveLockTimeout: {}, mysql.ErrRegionUnavailable: {}, mysql.ErrGCTooEarly: {}, mysql.ErrWriteConflict: {}, mysql.ErrTiKVStoreLimit: {}, mysql.ErrTiKVStaleCommand: {}, mysql.ErrTiKVMaxTimestampNotSynced: {}, mysql.ErrTiFlashServerTimeout: {}, mysql.ErrTiFlashServerBusy: {}, mysql.ErrInfoSchemaExpired: {}, mysql.ErrInfoSchemaChanged: {}, mysql.ErrWriteConflictInTiDB: {}, mysql.ErrTxnRetryable: {}, mysql.ErrNotOwner: {}, }
package mysql import ( "database/sql" "github.com/bearname/videohost/internal/common/db" "github.com/bearname/videohost/internal/videoserver/domain/dto" "github.com/bearname/videohost/internal/videoserver/domain/model" ) type SubtitleRepository struct { connector db.Connector } func NewSubtitleRepository(connector db.Connector) *SubtitleRepository { m := new(SubtitleRepository) m.connector = connector return m } func (r *SubtitleRepository) Create(subtitle dto.CreateSubtitleRequestDto) (int64, error) { var id int64 err := db.WithTransaction(r.connector.GetDb(), func(tx db.Transaction) error { query := `INSERT INTO subtitle (video_id) VALUES (?);` var result sql.Result result, err := tx.Exec(query, subtitle.VideoId) if err != nil { return err } id, err = result.LastInsertId() if err != nil { return err } query = "" var values []interface{} for _, part := range subtitle.Parts { query += "INSERT INTO subtitle_part (start, end, text) VALUES (?, ?, ?);" values = append(values, part.Start, part.End, part.Text) } _, err = tx.Exec(query, values...) if err != nil { return err } return nil }) return id, err } func (r *SubtitleRepository) Find(videoId int) (model.Subtitle, error) { panic("implement subtitleRepo.findByVideo") } func (r *SubtitleRepository) Update(subtitle model.Subtitle) error { panic("implement subtitleRepo.Update") } func (r *SubtitleRepository) Delete(subtitleId int) error { panic("implement subtitleRepo.Delete") }
package main

import (
	"fmt"
	"html"
	"log"
	"net/http"

	"github.com/Shopify/sarama"
)

// topic is the Kafka topic used for both producing and consuming.
const topic = "demo-topic"

// main wires up a Kafka producer/consumer pair and serves three HTTP
// endpoints: "/" (hello), "/save" (produce the "q" form value as a
// message), and "/retrieve" (return the last consumed message).
func main() {
	producer, err := newProducer()
	if err != nil {
		// BUG FIX: a nil producer would panic on the first /save
		// request, so fail fast instead of merely printing the error.
		log.Fatalln("Could not create producer: ", err)
	}

	consumer, err := sarama.NewConsumer(brokers, nil)
	if err != nil {
		log.Fatalln("Could not create consumer: ", err)
	}
	subscribe(topic, consumer)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "Hello Sarama!")
	})

	http.HandleFunc("/save", func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		r.ParseForm()
		msg := prepareMessage(topic, r.FormValue("q"))
		partition, offset, err := producer.SendMessage(msg)
		// BUG FIX: check the error before reporting success — on
		// failure the partition/offset values are meaningless, yet the
		// original printed them first.
		if err != nil {
			fmt.Fprintf(w, "Error occurred - %s.\n", err)
			return
		}
		fmt.Fprintf(w, "Message was saved to partition: %d.\nMessage offset is: %d.\n.", partition, offset)
	})

	http.HandleFunc("/retrieve", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, html.EscapeString(getMessage()))
	})

	log.Fatal(http.ListenAndServe(":8081", nil))
}
package seev

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document01400101 is the XML document wrapper for an
// AgentCAElectionCancellationRequestV01 message (seev.014.001.01).
type Document01400101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:seev.014.001.01 Document"`
	Message *AgentCAElectionCancellationRequestV01 `xml:"AgtCAElctnCxlReq"`
}

// AddMessage allocates the wrapped message, stores it on the document,
// and returns it for further population.
func (d *Document01400101) AddMessage() *AgentCAElectionCancellationRequestV01 {
	msg := new(AgentCAElectionCancellationRequestV01)
	d.Message = msg
	return msg
}

// Scope
// A CSD sends this message to the issuer (or its agent) to ask for the
// cancellation of a previously sent Agent Corporate Action Election Advice.
// Usage
// Only an entire, previously sent Agent Corporate Action Election Advice may
// be cancelled — partial cancellation is not allowed.
// The message must carry the identification of the election advice to cancel,
// the agent identification and the corporate action references; it may also
// carry details of the election advice being cancelled, though this is not
// recommended.
type AgentCAElectionCancellationRequestV01 struct {

	// Identification assigned by the Sender to unambiguously identify the cancellation request.
	Identification *iso20022.DocumentIdentification8 `xml:"Id"`

	// Identification of the Agent CA Election Advice to be cancelled.
	AgentCAElectionAdviceIdentification *iso20022.DocumentIdentification8 `xml:"AgtCAElctnAdvcId"`

	// General information about the corporate action event.
	CorporateActionGeneralInformation *iso20022.CorporateActionInformation1 `xml:"CorpActnGnlInf"`

	// Provides information about the election advice to be cancelled.
	ElectionDetails *iso20022.CorporateActionElection3 `xml:"ElctnDtls,omitempty"`
}

// AddIdentification allocates and returns the cancellation-request identification.
func (a *AgentCAElectionCancellationRequestV01) AddIdentification() *iso20022.DocumentIdentification8 {
	ident := new(iso20022.DocumentIdentification8)
	a.Identification = ident
	return ident
}

// AddAgentCAElectionAdviceIdentification allocates and returns the
// identification of the election advice to cancel.
func (a *AgentCAElectionCancellationRequestV01) AddAgentCAElectionAdviceIdentification() *iso20022.DocumentIdentification8 {
	ident := new(iso20022.DocumentIdentification8)
	a.AgentCAElectionAdviceIdentification = ident
	return ident
}

// AddCorporateActionGeneralInformation allocates and returns the corporate
// action general information block.
func (a *AgentCAElectionCancellationRequestV01) AddCorporateActionGeneralInformation() *iso20022.CorporateActionInformation1 {
	info := new(iso20022.CorporateActionInformation1)
	a.CorporateActionGeneralInformation = info
	return info
}

// AddElectionDetails allocates and returns the optional election details.
func (a *AgentCAElectionCancellationRequestV01) AddElectionDetails() *iso20022.CorporateActionElection3 {
	details := new(iso20022.CorporateActionElection3)
	a.ElectionDetails = details
	return details
}
package models import "github.com/jinzhu/gorm" type Link struct { BaseModel Img string `json:"img"` Name string `json:"name"` Url string `json:"url"` } func (link *Link) Create(db *gorm.DB) (*Link, error) { var model Link err := db.Create(&link).Error if err == nil { db.Where("id = ?", link.ID).First(&model) } return &model, err } func (link *Link) Update(db *gorm.DB) (*Link, error) { err := db.Model(link).Updates(link).Error db.First(link) return link, err }
/* Package dbadmin - A package created to implement db admin related activites */ package dbadmin import ( "database/sql" "time" "github.com/nagendra547/go-db-loadbalancer/health" "github.com/nagendra547/go-db-loadbalancer/log" "github.com/nagendra547/go-db-loadbalancer/mydb" ) /*ReadReplicaRoundRobin - Get a read replica in round robin fashion. Implemented multi threading using go subroutine Other methods can be also implemented in similar fashion */ func ReadReplicaRoundRobin(db *mydb.DB) *sql.DB { log.Info("Checking ReadReplicaRoundRobin") // Check how many replicas are actually available. // If Ping not working then no need to count in available replicas var availableReplicas []interface{} var index int operationDone := make(chan bool) go func() { db.Count++ for i := range db.Readreplicas { temp := db.Readreplicas[i] if err := health.PingReadreplicas(temp); err == nil { availableReplicas = append(availableReplicas, temp) } } log.Info("Available Replicas", len(availableReplicas)) index = db.Count % len(availableReplicas) operationDone <- true }() <-operationDone return availableReplicas[index].(*sql.DB) } // SetConnMaxLifetime - Setting Connection Max Life time func SetConnMaxLifetime(db *mydb.DB, d time.Duration) { log.Info("Setting SetConnMaxLifetime") db.Master.SetConnMaxLifetime(d) for i := range db.Readreplicas { db.Readreplicas[i].(*sql.DB).SetConnMaxLifetime(d) } } // SetMaxIdleConns - Setting Max Idle connections func SetMaxIdleConns(db *mydb.DB, n int) { log.Info("Setting SetMaxIdleConns") if err := health.PingMaster(db); err == nil { db.Master.SetMaxIdleConns(n) } for i := range db.Readreplicas { r1 := db.Readreplicas[i] if err := health.PingReadreplicas(r1); err == nil { r1.(*sql.DB).SetMaxIdleConns(n) } else { log.Error(r1, "is down.", "SetMaxIdleConns has been ignored") } } } // SetMaxOpenConns - Setting Max Open connections func SetMaxOpenConns(db *mydb.DB, n int) { log.Info("Setting SetMaxOpenConns") db.Master.SetMaxOpenConns(n) for i := range 
db.Readreplicas { db.Readreplicas[i].(*sql.DB).SetMaxOpenConns(n) } } // Close - func Close(db *mydb.DB) error { log.Info("Closing Master DB") error := db.Master.Close() if error != nil { log.Error(db.Master, "is down") return error } for i := range db.Readreplicas { log.Info("Closing Read Replicas") error := db.Readreplicas[i].(*sql.DB).Close() log.Error(db.Readreplicas[i], "is down") return error } return nil }
package config import ( "fmt" "os" "path/filepath" "github.com/koding/multiconfig" "github.com/tada3/triton/logging" ) const ( defaultConfigFile = "config.yaml" ) var ( homeDir string configInEffect *Config log *logging.Entry ) type Config struct { MySQLHost string `default:"localhsot"` MySQLPort int `default:"3306"` MySQLDatabase string `default:"triton"` MySQLUser string `default:"triton"` MySQLPasswd string `default:triton` SQLiteFile string `default:triton.sqlite` RedisHost string `default:"localhost"` RedisPort int `default:6379` RedisPasswd string SoundFileBaseUrl string TranslationAPIKey string } func init() { hd, err := getHomeDir() if err != nil { panic(err) } homeDir = hd configLogging() log = logging.NewEntry("config") err = parseConfig() if err != nil { panic(err) } } func GetHomeDir() string { return homeDir } func GetConfig() *Config { return configInEffect } func getHomeDir() (string, error) { dir := os.Getenv("TRITON_HOME") if dir != "" { if exists(dir) { return dir, nil } return "", fmt.Errorf("Invalid $TRITON_HOME: %s", dir) } wd, err := os.Getwd() if err != nil { fmt.Println("ERROR! Failed to get the current working directory.", err) wd = "???" } fmt.Printf("$TRITON_HOME is not defined. 
Use current dir (%s) as home dir.\n", wd) return ".", nil } func parseConfig() error { configFile := os.Getenv("TRITON_CONFIG_FILE") if configFile == "" { configFile = filepath.Join(homeDir, defaultConfigFile) } var mc *multiconfig.DefaultLoader if exists(configFile) { //fmt.Printf("Parsing %s..\n", configFile) log.Info("Parsing %s..", configFile) mc = multiconfig.NewWithPath(configFile) } else { //fmt.Printf("WARN %s is not found.\n", configFile) log.Warn("%s is not found.", configFile) mc = multiconfig.New() } cfg := &Config{} err := mc.Load(cfg) if err != nil { return err } configInEffect = cfg return nil } func exists(filename string) bool { _, err := os.Stat(filename) return err == nil } func configLogging() { logging.SetLevel(logging.DEBUG) conf1 := logging.OutputConfig{ OutputType: logging.STDOUT, } conf2 := logging.FileOutputConfig{ OutputConfig: logging.OutputConfig{ OutputType: logging.FILE, }, Filename: filepath.Join(homeDir, "log", "triton.log"), } configs := []interface{}{conf1, conf2} err := logging.SetOutputByOutputConfig(configs) if err != nil { fmt.Printf("ERROR! Failed to configure logging: %+v\n", err) } }
// Copyright 2014 Chris Monson <shiblon@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package keyheap // import "entrogo.com/taskstore/keyheap"

import (
	"fmt"
)

// thing is a minimal Item implementation for the examples: a unique key (id)
// paired with a heap priority.
type thing struct {
	id       int64
	priority int64
}

// Priority returns the value the heap orders by (lower pops first).
func (t *thing) Priority() int64 {
	return t.priority
}

// Key returns the unique lookup key of this item.
func (t *thing) Key() int64 {
	return t.id
}

func (t *thing) String() string {
	return fmt.Sprintf("thing %v: priority=%v", t.id, t.priority)
}

func Example_new() {
	heap := New()
	fmt.Println(heap)
	// Output:
	// KeyHeap([])
}

func Example_newFromItems() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	fmt.Println(q)
	// Output:
	//
	// KeyHeap([
	// {0:thing 3: priority=999}
	// {1:thing 5: priority=1002}
	// {2:thing 1: priority=1000}
	// {3:thing 4: priority=1005}
	// {4:thing 2: priority=1004}
	// {5:thing 6: priority=1001}
	// {6:thing 7: priority=1003}
	// ])
}

func ExampleKeyHeap_Push() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	q.Push(&thing{8, 998})
	fmt.Println(q)
	// Output:
	//
	// KeyHeap([
	// {0:thing 8: priority=998}
	// {1:thing 3: priority=999}
	// {2:thing 1: priority=1000}
	// {3:thing 5: priority=1002}
	// {4:thing 2: priority=1004}
	// {5:thing 6: priority=1001}
	// {6:thing 7: priority=1003}
	// {7:thing 4: priority=1005}
	// ])
}

func ExampleKeyHeap_Pop() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})

	// Pop the lowest priority item.
	thing := q.Pop()
	fmt.Println(thing)
	fmt.Println(q)
	// Output:
	//
	// thing 3: priority=999
	// KeyHeap([
	// {0:thing 1: priority=1000}
	// {1:thing 5: priority=1002}
	// {2:thing 6: priority=1001}
	// {3:thing 4: priority=1005}
	// {4:thing 2: priority=1004}
	// {5:thing 7: priority=1003}
	// ])
}

func ExampleKeyHeap_PopAt() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})

	// Pop the item at heap index 4 (not necessarily the lowest priority).
	thing := q.PopAt(4)
	fmt.Println(thing)
	fmt.Println(q)
	// Output:
	//
	// thing 2: priority=1004
	// KeyHeap([
	// {0:thing 3: priority=999}
	// {1:thing 5: priority=1002}
	// {2:thing 1: priority=1000}
	// {3:thing 4: priority=1005}
	// {4:thing 7: priority=1003}
	// {5:thing 6: priority=1001}
	// ])
}

func ExampleKeyHeap_Peek() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	fmt.Println(q.Peek())
	// Output:
	//
	// thing 3: priority=999
}

func ExampleKeyHeap_PeekAt() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	fmt.Println(q.PeekAt(3))
	// Output:
	//
	// thing 4: priority=1005
}

func ExampleKeyHeap_PeekByKey() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	fmt.Println(q.PeekByKey(5))
	// Output:
	//
	// thing 5: priority=1002
}

func ExampleKeyHeap_PopByKey() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	fmt.Println(q.PopByKey(2))
	fmt.Println(q)
	// Output:
	//
	// thing 2: priority=1004
	// KeyHeap([
	// {0:thing 3: priority=999}
	// {1:thing 5: priority=1002}
	// {2:thing 1: priority=1000}
	// {3:thing 4: priority=1005}
	// {4:thing 7: priority=1003}
	// {5:thing 6: priority=1001}
	// ])
}

func ExampleKeyHeap_PopRandomAvailable_onlyOne() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	thing := q.PopRandomConstrained(999) // Only one matches.
	fmt.Println(thing)
	// Output:
	//
	// thing 3: priority=999
}

func ExampleKeyHeap_PopRandomAvailable_none() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	thing := q.PopRandomConstrained(998) // None match
	fmt.Println(thing)
	// Output:
	//
	// <nil>
}

func ExampleKeyHeap_PopRandomAvailable_random() {
	q := NewFromItems([]Item{
		&thing{1, 1000},
		&thing{2, 1004},
		&thing{3, 999},
		&thing{4, 1005},
		&thing{5, 1002},
		&thing{6, 1001},
		&thing{7, 1003},
	})
	// The result is random among items with priority <= 1001, so the example
	// only checks membership rather than an exact value.
	t := q.PopRandomConstrained(1001)
	switch t.(*thing).id {
	case 1, 3, 6:
		fmt.Println("Yes")
	default:
		fmt.Println("No")
	}
	// Output:
	//
	// Yes
}
package main

import "fmt"

// main walks the (currently empty) list of test cases, printing a header and
// the case contents for each one.
func main() {
	testCases := [][]int{
		{},
		{},
	}
	for idx, tc := range testCases[0:] {
		fmt.Println("## case", idx)
		// solve
		fmt.Println(tc)
	}
}
package main

// main is an empty entry point; the exercise is driven through extractMantra.
func main() {

}

// extractMantra is a stub: it currently ignores its inputs and always
// returns 0. TODO: implement — presumably it should count (or locate)
// occurrences of mantra within the rows of matrix; confirm the intended
// contract before implementing.
func extractMantra(matrix []string, mantra string) int {
	return 0
}
// Copyright 2019 CUE Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"github.com/spf13/cobra"
)

// newVersionCmd builds the `cue version` subcommand; the actual work is done
// by runVersion (defined elsewhere in this package).
func newVersionCmd(c *Command) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "version",
		Short: "print CUE version",
		Long:  ``,
		RunE:  mkRunE(c, runVersion),
	}
	return cmd
}

// defaultVersion is reported when no version was linked in and none can be
// derived from build info.
const defaultVersion = "(devel)"

// version be set by a builder using
// -ldflags='-X cuelang.org/go/cmd/cue/cmd.version=<version>'.
// However, people should prefer building via a mechanism which
// resolves cuelang.org/go as a dependency (and not the main
// module), in which case the version information is determined
// from the *debug.BuildInfo (see below). So this mechanism is
// really considered legacy.
var version = defaultVersion
package divider_test

import (
	"testing"
	"time"

	"github.com/b-2019-apt-test/divider/internal/divider"
	"github.com/b-2019-apt-test/divider/internal/divider/mocks"
)

// runner executes one mock test case: it wires a fake provider and reporter
// into a job processor, starts it, and checks both the returned error and
// the reported results against the case's expectations.
var runner = func(t *testing.T, test mocks.TestCase) {
	reporter := mocks.NewFakeResultReporter()
	proc := mocks.NewJobProcessor().
		SetJobProvider(mocks.NewFakeJobProvider(test.Jobs)).
		SetResultReporter(reporter)
	if err := proc.Start(); err != test.Err {
		t.Fatalf("expected err %v, got: %v", test.Err, err)
	}
	mocks.Validate(t, test, reporter.Results())
}

// TestCases runs the whole table of predefined mock cases through runner.
func TestCases(t *testing.T) {
	mocks.RunTestCases(t, runner)
}

// TestWorkerSkipsInvalidJob checks that a malformed job is skipped rather
// than failing the whole run.
func TestWorkerSkipsInvalidJob(t *testing.T) {
	runner(t, mocks.InvalidJob)
}

// TestStartUnconfiguredJobProcessor verifies that Start reports each missing
// dependency in a fixed order, configuring them one by one until Start
// finally succeeds.
func TestStartUnconfiguredJobProcessor(t *testing.T) {
	proc := divider.NewJobProcessor().SetWorkersCount(10)
	if err := proc.Start(); err != divider.ErrJobProviderNotSpecified {
		t.Fatalf("unconfigured job provider ignored: %v", err)
	}
	proc.SetJobProvider(mocks.NewFakeJobProvider(mocks.AllValid.Jobs))
	if err := proc.Start(); err != divider.ErrResultReporterNotSpecified {
		t.Fatalf("unconfigured result reporter ignored: %v", err)
	}
	proc.SetResultReporter(mocks.NewFakeResultReporter())
	if err := proc.Start(); err != divider.ErrLoggerNotSpecified {
		t.Fatalf("unconfigured logger ignored: %v", err)
	}
	proc.SetLogger(mocks.FakeLog)
	if err := proc.Start(); err != divider.ErrDividerNotSpecified {
		t.Fatalf("unconfigured divider ignored: %v", err)
	}
	proc.SetDivider(mocks.FakeDivider)
	if err := proc.Start(); err != nil {
		t.Fatal(err)
	}
}

// TestJobProcessorProccessedCount checks that Processed() equals the number
// of jobs the provider handed out.
func TestJobProcessorProccessedCount(t *testing.T) {
	proc := mocks.NewJobProcessor().
		SetJobProvider(mocks.NewFakeJobProvider(mocks.AllValid.Jobs))
	if err := proc.Start(); err != nil {
		t.Fatal(err)
	}
	if int(proc.Processed()) != len(mocks.AllValid.Jobs) {
		t.Fatal("Invalid number of processed jobs")
	}
}

// TestJobProcessorStop stops a processor fed by a deliberately stuck provider
// and asserts Start returns within ~3 seconds of the Stop call.
//
// NOTE(review): `stop` is written in the goroutine and read in the main test
// goroutine without synchronization — a data race under `go test -race`;
// consider signalling through a channel instead.
func TestJobProcessorStop(t *testing.T) {
	var stop time.Time

	// slowed down provider
	provider := mocks.NewFakeJobProvider(mocks.AllValid.Jobs)
	provider.FailEvery(1).FailFn(mocks.StuckFailFn)

	proc := mocks.NewJobProcessor().SetJobProvider(provider)

	go func() {
		time.Sleep(200 * time.Millisecond)
		proc.Stop()
		stop = time.Now()
	}()

	if err := proc.Start(); err != nil {
		t.Fatal(err)
	}

	if (time.Since(stop).Nanoseconds() / 1e9) > 3 {
		t.Fatal("Processor did not stop in expected time.")
	}
}

// TestProviderNonTerminalError: a transient provider failure must not stop
// processing.
func TestProviderNonTerminalError(t *testing.T) {
	provider := mocks.NewFakeJobProvider(mocks.AllValid.Jobs)
	provider.FailOn(1).FailFn(mocks.NonTerminalErrorFailFn)
	mocks.ProviderNonTerminalErrorTest(t, provider)
}

// TestProviderTerminalError: a terminal provider failure must stop
// processing.
func TestProviderTerminalError(t *testing.T) {
	provider := mocks.NewFakeJobProvider(mocks.AllValid.Jobs)
	provider.FailOn(1).FailFn(mocks.TerminalErrorFailFn)
	mocks.ProviderTerminalErrorTest(t, provider)
}
package backend_controller

import (
	"2021/yunsongcailu/yunsong_server/backend/backend_model"
	"2021/yunsongcailu/yunsong_server/backend/backend_service"
	"2021/yunsongcailu/yunsong_server/common"
	"2021/yunsongcailu/yunsong_server/param/backend_param"
	"2021/yunsongcailu/yunsong_server/tools"
	"github.com/gin-gonic/gin"
	"strconv"
)

// bms is the shared manager service used by all handlers in this file.
var bms = backend_service.NewManagerServer()

// PostRegisterManager registers a new manager account: it binds the request
// parameters, verifies the captcha, rejects duplicate account names, hashes
// the password with SHA-256 and stores the new manager with power level 0.
func PostRegisterManager(ctx *gin.Context) {
	var managerParam backend_param.ManagerParam
	err := ctx.ShouldBind(&managerParam)
	if err != nil {
		common.Failed(ctx, "获取注册数据失败")
		return
	}
	// Verify the captcha before touching the database.
	var code common.VerifyCaptchaBody
	code.Id = managerParam.CaptchaId
	code.VerifyValue = managerParam.Captcha
	if !common.VerifyCaptchaCode(code) {
		common.Failed(ctx, "验证码错误,点击验证码刷新")
		return
	}
	// Check whether the account name is already registered.
	// NOTE(review): GetManagerByPassword is called with the account *name*
	// despite its name suggesting a password lookup — confirm the service's
	// actual contract.
	res, err := bms.GetManagerByPassword(managerParam.Name)
	if err != nil {
		common.Failed(ctx, "验证账号是否注册出现错误")
		return
	}
	if res.Id > 0 {
		common.Failed(ctx, "账号已经存在")
		return
	}
	var manager backend_model.ManagerModel
	manager.ManagerName = managerParam.Name
	// Store only the SHA-256 hash of the password.
	password := tools.EncodeSha256(managerParam.Password)
	manager.ManagerPassword = password
	manager.ManagerPower = 0
	err = bms.AddManager(manager)
	if err != nil {
		common.Failed(ctx, "注册用户失败")
		return
	}
	common.Success(ctx, "注册成功")
	return
}

// PostLoginManager logs a manager in: it binds the request parameters,
// verifies the captcha, compares the SHA-256 password hash, then issues a
// token and a session keyed by the manager id.
func PostLoginManager(ctx *gin.Context) {
	var managerParam backend_param.ManagerParam
	err := ctx.ShouldBind(&managerParam)
	if err != nil {
		common.Failed(ctx, "获取登录参数失败")
		return
	}
	// Verify the captcha before touching the database.
	var code common.VerifyCaptchaBody
	code.Id = managerParam.CaptchaId
	code.VerifyValue = managerParam.Captcha
	if !common.VerifyCaptchaCode(code) {
		common.Failed(ctx, "验证码错误,点击验证码刷新")
		return
	}
	// See NOTE(review) in PostRegisterManager about this method's name.
	res, err := bms.GetManagerByPassword(managerParam.Name)
	if err != nil || res.Id == 0 {
		common.Failed(ctx, "验证账号失败")
		return
	}
	password := tools.EncodeSha256(managerParam.Password)
	if res.ManagerPassword != password {
		common.Failed(ctx, "密码错误")
		return
	}
	token, err := common.ReleaseToken(res.Id)
	if err != nil {
		common.Failed(ctx, "发放token失败")
		return
	}
	// The token is returned to the client in the ManagerPassword field,
	// which also prevents the password hash from leaking in the response.
	res.ManagerPassword = token
	idStr := strconv.FormatInt(res.Id, 10)
	err = common.SetSess(ctx, "manager_"+idStr, idStr, 0)
	if err != nil {
		common.Failed(ctx, "设置session失败")
		return
	}
	common.Success(ctx, res)
}
package main

import (
	"log"
	"math/rand"
	"net/http"
	"time"
)

// Start handles the Battlesnake /start request: it parses and logs the
// incoming game state, then responds with this snake's static appearance.
func Start(res http.ResponseWriter, req *http.Request) {
	log.Print("START REQUEST")

	data, err := NewStartRequest(req)
	if err != nil {
		// BUG FIX: the handler previously logged the parse error and then
		// kept going with a zero-value request (dump + respond); reject the
		// malformed request instead.
		log.Printf("Bad start request: %v", err)
		http.Error(res, "bad start request", http.StatusBadRequest)
		return
	}
	dump(data)

	respond(res, StartResponse{
		Taunt:          "battlesnake-go!",
		Color:          "#75CEDD",
		Name:           "battlesnake-go",
		HeadType:       HEAD_PIXEL,
		TailType:       TAIL_ROUND_BUM,
		SecondaryColor: "#F7D3A2",
	})
}

// Move handles the Battlesnake /move request: it parses and logs the board
// state, then answers with a uniformly random direction.
func Move(res http.ResponseWriter, req *http.Request) {
	log.Printf("MOVE REQUEST")

	data, err := NewMoveRequest(req)
	if err != nil {
		// Same fix as in Start: do not continue with invalid data.
		log.Printf("Bad move request: %v", err)
		http.Error(res, "bad move request", http.StatusBadRequest)
		return
	}
	dump(data)

	directions := []string{
		"up",
		"down",
		"left",
		"right",
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	respond(res, MoveResponse{
		Move: directions[r.Intn(4)],
	})
}
package main

import (
	"fmt"
	"log"
	"math"
)

// NorgateMathError is a struct-based custom error carrying the coordinates
// at which the math error occurred plus the underlying cause. It is exported
// ("N" prefix capitalized) so it is usable outside the package.
type NorgateMathError struct {
	lat, long string
	err       error
}

// Error implements the error interface by combining the location with the
// wrapped cause.
func (e *NorgateMathError) Error() string {
	return fmt.Sprintf("a norgate math error occured: %v %v %v", e.lat, e.long, e.err)
}

// main demonstrates the custom error: sqrt of a negative number fails and
// the program exits with the formatted error.
func main() {
	if _, err := sqrt(-10); err != nil {
		log.Fatalln(err)
	}
}

// sqrt returns the square root of f, or a *NorgateMathError when f is
// negative.
func sqrt(f float64) (float64, error) {
	if f >= 0 {
		// implementaion
		return math.Sqrt(f), nil
	}
	cause := fmt.Errorf("norgate math redux: square root of negative number: %v", f)
	return 0, &NorgateMathError{"50.2289 N", "99.4656 W", cause}
}

/*
see use of structs with error type in standard library:
http://www.goinggo.net/2014/11/error-handling-in-go-part-ii.html
http://golang.org/pkg/net/#OpError
http://golang.org/src/pkg/net/dial.go
http://golang.org/src/pkg/net/net.go
http://golang.org/src/pkg/encoding/json/decode.go
*/

// go run main.go
// 2020/06/24 08:21:25 a norgate math error occured: 50.2289 N 99.4656 W norgate math redux: square root of negative number: -10
// exit status 1
package controller

import (
	"fmt"
	appconfig "github.com/allentom/youcomic-api/config"
	ApiError "github.com/allentom/youcomic-api/error"
	"github.com/allentom/youcomic-api/services"
	"github.com/gin-gonic/gin"
	"path"
	"strings"
)

// BookContentHandler serves a book's files: the generated cover thumbnail,
// the original cover, or an individual page, resolved against the book's
// library path.
var BookContentHandler gin.HandlerFunc = func(context *gin.Context) {
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	fileName := context.Param("fileName")

	book, err := services.GetBookById(uint(id))
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}

	// query library
	library, err := services.GetLibraryById(book.LibraryId)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}

	// handle with cover thumbnail
	if strings.Contains(fileName, "cover_thumbnail") {
		thumbnailExt := path.Ext(book.Cover)
		context.File(path.Join(
			appconfig.Config.Store.Root,
			"generate",
			fmt.Sprintf("%d", book.ID),
			fmt.Sprintf("cover_thumbnail%s", thumbnailExt)))
		// BUG FIX: without this return the handler fell through and could
		// write a second file into the same response.
		return
	}

	// handle with original cover
	if fileName == path.Base(book.Cover) {
		context.File(path.Join(library.Path, book.Path, book.Cover))
		return
	}

	// handle with page
	context.File(path.Join(library.Path, book.Path, fileName))
}
package ui

import (
	"fyne.io/fyne/v2"
)

// viewID identifies one of the application's screens.
type viewID uint

// The available screens, in navigation order.
const (
	LIST_TICKETS_VIEW viewID = iota
	SEND_TICKET_VIEW
	CREDENTIALS_VIEW
)

// view wraps the fyne window a screen renders into.
type view struct {
	Win fyne.Window
}
package amazon

import (
	"encoding/base64"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/quilt/quilt/cluster/acl"
	"github.com/quilt/quilt/cluster/cloudcfg"
	"github.com/quilt/quilt/cluster/machine"
	"github.com/quilt/quilt/db"
	"github.com/quilt/quilt/join"
	"github.com/quilt/quilt/util"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"

	log "github.com/Sirupsen/logrus"
)

// The Cluster object represents a connection to Amazon EC2.
type Cluster struct {
	namespace string
	region    string
	client    client

	// newClient builds the region-specific API client; kept as a field so
	// tests can inject a fake.
	newClient func(string) client
}

// awsID uniquely identifies a spot request within a region.
type awsID struct {
	spotID string
	region string
}

// DefaultRegion is the preferred location for machines which haven't a user specified
// region preference.
const DefaultRegion = "us-west-1"

// Regions is the list of supported AWS regions.
var Regions = []string{"ap-southeast-2", "us-west-1", "us-west-2"}

// spotPrice is the maximum bid (USD/hour) for every spot request.
const spotPrice = "0.5"

// Ubuntu 16.04, 64-bit hvm-ssd
var amis = map[string]string{
	"ap-southeast-2": "ami-550c3c36",
	"us-west-1":      "ami-26074946",
	"us-west-2":      "ami-e1fe2281",
}

// Indirections for testability.
var sleep = time.Sleep

var timeout = 5 * time.Minute

// New creates a new Amazon EC2 cluster.
func New(namespace, region string) (*Cluster, error) {
	clst := newAmazon(namespace, region)
	if _, err := clst.List(); err != nil {
		return nil, errors.New("AWS failed to connect")
	}
	return clst, nil
}

// creates a new client, and connects its client to AWS
func newAmazon(namespace, region string) *Cluster {
	clst := &Cluster{
		namespace: strings.ToLower(namespace),
		region:    region,
		newClient: newClient,
	}

	return clst
}

// Boot creates instances in the `clst` configured according to the `bootSet`.
// Identical machines are coalesced into a single spot request; the method
// then tags the requests and waits for them to come up.
func (clst *Cluster) Boot(bootSet []machine.Machine) error {
	clst.connectClient()

	if len(bootSet) <= 0 {
		return nil
	}

	type bootReq struct {
		cfg      string
		size     string
		diskSize int
	}

	bootReqMap := make(map[bootReq]int64) // From boot request to an instance count.
	for _, m := range bootSet {
		br := bootReq{
			cfg:      cloudcfg.Ubuntu(m.SSHKeys, "xenial"),
			size:     m.Size,
			diskSize: m.DiskSize,
		}
		bootReqMap[br] = bootReqMap[br] + 1
	}

	// The security group is the same for every request; fetch (or create) it
	// once instead of once per distinct boot request (loop-invariant hoist).
	groupID, _, err := clst.getCreateSecurityGroup()
	if err != nil {
		return err
	}

	var awsIDs []awsID
	for br, count := range bootReqMap {
		count := count // Don't hand the SDK a pointer to the loop variable.
		cloudConfig64 := base64.StdEncoding.EncodeToString([]byte(br.cfg))
		resp, err := clst.client.RequestSpotInstances(
			&ec2.RequestSpotInstancesInput{
				SpotPrice: aws.String(spotPrice),
				LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
					ImageId:          aws.String(amis[clst.region]),
					InstanceType:     aws.String(br.size),
					UserData:         &cloudConfig64,
					SecurityGroupIds: []*string{aws.String(groupID)},
					BlockDeviceMappings: []*ec2.BlockDeviceMapping{
						blockDevice(br.diskSize),
					},
				},
				InstanceCount: &count,
			})
		if err != nil {
			return err
		}

		for _, request := range resp.SpotInstanceRequests {
			awsIDs = append(awsIDs, awsID{
				spotID: *request.SpotInstanceRequestId,
				region: clst.region})
		}
	}

	if err := clst.tagSpotRequests(awsIDs); err != nil {
		return err
	}

	return clst.wait(awsIDs, true)
}

// Stop shuts down `machines` in `clst`.
func (clst *Cluster) Stop(machines []machine.Machine) error {
	clst.connectClient()

	var ids []awsID
	for _, m := range machines {
		ids = append(ids, awsID{
			region: m.Region,
			spotID: m.ID,
		})
	}

	// Resolve the spot requests to their backing instances (if any).
	spotIDs := getSpotIDs(ids)
	spots, err := clst.client.DescribeSpotInstanceRequests(
		&ec2.DescribeSpotInstanceRequestsInput{
			SpotInstanceRequestIds: aws.StringSlice(spotIDs),
		})
	if err != nil {
		return err
	}

	instIds := []string{}
	for _, spot := range spots.SpotInstanceRequests {
		if spot.InstanceId != nil {
			instIds = append(instIds, *spot.InstanceId)
		}
	}

	// Terminate running instances first, then cancel the spot requests so
	// AWS does not replace the terminated instances.
	if len(instIds) > 0 {
		_, err = clst.client.TerminateInstances(&ec2.TerminateInstancesInput{
			InstanceIds: aws.StringSlice(instIds),
		})
		if err != nil {
			return err
		}
	}

	_, err = clst.client.CancelSpotInstanceRequests(
		&ec2.CancelSpotInstanceRequestsInput{
			SpotInstanceRequestIds: aws.StringSlice(spotIDs),
		})
	if err != nil {
		return err
	}

	// Block until the machines have actually disappeared.
	if err := clst.wait(ids, false); err != nil {
		return err
	}

	return nil
}

// List queries `clst` for the list of booted machines.
func (clst *Cluster) List() ([]machine.Machine, error) {
	clst.connectClient()
	machines := []machine.Machine{}

	spots, err := clst.client.DescribeSpotInstanceRequests(nil)
	if err != nil {
		return nil, err
	}

	// Instances in our security group, keyed by instance ID below.
	insts, err := clst.client.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("instance.group-name"),
				Values: []*string{aws.String(clst.namespace)},
			},
		},
	})
	if err != nil {
		return nil, err
	}

	instMap := make(map[string]*ec2.Instance)
	for _, res := range insts.Reservations {
		for _, inst := range res.Instances {
			instMap[*inst.InstanceId] = inst
		}
	}

	// Elastic IPs, keyed by the instance they are attached to.
	addrResp, err := clst.client.DescribeAddresses(nil)
	if err != nil {
		return nil, err
	}
	ipMap := map[string]*ec2.Address{}
	for _, ip := range addrResp.Addresses {
		if ip.InstanceId != nil {
			ipMap[*ip.InstanceId] = ip
		}
	}

	for _, spot := range spots.SpotInstanceRequests {
		if *spot.State != ec2.SpotInstanceStateActive &&
			*spot.State != ec2.SpotInstanceStateOpen {
			continue
		}

		var inst *ec2.Instance
		if spot.InstanceId != nil {
			inst = instMap[*spot.InstanceId]
		}

		// Due to a race condition in the AWS API, it's possible that
		// spot requests might lose their Tags. If handled naively,
		// those spot requests would technically be without a namespace,
		// meaning the instances they create would be live forever as
		// zombies.
		//
		// To mitigate this issue, we rely not only on the spot request
		// tags, but additionally on the instance security group. If a
		// spot request has a running instance in the appropriate
		// security group, it is by definition in our namespace.
		// Thus, we only check the tags for spot requests without
		// running instances.
		if inst == nil {
			var isOurs bool
			for _, tag := range spot.Tags {
				ns := clst.namespace
				if tag != nil && tag.Key != nil && *tag.Key == ns {
					isOurs = true
					break
				}
			}

			if !isOurs {
				continue
			}
		}

		machine := machine.Machine{
			ID:       *spot.SpotInstanceRequestId,
			Region:   clst.region,
			Provider: db.Amazon,
		}

		if inst != nil {
			if *inst.State.Name != ec2.InstanceStateNamePending &&
				*inst.State.Name != ec2.InstanceStateNameRunning {
				continue
			}

			if inst.PublicIpAddress != nil {
				machine.PublicIP = *inst.PublicIpAddress
			}

			if inst.PrivateIpAddress != nil {
				machine.PrivateIP = *inst.PrivateIpAddress
			}

			if inst.InstanceType != nil {
				machine.Size = *inst.InstanceType
			}

			// Look up the root volume to report the disk size.
			if len(inst.BlockDeviceMappings) != 0 {
				volumeID := inst.BlockDeviceMappings[0].Ebs.VolumeId
				filters := []*ec2.Filter{
					{
						Name: aws.String("volume-id"),
						Values: []*string{
							aws.String(*volumeID),
						},
					},
				}

				volumeInfo, err := clst.client.DescribeVolumes(
					&ec2.DescribeVolumesInput{
						Filters: filters,
					})
				if err != nil {
					return nil, err
				}
				if len(volumeInfo.Volumes) == 1 {
					machine.DiskSize = int(
						*volumeInfo.Volumes[0].Size)
				}
			}

			if ip := ipMap[*inst.InstanceId]; ip != nil {
				machine.FloatingIP = *ip.PublicIp
			}
		}

		machines = append(machines, machine)
	}

	return machines, nil
}

// UpdateFloatingIPs updates Elastic IPs <> EC2 instance associations.
func (clst *Cluster) UpdateFloatingIPs(machines []machine.Machine) error {
	clst.connectClient()

	addressDesc, err := clst.client.DescribeAddresses(nil)
	if err != nil {
		return err
	}

	// Map IP Address -> Elastic IP.
	addresses := map[string]*string{}
	// Map EC2 Instance -> Elastic IP association.
	associations := map[string]*string{}
	for _, addr := range addressDesc.Addresses {
		addresses[*addr.PublicIp] = addr.AllocationId
		if addr.InstanceId != nil {
			associations[*addr.InstanceId] = addr.AssociationId
		}
	}

	// Map spot request ID to EC2 instance ID.
	var spotIDs []string
	for _, machine := range machines {
		spotIDs = append(spotIDs, machine.ID)
	}
	instances, err := clst.getInstances(clst.region, spotIDs)
	if err != nil {
		return err
	}

	for _, machine := range machines {
		if machine.FloatingIP == "" {
			// No floating IP desired: drop any existing association.
			instanceID := *instances[machine.ID].InstanceId
			associationID := associations[instanceID]
			if associationID == nil {
				continue
			}

			input := ec2.DisassociateAddressInput{
				AssociationId: associationID,
			}
			_, err = clst.client.DisassociateAddress(&input)
			if err != nil {
				return err
			}
		} else {
			// Associate the requested Elastic IP with the instance.
			allocationID := addresses[machine.FloatingIP]
			input := ec2.AssociateAddressInput{
				InstanceId:   instances[machine.ID].InstanceId,
				AllocationId: allocationID,
			}
			if _, err := clst.client.AssociateAddress(&input); err != nil {
				return err
			}
		}
	}

	return nil
}

// connectClient lazily instantiates the region-specific API client.
func (clst *Cluster) connectClient() {
	if clst.client == nil {
		clst.client = clst.newClient(clst.region)
	}
}

// getInstances maps each spot request ID in spotIDs to its EC2 instance
// (nil when the request has no instance yet).
func (clst Cluster) getInstances(region string, spotIDs []string) (
	map[string]*ec2.Instance, error) {
	clst.connectClient()
	instances := map[string]*ec2.Instance{}

	spotQuery := ec2.DescribeSpotInstanceRequestsInput{
		SpotInstanceRequestIds: aws.StringSlice(spotIDs),
	}
	spotResp, err := clst.client.DescribeSpotInstanceRequests(&spotQuery)
	if err != nil {
		return nil, err
	}

	var instanceIDs []string
	for _, spot := range spotResp.SpotInstanceRequests {
		if spot.InstanceId == nil {
			instances[*spot.SpotInstanceRequestId] = nil
		} else {
			instanceIDs = append(instanceIDs, *spot.InstanceId)
		}
	}

	instQuery := ec2.DescribeInstancesInput{
		InstanceIds: aws.StringSlice(instanceIDs),
	}
	instResp, err := clst.client.DescribeInstances(&instQuery)
	if err != nil {
		return nil, err
	}

	for _, reservation := range instResp.Reservations {
		for _, instance := range reservation.Instances {
			instances[*instance.SpotInstanceRequestId] = instance
		}
	}

	return instances, nil
}

// tagSpotRequests tags the given spot requests with the cluster namespace,
// retrying for up to 30 attempts (5s apart); on persistent failure the
// requests are cancelled so untagged zombies are not left behind.
func (clst *Cluster) tagSpotRequests(awsIDs []awsID) error {
	var err error
	spotIDs := getSpotIDs(awsIDs)
	for i := 0; i < 30; i++ {
		_, err = clst.client.CreateTags(&ec2.CreateTagsInput{
			Tags: []*ec2.Tag{
				{
					Key:   aws.String(clst.namespace),
					Value: aws.String(""),
				},
			},
			Resources: aws.StringSlice(spotIDs),
		})
		if err == nil {
			return nil
		}
		time.Sleep(5 * time.Second)
	}

	log.Warn("Failed to tag spot requests: ", err)
	clst.client.CancelSpotInstanceRequests(
		&ec2.CancelSpotInstanceRequestsInput{
			SpotInstanceRequestIds: aws.StringSlice(spotIDs),
		})

	return err
}

/* Wait for the spot request 'ids' to have booted or terminated depending on the value
 * of 'boot' */
func (clst *Cluster) wait(awsIDs []awsID, boot bool) error {
	return util.WaitFor(func() bool {
		machines, err := clst.List()
		if err != nil {
			// NOTE(review): a List error makes WaitFor treat the wait as
			// satisfied (returns true) — presumably to avoid spinning on a
			// broken connection; confirm this is intended.
			log.WithError(err).Warn("Failed to get machines.")
			return true
		}

		exists := make(map[awsID]struct{})
		for _, inst := range machines {
			// When booting, if the machine isn't configured completely
			// when the List() call was made, the cluster will fail to join
			// and boot them twice. When halting, we don't consider this as
			// the opposite will happen and we'll try to halt multiple times.
			// To halt, we need the machines to be completely gone.
			if boot && inst.Size == "" {
				continue
			}

			id := awsID{
				spotID: inst.ID,
				region: inst.Region,
			}
			exists[id] = struct{}{}
		}

		for _, id := range awsIDs {
			if _, ok := exists[id]; ok != boot {
				return false
			}
		}

		return true
	}, 10*time.Second, timeout)
}

// isDoneWaiting is a single-shot variant of the wait predicate above.
// NOTE(review): it appears to be unused within this file — possibly dead
// code or used by tests; confirm before removing.
func (clst *Cluster) isDoneWaiting(awsIDs []awsID, boot bool) (bool, error) {
	machines, err := clst.List()
	if err != nil {
		log.WithError(err).Warn("Failed to get machines.")
		return true, err
	}

	exists := make(map[awsID]struct{})
	for _, inst := range machines {
		// If the machine wasn't configured completely when the List()
		// call was made, the cluster will fail to join and boot them
		// twice.
		if inst.Size == "" {
			continue
		}

		id := awsID{
			spotID: inst.ID,
			region: inst.Region,
		}
		exists[id] = struct{}{}
	}

	for _, id := range awsIDs {
		if _, ok := exists[id]; ok != boot {
			return false, nil
		}
	}

	return true, nil
}

// SetACLs adds and removes acls in `clst` so that it conforms to `acls`.
func (clst *Cluster) SetACLs(acls []acl.ACL) error {
	clst.connectClient()
	groupID, ingress, err := clst.getCreateSecurityGroup()
	if err != nil {
		return err
	}

	rangesToAdd, foundGroup, rulesToRemove := syncACLs(acls, groupID, ingress)

	if len(rangesToAdd) != 0 {
		logACLs(true, rangesToAdd)
		_, err = clst.client.AuthorizeSecurityGroupIngress(
			&ec2.AuthorizeSecurityGroupIngressInput{
				GroupName:     aws.String(clst.namespace),
				IpPermissions: rangesToAdd,
			},
		)
		if err != nil {
			return err
		}
	}

	if !foundGroup {
		// Allow members of the group to talk to each other.
		log.WithField("Group", clst.namespace).Debug("Amazon: Add group")
		_, err = clst.client.AuthorizeSecurityGroupIngress(
			&ec2.AuthorizeSecurityGroupIngressInput{
				GroupName: aws.String(
					clst.namespace),
				SourceSecurityGroupName: aws.String(
					clst.namespace),
			},
		)
		if err != nil {
			return err
		}
	}

	if len(rulesToRemove) != 0 {
		logACLs(false, rulesToRemove)
		_, err = clst.client.RevokeSecurityGroupIngress(
			&ec2.RevokeSecurityGroupIngressInput{
				GroupName:     aws.String(clst.namespace),
				IpPermissions: rulesToRemove,
			},
		)
		if err != nil {
			return err
		}
	}

	return nil
}

// getCreateSecurityGroup returns the ID and current ingress rules of the
// namespace's security group, creating the group when it does not exist yet.
func (clst *Cluster) getCreateSecurityGroup() (
	string, []*ec2.IpPermission, error) {

	resp, err := clst.client.DescribeSecurityGroups(
		&ec2.DescribeSecurityGroupsInput{
			Filters: []*ec2.Filter{
				{
					Name: aws.String("group-name"),
					Values: []*string{
						aws.String(clst.namespace),
					},
				},
			},
		})
	if err != nil {
		return "", nil, err
	}

	groups := resp.SecurityGroups
	if len(groups) > 1 {
		err := errors.New("Multiple Security Groups with the same name: " +
			clst.namespace)
		return "", nil, err
	}

	if len(groups) == 1 {
		return *groups[0].GroupId, groups[0].IpPermissions, nil
	}

	csgResp, err := clst.client.CreateSecurityGroup(
		&ec2.CreateSecurityGroupInput{
			Description: aws.String("Quilt Group"),
			GroupName:   aws.String(clst.namespace),
		})
	if err != nil {
		return "", nil, err
	}

	return *csgResp.GroupId, nil, nil
}

// syncACLs returns the permissions that need to be removed and added in order
// for the cloud ACLs to match the policy.
// rangesToAdd is guaranteed to always have exactly one item in the IpRanges slice.
func syncACLs(desiredACLs []acl.ACL, desiredGroupID string,
	current []*ec2.IpPermission) (rangesToAdd []*ec2.IpPermission,
	foundGroup bool, toRemove []*ec2.IpPermission) {

	// Flatten the current rules so each entry holds exactly one IP range,
	// matching the shape of the desired rules built below.
	var currRangeRules []*ec2.IpPermission
	for _, perm := range current {
		for _, ipRange := range perm.IpRanges {
			currRangeRules = append(currRangeRules, &ec2.IpPermission{
				IpProtocol: perm.IpProtocol,
				FromPort:   perm.FromPort,
				ToPort:     perm.ToPort,
				IpRanges: []*ec2.IpRange{
					ipRange,
				},
			})
		}
		for _, pair := range perm.UserIdGroupPairs {
			if *pair.GroupId != desiredGroupID {
				toRemove = append(toRemove, &ec2.IpPermission{
					UserIdGroupPairs: []*ec2.UserIdGroupPair{
						pair,
					},
				})
			} else {
				foundGroup = true
			}
		}
	}

	// Each desired ACL expands into a TCP, a UDP, and an ICMP rule.
	var desiredRangeRules []*ec2.IpPermission
	for _, acl := range desiredACLs {
		desiredRangeRules = append(desiredRangeRules, &ec2.IpPermission{
			FromPort: aws.Int64(int64(acl.MinPort)),
			ToPort:   aws.Int64(int64(acl.MaxPort)),
			IpRanges: []*ec2.IpRange{
				{
					CidrIp: aws.String(acl.CidrIP),
				},
			},
			IpProtocol: aws.String("tcp"),
		}, &ec2.IpPermission{
			FromPort: aws.Int64(int64(acl.MinPort)),
			ToPort:   aws.Int64(int64(acl.MaxPort)),
			IpRanges: []*ec2.IpRange{
				{
					CidrIp: aws.String(acl.CidrIP),
				},
			},
			IpProtocol: aws.String("udp"),
		}, &ec2.IpPermission{
			FromPort: aws.Int64(-1),
			ToPort:   aws.Int64(-1),
			IpRanges: []*ec2.IpRange{
				{
					CidrIp: aws.String(acl.CidrIP),
				},
			},
			IpProtocol: aws.String("icmp"),
		})
	}

	_, toAdd, rangesToRemove := join.HashJoin(ipPermSlice(desiredRangeRules),
		ipPermSlice(currRangeRules), permToACLKey, permToACLKey)
	for _, intf := range toAdd {
		rangesToAdd = append(rangesToAdd, intf.(*ec2.IpPermission))
	}
	for _, intf := range rangesToRemove {
		toRemove = append(toRemove, intf.(*ec2.IpPermission))
	}

	return rangesToAdd, foundGroup, toRemove
}

// logACLs debug-logs the ACL changes being applied; add selects between
// "Add" and "Remove" wording.
func logACLs(add bool, perms []*ec2.IpPermission) {
	action := "Remove"
	if add {
		action = "Add"
	}

	for _, perm := range perms {
		if len(perm.IpRanges) != 0 {
			// Each rule has three variants (TCP, UDP, and ICMP), but
			// we only want to log once.
			protocol := *perm.IpProtocol
			if protocol != "tcp" {
				continue
			}

			cidrIP := *perm.IpRanges[0].CidrIp
			ports := fmt.Sprintf("%d", *perm.FromPort)
			if *perm.FromPort != *perm.ToPort {
				ports += fmt.Sprintf("-%d", *perm.ToPort)
			}
			log.WithField("ACL",
				fmt.Sprintf("%s:%s", cidrIP, ports)).
				Debugf("Amazon: %s ACL", action)
		} else {
			log.WithField("Group",
				*perm.UserIdGroupPairs[0].GroupName).
				Debugf("Amazon: %s group", action)
		}
	}
}

// blockDevice returns the block device we use for our AWS machines.
func blockDevice(diskSize int) *ec2.BlockDeviceMapping {
	return &ec2.BlockDeviceMapping{
		DeviceName: aws.String("/dev/sda1"),
		Ebs: &ec2.EbsBlockDevice{
			DeleteOnTermination: aws.Bool(true),
			VolumeSize:          aws.Int64(int64(diskSize)),
			VolumeType:          aws.String("gp2"),
		},
	}
}

// getSpotIDs extracts the spot request IDs from a list of awsIDs.
func getSpotIDs(ids []awsID) []string {
	var spotIDs []string
	for _, id := range ids {
		spotIDs = append(spotIDs, id.spotID)
	}

	return spotIDs
}

// groupByRegion buckets awsIDs by their region.
// NOTE(review): appears unused within this file — possibly used by tests.
func groupByRegion(ids []awsID) map[string][]awsID {
	grouped := make(map[string][]awsID)
	for _, id := range ids {
		region := id.region
		if _, ok := grouped[region]; !ok {
			grouped[region] = []awsID{}
		}
		grouped[region] = append(grouped[region], id)
	}

	return grouped
}

// ipPermissionKey is the comparable join key derived from an IpPermission.
type ipPermissionKey struct {
	protocol string
	ipRange  string
	minPort  int
	maxPort  int
}

// permToACLKey converts an *ec2.IpPermission into an ipPermissionKey for
// HashJoin; nil fields map to zero values.
func permToACLKey(permIntf interface{}) interface{} {
	perm := permIntf.(*ec2.IpPermission)

	key := ipPermissionKey{}

	if perm.FromPort != nil {
		key.minPort = int(*perm.FromPort)
	}

	if perm.ToPort != nil {
		key.maxPort = int(*perm.ToPort)
	}

	if perm.IpProtocol != nil {
		key.protocol = *perm.IpProtocol
	}

	if perm.IpRanges[0].CidrIp != nil {
		key.ipRange = *perm.IpRanges[0].CidrIp
	}

	return key
}

// ipPermSlice adapts []*ec2.IpPermission to the join/sort interfaces.
type ipPermSlice []*ec2.IpPermission

func (slc ipPermSlice) Get(ii int) interface{} {
	return slc[ii]
}

func (slc ipPermSlice) Len() int {
	return len(slc)
}

func (slc ipPermSlice) Less(i, j int) bool {
	return strings.Compare(slc[i].String(), slc[j].String()) < 0
}

func (slc ipPermSlice) Swap(i, j int) {
	slc[i], slc[j] = slc[j], slc[i]
}
// Copyright 2018 The Operator-SDK Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package proxy import ( "context" "encoding/json" "errors" "io/ioutil" "net/http" "os" "testing" kcorev1 "k8s.io/api/core/v1" kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/operator-framework/operator-sdk/internal/ansible/proxy/controllermap" ) func TestHandler(t *testing.T) { if testing.Short() { t.Skip("skipping ansible proxy testing in short mode") } mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{Namespace: "default"}) if err != nil { t.Fatalf("Failed to instantiate manager: %v", err) } done := make(chan error) cMap := controllermap.NewControllerMap() err = Run(done, Options{ Address: "localhost", Port: 8888, KubeConfig: mgr.GetConfig(), Cache: nil, RESTMapper: mgr.GetRESTMapper(), ControllerMap: cMap, WatchedNamespaces: []string{"default"}, }) if err != nil { t.Fatalf("Error starting proxy: %v", err) } cl, err := client.New(mgr.GetConfig(), client.Options{}) if err != nil { t.Fatalf("Failed to create the client: %v", err) } po, err := createPod("test", "default", cl) if err != nil { t.Fatalf("Failed to create the pod: %v", err) } resp, err := http.Get("http://localhost:8888/api/v1/namespaces/default/pods/test") if err != nil { t.Fatalf("Error getting pod from proxy: %v", err) 
} defer func() { if err := resp.Body.Close(); err != nil && !errors.Is(err, os.ErrClosed) { t.Errorf("Failed to close response body: (%v)", err) } }() body, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("Error reading response body: %v", err) } // Should only be one string from 'X-Cache' header (explicitly set to HIT in proxy) if resp.Header["X-Cache"] == nil { t.Fatalf("Object was not retrieved from cache") if resp.Header["X-Cache"][0] != "HIT" { t.Fatalf("Cache response header found but got [%v], expected [HIT]", resp.Header["X-Cache"][0]) } } data := kcorev1.Pod{} err = json.Unmarshal(body, &data) if err != nil { t.Fatalf("Error parsing response: %v", err) } if data.Name != "test" { t.Fatalf("Got unexpected pod name: %#v", data.Name) } if err := cl.Delete(context.Background(), po); err != nil { t.Fatalf("Failed to delete the pod: %v", err) } } func createPod(name, namespace string, cl client.Client) (runtime.Object, error) { three := int64(3) pod := &kcorev1.Pod{ ObjectMeta: kmetav1.ObjectMeta{ Name: name, Namespace: namespace, Labels: map[string]string{ "test-label": name, }, }, Spec: kcorev1.PodSpec{ Containers: []kcorev1.Container{{Name: "nginx", Image: "nginx"}}, RestartPolicy: "Always", ActiveDeadlineSeconds: &three, }, } if err := cl.Create(context.Background(), pod); err != nil { return nil, err } return pod, nil }
package giantbomb

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/url"

	"github.com/google/go-querystring/query"
)

// Client talks to the Giant Bomb REST API using a fixed API key.
type Client struct {
	key string
}

// NewClient returns a Client that authenticates with the given API key.
func NewClient(key string) *Client {
	// TODO: check valid key
	return &Client{
		key: key,
	}
}

// Search queries the Giant Bomb search endpoint for a game with the given
// name and returns the first match. It returns an error when the HTTP call
// fails, the API responds with a non-200 status, the payload cannot be
// decoded, or the search yields no results.
func (c *Client) Search(name string) (*GameType, error) {
	searchUrl, err := url.Parse("http://www.giantbomb.com/api/search/")
	if err != nil {
		return nil, err
	}

	opt := SearchRequest{
		Key:       c.key,
		Fields:    "name,original_release_date,platforms,deck",
		Format:    "json",
		Query:     name,
		Resources: "game",
	}
	queryValues, err := query.Values(opt)
	if err != nil {
		return nil, err
	}
	searchUrl.RawQuery = queryValues.Encode()

	resp, err := http.Get(searchUrl.String())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("giantbomb: search returned status %d", resp.StatusCode)
	}

	var payload SearchResponse
	if err = json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return nil, err
	}

	// BUG FIX: indexing Results[0] without a length check panicked whenever
	// the search matched nothing.
	if len(payload.Results) == 0 {
		return nil, errors.New("giantbomb: no results found")
	}
	return payload.Results[0], nil
}

// SearchRequest is the query-string payload for the search endpoint.
type SearchRequest struct {
	Key       string `url:"api_key"`
	Fields    string `url:"field_list"`
	Format    string `url:"format"`
	Query     string `url:"query"`
	Resources string `url:"resources"`
}

// SearchResponse is the JSON envelope returned by the search endpoint.
type SearchResponse struct {
	Results []*GameType `json:"results"`
}

// GameType describes a single game entry in a search response.
type GameType struct {
	Name                string     `json:"name"`
	Deck                string     `json:"deck"`
	OriginalReleaseDate string     `json:"original_release_date"`
	Platforms           []Platform `json:"platforms"`
}

// Platform identifies a platform a game was released on.
type Platform struct {
	Abbreviation string `json:"abbreviation"`
}
package model

import (
	"encoding/json"

	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
)

// Address is the base58-encoded representation of address.Address
type Address string

// NewAddress converts the given address.Address into its base58 string form.
func NewAddress(address *address.Address) Address {
	return Address(address.String())
}

// MarshalJSON encodes the address as a JSON string.
func (a Address) MarshalJSON() ([]byte, error) {
	return json.Marshal(string(a))
}

// UnmarshalJSON decodes a JSON string and validates that it is a well-formed
// base58 address. On any error the receiver is left unmodified.
func (a *Address) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	// BUG FIX: previously the receiver was assigned even when base58
	// validation failed, leaving an invalid value behind alongside the error.
	if _, err := address.FromBase58(s); err != nil {
		return err
	}
	*a = Address(s)
	return nil
}

// Address decodes the base58 string back into an address.Address. It panics
// if the stored string is not valid base58, which cannot happen for values
// produced by NewAddress or a successful UnmarshalJSON.
func (a Address) Address() address.Address {
	addr, err := address.FromBase58(string(a))
	if err != nil {
		panic(err)
	}
	return addr
}
package TLV import ( "github.com/andrewz1/gosmpp/Exception" "github.com/andrewz1/gosmpp/Utils" ) type TLVEmpty struct { TLV Present bool } func NewTLVEmpty() *TLVEmpty { a := &TLVEmpty{} a.Construct() return a } func NewTLVEmptyWithTag(tag uint16) *TLVEmpty { a := NewTLVEmpty() a.Tag = tag return a } func NewTLVEmptyWithTagValue(tag uint16, present bool) *TLVEmpty { a := NewTLVEmptyWithTag(tag) a.Present = present a.MarkValueSet() return a } func (c *TLVEmpty) Construct() { c.TLV.Construct() c.SetRealReference(c) c.MinLength = 0 c.MaxLength = 0 } func (c *TLVEmpty) GetValueData() (b *Utils.ByteBuffer, er *Exception.Exception) { return nil, nil } func (c *TLVEmpty) SetValueData(buffer *Utils.ByteBuffer) *Exception.Exception { if !c.CheckLengthBuffer(buffer) { return Exception.NewExceptionFromStr("TLVEmpty: Buffer length is not valid") } c.SetValue(true) return nil } func (c *TLVEmpty) SetValue(value bool) *Exception.Exception { c.Present = value c.MarkValueSet() return nil } func (c *TLVEmpty) GetValue() (bool, *Exception.Exception) { if c.HasValue() { return c.Present, nil } return false, Exception.ValueNotSetException }
package providers

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/reaxoft/oauth2_proxy/api"
)

// BlitzIdpProvider implements the provider interface for the Blitz identity
// provider.
type BlitzIdpProvider struct {
	*ProviderData
}

// NewBlitzIdpProvider wraps the given ProviderData as a BlitzIdp provider.
func NewBlitzIdpProvider(p *ProviderData) *BlitzIdpProvider {
	p.ProviderName = "BlitzIdp"
	return &BlitzIdpProvider{ProviderData: p}
}

// makeOAuthHeader builds the JSON-accepting, bearer-authorized header set for
// requests made on behalf of the given access token.
func makeOAuthHeader(accessToken string) http.Header {
	h := http.Header{}
	h.Set("Accept", "application/json")
	h.Set("Authorization", fmt.Sprintf("Bearer %s", accessToken))
	return h
}

// ValidateSessionState reports whether the session's access token is still
// accepted by the provider.
func (p *BlitzIdpProvider) ValidateSessionState(s *SessionState) bool {
	return validateToken(p, s.AccessToken, makeOAuthHeader(s.AccessToken))
}

// GetEmailAddress fetches the user's email from the profile endpoint using
// the session's access token.
func (p *BlitzIdpProvider) GetEmailAddress(s *SessionState) (string, error) {
	if s.AccessToken == "" {
		return "", errors.New("missing access token")
	}

	req, err := http.NewRequest("GET", p.ProfileURL.String(), nil)
	if err != nil {
		return "", err
	}
	req.Header = makeOAuthHeader(s.AccessToken)

	var profile struct {
		Email string `json:"email"`
	}
	if err := api.RequestJson(req, &profile); err != nil {
		return "", err
	}
	if profile.Email == "" {
		return "", errors.New("no email")
	}
	return profile.Email, nil
}
package pathfileops import ( "os" "strconv" "testing" ) func TestFilePermissionConfig_IsValid_01(t *testing.T) { // expectedTextCode := "drwxrwxrwx" fh := FileHelper{} // drwxrwxrwx 20000000777 intFMode := fh.ConvertOctalToDecimal(20000000777) osFMode := os.FileMode(intFMode) fPerm, err := FilePermissionConfig{}.NewByFileMode(osFMode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.NewByFileMode(osFMode). "+ "Error='%v' ", err.Error()) return } fPerm.isInitialized = false err = fPerm.IsValid() if err == nil { t.Error("Expected an error to be returned by fPerm.IsValid() because " + "fPerm has not been initialized. NO ERROR RETURNED!") } } func TestFilePermissionConfig_IsValid_02(t *testing.T) { // expectedTextCode := "drwxrwxrwx" fh := FileHelper{} // drwxrwxrwx 20000000777 intFMode := fh.ConvertOctalToDecimal(20000000777) osFMode := os.FileMode(intFMode) fPerm, err := FilePermissionConfig{}.NewByFileMode(osFMode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.NewByFileMode(osFMode). "+ "Error='%v' ", err.Error()) return } fPerm.fileMode = os.FileMode(01000) err = fPerm.IsValid() if err == nil { t.Error("Expected an error to be returned by fPerm.IsValid() because " + "fPerm has an INVALID Entry Type. NO ERROR RETURNED!") } } func TestFilePermissionConfig_IsValid_03(t *testing.T) { // expectedTextCode := "drwxrwxrwx" fh := FileHelper{} // drwxrwxrwx 20000000777 intFMode := fh.ConvertOctalToDecimal(20000000777) osFMode := os.FileMode(intFMode) fPerm, err := FilePermissionConfig{}.NewByFileMode(osFMode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.NewByFileMode(osFMode). "+ "Error='%v' ", err.Error()) return } err = fPerm.IsValid() if err != nil { t.Errorf("Expected no error to be returned by fPerm.IsValid(). "+ "However, an error was returned. 
Error='%v' ", err.Error()) } } func TestFilePermissionConfig_New_01(t *testing.T) { permissionStr := "-rwxrwxrwx" fPermCfg, err := FilePermissionConfig{}.New(permissionStr) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.New(permissionStr) "+ "Error='%v' ", err.Error()) return } actualTextCode, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPermCfg.GetPermissionTextCode() "+ "Error='%v' ", err.Error()) } if permissionStr != actualTextCode { t.Errorf("Error: Expected actual text code='%v' .Instead, "+ "actual text code='%v'", permissionStr, actualTextCode) } } func TestFilePermissionConfig_New_02(t *testing.T) { permissionStr := "xvumnoqade" _, err := FilePermissionConfig{}.New(permissionStr) if err == nil { t.Error("Expected error return from FilePermissionConfig{}.New(permissionStr) " + "because of invalid permissionStr. NO ERROR WAS RETURNED!") } } func TestFilePermissionConfig_NewByComponents_01(t *testing.T) { entryType, err := OsFilePermissionCode(0).GetNewFromFileMode(OsFilePermCode.ModeNone()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeNone()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "-rwxrwxrwx" permissionStr := "-rwxrwxrwx" fPermCfg, err := FilePermissionConfig{}.NewByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.NewByComponents(entryType, "+ "permissionStr). entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) return } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) return } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. 
Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_NewByComponents_02(t *testing.T) { entryType, err := OsFilePermissionCode(0).GetNewFromFileMode(OsFilePermCode.ModeDir()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeDir()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "drwxrwxrwx" permissionStr := "rwxrwxrwx" fPermCfg, err := FilePermissionConfig{}.NewByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.NewByComponents(entryType, "+ "permissionStr). entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) return } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_NewByComponents_03(t *testing.T) { // ModeSetuid() os.ModeSetuid "u" setuid entryType, err := OsFilePermissionCode(0).GetNewFromFileMode(OsFilePermCode.ModeSetuid()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeSetuid()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "urw-rw-rw-" permissionStr := "rw-rw-rw-" fPermCfg, err := FilePermissionConfig{}.NewByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.NewByComponents(entryType, "+ "permissionStr). entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) return } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). 
"+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_NewByComponents_04(t *testing.T) { // Bad Entry Type Code entryType := OsFilePermissionCode(999) permissionStr := "rw-rw-rw-" _, err := FilePermissionConfig{}.NewByComponents(entryType, permissionStr) if err == nil { t.Error("Expected error return from bad entry type code 999. " + "However, NO ERROR WAS RETURNED! ") } } func TestFilePermissionConfig_NewByFileMode_01(t *testing.T) { expectedFileMode := os.FileMode(0666) fPerm, err := FilePermissionConfig{}.NewByFileMode(expectedFileMode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.NewByFileMode"+ "(os.FileMode(0666)). Error='%v' ", err.Error()) return } actualFileMode, err := fPerm.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fPerm.GetCompositePermissionMode()"+ "Error='%v' ", err.Error()) return } if expectedFileMode != actualFileMode { t.Errorf("Error: Expected actual file mode octal value = '%s' Instead, "+ "actual file mode octal value= '%s' ", strconv.FormatInt(int64(expectedFileMode), 8), strconv.FormatInt(int64(actualFileMode), 8)) } } func TestFilePermissionConfig_NewByFileMode_02(t *testing.T) { expectedFileMode := os.FileMode(020000000777) fPerm, err := FilePermissionConfig{}.NewByFileMode(expectedFileMode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}.NewByFileMode"+ "(os.FileMode(0666)). 
Error='%v' ", err.Error()) return } actualFileMode, err := fPerm.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fPerm.GetCompositePermissionMode()"+ "Error='%v' ", err.Error()) return } if expectedFileMode != actualFileMode { t.Errorf("Error: Expected actual file mode octal value = '%s' Instead, "+ "actual file mode octal value= '%s' ", strconv.FormatInt(int64(expectedFileMode), 8), strconv.FormatInt(int64(actualFileMode), 8)) } expectedPermissionStr := "drwxrwxrwx" actualPermStr, err := fPerm.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPerm.GetPermissionTextCode()\n"+ "Error='%v'\n", err.Error()) return } if expectedPermissionStr != actualPermStr { t.Errorf("ERROR: Expected permission string='%v'.\n"+ "Instead, permission string='%v'\n", expectedPermissionStr, actualPermStr) } } func TestFilePermissionConfig_NewByFileMode_03(t *testing.T) { expectedFileMode := os.FileMode(9236) _, err := FilePermissionConfig{}.NewByFileMode(expectedFileMode) if err == nil { t.Error("Expected error return from FilePermissionConfig{}.NewByFileMode" + "(expectedFileMode) because of invalid FileMode. NO ERROR WAS RETURNED!!! ") } } func TestFilePermissionConfig_NewByOctalDigits_01(t *testing.T) { expectedTextCode := "-rw-rw-rw-" octalCode := 666 // int fPerm, err := FilePermissionConfig{}.NewByOctalDigits(octalCode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}."+ "NewByOctalDigits(octalCode) Error='%v'", err.Error()) return } actualTextCode, err := fPerm.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPerm.GetPermissionTextCode(). "+ "Error='%v'", err.Error()) } if expectedTextCode != actualTextCode { t.Errorf("Error: Expected permission text code='%v'. 
Instead, "+ "permission text code='%v' ", expectedTextCode, actualTextCode) } } func TestFilePermissionConfig_NewByOctalDigits_02(t *testing.T) { expectedTextCode := "drw-rw-rw-" octalCode := 20000000666 //int fPerm, err := FilePermissionConfig{}.NewByOctalDigits(octalCode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}."+ "NewByOctalDigits(octalCode) Error='%v'", err.Error()) return } actualTextCode, err := fPerm.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPerm.GetPermissionTextCode(). "+ "Error='%v'", err.Error()) } if expectedTextCode != actualTextCode { t.Errorf("Error: Expected permission text code='%v'. Instead, "+ "permission text code='%v' ", expectedTextCode, actualTextCode) } } func TestFilePermissionConfig_NewByOctalDigits_03(t *testing.T) { expectedTextCode := "--w--w--w-" octalCode := 222 // int fPerm, err := FilePermissionConfig{}.NewByOctalDigits(octalCode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}."+ "NewByOctalDigits(octalCode) Error='%v'", err.Error()) return } actualTextCode, err := fPerm.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPerm.GetPermissionTextCode(). "+ "Error='%v'", err.Error()) } if expectedTextCode != actualTextCode { t.Errorf("Error: Expected permission text code='%v'. Instead, "+ "permission text code='%v' ", expectedTextCode, actualTextCode) } } func TestFilePermissionConfig_NewByOctalDigits_04(t *testing.T) { expectedTextCode := "-r--r--r--" octalCode := 444 // int fPerm, err := FilePermissionConfig{}.NewByOctalDigits(octalCode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}."+ "NewByOctalDigits(octalCode) Error='%v'", err.Error()) return } actualTextCode, err := fPerm.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPerm.GetPermissionTextCode(). "+ "Error='%v'", err.Error()) } if expectedTextCode != actualTextCode { t.Errorf("Error: Expected permission text code='%v'. 
Instead, "+ "permission text code='%v' ", expectedTextCode, actualTextCode) } } func TestFilePermissionConfig_NewByOctalDigits_05(t *testing.T) { expectedTextCode := "dr--r--r--" octalCode := 20000000444 // int fPerm, err := FilePermissionConfig{}.NewByOctalDigits(octalCode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}."+ "NewByOctalDigits(octalCode) Error='%v'", err.Error()) return } actualTextCode, err := fPerm.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPerm.GetPermissionTextCode(). "+ "Error='%v'", err.Error()) } if expectedTextCode != actualTextCode { t.Errorf("Error: Expected permission text code='%v'. Instead, "+ "permission text code='%v' ", expectedTextCode, actualTextCode) } } func TestFilePermissionConfig_NewByOctalDigits_06(t *testing.T) { expectedTextCode := "d-w--w--w-" octalCode := 20000000222 // int fPerm, err := FilePermissionConfig{}.NewByOctalDigits(octalCode) if err != nil { t.Errorf("Error returned by FilePermissionConfig{}."+ "NewByOctalDigits(octalCode) Error='%v'", err.Error()) return } actualTextCode, err := fPerm.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPerm.GetPermissionTextCode(). "+ "Error='%v'", err.Error()) } if expectedTextCode != actualTextCode { t.Errorf("Error: Expected permission text code='%v'. Instead, "+ "permission text code='%v' ", expectedTextCode, actualTextCode) } } func TestFilePermissionConfig_NewByOctalDigits_07(t *testing.T) { octalCode := 12577 // int _, err := FilePermissionConfig{}.NewByOctalDigits(octalCode) if err == nil { t.Error("Expected an error to be returned by FilePermissionConfig{}." + "NewByOctalDigits(octalCode) because of invalid octalCode. " + "NO ERROR WAS RETURNED!") } } func TestFilePermissionConfig_NewByOctalDigits_08(t *testing.T) { octalCode := 12577 // int _, err := FilePermissionConfig{}.NewByOctalDigits(octalCode) if err == nil { t.Error("Expected an error to be returned by FilePermissionConfig{}." 
+ "NewByOctalDigits(octalCode) because of invalid octalCode. " + "NO ERROR WAS RETURNED!") } } func TestFilePermissionConfig_SetFileModeByComponents_01(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeNone()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeNone()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "-rwxrwxrwx" permissionStr := "-rwxrwxrwx" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). "+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_02(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeDir()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeDir()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "drwxrwxrwx" permissionStr := "rwxrwxrwx" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). "+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). 
"+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_03(t *testing.T) { // ModeSetuid() os.ModeSetuid "u" setuid entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeSetuid()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeSetuid()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "urw-rw-rw-" permissionStr := "rw-rw-rw-" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). "+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_04(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeNone()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeNone()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "-rw-rw-rw-" permissionStr := "rw-rw-rw-" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). 
"+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_05(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeNone()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeNone()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "-r--r--r--" permissionStr := "r--r--r--" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). "+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_06(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeNone()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeNone()). 
"+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "--w--w--w-" permissionStr := "-w--w--w-" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). "+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_07(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeDir()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeDir()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "drw-rw-rw-" permissionStr := "rw-rw-rw-" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). "+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. 
Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_08(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeDir()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeDir()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "dr--r--r--" permissionStr := "r--r--r--" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). "+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). "+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_09(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeDir()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeDir()). "+ "Error='%v' ", err.Error()) } expectedPermissionTxt := "d-w--w--w-" permissionStr := "-w--w--w-" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err != nil { t.Errorf("Error returned by fPermCfg.SetFileModeByComponents(entryType, permissionStr). "+ "entrType='%s' permissionStr='%s' Error='%v' ", entryType.String(), permissionStr, err.Error()) } actualPermissionTxt, err := fPermCfg.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fpCfg.GetPermissionTextCode(). 
"+ "Error='%v' ", err.Error()) } if expectedPermissionTxt != actualPermissionTxt { t.Errorf("Error: Expected Permission Text Code='%v'. Instead, "+ "Actual Permission Text Code='%v' ", expectedPermissionTxt, actualPermissionTxt) } } func TestFilePermissionConfig_SetFileModeByComponents_10(t *testing.T) { // Bad Entry Type Code entryType := OsFilePermissionCode(999) permissionStr := "rw-rw-rw-" fPermCfg := FilePermissionConfig{} err := fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err == nil { t.Error("Expected error return from bad entry type code 999. " + "However, NO ERROR WAS RETURNED! ") } } func TestFilePermissionConfig_SetFileModeByComponents_11(t *testing.T) { entryType := OsFilePermissionCode(OsFilePermCode.ModeNone()) // Bad Permission String permissionStr := "rZ-rz-rz-" fPermCfg := FilePermissionConfig{} err := fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err == nil { t.Error("Expected error return from bad permission string. " + "However, NO ERROR WAS RETURNED! ") } } func TestFilePermissionConfig_SetFileModeByComponents_12(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeDir()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeDir()). "+ "Error='%v' ", err.Error()) } permissionStr := "-w--w--w--w--w--w--w--w--" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err == nil { t.Error("Expected an error from fPermCfg.SetFileModeByComponents(entryType, " + "permissionStr). because permission string was longer than 10-characters. " + "However, NO ERROR WAS RETURNED!!") } } func TestFilePermissionConfig_SetFileModeByComponents_13(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeDir()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeDir()). 
"+ "Error='%v' ", err.Error()) } permissionStr := "-w-zzz-w-" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err == nil { t.Error("Expected an error from fPermCfg.SetFileModeByComponents(entryType, " + "permissionStr). because permission string invalid group characters. " + "However, NO ERROR WAS RETURNED!!") } } func TestFilePermissionConfig_SetFileModeByComponents_14(t *testing.T) { entryType, err := OsFilePermCode.GetNewFromFileMode(OsFilePermCode.ModeDir()) if err != nil { t.Errorf("Error returned by OsFilePermCode.GetNewFromFileMode("+ "OsFilePermCode.ModeDir()). "+ "Error='%v' ", err.Error()) } permissionStr := "-w--w-ZZZ" fPermCfg := FilePermissionConfig{} err = fPermCfg.SetFileModeByComponents(entryType, permissionStr) if err == nil { t.Error("Expected an error from fPermCfg.SetFileModeByComponents(entryType, " + "permissionStr). because permission string invalid 'other' group characters. " + "However, NO ERROR WAS RETURNED!!") } } func TestFilePermissionConfig_SetFileModeByOctalDigits_01(t *testing.T) { expectedTextCode := "-rw-rw-rw-" octalCode := 666 // int fPerm := FilePermissionConfig{} err := fPerm.SetFileModeByOctalDigits(octalCode) if err != nil { t.Errorf("Error returned by fPerm.SetFileModeByOctalDigits(octalCode). "+ "Error='%v'", err.Error()) } actualTextCode, err := fPerm.GetPermissionTextCode() if err != nil { t.Errorf("Error returned by fPerm.GetPermissionTextCode(). "+ "Error='%v'", err.Error()) } if expectedTextCode != actualTextCode { t.Errorf("Error: Expected permission text code='%v'. Instead, "+ "permission text code='%v' ", expectedTextCode, actualTextCode) } } func TestFilePermissionConfig_SetFileModeByOctalDigits_02(t *testing.T) { octalCode := 12666 // int fPerm := FilePermissionConfig{} err := fPerm.SetFileModeByOctalDigits(octalCode) if err == nil { t.Error("Expected error to be returned by fPerm.SetFileMode" + "ByOctalDigits(octalCode) because octal code was invalid! 
" + "However, NO ERROR WAS RETURNED!") } } func TestFilePermissionConfig_SetFileModeByTextCode_01(t *testing.T) { textCode := "-rwxrwxrwx" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_02(t *testing.T) { textCode := "drwxrwxrwx" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) return } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_03(t *testing.T) { textCode := "-rw-rw-rw-" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. 
Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_04(t *testing.T) { textCode := "drw-rw-rw-" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_05(t *testing.T) { textCode := "-rwx------" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_06(t *testing.T) { textCode := "-rwxrwx---" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. 
Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_07(t *testing.T) { textCode := "---x--x--x" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_08(t *testing.T) { textCode := "--w--w--w-" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_09(t *testing.T) { textCode := "--wx-wx-wx" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. 
Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_10(t *testing.T) { textCode := "-r--r--r--" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_11(t *testing.T) { textCode := "-r-xr-xr-x" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_12(t *testing.T) { textCode := "-rw-rw-rw-" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. 
Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_13(t *testing.T) { textCode := "-rwxr-----" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_14(t *testing.T) { textCode := "drw-rw-rw-" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_15(t *testing.T) { textCode := "----------" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. 
Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_16(t *testing.T) { textCode := "d---------" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err != nil { t.Errorf("Error returned by fpCfg.SetFileModeByTextCode(textCode). "+ "Error='%v' ", err.Error()) } fileMode, err := fpCfg.GetCompositePermissionMode() if err != nil { t.Errorf("Error returned by fpCfg.GetCompositePermissionMode()\n"+ "Error='%v'\n", err.Error()) return } if textCode != fileMode.String() { t.Errorf("Error: Expected File Mode text = '%v'. Instead, text = '%v' .", textCode, fileMode.String()) } } func TestFilePermissionConfig_SetFileModeByTextCode_17(t *testing.T) { textCode := "-rwxrwxrwxrwxrwx" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err == nil { t.Error("Expected error to be returned by fpCfg.SetFileModeBy" + "TextCode(textCode) because input text was longer than 10-characters. " + "However, NO ERROR WAS RETURNED!") } } func TestFilePermissionConfig_SetFileModeByTextCode_18(t *testing.T) { textCode := "-rwx" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err == nil { t.Error("Expected error to be returned by fpCfg.SetFileModeBy" + "TextCode(textCode) because input text was less than 10-characters. " + "However, NO ERROR WAS RETURNED!") } } func TestFilePermissionConfig_SetFileModeByTextCode_19(t *testing.T) { textCode := "-ZZZrw-rw-" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err == nil { t.Error("Expected error to be returned by " + "fpCfg.SetFileModeByTextCode(textCode). because owner characters are invalid. 
" + "However, NO ERROR WAS RETURNED!!") } } func TestFilePermissionConfig_SetFileModeByTextCode_20(t *testing.T) { textCode := "-rw-ZZZrw-" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err == nil { t.Error("Expected error to be returned by " + "fpCfg.SetFileModeByTextCode(textCode). because group characters are invalid. " + "However, NO ERROR WAS RETURNED!!") } } func TestFilePermissionConfig_SetFileModeByTextCode_21(t *testing.T) { textCode := "-rw-rw-zZZ" fpCfg := FilePermissionConfig{} err := fpCfg.SetFileModeByTextCode(textCode) if err == nil { t.Error("Expected error to be returned by " + "fpCfg.SetFileModeByTextCode(textCode). because 'other' characters are invalid. " + "However, NO ERROR WAS RETURNED!!") } }
package nut

import "github.com/gin-gonic/gin"

// indexLeaveWords returns every LeaveWord record, ordered by creation time
// with the newest first. The first parameter (presumably a locale code —
// TODO confirm against the handler registration) is unused here but kept to
// satisfy the shared admin-handler signature.
func (p *AdminPlugin) indexLeaveWords(l string, c *gin.Context) (interface{}, error) {
	var items []LeaveWord
	err := p.DB.Model(&items).
		Order("created_at DESC").Select()
	return items, err
}

// destroyLeaveWord deletes the LeaveWord whose id is given by the URL
// parameter "id" and returns an empty JSON object on success.
func (p *AdminPlugin) destroyLeaveWord(l string, c *gin.Context) (interface{}, error) {
	_, err := p.DB.Model(&LeaveWord{}).Where("id = ?", c.Param("id")).Delete()
	return gin.H{}, err
}
// cat brightness.

// +build linux

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// main walks /sys/class/backlight, resolves symlinked entries, and for every
// backlight device directory prints the current and the maximum brightness.
func main() {
	const (
		dir     = "/sys/class/backlight"
		current = "brightness"
		max     = "max_brightness"
	)

	fis, err := ioutil.ReadDir(dir)
	if err != nil {
		panic(err)
	}

	// Collect the device directories; backlight entries are usually
	// symlinks, so follow them with os.Stat before the IsDir check.
	var rootes []string
	for _, fi := range fis {
		if fi.Mode()&os.ModeSymlink != 0 {
			fi, err = os.Stat(filepath.Join(dir, fi.Name()))
			if err != nil {
				panic(err)
			}
		}
		if fi.IsDir() {
			rootes = append(rootes, filepath.Join(dir, fi.Name()))
		}
	}

	// cat streams the named file under root to stdout.
	cat := func(root, file string) {
		// BUG FIX: previously this opened `current` unconditionally,
		// so the "max brightness" section printed the current value.
		f, err := os.Open(filepath.Join(root, file))
		if err != nil {
			panic(err)
		}
		defer f.Close()
		if _, err := io.Copy(os.Stdout, f); err != nil {
			panic(err)
		}
	}

	for _, root := range rootes {
		fmt.Printf("root:%#v\n", root)
		fmt.Println("current brightness")
		cat(root, current)
		fmt.Println("max brightness")
		cat(root, max)
		fmt.Println()
	}
}
package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// line prints a 30-character horizontal separator.
func line() {
	fmt.Println(strings.Repeat("-", 30))
}

// main demonstrates strconv parsing: ParseInt base/bitsize behavior,
// ParseFloat, ParseBool, and the Atoi shortcut.
func main() {
	parsed, _ := strconv.ParseInt("123", 0, 64)
	fmt.Println(reflect.TypeOf(parsed), parsed)

	// bitsize useless
	parsed, _ = strconv.ParseInt("567", 0, 32)
	fmt.Println(reflect.TypeOf(parsed), parsed)
	line()

	// base is 8, 10, 16, 0 is auto detect
	parsed, _ = strconv.ParseInt("0xFF", 0, 64)
	fmt.Println(reflect.TypeOf(parsed), parsed)

	parsed, _ = strconv.ParseInt("123", 16, 64)
	fmt.Println(reflect.TypeOf(parsed), parsed)

	parsed, _ = strconv.ParseInt("0xFF", 10, 64)
	fmt.Println(reflect.TypeOf(parsed), parsed)
	line()

	// bitsize is 32, 64
	realNum, _ := strconv.ParseFloat("3.14", 64)
	fmt.Println(reflect.TypeOf(realNum), realNum)

	flag, _ := strconv.ParseBool("false")
	fmt.Println(reflect.TypeOf(flag), flag)
	line()

	// shortcut
	num, convErr := strconv.Atoi("999")
	fmt.Println(num, convErr)

	num, convErr = strconv.Atoi("diango")
	fmt.Println(num, convErr)
	line()
}
package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/therecipe/qt/core"
	"github.com/therecipe/qt/quick"
	"github.com/therecipe/qt/widgets"
)

// Shared grid layout of the central widget plus the next free cell position.
var (
	centralLayout       *widgets.QGridLayout
	centralLayoutRow    int
	centralLayoutColumn int
)

// main builds a QMainWindow whose scrollable central area is populated with
// QML demo widgets (layouts, buttons, item views, containers, input and
// display widgets) and then runs the Qt event loop.
func main() {
	widgets.NewQApplication(len(os.Args), os.Args)

	mainWindow := widgets.NewQMainWindow(nil, 0)
	mainWindow.SetWindowTitle("Common QML (+ Quick Controls 1)")

	scrollWidget := widgets.NewQScrollArea(nil)

	centralWidget := widgets.NewQWidget(nil, 0)
	centralLayout = widgets.NewQGridLayout(centralWidget)

	// Populate the grid; these helpers are defined elsewhere in the package.
	layouts()
	buttons()
	itemViews()
	containers()
	inputWidgets()
	displayWidgets()

	scrollWidget.SetWidget(centralWidget)
	mainWindow.SetCentralWidget(scrollWidget)
	mainWindow.ShowMaximized()

	widgets.QApplication_Exec()
}

// addWidget wraps widget in a titled QGroupBox and places it in the next
// free cell of the central grid, wrapping to a new row after seven columns.
func addWidget(widget widgets.QWidget_ITF) {
	// Columns 0..6 are used; wrap to the next row afterwards.
	if centralLayoutColumn > 6 {
		centralLayoutColumn = 0
		centralLayoutRow++
	}

	wrappedWidget := widgets.NewQGroupBox2(widget.QWidget_PTR().WindowTitle(), nil)
	wrappedWidgetLayout := widgets.NewQVBoxLayout2(wrappedWidget)
	wrappedWidgetLayout.AddWidget(widget, 0, core.Qt__AlignCenter)

	widget.QWidget_PTR().SetFixedSize2(200, 200)
	wrappedWidget.SetFixedSize2(250, 250)

	centralLayout.AddWidget2(wrappedWidget, centralLayoutRow, centralLayoutColumn, core.Qt__AlignCenter)
	centralLayoutColumn++
}

// createWidgetWithContext renders the given QML snippet in a QQuickWidget,
// exposes ctx to QML under ctxName, and adds the result to the grid. The QML
// source is written to a temp file because SetSource requires a URL.
func createWidgetWithContext(name, code, ctxName string, ctx core.QObject_ITF) {
	quickWidget := quick.NewQQuickWidget(nil)
	quickWidget.SetWindowTitle(name)
	quickWidget.RootContext().SetContextProperty(ctxName, ctx)
	quickWidget.SetResizeMode(quick.QQuickWidget__SizeRootObjectToView)

	path := filepath.Join(os.TempDir(), "tmp"+strings.Replace(name, " ", "", -1)+".qml")
	ioutil.WriteFile(path, []byte("import QtQuick 2.0\nimport QtQuick.Layouts 1.3\nimport QtQuick.Controls 1.4\n"+code), 0644)
	quickWidget.SetSource(core.QUrl_FromLocalFile(path))

	addWidget(quickWidget)
}

// createWidget is createWidgetWithContext without a QML context object.
func createWidget(name, code string) {
	createWidgetWithContext(name, code, "", nil)
}
package types

import (
	"time"
)

// COVID19VaccinationStatistics holds the data for COVID-19 vaccination statistics.
// The `fake` struct tags appear to drive a gofakeit-style test-data
// generator — TODO confirm against the test suite.
type COVID19VaccinationStatistics struct {
	Area                 string    `json:"area" fake:"{randomstring:[ΑΡΓΟΛΙΔΑΣ,ΜΥΚΟΝΟΥ,ΚΟΡΙΝΘΙΑΣ]}"`
	AreaID               int       `json:"areaid" fake:"{number:0,1000}"`
	DailyDose1           int       `json:"dailydose1" fake:"{number:0,1000}"`
	DailyDose2           int       `json:"dailydose2" fake:"{number:0,1000}"`
	DayDiff              int       `json:"daydiff" fake:"{number:0,100}"`
	DayTotal             int       `json:"daytotal" fake:"{number:0,2000}"`
	ReferenceDate        time.Time `json:"referencedate" fake:"{date}"`
	TotalDistinctPersons int       `json:"totaldistinctpersons" fake:"{number:0,100000}"`
	TotalDose1           int       `json:"totaldose1" fake:"{number:0,50000}"`
	TotalDose2           int       `json:"totaldose2" fake:"{number:0,50000}"`
	TotalVaccinations    int       `json:"totalvaccinations" fake:"{number:0,100000}"`
}

// InspectionsAndViolations holds the data for inspections and violations.
type InspectionsAndViolations struct {
	Year                   int     `json:"year" fake:"{year}"`
	Inspections            int     `json:"inspections" fake:"{number:0,10000}"`
	Violations             int     `json:"violations" fake:"{number:0,10000}"`
	ViolatingOrganizations float64 `json:"violating_organizations" fake:"{number:0,1000}"`
	Penalties              float64 `json:"penalties" fake:"{float64}"`
}

// NumberOfPharmacists holds the data for the number of pharmacists.
// Counts are reported per calendar quarter (Q1-Q4).
type NumberOfPharmacists struct {
	Year     int    `json:"year" fake:"{year}"`
	Quarter  string `json:"quarter" fake:"{randomstring:[Q1,Q2,Q3,Q4]}"`
	Active   int    `json:"active" fake:"{number:0,10000}"`
	Entrants int    `json:"entrants" fake:"{number:0,100}"`
	Exits    int    `json:"exits" fake:"{number:0,100}"`
}

// NumberOfPharmacies holds the data for the number of pharmacies.
type NumberOfPharmacies struct {
	Year     int    `json:"year" fake:"{year}"`
	Quarter  string `json:"quarter" fake:"{randomstring:[Q1,Q2,Q3,Q4]}"`
	Active   int    `json:"active" fake:"{number:0,10000}"`
	Entrants int    `json:"entrants" fake:"{number:0,100}"`
	Exits    int    `json:"exits" fake:"{number:0,100}"`
}

// NumberOfDoctors holds the data for the number of doctors.
type NumberOfDoctors struct {
	Year     int    `json:"year" fake:"{year}"`
	Quarter  string `json:"quarter" fake:"{randomstring:[Q1,Q2,Q3,Q4]}"`
	Active   int    `json:"active" fake:"{number:0,10000}"`
	Entrants int    `json:"entrants" fake:"{number:0,100}"`
	Exits    int    `json:"exits" fake:"{number:0,100}"`
}

// NumberOfDentists holds the data for the number of dentists.
type NumberOfDentists struct {
	Year     int    `json:"year" fake:"{year}"`
	Quarter  string `json:"quarter" fake:"{randomstring:[Q1,Q2,Q3,Q4]}"`
	Active   int    `json:"active" fake:"{number:0,10000}"`
	Entrants int    `json:"entrants" fake:"{number:0,100}"`
	Exits    int    `json:"exits" fake:"{number:0,100}"`
}
package lnroll

import (
	"errors"
	"fmt"
	"time"

	"github.com/apg/ln"
)

// Client is the minimal Rollbar-style reporting interface this filter needs.
type Client interface {
	Critical(err error, extras map[string]string) (uuid string, e error)
	Error(err error, extras map[string]string) (uuid string, e error)
}

// New returns a new FilterFunc which reports errors to Rollbar.
func New(client Client) ln.FilterFunc {
	return ln.FilterFunc(func(e ln.Event) bool {
		// Events below Error priority pass through unreported.
		if e.Pri < ln.PriError {
			return true
		}

		// find the "err", or "error", and use that to report from F.
		var err error
		extras := make(map[string]string)
		for k, v := range e.Data {
			if err == nil && (k == "err" || k == "error") {
				// A non-error value under "err"/"error" is stringified
				// into a fresh error; all other keys become extras.
				if e, ok := v.(error); !ok {
					err = errors.New(toString(v))
				} else {
					err = e
				}
			} else {
				extras[k] = toString(v)
			}
		}

		switch e.Pri {
		case ln.PriError:
			uid, err := client.Error(err, extras)
			if err != nil {
				// These can't be Error or lnroll will recursively handle
				ln.Info(ln.F{"err": err, "uuid": uid, "priority": e.Pri.String(), "action": "rollbar-report"})
			}
		case ln.PriCritical, ln.PriAlert, ln.PriEmergency:
			uid, err := client.Critical(err, extras)
			if err != nil {
				// These can't be Error or lnroll will recursively handle
				ln.Info(ln.F{"err": err, "uuid": uid, "priority": e.Pri.String(), "action": "rollbar-report"})
			}
		}
		return true
	})
}

// toString renders a logged value as a string: RFC3339 for time.Time,
// Stringer when implemented, otherwise fmt's "%+v".
func toString(v interface{}) string {
	switch t := v.(type) {
	case time.Time:
		return t.Format(time.RFC3339)
	default:
		if s, ok := v.(fmt.Stringer); ok {
			return s.String()
		}
		return fmt.Sprintf("%+v", v)
	}
}
package loadflags

import (
	"path/filepath"
)

// LoadForCli loads flags for a command-line invocation of progName.
// The work is delegated to the unexported loadForCli (defined elsewhere
// in this package).
func LoadForCli(progName string) error {
	return loadForCli(progName)
}

// LoadForDaemon loads flags for a daemon process, reading its
// configuration from /etc/<progName>.
func LoadForDaemon(progName string) error {
	return loadFlags(filepath.Join("/etc", progName))
}
package NoQ_RoomQ import ( "errors" "fmt" "log" "net/http" "net/url" "regexp" "time" "github.com/google/uuid" NoQ_RoomQ_Exception "github.com/redso/noq-roomq-go-sdk/Exception" NoQ_RoomQ_Utils "github.com/redso/noq-roomq-go-sdk/Utils" ) type roomQ struct { clientID string jwtSecret string ticketIssuer string debug bool tokenName string token string statusEndpoint string } func RoomQ(clientID string, jwtSecret string, ticketIssuer string, statusEndpoint string, httpReq *http.Request, debug bool) roomQ { rQ := roomQ{ clientID: clientID, jwtSecret: jwtSecret, ticketIssuer: ticketIssuer, debug: debug, statusEndpoint: statusEndpoint, tokenName: fmt.Sprintf("be_roomq_t_%s", clientID), } rQ.token = rQ.getToken(httpReq) return rQ } func (rQ roomQ) getToken(httpReq *http.Request) string { if token := httpReq.URL.Query().Get("noq_t"); len(token) > 0 { return token } if token, err := httpReq.Cookie(rQ.tokenName); err == nil { return token.Value } return "" } func (rQ *roomQ) Validate(httpReq *http.Request, httpRes http.ResponseWriter, returnURL, sessionID string) validationResult { token := rQ.token currentURL := "" if httpReq.TLS != nil { currentURL = fmt.Sprintf("https://%s%s", httpReq.Host, httpReq.URL.RequestURI()) } else { currentURL = fmt.Sprintf("http://%s%s", httpReq.Host, httpReq.URL.RequestURI()) } needGenerateJWT := false needRedirect := false if len(token) < 1 { needGenerateJWT = true needRedirect = true rQ.debugPrint("no jwt") } else { rQ.debugPrint("current jwt " + token) if data, ok := NoQ_RoomQ_Utils.JwtDecode(token, rQ.jwtSecret); ok { if len(sessionID) > 0 && data.Get("session_id").String() != sessionID { needGenerateJWT = true needRedirect = true rQ.debugPrint("session id not match") } else if data.HasKey("deadline") && data.Get("deadline").Int() < time.Now().UTC().UnixMilli()/1000 { needRedirect = true rQ.debugPrint("deadline exceed") } else if data.Get("type").String() == "queue" { needRedirect = true rQ.debugPrint("in queue") } else if 
data.Get("type").String() == "self-sign" { needRedirect = true rQ.debugPrint("self sign token") } } else { needGenerateJWT = true needRedirect = true rQ.debugPrint("Failed to decode jwt") rQ.debugPrint("invalid secret") } } if needGenerateJWT { token = rQ.generateJWT(sessionID) rQ.debugPrint("generating new jwt token") rQ.token = token } http.SetCookie(httpRes, &http.Cookie{ Name: rQ.tokenName, Value: rQ.token, Expires: time.Now().Add(time.Second * (12 * 60 * 60)), Path: "/", Domain: "", HttpOnly: false, }) if needRedirect { if len(returnURL) > 0 { return rQ.redirectToTicketIssuer(token, returnURL) } else { return rQ.redirectToTicketIssuer(token, currentURL) } } else { return rQ.enter(currentURL) } } func (rQ *roomQ) Extend(httpRes http.ResponseWriter, duration int) error { if backend, err := rQ.getBackend(); err == nil { httpClient := NoQ_RoomQ_Utils.HttpClient(fmt.Sprintf("https://%s", backend)) response := httpClient.Post(fmt.Sprintf("/queue/%s", rQ.clientID), map[string]interface{}{ "action": "beep", "client_id": rQ.clientID, "id": rQ.token, "extend_serving_duration": duration * 60, }) rQ.debugPrint(response.StatusCode) if response.StatusCode == http.StatusUnauthorized { rQ.debugPrint(&NoQ_RoomQ_Exception.InvalidApiKeyException{}) return &NoQ_RoomQ_Exception.InvalidApiKeyException{} } else if response.StatusCode == http.StatusNotFound { rQ.debugPrint(&NoQ_RoomQ_Exception.NotServingException{}) return &NoQ_RoomQ_Exception.NotServingException{} } else { token := response.Get("id").String() rQ.token = token http.SetCookie(httpRes, &http.Cookie{ Name: rQ.tokenName, Value: rQ.token, Expires: time.Now().Add(time.Second * (12 * 60 * 60)), Path: "/", HttpOnly: false, }) return nil } } else { rQ.debugPrint(err) return err.(error) } } func (rQ *roomQ) GetServing() (int64, error) { if backend, err := rQ.getBackend(); err == nil { httpClient := NoQ_RoomQ_Utils.HttpClient(fmt.Sprintf("https://%s", backend)) response := httpClient.Get(fmt.Sprintf("/rooms/%s/servings/%s", 
rQ.clientID, rQ.token)) rQ.debugPrint(response.Raw) if response.StatusCode == http.StatusUnauthorized { rQ.debugPrint(&NoQ_RoomQ_Exception.InvalidApiKeyException{}) return 0, &NoQ_RoomQ_Exception.InvalidApiKeyException{} } else if response.StatusCode == http.StatusNotFound { rQ.debugPrint(&NoQ_RoomQ_Exception.NotServingException{}) return 0, &NoQ_RoomQ_Exception.NotServingException{} } else { return int64(response.Get("deadline").Float()) / 1000, nil } } else { rQ.debugPrint(err) return 0, err.(error) } } func (rQ *roomQ) DeleteServing(httpRes http.ResponseWriter) error { if backend, err := rQ.getBackend(); err == nil { httpClient := NoQ_RoomQ_Utils.HttpClient(fmt.Sprintf("https://%s/queue", backend)) response := httpClient.Post(fmt.Sprintf("/%s", rQ.clientID), map[string]interface{}{ "action": "delete_serving", "client_id": rQ.clientID, "id": rQ.token, }) rQ.debugPrint(response.Raw) if response.StatusCode == http.StatusUnauthorized { rQ.debugPrint(&NoQ_RoomQ_Exception.InvalidApiKeyException{}) return &NoQ_RoomQ_Exception.InvalidApiKeyException{} } else if response.StatusCode == http.StatusNotFound { rQ.debugPrint(&NoQ_RoomQ_Exception.NotServingException{}) return &NoQ_RoomQ_Exception.NotServingException{} } else { if payload, ok := NoQ_RoomQ_Utils.JwtDecode(rQ.token, rQ.jwtSecret); ok { token := rQ.generateJWT(payload.Get("session_id").String()) rQ.token = token http.SetCookie(httpRes, &http.Cookie{ Name: rQ.tokenName, Value: rQ.token, Expires: time.Now().Add(time.Second * (12 * 60 * 60)), Path: "/", HttpOnly: false, }) return nil } else { return errors.New("failed to decode jwt") } } } else { rQ.debugPrint(err) return err.(error) } } func (rQ roomQ) enter(currentURL string) validationResult { urlWithoutToken := removeNoQToken(currentURL) // redirect if url contain token if urlWithoutToken != currentURL { return ValidationResult(urlWithoutToken) } return ValidationResult("") } func (rQ roomQ) redirectToTicketIssuer(token, currentURL string) validationResult { 
urlWithoutToken := removeNoQToken(currentURL) // Force noq_t in before noq_c & noq_r noq_t := url.Values{} noq_t.Add("noq_t", token) params := url.Values{} params.Add("noq_c", rQ.clientID) params.Add("noq_r", urlWithoutToken) if base, err := url.Parse(rQ.ticketIssuer); err == nil { base.RawQuery = noq_t.Encode() + "&" + params.Encode() return ValidationResult(base.String()) } else { rQ.debugPrint("Failed to redirect to ticket issuer") panic("failed to redirect to ticket issuer") } } func (rQ roomQ) generateJWT(sessionID string) string { _sessionID := "" if len(sessionID) > 0 { _sessionID = sessionID } else if _uuid, err := uuid.NewRandom(); err == nil { _sessionID = _uuid.String() } claims := NoQ_RoomQ_Utils.JwtClaims{ RoomID: rQ.clientID, SessionID: _sessionID, Type: "self-sign", } return NoQ_RoomQ_Utils.JwtEncode(claims, rQ.jwtSecret) } func (rQ roomQ) debugPrint(message interface{}) { if rQ.debug { log.Println(fmt.Sprintf("[RoomQ] %s", message)) } } func removeNoQToken(currentURL string) string { url := regexp.MustCompile(`(?i)([&]*)(noq_t=[^&]*)`).ReplaceAllString(currentURL, "") url = regexp.MustCompile(`(?i)(\?&)`).ReplaceAllString(url, "?") url = regexp.MustCompile(`(?i)(\?$)`).ReplaceAllString(url, "") return url } func (rQ roomQ) getBackend() (string, interface{}) { client := NoQ_RoomQ_Utils.HttpClient(rQ.statusEndpoint) resp := client.Get(fmt.Sprintf("/%s", rQ.clientID)) if resp.Get("state").String() == "stopped" { return "", &NoQ_RoomQ_Exception.QueueStoppedException{} } return resp.Get("backend").String(), nil }
package main

import (
	"fmt"
	"io"
	"os"
	"reflect"
)

// main demonstrates reflect.TypeOf / reflect.ValueOf and how they relate
// to fmt's %T and %v verbs.
func main() {
	intType := reflect.TypeOf(3)
	fmt.Println(intType.String())
	fmt.Println(intType)

	// The dynamic type behind an interface value is what TypeOf reports.
	var writer io.Writer = os.Stdout
	fmt.Println(reflect.TypeOf(writer))
	fmt.Printf("%T\n", 3)

	intValue := reflect.ValueOf(3)
	fmt.Println(intValue)
	fmt.Printf("%v \n", intValue)
	fmt.Println(intValue.String())
}
package twosum

// testCases drives the two-sum table tests. result holds the expected pair
// of indices into nums whose values add up to target, or nil when no such
// pair exists.
var testCases = []struct {
	nums   []int
	target int
	result []int
}{
	{
		[]int{2, 7, 11, 15},
		9,
		[]int{0, 1},
	},
	{
		[]int{3, 4, 5, 2, 7},
		6,
		[]int{1, 3},
	},
	// No pair sums to the target.
	{
		[]int{1, 2, 0, 8, 11, 3},
		18,
		nil,
	},
	// Empty input.
	{
		[]int{},
		1,
		nil,
	},
}
package redfish

import (
	"context"
	"net/url"

	redfishApi "github.com/Nordix/go-redfish/api"
	redfishClient "github.com/Nordix/go-redfish/client"

	alog "opendev.org/airship/airshipctl/pkg/log"
)

// RedfishRemoteDirect bundles everything needed to remote-direct boot an
// ephemeral host over Redfish.
type RedfishRemoteDirect struct {
	// Context
	Context context.Context

	// remote URL
	RemoteURL url.URL

	// ephemeral Host ID
	EphemeralNodeId string

	// ISO URL
	IsoPath string

	// Redfish Client implementation
	Api redfishApi.RedfishAPI
}

// Top level function to handle Redfish remote direct.
// Sequence: look up the system, find its manager, insert the ISO into the
// manager's CD/DVD virtual media, point the boot source at that media, and
// reboot the host. Returns on the first failing step.
func (cfg RedfishRemoteDirect) DoRemoteDirect() error {
	alog.Debugf("Using Redfish Endpoint: '%s'", cfg.RemoteURL.String())

	/* TODO: Add Authentication when redfish library supports it. */

	/* Get system details */
	systemId := cfg.EphemeralNodeId
	system, _, err := cfg.Api.GetSystem(cfg.Context, systemId)
	if err != nil {
		return NewRedfishClientErrorf("Get System[%s] failed with err: %s", systemId, err.Error())
	}
	alog.Debugf("Ephemeral Node System ID: '%s'", systemId)

	/* get manager for system */
	// NOTE(review): only the first manager link is considered.
	managerId, err := GetResourceIDFromURL(system.Links.ManagedBy[0].OdataId)
	if err != nil {
		return err
	}
	alog.Debugf("Ephemeral node managerId: '%s'", managerId)

	/* Get manager's Cd or DVD virtual media ID */
	vMediaId, vMediaType, err := GetVirtualMediaId(cfg.Context, cfg.Api, managerId)
	if err != nil {
		return err
	}
	alog.Debugf("Ephemeral Node Virtual Media Id: '%s'", vMediaId)

	/* Load ISO in manager's virtual media */
	err = SetVirtualMedia(cfg.Context, cfg.Api, managerId, vMediaId, cfg.IsoPath)
	if err != nil {
		return err
	}
	alog.Debugf("Successfully loaded virtual media: '%s'", cfg.IsoPath)

	/* Set system's bootsource to selected media */
	err = SetSystemBootSourceForMediaType(cfg.Context, cfg.Api, systemId, vMediaType)
	if err != nil {
		return err
	}

	/* Reboot system */
	err = RebootSystem(cfg.Context, cfg.Api, systemId)
	if err != nil {
		return err
	}
	alog.Debug("Restarted ephemeral host")

	return nil
}

// NewRedfishRemoteDirectClient creates a new Redfish remote direct client.
func NewRedfishRemoteDirectClient(ctx context.Context, remoteURL string, ephNodeID string, isoPath string, ) (RedfishRemoteDirect, error) { if remoteURL == "" { return RedfishRemoteDirect{}, NewRedfishConfigErrorf("redfish remote url empty") } if ephNodeID == "" { return RedfishRemoteDirect{}, NewRedfishConfigErrorf("redfish ephemeral node id empty") } if isoPath == "" { return RedfishRemoteDirect{}, NewRedfishConfigErrorf("redfish ephemeral node iso Path empty") } cfg := &redfishClient.Configuration{ BasePath: remoteURL, DefaultHeader: make(map[string]string), UserAgent: "airshipctl/client", } var api redfishApi.RedfishAPI = redfishClient.NewAPIClient(cfg).DefaultApi url, err := url.Parse(remoteURL) if err != nil { return RedfishRemoteDirect{}, NewRedfishConfigErrorf("Invalid URL format: %v", err) } client := RedfishRemoteDirect{ Context: ctx, RemoteURL: *url, EphemeralNodeId: ephNodeID, IsoPath: isoPath, Api: api, } return client, nil }
package main import ( "encoding/json" "io/ioutil" "os" "github.com/RHsyseng/console-cr-form/pkg/web" "github.com/go-openapi/spec" "github.com/sirupsen/logrus" ) const defaultJSONForm = "test/examples/full-form.json" const defaultJSONSchema = "test/examples/full-schema.json" const envJSONForm = "JSON_FORM" const envJSONSchema = "JSON_SCHEMA" func main() { logrus.SetLevel(logrus.DebugLevel) logrus.Info("Starting test server. Using default JSON Form and Schema.") logrus.Info("Provide a different Form and Schema using JSON_FORM and JSON_SCHEMA env vars") config, err := web.NewConfiguration("", 8080, getSchema(), "app.kiegroup.org/v1", "KieApp", getForm(), callback) if err != nil { logrus.Fatalf("Failed to configure web server: %v", err) } if err := web.RunWebServer(config); err != nil { logrus.Fatalf("Failed to run web server: %v", err) } } func callback(yamlString string) { logrus.Infof("Mock deploy yaml:\n%s", yamlString) } func readJSONFile(envPath, defaultPath string) ([]byte, error) { filePath := getFilePath(envPath, defaultPath) jsonFile, err := os.Open(filePath) if err != nil { logrus.Error("Unable to open file: ", err) } defer jsonFile.Close() return ioutil.ReadAll(jsonFile) } func getFilePath(env, defaultPath string) string { path := os.Getenv(env) if len(path) == 0 { return defaultPath } return path } func getForm() web.Form { byteValue, err := readJSONFile(envJSONForm, defaultJSONForm) if err != nil { logrus.Error("Unable to read file as byte array: ", err) } var form web.Form if err = json.Unmarshal(byteValue, &form); err != nil { logrus.Error("Error unmarshalling jsonForm: ", err) } return form } func getSchema() spec.Schema { byteValue, err := readJSONFile(envJSONSchema, defaultJSONSchema) if err != nil { logrus.Error("Unable to read file as byte array: ", err) } var schema spec.Schema if err = json.Unmarshal(byteValue, &schema); err != nil { logrus.Error("Error unmarshalling jsonSchema: ", err) } return schema }
package response type Type string const ( TypeRoomInfo Type = "RoomInfo" TypeGameStart Type = "GameStart" TypeGameEvent Type = "GameEvent" ) type Response struct { Type Type Body interface{} } type Responses []*Response func (r *Responses) Add(t Type, body interface{}) { *r = append(*r, &Response{ Type: t, Body: body, }) }
package cache

import (
	"testing"
	"time"
)

// TestMemCache exercises the in-memory cache backend: URI-based construction
// with a gc interval, basic set/get round-tripping, and TTL-based expiry.
func TestMemCache(t *testing.T) {
	c, err := New("memory?gcInterval=3s")
	if err != nil {
		t.Error(err)
		return
	}

	memCache, isMem := c.(*mCache)
	if !isMem {
		t.Fatal("not a memory cache")
	}
	if want := 3 * time.Second; memCache.gcInterval != want {
		t.Fatalf("invalid gc interval %v, should be %v", memCache.gcInterval, want)
	}

	// A plain Set must be readable back immediately.
	c.Set("key", []byte("hello world"))
	got, err := c.Get("key")
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != "hello world" {
		t.Fatalf("invalid value(%v), shoud be 'hello world'", got)
	}

	// A TTL entry is readable before expiry and returns ErrExpired after.
	c.SetTTL("key2", []byte("hello world"), 3*time.Second)
	if _, err = c.Get("key2"); err != nil {
		t.Fatal(err)
	}
	time.Sleep(3 * time.Second)
	if _, err = c.Get("key2"); err != ErrExpired {
		t.Fatal("should be expired error, but", err)
	}
}
package main

import (
	"log"
	"net/http"
	"time"

	"ms/sun/servises/file_service"
)

// main boots the file service, registers a trivial liveness endpoint, fires
// one smoke-test request against the file handler, then serves forever.
func main() {
	file_service.Run()
	// NOTE(review): this defer only runs if ListenAndServe returns (i.e. on
	// server error) — it does not run on SIGINT/SIGTERM.
	defer file_service.DeferCleanUp()

	http.HandleFunc("/hi", func(writer http.ResponseWriter, r *http.Request) {
		if _, err := writer.Write([]byte("hi =========")); err != nil {
			log.Println("write /hi response:", err)
		}
	})

	go func() {
		// Give the server a moment to come up before the smoke test.
		time.Sleep(time.Second)
		resp, err := http.Get("http://localhost:1100/post_file/1518506476136010007_180.jpg")
		if err != nil {
			log.Println("smoke test request:", err)
			return
		}
		// Close the body so the underlying connection can be reused.
		resp.Body.Close()
	}()

	// ListenAndServe blocks until the server fails; surface the error
	// instead of silently discarding it.
	if err := http.ListenAndServe(":1100", nil); err != nil {
		log.Println("http server:", err)
	}
	time.Sleep(time.Hour)
}
package restclient

import (
	"bytes"
	"net/http"
	"net/http/httptest"
	"testing"
)

// newEchoServer starts a test HTTP server that answers every request with
// status 200 and the given body. The caller must Close() the returned server.
func newEchoServer(resp string) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		w.Write([]byte(resp))
	}))
}

// TestDebugTransport verifies that a Transport with Debug enabled writes the
// request line, response status and response body to its Output buffer.
func TestDebugTransport(t *testing.T) {
	resp := "{\"message\": \"response\"}"
	s := newEchoServer(resp)
	defer s.Close()

	c := New("foo", "bar", s.URL)
	b := new(bytes.Buffer)
	c.Client.Transport = &Transport{
		Debug:  true,
		Output: b,
	}

	req, err := c.NewRequest("POST", "/request", nil)
	assertNotError(t, err, "")
	err = c.Do(req, nil)
	assertNotError(t, err, "")

	str := b.String()
	assertContains(t, str, "POST /request")
	assertContains(t, str, "200 OK")
	assertContains(t, str, resp)
}

// TestNoDebugTransport verifies that nothing is written to Output when Debug
// is disabled.
func TestNoDebugTransport(t *testing.T) {
	resp := "{\"message\": \"response\"}"
	s := newEchoServer(resp)
	defer s.Close()

	c := New("foo", "bar", s.URL)
	b := new(bytes.Buffer)
	c.Client.Transport = &Transport{
		Debug:  false,
		Output: b,
	}

	req, err := c.NewRequest("POST", "/request", nil)
	assertNotError(t, err, "")
	err = c.Do(req, nil)
	assertNotError(t, err, "")

	assertEquals(t, b.Len(), 0)
}
// Copyright (C) 2020 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package vpplink

import (
	"fmt"

	"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/vmxnet3"
	"github.com/projectcalico/vpp-dataplane/v3/vpplink/types"
)

// CreateVmxnet3 creates a vmxnet3 interface in VPP from the given interface
// description (PCI id, queue counts/sizes, GSO flag). On success it stores
// the new sw_if_index back into intf.SwIfIndex and returns it.
//
// NOTE(review): the two error paths return different sentinel values
// (0 for a PCI-parse failure, ^uint32(0) for an API failure); callers must
// rely on the error, not the returned index — consider unifying.
func (v *VppLink) CreateVmxnet3(intf *types.Vmxnet3Interface) (uint32, error) {
	// Convert the textual PCI address into VPP's packed integer form.
	pci, err := types.GetPciIdInt(intf.PciId)
	if err != nil {
		return 0, fmt.Errorf("error parsing PCI id: %w", err)
	}
	client := vmxnet3.NewServiceClient(v.GetConnection())

	request := &vmxnet3.Vmxnet3Create{
		PciAddr:   pci,
		RxqNum:    uint16(intf.NumRxQueues),
		RxqSize:   uint16(intf.RxQueueSize),
		TxqSize:   uint16(intf.TxQueueSize),
		TxqNum:    uint16(intf.NumTxQueues),
		EnableGso: intf.EnableGso,
	}
	response, err := client.Vmxnet3Create(v.GetContext(), request)
	if err != nil {
		return ^uint32(0), fmt.Errorf("failed to create Vmxnet3 interface: %w", err)
	}
	// Propagate the assigned interface index to the caller's struct as well.
	intf.SwIfIndex = uint32(response.SwIfIndex)
	return uint32(response.SwIfIndex), nil
}
package main

import (
	"fmt"

	"github.com/jackytck/projecteuler/tools"
)

// definitive searches for the minimal pentagonal difference D = Pj - Pk such
// that both Pj+Pk and Pj-Pk are pentagonal, and keeps scanning until it can
// prove no smaller D exists: once a candidate is found, it only stops when
// the gap between consecutive pentagonal numbers P(j)-P(j-1) (the smallest
// possible difference for that j) already exceeds the best D found.
func definitive() int {
	var minDiff int
	var found bool
	var definite bool
	j := 1
	for !definite {
		j++
		jn := tools.PentagonNumber(j)
		for k := j - 1; k > 0; k-- {
			kn := tools.PentagonNumber(k)
			// The k == j-1 difference is the smallest achievable for this j;
			// if it already beats minDiff, no larger j can improve the answer.
			if found && k == j-1 && jn-kn > minDiff {
				definite = true
				break
			}
			sum := jn + kn
			diff := jn - kn
			// Differences only grow as k decreases, so skip once too large.
			if found && diff > minDiff {
				continue
			}
			if tools.IsPentagonNumber(sum) && tools.IsPentagonNumber(diff) {
				if !found || diff < minDiff {
					minDiff = diff
				}
				found = true
				break
			}
		}
	}
	return minDiff
}

// Assume the first possible answer is the best
// fast returns the first qualifying difference found by the scan order,
// without proving minimality.
func fast() int {
	var minDiff int
	for j := 1; true; j++ {
		jn := tools.PentagonNumber(j)
		for k := j - 1; k > 0; k-- {
			kn := tools.PentagonNumber(k)
			sum := jn + kn
			diff := jn - kn
			if tools.IsPentagonNumber(sum) && tools.IsPentagonNumber(diff) {
				minDiff = diff
				return minDiff
			}
		}
	}
	// Unreachable: the loop above only exits via return.
	return 0
}

func main() {
	fmt.Println(fast())
}

// Find the pair of pentagonal numbers, Pj and Pk, for which their sum and
// difference are pentagonal and D = |Pk − Pj| is minimised.

// Note:
// After the best possible answer is found, it is compared with P(j)-P(j-1) for
// ever increasing j, until a bigger difference is found. At this point,
// the definitive answer is found.
package linkedlist // 双向链表节点 type linkedListNode struct { Val interface{} prev *linkedListNode next *linkedListNode }
package exer8 // TODO: your Hailstone, HailstoneSequenceAppend, HailstoneSequenceAllocate functions // ============== RESULTS ============== // goos: linux // goarch: amd64 // pkg: exer8 // BenchmarkHailSeqAppend-12 500000 2039 ns/op // BenchmarkHailSeqAllocate-12 1000000 1189 ns/op // PASS // ok exer8 2.258s func Hailstone(n uint) uint { if n % 2 == 0 { return n / 2 } else { return 3 * n + 1 } } func HailstoneSequenceAppend(n uint) []uint { var intSlice []uint; for n != 1 { intSlice = append(intSlice,n) n = Hailstone(n) } intSlice = append(intSlice,1) return intSlice } func HailstoneSequenceAllocate(n uint) []uint { length := HailstoneLength(n); intSlice := make([]uint,length); for i := uint(0); i < length; i++ { intSlice[i] = n n = Hailstone(n) } return intSlice; } func HailstoneLength(n uint) uint { var length uint = 1; for n != 1 { length++ n = Hailstone(n) } return length }
package scheme

import (
	"time"

	"github.com/yandex-cloud/ydb-go-sdk"
	"github.com/yandex-cloud/ydb-go-sdk/table"
)

// Entry is an appointment record persisted in YDB.
type Entry struct {
	ID         string     `json:"id"`
	DoctorID   string     `json:"doctor_id,omitempty"`
	SpecID     string     `json:"spec_id,omitempty"`
	PlaceID    string     `json:"place_id,omitempty"`
	At         time.Time  `json:"at,omitempty"`
	Date       time.Time  `json:"date,omitempty"`
	Patient    *string    `json:"patient,omitempty"`
	BookedTill *time.Time `json:"booked_till,omitempty"`
}

// ToYDB converts the entry into a YDB struct value. Optional fields
// (patient, booked_till) are only emitted when set.
func (e *Entry) ToYDB() ydb.Value {
	var fields = []ydb.StructValueOption{
		ydb.StructFieldValue("id", ydb.UTF8Value(e.ID)),
		ydb.StructFieldValue("doctor_id", ydb.UTF8Value(e.DoctorID)),
		ydb.StructFieldValue("spec_id", ydb.UTF8Value(e.SpecID)),
		ydb.StructFieldValue("place_id", ydb.UTF8Value(e.PlaceID)),
		ydb.StructFieldValue("at", ydb.DatetimeValue(ydb.Time(e.At).Datetime())),
		// NOTE(review): "date" wraps .Date() in DatetimeValue while "at"
		// wraps .Datetime() — confirm this asymmetry is intentional.
		ydb.StructFieldValue("date", ydb.DatetimeValue(ydb.Time(e.Date).Date())),
	}
	if e.Patient != nil {
		fields = append(fields, ydb.StructFieldValue("patient", ydb.UTF8Value(*e.Patient)))
	}
	if e.BookedTill != nil {
		fields = append(fields, ydb.StructFieldValue("booked_till",
			ydb.DatetimeValue(ydb.Time(*e.BookedTill).Datetime())))
	}
	return ydb.StructValue(fields...)
}

// FromYDB populates the entry from a YDB result row and returns the
// receiver. Missing/zero columns leave the corresponding field unset
// (utf8/datetime/date are column-reading helpers defined elsewhere in
// this package).
func (e *Entry) FromYDB(result *table.Result) *Entry {
	if result.SeekItem("id") {
		e.ID = utf8(result)
	}
	if result.SeekItem("doctor_id") {
		e.DoctorID = utf8(result)
	}
	if result.SeekItem("spec_id") {
		e.SpecID = utf8(result)
	}
	if result.SeekItem("place_id") {
		e.PlaceID = utf8(result)
	}

	t := new(ydb.Time)
	if result.SeekItem("at") {
		_ = t.FromDatetime(datetime(result))
		e.At = time.Time(*t)
	}
	if result.SeekItem("date") {
		_ = t.FromDate(date(result))
		e.Date = time.Time(*t)
	}
	if result.SeekItem("patient") {
		p := utf8(result)
		// Only keep a non-empty patient string.
		if len(p) > 0 {
			e.Patient = &p
		}
	}
	if result.SeekItem("booked_till") {
		d := datetime(result)
		// Zero timestamps mean "not booked"; skip them.
		if d > 0 {
			_ = t.FromDatetime(d)
			b := time.Time(*t)
			if !b.IsZero() {
				e.BookedTill = &b
			}
		}
	}
	return e
}
package main

import (
	"errors"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"github.com/PuerkitoBio/goquery"
	"golang.org/x/text/encoding/charmap"
)

const sheetListUri = "exquery.html"
const sheetUri = "QuerySheet"

// getSheetList scrapes the OKUSON overview page and returns the names of all
// available exercise sheets (the values of the sheet submit buttons).
func getSheetList() ([]string, error) {
	doc, err := goquery.NewDocument(okusonURL + sheetListUri)
	if err != nil {
		return []string{}, err
	}
	buttons := doc.Find("input[type='submit'][name='sheet']")
	sheets := make([]string, buttons.Length())
	buttons.Each(func(i int, btn *goquery.Selection) {
		sheets[i], _ = btn.Attr("value")
	})
	return sheets, nil
}

// exercice is one exercise on a sheet, identified by number, rendered as
// LaTeX (taken from the img alt text) and containing zero or more tasks.
type exercice struct {
	Number int
	LaTeX  string
	Tasks  []task
}

const (
	multipleChoiceTask = "multipleChoice"
	choiceTask         = "choice"
	textTask           = "text"
)

// task is a single question: multiple choice (checkboxes), single choice
// (radio buttons) or free text.
type task struct {
	Type           string
	LaTeX          string
	Answers        []string `json:",omitempty"`
	CorrectAnswers string   `json:",omitempty"`
}

// sheet is a full exercise sheet.
type sheet struct {
	Exercices []exercice
}

// outputFunction renders a list of sheets to a writer.
type outputFunction func([]sheet, io.Writer) error

// getSheet logs in with the given credentials, fetches sheet s as HTML
// (ISO8859-1 encoded) and parses it into a sheet structure. Rows with two
// cells start a new exercise; rows with three cells add a task to the
// current exercise.
func getSheet(s, username, password string) (*sheet, error) {
	form := url.Values{}
	form.Add("id", username)
	form.Add("passwd", password)
	form.Add("format", "HTML")
	form.Add("sheet", s)
	resp, err := http.Post(okusonURL+sheetUri, "application/x-www-form-urlencoded", strings.NewReader(form.Encode()))
	if err != nil {
		return nil, err
	}
	// Close the response body so the HTTP connection can be reused and is
	// not leaked on any return path below.
	defer resp.Body.Close()
	decoder := charmap.ISO8859_1.NewDecoder().Reader(resp.Body)
	doc, err := goquery.NewDocumentFromReader(decoder)
	if err != nil {
		return nil, err
	}
	// The server reports login failures as a German <h1> headline.
	h1 := doc.Find("h1").Text()
	if h1 == "Fehler: Falsches Passwort" || h1 == "Fehler: Ungültige Matrikelnummer" {
		return nil, errors.New("Wrong login credentials")
	}
	currentSheet := sheet{Exercices: []exercice{}}
	var currentExercice *exercice
	doc.Find("tr").Each(func(_ int, tr *goquery.Selection) {
		numChildren := tr.Children().Length()
		if numChildren == 2 {
			// New exercise row: flush the previous exercise first.
			if currentExercice != nil {
				currentSheet.Exercices = append(currentSheet.Exercices, *currentExercice)
			}
			currentExercice = &exercice{}
			currentExercice.Number, _ = strconv.Atoi(tr.Find("td.exnr").Text())
			currentExercice.LaTeX, _ = tr.Find("td > img").Attr("alt")
			currentExercice.Tasks = []task{}
		} else if numChildren == 3 {
			// Task row: ignore it if no exercise has been started yet.
			if currentExercice == nil {
				return
			}
			currentTask := task{}
			currentTask.LaTeX, _ = tr.Find("td.question img").Attr("alt")
			currentTask.CorrectAnswers = tr.Find("span.erg").Text()
			inputs := tr.Find("input")
			inputType, _ := inputs.Attr("type")
			if inputType == "checkbox" || inputType == "radio" {
				if inputType == "checkbox" {
					currentTask.Type = multipleChoiceTask
				} else {
					currentTask.Type = choiceTask
				}
				currentTask.Answers = make([]string, inputs.Length())
				inputs.Each(func(i int, input *goquery.Selection) {
					currentTask.Answers[i] = strings.TrimSpace(input.Parent().Text())
				})
			} else {
				currentTask.Type = textTask
			}
			currentExercice.Tasks = append(currentExercice.Tasks, currentTask)
		}
	})
	// Flush the trailing exercise; the LaTeX check filters out a dangling
	// empty exercise created by stray two-cell rows.
	if currentExercice != nil && currentExercice.LaTeX != "" {
		currentSheet.Exercices = append(currentSheet.Exercices, *currentExercice)
	}
	return &currentSheet, nil
}
package day02

/*
I. CSP (Communicating Sequential Processes)
1. Classic slogan: share memory by communicating, instead of communicating
   by sharing memory.
2. Enables lock-free concurrency without shared memory, and can match the
   performance of asynchronous callbacks.
II. References
1. Blog post: http://www.sohu.com/a/192606128_575744
2. Official docs: https://golang.org/doc/effective_go.html
*/
package list import ( "github.com/devspace-cloud/devspace/cmd/flags" "github.com/devspace-cloud/devspace/pkg/util/factory" "github.com/spf13/cobra" ) // NewListCmd creates a new cobra command func NewListCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command { listCmd := &cobra.Command{ Use: "list", Short: "Lists configuration", Long: ` ####################################################### #################### devspace list #################### ####################################################### `, Args: cobra.NoArgs, } listCmd.AddCommand(newSyncCmd(f, globalFlags)) listCmd.AddCommand(newSpacesCmd(f)) listCmd.AddCommand(newClustersCmd(f)) listCmd.AddCommand(newPortsCmd(f, globalFlags)) listCmd.AddCommand(newProfilesCmd(f)) listCmd.AddCommand(newVarsCmd(f, globalFlags)) listCmd.AddCommand(newDeploymentsCmd(f, globalFlags)) listCmd.AddCommand(newProvidersCmd(f)) listCmd.AddCommand(newContextsCmd(f)) listCmd.AddCommand(newCommandsCmd(f, globalFlags)) listCmd.AddCommand(newNamespacesCmd(f, globalFlags)) return listCmd }
package hrp

import (
	"crypto/tls"
	_ "embed"
	"fmt"
	"net"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"os"
	"os/signal"
	"path/filepath"
	"strings"
	"syscall"
	"testing"
	"time"

	"github.com/gorilla/websocket"
	"github.com/httprunner/funplugin"
	"github.com/jinzhu/copier"
	"github.com/pkg/errors"
	"github.com/rs/zerolog/log"
	"golang.org/x/net/http2"

	"github.com/httprunner/httprunner/v4/hrp/internal/builtin"
	"github.com/httprunner/httprunner/v4/hrp/internal/code"
	"github.com/httprunner/httprunner/v4/hrp/internal/sdk"
	"github.com/httprunner/httprunner/v4/hrp/internal/version"
	"github.com/httprunner/httprunner/v4/hrp/pkg/uixt"
)

// Run starts to run API test with default configs.
func Run(testcases ...ITestCase) error {
	t := &testing.T{}
	return NewRunner(t).SetRequestsLogOn().Run(testcases...)
}

// NewRunner constructs a new runner instance.
// Defaults: failfast on, shared cookie jar for HTTP/1 and HTTP/2 clients,
// 120s request timeout, 2h case timeout, TLS verification disabled
// (InsecureSkipVerify) on all clients, and SIGTERM/SIGINT forwarded to
// interruptSignal.
func NewRunner(t *testing.T) *HRPRunner {
	if t == nil {
		t = &testing.T{}
	}
	jar, _ := cookiejar.New(nil)
	interruptSignal := make(chan os.Signal, 1)
	signal.Notify(interruptSignal, syscall.SIGTERM, syscall.SIGINT)
	return &HRPRunner{
		t:             t,
		failfast:      true, // default to failfast
		genHTMLReport: false,
		httpClient: &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			},
			Jar:     jar, // insert response cookies into request
			Timeout: 120 * time.Second,
		},
		http2Client: &http.Client{
			Transport: &http2.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			},
			Jar:     jar, // insert response cookies into request
			Timeout: 120 * time.Second,
		},
		// use default handshake timeout (no timeout limit) here, enable timeout at step level
		wsDialer: &websocket.Dialer{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
		caseTimeoutTimer: time.NewTimer(time.Hour * 2), // default case timeout to 2 hour
		interruptSignal:  interruptSignal,
	}
}

// HRPRunner drives API test execution; it is configured through the
// chainable Set* methods below and consumed by Run.
type HRPRunner struct {
	t                *testing.T
	failfast         bool
	httpStatOn       bool
	requestsLogOn    bool
	pluginLogOn      bool
	venv             string
	saveTests        bool
	genHTMLReport    bool
	httpClient       *http.Client
	http2Client      *http.Client
	wsDialer         *websocket.Dialer
	uiClients        map[string]*uixt.DriverExt // UI automation clients for iOS and Android, key is udid/serial
	caseTimeoutTimer *time.Timer                // case timeout timer
	interruptSignal  chan os.Signal             // interrupt signal channel
}

// SetClientTransport configures transport of http client for high concurrency load testing
func (r *HRPRunner) SetClientTransport(maxConns int, disableKeepAlive bool, disableCompression bool) *HRPRunner {
	log.Info().
		Int("maxConns", maxConns).
		Bool("disableKeepAlive", disableKeepAlive).
		Bool("disableCompression", disableCompression).
		Msg("[init] SetClientTransport")
	// Replace both transports wholesale; TLS verification stays disabled.
	r.httpClient.Transport = &http.Transport{
		TLSClientConfig:     &tls.Config{InsecureSkipVerify: true},
		DialContext:         (&net.Dialer{}).DialContext,
		MaxIdleConns:        0,
		MaxIdleConnsPerHost: maxConns,
		DisableKeepAlives:   disableKeepAlive,
		DisableCompression:  disableCompression,
	}
	r.http2Client.Transport = &http2.Transport{
		TLSClientConfig:    &tls.Config{InsecureSkipVerify: true},
		DisableCompression: disableCompression,
	}
	r.wsDialer.EnableCompression = !disableCompression
	return r
}

// SetFailfast configures whether to stop running when one step fails.
func (r *HRPRunner) SetFailfast(failfast bool) *HRPRunner {
	log.Info().Bool("failfast", failfast).Msg("[init] SetFailfast")
	r.failfast = failfast
	return r
}

// SetRequestsLogOn turns on request & response details logging.
func (r *HRPRunner) SetRequestsLogOn() *HRPRunner {
	log.Info().Msg("[init] SetRequestsLogOn")
	r.requestsLogOn = true
	return r
}

// SetHTTPStatOn turns on HTTP latency stat.
func (r *HRPRunner) SetHTTPStatOn() *HRPRunner {
	log.Info().Msg("[init] SetHTTPStatOn")
	r.httpStatOn = true
	return r
}

// SetPluginLogOn turns on plugin logging.
func (r *HRPRunner) SetPluginLogOn() *HRPRunner {
	log.Info().Msg("[init] SetPluginLogOn")
	r.pluginLogOn = true
	return r
}

// SetPython3Venv specifies python3 venv.
func (r *HRPRunner) SetPython3Venv(venv string) *HRPRunner {
	log.Info().Str("venv", venv).Msg("[init] SetPython3Venv")
	r.venv = venv
	return r
}

// SetProxyUrl configures the proxy URL, which is usually used to capture HTTP packets for debugging.
// NOTE(review): this replaces httpClient.Transport with a fresh one, so any
// settings applied earlier via SetClientTransport are discarded.
func (r *HRPRunner) SetProxyUrl(proxyUrl string) *HRPRunner {
	log.Info().Str("proxyUrl", proxyUrl).Msg("[init] SetProxyUrl")
	p, err := url.Parse(proxyUrl)
	if err != nil {
		// Invalid proxy URLs are logged and ignored; the runner keeps its
		// previous transport.
		log.Error().Err(err).Str("proxyUrl", proxyUrl).Msg("[init] invalid proxyUrl")
		return r
	}
	r.httpClient.Transport = &http.Transport{
		Proxy:           http.ProxyURL(p),
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	r.wsDialer.Proxy = http.ProxyURL(p)
	return r
}

// SetRequestTimeout configures global request timeout in seconds.
func (r *HRPRunner) SetRequestTimeout(seconds float32) *HRPRunner {
	log.Info().Float32("timeout_seconds", seconds).Msg("[init] SetRequestTimeout")
	r.httpClient.Timeout = time.Duration(seconds*1000) * time.Millisecond
	return r
}

// SetCaseTimeout configures global testcase timeout in seconds.
func (r *HRPRunner) SetCaseTimeout(seconds float32) *HRPRunner {
	log.Info().Float32("timeout_seconds", seconds).Msg("[init] SetCaseTimeout")
	// Replaces the default 2h timer created in NewRunner.
	r.caseTimeoutTimer = time.NewTimer(time.Duration(seconds*1000) * time.Millisecond)
	return r
}

// SetSaveTests configures whether to save summary of tests.
func (r *HRPRunner) SetSaveTests(saveTests bool) *HRPRunner {
	log.Info().Bool("saveTests", saveTests).Msg("[init] SetSaveTests")
	r.saveTests = saveTests
	return r
}

// GenHTMLReport configures whether to gen html report of api tests.
func (r *HRPRunner) GenHTMLReport() *HRPRunner {
	// NOTE(review): log message says "SetgenHTMLReport" — looks like a typo.
	log.Info().Bool("genHTMLReport", true).Msg("[init] SetgenHTMLReport")
	r.genHTMLReport = true
	return r
}

// Run starts to execute one or multiple testcases.
// Run executes the given testcases sequentially, recording results into an
// output summary. The named return err feeds the deferred exit-code log.
func (r *HRPRunner) Run(testcases ...ITestCase) (err error) {
	log.Info().Str("hrp_version", version.VERSION).Msg("start running")
	event := sdk.EventTracking{
		Category: "RunAPITests",
		Action:   "hrp run",
	}
	// report start event
	go sdk.SendEvent(event)
	// report execution timing event
	defer sdk.SendEvent(event.StartTiming("execution"))
	// record execution data to summary
	s := newOutSummary()

	defer func() {
		exitCode := code.GetErrorCode(err)
		log.Info().Int("code", exitCode).Msg("hrp runner exit code")
	}()

	// load all testcases
	testCases, err := LoadTestCases(testcases...)
	if err != nil {
		log.Error().Err(err).Msg("failed to load testcases")
		return err
	}

	// quit all plugins
	defer func() {
		pluginMap.Range(func(key, value interface{}) bool {
			if plugin, ok := value.(funplugin.IPlugin); ok {
				plugin.Quit()
			}
			return true
		})
	}()

	var runErr error
	// run testcase one by one
	for _, testcase := range testCases {
		// each testcase has its own case runner
		caseRunner, err := r.NewCaseRunner(testcase)
		if err != nil {
			log.Error().Err(err).Msg("[Run] init case runner failed")
			return err
		}

		// release UI driver session
		// NOTE(review): this defer is registered inside the loop, so UI
		// sessions are only released when Run returns, not per testcase.
		defer func() {
			for _, client := range r.uiClients {
				client.Driver.DeleteSession()
			}
		}()

		for it := caseRunner.parametersIterator; it.HasNext(); {
			// case runner can run multiple times with different parameters
			// each run has its own session runner
			sessionRunner := caseRunner.NewSession()
			err1 := sessionRunner.Start(it.Next())
			if err1 != nil {
				log.Error().Err(err1).Msg("[Run] run testcase failed")
				runErr = err1
			}
			caseSummary, err2 := sessionRunner.GetSummary()
			s.appendCaseSummary(caseSummary)
			if err2 != nil {
				log.Error().Err(err2).Msg("[Run] get summary failed")
				// Keep the step error as the primary cause when both failed.
				if err1 != nil {
					runErr = errors.Wrap(err1, err2.Error())
				} else {
					runErr = err2
				}
			}
			if runErr != nil && r.failfast {
				break
			}
		}
	}
	s.Time.Duration = time.Since(s.Time.StartAt).Seconds()

	// save summary
	if r.saveTests {
		if err := s.genSummary(); err != nil {
			return err
		}
	}

	// generate HTML report
	if r.genHTMLReport {
		if err := s.genHTMLReport(); err != nil {
			return err
		}
	}

	return runErr
}

// NewCaseRunner creates a new case runner for testcase.
// each testcase has its own case runner
func (r *HRPRunner) NewCaseRunner(testcase *TestCase) (*CaseRunner, error) {
	caseRunner := &CaseRunner{
		testCase:  testcase,
		hrpRunner: r,
		parser:    newParser(),
	}

	// init parser plugin
	plugin, err := initPlugin(testcase.Config.Path, r.venv, r.pluginLogOn)
	if err != nil {
		return nil, errors.Wrap(err, "init plugin failed")
	}
	if plugin != nil {
		caseRunner.parser.plugin = plugin
		caseRunner.rootDir = filepath.Dir(plugin.Path())
	}

	// parse testcase config
	if err := caseRunner.parseConfig(); err != nil {
		return nil, errors.Wrap(err, "parse testcase config failed")
	}

	// set request timeout in seconds
	if testcase.Config.RequestTimeout != 0 {
		r.SetRequestTimeout(testcase.Config.RequestTimeout)
	}
	// set testcase timeout in seconds
	if testcase.Config.CaseTimeout != 0 {
		r.SetCaseTimeout(testcase.Config.CaseTimeout)
	}

	// load plugin info to testcase config
	if plugin != nil {
		pluginPath, _ := locatePlugin(testcase.Config.Path)
		if caseRunner.parsedConfig.PluginSetting == nil {
			pluginContent, err := builtin.ReadFile(pluginPath)
			if err != nil {
				return nil, err
			}
			// Plugin type is derived from the file extension.
			tp := strings.Split(plugin.Path(), ".")
			caseRunner.parsedConfig.PluginSetting = &PluginConfig{
				Path:    pluginPath,
				Content: pluginContent,
				Type:    tp[len(tp)-1],
			}
		}
	}

	return caseRunner, nil
}

// CaseRunner binds one testcase to the runner, its parser, parsed config
// and parameters iterator.
type CaseRunner struct {
	testCase  *TestCase
	hrpRunner *HRPRunner
	parser    *Parser

	parsedConfig       *TConfig
	parametersIterator *ParametersIterator
	rootDir            string // project root dir
}

// parseConfig parses testcase config, stores to parsedConfig.
// parseConfig resolves variables, name, base url, environs and parameters of
// the testcase config into r.parsedConfig, and initializes any configured
// iOS/Android UI clients on the parent HRPRunner.
func (r *CaseRunner) parseConfig() error {
	cfg := r.testCase.Config

	r.parsedConfig = &TConfig{}
	// deep copy config to avoid data racing
	if err := copier.Copy(r.parsedConfig, cfg); err != nil {
		log.Error().Err(err).Msg("copy testcase config failed")
		return err
	}

	// parse config variables
	parsedVariables, err := r.parser.ParseVariables(cfg.Variables)
	if err != nil {
		log.Error().Interface("variables", cfg.Variables).Err(err).Msg("parse config variables failed")
		return err
	}
	r.parsedConfig.Variables = parsedVariables

	// parse config name
	parsedName, err := r.parser.ParseString(cfg.Name, parsedVariables)
	if err != nil {
		return errors.Wrap(err, "parse config name failed")
	}
	r.parsedConfig.Name = convertString(parsedName)

	// parse config base url
	parsedBaseURL, err := r.parser.ParseString(cfg.BaseURL, parsedVariables)
	if err != nil {
		return errors.Wrap(err, "parse config base url failed")
	}
	r.parsedConfig.BaseURL = convertString(parsedBaseURL)

	// merge config environment variables with base_url
	// priority: env base_url > base_url
	if cfg.Environs != nil {
		r.parsedConfig.Environs = cfg.Environs
	} else {
		r.parsedConfig.Environs = make(map[string]string)
	}
	if value, ok := r.parsedConfig.Environs["base_url"]; !ok || value == "" {
		if r.parsedConfig.BaseURL != "" {
			r.parsedConfig.Environs["base_url"] = r.parsedConfig.BaseURL
		}
	}

	// merge config variables with environment variables
	// priority: env > config variables
	for k, v := range r.parsedConfig.Environs {
		r.parsedConfig.Variables[k] = v
	}

	// ensure correction of think time config
	r.parsedConfig.ThinkTimeSetting.checkThinkTime()

	// ensure correction of websocket config
	r.parsedConfig.WebSocketSetting.checkWebSocket()

	// parse testcase config parameters
	parametersIterator, err := r.parser.initParametersIterator(r.parsedConfig)
	if err != nil {
		log.Error().Err(err).
			Interface("parameters", r.parsedConfig.Parameters).
			Interface("parametersSetting", r.parsedConfig.ParametersSetting).
			Msg("parse config parameters failed")
		return errors.Wrap(err, "parse testcase config parameters failed")
	}
	r.parametersIterator = parametersIterator

	// init iOS/Android clients
	if r.hrpRunner.uiClients == nil {
		r.hrpRunner.uiClients = make(map[string]*uixt.DriverExt)
	}
	for _, iosDeviceConfig := range r.parsedConfig.IOS {
		if iosDeviceConfig.UDID != "" {
			// UDID may itself contain variable references; resolve it first.
			udid, err := r.parser.ParseString(iosDeviceConfig.UDID, parsedVariables)
			if err != nil {
				return errors.Wrap(err, "failed to parse ios device udid")
			}
			iosDeviceConfig.UDID = udid.(string)
		}
		device, err := uixt.NewIOSDevice(uixt.GetIOSDeviceOptions(iosDeviceConfig)...)
		if err != nil {
			return errors.Wrap(err, "init iOS device failed")
		}
		client, err := device.NewDriver(nil)
		if err != nil {
			return errors.Wrap(err, "init iOS WDA client failed")
		}
		r.hrpRunner.uiClients[device.UDID] = client
	}
	for _, androidDeviceConfig := range r.parsedConfig.Android {
		if androidDeviceConfig.SerialNumber != "" {
			// Serial may contain variable references as well.
			sn, err := r.parser.ParseString(androidDeviceConfig.SerialNumber, parsedVariables)
			if err != nil {
				return errors.Wrap(err, "failed to parse android device serial")
			}
			androidDeviceConfig.SerialNumber = sn.(string)
		}
		device, err := uixt.NewAndroidDevice(uixt.GetAndroidDeviceOptions(androidDeviceConfig)...)
		if err != nil {
			return errors.Wrap(err, "init Android device failed")
		}
		client, err := device.NewDriver(nil)
		if err != nil {
			return errors.Wrap(err, "init Android client failed")
		}
		r.hrpRunner.uiClients[device.SerialNumber] = client
	}

	return nil
}

// each boomer task initiates a new session
// in order to avoid data racing
func (r *CaseRunner) NewSession() *SessionRunner {
	sessionRunner := &SessionRunner{
		caseRunner: r,
	}
	sessionRunner.resetSession()
	return sessionRunner
}

// SessionRunner is used to run testcase and its steps.
// each testcase has its own SessionRunner instance and share session variables.
type SessionRunner struct {
	caseRunner       *CaseRunner
	sessionVariables map[string]interface{} // variables shared across steps in one session
	// transactions stores transaction timing info.
	// key is transaction name, value is map of transaction type and time, e.g. start time and end time.
	transactions      map[string]map[transactionType]time.Time
	startTime         time.Time                  // record start time of the testcase
	summary           *TestCaseSummary           // record test case summary
	wsConnMap         map[string]*websocket.Conn // save all websocket connections
	inheritWsConnMap  map[string]*websocket.Conn // inherit all websocket connections
	pongResponseChan  chan string                // channel used to receive pong response message
	closeResponseChan chan *wsCloseRespObject    // channel used to receive close response message
}

// resetSession reinitializes all per-session state (variables, timing,
// summary, websocket bookkeeping) so the runner can be reused.
func (r *SessionRunner) resetSession() {
	log.Info().Msg("reset session runner")
	r.sessionVariables = make(map[string]interface{})
	r.transactions = make(map[string]map[transactionType]time.Time)
	r.startTime = time.Now()
	r.summary = newSummary()
	r.wsConnMap = make(map[string]*websocket.Conn)
	r.inheritWsConnMap = make(map[string]*websocket.Conn)
	r.pongResponseChan = make(chan string, 1)
	r.closeResponseChan = make(chan *wsCloseRespObject, 1)
}

// inheritConnection copies all websocket connections (owned and inherited)
// from src into this runner's inherited set, so they can be reused without
// being owned (and closed) by this session.
func (r *SessionRunner) inheritConnection(src *SessionRunner) {
	log.Info().Msg("inherit session runner")
	r.inheritWsConnMap = make(map[string]*websocket.Conn, len(src.wsConnMap)+len(src.inheritWsConnMap))
	for k, v := range src.wsConnMap {
		r.inheritWsConnMap[k] = v
	}
	for k, v := range src.inheritWsConnMap {
		r.inheritWsConnMap[k] = v
	}
}

// Start runs the test steps in sequential order.
// givenVars is used for data driven
func (r *SessionRunner) Start(givenVars map[string]interface{}) error {
	config := r.caseRunner.testCase.Config
	log.Info().Str("testcase", config.Name).Msg("run testcase start")

	// update config variables with given variables
	r.InitWithParameters(givenVars)

	defer func() {
		// close session resource after all steps done or fast fail
		r.releaseResources()
	}()

	// run step in sequential order
	for _, step := range r.caseRunner.testCase.TestSteps {
		// Abort between steps on case timeout or interrupt signal.
		select {
		case <-r.caseRunner.hrpRunner.caseTimeoutTimer.C:
			log.Warn().Msg("timeout in session runner")
			return errors.Wrap(code.TimeoutError, "session runner timeout")
		case <-r.caseRunner.hrpRunner.interruptSignal:
			log.Warn().Msg("interrupted in session runner")
			return errors.Wrap(code.InterruptError, "session runner interrupted")
		default:
			// TODO: parse step struct
			// parse step name
			parsedName, err := r.caseRunner.parser.ParseString(step.Name(), r.sessionVariables)
			if err != nil {
				// Fall back to the raw name if parsing fails.
				parsedName = step.Name()
			}
			stepName := convertString(parsedName)
			log.Info().Str("step", stepName).
				Str("type", string(step.Type())).Msg("run step start")

			// run times of step
			loopTimes := step.Struct().Loops
			if loopTimes < 0 {
				log.Warn().Int("loops", loopTimes).Msg("loop times should be positive, set to 1")
				loopTimes = 1
			} else if loopTimes == 0 {
				loopTimes = 1
			} else if loopTimes > 1 {
				log.Info().Int("loops", loopTimes).Msg("run step with specified loop times")
			}

			// run step with specified loop times
			var stepResult *StepResult
			for i := 1; i <= loopTimes; i++ {
				var loopIndex string
				if loopTimes > 1 {
					log.Info().Int("index", i).Msg("start running step in loop")
					loopIndex = fmt.Sprintf("_loop_%d", i)
				}

				// run step
				stepStartTime := time.Now().Unix()
				// NOTE(review): if step.Run returns a nil result together
				// with an error, the assignments below would panic — confirm
				// Run always returns a non-nil StepResult.
				stepResult, err = step.Run(r)
				stepResult.Name = stepName + loopIndex
				stepResult.StartTime = stepStartTime

				r.updateSummary(stepResult)
			}

			// update extracted variables
			for k, v := range stepResult.ExportVars {
				r.sessionVariables[k] = v
			}

			if err == nil {
				log.Info().Str("step", stepResult.Name).
					Str("type", string(stepResult.StepType)).
					Bool("success", true).
					Interface("exportVars", stepResult.ExportVars).
					Msg("run step end")
				continue
			}

			// failed
			log.Error().Err(err).Str("step", stepResult.Name).
				Str("type", string(stepResult.StepType)).
				Bool("success", false).
				Msg("run step end")

			// interrupted or timeout, abort running
			if errors.Is(err, code.InterruptError) || errors.Is(err, code.TimeoutError) {
				return err
			}

			// check if failfast
			if r.caseRunner.hrpRunner.failfast {
				return errors.Wrap(err, "abort running due to failfast setting")
			}
		}
	}

	log.Info().Str("testcase", config.Name).Msg("run testcase end")
	return nil
}

// ParseStepVariables merges step variables with config variables and session variables
func (r *SessionRunner) ParseStepVariables(stepVariables map[string]interface{}) (map[string]interface{}, error) {
	// override variables
	// step variables > session variables (extracted variables from previous steps)
	overrideVars := mergeVariables(stepVariables, r.sessionVariables)
	// step variables > testcase config variables
	overrideVars = mergeVariables(overrideVars, r.caseRunner.parsedConfig.Variables)

	// parse step variables
	parsedVariables, err := r.caseRunner.parser.ParseVariables(overrideVars)
	if err != nil {
		log.Error().Interface("variables", r.caseRunner.parsedConfig.Variables).
			Err(err).Msg("parse step variables failed")
		return nil, errors.Wrap(err, "parse step variables failed")
	}
	return parsedVariables, nil
}

// InitWithParameters updates session variables with given parameters.
// this is used for data driven
func (r *SessionRunner) InitWithParameters(parameters map[string]interface{}) {
	if len(parameters) == 0 {
		return
	}

	log.Info().Interface("parameters", parameters).Msg("update session variables")
	for k, v := range parameters {
		r.sessionVariables[k] = v
	}
}

// GetSummary finalizes and returns the testcase summary: name, timing,
// exported/config variables, and per-device UI logs (capture log,
// performance data, pcap) for every registered UI client.
func (r *SessionRunner) GetSummary() (*TestCaseSummary, error) {
	caseSummary := r.summary
	caseSummary.Name = r.caseRunner.parsedConfig.Name
	caseSummary.Time.StartAt = r.startTime
	caseSummary.Time.Duration = time.Since(r.startTime).Seconds()
	exportVars := make(map[string]interface{})
	for _, value := range r.caseRunner.parsedConfig.Export {
		exportVars[value] = r.sessionVariables[value]
	}
	caseSummary.InOut.ExportVars = exportVars
	caseSummary.InOut.ConfigVars = r.caseRunner.parsedConfig.Variables

	for uuid, client := range r.caseRunner.hrpRunner.uiClients {
		// add WDA/UIA logs to summary
		logs := map[string]interface{}{
			"uuid": uuid,
		}

		if client.Device.LogEnabled() {
			log, err := client.Driver.StopCaptureLog()
			if err != nil {
				// Return the partially-filled summary together with the error.
				return caseSummary, err
			}
			logs["content"] = log
		}

		// stop performance monitor
		logs["performance"] = client.Device.StopPerf()
		logs["pcap"] = client.Device.StopPcap()

		caseSummary.Logs = append(caseSummary.Logs, logs)
	}
	return caseSummary, nil
}

// updateSummary updates summary of StepResult.
// updateSummary records a step result in the session summary. A testcase-type
// step may carry its nested step results in Data; those are flattened into
// individual records.
func (r *SessionRunner) updateSummary(stepResult *StepResult) {
	switch stepResult.StepType {
	case stepTypeTestCase:
		// record requests of testcase step
		if records, ok := stepResult.Data.([]*StepResult); ok {
			for _, result := range records {
				r.addSingleStepResult(result)
			}
		} else {
			// Data was not a []*StepResult; fall back to recording the step itself
			r.addSingleStepResult(stepResult)
		}
	default:
		r.addSingleStepResult(stepResult)
	}
}

// addSingleStepResult appends one step result to the summary and updates the
// success/failure counters. Any failed step marks the whole summary as failed.
func (r *SessionRunner) addSingleStepResult(stepResult *StepResult) {
	// update summary
	r.summary.Records = append(r.summary.Records, stepResult)
	r.summary.Stat.Total += 1
	if stepResult.Success {
		r.summary.Stat.Successes += 1
	} else {
		r.summary.Stat.Failures += 1
		// update summary result to failed
		r.summary.Success = false
	}
}

// releaseResources releases resources used by session runner
func (r *SessionRunner) releaseResources() {
	// close websocket connections opened by this session
	// (inherited connections in inheritWsConnMap are NOT closed here)
	for _, wsConn := range r.wsConnMap {
		if wsConn != nil {
			log.Info().Str("testcase", r.caseRunner.testCase.Config.Name).Msg("websocket disconnected")
			err := wsConn.Close()
			if err != nil {
				log.Error().Err(err).Msg("websocket disconnection failed")
			}
		}
	}
}

// getWsClient returns the websocket connection for url, preferring this
// session's own connections over inherited ones; nil when none exists.
func (r *SessionRunner) getWsClient(url string) *websocket.Conn {
	if client, ok := r.wsConnMap[url]; ok {
		return client
	}
	if client, ok := r.inheritWsConnMap[url]; ok {
		return client
	}
	return nil
}
package misc

// isLineBreak reports the width in runes of a line break starting at index:
// 1 for "\n", 2 for "\r\n", and -1 when no line break starts there
// (a bare '\r' with no following '\n' is not counted as a break).
func isLineBreak(source []rune, index uint) int {
	if source[index] == '\n' {
		return 1
	}
	if source[index] == '\r' {
		if n := index + 1; n < uint(len(source)) && source[n] == '\n' {
			return 2
		}
	}
	return -1
}

// isSpecialChar reports whether bt is an ASCII punctuation rune.
func isSpecialChar(bt rune) bool {
	switch {
	case bt >= 0x21 && bt <= 0x2F: // ! " # $ % & ' ( ) * + , - . /
		return true
	case bt >= 0x3A && bt <= 0x40: // : ; < = > ? @
		return true
	case bt >= 0x5B && bt <= 0x60: // [ \ ] ^ _ `
		return true
	case bt >= 0x7B && bt <= 0x7E: // { | } ~
		return true
	}
	return false
}

// isSpace reports whether bt is an ASCII space or horizontal tab.
func isSpace(bt rune) bool {
	return bt == ' ' || bt == '\t'
}
package futures

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// positionRiskServiceTestSuite exercises GetPositionRiskService against a
// mocked HTTP transport provided by baseTestSuite.
type positionRiskServiceTestSuite struct {
	baseTestSuite
}

func TestPositionRiskTestService(t *testing.T) {
	suite.Run(t, new(positionRiskServiceTestSuite))
}

// TestGetPositionRisk verifies both the signed request parameters sent to the
// exchange and the parsing of the JSON response into PositionRisk values.
func (s *positionRiskServiceTestSuite) TestGetPositionRisk() {
	// canned exchange response for a single BTCUSDT position
	data := []byte(`[
		{
			"entryPrice": "10359.38000",
			"marginType": "isolated",
			"isAutoAddMargin": "false",
			"isolatedMargin": "3.15899368",
			"leverage": "125",
			"liquidationPrice": "9332.61",
			"markPrice": "10348.27548846",
			"maxNotionalValue": "50000",
			"positionAmt": "0.003",
			"symbol": "BTCUSDT",
			"unRealizedProfit": "-0.03331353",
			"positionSide": "BOTH"
		}
	]`)
	s.mockDo(data, nil)
	defer s.assertDo()

	symbol := "BTCUSDT"
	recvWindow := int64(1000)
	// assert the outgoing request carries the expected signed parameters
	s.assertReq(func(r *request) {
		e := newSignedRequest().setParams(params{
			"symbol":     symbol,
			"recvWindow": recvWindow,
		})
		s.assertRequestEqual(e, r)
	})

	res, err := s.client.NewGetPositionRiskService().Symbol(symbol).
		Do(newContext(), WithRecvWindow(recvWindow))
	r := s.r()
	r.NoError(err)
	r.Len(res, 1)
	e := &PositionRisk{
		EntryPrice:       "10359.38000",
		MarginType:       "isolated",
		IsAutoAddMargin:  "false",
		IsolatedMargin:   "3.15899368",
		Leverage:         "125",
		LiquidationPrice: "9332.61",
		MarkPrice:        "10348.27548846",
		MaxNotionalValue: "50000",
		PositionAmt:      "0.003",
		Symbol:           "BTCUSDT",
		UnRealizedProfit: "-0.03331353",
		PositionSide:     "BOTH",
	}
	s.assertPositionRiskEqual(e, res[0])
}

// assertPositionRiskEqual compares every field of two PositionRisk values,
// reporting the field name on mismatch.
func (s *positionRiskServiceTestSuite) assertPositionRiskEqual(e, a *PositionRisk) {
	r := s.r()
	r.Equal(e.EntryPrice, a.EntryPrice, "EntryPrice")
	r.Equal(e.MarginType, a.MarginType, "MarginType")
	r.Equal(e.IsAutoAddMargin, a.IsAutoAddMargin, "IsAutoAddMargin")
	r.Equal(e.IsolatedMargin, a.IsolatedMargin, "IsolatedMargin")
	r.Equal(e.Leverage, a.Leverage, "Leverage")
	r.Equal(e.LiquidationPrice, a.LiquidationPrice, "LiquidationPrice")
	r.Equal(e.MarkPrice, a.MarkPrice, "MarkPrice")
	r.Equal(e.MaxNotionalValue, a.MaxNotionalValue, "MaxNotionalValue")
	r.Equal(e.PositionAmt, a.PositionAmt, "PositionAmt")
	r.Equal(e.Symbol, a.Symbol, "Symbol")
	r.Equal(e.UnRealizedProfit, a.UnRealizedProfit, "UnRealizedProfit")
	r.Equal(e.PositionSide, a.PositionSide, "PositionSide")
}
package tomltest

import (
	"math"
	"reflect"
)

// CompareTOML compares the given arguments.
//
// The returned value is a copy of Test with Failure set to a (human-readable)
// description of the first element that is unequal. If both arguments are equal
// Test is returned unchanged.
//
// Reflect.DeepEqual could work here, but it won't tell us how the two
// structures are different.
func (r Test) CompareTOML(want, have interface{}) Test {
	if isTomlValue(want) {
		if !isTomlValue(have) {
			return r.fail("Type for key '%s' differs:\n"+
				"  Expected:     %[2]v (%[2]T)\n"+
				"  Your encoder: %[3]v (%[3]T)",
				r.Key, want, have)
		}

		if !deepEqual(want, have) {
			return r.fail("Values for key '%s' differ:\n"+
				"  Expected:     %[2]v (%[2]T)\n"+
				"  Your encoder: %[3]v (%[3]T)",
				r.Key, want, have)
		}
		return r
	}

	switch w := want.(type) {
	case map[string]interface{}:
		return r.cmpTOMLMap(w, have)
	case []interface{}:
		return r.cmpTOMLArrays(w, have)
	default:
		return r.fail("Unrecognized TOML structure: %T", want)
	}
}

// cmpTOMLMap compares two TOML tables, recursing into each value.
func (r Test) cmpTOMLMap(want map[string]interface{}, have interface{}) Test {
	haveMap, ok := have.(map[string]interface{})
	if !ok {
		// BUGFIX: report the actual value received, not the zero-value haveMap
		// (which is always nil here and made the diagnostic useless).
		return r.mismatch("table", want, have)
	}

	// Check that the keys of each map are equivalent.
	for k := range want {
		if _, ok := haveMap[k]; !ok {
			bunk := r.kjoin(k)
			return bunk.fail("Could not find key '%s' in encoder output", bunk.Key)
		}
	}
	for k := range haveMap {
		if _, ok := want[k]; !ok {
			bunk := r.kjoin(k)
			return bunk.fail("Could not find key '%s' in expected output", bunk.Key)
		}
	}

	// Okay, now make sure that each value is equivalent.
	for k := range want {
		if sub := r.kjoin(k).CompareTOML(want[k], haveMap[k]); sub.Failed() {
			return sub
		}
	}
	return r
}

// cmpTOMLArrays compares two TOML arrays element-by-element.
func (r Test) cmpTOMLArrays(want []interface{}, have interface{}) Test {
	// Slice can be decoded to []interface{} for an array of primitives, or
	// []map[string]interface{} for an array of tables.
	//
	// TODO: it would be nicer if it could always decode to []interface{}?
	haveSlice, ok := have.([]interface{})
	if !ok {
		tblArray, ok := have.([]map[string]interface{})
		if !ok {
			return r.mismatch("array", want, have)
		}

		haveSlice = make([]interface{}, len(tblArray))
		for i := range tblArray {
			haveSlice[i] = tblArray[i]
		}
	}

	if len(want) != len(haveSlice) {
		// BUGFIX: the message was missing the newline before "Expected:",
		// unlike every other failure message in this file.
		return r.fail("Array lengths differ for key '%s'\n"+
			"  Expected:     %[2]v (len=%[4]d)\n"+
			"  Your encoder: %[3]v (len=%[5]d)",
			r.Key, want, haveSlice, len(want), len(haveSlice))
	}

	for i := 0; i < len(want); i++ {
		if sub := r.CompareTOML(want[i], haveSlice[i]); sub.Failed() {
			return sub
		}
	}
	return r
}

// reflect.DeepEqual() that deals with NaN != NaN
func deepEqual(want, have interface{}) bool {
	var wantF, haveF float64
	switch f := want.(type) {
	case float32:
		wantF = float64(f)
	case float64:
		wantF = f
	}
	switch f := have.(type) {
	case float32:
		haveF = float64(f)
	case float64:
		haveF = f
	}
	// NaN never compares equal to itself, so treat NaN == NaN as equal here.
	if math.IsNaN(wantF) && math.IsNaN(haveF) {
		return true
	}

	return reflect.DeepEqual(want, have)
}

// isTomlValue reports whether v is a primitive TOML value (not a table/array).
func isTomlValue(v interface{}) bool {
	switch v.(type) {
	case map[string]interface{}, []interface{}:
		return false
	}
	return true
}
package easygraph

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

var fmtSeparator = " "

// formatRawQuery renders a rawQuery as a GraphQL JSON request body.
// A "variables" object is emitted only when the query carries variables.
func formatRawQuery(q *rawQuery) string {
	if len(q.variables) == 0 {
		return formatQuery(q.stringQuery)
	}
	return formatQueryWithvariables(
		strconv.QuoteToASCII(q.stringQuery),
		formatVariables(q.variables))
}

// formatVariables renders variables as a comma-separated list of
// "name":value pairs, JSON-encoding each value.
// NOTE(review): json.Marshal errors are silently discarded here (the value
// renders as empty) — confirm this best-effort behaviour is intended.
func formatVariables(variables []variable) string {
	pairs := make([]string, 0, len(variables))
	for _, v := range variables {
		encoded, _ := json.Marshal(v.Value)
		pairs = append(pairs, fmt.Sprintf(`%s:%v`, strconv.QuoteToASCII(v.Name), string(encoded)))
	}
	return strings.Join(pairs, ",")
}

// formatQueryWithvariables wraps an already-quoted query string and a
// rendered variables list into the final request body.
func formatQueryWithvariables(queryString string, variablesString string) string {
	return `{"query": ` + queryString + `,` + `"variables": {` + variablesString + `}}`
}

// formatQuery wraps a bare query string (quoting it to ASCII) into the
// final request body without variables.
func formatQuery(queryString string) string {
	return `{"query": ` + strconv.QuoteToASCII(queryString) + `}`
}
// This file was generated for SObject Opportunity, API Version v43.0 at 2018-07-30 03:47:48.329363948 -0400 EDT m=+34.673271418 package sobjects import ( "fmt" "strings" ) type Opportunity struct { BaseSObject AccountId string `force:",omitempty"` Amount string `force:",omitempty"` CampaignId string `force:",omitempty"` CloseDate string `force:",omitempty"` CreatedById string `force:",omitempty"` CreatedDate string `force:",omitempty"` CurrentGenerators__c string `force:",omitempty"` DeliveryInstallationStatus__c string `force:",omitempty"` Description string `force:",omitempty"` ExpectedRevenue string `force:",omitempty"` Fiscal string `force:",omitempty"` FiscalQuarter int `force:",omitempty"` FiscalYear int `force:",omitempty"` ForecastCategory string `force:",omitempty"` ForecastCategoryName string `force:",omitempty"` HasOpenActivity bool `force:",omitempty"` HasOpportunityLineItem bool `force:",omitempty"` HasOverdueTask bool `force:",omitempty"` Id string `force:",omitempty"` IsClosed bool `force:",omitempty"` IsDeleted bool `force:",omitempty"` IsPrivate bool `force:",omitempty"` IsWon bool `force:",omitempty"` LastActivityDate string `force:",omitempty"` LastModifiedById string `force:",omitempty"` LastModifiedDate string `force:",omitempty"` LastReferencedDate string `force:",omitempty"` LastViewedDate string `force:",omitempty"` LeadSource string `force:",omitempty"` MainCompetitors__c string `force:",omitempty"` Name string `force:",omitempty"` NextStep string `force:",omitempty"` OrderNumber__c string `force:",omitempty"` OwnerId string `force:",omitempty"` Pricebook2Id string `force:",omitempty"` Probability string `force:",omitempty"` StageName string `force:",omitempty"` SystemModstamp string `force:",omitempty"` TotalOpportunityQuantity float64 `force:",omitempty"` TrackingNumber__c string `force:",omitempty"` Type string `force:",omitempty"` } func (t *Opportunity) ApiName() string { return "Opportunity" } func (t *Opportunity) String() string { 
builder := strings.Builder{} builder.WriteString(fmt.Sprintf("Opportunity #%s - %s\n", t.Id, t.Name)) builder.WriteString(fmt.Sprintf("\tAccountId: %v\n", t.AccountId)) builder.WriteString(fmt.Sprintf("\tAmount: %v\n", t.Amount)) builder.WriteString(fmt.Sprintf("\tCampaignId: %v\n", t.CampaignId)) builder.WriteString(fmt.Sprintf("\tCloseDate: %v\n", t.CloseDate)) builder.WriteString(fmt.Sprintf("\tCreatedById: %v\n", t.CreatedById)) builder.WriteString(fmt.Sprintf("\tCreatedDate: %v\n", t.CreatedDate)) builder.WriteString(fmt.Sprintf("\tCurrentGenerators__c: %v\n", t.CurrentGenerators__c)) builder.WriteString(fmt.Sprintf("\tDeliveryInstallationStatus__c: %v\n", t.DeliveryInstallationStatus__c)) builder.WriteString(fmt.Sprintf("\tDescription: %v\n", t.Description)) builder.WriteString(fmt.Sprintf("\tExpectedRevenue: %v\n", t.ExpectedRevenue)) builder.WriteString(fmt.Sprintf("\tFiscal: %v\n", t.Fiscal)) builder.WriteString(fmt.Sprintf("\tFiscalQuarter: %v\n", t.FiscalQuarter)) builder.WriteString(fmt.Sprintf("\tFiscalYear: %v\n", t.FiscalYear)) builder.WriteString(fmt.Sprintf("\tForecastCategory: %v\n", t.ForecastCategory)) builder.WriteString(fmt.Sprintf("\tForecastCategoryName: %v\n", t.ForecastCategoryName)) builder.WriteString(fmt.Sprintf("\tHasOpenActivity: %v\n", t.HasOpenActivity)) builder.WriteString(fmt.Sprintf("\tHasOpportunityLineItem: %v\n", t.HasOpportunityLineItem)) builder.WriteString(fmt.Sprintf("\tHasOverdueTask: %v\n", t.HasOverdueTask)) builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id)) builder.WriteString(fmt.Sprintf("\tIsClosed: %v\n", t.IsClosed)) builder.WriteString(fmt.Sprintf("\tIsDeleted: %v\n", t.IsDeleted)) builder.WriteString(fmt.Sprintf("\tIsPrivate: %v\n", t.IsPrivate)) builder.WriteString(fmt.Sprintf("\tIsWon: %v\n", t.IsWon)) builder.WriteString(fmt.Sprintf("\tLastActivityDate: %v\n", t.LastActivityDate)) builder.WriteString(fmt.Sprintf("\tLastModifiedById: %v\n", t.LastModifiedById)) 
builder.WriteString(fmt.Sprintf("\tLastModifiedDate: %v\n", t.LastModifiedDate)) builder.WriteString(fmt.Sprintf("\tLastReferencedDate: %v\n", t.LastReferencedDate)) builder.WriteString(fmt.Sprintf("\tLastViewedDate: %v\n", t.LastViewedDate)) builder.WriteString(fmt.Sprintf("\tLeadSource: %v\n", t.LeadSource)) builder.WriteString(fmt.Sprintf("\tMainCompetitors__c: %v\n", t.MainCompetitors__c)) builder.WriteString(fmt.Sprintf("\tName: %v\n", t.Name)) builder.WriteString(fmt.Sprintf("\tNextStep: %v\n", t.NextStep)) builder.WriteString(fmt.Sprintf("\tOrderNumber__c: %v\n", t.OrderNumber__c)) builder.WriteString(fmt.Sprintf("\tOwnerId: %v\n", t.OwnerId)) builder.WriteString(fmt.Sprintf("\tPricebook2Id: %v\n", t.Pricebook2Id)) builder.WriteString(fmt.Sprintf("\tProbability: %v\n", t.Probability)) builder.WriteString(fmt.Sprintf("\tStageName: %v\n", t.StageName)) builder.WriteString(fmt.Sprintf("\tSystemModstamp: %v\n", t.SystemModstamp)) builder.WriteString(fmt.Sprintf("\tTotalOpportunityQuantity: %v\n", t.TotalOpportunityQuantity)) builder.WriteString(fmt.Sprintf("\tTrackingNumber__c: %v\n", t.TrackingNumber__c)) builder.WriteString(fmt.Sprintf("\tType: %v\n", t.Type)) return builder.String() } type OpportunityQueryResponse struct { BaseQuery Records []Opportunity `json:"Records" force:"records"` }
package i3gostatus

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"reflect"
	"strings"
	"time"

	"github.com/rumpelsepp/i3gostatus/lib/config"
	"github.com/rumpelsepp/i3gostatus/lib/model"
	"github.com/rumpelsepp/i3gostatus/lib/registry"
	"github.com/rumpelsepp/i3gostatus/lib/utils"
)

var logger = log.New(os.Stderr, "[i3gostatus] ", log.LstdFlags)

// writeHeader emits the i3bar protocol header followed by the opening
// bracket of the infinite JSON array of status lines.
func writeHeader(options *runtimeOptions) {
	header := model.NewHeader(options.clickEvents)
	fmt.Println(utils.Json(header))
	// i3bar is a streaming JSON parser, so we need to open the endless array.
	fmt.Println("[")
}

// readStdin parses click events from stdin line by line and dispatches each
// to the channel registered for its Instance. Sends are dropped after 50ms
// so a slow module cannot block event dispatch.
// NOTE(review): scanner.Err() is never checked after the loop — a read error
// silently ends event handling; consider logging it.
func readStdin(outChannels map[string]chan *model.I3ClickEvent) {
	var inputStr string
	scanner := bufio.NewScanner(os.Stdin)

	for scanner.Scan() {
		// Trim the endless JSON array stuff. It causes parse errors,
		// since we do line by line JSON parsing here.
		inputStr = strings.Trim(scanner.Text(), "[, ")
		clickEvent := &model.I3ClickEvent{}

		if err := json.Unmarshal([]byte(inputStr), clickEvent); err == nil {
			select {
			case outChannels[clickEvent.Instance] <- clickEvent:
			case <-time.After(50 * time.Millisecond):
				// receiver not ready in time; drop the event
				continue
			}
		}
	}
}

// Run is the main loop: it starts every enabled module in its own goroutine,
// optionally starts the stdin click-event reader, and periodically (or on a
// click event) prints the collected blocks as one i3bar JSON line.
func Run(options *runtimeOptions) {
	configTree := config.Load(options.configPath)
	enabledModules := registry.Initialize(configTree)
	rateLimit := utils.FindFastestModule(configTree)
	rateTimer := time.NewTimer(rateLimit)
	outChannel := make(chan *model.I3BarBlock)
	clickEventChannel := make(chan *model.I3BarBlock)
	// one output slot per module, indexed by the module's position
	outSlice := make([]*model.I3BarBlock, len(enabledModules))
	// The relevant inChannel is only used when click_events is enabled.
	// If click_events is disabled, it is never written to the channel.
	inChannels := make(map[string]chan *model.I3ClickEvent)

	logger.Printf("Runtime options set: %+v", options)

	if len(enabledModules) == 0 {
		fmt.Fprintln(os.Stderr, "No modules are enabled!")
		os.Exit(1)
	}

	writeHeader(options)

	for i, v := range enabledModules {
		v.ParseConfig(configTree)
		// read the module's Instance field reflectively to key the channel map
		id := reflect.ValueOf(v).Elem().FieldByName("Instance").String()
		inChannel := make(chan *model.I3ClickEvent)
		go v.Run(&model.ModuleArgs{inChannel, outChannel, clickEventChannel, i})
		// Add it to the channel map. The click_event handler must be able
		// to somehow find the correct channel.
		inChannels[id] = inChannel
	}

	if options.clickEvents {
		go readStdin(inChannels)
	}

	for {
		select {
		case block := <-outChannel:
			// regular update: stash the block until the next timed flush
			outSlice[block.Index] = block
		case block := <-clickEventChannel:
			// click-triggered update: flush immediately
			outSlice[block.Index] = block
			fmt.Println(fmt.Sprintf("%s,", utils.Json(outSlice)))
		case <-rateTimer.C:
			rateTimer.Reset(rateLimit)
			fmt.Println(fmt.Sprintf("%s,", utils.Json(outSlice)))
		}
	}
}
package main

import (
	"context"
	"io"
	"os"
	"os/signal"
	"path/filepath"
	"syscall"

	"github.com/Azure/testrig/commands"
	"github.com/cpuguy83/strongerrors"
	homedir "github.com/mitchellh/go-homedir"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// main wires up the testrig CLI: it resolves the state dir and config file
// (flags may override the defaults), loads the user config, registers the
// subcommands, and installs a SIGTERM/SIGINT handler that cancels the root
// context.
func main() {
	var (
		stateDir   string
		configFile string
		err        error
	)

	ctx, cancel := context.WithCancel(context.Background())

	cmd := &cobra.Command{
		Use:           filepath.Base(os.Args[0]),
		Short:         "Quickly create and manage test Kubernetes clusters on Azure",
		SilenceUsage:  true,
		SilenceErrors: true,
		// PersistentPreRunE surfaces any error captured during the setup
		// steps below (defaultStateDir / ReadUserConfig); `err` is shared
		// with the enclosing function on purpose.
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			if stateDir == "" {
				if err != nil {
					return err
				}
				return errors.New("state dir not set")
			}
			if err != nil {
				return err
			}

			// BUGFIX: signal.Notify requires a buffered channel; with an
			// unbuffered one a signal arriving before the goroutine is
			// receiving would be dropped (also flagged by `go vet`).
			chSig := make(chan os.Signal, 1)
			signal.Notify(chSig, syscall.SIGTERM, syscall.SIGINT)
			go func() {
				<-chSig
				cancel()
			}()
			return nil
		},
	}

	stateDir, err = defaultStateDir()
	configFile = defaultConifgPath(stateDir)

	flags := cmd.PersistentFlags()
	flags.StringVar(&stateDir, "state-dir", stateDir, "Directory to store state information to")
	flags.StringVar(&configFile, "config", configFile, "Location of user config file")

	// pre-parse flags so --config/--state-dir take effect before the config
	// is read; a parse error here is reported again by cmd.Execute below
	if len(os.Args) > 1 {
		err = flags.Parse(os.Args[1:])
	}

	var cfg commands.UserConfig
	cfg, err = commands.ReadUserConfig(configFile)
	// a missing config file is fine as long as the user didn't point at one
	if err != nil && strongerrors.IsNotFound(err) && configFile == defaultConifgPath(stateDir) {
		err = nil
	}

	cmd.AddCommand(
		commands.Create(ctx, stateDir, &cfg),
		commands.List(ctx, stateDir),
		commands.Inspect(ctx, stateDir),
		commands.SSH(ctx, stateDir),
		commands.KubeConfig(ctx, stateDir),
		commands.Remove(ctx, stateDir, &cfg),
	)

	if err := cmd.Execute(); err != nil {
		io.WriteString(os.Stderr, err.Error()+"\n")
		os.Exit(1)
	}
}

// defaultStateDir returns ~/.testrig, or an error when the user home
// directory cannot be determined.
func defaultStateDir() (string, error) {
	homeDir, err := homedir.Dir()
	if err != nil {
		return "", errors.Wrap(err, "state dir not provided and could not determine default location based on user home dir")
	}
	return filepath.Join(homeDir, ".testrig"), nil
}

// defaultConifgPath returns the config.toml path inside stateDir, or ""
// when stateDir is empty.
func defaultConifgPath(stateDir string) string {
	if stateDir == "" {
		return ""
	}
	return filepath.Join(stateDir, "config.toml")
}
package testproxy

import (
	"net"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestProxyWithBounce starts an echo ("bounce") server, runs the proxy in
// front of it, and verifies that data written to the proxy comes back
// unchanged, and that reads fail once the proxy is closed.
func TestProxyWithBounce(t *testing.T) {
	assert := assert.New(t)
	ln, err := net.Listen("tcp", "localhost:23456")
	assert.NoError(err, "Could not create bouncer listener")

	// echo server: copies everything it reads back to the client
	go func() {
		conn, err := ln.Accept()
		assert.NoErrorf(err, "Could not accept connection: %v", err)
		if err != nil {
			// BUGFIX: without this return a failed Accept left conn nil and
			// the loop below panicked instead of reporting the assert failure.
			return
		}
		for {
			data := make([]byte, 1024)
			n, err := conn.Read(data)
			if n > 0 {
				_, err = conn.Write(data[:n])
			}
			if err != nil {
				return
			}
		}
	}()

	proxy := T{FromAddr: "localhost:12345", ToAddr: "localhost:23456"}
	proxy.Run()

	c, err := net.Dial("tcp", "localhost:12345")
	assert.NoErrorf(err, "Error connecting to proxy: %v", err)

	b := []byte("This is a test string")
	n, err := c.Write(b)
	assert.NoErrorf(err, "Error writing to proxy conn: %v", err)

	data := make([]byte, 1024)
	n, err = c.Read(data)
	assert.NoErrorf(err, "Error reading from proxy conn: %v", err)
	assert.Equal(b, data[:n], "Did not receive exact copy of the sent data")

	proxy.Close()
	n, err = c.Read(b)
	// BUGFIX: message typo "raise and error" -> "raise an error"
	assert.Errorf(err, "Read from closed proxy should raise an error: %v", err)
}
package renderer

import (
	"bytes"
	"fmt"
	"math"
	"sort"
	"strings"

	g2s "github.com/ONSdigital/dp-map-renderer/geojson2svg"
	"github.com/ONSdigital/dp-map-renderer/htmlutil"
	"github.com/ONSdigital/dp-map-renderer/models"
	"github.com/paulmach/go.geojson"
)

// RegionClassName is the name of the class assigned to all map regions (denoted by features in the input topology)
const RegionClassName = "mapRegion"

// MissingDataText is the text appended to the title of a region that has missing data
const MissingDataText = "data unavailable"

// MissingDataPattern is the fmt template used to generate the pattern used for regions with missing data
// (the single %s is the id prefix of the map being rendered)
const MissingDataPattern = `<pattern id="%s-nodata" width="20" height="20" patternUnits="userSpaceOnUse">
	<g fill="#6D6E72">
		<polygon points="00 00 02 00 00 02 00 00"></polygon>
		<polygon points="04 00 06 00 00 06 00 04"></polygon>
		<polygon points="08 00 10 00 00 10 00 08"></polygon>
		<polygon points="12 00 14 00 00 14 00 12"></polygon>
		<polygon points="16 00 18 00 00 18 00 16"></polygon>
		<polygon points="20 00 20 02 02 20 00 20"></polygon>
		<polygon points="20 04 20 06 06 20 04 20"></polygon>
		<polygon points="20 08 20 10 10 20 08 20"></polygon>
		<polygon points="20 12 20 14 14 20 12 20"></polygon>
		<polygon points="20 16 20 18 18 20 16 20"></polygon>
	</g>
</pattern>`

// pngConverter, when set via UsePNGConverter, generates fallback png images for svgs.
var pngConverter g2s.PNGConverter

// UsePNGConverter assigns a PNGConverter that will be used to generate fallback png images for svgs.
func UsePNGConverter(p g2s.PNGConverter) {
	pngConverter = p
}

// valueAndColour represents a choropleth data point, which has both a numeric value and an associated colour
type valueAndColour struct {
	value  float64
	colour string
}

// SVGRequest wraps a models.RenderRequest and allows caching of expensive calculations (such as converting topojson to geojson)
type SVGRequest struct {
	request *models.RenderRequest
	geoJSON *geojson.FeatureCollection
	svg     *g2s.SVG

	ViewBoxWidth        float64      // the width dimension of the svg (for the viewBox). DefaultWidth if provided, otherwise the average of min and max width, falling back to 400 if nothing specified
	ViewBoxHeight       float64      // the height dimension of the svg (for the viewBox). Relative to width.
	breaks              []*breakInfo // sorted breaks
	referencePos        float64      // the relative position of the reference tick
	VerticalLegendWidth float64      // the view box width of the vertical legend
	verticalKeyOffset   float64      // offset for the position of the key. I.e. the middle of the key should be positioned in the middle of the legend, plus the offset.
	responsiveSize      bool         // if true, the svg should scale with the size of the page. Otherwise the size is fixed.
}

// PrepareSVGRequest wraps the request in an SVGRequest, caching expensive calculations up front
func PrepareSVGRequest(request *models.RenderRequest) *SVGRequest {
	geoJSON := getGeoJSON(request)
	svg := g2s.New()
	width, height := 0.0, 0.0
	if geoJSON != nil {
		svg.AppendFeatureCollection(geoJSON)
		width, height = getViewBoxDimensions(svg, request)
	}
	// responsive sizing only applies when the caller gave an explicit min/max width range
	responsiveSize := request.MinWidth > 0 && request.MaxWidth > 0

	svgRequest := &SVGRequest{
		request:        request,
		geoJSON:        geoJSON,
		svg:            svg,
		ViewBoxWidth:   width,
		ViewBoxHeight:  height,
		responsiveSize: responsiveSize,
	}

	// legend-related values are only meaningful when choropleth breaks exist
	if request.Choropleth != nil && len(request.Choropleth.Breaks) > 0 {
		svgRequest.breaks, svgRequest.referencePos = getSortedBreakInfo(request)
		svgRequest.VerticalLegendWidth, svgRequest.verticalKeyOffset = getVerticalLegendWidth(request, svgRequest.breaks)
	}
	return svgRequest
}

// RenderSVG generates an SVG map for the given request
func RenderSVG(svgRequest *SVGRequest) string {
	geoJSON := svgRequest.geoJSON
	if geoJSON == nil {
		return ""
	}
	request := svgRequest.request
	vbWidth := svgRequest.ViewBoxWidth
	vbHeight := svgRequest.ViewBoxHeight

	// annotate the features with ids, css classes, colours and titles
	id := idPrefix(request)
	setFeatureIDs(geoJSON.Features, request.Geography.IDProperty, id+"-")
	setClassProperty(geoJSON.Features, RegionClassName)
	setChoroplethColoursAndTitles(geoJSON.Features, request)

	converter := pngConverter
	if !request.IncludeFallbackPng {
		converter = nil
	}
	// the pattern must be collapsed onto one line for embedding as an svg attribute-safe string
	missingDataPattern := strings.Replace(fmt.Sprintf(MissingDataPattern, id), "\n", "", -1)
	return svgRequest.svg.DrawWithProjection(vbWidth, vbHeight, g2s.MercatorProjection,
		g2s.UseProperties([]string{"style", "class"}),
		g2s.WithTitles(request.Geography.NameProperty),
		g2s.WithAttribute("id", mapID(request)+"-svg"),
		g2s.WithAttribute("viewBox", fmt.Sprintf("0 0 %.f %.f", vbWidth, vbHeight)),
		g2s.WithPNGFallback(converter),
		g2s.WithPattern(missingDataPattern),
		g2s.WithResponsiveSize(svgRequest.responsiveSize),
	)
}

// getGeoJSON performs a sanity check for missing properties, then converts the topojson to geojson
func getGeoJSON(request *models.RenderRequest) *geojson.FeatureCollection {
	// sanity check
	if request.Geography == nil ||
		request.Geography.Topojson == nil ||
		len(request.Geography.Topojson.Arcs) == 0 ||
		len(request.Geography.Topojson.Objects) == 0 {
		return nil
	}
	return request.Geography.Topojson.ToGeoJSON()
}

// getViewBoxDimensions assigns the viewbox a fixed width (400) and calculates the height relative to this,
// returning (width, height)
func getViewBoxDimensions(svg *g2s.SVG, request *models.RenderRequest) (float64, float64) {
	width := request.DefaultWidth
	if width <= 0.0 { // average the min and max width
		width = (request.MinWidth + request.MaxWidth) / 2
	}
	if width <= 0.0 { // use a default width of 400
		width = 400.0
	}
	height := svg.GetHeightForWidth(width, g2s.MercatorProjection)
	return width, height
}

// setFeatureIDs looks in each Feature for a property with the given idProperty, using it as the feature id.
func setFeatureIDs(features []*geojson.Feature, idProperty string, prefix string) { for _, feature := range features { id, isString := feature.Properties[idProperty].(string) if isString && len(id) > 0 { feature.ID = prefix + id } else { id, isString := feature.ID.(string) if isString && len(id) > 0 { feature.ID = prefix + id } } } } // setClassProperty populates a class property in each feature with the given class name, appending any existing class property. func setClassProperty(features []*geojson.Feature, className string) { for _, feature := range features { appendProperty(feature, "class", className) } } // appendProperty sets a property by the given name, appending any existing value // (appending existing value rather than the new value so that, in the case of style, we can ensure there's a semi-colon between values) func appendProperty(feature *geojson.Feature, propertyName string, value string) { s := value if original, exists := feature.Properties[propertyName]; exists { s = fmt.Sprintf("%s %v", value, original) } feature.Properties[propertyName] = s } // setChoroplethColoursAndTitles creates a mapping from the id of a data row to its value and colour, // then iterates through the features assigning a title and style for the colour. 
// setChoroplethColoursAndTitles assigns each feature a fill style and a title:
// features with data get their break colour and "<name> <prefix><value><suffix>";
// features without data get the missing-data pattern fill and "<name> data unavailable".
func setChoroplethColoursAndTitles(features []*geojson.Feature, request *models.RenderRequest) {
	choropleth := request.Choropleth
	if choropleth == nil || request.Data == nil {
		return
	}
	id := idPrefix(request)
	dataMap := mapDataToColour(request.Data, choropleth, id+"-")
	// regions without data are filled with the nodata pattern defined elsewhere in the svg
	missingValueStyle := "fill: url(#" + id + "-nodata);"
	for _, feature := range features {
		style := missingValueStyle
		title, ok := feature.Properties[request.Geography.NameProperty]
		if !ok {
			title = ""
		}
		if vc, exists := dataMap[feature.ID]; exists {
			style = "fill: " + vc.colour + ";"
			title = fmt.Sprintf("%v %s%g%s", title, choropleth.ValuePrefix, vc.value, choropleth.ValueSuffix)
		} else {
			title = fmt.Sprintf("%v %s", title, MissingDataText)
		}
		// the (possibly augmented) title is stored back in the name property for later rendering
		feature.Properties[request.Geography.NameProperty] = title
		appendProperty(feature, "style", style)
	}
}

// mapDataToColour creates a map of DataRow.ID=valueAndColour
func mapDataToColour(data []*models.DataRow, choropleth *models.Choropleth, prefix string) map[interface{}]valueAndColour {
	// breaks sorted descending so getColour can return on the first matching lower bound
	breaks := sortBreaks(choropleth.Breaks, false)

	dataMap := make(map[interface{}]valueAndColour)
	for _, row := range data {
		dataMap[prefix+row.ID] = valueAndColour{value: row.Value, colour: getColour(row.Value, breaks)}
	}
	return dataMap
}

// getColour returns the colour for the given value. If the value is below the lowest lowerbound, returns the colour for the lowest.
// Expects breaks sorted descending by LowerBound (as produced by mapDataToColour).
func getColour(value float64, breaks []*models.ChoroplethBreak) string {
	for _, b := range breaks {
		if value >= b.LowerBound {
			return b.Colour
		}
	}
	return breaks[len(breaks)-1].Colour
}

// sortBreaks returns a copy of the breaks slice, sorted ascending or descending according to asc.
// sortBreaks returns a copy of the breaks slice, sorted ascending (asc=true)
// or descending (asc=false) by LowerBound; the input slice is not modified.
func sortBreaks(breaks []*models.ChoroplethBreak, asc bool) []*models.ChoroplethBreak {
	c := make([]*models.ChoroplethBreak, len(breaks))
	copy(c, breaks)
	sort.Slice(c, func(i, j int) bool {
		if asc {
			return c[i].LowerBound < c[j].LowerBound
		}
		return c[i].LowerBound > c[j].LowerBound
	})
	return c
}

// RenderHorizontalKey creates an SVG containing a horizontally-oriented key for the choropleth
func RenderHorizontalKey(svgRequest *SVGRequest) string {
	geoJSON := svgRequest.geoJSON
	if geoJSON == nil {
		return ""
	}
	request := svgRequest.request

	keyInfo := getHorizontalKeyInfo(svgRequest.ViewBoxWidth, svgRequest)

	id := idPrefix(request)
	missingId := id + "-horizontal"
	content := bytes.NewBufferString("")
	ticks := bytes.NewBufferString("")
	// the nodata pattern needs its own id so it doesn't clash with the map's copy
	fmt.Fprintf(content, "<defs>")
	fmt.Fprintf(content, MissingDataPattern, missingId)
	fmt.Fprintf(content, "</defs>")
	keyClass := getKeyClass(request, "horizontal")
	vbHeight := 90.0
	svgAttributes := fmt.Sprintf(`id="%s-legend-horizontal-svg" class="%s" viewBox="0 0 %.f %.f"`, id, keyClass, svgRequest.ViewBoxWidth, vbHeight)
	if !svgRequest.responsiveSize {
		// fixed-size rendering: pin explicit width/height attributes
		svgAttributes += fmt.Sprintf(` width="%.f" height="%.f"`, svgRequest.ViewBoxWidth, vbHeight)
	}
	fmt.Fprintf(content, `<g id="%s-legend-horizontal-container">`, id)
	writeHorizontalKeyTitle(request, svgRequest.ViewBoxWidth, content)
	fmt.Fprintf(content, `<g id="%s-legend-horizontal-key" transform="translate(%f, 20)">`, id, keyInfo.keyX)
	left := 0.0
	breaks := svgRequest.breaks
	// draw one coloured rect per break, collecting tick marks separately so
	// they can be appended after (on top of) all rects
	for i := 0; i < len(breaks); i++ {
		width := breaks[i].RelativeSize * keyInfo.keyWidth
		fmt.Fprintf(content, `<rect class="keyColour" height="8" width="%f" x="%f" style="stroke-width: 0.5; stroke: black; fill: %s;">`, width, left, breaks[i].Colour)
		content.WriteString(`</rect>`)
		writeHorizontalKeyTick(ticks, left, breaks[i].LowerBound)
		left += width
	}
	writeHorizontalKeyTick(ticks, left, breaks[len(breaks)-1].UpperBound)
	if len(request.Choropleth.ReferenceValueText) > 0 {
		writeHorizontalKeyRefTick(ticks, keyInfo, svgRequest)
	}
	fmt.Fprint(content, ticks.String())
	writeKeyMissingPattern(content, missingId, 0.0, 55.0, request.FontSize)
	content.WriteString(`</g></g>`)

	if pngConverter == nil || request.IncludeFallbackPng == false {
		return fmt.Sprintf("<svg %s>%s</svg>", svgAttributes, content)
	}
	return pngConverter.IncludeFallbackImage(svgAttributes, content.String(), svgRequest.ViewBoxWidth, vbHeight)
}

// RenderVerticalKey creates an SVG containing a vertically-oriented key for the choropleth
func RenderVerticalKey(svgRequest *SVGRequest) string {
	geoJSON := svgRequest.geoJSON
	if geoJSON == nil {
		return ""
	}
	request := svgRequest.request
	svgHeight := svgRequest.ViewBoxHeight
	breaks := svgRequest.breaks

	// the colour bar occupies the middle 80% of the svg height
	keyHeight := svgHeight * 0.8

	keyWidth, offset := svgRequest.VerticalLegendWidth, svgRequest.verticalKeyOffset

	id := idPrefix(request)
	content := bytes.NewBufferString("")
	ticks := bytes.NewBufferString("")
	missingId := id + "-vertical"
	fmt.Fprintf(content, "<defs>")
	fmt.Fprintf(content, MissingDataPattern, missingId)
	fmt.Fprintf(content, "</defs>")
	keyClass := getKeyClass(request, "vertical")
	attributes := fmt.Sprintf(`id="%s-legend-vertical-svg" class="%s" viewBox="0 0 %.f %.f"`, id, keyClass, keyWidth, svgHeight)
	if !svgRequest.responsiveSize {
		attributes += fmt.Sprintf(` width="%.f" height="%.f"`, keyWidth, svgHeight)
	}
	fmt.Fprintf(content, `<g id="%s-legend-vertical-container">`, id)
	writeVerticalLegendTitle(content, keyWidth, svgHeight, request)
	fmt.Fprintf(content, `<g id="%s-legend-vertical-key" transform="translate(%f, %f)">`, id, (keyWidth+offset)/2, svgHeight*0.1)
	position := 0.0
	// draw from the bottom up: position accumulates from 0 and is subtracted
	// from keyHeight so the first (lowest) break sits at the bottom
	for i := 0; i < len(breaks); i++ {
		height := breaks[i].RelativeSize * keyHeight
		adjustedPosition := keyHeight - position
		fmt.Fprintf(content, `<rect class="keyColour" height="%f" width="8" y="%f" style="stroke-width: 0.5; stroke: black; fill: %s;">`, height, adjustedPosition-height, breaks[i].Colour)
		content.WriteString(`</rect>`)
		writeVerticalKeyTick(ticks, adjustedPosition, breaks[i].LowerBound)
		position += height
	}
	writeVerticalKeyTick(ticks, keyHeight-position, breaks[len(breaks)-1].UpperBound)
	if len(request.Choropleth.ReferenceValueText) > 0 {
		writeVerticalKeyRefTick(ticks, keyHeight-(keyHeight*svgRequest.referencePos), request)
	}
	fmt.Fprint(content, ticks.String())
	content.WriteString(`</g>`)
	// centre the missing-data swatch horizontally below the key
	xPos := (keyWidth - float64(htmlutil.GetApproximateTextWidth(MissingDataText, request.FontSize)+12)) / 2
	writeKeyMissingPattern(content, missingId, xPos, svgHeight*0.95, request.FontSize)
	content.WriteString(`</g>`)

	if pngConverter == nil || request.IncludeFallbackPng == false {
		return fmt.Sprintf("<svg %s>%s</svg>", attributes, content)
	}
	return pngConverter.IncludeFallbackImage(attributes, content.String(), keyWidth, svgHeight)
}

// writeVerticalLegendTitle writes the prefix/suffix title centred near the top
// of the vertical legend, constraining its rendered length to textLen.
func writeVerticalLegendTitle(content *bytes.Buffer, keyWidth float64, svgHeight float64, request *models.RenderRequest) (int, error) {
	text := request.Choropleth.ValuePrefix + " " + request.Choropleth.ValueSuffix
	textLen := htmlutil.GetApproximateTextWidth(text, request.FontSize)
	return fmt.Fprintf(content, `<text x="%f" y="%f" dy=".5em" style="text-anchor: middle;" class="keyText" textLength="%.f" lengthAdjust="spacingAndGlyphs">%s</text>`,
		keyWidth/2, svgHeight*0.05, textLen, text)
}

// getKeyClass returns the class of the map key - with an additional class if both keys are rendered.
func getKeyClass(request *models.RenderRequest, keyType string) string { keyClass := "map_key_" + keyType if hasVerticalLegend(request) && hasHorizontalLegend(request) { keyClass = keyClass + " " + keyClass + "_both" } return keyClass } // hasVerticalLegend returns true if the request includes a vertical legend func hasVerticalLegend(request *models.RenderRequest) bool { return request.Choropleth != nil && (request.Choropleth.VerticalLegendPosition == models.LegendPositionBefore || request.Choropleth.VerticalLegendPosition == models.LegendPositionAfter) } // hasHorizontalLegend returns true if the request includes a horizontal legend func hasHorizontalLegend(request *models.RenderRequest) bool { return request.Choropleth != nil && (request.Choropleth.HorizontalLegendPosition == models.LegendPositionBefore || request.Choropleth.HorizontalLegendPosition == models.LegendPositionAfter) } // getVerticalLegendWidth determines the approximate width required for the legend // it also returns an offset for the position of the key. I.e. the middle of the key should be positioned in the middle of the legend, plus the offset. func getVerticalLegendWidth(request *models.RenderRequest, breaks []*breakInfo) (float64, float64) { missingWidth := htmlutil.GetApproximateTextWidth(MissingDataText, request.FontSize) + 12 titleWidth := htmlutil.GetApproximateTextWidth(request.Choropleth.ValuePrefix+" "+request.Choropleth.ValueSuffix, request.FontSize) maxWidth := math.Max(float64(missingWidth), float64(titleWidth)) keyWidth, offset := getVerticalTickTextWidth(request, breaks) return math.Max(maxWidth, keyWidth) + 10, offset } // getVerticalTickTextWidth calculates the approximate total width of the ticks on both sides of the key, allowing 38 pixels for the colour bar // it also returns an offset for the position of the key. I.e. the middle of the key should be positioned in the middle of the legend, plus the offset. 
func getVerticalTickTextWidth(request *models.RenderRequest, breaks []*breakInfo) (float64, float64) { maxTick := 0.0 for _, b := range breaks { lbound := htmlutil.GetApproximateTextWidth(fmt.Sprintf("%g", b.LowerBound), request.FontSize) if lbound > maxTick { maxTick = lbound } ubound := htmlutil.GetApproximateTextWidth(fmt.Sprintf("%g", b.UpperBound), request.FontSize) if ubound > maxTick { maxTick = ubound } } refTick := htmlutil.GetApproximateTextWidth(request.Choropleth.ReferenceValueText, request.FontSize) refValue := htmlutil.GetApproximateTextWidth(fmt.Sprintf("%g", request.Choropleth.ReferenceValue), request.FontSize) refWidth := math.Max(refTick, refValue) return maxTick + refWidth + 38.0, maxTick - refWidth } // writeHorizontalKeyTitle write the title above the key for a horizontal legend, ensuring that the text fits within the svg func writeHorizontalKeyTitle(request *models.RenderRequest, svgWidth float64, content *bytes.Buffer) { textAdjust := "" titleText := request.Choropleth.ValuePrefix + " " + request.Choropleth.ValueSuffix titleTextLen := htmlutil.GetApproximateTextWidth(titleText, request.FontSize) if titleTextLen >= svgWidth { textAdjust = fmt.Sprintf(` textLength="%.f" lengthAdjust="spacingAndGlyphs"`, svgWidth-2) } fmt.Fprintf(content, `<text x="%f" y="6" dy=".5em" style="text-anchor: middle;" class="keyText"%s>%s</text>`, svgWidth/2.0, textAdjust, titleText) } // writeHorizontalKeyTick draws a vertical line (the tick) at the given position, labelling it with the given value func writeHorizontalKeyTick(w *bytes.Buffer, xPos float64, value float64) { fmt.Fprintf(w, `<g class="map__tick" transform="translate(%f, 0)">`, xPos) w.WriteString(`<line x2="0" y2="15" style="stroke-width: 1; stroke: Black;"></line>`) fmt.Fprintf(w, `<text x="0" y="18" dy=".74em" style="text-anchor: middle;" class="keyText">%g</text>`, value) w.WriteString(`</g>`) } // writeVerticalKeyTick draws a horizontal line (the tick) at the given position, labelling it with the 
given value func writeVerticalKeyTick(w *bytes.Buffer, yPos float64, value float64) { fmt.Fprintf(w, `<g class="map__tick" transform="translate(0, %f)">`, yPos) w.WriteString(`<line x1="8" x2="-15" style="stroke-width: 1; stroke: Black;"></line>`) fmt.Fprintf(w, `<text x="-18" y="0" dy="0.32em" style="text-anchor: end;" class="keyText">%g</text>`, value) w.WriteString(`</g>`) } // writeHorizontalKeyRefTick draws a vertical line at the correct position for the reference value, labelling it with the reference value and reference text. func writeHorizontalKeyRefTick(w *bytes.Buffer, keyInfo *horizontalKeyInfo, svgRequest *SVGRequest) { xPos := keyInfo.keyWidth * svgRequest.referencePos svgWidth := svgRequest.ViewBoxWidth fmt.Fprintf(w, `<g class="map__tick" transform="translate(%f, 0)">`, xPos) w.WriteString(`<line x2="0" y1="8" y2="45" style="stroke-width: 1; stroke: DimGrey;"></line>`) textAttr := "" if keyInfo.referenceTextLeftLen > xPos+keyInfo.keyX { // adjust the text length so it will fit textAttr = fmt.Sprintf(` textLength="%.f" lengthAdjust="spacingAndGlyphs"`, xPos+keyInfo.keyX-1) } fmt.Fprintf(w, `<text x="0" y="33" dx="-0.1em" dy=".74em" style="text-anchor: end; fill: DimGrey;" class="keyText"%s>%s</text>`, textAttr, keyInfo.referenceTextLeft) textAttr = "" if keyInfo.referenceTextRightLen > svgWidth-(xPos+keyInfo.keyX) { // adjust the text length so it will fit textAttr = fmt.Sprintf(` textLength="%.f" lengthAdjust="spacingAndGlyphs"`, svgWidth-(xPos+keyInfo.keyX)-2) } fmt.Fprintf(w, `<text x="0" y="33" dx="0.1em" dy=".74em" style="text-anchor: start; fill: DimGrey;" class="keyText"%s>%s</text>`, textAttr, keyInfo.referenceTextRight) fmt.Fprintf(w, `</g>`) } // writeVerticalKeyRefTick draws a horizontal line at the correct position for the reference value, labelling it with the reference value and reference text. 
func writeVerticalKeyRefTick(w *bytes.Buffer, yPos float64, request *models.RenderRequest) { text, value := request.Choropleth.ReferenceValueText, request.Choropleth.ReferenceValue textLen := htmlutil.GetApproximateTextWidth(text, request.FontSize) fmt.Fprintf(w, `<g class="map__tick" transform="translate(0, %f)">`, yPos) w.WriteString(`<line x2="45" x1="8" style="stroke-width: 1; stroke: DimGrey;"></line>`) fmt.Fprintf(w, `<text x="18" dy="-.32em" style="text-anchor: start; fill: DimGrey;" class="keyText" textLength="%.f" lengthAdjust="spacingAndGlyphs">%s</text>`, textLen, text) fmt.Fprintf(w, `<text x="18" dy="1em" style="text-anchor: start; fill: DimGrey;" class="keyText">%g</text>`, value) w.WriteString(`</g>`) } // writeKeyMissingPattern draws a square filled with the missing pattern at the given position, labelling it with MissingDataText func writeKeyMissingPattern(w *bytes.Buffer, id string, xPos float64, yPos float64, fontSize int) { fmt.Fprintf(w, `<g class="missingPattern" transform="translate(%f, %f)">`, xPos, yPos) fmt.Fprintf(w, `<rect class="keyColour" height="8" width="8" style="stroke-width: 0.8; stroke: black; fill: url(#%s-nodata);"></rect>`, id) fmt.Fprintf(w, `<text x="12" dy=".55em" style="text-anchor: start; fill: DimGrey;" class="keyText" textLength="%.f" lengthAdjust="spacingAndGlyphs">%s</text>`, htmlutil.GetApproximateTextWidth(MissingDataText, fontSize), MissingDataText) w.WriteString(`</g>`) } // breakInfo contains information about the breaks (the boundaries between colours)- lowerBound, upperBound and relative size type breakInfo struct { LowerBound float64 UpperBound float64 RelativeSize float64 Colour string } // getSortedBreakInfo returns information about the breaks - lowerBound, upperBound and relative size // where the lowerBound of the first break is the lowest of the LowerBound and the lowest value in data // and the upperBound of the last break is the maximum value in the data // also returns the relative position of the 
reference value func getSortedBreakInfo(request *models.RenderRequest) ([]*breakInfo, float64) { data := make([]*models.DataRow, len(request.Data)) copy(data, request.Data) sort.Slice(data, func(i, j int) bool { return data[i].Value < data[j].Value }) breaks := sortBreaks(request.Choropleth.Breaks, true) minValue := math.Min(data[0].Value, breaks[0].LowerBound) maxValue := request.Choropleth.UpperBound if maxValue < breaks[len(breaks)-1].LowerBound { maxValue = data[len(data)-1].Value } totalRange := maxValue - minValue breakCount := len(breaks) info := make([]*breakInfo, breakCount) for i := 0; i < breakCount-1; i++ { info[i] = &breakInfo{LowerBound: breaks[i].LowerBound, UpperBound: breaks[i+1].LowerBound, Colour: breaks[i].Colour} } info[0].LowerBound = minValue info[breakCount-1] = &breakInfo{LowerBound: breaks[breakCount-1].LowerBound, UpperBound: maxValue, Colour: breaks[breakCount-1].Colour} for _, b := range info { b.RelativeSize = (b.UpperBound - b.LowerBound) / totalRange } referencePos := (request.Choropleth.ReferenceValue - minValue) / totalRange return info, referencePos } // horizontalKeyInfo contains break info, the width of the key, the x position of the key, and reference tick values type horizontalKeyInfo struct { referenceTextLeft string referenceTextLeftLen float64 referenceTextRight string referenceTextRightLen float64 keyWidth float64 keyX float64 } // getHorizontalKeyInfo returns the width of the key, the x position of the key, the breaks within the key, and reference tick values // (making sure that the longer of the reference value and text is given the most space) func getHorizontalKeyInfo(svgWidth float64, svgRequest *SVGRequest) *horizontalKeyInfo { request := svgRequest.request refInfo := getHorizontalRefTextInfo(request) info := horizontalKeyInfo{} // assume a default width of 90% of svg info.keyWidth = svgWidth * 0.9 info.keyX = (svgWidth - info.keyWidth) / 2 // half of the upper and lower bound text will sit outside the key breaks := 
svgRequest.breaks left := htmlutil.GetApproximateTextWidth(fmt.Sprintf("%g", breaks[0].LowerBound), request.FontSize) / 2 right := htmlutil.GetApproximateTextWidth(fmt.Sprintf("%g", breaks[len(breaks)-1].UpperBound), request.FontSize) / 2 // the longer bit of reference text should sit on the side of the tick with the most space info.referenceTextLeft = refInfo.referenceTextLong info.referenceTextLeftLen = refInfo.referenceTextLongLen info.referenceTextRight = refInfo.referenceTextShort info.referenceTextRightLen = refInfo.referenceTextShortLen if svgRequest.referencePos < 0.5 { // the reference tick is less than halfway - switch the text info.referenceTextRight = refInfo.referenceTextLong info.referenceTextRightLen = refInfo.referenceTextLongLen info.referenceTextLeft = refInfo.referenceTextShort info.referenceTextLeftLen = refInfo.referenceTextShortLen } // now see if reference text is long enough to go beyond the bounds of the key refPos := info.keyWidth * svgRequest.referencePos // the actual pixel position of the reference tick within the key if refPos-info.referenceTextLeftLen < 0.0-left { left = math.Abs(refPos - info.referenceTextLeftLen) } if (refPos+info.referenceTextRightLen)-info.keyWidth > right { right = (refPos + info.referenceTextRightLen) - info.keyWidth } // if any text goes beyond the bounds of the svg, shorten the key if info.keyWidth+left+right > svgWidth { info.keyWidth = svgWidth - (left + right) info.keyX = left } return &info } // horizontalRefTextInfo contains the reference value and label with information about their length type horizontalRefTextInfo struct { referenceTextShort string referenceTextShortLen float64 referenceTextLong string referenceTextLongLen float64 } // getHorizontalRefTextInfo calculates the approximate width of the reference value and text, dividing them into short and long values. 
func getHorizontalRefTextInfo(request *models.RenderRequest) *horizontalRefTextInfo { info := horizontalRefTextInfo{} refTextLen := htmlutil.GetApproximateTextWidth(request.Choropleth.ReferenceValueText, request.FontSize) refValue := fmt.Sprintf("%g", request.Choropleth.ReferenceValue) refValueLen := htmlutil.GetApproximateTextWidth(refValue, request.FontSize) if refTextLen > refValueLen { info.referenceTextLong = request.Choropleth.ReferenceValueText info.referenceTextLongLen = refTextLen info.referenceTextShort = refValue info.referenceTextShortLen = refValueLen } else { info.referenceTextLong = refValue info.referenceTextLongLen = refValueLen info.referenceTextShort = request.Choropleth.ReferenceValueText info.referenceTextShortLen = refTextLen } return &info }
package cli

import (
	"fmt"
	"log"
	"math"
	"math/rand"
	"strings"
	"test/broker"
	"test/proto"
	"time"
)

// CliService reads simple commands from stdin and talks to other services
// through the broker.
type CliService struct {
	name          string             // broker name, e.g. "cli-123"
	activeWorkers int                // goroutines Stop must wait for
	stop          chan chan struct{} // each worker receives an ack channel and closes it on exit
	broker        broker.Broker
	producer      <-chan broker.Message
}

// NewCliService creates a stopped CliService with a randomly generated name.
func NewCliService() *CliService {
	return &CliService{
		randomCliName(),
		0,
		make(chan chan struct{}),
		nil,
		nil,
	}
}

// GetName returns the service's broker name.
func (cs *CliService) GetName() string {
	return cs.name
}

// Start wires the service to the broker and launches its two worker goroutines.
func (cs *CliService) Start(b broker.Broker, producer <-chan broker.Message) {
	cs.broker = b
	cs.producer = producer
	cs.activeWorkers = 2
	go cs.pinger()
	go cs.stdinHandler()
}

// pinger periodically announces liveness to the broker until stopped.
func (cs *CliService) pinger() {
	t := time.NewTicker(broker.ServicePingInterval)
	for {
		select {
		case <-t.C:
			if cs.broker != nil {
				_, err := cs.broker.Send(broker.Message{MessageType: broker.TypePing, From: cs.GetName()})
				if err != nil {
					log.Println(err)
				}
			}
		case ch := <-cs.stop:
			close(ch)
			return
		}
	}
}

// getNearestTo asks every "geo-" service for its distance to (x, y) and logs
// the nearest one.
//
// Fixes over the previous version:
//   - channels from failed Sends are no longer waited on (they may be nil, and
//     a receive from a nil channel can never complete), and
//   - each response channel is received from exactly once with a blocking
//     receive, replacing a busy-wait select/default polling loop that burned
//     CPU and could receive again from an already-drained channel.
func (cs *CliService) getNearestTo(x, y int) {
	chans := make(map[string]chan broker.Message)
	for _, name := range cs.broker.GetServices() {
		if strings.HasPrefix(name, "geo-") {
			ch, err := cs.broker.Send(broker.Message{
				MessageType: broker.TypeDirectMessage,
				From:        cs.GetName(),
				To:          name,
				Payload:     proto.GetDistanceRequest{PointX: x, PointY: y},
				ID:          broker.GenerateMessageID(),
			})
			if err != nil {
				log.Println(err)
				continue
			}
			chans[name] = ch
		}
	}

	nearestName := ""
	nearestDist := math.MaxFloat64
	// Collect exactly one reply per queried service; total wait time is still
	// bounded by the slowest responder, as before.
	for name, ch := range chans {
		resp := <-ch
		protoResp, ok := resp.Payload.(proto.GetDistanceResponse)
		if ok && protoResp.Distance != nil && *protoResp.Distance < nearestDist {
			nearestDist = *protoResp.Distance
			nearestName = name
		}
	}

	if nearestName != "" {
		log.Printf("Nearest service is %s, distance is %v", nearestName, nearestDist)
	} else {
		log.Printf("No geo services found")
	}
}

// stdinHandler reads numeric commands from stdin:
//
//	"1" followed by "<x> <y>" - find the nearest geo service to the point
//	"2"                       - broadcast a location-update request
//
// The stop channel is only checked between commands, so shutdown completes
// after the next line of input arrives.
func (cs *CliService) stdinHandler() {
	for {
		var smb string
		fmt.Scanln(&smb)
		if smb == "1" {
			var x, y int
			fmt.Scan(&x, &y)
			cs.getNearestTo(x, y)
		} else if smb == "2" {
			err := cs.broker.Broadcast(broker.Message{
				MessageType: broker.TypeBroadcastMessage,
				From:        cs.GetName(),
				Payload:     proto.UpdateLocationRequest{},
			})
			if err != nil {
				log.Println(err)
			}
		} else {
			log.Println("got wrong command")
		}
		select {
		case ch := <-cs.stop:
			close(ch)
			return
		default:
			continue
		}
	}
}

// Stop signals every active worker and blocks until each acknowledges by
// closing its ack channel.
func (cs *CliService) Stop() {
	for i := 0; i < cs.activeWorkers; i++ {
		ch := make(chan struct{})
		cs.stop <- ch
		<-ch
	}
}

// randomCliName returns a pseudo-random service name; collisions are possible.
func randomCliName() string {
	return fmt.Sprintf("cli-%d", rand.Intn(900))
}
package goroutine

import (
	"fmt"
	"log"
	"math"
	"sync"
	"time"
)

// simple launches one goroutine per iteration with no concurrency limit -
// an illustration of unbounded goroutine creation.
func simple() {
	var wg sync.WaitGroup
	for n := 0; n < math.MaxInt32; n++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			fmt.Println(n)
			time.Sleep(time.Second)
		}(n)
	}
	wg.Wait()
}

// withChan limits concurrency to 3 goroutines at a time using a buffered
// channel as a counting semaphore.
func withChan() {
	var wg sync.WaitGroup
	sem := make(chan struct{}, 3)
	for n := 0; n < 10; n++ {
		sem <- struct{}{} // acquire a slot; blocks while 3 workers are active
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			log.Println(n)
			time.Sleep(time.Second)
			<-sem // release the slot
		}(n)
	}
	wg.Wait()
}
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os/exec"
	"sync"
)

// main distributes one shell command per file in the input directory across a
// fixed pool of worker goroutines.
func main() {
	inputDirFlag := flag.String("inputdir", "./", "Input directory")
	workersFlag := flag.Int("workers", 6, "Maximum amount of concurrent goroutines")
	flag.Parse()

	workerCount := *workersFlag
	inputDir := *inputDirFlag

	jobs := make(chan string)
	var wg sync.WaitGroup
	for w := 0; w < workerCount; w++ {
		wg.Add(1)
		go worker(jobs, &wg)
	}

	files, err := ioutil.ReadDir(inputDir)
	if err != nil {
		log.Fatal(err)
	}

	for index, f := range files {
		fmt.Printf("progress: %d/%d\n", index+1, len(files))
		jobs <- "echo " + f.Name() // just a simple example
	}

	// No more work: closing the channel ends each worker's range loop.
	close(jobs)
	wg.Wait()
}

// worker executes each command it receives via bash until the channel closes,
// printing combined output (and the error, if any) for every command.
func worker(ch chan string, wg *sync.WaitGroup) {
	defer wg.Done()
	for command := range ch {
		cmd := exec.Command("/bin/bash", "-c", command)
		output, err := cmd.CombinedOutput()
		if err != nil {
			fmt.Println(fmt.Sprint(err) + ": " + string(output))
		}
		fmt.Println(string(output))
	}
}
package pruss

// Memory-map layout and register offsets for the TI PRU-ICSS subsystem,
// accessed through the uio_pruss kernel driver.
const (
	PAGE_SIZE = 4096

	// RAM sizes (bytes) for each PRU core.
	PRUSS_MAX_IRAM_SIZE = 8192
	PRUSS_IRAM_SIZE     = 8192
	PRUSS_DATARAM_SIZE  = 512
	PRUSS_MMAP_SIZE     = 0x40000

	// Physical base addresses of the PRUSS memory regions and peripherals.
	DATARAM0_PHYS_BASE    = 0x4a300000
	DATARAM1_PHYS_BASE    = 0x4a302000
	INTC_PHYS_BASE        = 0x4a320000
	PRU0CONTROL_PHYS_BASE = 0x4a322000
	PRU0DEBUG_PHYS_BASE   = 0x4a322400
	PRU1CONTROL_PHYS_BASE = 0x4a324000
	PRU1DEBUG_PHYS_BASE   = 0x4a324400
	PRU0IRAM_PHYS_BASE    = 0x4a334000
	PRU1IRAM_PHYS_BASE    = 0x4a338000
	PRUSS_SHAREDRAM_BASE  = 0x4a310000
	PRUSS_CFG_BASE        = 0x4a326000
	PRUSS_UART_BASE       = 0x4a328000
	PRUSS_IEP_BASE        = 0x4a32e000
	PRUSS_ECAP_BASE       = 0x4a330000
	PRUSS_MIIRT_BASE      = 0x4a332000
	PRUSS_MDIO_BASE       = 0x4a332400
	//NOTE: Above defines are SOC specific

	// Interrupt-controller (INTC) register offsets, relative to INTC_PHYS_BASE.
	PRU_INTC_REVID_REG  = 0x000
	PRU_INTC_CR_REG     = 0x004
	PRU_INTC_HCR_REG    = 0x00C
	PRU_INTC_GER_REG    = 0x010
	PRU_INTC_GNLR_REG   = 0x01C
	PRU_INTC_SISR_REG   = 0x020
	PRU_INTC_SICR_REG   = 0x024
	PRU_INTC_EISR_REG   = 0x028
	PRU_INTC_EICR_REG   = 0x02C
	PRU_INTC_HIEISR_REG = 0x034
	PRU_INTC_HIDISR_REG = 0x038
	PRU_INTC_GPIR_REG   = 0x080

	PRU_INTC_SRSR1_REG = 0x200
	PRU_INTC_SRSR2_REG = 0x204

	PRU_INTC_SECR1_REG = 0x280
	PRU_INTC_SECR2_REG = 0x284

	PRU_INTC_ESR1_REG = 0x300
	PRU_INTC_ESR2_REG = 0x304

	PRU_INTC_ECR1_REG = 0x380
	PRU_INTC_ECR2_REG = 0x384

	// Channel-map registers (16 x 4 events each).
	PRU_INTC_CMR1_REG  = 0x400
	PRU_INTC_CMR2_REG  = 0x404
	PRU_INTC_CMR3_REG  = 0x408
	PRU_INTC_CMR4_REG  = 0x40C
	PRU_INTC_CMR5_REG  = 0x410
	PRU_INTC_CMR6_REG  = 0x414
	PRU_INTC_CMR7_REG  = 0x418
	PRU_INTC_CMR8_REG  = 0x41C
	PRU_INTC_CMR9_REG  = 0x420
	PRU_INTC_CMR10_REG = 0x424
	PRU_INTC_CMR11_REG = 0x428
	PRU_INTC_CMR12_REG = 0x42C
	PRU_INTC_CMR13_REG = 0x430
	PRU_INTC_CMR14_REG = 0x434
	PRU_INTC_CMR15_REG = 0x438
	PRU_INTC_CMR16_REG = 0x43C

	// Host-map registers.
	PRU_INTC_HMR1_REG = 0x800
	PRU_INTC_HMR2_REG = 0x804
	PRU_INTC_HMR3_REG = 0x808

	PRU_INTC_SIPR1_REG = 0xD00
	PRU_INTC_SIPR2_REG = 0xD04

	PRU_INTC_SITR1_REG = 0xD80
	PRU_INTC_SITR2_REG = 0xD84

	PRU_INTC_HIER_REG = 0x1500

	MAX_HOSTS_SUPPORTED = 10

	//UIO driver expects user space to map PRUSS_UIO_MAP_OFFSET_XXX to
	//access corresponding memory regions - region offset is N*PAGE_SIZE
	PRUSS_UIO_MAP_OFFSET_PRUSS = 0 * PAGE_SIZE
	PRUSS_UIO_DRV_PRUSS_BASE   = "/sys/class/uio/uio0/maps/map0/addr"
	PRUSS_UIO_DRV_PRUSS_SIZE   = "/sys/class/uio/uio0/maps/map0/size"

	PRUSS_UIO_MAP_OFFSET_EXTRAM = 1 * PAGE_SIZE
	PRUSS_UIO_DRV_EXTRAM_BASE   = "/sys/class/uio/uio0/maps/map1/addr"
	PRUSS_UIO_DRV_EXTRAM_SIZE   = "/sys/class/uio/uio0/maps/map1/size"

	// Unused. Also if we're going to use this, the path for the EXTRAM needs to change
	PRUSS_UIO_MAP_OFFSET_L3RAM = 1 * PAGE_SIZE
	PRUSS_UIO_DRV_L3RAM_BASE   = "/sys/class/uio/uio0/maps/map1/addr"
	PRUSS_UIO_DRV_L3RAM_SIZE   = "/sys/class/uio/uio0/maps/map1/size"
)
package client

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/go-ocf/go-coap"
	kitNetCoap "github.com/go-ocf/kit/net/coap"
)

// ContentTypeToMediaType maps a textual content-type to the corresponding CoAP
// media type. Unknown content types return TextPlain together with an error.
func ContentTypeToMediaType(contentType string) (coap.MediaType, error) {
	switch contentType {
	case coap.TextPlain.String():
		return coap.TextPlain, nil
	case coap.AppCBOR.String():
		return coap.AppCBOR, nil
	case coap.AppOcfCbor.String():
		return coap.AppOcfCbor, nil
	case coap.AppJSON.String():
		return coap.AppJSON, nil
	default:
		return coap.TextPlain, fmt.Errorf("unknown content type '%v'", contentType)
	}
}

// DecodeContentWithCodec decodes data of the given contentType into response
// using the supplied codec. A nil response is a no-op; empty payloads are
// passed through to *[]byte / *interface{} targets without invoking the codec.
// Decoding problems are reported as gRPC InvalidArgument errors.
func DecodeContentWithCodec(codec kitNetCoap.Codec, contentType string, data []byte, response interface{}) error {
	if response == nil {
		return nil
	}
	// Fast paths for empty payloads: hand back the raw (empty) bytes or nil.
	if val, ok := response.(*[]byte); ok && len(data) == 0 {
		*val = data
		return nil
	}
	if val, ok := response.(*interface{}); ok && len(data) == 0 {
		*val = nil
		return nil
	}
	mediaType, err := ContentTypeToMediaType(contentType)
	if err != nil {
		return status.Errorf(codes.InvalidArgument, "cannot convert response contentype %v to mediatype: %v", contentType, err)
	}
	// Wrap the payload in a TCP CoAP message so the codec can inspect the
	// content format option while decoding.
	msg := coap.NewTcpMessage(coap.MessageParams{
		Payload: data,
	})
	msg.SetOption(coap.ContentFormat, mediaType)
	err = codec.Decode(msg, response)
	if err != nil {
		return status.Errorf(codes.InvalidArgument, "cannot decode response: %v", err)
	}
	// Previously this returned `err`, which is provably nil here; return nil
	// explicitly for clarity.
	return nil
}
package bbs

import (
	"github.com/cloudfoundry/storeadapter"
	"github.com/onsi-experimental/runtime-schema/models"
	"path"
	"time"
)

// ClaimTTL is the store TTL (seconds) on a claimed RunOnce node; an executor
// that dies lets its claim expire automatically.
const ClaimTTL uint64 = 10

// RunOnceSchemaRoot is the store path under which all RunOnce state lives.
const RunOnceSchemaRoot = "/v1/run_once"

type executorBBS struct {
	store storeadapter.StoreAdapter
}

type stagerBBS struct {
	store storeadapter.StoreAdapter
}

// runOnceSchemaPath joins path segments under RunOnceSchemaRoot,
// e.g. runOnceSchemaPath("pending", guid) -> "/v1/run_once/pending/<guid>".
func runOnceSchemaPath(segments ...string) string {
	return path.Join(append([]string{RunOnceSchemaRoot}, segments...)...)
}

// retryIndefinitelyOnStoreTimeout calls callback until it returns anything
// other than storeadapter.ErrorTimeout, sleeping one second between attempts.
// Any non-timeout result (including nil) is returned immediately.
func retryIndefinitelyOnStoreTimeout(callback func() error) error {
	for {
		err := callback()

		if err == storeadapter.ErrorTimeout {
			time.Sleep(time.Second)
			continue
		}

		return err
	}
}

// watchForRunOnceModificationsOnState watches the store subtree for the given
// state and emits a RunOnce for every create/update event. Send true on the
// returned stop channel to end the watch; the RunOnce channel is closed on stop.
// NOTE(review): the errs channel comes straight from store.Watch and is not
// closed by this function; unparseable nodes are silently skipped.
func watchForRunOnceModificationsOnState(store storeadapter.StoreAdapter, state string) (<-chan models.RunOnce, chan<- bool, <-chan error) {
	stopOuter := make(chan bool)
	runOnces := make(chan models.RunOnce)

	events, stopInner, errs := store.Watch(runOnceSchemaPath(state))

	go func() {
		for {
			select {
			case <-stopOuter:
				// Propagate the stop to the inner watch before closing our output.
				stopInner <- true
				close(runOnces)
				return
			case event := <-events:
				switch event.Type {
				case storeadapter.CreateEvent, storeadapter.UpdateEvent:
					runOnce, err := models.NewRunOnceFromJSON(event.Node.Value)
					if err != nil {
						continue
					}

					runOnces <- runOnce
				}
			}
		}
	}()

	return runOnces, stopOuter, errs
}

// getAllRunOnces lists every RunOnce stored under the given state. A missing
// key is treated as an empty result, not an error.
// NOTE(review): JSON parse errors per node are ignored, so a corrupt node
// contributes a zero-value RunOnce to the result.
func getAllRunOnces(store storeadapter.StoreAdapter, state string) ([]models.RunOnce, error) {
	node, err := store.ListRecursively(runOnceSchemaPath(state))
	if err == storeadapter.ErrorKeyNotFound {
		return []models.RunOnce{}, nil
	}

	if err != nil {
		return []models.RunOnce{}, err
	}

	runOnces := []models.RunOnce{}
	for _, node := range node.ChildNodes {
		runOnce, _ := models.NewRunOnceFromJSON(node.Value)
		runOnces = append(runOnces, runOnce)
	}

	return runOnces, nil
}

// GetAllClaimedRunOnces returns every RunOnce currently in the "claimed" state.
func (self *BBS) GetAllClaimedRunOnces() ([]models.RunOnce, error) {
	return getAllRunOnces(self.store, "claimed")
}

// GetAllStartingRunOnces returns every RunOnce currently in the "running" state.
func (self *BBS) GetAllStartingRunOnces() ([]models.RunOnce, error) {
	return getAllRunOnces(self.store, "running")
}

// WatchForCompletedRunOnce watches for RunOnces entering the "completed" state.
func (self *stagerBBS) WatchForCompletedRunOnce() (<-chan models.RunOnce, chan<- bool, <-chan error) {
	return watchForRunOnceModificationsOnState(self.store, "completed")
}

// The stager calls this when it wants to desire a payload
// stagerBBS will retry this repeatedly if it gets a StoreTimeout error (up to N seconds?)
// If this fails, the stager should bail and run its "this-failed-to-stage" routine
func (self *stagerBBS) DesireRunOnce(runOnce models.RunOnce) error {
	return retryIndefinitelyOnStoreTimeout(func() error {
		return self.store.SetMulti([]storeadapter.StoreNode{
			{
				Key:   runOnceSchemaPath("pending", runOnce.Guid),
				Value: runOnce.ToJSON(),
			},
		})
	})
}

// The stager calls this when it wants to signal that it has received a completion and is handling it
// stagerBBS will retry this repeatedly if it gets a StoreTimeout error (up to N seconds?)
// If this fails, the stager should assume that someone else is handling the completion and should bail
func (self *stagerBBS) ResolveRunOnce(runOnce models.RunOnce) error {
	return retryIndefinitelyOnStoreTimeout(func() error {
		return self.store.Delete(runOnceSchemaPath("pending", runOnce.Guid))
	})
}

// WatchForDesiredRunOnce watches for RunOnces entering the "pending" state.
func (self *executorBBS) WatchForDesiredRunOnce() (<-chan models.RunOnce, chan<- bool, <-chan error) {
	return watchForRunOnceModificationsOnState(self.store, "pending")
}

// The executor calls this when it wants to claim a runonce
// stagerBBS will retry this repeatedly if it gets a StoreTimeout error (up to N seconds?)
// If this fails, the executor should assume that someone else is handling the claim and should bail
func (self *executorBBS) ClaimRunOnce(runOnce models.RunOnce) error {
	if runOnce.ExecutorID == "" {
		panic("must set ExecutorID on RunOnce model to claim (finish your tests)")
	}

	// The claimed node carries ClaimTTL so a dead executor's claim expires.
	return retryIndefinitelyOnStoreTimeout(func() error {
		return self.store.Create(storeadapter.StoreNode{
			Key:   runOnceSchemaPath("claimed", runOnce.Guid),
			Value: runOnce.ToJSON(),
			TTL:   ClaimTTL,
		})
	})
}

// The executor calls this when it is about to run the runonce in the claimed container
// stagerBBS will retry this repeatedly if it gets a StoreTimeout error (up to N seconds?)
// If this fails, the executor should assume that someone else is running and should clean up and bail
func (self *executorBBS) StartRunOnce(runOnce models.RunOnce) error {
	if runOnce.ExecutorID == "" {
		panic("must set ExecutorID on RunOnce model to start (finish your tests)")
	}

	if runOnce.ContainerHandle == "" {
		panic("must set ContainerHandle on RunOnce model to start (finish your tests)")
	}

	return retryIndefinitelyOnStoreTimeout(func() error {
		return self.store.Create(storeadapter.StoreNode{
			Key:   runOnceSchemaPath("running", runOnce.Guid),
			Value: runOnce.ToJSON(),
		})
	})
}

// The executor calls this when it has finished running the runonce (be it success or failure)
// stagerBBS will retry this repeatedly if it gets a StoreTimeout error (up to N seconds?)
// This really really shouldn't fail.  If it does, blog about it and walk away. If it failed in a
// consistent way (i.e. key already exists), there's probably a flaw in our design.
func (self *executorBBS) CompletedRunOnce(runOnce models.RunOnce) error {
	return retryIndefinitelyOnStoreTimeout(func() error {
		return self.store.Create(storeadapter.StoreNode{
			Key:   runOnceSchemaPath("completed", runOnce.Guid),
			Value: runOnce.ToJSON(),
		})
	})
}

// ConvergeRunOnce is run by *one* executor every X seconds (doesn't really matter what X is.. pick something performant)
// Converge will:
// 1. Kick (by setting) any pending for guids that only have a pending
// 2. Kick (by setting) any completed for guids that have a pending
// 3. Remove any claimed/running/completed for guids that have no corresponding pending
func (self *executorBBS) ConvergeRunOnce() {
	runOnceState, err := self.store.ListRecursively(RunOnceSchemaRoot)
	if err != nil {
		return
	}

	storeNodesToSet := []storeadapter.StoreNode{}
	keysToDelete := []string{}

	pending, _ := runOnceState.Lookup("pending")
	claimed, _ := runOnceState.Lookup("claimed")
	running, _ := runOnceState.Lookup("running")
	completed, _ := runOnceState.Lookup("completed")

	for _, pendingNode := range pending.ChildNodes {
		// NOTE(review): pending nodes take KeyComponents()[3] as the guid here,
		// but the cleanup loop below uses KeyComponents()[2] - confirm which
		// index actually holds the guid for these key layouts.
		guid := pendingNode.KeyComponents()[3]
		completedNode, isCompleted := completed.Lookup(guid)
		if isCompleted {
			// Re-set the completed node to kick its watchers.
			storeNodesToSet = append(storeNodesToSet, completedNode)
			continue
		}

		_, isClaimed := claimed.Lookup(guid)
		_, isRunning := running.Lookup(guid)
		if isClaimed || isRunning {
			continue
		}

		// Only a pending node exists: re-set it to kick its watchers.
		storeNodesToSet = append(storeNodesToSet, pendingNode)
	}

	// Delete orphaned claimed/running/completed nodes with no pending sibling.
	// (The inner `node` deliberately shadows the outer loop variable.)
	for _, node := range []storeadapter.StoreNode{claimed, running, completed} {
		for _, node := range node.ChildNodes {
			guid := node.KeyComponents()[2]
			_, isPending := pending.Lookup(guid)
			if !isPending {
				keysToDelete = append(keysToDelete, node.Key)
			}
		}
	}

	// Best-effort: errors from the store are intentionally ignored here.
	self.store.SetMulti(storeNodesToSet)
	self.store.Delete(keysToDelete...)
}
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/99designs/gqlgen/graphql"
	"github.com/99designs/gqlgen/graphql/handler"
	"github.com/99designs/gqlgen/graphql/playground"
	"github.com/gorilla/mux"
	auth "github.com/onelittlenightmusic/opa-entrypoint-authorizer"
	"github.com/onelittlenightmusic/opa-entrypoint-authorizer/example-graphql/graph"
	"github.com/onelittlenightmusic/opa-entrypoint-authorizer/example-graphql/graph/generated"
)

// defaultPort is used when the PORT environment variable is unset.
const defaultPort = "8008"

// main starts a GraphQL server with OPA-based authorization: an HTTP
// middleware initializes each request and a field middleware authorizes
// individual GraphQL field resolutions. The playground is served at "/",
// queries at "/query".
func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = defaultPort
	}

	myRouter := mux.NewRouter().StrictSlash(true)
	initMiddleware, fieldMiddleware := Init()
	myRouter.Use(initMiddleware)
	srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))
	// srv.AroundOperations(func(ctx context.Context, next graphql.OperationHandler) graphql.ResponseHandler {
	// 	// ops := graphql.GetOperationContext(ctx)
	// 	// jsonStr, _ := json.MarshalIndent(ops, "", "  ")
	// 	// fmt.Printf("AroundOperations: %s", jsonStr)
	// 	return next(ctx)
	// })
	srv.AroundFields(fieldMiddleware)

	myRouter.Handle("/", playground.Handler("GraphQL playground", "/query"))
	myRouter.Handle("/query", srv)

	log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
	log.Fatal(http.ListenAndServe(":"+port, myRouter))
}

// Init builds the OPA middleware pair from the local data directory and the
// policy bundle. Returns the HTTP entrypoint middleware and the GraphQL
// per-field authorization middleware.
func Init() (func(h http.Handler) http.Handler, graphql.FieldMiddleware) {
	config := auth.MiddlewareConfiguration{
		DataPath:   "./data",
		BundlePath: "../../bundle-raw.tar.gz",
	}
	return auth.CreateGraphQLMiddleware(config)
}
package aggregate

import (
	"context"
	"time"

	"github.com/XiaoMi/pegasus-go-client/idl/admin"
	"github.com/XiaoMi/pegasus-go-client/idl/base"
	"github.com/XiaoMi/pegasus-go-client/session"
	log "github.com/sirupsen/logrus"
)

// PerfClient manages sessions to all replica nodes.
type PerfClient struct {
	meta *session.MetaManager

	// replica address -> session, refreshed by updateNodes.
	nodes map[string]*PerfSession
}

// GetPartitionStats retrieves all the partition stats from replica nodes.
// Per-partition perf-counters are decoded from the raw node counters, grouped
// by gpid, and extended with derived stats before being returned.
func (m *PerfClient) GetPartitionStats() []*PartitionStats {
	m.updateNodes()

	partitions := make(map[base.Gpid]*PartitionStats)

	// "@" appears to act as a match-all filter for partition-level counters
	// (counter names embed the gpid) — confirm against PerfSession docs.
	nodes := m.GetNodeStats("@")
	for _, n := range nodes {
		for name, value := range n.Stats {
			perfCounter := decodePartitionPerfCounter(name, value)
			if perfCounter == nil {
				continue
			}
			if !aggregatable(perfCounter) {
				continue
			}

			// Lazily create the per-partition bucket on first counter seen.
			part := partitions[perfCounter.gpid]
			if part == nil {
				part = &PartitionStats{
					Gpid:  perfCounter.gpid,
					Stats: make(map[string]float64),
					Addr:  n.Addr,
				}
				partitions[perfCounter.gpid] = part
			}

			part.Stats[perfCounter.name] = perfCounter.value
		}
	}

	var ret []*PartitionStats
	for _, part := range partitions {
		extendStats(&part.Stats)
		ret = append(ret, part)
	}
	return ret
}

// NodeStat contains the stats of a replica node.
type NodeStat struct {
	// Address of the replica node.
	Addr string

	// perfCounter's name -> the value.
	Stats map[string]float64
}

// GetNodeStats retrieves all the stats matched with `filter` from replica nodes.
func (m *PerfClient) GetNodeStats(filter string) []*NodeStat {
	m.updateNodes()

	var ret []*NodeStat
	for _, n := range m.nodes {
		stat := &NodeStat{
			Addr:  n.Address,
			Stats: make(map[string]float64),
		}

		perfCounters, err := n.GetPerfCounters(filter)
		if err != nil {
			// Any single node failing aborts the whole query (partial results
			// are discarded).
			log.Errorf("unable to query perf-counters: %s", err)
			return nil
		}
		for _, p := range perfCounters {
			stat.Stats[p.Name] = p.Value
		}

		ret = append(ret, stat)
	}
	return ret
}

// listNodes queries the meta server for all currently alive replica nodes.
// Returns nil on error (logged).
func (m *PerfClient) listNodes() []*admin.NodeInfo {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	resp, err := m.meta.ListNodes(ctx, &admin.ListNodesRequest{
		Status: admin.NodeStatus_NS_ALIVE,
	})
	if err != nil {
		log.Error(err)
		return nil
	}
	return resp.Infos
}

// listTables queries the meta server for all available tables.
// Returns nil on error (logged).
func (m *PerfClient) listTables() []*admin.AppInfo {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	resp, err := m.meta.ListApps(ctx, &admin.ListAppsRequest{
		Status: admin.AppStatus_AS_AVAILABLE,
	})
	if err != nil {
		log.Error(err)
		return nil
	}
	return resp.Infos
}

// updateNodes reconciles m.nodes against the meta server's current view:
// sessions for still-alive nodes are kept, new nodes get fresh sessions, and
// sessions for vanished nodes are closed.
func (m *PerfClient) updateNodes() {
	nodeInfos := m.listNodes()

	newNodes := make(map[string]*PerfSession)
	for _, n := range nodeInfos {
		addr := n.Address.GetAddress()
		node, found := m.nodes[addr]
		if !found {
			newNodes[addr] = NewPerfSession(addr)
		} else {
			newNodes[addr] = node
		}
	}
	for n, client := range m.nodes {
		// close the unused connections
		if _, found := newNodes[n]; !found {
			client.Close()
		}
	}
	m.nodes = newNodes
}

// NewPerfClient returns an instance of PerfClient.
func NewPerfClient(metaAddrs []string) *PerfClient {
	return &PerfClient{
		meta:  session.NewMetaManager(metaAddrs, session.NewNodeSession),
		nodes: make(map[string]*PerfSession),
	}
}
/*
 * @lc app=leetcode id=26 lang=golang
 *
 * [26] Remove Duplicates from Sorted Array
 */

// removeDuplicates compacts a sorted slice in place so its first k elements
// are the distinct values in order, and returns k.
//
// Fixes over the previous version: a leftover debug fmt.Println (fmt was not
// imported in this file, so it did not even compile) is removed, and the
// duplicate-counting bookkeeping is replaced by the standard two-pointer form
// that returns the same count (unique elements) with the same in-place prefix.
func removeDuplicates(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	// `last` is the index of the last unique element written so far.
	last := 0
	for i := 1; i < len(nums); i++ {
		if nums[i] != nums[last] {
			last++
			nums[last] = nums[i]
		}
	}
	return last + 1
}
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"regexp"
	"strconv"
)

// PasswordPayload is one parsed policy line of the form
// "low-high letter: password" (Advent of Code 2020, day 2).
type PasswordPayload struct {
	low      int
	high     int
	letter   rune
	password string
}

// readInput parses input.txt into PasswordPayload records.
// Fixes over the previous version: the os.Open error is no longer ignored
// (a nil *File would have panicked inside the scanner), malformed lines are
// skipped instead of causing an index panic, and scanner.Err is checked.
func readInput() []PasswordPayload {
	f, err := os.Open("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	input := make([]PasswordPayload, 0)
	re := regexp.MustCompile(`(\d+)-(\d+) ([a-z]): ([a-z]+)`)

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		parts := re.FindStringSubmatch(scanner.Text())
		if parts == nil {
			continue // skip malformed lines
		}
		// The \d+ groups guarantee Atoi succeeds, so errors are ignorable.
		low, _ := strconv.Atoi(parts[1])
		high, _ := strconv.Atoi(parts[2])
		input = append(input, PasswordPayload{
			low:      low,
			high:     high,
			letter:   rune(parts[3][0]),
			password: parts[4],
		})
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return input
}

// part1IsValid reports whether letter occurs between low and high times
// (inclusive) in password. Counting only the target letter avoids building
// a full frequency map per line.
func part1IsValid(low, high int, letter rune, password string) bool {
	count := 0
	for _, c := range password {
		if c == letter {
			count++
		}
	}
	return count >= low && count <= high
}

// part2IsValid reports whether letter appears at exactly one of the two
// 1-based positions low and high (an XOR of the two membership tests).
func part2IsValid(low, high int, letter rune, password string) bool {
	atLow := rune(password[low-1]) == letter
	atHigh := rune(password[high-1]) == letter
	return atLow != atHigh
}

// countValid reads the input and counts the passwords accepted by the given
// validation rule; shared by doPart1 and doPart2, which were duplicates.
func countValid(valid func(low, high int, letter rune, password string) bool) int {
	validPasswordCnt := 0
	for _, p := range readInput() {
		if valid(p.low, p.high, p.letter, p.password) {
			validPasswordCnt++
		}
	}
	return validPasswordCnt
}

// doPart1 counts passwords valid under the occurrence-count rule.
func doPart1() int { return countValid(part1IsValid) }

// doPart2 counts passwords valid under the exact-position rule.
func doPart2() int { return countValid(part2IsValid) }

func main() {
	fmt.Println(doPart1())
	fmt.Println(doPart2())
}
package devices

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"

	"github.com/foundriesio/fioctl/subcommands"
)

// Flags for the "show" subcommand.
var (
	showHWInfo bool // --hwinfo: print the device's hardware report
	showAkToml bool // --aktoml: print the aktualizr-lite toml config
)

// init registers the "show <name>" subcommand and its flags on the parent
// command (cmd and api are declared elsewhere in this package).
func init() {
	showCmd := &cobra.Command{
		Use:   "show <name>",
		Short: "Show details of a specific device",
		Run:   doShow,
		Args:  cobra.ExactArgs(1),
	}
	cmd.AddCommand(showCmd)
	showCmd.Flags().BoolVarP(&showHWInfo, "hwinfo", "i", false, "Show HW Information")
	showCmd.Flags().BoolVarP(&showAkToml, "aktoml", "", false, "Show aktualizr-lite toml config")
}

// doShow fetches the device named in args[0] from the API and pretty-prints
// its fields. Optional sections (hardware info, aktualizr config) are hidden
// unless the corresponding flag is set.
func doShow(cmd *cobra.Command, args []string) {
	logrus.Debug("Showing device")
	device, err := api.DeviceGet(args[0])
	subcommands.DieNotNil(err)
	fmt.Printf("UUID:\t\t%s\n", device.Uuid)
	fmt.Printf("Owner:\t\t%s\n", device.Owner)
	fmt.Printf("Factory:\t%s\n", device.Factory)
	if device.Group != nil {
		fmt.Printf("Group:\t\t%s\n", device.Group.Name)
	}
	var waveSuffix string
	if device.IsWave {
		waveSuffix = " (in wave)"
	}
	fmt.Printf("Production:\t%v%s\n", device.IsProd, waveSuffix)
	fmt.Printf("Up to date:\t%v\n", device.UpToDate)
	fmt.Printf("Target:\t\t%s / sha256(%s)\n", device.TargetName, device.OstreeHash)
	fmt.Printf("Ostree Hash:\t%s\n", device.OstreeHash)
	fmt.Printf("Created:\t%s\n", device.CreatedAt)
	fmt.Printf("Last Seen:\t%s\n", device.LastSeen)
	// Optional fields below are only printed when present.
	if len(device.Tag) > 0 {
		fmt.Printf("Tag:\t\t%s\n", device.Tag)
	}
	if len(device.DockerApps) > 0 {
		fmt.Printf("Docker Apps:\t%s\n", strings.Join(device.DockerApps, ","))
	}
	if len(device.Status) > 0 {
		fmt.Printf("Status:\t\t%s\n", device.Status)
	}
	if len(device.CurrentUpdate) > 0 {
		fmt.Printf("Update Id:\t%s\n", device.CurrentUpdate)
	}
	if device.Network != nil {
		fmt.Println("Network Info:")
		fmt.Printf("\tHostname:\t%s\n", device.Network.Hostname)
		fmt.Printf("\tIP:\t\t%s\n", device.Network.Ipv4)
		fmt.Printf("\tMAC:\t\t%s\n", device.Network.MAC)
	}
	if device.Hardware != nil {
		b, err := json.MarshalIndent(device.Hardware, "\t", " ")
		if err != nil {
			// NOTE(review): marshal failure only warns; b may be empty below.
			fmt.Println("Unable to marshall hardware info: ", err)
		}
		if showHWInfo {
			fmt.Printf("Hardware Info:\n\t")
			os.Stdout.Write(b)
			fmt.Println("")
		} else {
			fmt.Printf("Hardware Info: (hidden, use --hwinfo)\n")
		}
	}
	if len(device.AktualizrToml) > 0 {
		if showAkToml {
			// Each config line is printed with a "| " gutter prefix.
			for _, line := range strings.Split(device.AktualizrToml, "\n") {
				fmt.Printf("\t| %s\n", line)
			}
		} else {
			fmt.Println("Aktualizr config: (hidden, use --aktoml)")
		}
	}
	if device.ActiveConfig != nil {
		fmt.Println("Active Config:")
		subcommands.PrintConfig(device.ActiveConfig, true, false, "\t")
	}
	if len(device.PublicKey) > 0 {
		fmt.Println()
		fmt.Print(device.PublicKey)
	}
}
package common import ( "errors" "os/exec" "reflect" "time" ) type ( executor struct { cmd []string time.Duration } Executor interface { Exec() (string, error) SetOp([]string) SetTimeout(time.Duration) } ) func NewExecutor() Executor { return &executor{} } func (e *executor) SetOp(cmd []string) { e.cmd = cmd } func (e *executor) SetTimeout(t time.Duration) { e.Duration = t } func (e *executor) Exec() (string, error) { cmd := exec.Command(e.cmd[0], e.cmd[1:]...) res := make(chan interface{}, 1) go func() { out, err := cmd.Output() if err == nil { res <- string(out) } else { res <- err } }() select { case <-time.After(e.Duration): if err := cmd.Process.Kill(); err != nil { return "", err } return "", errors.New("operation timeout exceeded") case instance := <-res: inst := reflect.ValueOf(instance) if inst.Kind() == reflect.String { return instance.(string), nil } return "", instance.(error) } }
package controller

import "github.com/therecipe/qt/core"

// Controller is the package-wide singleton view controller; it is assigned
// by init when Qt instantiates the object (via the constructor tag below).
var Controller *viewController

// viewController bridges Go and the Qt/QML layer. The blank "_" fields are
// therecipe/qt magic: their struct tags are picked up by the qt code
// generator to wire constructors and signals.
type viewController struct {
	core.QObject

	// Constructor hook: runs init() when the object is created.
	_ func() `constructor:"init"`

	// blur(bool) signal — presumably consumed by the QML view; confirm
	// against the QML side.
	_ func(bool) `signal:"blur"`
}

// init stores the newly created instance in the package-level Controller so
// other packages can reach it.
func (c *viewController) init() {
	Controller = c
}
package realm import ( "errors" "fmt" ) var ( ErrChamberEmpty = errors.New("chamber is nil") ) type ErrToggleNotFound struct { Key string } func (tnf *ErrToggleNotFound) Error() string { return fmt.Sprintf("%v does not exist", tnf.Key) } type ErrCouldNotConvertToggle struct { Key string Type string } func (cnc *ErrCouldNotConvertToggle) Error() string { return fmt.Sprintf("%q could not be converted: it is of type %q", cnc.Key, cnc.Type) }
package main

import (
	"errors"
	"github.com/soniah/gosnmp"
	"math"
	"strings"
	"time"
)

//https://collectd.org/wiki/index.php/Data_source
// Data-source kinds.
// NOTE(review): `0 << iota` evaluates to 0 for EVERY constant in this block
// (0 shifted by anything is 0) — GAUGE == INTEGER == ... == IPADDR == 0.
// This was most likely meant to be plain `iota`; confirm whether anything
// relies on these values before changing (DataSrcType below is matched as a
// string, not via these constants).
const (
	GAUGE = 0 << iota //value is simply stored as-is
	INTEGER
	COUNTER32
	COUNTER64
	STRING
	HWADDR
	IPADDR
)

/*
3.- Check minimal data is set (pending)
	name, BaseOID
	BaseOID begining with "."
	fieldname != null
*/
// Init validates the metric configuration and assigns its ID: FieldName and
// BaseOID must be set, DataSrcType must be one of the known kinds, and
// BaseOID must begin with ".".
func (m *SnmpMetricCfg) Init(name string) error {
	m.ID = name
	//validate config values
	if len(m.FieldName) == 0 {
		return errors.New("FieldName not set in metric Config " + m.ID)
	}
	if len(m.BaseOID) == 0 {
		return errors.New("BaseOid not set in metric Config " + m.ID)
	}
	// Empty cases deliberately accept the value; only unknown types error.
	switch m.DataSrcType {
	case "GAUGE":
	case "INTEGER":
	case "COUNTER32":
	case "COUNTER64":
	case "STRING":
	case "HWADDR":
	case "IPADDR":
	default:
		return errors.New("UnkNown DataSourceType:" + m.DataSrcType + " in metric Config " + m.ID)
	}
	if !strings.HasPrefix(m.BaseOID, ".") {
		return errors.New("Bad BaseOid format:" + m.BaseOID + " in metric Config " + m.ID)
	}
	return nil
}

//SnmpMetric type to metric runtime
// SnmpMetric holds the runtime state for one configured metric: the last raw
// value pair (for counter deltas/rates) and the processed CookedValue.
type SnmpMetric struct {
	cfg         *SnmpMetricCfg
	ID          string
	CookedValue interface{}
	//CookedValue float64
	curValue   int64     // latest raw value from the device
	lastValue  int64     // previous raw value (counter types only)
	CurTime    time.Time // timestamp of curValue
	lastTime   time.Time // timestamp of lastValue
	Compute    func()    `json:"-"` // post-processes raw state into CookedValue
	setRawData func(pdu gosnmp.SnmpPDU, now time.Time)
	RealOID    string
}

// Init wires setRawData and Compute closures appropriate for the configured
// DataSrcType. GAUGE/INTEGER apply an optional scale/shift; COUNTER32/64
// compute deltas (or rates when GetRate is set) with wrap-around correction
// at MaxInt32/MaxInt64; STRING/IPADDR/HWADDR just convert the PDU value.
func (s *SnmpMetric) Init() error {
	s.ID = s.cfg.ID
	switch s.cfg.DataSrcType {
	case "GAUGE", "INTEGER":
		s.setRawData = func(pdu gosnmp.SnmpPDU, now time.Time) {
			val := pduVal2Int64(pdu)
			s.CookedValue = float64(val)
			s.CurTime = now
			s.Compute()
		}
		if s.cfg.Scale != 0.0 || s.cfg.Shift != 0.0 {
			// Linear transform: cooked = scale*raw + shift.
			s.Compute = func() {
				s.CookedValue = (s.cfg.Scale * float64(s.CookedValue.(float64))) + s.cfg.Shift
			}
		} else {
			s.Compute = func() {
			}
		}
	case "COUNTER32":
		s.setRawData = func(pdu gosnmp.SnmpPDU, now time.Time) {
			val := pduVal2Int64(pdu)
			// Shift current raw sample into the "last" slot before storing.
			s.lastTime = s.CurTime
			s.lastValue = s.curValue
			s.curValue = val
			s.CurTime = now
			s.Compute()
		}
		if s.cfg.GetRate == true {
			s.Compute = func() {
				duration := s.CurTime.Sub(s.lastTime)
				// curValue < lastValue means the 32-bit counter wrapped.
				if s.curValue < s.lastValue {
					s.CookedValue = float64(math.MaxInt32-s.lastValue+s.curValue) / duration.Seconds()
				} else {
					s.CookedValue = float64(s.curValue-s.lastValue) / duration.Seconds()
				}
			}
		} else {
			s.Compute = func() {
				if s.curValue < s.lastValue {
					s.CookedValue = float64(math.MaxInt32 - s.lastValue + s.curValue)
				} else {
					s.CookedValue = float64(s.curValue - s.lastValue)
				}
			}
		}
	case "COUNTER64":
		s.setRawData = func(pdu gosnmp.SnmpPDU, now time.Time) {
			val := pduVal2Int64(pdu)
			s.lastTime = s.CurTime
			s.lastValue = s.curValue
			s.curValue = val
			s.CurTime = now
			s.Compute()
		}
		if s.cfg.GetRate == true {
			s.Compute = func() {
				duration := s.CurTime.Sub(s.lastTime)
				// 64-bit wrap-around correction.
				if s.curValue < s.lastValue {
					s.CookedValue = float64(math.MaxInt64-s.lastValue+s.curValue) / duration.Seconds()
				} else {
					s.CookedValue = float64(s.curValue-s.lastValue) / duration.Seconds()
				}
			}
		} else {
			s.Compute = func() {
				if s.curValue < s.lastValue {
					s.CookedValue = float64(math.MaxInt64 - s.lastValue + s.curValue)
				} else {
					s.CookedValue = float64(s.curValue - s.lastValue)
				}
			}
		}
	case "STRING":
		s.setRawData = func(pdu gosnmp.SnmpPDU, now time.Time) {
			s.CookedValue = pduVal2str(pdu)
			s.CurTime = now
		}
	case "IPADDR":
		s.setRawData = func(pdu gosnmp.SnmpPDU, now time.Time) {
			s.CookedValue, _ = pduVal2IPaddr(pdu)
			s.CurTime = now
		}
	case "HWADDR":
		// NOTE(review): this uses pduVal2IPaddr, identical to the IPADDR
		// case — it looks like a copy-paste slip (a MAC-address converter
		// such as pduVal2hwaddr was presumably intended). Confirm before
		// fixing.
		s.setRawData = func(pdu gosnmp.SnmpPDU, now time.Time) {
			s.CookedValue, _ = pduVal2IPaddr(pdu)
			s.CurTime = now
		}
	}
	return nil
}
package luxafor // Commands recognized by the Luxafor. const ( static byte = 1 fade byte = 2 strobe byte = 3 wave byte = 4 pattrn byte = 6 )
package shop

// Vend is currently an empty marker interface, so every type satisfies it.
// NOTE(review): presumably a placeholder for a vending abstraction — define
// the method set once the contract is known.
type Vend interface {
}
// Command-line tool that deletes all comments by a given user ("spammer")
// from one of your own GitHub pull requests, with interactive confirmation.
package main

import (
	"bufio"
	"context"
	"flag"
	"fmt"
	"github.com/google/go-github/v38/github"
	"golang.org/x/oauth2"
	"log"
	"os"
	"strconv"
	"strings"
)

// conf holds all run parameters, gathered from flags, environment variables,
// or interactive prompts (see parseArgs).
type conf struct {
	username string // your GitHub username (the PR author)
	owner    string // owner of the repository containing the PR
	repo     string // repository name
	prId     int    // PR number; 0 means "pick interactively"
	spammer  string // username whose comments will be deleted
	token    string // GitHub API token
}

func main() {
	conf := parseArgs()
	client := ghClient(conf)
	defer printRateLimit(client)
	ensurePrId(client, conf)
	prConfirm(client, conf)
	toDelete := listComments(client, conf)
	deleteComments(client, conf, toDelete)
}

// deleteComments deletes each comment ID in toDelete after one interactive
// confirmation; exits early (printing rate limits) when there is nothing to
// delete, and aborts the process on the first deletion failure.
func deleteComments(client *github.Client, conf *conf, toDelete []int64) {
	if len(toDelete) <= 0 {
		fmt.Printf("nothing to delete here ...\n")
		printRateLimit(client)
		os.Exit(0)
	}
	fmt.Printf("%d comments to delete\n", len(toDelete))
	confirm(client)
	for _, commentId := range toDelete {
		fmt.Printf("about to delete comment [%d] ... ", commentId)
		if _, deleteErr := client.Issues.DeleteComment(context.Background(), conf.owner, conf.repo, commentId); deleteErr != nil {
			fmt.Print("fail\n")
			log.Fatal(deleteErr)
		} else {
			fmt.Print("ok\n")
		}
	}
	fmt.Println()
}

// listComments returns the IDs of the PR's issue comments authored by the
// configured spammer.
// NOTE(review): only the first page (up to 100 comments) is fetched — no
// pagination loop; confirm this is acceptable for heavily-spammed PRs.
func listComments(client *github.Client, conf *conf) []int64 {
	comments, _, err := client.Issues.ListComments(context.Background(), conf.owner, conf.repo, conf.prId,
		&github.IssueListCommentsOptions{ListOptions: github.ListOptions{PerPage: 100}})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println()
	toDelete := make([]int64, 0)
	for _, comment := range comments {
		//fmt.Printf("%+v", comment)
		if *comment.User.Login == conf.spammer {
			fmt.Printf("comment [%d] by [%s] to delete\n", *comment.ID, *comment.User.Login)
			toDelete = append(toDelete, *comment.ID)
		}
	}
	return toDelete
}

// prConfirm verifies the configured username actually authored the target PR
// (the tool only deletes comments on your own PRs), prints the PR summary,
// and asks for confirmation.
func prConfirm(client *github.Client, conf *conf) {
	fmt.Printf("checking if username [%s] matches ... ", conf.username)
	if pr, _, err := client.PullRequests.Get(context.Background(), conf.owner, conf.repo, conf.prId); err != nil {
		log.Fatal(err)
	} else {
		//fmt.Printf("%+v", *pr.User)
		if *pr.User.Login != conf.username {
			fmt.Printf("username [%s] and author [%s] names dont match", *pr.User.Login, conf.username)
			log.Fatal("sorry, you can delete only comments on your own PRs")
		} else {
			fmt.Printf("ok\n")
		}
		//fmt.Printf("%+v", pr)
		fmt.Printf("\n(#%d) %s\n", *pr.Number, *pr.Title)
		fmt.Printf("%s\n", *pr.HTMLURL)
		confirm(client)
	}
}

// ensurePrId fills conf.prId interactively when it was not supplied: it lists
// the user's open PRs and prompts for a selection; exits when there are none.
func ensurePrId(client *github.Client, conf *conf) {
	fmt.Println()
	if conf.prId == 0 {
		fmt.Printf("listing %s's PRs ... ", conf.username)
		usersPRs := listUsersPRs(client, conf)
		fmt.Println("ok")
		fmt.Println()
		if len(usersPRs) == 0 {
			fmt.Printf("No open PRs ...\n")
			os.Exit(0)
		}
		for i, pr := range usersPRs {
			fmt.Printf("%d] (#%d) %s \n", i, *pr.Number, *pr.Title)
		}
		if prI, converr := strconv.Atoi(readInput("choose PR")); converr != nil {
			log.Fatal(converr)
		} else {
			conf.prId = *usersPRs[prI].Number
		}
		fmt.Println()
	}
}

// listUsersPRs fetches the repo's open PRs and filters them down to those
// authored by the configured username.
// NOTE(review): PerPage 1000 exceeds GitHub's documented max of 100, and the
// err check happens after the results are used — confirm both are acceptable.
func listUsersPRs(client *github.Client, c *conf) []github.PullRequest {
	prs, _, err := client.PullRequests.List(context.Background(), c.owner, c.repo, &github.PullRequestListOptions{
		ListOptions: github.ListOptions{
			PerPage: 1000,
		},
	})
	usersPRs := make([]github.PullRequest, 0)
	for _, pr := range prs {
		if *pr.User.Login == c.username {
			usersPRs = append(usersPRs, *pr)
		}
	}
	if err != nil {
		log.Fatal(err)
	}
	return usersPRs
}

// printRateLimit prints the current GitHub API rate-limit status.
func printRateLimit(client *github.Client) {
	fmt.Println("\nlisting github rates:")
	if rates, _, err := client.RateLimits(context.Background()); err != nil {
		log.Fatal(err)
	} else {
		fmt.Printf("%+v\n", rates)
	}
}

// confirm asks the user for a y/n answer and exits (after printing rate
// limits) unless the answer is exactly "y".
func confirm(client *github.Client) {
	if readInput("ok? [y/n]") != "y" {
		printRateLimit(client)
		os.Exit(1)
	}
}

// ghClient builds a GitHub API client authenticated with the configured
// OAuth token.
func ghClient(conf *conf) *github.Client {
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(
		&oauth2.Token{AccessToken: conf.token},
	)
	tc := oauth2.NewClient(ctx, ts)
	return github.NewClient(tc)
}

// parseArgs reads the configuration from flags, falling back to environment
// variables (GITHUB_USERNAME, GITHUB_TOKEN) and then to interactive prompts;
// only the token has no interactive fallback.
func parseArgs() *conf {
	var conf = &conf{}
	flag.StringVar(&conf.username, "username", "", "your github username, can be set with GITHUB_USERNAME env variable")
	flag.StringVar(&conf.owner, "owner", "", "name of the owner of the repo of the PR")
	flag.StringVar(&conf.repo, "repo", "", "name of the repo of the PR")
	flag.IntVar(&conf.prId, "prId", 0, "ID of the pull request")
	flag.StringVar(&conf.spammer, "spammer", "", "username of comments to delete")
	flag.StringVar(&conf.token, "token", "", "github token, can be set with GITHUB_TOKEN env var")
	flag.Parse()
	fmt.Printf("%+v\n", *conf)
	if conf.username == "" {
		if username, ok := os.LookupEnv("GITHUB_USERNAME"); ok {
			fmt.Println("found github username from GITHUB_USERNAME env")
			conf.username = username
		} else {
			if conf.username = readInput("your username"); conf.username == "" {
				log.Fatal("username can't be empty")
			}
		}
	}
	if conf.token == "" {
		if token, ok := os.LookupEnv("GITHUB_TOKEN"); ok {
			fmt.Println("found github token from GITHUB_TOKEN env")
			conf.token = token
		} else {
			log.Fatal("github token must be set")
		}
	}
	if conf.owner == "" {
		if conf.owner = readInput("GH Repo owner"); conf.owner == "" {
			log.Fatal("owner can't be empty")
		}
	}
	if conf.repo == "" {
		if conf.repo = readInput("GH Repo"); conf.repo == "" {
			log.Fatal("repo can't be empty")
		}
	}
	if conf.spammer == "" {
		if conf.spammer = readInput("spammer username"); conf.spammer == "" {
			log.Fatal("spammer can't be empty")
		}
	}
	return conf
}

// readInput prompts on stdout and returns one line from stdin with the
// trailing newline stripped.
// NOTE(review): only "\n" is trimmed, so a "\r" survives on Windows input —
// confirm whether that platform matters here.
func readInput(prompt string) string {
	reader := bufio.NewReader(os.Stdin)
	fmt.Printf("%s: ", prompt)
	text, err := reader.ReadString('\n')
	if err != nil {
		log.Fatal(err)
	}
	return strings.Trim(text, "\n")
}
package utils import ( "github.com/shopspring/decimal" ) // 代收-计算商户手续费-默认内扣 func CalculatePayOrderFeeMerchant(reqAmount int64, singleFee int64, rate float64) int64 { total := decimal.NewFromInt(reqAmount) // 总金额 * (费率/100) + 单笔费用 fee := total.Mul(decimal.NewFromFloat(rate)).Div(decimal.NewFromInt(100)).Add(decimal.NewFromInt(singleFee)) // 四舍五入去除小数点 fee = fee.Round(0) // 取整数 return fee.IntPart() } // 代收-计算上游手续费-默认内扣 func CalculatePayOrderFeeUpstream(reqAmount int64, singleFee int64, rate float64) int64 { total := decimal.NewFromInt(reqAmount) // 总金额 * (费率/100) + 单笔费用 fee := total.Mul(decimal.NewFromFloat(rate)).Div(decimal.NewFromInt(100)).Add(decimal.NewFromInt(singleFee)) // 四舍五入去除小数点 fee = fee.Round(0) // 取整数 return fee.IntPart() } // 代付-手续费内扣方式-计算请求上游的金额 // // payee_real_amount 收款方实际到账金额 // singleFee 上游单笔手续费 // rate 上游的手续费率 func CalculateUpstreamInnerAmount(payeeRealAmount int64, singleFee int64, rate float64) int64 { // 请求上游的金额 = 收款方实际到账金额 + (请求上游的金额 * 上游的手续费率 + 上游单笔手续费) // 示例: // 请求上游的金额: A // 收款方实际到账金额: 95 // 上游的手续费率: 3% // 上游单笔手续费: 1 // 计算公式: 95 + (A*3% + 1) = A // 计算公式: 95 + 1 + A*3% = A // 计算公式: 96 + A*3% = A // 计算公式: 96 = A - A*3% // 计算公式: 96 = A(1 - 3%) // 计算公式: 96 = A(97%) // 计算公式: 96 = A * 97 / 100 // 计算公式: 96 * 100 / 97 = A // 计算公式: 96 * 100 / (100-3) = A amount := decimal.NewFromInt(payeeRealAmount) fee := decimal.NewFromInt(100).Sub(decimal.NewFromFloat(rate)) amount = amount.Add(decimal.NewFromInt(singleFee)).Mul(decimal.NewFromInt(100)).Div(fee) // 四舍五入去除小数点 amount = amount.Round(0) // 取整数 return amount.IntPart() }
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package legacyrpc

import (
	"errors"

	"github.com/btcsuite/btcd/btcjson"
)

// TODO(jrick): There are several error paths which 'replace' various errors
// with a more appropriate error from the btcjson package.  Create a map of
// these replacements so they can be handled once after an RPC handler has
// returned and before the error is marshaled.

// Error types to simplify the reporting of specific categories of
// errors, and their *btcjson.RPCError creation.
type (
	// DeserializationError describes a failed deserialization due to bad
	// user input.  It corresponds to btcjson.ErrRPCDeserialization.
	DeserializationError struct {
		error
	}

	// InvalidParameterError describes an invalid parameter passed by
	// the user.  It corresponds to btcjson.ErrRPCInvalidParameter.
	InvalidParameterError struct {
		error
	}

	// ParseError describes a failed parse due to bad user input.  It
	// corresponds to btcjson.ErrRPCParse.
	ParseError struct {
		error
	}
)

// Errors variables that are defined once here to avoid duplication below.
var (
	// Wrapped sentinel errors for common invalid-parameter cases.
	ErrNeedPositiveAmount = InvalidParameterError{
		errors.New("amount must be positive"),
	}

	ErrNeedPositiveMinconf = InvalidParameterError{
		errors.New("minconf must be positive"),
	}

	// Pre-built RPC errors returned directly to JSON-RPC clients.
	ErrAddressNotInWallet = btcjson.RPCError{
		Code:    btcjson.ErrRPCWallet,
		Message: "address not found in wallet",
	}

	ErrAddressTypeUnknown = btcjson.RPCError{
		Code:    btcjson.ErrRPCWalletInvalidAddressType,
		Message: "unknown address type",
	}

	ErrAccountNameNotFound = btcjson.RPCError{
		Code:    btcjson.ErrRPCWalletInvalidAccountName,
		Message: "account name not found",
	}

	ErrUnloadedWallet = btcjson.RPCError{
		Code:    btcjson.ErrRPCWallet,
		Message: "Request requires a wallet but wallet has not loaded yet",
	}

	ErrWalletUnlockNeeded = btcjson.RPCError{
		Code:    btcjson.ErrRPCWalletUnlockNeeded,
		Message: "Enter the wallet passphrase with walletpassphrase first",
	}

	ErrNotImportedAccount = btcjson.RPCError{
		Code:    btcjson.ErrRPCWallet,
		Message: "imported addresses must belong to the imported account",
	}

	ErrNoTransactionInfo = btcjson.RPCError{
		Code:    btcjson.ErrRPCNoTxInfo,
		Message: "No information for transaction",
	}

	ErrReservedAccountName = btcjson.RPCError{
		Code:    btcjson.ErrRPCInvalidParameter,
		Message: "Account name is reserved by RPC server",
	}
)
/* * Copyright 2019 Banco Bilbao Vizcaya Argentaria, S.A. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package mux import ( "net/http" "github.com/gorilla/mux" "github.com/BBVA/kapow/internal/server/model" ) func gorillize(rs []model.Route, buildHandler func(model.Route) http.Handler) *mux.Router { m := mux.NewRouter() for _, r := range rs { m.Handle(r.Pattern, buildHandler(r)).Methods(r.Method) } return m }
/*
You are given an integer array nums containing distinct numbers, and you can
perform the following operations until the array is empty:

	If the first element has the smallest value, remove it
	Otherwise, put the first element at the end of the array.

Return an integer denoting the number of operations it takes to make nums
empty.

Example 1: nums = [3,4,-1]  -> 5
Example 2: nums = [1,2,4,3] -> 5
Example 3: nums = [1,2,3]   -> 3

Constraints:
	1 <= nums.length <= 10^5
	-10^9 <= nums[i] <= 10^9
	All values in nums are distinct.
*/
package main

import "sort"

func main() {
	assert(empties([]int{3, 4, -1}) == 5)
	assert(empties([]int{1, 2, 4, 3}) == 5)
	assert(empties([]int{1, 2, 3}) == 3)
}

// assert panics when the given condition does not hold.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}

// empties returns the number of rotate-or-remove operations needed to drain
// the array. Elements leave in ascending order; whenever the next-smallest
// value sits earlier (in original order) than the one just removed, the
// remaining elements must each rotate once before it can be removed.
// NOTE: sorts its argument in place.
func empties(a []int) int {
	// Remember each value's original position before sorting.
	pos := make(map[int]int, len(a))
	for idx, v := range a {
		pos[v] = idx
	}
	sort.Ints(a)

	n := len(a)
	ops := n // every element costs at least its own removal
	for i := 1; i < n; i++ {
		if pos[a[i]] < pos[a[i-1]] {
			// Wrap-around: the n-i still-present elements rotate once each.
			ops += n - i
		}
	}
	return ops
}
package models import ( "net" "crypto/rsa" "github.com/monnand/dhkx" "container/list" ) // TODO: Discuss wether to define it here or to define in it in the service packe >> Downside here is that calling with Peer as caller isn't possible // Peer is the standard object for a running peer that is accepting connections type Peer struct { TCPListener *net.TCPListener `json:"tcp_listener"` UDPListener *net.UDPConn `json:"udp_listener"` UDPPort int `json:"udp_port"` P2P_Port int `json:"p2p_port"` // This is the Port for the TCP port P2P_Hostname string `json:"p2p_hostname"` // This is the ip address of the peer PrivateKey *rsa.PrivateKey `json:"private_key"` PublicKey *rsa.PublicKey `json:"public_key"` UDPConnections map[uint32]*UDPConnection `json:"udp_connections"` TCPConnections map[uint32]*TCPConnection `json:"tcp_writers"` CryptoSessionMap map[string]*CryptoObject `json:"crypto_session_map"` TunnelHostOrder map[uint32]*list.List `json:"tunnel_host_order"` // Save all hashed hostkey of a tunnel connection ordered in a list } // Identify by id in hashmap type TCPConnection struct { TunnelId uint32 `json:"tunnel_id"` LeftWriter *TCPWriter `json:"left_writer"` RightWriter *TCPWriter `json:"right_writer"` FinalDestination *OnionTunnelBuild `json:"final_destination"` OriginHostkey []byte `json:"origin_hostkey"` } type TCPWriter struct { DestinationIP string `json:"destination_ip"` DestinationPort int `json:"destination_port"` TCPWriter net.Conn `json:"tcp_writer"` } type CryptoObject struct { TunnelId uint32 `json:"tunnel_id"` PrivateKey *dhkx.DHKey `json:"tunnel_id"` PublicKey []byte `json:"tunnel_id"` SessionKey []byte `json:"tunnel_id"` Group *dhkx.DHGroup `json:"tunnel_id"` }
/*
 * EVE Swagger Interface
 *
 * An OpenAPI for EVE Online
 *
 * OpenAPI spec version: 0.4.1.dev1
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

// Code generated by swagger-codegen — prefer regenerating from the OpenAPI
// spec over hand-editing this file.
package swagger

import (
	"net/url"
	"strings"

	"encoding/json"
	"fmt"
)

// AllianceApi wraps the ESI alliance endpoints; all HTTP traffic goes
// through Configuration.APIClient.
type AllianceApi struct {
	Configuration *Configuration
}

// NewAllianceApi returns an AllianceApi using the default configuration.
func NewAllianceApi() *AllianceApi {
	configuration := NewConfiguration()
	return &AllianceApi{
		Configuration: configuration,
	}
}

// NewAllianceApiWithBasePath returns an AllianceApi whose default
// configuration is overridden with the given base path.
func NewAllianceApiWithBasePath(basePath string) *AllianceApi {
	configuration := NewConfiguration()
	configuration.BasePath = basePath

	return &AllianceApi{
		Configuration: configuration,
	}
}

/**
 * List all alliances
 * List all active player alliances --- Alternate route: &#x60;/v1/alliances/&#x60; Alternate route: &#x60;/legacy/alliances/&#x60; Alternate route: &#x60;/dev/alliances/&#x60; --- This route is cached for up to 3600 seconds
 *
 * @param datasource The server name you would like data from
 * @param userAgent Client identifier, takes precedence over headers
 * @param xUserAgent Client identifier, takes precedence over User-Agent
 * @return []int32
 */
func (a AllianceApi) GetAlliances(datasource string, userAgent string, xUserAgent string) ([]int32, *APIResponse, error) {

	var localVarHttpMethod = strings.ToUpper("Get")
	// create path and map variables
	localVarPath := a.Configuration.BasePath + "/alliances/"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := make(map[string]string)
	var localVarPostBody interface{}
	var localVarFileName string
	var localVarFileBytes []byte
	// add default headers if any
	for key := range a.Configuration.DefaultHeader {
		localVarHeaderParams[key] = a.Configuration.DefaultHeader[key]
	}
	localVarQueryParams.Add("datasource", a.Configuration.APIClient.ParameterToString(datasource, ""))
	localVarQueryParams.Add("user_agent", a.Configuration.APIClient.ParameterToString(userAgent, ""))

	// to determine the Content-Type header
	localVarHttpContentTypes := []string{}

	// set Content-Type header
	localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}

	// set Accept header
	localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	// header params "X-User-Agent"
	localVarHeaderParams["X-User-Agent"] = a.Configuration.APIClient.ParameterToString(xUserAgent, "")
	var successPayload = new([]int32)
	localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)

	var localVarURL, _ = url.Parse(localVarPath)
	localVarURL.RawQuery = localVarQueryParams.Encode()
	var localVarAPIResponse = &APIResponse{Operation: "GetAlliances", Method: localVarHttpMethod, RequestURL: localVarURL.String()}
	if localVarHttpResponse != nil {
		localVarAPIResponse.Response = localVarHttpResponse.RawResponse
		localVarAPIResponse.Payload = localVarHttpResponse.Body()
	}

	if err != nil {
		return *successPayload, localVarAPIResponse, err
	}
	err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload)
	return *successPayload, localVarAPIResponse, err
}

/**
 * Get alliance information
 * Public information about an alliance --- Alternate route: &#x60;/v2/alliances/{alliance_id}/&#x60; --- This route is cached for up to 3600 seconds
 *
 * @param allianceId An Eve alliance ID
 * @param datasource The server name you would like data from
 * @param userAgent Client identifier, takes precedence over headers
 * @param xUserAgent Client identifier, takes precedence over User-Agent
 * @return *GetAlliancesAllianceIdOk
 */
func (a AllianceApi) GetAlliancesAllianceId(allianceId int32, datasource string, userAgent string, xUserAgent string) (*GetAlliancesAllianceIdOk, *APIResponse, error) {

	var localVarHttpMethod = strings.ToUpper("Get")
	// create path and map variables
	localVarPath := a.Configuration.BasePath + "/alliances/{alliance_id}/"
	localVarPath = strings.Replace(localVarPath, "{"+"alliance_id"+"}", fmt.Sprintf("%v", allianceId), -1)

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := make(map[string]string)
	var localVarPostBody interface{}
	var localVarFileName string
	var localVarFileBytes []byte
	// add default headers if any
	for key := range a.Configuration.DefaultHeader {
		localVarHeaderParams[key] = a.Configuration.DefaultHeader[key]
	}
	localVarQueryParams.Add("datasource", a.Configuration.APIClient.ParameterToString(datasource, ""))
	localVarQueryParams.Add("user_agent", a.Configuration.APIClient.ParameterToString(userAgent, ""))

	// to determine the Content-Type header
	localVarHttpContentTypes := []string{}

	// set Content-Type header
	localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}

	// set Accept header
	localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	// header params "X-User-Agent"
	localVarHeaderParams["X-User-Agent"] = a.Configuration.APIClient.ParameterToString(xUserAgent, "")
	var successPayload = new(GetAlliancesAllianceIdOk)
	localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)

	var localVarURL, _ = url.Parse(localVarPath)
	localVarURL.RawQuery = localVarQueryParams.Encode()
	var localVarAPIResponse = &APIResponse{Operation: "GetAlliancesAllianceId", Method: localVarHttpMethod, RequestURL: localVarURL.String()}
	if localVarHttpResponse != nil {
		localVarAPIResponse.Response = localVarHttpResponse.RawResponse
		localVarAPIResponse.Payload = localVarHttpResponse.Body()
	}

	if err != nil {
		return successPayload, localVarAPIResponse, err
	}
	err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload)
	return successPayload, localVarAPIResponse, err
}

/**
 * List alliance&#39;s corporations
 * List all current member corporations of an alliance --- Alternate route: &#x60;/v1/alliances/{alliance_id}/corporations/&#x60; Alternate route: &#x60;/legacy/alliances/{alliance_id}/corporations/&#x60; Alternate route: &#x60;/dev/alliances/{alliance_id}/corporations/&#x60; --- This route is cached for up to 3600 seconds
 *
 * @param allianceId An EVE alliance ID
 * @param datasource The server name you would like data from
 * @param userAgent Client identifier, takes precedence over headers
 * @param xUserAgent Client identifier, takes precedence over User-Agent
 * @return []int32
 */
func (a AllianceApi) GetAlliancesAllianceIdCorporations(allianceId int32, datasource string, userAgent string, xUserAgent string) ([]int32, *APIResponse, error) {

	var localVarHttpMethod = strings.ToUpper("Get")
	// create path and map variables
	localVarPath := a.Configuration.BasePath + "/alliances/{alliance_id}/corporations/"
	localVarPath = strings.Replace(localVarPath, "{"+"alliance_id"+"}", fmt.Sprintf("%v", allianceId), -1)

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := make(map[string]string)
	var localVarPostBody interface{}
	var localVarFileName string
	var localVarFileBytes []byte
	// add default headers if any
	for key := range a.Configuration.DefaultHeader {
		localVarHeaderParams[key] = a.Configuration.DefaultHeader[key]
	}
	localVarQueryParams.Add("datasource", a.Configuration.APIClient.ParameterToString(datasource, ""))
	localVarQueryParams.Add("user_agent", a.Configuration.APIClient.ParameterToString(userAgent, ""))

	// to determine the Content-Type header
	localVarHttpContentTypes := []string{}

	// set Content-Type header
	localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}

	// set Accept header
	localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	// header params "X-User-Agent"
	localVarHeaderParams["X-User-Agent"] = a.Configuration.APIClient.ParameterToString(xUserAgent, "")
	var successPayload = new([]int32)
	localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)

	var localVarURL, _ = url.Parse(localVarPath)
	localVarURL.RawQuery = localVarQueryParams.Encode()
	var localVarAPIResponse = &APIResponse{Operation: "GetAlliancesAllianceIdCorporations", Method: localVarHttpMethod, RequestURL: localVarURL.String()}
	if localVarHttpResponse != nil {
		localVarAPIResponse.Response = localVarHttpResponse.RawResponse
		localVarAPIResponse.Payload = localVarHttpResponse.Body()
	}

	if err != nil {
		return *successPayload, localVarAPIResponse, err
	}
	err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload)
	return *successPayload, localVarAPIResponse, err
}

/**
 * Get alliance icon
 * Get the icon urls for a alliance --- Alternate route: &#x60;/v1/alliances/{alliance_id}/icons/&#x60; Alternate route: &#x60;/legacy/alliances/{alliance_id}/icons/&#x60; Alternate route: &#x60;/dev/alliances/{alliance_id}/icons/&#x60; --- This route is cached for up to 3600 seconds
 *
 * @param allianceId An EVE alliance ID
 * @param datasource The server name you would like data from
 * @param userAgent Client identifier, takes precedence over headers
 * @param xUserAgent Client identifier, takes precedence over User-Agent
 * @return *GetAlliancesAllianceIdIconsOk
 */
func (a AllianceApi) GetAlliancesAllianceIdIcons(allianceId int32, datasource string, userAgent string, xUserAgent string) (*GetAlliancesAllianceIdIconsOk, *APIResponse, error) {

	var localVarHttpMethod = strings.ToUpper("Get")
	// create path and map variables
	localVarPath := a.Configuration.BasePath + "/alliances/{alliance_id}/icons/"
	localVarPath = strings.Replace(localVarPath, "{"+"alliance_id"+"}", fmt.Sprintf("%v", allianceId), -1)

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := make(map[string]string)
	var localVarPostBody interface{}
	var localVarFileName string
	var localVarFileBytes []byte
	// add default headers if any
	for key := range a.Configuration.DefaultHeader {
		localVarHeaderParams[key] = a.Configuration.DefaultHeader[key]
	}
	localVarQueryParams.Add("datasource", a.Configuration.APIClient.ParameterToString(datasource, ""))
	localVarQueryParams.Add("user_agent", a.Configuration.APIClient.ParameterToString(userAgent, ""))

	// to determine the Content-Type header
	localVarHttpContentTypes := []string{}

	// set Content-Type header
	localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}

	// set Accept header
	localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	// header params "X-User-Agent"
	localVarHeaderParams["X-User-Agent"] = a.Configuration.APIClient.ParameterToString(xUserAgent, "")
	var successPayload = new(GetAlliancesAllianceIdIconsOk)
	localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)

	var localVarURL, _ = url.Parse(localVarPath)
	localVarURL.RawQuery = localVarQueryParams.Encode()
	var localVarAPIResponse = &APIResponse{Operation: "GetAlliancesAllianceIdIcons", Method: localVarHttpMethod, RequestURL: localVarURL.String()}
	if localVarHttpResponse != nil {
		localVarAPIResponse.Response = localVarHttpResponse.RawResponse
		localVarAPIResponse.Payload = localVarHttpResponse.Body()
	}

	if err != nil {
		return successPayload, localVarAPIResponse, err
	}
	err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload)
	return successPayload, localVarAPIResponse, err
}

/**
 * Get alliance names
 * Resolve a set of alliance IDs to alliance names --- Alternate route: &#x60;/v1/alliances/names/&#x60; Alternate route: &#x60;/legacy/alliances/names/&#x60; Alternate route: &#x60;/dev/alliances/names/&#x60; --- This route is cached for up to 3600 seconds
 *
 * @param allianceIds A comma separated list of alliance IDs
 * @param datasource The server name you would like data from
 * @param userAgent Client identifier, takes precedence over headers
 * @param xUserAgent Client identifier, takes precedence over User-Agent
 * @return []GetAlliancesNames200Ok
 */
func (a AllianceApi) GetAlliancesNames(allianceIds []int64, datasource string, userAgent string, xUserAgent string) ([]GetAlliancesNames200Ok, *APIResponse, error) {

	var localVarHttpMethod = strings.ToUpper("Get")
	// create path and map variables
	localVarPath := a.Configuration.BasePath + "/alliances/names/"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
localVarFormParams := make(map[string]string) var localVarPostBody interface{} var localVarFileName string var localVarFileBytes []byte // add default headers if any for key := range a.Configuration.DefaultHeader { localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] } var collectionFormat = "multi" localVarQueryParams.Add("alliance_ids", a.Configuration.APIClient.ParameterToString(allianceIds, collectionFormat)) localVarQueryParams.Add("datasource", a.Configuration.APIClient.ParameterToString(datasource, "")) localVarQueryParams.Add("user_agent", a.Configuration.APIClient.ParameterToString(userAgent, "")) // to determine the Content-Type header localVarHttpContentTypes := []string{ } // set Content-Type header localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } // header params "X-User-Agent" localVarHeaderParams["X-User-Agent"] = a.Configuration.APIClient.ParameterToString(xUserAgent, "") var successPayload = new([]GetAlliancesNames200Ok) localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) var localVarURL, _ = url.Parse(localVarPath) localVarURL.RawQuery = localVarQueryParams.Encode() var localVarAPIResponse = &APIResponse{Operation: "GetAlliancesNames", Method: localVarHttpMethod, RequestURL: localVarURL.String()} if localVarHttpResponse != nil { localVarAPIResponse.Response = localVarHttpResponse.RawResponse localVarAPIResponse.Payload = 
localVarHttpResponse.Body() } if err != nil { return *successPayload, localVarAPIResponse, err } err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) return *successPayload, localVarAPIResponse, err }
package common

import (
	"echo-stripe/response"
	"fmt"
	"net/http"
	"runtime"

	"github.com/go-playground/validator"
	"github.com/labstack/echo/v4"
)

// CustomHTTPErrorHandler is an echo.HTTPErrorHandler that converts any error
// reaching the framework into a JSON response.BasicResponse body.
//
// Mapping:
//   - validator.ValidationErrors -> 400 with a per-field error map
//   - *echo.HTTPError            -> the error's own status code and message
//     (an Internal error, when set, is appended to the message)
//   - anything else              -> 500 with the generic status text
//
// For 500s a stack trace is printed so recovered panics can be diagnosed.
// HEAD requests receive no body (echo issue #608).
func CustomHTTPErrorHandler(err error, c echo.Context) {
	respCode := http.StatusInternalServerError
	resp := response.BasicResponse{}
	resp.Success = false
	resp.Message = ""

	sendErrorResponse := func() {
		// Send the response only if nothing has been written yet; writing a
		// second response would corrupt the one already committed.
		if !c.Response().Committed {
			if c.Request().Method == http.MethodHead { // Issue #608
				err = c.NoContent(respCode)
			} else {
				err = c.JSON(respCode, resp)
			}
			if err != nil {
				c.Logger().Error(err)
			}
		}
	}

	if validationErrors, ok := err.(validator.ValidationErrors); ok {
		// We have validation errors, sending back a 400.
		respCode = http.StatusBadRequest
		resp.Message = "Validation errors"
		resp.Errors = map[string]string{}
		for _, ve := range validationErrors {
			resp.Errors[ve.Field()] = "Contains unexpected value"
		}
		sendErrorResponse()
		return
	}

	if he, ok := err.(*echo.HTTPError); ok {
		// Echo's own errors carry their intended status code and message.
		respCode = he.Code
		resp.Message = fmt.Sprintf("%v", he.Message)
		if he.Internal != nil {
			err = fmt.Errorf("%v, %v", err, he.Internal)
			resp.Message += " - ErrorMessageInternal: " + he.Internal.Error()
		}
	} else {
		resp.Message = http.StatusText(respCode)
	}
	//else if MainCfg.Development {
	//	resp.Message = err.Error()
	//}

	if respCode == http.StatusInternalServerError {
		// 4 KB stack.
		stack := make([]byte, 4<<10)
		length := runtime.Stack(stack, false)
		fmt.Printf("[RECOVER From Exception]: %v %s\n", err, stack[:length])
	}

	c.Logger().Debug(err)
	sendErrorResponse()
}
// find the 10001st prime // using a sieve stolen from http://golang.org/doc/play/sieve.go package main import ( "fmt" ) func main() { primes := make([]int, 10001) length := 1 primes[0] = 2 i := 3 for ;length < 10001; { prime := true for j := 0; j < length; j++ { if i % primes[j] == 0 { prime = false break } } if prime { primes[length] = i length++ } i++ } fmt.Println(primes[length-1]) }