text
stringlengths
11
4.05M
package matchmaker

import (
	"encoding/json"
	"io"
	"log"
	"net/http"
	"net/url"
	"time"

	"github.com/garyburd/redigo/redis"
	"github.com/pkg/errors"
)

const (
	// maxRetries bounds how many times sessionIPAndPort polls the session
	// service (one second apart) before giving up.
	maxRetries = 20
)

// Session represents a game session.
type Session struct {
	ID   string `json:"id"`
	Port int    `json:"port,omitempty"`
	IP   string `json:"ip,omitempty"`
}

// generateSessionForGame creates a session via the session service, records
// its ID/IP/port on g, marks the game closed and persists it via updateGame.
// On error the (possibly partially updated) game is returned alongside it.
func (s *Server) generateSessionForGame(c redis.Conn, g *Game) (*Game, error) {
	p := s.sessAddr + "/session"
	r, err := http.Post(p, "application/json", nil)
	if err != nil {
		return g, errors.Wrap(err, "error calling session")
	}
	defer r.Body.Close()

	sess := Session{}
	if err = json.NewDecoder(r.Body).Decode(&sess); err != nil {
		return g, errors.WithStack(err)
	}
	g.SessionID = sess.ID

	// FIX: this error used to be silently discarded, so a failed lookup
	// produced a "closed" game with an empty IP and zero port.
	sess, err = s.sessionIPAndPort(sess)
	if err != nil {
		return g, err
	}
	g.Port = sess.Port
	g.IP = sess.IP
	g.Status = statusClosed

	return g, updateGame(c, g)
}

// sessionIPAndPort polls the session service until the session's connection
// details are available (at most maxRetries+1 attempts), then decodes them
// into sess.
func (s *Server) sessionIPAndPort(sess Session) (Session, error) {
	var body io.ReadCloser
	for i := 0; i <= maxRetries; i++ {
		r := s.sessAddr + "/session/" + url.QueryEscape(sess.ID)
		res, err := http.Get(r)
		if err != nil {
			return sess, errors.Wrap(err, "error getting session information")
		}
		if res.StatusCode == http.StatusOK {
			log.Printf("[info][session] received session data, status: %v", res.StatusCode)
			body = res.Body
			break
		}
		if cerr := res.Body.Close(); cerr != nil {
			log.Printf("[warn][session] could not close body: %v", cerr)
		}
		log.Printf("[info][session] session: %v data could not be found, please try again", sess.ID)
		time.Sleep(time.Second)
	}
	// FIX: the nil check must precede any use of body. The original code ran
	// `defer body.Close()` before checking for nil, which panicked with a nil
	// pointer dereference whenever all retries were exhausted.
	if body == nil {
		return sess, errors.Errorf("could not get session: %v", sess.ID)
	}
	defer body.Close()

	return sess, errors.Wrap(json.NewDecoder(body).Decode(&sess), "could not decode json to session")
}
package internal

import (
	"errors"

	"github.com/mmatur/aws-mfa/internal/types"
	survey "gopkg.in/AlecAivazis/survey.v1"
)

// PromptSurvey asks the user to pick an MFA device (only when more than one
// is available) and to enter an MFA code, returning the cleaned-up answers.
func PromptSurvey(devices []string) (*types.SurveyAnswer, error) {
	if len(devices) == 0 {
		return nil, errors.New("no devices")
	}

	result := &types.SurveyAnswer{}
	var questions []*survey.Question

	// With exactly one device there is nothing to choose: preselect it and
	// skip the device prompt entirely.
	if len(devices) == 1 {
		result.Device = devices[0]
	} else {
		questions = append(questions, &survey.Question{
			Name: "mfa-device",
			Prompt: &survey.Select{
				Message: "Choose your mfa device",
				Options: devices,
			},
		})
	}

	// The MFA code is always requested.
	questions = append(questions, &survey.Question{
		Name:      "mfa-code",
		Prompt:    &survey.Input{Message: "What is your MFA code?"},
		Validate:  survey.Required,
		Transform: survey.Title,
	})

	if err := survey.Ask(questions, result); err != nil {
		return nil, err
	}

	result.CleanAnswers()
	return result, nil
}
package main

import (
	"testing"
	"fmt"
)

// N is the fixed iteration count forced onto both benchmarks below.
const N = 3000000

// String constants are placed in a read-only segment at compile time, so
// their backing data cannot be written, and identical string constants are
// stored only once. Strings produced by fmt.Sprintf are allocated on the
// heap, so their backing data can be modified.
// (Comments translated from the original Chinese.)
func Benchmark_Normal(b *testing.B) {
	// NOTE(review): overriding b.N defeats the testing framework's adaptive
	// iteration count; idiomatic benchmarks loop `for i := 0; i < b.N; i++`.
	b.N = N
	for i := 1; i < N; i++ {
		s := fmt.Sprintf("12345678901234567890123456789012345678901234567890")
		// Round-trip through []byte copies the heap string into writable memory.
		bb := []byte(s)
		bb[0] = 'x'
		s = string(bb)
		s = s // self-assignment only to keep s "used"
	}
}

// Only dynamically generated strings may have their data converted and
// modified this way. (Translated from the original Chinese.)
func Benchmark_Direct(b *testing.B) {
	b.N = N
	for i := 1; i < N; i++ {
		s := fmt.Sprintf("12345678901234567890123456789012345678901234567890")
		// StringBytes is defined elsewhere in this package; presumably it
		// aliases the string's backing array without copying — TODO confirm.
		bb := StringBytes(s)
		bb[0] = 'x'
		s = s // keep s "used"
	}
}
// Go support for Protocol Buffers RPC which compatiable with https://github.com/Baidu-ecom/Jprotobuf-rpc-socket // // Copyright 2002-2007 the original author or authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pbrpc import ( "errors" "time" pool "github.com/jolestar/go-commons-pool" ) var Empty_Head = make([]byte, SIZE) var ERR_POOL_NOT_INIT = errors.New("[connpool-001]Object pool is nil maybe not init Connect() function.") var ERR_GET_CONN_FAIL = errors.New("[connpool-002]Can not get connection from connection pool. 
target object is nil.") var ERR_DESTORY_OBJECT_NIL = errors.New("[connpool-003]Destroy object failed due to target object is nil.") var ERR_POOL_URL_NIL = errors.New("[connpool-004]Can not create object cause of URL is nil.") var HB_SERVICE_NAME = "__heartbeat" var HB_METHOD_NAME = "__beat" /* type Connection interface { SendReceive(rpcDataPackage *RpcDataPackage) (*RpcDataPackage, error) Close() error } */ type TCPConnectionPool struct { Config *pool.ObjectPoolConfig objectPool *pool.ObjectPool } func NewDefaultTCPConnectionPool(url URL, timeout *time.Duration) (*TCPConnectionPool, error) { return NewTCPConnectionPool(url, timeout, nil) } func NewTCPConnectionPool(url URL, timeout *time.Duration, config *pool.ObjectPoolConfig) (*TCPConnectionPool, error) { connection := TCPConnectionPool{} if config == nil { connection.Config = pool.NewDefaultPoolConfig() connection.Config.TestOnBorrow = true } else { connection.Config = config } var err error err = connection.connect(url, timeout, 0) if err != nil { return nil, err } return &connection, nil } func (c *TCPConnectionPool) connect(url URL, timeout *time.Duration, sendChanSize int) error { factory := ConnectionPoolFactory{} factory.url = &url var objectPool *pool.ObjectPool eConfig := c.Config if eConfig == nil { eConfig = pool.NewDefaultPoolConfig() } objectPool = pool.NewObjectPool(&factory, eConfig) c.objectPool = objectPool return nil } func (c *TCPConnectionPool) borrowObject() (*TCPConnection, error) { if c.objectPool == nil { return nil, ERR_POOL_NOT_INIT } object, err := c.objectPool.BorrowObject() if err != nil { return nil, err } if object == nil { return nil, ERR_GET_CONN_FAIL } return object.(*TCPConnection), nil } func (c *TCPConnectionPool) SendReceive(rpcDataPackage *RpcDataPackage) (*RpcDataPackage, error) { object, err := c.borrowObject() if err != nil { return nil, err } defer c.objectPool.ReturnObject(object) return object.SendReceive(rpcDataPackage) } func (c *TCPConnectionPool) Close() error { if 
c.objectPool == nil { return ERR_POOL_NOT_INIT } c.objectPool.Close() return nil } func (c *TCPConnectionPool) GetNumActive() int { if c.objectPool == nil { return 0 } return c.objectPool.GetNumActive() } type ConnectionPoolFactory struct { url *URL } func (c *ConnectionPoolFactory) MakeObject() (*pool.PooledObject, error) { if c.url == nil { return nil, ERR_POOL_URL_NIL } connection := TCPConnection{} err := connection.connect(*c.url, nil, 0) if err != nil { return nil, err } return pool.NewPooledObject(&connection), nil } func (c *ConnectionPoolFactory) DestroyObject(object *pool.PooledObject) error { obj := object.Object if obj == nil { return ERR_DESTORY_OBJECT_NIL } conn := obj.(Connection) return conn.Close() } func (c *ConnectionPoolFactory) ValidateObject(object *pool.PooledObject) bool { if true { return true } obj := object.Object if obj == nil { return false } conn := obj.(ConnectionTester) return conn.TestConnection() != nil } func (c *ConnectionPoolFactory) ActivateObject(object *pool.PooledObject) error { return nil } func (c *ConnectionPoolFactory) PassivateObject(object *pool.PooledObject) error { return nil }
//
// Copyright (C) 2019-2021 vdaas.org vald team <vald@vdaas.org>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Package grpc provides agent ngt gRPC client functions
package grpc

import (
	"context"

	agent "github.com/vdaas/vald/apis/grpc/agent/core"
	"github.com/vdaas/vald/internal/client"
	"github.com/vdaas/vald/internal/net/grpc"
)

// Client represents agent NGT client interface.
type Client interface {
	client.Client
	client.ObjectReader
	client.Indexer
}

// agentClient implements Client by delegating every call to a single agent
// endpoint (addr) through the embedded gRPC connection manager.
type agentClient struct {
	addr string
	opts []grpc.Option
	grpc.Client
}

// New returns Client implementation if no error occurs.
func New(ctx context.Context, opts ...Option) (Client, error) {
	c := new(agentClient)
	for _, opt := range append(defaultOptions, opts...) {
		opt(c)
	}
	c.Client = grpc.New(c.opts...)
	if err := c.Client.Connect(ctx, c.addr); err != nil {
		return nil, err
	}
	return c, nil
}

// Exists asks the agent whether an object with the given ID is indexed.
func (c *agentClient) Exists(
	ctx context.Context,
	req *client.ObjectID,
) (*client.ObjectID, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).Exists(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*client.ObjectID), nil
}

// Search performs a vector similarity search.
func (c *agentClient) Search(
	ctx context.Context,
	req *client.SearchRequest,
) (*client.SearchResponse, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).Search(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*client.SearchResponse), nil
}

// SearchByID performs a similarity search using an already-indexed object's ID.
func (c *agentClient) SearchByID(
	ctx context.Context,
	req *client.SearchIDRequest,
) (*client.SearchResponse, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).SearchByID(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*client.SearchResponse), nil
}

// StreamSearch runs a bidirectional search stream: dataProvider supplies
// requests (nil terminates the stream), f receives each response/error.
func (c *agentClient) StreamSearch(
	ctx context.Context,
	dataProvider func() *client.SearchRequest,
	f func(*client.SearchResponse, error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) {
			var st agent.Agent_StreamSearchClient
			st, err = agent.NewAgentClient(conn).StreamSearch(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, streamSearch(st, func() interface{} {
				return dataProvider()
			}, f)
		},
	)
	return err
}

// StreamSearchByID is StreamSearch for ID-based search requests.
func (c *agentClient) StreamSearchByID(
	ctx context.Context,
	dataProvider func() *client.SearchIDRequest,
	f func(*client.SearchResponse, error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) {
			var st agent.Agent_StreamSearchByIDClient
			st, err = agent.NewAgentClient(conn).StreamSearchByID(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, streamSearch(st, func() interface{} {
				return dataProvider()
			}, f,
			)
		},
	)
	return err
}

// Insert indexes a single object vector.
func (c *agentClient) Insert(
	ctx context.Context,
	req *client.ObjectVector,
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).Insert(ctx, req, copts...)
		},
	)
	return err
}

// StreamInsert inserts vectors over a bidirectional stream.
func (c *agentClient) StreamInsert(
	ctx context.Context,
	dataProvider func() *client.ObjectVector,
	f func(error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) {
			var st agent.Agent_StreamInsertClient
			st, err = agent.NewAgentClient(conn).StreamInsert(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, stream(st, func() interface{} {
				return dataProvider()
			}, f,
			)
		},
	)
	return err
}

// MultiInsert indexes multiple vectors in one call.
func (c *agentClient) MultiInsert(
	ctx context.Context,
	req *client.ObjectVectors,
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).MultiInsert(ctx, req, copts...)
		},
	)
	return err
}

// Update replaces an indexed vector.
func (c *agentClient) Update(
	ctx context.Context,
	req *client.ObjectVector,
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).Update(ctx, req, copts...)
		},
	)
	return err
}

// StreamUpdate updates vectors over a bidirectional stream.
func (c *agentClient) StreamUpdate(
	ctx context.Context,
	dataProvider func() *client.ObjectVector,
	f func(error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) {
			var st agent.Agent_StreamUpdateClient
			st, err = agent.NewAgentClient(conn).StreamUpdate(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, stream(st, func() interface{} {
				return dataProvider()
			}, f,
			)
		},
	)
	return err
}

// MultiUpdate replaces multiple indexed vectors in one call.
func (c *agentClient) MultiUpdate(
	ctx context.Context,
	req *client.ObjectVectors,
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).MultiUpdate(ctx, req, copts...)
		},
	)
	return err
}

// Remove deletes an indexed object by ID.
func (c *agentClient) Remove(
	ctx context.Context,
	req *client.ObjectID,
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).Remove(ctx, req, copts...)
		},
	)
	return err
}

// StreamRemove deletes objects over a bidirectional stream.
func (c *agentClient) StreamRemove(
	ctx context.Context,
	dataProvider func() *client.ObjectID,
	f func(error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			st, err := agent.NewAgentClient(conn).StreamRemove(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, stream(st, func() interface{} {
				return dataProvider()
			}, f,
			)
		},
	)
	return err
}

// MultiRemove deletes multiple objects in one call.
func (c *agentClient) MultiRemove(
	ctx context.Context,
	req *client.ObjectIDs,
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).MultiRemove(ctx, req, copts...)
		},
	)
	return err
}

// GetObject fetches the stored vector for an object ID.
func (c *agentClient) GetObject(
	ctx context.Context,
	req *client.ObjectID,
) (*client.ObjectVector, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).GetObject(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*client.ObjectVector), nil
}

// StreamGetObject fetches vectors over a bidirectional stream.
func (c *agentClient) StreamGetObject(
	ctx context.Context,
	dataProvider func() *client.ObjectID,
	f func(*client.ObjectVector, error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) {
			var st agent.Agent_StreamGetObjectClient
			st, err = agent.NewAgentClient(conn).StreamGetObject(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, grpc.BidirectionalStreamClient(st, func() interface{} {
				return dataProvider()
			}, func() interface{} {
				return new(client.ObjectVector)
			}, func(res interface{}, err error) {
				f(res.(*client.ObjectVector), err)
			})
		},
	)
	return err
}

// CreateIndex asks the agent to (re)build its index.
func (c *agentClient) CreateIndex(
	ctx context.Context,
	req *client.ControlCreateIndexRequest,
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).CreateIndex(ctx, req, copts...)
		},
	)
	return err
}

// SaveIndex asks the agent to persist its index to storage.
func (c *agentClient) SaveIndex(ctx context.Context) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).SaveIndex(ctx, new(client.Empty), copts...)
		},
	)
	return err
}

// CreateAndSaveIndex rebuilds and then persists the index in one call.
func (c *agentClient) CreateAndSaveIndex(
	ctx context.Context,
	req *client.ControlCreateIndexRequest,
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).CreateAndSaveIndex(ctx, req, copts...)
		},
	)
	return err
}

// IndexInfo reports index statistics from the agent.
func (c *agentClient) IndexInfo(ctx context.Context) (*client.InfoIndex, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return agent.NewAgentClient(conn).IndexInfo(ctx, new(client.Empty), copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*client.InfoIndex), err
}

// streamSearch adapts a bidirectional search stream to the
// SearchResponse-typed callback f.
func streamSearch(
	st grpc.ClientStream,
	dataProvider func() interface{},
	f func(*client.SearchResponse, error),
) error {
	return grpc.BidirectionalStreamClient(st, dataProvider, func() interface{} {
		return new(client.SearchResponse)
	}, func(res interface{}, err error) {
		f(res.(*client.SearchResponse), err)
	})
}

// stream adapts a bidirectional stream whose responses carry no payload
// (client.Empty) to the error-only callback f.
func stream(
	st grpc.ClientStream,
	dataProvider func() interface{},
	f func(error),
) error {
	return grpc.BidirectionalStreamClient(st, dataProvider, func() interface{} {
		return new(client.Empty)
	}, func(_ interface{}, err error) {
		f(err)
	})
}
package arraylist

import (
	"LimitGo/limit/collection"
	"testing"
)

// Student is a simple value type used as test payload.
type Student struct {
	Id   int
	Name string
}

// TestArrayListAll manually invokes every other test in this file.
// NOTE(review): `go test` already runs each Test* function itself, so this
// makes every test execute twice; consider removing it or converting the
// others to unexported helpers.
func TestArrayListAll(t *testing.T) {
	TestNew(t)
	TestArrayList_Append(t)
	TestArrayList_AddAll(t)
	TestArrayList_Clear(t)
	TestArrayList_Contains(t)
	TestArrayList_Empty(t)
	TestArrayList_Equals(t)
	TestArrayList_Get(t)
	TestArrayList_IndexOf(t)
	TestArrayList_Insert(t)
	TestArrayList_Remove(t)
	TestArrayList_Set(t)
	TestArrayList_GetIterator(t)
	TestArrayList_IntType(t)
}

// TestNew checks the freshly created list is empty with a default capacity of 8.
func TestNew(t *testing.T) {
	l := New()
	if l.Size() != 0 || len(l.elements) != 0 || cap(l.elements) != 8 {
		t.Error("Create ArrayList fail!")
	}
}

// TestArrayList_Append checks Append adds one element and the JSON-ish
// String() rendering matches.
func TestArrayList_Append(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	l.Append(&a)
	if l.Size() != 1 {
		t.Error("Append operation fail!")
	}
	if l.String() != "{{\"Id\":1,\"Name\":\"Alice\"}}" {
		t.Error("Append operation fail!")
	}
}

// TestArrayList_AddAll checks AddAll appends every element of another list
// without mutating the source list.
func TestArrayList_AddAll(t *testing.T) {
	l1 := New()
	var a collection.Object = Student{1, "Alice"}
	l1.Append(&a)
	l2 := New()
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	l2.Append(&b)
	l2.Append(&c)
	if l1.Size() != 1 || l2.Size() != 2 {
		t.Error("Append operation fail!")
	}
	var ll2 collection.Linear = l2
	l1.AddAll(&ll2)
	if l1.Size() != 3 || l2.Size() != 2 {
		t.Error("AddAll operation fail!")
	}
	if l1.String() != "{{\"Id\":1,\"Name\":\"Alice\"},{\"Id\":2,\"Name\":\"Bob\"},{\"Id\":3,\"Name\":\"Mark\"}}" {
		t.Error("AddAll operation fail!")
	}
}

// TestArrayList_Clear checks Clear empties a populated list.
func TestArrayList_Clear(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	l.Append(&a)
	l.Append(&b)
	l.Append(&c)
	l.Clear()
	if l.Size() != 0 {
		t.Error("Clear operation fail!")
	}
}

// TestArrayList_Contains checks membership for present and absent elements.
func TestArrayList_Contains(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	l.Append(&a)
	l.Append(&b)
	l.Append(&c)
	if !l.Contains(&a) {
		t.Error("Contains operation fail!")
	}
	if l.Contains(&d) {
		t.Error("Contains operation fail!")
	}
}

// TestArrayList_Empty checks Empty before and after inserts.
func TestArrayList_Empty(t *testing.T) {
	l := New()
	if !l.Empty() {
		t.Error("Empty operation fail!")
	}
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	l.Append(&a)
	l.Append(&b)
	if l.Empty() {
		t.Error("Empty operation fail!")
	}
}

// TestArrayList_Equals checks element-wise equality between lists.
func TestArrayList_Equals(t *testing.T) {
	l1 := New()
	var l2 collection.List = New()
	var l3 collection.List = New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	l1.Append(&a)
	l1.Append(&b)
	l2.Append(&a)
	l2.Append(&b)
	l3.Append(&c)
	l3.Append(&d)
	if !l1.Equals(&l2) {
		t.Error("Equals operation fail!")
	}
	if l1.Equals(&l3) {
		t.Error("Equals operation fail!")
	}
}

// TestArrayList_Get checks positional access returns the stored value.
func TestArrayList_Get(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	l.Append(&a)
	l.Append(&b)
	l.Append(&c)
	l.Append(&d)
	p := l.Get(2)
	s := (*p).(Student)
	if s != c {
		t.Error("Get operation fail!")
	}
}

// TestArrayList_IndexOf checks the position lookup of an element.
func TestArrayList_IndexOf(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	l.Append(&a)
	l.Append(&b)
	l.Append(&c)
	l.Append(&d)
	i := l.IndexOf(&c)
	if i != 2 {
		t.Error("IndexOf operation fail!")
	}
}

// TestArrayList_Insert checks in-range insertion plus the clamping behavior
// for negative and past-the-end indices (apparently clamped to 0 / Size()).
func TestArrayList_Insert(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	var e collection.Object = Student{5, "Mary"}
	l.Append(&b)
	l.Append(&d)
	if l.IndexOf(&d) != 1 || l.Size() != 2 {
		t.Error("Append operation fail!")
	}
	l.Insert(1, &c)
	if l.IndexOf(&c) != 1 || l.IndexOf(&d) != 2 || l.Size() != 3 {
		t.Error("Insert operation fail!")
	}
	l.Insert(-1, &a)
	if l.IndexOf(&a) != 0 || l.IndexOf(&b) != 1 || l.Size() != 4 {
		t.Error("Insert operation fail!")
	}
	l.Insert(10, &e)
	if l.IndexOf(&e) != 4 || l.Size() != 5 {
		t.Error("Insert operation fail!")
	}
}

// TestArrayList_Remove checks removal shifts later elements left.
func TestArrayList_Remove(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	l.Append(&a)
	l.Append(&b)
	l.Append(&c)
	l.Append(&d)
	l.Remove(&c)
	if l.IndexOf(&d) != 2 || l.Contains(&c) || l.Size() != 3 {
		t.Error("Remove operation fail!")
	}
}

// TestArrayList_Set checks positional replacement.
func TestArrayList_Set(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	l.Append(&a)
	l.Append(&b)
	l.Append(&c)
	l.Set(1, &d)
	if l.IndexOf(&d) != 1 || l.Contains(&b) || !l.Contains(&d) || l.Size() != 3 {
		t.Error("Set operation fail!")
	}
}

// TestArrayList_GetIterator walks the list, removing elements from index 2 on.
// NOTE(review): `HashNext` looks like a misspelling of `HasNext` in the
// iterator API — worth confirming and fixing at the declaration site.
func TestArrayList_GetIterator(t *testing.T) {
	l := New()
	var a collection.Object = Student{1, "Alice"}
	var b collection.Object = Student{2, "Bob"}
	var c collection.Object = Student{3, "Mark"}
	var d collection.Object = Student{4, "Jessie"}
	l.Append(&a)
	l.Append(&b)
	l.Append(&c)
	l.Append(&d)
	var s = ""
	it := l.GetIterator()
	for i := 0; it.HashNext(); i++ {
		p := it.Next()
		stu := (*p).(Student)
		s += stu.Name
		if i >= 2 {
			it.Remove()
		}
	}
	if l.Size() != 2 || s != "AliceBobMarkJessie" {
		t.Error("Iterator fail!")
	}
}

// TestArrayList_IntType checks the list also stores plain ints.
func TestArrayList_IntType(t *testing.T) {
	l := New()
	var a collection.Object = 1
	var b collection.Object = 2
	var c collection.Object = 3
	l.Append(&a)
	l.Append(&b)
	l.Append(&c)
	p := l.Get(1)
	if (*p).(int) != 2 {
		t.Error("Int fail!")
	}
}
// @APIVersion 1.0.0
// @Title beego Test API
// @Description beego has a very cool tools to autogenerate documents for your API
// @Contact astaxie@gmail.com
// @TermsOfServiceUrl http://beego.me/
// @License Apache 2.0
// @LicenseUrl http://www.apache.org/licenses/LICENSE-2.0.html
package routers

import (
	"nepliteApi/controllers"

	"github.com/astaxie/beego"
	"github.com/astaxie/beego/context"
)

// init registers the three top-level namespaces (/user, /comm, /wuliao) and
// all of their routes with beego. All handler methods live in the
// controllers package. (Original Chinese comments translated to English.)
func init() {
	user_ns := beego.NewNamespace("/user",
		beego.NSNamespace("/object",
			// TODO: kept only as a placeholder; it looks odd otherwise. (translated)
			beego.NSInclude(
				&controllers.ObjectController{},
			),
		),
		// TODO: annotation-based routing is too flashy — not recommended. (translated)
		// TODO: automatic routing hides the endpoints; meticulous programmers may like it. (translated)
		beego.NSRouter("/map", &controllers.UserController{}, "get:URLMapping"),
		beego.NSRouter("/update", &controllers.UserController{}, "get:UpdateUserYuee"),
		beego.NSRouter("/updateBase", &controllers.UserController{}, "get:UpdateUserBase"),
		beego.NSRouter("/add", &controllers.UserController{}, "get:Add"),
		beego.NSRouter("/info", &controllers.UserController{}, "get:Info"),
		beego.NSRouter("/infobycard", &controllers.UserController{}, "get:FindByCard"),
		beego.NSNamespace("/neplite",
			// Catch-all by id: responds "notAllowed" to any GET here.
			beego.NSGet(":id", func(context *context.Context) {
				context.Output.Body([]byte("notAllowed"))
			}),
		),
		beego.NSNamespace("/shouhou",
			beego.NSRouter("all", &controllers.UserSHController{}, "get:GetAll"),
			beego.NSRouter("/add", &controllers.UserSHController{}, "get:Add"),
		),
		beego.NSNamespace("/sales",
			beego.NSRouter("/getalltouser", &controllers.SalesController{}, "get:Getall2User"),
			beego.NSRouter("/yingli", &controllers.SalesController{}, "get:Yingli"),
			beego.NSRouter("/getall", &controllers.SalesController{}, "get:Getall"),
			beego.NSRouter("/add", &controllers.SalesController{}, "get:Add"),
			beego.NSRouter("/reportadd", &controllers.ReportRecordController{}, "post:AddRecord"),
			beego.NSRouter("/report", &controllers.ReportRecordController{}, "get:GetRecordByname"),
		),
		beego.NSNamespace("/group",
			beego.NSRouter("/add", &controllers.GroupController{}, "post:Add"),
			beego.NSRouter("/getbymasterid", &controllers.GroupController{}, "get:GetByUserId"),
		),
		beego.NSNamespace("/power",
			beego.NSRouter("/add", &controllers.UserPowerController{}, "post:Add"),
			beego.NSRouter("/login", &controllers.UserPowerController{}, "get:Login"),
			beego.NSRouter("/test/:uid", &controllers.UserPowerController{}, "get:PutPower"),
			beego.NSRouter("/getpower", &controllers.UserPowerController{}, "get:GetPower"),
			beego.NSRouter("/all", &controllers.UserPowerController{}, "get:GetAll"),
			beego.NSRouter("/del", &controllers.UserPowerController{}, "get:DeletePower"),
			beego.NSRouter("/getnpower", &controllers.UserPowerController{}, "get:GetNormalPower"),
			beego.NSRouter("/update", &controllers.UserPowerController{}, "post:UpdatePower"),
		),
	)
	comm_ns := beego.NewNamespace("/comm",
		beego.NSRouter("/get", &controllers.SomeNewsController{}, "get:GetAll"),
		beego.NSRouter("/add", &controllers.SomeNewsController{}, "post:Add"),
		beego.NSRouter("/getcard", &controllers.CardsController{}, "get:Get"),
		beego.NSRouter("/addcard", &controllers.CardsController{}, "get:AddCard"),
	)
	wuliao_ns := beego.NewNamespace("/wuliao",
		beego.NSInclude(
			&controllers.NepliteControllers{},
		),
		beego.NSRouter("/pandian", &controllers.PanDianController{}),
		beego.NSRouter("/pandianadd", &controllers.PanDianController{}, "get:Add"),
		beego.NSRouter("/all", &controllers.GoodsController{}),
		beego.NSRouter("/show", &controllers.GoodsController{}, "get:ShowAll"),
		beego.NSRouter("/updateyuzhi", &controllers.GoodsController{}, "get:UpdateYuzhi"),
		beego.NSRouter("/add", &controllers.GoodsController{}, "get:Add"),
		beego.NSNamespace("/record",
			beego.NSRouter("/ruku", &controllers.GoodsRecordController{}, "get:Ruku"),
			beego.NSRouter("/chuku", &controllers.GoodsRecordController{}, "get:Chuku"),
			beego.NSRouter("/add", &controllers.GoodsRecordController{}, "get:Add"),
			beego.NSRouter("/del", &controllers.GoodsRecordController{}, "get:Del"),
		),
	)
	beego.AddNamespace(user_ns, wuliao_ns, comm_ns)
}
package api import ( "github.com/MiteshSharma/gateway/common/middleware" "github.com/MiteshSharma/gateway/gateway/middleware" "github.com/MiteshSharma/gateway/gateway/model" "github.com/gorilla/mux" "github.com/urfave/negroni" ) func InitApi(router *mux.Router) *negroni.Negroni { InitProxy(router) n := negroni.New() n.UseFunc(commonMiddleware.NewZipkinMiddleware().GetMiddlewareHandler()) n.UseFunc(commonMiddleware.NewLoggerMiddleware().GetMiddlewareHandler()) n.UseFunc(middleware.NewHystrixMiddleware(model.NewGatewayService()).GetMiddlewareHandler()) n.UseHandler(router) return n }
package main

import "fmt"

// swap returns its two arguments in reversed order.
func swap(a, b string) (string, string) {
	return b, a
}

func main() {
	first, second := swap("String A", "String B")
	fmt.Println(first, second)
}
// This is a runnable example of making an oauth authorized request // to the Twitter api // Enter your consumer key/secret and token key/secret as command line flags // ex: go run example.go -ck ABC -cs DEF -tk 123 -ts 456 package main import ( "flag" "fmt" "io/ioutil" "net/http" "github.com/nhjk/oauth" ) var ck = flag.String("ck", "", "consumer key") var cs = flag.String("cs", "", "consumer secret") var tk = flag.String("tk", "", "token key") var ts = flag.String("ts", "", "token secret") func main() { flag.Parse() // create an http client and a request for it to send client := new(http.Client) req, _ := http.NewRequest("GET", "https://api.twitter.com/1.1/statuses/home_timeline.json", nil) // a consumer allows you to authorize requests with a token cons := oauth.Consumer{*ck, *cs} // authorize request cons.Authorize(req, &oauth.Token{*tk, *ts}) // send request and print body res, _ := client.Do(req) body, _ := ioutil.ReadAll(res.Body) fmt.Printf("%s\n", body) }
package repository

import (
	"github.com/bearname/videohost/internal/videoserver/domain/dto"
	"github.com/bearname/videohost/internal/videoserver/domain/model"
)

// PlaylistRepository abstracts persistence for playlists and their videos.
type PlaylistRepository interface {
	// Create stores a new playlist and returns its generated id.
	Create(playlist dto.CreatePlaylistDto) (int64, error)
	// FindPlaylists lists userId's playlists matching the given privacy types.
	FindPlaylists(userId string, privacyType []model.PrivacyType) ([]dto.PlaylistItem, error)
	// FindPlaylist fetches a single playlist by its id.
	FindPlaylist(playlistId int) (model.Playlist, error)
	// AddVideos attaches the given videos to the playlist owned by userId.
	AddVideos(playlistId int, userId string, videosId []string) error
	// RemoveVideos detaches the given videos from the playlist owned by userId.
	RemoveVideos(playlistId int, userId string, videosId []string) error
	// ChangeOrder reorders the playlist's videos — presumably videoId and
	// order are parallel slices; TODO confirm against the implementation.
	ChangeOrder(playlistId int, videoId []string, order []int) error
	// ChangePrivacy updates the playlist's privacy setting on behalf of
	// the user identified by id.
	ChangePrivacy(id string, playlistId int, privacyType model.PrivacyType) error
	// Delete removes the playlist owned by ownerId.
	Delete(ownerId string, playlistId int) error
}
package mr

import "strconv"

// isElementInSLice reports whether ele occurs in s.
// (The name — including the "SLice" misspelling — is kept to avoid breaking
// callers elsewhere in this package; the unused named result `judge` from the
// original signature has been dropped.)
func isElementInSLice(ele int, s []int) bool {
	for _, v := range s {
		if v == ele {
			return true
		}
	}
	return false
}

// makeMapOutFileName builds the intermediate map-output file name
// "mr-<fileIndex>-<partIndex>".
func makeMapOutFileName(fileIndex int, partIndex int) string {
	return "mr-" + strconv.Itoa(fileIndex) + "-" + strconv.Itoa(partIndex)
}

// makeReduceOutFileName builds the reduce-output file name
// "mr-out-<partIndex>".
func makeReduceOutFileName(partIndex int) string {
	return "mr-out-" + strconv.Itoa(partIndex)
}
package recursion func canPartitionKSubsets(nums []int, k int) bool { sum := 0 for _, num := range nums { sum += num } if k <= 0 || sum%k != 0 { return false } visited := make([]int, len(nums)) return help698(nums, visited, 0, k, 0, 0, sum/k) } func help698(nums []int, visited []int, start int, k int, cur_sum int, cur_num int, target int) bool { if k == 1 { return true } if cur_sum == target && cur_num > 0 { return help698(nums, visited, 0, k-1, 0, 0, target) } for i := start; i < len(nums); i++ { if visited[i] == 0 { visited[i] = 1 if help698(nums, visited, i+1, k, cur_sum+nums[i], cur_num+1, target) { return true } visited[i] = 0 } } return false }
package tasks // Task describes a single step in a plan. This could i.e. be the renaming/deleting of a file. type Task interface { Execute() Result GetDescription() string } // RunnableTask defines a basic task containing preferences stored as a map. type RunnableTask struct { Description string Preferences map[string]interface{} } // Result is the outcome of executing a task. type Result struct { Message string IsSuccessful bool } // handleError is a convenient function to handle errors occuring during the execution of a task. // It will set the result to unseccessful and set the error as a message. func handleError(err error) (result Result) { result.IsSuccessful = false result.Message = err.Error() return result }
package main

import "fmt"

// main saves a fresh deck to disk, reads it back, prints it and shuffles it.
// newDeck, saveToFile, readDeckFromFile, print and shuffle are defined
// elsewhere in this package — TODO confirm their signatures.
func main() {
	filename := "deckfile.txt"
	deck := newDeck()
	if err := deck.saveToFile(filename); err != nil {
		// NOTE(review): "occured" is a typo in a runtime string; left verbatim
		// here, fix at source if desired.
		fmt.Println("Some error occured:- ", err)
	}
	// deck.print()
	// hand, remainingDeck := deal(deck, 5)
	// hand.print()
	// remainingDeck.print()
	// fmt.Println(deck.deckToString())
	d := readDeckFromFile(filename)
	d.print()
	d.shuffle()
}

// newCard returns a hard-coded sample card string.
// NOTE(review): currently unused by main.
func newCard() string {
	return "Ace of star"
}
package record

import "time"

// Record represents a record.
type Record struct {
	ID        string    `json:"instapi:id"`
	CreatedAt time.Time `json:"instapi:createdAt"`
	// UpdatedAt is a pointer so a never-updated record serializes as null.
	UpdatedAt *time.Time `json:"instapi:updatedAt"`
}

// Batch represents a record batch acknowledge record count.
type Batch struct {
	Count int `json:"count"`
}
package main

import (
	"flag"
	"fmt"
)

// Possible fractals
var fractals = []string{
	"mandelbrot",
	"sierpinski",
	"julia",
}

// main parses the CLI flags and dispatches to the requested fractal renderer;
// mandelbrot, sierpinski and julia are defined elsewhere in this package.
func main() {
	// Various flags we use pointers otherwise default value won't change
	size := flag.Int("size", 400, "Size of the fractal image in px")
	name := flag.String("name", "mandelbrot", fmt.Sprintf("Fractal Name, possible options are: %v", fractals))
	re := flag.Float64("re", 0.285, "Real part of the complex number for julia's set computation")
	im := flag.Float64("im", 0.0013, "Imaginary part of the complex number for julia's set computation")
	limit := flag.Int("limit", 200, "Limit of iteration to consider the sequence is bounded")
	output := flag.String("output", "myfractal.jpg", "Name of the image file to output, format should be jpeg")
	colorized := flag.Bool("colorized", false, "If the output should be colorized (not possible for sierpinski ATM)")

	// Parse flags
	flag.Parse()

	// Switch on fractal name that have been asked
	switch *name {
	case "mandelbrot":
		mandelbrot(float64(*size), float64(*limit), *output, *colorized)
	case "sierpinski":
		sierpinski(*size, *output, *colorized)
	case "julia":
		julia(float64(*size), float64(*limit), complex(*re, *im), *output, *colorized)
	default:
		fmt.Println("Sorry this fractal name isn't handled here")
		return
	}
	fmt.Println("Computation Ended !")
}
package main import ( "bytes" "os" "os/exec" "os/user" "text/template" "time" ) const emailTemplate = `From: {{.From}} To: {{.To}} Subject: {{.Subject}} Cmd: {{.Result.Cmd.Args}} Start: {{.Result.Start}} End: {{.Result.End}} Duration: {{.Result.Duration}} total {{.Result.Cmd.ProcessState.UserTime}} user {{.Result.Cmd.ProcessState.SystemTime}} system ProcessState: {{.Result.Cmd.ProcessState}} Error: {{.Result.Error}} Stderr:{{if .Result.StderrExtra}} ... {{.Result.StderrExtra}} more bytes ...{{end}} {{.Result.Stderr}} Stdout:{{if .Result.StdoutExtra}} ... {{.Result.StdoutExtra}} more bytes ...{{end}} {{.Result.Stdout}} ` type Message struct { To string From string Subject string Result *Result } func (m *Message) Bytes() []byte { var buf bytes.Buffer t := template.New("mail") t, _ = t.Parse(emailTemplate) t.Execute(&buf, m) return buf.Bytes() } func (m *Message) String() string { return string(m.Bytes()) } type Result struct { Cmd *exec.Cmd Error error Start time.Time End time.Time Duration time.Duration Stdout string Stderr string StdoutExtra int StderrExtra int } func identity() string { var username string user, err := user.Current() if err != nil { username = "root" } else { username = user.Username } hostname, err := os.Hostname() if err != nil { hostname = "localhost" } return username + "@" + hostname } type Mailer interface { Send([]string, string, []byte) error } func findMailer() Mailer { if path, err := FindSendmail(); err == nil { return &SendMailMailer{path} } return &SMTPMailer{} }
package main import ( "log" "math/rand" "time" "github.com/fjdumont/exp/pkg/evo" ) const () func main() { target := []rune("You cannot parse HTML with Regular Expressions. Regular Expressions can not parse HTML.") runes := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,.-!\"§$%&/()=? 0123456789") ga := evo.NewGA(512). SetNewGenome(func() []rune { s := make([]rune, len(target)) for i := 0; i < len(target); i++ { s[i] = runes[rand.Intn(len(runes))] } return s }). SetFitness(func(g []rune) float64 { // count the number of correct runes hits := 0 for i, r := range g { if r == target[i] { hits++ } } return float64(hits) / float64(len(target)) }). SetMutate(evo.NewRuneMutator(1.0/float64(len(target)), runes)). SetElitism(0.01). SetReproduction(0.9, evo.SinglePointCrossover). SetMaxProcs(2) ga.Init() // ~2.5-2.6 ts := time.Now() for ep := 1; ; ep++ { ga.Evolve() best := ga.Best() if ep%1000 == 0 || best.Fitness == 1.0 { d := time.Since(ts) avg := ga.AverageFitness() log.Printf("ep: %v,\td: %v,\tbestfit: %v,\tavgfit: %v,\tbest: %v\n", ep, d, best.Fitness, avg, string(best.Genome)) if best.Fitness == 1 { break } ts = time.Now() } } }
package main

import (
	idgenerator "github.com/Qihoo360/poseidon/service/idgenerator/module"
	meta "github.com/Qihoo360/poseidon/service/meta/module"
	proxy "github.com/Qihoo360/poseidon/service/proxy/module"
	searcher "github.com/Qihoo360/poseidon/service/searcher/module"
	"github.com/zieckey/simgo"
)

// main wires the four Poseidon service modules into the shared simgo
// framework, initializes them, and runs the framework's main loop.
// Initialization failure is fatal.
func main() {
	fw := simgo.DefaultFramework
	fw.RegisterModule("meta", meta.New())
	fw.RegisterModule("idgenerator", idgenerator.New())
	fw.RegisterModule("proxy", proxy.New())
	fw.RegisterModule("searcher", searcher.New())

	err := fw.Initialize()
	if err != nil {
		panic(err.Error())
	}

	fw.Run()
}
package cli

import (
	"runtime"
	"syscall"
	"unsafe"
)

// For calculating the size of the console window; this is pretty important when we're writing
// arbitrary-length log messages around the interactive display.
// winsize mirrors the C struct winsize filled in by the TIOCGWINSZ ioctl;
// field order and widths must match the kernel layout exactly.
type winsize struct {
	Row    uint16
	Col    uint16
	Xpixel uint16
	Ypixel uint16
}

// WindowSize finds and returns the size of the console window as (rows, columns).
// It issues the TIOCGWINSZ ioctl against stderr; on failure it falls back to
// the classic 25x80 terminal size.
func WindowSize() (int, int) {
	ws := new(winsize)
	if ret, _, errno := syscall.Syscall(syscall.SYS_IOCTL,
		uintptr(syscall.Stderr),
		uintptr(tiocgwinsz()),
		uintptr(unsafe.Pointer(ws)),
	); int(ret) == -1 {
		// NOTE(review): `log` is not imported in this file — presumably a
		// package-level logger declared in another file of this package;
		// confirm it exists, otherwise this does not compile.
		log.Errorf("error %d getting window size", int(errno))
		return 25, 80
	}
	return int(ws.Row), int(ws.Col)
}

// tiocgwinsz returns the ioctl number corresponding to TIOCGWINSZ.
// We could determine this using cgo which would be more robust, but I'd really
// rather not invoke cgo for something as static as this.
func tiocgwinsz() int {
	if runtime.GOOS == "linux" {
		return 0x5413
	}
	return 1074295912 // OSX and FreeBSD.
}
package widgets

import (
	"log"
	// "fmt"
	// "github.com/gotk3/gotk3/glib"
	"github.com/gotk3/gotk3/gtk"
)

// WindowNew creates a centered top-level window with the given title and
// default size, wired to quit the GTK main loop on destroy.
// All constructors in this file treat a gotk3 error as fatal.
func WindowNew(title string, width, height int) (*gtk.Window){
	win, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)
	if err != nil {
		log.Fatal("Unable to create window: ", err)
	}
	win.SetTitle(title)
	win.SetDefaultSize(width, height)
	win.SetPosition(gtk.WIN_POS_CENTER)
	win.Connect("destroy", func(){
		gtk.MainQuit()
	})
	return win
}

// LabelNew creates a label with optional line wrapping.
func LabelNew(title string, wrapText bool) (*gtk.Label){
	label, err := gtk.LabelNew(title)
	if err != nil {
		log.Fatal("Unable to add label: ", err)
	}
	label.SetLineWrap(wrapText)
	return label
}

// EntryNew creates an empty text entry.
func EntryNew() (*gtk.Entry){
	entry, err := gtk.EntryNew()
	if err != nil {
		log.Fatal("Unable to create entry: ", err)
	}
	return entry
}

// ButtonNew creates a labelled button whose "clicked" signal runs onClick.
// NOTE(review): args is forwarded as a single slice value to Connect, not
// spread — confirm that is the intended gotk3 usage.
func ButtonNew(label string, onClick func(), args ...interface{}) (*gtk.Button){
	button, err := gtk.ButtonNewWithLabel(label)
	if err != nil {
		log.Fatal("Unable to create button: ", err)
	}
	button.Connect("clicked", onClick, args)
	return button
}

// BoxNew creates a box container with the given orientation and spacing.
func BoxNew(orient gtk.Orientation, spacing int) (*gtk.Box){
	box, err := gtk.BoxNew(orient, spacing)
	if err != nil{
		log.Fatal("Unable to create a Box")
	}
	return box
}

// StackSwitcherNew creates a switcher widget for a gtk.Stack.
func StackSwitcherNew() (*gtk.StackSwitcher){
	stackSwitcher,err := gtk.StackSwitcherNew()
	if err != nil {
		log.Fatal("Unable to add StackSwitcher: ", err)
	}
	return stackSwitcher
}

// StackNew creates an empty stack container.
func StackNew() (*gtk.Stack){
	stack, err := gtk.StackNew()
	if err != nil {
		log.Fatal("Unable to add Stack: ", err)
	}
	return stack
}

// GridNew creates a grid with the given homogeneity and spacing settings.
func GridNew(columnHomogeneous, rowHomogeneous bool, colSpacing, rowSpacing uint) (*gtk.Grid) {
	grid, err := gtk.GridNew()
	if err != nil {
		log.Fatal("Unable to add grid: ", err)
	}
	grid.SetColumnHomogeneous(columnHomogeneous)
	grid.SetRowHomogeneous(rowHomogeneous)
	grid.SetColumnSpacing(colSpacing)
	grid.SetRowSpacing(rowSpacing)
	return grid
}

// DialogNew creates an empty dialog with the given title and default size.
func DialogNew(title string, width, height int) (*gtk.Dialog){
	dialog, err := gtk.DialogNew()
	if err != nil {
		log.Fatal("Unable to add Dialog: ", err)
	}
	dialog.SetDefaultSize(width, height)
	dialog.SetTitle(title)
	return dialog
}

// DialogNewWithButtons creates a dialog pre-populated with the given
// button/response pairs.
func DialogNewWithButtons(title string, parent gtk.IWindow, flag gtk.DialogFlags, buttons []interface{}) (*gtk.Dialog){
	dialog, err := gtk.DialogNewWithButtons(title, parent, flag, buttons)
	if err != nil {
		log.Fatal("Unable to create Dialog with Buttons: ", err)
	}
	return dialog
}

// RadioButtonNew creates a radio button in grpMember's group whose
// "toggled" signal runs callback.
func RadioButtonNew(grpMember *gtk.RadioButton, label string, callback func()) (*gtk.RadioButton){
	radio, err := gtk.RadioButtonNewWithLabelFromWidget(grpMember, label)
	if err != nil {
		log.Fatal("Unable to generate radio button: ", err)
	}
	radio.Connect("toggled", callback)
	return radio
}

// ImageNew creates an empty image widget.
func ImageNew() (*gtk.Image){
	img, err := gtk.ImageNew()
	if err != nil {
		log.Fatal("Unable to create Image: ", err)
	}
	return img
}

// ImageNewFromFile creates an image widget loaded from the given path.
func ImageNewFromFile(path string) (*gtk.Image){
	img, err := gtk.ImageNewFromFile(path)
	if err != nil {
		log.Fatal("Unable to load Image: ", err)
	}
	return img
}

// FileChooserDialogNew creates a Cancel/Select file-chooser dialog whose
// title reflects whether a file or a folder is being picked.
func FileChooserDialogNew(parent gtk.IWindow, action gtk.FileChooserAction) (*gtk.FileChooserDialog){
	title := "Select an item"
	if action == gtk.FILE_CHOOSER_ACTION_OPEN {
		title = "Select a file"
	} else if action == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER {
		title = "Select a folder"
	}
	dialog, err := gtk.FileChooserDialogNewWith2Buttons(
		title,
		parent,
		action,
		"Cancel",
		gtk.RESPONSE_CLOSE,
		"Select",
		gtk.RESPONSE_OK)
	if err != nil {
		log.Fatal("Unable to create File Chooser Dialog Box")
	}
	return dialog
}
package inmemoryrepo import "backend/internal/domain" type gameRepository struct { games map[domain.GameId]*domain.Game } func NewGameRepository() domain.GameRepository { return &gameRepository{ games: map[domain.GameId]*domain.Game{}, } } func (r *gameRepository) Save(game *domain.Game) error { r.games[game.Id()] = game return nil } func (r *gameRepository) FindById(gameId domain.GameId) (*domain.Game, error) { game, ok := r.games[gameId] if ok { return game, nil } else { return nil, nil } }
package commands

import (
	"fmt"
	"os"

	"github.com/fatih/color"
)

// fail prints err to stderr with a red "error:" prefix and terminates the
// process with exit status 1.
func fail(err error) {
	fmt.Fprintln(os.Stderr, color.RedString("error:"), err)
	os.Exit(1)
}

// failIf is a guard-style convenience: it does nothing for a nil error and
// otherwise delegates to fail, which never returns.
func failIf(err error) {
	if err == nil {
		return
	}
	fail(err)
}
/* Two random numbers A and B have been generated to be either 1, 2, or 3 your job is to randomly pick a third number C that can also be 1, 2 or 3. But, C cannot equal A or B. A can equal B. If A = B, then C has only two numbers left it can be. If A ≠ B, C has only one number it can be. Assume A and B have already been chosen for you This is how A and B would be created in Python A = random.randrange(1,4) B = random.randrange(1,4) Assume this is already in your code. This is the shortest I've come up with in Python while True: C = random.randrange(1,4) if C != A and C != B: break Here are some acceptable values for A, B and C. 1,2,3 1,1,2 2,3,1 3,3,2 Here are some unacceptable values for A, B and C. 1,2,1 2,3,3 1,1,1 3,2,3 */ package main import ( "fmt" "math/rand" "time" ) func main() { rand.Seed(time.Now().UnixNano()) fmt.Println(sample(3)) } func sample(n int) (a, b, c int) { a = 1 + rand.Intn(n) b = 1 + rand.Intn(n) for { c = 1 + rand.Intn(n) if c != a && c != b { break } } return }
package stapi

import (
	"fmt"
	"net/http"
)

// ApiUrl - the base url for stapi rest api
const ApiUrl = "http://stapi.co/api/v1/rest"

// Client - the stapi app
type Client struct {
	ApiUrl     string       // base REST endpoint
	HttpClient *http.Client // underlying HTTP client shared by all entities
	Character  Entity       // character sub-resource, rooted at <ApiUrl>/character
}

// New - create a new stapi client wired to the given HTTP client.
func New(httpClient *http.Client) Client {
	client := Client{
		ApiUrl:     ApiUrl,
		HttpClient: httpClient,
	}
	// The character entity shares the client's transport and derives its
	// endpoint from the base URL.
	client.Character = Entity{
		ApiUrl: fmt.Sprintf("%s/character", client.ApiUrl),
		Client: client.HttpClient,
	}
	return client
}
package SQLite3

/*
#include <sqlite3.h>
#include <stdlib.h>
*/
import "C"
import (
	"database/sql"
	"database/sql/driver"
	"errors"
	"io"
	"unsafe"
)

/*
SQLite3 driver, references:
https://www.sqlite.org
https://golang.org/pkg/database/sql/driver
https://www.cnblogs.com/5211314jackrose/p/5816532.html
https://github.com/astaxie/build-web-application-with-golang/blob/master/zh/05.1.md
*/

// transient is passed as the destructor argument of sqlite3_bind_text/blob.
// Its value is the address -1, i.e. SQLITE_TRANSIENT, which tells SQLite to
// make a private copy of the bound data before the call returns.
var transient *[0]byte

func init() {
	// Build the SQLITE_TRANSIENT sentinel ((sqlite3_destructor_type)-1)
	// without cgo constant arithmetic: start from the nil pointer value of
	// transient and subtract one, yielding the all-ones address.
	t := unsafe.Pointer(transient)
	t = unsafe.Pointer(uintptr(t) - 1)
	transient = (*[0]byte)(t)
	// Register this driver with database/sql under the name "SQLite3".
	sql.Register("SQLite3", &SQLite3Driver{})
}

// SQLite3Driver implements the driver.Driver interface:
//   Open(name string) (Conn, error)
type SQLite3Driver struct {
}

// Open returns a new connection to the database.
// The name is a string in a driver-specific format (here: the database
// file path, passed straight to sqlite3_open).
func (d *SQLite3Driver) Open(name string) (driver.Conn, error) {
	var pdb *C.sqlite3
	filename := C.CString(name)
	defer C.free(unsafe.Pointer(filename))
	if C.sqlite3_open(filename, &pdb) != C.SQLITE_OK {
		return nil, errors.New("SQLite3 Open failed.")
	}
	return &SQLite3Conn{pDb: pdb}, nil
}

// SQLite3Conn implements the driver.Conn interface:
//   Prepare(query string) (Stmt, error)
//   Close() error
//   Begin() (Tx, error)
type SQLite3Conn struct {
	pDb *C.sqlite3 // underlying sqlite3 handle; nil after Close
}

// Prepare returns a prepared statement, bound to this connection.
func (c *SQLite3Conn) Prepare(query string) (driver.Stmt, error) {
	var pstmt *C.sqlite3_stmt
	var pztail *C.char
	zsql := C.CString(query)
	defer C.free(unsafe.Pointer(zsql))
	// -1 lets SQLite read zsql up to its NUL terminator; pztail receives a
	// pointer to any unparsed trailing SQL.
	if C.sqlite3_prepare_v2(c.pDb, zsql, -1, &pstmt, &pztail) != C.SQLITE_OK {
		return nil, errors.New("SQLite3 Prepare failed.")
	}
	return &SQLite3Stmt{pStmt: pstmt, pzTail: pztail, conn: c}, nil
}

// Close invalidates and potentially stops any current.
// It finalizes every statement still attached to the connection first,
// since sqlite3_close fails while statements remain unfinalized.
func (c *SQLite3Conn) Close() error {
	s := C.sqlite3_next_stmt(c.pDb, nil)
	for s != nil {
		C.sqlite3_finalize(s)
		s = C.sqlite3_next_stmt(c.pDb, nil)
	}
	if C.sqlite3_close(c.pDb) != C.SQLITE_OK {
		return errors.New("SQLite3 Close failed.")
	}
	c.pDb = nil
	return nil
}

// Begin starts and returns a new transaction.
func (c *SQLite3Conn) Begin() (driver.Tx, error) { psql := C.CString("BEGIN") defer C.free(unsafe.Pointer(psql)) if C.sqlite3_exec(c.pDb, psql, nil, nil, nil) != C.SQLITE_OK { return nil, errors.New("SQLite3 Begin failed.") } return &SQLite3Tx{conn: c}, nil } type SQLite3Stmt struct { // 实现Stmt接口 // Close() error // NumInput() int // Exec(args []Value) (Result, error) // Query(args []Value) (Rows, error) pStmt *C.sqlite3_stmt pzTail *C.char conn *SQLite3Conn } // Close closes the statement. func (s *SQLite3Stmt) Close() error { if C.sqlite3_finalize(s.pStmt) != C.SQLITE_OK { return errors.New("SQLite3 Finalize failed.") } return nil } // NumInput returns the number of placeholder parameters. func (s *SQLite3Stmt) NumInput() int { return int(C.sqlite3_bind_parameter_count(s.pStmt)) } // Exec executes a query that doesn't return rows, such // as an INSERT or UPDATE. func (s *SQLite3Stmt) Exec(args []driver.Value) (driver.Result, error) { for i, v := range args { switch w := v.(type) { case nil: C.sqlite3_bind_null(s.pStmt, C.int(i+1)) case int64: C.sqlite3_bind_int64(s.pStmt, C.int(i+1), C.sqlite3_int64(w)) case int32: C.sqlite3_bind_int64(s.pStmt, C.int(i+1), C.sqlite3_int64(w)) case int: C.sqlite3_bind_int(s.pStmt, C.int(i+1), C.int(w)) case float64: C.sqlite3_bind_double(s.pStmt, C.int(i+1), C.double(w)) case float32: C.sqlite3_bind_double(s.pStmt, C.int(i+1), C.double(w)) case byte: C.sqlite3_bind_int(s.pStmt, C.int(i+1), C.int(w)) case bool: t := 0 if w { t = 1 } C.sqlite3_bind_int(s.pStmt, C.int(i+1), C.int(t)) case string: pb := []byte(w) C.sqlite3_bind_text(s.pStmt, C.int(i+1), (*C.char)(unsafe.Pointer(&pb[0])), C.int(len(w)), (transient)) case []byte: var pb *byte if len(w) > 0 { pb = &w[0] } C.sqlite3_bind_blob(s.pStmt, C.int(i+1), unsafe.Pointer(pb), C.int(len(w)), (transient)) return nil, errors.New("SQLite3 Step failed.") } } if status := C.sqlite3_step(s.pStmt); status != C.SQLITE_OK && status != C.SQLITE_ROW && status != C.SQLITE_DONE { return nil, 
errors.New("SQLite3 Step failed") } return &SQLite3Result{lastId: int64(C.sqlite3_last_insert_rowid(s.conn.pDb)), rowNum: int64(C.sqlite3_changes(s.conn.pDb))}, nil } // Query executes a query that may return rows, such as a // SELECT. func (s *SQLite3Stmt) Query(args []driver.Value) (driver.Rows, error) { for i, v := range args { switch w := v.(type) { case nil: C.sqlite3_bind_null(s.pStmt, C.int(i+1)) case int64: C.sqlite3_bind_int64(s.pStmt, C.int(i+1), C.sqlite3_int64(w)) case int32: C.sqlite3_bind_int64(s.pStmt, C.int(i+1), C.sqlite3_int64(w)) case int: C.sqlite3_bind_int(s.pStmt, C.int(i+1), C.int(w)) case float64: C.sqlite3_bind_double(s.pStmt, C.int(i+1), C.double(w)) case float32: C.sqlite3_bind_double(s.pStmt, C.int(i+1), C.double(w)) case byte: C.sqlite3_bind_int(s.pStmt, C.int(i+1), C.int(w)) case bool: t := 0 if w { t = 1 } C.sqlite3_bind_int(s.pStmt, C.int(i+1), C.int(t)) case string: pb := []byte(w) var x int64 = -1 C.sqlite3_bind_text(s.pStmt, C.int(i+1), (*C.char)(unsafe.Pointer(&pb)), C.int(len(w)), (*[0]byte)(unsafe.Pointer(&x))) case []byte: var pb *byte if len(w) > 0 { pb = &w[0] } var x int64 = -1 C.sqlite3_bind_blob(s.pStmt, C.int(i+1), unsafe.Pointer(pb), C.int(len(w)), (*[0]byte)(unsafe.Pointer(&x))) default: return nil, errors.New("SQLite3 Query failed.") } } return &SQLite3Rows{stmt: s}, nil } type SQLite3Tx struct { // 实现Tx接口 // Commit() error // Rollback() error conn *SQLite3Conn } // Commit Transaction func (t *SQLite3Tx) Commit() error { psql := C.CString("COMMIT") defer C.free(unsafe.Pointer(psql)) if C.sqlite3_exec(t.conn.pDb, psql, nil, nil, nil) != C.SQLITE_OK { return errors.New("SQLite3 Commit failed.") } return nil } // Rollback Transaction func (t *SQLite3Tx) Rollback() error { psql := C.CString("ROLLBACK") defer C.free(unsafe.Pointer(psql)) if C.sqlite3_exec(t.conn.pDb, psql, nil, nil, nil) != C.SQLITE_OK { return errors.New("SQLite3 Rollback failed.") } return nil } type SQLite3Result struct { // 实现Result接口 // LastInsertId() 
(int64, error) lastId int64 rowNum int64 } // LastInsertId returns the database's auto-generated ID // after, for example, an INSERT into a table with primary // key. func (r *SQLite3Result) LastInsertId() (int64, error) { return r.lastId, nil } // RowsAffected returns the number of rows affected by the // query. func (r *SQLite3Result) RowsAffected() (int64, error) { return r.rowNum, nil } type SQLite3Rows struct { // 实现Rows接口 // Columns() []string // Close() error // Next(dest []Value) error stmt *SQLite3Stmt } // Columns returns the names of the columns func (r *SQLite3Rows) Columns() []string { var n int = int(C.sqlite3_column_count(r.stmt.pStmt)) col := make([]string, n) for i := 0; i < n; i++ { col[i] = C.GoString(C.sqlite3_column_name(r.stmt.pStmt, C.int(i))) } return col } // Close closes the rows iterator. func (r *SQLite3Rows) Close() error { if C.sqlite3_reset(r.stmt.pStmt) != C.SQLITE_OK { return errors.New("SQLite3 Reset failed.") } return nil } // Next is called to populate the next row of data into // the provided slice. The provided slice will be the same // size as the Columns() are wide. // Next should return io.EOF when there are no more rows. 
func (r *SQLite3Rows) Next(dest []driver.Value) error { if C.sqlite3_step(r.stmt.pStmt) == C.SQLITE_DONE { return io.EOF } var n int = len(dest) for i := 0; i < n; i++ { ts := C.sqlite3_column_type(r.stmt.pStmt, C.int(i)) // tu := C.sqlite3_column_decltype(r.stmt.pStmt, C.int(i)) switch ts { case C.SQLITE_INTEGER: dest[i] = int64(C.sqlite3_column_int64(r.stmt.pStmt, C.int(i))) case C.SQLITE_FLOAT: dest[i] = float64(C.sqlite3_column_double(r.stmt.pStmt, C.int(i))) case C.SQLITE_TEXT: dest[i] = C.GoString((*C.char)(unsafe.Pointer(C.sqlite3_column_text(r.stmt.pStmt, C.int(i))))) case C.SQLITE_BLOB: n := int(C.sqlite3_column_bytes(r.stmt.pStmt, C.int(i))) p := C.sqlite3_column_blob(r.stmt.pStmt, C.int(i)) slice := make([]byte, n) copy(slice[:], (*[1 << 30]byte)(unsafe.Pointer(p))[0:n]) dest[i] = slice case C.SQLITE_NULL: dest[i] = nil default: return errors.New("SQLite3 Next failed.") } } return nil }
package weather

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/sirupsen/logrus"
)

// GetWeatherData returns the province/city list as a JSON string.
func GetWeatherData() string {
	//resp, err := http.Get("http://www.weather.com.cn/data/sk/101110101.html")
	//if err != nil {
	//	fmt.Println(err)
	//}
	//body, err := ioutil.ReadAll(resp.Body)
	//fmt.Println(string(body))
	return string(GetProvinceAndCities())
}

// Province is one entry of the NMC province listing, with its cities
// filled in by a follow-up request.
type Province struct {
	Code   string `json:"code"`
	Name   string `json:"name"`
	URL    string `json:"url"`
	Cities []City `json:"cities"`
}

// City is one entry of a province's city listing.
type City struct {
	Code string `json:"code"`
	Name string `json:"city"`
	URL  string `json:"url"`
}

// 以上数据来自http://www.nmc.gov.cn中央气象台
// (the data above comes from the NMC, http://www.nmc.gov.cn)

// fetch GETs url and returns the full response body. The body is always
// closed — the previous inline http.Get/ReadAll sequences leaked every
// response body and its underlying connection.
func fetch(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}

// GetProvinceAndCities downloads the NMC province list, normalizes the
// province names, fetches each province's cities and returns everything
// marshalled as JSON. Any failure is logged at debug level and yields nil.
func GetProvinceAndCities() []byte {
	var provinces []Province
	body, err := fetch("http://www.nmc.gov.cn/f/rest/province")
	if err != nil {
		logrus.Debugln(err)
		return nil
	}
	// logrus.Debugln(string(body))
	if err = json.Unmarshal(body, &provinces); err != nil {
		logrus.Debugln(err)
		return nil
	}
	// Administrative suffixes stripped from province names, in the same
	// order the original replacement chain applied them.
	suffixes := []string{"省", "市", "自治区", "回族", "维吾尔", "壮族", "特别行政区", "壮族自治区"}
	for i := 0; i < len(provinces); i++ {
		for _, suffix := range suffixes {
			provinces[i].Name = strings.Replace(provinces[i].Name, suffix, "", -1)
		}
		cityBody, err := fetch(fmt.Sprintf("http://www.nmc.gov.cn/f/rest/province/%s", provinces[i].Code))
		if err != nil {
			logrus.Debugln(err)
			return nil
		}
		logrus.Debugln(string(cityBody))
		if err = json.Unmarshal(cityBody, &provinces[i].Cities); err != nil {
			logrus.Debugln(err)
			return nil
		}
	}
	data, err := json.Marshal(provinces)
	if err != nil {
		// Previously this error was silently ignored.
		logrus.Debugln(err)
		return nil
	}
	return data
}

// GetIPLocation looks up ip via the Taobao geo-IP service and prints the
// raw response. Errors are logged at debug level; the body is closed.
func GetIPLocation(ip string) {
	resp, err := http.Get(fmt.Sprintf("http://ip.taobao.com/service/getIpInfo.php?ip=%s", ip))
	if err != nil {
		logrus.Debugln(err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logrus.Debugln(err)
		return
	}
	fmt.Println(string(body))
}
//
// Copyright (C) 2021 IOTech Ltd
//
// SPDX-License-Identifier: Apache-2.0

package application

import (
	"context"

	"github.com/edgexfoundry/edgex-go/internal/pkg/correlation"
	v2SchedulerContainer "github.com/edgexfoundry/edgex-go/internal/support/scheduler/v2/bootstrap/container"

	"github.com/edgexfoundry/go-mod-bootstrap/v2/bootstrap/container"
	"github.com/edgexfoundry/go-mod-bootstrap/v2/di"
	"github.com/edgexfoundry/go-mod-core-contracts/v2/errors"
	"github.com/edgexfoundry/go-mod-core-contracts/v2/v2/models"
)

// The AddIntervalAction function accepts the new IntervalAction model from the controller function
// and then invokes AddIntervalAction function of infrastructure layer to add new IntervalAction.
// The action is rejected (wrapped lookup error returned) when its referenced
// interval does not exist in the database.
func AddIntervalAction(action models.IntervalAction, ctx context.Context, dic *di.Container) (id string, edgeXerr errors.EdgeX) {
	dbClient := v2SchedulerContainer.DBClientFrom(dic.Get)
	lc := container.LoggingClientFrom(dic.Get)

	// checks the interval existence by name; only the error matters here,
	// the interval itself is discarded.
	_, edgeXerr = dbClient.IntervalByName(action.IntervalName)
	if edgeXerr != nil {
		// id is still its zero value ("") at this point.
		return id, errors.NewCommonEdgeXWrapper(edgeXerr)
	}

	addedAction, err := dbClient.AddIntervalAction(action)
	if err != nil {
		return "", errors.NewCommonEdgeXWrapper(err)
	}

	lc.Debugf("IntervalAction created on DB successfully. IntervalAction ID: %s, Correlation-ID: %s ",
		addedAction.Id,
		correlation.FromContext(ctx))

	return addedAction.Id, nil
}
package bplus // B+ 树非叶子节点 // 假设 keywords = {3,5,8,10} // 4 个键值将数据分为 5 个区间:(-INF, 3), (3, 5), (5, 8), (8, 10), (10, INF) // 5 个区间分别对应 children[0]...children[4] // m 是事先计算得到的,其依据是让所有信息的大小正好等于页的大小 // PAGE_SIZE=(m-1)*4[keywords 大小] + m*8[children 大小] type TreeNode struct { m int // m 叉树 keywords []int // 键值,用于划分数据区间 children []*TreeNode // 保存子节点指针 } // 叶子节点存储的是值,而不是区间 // 这里每个叶子节点存储 3 个数据行的键值和地址信息 // k 是事先计算得到的,其依据是让所有信息的大小正好等于页的大小 // PAGE_SIZE=k*4[keywords 大小]+k*8[dataAddress 大小]+8[prev 大小]+8[next 大小] type TreeLeafNode struct { k int keywords []int // 数据键值 dataAddress []int // 数据地址 prev, next TreeLeafNode // 该节点在链表种的前驱、后继节点 }
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2019 Datadog, Inc.

package service

import (
	"context"
	"reflect"
	"testing"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	//"github.com/go-logr/logr"
	//apiequality "k8s.io/apimachinery/pkg/api/equality"
	//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	//"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"

	"github.com/DataDog/k8s-dns-exposer/pkg/controller/config"
	"github.com/DataDog/k8s-dns-exposer/pkg/controller/predicate"
	"github.com/DataDog/k8s-dns-exposer/pkg/utils"
)

// TestReconcileService_Reconcile drives ReconcileService.Reconcile against a
// fake client in a table-driven fashion: a missing service, an annotated
// service that should get an Endpoints object, and a service carrying a
// custom refresh-rate annotation that must change the requeue duration.
func TestReconcileService_Reconcile(t *testing.T) {
	logf.SetLogger(logf.ZapLogger(true))

	// Register operator types with the runtime scheme.
	s := scheme.Scheme
	s.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.Service{})
	s.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.Endpoints{})

	// service1: annotated external-name service using the default refresh rate.
	service1 := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "service1",
			Namespace:   "foo",
			Annotations: map[string]string{config.K8sDNSExposerAnnotationKey: "true"},
		},
		Spec: corev1.ServiceSpec{
			ExternalName: "foo.datadoghq.com",
			// NOTE(review): "Nonde" looks like a typo for "None" (headless
			// service ClusterIP) — confirm whether the reconciler actually
			// inspects this value.
			ClusterIP: "Nonde",
		},
	}
	// service2: same as service1 plus a refresh-rate annotation of 42 seconds.
	service2 := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "service1",
			Namespace: "foo",
			Annotations: map[string]string{
				config.K8sDNSExposerAnnotationKey: "true",
				config.RefreshRateAnnotationKey:   "42",
			},
		},
		Spec: corev1.ServiceSpec{
			ExternalName: "foo.datadoghq.com",
			ClusterIP:    "Nonde",
		},
	}

	// fields are the collaborators injected into the reconciler under test.
	type fields struct {
		client              client.Client
		scheme              *runtime.Scheme
		dnsResolver         utils.DNSResolverIface
		updateEndpointsFunc utils.UpdateEndpointsFunc
		watcherPredicate    predicate.AnnotationPredicate
	}
	type args struct {
		request reconcile.Request
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    reconcile.Result
		wantErr bool
		// wantFunc, when set, performs extra post-reconcile assertions
		// against the fake client state (e.g. that Endpoints were created).
		wantFunc func(client client.Client) error
	}{
		{
			name: "Service doesn't exist, return without requeue",
			fields: fields{
				scheme:              s,
				client:              fake.NewFakeClient(),
				updateEndpointsFunc: utils.UpdateEndpoints,
				dnsResolver:         &FakeResolver{},
				watcherPredicate:    predicate.AnnotationPredicate{Key: config.K8sDNSExposerAnnotationKey},
			},
			args: args{
				request: reconcile.Request{},
			},
			wantErr: false,
		},
		{
			name: "Service exists, Endpoints doesn't create endpoint",
			fields: fields{
				scheme:              s,
				client:              fake.NewFakeClient(service1),
				updateEndpointsFunc: utils.UpdateEndpoints,
				dnsResolver:         &FakeResolver{},
				watcherPredicate:    predicate.AnnotationPredicate{Key: config.K8sDNSExposerAnnotationKey},
			},
			args: args{
				request: newRequest(service1.Namespace, service1.Name),
			},
			want:    reconcile.Result{RequeueAfter: config.DefaultRequeueDuration},
			wantErr: false,
			wantFunc: func(c client.Client) error {
				endpoint := &corev1.Endpoints{}
				return c.Get(context.TODO(), newRequest(service1.Namespace, service1.Name).NamespacedName, endpoint)
			},
		},
		{
			name: "Service exists and has custom refresh duration",
			fields: fields{
				scheme:              s,
				client:              fake.NewFakeClient(service2),
				updateEndpointsFunc: utils.UpdateEndpoints,
				dnsResolver:         &FakeResolver{},
				watcherPredicate:    predicate.AnnotationPredicate{Key: config.K8sDNSExposerAnnotationKey},
			},
			args: args{
				request: newRequest(service1.Namespace, service1.Name),
			},
			want:    reconcile.Result{RequeueAfter: 42 * time.Second},
			wantErr: false,
			wantFunc: func(c client.Client) error {
				endpoint := &corev1.Endpoints{}
				return c.Get(context.TODO(), newRequest(service1.Namespace, service1.Name).NamespacedName, endpoint)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r := &ReconcileService{
				client:              tt.fields.client,
				scheme:              tt.fields.scheme,
				dnsResolver:         tt.fields.dnsResolver,
				updateEndpointsFunc: tt.fields.updateEndpointsFunc,
				watcherPredicate:    tt.fields.watcherPredicate,
			}
			got, err := r.Reconcile(tt.args.request)
			if (err != nil) != tt.wantErr {
				t.Errorf("ReconcileService.Reconcile() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ReconcileService.Reconcile() = %v, want %v", got, tt.want)
			}
			if tt.wantFunc != nil {
				if err = tt.wantFunc(tt.fields.client); err != nil {
					t.Errorf("ReconcileService.Reconcile() validation function return an error: %v", err)
				}
			}
		})
	}
}

// FakeResolver is a canned utils.DNSResolverIface for tests.
type FakeResolver struct {
	ips []string
	err error
}

// Resolve returns the preconfigured IPs and error, ignoring the entry.
func (f *FakeResolver) Resolve(entry string) ([]string, error) {
	return f.ips, f.err
}

// newRequest builds a reconcile.Request for the given namespace/name.
func newRequest(ns, name string) reconcile.Request {
	return reconcile.Request{
		NamespacedName: types.NamespacedName{
			Namespace: ns,
			Name:      name,
		},
	}
}
package main import "fmt" //给定两个整数 n 和 k,返回 1 ... n 中所有可能的 k 个数的组合。 // //示例: // //输入: n = 4, k = 2 //输出: //[ //[2,4], //[3,4], //[2,3], //[1,2], //[1,3], //[1,4], //] func main() { fmt.Println(combine1(2, 3)) } //使用回溯 func combine1(n int, k int) [][]int { res := [][]int{} var dfs func(n, k, start int, path []int) dfs = func(n, k, start int, path []int) { if len(path) == k { res = append(res, path) } for i := start; i <= n; i++ { path = append(path, i) ll := make([]int, len(path)) copy(ll, path) dfs(n, k, i+1, ll) path = path[:len(path)-1] } } return res } //效率高的回溯 func combine(n int, k int) [][]int { nums := make([]int, n) for i := 1; i <= n; i++ { nums[i-1] = i } res = [][]int{} recursion(nums, k, []int{}) return res } var res [][]int // 使用数组来截取 func recursion(nums []int, k int, path []int) { if len(path) == k { b := make([]int, k) copy(b, path) res = append(res, b) return } // 剪枝 if k > len(path)+len(nums) { return } for i, top := range nums { numsNew := nums[i+1:] path = append(path, top) //fmt.Println(path,nums) recursion(numsNew, k, path) path = path[:len(path)-1] } }
package logs

import (
	"fmt"
	"os"

	"github.com/astaxie/beego/logs"
)

// RFC5424 log message levels.
const (
	LevelEmergency = iota
	LevelAlert
	LevelCritical
	LevelError
	LevelWarning
	LevelNotice
	LevelInformational
	LevelDebug
)

func init() {
	// logs.Async()
	logs.SetLogFuncCall(true)
	logs.SetLevel(logs.LevelDebug)
	// Depth 4 skips the wrapper functions in this package so the reported
	// caller is the user's call site.
	logs.SetLogFuncCallDepth(4)
	_ = logs.SetLogger(logs.AdapterConsole)
	// logs.SetLogger(logs.AdapterMultiFile, `{"filename":"../logs/library_back.logs"}`)
}

// SetLevel .
func SetLevel(level int) {
	logs.SetLevel(level)
}

// Async .
func Async(msgLen ...int64) {
	logs.Async(msgLen...)
}

// SetLogFuncCall .
func SetLogFuncCall(b bool) {
	logs.SetLogFuncCall(b)
}

// SetLogFuncCallDepth .
func SetLogFuncCallDepth(d int) {
	logs.SetLogFuncCallDepth(d)
}

// SetLogger .
func SetLogger(adapter string, config ...string) error {
	return logs.SetLogger(adapter, config...)
}

type newLoggerFunc func() logs.Logger

// Logger .
type Logger logs.Logger

// Register .
func Register(name string, log newLoggerFunc) {
	logs.Register(name, func() logs.Logger {
		return log()
	})
}

//Info log info
func Info(f interface{}, v ...interface{}) {
	logs.Info(f, v...)
}

//Debug log debug
func Debug(f interface{}, v ...interface{}) {
	logs.Debug(f, v...)
}

//Error log error
func Error(f interface{}, v ...interface{}) {
	logs.Error(f, v...)
}

//Warn log warn
func Warn(f interface{}, v ...interface{}) {
	logs.Warn(f, v...)
}

//Fatal logs the arguments at error level and exits with status 1.
func Fatal(v ...interface{}) {
	// Flatten the variadic values into one string. The previous code passed
	// the slice v as a single argument to Error, which logged the slice
	// representation ("[...]") instead of the values themselves.
	Error(fmt.Sprint(v...))
	os.Exit(1)
}

// GetLevel log level
func GetLevel() int {
	return logs.GetBeeLogger().GetLevel()
}
package calendar

import (
	"booking-calendar/schedule"
	"booking-calendar/utils"
	"log"
	"testing"
	"time"
)

// operatingSchedule is a Monday-Friday 9AM-5PM business-week schedule
// shared by all tests in this file.
var operatingSchedule = schedule.CompileBusinessWeekSchedule(parseTime("9:00AM"), parseTime("05:00PM"))

// TestCalendar_CheckAvailability verifies that an empty calendar exposes
// all eight one-hour slots of a working day.
func TestCalendar_CheckAvailability(t *testing.T) {
	var providerCalendar = NewCalendar(operatingSchedule)
	availability := providerCalendar.CheckAvailability(Query{
		Date:     parseDate("2020-11-16"),
		Duration: time.Hour,
	})
	printAvailability(availability)
	if len(availability) != 8 {
		t.Error("Schedule should fully available fo entire 8 hours of the day")
	}
}

// TestCalendar_BookAppointment books one hour and verifies both the stored
// appointment count and the reduced availability (7 remaining slots).
func TestCalendar_BookAppointment(t *testing.T) {
	var providerCalendar = NewCalendar(operatingSchedule)
	_, err := providerCalendar.BookAppointment(Appointment{
		Client:    "C1",
		Purpose:   "Test",
		StartTime: parseDateTime("2020-11-16T11:00"),
		EndTime:   parseDateTime("2020-11-16T12:00"),
	})
	if err != nil {
		t.Fail()
	}
	if len(providerCalendar.appointments) != 1 {
		t.Fail()
	}
	availability := providerCalendar.CheckAvailability(Query{
		Date:     parseDate("2020-11-16"),
		Duration: time.Hour,
	})
	printAvailability(availability)
	if len(availability) != 7 {
		t.Error("This test should book only one hour, and the calendar should have 7 hours available for the day")
	}
}

// TestCalendar_BookAppointmentWithConflict verifies that booking the exact
// same slot twice fails and does not add a second appointment.
func TestCalendar_BookAppointmentWithConflict(t *testing.T) {
	var providerCalendar = NewCalendar(operatingSchedule)
	_, err1 := providerCalendar.BookAppointment(Appointment{
		Client:    "C1",
		Purpose:   "Test",
		StartTime: parseDateTime("2020-11-16T11:00"),
		EndTime:   parseDateTime("2020-11-16T12:00"),
	})
	if err1 != nil {
		t.Fail()
	}
	if len(providerCalendar.appointments) != 1 {
		t.Fail()
	}
	// Second, identical booking must be rejected.
	_, err2 := providerCalendar.BookAppointment(Appointment{
		Client:    "C1",
		Purpose:   "Test",
		StartTime: parseDateTime("2020-11-16T11:00"),
		EndTime:   parseDateTime("2020-11-16T12:00"),
	})
	if err2 == nil {
		t.Fail()
	}
	if len(providerCalendar.appointments) != 1 {
		t.Fail()
	}
}

// TestCalendar_CancelAppointment books an appointment and verifies that
// cancelling it by id empties the calendar again.
func TestCalendar_CancelAppointment(t *testing.T) {
	var providerCalendar = NewCalendar(operatingSchedule)
	id, err := providerCalendar.BookAppointment(Appointment{
		Client:    "C1",
		Purpose:   "Test",
		StartTime: parseDateTime("2020-11-16T11:00"),
		EndTime:   parseDateTime("2020-11-16T12:00"),
	})
	if err != nil {
		t.Fail()
	}
	if len(providerCalendar.appointments) != 1 {
		t.Fail()
	}
	providerCalendar.CancelAppointment(id)
	if len(providerCalendar.appointments) != 0 {
		t.Fail()
	}
}

// printAvailability logs each free time range for debugging test output.
func printAvailability(availability []utils.SimpleTimeRange) {
	for _, simpleTimeRange := range availability {
		log.Printf("%v to %v", simpleTimeRange.Start(), simpleTimeRange.End())
	}
}

// parseTime parses a kitchen-format time ("3:04PM"); panics on bad input.
func parseTime(timeStr string) time.Time {
	if parsedTime, err := time.Parse(time.Kitchen, timeStr); err == nil {
		return parsedTime
	} else {
		panic(err)
	}
}

// parseDate parses a "YYYY-MM-DD" date; panics on bad input.
func parseDate(dateStr string) time.Time {
	if parsedDate, err := time.Parse("2006-01-02", dateStr); err == nil {
		return parsedDate
	} else {
		panic(err)
	}
}

// parseDateTime parses a "YYYY-MM-DDThh:mm" timestamp; panics on bad input.
func parseDateTime(dateTimeStr string) time.Time {
	if parsedDate, err := time.Parse("2006-01-02T15:04", dateTimeStr); err == nil {
		return parsedDate
	} else {
		panic(err)
	}
}
package aoc2020

import (
	"testing"

	aoc "github.com/janreggie/aoc/internal"
	"github.com/stretchr/testify/assert"
)

// Test_waitingArea checks parsing of the Day 11 sample input and the first
// two "simple" (adjacent-neighbor) iterations of the seat automaton, then
// iterates until the layout stabilizes and checks the final grid.
func Test_waitingArea(t *testing.T) {
	assert := assert.New(t)

	// variables for later
	// Single-letter aliases keep the 10x10 grid literals below readable.
	const e, o, f = empty, occupied, floor

	// Parsed sample: all seats start empty.
	area, err := generateWaitingArea(day11sampleInput)
	assert.NoError(err)
	assert.Equal(waitingArea{
		width:  10,
		height: 10,
		stable: false,
		representation: [][]seat{
			{e, f, e, e, f, e, e, f, e, e},
			{e, e, e, e, e, e, e, f, e, e},
			{e, f, e, f, e, f, f, e, f, f},
			{e, e, e, e, f, e, e, f, e, e},
			{e, f, e, e, f, e, e, f, e, e},
			{e, f, e, e, e, e, e, f, e, e},
			{f, f, e, f, e, f, f, f, f, f},
			{e, e, e, e, e, e, e, e, e, e},
			{e, f, e, e, e, e, e, e, f, e},
			{e, f, e, e, e, e, e, f, e, e},
		},
	}, area)

	// First iteration: every empty seat becomes occupied.
	area = area.iterateSimple()
	assert.Equal(waitingArea{
		width:  10,
		height: 10,
		stable: false,
		representation: [][]seat{
			{o, f, o, o, f, o, o, f, o, o},
			{o, o, o, o, o, o, o, f, o, o},
			{o, f, o, f, o, f, f, o, f, f},
			{o, o, o, o, f, o, o, f, o, o},
			{o, f, o, o, f, o, o, f, o, o},
			{o, f, o, o, o, o, o, f, o, o},
			{f, f, o, f, o, f, f, f, f, f},
			{o, o, o, o, o, o, o, o, o, o},
			{o, f, o, o, o, o, o, o, f, o},
			{o, f, o, o, o, o, o, f, o, o},
		},
	}, area)

	// Second iteration: crowded seats empty out again.
	area = area.iterateSimple()
	assert.Equal(waitingArea{
		width:  10,
		height: 10,
		stable: false,
		representation: [][]seat{
			{o, f, e, e, f, e, o, f, o, o},
			{o, e, e, e, e, e, e, f, e, o},
			{e, f, e, f, e, f, f, e, f, f},
			{o, e, e, e, f, e, e, f, e, o},
			{o, f, e, e, f, e, e, f, e, e},
			{o, f, e, e, e, e, o, f, o, o},
			{f, f, e, f, e, f, f, f, f, f},
			{o, e, e, e, e, e, e, e, e, o},
			{o, f, e, e, e, e, e, e, f, e},
			{o, f, o, e, e, e, e, f, o, o},
		},
	}, area)

	// Keep iterating until no seat changes; the stable flag must be set
	// and the grid must match the known fixed point from the puzzle text.
	for !area.isStable() {
		area = area.iterateSimple()
	}
	assert.Equal(waitingArea{
		width:  10,
		height: 10,
		stable: true,
		representation: [][]seat{
			{o, f, o, e, f, e, o, f, o, o},
			{o, e, e, e, o, e, e, f, e, o},
			{e, f, o, f, e, f, f, o, f, f},
			{o, e, o, o, f, o, o, f, e, o},
			{o, f, o, e, f, e, e, f, e, e},
			{o, f, o, e, o, e, o, f, o, o},
			{f, f, e, f, e, f, f, f, f, f},
			{o, e, o, e, o, o, e, o, e, o},
			{o, f, e, e, e, e, e, e, f, e},
			{o, f, o, e, o, e, o, f, o, o},
		},
	}, area)
}

// TestDay11 runs the full Day 11 solver against the sample input and the
// author's puzzle input. Result2 for "my input" is intentionally blank.
func TestDay11(t *testing.T) {
	assert := assert.New(t)
	testCases := []aoc.TestCase{
		{
			Details: "Y2020D11 sample input",
			Input:   day11sampleInput,
			Result1: "37",
			Result2: "26",
		},
		{
			Details: "Y2020D11 my input",
			Input:   day11myInput,
			Result1: "2453",
			Result2: "",
		},
	}
	for _, tt := range testCases {
		tt.Test(Day11, assert)
	}
}
package lmqtt

import (
	"net"

	"github.com/lab5e/lmqtt/pkg/config"
)

// Options is a functional option applied to the server at construction time.
type Options func(srv *server)

// WithConfig sets the configuration used by the server.
func WithConfig(cfg config.Config) Options {
	return func(srv *server) {
		srv.config = cfg
	}
}

// WithTCPListener sets the TCP listener(s) of the server. By default the
// server listens on :1883.
func WithTCPListener(listeners ...net.Listener) Options {
	return func(srv *server) {
		for _, ln := range listeners {
			srv.tcpListener = append(srv.tcpListener, ln)
		}
	}
}

// WithHook sets the hook callbacks of the server.
// Notice: WithPlugin() will overwrite hooks.
func WithHook(h Hooks) Options {
	return func(srv *server) {
		srv.hooks = h
	}
}
package runtime

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"

	"github.com/k82cn/myoci/pkg/subsystem"
	"k8s.io/klog"
)

// RunFlags is the flags of run command.
type RunFlags struct {
	Terminal    bool // wire the child's stdio to the parent's terminal
	Interactive bool // also wires stdio (treated identically to Terminal below)
	Command     string
	Args        []string

	// Embedded cgroup resource limits applied to the container process.
	subsystem.ResourceConfig
}

// Run run target command in container.
//
// It re-executes the current binary as "/proc/self/exe init <cmd> ..." inside
// fresh UTS/PID/mount/net/IPC namespaces, places the child into a new cgroup
// named after its pid, applies the resource limits, and waits for it to exit.
func Run(flags *RunFlags) {
	parent := newParentProcess(flags)
	if err := parent.Start(); err != nil {
		klog.Errorf("Failed to start parent process: %v", err)
		return
	}

	// One cgroup per container, keyed by the child's pid; torn down when
	// Run returns.
	mgrID := fmt.Sprintf("myoci-cgroup-%d", parent.Process.Pid)
	cgroupManager := subsystem.NewManager(mgrID)
	defer cgroupManager.Destroy()

	// NOTE(review): return values of Set/Apply (if any) are discarded —
	// confirm whether failures here should abort the run.
	cgroupManager.Set(&flags.ResourceConfig)
	cgroupManager.Apply(parent.Process.Pid)

	parent.Wait()

	// NOTE(review): this exits with -1 even when the container terminated
	// successfully, and it skips the deferred Destroy above — confirm
	// whether the child's exit status should be propagated instead.
	os.Exit(-1)
}

// newParentProcess builds the exec.Cmd that re-runs this binary with the
// "init" subcommand in new namespaces (UTS, PID, mount, net, IPC).
func newParentProcess(flags *RunFlags) *exec.Cmd {
	args := []string{"init", flags.Command}
	if len(flags.Args) != 0 {
		args = append(args, flags.Args...)
	}

	cmd := exec.Command("/proc/self/exe", args...)
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWUTS | syscall.CLONE_NEWPID | syscall.CLONE_NEWNS |
			syscall.CLONE_NEWNET | syscall.CLONE_NEWIPC,
	}

	// Attach the caller's terminal when requested.
	if flags.Terminal || flags.Interactive {
		cmd.Stdin = os.Stdin
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
	}

	return cmd
}
package presence

import "testing"

// TestStringer verifies the human-readable form of Status for the zero value
// (unknown) and for the Online and Offline constants.
func TestStringer(t *testing.T) {
	// A zero-value Event must stringify its Status as "unknown".
	e := &Event{}
	if e.Status.String() != unknown {
		t.Errorf("status string should be %s for uninitialized Event, got: %s ", unknown, e.Status.String())
	}

	e.Status = Online
	if e.Status.String() != online {
		t.Errorf("status string should be %s for online Event, got: %s ", online, e.Status.String())
	}

	e.Status = Offline
	if e.Status.String() != offline {
		t.Errorf("status string should be %s for offline Event, got: %s ", offline, e.Status.String())
	}
}
package tmp

// Directory names (and %v templates) used when scaffolding a generated project.
const (
	DirCore          = "core"
	DirCmd           = "cmd"
	DirDatabase      = "database"
	DirHub           = "hub"
	DirHubHelper     = "hub_helper"
	DirHelper        = "helper"
	DirHandlers      = "handlers"
	DirHandler       = "%v_handler" // per-protocol handler directory, e.g. "tcp_handler"
	DirHandlerHelper = "%v_helper"  // per-protocol helper directory
	DirStore         = "store"
	DirDBSStore      = "%v_store" // per-database store directory, e.g. "postgres_store"
	DirSource        = "source"
	DirConfigs       = "configs"
	DirUploads       = "uploads"
)

// File names emitted by the generator.
const (
	FileMain          = "main.go"
	FileDatabase      = "database.go"
	FileServer        = "server.go"
	FileModel         = "model.go"
	FileFunction      = "function.go"
	FileHelper        = "helper.go"
	FileHandler       = "handler.go"
	FileHubMiddleware = "middleware.go"
	FileStore         = "store.go"
	FileHub           = "hub.go"
	FileConfigServer  = "configServer.json"
	FileMod           = "go.mod"
	FileSum           = "go.sum"
	FileDocker        = "Dockerfile"
	FileComposeBuild  = "docker-compose-build.yaml"
	FileComposeLocal  = "docker-compose-local.yaml"
	FileReadme        = "README.md"
)

// DBType identifies the database backend a generated project targets.
type DBType string

// Supported database backends.
const (
	Postgres DBType = "postgres"
	Mongodb  DBType = "mongodb"
)

// HandlerType identifies the transport protocol of a generated handler.
type HandlerType string

// Supported handler transports.
const (
	TCP  HandlerType = "tcp"
	MQTT HandlerType = "mqtt"
	WS   HandlerType = "ws"
)
package edit

import (
	"fmt"
	"regexp"

	"github.com/MakeNowJust/heredoc"
	"github.com/cli/cli/pkg/iostreams"
	"github.com/heaths/gh-label/internal/github"
	"github.com/heaths/gh-label/internal/options"
	"github.com/heaths/gh-label/internal/utils"
	"github.com/spf13/cobra"
)

// repoURLRe extracts the host and "owner/repo/..." tail from an API URL such
// as https://api.github.com/repos/owner/repo/labels/name. Compiled once at
// package scope instead of on every edit call.
var repoURLRe = regexp.MustCompile("^https://api.([^/]+)/repos/(.*)$")

// editOptions carries the flag values and (for tests) injectable client/IO.
type editOptions struct {
	name        string
	color       string
	description string
	newName     string

	// test
	client *github.Client
	io     *iostreams.IOStreams
}

// EditCmd builds the "gh label edit" cobra command.
func EditCmd(globalOpts *options.GlobalOptions) *cobra.Command {
	opts := &editOptions{}
	cmd := &cobra.Command{
		Use:   "edit <name>",
		Short: "Edit the label <name> in the repository",
		Example: heredoc.Doc(`
			$ gh label edit general --new-name feedback
			$ gh label edit feedback --color c046ff --description "User feedback"
		`),
		Args: cobra.ExactArgs(1),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			// Validate and normalize the color before running.
			if opts.color != "" {
				if color, err := utils.ValidateColor(opts.color); err != nil {
					return fmt.Errorf(`invalid flag "color": %s`, err)
				} else {
					// Set color without "#" prefix.
					opts.color = color
				}
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.name = args[0]
			return edit(globalOpts, opts)
		},
	}

	cmd.Flags().StringVarP(&opts.color, "color", "c", "", `The color of the label with or without "#" prefix.`)
	cmd.Flags().StringVarP(&opts.description, "description", "d", "", "Description of the label.")
	cmd.Flags().StringVarP(&opts.newName, "new-name", "", "", "Rename the label to the given new name.")

	return cmd
}

// edit updates (and optionally renames) the label and prints the label's
// web URL, with a human-readable summary when stdout is a TTY.
func edit(globalOpts *options.GlobalOptions, opts *editOptions) error {
	// Lazily construct the client and IO streams unless tests injected them.
	if opts.client == nil {
		owner, repo := globalOpts.Repo()
		cli := &github.Cli{
			Owner: owner,
			Repo:  repo,
		}
		opts.client = github.New(cli)
	}
	if opts.io == nil {
		opts.io = iostreams.System()
	}

	label := github.EditLabel{
		Label: github.Label{
			Name:        opts.name,
			Color:       opts.color,
			Description: opts.description,
		},
		NewName: opts.newName,
	}

	updated, err := opts.client.UpdateLabel(label)
	if err != nil {
		// Bug fix: this path updates a label; the message previously said
		// "failed to create label".
		return fmt.Errorf("failed to update label; error: %w", err)
	}

	// Turn the API URL into a browsable web URL when it matches the
	// expected shape; otherwise fall back to printing the raw URL.
	matches := repoURLRe.FindStringSubmatch(updated.URL)

	if opts.io.IsStdoutTTY() {
		if label.Name != updated.Name {
			fmt.Fprintf(opts.io.Out, "Renamed label '%s' to '%s'\n\n", label.Name, updated.Name)
		} else {
			fmt.Fprintf(opts.io.Out, "Updated label '%s'\n\n", updated.Name)
		}
	}

	if len(matches) == 3 {
		fmt.Fprintf(opts.io.Out, "https://%s/%s\n", matches[1], matches[2])
	} else {
		fmt.Fprintln(opts.io.Out, updated.URL)
	}

	return nil
}
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
//     http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.

package imds

import (
	"encoding/json"

	"github.com/aws/amazon-vpc-cni-plugins/network/vpc"
	"github.com/Microsoft/hcsshim"
	log "github.com/cihub/seelog"
)

const (
	// hnsAclPolicyAllProtocols represents all the protocols.
	hnsAclPolicyAllProtocols = 256
	// hnsAclPolicyHighPriority represents the higher priority number.
	// (Lower numeric value means the rule is evaluated first.)
	hnsAclPolicyHighPriority = 200
	// hnsAclPolicyLowPriority represents the lower priority number.
	hnsAclPolicyLowPriority = 300
)

// BlockInstanceMetadataEndpoint blocks the IMDS endpoint for Windows by creating HNS ACLs.
// Currently we block IMDS endpoint over IPv4 only. We will want to block the IPv6 endpoint as
// well once HNS ACLs support IPv6.
//
// Three ACLs are attached to the endpoint: a high-priority outbound Block for
// the IMDS IPv4 address, then low-priority Allow rules for all other inbound
// and outbound traffic.
func BlockInstanceMetadataEndpoint(hnsEndpoint *hcsshim.HNSEndpoint) error {
	log.Infof("Adding ACLs to block instance metadata endpoint %s", vpc.InstanceMetadataEndpointIPv4)

	// Create an ACL policy to block traffic to instance metadata endpoint.
	err := addEndpointPolicy(
		hnsEndpoint,
		hcsshim.ACLPolicy{
			Type:            hcsshim.ACL,
			Action:          hcsshim.Block,
			Direction:       hcsshim.Out,
			RemoteAddresses: vpc.InstanceMetadataEndpointIPv4,
			Protocol:        hnsAclPolicyAllProtocols,
			Priority:        hnsAclPolicyHighPriority,
		})
	if err != nil {
		log.Errorf("Failed to add endpoint ACL policy to block imds traffic: %v.", err)
		return err
	}

	// Create an ACL policy to allow all incoming traffic.
	err = addEndpointPolicy(
		hnsEndpoint,
		hcsshim.ACLPolicy{
			Type:      hcsshim.ACL,
			Action:    hcsshim.Allow,
			Direction: hcsshim.In,
			Protocol:  hnsAclPolicyAllProtocols,
			Priority:  hnsAclPolicyLowPriority,
		})
	if err != nil {
		log.Errorf("Failed to add endpoint ACL policy to allow incoming traffic: %v.", err)
		return err
	}

	// Create an ACL policy to allow all outgoing traffic.
	// The priority of this policy should be lower than that of the block policy for disabling IMDS.
	err = addEndpointPolicy(
		hnsEndpoint,
		hcsshim.ACLPolicy{
			Type:      hcsshim.ACL,
			Action:    hcsshim.Allow,
			Direction: hcsshim.Out,
			Protocol:  hnsAclPolicyAllProtocols,
			Priority:  hnsAclPolicyLowPriority,
		})
	if err != nil {
		log.Errorf("Failed to add endpoint ACL policy to allow outgoing traffic: %v.", err)
		return err
	}

	return nil
}

// addEndpointPolicy adds a policy to an HNS endpoint.
// The policy is JSON-encoded and appended to the endpoint's policy list;
// note it does not push the updated endpoint back to HNS itself.
func addEndpointPolicy(ep *hcsshim.HNSEndpoint, policy interface{}) error {
	buf, err := json.Marshal(policy)
	if err != nil {
		log.Errorf("Failed to encode policy: %v.", err)
		return err
	}

	ep.Policies = append(ep.Policies, buf)

	return nil
}
package cacheProvider

import (
	"time"

	"github.com/BorisBorshevsky/GolangDemos/catapult/addons/cache"
	"gopkg.in/redis.v5"
)

// RedisTTLCache builds a cache provider backed by Redis in which every entry
// expires after ttl.
//
// NOTE(review): the Redis address 127.0.0.1:6379 and pool/timeout settings
// are hardcoded — confirm whether they should be configurable.
func RedisTTLCache(ttl time.Duration) *RedisTTLCacheProvider {
	client := redis.NewClient(&redis.Options{
		Addr:         "127.0.0.1" + ":6379",
		Password:     "", // no password set
		MaxRetries:   3,
		PoolSize:     10,
		PoolTimeout:  time.Second,
		ReadTimeout:  time.Second,
		WriteTimeout: time.Second,
		DialTimeout:  2 * time.Second,
	})

	return &RedisTTLCacheProvider{
		Client:  client,
		timeout: ttl,
	}
}

// RedisTTLCacheProvider is a Redis-backed key/value cache whose entries
// share a single fixed TTL.
type RedisTTLCacheProvider struct {
	*redis.Client
	timeout time.Duration // TTL applied to every Set
}

// Set stores v under k with the provider's TTL.
func (r *RedisTTLCacheProvider) Set(k, v string) error {
	return r.Client.Set(k, v, r.timeout).Err()
}

// Get returns the value stored under k. A missing key is mapped from
// redis.Nil to the package-level cache.NotExist sentinel.
func (r *RedisTTLCacheProvider) Get(k string) ([]byte, error) {
	res, err := r.Client.Get(k).Bytes()
	if err == redis.Nil {
		return res, cache.NotExist
	}
	return res, err
}
package main

import "fmt"

func main() {
	data := []int{3, 232, -1, 34, 6565, 43434}
	Sort(data)
	fmt.Println(data)
}

// Sort orders arr in place in ascending order using insertion sort:
// each element is shifted left past any larger predecessors.
func Sort(arr []int) {
	for i := 1; i < len(arr); i++ {
		key := arr[i]
		j := i
		// Shift larger elements one slot to the right.
		for j > 0 && arr[j-1] > key {
			arr[j] = arr[j-1]
			j--
		}
		arr[j] = key
	}
}
package main

import "fmt"

// isValid reports whether every bracket in s is properly matched and nested.
// Characters other than ()[]{}  are ignored, matching the original behavior.
func isValid(s string) bool {
	// An odd-length string can never be balanced.
	if len(s)%2 == 1 {
		return false
	}

	// At most half the characters can be unmatched openers, so a stack of
	// len(s)/2 suffices; exceeding it means the string cannot balance.
	stack := make([]byte, 0, len(s)/2)

	for i := 0; i < len(s); i++ {
		c := s[i]
		switch c {
		case '(', '[', '{':
			if len(stack) == cap(stack) {
				return false
			}
			stack = append(stack, c)
		case ')', ']', '}':
			want := byte('(')
			if c == ']' {
				want = '['
			} else if c == '}' {
				want = '{'
			}
			if len(stack) == 0 || stack[len(stack)-1] != want {
				return false
			}
			stack = stack[:len(stack)-1]
		}
	}

	// Balanced only if every opener was consumed.
	return len(stack) == 0
}

func main() {
	fmt.Println(isValid("(("))
	fmt.Println(isValid("()[]{}"))
}
package gouldian

import (
	"fmt"
	"strings"
)

/*
Node of trie
*/
type Node struct {
	Path string    // substring from the route "owned" by the node
	Heir []*Node   // heir nodes
	Func Endpoint  // end point associated with node

	/*
		TODO
		- Wild *Node // special node that captures any path
		- Type int   // Node type
	*/
}

// NewRoutes creates new routing table
func NewRoutes(seq ...Routable) *Node {
	// Seed the trie with a single "/" child under an anonymous root.
	root := &Node{
		Heir: []*Node{
			{
				Path: "/",
				Heir: make([]*Node, 0),
			},
		},
	}

	for _, route := range seq {
		root.appendEndpoint(route())
	}

	return root
}

/*
lookup is hot-path discovery of node at the path.

It returns the deepest matching node and `at`, the number of bytes of path
consumed; captured wildcard segments are appended to *values.
*/
func (root *Node) lookup(path string, values *[]string) (at int, node *Node) {
	node = root
lookup:
	for {
		// leaf node, no further lookup is possible
		// return current `node` and position `at` path
		if len(node.Heir) == 0 {
			return
		}

		for _, heir := range node.Heir {
			if len(path[at:]) < len(heir.Path) {
				// No match, path cannot match node
				continue
			}

			if path[at] != heir.Path[0] {
				// No match, path cannot match node
				// this is micro-optimization to reduce overhead of memequal
				continue
			}

			// the node consumes the entire remaining path ("/*")
			if len(heir.Path) == 2 && heir.Path[1] == '*' {
				*values = append(*values, path[at+1:])
				at = len(path)
				node = heir
				return
			}

			// NOTE(review): the '*' alternative below is unreachable —
			// it is already handled (with a return) just above.
			if len(heir.Path) == 2 && (heir.Path[1] == ':' || heir.Path[1] == '_' || heir.Path[1] == '*') {
				// the node is a wild-card that matches any path segment
				// skip the path until the next '/' and, for ':', capture
				// the segment as a value
				p := 1
				max := len(path[at:])
				for p < max && path[at+p] != '/' {
					p++
				}

				if heir.Path[1] == ':' {
					*values = append(*values, path[at+1:at+p])
				}

				at = at + p
				node = heir
				continue lookup
			}

			if path[at:at+len(heir.Path)] == heir.Path {
				// node matches the path, continue lookup
				at = at + len(heir.Path)
				node = heir
				continue lookup
			}
		}

		return
	}
}

/*
appendEndpoint to trie under the path.

Input path is a collection of segments, each segment is either path literal
or wildcard symbol `:` reserved for lenses.
*/
func (root *Node) appendEndpoint(path []string, endpoint Endpoint) {
	// Empty path: attach (or chain via Or) the endpoint at "/".
	if len(path) == 0 {
		_, n := root.appendTo("/")
		if n.Func == nil {
			n.Func = endpoint
		} else {
			n.Func = n.Func.Or(endpoint)
		}
		return
	}

	at := 0
	node := root

	for i, segment := range path {
		// `/` required to speed up lookup on the hot-path
		segment = "/" + segment
		at, node = node.appendTo(segment)

		// split the node and add endpoint
		if len(segment[at:]) != 0 {
			split := &Node{
				Path: segment[at:],
				Heir: make([]*Node, 0),
			}
			node.Heir = append(node.Heir, split)
			node = split
		}

		// the last segment needs to be enhanced with endpoint
		if i == len(path)-1 {
			if node.Func == nil {
				node.Func = endpoint
			} else {
				node.Func = node.Func.Or(endpoint)
			}
		}
	}
}

/*
appendTo finds the node in trie where to add path (or segment).
It returns the candidate node and length of "consumed" path.
*/
func (root *Node) appendTo(path string) (at int, node *Node) {
	node = root
lookup:
	for {
		if len(node.Heir) == 0 {
			// leaf node, no further lookup is possible
			// return current `node` and position `at` path
			return
		}

		for _, heir := range node.Heir {
			prefix := longestCommonPrefix(path[at:], heir.Path)
			at = at + prefix
			switch {
			case prefix == 0:
				// No common prefix, jump to next heir
				continue
			case prefix == len(heir.Path):
				// Common prefix is the node itself, continue lookup into heirs
				node = heir
				continue lookup
			default:
				// Common prefix is shorter than node itself, split is required
				if prefixNode := node.heirByPath(heir.Path[:prefix]); prefixNode != nil {
					// prefix already exists, current node needs to be moved
					// under existing one
					node.Path = node.Path[prefix:]
					prefixNode.Heir = append(prefixNode.Heir, node)
					node = prefixNode
					return
				}

				// prefix does not exist, current node needs to be split
				// the list of heirs needs to be patched
				for j := 0; j < len(node.Heir); j++ {
					if node.Heir[j].Path == heir.Path {
						n := heir
						node.Heir[j] = &Node{
							Path: heir.Path[:prefix],
							Heir: []*Node{n},
						}
						n.Path = heir.Path[prefix:]
						node = node.Heir[j]
						return
					}
				}
			}
		}

		// No heir is found return current node
		return
	}
}

// heirByPath returns the direct child whose Path equals path, or nil.
func (root *Node) heirByPath(path string) *Node {
	for i := 0; i < len(root.Heir); i++ {
		if root.Heir[i].Path == path {
			return root.Heir[i]
		}
	}
	return nil
}

/*
Walk through trie, use for debug purposes only
*/
func (root *Node) Walk(f func(int, *Node)) {
	walk(root, 0, f)
}

// walk recursively visits node and its heirs, passing the depth to f.
func walk(node *Node, level int, f func(int, *Node)) {
	f(level, node)
	for _, n := range node.Heir {
		walk(n, level+1, f)
	}
}

// Println outputs trie to console
func (root *Node) Println() {
	root.Walk(
		func(i int, n *Node) {
			fmt.Println(strings.Repeat(" ", i), n.Path)
		},
	)
}

// Endpoint converts trie to Endpoint
func (root *Node) Endpoint() Endpoint {
	return func(ctx *Context) (err error) {
		if ctx.Request == nil {
			return ErrNoMatch
		}

		path := ctx.Request.URL.Path
		ctx.free()
		ctx.values = ctx.values[:0]
		// Match only if the whole path was consumed and the node has a handler.
		i, node := root.lookup(path, &ctx.values)
		if len(path) == i && node.Func != nil {
			return node.Func(ctx)
		}
		return ErrNoMatch
	}
}

//
// Utils
//

// min returns the smaller of a and b.
func min(a, b int) int {
	if a <= b {
		return a
	}
	return b
}

// longestCommonPrefix returns the length of the shared leading bytes of a and b.
func longestCommonPrefix(a, b string) (prefix int) {
	max := min(len(a), len(b))
	for prefix < max && a[prefix] == b[prefix] {
		prefix++
	}
	return
}
package main

import (
	"fmt"

	facebook "github.com/madebyais/facebook-go-sdk"
)

// BasicFeed represents the basic of how to use facebook-go-sdk:
// it fetches the authenticated user's feed from the Graph API and prints it.
// Panics if the API call fails (acceptable for a demo program).
func BasicFeed() {
	// initalize facebook-go-sdk
	fb := facebook.New()

	// set your access token
	// NOTES: Please exchange with your access token
	fb.SetAccessToken(`...`)

	// and directly get your feed :)
	data, err := fb.API(`/me/feed`).Get()
	if err != nil {
		panic(err)
	}

	// print your feed
	fmt.Println(` ## SAMPLE - FEED `)
	fmt.Println(data)
}
package main

import (
	"bytes"
)

// defangIPaddr returns address with every '.' replaced by "[.]",
// e.g. "1.1.1.1" -> "1[.]1[.]1[.]1". All other bytes pass through unchanged.
func defangIPaddr(address string) string {
	var out bytes.Buffer
	for i := range address {
		switch address[i] {
		case '.':
			out.WriteString("[.]")
		default:
			out.WriteByte(address[i])
		}
	}
	return out.String()
}

func main() {

}
package _713_Subarray_Product_Less_Than_K func numSubarrayProductLessThanK(nums []int, k int) int { return numSubarrayProductLessThanKWithSlidingWindow(nums, k) } func numSubarrayProductLessThanKWithSlidingWindow(nums []int, k int) int { var ( count int p, q int // 前后idx prod int = 1 // 乘积 ) for q = 0; q < len(nums); q++ { // 以右界增加处理 prod *= nums[q] for prod >= k && p <= q { prod /= nums[p] p++ } count += (q - p + 1) // 每向右移动一次右界,增加的连续个数 } return count }
// Copyright 2015 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.

package updater

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	"github.com/keybase/go-updater/util"
)

// Version is the updater version
const Version = "0.2.8"

// Updater knows how to find and apply updates
type Updater struct {
	source UpdateSource // where updates are discovered
	config Config       // persisted preferences (auto-update, install ID)
	log    Log
}

// UpdateSource defines where the updater can find updates
type UpdateSource interface {
	// Description is a short description about the update source
	Description() string
	// FindUpdate finds an update given options
	FindUpdate(options UpdateOptions) (*Update, error)
}

// Context defines options, UI and hooks for the updater.
// This is where you can define custom behavior specific to your apps.
type Context interface {
	GetUpdateUI() UpdateUI
	UpdateOptions() UpdateOptions
	Verify(update Update) error
	BeforeUpdatePrompt(update Update, options UpdateOptions) error
	BeforeApply(update Update) error
	Apply(update Update, options UpdateOptions, tmpDir string) error
	AfterApply(update Update) error
	Restart() error
	ReportError(err error, update *Update, options UpdateOptions)
	ReportAction(action UpdateAction, update *Update, options UpdateOptions)
	ReportSuccess(update *Update, options UpdateOptions)
	AfterUpdateCheck(update *Update)
}

// Config defines configuration for the Updater
type Config interface {
	// GetUpdateAuto returns (auto-update enabled, whether the value was ever set).
	GetUpdateAuto() (bool, bool)
	SetUpdateAuto(b bool) error
	GetUpdateAutoOverride() bool
	SetUpdateAutoOverride(bool) error
	GetInstallID() string
	SetInstallID(installID string) error
}

// Log is the logging interface for this package
type Log interface {
	Debug(...interface{})
	Info(...interface{})
	Debugf(s string, args ...interface{})
	Infof(s string, args ...interface{})
	Warningf(s string, args ...interface{})
	Errorf(s string, args ...interface{})
}

// NewUpdater constructs an Updater
func NewUpdater(source UpdateSource, config Config, log Log) *Updater {
	return &Updater{
		source: source,
		config: config,
		log:    log,
	}
}

// Update checks, downloads and performs an update
func (u *Updater) Update(ctx Context) (*Update, error) {
	options := ctx.UpdateOptions()
	update, err := u.update(ctx, options)
	report(ctx, err, update, options)
	return update, err
}

// update returns the update received, and an error if the update was not
// performed. The error with be of type Error. The error may be due to the user
// (or system) canceling an update, in which case error.IsCancel() will be true.
func (u *Updater) update(ctx Context, options UpdateOptions) (*Update, error) {
	update, err := u.checkForUpdate(ctx, options)
	if err != nil {
		return nil, findErr(err)
	}
	if update == nil || !update.NeedUpdate {
		// No update available
		return nil, nil
	}
	u.log.Infof("Got update with version: %s", update.Version)

	// App-specific hook; may veto the prompt entirely.
	err = ctx.BeforeUpdatePrompt(*update, options)
	if err != nil {
		return update, err
	}

	// Prompt for update
	updateAction, err := u.promptForUpdateAction(ctx, *update, options)
	if err != nil {
		return update, promptErr(err)
	}
	switch updateAction {
	case UpdateActionApply:
		ctx.ReportAction(UpdateActionApply, update, options)
	case UpdateActionAuto:
		ctx.ReportAction(UpdateActionAuto, update, options)
	case UpdateActionSnooze:
		ctx.ReportAction(UpdateActionSnooze, update, options)
		return update, CancelErr(fmt.Errorf("Snoozed update"))
	case UpdateActionCancel:
		ctx.ReportAction(UpdateActionCancel, update, options)
		return update, CancelErr(fmt.Errorf("Canceled"))
	case UpdateActionError:
		return update, promptErr(fmt.Errorf("Unknown prompt error"))
	case UpdateActionContinue:
		// Continue
	}

	// Linux updates don't have assets so it's ok to prompt for update above before
	// we check for nil asset.
	if update.Asset == nil || update.Asset.URL == "" {
		u.log.Info("No update asset to apply")
		return update, nil
	}

	tmpDir := u.tempDir()
	defer u.Cleanup(tmpDir)
	if err := u.downloadAsset(update.Asset, tmpDir, options); err != nil {
		return update, downloadErr(err)
	}

	u.log.Infof("Verify asset: %s", update.Asset.LocalPath)
	if err := ctx.Verify(*update); err != nil {
		return update, verifyErr(err)
	}

	if err := u.apply(ctx, *update, options, tmpDir); err != nil {
		return update, err
	}

	u.log.Info("Restarting")
	if err := ctx.Restart(); err != nil {
		return update, restartErr(err)
	}

	return update, nil
}

// apply runs the BeforeApply/Apply/AfterApply hook sequence, wrapping any
// failure as an apply error.
func (u *Updater) apply(ctx Context, update Update, options UpdateOptions, tmpDir string) error {
	u.log.Info("Before apply")
	if err := ctx.BeforeApply(update); err != nil {
		return applyErr(err)
	}

	u.log.Info("Applying update")
	if err := ctx.Apply(update, options, tmpDir); err != nil {
		return applyErr(err)
	}

	u.log.Info("After apply")
	if err := ctx.AfterApply(update); err != nil {
		return applyErr(err)
	}

	return nil
}

// downloadAsset will download the update to a temporary path (if not cached),
// check the digest, and set the LocalPath property on the asset.
func (u *Updater) downloadAsset(asset *Asset, tmpDir string, options UpdateOptions) error {
	if asset == nil {
		return fmt.Errorf("No asset to download")
	}

	downloadOptions := util.DownloadURLOptions{
		Digest:        asset.Digest,
		RequireDigest: true,
		UseETag:       true,
		Log:           u.log,
	}

	downloadPath := filepath.Join(tmpDir, asset.Name)
	// If asset had a file extension, lets add it back on
	if err := util.DownloadURL(asset.URL, downloadPath, downloadOptions); err != nil {
		return err
	}
	asset.LocalPath = downloadPath
	return nil
}

// checkForUpdate checks a update source (like a remote API) for an update.
// It may set an InstallID, if the server tells us to.
//
// NOTE(review): the local variable `findErr` below shadows the package-level
// findErr() error-wrapper used elsewhere in this file — consider renaming.
func (u *Updater) checkForUpdate(ctx Context, options UpdateOptions) (*Update, error) {
	u.log.Infof("Checking for update, current version is %s", options.Version)
	u.log.Infof("Using updater source: %s", u.source.Description())
	u.log.Debugf("Using options: %#v", options)

	update, findErr := u.source.FindUpdate(options)
	if findErr != nil {
		return nil, findErr
	}
	if update == nil {
		return nil, nil
	}

	// Save InstallID if we received one
	if update.InstallID != "" && u.config.GetInstallID() != update.InstallID {
		u.log.Debugf("Saving install ID: %s", update.InstallID)
		if err := u.config.SetInstallID(update.InstallID); err != nil {
			// Non-fatal: report and continue with the update.
			u.log.Warningf("Error saving install ID: %s", err)
			ctx.ReportError(configErr(fmt.Errorf("Error saving install ID: %s", err)), update, options)
		}
	}

	return update, nil
}

// promptForUpdateAction prompts the user for permission to apply an update
func (u *Updater) promptForUpdateAction(ctx Context, update Update, options UpdateOptions) (UpdateAction, error) {
	u.log.Debug("Prompt for update")

	auto, autoSet := u.config.GetUpdateAuto()
	autoOverride := u.config.GetUpdateAutoOverride()
	u.log.Debugf("Auto update: %s (set=%s autoOverride=%s)", strconv.FormatBool(auto), strconv.FormatBool(autoSet), strconv.FormatBool(autoOverride))
	// Auto-apply without prompting unless the override flag suppresses it.
	if auto && !autoOverride {
		return UpdateActionAuto, nil
	}

	updateUI := ctx.GetUpdateUI()

	// If auto update never set, default to true
	autoUpdate := !autoSet
	promptOptions := UpdatePromptOptions{AutoUpdate: autoUpdate}
	updatePromptResponse, err := updateUI.UpdatePrompt(update, options, promptOptions)
	if err != nil {
		return UpdateActionError, err
	}
	if updatePromptResponse == nil {
		return UpdateActionError, fmt.Errorf("No response")
	}

	// Persist the user's auto-update choice for any decisive response.
	if updatePromptResponse.Action != UpdateActionContinue {
		u.log.Debugf("Update prompt response: %#v", updatePromptResponse)
		if err := u.config.SetUpdateAuto(updatePromptResponse.AutoUpdate); err != nil {
			u.log.Warningf("Error setting auto preference: %s", err)
			ctx.ReportError(configErr(fmt.Errorf("Error setting auto preference: %s", err)), &update, options)
		}
	}

	return updatePromptResponse.Action, nil
}

// report forwards the update outcome to the Context, suppressing cancel
// errors (snooze/cancel are user choices, not failures).
func report(ctx Context, err error, update *Update, options UpdateOptions) {
	if err != nil {
		// Don't report cancels
		switch e := err.(type) {
		case Error:
			if e.IsCancel() {
				return
			}
		}
		ctx.ReportError(err, update, options)
	} else if update != nil {
		ctx.ReportSuccess(update, options)
	}
}

// tempDir, if specified, will contain files that were replaced during an update
// and will be removed after an update. The temp dir should already exist.
func (u *Updater) tempDir() string {
	tmpDir := util.TempPath("", "KeybaseUpdater.")
	if err := util.MakeDirs(tmpDir, 0700, u.log); err != nil {
		u.log.Warningf("Error trying to create temp dir: %s", err)
		return ""
	}
	return tmpDir
}

// Cleanup removes temporary files
func (u *Updater) Cleanup(tmpDir string) {
	if tmpDir != "" {
		u.log.Debugf("Remove temporary directory: %q", tmpDir)
		if err := os.RemoveAll(tmpDir); err != nil {
			u.log.Warningf("Error removing temporary directory %q: %s", tmpDir, err)
		}
	}
}
package main

import (
	"bufio"
	"flag"
	"fmt"
	"log"
	"os"
)

// changelog is the file every entry is appended to.
var changelog = "Changelog"

func main() {
	a := flag.String("a", "", "Author name")
	m := flag.String("m", "", "Commit message")
	flag.Parse()

	Wlog(*a, *m)
}

// MultiMsg interactively reads changelog lines from stdin, appending each to
// the changelog file, until the user types the vi-style sentinel ":wq".
func MultiMsg() error {
	logFile, err := os.OpenFile(changelog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer logFile.Close()

	w := bufio.NewWriter(logFile)
	scanner := bufio.NewScanner(os.Stdin)
	var text string

	fmt.Println("******************************")
	fmt.Println("* You can Exit by type \":wq\" *")
	fmt.Println("******************************")
	fmt.Println("Enter your Changelog message: ")

	// Fixed stale comment: the loop exits on ":wq", not "q".
	for text != ":wq" { // break the loop when the user types ":wq"
		scanner.Scan()
		text = scanner.Text()
		if text != ":wq" {
			fmt.Fprintf(w, "%s\n", text)
			w.Flush()
		}
	}
	return nil
}

// Wlog appends a changelog entry. The author (flag or interactive prompt) is
// logged with a timestamp via the standard logger; the message is written as
// a "- msg" bullet, falling back to the interactive MultiMsg flow when empty.
func Wlog(author string, msg string) error {
	logFile, err := os.OpenFile(changelog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	w := bufio.NewWriter(logFile)
	defer logFile.Close()

	// Route the standard logger's timestamped output into the file.
	log.SetOutput(logFile)

	if author != "" {
		log.Println(author)
	} else {
		// fmt.Scanln reads a single whitespace-delimited token only.
		fmt.Print("Enter Author name: ")
		var input string
		fmt.Scanln(&input)
		log.Printf("%s", input)
	}

	if msg != "" {
		fmt.Fprintf(w, "- %s\n", msg)
		w.Flush()
	} else {
		// Propagate failures from the interactive path; the original
		// silently discarded this error.
		return MultiMsg()
	}

	return nil
}
package _020_10_24

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Test_findMedianSortedArrays exercises findMedianSortedArrays (LeetCode 4,
// median of two sorted arrays) over odd and even combined lengths and an
// empty first array.
func Test_findMedianSortedArrays(t *testing.T) {
	// Each case: two sorted input slices and the expected median.
	table := []struct {
		input1 []int
		input2 []int
		output float64
	}{
		{
			[]int{1, 3},
			[]int{2},
			2,
		},
		{
			[]int{1, 2},
			[]int{3, 4},
			2.5,
		},
		{
			[]int{},
			[]int{1},
			1.0,
		},
	}

	for _, tbl := range table {
		assert.Equal(t, tbl.output, findMedianSortedArrays(tbl.input1, tbl.input2))
	}
}
// // MinIO Object Storage (c) 2021 MinIO, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package madmin import ( "context" "errors" "fmt" "io" "net/http" "net/url" ) // InspectOptions provides options to Inspect. type InspectOptions struct { Volume, File string } // Inspect makes an admin call to download a raw files from disk. func (adm *AdminClient) Inspect(ctx context.Context, d InspectOptions) (key [32]byte, c io.ReadCloser, err error) { path := fmt.Sprintf(adminAPIPrefix + "/inspect-data") q := make(url.Values) q.Set("volume", d.Volume) q.Set("file", d.File) resp, err := adm.executeMethod(ctx, http.MethodGet, requestData{ relPath: path, queryValues:q, }, ) if err != nil { closeResponse(resp) return key, nil, err } if resp.StatusCode != http.StatusOK { return key, nil, httpRespToErrorResponse(resp) } if resp.Body == nil { return key, nil, errors.New("body is nil") } _, err = io.ReadFull(resp.Body, key[:1]) if err != nil { closeResponse(resp) return key, nil, err } // This is the only version we know. if key[0] != 1 { return key, nil, errors.New("unknown data version") } // Read key... _, err = io.ReadFull(resp.Body, key[:]) if err != nil { closeResponse(resp) return key, nil, err } // Return body return key, resp.Body, nil }
package crypto // Crypto interface for signing algorithm type Crypto interface { Name() string Sign(msg string, secret string) ([]byte, error) }
package logic

import (
	"context"

	"github.com/just-coding-0/learn_example/micro_service/zero/internal/svc"
	"github.com/just-coding-0/learn_example/micro_service/zero/internal/types"
	"github.com/just-coding-0/learn_example/micro_service/zero/rpc/history/history"

	"github.com/tal-tech/go-zero/core/logx"
)

// LastEchoLogic handles the "last echo" API endpoint by delegating to the
// history RPC service.
type LastEchoLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewLastEchoLogic builds a LastEchoLogic bound to the request context and
// shared service context.
func NewLastEchoLogic(ctx context.Context, svcCtx *svc.ServiceContext) LastEchoLogic {
	return LastEchoLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}

// LastEcho fetches the most recent echo from the history RPC and maps the
// RPC response onto the HTTP response type.
func (l *LastEchoLogic) LastEcho() (*types.LastEchoResponse, error) {
	resp, err := l.svcCtx.History.Last(l.ctx, &history.LastRequest{})
	if err != nil {
		return nil, err
	}

	return &types.LastEchoResponse{
		LastEcho: resp.LastEcho,
		Times:    resp.Times,
		Msg:      resp.Msg,
	}, nil
}
package main

import "fmt"

// main exercises basic Go control flow — if/else chains, switch, counting
// loops and array iteration. The println/print builtins write to stderr and
// fmt.Printf writes to stdout, exactly as in the original.
func main() {
	n := 10
	if n == 20 {
		println("ten equals 20")
	} else {
		println("ten equals something else")
	}

	// Both operands of firstChoice are compile-time false, so "Option 2"
	// is always printed.
	firstChoice := "a" == "bb" || (true && 1 > 10)
	if firstChoice {
		println("Option 1")
	} else if true {
		println("Option 2")
	} else {
		println("Option 3")
	}

	switch v := 3; v {
	case 1:
		println(1)
	case 2:
		println(2)
	default:
		println(3)
	}

	// Print the integers 1 through 99 with no separators.
	for counter := 1; counter < 100; counter++ {
		print(counter)
	}

	var arr [3]int
	for i := range arr {
		arr[i] = i * 10
	}
	for idx, val := range arr {
		fmt.Printf("\n\nIndex is %d, value is %d", idx, val)
	}
}
package domain var () // ServiceInstanceAlreadyExistsError is an error type used to //indicate that this service instance has already been // provisioned. type ServiceInstanceAlreadyExistsError string // Error returns a string representation of the error message. func (e ServiceInstanceAlreadyExistsError) Error() string { return string(e) } // ServiceInstanceNotFoundError is an error type used to indicate // that the service instance requested for deprovisioning cannot // be found. type ServiceInstanceNotFoundError string // Error returns a string representation of the error message. func (e ServiceInstanceNotFoundError) Error() string { return string(e) } // ServiceBindingAlreadyExistsError is an error type used to // indicate that this service binding already exists. type ServiceBindingAlreadyExistsError string // Error returns a string representation of the error message. func (e ServiceBindingAlreadyExistsError) Error() string { return string(e) } // ServiceBindingNotFoundError is an error type used to indicate // that the service binding requested for unbinding cannot be // found. type ServiceBindingNotFoundError string // Error returns a string representation of the error message. func (s ServiceBindingNotFoundError) Error() string { return "The service binding was not found." }
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server

import (
	"context"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
	betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/vertexai/beta/vertexai_beta_go_proto"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vertexai/beta"
)

// NOTE(review): this file follows the DCL generated-code converter pattern
// (paired ProtoTo.../...ToProto functions per nested message); edits should
// preserve that symmetry.

// EndpointServer implements the gRPC interface for Endpoint.
type EndpointServer struct{}

// ProtoToEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum converts a EndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum enum from its proto representation.
// The enum's proto name is stripped of its "VertexaiBeta..." prefix to obtain the DCL enum value; 0 (unspecified) maps to nil.
func ProtoToVertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum(e betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum) *beta.EndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum_name[int32(e)]; ok {
		e := beta.EndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum(n[len("VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToEndpointDeployedModels converts a EndpointDeployedModels object from its proto representation.
func ProtoToVertexaiBetaEndpointDeployedModels(p *betapb.VertexaiBetaEndpointDeployedModels) *beta.EndpointDeployedModels {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointDeployedModels{
		DedicatedResources:     ProtoToVertexaiBetaEndpointDeployedModelsDedicatedResources(p.GetDedicatedResources()),
		AutomaticResources:     ProtoToVertexaiBetaEndpointDeployedModelsAutomaticResources(p.GetAutomaticResources()),
		Id:                     dcl.StringOrNil(p.GetId()),
		Model:                  dcl.StringOrNil(p.GetModel()),
		ModelVersionId:         dcl.StringOrNil(p.GetModelVersionId()),
		DisplayName:            dcl.StringOrNil(p.GetDisplayName()),
		CreateTime:             dcl.StringOrNil(p.GetCreateTime()),
		ServiceAccount:         dcl.StringOrNil(p.GetServiceAccount()),
		EnableAccessLogging:    dcl.Bool(p.GetEnableAccessLogging()),
		PrivateEndpoints:       ProtoToVertexaiBetaEndpointDeployedModelsPrivateEndpoints(p.GetPrivateEndpoints()),
		SharedResources:        dcl.StringOrNil(p.GetSharedResources()),
		EnableContainerLogging: dcl.Bool(p.GetEnableContainerLogging()),
	}
	return obj
}

// ProtoToEndpointDeployedModelsDedicatedResources converts a EndpointDeployedModelsDedicatedResources object from its proto representation.
func ProtoToVertexaiBetaEndpointDeployedModelsDedicatedResources(p *betapb.VertexaiBetaEndpointDeployedModelsDedicatedResources) *beta.EndpointDeployedModelsDedicatedResources {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointDeployedModelsDedicatedResources{
		MachineSpec:     ProtoToVertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpec(p.GetMachineSpec()),
		MinReplicaCount: dcl.Int64OrNil(p.GetMinReplicaCount()),
		MaxReplicaCount: dcl.Int64OrNil(p.GetMaxReplicaCount()),
	}
	for _, r := range p.GetAutoscalingMetricSpecs() {
		obj.AutoscalingMetricSpecs = append(obj.AutoscalingMetricSpecs, *ProtoToVertexaiBetaEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs(r))
	}
	return obj
}

// ProtoToEndpointDeployedModelsDedicatedResourcesMachineSpec converts a EndpointDeployedModelsDedicatedResourcesMachineSpec object from its proto representation.
func ProtoToVertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpec(p *betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpec) *beta.EndpointDeployedModelsDedicatedResourcesMachineSpec {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointDeployedModelsDedicatedResourcesMachineSpec{
		MachineType:      dcl.StringOrNil(p.GetMachineType()),
		AcceleratorType:  ProtoToVertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum(p.GetAcceleratorType()),
		AcceleratorCount: dcl.Int64OrNil(p.GetAcceleratorCount()),
	}
	return obj
}

// ProtoToEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs converts a EndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs object from its proto representation.
func ProtoToVertexaiBetaEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs(p *betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs) *beta.EndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs{
		MetricName: dcl.StringOrNil(p.GetMetricName()),
		Target:     dcl.Int64OrNil(p.GetTarget()),
	}
	return obj
}

// ProtoToEndpointDeployedModelsAutomaticResources converts a EndpointDeployedModelsAutomaticResources object from its proto representation.
func ProtoToVertexaiBetaEndpointDeployedModelsAutomaticResources(p *betapb.VertexaiBetaEndpointDeployedModelsAutomaticResources) *beta.EndpointDeployedModelsAutomaticResources {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointDeployedModelsAutomaticResources{
		MinReplicaCount: dcl.Int64OrNil(p.GetMinReplicaCount()),
		MaxReplicaCount: dcl.Int64OrNil(p.GetMaxReplicaCount()),
	}
	return obj
}

// ProtoToEndpointDeployedModelsPrivateEndpoints converts a EndpointDeployedModelsPrivateEndpoints object from its proto representation.
func ProtoToVertexaiBetaEndpointDeployedModelsPrivateEndpoints(p *betapb.VertexaiBetaEndpointDeployedModelsPrivateEndpoints) *beta.EndpointDeployedModelsPrivateEndpoints {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointDeployedModelsPrivateEndpoints{
		PredictHttpUri:    dcl.StringOrNil(p.GetPredictHttpUri()),
		ExplainHttpUri:    dcl.StringOrNil(p.GetExplainHttpUri()),
		HealthHttpUri:     dcl.StringOrNil(p.GetHealthHttpUri()),
		ServiceAttachment: dcl.StringOrNil(p.GetServiceAttachment()),
	}
	return obj
}

// ProtoToEndpointEncryptionSpec converts a EndpointEncryptionSpec object from its proto representation.
func ProtoToVertexaiBetaEndpointEncryptionSpec(p *betapb.VertexaiBetaEndpointEncryptionSpec) *beta.EndpointEncryptionSpec {
	if p == nil {
		return nil
	}
	obj := &beta.EndpointEncryptionSpec{
		KmsKeyName: dcl.StringOrNil(p.GetKmsKeyName()),
	}
	return obj
}

// ProtoToEndpoint converts a Endpoint resource from its proto representation.
// NOTE(review): unlike the nested-object converters above, this function has
// no nil guard for p — confirm callers never pass a nil resource.
func ProtoToEndpoint(p *betapb.VertexaiBetaEndpoint) *beta.Endpoint {
	obj := &beta.Endpoint{
		Name:                         dcl.StringOrNil(p.GetName()),
		DisplayName:                  dcl.StringOrNil(p.GetDisplayName()),
		Description:                  dcl.StringOrNil(p.GetDescription()),
		Etag:                         dcl.StringOrNil(p.GetEtag()),
		CreateTime:                   dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:                   dcl.StringOrNil(p.GetUpdateTime()),
		EncryptionSpec:               ProtoToVertexaiBetaEndpointEncryptionSpec(p.GetEncryptionSpec()),
		Network:                      dcl.StringOrNil(p.GetNetwork()),
		ModelDeploymentMonitoringJob: dcl.StringOrNil(p.GetModelDeploymentMonitoringJob()),
		Project:                      dcl.StringOrNil(p.GetProject()),
		Location:                     dcl.StringOrNil(p.GetLocation()),
	}
	for _, r := range p.GetDeployedModels() {
		obj.DeployedModels = append(obj.DeployedModels, *ProtoToVertexaiBetaEndpointDeployedModels(r))
	}
	return obj
}

// EndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnumToProto converts a EndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum enum to its proto representation.
func VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnumToProto(e *beta.EndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum) betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum {
	if e == nil {
		return betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum(0)
	}
	if v, ok := betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum_value["EndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum"+string(*e)]; ok {
		return betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum(v)
	}
	return betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnum(0)
}

// EndpointDeployedModelsToProto converts a EndpointDeployedModels object to its proto representation.
func VertexaiBetaEndpointDeployedModelsToProto(o *beta.EndpointDeployedModels) *betapb.VertexaiBetaEndpointDeployedModels {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaEndpointDeployedModels{}
	p.SetDedicatedResources(VertexaiBetaEndpointDeployedModelsDedicatedResourcesToProto(o.DedicatedResources))
	p.SetAutomaticResources(VertexaiBetaEndpointDeployedModelsAutomaticResourcesToProto(o.AutomaticResources))
	p.SetId(dcl.ValueOrEmptyString(o.Id))
	p.SetModel(dcl.ValueOrEmptyString(o.Model))
	p.SetModelVersionId(dcl.ValueOrEmptyString(o.ModelVersionId))
	p.SetDisplayName(dcl.ValueOrEmptyString(o.DisplayName))
	p.SetCreateTime(dcl.ValueOrEmptyString(o.CreateTime))
	p.SetServiceAccount(dcl.ValueOrEmptyString(o.ServiceAccount))
	p.SetEnableAccessLogging(dcl.ValueOrEmptyBool(o.EnableAccessLogging))
	p.SetPrivateEndpoints(VertexaiBetaEndpointDeployedModelsPrivateEndpointsToProto(o.PrivateEndpoints))
	p.SetSharedResources(dcl.ValueOrEmptyString(o.SharedResources))
	p.SetEnableContainerLogging(dcl.ValueOrEmptyBool(o.EnableContainerLogging))
	return p
}

// EndpointDeployedModelsDedicatedResourcesToProto converts a EndpointDeployedModelsDedicatedResources object to its proto representation.
func VertexaiBetaEndpointDeployedModelsDedicatedResourcesToProto(o *beta.EndpointDeployedModelsDedicatedResources) *betapb.VertexaiBetaEndpointDeployedModelsDedicatedResources {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaEndpointDeployedModelsDedicatedResources{}
	p.SetMachineSpec(VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecToProto(o.MachineSpec))
	p.SetMinReplicaCount(dcl.ValueOrEmptyInt64(o.MinReplicaCount))
	p.SetMaxReplicaCount(dcl.ValueOrEmptyInt64(o.MaxReplicaCount))
	sAutoscalingMetricSpecs := make([]*betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs, len(o.AutoscalingMetricSpecs))
	for i, r := range o.AutoscalingMetricSpecs {
		sAutoscalingMetricSpecs[i] = VertexaiBetaEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsToProto(&r)
	}
	p.SetAutoscalingMetricSpecs(sAutoscalingMetricSpecs)
	return p
}

// EndpointDeployedModelsDedicatedResourcesMachineSpecToProto converts a EndpointDeployedModelsDedicatedResourcesMachineSpec object to its proto representation.
func VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecToProto(o *beta.EndpointDeployedModelsDedicatedResourcesMachineSpec) *betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpec {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpec{}
	p.SetMachineType(dcl.ValueOrEmptyString(o.MachineType))
	p.SetAcceleratorType(VertexaiBetaEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorTypeEnumToProto(o.AcceleratorType))
	p.SetAcceleratorCount(dcl.ValueOrEmptyInt64(o.AcceleratorCount))
	return p
}

// EndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsToProto converts a EndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs object to its proto representation.
func VertexaiBetaEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsToProto(o *beta.EndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs) *betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs{}
	p.SetMetricName(dcl.ValueOrEmptyString(o.MetricName))
	p.SetTarget(dcl.ValueOrEmptyInt64(o.Target))
	return p
}

// EndpointDeployedModelsAutomaticResourcesToProto converts a EndpointDeployedModelsAutomaticResources object to its proto representation.
func VertexaiBetaEndpointDeployedModelsAutomaticResourcesToProto(o *beta.EndpointDeployedModelsAutomaticResources) *betapb.VertexaiBetaEndpointDeployedModelsAutomaticResources {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaEndpointDeployedModelsAutomaticResources{}
	p.SetMinReplicaCount(dcl.ValueOrEmptyInt64(o.MinReplicaCount))
	p.SetMaxReplicaCount(dcl.ValueOrEmptyInt64(o.MaxReplicaCount))
	return p
}

// EndpointDeployedModelsPrivateEndpointsToProto converts a EndpointDeployedModelsPrivateEndpoints object to its proto representation.
func VertexaiBetaEndpointDeployedModelsPrivateEndpointsToProto(o *beta.EndpointDeployedModelsPrivateEndpoints) *betapb.VertexaiBetaEndpointDeployedModelsPrivateEndpoints {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaEndpointDeployedModelsPrivateEndpoints{}
	p.SetPredictHttpUri(dcl.ValueOrEmptyString(o.PredictHttpUri))
	p.SetExplainHttpUri(dcl.ValueOrEmptyString(o.ExplainHttpUri))
	p.SetHealthHttpUri(dcl.ValueOrEmptyString(o.HealthHttpUri))
	p.SetServiceAttachment(dcl.ValueOrEmptyString(o.ServiceAttachment))
	return p
}

// EndpointEncryptionSpecToProto converts a EndpointEncryptionSpec object to its proto representation.
func VertexaiBetaEndpointEncryptionSpecToProto(o *beta.EndpointEncryptionSpec) *betapb.VertexaiBetaEndpointEncryptionSpec {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaEndpointEncryptionSpec{}
	p.SetKmsKeyName(dcl.ValueOrEmptyString(o.KmsKeyName))
	return p
}

// EndpointToProto converts a Endpoint resource to its proto representation.
func EndpointToProto(resource *beta.Endpoint) *betapb.VertexaiBetaEndpoint {
	p := &betapb.VertexaiBetaEndpoint{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetDisplayName(dcl.ValueOrEmptyString(resource.DisplayName))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetEtag(dcl.ValueOrEmptyString(resource.Etag))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	p.SetEncryptionSpec(VertexaiBetaEndpointEncryptionSpecToProto(resource.EncryptionSpec))
	p.SetNetwork(dcl.ValueOrEmptyString(resource.Network))
	p.SetModelDeploymentMonitoringJob(dcl.ValueOrEmptyString(resource.ModelDeploymentMonitoringJob))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	sDeployedModels := make([]*betapb.VertexaiBetaEndpointDeployedModels, len(resource.DeployedModels))
	for i, r := range resource.DeployedModels {
		sDeployedModels[i] = VertexaiBetaEndpointDeployedModelsToProto(&r)
	}
	p.SetDeployedModels(sDeployedModels)
	mLabels := make(map[string]string, len(resource.Labels))
	for k, r := range resource.Labels {
		mLabels[k] = r
	}
	p.SetLabels(mLabels)
	return p
}

// applyEndpoint handles the gRPC request by passing it to the underlying Endpoint Apply() method.
func (s *EndpointServer) applyEndpoint(ctx context.Context, c *beta.Client, request *betapb.ApplyVertexaiBetaEndpointRequest) (*betapb.VertexaiBetaEndpoint, error) {
	p := ProtoToEndpoint(request.GetResource())
	res, err := c.ApplyEndpoint(ctx, p)
	if err != nil {
		return nil, err
	}
	r := EndpointToProto(res)
	return r, nil
}

// applyVertexaiBetaEndpoint handles the gRPC request by passing it to the underlying Endpoint Apply() method.
func (s *EndpointServer) ApplyVertexaiBetaEndpoint(ctx context.Context, request *betapb.ApplyVertexaiBetaEndpointRequest) (*betapb.VertexaiBetaEndpoint, error) {
	cl, err := createConfigEndpoint(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyEndpoint(ctx, cl, request)
}

// DeleteEndpoint handles the gRPC request by passing it to the underlying Endpoint Delete() method.
func (s *EndpointServer) DeleteVertexaiBetaEndpoint(ctx context.Context, request *betapb.DeleteVertexaiBetaEndpointRequest) (*emptypb.Empty, error) {
	cl, err := createConfigEndpoint(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteEndpoint(ctx, ProtoToEndpoint(request.GetResource()))
}

// ListVertexaiBetaEndpoint handles the gRPC request by passing it to the underlying EndpointList() method.
func (s *EndpointServer) ListVertexaiBetaEndpoint(ctx context.Context, request *betapb.ListVertexaiBetaEndpointRequest) (*betapb.ListVertexaiBetaEndpointResponse, error) {
	cl, err := createConfigEndpoint(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListEndpoint(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	var protos []*betapb.VertexaiBetaEndpoint
	for _, r := range resources.Items {
		rp := EndpointToProto(r)
		protos = append(protos, rp)
	}
	p := &betapb.ListVertexaiBetaEndpointResponse{}
	p.SetItems(protos)
	return p, nil
}

// createConfigEndpoint builds a beta client configured with the given
// service-account credentials file.
// NOTE(review): service_account_file is not idiomatic Go naming
// (mixedCaps preferred); left unchanged since this file follows a
// generated-code template.
func createConfigEndpoint(ctx context.Context, service_account_file string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return beta.NewClient(conf), nil
}
package connlib

import (
	"bufio"
	"os"
	"regexp"
	"strconv"
)

// filterConnections returns the subset of connections for which filter
// returns true, preserving the input order.
func filterConnections(connections []Connection, filter func(Connection) bool) []Connection {
	filteredConnections := []Connection{}
	for _, conn := range connections {
		if filter(conn) {
			filteredConnections = append(filteredConnections, conn)
		}
	}
	return filteredConnections
}

// portRangeSplitRE splits the two whitespace-separated fields of
// /proc/sys/net/ipv4/ip_local_port_range (compiled once at package scope).
var portRangeSplitRE = regexp.MustCompile(`\s+`)

// ephemeralPortRange caches the inclusive [low, high] local (ephemeral)
// port range; nil until the first call to isEphemeralPort.
var ephemeralPortRange []uint16

// isEphemeralPort reports whether port lies within the kernel's local
// (ephemeral) port range. The range is read once from
// /proc/sys/net/ipv4/ip_local_port_range; the Linux defaults 32768-60999
// are used if the file cannot be read or parsed.
func isEphemeralPort(port uint16) bool {
	if ephemeralPortRange == nil {
		// Seed with the Linux defaults; values parsed from /proc may
		// override them below.
		ephemeralPortRange = []uint16{32768, 60999}

		// Read the local port range (best-effort; keep defaults on error).
		if file, err := os.Open("/proc/sys/net/ipv4/ip_local_port_range"); err == nil {
			if line, _, err := bufio.NewReader(file).ReadLine(); err == nil {
				fields := portRangeSplitRE.Split(string(line), 2)
				// fix: the original tested err != nil, so the parsed value
				// was only stored when parsing FAILED (storing 0) and good
				// values were discarded.
				if lo, err := strconv.Atoi(fields[0]); err == nil {
					ephemeralPortRange[0] = uint16(lo)
				}
				// fix: guard against a one-field line, which previously
				// indexed out of range.
				if len(fields) > 1 {
					if hi, err := strconv.Atoi(fields[1]); err == nil {
						ephemeralPortRange[1] = uint16(hi)
					}
				}
			}
			// fix: the original never closed the file handle.
			file.Close()
		}
	}
	return port >= ephemeralPortRange[0] && port <= ephemeralPortRange[1]
}
package utils

import (
	"os"
	"path"
	"path/filepath"
	"runtime"

	hocon "github.com/go-akka/configuration"
	log "github.com/sirupsen/logrus"
)

// HoconConfig encapsulates application's configurations in HOCON format
type HoconConfig struct {
	File string        // config file
	Conf *hocon.Config // configurations
}

const (
	// defaultConfigFile is used when APP_CONFIG is unset; it is resolved
	// relative to the parent directory of this package's source file.
	defaultConfigFile = "/config/application.conf"
)

// loadAppConfig parses the HOCON file at the given path and returns the
// populated HoconConfig. It temporarily changes the working directory to
// the file's directory (so relative includes resolve) and restores it
// before returning. It panics if the working directory cannot be
// determined or changed.
func loadAppConfig(file string) *HoconConfig {
	dir, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	// Restore the original working directory on the way out; a silent
	// failure here would corrupt later relative-path operations, so log it.
	defer func() {
		if err := os.Chdir(dir); err != nil {
			log.Errorf("Failed to restore working directory [%s]: %v", dir, err)
		}
	}()

	config := HoconConfig{}
	log.Infof("Loading configurations from file [%s]", file)
	confDir, confFile := path.Split(file)
	// fix: the original ignored the Chdir error, which made the subsequent
	// LoadConfig fail later with a confusing "file not found". An empty
	// confDir means the file is in the current directory; no chdir needed.
	if confDir != "" {
		if err := os.Chdir(confDir); err != nil {
			panic(err)
		}
	}
	config.File = file
	config.Conf = hocon.LoadConfig(confFile)
	return &config
}

// InitAppConfig loads the application configuration into the package-level
// AppConfig variable, using the APP_CONFIG environment variable when set
// and falling back to defaultConfigFile (relative to the source tree)
// otherwise.
func InitAppConfig() {
	configFile := os.Getenv("APP_CONFIG")
	if configFile == "" {
		log.Infof("No environment APP_CONFIG found, fallback to [%s]", defaultConfigFile)
		// Locate this source file (recorded at build time) and derive the
		// repository root from it — one directory above this package.
		_, b, _, _ := runtime.Caller(0)
		d := path.Join(path.Dir(b))
		configFile = filepath.Dir(d) + defaultConfigFile
	}
	AppConfig = loadAppConfig(configFile)
}
package calc

import (
	"gonum.org/v1/plot"
	"gonum.org/v1/plot/plotter"
	"gonum.org/v1/plot/vg"
	"image/color"
)

/*
Based on: https://github.com/gonum/plot/wiki/Example-plots
*/

// CreateGraph plots the function f over x in [-5, 10] with y clipped to
// [0, 10] and saves the result as "functions.png" in the current working
// directory. It panics if plot creation or saving fails.
//
// NOTE(review): the legend entry is hard-coded to "x^2" even though f is an
// arbitrary function — confirm whether the label should follow f.
func CreateGraph(f func(float64) float64) {
	p, err := plot.New()
	if err != nil {
		panic(err)
	}

	p.Title.Text = "Functions"
	p.X.Label.Text = "X"
	p.Y.Label.Text = "Y"

	// Wrap f as a plottable function, drawn as a blue line.
	quad := plotter.NewFunction(f)
	quad.Color = color.RGBA{B: 255, A: 255}

	// Add the functions and their legend entries.
	p.Add(quad)
	p.Legend.Add("x^2", quad)
	//p.Legend.ThumbnailWidth = 0.5 * vg.Inch
	p.Legend.ThumbnailWidth = 1.0 * vg.Inch

	// Set the axis ranges. Unlike other data sets,
	// functions don't set the axis ranges automatically
	// since functions don't necessarily have a
	// finite range of x and y values.
	p.X.Min = -5
	p.X.Max = 10
	p.Y.Min = 0
	p.Y.Max = 10

	// Save the plot to a PNG file.
	if err := p.Save(4*vg.Inch, 4*vg.Inch, "functions.png"); err != nil {
		panic(err)
	}
}
package impl2

import "github.com/sko00o/leetcode-adventure/queue-stack/queue"

/*
Notes:

Simulating a stack with queues: using two queues, Q and tmp. On push, the
new element is enqueued into tmp first, then every element of Q is dequeued
and enqueued into tmp, and finally Q and tmp are swapped. On pop, all
elements are kept in Q (which keeps maintenance simple) and we dequeue
directly from Q.
*/

// Queue is a FIFO Data Structure.
type Queue struct {
	queue.SliceQueue
}

// MyStack is a stack using queue.
// Q always holds every element in LIFO order (newest at the front);
// tmp is a scratch queue used only during Push.
type MyStack struct {
	Q   *Queue
	tmp *Queue
}

// Constructor return MyStack object.
func Constructor() MyStack {
	return MyStack{
		Q:   &Queue{},
		tmp: &Queue{},
	}
}

// Push element x onto stack.
// The new element goes into tmp first, then the existing elements follow,
// so after the swap the newest element sits at the front of Q.
// time complexity: O(n)
// space complexity : O(1)
func (s *MyStack) Push(x int) {
	s.tmp.EnQueue(x)
	for !s.Q.IsEmpty() {
		s.tmp.EnQueue(s.Q.Front())
		s.Q.DeQueue()
	}
	s.Q, s.tmp = s.tmp, s.Q
}

// Pop removes the element on top of the stack and returns that element.
// Returns -1 when the stack is empty.
// time complexity: O(1)
// space complexity : O(1)
func (s *MyStack) Pop() int {
	if s.Q.IsEmpty() {
		return -1
	}
	res := s.Q.Front().(int)
	s.Q.DeQueue()
	return res
}

// Top get the top element.
// Returns -1 when the stack is empty.
// time complexity: O(1)
// space complexity : O(1)
func (s *MyStack) Top() int {
	if s.Q.IsEmpty() {
		return -1
	}
	return s.Q.Front().(int)
}

// Empty returns whether the queue is empty.
// time complexity: O(1)
// space complexity : O(1)
func (s *MyStack) Empty() bool {
	return s.Q.IsEmpty()
}
package donothing

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
)

// A Procedure is a sequence of Steps that can be executed or rendered to markdown.
type Procedure struct {
	// The root step of the procedure, of which all other steps are descendants.
	rootStep *Step

	stdin  io.Reader
	stdout io.Writer
}

// Short provides the procedure with a short description.
//
// The short description will be the title of the rendered markdown document when Render is called,
// so it should be concise and accurate.
func (pcd *Procedure) Short(s string) {
	pcd.rootStep.Short(s)
}

// GetShort returns the procedure's short description.
func (pcd *Procedure) GetShort() string {
	return pcd.rootStep.GetShort()
}

// Long provides the procedure with a long description.
//
// The long description will be shown to the user when they first execute the procedure. It will
// also be included in the opening section of the procedure's Markdown documentation.
//
// It should give an overview of the procedure's purpose and any important assumptions the procedure
// makes about the state of the world at the beginning of execution.
func (pcd *Procedure) Long(s string) {
	pcd.rootStep.Long(s)
}

// AddStep adds a step to the procedure.
//
// A new Step will be instantiated and passed to fn to be defined.
func (pcd *Procedure) AddStep(fn func(*Step)) {
	pcd.rootStep.AddStep(fn)
}

// GetStepByName returns the step with the given (absolute) name.
func (pcd *Procedure) GetStepByName(stepName string) (*Step, error) {
	var foundStep *Step
	err := pcd.rootStep.Walk(func(step *Step) error {
		// fix: local was previously misspelled "absNmae", with a stale
		// commented-out duplicate of this comparison left behind.
		absName := step.AbsoluteName()
		if absName == stepName {
			foundStep = step
			// Return a sentinel error to end the walk early. This error is
			// ignored below because foundStep has been set.
			return fmt.Errorf("")
		}
		return nil
	})
	if foundStep != nil {
		return foundStep, nil
	}
	if err != nil {
		return nil, err
	}
	return nil, fmt.Errorf("No step with name '%s'", stepName)
}

// Check validates that the procedure makes sense.
//
// If problems are found, it returns the list of problems along with an error.
//
// It checks the procedure against the following expectations:
//
//   1. Every step has a unique absolute name with no empty parts.
//   2. Every step has a short description
//   3. Every input has a name that matches the name of an output from a previous step.
func (pcd *Procedure) Check() ([]string, error) {
	steps := make(map[string]*Step)
	outputs := make(map[string]OutputDef)
	problems := make([]string, 0)
	err := pcd.rootStep.Walk(func(step *Step) error {
		absName := step.AbsoluteName()
		// A trailing "." means the final name component is empty.
		if absName[len(absName)-1:] == "." {
			if step.parent == nil {
				// I really hope this never happens. The root step should get its name from
				// donothing, not from the calling code.
				problems = append(problems, "Root step does not have name")
			} else {
				problems = append(problems, fmt.Sprintf("Child step of '%s' does not have name", step.parent.AbsoluteName()))
			}
		}

		if steps[absName] != nil {
			problems = append(problems, fmt.Sprintf("More than one step with name '%s'", absName))
		}
		// fix: reuse the absName computed above instead of calling
		// step.AbsoluteName() a second time.
		steps[absName] = step

		if step.GetShort() == "" {
			problems = append(problems, fmt.Sprintf("Step '%s' has no Short value", absName))
		}

		for _, inputDef := range step.GetInputDefs() {
			matchingOutputDef, ok := outputs[inputDef.Name]
			if !ok {
				problems = append(problems, fmt.Sprintf(
					"Input '%s' of step '%s' does not refer to an output from any previous step",
					inputDef.Name,
					absName,
				))
				continue
			}
			if matchingOutputDef.ValueType != inputDef.ValueType {
				problems = append(problems, fmt.Sprintf(
					"Input '%s' of step '%s' has type '%s', but output '%s' has type '%s'",
					inputDef.Name,
					absName,
					inputDef.ValueType,
					matchingOutputDef.Name,
					matchingOutputDef.ValueType,
				))
			}
		}

		for _, outputDef := range step.GetOutputDefs() {
			outputs[outputDef.Name] = outputDef
		}

		return nil
	})
	if err != nil {
		return []string{}, fmt.Errorf("Error while checking procedure: %w", err)
	}

	if len(problems) > 0 {
		return problems, errors.New("Problems were found in the procedure")
	}
	return []string{}, nil
}

// Render prints the procedure's Markdown representation to f.
//
// Any occurrence of the string "@@" in the executed template output will be replaced with a
// backtick.
func (pcd *Procedure) Render(f io.Writer) error {
	return pcd.RenderStep(f, "root")
}

// RenderStep prints the given step from the procedure as Markdown to f.
//
// Any occurrence of the string "@@" in the executed template output will be replaced with a
// backtick.
func (pcd *Procedure) RenderStep(f io.Writer, stepName string) error {
	if _, err := pcd.Check(); err != nil {
		return err
	}

	tpl, err := DocTemplate()
	if err != nil {
		return err
	}

	step, err := pcd.GetStepByName(stepName)
	if err != nil {
		return err
	}

	tplData := NewStepTemplateData(step, nil, true)
	var b strings.Builder
	err = tpl.Execute(&b, tplData)
	if err != nil {
		return err
	}

	fmt.Fprintf(f, "%s", strings.Replace(b.String(), "@@", "`", -1))
	return nil
}

// Execute runs through the procedure step by step.
//
// The user will be prompted as necessary.
func (pcd *Procedure) Execute() error {
	return pcd.ExecuteStep("root")
}

// ExecuteStep runs through the given step.
//
// The user will be prompted as necessary.
func (pcd *Procedure) ExecuteStep(stepName string) error { if _, err := pcd.Check(); err != nil { return err } step, err := pcd.GetStepByName(stepName) if err != nil { return err } tpl, err := ExecTemplate() if err != nil { return err } step, err = pcd.GetStepByName(stepName) if err != nil { return err } var skipTo string step.Walk(func(walkStep *Step) error { if skipTo != "" && walkStep.AbsoluteName() != skipTo { fmt.Fprintf(pcd.stdout, "Skipping step '%s' on the way to '%s'\n", walkStep.AbsoluteName(), skipTo) return nil } tplData := NewStepTemplateData(walkStep, nil, false) var b bytes.Buffer err = tpl.Execute(&b, tplData) if err != nil { return err } fmt.Fprintf(pcd.stdout, "%s", strings.Replace(b.String(), "@@", "`", -1)) promptResult := pcd.prompt() if promptResult.SkipOne { fmt.Fprintf(pcd.stdout, "Skipping step '%s' and its descendants\n", walkStep.AbsoluteName()) return NoRecurse } skipTo = promptResult.SkipTo return nil }) fmt.Fprintln(pcd.stdout, "Done.") return nil } // promptResult is the struct returned by Procedure.prompt. // // Procedure.Execute uses the contents of a promptResult to decide what to do next. type promptResult struct { // Whether to skip this step and its descendants. SkipOne bool // The absolute name of the next step that should be executed. // // If empty, Execute should proceed normally in its walk. SkipTo string } // prompt prompts the user for the next action to take. // // If the user enters an invalid choice, prompt will inform them of this and re-prompt until a valid // choice is entered. func (pcd *Procedure) prompt() promptResult { // promptOnce prompts the user for input. It returns their input, trimmed of leading and // trailing whitespace. 
promptOnce := func() (string, error) { fmt.Fprintf(pcd.stdout, "\n\n[Enter] to proceed (or \"help\"): ") entry, err := bufio.NewReader(pcd.stdin).ReadBytes('\n') fmt.Fprintf(pcd.stdout, "\n") return strings.TrimSpace(string(entry)), err } for { entry, err := promptOnce() if err != nil { fmt.Fprintf(pcd.stdout, "Error reading input: %s\n", err.Error()) continue } if entry == "" { // Proceed to the next step as normal return promptResult{} } if entry == "help" { // Print the help message and prompt again pcd.printPromptHelp() } if entry == "skip" { return promptResult{SkipOne: true} } if strings.HasPrefix(entry, "skipto ") { parts := strings.Split(entry, " ") if len(parts) != 2 || len(parts[1]) == 0 { fmt.Fprintf(pcd.stdout, "Invalid 'skipto' syntax; enter \"help\" for help\n") } return promptResult{SkipTo: parts[1]} } fmt.Fprintf(pcd.stdout, "Invalid choice; enter \"help\" for help\n") } } // printPromptHelp prints the help message for the Execute prompt. func (pcd *Procedure) printPromptHelp() { fmt.Fprintf(pcd.stdout, `Options: [Enter] Proceed to the next step skip Skip this step and its descendants skipto STEP Skip to the given step by absolute name help Print this help message`) } // NewProcedure returns a new procedure, ready to be given steps. func NewProcedure() *Procedure { pcd := new(Procedure) pcd.rootStep = NewStep() pcd.rootStep.Name("root") pcd.stdin = os.Stdin pcd.stdout = os.Stdout return pcd }
package vespa

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"math"
	"net/http"
	"sort"
	"strconv"
	"time"

	"github.com/vespa-engine/vespa/client/go/auth/auth0"
	"github.com/vespa-engine/vespa/client/go/auth/zts"
	"github.com/vespa-engine/vespa/client/go/util"
	"github.com/vespa-engine/vespa/client/go/version"
)

// APIOptions configures URL and authentication for a cloud target.
// (fix: comment previously referred to a nonexistent "CloudOptions" type)
type APIOptions struct {
	System         System
	TLSOptions     TLSOptions
	APIKey         []byte
	AuthConfigPath string
}

// CloudDeploymentOptions configures the deployment to manage through a cloud target.
type CloudDeploymentOptions struct {
	Deployment  Deployment
	TLSOptions  TLSOptions
	ClusterURLs map[string]string // Endpoints keyed on cluster name
}

// cloudTarget is the Target implementation for Vespa Cloud / hosted Vespa.
type cloudTarget struct {
	apiOptions        APIOptions
	deploymentOptions CloudDeploymentOptions
	logOptions        LogOptions
	httpClient        util.HTTPClient
	ztsClient         ztsClient
}

// deploymentEndpoint is one endpoint entry in a deployment response.
type deploymentEndpoint struct {
	Cluster string `json:"cluster"`
	URL     string `json:"url"`
	Scope   string `json:"scope"`
}

// deploymentResponse is the JSON body describing a deployment's endpoints.
type deploymentResponse struct {
	Endpoints []deploymentEndpoint `json:"endpoints"`
}

// jobResponse is the JSON body describing a deployment job's status and log.
type jobResponse struct {
	Active bool                    `json:"active"`
	Status string                  `json:"status"`
	Log    map[string][]logMessage `json:"log"`
	LastID int64                   `json:"lastId"`
}

// logMessage is a single entry in a deployment job log.
type logMessage struct {
	At      int64  `json:"at"`
	Type    string `json:"type"`
	Message string `json:"message"`
}

// ztsClient obtains access tokens from ZTS for a given Athenz domain and
// client certificate.
type ztsClient interface {
	// fix: parameter name typo "certficiate" corrected; interface method
	// parameter names are documentation-only, so implementers are unaffected.
	AccessToken(domain string, certificate tls.Certificate) (string, error)
}

// CloudTarget creates a Target for the Vespa Cloud or hosted Vespa platform.
func CloudTarget(httpClient util.HTTPClient, apiOptions APIOptions, deploymentOptions CloudDeploymentOptions, logOptions LogOptions) (Target, error) { ztsClient, err := zts.NewClient(zts.DefaultURL, httpClient) if err != nil { return nil, err } return &cloudTarget{ httpClient: httpClient, apiOptions: apiOptions, deploymentOptions: deploymentOptions, logOptions: logOptions, ztsClient: ztsClient, }, nil } func (t *cloudTarget) resolveEndpoint(cluster string) (string, error) { if cluster == "" { for _, u := range t.deploymentOptions.ClusterURLs { if len(t.deploymentOptions.ClusterURLs) == 1 { return u, nil } else { return "", fmt.Errorf("multiple clusters, none chosen: %v", t.deploymentOptions.ClusterURLs) } } } else { u := t.deploymentOptions.ClusterURLs[cluster] if u == "" { clusters := make([]string, len(t.deploymentOptions.ClusterURLs)) for c := range t.deploymentOptions.ClusterURLs { clusters = append(clusters, c) } return "", fmt.Errorf("unknown cluster '%s': must be one of %v", cluster, clusters) } return u, nil } return "", fmt.Errorf("no endpoints") } func (t *cloudTarget) Type() string { switch t.apiOptions.System.Name { case MainSystem.Name, CDSystem.Name: return TargetHosted } return TargetCloud } func (t *cloudTarget) Deployment() Deployment { return t.deploymentOptions.Deployment } func (t *cloudTarget) Service(name string, timeout time.Duration, runID int64, cluster string) (*Service, error) { switch name { case DeployService: service := &Service{ Name: name, BaseURL: t.apiOptions.System.URL, TLSOptions: t.apiOptions.TLSOptions, ztsClient: t.ztsClient, httpClient: t.httpClient, } if timeout > 0 { status, err := service.Wait(timeout) if err != nil { return nil, err } if !isOK(status) { return nil, fmt.Errorf("got status %d from deploy service at %s", status, service.BaseURL) } } return service, nil case QueryService, DocumentService: if t.deploymentOptions.ClusterURLs == nil { if err := t.waitForEndpoints(timeout, runID); err != nil { return nil, err } } 
url, err := t.resolveEndpoint(cluster) if err != nil { return nil, err } t.deploymentOptions.TLSOptions.AthenzDomain = t.apiOptions.System.AthenzDomain return &Service{ Name: name, BaseURL: url, TLSOptions: t.deploymentOptions.TLSOptions, ztsClient: t.ztsClient, httpClient: t.httpClient, }, nil } return nil, fmt.Errorf("unknown service: %s", name) } func (t *cloudTarget) SignRequest(req *http.Request, keyID string) error { if t.apiOptions.System.IsPublic() { if t.apiOptions.APIKey != nil { signer := NewRequestSigner(keyID, t.apiOptions.APIKey) return signer.SignRequest(req) } else { return t.addAuth0AccessToken(req) } } else { if t.apiOptions.TLSOptions.KeyPair.Certificate == nil { return fmt.Errorf("system %s requires a certificate for authentication", t.apiOptions.System.Name) } return nil } } func (t *cloudTarget) CheckVersion(clientVersion version.Version) error { if clientVersion.IsZero() { // development version is always fine return nil } req, err := http.NewRequest("GET", fmt.Sprintf("%s/cli/v1/", t.apiOptions.System.URL), nil) if err != nil { return err } response, err := t.httpClient.Do(req, 10*time.Second) if err != nil { return err } defer response.Body.Close() var cliResponse struct { MinVersion string `json:"minVersion"` } dec := json.NewDecoder(response.Body) if err := dec.Decode(&cliResponse); err != nil { return err } minVersion, err := version.Parse(cliResponse.MinVersion) if err != nil { return err } if clientVersion.Less(minVersion) { return fmt.Errorf("client version %s is less than the minimum supported version: %s", clientVersion, minVersion) } return nil } func (t *cloudTarget) addAuth0AccessToken(request *http.Request) error { a, err := auth0.GetAuth0(t.apiOptions.AuthConfigPath, t.apiOptions.System.Name, t.apiOptions.System.URL) if err != nil { return err } system, err := a.PrepareSystem(auth0.ContextWithCancel()) if err != nil { return err } request.Header.Set("Authorization", "Bearer "+system.AccessToken) return nil } func (t 
*cloudTarget) logsURL() string { return fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/environment/%s/region/%s/logs", t.apiOptions.System.URL, t.deploymentOptions.Deployment.Application.Tenant, t.deploymentOptions.Deployment.Application.Application, t.deploymentOptions.Deployment.Application.Instance, t.deploymentOptions.Deployment.Zone.Environment, t.deploymentOptions.Deployment.Zone.Region) } func (t *cloudTarget) PrintLog(options LogOptions) error { req, err := http.NewRequest("GET", t.logsURL(), nil) if err != nil { return err } lastFrom := options.From requestFunc := func() *http.Request { fromMillis := lastFrom.Unix() * 1000 q := req.URL.Query() q.Set("from", strconv.FormatInt(fromMillis, 10)) if !options.To.IsZero() { toMillis := options.To.Unix() * 1000 q.Set("to", strconv.FormatInt(toMillis, 10)) } req.URL.RawQuery = q.Encode() t.SignRequest(req, t.deploymentOptions.Deployment.Application.SerializedForm()) return req } logFunc := func(status int, response []byte) (bool, error) { if ok, err := isCloudOK(status); !ok { return ok, err } logEntries, err := ReadLogEntries(bytes.NewReader(response)) if err != nil { return true, err } for _, le := range logEntries { if !le.Time.After(lastFrom) { continue } if LogLevel(le.Level) > options.Level { continue } fmt.Fprintln(options.Writer, le.Format(options.Dequote)) } if len(logEntries) > 0 { lastFrom = logEntries[len(logEntries)-1].Time } return false, nil } var timeout time.Duration if options.Follow { timeout = math.MaxInt64 // No timeout } _, err = wait(t.httpClient, logFunc, requestFunc, &t.apiOptions.TLSOptions.KeyPair, timeout) return err } func (t *cloudTarget) waitForEndpoints(timeout time.Duration, runID int64) error { if runID > 0 { if err := t.waitForRun(runID, timeout); err != nil { return err } } return t.discoverEndpoints(timeout) } func (t *cloudTarget) waitForRun(runID int64, timeout time.Duration) error { runURL := 
fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/job/%s-%s/run/%d", t.apiOptions.System.URL, t.deploymentOptions.Deployment.Application.Tenant, t.deploymentOptions.Deployment.Application.Application, t.deploymentOptions.Deployment.Application.Instance, t.deploymentOptions.Deployment.Zone.Environment, t.deploymentOptions.Deployment.Zone.Region, runID) req, err := http.NewRequest("GET", runURL, nil) if err != nil { return err } lastID := int64(-1) requestFunc := func() *http.Request { q := req.URL.Query() q.Set("after", strconv.FormatInt(lastID, 10)) req.URL.RawQuery = q.Encode() if err := t.SignRequest(req, t.deploymentOptions.Deployment.Application.SerializedForm()); err != nil { panic(err) } return req } jobSuccessFunc := func(status int, response []byte) (bool, error) { if ok, err := isCloudOK(status); !ok { return ok, err } var resp jobResponse if err := json.Unmarshal(response, &resp); err != nil { return false, nil } if t.logOptions.Writer != nil { lastID = t.printLog(resp, lastID) } if resp.Active { return false, nil } if resp.Status != "success" { return false, fmt.Errorf("run %d ended with unsuccessful status: %s", runID, resp.Status) } return true, nil } _, err = wait(t.httpClient, jobSuccessFunc, requestFunc, &t.apiOptions.TLSOptions.KeyPair, timeout) return err } func (t *cloudTarget) printLog(response jobResponse, last int64) int64 { if response.LastID == 0 { return last } var msgs []logMessage for step, stepMsgs := range response.Log { for _, msg := range stepMsgs { if step == "copyVespaLogs" && LogLevel(msg.Type) > t.logOptions.Level || LogLevel(msg.Type) == 3 { continue } msgs = append(msgs, msg) } } sort.Slice(msgs, func(i, j int) bool { return msgs[i].At < msgs[j].At }) for _, msg := range msgs { tm := time.Unix(msg.At/1000, (msg.At%1000)*1000) fmtTime := tm.Format("15:04:05") fmt.Fprintf(t.logOptions.Writer, "[%s] %-7s %s\n", fmtTime, msg.Type, msg.Message) } return response.LastID } func (t *cloudTarget) 
discoverEndpoints(timeout time.Duration) error { deploymentURL := fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/environment/%s/region/%s", t.apiOptions.System.URL, t.deploymentOptions.Deployment.Application.Tenant, t.deploymentOptions.Deployment.Application.Application, t.deploymentOptions.Deployment.Application.Instance, t.deploymentOptions.Deployment.Zone.Environment, t.deploymentOptions.Deployment.Zone.Region) req, err := http.NewRequest("GET", deploymentURL, nil) if err != nil { return err } if err := t.SignRequest(req, t.deploymentOptions.Deployment.Application.SerializedForm()); err != nil { return err } urlsByCluster := make(map[string]string) endpointFunc := func(status int, response []byte) (bool, error) { if ok, err := isCloudOK(status); !ok { return ok, err } var resp deploymentResponse if err := json.Unmarshal(response, &resp); err != nil { return false, nil } if len(resp.Endpoints) == 0 { return false, nil } for _, endpoint := range resp.Endpoints { if endpoint.Scope != "zone" { continue } urlsByCluster[endpoint.Cluster] = endpoint.URL } return true, nil } if _, err = wait(t.httpClient, endpointFunc, func() *http.Request { return req }, &t.apiOptions.TLSOptions.KeyPair, timeout); err != nil { return err } if len(urlsByCluster) == 0 { return fmt.Errorf("no endpoints discovered") } t.deploymentOptions.ClusterURLs = urlsByCluster return nil } func isCloudOK(status int) (bool, error) { if status == 401 { // when retrying we should give up immediately if we're not authorized return false, fmt.Errorf("status %d: invalid credentials", status) } return isOK(status), nil }
package spec // Mod is type Mod struct { ModName string Event Kls DataClasses []Kls }
package service

import (
	"database/sql"
	"time"

	"github.com/google/uuid"

	"varconf-server/core/dao"
)

// AppService provides application-level operations over app records.
type AppService struct {
	appDao      *dao.AppDao
	manageTxDao *dao.ManageTxDao
}

// NewAppService builds an AppService backed by the given database handle.
func NewAppService(db *sql.DB) *AppService {
	return &AppService{
		appDao:      dao.NewAppDao(db),
		manageTxDao: dao.NewManageTxDao(db),
	}
}

// PageQuery returns one page of apps whose name matches likeName, together
// with the total page count and total row count.
func (s *AppService) PageQuery(likeName string, pageIndex, pageSize int64) ([]*dao.AppData, int64, int64) {
	start := (pageIndex - 1) * pageSize
	query := dao.QueryAppData{LikeName: likeName, Start: start, End: start + pageSize}
	pageData := s.appDao.QueryApps(query)

	totalCount := s.appDao.CountApps(dao.QueryAppData{LikeName: likeName})
	pageCount := totalCount / pageSize
	if totalCount%pageSize != 0 {
		pageCount++
	}
	return pageData, pageCount, totalCount
}

// QueryApp returns the app with the given id, or nil when it does not exist.
func (s *AppService) QueryApp(appId int64) *dao.AppData {
	matches := s.appDao.QueryApps(dao.QueryAppData{AppId: appId})
	if len(matches) != 1 {
		return nil
	}
	return matches[0]
}

// CreateApp stamps timestamps and a fresh API key on appData and inserts it.
// Returns false when appData is nil or the insert did not affect one row.
func (s *AppService) CreateApp(appData *dao.AppData) bool {
	if appData == nil {
		return false
	}
	appData.CreateTime.Time = time.Now()
	appData.UpdateTime.Time = time.Now()
	appData.ApiKey = appData.Code + ":" + uuid.New().String()
	return s.appDao.InsertApp(appData) == 1
}

// SelectedUpdateApp refreshes the update timestamp and persists the selected
// fields of appData, reporting whether exactly one row changed.
func (s *AppService) SelectedUpdateApp(appData dao.AppData) bool {
	appData.UpdateTime.Time = time.Now()
	return s.appDao.SelectedUpdateApp(appData) == 1
}

// DeleteApp removes the app with the given id inside a managed transaction.
func (s *AppService) DeleteApp(appId int64) bool {
	return s.manageTxDao.DeleteApp(appId)
}
package main import ( "fmt" "net/http" "github.com/gorilla/mux" ) func main() { r := mux.NewRouter() r.HandleFunc("/chord/{root}/{pattern}", ChordHandler) r.HandleFunc("/notes/{notes}", NotesHandler) http.ListenAndServe(":8080", r) } func ChordHandler(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) root := vars["root"] pattern := vars["pattern"] w.Write([]byte(fmt.Sprintf("%s %s", root, pattern))) } func NotesHandler(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) notes := vars["notes"] // Parse notes w.Write([]byte(fmt.Sprintf("%s", notes))) }
package dict_data

import (
	"errors"

	"xorm.io/builder"
	"yj-app/app/yjgframe/db"
	"yj-app/app/yjgframe/utils/excel"
	"yj-app/app/yjgframe/utils/page"
)

// Fill with you ideas below.

// AddReq is the request payload for the "add" page.
type AddReq struct {
	DictLabel string `form:"dictLabel" binding:"required"`
	DictValue string `form:"dictValue" binding:"required"`
	DictType  string `form:"dictType" binding:"required"`
	DictSort  int    `form:"dictSort" binding:"required"`
	CssClass  string `form:"cssClass"`
	ListClass string `form:"listClass" binding:"required"`
	IsDefault string `form:"isDefault" binding:"required"`
	Status    string `form:"status" binding:"required"`
	Remark    string `form:"remark"`
}

// EditReq is the request payload for the "edit" page.
type EditReq struct {
	DictCode  int64  `form:"dictCode" binding:"required"`
	DictLabel string `form:"dictLabel" binding:"required"`
	DictValue string `form:"dictValue" binding:"required"`
	DictType  string `form:"dictType"`
	DictSort  int    `form:"dictSort" binding:"required"`
	CssClass  string `form:"cssClass"`
	ListClass string `form:"listClass" binding:"required"`
	IsDefault string `form:"isDefault" binding:"required"`
	Status    string `form:"status" binding:"required"`
	Remark    string `form:"remark"`
}

// SelectPageReq is the paged-query request payload.
type SelectPageReq struct {
	DictType  string `form:"dictType"`  // dictionary type
	DictLabel string `form:"dictLabel"` // dictionary label
	Status    string `form:"status"`    // status
	BeginTime string `form:"beginTime"` // start of the create-time range
	EndTime   string `form:"endTime"`   // end of the create-time range
	PageNum   int    `form:"pageNum"`   // current page number
	PageSize  int    `form:"pageSize"`  // rows per page
}

// SelectListByPage queries dictionary data matching param, one page at a
// time, returning the rows, paging metadata, and any error.
// NOTE(review): param must be non-nil — page.CreatePaging dereferences it
// unconditionally, exactly as the original did.
func SelectListByPage(param *SelectPageReq) (*[]Entity, *page.Paging, error) {
	engine := db.Instance().Engine()
	p := new(page.Paging)
	if engine == nil {
		return nil, p, errors.New("获取数据库连接失败")
	}
	model := engine.Table(TableName()).Alias("t")
	if param != nil {
		if param.DictLabel != "" {
			model.Where("t.dict_label like ?", "%"+param.DictLabel+"%")
		}
		if param.Status != "" {
			// BUG FIX: the original condition "t.status = " was missing the
			// "?" placeholder, so the bound value was never applied.
			model.Where("t.status = ?", param.Status)
		}
		if param.DictType != "" {
			model.Where("t.dict_type like ?", "%"+param.DictType+"%")
		}
		if param.BeginTime != "" {
			model.Where("date_format(t.create_time,'%y%m%d') >= date_format(?,'%y%m%d') ", param.BeginTime)
		}
		if param.EndTime != "" {
			model.Where("date_format(t.create_time,'%y%m%d') <= date_format(?,'%y%m%d') ", param.EndTime)
		}
	}
	totalModel := model.Clone()
	total, err := totalModel.Count()
	if err != nil {
		return nil, p, errors.New("读取行数失败")
	}
	p = page.CreatePaging(param.PageNum, param.PageSize, int(total))
	model.Limit(p.Pagesize, p.StartNum)
	var result []Entity
	// BUG FIX: the Find error was previously discarded.
	if err := model.Find(&result); err != nil {
		return nil, p, err
	}
	return &result, p, nil
}

// SelectListExport exports the rows matching param to an Excel file with the
// given header and column list, returning the file path.
func SelectListExport(param *SelectPageReq, head, col []string) (string, error) {
	engine := db.Instance().Engine()
	if engine == nil {
		return "", errors.New("获取数据库连接失败")
	}
	build := builder.Select(col...).From(TableName(), "t")
	if param != nil {
		if param.DictLabel != "" {
			build.Where(builder.Like{"t.dict_label", param.DictLabel})
		}
		if param.Status != "" {
			build.Where(builder.Eq{"t.status": param.Status})
		}
		if param.DictType != "" {
			build.Where(builder.Like{"t.dict_type", param.DictType})
		}
		if param.BeginTime != "" {
			// BUG FIX: builder.Gte bound the date_format(...) text as a
			// string *value*, comparing against a literal instead of SQL.
			// builder.Expr emits it as a raw SQL fragment with a bound arg.
			build.Where(builder.Expr("date_format(t.create_time,'%y%m%d') >= date_format(?,'%y%m%d')", param.BeginTime))
		}
		if param.EndTime != "" {
			build.Where(builder.Expr("date_format(t.create_time,'%y%m%d') <= date_format(?,'%y%m%d')", param.EndTime))
		}
	}
	sqlStr, _, _ := build.ToSQL()
	arr, err := engine.SQL(sqlStr).QuerySliceString()
	// BUG FIX: the query error was previously overwritten by the export call.
	if err != nil {
		return "", err
	}
	return excel.DownlaodExcel(head, arr)
}

// SelectListAll returns every row matching param (no paging).
func SelectListAll(param *SelectPageReq) ([]Entity, error) {
	engine := db.Instance().Engine()
	if engine == nil {
		return nil, errors.New("获取数据库连接失败")
	}
	model := engine.Table(TableName()).Alias("t")
	if param != nil {
		if param.DictLabel != "" {
			model.Where("t.dict_label like ?", "%"+param.DictLabel+"%")
		}
		if param.Status != "" {
			// BUG FIX: missing "?" placeholder, as in SelectListByPage.
			model.Where("t.status = ?", param.Status)
		}
		if param.DictType != "" {
			model.Where("t.dict_type like ?", "%"+param.DictType+"%")
		}
		if param.BeginTime != "" {
			model.Where("date_format(t.create_time,'%y%m%d') >= date_format(?,'%y%m%d') ", param.BeginTime)
		}
		if param.EndTime != "" {
			model.Where("date_format(t.create_time,'%y%m%d') <= date_format(?,'%y%m%d') ", param.EndTime)
		}
	}
	var result []Entity
	// BUG FIX: the Find error was previously discarded.
	if err := model.Find(&result); err != nil {
		return nil, err
	}
	return result, nil
}
package main import ( "fmt" "net/http" "os" ) func main() { initBeacon() err := http.ListenAndServe(fmt.Sprintf(":%v", getPort()), nil) if err != nil { panic(err) } } func getPort() string { if configuredPort := os.Getenv("PORT"); configuredPort == "" { return "8080" } else { return configuredPort } }
// Copyright © 2017 Daniel Jay Haskin <djhaskin987@gmail.com> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "github.com/djhaskin987/pask/pkg" "github.com/spf13/cobra" "github.com/spf13/viper" "log" ) // runCmd represents the run command var runCmd = &cobra.Command{ Args: cobra.MinimumNArgs(1), Use: "run TASK", Short: "Run a packaged task", Long: `Calls any executable file called TASK found in the folder "<root-path>/pask/<pkg>/<vers>/tasks". Treats each package folder in order of appearance of the packages under the "packages" key in the spec file.`, Run: func(cmd *cobra.Command, args []string) { spec := viper.Get("spec").(string) base := viper.Get("base").(string) log.Println("Running packaged tasks...") log.Printf("Using spec file `%s`\n", spec) log.Println("Using project base `%s`\n", base) if spec, err := pkg.ReadSpec(spec); err != nil { log.Fatalln("Error reading spec file:", err) } else { log.Printf("Using base directory `%s`\n", base) for _, task := range args { if err := spec.Run(base, task); err != nil { log.Fatalf("Problem running task `%s`: `%s`\n", task, err) } } } }, } func init() { RootCmd.AddCommand(runCmd) // Here you will define your flags and configuration settings. 
// Cobra supports Persistent Flags which will work for this command // and all subcommands, e.g.: // runCmd.PersistentFlags().String("foo", "", "A help for foo") // Cobra supports local flags which will only run when this command // is called directly, e.g.: // runCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") }
package config import ( "github.com/spf13/cobra" "github.com/wish/ctl/pkg/client" ) // Cmd returns the config subcommand func Cmd(c *client.Client) *cobra.Command { config := &cobra.Command{ Use: "config", Short: "Edit ctl configuration", Long: "Tool for changing the behaviour of ctl", } config.AddCommand(fetchCmd(c)) config.AddCommand(deleteCmd(c)) config.AddCommand(viewCmd(c)) return config }
package main import ( "os" "testing" "github.com/jinzhu/gorm" ) var db *gorm.DB func TestMain(m *testing.M) { db = loadDatabase(DBFilenameTest) seedData(db) m.Run() os.Remove(DBFilenameTest) }
package recorder

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/team-bonitto/bonitto/internal/model"
	"github.com/team-bonitto/bonitto/internal/queue/consumer"
	"github.com/team-bonitto/bonitto/internal/queue/producer"
)

const QueueName = "recorder"
const DBName = "db"

// Compile-time interface checks.
var _ consumer.Consumer = Recorder{}
var _ producer.Producer = Input{}

// Input is the queue payload for a record.
type Input model.Record

// GetQueueName returns the queue this producer writes to.
func (i Input) GetQueueName() string {
	return QueueName
}

// Marshal serializes the input as JSON.
func (i Input) Marshal() []byte {
	encoded, _ := json.Marshal(i)
	return encoded
}

// Recorder persists records from the queue into Redis.
type Recorder struct {
	Rds *redis.Client
}

// GetQueueName returns the queue this consumer reads from.
func (r Recorder) GetQueueName() string {
	return QueueName
}

// Consume decodes a queued record and stores the raw payload in Redis under
// the "<userID>:<problemNo>" field of the db hash.
func (r Recorder) Consume(a string) error {
	var payload Input
	if err := json.Unmarshal([]byte(a), &payload); err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	field := fmt.Sprintf("%s:%v", payload.UserID, payload.ProblemNo)
	_, err := r.Rds.HMSet(ctx, DBName, field, a).Result()
	return err
}

// New builds a Recorder backed by the given Redis client.
func New(rds *redis.Client) (Recorder, error) {
	return Recorder{Rds: rds}, nil
}

// GetResults loads the stored test results for a user/problem pair,
// returning an error when no entry exists.
func (r *Recorder) GetResults(userID string, problemNo string) ([][]model.TestResult, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	field := fmt.Sprintf("%s:%v", userID, problemNo)
	values, err := r.Rds.HMGet(ctx, DBName, field).Result()
	if err != nil {
		return nil, err
	}
	if values[0] == nil {
		return nil, errors.New("not found " + field)
	}
	raw := values[0].(string)
	var stored Input
	if err := json.Unmarshal([]byte(raw), &stored); err != nil {
		return nil, err
	}
	return stored.TestResults, nil
}
package typeswitch

import (
	"go/ast"
	"go/types"
	"strings"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/inspect"
	"golang.org/x/tools/go/ast/inspector"
)

// Analyzer is the typeswitch analysis entry point; it depends on the
// inspect pass for AST traversal.
var Analyzer = &analysis.Analyzer{
	Name: "typeswitch",
	Doc:  Doc,
	Run:  run,
	Requires: []*analysis.Analyzer{
		inspect.Analyzer,
	},
}

const Doc = "typeswitch finds a type which implement an interfaces which are used in type-switch but the type does not appear in any cases of the type-switch"

// enum pairs an "enum-like" interface with the concrete package-level types
// that implement it.
type enum struct {
	Interface  *types.Interface
	Implements []types.Object
}

// run inspects every type switch in the package. When the switched-on value
// has an enum-like interface type (see getEnums), it reports any known
// implementation that does not appear in a case clause.
func run(pass *analysis.Pass) (interface{}, error) {
	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)

	// find enum-like interfaces — in this package and all direct imports
	enums := getEnums(pass.Pkg)
	for _, p := range pass.Pkg.Imports() {
		for k, v := range getEnums(p) {
			enums[k] = v
		}
	}

	nodeFilter := []ast.Node{
		(*ast.TypeSwitchStmt)(nil),
	}
	inspect.Preorder(nodeFilter, func(n ast.Node) {
		sw, ok := n.(*ast.TypeSwitchStmt)
		if !ok {
			return
		}
		// Extract the static type of the switched-on expression. A type
		// switch's Assign is either "x.(type)" (ExprStmt) or
		// "v := x.(type)" (AssignStmt) — anything else is a parser bug.
		var typ types.Type
		switch stmt := sw.Assign.(type) {
		case *ast.ExprStmt:
			if expr, ok := stmt.X.(*ast.TypeAssertExpr); ok {
				typ = pass.TypesInfo.TypeOf(expr.X).Underlying()
			}
		case *ast.AssignStmt:
			if expr, ok := stmt.Rhs[0].(*ast.TypeAssertExpr); ok {
				typ = pass.TypesInfo.TypeOf(expr.X).Underlying()
			}
		default:
			panic("unexpected type")
		}
		// Not switching on a known enum-like interface: nothing to check.
		// (typ may be nil here; a nil map key simply misses.)
		e := enums[typ]
		if e == nil {
			return
		}
		// Collect the implementations that never appear in a case clause.
		var ids []string
		for _, obj := range e.Implements {
			if !hasCase(pass, obj.Type(), sw) {
				ids = append(ids, obj.Id())
			}
		}
		if len(ids) != 0 {
			pass.Reportf(sw.Pos(), "type %s does not appear in any cases", strings.Join(ids, ","))
		}
	})

	return nil, nil
}

// getEnums scans pkg's top-level scope and returns, for each interface that
// has at least two package-level implementations, an enum mapping keyed by
// the interface type.
func getEnums(pkg *types.Package) map[types.Type]*enum {
	var itfs []*types.Interface
	var typs []types.Object

	// find interfaces — partition the package scope into interface types
	// and candidate concrete types
	for _, n := range pkg.Scope().Names() {
		obj, ok := pkg.Scope().Lookup(n).(*types.TypeName)
		if !ok {
			continue
		}
		if itf, ok := obj.Type().Underlying().(*types.Interface); ok {
			itfs = append(itfs, itf)
		} else {
			typs = append(typs, obj)
		}
	}

	// find implements — an interface counts as "enum-like" only when two
	// or more package-level types implement it
	enums := map[types.Type]*enum{}
	for _, itf := range itfs {
		e := &enum{
			Interface: itf,
		}
		for _, typ := range typs {
			if types.Implements(typ.Type(), itf) {
				e.Implements = append(e.Implements, typ)
			}
		}
		if len(e.Implements) >= 2 {
			enums[itf] = e
		}
	}
	return enums
}

// hasCase reports whether type t appears in one of sw's case clauses. A
// default clause matches every type.
func hasCase(pass *analysis.Pass, t types.Type, sw *ast.TypeSwitchStmt) bool {
	for _, s := range sw.Body.List {
		c, ok := s.(*ast.CaseClause)
		if !ok {
			continue
		}
		// default
		if c.List == nil {
			return true
		}
		for _, expr := range c.List {
			if types.Identical(t, pass.TypesInfo.TypeOf(expr)) {
				return true
			}
		}
	}
	return false
}
package models import "time" // A Sensor record is a specific sensor. type Sensor struct { ID uint `gorm:"primary_key"` SensorTypeID int `gorm:"index"` Name string Description string CreatedAt time.Time }
package mlapi

import (
	"time"

	"github.com/freignat91/mlearning/mlserver/server"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// mlClient wraps the gRPC connection to the mlearning server.
type mlClient struct {
	api      *MlAPI
	client   mlserver.MLearningServiceClient
	nodeHost string
	ctx      context.Context
	conn     *grpc.ClientConn
}

// init wires the client to its owning API and dials the server.
func (g *mlClient) init(api *MlAPI) error {
	g.api = api
	g.ctx = context.Background()
	if err := g.connectServer(); err != nil {
		return err
	}
	api.info("Client connected to server %s\n", g.nodeHost)
	return nil
}

// connectServer dials the local mlearning server, blocking up to 20 seconds.
func (g *mlClient) connectServer() error {
	conn, err := grpc.Dial(
		"localhost:30107",
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithTimeout(time.Second*20),
	)
	if err != nil {
		return err
	}
	g.conn = conn
	g.client = mlserver.NewMLearningServiceClient(conn)
	return nil
}

// close tears down the gRPC connection if one was established.
func (g *mlClient) close() {
	if g.conn == nil {
		return
	}
	g.conn.Close()
}
package schedule

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"BearApp/internal/bootstrap"

	"github.com/naoina/toml"
)

// loadSchedule reads every *.toml file in the environment-specific schedule
// config directory and returns all cron jobs declared under the "job" key.
func loadSchedule() ([]*CronJob, error) {
	configDir := filepath.Join(bootstrap.GetAppRoot(), "config", "schedule", bootstrap.GetAppEnv())

	dir, err := os.Open(configDir)
	if err != nil {
		return nil, err
	}
	// Simplification: the original closed the directory manually on the
	// Readdir error path and *also* registered a defer afterwards; a single
	// defer right after a successful Open covers every path.
	defer dir.Close()

	fileList, err := dir.Readdir(-1)
	if err != nil {
		return nil, err
	}

	var jobs []*CronJob
	for _, file := range fileList {
		if !strings.HasSuffix(file.Name(), ".toml") {
			continue
		}
		tomlData, err := ioutil.ReadFile(filepath.Join(configDir, file.Name()))
		if err != nil {
			return nil, err
		}
		var jobsConf struct {
			Jobs []*CronJob `toml:"job"`
		}
		if err := toml.Unmarshal(tomlData, &jobsConf); err != nil {
			return nil, err
		}
		jobs = append(jobs, jobsConf.Jobs...)
	}
	return jobs, nil
}
package kinesis

import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
	"github.com/golang/protobuf/proto"
	"github.com/hill-daniel/iot-protobuf-lambda"
	pb "github.com/hill-daniel/iot-protobuf-lambda/proto"
	"github.com/pkg/errors"
)

// Handler accepts and processes KinesisEvents
type Handler struct {
	db iot.DeviceBase
}

// NewHandler creates a Lambda handler which accepts and processes KinesisEvents
func NewHandler(db iot.DeviceBase) *Handler {
	return &Handler{db: db}
}

// HandleRequest handles the incoming Kinesis event, processing every record
// and reporting "failed" with an error when any record could not be stored.
func (h *Handler) HandleRequest(ctx context.Context, event events.KinesisEvent) (string, error) {
	var failures []error
	for _, record := range event.Records {
		if err := process(record.Kinesis, h.db); err != nil {
			failures = append(failures, err)
		}
	}
	if len(failures) == 0 {
		return "ok", nil
	}
	for _, err := range failures {
		fmt.Println(err)
	}
	return "failed", fmt.Errorf("failed to process %d kinesis records out of %d", len(failures), len(event.Records))
}

// process decodes one record's protobuf payload into a Device and persists it.
func process(record events.KinesisRecord, db iot.DeviceBase) error {
	device := new(pb.Device)
	if err := proto.Unmarshal(record.Data, device); err != nil {
		return errors.Wrap(err, "failed to unmarshal protobuf")
	}
	return db.Store(device)
}
package response func CreateResponse(data interface{}) interface{} { return map[string]interface{}{ "result": data, } }
package webauthnutil

import (
	"time"

	"github.com/google/uuid"

	"github.com/pomerium/pomerium/pkg/cryptutil"
)

// NewEnrollmentToken creates a new EnrollmentToken.
func NewEnrollmentToken(key []byte, ttl time.Duration, deviceEnrollmentID string) (string, error) {
	enrollmentUUID, err := uuid.Parse(deviceEnrollmentID)
	if err != nil {
		return "", err
	}
	expiry := time.Now().Add(ttl)
	token := cryptutil.GenerateSecureToken(key, expiry, cryptutil.Token(enrollmentUUID))
	return token.String(), nil
}

// ParseAndVerifyEnrollmentToken parses and verifies an enrollment token
func ParseAndVerifyEnrollmentToken(key []byte, rawEnrollmentToken string) (string, error) {
	secureToken, ok := cryptutil.SecureTokenFromString(rawEnrollmentToken)
	if !ok {
		return "", cryptutil.ErrInvalid
	}
	if err := secureToken.Verify(key, time.Now()); err != nil {
		return "", err
	}
	return secureToken.Token().UUID().String(), nil
}
/* * Copyright (c) 2019. Alexey Shtepa <as.shtepa@gmail.com> LICENSE MIT * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. */ package bindata import ( "fmt" "unsafe" ) func inspect_t(v interface{}) (unsafe.Pointer, uintptr) { // inspect value switch t := v.(type) { case bool: i := v.(bool) return unsafe.Pointer(&i), unsafe.Sizeof(i) case int8: i := v.(int8) return unsafe.Pointer(&i), unsafe.Sizeof(i) case int16: i := v.(int16) return unsafe.Pointer(&i), unsafe.Sizeof(i) case int32: i := v.(int32) return unsafe.Pointer(&i), unsafe.Sizeof(i) case int64: i := v.(int64) return unsafe.Pointer(&i), unsafe.Sizeof(i) case int: i := v.(int) return unsafe.Pointer(&i), unsafe.Sizeof(i) case uint8: i := v.(uint8) return unsafe.Pointer(&i), unsafe.Sizeof(i) case uint16: i := v.(uint16) return unsafe.Pointer(&i), unsafe.Sizeof(i) case uint32: i := v.(uint32) return unsafe.Pointer(&i), unsafe.Sizeof(i) case uint64: i := v.(uint64) return unsafe.Pointer(&i), unsafe.Sizeof(i) case uint: i := v.(uint) return unsafe.Pointer(&i), unsafe.Sizeof(i) case uintptr: i := v.(uintptr) return unsafe.Pointer(&i), unsafe.Sizeof(i) case float32: i := v.(float32) return unsafe.Pointer(&i), unsafe.Sizeof(i) case float64: i := v.(float64) return unsafe.Pointer(&i), unsafe.Sizeof(i) case complex64: i := v.(complex64) return unsafe.Pointer(&i), unsafe.Sizeof(i) case complex128: i := v.(complex128) return unsafe.Pointer(&i), unsafe.Sizeof(i) case string: i := v.(string) return unsafe.Pointer(&i), unsafe.Sizeof(i) default: panic(fmt.Errorf("Unsupported type: %T\n", t)) } } /// builtin decoders func Bool(b []byte) (bool, error) { v := new(bool) memset(unsafe.Pointer(v), b) return *v, nil } func Int(b []byte) (int, error) { v := new(int) memset(unsafe.Pointer(v), b) return *v, nil } func Int8(b []byte) (int8, error) { v := new(int8) memset(unsafe.Pointer(v), b) return *v, nil } func Int16(b []byte) (int16, error) { v := 
new(int16) memset(unsafe.Pointer(v), b) return *v, nil } func Int32(b []byte) (int32, error) { v := new(int32) memset(unsafe.Pointer(v), b) return *v, nil } func Int64(b []byte) (int64, error) { v := new(int64) memset(unsafe.Pointer(v), b) return *v, nil } func Uint(b []byte) (uint, error) { v := new(uint) memset(unsafe.Pointer(v), b) return *v, nil } func Uint8(b []byte) (uint8, error) { v := new(uint8) memset(unsafe.Pointer(v), b) return *v, nil } func Uint16(b []byte) (uint16, error) { v := new(uint16) memset(unsafe.Pointer(v), b) return *v, nil } func Uint32(b []byte) (uint32, error) { v := new(uint32) memset(unsafe.Pointer(v), b) return *v, nil } func Uint64(b []byte) (uint64, error) { v := new(uint64) memset(unsafe.Pointer(v), b) return *v, nil } func Uintptr(b []byte) (uintptr, error) { v := new(uintptr) memset(unsafe.Pointer(v), b) return *v, nil } func Float32(b []byte) (float32, error) { v := new(float32) memset(unsafe.Pointer(v), b) return *v, nil } func Float64(b []byte) (float64, error) { v := new(float64) memset(unsafe.Pointer(v), b) return *v, nil } func Complex64(b []byte) (complex64, error) { v := new(complex64) memset(unsafe.Pointer(v), b) return *v, nil } func Complex128(b []byte) (complex128, error) { v := new(complex128) memset(unsafe.Pointer(v), b) return *v, nil } func String(b []byte) (string, error) { v := new(string) memset(unsafe.Pointer(v), b) return *v, nil }
package messages

import (
	"fmt"

	"github.com/button-tech/BNBTextWallet/config"
)

// User-facing bot messages.
// BUG FIX: corrected typos in the original strings — "have't" -> "haven't
// got an", "latter" -> "later", "this commands" -> "these commands".
var (
	MsgForDeleteIfUserExist = "You do not need to delete your account"
	MsgForCreate            = "You haven't got an account.\nYou can create it by typing command /create"
	MsgIfCreate             = "Account already created"
	MsgForFollowLink        = "Please, follow the link:\n"
	MsgInternalError        = "Please, try it later"
	CleanAccount            = "Session cleared. All temporary data has been deleted."
)

// help is the command overview shown in the welcome message.
var help = BuildCodeMarkdown(`Welcome to the BUTTON Wallet on Discord.️`+`
You can create account or import QR/mnemonic and send BNB to your friends!
Just enter any of these commands.`) +
	BuildCodeMarkdown("Command Parameters Description:") + "\n️" + `
/create - Create a wallet
/import - Import QR code/mnemonic
/balance - Get your BNB address balance
/send (amount, $ or ticker) (address or nickname) - Send BNB
/address - Get your BNB address`

// SendHelp explains the /send command syntax.
var SendHelp = `Please, check your send information` + "\n" +
	`Information should be in the format:` + "\n" +
	BuildCodeMarkdown("Example: /send 100 @my_dear_friend or /send 100$ \"Address\"")

// UserNotFound is shown when the /send recipient is unknown.
const UserNotFound = `The user to whom you want to send must be registered`

// Welcome returns the greeting plus the command overview for the given user.
func Welcome(name string) string {
	hello := fmt.Sprintf("Hello, %s!", name+" ❤️")
	return hello + help
}

// BuildUrl prefixes path with the configured frontend URL.
func BuildUrl(path string) string {
	return config.EnvConfig.FrontendUrl + path
}

// BuildCodeMarkdown wraps text in a Discord code block.
func BuildCodeMarkdown(text string) string {
	return "```" + text + "```"
}

// ForSendTransactionForUser formats a transaction notification.
func ForSendTransactionForUser(from, to string, bnb, usd float64) string {
	return fmt.Sprintf(`📩 Transaction:
@%s send @%s: %v BNB
it's about: $%v`, from, to, bnb, usd)
}
package ptp

import "net"

// DevKind is the kind of TUN/TAP device.
type DevKind int

const (
	// DevTun receives/sends layer-3 routable packets (IP, IPv6...). Notably,
	// you don't receive link-local multicast with this interface
	// type.
	DevTun DevKind = iota
	// DevTap receives/sends Ethernet II frames. You receive all packets that
	// would be visible on an Ethernet link, including broadcast and
	// multicast traffic.
	DevTap
)

// Packet represents a packet received on TUN/TAP interface
type Packet struct {
	// The Ethernet type of the packet. Commonly seen values are
	// 0x0800 for IPv4 and 0x86dd for IPv6.
	// (Previous comment said 0x8000, which is not the IPv4 EtherType.)
	Protocol int
	// True if the packet was too large to be read completely.
	Truncated bool
	// The raw bytes of the Ethernet payload (for DevTun) or the full
	// Ethernet frame (for DevTap).
	Packet []byte
}

// InterfaceName - The name of the interface. May be different from the name given to
// Open(), if the latter was a pattern.
// func (t *Interface) InterfaceName() string {
//	return t.Name
// }

// TAP is the interface implemented by platform-specific TUN/TAP devices.
// Implementations live elsewhere in this package.
type TAP interface {
	GetName() string
	GetHardwareAddress() net.HardwareAddr
	GetIP() net.IP
	GetMask() net.IPMask
	GetBasename() string
	SetName(string)
	SetHardwareAddress(net.HardwareAddr)
	SetIP(net.IP)
	SetMask(net.IPMask)
	Init(string) error
	Open() error
	Close() error
	Configure() error
	ReadPacket() (*Packet, error)
	WritePacket(*Packet) error
	Run()
}
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package testcore

import (
	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
	"github.com/iotaledger/wasp/packages/coretypes"
	"github.com/iotaledger/wasp/packages/solo"
	"github.com/iotaledger/wasp/packages/testutil"
	"github.com/iotaledger/wasp/packages/vm/core/accounts"
	"github.com/iotaledger/wasp/packages/vm/core/blob"
	"github.com/iotaledger/wasp/packages/vm/core/root"
	"github.com/iotaledger/wasp/packages/vm/core/eventlog"
	"github.com/stretchr/testify/require"
	"testing"
)

// NOTE(review): checkFees is a helper defined elsewhere in this package;
// it presumably asserts the (owner fee, validator fee) pair for a contract.

// TestInit deploys a fresh chain and verifies the initial IOTA balances and
// that all core contracts start with zero fees.
func TestInit(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	// One token is deposited to the originator's on-chain account at deploy.
	chain.AssertAccountBalance(chain.OriginatorAgentID, balance.ColorIOTA, 1)
	env.AssertAddressBalance(chain.OriginatorAddress, balance.ColorIOTA, testutil.RequestFundsAmount-2)
	checkFees(chain, blob.Interface.Name, 0, 0)
	checkFees(chain, root.Interface.Name, 0, 0)
	checkFees(chain, accounts.Interface.Name, 0, 0)
	checkFees(chain, eventlog.Interface.Name, 0, 0)
}

// TestBase sets an owner fee of 1 on the blob contract and verifies the
// resulting balances and fee settings.
func TestBase(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	req := solo.NewCallParams(root.Interface.Name, root.FuncSetContractFee,
		root.ParamHname, blob.Interface.Hname(),
		root.ParamOwnerFee, 1,
	)
	_, err := chain.PostRequest(req, nil)
	require.NoError(t, err)
	chain.AssertAccountBalance(chain.OriginatorAgentID, balance.ColorIOTA, 2)
	env.AssertAddressBalance(chain.OriginatorAddress, balance.ColorIOTA, testutil.RequestFundsAmount-3)
	checkFees(chain, blob.Interface.Name, 1, 0)
}

// TestFeeIsEnough1 verifies that an upload by the originator succeeds when the
// attached tokens cover the 1-token blob owner fee.
func TestFeeIsEnough1(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	req := solo.NewCallParams(root.Interface.Name, root.FuncSetContractFee,
		root.ParamHname, blob.Interface.Hname(),
		root.ParamOwnerFee, 1,
	)
	_, err := chain.PostRequest(req, nil)
	require.NoError(t, err)
	chain.AssertAccountBalance(chain.OriginatorAgentID, balance.ColorIOTA, 2)
	env.AssertAddressBalance(chain.OriginatorAddress, balance.ColorIOTA, testutil.RequestFundsAmount-3)
	checkFees(chain, blob.Interface.Name, 1, 0)

	_, err = chain.UploadBlob(nil,
		blob.VarFieldVMType, "dummyType",
		blob.VarFieldProgramBinary, "dummyBinary",
	)
	require.NoError(t, err)
	chain.AssertAccountBalance(chain.OriginatorAgentID, balance.ColorIOTA, 3)
	env.AssertAddressBalance(chain.OriginatorAddress, balance.ColorIOTA, testutil.RequestFundsAmount-5)
}

// TestFeeIsEnough2 verifies fee accounting when a different (non-originator)
// user pays a 2-token blob owner fee: the fee is credited to the chain
// originator's account, while the uploader keeps the change on-chain.
func TestFeeIsEnough2(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	req := solo.NewCallParams(root.Interface.Name, root.FuncSetContractFee,
		root.ParamHname, blob.Interface.Hname(),
		root.ParamOwnerFee, 2,
	)
	_, err := chain.PostRequest(req, nil)
	require.NoError(t, err)
	chain.AssertAccountBalance(chain.OriginatorAgentID, balance.ColorIOTA, 2)
	env.AssertAddressBalance(chain.OriginatorAddress, balance.ColorIOTA, testutil.RequestFundsAmount-3)
	checkFees(chain, blob.Interface.Name, 2, 0)

	user := env.NewSignatureSchemeWithFunds()
	userAgentID := coretypes.NewAgentIDFromAddress(user.Address())
	_, err = chain.UploadBlob(user,
		blob.VarFieldVMType, "dummyType",
		blob.VarFieldProgramBinary, "dummyBinary",
	)
	require.NoError(t, err)
	chain.AssertAccountBalance(chain.OriginatorAgentID, balance.ColorIOTA, 4)
	env.AssertAddressBalance(chain.OriginatorAddress, balance.ColorIOTA, testutil.RequestFundsAmount-3)
	chain.AssertAccountBalance(userAgentID, balance.ColorIOTA, 1)
	env.AssertAddressBalance(user.Address(), balance.ColorIOTA, testutil.RequestFundsAmount-3)
}
package Problem0399 func calcEquation(equations [][]string, values []float64, queries [][]string) []float64 { // 建立变量之间的转换关系 m := make(map[string]map[string]float64) for i, e := range equations { a, b := e[0], e[1] v := values[i] // 添加 a / b 的记录 if _, ok := m[a]; !ok { m[a] = make(map[string]float64) } m[a][b] = 1.0 / v // 添加 b / a 的记录 if _, ok := m[b]; !ok { m[b] = make(map[string]float64) } m[b][a] = v } // 逐个搜索 queries 的结果 res := make([]float64, len(queries)) for i, q := range queries { res[i] = bfs(m, q[0], q[1]) } return res } type entry struct { s string f float64 } func bfs(m map[string]map[string]float64, a, b string) float64 { _, ok := m[a] if !ok { return -1.0 } _, ok = m[b] if !ok { return -1.0 } if a == b { return 1.0 } isVisited := make(map[string]bool) queue := []entry{{a, 1.0}} for len(queue) > 0 { e := queue[0] queue = queue[1:] if e.s == b { // 找到了 b return 1.0 / e.f } if isVisited[e.s] { continue } isVisited[e.s] = true for k, v := range m[e.s] { queue = append(queue, entry{k, v * e.f}) } } //没有找到 b return -1.0 }
package querydigest

import (
	"bytes"
	"fmt"
	"io"
	"os/exec"

	"github.com/kaz/pprotein/internal/collect"
)

type (
	processor struct{}
)

// Cacheable reports that digest output for a snapshot may be cached.
func (p *processor) Cacheable() bool {
	return true
}

// Process runs pt-query-digest over the snapshot body and returns the JSON
// portion of its output as a reader.
func (p *processor) Process(snapshot *collect.Snapshot) (io.ReadCloser, error) {
	bodyPath, err := snapshot.BodyPath()
	if err != nil {
		return nil, fmt.Errorf("failed to find snapshot body: %w", err)
	}

	cmd := exec.Command("pt-query-digest", "--limit", "100%", "--output", "json", bodyPath)
	res, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("external process aborted: %w", err)
	}

	// pt-query-digest may print non-JSON preamble before the JSON document;
	// skip to the first '{'. Guard against output containing no '{' at all:
	// IndexByte returns -1 then, and res[-1:] would panic at runtime.
	starts := bytes.IndexByte(res, '{')
	if starts < 0 {
		return nil, fmt.Errorf("no JSON object in pt-query-digest output")
	}
	return io.NopCloser(bytes.NewBuffer(res[starts:])), nil
}
package dynamo import ( "github.com/aws/aws-sdk-go/service/dynamodb" ) const ( awsErrConditionalCheckFailed = "ConditionalCheckFailed" ) type DuplicateEntryException struct { Message string } type ItemDoesNotExistException struct { Message string } type UniqueViolationException struct { Message string } func (d DuplicateEntryException) Error() string { return d.Message } func (d ItemDoesNotExistException) Error() string { return d.Message } func (d UniqueViolationException) Error() string { return d.Message } func isConditionalCheckFailedError(err error) bool { _, ok := err.(*dynamodb.ConditionalCheckFailedException) if ok { return true } transactionCastError, ok := err.(*dynamodb.TransactionCanceledException) if !ok { return false } for _, reason := range transactionCastError.CancellationReasons { if *reason.Code == awsErrConditionalCheckFailed { return true } } return false }
package storage

import (
	"docktor/server/types"
	"github.com/globalsign/mgo"
	"github.com/globalsign/mgo/bson"
)

// ServicesRepo is the repo for services
type ServicesRepo interface {
	// Drop drops the content of the collection
	Drop() error
	// Save a service into database
	Save(service types.Service) (types.Service, error)
	// Delete a service in database
	Delete(id string) error
	// FindByID get the service by its id
	FindByID(id string) (types.Service, error)
	// FindByIDBson get the service by its id
	FindByIDBson(id bson.ObjectId) (types.Service, error)
	// FindBySubServiceID get the service which has this subservice id
	FindBySubServiceID(id string) (types.Service, error)
	// FindSubServiceByID get the subservice by id
	FindSubServiceByID(id string) (types.SubService, error)
	// FindSubServiceByIDBson get the subservice by bson id
	FindSubServiceByIDBson(id bson.ObjectId) (types.SubService, error)
	// FindAll get all services
	FindAll() (types.Services, error)
	// GetCollectionName returns the name of the collection
	GetCollectionName() string
	// CreateIndexes creates Index
	CreateIndexes() error
}

// DefaultServicesRepo is the repository for services
type DefaultServicesRepo struct {
	coll *mgo.Collection // backing MongoDB collection
}

// NewServicesRepo instantiate new ServicesRepo
func NewServicesRepo(coll *mgo.Collection) ServicesRepo {
	return &DefaultServicesRepo{coll: coll}
}

// GetCollectionName gets the name of the collection
func (r *DefaultServicesRepo) GetCollectionName() string {
	return r.coll.FullName
}

// CreateIndexes creates Index
// (No indexes are defined for services; this is intentionally a no-op.)
func (r *DefaultServicesRepo) CreateIndexes() error {
	return nil
}

// FindAll get all services
func (r *DefaultServicesRepo) FindAll() (t types.Services, err error) {
	err = r.coll.Find(nil).All(&t)
	return t, err
}

// FindByID get one service by id
// NOTE(review): bson.ObjectIdHex panics when id is not a valid hex ObjectId;
// callers should validate (e.g. bson.IsObjectIdHex) upstream — confirm.
func (r *DefaultServicesRepo) FindByID(id string) (t types.Service, err error) {
	err = r.coll.FindId(bson.ObjectIdHex(id)).One(&t)
	return t, err
}

// FindByIDBson get one service by id
func (r *DefaultServicesRepo) FindByIDBson(id bson.ObjectId) (t types.Service, err error) {
	err = r.coll.FindId(id).One(&t)
	return t, err
}

// FindBySubServiceID get one service by subservice id
func (r *DefaultServicesRepo) FindBySubServiceID(id string) (t types.Service, err error) {
	err = r.coll.Find(bson.M{"sub_services._id": bson.ObjectIdHex(id)}).One(&t)
	return t, err
}

// FindSubServiceByID get one sub service by subservice id without file
// The positional projection "sub_services.$" returns only the matched
// subservice, so SubServices[0] is the match when One succeeds.
func (r *DefaultServicesRepo) FindSubServiceByID(id string) (types.SubService, error) {
	t := types.Service{}
	err := r.coll.Find(bson.M{"sub_services._id": bson.ObjectIdHex(id)}).Select(bson.M{"sub_services.$": 1}).One(&t)
	return t.SubServices[0], err
}

// FindSubServiceByIDBson get one sub service by subservice id without file
func (r *DefaultServicesRepo) FindSubServiceByIDBson(id bson.ObjectId) (types.SubService, error) {
	t := types.Service{}
	err := r.coll.Find(bson.M{"sub_services._id": id}).Select(bson.M{"sub_services.$": 1}).One(&t)
	return t.SubServices[0], err
}

// Save create or update service
// Missing ObjectIds (on the service and each subservice) are generated here,
// then the document is upserted and the new version returned.
func (r *DefaultServicesRepo) Save(t types.Service) (types.Service, error) {
	for i := 0; i < len(t.SubServices); i++ {
		if !t.SubServices[i].ID.Valid() {
			t.SubServices[i].ID = bson.NewObjectId()
		}
	}
	if !t.ID.Valid() {
		t.ID = bson.NewObjectId()
	}
	change := mgo.Change{
		Update:    bson.M{"$set": t},
		ReturnNew: true,
		Upsert:    true,
	}
	_, err := r.coll.FindId(t.ID).Apply(change, &t)
	return t, err
}

// Delete remove a service by id
func (r *DefaultServicesRepo) Delete(id string) error {
	return r.coll.RemoveId(bson.ObjectIdHex(id))
}

// Drop drops the content of the collection
func (r *DefaultServicesRepo) Drop() error {
	return r.coll.DropCollection()
}
package flock

import (
	"os"
)

// LockFile places an exclusive lock on the file.
// If the file is already locked, it fails with an error.
// NOTE(review): exact semantics (blocking vs. immediate error) depend on
// LockFd, which is defined per-platform elsewhere in this package — confirm.
func LockFile(f *os.File) error {
	return LockFd(f.Fd())
}

// UnlockFile removes an existing lock held by this process.
func UnlockFile(f *os.File) error {
	return UnlockFd(f.Fd())
}
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunk

import (
	"fmt"
	"math/rand"
	"testing"
	"time"
	"unsafe"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/types"
	"github.com/stretchr/testify/require"
)

// TestColumnCopy checks CopyConstruct with and without a destination column.
func TestColumnCopy(t *testing.T) {
	col := newFixedLenColumn(8, 10)
	for i := 0; i < 10; i++ {
		col.AppendInt64(int64(i))
	}

	c1 := col.CopyConstruct(nil)
	require.Equal(t, col, c1)

	c2 := newFixedLenColumn(8, 10)
	c2 = col.CopyConstruct(c2)
	require.Equal(t, col, c2)
}

// TestColumnCopyReconstructFixedLen checks CopyReconstruct for a fixed-length
// column: selected rows/nulls survive, and appends after the copy still work.
func TestColumnCopyReconstructFixedLen(t *testing.T) {
	col := NewColumn(types.NewFieldType(mysql.TypeLonglong), 1024)
	results := make([]int64, 0, 1024)
	nulls := make([]bool, 0, 1024)
	sel := make([]int, 0, 1024)
	for i := 0; i < 1024; i++ {
		if rand.Intn(10) < 6 {
			sel = append(sel, i)
		}
		if rand.Intn(10) < 2 {
			col.AppendNull()
			nulls = append(nulls, true)
			results = append(results, 0)
			continue
		}
		v := rand.Int63()
		col.AppendInt64(v)
		results = append(results, v)
		nulls = append(nulls, false)
	}

	col = col.CopyReconstruct(sel, nil)
	nullCnt := 0
	for n, i := range sel {
		if nulls[i] {
			nullCnt++
			require.True(t, col.IsNull(n))
		} else {
			require.Equal(t, results[i], col.GetInt64(n))
		}
	}
	require.Equal(t, col.nullCount(), nullCnt)
	require.Len(t, sel, col.length)

	// The reconstructed column must still accept appends.
	for i := 0; i < 128; i++ {
		if i%2 == 0 {
			col.AppendNull()
		} else {
			col.AppendInt64(int64(i * i * i))
		}
	}
	require.Len(t, sel, col.length-128)
	require.Equal(t, nullCnt+128/2, col.nullCount())
	for i := 0; i < 128; i++ {
		if i%2 == 0 {
			require.True(t, col.IsNull(len(sel)+i))
		} else {
			require.Equal(t, int64(i*i*i), col.GetInt64(len(sel)+i))
			require.False(t, col.IsNull(len(sel)+i))
		}
	}
}

// TestColumnCopyReconstructVarLen is the variable-length (string) counterpart
// of TestColumnCopyReconstructFixedLen.
func TestColumnCopyReconstructVarLen(t *testing.T) {
	col := NewColumn(types.NewFieldType(mysql.TypeVarString), 1024)
	results := make([]string, 0, 1024)
	nulls := make([]bool, 0, 1024)
	sel := make([]int, 0, 1024)
	for i := 0; i < 1024; i++ {
		if rand.Intn(10) < 6 {
			sel = append(sel, i)
		}
		if rand.Intn(10) < 2 {
			col.AppendNull()
			nulls = append(nulls, true)
			results = append(results, "")
			continue
		}
		v := fmt.Sprintf("%v", rand.Int63())
		col.AppendString(v)
		results = append(results, v)
		nulls = append(nulls, false)
	}

	col = col.CopyReconstruct(sel, nil)
	nullCnt := 0
	for n, i := range sel {
		if nulls[i] {
			nullCnt++
			require.True(t, col.IsNull(n))
		} else {
			require.Equal(t, results[i], col.GetString(n))
		}
	}
	require.Equal(t, col.nullCount(), nullCnt)
	require.Len(t, sel, col.length)

	for i := 0; i < 128; i++ {
		if i%2 == 0 {
			col.AppendNull()
		} else {
			col.AppendString(fmt.Sprintf("%v", i*i*i))
		}
	}
	require.Len(t, sel, col.length-128)
	require.Equal(t, nullCnt+128/2, col.nullCount())
	for i := 0; i < 128; i++ {
		if i%2 == 0 {
			require.True(t, col.IsNull(len(sel)+i))
		} else {
			require.Equal(t, fmt.Sprintf("%v", i*i*i), col.GetString(len(sel)+i))
			require.False(t, col.IsNull(len(sel)+i))
		}
	}
}

// TestLargeStringColumnOffset checks that string offsets are 64-bit.
func TestLargeStringColumnOffset(t *testing.T) {
	numRows := 1
	col := newVarLenColumn(numRows)
	// The max-length of a string field can be 6M, a typical batch size for Chunk is 1024, which is 1K.
	// That is to say, the memory offset of a string column can be 6GB, which exceeds int32
	col.offsets[0] = 6 << 30
	require.Equal(t, int64(6<<30), col.offsets[0]) // test no overflow.
}

// TestI64Column checks that Int64s() aliases the column storage: writes through
// the slice are visible via row/column getters.
func TestI64Column(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendInt64(int64(i))
	}

	i64s := col.Int64s()
	for i := 0; i < 1024; i++ {
		require.Equal(t, int64(i), i64s[i])
		i64s[i]++
	}

	it := NewIterator4Chunk(chk)
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		require.Equal(t, int64(i+1), row.GetInt64(0))
		require.Equal(t, int64(i+1), col.GetInt64(i))
		i++
	}
}

// TestF64Column is the float64 analogue of TestI64Column.
func TestF64Column(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDouble)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendFloat64(float64(i))
	}

	f64s := col.Float64s()
	for i := 0; i < 1024; i++ {
		require.Equal(t, float64(i), f64s[i])
		f64s[i] /= 2
	}

	it := NewIterator4Chunk(chk)
	var i int64
	for row := it.Begin(); row != it.End(); row = it.Next() {
		require.Equal(t, float64(i)/2, row.GetFloat64(0))
		require.Equal(t, float64(i)/2, col.GetFloat64(int(i)))
		i++
	}
}

// TestF32Column is the float32 analogue of TestI64Column.
func TestF32Column(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeFloat)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendFloat32(float32(i))
	}

	f32s := col.Float32s()
	for i := 0; i < 1024; i++ {
		require.Equal(t, float32(i), f32s[i])
		f32s[i] /= 2
	}

	it := NewIterator4Chunk(chk)
	var i int64
	for row := it.Begin(); row != it.End(); row = it.Next() {
		require.Equal(t, float32(i)/2, row.GetFloat32(0))
		require.Equal(t, float32(i)/2, col.GetFloat32(int(i)))
		i++
	}
}

// TestDurationSliceColumn checks that GoDurations() aliases column storage.
func TestDurationSliceColumn(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDuration)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendDuration(types.Duration{Duration: time.Duration(i)})
	}

	ds := col.GoDurations()
	for i := 0; i < 1024; i++ {
		require.Equal(t, time.Duration(i), ds[i])
		d := types.Duration{Duration: ds[i]}
		d, _ = d.Add(d)
		ds[i] = d.Duration
	}

	it := NewIterator4Chunk(chk)
	var i int64
	for row := it.Begin(); row != it.End(); row = it.Next() {
		require.Equal(t, time.Duration(i)*2, row.GetDuration(0, 0).Duration)
		require.Equal(t, time.Duration(i)*2, col.GetDuration(int(i), 0).Duration)
		i++
	}
}

// TestMyDecimal checks that Decimals() aliases column storage and that
// in-place DecimalAdd is observed through row getters.
func TestMyDecimal(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeNewDecimal)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		d := new(types.MyDecimal)
		err := d.FromFloat64(float64(i) * 1.1)
		require.NoError(t, err)
		col.AppendMyDecimal(d)
	}

	ds := col.Decimals()
	for i := 0; i < 1024; i++ {
		d := new(types.MyDecimal)
		err := d.FromFloat64(float64(i) * 1.1)
		require.NoError(t, err)
		require.Zero(t, d.Compare(&ds[i]))
		types.DecimalAdd(&ds[i], d, &ds[i])
		// NOTE(review): this re-checks the FromFloat64 err above; the
		// DecimalAdd return value is discarded — likely intended to be
		// `err = types.DecimalAdd(...)`. Confirm.
		require.NoError(t, err)
	}

	it := NewIterator4Chunk(chk)
	var i int64
	for row := it.Begin(); row != it.End(); row = it.Next() {
		d := new(types.MyDecimal)
		err := d.FromFloat64(float64(i) * 1.1 * 2)
		require.NoError(t, err)
		delta := new(types.MyDecimal)
		err = types.DecimalSub(d, row.GetMyDecimal(0), delta)
		require.NoError(t, err)
		fDelta, err := delta.ToFloat64()
		require.NoError(t, err)
		require.InDelta(t, 0, fDelta, 0.0001)
		i++
	}
}

// TestStringColumn checks row/column string getters agree.
func TestStringColumn(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeVarString)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendString(fmt.Sprintf("%v", i*i))
	}

	it := NewIterator4Chunk(chk)
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		require.Equal(t, fmt.Sprintf("%v", i*i), row.GetString(0))
		require.Equal(t, fmt.Sprintf("%v", i*i), col.GetString(i))
		i++
	}
}

// TestSetColumn checks row/column SET getters agree.
func TestSetColumn(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeSet)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendSet(types.Set{Name: fmt.Sprintf("%v", i), Value: uint64(i)})
	}

	it := NewIterator4Chunk(chk)
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		s1 := col.GetSet(i)
		s2 := row.GetSet(0)
		require.Equal(t, s2.Name, s1.Name)
		require.Equal(t, s2.Value, s1.Value)
		require.Equal(t, fmt.Sprintf("%v", i), s1.Name)
		require.Equal(t, uint64(i), s1.Value)
		i++
	}
}

// TestJSONColumn checks row/column JSON getters agree.
func TestJSONColumn(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeJSON)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		j := new(types.BinaryJSON)
		err := j.UnmarshalJSON([]byte(fmt.Sprintf(`{"%v":%v}`, i, i)))
		require.NoError(t, err)
		col.AppendJSON(*j)
	}

	it := NewIterator4Chunk(chk)
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		j1 := col.GetJSON(i)
		j2 := row.GetJSON(0)
		require.Equal(t, j2.String(), j1.String())
		i++
	}
}

// TestTimeColumn checks row/column/Times() datetime getters agree.
func TestTimeColumn(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDatetime)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendTime(types.CurrentTime(mysql.TypeDatetime))
		// 100µs pause so consecutive CurrentTime values can differ.
		time.Sleep(time.Millisecond / 10)
	}

	it := NewIterator4Chunk(chk)
	ts := col.Times()
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		j1 := col.GetTime(i)
		j2 := row.GetTime(0)
		j3 := ts[i]
		require.Zero(t, j1.Compare(j2))
		require.Zero(t, j1.Compare(j3))
		i++
	}
}

// TestDurationColumn checks row/column duration getters agree.
func TestDurationColumn(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDuration)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendDuration(types.Duration{Duration: time.Second * time.Duration(i)})
	}

	it := NewIterator4Chunk(chk)
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		j1 := col.GetDuration(i, 0)
		j2 := row.GetDuration(0, 0)
		require.Zero(t, j1.Compare(j2))
		i++
	}
}

// TestEnumColumn checks row/column ENUM getters agree.
func TestEnumColumn(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeEnum)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendEnum(types.Enum{Name: fmt.Sprintf("%v", i), Value: uint64(i)})
	}

	it := NewIterator4Chunk(chk)
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		s1 := col.GetEnum(i)
		s2 := row.GetEnum(0)
		require.Equal(t, s2.Name, s1.Name)
		require.Equal(t, s2.Value, s1.Value)
		require.Equal(t, fmt.Sprintf("%v", i), s1.Name)
		require.Equal(t, uint64(i), s1.Value)
		i++
	}
}

// TestNullsColumn checks null flags are tracked per row.
func TestNullsColumn(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		if i%2 == 0 {
			col.AppendNull()
			continue
		}
		col.AppendInt64(int64(i))
	}

	it := NewIterator4Chunk(chk)
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		if i%2 == 0 {
			require.True(t, row.IsNull(0))
			require.True(t, col.IsNull(i))
		} else {
			require.Equal(t, int64(i), row.GetInt64(0))
		}
		i++
	}
}

// TestReconstructFixedLen is like TestColumnCopyReconstructFixedLen but uses
// the in-place reconstruct instead of CopyReconstruct.
func TestReconstructFixedLen(t *testing.T) {
	col := NewColumn(types.NewFieldType(mysql.TypeLonglong), 1024)
	results := make([]int64, 0, 1024)
	nulls := make([]bool, 0, 1024)
	sel := make([]int, 0, 1024)
	for i := 0; i < 1024; i++ {
		if rand.Intn(10) < 6 {
			sel = append(sel, i)
		}
		if rand.Intn(10) < 2 {
			col.AppendNull()
			nulls = append(nulls, true)
			results = append(results, 0)
			continue
		}
		v := rand.Int63()
		col.AppendInt64(v)
		results = append(results, v)
		nulls = append(nulls, false)
	}

	col.reconstruct(sel)
	nullCnt := 0
	for n, i := range sel {
		if nulls[i] {
			nullCnt++
			require.True(t, col.IsNull(n))
		} else {
			require.Equal(t, results[i], col.GetInt64(n))
		}
	}
	require.Equal(t, col.nullCount(), nullCnt)
	require.Len(t, sel, col.length)

	for i := 0; i < 128; i++ {
		if i%2 == 0 {
			col.AppendNull()
		} else {
			col.AppendInt64(int64(i * i * i))
		}
	}
	require.Len(t, sel, col.length-128)
	require.Equal(t, nullCnt+128/2, col.nullCount())
	for i := 0; i < 128; i++ {
		if i%2 == 0 {
			require.True(t, col.IsNull(len(sel)+i))
		} else {
			require.Equal(t, int64(i*i*i), col.GetInt64(len(sel)+i))
			require.False(t, col.IsNull(len(sel)+i))
		}
	}
}

// TestReconstructVarLen is the variable-length counterpart of
// TestReconstructFixedLen.
func TestReconstructVarLen(t *testing.T) {
	col := NewColumn(types.NewFieldType(mysql.TypeVarString), 1024)
	results := make([]string, 0, 1024)
	nulls := make([]bool, 0, 1024)
	sel := make([]int, 0, 1024)
	for i := 0; i < 1024; i++ {
		if rand.Intn(10) < 6 {
			sel = append(sel, i)
		}
		if rand.Intn(10) < 2 {
			col.AppendNull()
			nulls = append(nulls, true)
			results = append(results, "")
			continue
		}
		v := fmt.Sprintf("%v", rand.Int63())
		col.AppendString(v)
		results = append(results, v)
		nulls = append(nulls, false)
	}

	col.reconstruct(sel)
	nullCnt := 0
	for n, i := range sel {
		if nulls[i] {
			nullCnt++
			require.True(t, col.IsNull(n))
		} else {
			require.Equal(t, results[i], col.GetString(n))
		}
	}
	require.Equal(t, col.nullCount(), nullCnt)
	require.Len(t, sel, col.length)

	for i := 0; i < 128; i++ {
		if i%2 == 0 {
			col.AppendNull()
		} else {
			col.AppendString(fmt.Sprintf("%v", i*i*i))
		}
	}
	require.Len(t, sel, col.length-128)
	require.Equal(t, nullCnt+128/2, col.nullCount())
	for i := 0; i < 128; i++ {
		if i%2 == 0 {
			require.True(t, col.IsNull(len(sel)+i))
		} else {
			require.Equal(t, fmt.Sprintf("%v", i*i*i), col.GetString(len(sel)+i))
			require.False(t, col.IsNull(len(sel)+i))
		}
	}
}

// TestPreAllocInt64 checks Resize pre-allocates null rows and appends continue
// after the pre-allocated region.
func TestPreAllocInt64(t *testing.T) {
	col := NewColumn(types.NewFieldType(mysql.TypeLonglong), 128)
	col.ResizeInt64(256, true)
	i64s := col.Int64s()
	require.Equal(t, 256, len(i64s))
	for i := 0; i < 256; i++ {
		require.True(t, col.IsNull(i))
	}
	col.AppendInt64(2333)
	require.False(t, col.IsNull(256))
	require.Equal(t, 257, len(col.Int64s()))
	require.Equal(t, int64(2333), col.Int64s()[256])
}

// TestPreAllocUint64 is the unsigned counterpart of TestPreAllocInt64.
func TestPreAllocUint64(t *testing.T) {
	tll := types.NewFieldType(mysql.TypeLonglong)
	tll.AddFlag(mysql.UnsignedFlag)
	col := NewColumn(tll, 128)
	col.ResizeUint64(256, true)
	u64s := col.Uint64s()
	require.Equal(t, 256, len(u64s))
	for i := 0; i < 256; i++ {
		require.True(t, col.IsNull(i))
	}
	col.AppendUint64(2333)
	require.False(t, col.IsNull(256))
	require.Equal(t, 257, len(col.Uint64s()))
	require.Equal(t, uint64(2333), col.Uint64s()[256])
}

// TestPreAllocFloat32 is the float32 counterpart of TestPreAllocInt64.
func TestPreAllocFloat32(t *testing.T) {
	col := newFixedLenColumn(sizeFloat32, 128)
	col.ResizeFloat32(256, true)
	f32s := col.Float32s()
	require.Equal(t, 256, len(f32s))
	for i := 0; i < 256; i++ {
		require.True(t, col.IsNull(i))
	}
	col.AppendFloat32(2333)
	require.False(t, col.IsNull(256))
	require.Equal(t, 257, len(col.Float32s()))
	require.Equal(t, float32(2333), col.Float32s()[256])
}

// TestPreAllocFloat64 is the float64 counterpart of TestPreAllocInt64.
func TestPreAllocFloat64(t *testing.T) {
	col := newFixedLenColumn(sizeFloat64, 128)
	col.ResizeFloat64(256, true)
	f64s := col.Float64s()
	require.Equal(t, 256, len(f64s))
	for i := 0; i < 256; i++ {
		require.True(t, col.IsNull(i))
	}
	col.AppendFloat64(2333)
	require.False(t, col.IsNull(256))
	require.Equal(t, 257, len(col.Float64s()))
	require.Equal(t, float64(2333), col.Float64s()[256])
}

// TestPreAllocDecimal is the decimal counterpart of TestPreAllocInt64.
func TestPreAllocDecimal(t *testing.T) {
	col := newFixedLenColumn(sizeMyDecimal, 128)
	col.ResizeDecimal(256, true)
	ds := col.Decimals()
	require.Equal(t, 256, len(ds))
	for i := 0; i < 256; i++ {
		require.True(t, col.IsNull(i))
	}
	col.AppendMyDecimal(new(types.MyDecimal))
	require.False(t, col.IsNull(256))
	// NOTE(review): this calls Float64s() on a decimal column; the sibling
	// tests all re-check the typed accessor (here col.Decimals()). Looks like
	// a copy-paste slip — confirm intent.
	require.Equal(t, 257, len(col.Float64s()))
}

// TestPreAllocTime is the datetime counterpart of TestPreAllocInt64.
func TestPreAllocTime(t *testing.T) {
	col := newFixedLenColumn(sizeTime, 128)
	col.ResizeTime(256, true)
	ds := col.Times()
	require.Equal(t, 256, len(ds))
	for i := 0; i < 256; i++ {
		require.True(t, col.IsNull(i))
	}
	col.AppendTime(types.ZeroDatetime)
	require.False(t, col.IsNull(256))
	require.Equal(t, 257, len(col.Times()))
}

// TestNull checks SetNull/nullCount bookkeeping, including bitmap tail bits
// when the row count is not a multiple of 8.
func TestNull(t *testing.T) {
	col := newFixedLenColumn(sizeFloat64, 32)
	col.ResizeFloat64(1024, true)
	require.Equal(t, 1024, col.nullCount())

	notNulls := make(map[int]struct{})
	for i := 0; i < 512; i++ {
		idx := rand.Intn(1024)
		notNulls[idx] = struct{}{}
		col.SetNull(idx, false)
	}

	require.Equal(t, 1024-len(notNulls), col.nullCount())
	for idx := range notNulls {
		require.False(t, col.IsNull(idx))
	}

	col.ResizeFloat64(8, true)
	col.SetNulls(0, 8, true)
	col.SetNull(7, false)
	require.Equal(t, 7, col.nullCount())

	col.ResizeFloat64(8, true)
	col.SetNulls(0, 8, true)
	require.Equal(t, 8, col.nullCount())

	col.ResizeFloat64(9, true)
	col.SetNulls(0, 9, true)
	col.SetNull(8, false)
	require.Equal(t, 8, col.nullCount())
}

// TestSetNulls checks range null setting against a reference map.
func TestSetNulls(t *testing.T) {
	col := newFixedLenColumn(sizeFloat64, 32)
	col.ResizeFloat64(1024, true)
	require.Equal(t, 1024, col.nullCount())

	col.SetNulls(0, 1024, false)
	require.Zero(t, col.nullCount())

	nullMap := make(map[int]struct{})
	for i := 0; i < 100; i++ {
		begin := rand.Intn(1024)
		l := rand.Intn(37)
		end := begin + l
		if end > 1024 {
			end = 1024
		}
		for i := begin; i < end; i++ {
			nullMap[i] = struct{}{}
		}
		col.SetNulls(begin, end, true)

		require.Len(t, nullMap, col.nullCount())
		for k := range nullMap {
			require.True(t, col.IsNull(k))
		}
	}
}

// TestResizeReserve checks Resize tracks length for fixed-length columns and
// ReserveString only grows capacity (length stays 0) for var-length ones.
func TestResizeReserve(t *testing.T) {
	cI64s := newFixedLenColumn(sizeInt64, 0)
	require.Zero(t, cI64s.length)
	for i := 0; i < 100; i++ {
		n := rand.Intn(1024)
		cI64s.ResizeInt64(n, true)
		require.Equal(t, n, cI64s.length)
		require.Equal(t, n, len(cI64s.Int64s()))
	}
	cI64s.ResizeInt64(0, true)
	require.Zero(t, cI64s.length)
	require.Zero(t, len(cI64s.Int64s()))

	cStrs := newVarLenColumn(0)
	for i := 0; i < 100; i++ {
		n := rand.Intn(1024)
		cStrs.ReserveString(n)
		require.Zero(t, cStrs.length)
	}
	cStrs.ReserveString(0)
	require.Zero(t, cStrs.length)
}

// TestGetRaw checks GetRaw returns the raw bytes for both fixed-length
// (float32) and var-length (string) columns.
func TestGetRaw(t *testing.T) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeFloat)}, 1024)
	col := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendFloat32(float32(i))
	}
	it := NewIterator4Chunk(chk)
	var i int
	for row := it.Begin(); row != it.End(); row = it.Next() {
		f := float32(i)
		b := (*[unsafe.Sizeof(f)]byte)(unsafe.Pointer(&f))[:]
		require.Equal(t, b, row.GetRaw(0))
		require.Equal(t, b, col.GetRaw(i))
		i++
	}

	chk = NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeVarString)}, 1024)
	col = chk.Column(0)
	for i := 0; i < 1024; i++ {
		col.AppendString(fmt.Sprint(i))
	}
	it = NewIterator4Chunk(chk)
	i = 0
	for row := it.Begin(); row != it.End(); row = it.Next() {
		require.Equal(t, []byte(fmt.Sprint(i)), row.GetRaw(0))
		require.Equal(t, []byte(fmt.Sprint(i)), col.GetRaw(i))
		i++
	}
}

// TestResize checks that Resize*(n, false) zeroes existing data for every
// fixed-length element type.
func TestResize(t *testing.T) {
	col := NewColumn(types.NewFieldType(mysql.TypeLonglong), 1024)
	for i := 0; i < 1024; i++ {
		col.AppendInt64(int64(i))
	}
	col.ResizeInt64(1024, false)
	for i := 0; i < 1024; i++ {
		require.Equal(t, int64(0), col.Int64s()[i])
	}

	col = NewColumn(types.NewFieldType(mysql.TypeFloat), 1024)
	for i := 0; i < 1024; i++ {
		col.AppendFloat32(float32(i))
	}
	col.ResizeFloat32(1024, false)
	for i := 0; i < 1024; i++ {
		require.Equal(t, float32(0), col.Float32s()[i])
	}

	col = NewColumn(types.NewFieldType(mysql.TypeDouble), 1024)
	for i := 0; i < 1024; i++ {
		col.AppendFloat64(float64(i))
	}
	col.ResizeFloat64(1024, false)
	for i := 0; i < 1024; i++ {
		require.Equal(t, float64(0), col.Float64s()[i])
	}

	col = NewColumn(types.NewFieldType(mysql.TypeNewDecimal), 1024)
	for i := 0; i < 1024; i++ {
		col.AppendMyDecimal(new(types.MyDecimal).FromInt(int64(i)))
	}
	col.ResizeDecimal(1024, false)
	for i := 0; i < 1024; i++ {
		var d types.MyDecimal
		require.Equal(t, d, col.Decimals()[i])
	}

	col = NewColumn(types.NewFieldType(mysql.TypeDuration), 1024)
	for i := 0; i < 1024; i++ {
		col.AppendDuration(types.Duration{Duration: time.Duration(i), Fsp: i})
	}
	col.ResizeGoDuration(1024, false)
	for i := 0; i < 1024; i++ {
		require.Equal(t, time.Duration(0), col.GoDurations()[i])
	}

	col = NewColumn(types.NewFieldType(mysql.TypeDatetime), 1024)
	for i := 0; i < 1024; i++ {
		gt := types.FromDate(rand.Intn(2200), rand.Intn(10)+1, rand.Intn(20)+1, rand.Intn(12), rand.Intn(60), rand.Intn(60), rand.Intn(1000000))
		// NOTE(review): `t` here shadows the *testing.T for the rest of this
		// loop body; legal, but easy to misread.
		t := types.NewTime(gt, 0, 0)
		col.AppendTime(t)
	}
	col.ResizeTime(1024, false)
	for i := 0; i < 1024; i++ {
		// `time` shadows the time package inside this loop body.
		var time types.Time
		require.Equal(t, time, col.Times()[i])
	}
}

// BenchmarkDurationRow measures row-at-a-time duration addition.
func BenchmarkDurationRow(b *testing.B) {
	chk1 := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDuration)}, 1024)
	col1 := chk1.Column(0)
	for i := 0; i < 1024; i++ {
		col1.AppendDuration(types.Duration{Duration: time.Second * time.Duration(i)})
	}
	chk2 := chk1.CopyConstruct()
	result := chk1.CopyConstruct()

	b.ResetTimer()
	for k := 0; k < b.N; k++ {
		result.Reset()
		it1 := NewIterator4Chunk(chk1)
		it2 := NewIterator4Chunk(chk2)
		for r1, r2 := it1.Begin(), it2.Begin(); r1 != it1.End() && r2 != it2.End(); r1, r2 = it1.Next(), it2.Next() {
			d1 := r1.GetDuration(0, 0)
			d2 := r2.GetDuration(0, 0)
			r, err := d1.Add(d2)
			if err != nil {
				b.Fatal(err)
			}
			result.AppendDuration(0, r)
		}
	}
}

// BenchmarkDurationVec measures vectorized duration addition over GoDurations.
func BenchmarkDurationVec(b *testing.B) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDuration)}, 1024)
	col1 := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col1.AppendDuration(types.Duration{Duration: time.Second * time.Duration(i)})
	}
	col2 := col1.CopyConstruct(nil)
	result := col1.CopyConstruct(nil)

	ds1 := col1.GoDurations()
	ds2 := col2.GoDurations()
	rs := result.GoDurations()

	b.ResetTimer()
	for k := 0; k < b.N; k++ {
		result.ResizeGoDuration(1024, true)
		for i := 0; i < 1024; i++ {
			d1 := types.Duration{Duration: ds1[i]}
			d2 := types.Duration{Duration: ds2[i]}
			r, err := d1.Add(d2)
			if err != nil {
				b.Fatal(err)
			}
			rs[i] = r.Duration
		}
	}
}

// BenchmarkTimeRow measures row-at-a-time time comparison.
func BenchmarkTimeRow(b *testing.B) {
	chk1 := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDate)}, 1024)
	col1 := chk1.Column(0)
	for i := 0; i < 1024; i++ {
		col1.AppendTime(types.ZeroDate)
	}
	chk2 := chk1.CopyConstruct()
	result := chk1.CopyConstruct()

	b.ResetTimer()
	for k := 0; k < b.N; k++ {
		result.Reset()
		it1 := NewIterator4Chunk(chk1)
		it2 := NewIterator4Chunk(chk2)
		for r1, r2 := it1.Begin(), it2.Begin(); r1 != it1.End() && r2 != it2.End(); r1, r2 = it1.Next(), it2.Next() {
			d1 := r1.GetTime(0)
			d2 := r2.GetTime(0)
			if r := d1.Compare(d2); r > 0 {
				result.AppendTime(0, d1)
			} else {
				result.AppendTime(0, d2)
			}
		}
	}
}

// BenchmarkTimeVec measures vectorized time comparison over Times().
func BenchmarkTimeVec(b *testing.B) {
	chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDate)}, 1024)
	col1 := chk.Column(0)
	for i := 0; i < 1024; i++ {
		col1.AppendTime(types.ZeroDate)
	}
	col2 := col1.CopyConstruct(nil)
	result := col1.CopyConstruct(nil)

	ds1 := col1.Times()
	ds2 := col2.Times()
	rs := result.Times()

	b.ResetTimer()
	for k := 0; k < b.N; k++ {
		result.ResizeTime(1024, true)
		for i := 0; i < 1024; i++ {
			if r := ds1[i].Compare(ds2[i]); r > 0 {
				rs[i] = ds1[i]
			} else {
				rs[i] = ds2[i]
			}
		}
	}
}

// genNullCols builds n int64 columns of 1024 rows with ~50% random nulls.
func genNullCols(n int) []*Column {
	cols := make([]*Column, n)
	for i := range cols {
		cols[i] = NewColumn(types.NewFieldType(mysql.TypeLonglong), 1024)
		cols[i].ResizeInt64(1024, false)
		for j := 0; j < 1024; j++ {
			if rand.Intn(10) < 5 {
				cols[i].SetNull(j, true)
			}
		}
	}
	return cols
}

// TestVectorizedNulls cross-checks MergeNulls against a row-by-row OR.
func TestVectorizedNulls(t *testing.T) {
	for i := 0; i < 256; i++ {
		cols := genNullCols(4)
		lCol, rCol := cols[0], cols[1]
		vecResult, rowResult := cols[2], cols[3]
		vecResult.SetNulls(0, 1024, false)
		rowResult.SetNulls(0, 1024, false)
		vecResult.MergeNulls(lCol, rCol)
		for i := 0; i < 1024; i++ {
			rowResult.SetNull(i, lCol.IsNull(i) || rCol.IsNull(i))
		}

		for i := 0; i < 1024; i++ {
			require.Equal(t, vecResult.IsNull(i), rowResult.IsNull(i))
		}
	}
}

// TestResetColumn checks that Reset re-initializes elemBuf for the new type.
func TestResetColumn(t *testing.T) {
	col0 := NewColumn(types.NewFieldType(mysql.TypeVarString), 0)
	col1 := NewColumn(types.NewFieldType(mysql.TypeLonglong), 0)

	// using col0.reset() here will cause panic since it doesn't reset the elemBuf field which
	// is used by MergeNulls.
	col0.Reset(types.ETInt)
	col0.MergeNulls(col1)

	col := NewColumn(types.NewFieldType(mysql.TypeDatetime), 0)
	col.Reset(types.ETDuration)
	col.AppendDuration(types.Duration{})
	// using col.reset() above will let this assertion fail since the length of initialized elemBuf
	// is sizeTime.
	require.Equal(t, sizeGoDuration, len(col.data))
}

func BenchmarkMergeNullsVectorized(b *testing.B) {
	cols := genNullCols(3)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cols[0].MergeNulls(cols[1:]...)
	}
}

func BenchmarkMergeNullsNonVectorized(b *testing.B) {
	cols := genNullCols(3)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for i := 0; i < 1024; i++ {
			cols[0].SetNull(i, cols[1].IsNull(i) || cols[2].IsNull(i))
		}
	}
}

// TestColumnResizeInt64 checks the null bitmap across resizes and appends.
func TestColumnResizeInt64(t *testing.T) {
	var col = NewColumn(types.NewFieldType(mysql.TypeLonglong), 2)
	col.AppendUint64(11)
	col.AppendUint64(11)
	col.ResizeInt64(4, false)
	require.Equal(t, col.nullBitmap, []byte{0b1111})
	col.AppendUint64(11)
	require.Equal(t, col.nullBitmap, []byte{0b11111})
	col.AppendNull()
	require.Equal(t, col.nullBitmap, []byte{0b011111})
	col.ResizeUint64(11, false)
	require.Equal(t, col.nullBitmap, []byte{0b11111111, 0b111})
	col.ResizeUint64(7, true)
	require.Equal(t, col.nullBitmap, []byte{0})
	col.AppendUint64(32)
	col.AppendUint64(32)
	require.Equal(t, col.nullBitmap, []byte{0b10000000, 0b1})
}
package apimessages import ( "github.com/google/uuid" ) // Message domain message struct type Message struct { ID uuid.UUID `json:"id"` UserName string `json:"username"` Message string `json:"message"` } // MapToCreateResponse maps a message to a response func (m *Message) MapToCreateResponse() CreateMessageResponse { return CreateMessageResponse{ID: m.ID} }
package gen import ( "testing" "github.com/stretchr/testify/assert" ) func TestGetEmployeeRows(t *testing.T) { n := 20 rows := GetEmployeeRows(0, n) assert.NotNil(t, rows) nrows := len(rows) var x bool if nrows == n { x = true } assert.True(t, x) } func TestGetDeptEmp(t *testing.T) { n := 20 empRows := GetEmployeeRows(0, n) assert.NotNil(t, empRows) rows := GetDeptEmp(20, 0, n) assert.NotNil(t, rows) } func BenchmarkGetEmployeeRows(b *testing.B) { for i := 0; i < b.N; i++ { GetEmployeeRows(0, 30) } }
package apiserver type Config struct { Addr string `toml:"bind_addr"` DBURL string `toml:"database_url"` JWTSecret string `toml:"jwt_secret"` }
package util import ( fmt "fmt" "net" "strings" ) // Connect dials the given address and returns a net.Conn. The protoAddr argument should be prefixed with the protocol, // eg. "tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" func Connect(protoAddr string) (net.Conn, error) { proto, address := ProtocolAndAddress(protoAddr) conn, err := net.Dial(proto, address) return conn, err } // ProtocolAndAddress splits an address into the protocol and address components. // For instance, "tcp://127.0.0.1:8080" will be split into "tcp" and "127.0.0.1:8080". // If the address has no protocol prefix, the default is "tcp". func ProtocolAndAddress(listenAddr string) (string, string) { protocol, address := "tcp", listenAddr parts := strings.SplitN(address, "://", 2) if len(parts) == 2 { protocol, address = parts[0], parts[1] } return protocol, address } var localIpSegment = []byte{192, 168} // 如设置为 172.31,直接用 172, 31 作为变量传入就行 // SetLocalIPSegment(172, 31) func SetLocalIPSegment(segs ...byte) { localIpSegment = []byte{} for i := 0; i < len(segs) && i < 4; i++ { localIpSegment = append(localIpSegment, segs[i]) } } func GetLocalIPSegment() [2]byte { var segs [2]byte for i := 0; i < len(localIpSegment) && i < 2; i++ { segs[i] = localIpSegment[i] } return segs } // 获取本机 ip // 默认本地的 ip 段为 192.168,如果不是,调用此方法前先调用 SetLocalIPSegment 方法设置本地 ip 段 func LocalIP() (net.IP, error) { tables, err := net.Interfaces() if err != nil { return nil, err } for _, t := range tables { addrs, err := t.Addrs() if err != nil { return nil, err } for _, a := range addrs { ipnet, ok := a.(*net.IPNet) if !ok { continue } if v4 := ipnet.IP.To4(); v4 != nil { var matchd = true for i := 0; i < len(localIpSegment); i++ { if v4[i] != localIpSegment[i] { matchd = false } } if matchd { return v4, nil } } } } return nil, fmt.Errorf("cannot find local IP address") }
package main import ( "fmt" ) const ( iterations = 5000000 factorA = 16807 factorB = 48271 modValue = 2147483647 //2^32 - 1 comparisonMod = 65336 //2^16 ) func simulation(a,b int) (equal int) { for i := 0; i < iterations; i++ { a,b = genVal(a,factorA),genVal(b,factorB) if compareValues(a,b) { equal++ } } return } func genVal(prevVal,factor int) (a int) { a = (prevVal*factor) % modValue if factor == factorA { for a % 4 != 0 { a = (a*factor) % modValue } } else { for a % 8 != 0 { a = (a*factor) % modValue } } return } func compareValues(valA, valB int) (bool) { return (valA % 65536) == (valB % 65536) } func main() { fmt.Println(simulation(277,349)) }
package cacheCheck import ( "github.com/garyburd/redigo/redis" "log" "net/http" "strings" ) /* TODO: What we need to do here is to create our own response writer This response writer will wait for responses and write them to the cache automaticaly. */ var cacheHit bool = false type Middleware struct { hit bool c redis.Conn ks string } func NewMiddleware(c redis.Conn, keyspace string) *Middleware { return &Middleware{false, c, keyspace} } func (l *Middleware) ServeHTTP(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) { key := strings.Join([]string{l.ks, req.URL.String()}, "") exists, err := redis.String(l.c.Do("GET", key)) if err == nil && len(exists) > 0 { cacheHit = true //TODO: Set The expire cache header w.Header().Set("Content-Type", "application/json") w.Write([]byte(exists)) return } else { cacheHit = false log.Printf("This Was a miss: %v", key) next(w, req) } } /* TODO: Set the cache body TODO: Seet the cache headers */ func SetCache(key string, val string, c redis.Conn) { reply, err := redis.String(c.Do("SET", key, val)) log.Printf("reply: %v error: %v", reply, err) } func SetExpire(key string, ttl int, c redis.Conn) { reply, err := redis.Int(c.Do("EXPIRE", key, ttl)) log.Printf("reply: %v error: %v", reply, err) } func RemoveCache(key string, c redis.Conn) { reply, err := redis.String(c.Do("DEL", key)) log.Printf("Cache Deleted %v For: %v Error:", reply, key, err) }
package main import ( "fmt" ) func LongestNonrepeatingSubstr(s string) int { pos := 0 longest := 0 seen := make(map[byte]int) for pos < len(s) { char := s[pos] if _, ok := seen[char]; ok { if len(seen) > longest { longest = len(seen) } pos = seen[char] + 1 seen = make(map[byte]int) } else { seen[char] = pos pos++ } } if len(seen) > longest { longest = len(seen) } return longest } func main() { fmt.Println(LongestNonrepeatingSubstr("dvdp")) }
package request type ( // ID struct ID struct { ID string `json:"id"` } ) // Rules ... func (ID) Rules() map[string][]string { return map[string][]string{ "id": []string{"required"}, } } // Messages ... func (ID) Messages() map[string][]string { return nil }
package main // MIT License // // Copyright (c) 2018 Yuwono Bangun Nagoro // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
import ( "encoding/json" "fmt" "io/ioutil" "os" "sync" ) var wg sync.WaitGroup // aimed to beautify your wrecked JSON // to make it beauty like your crush and your ex func main() { args := os.Args if len(args) <= 1 { fmt.Println("😠 JSON files must be defined!") return } fileNames := args[1:] for _, fn := range fileNames { wg.Add(1) go func(fileName string) { defer wg.Done() rawJsonFile, err := os.Open(fileName) if err != nil { fmt.Printf("😢 Error on opening file %s\n%s\n", fileName, err.Error()) return } defer rawJsonFile.Close() rawJsonBody, err := ioutil.ReadAll(rawJsonFile) if err != nil { fmt.Printf("😢 Error on reading file %s\n%s\n", fileName, err.Error()) return } jsonData, err := validateJSON(string(rawJsonBody)) if err != nil { fmt.Printf("😢 Your JSON on file %s is invalid\n%s\n", fileName, err.Error()) return } rawJsonData, err := json.MarshalIndent(jsonData, "", "\t") if err != nil { fmt.Printf("😢 Error occured while beautifying your JSON on file %s\n%s\n", fileName, err.Error()) return } beautifiedPath := fmt.Sprintf("%s", fileName) beautifiedFile, err := os.Create(beautifiedPath) if err != nil { fmt.Printf("😢 Error occured while creating new file %s\n%s\n", fileName, err.Error()) return } beautifiedFile.Write(rawJsonData) fmt.Printf("😉 Done beautifying your JSON on file %s\n", fileName) defer beautifiedFile.Close() }(fn) } wg.Wait() } func validateJSON(body string) (map[string]interface{}, error) { var temporaryJSONMap map[string]interface{} err := json.Unmarshal([]byte(body), &temporaryJSONMap) return temporaryJSONMap, err }
package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "os" "os/signal" "strings" "syscall" "time" "github.com/bwmarrin/discordgo" "github.com/davecgh/go-spew/spew" "github.com/getsentry/sentry-go" "github.com/kuzmik/goelo2/src/bebot" // "github.com/kuzmik/goelo2/src/twitter" ) var ( //Debug - dump objects to console for debugging purposes Debug bool //Token - the authentication token that was provided when the bot was created in discord. Stored in 1password Token string //TokenFile - file that contains the token, to be read on program start. Keeps the token out of the processlist for shared hosts TokenFile string ) // Config - holds the application config state, as read from the json file type Config struct { Discord struct { APIKey string `json:"api_key"` } `json:"discord"` } func init() { flag.BoolVar(&Debug, "d", false, "Debug mode prints extra data to the console") flag.StringVar(&Token, "t", "", "Discord bot token") flag.StringVar(&TokenFile, "f", "config/secrets.json", "File that contains the bot token") flag.Parse() // If a token file is specified, read that. if TokenFile != "" { jsonData, err := ioutil.ReadFile(TokenFile) if err != nil { handleError("ERROR: Error reading JSON data:", err) os.Exit(1) } var cfg Config err = json.Unmarshal(jsonData, &cfg) if err != nil { handleError("ERROR: Error parsing JSON data:", err) os.Exit(2) } Token = cfg.Discord.APIKey } // If no there is no token specified, either via commandline or via file, bail out if Token == "" { fmt.Println("ERROR: you need to specify a token. Use --help for help") os.Exit(4) } // Set up the sentry reportig sentry.Init(sentry.ClientOptions{ Dsn: "https://3779a47dff1f4fb08d8c16e2f73f90f9@sentry.io/1509313", }) } func main() { dg, err := discordgo.New("Bot " + Token) if err != nil { handleError("Error creating bot", err) return } dg.AddHandler(botReady) dg.AddHandler(messageCreate) dg.AddHandler(messageUpdate) // Open a websocket connection to Discord and begin listening. 
err = dg.Open() if err != nil { handleError("Error during connecting:", err) return } // Wait here until CTRL-C or other term signal is received. fmt.Println("Bot is now running. Press CTRL-C to exit.") dch := make(chan os.Signal, 1) signal.Notify(dch, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill) <-dch // Cleanly close down the Discord session. dg.Close() } // Handles all errors, which includes sending to sentry func handleError(message string, err error) { fmt.Println(message, err) sentry.CaptureException(err) sentry.Flush(time.Second * 5) } // This function will be called (due to AddHandler above) every time a new // `Message` is created on any `Channel` that the autenticated bot has access to. func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) { // Ignore all messages created by the bot itself; not required, but a good practice. if m.Author.ID == s.State.User.ID { return } // Dump the `MessageCreate` struct to console if Debug == true { spew.Dump(m) // spew.Dump(s.GuildRoles(m.GuildID)) } // Get the `Channel` name because APPARENTLY it isnt included in `m` channel := "" _channel, err := s.State.Channel(m.ChannelID) if err != nil { handleError("Failure getting channel:", err) } // Don't accept DMs or any of the other channel types for now if _channel.Type != discordgo.ChannelTypeGuildText { return } if _channel.Name == "" { channel = "PRIVMSG" } else { channel = _channel.Name } // get the `Guild` which is the stupid name for a server server := "" if m.GuildID == "" { server = "PRIVMSG" } else { _guild, err := s.State.Guild(m.GuildID) if err != nil { fmt.Println("Failure getting guild:", err) } server = _guild.Name } timestamp, _ := m.Message.Timestamp.Parse() realMessage, err := m.Message.ContentWithMoreMentionsReplaced(s) if err != nil { realMessage = m.Message.Content } author := fmt.Sprintf("%s#%s", m.Author.Username, m.Author.Discriminator) // All that work to print this to the console. 
fmt.Printf("[%v] [%s] [%s] [%s] %s\n", timestamp, server, channel, author, realMessage) // If message is !honk, markov it up if m.Message.Content == "!honk" { //Bebot#Babble shit := bebot.Babble() _, err := s.ChannelMessageSend(m.ChannelID, shit) if err != nil { handleError("Failure sending message:", err) } } // If the message is "ping" reply with "Pong!" if m.Message.Content == "ping" { _, err := s.ChannelMessageSend(m.ChannelID, "pong") if err != nil { handleError("Failure sending message:", err) } } // Ignore anything that starts with ! for logging and markov purposes if strings.HasPrefix(m.Message.Content, "!") { return } // Don't log/markov private channels if Debug == true { spew.Dump(_channel.PermissionOverwrites) } for _, perm := range _channel.PermissionOverwrites { if perm.Deny != 0 { // there is a DENY permission setup for SOMEONE or SOMETHING, so bail on outta here //return } } // Log the chatmessage to the db and add it to the markov model msg := bebot.ChatMessage{ Timestamp: timestamp, ServerType: "discord", ServerID: m.Message.GuildID, Server: server, ChannelID: m.ChannelID, Channel: channel, UserID: m.Author.ID, User: author, Message: realMessage, } msg.Save() } // This function will be called (due to AddHandler above) every time a // `Message` is changed on any `Channel` that the autenticated bot has access to. func messageUpdate(s *discordgo.Session, m *discordgo.MessageUpdate) { if Debug == true { spew.Dump(m) } } // This function will be called (due to AddHandler above) when the bot receives // the "ready" event from Discord. func botReady(s *discordgo.Session, event *discordgo.Ready) { // Set the playing status... for fun? s.UpdateStatus(0, "!honk") }
package worker import "sync" import "github.com/nylo-andry/playupdate" type Pool struct { workerCount int updateService playupdate.UpdateService } func NewPool(workerCount int, updateService playupdate.UpdateService) *Pool { return &Pool{ workerCount, updateService, } } func (p *Pool) Start(macAddresses []string) { var wg sync.WaitGroup inputs := make(chan string) for i := 0; i < p.workerCount; i++ { wg.Add(1) go startWorker(inputs, &wg, p.updateService) } for _, mac := range macAddresses { inputs <- mac } close(inputs) wg.Wait() }