text
stringlengths
11
4.05M
package process

import "fmt"

// userMgr is the single server-side user manager instance. It is package-level
// because it is consulted from many places; it is created once in init.
var (
	userMgr *UserMgr
)

// UserMgr tracks the currently online users, keyed by user id.
type UserMgr struct {
	onlineUsers map[int]*UseProcess
}

// init creates the singleton user manager with room for 1024 users.
func init() {
	userMgr = &UserMgr{
		onlineUsers: make(map[int]*UseProcess, 1024),
	}
}

// AddOnlineUser registers up as online, keyed by its UserId.
func (mgr *UserMgr) AddOnlineUser(up *UseProcess) {
	mgr.onlineUsers[up.UserId] = up
}

// DelOnlineUser removes the user with the given id from the online set.
// Deleting a missing key is a no-op.
func (mgr *UserMgr) DelOnlineUser(userId int) {
	delete(mgr.onlineUsers, userId)
}

// GetAllOnlineUser returns the live map of online users.
// NOTE(review): callers receive the internal map, not a copy — confirm no
// caller mutates it concurrently.
func (mgr *UserMgr) GetAllOnlineUser() map[int]*UseProcess {
	return mgr.onlineUsers
}

// GetOnlineUserById looks up a user by id, returning an error when the user
// is not currently online.
func (mgr *UserMgr) GetOnlineUserById(userId int) (up *UseProcess, err error) {
	up, ok := mgr.onlineUsers[userId]
	if !ok {
		// the requested user is not online
		err = fmt.Errorf("用户%d 不存在", userId)
		return
	}
	return
}
// Copyright 2019 GoAdmin Core Team. All rights reserved.
// Use of this source code is governed by a Apache-2.0 style
// license that can be found in the LICENSE file.

package gin

import (
	"bytes"
	"errors"
	"net/http"
	"net/url"
	"strings"

	"github.com/GoAdminGroup/go-admin/adapter"
	"github.com/GoAdminGroup/go-admin/context"
	"github.com/GoAdminGroup/go-admin/engine"
	"github.com/GoAdminGroup/go-admin/modules/config"
	"github.com/GoAdminGroup/go-admin/plugins"
	"github.com/GoAdminGroup/go-admin/plugins/admin/models"
	"github.com/GoAdminGroup/go-admin/plugins/admin/modules/constant"
	"github.com/GoAdminGroup/go-admin/template/types"
	"github.com/gin-gonic/gin"
)

// Gin structure value is a Gin GoAdmin adapter.
type Gin struct {
	adapter.BaseAdapter
	ctx *gin.Context
	app *gin.Engine
}

// init registers this adapter with the GoAdmin engine at import time.
func init() {
	engine.Register(new(Gin))
}

// User implements the method Adapter.User.
func (gins *Gin) User(ctx interface{}) (models.UserModel, bool) {
	return gins.GetUser(ctx, gins)
}

// Use implements the method Adapter.Use.
func (gins *Gin) Use(app interface{}, plugs []plugins.Plugin) error {
	return gins.GetUse(app, plugs, gins)
}

// Content implements the method Adapter.Content.
func (gins *Gin) Content(ctx interface{}, getPanelFn types.GetPanelFn, fn context.NodeProcessor, btns ...types.Button) {
	gins.GetContent(ctx, getPanelFn, gins, btns, fn)
}

// HandlerFunc is the panel-producing handler signature accepted by Content.
type HandlerFunc func(ctx *gin.Context) (types.Panel, error)

// Content adapts a HandlerFunc into a gin.HandlerFunc served through the
// GoAdmin engine.
func Content(handler HandlerFunc) gin.HandlerFunc {
	return func(ctx *gin.Context) {
		engine.Content(ctx, func(ctx interface{}) (types.Panel, error) {
			return handler(ctx.(*gin.Context))
		})
	}
}

// SetApp implements the method Adapter.SetApp.
// The app argument must be a *gin.Engine.
func (gins *Gin) SetApp(app interface{}) error {
	var (
		eng *gin.Engine
		ok  bool
	)
	if eng, ok = app.(*gin.Engine); !ok {
		return errors.New("gin adapter SetApp: wrong parameter")
	}
	gins.app = eng
	return nil
}

// AddHandler implements the method Adapter.AddHandler.
func (gins *Gin) AddHandler(method, path string, handlers context.Handlers) {
	gins.app.Handle(strings.ToUpper(method), path, func(c *gin.Context) {
		ctx := context.NewContext(c.Request)

		// Forward gin path parameters to the wrapped request by appending
		// them to the raw query string (a ":id" param key becomes "id=<value>").
		for _, param := range c.Params {
			if c.Request.URL.RawQuery == "" {
				c.Request.URL.RawQuery += strings.ReplaceAll(param.Key, ":", "") + "=" + param.Value
			} else {
				c.Request.URL.RawQuery += "&" + strings.ReplaceAll(param.Key, ":", "") + "=" + param.Value
			}
		}

		// Run the GoAdmin handler chain against the adapted request.
		ctx.SetHandlers(handlers).Next()

		// Copy response headers back onto the gin writer.
		// NOTE(review): only head[0] is copied, so multi-valued headers are
		// truncated — confirm this is intended.
		for key, head := range ctx.Response.Header {
			c.Header(key, head[0])
		}

		// Relay the response body (when present) and status code.
		if ctx.Response.Body != nil {
			buf := new(bytes.Buffer)
			_, _ = buf.ReadFrom(ctx.Response.Body)
			c.String(ctx.Response.StatusCode, buf.String())
		} else {
			c.Status(ctx.Response.StatusCode)
		}
	})
}

// Name implements the method Adapter.Name.
func (*Gin) Name() string {
	return "gin"
}

// SetContext implements the method Adapter.SetContext.
// It panics when the argument is not a *gin.Context.
func (*Gin) SetContext(contextInterface interface{}) adapter.WebFrameWork {
	var (
		ctx *gin.Context
		ok  bool
	)
	if ctx, ok = contextInterface.(*gin.Context); !ok {
		panic("gin adapter SetContext: wrong parameter")
	}
	return &Gin{ctx: ctx}
}

// Redirect implements the method Adapter.Redirect.
// It sends the client to the configured login URL and aborts the chain.
func (gins *Gin) Redirect() {
	gins.ctx.Redirect(http.StatusFound, config.Url(config.GetLoginUrl()))
	gins.ctx.Abort()
}

// SetContentType implements the method Adapter.SetContentType.
// It is a no-op because Write sets the content type itself.
func (*Gin) SetContentType() {}

// Write implements the method Adapter.Write.
func (gins *Gin) Write(body []byte) {
	gins.ctx.Data(http.StatusOK, gins.HTMLContentType(), body)
}

// GetCookie implements the method Adapter.GetCookie.
func (gins *Gin) GetCookie() (string, error) {
	return gins.ctx.Cookie(gins.CookieKey())
}

// Lang implements the method Adapter.Lang.
// The language is read from the "__ga_lang" query parameter.
func (gins *Gin) Lang() string {
	return gins.ctx.Request.URL.Query().Get("__ga_lang")
}

// Path implements the method Adapter.Path.
func (gins *Gin) Path() string {
	return gins.ctx.Request.URL.Path
}

// Method implements the method Adapter.Method.
func (gins *Gin) Method() string {
	return gins.ctx.Request.Method
}

// FormParam implements the method Adapter.FormParam.
// It parses the request as a multipart form (up to 32 MiB in memory) and
// returns the POST form values; the parse error is deliberately ignored so
// non-multipart requests still yield whatever PostForm holds.
func (gins *Gin) FormParam() url.Values {
	_ = gins.ctx.Request.ParseMultipartForm(32 << 20)
	return gins.ctx.Request.PostForm
}

// IsPjax implements the method Adapter.IsPjax.
// A request is a pjax request when the pjax header is literally "true".
func (gins *Gin) IsPjax() bool {
	return gins.ctx.Request.Header.Get(constant.PjaxHeader) == "true"
}

// Query implements the method Adapter.Query.
func (gins *Gin) Query() url.Values {
	return gins.ctx.Request.URL.Query()
}
package qstring

import (
	"strings"
	"time"
)

// format is the datetime layout used when rendering time values into queries.
const format = "2006-01-02T15:04:05"

// DateTimeBuilder is used for querying by datetimes.
//
// Prefer not using builders directly, instead using the constructors.
type DateTimeBuilder struct {
	builder *strings.Builder
}

// write appends " <op> '<t formatted>'" to the underlying builder and returns
// the generic Builder so calls can be chained. All comparison methods below
// delegate here instead of repeating the same four writes.
func (b *DateTimeBuilder) write(op string, t time.Time) *Builder {
	b.builder.WriteString(" " + op + " '")
	b.builder.WriteString(t.Format(format))
	b.builder.WriteString("'")
	return &Builder{builder: b.builder}
}

// EQ appends an equality comparison against t.
func (b *DateTimeBuilder) EQ(t time.Time) *Builder {
	return b.write("=", t)
}

// NE appends an inequality comparison against t.
func (b *DateTimeBuilder) NE(t time.Time) *Builder {
	return b.write("!=", t)
}

// GT appends a strictly-greater comparison against t.
func (b *DateTimeBuilder) GT(t time.Time) *Builder {
	return b.write(">", t)
}

// GTE appends a greater-or-equal comparison against t.
func (b *DateTimeBuilder) GTE(t time.Time) *Builder {
	return b.write(">=", t)
}

// LT appends a strictly-less comparison against t.
func (b *DateTimeBuilder) LT(t time.Time) *Builder {
	return b.write("<", t)
}

// LTE appends a less-or-equal comparison against t.
func (b *DateTimeBuilder) LTE(t time.Time) *Builder {
	return b.write("<=", t)
}
// Package postgres contains an implementation of the storage.Backend backed by postgres.
package postgres

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgconn"
	"github.com/jackc/pgx/v5/pgtype"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/pomerium/pomerium/pkg/grpc/databroker"
	"github.com/pomerium/pomerium/pkg/grpc/registry"
	"github.com/pomerium/pomerium/pkg/protoutil"
	"github.com/pomerium/pomerium/pkg/storage"
)

// Names of the postgres schema objects and NOTIFY channels used by this backend.
var (
	schemaName              = "pomerium"
	migrationInfoTableName  = "migration_info"
	recordsTableName        = "records"
	recordChangesTableName  = "record_changes"
	recordChangeNotifyName  = "pomerium_record_change"
	recordOptionsTableName  = "record_options"
	leasesTableName         = "leases"
	serviceChangeNotifyName = "pomerium_service_change"
	servicesTableName       = "services"
)

// querier is the subset of pgx operations these helpers need; it is satisfied
// by both a pool/connection and a transaction.
type querier interface {
	Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error)
	Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error)
	QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row
}

// deleteChangesBefore removes change-log rows modified before cutoff.
func deleteChangesBefore(ctx context.Context, q querier, cutoff time.Time) error {
	_, err := q.Exec(ctx, `
		DELETE FROM `+schemaName+`.`+recordChangesTableName+`
		WHERE modified_at < $1
	`, cutoff)
	return err
}

// deleteExpiredServices removes service registrations that expired before
// cutoff and reports how many rows were deleted.
func deleteExpiredServices(ctx context.Context, q querier, cutoff time.Time) (rowCount int64, err error) {
	cmd, err := q.Exec(ctx, `
		DELETE FROM `+schemaName+`.`+servicesTableName+`
		WHERE expires_at < $1
	`, cutoff)
	if err != nil {
		return 0, err
	}
	return cmd.RowsAffected(), nil
}

// dup returns a deep copy of record.
func dup(record *databroker.Record) *databroker.Record {
	return proto.Clone(record).(*databroker.Record)
}

// enforceOptions applies the capacity option to a record type by deleting all
// but the newest GetCapacity() records of that type. A nil options or nil
// capacity is a no-op.
func enforceOptions(ctx context.Context, q querier, recordType string, options *databroker.Options) error {
	if options == nil || options.Capacity == nil {
		return nil
	}
	_, err := q.Exec(ctx, `
		DELETE FROM `+schemaName+`.`+recordsTableName+`
		WHERE type=$1 AND id NOT IN (
			SELECT id
			FROM `+schemaName+`.`+recordsTableName+`
			WHERE type=$1
			ORDER BY version DESC
			LIMIT $2
		)
	`, recordType, options.GetCapacity())
	return err
}

// getLatestRecordVersion returns the highest version in the change log, or
// zero with a nil error when the change log is empty.
func getLatestRecordVersion(ctx context.Context, q querier) (recordVersion uint64, err error) {
	err = q.QueryRow(ctx, `
		SELECT version
		FROM `+schemaName+`.`+recordChangesTableName+`
		ORDER BY version DESC
		LIMIT 1
	`).Scan(&recordVersion)
	if isNotFound(err) {
		err = nil
	}
	return recordVersion, err
}

// getNextChangedRecord returns the first change-log entry whose version is
// greater than afterRecordVersion, optionally filtered to recordType (when
// non-empty). Returns storage.ErrNotFound when no newer change exists.
func getNextChangedRecord(ctx context.Context, q querier, recordType string, afterRecordVersion uint64) (*databroker.Record, error) {
	var recordID string
	var version uint64
	var data []byte
	var modifiedAt pgtype.Timestamptz
	var deletedAt pgtype.Timestamptz
	query := `
		SELECT type, id, version, data, modified_at, deleted_at
		FROM ` + schemaName + `.` + recordChangesTableName + `
		WHERE version > $1
	`
	args := []any{afterRecordVersion}
	if recordType != "" {
		query += ` AND type = $2`
		args = append(args, recordType)
	}
	query += ` ORDER BY version ASC LIMIT 1 `
	// note: recordType doubles as the scan destination for the row's type column
	err := q.QueryRow(ctx, query, args...).Scan(&recordType, &recordID, &version, &data, &modifiedAt, &deletedAt)
	if isNotFound(err) {
		return nil, storage.ErrNotFound
	} else if err != nil {
		return nil, fmt.Errorf("error querying next changed record: %w", err)
	}
	a, err := protoutil.UnmarshalAnyJSON(data)
	if isUnknownType(err) {
		// the payload's proto type is no longer registered; substitute a
		// generic struct carrying just the id so the change is still surfaced
		a = protoutil.ToAny(protoutil.ToStruct(map[string]string{
			"id": recordID,
		}))
	} else if err != nil {
		return nil, fmt.Errorf("error unmarshaling changed record data: %w", err)
	}
	return &databroker.Record{
		Version:    version,
		Type:       recordType,
		Id:         recordID,
		Data:       a,
		ModifiedAt: timestamppbFromTimestamptz(modifiedAt),
		DeletedAt:  timestamppbFromTimestamptz(deletedAt),
	}, nil
}

// getOptions returns the stored options for a record type. A missing row
// yields an empty (not nil) Options.
func getOptions(ctx context.Context, q querier, recordType string) (*databroker.Options, error) {
	var capacity pgtype.Int8
	err := q.QueryRow(ctx, `
		SELECT capacity
		FROM `+schemaName+`.`+recordOptionsTableName+`
		WHERE type=$1
	`, recordType).Scan(&capacity)
	if err != nil && !isNotFound(err) {
		return nil, err
	}
	options := new(databroker.Options)
	if capacity.Valid {
		options.Capacity = proto.Uint64(uint64(capacity.Int64))
	}
	return options, nil
}

// getRecord fetches a single record by type and id. It returns
// storage.ErrNotFound both when the row is missing and when the stored data
// has a type that can no longer be unmarshaled.
func getRecord(ctx context.Context, q querier, recordType, recordID string) (*databroker.Record, error) {
	var version uint64
	var data []byte
	var modifiedAt pgtype.Timestamptz
	err := q.QueryRow(ctx, `
		SELECT version, data, modified_at
		FROM `+schemaName+`.`+recordsTableName+`
		WHERE type=$1 AND id=$2
	`, recordType, recordID).Scan(&version, &data, &modifiedAt)
	if isNotFound(err) {
		return nil, storage.ErrNotFound
	} else if err != nil {
		return nil, fmt.Errorf("postgres: failed to execute query: %w", err)
	}
	a, err := protoutil.UnmarshalAnyJSON(data)
	if isUnknownType(err) {
		return nil, storage.ErrNotFound
	} else if err != nil {
		return nil, fmt.Errorf("postgres: failed to unmarshal data: %w", err)
	}
	return &databroker.Record{
		Version:    version,
		Type:       recordType,
		Id:         recordID,
		Data:       a,
		ModifiedAt: timestamppbFromTimestamptz(modifiedAt),
	}, nil
}

// listRecords returns a page of records, optionally filtered by expr.
// Note the placeholder order: $1 is the offset and $2 the limit, because args
// is seeded as {offset, limit} before the filter appends more placeholders.
func listRecords(ctx context.Context, q querier, expr storage.FilterExpression, offset, limit int) ([]*databroker.Record, error) {
	args := []interface{}{offset, limit}
	query := `
		SELECT type, id, version, data, modified_at
		FROM ` + schemaName + `.` + recordsTableName + `
	`
	if expr != nil {
		query += "WHERE "
		err := addFilterExpressionToQuery(&query, &args, expr)
		if err != nil {
			return nil, fmt.Errorf("postgres: failed to add filter to query: %w", err)
		}
	}
	query += `
		ORDER BY type, id
		LIMIT $2
		OFFSET $1
	`
	rows, err := q.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("postgres: failed to execute query: %w", err)
	}
	defer rows.Close()
	var records []*databroker.Record
	for rows.Next() {
		var recordType, id string
		var version uint64
		var data []byte
		var modifiedAt pgtype.Timestamptz
		err = rows.Scan(&recordType, &id, &version, &data, &modifiedAt)
		if err != nil {
			return nil, fmt.Errorf("postgres: failed to scan row: %w", err)
		}
		a, err := protoutil.UnmarshalAnyJSON(data)
		if isUnknownType(err) {
			// unknown payload type: surface a placeholder carrying just the id
			a = protoutil.ToAny(protoutil.ToStruct(map[string]string{
				"id": id,
			}))
		} else if err != nil {
			return nil, fmt.Errorf("postgres: failed to unmarshal data: %w", err)
		}
		records = append(records, &databroker.Record{
			Version:    version,
			Type:       recordType,
			Id:         id,
			Data:       a,
			ModifiedAt: timestamppbFromTimestamptz(modifiedAt),
		})
	}
	err = rows.Err()
	if err != nil {
		return nil, fmt.Errorf("postgres: error iterating over rows: %w", err)
	}
	return records, nil
}

// listServices returns all registered services ordered by kind and endpoint.
func listServices(ctx context.Context, q querier) ([]*registry.Service, error) {
	var services []*registry.Service
	query := `
		SELECT kind, endpoint
		FROM ` + schemaName + `.` + servicesTableName + `
		ORDER BY kind, endpoint
	`
	rows, err := q.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("postgres: failed to execute query: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var kind, endpoint string
		err = rows.Scan(&kind, &endpoint)
		if err != nil {
			return nil, fmt.Errorf("postgres: failed to scan row: %w", err)
		}
		// an unrecognized kind maps to the zero ServiceKind via the value map
		services = append(services, &registry.Service{
			Kind:     registry.ServiceKind(registry.ServiceKind_value[kind]),
			Endpoint: endpoint,
		})
	}
	err = rows.Err()
	if err != nil {
		return nil, fmt.Errorf("postgres: error iterating over rows: %w", err)
	}
	return services, nil
}

// listTypes returns the distinct record types currently stored, sorted.
func listTypes(ctx context.Context, q querier) ([]string, error) {
	query := `
		SELECT DISTINCT type
		FROM ` + schemaName + `.` + recordsTableName + `
	`
	rows, err := q.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("postgres: failed to execute query: %w", err)
	}
	defer rows.Close()
	var types []string
	for rows.Next() {
		var recordType string
		err = rows.Scan(&recordType)
		if err != nil {
			return nil, fmt.Errorf("postgres: failed to scan row: %w", err)
		}
		types = append(types, recordType)
	}
	err = rows.Err()
	if err != nil {
		return nil, fmt.Errorf("postgres: error iterating over rows: %w", err)
	}
	sort.Strings(types)
	return types, nil
}

// maybeAcquireLease attempts to take (or renew) the named lease for leaseID
// with the given ttl. The upsert hands the lease over when it is expired or
// already held by leaseID; otherwise the current holder wins. The returned
// leaseHolderID is whoever holds the lease after the statement runs.
func maybeAcquireLease(ctx context.Context, q querier, leaseName, leaseID string, ttl time.Duration) (leaseHolderID string, err error) {
	tbl := schemaName + "." + leasesTableName
	expiresAt := timestamptzFromTimestamppb(timestamppb.New(time.Now().Add(ttl)))
	now := timestamptzFromTimestamppb(timestamppb.Now())
	err = q.QueryRow(ctx, `
		INSERT INTO `+tbl+` (name, id, expires_at)
		VALUES ($1, $2, $3)
		ON CONFLICT (name) DO UPDATE
		SET id=CASE WHEN `+tbl+`.expires_at<$4 OR `+tbl+`.id=$2 THEN $2 ELSE `+tbl+`.id END,
		    expires_at=CASE WHEN `+tbl+`.expires_at<$4 OR `+tbl+`.id=$2 THEN $3 ELSE `+tbl+`.expires_at END
		RETURNING `+tbl+`.id
	`, leaseName, leaseID, expiresAt, now).Scan(&leaseHolderID)
	return leaseHolderID, err
}

// putRecordAndChange writes one change-log entry and, in the same statement,
// upserts (or, for deletions, removes) the corresponding row in the records
// table. record.Version is updated in place from the returned version.
func putRecordAndChange(ctx context.Context, q querier, record *databroker.Record) error {
	data, err := jsonbFromAny(record.GetData())
	if err != nil {
		return fmt.Errorf("postgres: failed to convert any to json: %w", err)
	}
	modifiedAt := timestamptzFromTimestamppb(record.GetModifiedAt())
	deletedAt := timestamptzFromTimestamppb(record.GetDeletedAt())
	indexCIDR := &pgtype.Text{Valid: false}
	if cidr := storage.GetRecordIndexCIDR(record.GetData()); cidr != nil {
		indexCIDR.String = cidr.String()
		indexCIDR.Valid = true
	}
	query := `
		WITH t1 AS (
			INSERT INTO ` + schemaName + `.` + recordChangesTableName + `
			(type, id, data, modified_at, deleted_at)
			VALUES ($1, $2, $3, $4, $5)
			RETURNING *
		)
	`
	args := []any{
		record.GetType(), record.GetId(), data, modifiedAt, deletedAt,
	}
	if record.GetDeletedAt() == nil {
		query += `
			INSERT INTO ` + schemaName + `.` + recordsTableName + `
			(type, id, version, data, modified_at, index_cidr)
			VALUES ($1, $2, (SELECT version FROM t1), $3, $4, $6)
			ON CONFLICT (type, id) DO UPDATE
			SET version=(SELECT version FROM t1), data=$3, modified_at=$4, index_cidr=$6
			RETURNING ` + schemaName + `.` + recordsTableName + `.version
		`
		args = append(args, indexCIDR)
	} else {
		query += `
			DELETE FROM ` + schemaName + `.` + recordsTableName + `
			WHERE type=$1 AND id=$2
			RETURNING ` + schemaName + `.` + recordsTableName + `.version
		`
	}
	// a not-found here just means a delete matched no row; that is not an error
	err = q.QueryRow(ctx, query, args...).Scan(&record.Version)
	if err != nil && !isNotFound(err) {
		return fmt.Errorf("postgres: failed to execute query: %w", err)
	}
	return nil
}

// putService upserts a service registration with the given expiry.
func putService(ctx context.Context, q querier, svc *registry.Service, expiresAt time.Time) error {
	query := `
		INSERT INTO ` + schemaName + `.` + servicesTableName + ` (kind, endpoint, expires_at)
		VALUES ($1, $2, $3)
		ON CONFLICT (kind, endpoint) DO UPDATE
		SET expires_at=$3
	`
	_, err := q.Exec(ctx, query, svc.GetKind().String(), svc.GetEndpoint(), expiresAt)
	return err
}

// setOptions upserts the options row for a record type; a nil capacity is
// stored as SQL NULL.
func setOptions(ctx context.Context, q querier, recordType string, options *databroker.Options) error {
	capacity := pgtype.Int8{}
	if options != nil && options.Capacity != nil {
		capacity.Int64 = int64(options.GetCapacity())
		capacity.Valid = true
	}
	_, err := q.Exec(ctx, `
		INSERT INTO `+schemaName+`.`+recordOptionsTableName+` (type, capacity)
		VALUES ($1, $2)
		ON CONFLICT (type) DO UPDATE
		SET capacity=$2
	`, recordType, capacity)
	return err
}

// signalRecordChange broadcasts a record-change NOTIFY to listeners.
func signalRecordChange(ctx context.Context, q querier) error {
	_, err := q.Exec(ctx, `NOTIFY `+recordChangeNotifyName)
	return err
}

// signalServiceChange broadcasts a service-change NOTIFY to listeners.
func signalServiceChange(ctx context.Context, q querier) error {
	_, err := q.Exec(ctx, `NOTIFY `+serviceChangeNotifyName)
	return err
}

// jsonbFromAny marshals an Any to protojson bytes; nil maps to nil.
func jsonbFromAny(any *anypb.Any) ([]byte, error) {
	if any == nil {
		return nil, nil
	}
	return protojson.Marshal(any)
}

// timestamppbFromTimestamptz converts a pg timestamptz to a proto timestamp,
// mapping SQL NULL to nil.
func timestamppbFromTimestamptz(ts pgtype.Timestamptz) *timestamppb.Timestamp {
	if !ts.Valid {
		return nil
	}
	return timestamppb.New(ts.Time)
}

// timestamptzFromTimestamppb converts a proto timestamp to a pg timestamptz,
// mapping nil/invalid to SQL NULL.
func timestamptzFromTimestamppb(ts *timestamppb.Timestamp) pgtype.Timestamptz {
	if !ts.IsValid() {
		return pgtype.Timestamptz{}
	}
	return pgtype.Timestamptz{Time: ts.AsTime(), Valid: true}
}

// isNotFound reports whether err represents a missing-row condition.
func isNotFound(err error) bool {
	return errors.Is(err, pgx.ErrNoRows) || errors.Is(err, storage.ErrNotFound)
}

// isUnknownType reports whether err came from trying to unmarshal an Any
// whose proto type is not registered.
func isUnknownType(err error) bool {
	if err == nil {
		return false
	}
	return errors.Is(err, protoregistry.NotFound) ||
		strings.Contains(err.Error(), "unable to resolve") // protojson doesn't wrap errors so check for the string
}
package models

import (
	"time"

	"github.com/astaxie/beego/orm"
)

// TableName returns the database table backing FinancialOrderWithdrawal.
func (a *FinancialOrderWithdrawal) TableName() string {
	return FinancialOrderWithdrawalTBName()
}

// FinancialOrderWithdrawalQueryParam holds the filter/paging parameters for
// querying withdrawal records.
type FinancialOrderWithdrawalQueryParam struct {
	BaseQueryParam
	OrderId string `json:"orderId"` // order number to filter by (exact match)
}

// FinancialOrderWithdrawal is one withdrawal record of a financial order.
// Struct tag segments are space-separated as required by reflect.StructTag
// (the original tags ran json/form segments together, which go vet flags).
type FinancialOrderWithdrawal struct {
	Id             int       `orm:"pk;column(id)" json:"id" form:"id"`                                         // primary key
	OrderId        string    `orm:"column(order_id)" json:"orderId" form:"orderId"`                            // order number
	Quantity       float64   `orm:"column(quantity)" json:"quantity" form:"quantity"`                          // withdrawn amount
	WithdrawalTime int64     `orm:"column(withdrawal_time)" json:"withdrawalTime" form:"withdrawalTime"`       // withdrawal time (unix)
	Profit         float64   `orm:"column(profit)" json:"profit" form:"profit"`                                // profit
	YearProfit     float64   `orm:"column(year_profit)" json:"yearProfit" form:"yearProfit"`                   // annualized profit
	CreateTime     time.Time `orm:"type(datetime);column(create_time)" json:"createTime" form:"createTime"`    // creation time
}

// FinancialOrderWithdrawalPageList returns one page of withdrawal records plus
// the total (unpaged) count. params.Offset is treated as a 1-based page number.
func FinancialOrderWithdrawalPageList(params *FinancialOrderWithdrawalQueryParam) ([]*FinancialOrderWithdrawal, int64) {
	o := orm.NewOrm()
	query := o.QueryTable(FinancialOrderWithdrawalTBName())
	data := make([]*FinancialOrderWithdrawal, 0)
	// default sort column
	sortorder := "id"
	switch params.Sort {
	case "id":
		sortorder = "id"
	}
	if params.Order == "desc" {
		sortorder = "-" + sortorder
	}
	if params.OrderId != "" {
		query = query.Filter("order_id__exact", params.OrderId)
	}
	total, _ := query.Count()
	query.OrderBy(sortorder).Limit(params.Limit, (params.Offset-1)*params.Limit).All(&data)
	return data, total
}
package minimark

import (
	"strings"
	"testing"

	c "github.com/aziis98/parser-combinators"
	"github.com/aziis98/parser-combinators/examples/minimark/doc"
	"github.com/aziis98/parser-combinators/examples/minimark/parser"
	"github.com/stretchr/testify/assert"
)

// example1 is a small minimark document exercising headings, paragraphs and a
// list, used by both the test and the benchmark below.
var example1 = `
# Prova
## Prova

### Prova

Paragraph of some long text. Paragraph of some long text.
Paragraph of some long text.

Paragraph of some long text.

- First item of list
- Second item of this list
`

// TestMinimark checks the Heading parser in isolation and then the full
// document parser against example1. Parse errors are deliberately ignored;
// the equality assertions below fail on a wrong/nil result anyway.
func TestMinimark(t *testing.T) {
	{
		r, _ := c.ParseRuneReader(parser.Heading, strings.NewReader("### Prova"))
		assert.Equal(t, &doc.Heading{Level: 3, Text: "Prova"}, r)
	}
	{
		r, _ := c.ParseRuneReader(parser.Minimark, strings.NewReader(example1))

		// json, err := json.MarshalIndent(r, "", "    ")
		// if err != nil {
		// 	t.Fatal(err)
		// }
		// log.Printf(`%s`, json)

		assert.Equal(t,
			[]doc.MinimarkNode{
				&doc.Heading{Level: 1, Text: "Prova"},
				&doc.Heading{Level: 2, Text: "Prova"},
				&doc.Heading{Level: 3, Text: "Prova"},
				&doc.Paragraph{Text: "Paragraph of some long text.\nParagraph of some long text.\nParagraph of some long text."},
				&doc.Paragraph{Text: "Paragraph of some long text."},
				&doc.List{
					Items: []*doc.Item{
						{Depth: 0, Text: "First item of list"},
						{Depth: 0, Text: "Second item of this list"},
					},
				},
			},
			r,
		)
	}
}

// Benchmark1 measures parsing of example1; the result is sunk into r so the
// parse cannot be optimized away.
func Benchmark1(b *testing.B) {
	var r interface{}
	for n := 0; n < b.N; n++ {
		r, _ = c.ParseRuneReader(parser.Minimark, strings.NewReader(example1))
	}
	b.Log(r)
}
// Package slicesort contains utilities for easily sorting slices. package slicesort import ( "reflect" "sort" ) type st struct { vxs reflect.Value vf reflect.Value } func (s st) Len() int { return s.vxs.Len() } func (s st) Swap(i, j int) { vi := s.vxs.Index(i) vj := s.vxs.Index(j) t := reflect.ValueOf(vi.Interface()) vi.Set(vj) vj.Set(t) } func (s st) Less(i, j int) bool { vi := s.vxs.Index(i) vj := s.vxs.Index(j) return s.vf.Call([]reflect.Value{vi, vj})[0].Bool() } // Sort sorts a slice given a compare function. See example. func Sort(xs, compare interface{}) { sort.Sort(st{reflect.ValueOf(xs), reflect.ValueOf(compare)}) }
package main

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	_ "github.com/lib/pq"
)

// Connection settings for the local postgres instance.
const (
	host     = "localhost"
	port     = 5432
	pg_user  = "USER"
	password = "PASSWORD"
	dbname   = "DB_NAME"
)

// db is the shared connection pool, initialized once by initDB.
var db *sql.DB

func main() {
	initDB()
	defer db.Close()
	http.HandleFunc("/", userHandler)
	log.Fatal(http.ListenAndServe("localhost:8000", nil))
}

// user mirrors one row of the users table.
type user struct {
	Id         int    `json:"id"`
	Age        int    `json:"age"`
	First_name string `json:"first_name"`
	Last_name  string `json:"last_name"`
	Email      string `json:"email"`
}

// users is the JSON envelope returned by userHandler.
type users struct {
	Users []user `json:"users"`
}

// userHandler responds with all users serialized as JSON.
// Errors are checked before anything is written, and the payload is sent with
// w.Write — the original fmt.Fprintf(w, string(out)) treated the JSON as a
// format string, which would mangle any '%' in the data.
func userHandler(w http.ResponseWriter, r *http.Request) {
	usrs := users{}
	if err := queryUsers(&usrs); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	out, err := json.Marshal(usrs)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write(out)
}

// queryUsers loads every row of the users table into usrs.
func queryUsers(usrs *users) error {
	rows, err := db.Query(`
	SELECT id, age, first_name, last_name, email
	FROM users`)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		usr := user{}
		err = rows.Scan(
			&usr.Id,
			&usr.Age,
			&usr.First_name,
			&usr.Last_name,
			&usr.Email,
		)
		if err != nil {
			return err
		}
		usrs.Users = append(usrs.Users, usr)
	}
	return rows.Err()
}

// initDB opens and verifies the postgres connection, panicking on failure
// (the program cannot do anything useful without a database).
func initDB() {
	psqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
		"password=%s dbname=%s sslmode=disable",
		host, port, pg_user, password, dbname)
	var err error
	db, err = sql.Open("postgres", psqlInfo)
	if err != nil {
		panic(err)
	}
	err = db.Ping()
	if err != nil {
		panic(err)
	}
	fmt.Println("Successfully connected!")
}
package config

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq"
)

// Connection settings for the local development database.
const (
	Host     = "localhost"
	User     = "hieutm"
	Password = "hieutm211"
	DBname   = "basicweb"
)

// InitDB opens a postgres connection pool using the package constants.
// Note that sql.Open does not dial the server; the first query does.
func InitDB() (*sql.DB, error) {
	dsn := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", User, Password, Host, DBname)
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		return nil, err
	}
	return db, nil
}
package main

import (
	"fmt"
	"sync"
)

// Two producers each send 0..4 on a shared channel; a third goroutine waits
// for both to finish and then closes the channel, which ends the range loop
// in main. Only the closer touches close(), so there is no double-close.
func main() {
	c := make(chan int)

	var wg sync.WaitGroup
	wg.Add(2)

	producer := func() {
		defer wg.Done()
		for i := 0; i < 5; i++ {
			c <- i
		}
	}
	go producer()
	go producer()

	go func() {
		wg.Wait()
		close(c)
	}()

	for n := range c {
		fmt.Println(n)
	}
}

// go run -race main.go
// 0
// 0
// 1
// 2
// 3
// 4
// 1
// 2
// 3
// 4
package handler

// TODO(review): this admin-cases test is entirely commented out and the
// recorded response (resp) was never asserted on — either finish the test or
// delete it rather than leaving it disabled.

// import (
// 	"net/http"
// 	"net/http/httptest"
// 	"testing"
// )

// func TestAdminCases(t *testing.T) {
// 	httprr := httptest.NewRecorder()
// 	req, err := http.NewRequest("GET", "/v1/admin/cases", nil)
// 	if err != nil {
// 		t.Fatal(err)
// 	}

// 	Cases(httprr, req)
// 	resp := httprr.Result()
// }
// Package select_menu provides a select menu component. package select_menu
package test

import "testing"

// Each benchmark sorts a fresh 10000-element random slice per iteration.
// Generating the slice is excluded from the timed region with
// StopTimer/StartTimer so only the sort itself is measured — in the original
// the (O(n)) generation was timed together with the sort, skewing results
// most for the fast algorithms.

// BenchmarkBubbleSort measures BubbleSort on fresh random input.
func BenchmarkBubbleSort(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		s := GenRandomSlice(10000)
		b.StartTimer()
		BubbleSort(s)
	}
}

// BenchmarkInsertSort measures InsertSort on fresh random input.
func BenchmarkInsertSort(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		s := GenRandomSlice(10000)
		b.StartTimer()
		InsertSort(s)
	}
}

// BenchmarkQuickSort measures QuickSort on fresh random input.
func BenchmarkQuickSort(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		s := GenRandomSlice(10000)
		b.StartTimer()
		QuickSort(s)
	}
}

// BenchmarkHeapSort measures HeapSort on fresh random input.
func BenchmarkHeapSort(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		s := GenRandomSlice(10000)
		b.StartTimer()
		HeapSort(s)
	}
}
// Package libvirt provides a cluster-destroyer for libvirt clusters. package libvirt
// Package models provide the basic model of the db
package models

import (
	"github.com/2liang/mcache/models/base"
)

// Init wires up the database layer: it initializes the "dbcache" xorm engine
// and eagerly creates the master and slave connections.
// NOTE(review): errors from these calls (if any) are not surfaced here —
// confirm base.DbCache reports failures itself.
func Init() {
	base.DbCache.InitXorm("dbcache")
	base.DbCache.GetMaster()
	base.DbCache.GetSlave()
}
package errno

// System-level error codes (the 1x range covers startup failures).
const (
	ESysInvalidPrjHome  = 11 // project home directory missing or invalid
	ESysInitConfFail    = 12 // configuration failed to load
	ESysInitLogFail     = 13 // logging subsystem failed to initialize
	ESysSavePidFileFail = 14 // pid file could not be written
)
package smpp34

import (
	"bytes"
	"encoding/binary"
)

var (
	// Required QuerySmResp Fields
	reqQSMRespFields = []string{
		MESSAGE_ID,
		FINAL_DATE,
		MESSAGE_STATE,
		ERROR_CODE,
	}
)

// QuerySmResp is the SMPP query_sm_resp PDU: a header plus the mandatory
// fields listed in reqQSMRespFields. It carries no TLV fields.
type QuerySmResp struct {
	*Header
	mandatoryFields map[string]Field
	tlvFields       map[uint16]*TLVField
}

// NewQuerySmResp parses the PDU body b (everything after the header) into a
// QuerySmResp.
func NewQuerySmResp(hdr *Header, b []byte) (*QuerySmResp, error) {
	r := bytes.NewBuffer(b)

	fields, _, err := create_pdu_fields(reqQSMRespFields, r)
	if err != nil {
		return nil, err
	}

	s := &QuerySmResp{Header: hdr, mandatoryFields: fields}

	return s, nil
}

// GetField returns a mandatory field by name. MESSAGE_STATE and ERROR_CODE
// are re-decoded as single-byte fixed fields.
// NOTE(review): the binary.Read error is ignored; a short field would yield a
// zero value silently — confirm create_pdu_fields guarantees the byte exists.
func (s *QuerySmResp) GetField(f string) Field {
	switch f {
	case MESSAGE_STATE, ERROR_CODE:
		var v uint8
		binary.Read(bytes.NewBuffer(s.mandatoryFields[f].ByteArray()), binary.BigEndian, &v)
		return NewFixedField(v)
	default:
		return s.mandatoryFields[f]
	}
}

// Fields returns all mandatory fields keyed by name.
func (s *QuerySmResp) Fields() map[string]Field {
	return s.mandatoryFields
}

// MandatoryFieldsList returns the names of this PDU's required fields.
func (s *QuerySmResp) MandatoryFieldsList() []string {
	return reqQSMRespFields
}

// Ok always reports success for this PDU type.
func (s *QuerySmResp) Ok() bool {
	return true
}

// GetHeader returns the PDU header.
func (s *QuerySmResp) GetHeader() *Header {
	return s.Header
}

// SetField sets a mandatory field after validating the name/value pair.
func (s *QuerySmResp) SetField(f string, v interface{}) error {
	if s.validate_field(f, v) {
		field := NewField(f, v)
		if field != nil {
			s.mandatoryFields[f] = field
			return nil
		}
	}
	return FieldValueErr
}

// SetSeqNum sets the header sequence number.
func (s *QuerySmResp) SetSeqNum(i uint32) {
	s.Header.Sequence = i
}

// SetTLVField always fails: query_sm_resp carries no TLV fields.
func (s *QuerySmResp) SetTLVField(t, l int, v []byte) error {
	return TLVFieldPduErr
}

// validate_field reports whether f is a required field and v a valid value.
func (s *QuerySmResp) validate_field(f string, v interface{}) bool {
	if included_check(s.MandatoryFieldsList(), f) && validate_pdu_field(f, v) {
		return true
	}
	return false
}

// TLVFields returns the (always empty) TLV map.
func (s *QuerySmResp) TLVFields() map[uint16]*TLVField {
	return s.tlvFields
}

// writeFields serializes the mandatory fields in their required order.
func (s *QuerySmResp) writeFields() []byte {
	b := []byte{}

	for _, i := range s.MandatoryFieldsList() {
		v := s.mandatoryFields[i].ByteArray()
		b = append(b, v...)
	}

	return b
}

// Writer serializes the full PDU: a 16-byte header (length, id, status,
// sequence) followed by the mandatory fields.
func (s *QuerySmResp) Writer() []byte {
	b := s.writeFields()
	// total length includes the 16-byte header itself
	h := packUi32(uint32(len(b) + 16))
	h = append(h, packUi32(uint32(s.Header.Id))...)
	h = append(h, packUi32(uint32(s.Header.Status))...)
	h = append(h, packUi32(s.Header.Sequence)...)

	return append(h, b...)
}
// Copyright 2020, Homin Lee <homin.lee@suapapa.net>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"github.com/suapapa/go_devices/pcf8574clcd"

	"periph.io/x/periph/conn/i2c/i2creg"
	"periph.io/x/periph/host"
)

// main drives a 16x2 character LCD behind a PCF8574 I2C expander: it opens
// the default I2C bus, enables the backlight and prints a greeting on both
// rows. Every step panics on error via chk (acceptable for a demo binary).
func main() {
	_, err := host.Init()
	chk(err)

	bus, err := i2creg.Open("")
	chk(err)

	dev, err := pcf8574clcd.New(
		bus,
		0x27, /* pcf8574.DefaultAddr */
		16, 2,
	)
	chk(err)

	err = dev.BackLight(true)
	chk(err)
	err = dev.SetCursor(0, 0)
	chk(err)
	err = dev.Write("Hello~")
	chk(err)
	err = dev.SetCursor(0, 1)
	chk(err)
	err = dev.Write("RaspberryPi!!")
	chk(err)
}

// chk panics on any non-nil error.
func chk(err error) {
	if err != nil {
		panic(err)
	}
}
package pageContext

import (
	"google.golang.org/appengine/datastore"
)

// PageContext datastore: ",noindex" causes json naming problems !!!!!!!!!!!!!!!!!!!!!!!!!!
// Page key is the parent key.
type PageContext struct {
	// ContextKey references the datastore entity providing this page's context.
	ContextKey *datastore.Key
}

// PageContexts is a []*PageContext
type PageContexts []*PageContext
package middleware

import (
	sentinelPlugin "github.com/alibaba/sentinel-golang/adapter/gin"
	"github.com/alibaba/sentinel-golang/core/system"
	"github.com/gin-gonic/gin"
	"go.uber.org/zap"
)

// Sentinel 限流
// Sentinel returns a gin middleware that rate-limits inbound traffic using
// the BBR adaptive strategy once inbound QPS reaches triggerCount. Rule
// loading happens once, when the middleware is constructed; a failure to
// load rules is fatal because the service would otherwise run unprotected.
func Sentinel(triggerCount float64) gin.HandlerFunc {
	if _, err := system.LoadRules([]*system.Rule{
		{
			MetricType:   system.InboundQPS,
			TriggerCount: triggerCount,
			Strategy:     system.BBR,
		},
	}); err != nil {
		zap.L().Fatal("Unexpected error", zap.Error(err))
	}
	return sentinelPlugin.SentinelMiddleware()
}
package http

import (
	"fmt"
	"net/http"

	"HelloGo/src/http/wp"
)

// Request handles an incoming HTTP request: it parses the form data and
// writes a fixed placeholder response. The ParseForm error was previously
// discarded; a malformed body now yields 400 instead of being ignored.
func Request(rw http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(rw, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintln(rw, "xxxxxxxxxxxxxxx")
}

// StartService starts the worker-pool service.
func StartService() {
	wp.RunService()
}
package lexer // import( // "dictionary" // ) import( "github.com/Evedel/fortify/src/dictionary" ) // constants for token guessing const ( s_udf = iota s_str = iota s_nmb = iota s_cmd = iota ) func Tokenise(source string) (Tokenised []dictionary.Token) { curpos := 0 var t dictionary.Token t.Id = dictionary.None t.IdName = "none" t.Value = "" word := "" for curpos < len(source) { word += string(source[curpos]) if id, ok := dictionary.SpecialSymbol[word]; ok { t.Id = id t.IdName = dictionary.SpecialSymbolReverse[id] } else if id, ok := dictionary.KeyWordRaw[word]; ok { t.Id = id t.IdName = dictionary.KeyWordRawReverse[id] } else if id, ok := dictionary.KeyWordBackslash[word]; ok { t.Id = id t.IdName = dictionary.KeyWordBackslashReverse[id] } if t.Id == dictionary.None { for backpos := len(word) - 1; backpos > -1; backpos-- { if id, ok := dictionary.SpecialSymbol[word[backpos:]]; ok { t.Id = id t.IdName = word[backpos:] } else if id, ok := dictionary.KeyWordRaw[word[backpos:]]; ok { t.Id = id t.IdName = word[backpos:] } else if id, ok := dictionary.KeyWordBackslash[word[backpos:]]; ok { t.Id = id t.IdName = word[backpos:] } if t.Id != dictionary.None { Tokenised = append(Tokenised, dictionary.Token{ dictionary.Word, "word", word[:backpos]}) break } } } if t.Id != dictionary.None { Tokenised = append(Tokenised, t) t.Id = dictionary.None t.IdName = "None" t.Value = "" word = "" } curpos += 1 } return }
package statemachine // State contains the name of the state type State struct { Name string } // NewState returns a new instance of the State struct func NewState(name string) *State { return &State{ Name: name, } }
// uniquePaths returns the number of monotone lattice paths from the top-left
// to the bottom-right corner of an m x n grid, i.e. C(m+n-2, min(m,n)-1).
func uniquePaths(m int, n int) int {
	if m == 1 || n == 1 {
		return 1
	}
	// Choose the smaller of the two for fewer multiplication steps.
	if m > n {
		return c(m+n-2, n-1)
	}
	return c(m+n-2, m-1)
}

// c computes the binomial coefficient C(m, n) by interleaving multiplication
// and division: after step i the accumulator equals C(m-n+i, i), which is
// always an integer, so every intermediate division is exact. (The original
// multiplied everything first and then divided sequentially, which risked
// overflow and relied on each partial quotient being integral.)
func c(m, n int) int {
	res := 1
	for i := 1; i <= n; i++ {
		res = res * (m - n + i) / i
	}
	return res
}
package main

import (
	"log"
	"net/http"
	"strconv"

	"github.com/vlad-belogrudov/gopl/pkg/color"
	"github.com/vlad-belogrudov/gopl/pkg/surface"
)

// main serves an SVG surface plot, letting clients override rendering
// parameters via query string (width, height, xyrange, cells, topcolor,
// bottomcolor). Any malformed parameter results in 400.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var err error
		if err = r.ParseForm(); err != nil {
			log.Println("cannot parse params: ", err)
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		params := surface.DefaultSurfaceParams()
		for k, v := range r.Form {
			switch k {
			case "width":
				params.Width, err = strconv.Atoi(v[0])
				if err != nil {
					log.Println("cannot parse width: ", err)
					w.WriteHeader(http.StatusBadRequest)
					return
				}
			case "height":
				params.Height, err = strconv.Atoi(v[0])
				if err != nil {
					log.Println("cannot parse height ", err)
					w.WriteHeader(http.StatusBadRequest)
					return
				}
			case "xyrange":
				// bitSize must be 32 or 64; the original passed 10, which is
				// not a valid float size for strconv.ParseFloat.
				params.XYRange, err = strconv.ParseFloat(v[0], 64)
				if err != nil {
					log.Println("cannot parse xyrange: ", err)
					w.WriteHeader(http.StatusBadRequest)
					return
				}
			case "cells":
				params.Cells, err = strconv.Atoi(v[0])
				if err != nil {
					log.Println("cannot parse cells: ", err)
					w.WriteHeader(http.StatusBadRequest)
					return
				}
			case "topcolor":
				// accepts 0x-prefixed or decimal 24-bit RGB
				c, err := strconv.ParseUint(v[0], 0, 24)
				if err != nil {
					log.Println("cannot parse topcolor: ", err)
					w.WriteHeader(http.StatusBadRequest)
					return
				}
				params.TopColor = color.Color{
					Red:   color.ColorByte((c & (0xFF << 16)) >> 16),
					Green: color.ColorByte((c & (0xFF << 8)) >> 8),
					Blue:  color.ColorByte(c & 0xFF),
				}
			case "bottomcolor":
				c, err := strconv.ParseUint(v[0], 0, 24)
				if err != nil {
					log.Println("cannot parse bottomcolor: ", err)
					w.WriteHeader(http.StatusBadRequest)
					return
				}
				params.BottomColor = color.Color{
					Red:   color.ColorByte((c & (0xFF << 16)) >> 16),
					Green: color.ColorByte((c & (0xFF << 8)) >> 8),
					Blue:  color.ColorByte(c & 0xFF)}
			}
		}
		w.Header().Set("Content-Type", "image/svg+xml")
		surface.Surface(w, params)
	})
	log.Fatal(http.ListenAndServe("localhost:8000", nil))
}
package db

import (
	"database/sql"
	"strconv"
	"sync"

	"github.com/textileio/go-textile/pb"
	"github.com/textileio/go-textile/repo"
	"github.com/textileio/go-textile/util"
)

// NotificationDB persists pb.Notification records in the sqlite
// "notifications" table, serializing all access through the shared
// modelStore mutex.
type NotificationDB struct {
	modelStore
}

// NewNotificationStore wires a NotificationDB to the shared handle and lock.
func NewNotificationStore(db *sql.DB, lock *sync.Mutex) repo.NotificationStore {
	return &NotificationDB{modelStore{db, lock}}
}

// Add inserts a notification inside a transaction. The read flag is always
// persisted as false (unread) regardless of the incoming message.
func (c *NotificationDB) Add(notification *pb.Notification) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	tx, err := c.db.Begin()
	if err != nil {
		return err
	}
	stm := `insert into notifications(id, date, actorId, subject, subjectId, blockId, target, type, body, read) values(?,?,?,?,?,?,?,?,?,?)`
	stmt, err := tx.Prepare(stm)
	if err != nil {
		log.Errorf("error in tx prepare: %s", err)
		// BUGFIX: the transaction was previously leaked on prepare failure.
		_ = tx.Rollback()
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(
		notification.Id,
		util.ProtoNanos(notification.Date),
		notification.Actor,
		notification.SubjectDesc,
		notification.Subject,
		notification.Block,
		notification.Target,
		int32(notification.Type),
		notification.Body,
		false,
	)
	if err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}

// Get returns the notification with the given id, or nil when absent.
// BUGFIX: the id is now bound as a query parameter instead of being
// concatenated into the SQL string (SQL-injection hazard).
func (c *NotificationDB) Get(id string) *pb.Notification {
	c.lock.Lock()
	defer c.lock.Unlock()
	res := c.handleQuery("select * from notifications where id=?;", id)
	if len(res.Items) == 0 {
		return nil
	}
	return res.Items[0]
}

// Read marks a single notification as read.
func (c *NotificationDB) Read(id string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("update notifications set read=1 where id=?", id)
	return err
}

// ReadAll marks every notification as read.
func (c *NotificationDB) ReadAll() error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("update notifications set read=1")
	return err
}

// List returns up to limit notifications ordered by date descending,
// starting strictly after the notification identified by offset (when
// non-empty). BUGFIX: offset is now bound as a parameter rather than
// concatenated (SQL-injection hazard); limit is a plain int and is safe
// to inline.
func (c *NotificationDB) List(offset string, limit int) *pb.NotificationList {
	c.lock.Lock()
	defer c.lock.Unlock()
	limitClause := " order by date desc limit " + strconv.Itoa(limit) + ";"
	if offset != "" {
		return c.handleQuery(
			"select * from notifications where date<(select date from notifications where id=?)"+limitClause,
			offset,
		)
	}
	return c.handleQuery("select * from notifications" + limitClause)
}

// CountUnread returns the number of unread notifications (0 on scan error).
func (c *NotificationDB) CountUnread() int {
	c.lock.Lock()
	defer c.lock.Unlock()
	row := c.db.QueryRow("select Count(*) from notifications where read=0;")
	var count int
	_ = row.Scan(&count)
	return count
}

// Delete removes one notification by id.
func (c *NotificationDB) Delete(id string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("delete from notifications where id=?", id)
	return err
}

// DeleteByActor removes every notification produced by the given actor.
func (c *NotificationDB) DeleteByActor(actorId string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("delete from notifications where actorId=?", actorId)
	return err
}

// DeleteBySubject removes every notification about the given subject.
func (c *NotificationDB) DeleteBySubject(subjectId string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("delete from notifications where subjectId=?", subjectId)
	return err
}

// DeleteByBlock removes every notification tied to the given block.
func (c *NotificationDB) DeleteByBlock(blockId string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("delete from notifications where blockId=?", blockId)
	return err
}

// handleQuery runs stm with the given bound args and maps each row into a
// pb.Notification. Query/scan errors are logged and skipped, never
// propagated; callers always receive a (possibly empty) list.
func (c *NotificationDB) handleQuery(stm string, args ...interface{}) *pb.NotificationList {
	list := &pb.NotificationList{Items: make([]*pb.Notification, 0)}
	rows, err := c.db.Query(stm, args...)
	if err != nil {
		log.Errorf("error in db query: %s", err)
		return list
	}
	for rows.Next() {
		var id, actorId, subject, subjectId, blockId, target, body string
		var dateInt int64
		var typeInt, readInt int
		if err := rows.Scan(&id, &dateInt, &actorId, &subject, &subjectId, &blockId, &target, &typeInt, &body, &readInt); err != nil {
			log.Errorf("error in db scan: %s", err)
			continue
		}
		list.Items = append(list.Items, &pb.Notification{
			Id:          id,
			Date:        util.ProtoTs(dateInt),
			Actor:       actorId,
			SubjectDesc: subject,
			Subject:     subjectId,
			Block:       blockId,
			Target:      target,
			Type:        pb.Notification_Type(typeInt),
			Body:        body,
			Read:        readInt == 1,
		})
	}
	return list
}
/*
 * KSQL
 *
 * This is a swagger spec for ksqldb
 *
 * API version: 1.0.0
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package swagger

// ShowListResponseQueries is one entry in a query-listing response.
// NOTE(review): generated code — field semantics below are inferred from
// the JSON names only; confirm against the ksqlDB REST API reference.
type ShowListResponseQueries struct {
	// QueryString is the KSQL statement text of the query.
	QueryString string `json:"queryString,omitempty"`
	// Sinks — presumably the sink(s) the query writes to; confirm.
	Sinks string `json:"sinks,omitempty"`
	// Id is the query identifier.
	Id string `json:"id,omitempty"`
}
package util // TransportStatus is an int alias that indicates the status of a Transport type TransportStatus int // Various statuses for transports const ( Unknown TransportStatus = iota Running Stopped )
/*
 * Copyright (c) 2019. Alexey Shtepa <as.shtepa@gmail.com> LICENSE MIT
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 */

// Round-trip tests: each test dumps a typed value to raw bytes with
// memdump and decodes it back with the matching typed decoder (Bool, Int,
// Float64, ...), expecting the original value. memdump and the decoders
// are defined elsewhere in this package.
package bindata

import "testing"

// eqTest reports an error when the decoded value r differs from the
// original p; b (the intermediate byte dump) is included for diagnostics.
func eqTest(t *testing.T, p, b, r interface{}) {
	if p != r {
		t.Errorf("%v -> %v -> %v", p, b, r)
	} else {
		t.Logf("%v -> %v -> %v", p, b, r)
	}
}

func TestBool(t *testing.T) {
	P := true
	B := memdump(P)
	R, _ := Bool(B)
	eqTest(t, P, B, R)
}

func TestInt(t *testing.T) {
	var P int = 10
	B := memdump(P)
	R, _ := Int(B)
	eqTest(t, P, B, R)
}

func TestInt8(t *testing.T) {
	var P int8 = 10
	B := memdump(P)
	R, _ := Int8(B)
	eqTest(t, P, B, R)
}

func TestInt16(t *testing.T) {
	var P int16 = 10
	B := memdump(P)
	R, _ := Int16(B)
	eqTest(t, P, B, R)
}

func TestInt32(t *testing.T) {
	var P int32 = 10
	B := memdump(P)
	R, _ := Int32(B)
	eqTest(t, P, B, R)
}

func TestInt64(t *testing.T) {
	var P int64 = 10
	B := memdump(P)
	R, _ := Int64(B)
	eqTest(t, P, B, R)
}

func TestUint(t *testing.T) {
	var P uint = 10
	B := memdump(P)
	R, _ := Uint(B)
	eqTest(t, P, B, R)
}

func TestUint8(t *testing.T) {
	var P uint8 = 10
	B := memdump(P)
	R, _ := Uint8(B)
	eqTest(t, P, B, R)
}

func TestUint16(t *testing.T) {
	var P uint16 = 10
	B := memdump(P)
	R, _ := Uint16(B)
	eqTest(t, P, B, R)
}

func TestUint32(t *testing.T) {
	var P uint32 = 10
	B := memdump(P)
	R, _ := Uint32(B)
	eqTest(t, P, B, R)
}

func TestUint64(t *testing.T) {
	var P uint64 = 10
	B := memdump(P)
	R, _ := Uint64(B)
	eqTest(t, P, B, R)
}

func TestUintptr(t *testing.T) {
	var P uintptr = 10
	B := memdump(P)
	R, _ := Uintptr(B)
	eqTest(t, P, B, R)
}

func TestFloat32(t *testing.T) {
	var P float32 = 10.10
	B := memdump(P)
	R, _ := Float32(B)
	eqTest(t, P, B, R)
}

func TestFloat64(t *testing.T) {
	var P float64 = 10.10
	B := memdump(P)
	R, _ := Float64(B)
	eqTest(t, P, B, R)
}

func TestComplex64(t *testing.T) {
	var P complex64 = 10.10 + 10i
	B := memdump(P)
	R, _ := Complex64(B)
	eqTest(t, P, B, R)
}

func TestComplex128(t *testing.T) {
	var P complex128 = 10.10 + 10i
	B := memdump(P)
	R, _ := Complex128(B)
	eqTest(t, P, B, R)
}

func TestString(t *testing.T) {
	var P = "abcdf"
	B := memdump(P)
	R, _ := String(B)
	eqTest(t, P, B, R)
}
package resolver

import (
	"github.com/taktakty/netlabi/testdata"

	"github.com/stretchr/testify/require"

	"strconv"
	"strings"
	"testing"
)

// TestIpaddrMutations exercises the updateIpaddr GraphQL mutation against
// fixture data. `c` (the GraphQL test client) and ipSegmentTestData are
// defined elsewhere in this package — presumably set up by a shared
// test-main/fixture file; confirm there.
func TestIpaddrMutations(t *testing.T) {
	testData := ipSegmentTestData

	t.Run("Update", func(t *testing.T) {
		// Target: the second ipaddr of the second segment in the fixtures.
		ipaddrID := testData[1].Ipaddr[1].ID

		// The field values the mutation should persist and echo back.
		p := struct {
			id     string
			status int
			ipType int
			note   string
		}{
			id:     string(ipaddrID),
			status: 4,
			ipType: 4,
			note:   "test ipaddr updated",
		}

		// Build the mutation input literal by hand (ints unquoted,
		// strings quoted), then wrap it with the shared response selection.
		status := strconv.Itoa(p.status)
		ipType := strconv.Itoa(p.ipType)
		input := `{ id:"` + p.id + `", status:` + status + `, type:` + ipType + `, note:"` + p.note + `" }`
		q := strings.Join([]string{"mutation {updateIpaddr(input:", input, ")", testdata.IpaddrResp, "}"}, "")

		var resp struct {
			UpdateIpaddr testdata.IpaddrRespStruct
		}
		c.MustPost(q, &resp)

		// The mutation response must reflect every updated field.
		require.Equal(t, p.id, resp.UpdateIpaddr.ID)
		require.Equal(t, p.status, resp.UpdateIpaddr.Status)
		require.Equal(t, p.ipType, resp.UpdateIpaddr.Type)
		require.Equal(t, p.note, resp.UpdateIpaddr.Note)
	})
}
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"testing"

	"github.com/google/go-cmp/cmp"

	"k8s.io/test-infra/prow/github"
	"k8s.io/test-infra/prow/github/fakegithub"
)

// TestAcceptInvitations verifies that acceptInvitations (defined elsewhere
// in this package) accepts every pending repository invitation on the fake
// GitHub client, leaving none behind.
func TestAcceptInvitations(t *testing.T) {
	testCases := []struct {
		id string
		// invitations pending on the fake client before the call,
		// keyed by invitation id.
		invitations map[int]github.UserRepoInvitation
	}{
		{
			id: "no invitations to accept",
		},
		{
			id: "one invitation to accept",
			invitations: map[int]github.UserRepoInvitation{
				1: {InvitationID: 1, Repository: &github.Repo{FullName: "foo/bar"}},
			},
		},
		{
			id: "multiple invitations per client to accept",
			invitations: map[int]github.UserRepoInvitation{
				1: {InvitationID: 1, Repository: &github.Repo{FullName: "foo/bar"}},
				2: {InvitationID: 2, Repository: &github.Repo{FullName: "james/bond"}},
				3: {InvitationID: 3, Repository: &github.Repo{FullName: "captain/hook"}},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.id, func(t *testing.T) {
			fgh := &fakegithub.FakeClient{UserRepoInvitations: tc.invitations}
			// second argument false — presumably a dry-run flag disabled;
			// confirm against acceptInvitations' signature.
			if err := acceptInvitations(fgh, false); err != nil {
				t.Fatalf("error wasn't expected: %v", err)
			}
			actualInvitations, err := fgh.ListCurrentUserRepoInvitations()
			if err != nil {
				t.Fatalf("error not expected: %v", err)
			}
			// After a successful run, no invitations may remain (nil slice).
			var expected []github.UserRepoInvitation
			if diff := cmp.Diff(actualInvitations, expected); diff != "" {
				t.Fatal(diff)
			}
		})
	}
}
package responses

import (
	"time"
)

type (
	// ItemResponse is the API representation of a single item: its core
	// repository attributes plus impression counters, owning user, and tags.
	ItemResponse struct {
		ID          uint64        `json:"id"`
		GithubURL   string        `json:"github_url"`
		Author      string        `json:"author"`
		Name        string        `json:"name"`
		Description string        `json:"description"`
		CreatedAt   time.Time     `json:"created_at"`
		View        uint          `json:"view"`
		Star        uint          `json:"star"`
		User        UserResponse  `json:"user"`
		Tags        []TagResponse `json:"tags"`
	}

	// ItemListResponse wraps a collection of ItemResponse values.
	ItemListResponse struct {
		Items []ItemResponse
	}
)

// NewItemResponse builds an ItemResponse carrying only the core item
// fields; impressions, user, and tags are attached later via the setters.
func NewItemResponse(id uint64, githubURL, author, name, description string, createdAt time.Time) ItemResponse {
	var resp ItemResponse
	resp.ID = id
	resp.GithubURL = githubURL
	resp.Author = author
	resp.Name = name
	resp.Description = description
	resp.CreatedAt = createdAt
	return resp
}

// SetImpression records the view and star counters on the response.
func (r *ItemResponse) SetImpression(view, star uint) {
	r.View, r.Star = view, star
}

// SetUser attaches the owning user's representation.
func (r *ItemResponse) SetUser(userResponse UserResponse) {
	r.User = userResponse
}

// AppendTag adds one tag representation to the response's tag list.
func (r *ItemResponse) AppendTag(tagResponse TagResponse) {
	r.Tags = append(r.Tags, tagResponse)
}
package components

import (
	"html/template"

	"github.com/GoAdminGroup/go-admin/template/types"
)

// PopupAttribute accumulates the settings of a popup/modal component and
// renders it through the shared "popup" template. Every Set* method
// returns the receiver (as types.PopupAttribute) so calls can be chained.
type PopupAttribute struct {
	Name       string
	ID         string
	Body       template.HTML
	Footer     template.HTML
	FooterHTML template.HTML
	Title      template.HTML
	Size       string
	HideFooter bool
	Height     string
	Width      string
	Draggable  bool
	types.Attribute
}

// SetID sets the DOM id of the popup.
func (compo *PopupAttribute) SetID(value string) types.PopupAttribute {
	compo.ID = value
	return compo
}

// SetTitle sets the popup title HTML.
func (compo *PopupAttribute) SetTitle(value template.HTML) types.PopupAttribute {
	compo.Title = value
	return compo
}

// SetFooter sets the footer content.
func (compo *PopupAttribute) SetFooter(value template.HTML) types.PopupAttribute {
	compo.Footer = value
	return compo
}

// SetFooterHTML sets raw footer HTML (separate from Footer; how the two
// interact is decided by the "popup" template — confirm there).
func (compo *PopupAttribute) SetFooterHTML(value template.HTML) types.PopupAttribute {
	compo.FooterHTML = value
	return compo
}

// SetWidth sets the popup width (CSS value).
func (compo *PopupAttribute) SetWidth(width string) types.PopupAttribute {
	compo.Width = width
	return compo
}

// SetHeight sets the popup height (CSS value).
func (compo *PopupAttribute) SetHeight(height string) types.PopupAttribute {
	compo.Height = height
	return compo
}

// SetDraggable marks the popup as draggable.
func (compo *PopupAttribute) SetDraggable() types.PopupAttribute {
	compo.Draggable = true
	return compo
}

// SetHideFooter hides the footer area entirely.
func (compo *PopupAttribute) SetHideFooter() types.PopupAttribute {
	compo.HideFooter = true
	return compo
}

// SetBody sets the popup body HTML.
func (compo *PopupAttribute) SetBody(value template.HTML) types.PopupAttribute {
	compo.Body = value
	return compo
}

// SetSize sets the popup size class.
func (compo *PopupAttribute) SetSize(value string) types.PopupAttribute {
	compo.Size = value
	return compo
}

// GetContent composes the final HTML using the theme's "popup" template.
// TemplateList and Separation are provided by the embedded types.Attribute.
func (compo *PopupAttribute) GetContent() template.HTML {
	return ComposeHtml(compo.TemplateList, compo.Separation, *compo, "popup")
}
// Copyright 2021 BoCloud // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types_test import ( . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fabedge/fabedge/pkg/common/constants" "github.com/fabedge/fabedge/pkg/operator/types" ) var _ = Describe("Endpoint", func() { It("should equal if all fields are equal", func() { e1 := types.Endpoint{ ID: "test", Name: "edge2", IP: "192.168.0.1", Subnets: []string{"2.2.0.0/64"}, } e2 := types.Endpoint{ ID: "test", Name: "edge2", IP: "192.168.0.1", Subnets: []string{"2.2.0.0/64"}, } Expect(e1.Equal(e2)).Should(BeTrue()) }) DescribeTable("isValid should return false", func(ep types.Endpoint) { Expect(ep.IsValid()).Should(BeFalse()) }, Entry("with invalid ip", types.Endpoint{ IP: "2.2.2.257", Subnets: []string{"2.2.0.0/16"}, }), Entry("with invalid subets", types.Endpoint{ IP: "2.2.2.255", Subnets: []string{"2.2.0.0/33"}, }), Entry("with empty ip and subnets", types.Endpoint{ IP: "", Subnets: nil, }), ) }) var _ = Describe("GenerateNewEndpointFunc", func() { newEndpoint := types.GenerateNewEndpointFunc("C=CH, O=strongSwan, CN={node}") node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "edge1", Annotations: map[string]string{ constants.KeyNodeSubnets: "2.2.0.1/26,2.2.0.128/26", }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ { Type: corev1.NodeInternalIP, Address: 
"192.168.1.1", }, }, }, } endpoint := newEndpoint(node) It("should replace {node} in id format", func() { Expect(endpoint.ID).Should(Equal("C=CH, O=strongSwan, CN=edge1")) }) It("should extract subnets from annotations", func() { Expect(endpoint.Subnets).Should(ContainElement("2.2.0.1/26")) Expect(endpoint.Subnets).Should(ContainElement("2.2.0.128/26")) }) It("should extract ip from node.status.address", func() { Expect(endpoint.IP).Should(Equal("192.168.1.1")) }) })
package service import ( "go/internal/pkg/model" "gorm.io/gorm" ) type Session interface { Create(m *model.Session, tx *gorm.DB) (*model.Session, error) Update(m *model.Session, tx *gorm.DB) error Delete(m *model.Session, tx *gorm.DB) error FindOneBy(criteria map[string]interface{}) (*model.Session, error) FindAll(page, size int, order string) ([]*model.Session, error) Count(criteria map[string]interface{}) int } //private struct type sessionServices struct { db *gorm.DB } //constructor func NewSessionServices(db *gorm.DB) Session { return &sessionServices{db: db} } func (svc *sessionServices) Create(m *model.Session, tx *gorm.DB) (*model.Session, error) { err := tx.Create(&m).Error if err != nil { return nil, err } return m, nil } func (svc *sessionServices) Update(m *model.Session, tx *gorm.DB) error { err := tx.Model(&m).Where("ID=?", m.ID).Updates(&m).Error if err != nil { return err } return nil } func (svc *sessionServices) Delete(m *model.Session, tx *gorm.DB) error { err := tx.Model(&m).Where("ID=?", m.ID).Delete(&m).Error if err != nil { return err } return nil } func (svc *sessionServices) FindBy(criteria map[string]interface{}) ([]*model.Session, error) { var data []*model.Session err := svc.db.Where(criteria).Find(&data).Error if err != nil { return nil, err } return data, nil } func (svc *sessionServices) FindOneBy(criteria map[string]interface{}) (*model.Session, error) { var m model.Session err := svc.db.Where(criteria).Find(&m).Error if err != nil { return nil, err } return &m, nil } func (svc *sessionServices) FindAll(page, size int, order string) ([]*model.Session, error) { var data []*model.Session if page == 0 || size == 0 { page, size = 1, 10 } offset := (page - 1) * size if order == "" { order = "created desc" } err := svc.db.Limit(size).Offset(offset).Order(order).Find(&data).Error if err != nil { return nil, err } return data, nil } func (svc *sessionServices) Count(criteria map[string]interface{}) int { var result int64 err := 
svc.db.Model(model.Session{}).Count(&result).Error if err != nil { return 0 } if len(criteria) >= 1 { err := svc.db.Model(model.Session{}).Where(criteria).Count(&result).Error if err != nil { return 0 } } return int(result) }
// 419 scams - "confidence trick". Aka "Nigerian"
// en.m.wikipedia.org/wiki/419_scams.
package combat

var (
	// scammerPhrases is the pool of stock phrases characteristic of
	// advance-fee ("419") scam emails; scammerWordFunc samples from it.
	scammerPhrases = []string{
		"from the desk of barrister",
		"your assistance is needed",
		"I am seeking for a reliable person",
		"role as the next of kin",
		"you will henceforth stand to get",
		"millions of dollars",
		"forty percent",
		"wire transfer",
		"official government stamps",
		"cheque",
		"Western Union",
	}
)

// scammerWordFunc returns round+1 phrases for the given round number.
// chooseNRandomly is defined elsewhere in this package — presumably it
// picks a random sample without replacement; confirm there.
func scammerWordFunc(round int) []string {
	return chooseNRandomly(scammerPhrases, round+1)
}
package problem0225 // MyQueue 队列实现 type MyQueue struct { data []int size int capacity int } // InitQueue 初始化队列 func InitQueue() MyQueue { return MyQueue{ data: make([]int, 10), size: 0, capacity: 10, } } // Push 入队列 func (queue *MyQueue) Push(x int) { if queue.size >= queue.capacity { tmp := make([]int, queue.capacity*2) copy(tmp, queue.data) queue.data = tmp queue.capacity = queue.capacity * 2 } queue.data[queue.size] = x queue.size++ } // Pop 出队列 func (queue *MyQueue) Pop() int { tmp := queue.data[0] for i := 0; i < queue.size; i++ { queue.data[i] = queue.data[i+1] } queue.size-- return tmp } // Top 栈顶元素 func (queue *MyQueue) Top() int { return queue.data[0] } // Empty 队列是否为空 func (queue *MyQueue) Empty() bool { return queue.size == 0 } // MyStack 栈 type MyStack struct { queue MyQueue } // Constructor 构造器 func Constructor() MyStack { return MyStack{ queue: InitQueue(), } } // Push 入栈 func (stack *MyStack) Push(x int) { stack.queue.Push(x) for i := 1; i < stack.queue.size; i++ { stack.queue.Push(stack.queue.Pop()) } } // Pop 出栈 func (stack *MyStack) Pop() int { return stack.queue.Pop() } // Top 队列头部元素 func (stack *MyStack) Top() int { return stack.queue.Top() } // Empty 队列是否为空 func (stack *MyStack) Empty() bool { return stack.queue.Empty() }
// Copyright 2020. Akamai Technologies, Inc
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// createGroupCmd represents the createGroup command: it POSTs a group name
// and URL to the Diagnostic Tools v2 end-user-links endpoint and prints
// the generated diagnostic link (as JSON when the jsonString flag is set,
// otherwise as colorized text). doHTTPRequest, clientTypeKey/Value,
// jsonString, colorPrintln, getReportedTime, printResponseError and the
// linkText*/userDiagnostics* strings are defined elsewhere in this package.
var createGroupCmd = &cobra.Command{
	Use:     userDiagnosticsCreateGroupUse,
	Aliases: []string{"creategroup", "createGroup"},
	Args:    cobra.ExactArgs(2), // args[0] = group name, args[1] = URL
	Short:   userDiagnosticsCreateGroupShortDescription,
	Long:    userDiagnosticsCreateGroupLongDescription,
	Run: func(cmd *cobra.Command, args []string) {
		var requestStruct CreateGroup
		requestStruct.GroupName = args[0]
		requestStruct.URL = args[1]
		encPayload, err := json.Marshal(requestStruct)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		resp, byt := doHTTPRequest("POST", "/diagnostic-tools/v2/end-user-links?"+clientTypeKey+"="+clientTypeValue, &encPayload)
		// 201 Created signals the link was generated.
		if resp.StatusCode == 201 {
			var responseStruct map[string]string
			var responseStructJson DiagnosticLinkResponse
			err := json.Unmarshal(*byt, &responseStruct)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			if jsonString {
				// JSON output mode: echo the inputs plus the generated link.
				responseStructJson.GroupName = args[0]
				responseStructJson.Url = args[1]
				responseStructJson.ReportedTime = getReportedTime()
				responseStructJson.DiagnosticLink = responseStruct["diagnosticLink"]
				resJson, _ := json.MarshalIndent(responseStructJson, "", " ")
				fmt.Println(string(resJson))
				return
			}
			// Human-readable output mode.
			colorPrintln("green", "\n"+linkText1)
			fmt.Println(linkText2)
			fmt.Printf("\n" + linkText3)
			colorPrintln("cyan", responseStruct["diagnosticLink"])
		} else {
			printResponseError(byt)
		}
	},
}

// init registers the command under the user-diagnostics parent command.
func init() {
	userDiagnosticsCmd.AddCommand(createGroupCmd)
}
package textbox

// Backend abstracts the display/input layer a textbox UI runs on (e.g. a
// terminal implementation). Event and Buffer are defined elsewhere in
// this package.
type Backend interface {
	// Size returns the backend's current dimensions. The order of the two
	// values (width/height vs. height/width) is not evident here — confirm
	// against implementations.
	Size() (int, int)
	// EventsChan returns the channel on which input events are delivered.
	EventsChan() chan Event
	// Flush renders the given buffer to the backend.
	Flush(*Buffer)
	// Close releases any resources held by the backend.
	Close()
}
package main

import "fmt"

// isPrime reports whether x is a prime number.
//
// BUGFIX: the previous version returned true for x < 2 (0, 1, and every
// negative number are not prime). Trial division now also stops at
// sqrt(x) (k*k <= x) instead of x-1, which is dramatically faster for the
// 200000-iteration loop in main while classifying exactly the same numbers.
func isPrime(x int) bool {
	if x < 2 {
		return false
	}
	for k := 2; k*k <= x; k++ {
		if x%k == 0 {
			return false
		}
	}
	return true
}

// main prints an "X" for every prime and an "O" for every composite in
// [2, 200000).
func main() {
	for k := 2; k < 200000; k++ {
		if isPrime(k) {
			fmt.Print("X")
		} else {
			fmt.Print("O")
		}
	}
}
// Demonstrates Go's typing rules for untyped vs. typed constants and
// declared variables.
package main

import "fmt"
import "reflect"

const (
	a     = 10 // untyped constant: adapts to the type of the context
	d int = 30 // typed constant: participates in arithmetic strictly as int
)

func main() {
	var b uint8
	b = 20

	// a is untyped, so it implicitly becomes uint8 in this expression.
	m := a + b
	fmt.Println(m)
	// Passed to an interface{} parameter, an untyped int constant
	// defaults to int.
	fmt.Println(reflect.TypeOf(a))

	// := gives c the default type int for an untyped integer literal.
	c := 0x7f
	fmt.Println(reflect.TypeOf(c))

	// BUGFIX: b + c and b + d previously mixed uint8 with int, which is a
	// compile error in Go (no implicit numeric conversions between
	// declared variables/typed constants); explicit conversions required.
	n := int(b) + c
	fmt.Println(n)

	l := int(b) + d
	fmt.Println(l)
}
package frida_go

import (
	"fmt"
	"github.com/a97077088/frida-go/cfrida"
	jsoniter "github.com/json-iterator/go"
	"unsafe"
)

// Bus wraps a frida message-bus instance.
//
// BUGFIX: the struct previously declared a single field `CObj
// *BusSignalConnect`, while every method referenced b.instance, b.ptr and
// b.BusSignalConnect — none of which existed, so the file could not
// compile. The fields below match the actual usage in BusFromInst.
type Bus struct {
	instance uintptr        // raw handle of the native frida bus object
	ptr      unsafe.Pointer // same handle as an unsafe.Pointer
	*BusSignalConnect
}

// Post serializes message to JSON and posts it on the bus with data.
func (b *Bus) Post(message interface{}, data []byte) {
	cfrida.Frida_bus_post(b.instance, jsoniter.Wrap(message).ToString(), data)
}

// Attach synchronously attaches to the bus.
func (b *Bus) Attach() (bool, error) {
	isatt, err := cfrida.Frida_bus_attach_sync(b.instance, 0)
	return isatt, err
}

// Description returns a printable identifier, or "" for a nil handle.
func (b *Bus) Description() string {
	if b.instance == 0 {
		return ""
	}
	return fmt.Sprintf(`Frida.Bus()`)
}

// IsClosed reports whether the bus has been detached.
func (b *Bus) IsClosed() bool {
	return cfrida.Frida_bus_is_detached(b.instance)
}

// Free releases the underlying native object.
func (b *Bus) Free() {
	cfrida.G_object_unref(b.instance)
}

// BusFromInst
// Create a new object from an existing object instance pointer. A
// finalizer is registered so the native object is unreferenced when the
// wrapper is garbage collected.
func BusFromInst(inst uintptr) *Bus {
	dl := new(Bus)
	dl.instance = inst
	dl.ptr = unsafe.Pointer(dl.instance)
	dl.BusSignalConnect = NewBusSignalConnect(dl.instance)
	setFinalizer(dl, (*Bus).Free)
	return dl
}
package config

// Config is the package-wide, statically initialized configuration: which
// log fields to print, which services to preload, and the ANSI color /
// format codes used when printing.
var Config = struct {
	Fields      []string
	Services    []string
	PrintColor  PrintColor
	PrintFormat PrintFormat
}{
	// Log fields to print
	Fields: []string{
		"time",
		"file",
		"msg",
	},
	// Names of the services to preload
	Services: []string{
		"service-etrip-approval",
		"service-etrip-bill",
		"service-finance-bill",
		"service-fin-mgr",
		"service-etrip-app-gateway",
		"service-glp-gateway",
		"service-mybank-gateway",
		"service-wacai-gateway",
		"service-finance-timing-task",
		"service-finance-kafka-task",
		"service-freight-task",
		"service-freight-approval",
		"service-finance-supplement-task",
	},
	// Print color configuration.
	// NOTE(review): per the ANSI table below, 40 is a *background* code
	// and 1 is the "highlight" attribute — confirm FrontColor and
	// BackgroundColor are consumed as intended.
	PrintColor: PrintColor{
		FrontColor:      40,
		BackgroundColor: 1,
	},
	PrintFormat: PrintFormat{
		TimeFormat: 32,
		FileFormat: 35,
		MsgFormat:  33,
		ErrWarning: 31,
	},
}

/*
// foreground  background  color
// ---------------------------------------
// 30          40          black
// 31          41          red
// 32          42          green
// 33          43          yellow
// 34          44          blue
// 35          45          magenta
// 36          46          cyan
// 37          47          white
//
// code  meaning
// -------------------------
// 0     terminal default
// 1     highlight
// 4     underline
// 5     blink
// 7     reverse video
// 8     invisible
*/

// PrintColor holds ANSI foreground/background color codes.
type PrintColor struct {
	FrontColor      int
	BackgroundColor int
}

// PrintFormat holds the ANSI code used for each printed log field plus
// the code used for error/warning output.
type PrintFormat struct {
	TimeFormat int
	FileFormat int
	MsgFormat  int
	ErrWarning int
}
package main

import (
	"fmt"
	"strconv"
	"sort"
	"sync"
	// "runtime"
)

// SingleHash reads one value from stdin and returns
// crc32(data) + "~" + crc32(md5(data)), printing each intermediate step.
// DataSignerMd5 and DataSignerCrc32 are provided elsewhere (presumably the
// course's signer helpers).
func SingleHash() string {
	var data string
	fmt.Scanln(&data)
	md5 := DataSignerMd5(data) // cannot be multiplexed, so called inline
	wg := &sync.WaitGroup{}

	// First crc32 runs in a goroutine; note the immediate unbuffered
	// receive below makes this effectively sequential.
	crc32Chan := make(chan string)
	wg.Add(1)
	go func(data string, result chan string) {
		result <- DataSignerCrc32(data)
		wg.Done()
	}(data, crc32Chan)
	crc32 := <-crc32Chan

	// Second crc32 hashes the md5 digest (passed in as `data`).
	crc32md5Chan := make(chan string)
	wg.Add(1)
	go func(data string, result chan string) {
		result <- DataSignerCrc32(data)
		wg.Done()
	}(md5, crc32md5Chan)
	crc32md5 := <-crc32md5Chan

	wg.Wait()
	singHsh := crc32 + "~" + crc32md5
	fmt.Println("md5:", md5)
	fmt.Println("crc32:", crc32)
	fmt.Println("crc32md5:", crc32md5)
	fmt.Println("singHsh:", singHsh)
	return singHsh
}

// MultiHash computes crc32(th + singHsh) for one worker index th.
func MultiHash(th int, singHsh string) string {
	var data string
	data = strconv.Itoa(th) + singHsh
	crc32 := DataSignerCrc32(data)
	fmt.Println(th, " crc32:", crc32)
	return crc32
}

// main fans MultiHash out over indices 0..5 concurrently, collects the six
// results through a buffered channel, sorts them lexicographically, and
// concatenates them into the final result.
func main() {
	step1 := SingleHash()
	step2 := make([]string, 0, 6)
	multiHshChan := make(chan string, 6)
	wg := &sync.WaitGroup{}
	for i := 0; i < 6; i++ {
		wg.Add(1)
		go func(i int, step1 string, multiHshChan chan string) {
			result := MultiHash(i, step1)
			multiHshChan <- result
			wg.Done()
		}(i, step1, multiHshChan)
	}
	wg.Wait()
	close(multiHshChan)
	// Drain the closed channel; order of receipt is nondeterministic.
	for i := range multiHshChan {
		step2 = append(step2, i)
	}
	// fmt.Println("UNSORTED:", step2)
	sort.Slice(step2, func(i, j int) bool { return step2[i] < step2[j] })
	// fmt.Println("SORTED", step2)
	var result string
	for i := range step2 {
		result += step2[i]
	}
	fmt.Println("RESULT", result)
}
// Package main prints a simple development greeting.
package main

import "fmt"

// main writes the greeting to standard output.
func main() {
	const greeting = "Hello from development"
	fmt.Println(greeting)
}
/* Copyright 2019 The MayaData Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package generic import ( "testing" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/json" "openebs.io/metac/apis/metacontroller/v1alpha1" "openebs.io/metac/controller/generic" "openebs.io/metac/test/integration/framework" k8s "openebs.io/metac/third_party/kubernetes" ) // TestCleanUninstall will verify if GenericController can be // used to implement clean uninstall requirements. // // A clean uninstall implies when a workload specific Namespace // is removed from kubernetes cluster, the associated CRDs and CRs // should get removed from this cluster. This should work even in // the cases where CRs are set with finalizers and the corresponding // controllers i.e. pods are no longer available due to the deletion // of this workload namespace. 
func TestCleanUninstall(t *testing.T) { // namespace to setup GenericController ctlNSNamePrefix := "gctl-test" // name of the GenericController ctlName := "clean-uninstall-ctrl" // name of the target namespace which is watched by GenericController targetNamespaceName := "target-ns" // name of the target resource(s) that are created // and are expected to get deleted upon deletion // of target namespace targetResName := "my-target" f := framework.NewFixture(t) defer f.TearDown() // create namespace to setup GenericController resources ctlNS := f.CreateNamespaceGen(ctlNSNamePrefix) var err error // --------------------------------------------------- // Create the target namespace i.e. target under test // --------------------------------------------------- // // NOTE: // Targeted CustomResources will be set in this namespace targetNamespace, err := f.GetTypedClientset().CoreV1().Namespaces().Create( &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: targetNamespaceName, }, }, ) if err != nil { t.Fatal(err) } // setup some random CRDs, some of which are cluster scoped // while others are namespace scoped // define a cluster scoped CStorPoolClaim CRD & CR with finalizers cpcCRD, cpcClient, _ := f.SetupClusterCRDAndItsCR( "CStorPoolClaim", targetResName, framework.SetFinalizers([]string{"protect.abc.io", "protect.def.io"}), ) // define a namespace scoped CStorVolumeReplica CRD & CR with finalizers cvrCRD, cvrClient, _ := f.SetupNamespaceCRDAndItsCR( "CStorVolumeReplica", targetNamespace.GetName(), targetResName, framework.SetFinalizers([]string{"protect.xyz.io", "protect.ced.io"}), ) // ------------------------------------------------------------ // Define the "reconcile logic" for finalize i.e. 
delete event // ------------------------------------------------------------ // // NOTE: // This gets triggered upon deletion of target namespace // // NOTE: // This is a multi process reconciliation strategy: // Stage 1: remove finalizers from custom resources // Stage 2: delete custom resources that dont have finalizers // Stage 3: delete custom resource definition when there are no custom resources // // FUTURE: // One can report these stages via status of the watch object fHook := f.ServeWebhook(func(body []byte) ([]byte, error) { req := generic.SyncHookRequest{} if uerr := json.Unmarshal(body, &req); uerr != nil { return nil, uerr } // initialize the hook response resp := generic.SyncHookResponse{} // this check i.e. deletion timestamp is not required // if this hook is set exclusively as a finalize hook if req.Watch.GetDeletionTimestamp() != nil { var hasAtleastOneCustomResource bool for _, attGroup := range req.Attachments { for _, att := range attGroup { if att == nil { // ignore this attachment continue } // copy the attachment from req to a new instance //respAtt := att respAtt := &unstructured.Unstructured{} if att.GetKind() == "CustomResourceDefinition" { // keep the CRD attachment till all its corresponding // CRs get deleted respAtt.SetUnstructuredContent(att.UnstructuredContent()) resp.Attachments = append(resp.Attachments, respAtt) continue } else { hasAtleastOneCustomResource = true } if len(att.GetFinalizers()) == 0 { // this is a custom resource & does not have any finalizers // then let this be deleted i.e. 
don't add to response continue } // This is a custom resource with finalizers // Hence, re-build the attachment with empty finalizers respAtt.SetAPIVersion(att.GetAPIVersion()) respAtt.SetKind(att.GetKind()) respAtt.SetName(att.GetName()) respAtt.SetNamespace(att.GetNamespace()) // Setting finalizers to empty is a must to // let this custom resource get deleted respAtt.SetFinalizers([]string{}) resp.Attachments = append(resp.Attachments, respAtt) } } if !hasAtleastOneCustomResource { // If there are no custom resources in attachments then // it implies all these custom resources are deleted. We // can set the response attachments to nil. This will delete // the CRDs. resp.Attachments = nil } // keep executing this finalize hook till its request has attachments if req.Attachments.IsEmpty() { // since all attachments are deleted from cluster // indicate GenericController to mark completion // of finalize hook resp.Finalized = true } else { // if there are still attachments seen in the request // keep resyncing the watch resp.ResyncAfterSeconds = 2 } } t.Logf( "Finalize attachments count: Req %d: Resp %d", req.Attachments.Len(), len(resp.Attachments), ) return json.Marshal(resp) }) // --------------------------------------------------------- // Define & Apply a GenericController i.e. a Meta Controller // --------------------------------------------------------- // This is one of the meta controller that is defined as // a Kubernetes custom resource. It listens to the resource // specified in the watch field and acts against the resources // specified in the attachments field. 
f.CreateGenericController( ctlName, ctlNS.Name, // enable controller to delete any attachments generic.WithDeleteAny(k8s.BoolPtr(true)), // enable controller to update any attachments generic.WithUpdateAny(k8s.BoolPtr(true)), // set 'sync' as well as 'finalize' hooks //generic.WithWebhookSyncURL(&sHook.URL), generic.WithWebhookFinalizeURL(&fHook.URL), // We want Namespace as our watched resource generic.WithWatch( &v1alpha1.GenericControllerResource{ ResourceRule: v1alpha1.ResourceRule{ APIVersion: "v1", Resource: "namespaces", }, // We are interested only for our target namespace NameSelector: []string{targetNamespaceName}, }, ), // We want the CRs & CRDs as our attachments. // // This is done so as to implement clean uninstall when // above watch resource is deleted. A clean uninstall is // successful if these declared attachments get deleted // when watch i.e. our target namespace is deleted. generic.WithAttachments( []*v1alpha1.GenericControllerAttachment{ // We want all CPC custom resources as attachments &v1alpha1.GenericControllerAttachment{ GenericControllerResource: v1alpha1.GenericControllerResource{ ResourceRule: v1alpha1.ResourceRule{ APIVersion: cpcCRD.Spec.Group + "/" + cpcCRD.Spec.Versions[0].Name, Resource: cpcCRD.Spec.Names.Plural, }, }, UpdateStrategy: &v1alpha1.GenericControllerAttachmentUpdateStrategy{ Method: v1alpha1.ChildUpdateInPlace, }, }, // We want all CVR custom resources as attachments &v1alpha1.GenericControllerAttachment{ GenericControllerResource: v1alpha1.GenericControllerResource{ ResourceRule: v1alpha1.ResourceRule{ APIVersion: cvrCRD.Spec.Group + "/" + cvrCRD.Spec.Versions[0].Name, Resource: cvrCRD.Spec.Names.Plural, }, }, UpdateStrategy: &v1alpha1.GenericControllerAttachmentUpdateStrategy{ Method: v1alpha1.ChildUpdateInPlace, }, }, // We want CRDs to be included as attachments && // We want only our CRDs i.e. 
CStorPoolClaim & CStorVolumeReplica &v1alpha1.GenericControllerAttachment{ GenericControllerResource: v1alpha1.GenericControllerResource{ ResourceRule: v1alpha1.ResourceRule{ APIVersion: "apiextensions.k8s.io/v1beta1", Resource: "customresourcedefinitions", }, NameSelector: []string{ cpcCRD.GetName(), cvrCRD.GetName(), }, }, UpdateStrategy: &v1alpha1.GenericControllerAttachmentUpdateStrategy{ Method: v1alpha1.ChildUpdateInPlace, }, }, }, ), ) // ------------------------------------------------------- // Wait for the setup to behave similar to production env // ------------------------------------------------------- // // Wait till target namespace is assigned with a finalizer // by GenericController. GenericController automatically // updates the watch with its own finalizer if it finds a // finalize hook in its specifications. err = f.Wait(func() (bool, error) { targetNamespace, err = f.GetTypedClientset().CoreV1().Namespaces().Get(targetNamespaceName, metav1.GetOptions{}) if err != nil { return false, err } for _, finalizer := range targetNamespace.GetFinalizers() { if finalizer == "protect.gctl.metac.openebs.io/"+ctlNS.GetName()+"-"+ctlName { return true, nil } } return false, errors.Errorf("Namespace %s is not set with gctl finalizer", targetNamespaceName) }) if err != nil { // we wait till timeout & panic if condition is not met t.Fatal(err) } // Since setup is ready // // ------------------------------------------------------ // Trigger the test by deleting the target namespace // ------------------------------------------------------ err = f.GetTypedClientset().CoreV1().Namespaces(). 
Delete(targetNamespace.GetName(), &metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } // Need to wait & see if our controller works as expected // Make sure the specified attachments are deleted t.Logf("Waiting for deletion of CRs & CRDs") err = f.Wait(func() (bool, error) { var errs []error // ------------------------------------------- // verify if our custom resources are deleted // ------------------------------------------- cpc, cpcGetErr := cpcClient.Get(targetResName, metav1.GetOptions{}) if cpcGetErr != nil && !apierrors.IsNotFound(cpcGetErr) { errs = append( errs, errors.Wrapf(cpcGetErr, "Get CPC %s failed", targetResName), ) } if cpc != nil { errs = append(errs, errors.Errorf("CPC %s is not deleted", targetResName)) } cvr, cvrGetErr := cvrClient.Namespace(targetNamespaceName).Get(targetResName, metav1.GetOptions{}) if cvrGetErr != nil && !apierrors.IsNotFound(cvrGetErr) { errs = append( errs, errors.Wrapf(cvrGetErr, "Get CVR %s failed", targetResName), ) } if cvr != nil { errs = append(errs, errors.Errorf("CVR %s is not deleted", targetResName)) } // ------------------------------------------ // verify if our target namespace is deleted // ------------------------------------------ targetNSAgain, targetNSGetErr := f.GetTypedClientset().CoreV1().Namespaces(). 
Get(targetNamespace.GetName(), metav1.GetOptions{}) if targetNSGetErr != nil && !apierrors.IsNotFound(targetNSGetErr) { errs = append(errs, targetNSGetErr) } if targetNSAgain != nil && len(targetNSAgain.GetFinalizers()) != 0 { errs = append( errs, errors.Errorf( "Namespace %s has finalizers", targetNSAgain.GetName(), ), ) } if targetNSAgain != nil && targetNSAgain.GetDeletionTimestamp() == nil { errs = append( errs, errors.Errorf( "Namespace %s is not marked for deletion", targetNSAgain.GetName(), ), ) } // condition did not pass in case of any errors if len(errs) != 0 { return false, utilerrors.NewAggregate(errs) } // condition passed return true, nil }) if err != nil { t.Fatalf("CRs & CRDs deletion failed: %v", err) } t.Logf("CRs & CRDs were finalized / deleted successfully") }
package main import ( "fmt" "log" "reflect" "strconv" ) var input string = "input.txt" type Instruction struct { command string parameter int visited bool } func NewInstruction(instruction string, parameter string) *Instruction { n, e := strconv.Atoi(parameter) if e != nil { log.Printf("Error in code line, ignored. '%s' '%s' ", instruction, parameter) return nil } return &Instruction{instruction, n, false} } func ParseInstruction(line string) *Instruction { if len(line) < 6 { log.Printf("Misformed code line ignored. '%s'", line) return nil } return NewInstruction(line[:3], line[4:]) } func (l *Instruction) String() string { sign := "" if l.parameter >= 0 { sign = "+" } return fmt.Sprintf("'%s' '%s%d'", l.command, sign, l.parameter) } type VonNeumannMachine struct { akkumulator int program []*Instruction cursor int // commands map[string]Command } func NewVonNeumannMachine(program []*Instruction) *VonNeumannMachine { return &VonNeumannMachine{0, program, 0} } func (m *VonNeumannMachine) String() string { p := "" for _, l := range m.program { p = fmt.Sprintf("%s\t%s\n", p, l.String()) } return fmt.Sprintf("Akkumulator: %d\nCursor:%d\nProgram:\n%s", m.akkumulator, m.cursor, p) } func (m *VonNeumannMachine) Reset() { for _, ins := range m.program { ins.visited = false } m.cursor = 0 m.akkumulator = 0 } func (m *VonNeumannMachine) checkUnFinished() bool { if m.cursor < len(m.program) { return true } else { return false } } func (m *VonNeumannMachine) checkInfiniteLoop() bool { if m.program[m.cursor].visited { return true } else { return false } } func (m *VonNeumannMachine) callCommand(lit string, param ...int) { ifa := reflect.ValueOf(m) method := ifa.MethodByName(lit) if method.IsValid() { var values []reflect.Value for _, p := range param { values = append(values, reflect.ValueOf(p)) } method.Call(values) } else { log.Printf("Error: found unkown command: %s", lit) } } // func (m *VonNeumannMachine) findCommand1(lit string) (Command, error) { // if c, ok := 
m.commands[lit]; ok { // return c, nil // } else { // return nil, fmt.Errorf("Warning: unknown command, ignored. '%s'", lit) // } // } func (m *VonNeumannMachine) mainLoop() (int, error) { var err error m.Reset() for m.checkUnFinished() { if m.checkInfiniteLoop() { err = fmt.Errorf("Infinite Loop, cursor at %d", m.cursor) break } m.callCommand(m.program[m.cursor].command, m.program[m.cursor].parameter) // if f, err := m.findCommand(m.program[m.cursor].command); err == nil { // f(m, m.program[m.cursor].parameter) // } else { // log.Printf("Warning: unknown command, ignored. '%s'", m.program[m.cursor]) // m.cursor++ // } } return m.akkumulator, err } func (m *VonNeumannMachine) mutate(linenumber int) { if m.program[linenumber].command == "Jmp" { m.program[linenumber].command = "Nop" } else if m.program[linenumber].command == "Nop" { m.program[linenumber].command = "Jmp" } } func (m *VonNeumannMachine) bruteForceMutator() (int, error) { for i, v := range m.program { backup := v.command m.mutate(i) r, e := m.mainLoop() if e == nil { //res = r return r, nil } m.program[i].command = backup } return 0, fmt.Errorf("Error: Did not find a fix this program.") } func main() { m := NewVonNeumannMachine(readProgram(input)) if r, e := m.mainLoop(); e != nil { fmt.Printf("Found Infiniteloop, last value before was: %5d\n", r) } if f, e := m.bruteForceMutator(); e == nil { fmt.Printf("Found the fix. Last value of akkumulator is: %5d\n", f) } else { fmt.Println(e) } }
package main

import "fmt"

// grade maps a numeric score to its letter grade.
func grade(n int) string {
	switch {
	case n >= 90:
		return "A+"
	case n >= 80:
		return "A"
	case n >= 70:
		return "B"
	case n >= 60:
		return "C"
	case n >= 50:
		return "D"
	default:
		return "F"
	}
}

// main reads a score from stdin and prints its letter grade.
func main() {
	fmt.Println("Score to number grades")
	n := 0
	fmt.Printf("Enter score > ")
	// Previously the Scanf error was ignored, silently grading bad
	// input as 0 → "F"; report it instead.
	if _, err := fmt.Scanf("%d", &n); err != nil {
		fmt.Println("invalid input, expected a number")
		return
	}
	fmt.Println(grade(n))
}
// Copyright 2020 MongoDB Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package owner import ( "github.com/AlecAivazis/survey/v2" "github.com/mongodb/mongocli/internal/config" "github.com/mongodb/mongocli/internal/description" "github.com/mongodb/mongocli/internal/flag" "github.com/mongodb/mongocli/internal/json" "github.com/mongodb/mongocli/internal/store" "github.com/mongodb/mongocli/internal/usage" "github.com/spf13/cobra" "go.mongodb.org/ops-manager/opsmngr" ) type CreateOpts struct { email string password string firstName string lastName string whitelistIps []string store store.OwnerCreator } func (opts *CreateOpts) init() error { var err error opts.store, err = store.NewUnauthenticated(config.Default()) return err } func (opts *CreateOpts) Run() error { user := opts.newOwner() result, err := opts.store.CreateOwner(user, opts.whitelistIps) if err != nil { return err } return json.PrettyPrint(result) } func (opts *CreateOpts) newOwner() *opsmngr.User { user := &opsmngr.User{ Username: opts.email, Password: opts.password, FirstName: opts.firstName, LastName: opts.lastName, EmailAddress: opts.email, Links: nil, } return user } func (opts *CreateOpts) Prompt() error { if opts.password != "" { return nil } prompt := &survey.Password{ Message: "Password:", } return survey.AskOne(prompt, &opts.password) } // mongocli ops-manager owner create --email username --password password --firstName firstName --lastName lastName --whitelistIps whitelistIp func 
CreateBuilder() *cobra.Command { opts := new(CreateOpts) cmd := &cobra.Command{ Use: "create", Short: description.CreateOwner, Args: cobra.OnlyValidArgs, PreRunE: func(cmd *cobra.Command, args []string) error { if err := opts.init(); err != nil { return err } return opts.Prompt() }, RunE: func(cmd *cobra.Command, args []string) error { return opts.Run() }, } cmd.Flags().StringVar(&opts.email, flag.Email, "", usage.Email) cmd.Flags().StringVarP(&opts.password, flag.Password, flag.PasswordShort, "", usage.Password) cmd.Flags().StringVar(&opts.firstName, flag.FirstName, "", usage.FirstName) cmd.Flags().StringVar(&opts.lastName, flag.LastName, "", usage.LastName) cmd.Flags().StringSliceVar(&opts.whitelistIps, flag.WhitelistIP, []string{}, usage.WhitelistIps) _ = cmd.MarkFlagRequired(flag.Username) _ = cmd.MarkFlagRequired(flag.FirstName) _ = cmd.MarkFlagRequired(flag.LastName) return cmd }
package bertymessenger import ( "context" "net" "google.golang.org/grpc/test/bufconn" ) func mkBufDialer(l *bufconn.Listener) func(context.Context, string) (net.Conn, error) { return func(context.Context, string) (net.Conn, error) { return l.Dial() } }
package problems

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/stretchr/testify/require"
)

// Test_sortedSquares runs one shared table of cases against every
// sortedSquares* implementation variant defined in this package.
func Test_sortedSquares(t *testing.T) {
	type args struct {
		A []int
	}
	tests := []struct {
		name string
		args args
		want []int
	}{
		{
			name: "worst 1",
			args: args{
				A: []int{-2, -2, -2, -2, 3},
			},
			want: []int{4, 4, 4, 4, 9},
		},
		{
			name: "worst 2",
			args: args{
				A: []int{-5, -3, -1, 2, 4, 6},
			},
			want: []int{1, 4, 9, 16, 25, 36},
		},
		{
			name: "example 1",
			args: args{
				A: []int{-4, -1, 0, 3, 10},
			},
			want: []int{0, 1, 9, 16, 100},
		},
		{
			name: "example 2",
			args: args{
				A: []int{-7, -3, 2, 3, 11},
			},
			want: []int{4, 9, 9, 49, 121},
		},
		{
			name: "test 1",
			args: args{
				A: []int{1, 2, 3},
			},
			want: []int{1, 4, 9},
		},
		{
			name: "test 2",
			args: args{
				A: []int{-3, -2, -1},
			},
			want: []int{1, 4, 9},
		},
		{
			name: "test 3",
			args: args{
				A: []int{3},
			},
			want: []int{9},
		},
		{
			name: "test 4",
			args: args{
				A: []int{-3},
			},
			want: []int{9},
		},
		{
			name: "test 5",
			args: args{
				A: []int{-1, 0},
			},
			want: []int{0, 1},
		},
	}
	// Each implementation under test is identified in the subtest name
	// by its index in this slice (func#0, func#1, ...).
	for idx, f := range []func([]int) []int{
		sortedSquares,
		sortedSquares1,
		sortedSquares2,
		sortedSquares3,
		sortedSquares4,
		sortedSquares5,
	} {
		t.Run(fmt.Sprintf("func#%d", idx), func(t *testing.T) {
			for _, tt := range tests {
				t.Run(tt.name, func(t *testing.T) {
					// Run each implementation on a private copy of the
					// input, since an implementation may modify its
					// argument slice in place.
					var A = make([]int, len(tt.args.A))
					copy(A, tt.args.A)
					if got := f(A); !reflect.DeepEqual(got, tt.want) {
						t.Errorf("sortedSquares() = %v, want %v", got, tt.want)
					}
				})
			}
		})
	}
}

// Test_reverse checks in-place reversal, including the nil/empty case
// (both arr and want stay nil there).
func Test_reverse(t *testing.T) {
	type args struct {
		arr []int
	}
	tests := []struct {
		name string
		args args
		want []int
	}{
		{
			name: "test 1",
			args: args{
				arr: []int{1, 2, 3, 4, 5},
			},
			want: []int{5, 4, 3, 2, 1},
		},
		{
			name: "test 2",
			args: args{
				arr: []int{2, 3, 4, 5},
			},
			want: []int{5, 4, 3, 2},
		},
		{
			name: "empty",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert := require.New(t)
			// Copy the fixture so reverse's in-place mutation cannot
			// leak into other cases.
			var arr []int
			if tt.args.arr != nil {
				arr = make([]int, len(tt.args.arr))
				copy(arr, tt.args.arr)
			}
			reverse(arr)
			assert.Equal(tt.want, arr)
		})
	}
}

// Test_rotate checks in-place left rotation around index mid, including
// the no-op rotations (mid == 0, mid == len) and the nil/empty case.
func Test_rotate(t *testing.T) {
	type args struct {
		arr []int
		mid int
	}
	tests := []struct {
		name string
		args args
		want []int
	}{
		{
			name: "test 1",
			args: args{
				arr: []int{1, 2, 3, 4, 5, 6},
				mid: 3,
			},
			want: []int{4, 5, 6, 1, 2, 3},
		},
		{
			name: "test 2",
			args: args{
				arr: []int{1, 2, 3, 4, 5},
				mid: 4,
			},
			want: []int{5, 1, 2, 3, 4},
		},
		{
			name: "test 3",
			args: args{
				arr: []int{1, 2, 3, 4, 5},
				mid: 0,
			},
			want: []int{1, 2, 3, 4, 5},
		},
		{
			name: "test 4",
			args: args{
				arr: []int{1, 2, 3, 4, 5},
				mid: 5,
			},
			want: []int{1, 2, 3, 4, 5},
		},
		{
			name: "empty",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert := require.New(t)
			// Copy the fixture so rotate's in-place mutation cannot
			// leak into other cases.
			var arr []int
			if tt.args.arr != nil {
				arr = make([]int, len(tt.args.arr))
				copy(arr, tt.args.arr)
			}
			rotate(arr, tt.args.mid)
			assert.Equal(tt.want, arr)
		})
	}
}
// Package line_login holds endpoint and client-identity constants used
// to talk to the LINE login/registration services.
package line_login

const (
	// baseUrl is the LINE server all endpoints below are relative to.
	baseUrl = "https://legy-jp-addr.line.naver.jp"
	// registerUrl is the TalkService RPC endpoint used for registration.
	registerUrl = baseUrl + "/api/v4/TalkService.do"
	// authRegisterUrl is the authenticated registration endpoint.
	authRegisterUrl = baseUrl + "/api/v4p/rs"
	// verifyUrl is the long-polling verification endpoint.
	verifyUrl = baseUrl + "/LF1"
	// userAgent is the HTTP User-Agent presented to the server.
	userAgent = "Line/10.17.0"
	// lineApp identifies the client as app-type, version, OS name, and
	// OS version, tab-separated (impersonates the iPad client).
	lineApp = "IOSIPAD\t10.17.0\tiPad OS\t14.1"
)
package fsync import ( "errors" "fmt" "net" "os" ) //var getRootDir = func() string { // d, _ := os.Getwd() // return d + "/tmp/" //} type Handler struct { Rw ReadWrite } func (h *Handler) setMsgType(msgType int64) { h.Rw.write64(msgType) } func (h *Handler) setReq(req interface{}) { h.Rw.writeReq(req) } func (h *Handler) getMsgType() int64 { return h.Rw.read64() } func (h *Handler) setResponse(code int, msg string) { res := Response{Code: code, Msg: msg} h.Rw.writeReq(res) } func (h *Handler) getResponse(res interface{}) { h.Rw.readReq(res) } func (h *Handler) getReq(req interface{}) { h.Rw.readReq(req) } func (h *Handler) Upload() { var req UploadReq h.getReq(&req) MyCreateDir(req.FName) h.Rw.recvf(req.FName) msg := (fmt.Sprintf("server recv file %v success", req.FName)) h.setResponse(0, msg) } func (h *Handler) Download() { var req DownloadReq h.getReq(&req) fmt.Println("down file: ", req.FName) _, err := os.Stat(req.FName) if os.IsNotExist(err) { h.setResponse(-1, "file not exists") } else { h.setResponse(0, "ready to send file") h.Rw.sendf(req.FName) } } func (h *Handler) Setup() { } func (h *Handler) Process() { defer h.Finish() MsgType := h.getMsgType() switch MsgType { case MsgUploadFile: h.Upload() case MsgDownloadFile: h.Download() default: panic(errors.New("unknow msg type")) } } func (h *Handler) Finish() { _ = h.Rw.Conn.Close() } type Server struct { } func (s *Server) run() { lis, err := net.Listen("tcp", ":8887") if err != nil { panic(err) } for { conn, err := lis.Accept() if err != nil { fmt.Println(fmt.Sprintf("accept error %v", err)) continue } h := Handler{Rw: ReadWrite{Conn: conn}} h.Process() } }
package ecode // All common ecode var ( OK = add(0) // 正确 NotModified = add(-304) // 木有改动 TemporaryRedirect = add(-307) // 撞车跳转 RequestErr = add(-400) // 请求错误 Unauthorized = add(-401) // 未认证 AccessDenied = add(-403) // 访问权限不足 NothingFound = add(-404) // 啥都木有 MethodNotAllowed = add(-405) // 不支持该方法 Conflict = add(-409) // 冲突 Canceled = add(-498) // 客户端取消请求 ServerErr = add(-500) // 服务器错误 ServiceUnavailable = add(-503) // 过载保护,服务暂不可用 Deadline = add(-504) // 服务调用超时 LimitExceed = add(-509) // 超出限制 RequestParamErr = add(4001) // 请求参数错误 //SignParamErr = add(4011) //签名参数错误 //SignTimestampExpire = add(4012) //签名过期 //SignNonceDuplicate = add(4013) //签名nonce重复 //SignCheckErr = add(4014) //签名验证失败 )
/* Links * http://daqutoxevy.github.com/1.html * http://daqutoxevy.github.com/2.html * http://daqutoxevy.github.com/3.html * http://daqutoxevy.github.com/4.html * http://daqutoxevy.github.com/5.html * http://daqutoxevy.github.com/6.html * http://daqutoxevy.github.com/7.html * http://daqutoxevy.github.com/8.html * http://daqutoxevy.github.com/9.html * http://daqutoxevy.github.com/10.html */ package abc
// Package logging wraps the standard log package, prefixing every
// message with the caller's "[file:line]" and adding verbosity-levelled
// (and, for levels <= 0, colorized) logging controlled by the -v flag.
package logging

import (
	"flag"
	"fmt"
	"log"
	"path"
	"runtime"
)

// vFlag is the verbosity threshold: Vlog/Vlogf print only when their
// level is <= this value.
var vFlag = flag.Int("v", 1, "")

// getFileLinePrefix returns "[file:line] " identifying the caller of
// this package's exported functions. Caller(2) skips two frames: this
// function and the exported wrapper that invoked it.
func getFileLinePrefix() string {
	if _, file, line, ok := runtime.Caller(2); ok {
		return fmt.Sprintf("[%s:%d] ", path.Base(file), line)
	}
	return ""
}

// Fatal logs with the file:line prefix, then exits like log.Fatal.
func Fatal(v ...interface{}) {
	log.Fatal(getFileLinePrefix(), fmt.Sprint(v...))
}

// Fatalf is the formatted variant of Fatal.
func Fatalf(format string, v ...interface{}) {
	log.Fatal(getFileLinePrefix(), fmt.Sprintf(format, v...))
}

// Fatalln logs with the file:line prefix, then exits like log.Fatalln.
func Fatalln(v ...interface{}) {
	log.Fatalln(getFileLinePrefix(), fmt.Sprint(v...))
}

// Panic logs with the file:line prefix, then panics like log.Panic.
func Panic(v ...interface{}) {
	log.Panic(getFileLinePrefix(), fmt.Sprint(v...))
}

// Panicf is the formatted variant of Panic.
func Panicf(format string, v ...interface{}) {
	log.Panic(getFileLinePrefix(), fmt.Sprintf(format, v...))
}

// Panicln logs with the file:line prefix, then panics like log.Panicln.
func Panicln(v ...interface{}) {
	log.Panicln(getFileLinePrefix(), fmt.Sprint(v...))
}

// Print logs with the file:line prefix.
func Print(v ...interface{}) {
	log.Print(getFileLinePrefix(), fmt.Sprint(v...))
}

// Printf is the formatted variant of Print.
func Printf(format string, v ...interface{}) {
	log.Print(getFileLinePrefix(), fmt.Sprintf(format, v...))
}

// Println logs with the file:line prefix, like log.Println.
func Println(v ...interface{}) {
	log.Println(getFileLinePrefix(), fmt.Sprint(v...))
}

// GetVerboseLevel returns the current verbosity threshold.
func GetVerboseLevel() int {
	return *vFlag
}

// SetVerboseLevel overrides the verbosity threshold set via the -v flag.
func SetVerboseLevel(level int) {
	*vFlag = level
}

// getVlogPrefix returns the ANSI color escape for a level: red for
// negative levels, yellow for level 0, no color otherwise.
func getVlogPrefix(level int) string {
	if level < 0 {
		return "\033[0;31m"
	} else if level == 0 {
		return "\033[0;33m"
	}
	return ""
}

// getVlogSuffix returns the ANSI reset escape when a color was emitted
// (i.e. for levels <= 0).
func getVlogSuffix(level int) string {
	if level <= 0 {
		return "\033[0m"
	}
	return ""
}

// Vlog logs v when level is within the current verbosity threshold.
func Vlog(level int, v ...interface{}) {
	if level <= *vFlag {
		log.Print(getFileLinePrefix(), getVlogPrefix(level), fmt.Sprint(v...), getVlogSuffix(level))
	}
}

// Vlogf is the formatted variant of Vlog.
func Vlogf(level int, format string, v ...interface{}) {
	if level <= *vFlag {
		log.Print(getFileLinePrefix(), getVlogPrefix(level), fmt.Sprintf(format, v...), getVlogSuffix(level))
	}
}
package xmpp

import (
	"encoding/xml"
)

// Constants for XMPP Ad-Hoc Commands (namespace
// "http://jabber.org/protocol/commands", XEP-0050) and the data-form
// field/note types used with them.
const (
	// NodeAdHocCommand is the ad-hoc commands protocol namespace.
	NodeAdHocCommand = "http://jabber.org/protocol/commands"

	// Command actions.
	ActionAdHocExecute = "execute"
	ActionAdHocNext    = "next"
	ActionAdHocCancel  = "cancel"

	// Command statuses.
	StatusAdHocExecute   = "executing"
	StatusAdHocCompleted = "completed"
	StatusAdHocCanceled  = "canceled"

	// Data-form types.
	TypeAdHocForm   = "form"
	TypeAdHocResult = "result"
	TypeAdHocSubmit = "submit"

	// List field types (short names).
	TypeAdHocListSingle = "list-single"
	TypeAdHocListMulti  = "list-multi"

	// Note types.
	TypeAdHocNoteInfo    = "info"
	TypeAdHocNoteWarning = "warn"
	TypeAdHocNoteError   = "error"

	// Field types (long-form names; the list types duplicate the
	// short names above).
	TypeAdHocFieldListMulti   = "list-multi"
	TypeAdHocFieldListSingle  = "list-single"
	TypeAdHocFieldTextSingle  = "text-single"
	TypeAdHocFieldJidSingle   = "jid-single"
	TypeAdHocFieldTextPrivate = "text-private"
)

// AdHocCommand is the <command/> element of an ad-hoc command exchange.
type AdHocCommand struct {
	XMLName   xml.Name   `xml:"http://jabber.org/protocol/commands command"`
	Node      string     `xml:"node,attr"`
	Action    string     `xml:"action,attr"`
	SessionID string     `xml:"sessionid,attr"`
	Status    string     `xml:"status,attr"`
	XForm     AdHocXForm `xml:"x"`
	Note      AdHocNote  `xml:"note,omitempty"`
}

// AdHocXForm is the jabber:x:data form embedded in a command.
type AdHocXForm struct {
	XMLName      xml.Name     `xml:"jabber:x:data x"`
	Type         string       `xml:"type,attr"`
	Title        string       `xml:"title"`
	Instructions string       `xml:"instructions"`
	Fields       []AdHocField `xml:"field"`
}

// AdHocField is one form field, optionally with selectable options.
type AdHocField struct {
	Var     string             `xml:"var,attr"`
	Label   string             `xml:"label,attr"`
	Type    string             `xml:"type,attr"`
	Options []AdHocFieldOption `xml:"option"`
	Value   string             `xml:"value,omitempty"`
}

// AdHocFieldOption is one selectable value of a list field.
type AdHocFieldOption struct {
	Value string `xml:"value"`
}

// AdHocNote is a human-readable note attached to a command result; its
// body is kept as raw inner XML.
type AdHocNote struct {
	Type  string `xml:"type,attr"`
	Value string `xml:",innerxml"`
}
// Copyright 2020. Akamai Technologies, Inc
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

// root
// Fix: removed the stray developer marker "rchenna" / "rchenna-NEW" that
// had leaked into the user-facing short and long descriptions.
const (
	rootShortDescription = "The Diagnostic Tools CLI allows you to diagnose server, ESI, DNS, and network issues Akamai customers experience when delivering content to their end users."
	rootLongDescription  = `The Diagnostic Tools CLI allows you to diagnose server, ESI, DNS, and network issues Akamai customers experience when delivering content to their end users.
Run the process in background,
linux : redirect the output to a textfile and use ampersand & command > textfile &
windows : use the START command with /B and redirect the output to a textfile START /B command > textfile`

	edgercPathFlagDescription    = "Location of the edgegrid credentials file."
	edgercSectionFlagDescription = "Section name in the credentials file."
	forceColorFlagDescription    = "Force color to non-tty output."
	jsonFlagDescription          = "Get JSON output."
	rootUse                      = "diagnostics"
)

//ghost_location
const (
	ghostLocationShortDescription = "Lists active Akamai edge server locations from which you can run diagnostic tools."
	ghostLocationLongDescription  = `Lists active Akamai edge server locations from which you can run diagnostic tools.`
	searchFlagDescription         = "The location to filter the list."
	ghostLocationUse              = "ghost-locations --search location"
	ghostLocation                 = "Ghost Locations"
)

//verify_ip
const (
	verifyIpShortDescription = "Verifies whether the specified IP address is part of the Akamai edge network."
	verifyIpLongDescription  = `Verifies whether the specified IP address is part of the Akamai edge network.`
	verifyIpUse              = "verify-ip IP_address"
	ipAddress                = "IP address "
	isCdnIpSuccess           = "is an Akamai IP"
	isCdnIpFailure           = "is not an Akamai IP"
)

//locate_ip
const (
	locateIpShortDescription = "Provides the geographic and network location of an IP address within the Akamai network."
	locateIpLongDescription  = `Provides the geographic and network location of an IP address within the Akamai network.`
	locateIpUse              = "locate-ip IP_address"
	geographicLocation       = "Geographic Location"
	clientIp                 = "Client IP"
	countryCode              = "Country code"
	regionCode               = "Region code"
	city                     = "City"
	dma                      = "DMA"
	msa                      = "MSA"
	pmsa                     = "PMSA"
	areaCode                 = "Area code"
	latitude                 = "Latitude"
	longitude                = "Longitude"
	// Fix: label typo "County" -> "Country".
	country         = "Country"
	continent       = "Continent"
	fisp            = "FIPS"
	timeZone        = "Time zone"
	zipCode         = "Zip code"
	proxy           = "Proxy"
	networkLocation = "Network Location"
	network         = "Network"
	networkType     = "Network type"
	asNum           = "ASN"
	throughput      = "Throughput"
)

//translate_url
const (
	translateUrlShortDescription = "Provides high-level information about an Akamai-optimized URL (ARL), such as its time to live, origin server, and associated CP code."
	translateUrlLongDescription  = `Provides high-level information about an Akamai-optimized URL (ARL), such as its time to live, origin server, and associated CP code.`
	translateUrlUse              = "translate-url URL"
	translateUrl                 = "Translate URL"
	typeCode                     = "Type code"
	originServer                 = "Origin server"
	cpCode                       = "CP code"
	serialNumber                 = "Serial number"
	ttl                          = "TTL"
)

//dig
const (
	digShortDescription    = "Runs DIG on a hostname or a domain name to return DNS details for the location of an Akamai edge server and the hostname or the domain name. You can use it to diagnose issues with the DNS resolution."
	digLongDescription     = `Runs the DIG command on a hostname or a domain name to return DNS details for the location of an Akamai edge server and the hostname or the domain name. You can use it to diagnose issues with the DNS resolution.`
	typeDigFlagDescription = "The type of the DNS record; possible values are: A, AAAA, CNAME, MX, NS, PTR, or SOA."
	digUse                 = "dig hostname source_server_location/edge_server_IP --type query_type"
)

//mtr
const (
	mtrShortDescription           = "Runs MTR to check connectivity between an Akamai edge server and a remote host or destination. You can use it to diagnose network delays issues."
	mtrLongDescription            = `Runs the MTR command to provide information about the route, number of hops, and time that Internet traffic packets take between the Akamai edge server and a remote host or destination. You can use it to diagnose network delays issues.`
	resolveHostMtrFlagDescription = "Whether to use DNS to resolve hostnames. When disabled, the output features only IP addresses."
	mtrUse                        = "mtr domain_name/destination_IP source_server_location/edge_server_IP --resolve-hostname"
	networkConnectivity           = "Network Connectivity Test from"
	to                            = "to"
)

//curl
const (
	curlShortDescription     = "Runs CURL to provide a raw HTML for a URL within the Akamai network. You can use it to gather information about the HTTP response."
	curlLongDescription      = `Runs the CURL command to provide a raw HTML for a URL within the Akamai network. You can use it to gather information about the HTTP response.`
	userAgentFlagDescription = "The user agent; possible values are: android, firefox, iphone, mobile, chrome, msie, msie9, msie10, safari, safari/5, safari/6, webkit, webkit/5, webkit/6."
	curlUse                  = "curl URL source_server_location/edge_server_IP --user-agent additional-user-agent"
	responseHeader           = "Response Headers"
	responseBody             = "Response Body"
)

//translate_error
const (
	translateErrorShortDescription = "Provides information about an error string from the reference number produced by Akamai edge servers when a request to retrieve content fails."
	translateErrorLongDescription  = `Provides a summary and logs for the error that occurred in the original request using the error string from the reference number.`
	translateErrorUse              = "translate-error-string error_string"
	summary                        = "Summary"
	urlTranslateError              = "URL"
	httpResponseCode               = "HTTP response code"
	dateAndTime                    = "Date and time"
	epocTime                       = "Epoch time"
	clientIpTranslateError         = "Client IP"
	connectingIp                   = "Connecting IP"
	originHostName                 = "Origin hostname"
	originIp                       = "Origin IP"
	userAgent                      = "User agent"
	clientRequest                  = "Client request"
	reasonForFailure               = "Reason for failure"
	wafDetails                     = "WAF details"
	errorLogs                      = "Error Logs"
)

//grep
const (
	grepShortDescription          = "Runs GREP to retrieve and parse logs for an IP address within the Akamai network using flags to filter the data. Data is available for 48 hours after the traffic occurs."
	grepLongDescription           = `Runs the GREP command to retrieve and parse logs for an IP address within the Akamai network using flags to filter the data. Logs provide low-level details on how each request was handled, which you can use to troubleshoot caching and performance issues and to ensure the correct set of Akamai features was applied to the traffic. Data is available for 48 hours after the traffic occurs.`
	grepUse                       = "grep edge_server_IP --end-date date --end-time time --duration duration --find-in Header:Value --max-lines maximum_log_lines_to_display -r | -f | -rf"
	endDateFlagDescription        = "The end date of log search, in the <yyyy:mm:dd> format."
	endTimeFlagDescription        = "The end time of log search, in the <hh:mm:ss> (UTC) format."
	durationFlagDescription       = "The number of minutes before the `end-date` and `end-time` for which to retrieve logs."
	maxLinesFlagDescription       = "The maximum log lines to display."
	clientRequestFlagDescription  = "Search logs of incoming client requests to the Akamai edge server."
	forwardRequestFlagDescription = "Search logs of forwarded requests from the Akamai edge server."
	findInFlagDescription         = "Where to search, specified as <field>:<value>. Possible `field` values are: `host-header`, `user-agent`, `http-status-code`, `arl`, `cp-code`, and `client-ip`."
)

//estats
const (
	estatsShortDescription             = "Provides error statistics on a CP code’s traffic from clients to Akamai edge servers and from Akamai edge servers to origin."
	estatsLongDescription              = `Provides statistics on errors happening in the delivery of websites based on real-time data of CP code's traffic from clients to Akamai edge servers and from Akamai edge servers to origin.`
	estatsUse                          = "estats URL/CP_code"
	summaryEstats                      = "Summary"
	edgeStatistics                     = "Edge Statistics"
	percentageFailurEdgeStatistics     = "(Percent failure: %.1f%c)"
	edgeStatisticsDescription          = "Edge status code distribution"
	statusCodeEdgeStatistics           = "Status code"
	percentageHitEdgeStatistics        = "% Hits"
	originalStatistics                 = "Origin Statistics"
	percentageFailurOriginalStatistics = "(Percent failure: %.1f%c)"
	originalStatisticsDescription      = "Origin status code distribution"
	statusCodeOriginalStatistics       = "Status code"
	percentageHitsOriginalStatistics   = "% Hits"
	edgeErrors                         = "Edge Errors"
	edgeErrorsDescription              = "View last 10 edge errors"
	edgeIpEdgeErrors                   = "Edge IP"
	regionEdgeErrors                   = "Region"
	httpStatusEdgeErrors               = "HTTP status"
	hitsEdgeErrors                     = "Hits"
	objectStatusEdgeErrors             = "Object status"
	errorCodeEdgeErrors                = "Error code"
	originErrors                       = "Origin Errors"
	originErrorsDescription            = "View last 10 origin errors"
	edgeIpOriginErrors                 = "Edge IP"
	regionOriginErrors                 = "Region"
	httpStatusOriginErrors             = "HTTP status"
	hitsOriginErrors                   = "Hits"
	objectStatusOriginErrors           = "Object status"
	errorCodeOriginErrors              = "Error code"
)

//debug_url
const (
	debugUrlShortDescription = "Provides DNS information, HTTP response, response headers, and logs for a URL on Akamai edge servers."
	debugUrlLongDescription  = `Provides DNS information, HTTP response, response headers, and logs for a URL on Akamai edge servers.`
	headerFlagDescription    = "Any additional headers to add to the request, in the <header>:<value> format."
	edgeIpFlagDescription    = "The Akamai edge server IP address to test the URL against, otherwise a random server by default."
	debugUrlUse              = "debug-url URL --edge-ip edge_server_IP --header request_header"
	dnsInformation           = "DNS information"
	httpResponse             = "HTTP response"
	responseHeaderDebugUrl   = "Response header"
	logs                     = "Logs"
)

//user_diagnostics
const (
	userDiagnosticsShortDescription = "Use this tool to create a sharable link and send it to the end users to collect diagnostic data. You can view, filter, and export the results."
	userDiagnosticsLongDescription  = `Use this tool to create a sharable link and send it to the end users to collect diagnostic data. You can view, filter, and export the results.`
	userDiagnosticsUse              = "user-diagnostics"
)

//user_diagnostics_list
const (
	userDiagnosticsListShortDescription = "Lists all groups created to gather diagnostic data of end users of hostnames experiencing issues together with the generated links and number of collected data."
	userDiagnosticsListLongDescription  = `Lists all groups created to gather diagnostic data of end users of hostnames experiencing issues together with the generated links and number of collected data.`
	userDiagnosticsListUse              = "list"
	userDiagnosticsListNote             = "Note: Each link is active for 7 days and has a limit of 50 submissions."
	linkId                              = "Link ID"
	statusUserDiagnosticsLink           = "Status"
	generatedOn                         = "Generated on (UTC)"
	groupName                           = "Group name"
	hostNameOrUrl                       = "Hostname/URL"
	caseId                              = "Case ID"
	userSharableLink                    = "User sharable link"
	records                             = "Records"
)

//user_diagnostics_create_group
const (
	userDiagnosticsCreateGroupShortDescription = "Creates a group for a hostname you want to gather diagnostic data for. It also generates a diagnostic link that you can send to end users of the group’s hostname or URL. When end users click the link, the tool gathers necessary diagnostic data to submit."
	userDiagnosticsCreateGroupLongDescription  = `Creates a group for a hostname you want to gather diagnostic data for. It also generates a diagnostic link that you can send to end users of the group’s hostname or URL. When end users click the link, the tool gathers necessary diagnostic data to submit.`
	userDiagnosticsCreateGroupUse              = "create-group group_name hostname"
	linkText1                                  = "Here is your link!"
	linkText2                                  = "Copy and send the link below to the end users who are experiencing the content delivery problem.\nEach link is active for 7 days and has a limit of 50 submissions."
	linkText3                                  = "Link: "
)

//user_diagnostics_get
const (
	userDiagnosticsGetShortDescription = "Lists end users' diagnostic data submitted using a diagnostic link."
	userDiagnosticsGetLongDescription  = `Lists end users' diagnostic data submitted using a diagnostic link.`
	userDiagnosticsGetUse              = "get link_id"
	viewUserDiagnosticsData            = "View User Diagnostic Data"
	generatedOnUserDiagnosticGet       = "Generated on"
	groupNameUserDiagnosticGet         = "Group name"
	hostNameOrUrlUserDiagnosticGet     = "Hostname/URL"
	userSharableLinkUserDiagnosticGet  = "User sharable link"
	linkStatus                         = "Link status"
	uid                                = "UID"
	timestamp                          = "Timestamp (UTC)"
	clientIpPreferred                  = "Client IP preferred"
	clientDnsIpv4                      = "Client DNS IPv4"
	clientDnsIpv6                      = "Client DNS IPv6"
	userAgentUserDiagnosticGet         = "User agent"
	cookie                             = "Cookie"
	protocol                           = "Protocol"
	connectedCipher                    = "Connected cipher"
	clientIpv4                         = "Client IPv4"
	clientIpv6                         = "Client IPv6"
	edgeIps                            = "Edge IPs"
	curlDescription                    = " - Request content from Akamai edge server, run: "
	digDescription                     = " - Get domain details from an Akamai edge server, run: "
	edgeIpMessage                      = "Edge IP is not shown either because of a system error or the client DNS has an ECS (EDNS Client Subnet)."
)

//generic constants
const (
	clientTypeKey   = "clientType"
	clientTypeValue = "cli"
)
// Package nats holds NATS connection settings read from the environment.
package nats

import (
	"os"
)

// NATS configuration, captured once at package initialization. Both values
// are the raw environment strings and may be empty if the variables are unset.
var (
	// NATSURL is the NATS server URL, taken from NATS_SERVER_URL.
	NATSURL = os.Getenv("NATS_SERVER_URL")
	// DEFAULTMAXMESSAGES is the default maximum message count, taken from
	// NATS_DEFAULT_MAX_MESSAGES. Kept as a string here; presumably parsed
	// by callers — TODO confirm.
	DEFAULTMAXMESSAGES = os.Getenv("NATS_DEFAULT_MAX_MESSAGES")
)
package auth

import (
	"strconv"
	"testing"

	"github.com/uhppoted/uhppoted-lib/kvs"
)

// TestValidateHOTPWithValidOTP verifies that two consecutive valid OTPs for
// the same secret are both accepted when they fall within the lookahead
// window (increment: 8).
func TestValidateHOTPWithValidOTP(t *testing.T) {
	hotp := HOTP{
		increment: 8,
		// In-memory secrets store; values are kept as plain strings.
		secrets: kvs.NewKeyValueStore("test:secrets", func(v string) (interface{}, error) { return v, nil }),
		// In-memory counters store; values are parsed as uint64 counters.
		counters: struct {
			*kvs.KeyValueStore
			filepath string
		}{
			kvs.NewKeyValueStore("test:counters", func(v string) (interface{}, error) { return strconv.ParseUint(v, 10, 64) }),
			"",
		},
	}

	hotp.secrets.Put("qwerty", "DFIOJ3BJPHPCRJBT")

	if err := hotp.Validate("qwerty", "644039"); err != nil {
		t.Errorf("HOTP refused valid OTP")
	}

	if err := hotp.Validate("qwerty", "586787"); err != nil {
		t.Errorf("HOTP refused valid OTP")
	}
}

// TestValidateHOTPWithOutOfOrderOTP verifies that once a later OTP has been
// accepted, an earlier (already-consumed counter) OTP is rejected.
func TestValidateHOTPWithOutOfOrderOTP(t *testing.T) {
	hotp := HOTP{
		increment: 8,
		secrets:   kvs.NewKeyValueStore("test:secrets", func(v string) (interface{}, error) { return v, nil }),
		counters: struct {
			*kvs.KeyValueStore
			filepath string
		}{
			kvs.NewKeyValueStore("test:counters", func(v string) (interface{}, error) { return strconv.ParseUint(v, 10, 64) }),
			"",
		},
	}

	hotp.secrets.Put("qwerty", "DFIOJ3BJPHPCRJBT")

	if err := hotp.Validate("qwerty", "586787"); err != nil {
		t.Errorf("HOTP refused valid OTP")
	}

	if err := hotp.Validate("qwerty", "644039"); err == nil {
		t.Errorf("HOTP accepted out of order OTP")
	}
}

// TestValidateHOTPWithOutOfRangeOTP verifies that an OTP beyond the small
// lookahead window (increment: 2) is rejected even though the code itself
// is otherwise valid for a later counter.
func TestValidateHOTPWithOutOfRangeOTP(t *testing.T) {
	hotp := HOTP{
		increment: 2,
		secrets:   kvs.NewKeyValueStore("test:secrets", func(v string) (interface{}, error) { return v, nil }),
		counters: struct {
			*kvs.KeyValueStore
			filepath string
		}{
			kvs.NewKeyValueStore("test:counters", func(v string) (interface{}, error) { return strconv.ParseUint(v, 10, 64) }),
			"",
		},
	}

	hotp.secrets.Put("qwerty", "DFIOJ3BJPHPCRJBT")

	if err := hotp.Validate("qwerty", "586787"); err == nil {
		t.Errorf("HOTP accepted out of range OTP")
	}
}

// TestValidateHOTPWithInvalidOTP verifies that a code that is not valid for
// any counter in the window is rejected.
func TestValidateHOTPWithInvalidOTP(t *testing.T) {
	hotp := HOTP{
		increment: 8,
		secrets:   kvs.NewKeyValueStore("test:secrets", func(v string) (interface{}, error) { return v, nil }),
		counters: struct {
			*kvs.KeyValueStore
			filepath string
		}{
			kvs.NewKeyValueStore("test:counters", func(v string) (interface{}, error) { return strconv.ParseUint(v, 10, 64) }),
			"",
		},
	}

	hotp.secrets.Put("qwerty", "DFIOJ3BJPHPCRJBT")

	if err := hotp.Validate("qwerty", "644038"); err == nil {
		t.Errorf("HOTP accepted invalid OTP")
	}
}
// File-age exporter: publishes the age of /tmp/testfile as a Prometheus gauge.
package main

import (
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// Resolution is presumably the caller's polling interval in
	// milliseconds; it is not used inside this file — TODO confirm.
	Resolution int64 = 10000

	// metrics tracks the age, in seconds, of /tmp/testfile.
	metrics = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "tmp_file_age",
		Help: "Age of /tmp/testfile",
	})
)

// GetCollector exposes the gauge so callers can register it with a
// prometheus registry.
func GetCollector() prometheus.Collector {
	return metrics
}

// UpdateMetric refreshes the gauge with the current age of /tmp/testfile.
// If the file cannot be stat'ed (e.g. it does not exist), the age is
// reported as 0.
func UpdateMetric() {
	stat, err := os.Stat("/tmp/testfile")
	if err != nil {
		metrics.Set(0)
		return
	}
	// Duration.Seconds already returns float64; no conversion needed.
	metrics.Set(time.Since(stat.ModTime()).Seconds())
}
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mockstore

import (
	"net/url"
	"strings"

	"github.com/pingcap/errors"
	"github.com/pingcap/tidb/config"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/store/mockstore/unistore"
	"github.com/pingcap/tidb/testkit/testenv"
	"github.com/tikv/client-go/v2/testutils"
	"github.com/tikv/client-go/v2/tikv"
	pd "github.com/tikv/pd/client"
)

// MockTiKVDriver is in memory mock TiKV driver.
type MockTiKVDriver struct{}

// Open creates a MockTiKV storage from a "mocktikv://" URI. The URI path
// becomes the store path, and transaction local latches are enabled when
// the global config enables them.
func (d MockTiKVDriver) Open(path string) (kv.Storage, error) {
	u, err := url.Parse(path)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Only the "mocktikv" scheme is accepted by this driver.
	if !strings.EqualFold(u.Scheme, "mocktikv") {
		return nil, errors.Errorf("Uri scheme expected(mocktikv) but found (%s)", u.Scheme)
	}

	opts := []MockTiKVStoreOption{WithPath(u.Path), WithStoreType(MockTiKV)}
	txnLocalLatches := config.GetGlobalConfig().TxnLocalLatches
	if txnLocalLatches.Enabled {
		opts = append(opts, WithTxnLocalLatches(txnLocalLatches.Capacity))
	}

	return NewMockStore(opts...)
}

// EmbedUnistoreDriver is in embedded unistore driver.
type EmbedUnistoreDriver struct{}

// Open creates a EmbedUnistore storage.
// Open creates an EmbedUnistore storage from a "unistore://" URI, mirroring
// MockTiKVDriver.Open but selecting the unistore backend.
func (d EmbedUnistoreDriver) Open(path string) (kv.Storage, error) {
	u, err := url.Parse(path)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Only the "unistore" scheme is accepted by this driver.
	if !strings.EqualFold(u.Scheme, "unistore") {
		return nil, errors.Errorf("Uri scheme expected(unistore) but found (%s)", u.Scheme)
	}

	opts := []MockTiKVStoreOption{WithPath(u.Path), WithStoreType(EmbedUnistore)}
	txnLocalLatches := config.GetGlobalConfig().TxnLocalLatches
	if txnLocalLatches.Enabled {
		opts = append(opts, WithTxnLocalLatches(txnLocalLatches.Capacity))
	}

	return NewMockStore(opts...)
}

// StoreType is the type of backend mock storage.
type StoreType uint8

const (
	// MockTiKV is the mock storage based on goleveldb.
	MockTiKV StoreType = iota
	// EmbedUnistore is the mock storage based on unistore.
	EmbedUnistore

	// defaultStoreType is used when no WithStoreType option is supplied.
	defaultStoreType = EmbedUnistore
)

// mockOptions collects the settings assembled by MockTiKVStoreOption values
// before NewMockStore builds the store.
type mockOptions struct {
	clusterInspector func(testutils.Cluster)
	clientHijacker   func(tikv.Client) tikv.Client
	pdClientHijacker func(pd.Client) pd.Client
	path             string
	txnLocalLatches  uint
	storeType        StoreType
	ddlCheckerHijack bool
}

// MockTiKVStoreOption is used to control some behavior of mock tikv.
type MockTiKVStoreOption func(*mockOptions)

// WithMultipleOptions merges multiple options into one option.
func WithMultipleOptions(opts ...MockTiKVStoreOption) MockTiKVStoreOption {
	return func(args *mockOptions) {
		for _, opt := range opts {
			opt(args)
		}
	}
}

// WithClientHijacker hijacks KV client's behavior, makes it easy to simulate the network
// problem between TiDB and TiKV.
func WithClientHijacker(hijacker func(tikv.Client) tikv.Client) MockTiKVStoreOption {
	return func(c *mockOptions) {
		c.clientHijacker = hijacker
	}
}

// WithPDClientHijacker hijacks PD client's behavior, makes it easy to simulate the network
// problem between TiDB and PD.
func WithPDClientHijacker(hijacker func(pd.Client) pd.Client) MockTiKVStoreOption {
	return func(c *mockOptions) {
		c.pdClientHijacker = hijacker
	}
}

// WithClusterInspector lets user to inspect the mock cluster handler.
// WithClusterInspector lets the user inspect (and bootstrap) the mock
// cluster before the store is returned.
func WithClusterInspector(inspector func(testutils.Cluster)) MockTiKVStoreOption {
	return func(c *mockOptions) {
		c.clusterInspector = inspector
	}
}

// WithStoreType lets user choose the backend storage's type.
func WithStoreType(tp StoreType) MockTiKVStoreOption {
	return func(c *mockOptions) {
		c.storeType = tp
	}
}

// WithPath specifies the mocktikv path.
func WithPath(path string) MockTiKVStoreOption {
	return func(c *mockOptions) {
		c.path = path
	}
}

// WithTxnLocalLatches enable txnLocalLatches, when capacity > 0.
func WithTxnLocalLatches(capacity uint) MockTiKVStoreOption {
	return func(c *mockOptions) {
		c.txnLocalLatches = capacity
	}
}

// WithDDLChecker prepare injected DDL implementation for the domain of this store. It must be done before bootstrap to
// avoid data race with dom.ddl.
func WithDDLChecker() MockTiKVStoreOption {
	return func(c *mockOptions) {
		c.ddlCheckerHijack = true
	}
}

// DDLCheckerInjector is used to break import cycle.
var DDLCheckerInjector func(kv.Storage) kv.Storage

// NewMockStore creates a mocked tikv store, the path is the file path to store the data.
// If path is an empty string, a memory storage will be created.
func NewMockStore(options ...MockTiKVStoreOption) (kv.Storage, error) {
	testenv.SetGOMAXPROCSForTest()
	// Defaults: bootstrap a single-store cluster on the default backend;
	// callers override via the functional options.
	opt := mockOptions{
		clusterInspector: func(c testutils.Cluster) {
			BootstrapWithSingleStore(c)
		},
		storeType: defaultStoreType,
	}
	for _, f := range options {
		f(&opt)
	}

	var (
		store kv.Storage
		err   error
	)

	switch opt.storeType {
	case MockTiKV:
		store, err = newMockTikvStore(&opt)
	case EmbedUnistore:
		store, err = newUnistore(&opt)
	default:
		panic("unsupported mockstore")
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Wrap the store with the injected DDL checker when requested.
	// NOTE(review): DDLCheckerInjector must be non-nil when WithDDLChecker
	// was used, or this will panic — confirm it is set during init.
	if opt.ddlCheckerHijack {
		store = DDLCheckerInjector(store)
	}
	return store, nil
}

// BootstrapWithSingleStore initializes a Cluster with 1 Region and 1 Store.
func BootstrapWithSingleStore(cluster testutils.Cluster) (storeID, peerID, regionID uint64) { switch x := cluster.(type) { case *testutils.MockCluster: return testutils.BootstrapWithSingleStore(x) case *unistore.Cluster: return unistore.BootstrapWithSingleStore(x) default: panic("unsupported cluster type") } } // BootstrapWithMultiStores initializes a Cluster with 1 Region and n Stores. func BootstrapWithMultiStores(cluster testutils.Cluster, n int) (storeIDs, peerIDs []uint64, regionID uint64, leaderPeer uint64) { switch x := cluster.(type) { case *testutils.MockCluster: return testutils.BootstrapWithMultiStores(x, n) case *unistore.Cluster: return unistore.BootstrapWithMultiStores(x, n) default: panic("unsupported cluster type") } } // BootstrapWithMultiRegions initializes a Cluster with multiple Regions and 1 // Store. The number of Regions will be len(splitKeys) + 1. func BootstrapWithMultiRegions(cluster testutils.Cluster, splitKeys ...[]byte) (storeID uint64, regionIDs, peerIDs []uint64) { switch x := cluster.(type) { case *testutils.MockCluster: return testutils.BootstrapWithMultiRegions(x, splitKeys...) case *unistore.Cluster: return unistore.BootstrapWithMultiRegions(x, splitKeys...) default: panic("unsupported cluster type") } }
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package txn

import (
	"bytes"
	"context"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/kvproto/pkg/kvrpcpb"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/parser/model"
	"github.com/pingcap/tidb/parser/mysql"
	derr "github.com/pingcap/tidb/store/driver/error"
	"github.com/pingcap/tidb/table/tables"
	"github.com/pingcap/tidb/tablecodec"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util"
	"github.com/pingcap/tidb/util/logutil"
	tikverr "github.com/tikv/client-go/v2/error"
	"go.uber.org/zap"
)

// genKeyExistsError builds the user-facing duplicate-key error for index
// `name` with the decoded key `value`. A non-nil `err` means the decoding
// that produced `value` failed; it is logged but the duplicate-key error is
// returned regardless (possibly with a less readable value).
func genKeyExistsError(name string, value string, err error) error {
	if err != nil {
		logutil.BgLogger().Info("extractKeyExistsErr meets error", zap.Error(err))
	}
	return kv.ErrKeyExists.FastGenByArgs(value, name)
}

// ExtractKeyExistsErrFromHandle returns a ErrKeyExists error from a handle key.
// ExtractKeyExistsErrFromHandle decodes a duplicate record key into a
// readable primary-key value and wraps it as an ErrKeyExists error.
func ExtractKeyExistsErrFromHandle(key kv.Key, value []byte, tblInfo *model.TableInfo) error {
	name := tblInfo.Name.String() + ".PRIMARY"
	_, handle, err := tablecodec.DecodeRecordKey(key)
	if err != nil {
		return genKeyExistsError(name, key.String(), err)
	}

	if handle.IsInt() {
		// Integer handle: format unsigned PKs as unsigned.
		if pkInfo := tblInfo.GetPkColInfo(); pkInfo != nil {
			if mysql.HasUnsignedFlag(pkInfo.GetFlag()) {
				handleStr := strconv.FormatUint(uint64(handle.IntValue()), 10)
				return genKeyExistsError(name, handleStr, nil)
			}
		}
		return genKeyExistsError(name, handle.String(), nil)
	}

	// Non-integer (common) handle: decode the clustered-index columns from
	// the row value to render a human-readable key.
	if len(value) == 0 {
		return genKeyExistsError(name, handle.String(), errors.New("missing value"))
	}

	idxInfo := tables.FindPrimaryIndex(tblInfo)
	if idxInfo == nil {
		return genKeyExistsError(name, handle.String(), errors.New("cannot find index info"))
	}

	cols := make(map[int64]*types.FieldType, len(tblInfo.Columns))
	for _, col := range tblInfo.Columns {
		cols[col.ID] = &(col.FieldType)
	}
	handleColIDs := make([]int64, 0, len(idxInfo.Columns))
	for _, col := range idxInfo.Columns {
		handleColIDs = append(handleColIDs, tblInfo.Columns[col.Offset].ID)
	}

	row, err := tablecodec.DecodeRowToDatumMap(value, cols, time.Local)
	if err != nil {
		return genKeyExistsError(name, handle.String(), err)
	}

	data, err := tablecodec.DecodeHandleToDatumMap(handle, handleColIDs, cols, time.Local, row)
	if err != nil {
		return genKeyExistsError(name, handle.String(), err)
	}

	// Join the PK column values with "-", truncating to prefix-index length
	// and hex-escaping binary/bit values for display.
	valueStr := make([]string, 0, len(data))
	for _, col := range idxInfo.Columns {
		d := data[tblInfo.Columns[col.Offset].ID]
		str, err := d.ToString()
		if err != nil {
			return genKeyExistsError(name, key.String(), err)
		}
		if col.Length > 0 && len(str) > col.Length {
			str = str[:col.Length]
		}
		if types.IsBinaryStr(&tblInfo.Columns[col.Offset].FieldType) || types.IsTypeBit(&tblInfo.Columns[col.Offset].FieldType) {
			str = util.FmtNonASCIIPrintableCharToHex(str)
		}
		valueStr = append(valueStr, str)
	}
	return genKeyExistsError(name, strings.Join(valueStr, "-"), nil)
}

// ExtractKeyExistsErrFromIndex returns a
// ErrKeyExists error from a index key.
func ExtractKeyExistsErrFromIndex(key kv.Key, value []byte, tblInfo *model.TableInfo, indexID int64) error {
	// Locate the index metadata by ID; without it the key cannot be decoded.
	var idxInfo *model.IndexInfo
	for _, index := range tblInfo.Indices {
		if index.ID == indexID {
			idxInfo = index
		}
	}
	if idxInfo == nil {
		return genKeyExistsError("UNKNOWN", key.String(), errors.New("cannot find index info"))
	}
	name := tblInfo.Name.String() + "." + idxInfo.Name.String()

	if len(value) == 0 {
		return genKeyExistsError(name, key.String(), errors.New("missing value"))
	}

	colInfo := tables.BuildRowcodecColInfoForIndexColumns(idxInfo, tblInfo)
	values, err := tablecodec.DecodeIndexKV(key, value, len(idxInfo.Columns), tablecodec.HandleNotNeeded, colInfo)
	if err != nil {
		return genKeyExistsError(name, key.String(), err)
	}

	// Render each index column value, hex-escaping binary/bit values, and
	// join them with "-" for the error message.
	valueStr := make([]string, 0, len(values))
	for i, val := range values {
		d, err := tablecodec.DecodeColumnValue(val, colInfo[i].Ft, time.Local)
		if err != nil {
			return genKeyExistsError(name, key.String(), err)
		}
		str, err := d.ToString()
		if err != nil {
			return genKeyExistsError(name, key.String(), err)
		}
		if types.IsBinaryStr(colInfo[i].Ft) || types.IsTypeBit(colInfo[i].Ft) {
			str = util.FmtNonASCIIPrintableCharToHex(str)
		}
		valueStr = append(valueStr, str)
	}
	return genKeyExistsError(name, strings.Join(valueStr, "-"), nil)
}

// extractKeyErr converts low-level client-go key errors (write conflict,
// retryable) into their TiDB equivalents; anything else is passed through
// the generic driver error translation.
func extractKeyErr(err error) error {
	if err == nil {
		return nil
	}
	if e, ok := errors.Cause(err).(*tikverr.ErrWriteConflict); ok {
		return newWriteConflictError(e.WriteConflict)
	}
	if e, ok := errors.Cause(err).(*tikverr.ErrRetryable); ok {
		notFoundDetail := prettyLockNotFoundKey(e.Retryable)
		return kv.ErrTxnRetryable.GenWithStackByArgs(e.Retryable + " " + notFoundDetail)
	}
	return derr.ToTiDBErr(err)
}

// newWriteConflictError formats a kvrpcpb.WriteConflict into a TiDB
// write-conflict error, rendering both the conflicting key and the primary
// key in decoded and raw-hex forms.
func newWriteConflictError(conflict *kvrpcpb.WriteConflict) error {
	if conflict == nil {
		return kv.ErrWriteConflict
	}
	var bufConflictKeyTableID bytes.Buffer // table id part of conflict key, which is used to be parsed by upper level to provide more information about the table
	var bufConflictKeyRest bytes.Buffer    // the rest part of conflict key
	var bufPrimaryKeyTableID bytes.Buffer  // table id part of primary key
	var bufPrimaryKeyRest bytes.Buffer     // the rest part of primary key
	prettyWriteKey(&bufConflictKeyTableID, &bufConflictKeyRest, conflict.Key)
	bufConflictKeyRest.WriteString(", originalKey=" + hex.EncodeToString(conflict.Key))
	bufConflictKeyRest.WriteString(", primary=")
	prettyWriteKey(&bufPrimaryKeyTableID, &bufPrimaryKeyRest, conflict.Primary)
	bufPrimaryKeyRest.WriteString(", originalPrimaryKey=" + hex.EncodeToString(conflict.Primary))
	return kv.ErrWriteConflict.FastGenByArgs(conflict.StartTs, conflict.ConflictTs, conflict.ConflictCommitTs,
		bufConflictKeyTableID.String(), bufConflictKeyRest.String(), bufPrimaryKeyTableID.String(), bufPrimaryKeyRest.String(), conflict.Reason.String(),
	)
}

// prettyWriteKey decodes `key` as (in order) an index key, a record key, or
// a meta key, writing the table-id portion to bufTableID and the remainder
// to bufRest; if nothing decodes, the raw bytes are dumped with %#v.
func prettyWriteKey(bufTableID, bufRest *bytes.Buffer, key []byte) {
	tableID, indexID, indexValues, err := tablecodec.DecodeIndexKey(key)
	if err == nil {
		_, err1 := fmt.Fprintf(bufTableID, "{tableID=%d", tableID)
		if err1 != nil {
			logutil.BgLogger().Error("error", zap.Error(err1))
		}
		_, err1 = fmt.Fprintf(bufRest, ", indexID=%d, indexValues={", indexID)
		if err1 != nil {
			logutil.BgLogger().Error("error", zap.Error(err1))
		}
		for _, v := range indexValues {
			_, err2 := fmt.Fprintf(bufRest, "%s, ", v)
			if err2 != nil {
				logutil.BgLogger().Error("error", zap.Error(err2))
			}
		}
		bufRest.WriteString("}}")
		return
	}

	tableID, handle, err := tablecodec.DecodeRecordKey(key)
	if err == nil {
		_, err3 := fmt.Fprintf(bufTableID, "{tableID=%d", tableID)
		if err3 != nil {
			logutil.BgLogger().Error("error", zap.Error(err3))
		}
		_, err3 = fmt.Fprintf(bufRest, ", handle=%s}", handle.String())
		if err3 != nil {
			logutil.BgLogger().Error("error", zap.Error(err3))
		}
		return
	}

	mKey, mField, err := tablecodec.DecodeMetaKey(key)
	if err == nil {
		_, err3 := fmt.Fprintf(bufRest, "{metaKey=true, key=%s, field=%s}", string(mKey), string(mField))
		if err3 != nil {
			// NOTE(review): this branch uses logutil.Logger(context.Background())
			// while every sibling branch uses logutil.BgLogger() — looks like an
			// inconsistency; confirm whether they are equivalent here.
			logutil.Logger(context.Background()).Error("error", zap.Error(err3))
		}
		return
	}

	_, err4 := fmt.Fprintf(bufRest, "%#v", key)
	if err4 != nil {
		logutil.BgLogger().Error("error", zap.Error(err4))
	}
}

// prettyLockNotFoundKey extracts the JSON-encoded key from a
// "TxnLockNotFound" retry message (the "[...]" byte-array segment) and
// pretty-prints it; returns "" when the message does not match.
func prettyLockNotFoundKey(rawRetry string) string {
	if !strings.Contains(rawRetry, "TxnLockNotFound") {
		return ""
	}
	start := strings.Index(rawRetry, "[")
	if start == -1 {
		return ""
	}
	rawRetry = rawRetry[start:]
	end := strings.Index(rawRetry, "]")
	if end == -1 {
		return ""
	}
	rawRetry = rawRetry[:end+1]
	var key []byte
	err := json.Unmarshal([]byte(rawRetry), &key)
	if err != nil {
		return ""
	}
	var buf1 bytes.Buffer
	var buf2 bytes.Buffer
	prettyWriteKey(&buf1, &buf2, key)
	return buf1.String() + buf2.String()
}
// Package entities defines the data models shared across the application.
package entities

import (
	"fmt"
)

// User is a user's info struct.
type User struct {
	Id       string `json:"id"` // JSON field names are set via struct tags
	Name     string `json:"name"`
	Password string `json:"password"`
}

// Url maps a user's long URL to its shortened form.
type Url struct {
	ID       string `json:"id"`
	UserName string `json:"username"`
	LongUrl  string `json:"longUrl"`
	ShortUrl string `json:"shortUrl"`
}

// ToString renders the user as a multi-line, human-readable string.
// NOTE(review): this includes the password in plain text — confirm it is
// never written to logs.
func (user User) ToString() string {
	return fmt.Sprintf("id: %s\nName: %s\nPassword: %s\n", user.Id, user.Name, user.Password)
}
package controller

import (
	"github.com/gin-gonic/gin"
)

// R is the shared gin engine; routes are attached to it at package init.
var R *gin.Engine

// init builds the engine with gin's default middleware, mounts the ./static
// directory under /static, and registers the user routes (defined elsewhere
// in this package).
func init() {
	R = gin.Default()
	R.Static("/static", "./static")
	user()
}
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"fmt"
	"math"
	"time"

	"github.com/pingcap/errors"
)

// AddUint64 adds uint64 a and b if no overflow, else returns error.
func AddUint64(a uint64, b uint64) (uint64, error) {
	// Overflow iff b exceeds the headroom above a.
	if math.MaxUint64-a < b {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a + b, nil
}

// AddInt64 adds int64 a and b if no overflow, otherwise returns error.
func AddInt64(a int64, b int64) (int64, error) {
	// Overflow is only possible when both operands share a sign.
	if (a > 0 && b > 0 && math.MaxInt64-a < b) ||
		(a < 0 && b < 0 && math.MinInt64-a > b) {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}

	return a + b, nil
}

// AddDuration adds time.Duration a and b if no overflow, otherwise returns error.
// Same check as AddInt64, since Duration is an int64 count of nanoseconds.
func AddDuration(a time.Duration, b time.Duration) (time.Duration, error) {
	if (a > 0 && b > 0 && math.MaxInt64-a < b) ||
		(a < 0 && b < 0 && math.MinInt64-a > b) {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", int64(a), int64(b)))
	}

	return a + b, nil
}

// SubDuration subtracts time.Duration a with b and returns time.Duration if no overflow error.
// SubDuration subtracts time.Duration a with b and returns time.Duration if
// no overflow error. Mirrors SubInt64 (Duration is an int64 nanosecond count).
func SubDuration(a time.Duration, b time.Duration) (time.Duration, error) {
	// a-b overflows when the operands have opposite signs and the magnitude
	// exceeds the range; the b == MinInt64 case is special because -b wraps.
	if (a > 0 && b < 0 && math.MaxInt64-a < -b) ||
		(a < 0 && b > 0 && math.MinInt64-a > -b) ||
		(a == 0 && b == math.MinInt64) {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a - b, nil
}

// AddInteger adds uint64 a and int64 b and returns uint64 if no overflow error.
func AddInteger(a uint64, b int64) (uint64, error) {
	if b >= 0 {
		return AddUint64(a, uint64(b))
	}

	// b < 0: result underflows when |b| > a. uint64(-b) yields the correct
	// magnitude even for b == MinInt64 (wraps to 1<<63).
	if uint64(-b) > a {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a - uint64(-b), nil
}

// SubUint64 subtracts uint64 a with b and returns uint64 if no overflow error.
func SubUint64(a uint64, b uint64) (uint64, error) {
	if a < b {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a - b, nil
}

// SubInt64 subtracts int64 a with b and returns int64 if no overflow error.
func SubInt64(a int64, b int64) (int64, error) {
	// Same opposite-sign analysis as SubDuration.
	if (a > 0 && b < 0 && math.MaxInt64-a < -b) ||
		(a < 0 && b > 0 && math.MinInt64-a > -b) ||
		(a == 0 && b == math.MinInt64) {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}

	return a - b, nil
}

// SubUintWithInt subtracts uint64 a with int64 b and returns uint64 if no overflow error.
func SubUintWithInt(a uint64, b int64) (uint64, error) {
	if b < 0 {
		// Subtracting a negative is adding its magnitude.
		return AddUint64(a, uint64(-b))
	}
	return SubUint64(a, uint64(b))
}

// SubIntWithUint subtracts int64 a with uint64 b and returns uint64 if no overflow error.
func SubIntWithUint(a int64, b uint64) (uint64, error) {
	// A negative a, or b > a, would make the unsigned result underflow.
	if a < 0 || uint64(a) < b {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return uint64(a) - b, nil
}

// MulUint64 multiplies uint64 a and b and returns uint64 if no overflow error.
// MulUint64 multiplies uint64 a and b and returns uint64 if no overflow error.
func MulUint64(a uint64, b uint64) (uint64, error) {
	// Overflow iff a exceeds MaxUint64 / b (b == 0 can never overflow).
	if b > 0 && a > math.MaxUint64/b {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a * b, nil
}

// MulInt64 multiplies int64 a and b and returns int64 if no overflow error.
// It multiplies magnitudes as uint64 and then checks the signed range.
func MulInt64(a int64, b int64) (int64, error) {
	if a == 0 || b == 0 {
		return 0, nil
	}

	var (
		res      uint64
		err      error
		negative = false
	)

	if a > 0 && b > 0 {
		res, err = MulUint64(uint64(a), uint64(b))
	} else if a < 0 && b < 0 {
		res, err = MulUint64(uint64(-a), uint64(-b))
	} else if a < 0 && b > 0 {
		negative = true
		res, err = MulUint64(uint64(-a), uint64(b))
	} else {
		negative = true
		res, err = MulUint64(uint64(a), uint64(-b))
	}

	if err != nil {
		return 0, errors.Trace(err)
	}

	if negative {
		// negative result: magnitude may be up to 1<<63 (MinInt64).
		// When res == 1<<63, int64(res) is MinInt64 and -MinInt64 wraps back
		// to MinInt64 — exactly the desired value.
		if res > math.MaxInt64+1 {
			return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
		}

		return -int64(res), nil
	}

	// positive result
	if res > math.MaxInt64 {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}

	return int64(res), nil
}

// MulInteger multiplies uint64 a and int64 b, and returns uint64 if no overflow error.
func MulInteger(a uint64, b int64) (uint64, error) {
	if a == 0 || b == 0 {
		return 0, nil
	}

	// A negative factor can never yield a representable unsigned product.
	if b < 0 {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}

	return MulUint64(a, uint64(b))
}

// DivInt64 divides int64 a with b, returns int64 if no overflow error.
// It just checks overflow, if b is zero, a "divide by zero" panic throws.
func DivInt64(a int64, b int64) (int64, error) {
	// The only overflowing signed division: MinInt64 / -1.
	if a == math.MinInt64 && b == -1 {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}

	return a / b, nil
}

// DivUintWithInt divides uint64 a with int64 b, returns uint64 if no overflow error.
// It just checks overflow, if b is zero, a "divide by zero" panic throws.
func DivUintWithInt(a uint64, b int64) (uint64, error) { if b < 0 { if a != 0 && uint64(-b) <= a { return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) } return 0, nil } return a / uint64(b), nil } // DivIntWithUint divides int64 a with uint64 b, returns uint64 if no overflow error. // It just checks overflow, if b is zero, a "divide by zero" panic throws. func DivIntWithUint(a int64, b uint64) (uint64, error) { if a < 0 { if uint64(-a) >= b { return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) } return 0, nil } return uint64(a) / b, nil }
package glogger

import (
	"net/http"
)

// readableResponseWriter wraps an http.ResponseWriter and records the
// status code and number of body bytes written, so the response can be
// inspected after the handler has run.
type readableResponseWriter struct {
	writer     http.ResponseWriter
	statusCode int
	length     int
}

// WriteHeader remembers the status code before delegating to the wrapped writer.
func (w *readableResponseWriter) WriteHeader(code int) {
	w.statusCode = code
	w.writer.WriteHeader(code)
}

// Write forwards the bytes to the wrapped writer, accumulating the count of
// bytes successfully written.
func (w *readableResponseWriter) Write(b []byte) (int, error) {
	n, err := w.writer.Write(b)
	if err == nil {
		w.length += n
	}
	return n, err
}

// Header exposes the wrapped writer's header map.
func (w *readableResponseWriter) Header() http.Header {
	return w.writer.Header()
}

// Length reports the total number of body bytes written so far.
func (w *readableResponseWriter) Length() int {
	return w.length
}
/*
 * @lc app=leetcode id=10 lang=golang
 *
 * [10] Regular Expression Matching
 */

package main

// isMatch reports whether s fully matches pattern p, where '.' matches any
// single character and '*' matches zero or more of the preceding element.
// Recursive solution: an empty pattern matches only an empty string; a
// "x*" prefix either matches zero characters (skip two pattern bytes) or,
// when the heads match, consumes one character of s.
func isMatch(s string, p string) bool {
	if p == "" {
		return s == ""
	}

	headOK := s != "" && (p[0] == s[0] || p[0] == '.')

	if len(p) >= 2 && p[1] == '*' {
		if isMatch(s, p[2:]) {
			return true
		}
		return headOK && isMatch(s[1:], p)
	}
	return headOK && isMatch(s[1:], p[1:])
}
package cache

import (
	"fmt"
	"testing"
)

// TestGet exercises the cache Get path and prints the result; it relies on
// the "server" key having been written previously (see the commented Set).
// NOTE(review): this test makes no assertions — it only prints; consider
// asserting on the returned value and error.
func TestGet(t *testing.T) {
	//Set("server",[]byte("haha"))
	bytes, e := Get("server")
	fmt.Printf("%s err = %v \n", bytes, e)
}
// Copyright 2014 Chris Monson <shiblon@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package journal // import "entrogo.com/taskstore/journal"

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath" // only use name manipulation, nothing that touches the file system.
	"sort"
	"strconv"
	"strings"
	"time"
)

var (
	// ErrNotOpen is returned by operations attempted after the journal has
	// been closed (or before it was opened).
	ErrNotOpen = errors.New("journal is not open")

	// Logf is a function used to log warnings, etc. It can be overridden for
	// e.g., testing log output.
	Logf = func(fstr string, vals ...interface{}) {
		log.Printf(fstr, vals...)
	}
)

const (
	// journalMaxRecords is the record count that triggers a log rotation.
	journalMaxRecords = 50000
	// journalMaxAge is the journal age that triggers a log rotation.
	journalMaxAge = time.Hour * 24

	// allow a ten-second clock correction before panicking. Yes, it's arbitrary.
	// (Compared against Unix-second timestamps in openNewLog.)
	clockSkewLeeway = 10
)

// DiskLog is a rotating, snapshot-capable journal persisted through an
// injectable FS. All mutations are serialized through a single goroutine
// fed by the rot/add/snap/quit channels.
type DiskLog struct {
	dir string
	fs  FS

	journalName    string
	journalFile    File
	journalEnc     *gob.Encoder
	journalBirth   time.Time
	journalRecords int

	lastSnapshotTime time.Time

	isOpen bool

	// Request channels consumed by the background goroutine.
	rot  chan chan error
	add  chan addRequest
	snap chan snapRequest
	quit chan chan error
}

// addRequest carries one record to append plus a reply channel.
type addRequest struct {
	rec  interface{}
	resp chan error
}

// snapRequest carries the element stream for a snapshot, the asynchronous
// snapshot-completion channel, and the synchronous start-reply channel.
type snapRequest struct {
	elems    <-chan interface{}
	snapresp chan<- error
	resp     chan error
}

// OpenDiskLog opens a disk log rooted at dir using the real OS filesystem.
func OpenDiskLog(dir string) (*DiskLog, error) {
	// Default implementation just uses standard os module
	return OpenDiskLogInjectFS(dir, OSFS{})
}

// OpenDiskLogInjectFS opens a disk log rooted at dir using the given FS
// implementation (injectable for testing). It starts the background
// goroutine that serializes all journal operations.
func OpenDiskLogInjectFS(dir string, fs FS) (*DiskLog, error) {
	if info, err := fs.Stat(dir); err != nil {
		return nil, fmt.Errorf("Unable to stat %q: %v", dir, err)
	} else if !info.IsDir() {
		return nil, fmt.Errorf("Path %q is not a directory", dir)
	}
	d := &DiskLog{
		dir:  dir,
		add:  make(chan addRequest, 1),
		rot:  make(chan chan error, 1),
		snap: make(chan snapRequest, 1),
		quit: make(chan chan error, 1),
		fs:   fs,
	}

	// We *always* open a new log, even if there was one open when we last terminated.
	// This allows us to ignore any corrupt records at the end of the old one
	// without doing anything complicated to find out where they are, etc. Just
	// open a new log and have done with it. It's simpler and safer.
	// NOTE(review): openNewLog's error is discarded here — confirm intended.
	d.openNewLog()

	go func() {
		for {
			select {
			case req := <-d.add:
				req.resp <- d.addRecord(req.rec)
			case resp := <-d.rot:
				resp <- d.rotateLog()
			case req := <-d.snap:
				req.resp <- d.snapshot(req.elems, req.snapresp)
			case resp := <-d.quit:
				err := d.freezeLog()
				d.isOpen = false
				resp <- err
				return
			}
		}
	}()

	return d, nil
}

// Close gracefully shuts the journal down, finalizing the current journal log.
func (d *DiskLog) Close() error {
	resp := make(chan error, 1)
	d.quit <- resp
	return <-resp
}

// IsOpen reports whether the journal is still accepting operations.
func (d *DiskLog) IsOpen() bool {
	return d.isOpen
}

// addRecord attempts to append the record to the end of the file, using gob encoding.
// addRecord gob-encodes rec onto the current journal, syncs the file, and
// rotates the log when it exceeds the age or record-count thresholds.
// Runs only on the background goroutine.
func (d *DiskLog) addRecord(rec interface{}) error {
	if !d.isOpen {
		return ErrNotOpen
	}
	if err := d.journalEnc.Encode(rec); err != nil {
		return err
	}
	if err := d.journalFile.Sync(); err != nil {
		return err
	}
	d.journalRecords++
	age := time.Now().Sub(d.journalBirth)
	if age >= journalMaxAge || d.journalRecords >= journalMaxRecords {
		if err := d.rotateLog(); err != nil {
			return err
		}
	}
	return nil
}

// TSFromName gets a timestamp from the file name (it's a prefix).
func TSFromName(name string) (int64, error) {
	name = filepath.Base(name)
	pos := strings.IndexRune(name, '.')
	if pos < 0 {
		return -1, fmt.Errorf("weird name, can't find ID: %q", name)
	}
	return strconv.ParseInt(name[:pos], 10, 64)
}

// snapshot attempts to get data elements from the caller and write them all to
// a snapshot file. It always triggers a log rotation, so that any other data
// that comes in (not part of the snapshot) is strictly newer.
// The file writing and cleanup happen asynchronously; completion (or failure)
// is reported on resp.
func (d *DiskLog) snapshot(elems <-chan interface{}, resp chan<- error) error {
	if !d.isOpen {
		return ErrNotOpen
	}
	lastbirth := d.journalBirth
	if err := d.rotateLog(); err != nil {
		return err
	}

	// Once the rotation is complete, we try to create a file (still
	// synchronous) and then kick off an asynchronous snapshot process.
	snapname := filepath.Join(d.dir, fmt.Sprintf("%d.%d.snapshot.working", lastbirth.Unix(), os.Getpid()))
	donename := strings.TrimSuffix(snapname, ".working")

	file, err := d.fs.Create(snapname)
	if err != nil {
		return err
	}
	encoder := gob.NewEncoder(file)

	go func() {
		// make sure we consume all of them to play nicely with the producer.
		defer func() {
			num := 0
			for _ = range elems {
				num++
			}
			if num > 0 {
				Logf("consumed but did not snapshot %d element(s)", num)
			}
		}()

		for elem := range elems {
			if err := encoder.Encode(elem); err != nil {
				errtxt := fmt.Sprintf("snapshot failed to encode element %#v: %v", elem, err)
				if err := file.Close(); err != nil {
					errtxt += fmt.Sprintf(" -- also failed to close file %q: %v", file.Name(), err)
				}
				resp <- errors.New(errtxt)
				return
			}
		}
		if err := file.Close(); err != nil {
			resp <- fmt.Errorf("failed to close snapshot file %q: %v", snapname, err)
			return
		}

		// Now we indicate that the file is finished by renaming it.
		if err := d.fs.Rename(snapname, donename); err != nil {
			resp <- fmt.Errorf("snapshot incomplete, failed to rename %q to %q: %v", snapname, donename, err)
			return
		}

		// Finally, we delete all of the journal files that participated up to this point.
		doneglob := filepath.Join(d.dir, "*.*.log")
		workglob := filepath.Join(d.dir, "*.*.log.working")
		donenames, err := d.fs.FindMatching(doneglob)
		if err != nil {
			Logf("finished name glob %q failed: %v", doneglob, err)
		}
		worknames, err := d.fs.FindMatching(workglob)
		if err != nil {
			Logf("working name glob %q failed: %v", workglob, err)
		}
		names := make([]string, 0, len(donenames)+len(worknames))
		names = append(names, donenames...)
		names = append(names, worknames...)

		// Mark all previous journals, finished or otherwise, as obsolete.
		// Journals newer than the snapshot's base timestamp are kept.
		maxts := lastbirth.Unix()
		for _, name := range names {
			ts, err := TSFromName(name)
			if err != nil {
				Logf("skipping unknown name format %q: %v", name, err)
				continue
			}
			if ts > maxts {
				continue
			}
			// Finally, rename this log file to an obsolete name so that it can be cleaned up later.
			var obsname string
			if strings.HasSuffix(name, ".working") {
				obsname = fmt.Sprintf("%s.defunct", strings.TrimSuffix(name, ".working"))
			} else {
				obsname = fmt.Sprintf("%s.obsolete", name)
			}
			if err := d.fs.Rename(name, obsname); err != nil {
				Logf("failed to rename %q to %q: %v\n", name, obsname, err)
				continue
			}
		}
		resp <- nil
	}()

	return nil
}

// freezeLog closes the file for the current journal, nils out the appropriate
// members, and removes the ".working" suffix from the file name.
func (d *DiskLog) freezeLog() error {
	if !d.isOpen {
		return ErrNotOpen
	}
	jf := d.journalFile
	d.journalEnc = nil
	d.journalFile = nil
	if err := jf.Close(); err != nil {
		return fmt.Errorf("failed to close log: %v", err)
	}
	if !strings.HasSuffix(d.journalName, ".working") {
		return fmt.Errorf("trying to freeze an already-frozen log: %s", d.journalName)
	}
	if err := d.fs.Rename(d.journalName, strings.TrimSuffix(d.journalName, ".working")); err != nil {
		return fmt.Errorf("failed to freeze %s by rename: %v", d.journalName, err)
	}
	return nil
}

// openNewLog creates a new log file and sets it as the current log. It does
// not check whether another one is already open, it just abandons it without
// closing it.
func (d *DiskLog) openNewLog() error {
	// Make sure we don't rotate into the past. That will mess everything up.
	var name string
	oldbirth := d.journalBirth
	birth := time.Now()
	if birth.Unix() < oldbirth.Unix()-clockSkewLeeway {
		// Clock moved backwards by more than the allowed leeway: refuse to
		// create a log whose timestamp would sort before the previous one.
		panic(fmt.Sprintf(
			"latest log created at timestamp %d, which appears to be in the future; current time is %d\n"+
				"either the clock got changed, or too many rotations have happened in a short period of time",
			oldbirth.Unix(), birth.Unix()))
	} else if birth.Unix() <= oldbirth.Unix() {
		// Same (or slightly earlier) second: bump forward to keep log
		// timestamps strictly increasing.
		birth = oldbirth.Add(1 * time.Second)
	}
	name = filepath.Join(d.dir, fmt.Sprintf("%d.%d.log.working", birth.Unix(), os.Getpid()))
	f, err := d.fs.Create(name)
	if err != nil {
		return err
	}
	d.journalBirth = birth
	d.journalName = name
	d.journalRecords = 0
	d.journalFile = f
	d.journalEnc = gob.NewEncoder(f)
	d.isOpen = true
	return nil
}

// rotateLog closes and freezes the current log and opens a new one.
func (d *DiskLog) rotateLog() error {
	if !d.isOpen {
		return ErrNotOpen
	}
	if err := d.freezeLog(); err != nil {
		return err
	}
	if err := d.openNewLog(); err != nil {
		return err
	}
	return nil
}

// Dir returns the file system directory for this journal.
func (d *DiskLog) Dir() string {
	return d.dir
}

// JournalName returns the current journal name.
func (d *DiskLog) JournalName() string {
	return d.journalName
}

// Append adds a record to the end of the journal.
func (d *DiskLog) Append(rec interface{}) error {
	resp := make(chan error, 1)
	d.add <- addRequest{rec, resp}
	return <-resp
}

// StartSnapshot triggers an immediate rotation, then consumes all of the
// elements on the channel and serializing them to a snapshot file with the
// same ID as the recently-closed log.
func (d *DiskLog) StartSnapshot(elems <-chan interface{}, snapresp chan<- error) error {
	resp := make(chan error, 1)
	d.snap <- snapRequest{elems, snapresp, resp}
	return <-resp
}

// Rotate closes the current log file and opens a new one.
func (d *DiskLog) Rotate() error {
	resp := make(chan error, 1)
	d.rot <- resp
	return <-resp
}

// latestFrozenSnapshot attempts to find the most recent snapshot on which to base journal replay.
func (d *DiskLog) latestFrozenSnapshot() (int64, string, error) { glob := filepath.Join(d.dir, fmt.Sprintf("*.*.snapshot")) names, err := d.fs.FindMatching(glob) if err != nil { return -1, "", err } if len(names) == 0 { return -1, "", io.EOF } bestts := int64(-1) bestname := "" for _, name := range names { ts, err := TSFromName(name) if err != nil { Logf("can't find id in in %q: %v", name, err) continue } if ts > bestts { bestts = ts bestname = name } } if bestts < 0 { return -1, "", io.EOF } return bestts, bestname, nil } // SnapshotDecoder returns a decoder whose Decode function can be called to get // the next item from the most recent frozen snapshot. func (d *DiskLog) SnapshotDecoder() (Decoder, error) { _, snapname, err := d.latestFrozenSnapshot() if err != nil && err != io.EOF { return nil, err } // Default empty implementation for the case where there just isn't a file. if err == io.EOF { return EmptyDecoder{}, nil } // Found it - try to open it for reading. file, err := d.fs.Open(snapname) if err != nil { return nil, err } return gob.NewDecoder(file), nil } // journalNames implements a Sorter interface, sorting based on timestamps. type journalNames []string func (n journalNames) Less(i, j int) bool { bi, _ := TSFromName(n[i]) bj, _ := TSFromName(n[j]) return bi < bj } func (n journalNames) Swap(i, j int) { n[i], n[j] = n[j], n[i] } func (n journalNames) Len() int { return len(n) } // gobMultiDecoder decodes gob entries from multiple readers in series. An // io.MultiReader is not suitable here because each journal file can have a // single corrupt entry at the end, so we have to gracefully handle // ErrUnexpectedEOF in the logical *middle* of the whole journal stream. 
type gobMultiDecoder struct { fs FS filenames []string cur int decoder *gob.Decoder } func newGobMultiDecoder(fs FS, filenames ...string) (*gobMultiDecoder, error) { if len(filenames) == 0 { return nil, fmt.Errorf("gob multidecoder needs at least one file") } return &gobMultiDecoder{ cur: 0, fs: fs, filenames: filenames, decoder: nil, }, nil } // newGobDecoder loads the file into memory and creates a gob.Decoder from it. // As we are dealing with log files, and they really should not have any reason // to get individually huge, it is safer and simpler to just load the file into // RAM and the quickly close it instead of relying on the caller to consume all // of the records quickly. func (d *gobMultiDecoder) newGobDecoder(fname string) (*gob.Decoder, error) { file, err := d.fs.Open(fname) if err != nil { return nil, err } defer func() { if err := file.Close(); err != nil { panic(fmt.Sprintf("error closing file opened only for reading. should never happen: %v", err)) } }() var buffer bytes.Buffer if _, err := buffer.ReadFrom(file); err != nil { return nil, err } return gob.NewDecoder(&buffer), nil } // Decode runs the decode function on each file in turn, skipping records that // produce an ErrUnexpectedEOF. When we checksum records, it will also stop on // those and verify that they are each the last such in their respective files. func (d *gobMultiDecoder) Decode(val interface{}) error { // This only happens on the first call. if d.decoder == nil && d.cur == 0 { name := d.filenames[d.cur] decoder, err := d.newGobDecoder(name) if err != nil { Logf("failed to create decoder for file %q: %v", name, err) return err } d.decoder = decoder } err := d.decoder.Decode(val) for err == io.EOF || err == io.ErrUnexpectedEOF { if err == io.ErrUnexpectedEOF { Logf("journal file %q has an unexpected EOF", d.filenames[d.cur]) // Try to read one more time, ensure we get an actual EOF. 
v := struct{}{} err := d.decoder.Decode(&v) if err != io.EOF && err != io.ErrUnexpectedEOF { // OK - the next record really *wasn't* supposed to be the // last. Only the last record is allowed to be a partial write // or otherwise corrupt, so this is a real problem. return io.ErrUnexpectedEOF } } if d.cur++; d.cur >= len(d.filenames) { return io.EOF // really and truly finished, now. } name := d.filenames[d.cur] d.decoder, err = d.newGobDecoder(d.filenames[d.cur]) if err != nil { Logf("failed to create decoder for file %q: %v", name, err) return err } err = d.decoder.Decode(val) } return err } // JournalDecoder returns a Decoder whose Decode function can be called to get // the next item from the journals that are newer than the most recent // snapshot. func (d *DiskLog) JournalDecoder() (Decoder, error) { doneglob := filepath.Join(d.dir, fmt.Sprintf("*.*.log")) workglob := filepath.Join(d.dir, fmt.Sprintf("*.*.log.working")) donenames, err := d.fs.FindMatching(doneglob) switch { case err == io.EOF: return EmptyDecoder{}, nil case err != nil: return nil, err } worknames, err := d.fs.FindMatching(workglob) if err != nil { return nil, err } names := make([]string, 0, len(donenames)+len(worknames)) names = append(names, donenames...) names = append(names, worknames...) sort.Sort(journalNames(names)) snapbirth, _, err := d.latestFrozenSnapshot() if err != nil && err != io.EOF { return nil, err } for i, name := range names { ts, err := TSFromName(name) if err != nil { return nil, fmt.Errorf("failed to get timestamp from name %q: %v", name, err) } if ts > snapbirth { // Found the first journal file later than the snapshot. Return the decoder. return newGobMultiDecoder(d.fs, names[i:]...) } } // No journals found later than the snapshot. return EmptyDecoder{}, nil }
package heap

import "testing"

// TestTopKFrequent checks the order of the two most frequent elements.
func TestTopKFrequent(t *testing.T) {
	got := topKFrequent([]int{1, 1, 1, 2, 2, 3}, 2)
	if got[0] != 2 || got[1] != 1 {
		t.Fail()
	}
}
package controllers

import (
	"fmt"

	"github.com/gopherchina/website/models"
)

// MainController serves the site's main pages.
type MainController struct {
	baseController
}

// Get renders the page selected by the ":name" route parameter. Known names
// map to fixed templates; anything else is looked up as a document.
func (c *MainController) Get() {
	name := c.Ctx.Input.Param(":name")
	switch name {
	case "":
		c.Data["Title"] = "GopherChina"
		c.Data["indexActive"] = true
		c.TplNames = "index.tpl"
	case "speaker":
		c.Data["Title"] = "分享嘉宾 - GopherChina"
		c.Data["userActive"] = true
		c.TplNames = "speaker.tpl"
	case "venue":
		c.Data["Title"] = "会场信息 - GopherChina"
		c.TplNames = "venue.tpl"
	case "register":
		c.Data["regActive"] = true
		c.Data["Title"] = "注册报名 - GopherChina"
		c.TplNames = "register.tpl"
	default:
		// Fall back to a content document looked up by name and language.
		df := models.GetDoc(name, c.Lang)
		c.Data[fmt.Sprintf("%sActive", name)] = true
		c.Data["Section"] = name
		c.Data["Title"] = df.Title + " - GopherChina"
		c.Data["title"] = df.Title
		c.Data["Data"] = string(df.Data)
		c.TplNames = "detail.tpl"
	}
}
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strconv"
	"strings"
)

// podcast is one directory of media files exposed as an RSS feed.
type podcast struct {
	Name     string
	Image    string
	Episodes []episode
}

// episode is a single media file inside a podcast directory.
type episode struct {
	Filename string
	Size     int
}

var podcasts map[string]podcast

var mediaEndings = []string{"mp3", "ogg", "wav", "mp4", "webm"}
var imageEndings = []string{"jpg", "jpeg", "png", "tiff"}

func main() {
	podcasts = make(map[string]podcast)
	scanDirectory("data")
	for _, podcast := range podcasts {
		fmt.Println(podcast)
	}
	http.HandleFunc("/", sendHtml)
	http.HandleFunc("/podcasts/", sendXml)
	http.Handle("/media/"+"data/", http.StripPrefix("/media/"+"data/", http.FileServer(http.Dir("data"))))
	log.Fatal(http.ListenAndServe(":8080", nil))
}

// scanDirectory registers directory as a podcast, collecting media files as
// episodes and the first matching image as cover art. Subdirectories are
// scanned recursively as their own podcasts.
func scanDirectory(directory string) {
	podcast := podcast{Name: directory, Image: "", Episodes: make([]episode, 0)}
	files, err := ioutil.ReadDir(directory)
	if err != nil {
		// Previously the error was silently dropped and an empty podcast
		// registered; now unreadable directories are skipped with a log line.
		log.Printf("cannot read directory %q: %v", directory, err)
		return
	}
	for _, f := range files {
		if f.IsDir() {
			scanDirectory(directory + "/" + f.Name())
		} else if checkFileending(f.Name(), mediaEndings) {
			episode := episode{Filename: f.Name(), Size: int(f.Size())}
			podcast.Episodes = append(podcast.Episodes, episode)
		} else if checkFileending(f.Name(), imageEndings) {
			podcast.Image = f.Name()
		}
	}
	podcasts[podcast.Name] = podcast
}

// checkFileending reports whether filename ends in one of the given
// extensions, compared case-insensitively.
func checkFileending(filename string, endings []string) bool {
	for _, ending := range endings {
		if strings.HasSuffix(strings.ToLower(filename), "."+strings.ToLower(ending)) {
			return true
		}
	}
	return false
}

// sendHtml writes an index page linking to each podcast feed.
func sendHtml(w http.ResponseWriter, req *http.Request) {
	// Bug fix: the <html>/<ul> wrapper used to be emitted once per podcast
	// inside the loop, producing invalid HTML. Emit it exactly once.
	fmt.Fprintf(w, "<html><head><title>directory2podcast</title></head><body><ul>\n")
	for _, podcast := range podcasts {
		// Names are passed as %s arguments, never as the format string, so a
		// '%' in a directory name cannot corrupt the output.
		fmt.Fprintf(w, "<li><a href=\"podcasts/%s\">%s</a></li>\n", podcast.Name, podcast.Name)
	}
	fmt.Fprintf(w, "</ul></body></html>\n")
}

// sendXml writes the RSS feed for the podcast named in the URL path, or falls
// back to the index page when the podcast is unknown.
func sendXml(w http.ResponseWriter, req *http.Request) {
	podcast, present := podcasts[req.URL.Path[len("/podcasts/"):]]
	if present {
		fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
		fmt.Fprintf(w, "<rss version=\"2.0\" xmlns:atom=\"http://www.w3.org/2005/Atom\">\n")
		fmt.Fprintf(w, " <channel>\n")
		fmt.Fprintf(w, "  <title>%s</title>\n", podcast.Name)
		fmt.Fprintf(w, "  <image><url>http://%s/media/%s/%s</url></image>\n", req.Host, podcast.Name, podcast.Image)
		fmt.Fprintf(w, "  <generator>directory2podcast</generator>\n")
		for _, episode := range podcast.Episodes {
			fmt.Fprintf(w, "  <item>\n")
			fmt.Fprintf(w, "   <title>%s</title>\n", episode.Filename)
			fmt.Fprintf(w, "   <link>media/%s</link>\n", episode.Filename)
			fmt.Fprintf(w, "   <description><![CDATA[%s]]></description>\n", episode.Filename)
			fmt.Fprintf(w, "   <enclosure url=\"http://%s/media/%s/%s\" length=\"%s\" type=\"audio/mp3\" />\n",
				req.Host, podcast.Name, episode.Filename, strconv.Itoa(episode.Size))
			fmt.Fprintf(w, "  </item>\n")
		}
		fmt.Fprintf(w, " </channel>\n")
		fmt.Fprintf(w, "</rss>\n")
	} else {
		sendHtml(w, req)
	}
}
package catapult

import (
	"net"
	"net/http"
	"time"
)

// Client wraps an *http.Client with a per-request timeout and a middleware
// context.
type Client struct {
	RequestTimeout time.Duration
	context        *Ctx
	rawClient      *http.Client
}

var (
	DefaultRequestTimeout        = time.Second * 20
	DefaultExpectContinueTimeout = time.Second
	DefaultTLSHandshakeTimeout   = time.Second
	DefaultDialTimeout           = time.Second
	DefaultKeepAlive             = time.Second * 90
	DefaultIdleConnTimeout       = time.Second * 90
	DefaultMaxIdleConnections    = 100
	DefaultTransport             = buildDefaultTransport()
)

// buildDefaultTransport constructs the package's default HTTP transport with
// the timeout/keep-alive defaults above.
func buildDefaultTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   DefaultDialTimeout,
			KeepAlive: DefaultKeepAlive,
			DualStack: true,
		}).DialContext,
		MaxIdleConns:          DefaultMaxIdleConnections,
		IdleConnTimeout:       DefaultIdleConnTimeout,
		TLSHandshakeTimeout:   DefaultTLSHandshakeTimeout,
		ExpectContinueTimeout: DefaultExpectContinueTimeout,
	}
}

// NewClient returns a Client wired to a fresh *http.Client using
// DefaultTransport.
//
// Bug fix: the previous version assigned http.DefaultClient and then mutated
// its Transport, silently reconfiguring the process-wide default client for
// every other user of net/http.
func NewClient() *Client {
	client := &Client{
		RequestTimeout: DefaultRequestTimeout,
		context:        NewContext(),
		rawClient:      &http.Client{Transport: DefaultTransport},
	}
	client.context.Client = client
	return client
}

// NewRequest creates a request bound to a clone of the client's context.
func (c *Client) NewRequest() *Request {
	ctx := c.context.Clone()
	req := &Request{
		timeout: c.RequestTimeout,
		Context: ctx,
	}
	req.Context.Client = c
	return req
}

// Wrap appends a middleware feature to the client's context.
func (c *Client) Wrap(feature ClientFeature) {
	c.context.middlwares = append(c.context.middlwares, feature)
}
package client

import (
	"encoding/json"
	"fmt"
	"path"

	"github.com/go-resty/resty/v2"
	"github.com/ophum/humstack-redeployment/pkg/api"
)

// RedeploymentClient talks to the redeployment API server over HTTP.
type RedeploymentClient struct {
	scheme           string
	apiServerAddress string
	apiServerPort    int32
	client           *resty.Client
	headers          map[string]string
}

// RedeploymentResponse is the envelope for single-object API responses.
type RedeploymentResponse struct {
	Code  int32       `json:"code"`
	Error interface{} `json:"error"`
	Data  struct {
		Redeployment api.Redeployment `json:"redeployment"`
	} `json:"data"`
}

// RedeploymentListResponse is the envelope for list API responses.
type RedeploymentListResponse struct {
	Code  int32       `json:"code"`
	Error interface{} `json:"error"`
	Data  struct {
		RedeploymentList []*api.Redeployment `json:"redeployments"`
	} `json:"data"`
}

const basePath = "api/v0/redeployments"

// NewRedeploymentClient builds a client for the given scheme/host/port.
func NewRedeploymentClient(scheme, apiServerAddress string, apiServerPort int32) *RedeploymentClient {
	return &RedeploymentClient{
		scheme:           scheme,
		apiServerAddress: apiServerAddress,
		apiServerPort:    apiServerPort,
		client:           resty.New(),
		headers: map[string]string{
			"Content-Type": "application/json",
			"Accept":       "application/json",
		},
	}
}

// Get fetches a single redeployment by ID.
func (c *RedeploymentClient) Get(rdID string) (*api.Redeployment, error) {
	resp, err := c.client.R().SetHeaders(c.headers).Get(c.getPath(rdID))
	if err != nil {
		return nil, err
	}
	body := resp.Body()
	rdResp := RedeploymentResponse{}
	err = json.Unmarshal(body, &rdResp)
	if err != nil {
		return nil, err
	}
	// Consistency fix: surface HTTP error statuses like Create already did.
	if resp.IsError() {
		return nil, fmt.Errorf("error: %+v", rdResp.Error)
	}
	return &rdResp.Data.Redeployment, nil
}

// List fetches all redeployments.
func (c *RedeploymentClient) List() ([]*api.Redeployment, error) {
	resp, err := c.client.R().SetHeaders(c.headers).Get(c.getPath(""))
	if err != nil {
		return nil, err
	}
	body := resp.Body()
	rdResp := RedeploymentListResponse{}
	err = json.Unmarshal(body, &rdResp)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, fmt.Errorf("error: %+v", rdResp.Error)
	}
	return rdResp.Data.RedeploymentList, nil
}

// Create posts a new redeployment and returns the server's view of it.
func (c *RedeploymentClient) Create(rd *api.Redeployment) (*api.Redeployment, error) {
	body, err := json.Marshal(rd)
	if err != nil {
		return nil, err
	}
	resp, err := c.client.R().SetHeaders(c.headers).SetBody(body).Post(c.getPath(""))
	if err != nil {
		return nil, err
	}
	body = resp.Body()
	rdResp := RedeploymentResponse{}
	err = json.Unmarshal(body, &rdResp)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, fmt.Errorf("error: %+v", rdResp.Error)
	}
	return &rdResp.Data.Redeployment, nil
}

// Update puts the given redeployment and returns the server's view of it.
func (c *RedeploymentClient) Update(rd *api.Redeployment) (*api.Redeployment, error) {
	body, err := json.Marshal(rd)
	if err != nil {
		return nil, err
	}
	resp, err := c.client.R().SetHeaders(c.headers).SetBody(body).Put(c.getPath(rd.ID))
	if err != nil {
		return nil, err
	}
	body = resp.Body()
	rdResp := RedeploymentResponse{}
	err = json.Unmarshal(body, &rdResp)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, fmt.Errorf("error: %+v", rdResp.Error)
	}
	return &rdResp.Data.Redeployment, nil
}

// Delete removes the redeployment with the given ID.
func (c *RedeploymentClient) Delete(rdID string) error {
	resp, err := c.client.R().SetHeaders(c.headers).Delete(c.getPath(rdID))
	if err != nil {
		return err
	}
	if resp.IsError() {
		return fmt.Errorf("error: status %d", resp.StatusCode())
	}
	return nil
}

// getPath builds the request URL for the given sub-path. It uses path.Join
// (slash-separated, URL-style) rather than filepath.Join, which would emit
// backslashes on Windows.
func (c *RedeploymentClient) getPath(p string) string {
	return fmt.Sprintf("%s://%s", c.scheme,
		path.Join(fmt.Sprintf("%s:%d", c.apiServerAddress, c.apiServerPort), basePath, p))
}
package request

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
	"net/url"
	"reflect"
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestRequest is a table-driven round-trip test: each case spins up an
// httptest server that asserts on what the client sent (method, headers,
// params, body), then the client-side assertions check the decoded response
// and captured response headers.
func TestRequest(t *testing.T) {
	tests := []struct {
		name       string      // default "[unnamed]"
		method     string      // default "GET"
		reqHeader  http.Header // default nil
		respHeader http.Header // default nil
		params     url.Values  // default url.Values{}
		req        interface{} // default nil
		resp       interface{} // default struct{}{}
	}{
		{
			name: "GET",
		},
		{
			name:   "GET with Params",
			params: url.Values{"k1": nil, "k2": []string{}, "k3": []string{"v"}, "k4": []string{"v", "v"}},
		},
		{
			// Not Good, But Allowed
			name: "GET with Request Body",
			req: struct {
				String string `json:"string"`
				Int    int    `json:"int"`
			}{
				String: "string",
				Int:    123,
			},
		},
		{
			name: "GET with Response",
			resp: struct {
				String string `json:"string"`
				Int    int    `json:"int"`
			}{
				String: "string",
				Int:    123,
			},
		},
		{
			name:   "POST",
			method: http.MethodPost,
		},
		{
			name:   "POST with Params",
			method: http.MethodPost,
			params: url.Values{"k1": nil, "k2": []string{}, "k3": []string{"v"}, "k4": []string{"v", "v"}},
		},
		{
			name:   "POST with Request Body",
			method: http.MethodPost,
			req: struct {
				String string `json:"string"`
				Int    int    `json:"int"`
			}{
				String: "string",
				Int:    123,
			},
		},
		{
			name:   "POST with Response",
			method: http.MethodPost,
			resp: struct {
				String string `json:"string"`
				Int    int    `json:"int"`
			}{
				String: "string",
				Int:    123,
			},
		},
		{
			name:      "Header",
			reqHeader: http.Header{"Header-Key": {"Header-Value"}},
		},
		{
			name:       "GetHeader",
			respHeader: http.Header{"Header-Key": {"Header-Value"}},
		},
	}
	// pof ("pointer of"): fresh *T for a value of type T, used to unmarshal.
	pof := func(i interface{}) interface{} { return reflect.New(reflect.TypeOf(i)).Interface() }
	// eof ("elem of"): dereference *T back to T for equality comparison.
	eof := func(i interface{}) interface{} { return reflect.ValueOf(i).Elem().Interface() }
	for idx, tt := range tests {
		test := tt
		// Fill in the documented defaults for fields the case left zero.
		if test.name == "" {
			test.name = "[unnamed]"
		}
		if test.method == "" {
			test.method = "GET"
		}
		if test.reqHeader == nil {
			test.reqHeader = make(http.Header)
		}
		// Headers the Go HTTP client always sends; merged into the expected
		// request headers below.
		basicHeader := http.Header{
			"Accept-Encoding": {"gzip"},
			"User-Agent":      {"Go-http-client/1.1"},
		}
		if test.params == nil {
			test.params = url.Values{}
		}
		// Content-Length is only expected when a body is (or may be) sent.
		if test.method == "POST" || test.req != nil {
			if test.req != nil {
				reqBytes, err := json.Marshal(test.req)
				require.NoError(t, err)
				basicHeader.Add("Content-Length", strconv.Itoa(len(reqBytes)))
			} else {
				basicHeader.Add("Content-Length", "0")
			}
		}
		if test.resp == nil {
			test.resp = struct{}{}
		}
		t.Run(fmt.Sprintf("%d|%s", idx, test.name), func(t *testing.T) {
			// Server side: assert on the wire-level request, then echo the
			// configured response headers and body.
			server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
				// check if the Method meets expectations
				require.Equal(t, test.method, req.Method)
				// check if the Header meets expectations
				require.Equal(t, test.reqHeader, req.Header)
				// check if the Params meets expectations
				require.Equal(t, test.params.Encode(), req.URL.Query().Encode())
				// check if the Request Body meets expectations
				reqBytes, err := ioutil.ReadAll(req.Body)
				require.NoError(t, err)
				if test.req == nil {
					require.Len(t, reqBytes, 0)
				} else {
					_req := pof(test.req)
					require.NoError(t, json.Unmarshal(reqBytes, _req))
					require.Equal(t, test.req, eof(_req))
				}
				// send response
				for k, vs := range test.respHeader {
					for _, v := range vs {
						rw.Header().Add(k, v)
					}
				}
				respBytes, err := json.Marshal(test.resp)
				require.NoError(t, err)
				_, err = rw.Write(respBytes)
				require.NoError(t, err)
			}))
			defer server.Close()
			// verify that the Client received the expected response
			_resp := pof(test.resp)
			r := New().With(Options.Log(Log{
				Logger:            t.Logf,
				URL:               true,
				RequestBody:       true,
				RequestBodyLimit:  0,
				ResponseBody:      true,
				ResponseBodyLimit: 0,
			}))
			if test.method == http.MethodPost {
				r.With(Options.Method(http.MethodPost))
			}
			for k, vs := range test.reqHeader {
				for _, v := range vs {
					r.With(Options.AddHeader(k, v))
				}
			}
			// Merge the client's implicit headers into the expectation AFTER
			// the explicit ones were registered on the request — order matters
			// here, since the server compares against test.reqHeader.
			for k, vs := range basicHeader {
				for _, v := range vs {
					test.reqHeader.Add(k, v)
				}
			}
			respHeader := http.Header{}
			r.With(Options.GetResponseHeader(respHeader))
			r.With(Options.CheckResponseBeforeUnmarshal(func(statusCode int, body []byte) error {
				require.Equal(t, http.StatusOK, statusCode)
				return nil
			}))
			r.With(Options.CheckResponseAfterUnmarshal(func(statusCode int, v interface{}) error {
				require.Equal(t, http.StatusOK, statusCode)
				require.Equal(t, test.resp, eof(v))
				return nil
			}))
			r.With(Options.HookRequest(func(ctx context.Context, req *http.Request) error {
				dumpBytes, err := httputil.DumpRequest(req, true)
				if err != nil {
					return err
				}
				t.Logf("request dump: %s", string(dumpBytes))
				return nil
			}))
			r.With(Options.HookResponse(func(ctx context.Context, resp *http.Response) error {
				dumpBytes, err := httputil.DumpResponse(resp, true)
				if err != nil {
					return err
				}
				t.Logf("response dump: %s", string(dumpBytes))
				return nil
			}))
			require.NoError(t, r.Do(server.URL, test.params, test.req, _resp))
			require.Equal(t, test.resp, eof(_resp))
			// check if the Header meets expectations
			// only check if all expected headers exist and meet expectations
			for k, vs := range test.respHeader {
				require.Equal(t, vs, respHeader.Values(k), "response header %s", k)
			}
		})
	}
}
package main import ( "encoding/json" "fmt" "github.com/apache/rocketmq-client-go/core" log "github.com/cihub/seelog" "rocketmq-filemgr/src/common" "rocketmq-filemgr/src/rocketmqMy" "runtime" "strconv" "time" // "net/http" // _ "net/http/pprof" ) type rocketmqXclean struct { //Implement xclean's taskDealMsg interface e *common.ExeCommon } func (rx *rocketmqXclean)LogInit() { config := ` <seelog> <outputs formatid="main"> <rollingfile type="size" filename="xxx" maxsize="1000000000" maxrolls="25"/> </outputs> <formats> <format id="main" format="%Date %Time [%File:%Line] [%Func] %Msg%n"/> </formats> </seelog> ` logger, loggerErr := log.LoggerFromConfigAsBytes([]byte(config)) if loggerErr != nil { fmt.Println(loggerErr) } log.ReplaceLogger(logger) } /* * Implement xclean's taskDealMsg interface */ func (rx *rocketmqXclean) DealMsg(msg common.InputMsg, msgDealCounts int) int { return rockemqXcleanDealMsg(msg, msgDealCounts) } func (rx *rocketmqXclean)ChooseStrategy(){ rx.e.StrategyInterface = common.ChooseStrategy(common.PushCfg.PushStrategy, rx.e.RebuildFilename, common.PushCfg.PushMaxGorutines, common.PushCfg.PushListIterTicker, common.PushCfg.PushReconsumeListLen, rx) } func (rx *rocketmqXclean)RocketMQStart() { //TODO model config xcleanPcConfig := rocketmqMy.GenPcConfig(common.CommonCfg.RocketMqNameServer, common.DeviceId + "xxx", rocketmq.Clustering) rocketmqMy.ConsumeWithPush(xcleanPcConfig, common.PushCfg.PushTopic, common.ExitChan, common.MsgChan) } func (rx *rocketmqXclean)ExeInit() { //TODO move it Combination Inheritance ConfigInit RegisterExit GoRecieveMsg RebuildTask watchSquidPortCfg() /* use no buffer channel can make less error when restart */ repChan = make(chan common.OutputMsg, 65535) go goMergeReport(repChan) } func main() { runtime.GOMAXPROCS(runtime.NumCPU()) /* go func() { log.Debug(http.ListenAndServe(":6060", nil)) }() */ var e = common.ExeCommon{ RebuildFilename:"xxx", CfgFilename:"xxx", } rx := rocketmqXclean{&e} defer log.Flush() var p 
common.ExeInterface = &rx e.Run(p) <- make(chan struct{}) } /* -1 failed, 0 success */ func rockemqXcleanDealMsg(msgStruct common.InputMsg, msgDealCounts int) int { failed := 0 if failed == 1 { return -1 } return 0 }
package controller

import (
	"context"
	"encoding/json"
	"errors"
	"github.com/go-redis/redis/v8"
	"log"
	"projja_exec/graph"
	"projja_exec/model"
	"strconv"
	"time"
)

// ListenExecStream blocks forever reading the "exec" Redis stream and
// dispatches each field of each entry to a handler goroutine.
// NOTE(review): XRead with the "$" ID only delivers entries added while the
// read is blocked; entries published between two reads can be missed —
// confirm this is acceptable.
func (c *controller) ListenExecStream(ctx context.Context) {
	for {
		xStreamSlice := c.Rds.XRead(ctx, &redis.XReadArgs{Block: 0, Streams: []string{"exec", "$"}})
		xReadResult, err := xStreamSlice.Result()
		if err != nil {
			if err == redis.Nil {
				// Nothing arrived; block again.
				continue
			} else {
				panic(err)
			}
		}
		rdsMessages := xReadResult[len(xReadResult)-1].Messages
		for _, rdsMessage := range rdsMessages {
			rdsMap := rdsMessage.Values
			if val, ok := rdsMap["skills"]; ok {
				go c.setSkillsToUser(val)
			}
			if val, ok := rdsMap["info"]; ok {
				go c.updateUserInfo(val)
			}
		}
	}
}

// setSkillsToUser decodes a userSkillsData JSON payload and applies it to the
// in-memory graph. Errors are logged, not propagated.
func (c *controller) setSkillsToUser(jsonSkills interface{}) {
	if strJsonSkills, ok := jsonSkills.(string); ok {
		skillsData := &userSkillsData{}
		err := json.Unmarshal([]byte(strJsonSkills), skillsData)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		err = c.setSkillsToUserInGraph(skillsData)
		if err != nil {
			log.Println("error in setting skills to user: ", err)
		}
	} else {
		log.Println("error in casting user skills id")
	}
}

// updateUserInfo decodes an updateUserData JSON payload and applies it to the
// in-memory graph.
func (c *controller) updateUserInfo(jsonUserInfo interface{}) {
	if strJsonUserData, ok := jsonUserInfo.(string); ok {
		userData := &updateUserData{}
		err := json.Unmarshal([]byte(strJsonUserData), userData)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		err = c.updateUserInfoInGraph(userData)
		if err != nil {
			log.Println("error in updating user info: ", err)
		}
	} else {
		log.Println("error in casting user info")
	}
}

// ListenProjectStream blocks forever reading the "project" Redis stream and
// dispatches project lifecycle events. Same XRead "$" caveat as above.
func (c *controller) ListenProjectStream(ctx context.Context) {
	for {
		xStreamSlice := c.Rds.XRead(ctx, &redis.XReadArgs{Block: 0, Streams: []string{"project", "$"}})
		xReadResult, err := xStreamSlice.Result()
		if err != nil {
			if err == redis.Nil {
				continue
			} else {
				panic(err)
			}
		}
		rdsMessages := xReadResult[len(xReadResult)-1].Messages
		for _, rdsMessage := range rdsMessages {
			rdsMap := rdsMessage.Values
			if val, ok := rdsMap["new"]; ok {
				go c.createNewProject(val)
			}
			if val, ok := rdsMap["add-member"]; ok {
				go c.addMember(val)
			}
			if val, ok := rdsMap["remove-member"]; ok {
				go c.removeMember(val)
			}
			if val, ok := rdsMap["task"]; ok {
				go c.createTask(val)
			}
		}
	}
}

// createNewProject decodes a model.Project JSON payload and registers it.
func (c *controller) createNewProject(jsonProject interface{}) {
	if strJsonProject, ok := jsonProject.(string); ok {
		newProject := &model.Project{}
		err := json.Unmarshal([]byte(strJsonProject), newProject)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		err = c.addProject(newProject)
		if err != nil {
			log.Println("error in creating project: ", err)
		}
	} else {
		log.Println("error in casting project")
	}
}

// addMember decodes an addingMemberData payload and adds the member to the
// project graph.
func (c *controller) addMember(jsonProjectNewMember interface{}) {
	if strJsonProjectNewMember, ok := jsonProjectNewMember.(string); ok {
		newProjectMemberData := &addingMemberData{}
		err := json.Unmarshal([]byte(strJsonProjectNewMember), newProjectMemberData)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		err = c.addMemberInGraph(newProjectMemberData)
		if err != nil {
			log.Println("error in adding member: ", err)
		}
	} else {
		log.Println("error in casting project member data")
	}
}

// removeMember decodes a removingMemberData payload and removes the member
// from the project graph.
func (c *controller) removeMember(jsonRemovingMember interface{}) {
	if strJsonRemovingMember, ok := jsonRemovingMember.(string); ok {
		removingMember := &removingMemberData{}
		err := json.Unmarshal([]byte(strJsonRemovingMember), removingMember)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		err = c.removeMemberInGraph(removingMember)
		if err != nil {
			log.Println("error in removing member: ", err)
		}
	} else {
		log.Println("error in casting removing member data")
	}
}

// createTask decodes a task DTO (deadline as a "2006-01-02" string), converts
// it into a model.Task with a parsed time.Time, and adds it to the graph.
func (c *controller) createTask(jsonTask interface{}) {
	if strJsonTask, ok := jsonTask.(string); ok {
		task := &newTaskData{}
		// Intermediate DTO: the wire format carries the deadline as a string.
		taskDTO := &struct {
			ProjectId int64
			Task      *struct {
				Id          int64
				Description string
				Deadline    string
				Priority    string
				Executor    *model.User
				Skills      []string
			}
		}{}
		err := json.Unmarshal([]byte(strJsonTask), taskDTO)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		deadline, err := time.Parse("2006-01-02", taskDTO.Task.Deadline)
		if err != nil {
			log.Println("error in parsing date: ", err)
			return
		}
		task.ProjectId = taskDTO.ProjectId
		task.Task = &model.Task{
			Id:          taskDTO.Task.Id,
			Description: taskDTO.Task.Description,
			Deadline:    deadline,
			Priority:    taskDTO.Task.Priority,
			Executor:    taskDTO.Task.Executor,
			Skills:      taskDTO.Task.Skills,
		}
		err = c.createTaskInGraph(task)
		if err != nil {
			log.Println("error in creating task: ", err)
		}
	} else {
		log.Println("error in casting task")
	}
}

// ListenTaskStream blocks forever reading the "task" Redis stream and
// dispatches task mutation events. Same XRead "$" caveat as above.
func (c *controller) ListenTaskStream(ctx context.Context) {
	for {
		xStreamSlice := c.Rds.XRead(ctx, &redis.XReadArgs{Block: 0, Streams: []string{"task", "$"}})
		xReadResult, err := xStreamSlice.Result()
		if err != nil {
			if err == redis.Nil {
				continue
			} else {
				panic(err)
			}
		}
		rdsMessages := xReadResult[len(xReadResult)-1].Messages
		for _, rdsMessage := range rdsMessages {
			rdsMap := rdsMessage.Values
			if val, ok := rdsMap["executor"]; ok {
				go c.changeTaskExecutor(val)
			}
			if val, ok := rdsMap["description"]; ok {
				go c.changeTaskDescription(val)
			}
			if val, ok := rdsMap["close"]; ok {
				go c.closeTask(val)
			}
			if val, ok := rdsMap["deadline"]; ok {
				go c.changeTaskDeadline(val)
			}
		}
	}
}

// changeTaskExecutor decodes a changeExecutorData payload and applies it.
func (c *controller) changeTaskExecutor(jsonChangeExecutorData interface{}) {
	if strJsonChangeExecutorData, ok := jsonChangeExecutorData.(string); ok {
		changeExecutor := &changeExecutorData{}
		err := json.Unmarshal([]byte(strJsonChangeExecutorData), changeExecutor)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		err = c.changeTaskExecutorInGraph(changeExecutor)
		if err != nil {
			log.Println("error in changing task executor: ", err)
		}
	} else {
		log.Println("error in casting change executor data")
	}
}

// changeTaskDescription decodes a changeDescriptionData payload and applies it.
func (c *controller) changeTaskDescription(jsonChangeDescriptionData interface{}) {
	if strJsonChangeDescriptionData, ok := jsonChangeDescriptionData.(string); ok {
		changeDescription := &changeDescriptionData{}
		err := json.Unmarshal([]byte(strJsonChangeDescriptionData), changeDescription)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		err = c.changeTaskDescriptionInGraph(changeDescription)
		if err != nil {
			log.Println("error in change description: ", err)
		}
	} else {
		log.Println("error in casting change description")
	}
}

// closeTask decodes a closeTaskData payload and marks the task closed.
func (c *controller) closeTask(jsonCloseData interface{}) {
	if strJsonCloseData, ok := jsonCloseData.(string); ok {
		closeData := &closeTaskData{}
		err := json.Unmarshal([]byte(strJsonCloseData), closeData)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		err = c.closeTaskInGraph(closeData)
		if err != nil {
			log.Println("error in closing task: ", err)
		}
	} else {
		log.Println("error in casting close task")
	}
}

// changeTaskDeadline decodes a DTO whose deadline is a "2006-01-02" string,
// parses it, and applies the new deadline to the graph.
func (c *controller) changeTaskDeadline(jsonChangeDeadlineData interface{}) {
	if strJsonChangeDeadlineData, ok := jsonChangeDeadlineData.(string); ok {
		changeDeadline := &changeDeadlineData{}
		changeDeadlineDTO := &struct {
			TaskId    int64
			Deadline  string
			ProjectId int64
		}{}
		err := json.Unmarshal([]byte(strJsonChangeDeadlineData), changeDeadlineDTO)
		if err != nil {
			log.Println("error in unmarshalling:", err)
			return
		}
		deadline, err := time.Parse("2006-01-02", changeDeadlineDTO.Deadline)
		if err != nil {
			log.Println("error in parsing date: ", err)
			return
		}
		changeDeadline.TaskId = changeDeadlineDTO.TaskId
		changeDeadline.Deadline = deadline
		changeDeadline.ProjectId = changeDeadlineDTO.ProjectId
		err = c.changeTaskDeadlineInGraph(changeDeadline)
		if err != nil {
			log.Println("error in change deadline: ", err)
		}
	} else {
		log.Println("error in casting change deadline")
	}
}

// saveNewProject wraps a model.Project in a graph.Project and persists it.
func (c *controller) saveNewProject(newProject *model.Project) error {
	project := graph.MakeNewProject(newProject)
	err := c.writeProjectToRedis(project)
	if err != nil {
		return err
	}
	return nil
}

// writeProjectToRedis serializes the project's graph and stores it under the
// project ID (base-10 string) with no expiration.
func (c *controller) writeProjectToRedis(project *graph.Project) error {
	ctx := context.Background()
	byteGraph, err := json.Marshal(project.Graph)
	if err != nil {
		return err
	}
	status := c.Rds.Set(ctx, strconv.FormatInt(project.Id, 10), string(byteGraph), 0)
	if status.Err() != nil {
		return status.Err()
	}
	return nil
}

// readData loads and unmarshals the graph stored under the given project ID,
// returning redis.Nil when the key is absent and an error for empty values.
func (c *controller) readData(id int64) (*graph.Project, error) {
	val, err := c.Rds.Get(context.Background(), strconv.FormatInt(id, 10)).Result()
	switch {
	case err == redis.Nil:
		log.Println("key does not exist")
		return nil, err
	case err != nil:
		log.Println("Get failed", err)
		return nil, err
	case val == "":
		err = errors.New("value is empty")
		log.Println(err)
		return nil, err
	}
	g := &graph.Graph{}
	err = json.Unmarshal([]byte(val), g)
	if err != nil {
		return nil, err
	}
	return &graph.Project{
		Id:    id,
		Graph: g,
	}, nil
}
package cmd import ( "context" "flag" "fmt" "github.com/jmoiron/sqlx" "github.com/soulmonk/go-grpc-http-rest-microservice-tutorial/pkg/logger" //"github.com/jmoiron/sqlx" TODO "log" // postgres driver _ "github.com/lib/pq" "github.com/soulmonk/go-grpc-http-rest-microservice-tutorial/pkg/protocol/grpc" "github.com/soulmonk/go-grpc-http-rest-microservice-tutorial/pkg/protocol/rest" "github.com/soulmonk/go-grpc-http-rest-microservice-tutorial/pkg/service/v1" ) type PG struct { Host string Port string User string Password string Dbname string } // Config is configuration for Server type Config struct { // gRPC server start parameters section // gRPC is TCP port to listen by gRPC server GRPCPort string // HTTP/REST gateway start parameters section // HTTPPort is TCP port to listen by HTTP/REST gateway HTTPPort string Db PG // Log parameters section // LogLevel is global log level: Debug(-1), Info(0), Warn(1), Error(2), DPanic(3), Panic(4), Fatal(5) LogLevel int // LogTimeFormat is print time format for logger e.g. 2006-01-02T15:04:05Z07:00 LogTimeFormat string } // RunServer runs gRPC server and HTTP gateway func RunServer() error { // new line ctx := context.Background() // get configuration var cfg Config flag.StringVar(&cfg.GRPCPort, "grpc-port", "", "gRPC port to bind") flag.StringVar(&cfg.HTTPPort, "http-port", "", "HTTP port to bind") flag.StringVar(&cfg.Db.Host, "db-host", "", "Database host") flag.StringVar(&cfg.Db.Port, "db-port", "", "Database port") flag.StringVar(&cfg.Db.User, "db-user", "", "Database user") flag.StringVar(&cfg.Db.Password, "db-password", "", "Database password") flag.StringVar(&cfg.Db.Dbname, "db-name", "", "Database name") flag.IntVar(&cfg.LogLevel, "log-level", 0, "Global log level") flag.StringVar(&cfg.LogTimeFormat, "log-time-format", "", "Print time format for logger e.g. 
2006-01-02T15:04:05Z07:00") flag.Parse() if len(cfg.GRPCPort) == 0 { return fmt.Errorf("invalid TCP port for gRPC server: '%s'", cfg.GRPCPort) } if len(cfg.HTTPPort) == 0 { return fmt.Errorf("invalid TCP port for HTTP getaway: '%s'", cfg.HTTPPort) } // initialize logger if err := logger.Init(cfg.LogLevel, cfg.LogTimeFormat); err != nil { return fmt.Errorf("failed to initialize logger: %v", err) } var err error psqlInfo := fmt.Sprintf("host=%s port=%s user=%s "+ "password=%s dbname=%s sslmode=disable", cfg.Db.Host, cfg.Db.Port, cfg.Db.User, cfg.Db.Password, cfg.Db.Dbname) db, err := sqlx.Open("postgres", psqlInfo) if err != nil { panic(err) } err = db.Ping() if err != nil { panic(err) } defer func() { if err := db.Close(); err != nil { log.Fatal(err) } }() v1API := v1.NewToDoServiceServer(db) // run HTTP gateway go func() { _ = rest.RunServer(ctx, cfg.GRPCPort, cfg.HTTPPort) }() return grpc.RunServer(ctx, v1API, cfg.GRPCPort) }
package main import ( "html/template" "net/http" ) func render(w http.ResponseWriter, path string, data interface{}) error { tpl := template.Must(template.ParseFiles("static/template/header.html", "static/template/footer.html", path)) return tpl.ExecuteTemplate(w, "content", data) }
package btree import ( "fmt" "gosearch/pkg/crawler" "testing" ) func TestAdd(t *testing.T) { tr := New() testCases := []struct { wantErr bool ErrorText string Want *crawler.Document }{ { wantErr: false, ErrorText: "", Want: &crawler.Document{ uint64(2), "", "", }, }, { wantErr: false, ErrorText: "", Want: &crawler.Document{ uint64(3), "", "", }, }, { wantErr: true, ErrorText: "element already exist", Want: &crawler.Document{ uint64(2), "", "", }, }, { wantErr: true, ErrorText: "element is nil", Want: nil, }, } for i, tt := range testCases { err := tr.Add(tt.Want) if tt.wantErr && err != nil { if err.Error() != tt.ErrorText { t.Fatalf("[%d] ожидалась ошибка \"%s\", а получена \"%s\"", i, tt.ErrorText, err.Error()) } } if tt.wantErr && err == nil { t.Fatalf("[%d] ожидалась ошибка \"%s\", но ошибка не получена", i, tt.ErrorText) } if !tt.wantErr && err != nil { t.Fatalf("[%d] ожидалось \"%v\", а получена ошибка \"%s\"", i, tt.Want, err.Error()) } } want := "\n\t3\n2\n" if fmt.Sprint(tr) != want { t.Fatalf("ожидалось \"%v\", а получено \"%s\"", want, tr) } } func TestSearch(t *testing.T) { tree := New() for i := 0; i < 3; i++ { err := tree.Add(&crawler.Document{ID: uint64(i), Title: "Title", URL: "URL"}) if err != nil { t.Fatalf("ошибка добавления элемента с ID=%d в дерево: %s", i+1, err.Error()) } } tests := []struct { wantErr bool ErrorText string Want *crawler.Document }{ { wantErr: false, ErrorText: "", Want: &crawler.Document{ uint64(2), "", "", }, }, { wantErr: true, ErrorText: "document not found", Want: &crawler.Document{ uint64(3), "", "", }, }, { wantErr: true, ErrorText: "element is nil", Want: nil, }, } for i, tt := range tests { got, err := tree.Search(tt.Want) if tt.wantErr && err != nil { if err.Error() != tt.ErrorText { t.Fatalf("[%d] ожидалась ошибка \"%s\", а получена \"%s\"", i, tt.ErrorText, err.Error()) } } if tt.wantErr && err == nil { t.Fatalf("[%d] ожидалась ошибка \"%s\", но ошибка не получена", i, tt.ErrorText) } if !tt.wantErr && err != nil { 
t.Fatalf("[%d] ожидалось \"%v\", а получена ошибка \"%s\"", i, tt.Want, err.Error()) } if !tt.wantErr && err != nil { if got.Ident() != tt.Want.Ident() { t.Fatalf("[%d] ошибка поиска элемента в дереве: ожидалось \"%d\", но получили \"%d\"", i, tt.Want.Ident(), got.Ident()) } } } }
package graph import ( "testing" ) func TestTraverse(t *testing.T) { // com := Node{"COM"} }
package main import ( "fmt" "flag" "os" "path/filepath" ) // Subdirs for the expected project structure const templatesDirName = "templates" // Command-line flag for where to put the generated website. var outputDir string const outputDirFlag = "o" // Command-line flag for the directory containing project data. var projectDir string const projectDirFlag = "r" var defaultProjectDir = filepath.Join("/", "home", "gredelston", "dev", "gscsg", "src") // init sets up command-line flags. func init() { flag.StringVar(&outputDir, outputDirFlag, "", "`Output directory` for the generated website") flag.StringVar(&projectDir, projectDirFlag, "", "`Project path` containing sourcefiles used to generate website") flag.Parse() } // fpExists is a convenience function to check whether a directory exists. func fpExists(fp string) bool { _, err := os.Stat(fp) return !os.IsNotExist(err) } // checkOutputDir verifies that outputDir is valid, and creates it if necessary. func checkOutputDir() { // Ensure validity of output directory if outputDir == "" { panic(fmt.Errorf("must supply an output directory with -%s", outputDirFlag)) } outputDir, err := filepath.Abs(outputDir) if err != nil { panic(err) } if !fpExists(outputDir) { parentDir := filepath.Dir(outputDir) if !fpExists(parentDir) { panic(fmt.Errorf("could not find parent directory %s of specified output directory %s", parentDir, outputDir)) } else { err = os.Mkdir(outputDir, os.ModePerm) if err != nil { panic(err) } } } } // checkProjectDir verifies that projectDir exists; or if none is provided, swaps in the default. 
func checkProjectDir() { if !fpExists(projectDirFlag) { projectDir = defaultProjectDir } for _, subdir := range []string{templatesDirName} { if !fpExists(filepath.Join(projectDir, subdir)) { panic(fmt.Errorf("project directory %s does not contain subdir %s", projectDir, subdir)) } } } func main() { checkOutputDir() checkProjectDir() // Load hello.mustache and render with custom data if err := RenderSite(projectDir, outputDir); err != nil { panic(fmt.Errorf("failed to render site: %w", err)) } else { fmt.Printf("Site written to %s\n", outputDir) } }
/* Copyright (c) 2017 Jason Ish * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ package config import ( "fmt" "github.com/jasonish/evebox/sqlite/configdb" "github.com/ogier/pflag" "log" "os" ) func usage(flagset *pflag.FlagSet) { fmt.Fprintf(os.Stderr, "Usage: evebox config -D <dir> <command> [args...]\n") fmt.Fprintf(os.Stderr, "\n") fmt.Fprintf(os.Stderr, "Global options:\n") flagset.PrintDefaults() fmt.Fprintf(os.Stderr, ` Commands: users `) } func Main(args []string) { var dataDirectory string flagset := pflag.NewFlagSet("evebox config", pflag.ExitOnError) flagset.Usage = func() { usage(flagset) } flagset.SetInterspersed(false) flagset.StringVarP(&dataDirectory, "data-directory", "D", "", "Data directory") flagset.Parse(args) commandArgs := flagset.Args() if len(commandArgs) == 0 { usage(flagset) os.Exit(1) } if dataDirectory == "" { log.Fatal("error: --data-directory is required") } db, err := configdb.NewConfigDB(dataDirectory) if err != nil { log.Fatalf("error: %v", err) } command := commandArgs[0] args = commandArgs[1:] switch command { case "users": UsersMain(db, args) default: fmt.Fprintf(os.Stderr, "error: unknown command: %s", command) os.Exit(1) } }
package main import ( "github.com/gorilla/csrf" "github.com/gorilla/mux" "github.com/justinas/alice" "net/http" ) func (app *application) routes() http.Handler { // common middleware stdMiddleware := alice.New(app.recoverPanic, app.logRequest, secureHeaders) dynMiddleware := alice.New(csrf.Protect([]byte(app.secret)), app.authenticate) r := mux.NewRouter() r.Handle("/", dynMiddleware.ThenFunc(app.home)).Methods(http.MethodGet) r.Handle("/snippet/create", dynMiddleware.Append(app.requireAuthentication).ThenFunc(app.createSnippetForm)).Methods(http.MethodGet) r.Handle("/snippet/create", dynMiddleware.Append(app.requireAuthentication).ThenFunc(app.createSnippet)).Methods(http.MethodPost) r.Handle("/snippet/{id}", dynMiddleware.ThenFunc(app.showSnippet)).Methods(http.MethodGet) r.Handle("/user/signup", dynMiddleware.ThenFunc(app.signupUserForm)).Methods(http.MethodGet) r.Handle("/user/signup", dynMiddleware.ThenFunc(app.signupUser)).Methods(http.MethodPost) r.Handle("/user/login", dynMiddleware.ThenFunc(app.loginUserForm)).Methods(http.MethodGet) r.Handle("/user/login", dynMiddleware.ThenFunc(app.loginUser)).Methods(http.MethodPost) r.Handle("/user/logout", dynMiddleware.Append(app.requireAuthentication).ThenFunc(app.logoutUser)).Methods(http.MethodPost) r.HandleFunc("/ping", ping).Methods(http.MethodGet) fileServer := http.FileServer(http.Dir("./ui/static/")) // Use the mux.Handle() function to register the file server as the handler for // all URL paths that start with "/static/". For matching paths, we strip the // "/static" prefix before the request reaches the file server r.PathPrefix("/static/").Handler(http.StripPrefix("/static", fileServer)).Methods(http.MethodGet) return stdMiddleware.Then(r) }
package main import ( "bufio" "fmt" "log" "math" "os" "strconv" "strings" "time" ) func main() { // 너구리 한 쌍은 한 달 후에 다른 새끼 너구리 한 쌍을 낳습니다. // 이 새끼 너구리 한 쌍은 한 달 동안 성체가 되며 성체가 된 너구리 한 쌍은 다시 한 달 후에 다른 새끼 너구리 한 쌍을 낳습니다. // 이미 성체가 된 너구리 부부는 달마다 새끼 너구리를 한 쌍씩 낳는다고 가정할 때, n달 후의 너구리 수를 구하는 함수를 작성하세요. (단, 이 너구리들은 죽지 않습니다.) //풀이법 대략 3개 정도 //1. 재귀함수로 풀기 > 시간복잡도 높음, 너무 흔해서 하기 싫음(?) //2. 루프로 풀기 > 월, 성채너구리 수, 새끼너구리 수 를 받을 변수들을 만들고 루프를 돌리면 풀릴 것 같다. //3. 피보나치 수열에서 온 문제같으니 피보나치 스타일로 풀기. // 3번으로만 풀어볼 예정. // 달 / 성체 / 새끼 / 총마릿수 순으로 나열해봄 (0달부터 6달까지) // 0 2 0 2 // 1 2 2 4 // 2 4 2 6 // 3 6 4 10 // 4 10 6 16 // 5 16 10 26 // 6 26 16 42 // 보면 새끼수가 피보나치 수열*2의 꼴로 증가하고 있음. 또한 n달에 대한 총마릿수는, // n달새끼구하기 함수가 있다면 > n달새끼구하기(n)+n달새끼구하기(n+1) = n달 총마릿수 //(수식으로 증명 안해도 논리적으로 n달에 성채수만큼 n+1달에 새끼가 생기기 때문에 둘을 더하면 n달 총마릿수가 됨이 보임.) //피보나치 수열의 일반항이 있던거 같은데?? 싶어서 구글링해서 찾음. // (1/sqrt5) * (pow((1+sqrt5)/2, n) - pow((1-sqrt5)/2, n)); 요런 양식이고, 너구리는 해당 식의 두배로 늘음. 식을 고 문법으로 표현하면 됨. kbReader := bufio.NewReader(os.Stdin) fmt.Print("n달 후의 너구리 수를 구합니다. n이될 값을 입력하세요 : ") strtmp, err := kbReader.ReadString('\n') if err != nil { log.Fatal(err) } fmt.Printf("입력값 : %s\n", strtmp) strtmp = strings.Replace(strtmp, "\r", "", -1) strtmp = strings.Replace(strtmp, "\n", "", -1) //시간체크 start := time.Now() i, err := strconv.Atoi(strtmp) if err != nil { log.Fatal(err) } fiboN := fibo(float64(i)) * 2 fiboNp1 := fibo(float64(i+1)) * 2 //위에 두 피보나치 일반항*2값을 더한게 n달차 너구리 총합. (부동소수점 문제 안생기게 반올림 처리) result := fmt.Sprint(math.Round(fiboN + fiboNp1)) fmt.Println("result >> ", result) fmt.Println("time : ", time.Now().Sub(start)) } func fibo(n float64) float64 { return (1 / math.Sqrt(5)) * (math.Pow((1+math.Sqrt(5))/2, n) - math.Pow((1-math.Sqrt(5))/2, n)) }
package internal import ( "bytes" "encoding/json" "fmt" "io" "log" "mime/multipart" "net/http" "os" ) type SendFileCommand struct { wsc *WSClient } func (cmd *SendFileCommand) Send(filepath string) (*http.Response, error) { log.Println("Sending shot:", filepath) // Prepare endpoint data to send the file protocol := cmd.wsc.httpprotocol port := cmd.wsc.httpport domain := cmd.wsc.httpdomain uploaduri := cmd.wsc.uploaduri // Url to send the file posturl := fmt.Sprintf("%s://%s:%s%s", protocol, domain, port, uploaduri) // Read file file, err := os.Open(filepath) if err != nil { log.Printf("File %s to send not found in directory\n", filepath) log.Println("ERROR:", err) return nil, err } defer file.Close() // This is the body of the multipart request body := &bytes.Buffer{} //Writter for of the Body request writer := multipart.NewWriter(body) part, _ := writer.CreateFormFile("file", filepath) io.Copy(part, file) writer.Close() // Dispatch request r, _ := http.NewRequest("POST", posturl, body) r.Header.Add("Content-Type", writer.FormDataContentType()) client := &http.Client{} res, ee := client.Do(r) return res, ee } func NewSendFileCommand(wsc *WSClient) *SendFileCommand { return &SendFileCommand{ wsc: wsc, } } type RespondServerCommand struct { wsc *WSClient } func (cmd *RespondServerCommand) SendBashResponse(bashres BashResponse) error { strbashres, _ := json.Marshal(bashres) return cmd.wsc.SendMessage(string(strbashres)) } func (cmd *RespondServerCommand) SendFileNotification(fileres FileNotification) error { strfileres, _ := json.Marshal(fileres) return cmd.wsc.SendMessage(string(strfileres)) } func NewRespondServerCommand(wsc *WSClient) *RespondServerCommand { return &RespondServerCommand{ wsc: wsc, } }
package models import( "github.com/astaxie/beego/orm" _ "github.com/go-sql-driver/mysql" "fmt" "time" "github.com/astaxie/beego" ) func init() { dbhost := beego.AppConfig.String("dbhost") dbport := beego.AppConfig.String("dbport") dbuser := beego.AppConfig.String("dbuser") dbpassword := beego.AppConfig.String("dbpassword") db := beego.AppConfig.String("db") conn := dbuser + ":" + dbpassword + "@tcp(" + dbhost + ":" + dbport + ")/"+db+"?charset=utf8" //fmt.Println("传入参数是:"+conn) //orm.RegisterDataBase("default", "mysql", "root:admin@tcp(10.0.0.46:3306)/test?charset=utf8") orm.RegisterDataBase("default", "mysql", conn, 30) // register model orm.RegisterModel(new(Route)) // create table orm.RunSyncdb("default", false, true) } type Route struct { Id int UserName string `orm:"size(50)"` RouteName string `orm:"size(50)"` Port string `orm:"size(120)"` //Port int Uri string `orm:"size(100)"` HostIp string `orm:"size(320)"` Created time.Time `orm:"auto_now;type(datetime)"` } //添加route func AddRoute(route *Route) error { o := orm.NewOrm() _,err := o.Insert(route) if err !=nil { fmt.Println("添加route的信息到数据库失败") fmt.Println(err.Error()) return err } return nil } //更新route func UpdateRoute(route *Route) error { o := orm.NewOrm() _, err := o.Update(route) if err != nil { fmt.Println("在数据库更新route的信息失败") return err } return nil } //删除route func DelRoute(id int) error{ o := orm.NewOrm() _, err := o.Delete(&Route{Id: id}) if err != nil { fmt.Println("在数据库删除route的信息失败") } return nil } //根据UserName查询对应的route的所有记录信息 func ListRoutes(username string) (*[]orm.Params,error) { o :=orm.NewOrm() var routes []orm.Params _, err := o.QueryTable("route").Filter("user_name", username).Values(&routes) return &routes,err } //根据id查找route的记录 func GetRouteById(id int) (*Route,error){ o :=orm.NewOrm() route :=Route{Id : id} err := o.Read(&route) if err != nil { if err == orm.ErrNoRows { fmt.Printf("在数据库里,查询不到id %d对应的记录\n",id) } else if err == orm.ErrMissPK { fmt.Printf("id %d是无效的主键\n",id) }else{ 
fmt.Printf("根据id %d查询route的记录,发现未知的数据库访问错误\n",id) } return &route,err }else { fmt.Printf("查询到id %d对应的数据库记录\n",id) return &route,err } } //是否修改了路由,如果修改了,返回true,否则返回false func IfChangeuri(id int,uri string) bool { if r,err := GetRouteById(id);err != nil { return false }else { if r.Uri == uri { return false }else { return true } } } func RouteInUserSet(uri string) (bool,error) { o :=orm.NewOrm() var routes []orm.Params _, err := o.QueryTable("route").Values(&routes) if err != nil { fmt.Printf("查询数据库的路由信息失败\n") return false,err } for _,v := range routes { fmt.Printf("uri是%s\n",v["Uri"]) if v["Uri"] == uri { return true,nil } } return false,nil } //加载所有的route信息 func LoadAllRoutes() (*[]orm.Params,error) { o :=orm.NewOrm() var routes []orm.Params _, err := o.QueryTable("route").Values(&routes) return &routes,err }
/*
Copyright 2021 The Nuclio Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package loggerus

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"math"
	"reflect"
	"strings"

	"github.com/logrusorgru/aurora/v3"
	"github.com/sirupsen/logrus"
)

// TextFormatter renders logrus entries as colorized human-readable text:
// timestamp, optional right-aligned "who" column, level tag, message and
// then the entry's fields.
type TextFormatter struct {
	maxVariableLen   int  // fields rendered longer than this go to an indented block (0 = unlimited)
	enrichWhoField   bool // when true, a fixed-width "who" column is emitted after the timestamp
	auroraInstance   aurora.Aurora
	contextFormatter func(context.Context) string // optional renderer for a "ctx" field value
}

// NewTextFormatter creates a TextFormatter. color toggles ANSI colors via
// the aurora instance; contextFormatter may be nil, in which case "ctx"
// fields are dropped.
func NewTextFormatter(maxVariableLen int,
	enrichWhoField bool,
	color bool,
	contextFormatter func(context.Context) string) (*TextFormatter, error) {
	return &TextFormatter{
		maxVariableLen:   maxVariableLen,
		enrichWhoField:   enrichWhoField,
		auroraInstance:   aurora.NewAurora(color),
		contextFormatter: contextFormatter,
	}, nil
}

// Format implements logrus.Formatter: it assembles one log line
// (terminated by '\n') from the entry's time, who, level, message and
// fields.
func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	buffer := bytes.NewBuffer([]byte{})

	// write date
	buffer.WriteString(f.auroraInstance.White(entry.Time.Format("02-01-06 15:04:05.000")).String()) // nolint: errcheck

	// write logger name
	if f.enrichWhoField {
		buffer.WriteString(" " + f.auroraInstance.Cyan(f.getFormattedWho(entry.Data)).String()) // nolint: errcheck
	}

	// write level
	buffer.WriteString(" " + f.getLevelOutput(entry.Level)) // nolint: errcheck

	// write message
	buffer.WriteString(" " + entry.Message) // nolint: errcheck

	// write fields
	buffer.WriteString(f.getFieldsOutput(entry.Data)) // nolint: errcheck

	// add newline
	buffer.WriteByte('\n') // nolint: errcheck

	return buffer.Bytes(), nil
}

// getLevelOutput maps a logrus level to its colored single-letter tag,
// e.g. "(E)" in red for errors; unknown levels render as "(?)".
func (f *TextFormatter) getLevelOutput(level logrus.Level) string {
	switch level {
	case logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel:
		return f.auroraInstance.Red("(E)").String()
	case logrus.WarnLevel:
		return f.auroraInstance.Yellow("(W)").String()
	case logrus.InfoLevel:
		return f.auroraInstance.Blue("(I)").String()
	case logrus.DebugLevel:
		return f.auroraInstance.Green("(D)").String()
	case logrus.TraceLevel:
		return f.auroraInstance.Green("(T)").String()
	}

	return f.auroraInstance.BrightRed("(?)").String()
}

// getFieldsOutput renders the entry fields. Short values are joined on the
// log line itself (" :: k=v || k=v"); long or multiline values are emitted
// afterwards as indented "* key:" blocks. Structured values (slices, maps,
// structs) are JSON-encoded.
func (f *TextFormatter) getFieldsOutput(fields logrus.Fields) string {
	maxVariableLen := f.maxVariableLen
	if maxVariableLen == 0 {
		maxVariableLen = math.MaxInt64
	}

	singleLineKV := map[string]string{}
	blockKV := map[string]string{}

	for fieldKey, fieldValue := range fields {
		if fieldKey == "ctx" {

			// if we were provided with a context formatter
			if f.contextFormatter != nil {

				// if the value is a context (it should be)
				if ctx, isContext := fieldValue.(context.Context); isContext {

					// only if there's a value
					if contextValue := f.contextFormatter(ctx); contextValue != "" {
						singleLineKV["ctx"] = contextValue
					}
				}
			}

			continue
		}

		// if we're dealing with a struct, use json
		switch reflect.Indirect(reflect.ValueOf(fieldValue)).Kind() {
		case reflect.Slice, reflect.Map, reflect.Struct:
			fieldValueBytes, _ := json.Marshal(fieldValue)

			// if it's short - add to single line. otherwise to block
			if len(fieldValueBytes) <= maxVariableLen {
				singleLineKV[fieldKey] = string(fieldValueBytes)
			} else {
				blockBuffer := bytes.NewBuffer([]byte{})

				if err := json.Indent(blockBuffer, fieldValueBytes, "", "\t"); err != nil {
					blockBuffer.WriteString(fmt.Sprintf("Failed to encode: %s", err.Error())) // nolint: errcheck
				}

				blockKV[fieldKey] = blockBuffer.String()
			}
		case reflect.String:
			stringFieldValue := fmt.Sprintf("%s", fieldValue)

			// if there are newlines in output, add to block
			if strings.Contains(stringFieldValue, "\n") {
				blockKV[fieldKey] = stringFieldValue
			} else {
				singleLineKV[fieldKey] = fmt.Sprintf(`"%s"`, fieldValue)
			}
		default:
			singleLineKV[fieldKey] = fmt.Sprintf("%v", fieldValue)
		}
	}

	fieldsOutput := ""

	if len(singleLineKV) != 0 {
		fieldsOutput = f.auroraInstance.White(" :: ").String()
	}

	separator := f.auroraInstance.White(" || ").String()

	for singleLineKey, singleLineValue := range singleLineKV {
		fieldsOutput += fmt.Sprintf("%s=%s%s",
			f.auroraInstance.Blue(singleLineKey).String(),
			singleLineValue,
			separator)
	}

	// remove last ||
	fieldsOutput = strings.TrimSuffix(fieldsOutput, separator)

	if len(blockKV) != 0 {
		for blockKey, blockValue := range blockKV {
			fieldsOutput += fmt.Sprintf("\n* %s:\n", f.auroraInstance.Blue(blockKey).String())
			fieldsOutput += blockValue
			fieldsOutput += "\n"
		}
	}

	return fieldsOutput
}

// getFormattedWho right-aligns the "who" field into a fixed 20-character
// column, keeping the last 20 characters when the value is longer; an
// absent "who" yields 20 spaces.
func (f *TextFormatter) getFormattedWho(data logrus.Fields) string {
	who, ok := data["who"]
	if ok {
		whoStr := fmt.Sprintf("%20s", who)
		return fmt.Sprintf("%20s", whoStr[len(whoStr)-20:])
	}

	return fmt.Sprintf("%20s", "")
}
package main import ( "fmt" "math/rand" "time" ) func intersect(a []int, b []int) { l := a m := b if len(a) >= len(b) { l = b m = a } var result []int for _, i := range l { for _, j := range m { if i == j && result == nil { tmp := []int{i} result = tmp } else if i == j { result = append(result, i) } } } } func main() { a := rand.Perm(30000) b := rand.Perm(40000) old := time.Now() intersect(a, b) s := time.Since(old) fmt.Println("Ejecutado en", s, "segundos") }
package gedcom_test import ( "testing" "github.com/elliotchance/gedcom" "github.com/elliotchance/tf" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" ) var ( // John and Jane share the same pointer on purpose. They will be used for // pointer comparisons. elliot = individual(gedcom.NewDocument(), "P1", "Elliot /Chance/", "4 Jan 1843", "17 Mar 1907") john = individual(gedcom.NewDocument(), "P2", "John /Smith/", "4 Jan 1803", "17 Mar 1877") jane = individual(gedcom.NewDocument(), "P2", "Jane /Doe/", "3 Mar 1803", "14 June 1877") bob = individual(gedcom.NewDocument(), "P4", "Bob /Jones/", "1749", "1810") harry = individual(gedcom.NewDocument(), "P5", "Harry /Gold/", "1889", "1936") ) var individualNodesTests = map[string]struct { Doc1, Doc2 *gedcom.Document MinimumWeightedSimilarity float64 PreferPointerAbove float64 WantCompare gedcom.IndividualComparisons WantMerge gedcom.IndividualNodes }{ "BothDocumentsEmpty": { Doc1: gedcom.NewDocument(), Doc2: gedcom.NewDocument(), MinimumWeightedSimilarity: 0.0, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{}, }, "Doc2Empty": { Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot}), Doc2: gedcom.NewDocument(), MinimumWeightedSimilarity: 0.0, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{ gedcom.NewIndividualComparison(elliot, nil, nil), }, WantMerge: gedcom.IndividualNodes{ elliot, }, }, "Doc1Empty": { Doc1: gedcom.NewDocument(), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot}), MinimumWeightedSimilarity: 0.0, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{ gedcom.NewIndividualComparison(nil, elliot, nil), }, WantMerge: gedcom.IndividualNodes{ elliot, }, }, "SameIndividualInBothDocuments": { Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot}), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot}), MinimumWeightedSimilarity: 0.0, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{ 
gedcom.NewIndividualComparison(elliot, elliot, gedcom.NewSurroundingSimilarity(0.5, 1.0, 1.0, 1.0)), }, WantMerge: gedcom.IndividualNodes{ elliot, }, }, "SameIndividualsInDifferentOrder": { Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot, john, jane}), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{jane, elliot, john}), MinimumWeightedSimilarity: 0.0, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{ gedcom.NewIndividualComparison(elliot, elliot, gedcom.NewSurroundingSimilarity(0.5, 1.0, 1.0, 1.0)), gedcom.NewIndividualComparison(john, john, gedcom.NewSurroundingSimilarity(0.5, 1.0, 1.0, 1.0)), gedcom.NewIndividualComparison(jane, jane, gedcom.NewSurroundingSimilarity(0.5, 1.0, 1.0, 1.0)), }, WantMerge: gedcom.IndividualNodes{ elliot, john, jane, }, }, "ZeroMinimumSimilarity": { Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot, jane}), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{jane, john}), MinimumWeightedSimilarity: 0.0, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{ // elliot and john match because the minimumSimilarity is so // low. 
gedcom.NewIndividualComparison(jane, jane, gedcom.NewSurroundingSimilarity(0.5, 1, 1.0, 1.0)), gedcom.NewIndividualComparison(elliot, john, gedcom.NewSurroundingSimilarity(0.5, 0.24743589743589745, 1.0, 1.0)), }, WantMerge: gedcom.IndividualNodes{ jane, gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("Elliot /Chance/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1803"), // john gedcom.NewDateNode("4 Jan 1843"), // elliot ), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1877"), // john gedcom.NewDateNode("17 Mar 1907"), // elliot ), gedcom.NewNameNode("John /Smith/"), ), }, }, "OneMatch": { Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot, jane}), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{jane, john}), MinimumWeightedSimilarity: 0.75, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{ gedcom.NewIndividualComparison(jane, jane, gedcom.NewSurroundingSimilarity(0.5, 1.0, 1.0, 1.0)), gedcom.NewIndividualComparison(elliot, nil, nil), gedcom.NewIndividualComparison(nil, john, nil), }, WantMerge: gedcom.IndividualNodes{ jane, elliot, john, }, }, "NoMatches": { Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot, jane}), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{bob, john}), MinimumWeightedSimilarity: 0.9, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{ gedcom.NewIndividualComparison(elliot, nil, nil), gedcom.NewIndividualComparison(jane, nil, nil), gedcom.NewIndividualComparison(nil, bob, nil), gedcom.NewIndividualComparison(nil, john, nil), }, WantMerge: gedcom.IndividualNodes{ elliot, jane, bob, john, }, }, "AlwaysUsePointer": { // John and Jane are both P2. Even though they are completely different // we force pointers to match with a prefer value of 0.0. 
Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{elliot, jane}), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{bob, john}), MinimumWeightedSimilarity: 0.9, PreferPointerAbove: 0.0, WantCompare: gedcom.IndividualComparisons{ gedcom.NewIndividualComparison(jane, john, gedcom.NewSurroundingSimilarity(0.5, 0.8209932199959546, 1.0, 1.0)), gedcom.NewIndividualComparison(elliot, nil, nil), gedcom.NewIndividualComparison(nil, bob, nil), }, WantMerge: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1803"), // john gedcom.NewDateNode("3 Mar 1803"), // jane ), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1877"), // john gedcom.NewDateNode("14 June 1877"), // jane ), gedcom.NewNameNode("John /Smith/"), ), elliot, bob, }, }, "AlwaysUseUID1": { // Harry and John will always match because of the shared unique // identifier. Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{ elliot, setUID(individual(gedcom.NewDocument(), "P5", "Harry /Gold/", "1889", "1936"), "EE13561DDB204985BFFDEEBF82A5226C"), }), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{ bob, setUID(individual(gedcom.NewDocument(), "P2", "John /Smith/", "4 Jan 1803", "17 Mar 1877"), "EE13561DDB204985BFFDEEBF82A5226C5B2E"), }), MinimumWeightedSimilarity: 0.9, PreferPointerAbove: 0.0, WantCompare: gedcom.IndividualComparisons{ gedcom.NewIndividualComparison(harry, john, gedcom.NewSurroundingSimilarity(0.5, 0.15, 1.0, 1.0)), gedcom.NewIndividualComparison(elliot, nil, nil), gedcom.NewIndividualComparison(nil, bob, nil), }, WantMerge: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P5", // P5 = harry gedcom.NewNameNode("Harry /Gold/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1803"), // john gedcom.NewDateNode("1889"), // harry ), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1877"), // john gedcom.NewDateNode("1936"), // harry ), gedcom.NewUniqueIDNode("EE13561DDB204985BFFDEEBF82A5226C"), 
gedcom.NewNameNode("John /Smith/"), ), elliot, bob, }, }, "AlwaysUseUID2": { // This is the same as above, but we use the opposite PreferPointerAbove // value to prove that it doesn't affect unique identifier matches. Doc1: gedcom.NewDocumentWithNodes(gedcom.Nodes{ elliot, setUID(individual(gedcom.NewDocument(), "P5", "Harry /Gold/", "1889", "1936"), "EE13561DDB204985BFFDEEBF82A5226C"), }), Doc2: gedcom.NewDocumentWithNodes(gedcom.Nodes{ bob, setUID(individual(gedcom.NewDocument(), "P2", "John /Smith/", "4 Jan 1803", "17 Mar 1877"), "EE13561DDB204985BFFDEEBF82A5226C5B2E"), }), MinimumWeightedSimilarity: 0.9, PreferPointerAbove: 1.0, WantCompare: gedcom.IndividualComparisons{ gedcom.NewIndividualComparison(harry, john, gedcom.NewSurroundingSimilarity(0.5, 0.15, 1.0, 1.0)), gedcom.NewIndividualComparison(elliot, nil, nil), gedcom.NewIndividualComparison(nil, bob, nil), }, WantMerge: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P5", // P5 = harry gedcom.NewNameNode("Harry /Gold/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1803"), // john gedcom.NewDateNode("1889"), // harry ), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1877"), // john gedcom.NewDateNode("1936"), // harry ), gedcom.NewUniqueIDNode("EE13561DDB204985BFFDEEBF82A5226C"), gedcom.NewNameNode("John /Smith/"), ), elliot, bob, }, }, } func TestIndividualNodes_Similarity(t *testing.T) { // ghost:ignore var tests = []struct { a, b gedcom.IndividualNodes minSimilarity float64 expected float64 }{ // Exact matches. 
{ a: gedcom.IndividualNodes{}, b: gedcom.IndividualNodes{}, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 1.0, }, { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 1.0, }, { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("Apr 1907")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("Jane /DOE/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("Apr 1907")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 1.0, }, // Exact matches, but missing information on both sides. These // specifically should NOT return 1.0 as it would throw out the real // similarities. See the docs for explanation. 
{ a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewDeathNode("", gedcom.NewDateNode("Apr 1907")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("John /Smith/"), gedcom.NewDeathNode("", gedcom.NewDateNode("Apr 1907")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.875, }, { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("John /Smith/"), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.75, }, // Similar matches but the same sized slice on both sides. { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1845")), ), gedcom.NewDocument().AddIndividual("P3", gedcom.NewNameNode("Bob /Jones/"), gedcom.NewBurialNode("", gedcom.NewDateNode("1927")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P4", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Abt. Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("1907")), ), gedcom.NewDocument().AddIndividual("P5", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Bef. 1846")), ), gedcom.NewDocument().AddIndividual("P6", gedcom.NewNameNode("Bob Thomas /Jones/"), gedcom.NewBurialNode("", gedcom.NewDateNode("1927")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.872532146404072, }, // The slices are different lengths. The same score should be returned // when different sizes slices are swapped. 
{ a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1845")), ), gedcom.NewDocument().AddIndividual("P3", gedcom.NewNameNode("Bob /Jones/"), gedcom.NewBurialNode("", gedcom.NewDateNode("1927")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P4", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Between 1845 and 1846")), ), gedcom.NewDocument().AddIndividual("P5", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Bef. 10 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("Abt. 1908")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.7754008744441251, }, { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P4", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Between 1845 and 1846")), ), gedcom.NewDocument().AddIndividual("P5", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Bef. 10 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("Abt. 1908")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1845")), ), gedcom.NewDocument().AddIndividual("P3", gedcom.NewNameNode("Bob /Jones/"), gedcom.NewBurialNode("", gedcom.NewDateNode("1927")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.7754008744441251, }, // Whenever one slice is empty the result will always be 0.5. 
{ a: gedcom.IndividualNodes{}, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1845")), ), gedcom.NewDocument().AddIndividual("P3", gedcom.NewNameNode("Bob /Jones/"), gedcom.NewBurialNode("", gedcom.NewDateNode("1927")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.5, }, { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1845")), ), gedcom.NewDocument().AddIndividual("P3", gedcom.NewNameNode("Bob /Jones/"), gedcom.NewBurialNode("", gedcom.NewDateNode("1927")), ), }, b: gedcom.IndividualNodes{}, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.5, }, // These ones are just way off and should not be considered matches. { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P4", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Between 1845 and 1846")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P5", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Bef. 10 Jan 1943")), gedcom.NewDeathNode("", gedcom.NewDateNode("Abt. 
2008")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.5, }, { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1845")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P3", gedcom.NewNameNode("Bob /Jones/"), gedcom.NewBirthNode("", gedcom.NewDateNode("1627")), ), }, minSimilarity: gedcom.DefaultMinimumSimilarity, expected: 0.5, }, // Different values for minimumSimilarity. { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1845")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P3", gedcom.NewNameNode("Bob /Jones/"), gedcom.NewBirthNode("", gedcom.NewDateNode("1627")), ), }, minSimilarity: 0.95, expected: 0.5, }, { a: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P1", gedcom.NewNameNode("John /Smith/"), gedcom.NewBirthNode("", gedcom.NewDateNode("4 Jan 1843")), gedcom.NewDeathNode("", gedcom.NewDateNode("17 Mar 1907")), ), gedcom.NewDocument().AddIndividual("P2", gedcom.NewNameNode("Jane /Doe/"), gedcom.NewBirthNode("", gedcom.NewDateNode("Sep 1845")), ), }, b: gedcom.IndividualNodes{ gedcom.NewDocument().AddIndividual("P3", gedcom.NewNameNode("Bob /Jones/"), gedcom.NewBirthNode("", gedcom.NewDateNode("1627")), ), }, minSimilarity: 0.0, expected: 0.45708333333333334, }, } for _, test := range tests { t.Run("", func(t *testing.T) { options := gedcom.NewSimilarityOptions() options.MinimumSimilarity = 
test.minSimilarity got := test.a.Similarity(test.b, options) assert.Equal(t, test.expected, got) }) } } func TestIndividualNodes_Compare(t *testing.T) { for testName, test := range individualNodesTests { t.Run(testName, func(t *testing.T) { similarityOptions := gedcom.NewSimilarityOptions() similarityOptions.MinimumWeightedSimilarity = test.MinimumWeightedSimilarity similarityOptions.PreferPointerAbove = test.PreferPointerAbove compareOptions := gedcom.NewIndividualNodesCompareOptions() compareOptions.SimilarityOptions = similarityOptions individuals1 := test.Doc1.Individuals() individuals2 := test.Doc2.Individuals() got := individuals1.Compare(individuals2, compareOptions) // The comparison results (got) will include the options from above. // However, the fixture for this test does not provide the // compareOptions as it would make the fixture verbose and // confusing. Instead we set the Options on each of the comparison // results so that the deep equal passes. for _, x := range test.WantCompare { if x.Similarity != nil { x.Similarity.Options = similarityOptions } } assertEqual(t, test.WantCompare, got) }) } } func assertEqual(t *testing.T, expected, actual interface{}) bool { simplifyErrors := cmp.Transformer("Errors", func(in error) string { if in == nil { return "" } return in.Error() }) // IgnoreUnexported tell the diff engine to ignore unexported fields for the // following types. 
diff := cmp.Diff(expected, actual, cmpopts.IgnoreUnexported( gedcom.SimpleNode{}, gedcom.IndividualNode{}, gedcom.IndividualComparison{}, gedcom.FamilyNode{}, gedcom.DateNode{}, gedcom.ChildNode{}, ), simplifyErrors) if diff != "" { assert.Fail(t, diff) } return diff == "" } func TestNewIndividualNodesCompareOptions(t *testing.T) { actual := gedcom.NewIndividualNodesCompareOptions() assert.Equal(t, actual.SimilarityOptions, gedcom.NewSimilarityOptions()) } func TestIndividualNodes_Nodes(t *testing.T) { Nodes := tf.Function(t, gedcom.IndividualNodes.Nodes) i1 := individual(gedcom.NewDocument(), "P1", "Elliot /Chance/", "", "") i2 := individual(gedcom.NewDocument(), "P2", "Joe /Bloggs/", "", "") Nodes(nil).Returns(nil) Nodes(gedcom.IndividualNodes{}).Returns(nil) Nodes(gedcom.IndividualNodes{i1, i2}).Returns(gedcom.Nodes{i1, i2}) } func TestIndividualNodes_Merge(t *testing.T) { for testName, test := range individualNodesTests { t.Run(testName, func(t *testing.T) { similarityOptions := gedcom.NewSimilarityOptions() similarityOptions.MinimumWeightedSimilarity = test.MinimumWeightedSimilarity similarityOptions.PreferPointerAbove = test.PreferPointerAbove compareOptions := gedcom.NewIndividualNodesCompareOptions() compareOptions.SimilarityOptions = similarityOptions individuals1 := test.Doc1.Individuals() individuals2 := test.Doc2.Individuals() doc := gedcom.NewDocument() got, err := individuals1.Merge(individuals2, doc, compareOptions) assert.NoError(t, err) assertIndividualNodes(t, test.WantMerge, got) }) } } func assertIndividualNodes(t *testing.T, expected, actual gedcom.IndividualNodes) { assert.Equal(t, expected.GEDCOMString(0), actual.GEDCOMString(0)) } func setUID(i *gedcom.IndividualNode, uid string) *gedcom.IndividualNode { i.AddNode(gedcom.NewUniqueIDNode(uid)) return i }
// Copyright © 2020 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package wallet

import (
	"context"
	"fmt"
	"regexp"
	"strings"
	"sync"

	eth2client "github.com/attestantio/go-eth2-client"
	api "github.com/attestantio/go-eth2-client/api/v1"
	"github.com/attestantio/go-eth2-client/spec/phase0"
	"github.com/attestantio/vouch/services/chaintime"
	"github.com/attestantio/vouch/services/metrics"
	"github.com/attestantio/vouch/services/validatorsmanager"
	"github.com/pkg/errors"
	"github.com/rs/zerolog"
	zerologger "github.com/rs/zerolog/log"
	"github.com/wealdtech/go-bytesutil"
	e2wallet "github.com/wealdtech/go-eth2-wallet"
	filesystem "github.com/wealdtech/go-eth2-wallet-store-filesystem"
	e2wtypes "github.com/wealdtech/go-eth2-wallet-types/v2"
	"golang.org/x/sync/semaphore"
)

// Service is the manager for wallet accounts.
type Service struct {
	// mutex guards accounts, which is rebuilt wholesale by refreshAccounts.
	mutex              sync.RWMutex
	monitor            metrics.AccountManagerMonitor
	// processConcurrency bounds the number of goroutines fetching accounts
	// concurrently (see fetchAccountsForWallet).
	processConcurrency int64
	// stores are the wallet stores searched, in order, when opening wallets.
	stores             []e2wtypes.Store
	// accountPaths are "wallet/account" specifiers; the account part may be
	// a regular expression (see accountPathsToVerificationRegexes).
	accountPaths       []string
	// passphrases are tried in order when unlocking each account.
	passphrases        [][]byte
	// accounts maps BLS public key to the unlocked account.
	accounts           map[phase0.BLSPubKey]e2wtypes.Account

	validatorsManager validatorsmanager.Service

	slotsPerEpoch phase0.Slot

	domainProvider eth2client.DomainProvider

	farFutureEpoch       phase0.Epoch
	currentEpochProvider chaintime.Service
}

// module-wide log.
var log zerolog.Logger

// New creates a new wallet account manager.
// It validates the supplied parameters, opens the configured filesystem
// stores (or the default store when no locations are given), and performs an
// initial refresh of accounts and validator state before returning.
func New(ctx context.Context, params ...Parameter) (*Service, error) {
	parameters, err := parseAndCheckParameters(params...)
	if err != nil {
		return nil, errors.Wrap(err, "problem with parameters")
	}

	// Set logging.
	log = zerologger.With().Str("service", "accountmanager").Str("impl", "wallet").Logger()
	if parameters.logLevel != log.GetLevel() {
		log = log.Level(parameters.logLevel)
	}

	// Warn about lack of slashing protection
	log.Warn().Msg("The wallet account manager does not provide built-in slashing protection. Please use the dirk account manager for production systems.")

	stores := make([]e2wtypes.Store, 0, len(parameters.locations))
	if len(parameters.locations) == 0 {
		// Use default location.
		stores = append(stores, filesystem.New())
	} else {
		for _, location := range parameters.locations {
			stores = append(stores, filesystem.New(filesystem.WithLocation(location)))
		}
	}

	slotsPerEpoch, err := parameters.slotsPerEpochProvider.SlotsPerEpoch(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to obtain slots per epoch")
	}
	farFutureEpoch, err := parameters.farFutureEpochProvider.FarFutureEpoch(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to obtain far future epoch")
	}

	s := &Service{
		monitor:              parameters.monitor,
		processConcurrency:   parameters.processConcurrency,
		stores:               stores,
		accountPaths:         parameters.accountPaths,
		passphrases:          parameters.passphrases,
		validatorsManager:    parameters.validatorsManager,
		slotsPerEpoch:        phase0.Slot(slotsPerEpoch),
		domainProvider:       parameters.domainProvider,
		farFutureEpoch:       farFutureEpoch,
		currentEpochProvider: parameters.currentEpochProvider,
	}

	// Populate accounts and validator state up front so the service is
	// immediately usable; either failure aborts construction.
	if err := s.refreshAccounts(ctx); err != nil {
		return nil, errors.Wrap(err, "failed to fetch accounts")
	}
	if err := s.refreshValidators(ctx); err != nil {
		return nil, errors.Wrap(err, "failed to fetch validator states")
	}

	return s, nil
}

// Refresh refreshes the accounts from local store, and account validator state from
// the validators provider.
// This is a relatively expensive operation, so should not be run in the validating path.
func (s *Service) Refresh(ctx context.Context) { if err := s.refreshAccounts(ctx); err != nil { log.Error().Err(err).Msg("Failed to refresh accounts") } if err := s.refreshValidators(ctx); err != nil { log.Error().Err(err).Msg("Failed to refresh validators") } } // refreshAccounts refreshes the accounts from local store. func (s *Service) refreshAccounts(ctx context.Context) error { // Find the relevant wallets. wallets := make(map[string]e2wtypes.Wallet) pathsByWallet := make(map[string][]string) for _, path := range s.accountPaths { pathBits := strings.Split(path, "/") var paths []string var exists bool if paths, exists = pathsByWallet[pathBits[0]]; !exists { paths = make([]string, 0) } pathsByWallet[pathBits[0]] = append(paths, path) // Try each store in turn. found := false for _, store := range s.stores { wallet, err := e2wallet.OpenWallet(pathBits[0], e2wallet.WithStore(store)) if err == nil { wallets[wallet.Name()] = wallet found = true break } } if !found { log.Warn().Str("wallet", pathBits[0]).Msg("Failed to find wallet in any store") } } verificationRegexes := accountPathsToVerificationRegexes(s.accountPaths) // Fetch accounts for each wallet. accounts := make(map[phase0.BLSPubKey]e2wtypes.Account) for _, wallet := range wallets { // if _, isProvider := wallet.(e2wtypes.WalletAccountsByPathProvider); isProvider { // fmt.Printf("TODO: fetch accounts by path") // } else { s.fetchAccountsForWallet(ctx, wallet, accounts, verificationRegexes) //} } log.Trace().Int("accounts", len(accounts)).Msg("Obtained accounts") s.mutex.Lock() s.accounts = accounts s.mutex.Unlock() return nil } // refreshValidators refreshes the validator information for our known accounts. 
// refreshValidators refreshes the validator information for our known accounts.
// It forwards all known account public keys to the validators manager so it
// can update its view from the beacon node.
// NOTE(review): s.accounts is read here without holding s.mutex, while
// refreshAccounts replaces it under the lock — confirm callers serialize
// these, otherwise this is a data race.
func (s *Service) refreshValidators(ctx context.Context) error {
	accountPubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
	for pubKey := range s.accounts {
		accountPubKeys = append(accountPubKeys, pubKey)
	}
	if err := s.validatorsManager.RefreshValidatorsFromBeaconNode(ctx, accountPubKeys); err != nil {
		return errors.Wrap(err, "failed to refresh validators")
	}
	return nil
}

// ValidatingAccountsForEpoch obtains the validating accounts for a given epoch.
// Only accounts whose validator state is active (ongoing or exiting) at the
// epoch are returned, keyed by validator index. When the epoch is the current
// epoch, per-state account counts are also reported to the monitor.
func (s *Service) ValidatingAccountsForEpoch(ctx context.Context, epoch phase0.Epoch) (map[phase0.ValidatorIndex]e2wtypes.Account, error) {
	// stateCount is used to update metrics.
	stateCount := map[api.ValidatorState]uint64{
		api.ValidatorStateUnknown:            0,
		api.ValidatorStatePendingInitialized: 0,
		api.ValidatorStatePendingQueued:      0,
		api.ValidatorStateActiveOngoing:      0,
		api.ValidatorStateActiveExiting:      0,
		api.ValidatorStateActiveSlashed:      0,
		api.ValidatorStateExitedUnslashed:    0,
		api.ValidatorStateExitedSlashed:      0,
		api.ValidatorStateWithdrawalPossible: 0,
		api.ValidatorStateWithdrawalDone:     0,
	}

	validatingAccounts := make(map[phase0.ValidatorIndex]e2wtypes.Account)
	pubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
	for pubKey := range s.accounts {
		pubKeys = append(pubKeys, pubKey)
	}
	validators := s.validatorsManager.ValidatorsByPubKey(ctx, pubKeys)
	for index, validator := range validators {
		state := api.ValidatorToState(validator, epoch, s.farFutureEpoch)
		stateCount[state]++
		if state == api.ValidatorStateActiveOngoing || state == api.ValidatorStateActiveExiting {
			account := s.accounts[validator.PublicKey]
			log.Trace().
				Str("name", account.Name()).
				Str("public_key", fmt.Sprintf("%x", account.PublicKey().Marshal())).
				Uint64("index", uint64(index)).
				Str("state", state.String()).
				Msg("Validating account")
			validatingAccounts[index] = account
		}
	}

	// Update metrics if this is the current epoch.
	if epoch == s.currentEpochProvider.CurrentEpoch() {
		// Accounts with no validator entry are counted as unknown.
		stateCount[api.ValidatorStateUnknown] += uint64(len(s.accounts) - len(validators))
		for state, count := range stateCount {
			s.monitor.Accounts(strings.ToLower(state.String()), count)
		}
	}

	return validatingAccounts, nil
}

// ValidatingAccountsForEpochByIndex obtains the specified validating accounts for a given epoch.
// The result is restricted to the supplied validator indices; accounts whose
// state is not active (ongoing or exiting) at the epoch are excluded.
func (s *Service) ValidatingAccountsForEpochByIndex(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) (map[phase0.ValidatorIndex]e2wtypes.Account, error) {
	validatingAccounts := make(map[phase0.ValidatorIndex]e2wtypes.Account)
	pubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
	for pubKey := range s.accounts {
		pubKeys = append(pubKeys, pubKey)
	}

	// Set of requested indices for O(1) membership tests.
	indexPresenceMap := make(map[phase0.ValidatorIndex]bool)
	for _, index := range indices {
		indexPresenceMap[index] = true
	}
	validators := s.validatorsManager.ValidatorsByPubKey(ctx, pubKeys)
	for index, validator := range validators {
		if _, present := indexPresenceMap[index]; !present {
			continue
		}
		state := api.ValidatorToState(validator, epoch, s.farFutureEpoch)
		if state == api.ValidatorStateActiveOngoing || state == api.ValidatorStateActiveExiting {
			validatingAccounts[index] = s.accounts[validator.PublicKey]
		}
	}

	return validatingAccounts, nil
}

// accountPathsToVerificationRegexes turns account paths in to regexes to allow verification.
// accountPathsToVerificationRegexes turns account paths in to regexes to allow verification.
// Each "wallet/account" path becomes an anchored regex "^wallet/account$";
// a bare wallet name matches every account in that wallet (".*").
// Invalid paths and uncompilable patterns are logged and skipped.
func accountPathsToVerificationRegexes(paths []string) []*regexp.Regexp {
	regexes := make([]*regexp.Regexp, 0, len(paths))
	for _, path := range paths {
		log := log.With().Str("path", path).Logger()
		parts := strings.Split(path, "/")
		if len(parts) == 0 || len(parts[0]) == 0 {
			log.Debug().Msg("Invalid path")
			continue
		}
		if len(parts) == 1 {
			// No account component; match all accounts in the wallet.
			parts = append(parts, ".*")
		}
		// Strip a user-supplied leading anchor; we add our own.
		parts[1] = strings.TrimPrefix(parts[1], "^")
		var specifier string
		if strings.HasSuffix(parts[1], "$") {
			// User already supplied a trailing anchor.
			specifier = fmt.Sprintf("^%s/%s", parts[0], parts[1])
		} else {
			specifier = fmt.Sprintf("^%s/%s$", parts[0], parts[1])
		}
		regex, err := regexp.Compile(specifier)
		if err != nil {
			log.Warn().Str("specifier", specifier).Err(err).Msg("Invalid path regex")
			continue
		}
		regexes = append(regexes, regex)
	}
	return regexes
}

// fetchAccountsForWallet walks the accounts of a single wallet, keeps those
// whose "wallet/account" name matches one of the verification regexes and
// that can be unlocked with a known passphrase, and adds them to the shared
// accounts map (guarded by mu). Concurrency is bounded by a weighted
// semaphore of size s.processConcurrency.
func (s *Service) fetchAccountsForWallet(ctx context.Context, wallet e2wtypes.Wallet, accounts map[phase0.BLSPubKey]e2wtypes.Account, verificationRegexes []*regexp.Regexp) {
	var mu sync.Mutex
	sem := semaphore.NewWeighted(s.processConcurrency)
	var wg sync.WaitGroup
	for account := range wallet.Accounts(ctx) {
		wg.Add(1)
		// Loop variables are passed as arguments to avoid closure capture.
		go func(ctx context.Context,
			sem *semaphore.Weighted,
			wg *sync.WaitGroup,
			wallet e2wtypes.Wallet,
			account e2wtypes.Account,
			accounts map[phase0.BLSPubKey]e2wtypes.Account,
			mu *sync.Mutex,
		) {
			defer wg.Done()
			if err := sem.Acquire(ctx, 1); err != nil {
				log.Error().Err(err).Msg("Failed to acquire semaphore")
				return
			}
			defer sem.Release(1)

			// Ensure the name matches one of our account paths.
			name := fmt.Sprintf("%s/%s", wallet.Name(), account.Name())
			verified := false
			for _, verificationRegex := range verificationRegexes {
				if verificationRegex.Match([]byte(name)) {
					verified = true
					break
				}
			}
			if !verified {
				log.Debug().Str("account", name).Msg("Received unwanted account from server; ignoring")
				return
			}

			// Prefer the composite public key when the account provides one.
			var pubKey []byte
			if provider, isProvider := account.(e2wtypes.AccountCompositePublicKeyProvider); isProvider {
				pubKey = provider.CompositePublicKey().Marshal()
			} else {
				pubKey = account.PublicKey().Marshal()
			}

			// Ensure we can unlock the account with a known passphrase.
			// NOTE(review): accounts are left unlocked after this check —
			// confirm that is intentional.
			unlocked := false
			if unlocker, isUnlocker := account.(e2wtypes.AccountLocker); isUnlocker {
				for _, passphrase := range s.passphrases {
					if err := unlocker.Unlock(ctx, passphrase); err == nil {
						unlocked = true
						break
					}
				}
			}
			if !unlocked {
				log.Warn().Str("account", name).Msg("Failed to unlock account with any passphrase")
				return
			}

			// Set up account as unknown to beacon chain.
			mu.Lock()
			accounts[bytesutil.ToBytes48(pubKey)] = account
			mu.Unlock()
		}(ctx, sem, &wg, wallet, account, accounts, &mu)
	}
	wg.Wait()
}
package data_test import ( "accountapi/data" "encoding/json" "strings" "testing" ) // TestRecordType for testing unmarshalling JSON values. type TestRecordType struct { TestType data.RecordType `json:"testType"` } // TestValidRecordType verifies proper constraints for consts ("enums"), parsing and unmarshalling. func TestValidRecordType(t *testing.T) { rt := data.Accounts if !rt.IsValid() { t.Error("RecordType Accounts should be valid.") t.Fail() } if rt.String() != "accounts" { t.Error("Accounts string should be \"accounts\".") t.Fail() } jString := `{"testType":"accounts"}` jStruct := TestRecordType{} err := json.NewDecoder(strings.NewReader(jString)).Decode(&jStruct) if err != nil { t.Errorf("Can't unmarshal RecordType: %s", err.Error()) t.Fail() } if jStruct.TestType != rt { t.Errorf("Expected RecordType value: '%s', got: '%s'", rt.String(), jStruct.TestType.String()) t.Fail() } b, err := json.Marshal(&jStruct) if err != nil { t.Errorf("Can't marshal RecordType to string: %s\n", err.Error()) t.Fail() } else if string(b) != jString { t.Errorf("Expected marshalled value: '%s', got: '%s'\n", jString, string(b)) t.Fail() } } // TestInvalidRecordType verifies response of functions when called with invalid proper constraints for consts ("enums"), parsing and unmarshalling. func TestInvalidRecordType(t *testing.T) { rt := data.AccountEvents // The last RecordType value, when it's increased it should become an invalid value. rt++ if rt.IsValid() { t.Error("Invalid RecordType not detected") t.Fail() } jString := `{"testType":"fake_accounts"}` jStruct := TestRecordType{} err := json.NewDecoder(strings.NewReader(jString)).Decode(&jStruct) if err == nil { t.Error("Unmarshalling should fail for invalid enum values") t.Fail() } defer func() { if r := recover(); r == nil { t.Error("Calling String on invalid RecordType value should panic.") } }() _ = rt.String() // This should panic. }
// No Exercício #06 da seção "Exercícios", usamos for range para percorrer uma slice de string que representava uma lista de itens a comprar no mercado. Agora, resolva o mesmo exercício usando a sintaxe básica da instrução for (sintaxe apresentada aqui). package main import "fmt" func main() { var lista = []string{"ovos", "leite", "granola", "iogurte"} for i := 0; i < 4; i++ { fmt.Printf("%d - %s\n", i+1, lista[i]) } }
package main import ( "fmt" "net/http" "runtime" "search-package/controllers" "strconv" ) var port = 3000 func main() { runtime.GOMAXPROCS(4) controllers.RegisterSearchController() fmt.Printf("%s %d \n", "Server running on port", port) http.ListenAndServe("localhost:"+strconv.Itoa(port), nil) }
package solutions /* * @lc app=leetcode id=1 lang=golang * * [1] Two Sum */ /* Your runtime beats 96.22 % of golang submissions Your memory usage beats 48.53 % of golang submissions (4.2 MB) */ // @lc code=start func twoSum(nums []int, target int) []int { t := make(map[int]int) for i := 0; i < len(nums); i++ { if saved, ok := t[target-nums[i]]; ok { return []int{saved, i} } t[nums[i]] = i } return []int{-1, -1} } // @lc code=end
package leetcode import "testing" func TestLargestSumAfterKNegations(t *testing.T) { if largestSumAfterKNegations([]int{4, 2, 3}, 1) != 5 { t.Fatal() } if largestSumAfterKNegations([]int{3, -1, 0, 2}, 3) != 6 { t.Fatal() } if largestSumAfterKNegations([]int{2, -3, -1, 5, -4}, 2) != 13 { t.Fatal() } if largestSumAfterKNegations([]int{-8, 3, -5, -3, -5, -2}, 6) != 22 { t.Fatal() } }
package filewriter import . "backend/armarchitecture" type Registers struct { registers []Register }
package events

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/cloudfly/ecenter/pkg/sender"
	"github.com/cloudfly/ecenter/pkg/types"
	"github.com/cloudfly/ecenter/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	log "github.com/sirupsen/logrus"
)

// mockSender is the shared in-memory sender used by all tests in this file.
var (
	mockSender *MockSender
)

func init() {
	// Set the log level.
	log.SetLevel(log.DebugLevel)
	mockSender = &MockSender{}
	sender.Register("mock", CreateMockSender)
	sender.Build("mock", nil)
}

// MockSender records every message it is asked to send, as "user:message"
// strings, guarded by the embedded mutex.
type MockSender struct {
	sync.Mutex
	data []string
}

// CreateMockSender is the sender factory registered under the "mock"
// channel; it always returns the shared mockSender instance.
func CreateMockSender(setting map[string]string) (sender.Sender, error) {
	return mockSender, nil
}

// Send records one "user:message" entry per recipient and reports the number
// of recipients as sent.
func (sender *MockSender) Send(title, message, from string, to []string) (int, error) {
	sender.Lock()
	for _, u := range to {
		sender.data = append(sender.data, u+":"+message)
	}
	sender.Unlock()
	return len(to), nil
}

// MockGroupProvider is an identity group resolver: groups map to themselves.
type MockGroupProvider struct{}

func (gp MockGroupProvider) Groups2Users(data types.Set) types.Set {
	return data
}

// TestEvent_Update fires an event, applies an update command, and checks
// which fields may and may not be modified.
func TestEvent_Update(t *testing.T) {
	m := test.InitMySQL(t)
	defer test.DestroyMySQL(t)
	defer time.Sleep(time.Millisecond * 200)
	engine, err := NewEngine(context.Background(), m, &MockEmitter{}, MockGroupProvider{})
	require.NoError(t, err)
	ctx := context.Background()
	event, err := engine.FireEvent(ctx, "1", "app=demo",
		WithEventName("demo"),
		WithEventLevel(types.ERROR),
		WithEventMessage("this is alert message"),
		WithEventExpiretime(time.Now().Add(time.Hour)),
		WithEventRecoveryNotice(true),
		WithEventHandlers(
			Handler{
				Channels:  types.NewSetFromString("stdout"),
				Receivers: types.StringList{"jim"},
				Everytime: true,
			},
			Handler{
				Channels:  types.NewSetFromString("stdout"),
				Receivers: types.StringList{"jack"},
				Delay:     time.Second * 5,
				Interval:  time.Second * 10,
			},
		),
	)
	require.NoError(t, err)
	expireTime := time.Now().Add(time.Hour * 2).Round(time.Second)
	newEvent := Event{
		ID:         2,
		Name:       "task_name2",
		Level:      types.ERROR,
		Message:    "test message2",
		ExpireTime: expireTime,
	}
	applyCommand(event, cmdUpdate{newEvent, engine})
	time.Sleep(time.Millisecond * 100)
	event, err = engine.GetEvent(engine.ctx, event.ID)
	require.NoError(t, err)
	assert.Equal(t, int64(1), event.ID) // modification not allowed
	assert.Equal(t, types.ERROR, event.Level)
	assert.Equal(t, "test message2", event.Message)
	assert.Equal(t, expireTime, event.ExpireTime)
}

// TestEvents_CRUD covers firing, fetching, listing, and asynchronously
// updating an event through the engine against a real MySQL fixture.
func TestEvents_CRUD(t *testing.T) {
	m := test.InitMySQL(t)
	defer test.DestroyMySQL(t)
	defer time.Sleep(time.Millisecond * 200)
	engine, err := NewEngine(context.Background(), m, &MockEmitter{}, MockGroupProvider{})
	require.NoError(t, err)
	ctx := context.Background()
	event, err := engine.FireEvent(ctx, "1", "app=demo",
		WithEventLevel(types.ERROR),
		WithEventMessage("this is alert message"),
		WithEventExpiretime(time.Now().Add(time.Hour)),
		WithEventRecoveryNotice(true),
		WithEventHandlers(
			Handler{
				Channels:  types.NewSetFromString("stdout"),
				Receivers: types.StringList{"jim"},
				Everytime: true,
			},
			Handler{
				Channels:  types.NewSetFromString("stdout"),
				Receivers: types.StringList{"jack"},
				Delay:     time.Second * 5,
				Interval:  time.Second * 10,
			},
		),
	)
	require.NoError(t, err)
	defer engine.CloseEventByFingerprint(ctx, event.GetFingerprint())

	event, err = engine.GetEvent(ctx, event.ID)
	require.NoError(t, err)
	assert.Equal(t, "1", event.Name)
	assert.Equal(t, "app=demo", event.Metric)
	assert.Equal(t, types.ERROR, event.Level)

	// fetch event from cache
	events, err := engine.GetEvents(ctx, "", "1", "app=demo", false, false, nil, nil, time.Time{}, time.Time{}, "", 0, 0)
	require.NoError(t, err)
	assert.Equal(t, 1, len(events))
	assert.Equal(t, "app=demo", events[0].Metric)
	assert.Equal(t, types.ERROR, events[0].Level)

	// Some development-environment DBs have timezone errors, so widen the
	// time-range condition here to bypass timezone-related query failures.
	events, err = engine.GetEvents(ctx, "", "1", "app=demo", false, false, []types.Level{types.ERROR}, nil, time.Now().Add(-time.Hour*24), time.Now().Add(24*time.Hour), "alert", 1, 10)
	require.NoError(t, err)
	assert.Equal(t, 1, len(events))
	assert.Equal(t, "app=demo", events[0].Metric)
	assert.Equal(t, types.ERROR, events[0].Level)

	_, err = engine.FireEvent(ctx, "1", "app=demo",
		WithEventLevel(types.FATAL),
	)
	assert.NoError(t, err)
	time.Sleep(time.Millisecond * 100) // updates are asynchronous; pause briefly
	event, err = engine.GetEvent(ctx, event.ID)
	require.NoError(t, err)
	assert.Equal(t, "app=demo", event.Metric)
	assert.Equal(t, types.FATAL, event.Level)
	assert.Equal(t, int64(0), event.CloseTimeUnixnano)

	e, err := engine.getEvent(ctx, event.GetFingerprint())
	assert.NoError(t, err)
	assert.Equal(t, "app=demo", e.Metric)
	assert.Equal(t, types.FATAL, e.Level)
	assert.Equal(t, int64(0), event.CloseTimeUnixnano)

	{
		// A second engine on the same DB must observe the same event state.
		engine2, err := NewEngine(context.Background(), m, &MockEmitter{}, MockGroupProvider{})
		require.NoError(t, err)
		e, err = engine2.getEvent(ctx, event.GetFingerprint())
		assert.NoError(t, err)
		assert.Equal(t, "app=demo", e.Metric)
		assert.Equal(t, types.FATAL, e.Level)
		assert.Equal(t, int64(0), event.CloseTimeUnixnano)
	}
}

// TestEvents_Tags verifies adding, filtering by, and deleting event tags.
func TestEvents_Tags(t *testing.T) {
	m := test.InitMySQL(t)
	defer test.DestroyMySQL(t)
	defer time.Sleep(time.Millisecond * 200)
	engine, err := NewEngine(context.Background(), m, &MockEmitter{}, MockGroupProvider{})
	require.NoError(t, err)
	ctx := context.Background()
	event1, err := engine.FireEvent(ctx, "1", "app=demo",
		WithEventLevel(types.ERROR),
		WithEventMessage("this is alert message"),
		WithEventExpiretime(time.Now().Add(time.Hour)),
	)
	require.NoError(t, err)
	require.NoError(t, engine.AddEventTag(ctx, event1.ID, "public"))
	require.NoError(t, engine.AddEventTag(ctx, event1.ID, "gogo"))

	event2, err := engine.FireEvent(ctx, "2", "app=godlike",
		WithEventLevel(types.WARN),
		WithEventMessage("this is alert message"),
		WithEventExpiretime(time.Now().Add(time.Hour)),
	)
	require.NoError(t, err)
	require.NoError(t, engine.AddEventTag(ctx, event2.ID, "private"))
	require.NoError(t, engine.AddEventTag(ctx, event2.ID, "gogo"))

	events, err := engine.GetEvents(ctx, "", "", "", false, false, nil, []string{"public"}, time.Now().Add(-time.Hour*24), time.Now().Add(time.Hour*24), "alert", 1, 10)
	require.NoError(t, err)
	assert.Equal(t, 1, len(events))

	events, err = engine.GetEvents(ctx, "", "", "", false, false, nil, []string{"gogo"}, time.Now().Add(-time.Hour*24), time.Now().Add(time.Hour*24), "alert", 1, 10)
	require.NoError(t, err)
	assert.Equal(t, 2, len(events))

	require.NoError(t, engine.DeleteEventTag(ctx, event2.ID, "gogo"))
	events, err = engine.GetEvents(ctx, "", "", "", false, false, nil, []string{"gogo"}, time.Now().Add(-time.Hour*24), time.Now().Add(time.Hour*24), "alert", 1, 10)
	require.NoError(t, err)
	assert.Equal(t, 1, len(events))
	// Deleting a tag that does not exist must not error.
	require.NoError(t, engine.DeleteEventTag(ctx, event2.ID, "not-exist-tag-value"))
}

// TestEvents_Receivers verifies adding receivers to an event and filtering
// events by receiver.
func TestEvents_Receivers(t *testing.T) {
	m := test.InitMySQL(t)
	defer test.DestroyMySQL(t)
	defer time.Sleep(time.Millisecond * 200)
	engine, err := NewEngine(context.Background(), m, &MockEmitter{}, MockGroupProvider{})
	require.NoError(t, err)
	ctx := context.Background()
	event, err := engine.FireEvent(ctx, "1", "app=demo",
		WithEventLevel(types.ERROR),
		WithEventMessage("this is alert message"),
		WithEventExpiretime(time.Now().Add(time.Hour)),
	)
	require.NoError(t, err)
	require.NoError(t, engine.AddEventReceiver(ctx, "jim", event.ID))
	_, err = engine.FireEvent(ctx, "2", "app=godlike",
		WithEventLevel(types.WARN),
		WithEventMessage("this is alert message"),
		WithEventExpiretime(time.Now().Add(time.Hour)),
	)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 200)
	events, err := engine.GetEvents(ctx, "jim", "", "", false, false, nil, nil, time.Now().Add(-time.Hour*24), time.Now().Add(time.Hour*24), "alert", 1, 10)
	require.NoError(t, err)
	require.Equal(t, 1, len(events))
	receivers, err := engine.GetEventReceivers(ctx, event.ID)
	require.NoError(t, err)
	require.Equal(t, 1, len(receivers))
}

// TestEngine_CloseEvent verifies that closing an event removes it from the
// live cache while keeping its record retrievable by ID.
func TestEngine_CloseEvent(t *testing.T) {
	m := test.InitMySQL(t)
	defer test.DestroyMySQL(t)
	engine, err := NewEngine(context.Background(), m, &MockEmitter{}, MockGroupProvider{})
	require.NoError(t, err)
	ctx := context.Background()
	event, err := engine.FireEvent(ctx, "1", "app=demo",
		WithEventLevel(types.ERROR),
		WithEventMessage("this is alert message"),
		WithEventExpiretime(time.Now().Add(time.Hour)),
		WithEventRecoveryNotice(true),
		WithEventHandlers(
			Handler{
				Channels:  types.NewSetFromString("stdout"),
				Receivers: types.StringList{"jim"},
				Everytime: true,
			},
			Handler{
				Channels:  types.NewSetFromString("stdout"),
				Receivers: types.StringList{"jack"},
				Delay:     time.Second * 5,
				Interval:  time.Second * 10,
			},
		),
	)
	require.NoError(t, err)
	err = engine.CloseEvent(ctx, "1", "app=demo")
	assert.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	// Closed events disappear from the fingerprint cache...
	e, err := engine.getEvent(ctx, event.GetFingerprint())
	assert.NoError(t, err)
	assert.Nil(t, e)
	// ...but remain retrievable by ID.
	event, err = engine.GetEvent(ctx, event.ID)
	assert.NoError(t, err)
	assert.Equal(t, "app=demo", event.Metric)
	assert.Equal(t, types.ERROR, event.Level)
	assert.Zero(t, event.CloseTimeUnixnano)
}

// TestEngine_Restart verifies that a freshly constructed engine reloads open
// events (including handlers) from the shared database.
func TestEngine_Restart(t *testing.T) {
	m := test.InitMySQL(t)
	defer test.DestroyMySQL(t)
	defer time.Sleep(time.Millisecond * 200)
	engine, err := NewEngine(context.Background(), m, &MockEmitter{}, MockGroupProvider{})
	require.NoError(t, err)
	ctx := context.Background()
	e, err := engine.FireEvent(ctx, "1", "app=demo",
		WithEventLevel(types.ERROR),
		WithEventMessage("this is alert message"),
		WithEventExpiretime(time.Now().Add(time.Hour)),
		WithEventRecoveryNotice(true),
		WithEventHandlers(
			Handler{
				Channels:  types.NewSetFromString("stdout"),
				Receivers: types.StringList{"jim"},
				Everytime: true,
			},
			Handler{
				Channels:  types.NewSetFromString("stdout"),
				Receivers: types.StringList{"jack"},
				Delay:     time.Second * 5,
				Interval:  time.Second * 10,
			},
		),
	)
	require.NoError(t, err)
	defer engine.CloseEventByFingerprint(ctx, e.GetFingerprint())

	engine2, err := NewEngine(context.Background(), m, &MockEmitter{}, MockGroupProvider{})
	require.NoError(t, err)
	defer engine2.CloseEventByFingerprint(ctx, e.GetFingerprint())
	event, err := engine2.getEvent(ctx, e.GetFingerprint())
	assert.NoError(t, err)
	assert.Equal(t, "app=demo", event.Metric)
	assert.Equal(t, e.GetFingerprint(), event.GetFingerprint())
	assert.Equal(t, types.ERROR, event.Level)
	assert.Equal(t, 2, len(event.Handlers))
	assert.Zero(t, event.CloseTimeUnixnano)
}

// TestEngine_Notify verifies handler notification scheduling (immediate,
// delayed, and interval-based) by counting emitted records.
func TestEngine_Notify(t *testing.T) {
	m := test.InitMySQL(t)
	defer test.DestroyMySQL(t)
	defer time.Sleep(time.Millisecond * 200)
	emitter := &MockEmitter{}
	engine, err := NewEngine(context.Background(), m, emitter, MockGroupProvider{})
	require.NoError(t, err)
	ctx := context.Background()
	message := "this is alert message in TestEngine_Notify()"
	event, err := engine.FireEvent(ctx, "1", "app=demo",
		WithEventLevel(types.ERROR),
		WithEventMessage(message),
		WithEventExpiretime(time.Now().Add(time.Hour)),
		WithEventRecoveryNotice(true),
		WithEventHandlers(
			// On creation, notify once immediately.
			Handler{
				Channels:  types.NewSetFromString("mock"),
				Receivers: types.StringList{"jim"},
				Everytime: true,
			},
			// Notify once 1 second after creation, then again at seconds
			// 3, 5, 7, 9, 11.
			Handler{
				Channels:  types.NewSetFromString("mock"),
				Receivers: types.StringList{"jack"},
				Delay:     time.Second * 1,
				Interval:  time.Second * 2,
			},
		),
	)
	require.NoError(t, err)
	require.Equal(t, 1, len(engine.events))
	time.Sleep(time.Second * 6) // sleep so the handlers get a chance to call Notify,
	assert.NoError(t, engine.CloseEventByFingerprint(ctx, event.GetFingerprint()))
	// so 2 more messages go out at the end
	assert.Equal(t, 4, len(emitter.records))
}
package cli

import (
	"fmt"
	"os/exec"
	"syscall"
)

var (
	// scwcli is the relative path to the scw binary under test.
	scwcli = "../../scw"

	// publicCommands lists every user-facing subcommand the CLI must expose.
	publicCommands = []string{
		"help", "attach", "commit", "cp", "create", "events", "exec",
		"history", "images", "info", "inspect", "kill", "login", "logout",
		"logs", "port", "products", "ps", "rename", "restart", "rm", "rmi",
		"run", "search", "start", "stop", "tag", "top", "version", "wait",
	}

	// secretCommands lists hidden/internal subcommands.
	secretCommands = []string{
		"_patch", "_completion", "_flush-cache", "_userdata", "_billing",
	}

	// publicOptions lists the global flags with their default values.
	publicOptions = []string{
		"-h, --help=false",
		"-D, --debug=false",
		"-V, --verbose=false",
		"-q, --quiet=false",
		"--sensitive=false",
		"-v, --version=false",
	}
)

// shouldFitInTerminal is a goconvey-style assertion: it returns "" when the
// actual string is shorter than 80 characters, and a failure message
// describing the overflow otherwise. It panics if actual is not a string.
func shouldFitInTerminal(actual interface{}, expected ...interface{}) string {
	line := actual.(string)
	if len(line) < 80 {
		return ""
	}
	return fmt.Sprintf("len(%q)\n -> %d chars (> 80 chars)", actual, len(line))
}

// getExitCode extracts the process exit status from an *exec.ExitError.
// It returns (0, error) when the status cannot be determined.
func getExitCode(err error) (int, error) {
	if exitErr, ok := err.(*exec.ExitError); ok {
		if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok {
			return ws.ExitStatus(), nil
		}
	}
	return 0, fmt.Errorf("failed to get exit code")
}

// processExitCode maps a command error to its exit code: 0 for a nil error,
// the real process status when recoverable, 127 otherwise.
func processExitCode(err error) int {
	if err == nil {
		return 0
	}
	code, convErr := getExitCode(err)
	if convErr != nil {
		// TODO: Fix this so we check the error's text.
		// We failed to retrieve the exit code, so we report 127.
		return 127
	}
	return code
}

// runCommandWithOutput runs cmd and returns its combined stdout/stderr, the
// derived exit code, and the raw error from CombinedOutput.
func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
	var combined []byte
	combined, err = cmd.CombinedOutput()
	return string(combined), processExitCode(err), err
}