text
stringlengths
11
4.05M
/*
 * @lc app=leetcode.cn id=239 lang=golang
 *
 * [239] 滑动窗口最大值
 */
package main

import "fmt"

// @lc code=start

// maxSlidingWindow returns the maximum of every contiguous window of
// size k in nums, using a monotonically decreasing deque of indices.
func maxSlidingWindow(nums []int, k int) []int {
	n := len(nums)
	// Zero or one element: the input is its own answer.
	if n < 2 {
		return nums
	}
	// deque holds indices whose values are strictly decreasing; the
	// front is always the index of the current window's maximum.
	deque := make([]int, 0, k)
	result := make([]int, 0, n-k+1)
	for idx, val := range nums {
		// Evict the front once it slides out of the window.
		if idx >= k && deque[0] == idx-k {
			deque = deque[1:]
		}
		// Pop every trailing index whose value is smaller than the
		// incoming one, keeping the deque decreasing.
		for last := len(deque); last > 0 && nums[deque[last-1]] < val; last = len(deque) {
			deque = deque[:last-1]
		}
		deque = append(deque, idx)
		// Once the first full window exists, record its maximum.
		if idx >= k-1 {
			result = append(result, nums[deque[0]])
		}
	}
	return result
}

// @lc code=end

func main() {
	fmt.Println(maxSlidingWindow([]int{1, 3, -1, -3, 5, 3, 6, 7}, 3))
}
/*
Copyright 2021 The Nuclio Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package loggerus

import (
	"context"
	"testing"

	"github.com/nuclio/logger"
	"github.com/stretchr/testify/suite"
)

// loggerSuite smoke-tests every logging entry point of a logger created
// via NewLoggerusForTests; the checks are "does not panic/error", not
// output assertions.
type loggerSuite struct {
	suite.Suite
	// logger under test, shared by all test methods
	logger logger.Logger
}

// SetupSuite creates the logger shared by the whole suite; a creation
// failure aborts the suite immediately.
func (suite *loggerSuite) SetupSuite() {
	var err error

	// initialize logger for test
	suite.logger, err = NewLoggerusForTests("test")
	suite.Require().NoError(err)
}

// TestLog exercises each plain (unstructured) log level once.
func (suite *loggerSuite) TestLog() {
	suite.logger.Debug("test")
	suite.logger.Warn("test")
	suite.logger.Error("test")
	suite.logger.Info("test")
}

// TestLogWith exercises each structured ("With" key/value) log level once.
func (suite *loggerSuite) TestLogWith() {
	suite.logger.DebugWith("test", "with", "something")
	suite.logger.WarnWith("test", "with", "something")
	suite.logger.ErrorWith("test", "with", "something")
	suite.logger.InfoWith("test", "with", "something")
}

// TestLogWithCtx exercises each context-aware log level once, passing a
// context carrying a request ID value.
func (suite *loggerSuite) TestLogWithCtx() {
	ctx := context.WithValue(context.TODO(), "RequestID", "123") // nolint
	suite.logger.DebugWithCtx(ctx, "test", "with", "something")
	suite.logger.WarnWithCtx(ctx, "test", "with", "something")
	suite.logger.ErrorWithCtx(ctx, "test", "with", "something")
	suite.logger.InfoWithCtx(ctx, "test", "with", "something")
}

// TestLoggerTestSuite hooks the suite into the standard go test runner.
func TestLoggerTestSuite(t *testing.T) {
	suite.Run(t, new(loggerSuite))
}
package main import ( "github.com/jorgenpo/stayfit-server/config" "log" "os" "github.com/jorgenpo/stayfit-server/database" "github.com/jorgenpo/stayfit-server/server" ) func main() { serverConfig, err := config.GetConfig() if err != nil { log.Fatalf("Failed to load config file: %e", err) os.Exit(1) } serverDatabase, err := database.GetDatabase(serverConfig.DatabaseName, serverConfig.DatabaseUser, serverConfig.DatabasePassword) if err != nil { log.Fatalf("Failed to open database connection: %e", err) os.Exit(2) } stayFitServer := server.StayFitServer{} stayFitServer.Init(serverConfig, serverDatabase) }
package goecharts import ( "encoding/json" "fmt" "reflect" gf "github.com/wanglovesyang/gframe" ) type BarSettings struct { Title string `json:"title"` TruncPrecision int32 `json:"trunc_precision"` HideMarkPoint bool `json:"hide_markpoint"` HideMarkLine bool `json:"hide_markline"` } func parseBarSettings(s interface{}) (ret *BarSettings, reterr error) { switch a := s.(type) { case BarSettings: ret = &a case *BarSettings: ret = a default: jsonData, err := json.Marshal(s) if err != nil { reterr = err return } if err := json.Unmarshal(jsonData, &ret); err != nil { reterr = err return } } return } func rangeNum(size int32) (ret []int32) { ret = make([]int32, size) for i := int32(0); i < size; i++ { ret[i] = int32(i) } return } func extractXAxisData(x, y interface{}) (ret interface{}, reterr error) { if x == nil { return nil, nil } switch xx := x.(type) { case string: if dy, suc := y.(*gf.DataFrame); !suc { reterr = fmt.Errorf("argument y should be a dataframe when x is given as a string") } else { if valy, err := dy.GetIdColumns(xx); err == nil { ret = valy[0] return } if valy, err := dy.GetValColumns(xx); err == nil { ret = valy[0] return } reterr = fmt.Errorf("columns %s does not exist in dataframe", xx) } case *gf.DataFrame: if xx.Shape()[1] != 1 { reterr = fmt.Errorf("cannot adopt data frame with multiple columns as x label") return } if col, err := xx.GetIdColumns(xx.Columns()...); err == nil { ret = col return } if col, err := xx.GetValColumns(xx.Columns()...); err == nil { ret = col return } reterr = fmt.Errorf("are you kidding me?") default: if tp := reflect.TypeOf(x).Kind(); tp == reflect.Slice { ret = x return } reterr = fmt.Errorf("Invalid type of x axis") } return } func extractSeries(x, y interface{}, maker SeriesMaker, tp string) (ret []*Series, reterr error) { switch yy := y.(type) { case []float32: ret = []*Series{maker(yy, "-", tp)} case map[string][]float32: for k, v := range yy { ret = append(ret, maker(v, k, tp)) } case [][]float32: for k, v := 
range yy { ret = append(ret, maker(v, fmt.Sprintf("s%d", k), tp)) } case *gf.DataFrame: xx := x.(string) columns := yy.ValueColumnNames() if len(columns) == 0 { reterr = fmt.Errorf("no value columns in the dataframe") return } var cols [][]float32 if cols, reterr = yy.GetValColumns(columns...); reterr != nil { return } for i, c := range cols { if columns[i] != xx { ret = append(ret, maker(c, columns[i], tp)) } } default: reterr = fmt.Errorf("unsupported y type") } return } func Bar(x interface{}, y interface{}, param interface{}) (ret *Chart) { var reterr error defer func() { if reterr != nil { panic(reterr) } }() bp, reterr := parseBarSettings(param) if reterr != nil { return } xAxisData, reterr := extractXAxisData(x, y) if reterr != nil { return } maker := DefaultSeries if bp.TruncPrecision > 0 { maker = TruncatedSeriesMaker(maker, bp.TruncPrecision) } if !bp.HideMarkPoint { maker = SeriesMakerWithMarkPoint(maker, bp.TruncPrecision) } if !bp.HideMarkLine { maker = SeriesMakerWithMarkLine(maker, bp.TruncPrecision) } series, reterr := extractSeries(x, y, DefaultSeries, "bar") if reterr != nil { return } xAxis := DefaultXAxis(xAxisData, "category") title := DefaultTitle(bp.Title) var displaySeriesNames []string for _, s := range series { displaySeriesNames = append(displaySeriesNames, s.Name) } ret = &Chart{ opt: &ChartOption{ Title: []*Title{title}, ToolBox: DefaultToolBox(), ToolTip: DefaultToolTip(), Series: series, Legend: []*Legend{ DefaultLegend(displaySeriesNames), }, XAxis: []*XAxis{ xAxis, }, YAxis: []*YAxis{ DefaultYAxis(), }, Color: defaultColorSet, }, } return }
// Package tpl holds the code-generation templates; custom templates must
// also be declared in package tpl.
package tpl

// ltgtTpl generates range-check validation code for the Lt/Lte/Gt/Gte
// rules. It is executed with a RulesContext; $f binds the field and $r
// the rule set. The `accessor` and `err` template functions are defined
// in register.go and registered in the template's function map.
// NOTE: the // comments inside the raw string below are part of the
// template output (runtime text), so they are left untouched.
const ltgtTpl = `{{ $f := .Field }}{{ $r := .Rules }} // 使用模板的时候,会传入相应的RulesContext,Filed和Rules是其的字段,定义变量f,r,f是域,r是规则
{{ if $r.Lt }} // 看r的Lt是否有,如果有看是否有Gt
	{{ if $r.Gt }} // 判断r是否设置了Gt这个规则
		{{ if gt $r.GetLt $r.GetGt }} // 如果小于的数字大于大于的数字,那ok
		// 通过accessor . 获取传入的值,是val,如果小于等于小的数字,大于等于小于的数字,那么有错
			if val := {{ accessor . }}; val <= {{ $r.Gt }} || val >= {{ $r.Lt }} {
				return {{ err . "value must be inside range (" $r.GetGt ", " $r.GetLt ")" }} // 返回err(., "value must be inside range (" $r.GetGt ", " $r.GetLt ")"),err是在register.go中定义的函数,并且已经添加到了模板的函数映射关系中,模板可以直接调用
			}
		{{ else }}
			//假设此时是<a,>b,并且a<=b,那么此时val的范围应该是[a, b]之外,通过accessor .可以获取要进行验证的值
			if val := {{ accessor . }}; val >= {{ $r.Lt }} && val <= {{ $r.Gt }} {
				return {{ err . "value must be outside range [" $r.GetLt ", " $r.GetGt "]" }}
			}
		{{ end }}
	{{ else if $r.Gte }}
		{{ if gt $r.GetLt $r.GetGte }}
			if val := {{ accessor . }}; val < {{ $r.Gte }} || val >= {{ $r.Lt }} {
				return {{ err . "value must be inside range [" $r.GetGte ", " $r.GetLt ")" }}
			}
		{{ else }}
			if val := {{ accessor . }}; val >= {{ $r.Lt }} && val < {{ $r.Gte }} {
				return {{ err . "value must be outside range [" $r.GetLt ", " $r.GetGte ")" }}
			}
		{{ end }}
	{{ else }}
		if {{ accessor . }} >= {{ $r.Lt }} {
			return {{ err . "value must be less than " $r.GetLt }}
		}
	{{ end }}
{{ else if $r.Lte }}
	{{ if $r.Gt }}
		{{ if gt $r.GetLte $r.GetGt }}
			if val := {{ accessor . }}; val <= {{ $r.Gt }} || val > {{ $r.Lte }} {
				return {{ err . "value must be inside range (" $r.GetGt ", " $r.GetLte "]" }}
			}
		{{ else }}
			if val := {{ accessor . }}; val > {{ $r.Lte }} && val <= {{ $r.Gt }} {
				return {{ err . "value must be outside range (" $r.GetLte ", " $r.GetGt "]" }}
			}
		{{ end }}
	{{ else if $r.Gte }}
		{{ if gt $r.GetLte $r.GetGte }}
			if val := {{ accessor . }}; val < {{ $r.Gte }} || val > {{ $r.Lte }} {
				return {{ err . "value must be inside range [" $r.GetGte ", " $r.GetLte "]" }}
			}
		{{ else }}
			if val := {{ accessor . }}; val > {{ $r.Lte }} && val < {{ $r.Gte }} {
				return {{ err . "value must be outside range (" $r.GetLte ", " $r.GetGte ")" }}
			}
		{{ end }}
	{{ else }}
		if {{ accessor . }} > {{ $r.Lte }} {
			return {{ err . "value must be less than or equal to " $r.GetLte }}
		}
	{{ end }}
{{ else if $r.Gt }}
	if {{ accessor . }} <= {{ $r.Gt }} {
		return {{ err . "value must be greater than " $r.GetGt }}
	}
{{ else if $r.Gte }}
	if {{ accessor . }} < {{ $r.Gte }} {
		return {{ err . "value must be greater than or equal to " $r.GetGte }}
	}
{{ end }}
`
package ircserver

import (
	"fmt"
	"strings"

	"gopkg.in/sorcix/irc.v2"
)

func init() {
	// server_KILL handles KILL messages arriving over a server link.
	Commands["server_KILL"] = &ircCommand{
		Func:      (*IRCServer).cmdServerKill,
		MinParams: 1,
	}
}

// cmdServerKill processes a KILL issued over a server link: it notifies
// the victim with a KILL, broadcasts a QUIT to the victim's common
// channels and the services, then deletes the victim's session.
//
// msg.Params[0] is the target nick; the trailing parameter is the kill
// reason; msg.Prefix identifies the killer.
func (i *IRCServer) cmdServerKill(s *Session, reply *Replyctx, msg *irc.Message) {
	// A KILL needs both a target and a reason.
	if len(msg.Params) < 2 {
		i.sendServices(reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: irc.ERR_NEEDMOREPARAMS,
			Params:  []string{"*", msg.Command, "Not enough parameters"},
		})
		return
	}

	// Prefer the killer's full session prefix (nick!user@host) over the
	// bare message prefix: scan sessions sharing s's id whose nick
	// matches the message prefix name.
	killPrefix := msg.Prefix
	for id, session := range i.sessions {
		if id.Id != s.Id.Id || id.Reply == 0 || NickToLower(session.Nick) != NickToLower(msg.Prefix.Name) {
			continue
		}
		killPrefix = &session.ircPrefix
		break
	}

	// Resolve the victim by nick.
	session, ok := i.nicks[NickToLower(msg.Params[0])]
	if !ok {
		i.sendServices(reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: irc.ERR_NOSUCHNICK,
			Params:  []string{"*", msg.Params[0], "No such nick/channel"},
		})
		return
	}

	// Kill path of the form "ircd!host!nick"; collapse "!!" so an empty
	// host does not leave a double separator.
	killPath := fmt.Sprintf("ircd!%s!%s", killPrefix.Host, killPrefix.Name)
	killPath = strings.Replace(killPath, "!!", "!", -1)

	// Tell the victim it was killed…
	i.sendUser(session, reply, &irc.Message{
		Prefix:  killPrefix,
		Command: irc.KILL,
		Params:  []string{session.Nick, fmt.Sprintf("%s (%s)", killPath, msg.Trailing())},
	})
	// …announce the QUIT to its common channels and the services…
	i.sendServices(reply, i.sendCommonChannels(session, reply, &irc.Message{
		Prefix:  &session.ircPrefix,
		Command: irc.QUIT,
		Params:  []string{"Killed: " + msg.Trailing()},
	}))
	// …and finally remove the session.
	i.deleteSessionLocked(session, reply.msgid)
}
package fsnotify // no-op on Windows
package train

import (
	"github.com/y4v8/errors"
)

// ParamsRoute is the request payload for the route endpoint; fields are
// serialized as routes[0][...] form parameters.
type ParamsRoute struct {
	From  string `url:"routes[0][from]"`
	To    string `url:"routes[0][to]"`
	Date  string `url:"routes[0][date]"`
	Train string `url:"routes[0][train]"`
}

// DataRoute is the decoded response of the route endpoint.
type DataRoute struct {
	Tpl    string       `json:"tpl"`
	Routes []TrainRoute `json:"routes"`
}

// TrainRoute pairs a train identifier with its ordered station list.
type TrainRoute struct {
	Train string    `json:"train"`
	List  []Station `json:"list"`
}

// Station describes one stop on a route, including times, distance and
// coordinates.
type Station struct {
	Name          string  `json:"name"`
	Code          int     `json:"code"`
	ArrivalTime   string  `json:"arrivalTime"`
	DepartureTime string  `json:"departureTime"`
	Distance      string  `json:"distance"`
	Lat           float64 `json:"lat"`
	Long          float64 `json:"long"`
}

// Route POSTs param to the "route/" endpoint and decodes the response
// into a DataRoute. Any transport/decoding error is returned wrapped.
func (a *Api) Route(param ParamsRoute) (*DataRoute, error) {
	var data DataRoute
	err := a.requestDataObject("POST", "route/", param, &data)
	if err != nil {
		return nil, errors.Wrap(err)
	}
	return &data, nil
}
package resources import ( "errors" "fmt" ) var resourceTypes []Resource func InitResourcesForPersonage(personageId int64) error { checkConnection() columns := []string{"resource_id", "personage_id", "amount"} data := make([][]interface{}, len(resourceTypes)) for i := 0; i < len(data); i++ { data[i] = make([]interface{}, len(columns)) data[i][0] = resourceTypes[i].Id data[i][1] = personageId data[i][2] = 0 } return connection.MultipleInsert(STORAGETABLE, columns, data) } func AddResource(personageId int64, resourceId int32, amount int64) error { if !IsEnoughResource(personageId, resourceId, amount) { return errors.New("Not enough resources") } query := "UPDATE %s SET amount=amount-%d WHERE resource_id=%d AND personage_id=%d;" query = fmt.Sprintf(query, STORAGETABLE, amount, resourceId, personageId) _, err := connection.ManualQuery(query) return err } func GetPersonageResources(personageId int64, accountId int64) []ResourceResponse { query := `SELECT resource_id, name, amount FROM resource_storages INNER JOIN resource_types ON resource_storages.resource_id=resource_types.id AND personage_id=%d AND personage_id IN (SELECT id FROM personages where account_id=%d);` query = fmt.Sprintf(query, personageId, accountId) res, err := connection.ManualQuery(query) var result []ResourceResponse for err == nil && res.Next() { storedResource := ResourceResponse{} res.Scan(&storedResource.Id, &storedResource.Name, &storedResource.Amount) result = append(result, storedResource) } return result } func IsEnoughResource(personageId int64, resourceId int32, amount int64) bool { checkConnection() query := "SELECT * FROM %s WHERE resource_id=%d AND personage_id=%d AND amount >= %d;" query = fmt.Sprintf(query, STORAGETABLE, resourceId, personageId, amount) res, err := connection.ManualQuery(query) if err != nil { return false } defer res.Close() return res.Next() } //Must be called in init to init all resources list func getResourceTypes() error { if len(resourceTypes) > 0 { return nil } 
checkConnection() res, err := connection.Select(TYPETABLE, "id", "name") if err != nil { return err } defer res.Close() for res.Next() { resType := Resource{} res.Scan(&resType.Id, &resType.Name) resourceTypes = append(resourceTypes, resType) } return nil }
package models import ( "fmt" "github.com/jinzhu/gorm" "time" ) type Cate struct { ID int `gorm:"primary_key" json:"id"` Name string `json:"name"` State int `json:"state"` CreatedOn int `json:"created_on"` ModifiedOn int `json:"modified_on"` } // 获取所有栏目 func GetCates(name string) (cates []Cate) { query := db.Select("id,name,state,created_on,modified_on") if name != "" { query = query.Where("name LIKE ?", fmt.Sprintf("%%%s%%", name)) } query.Order("id DESC").Find(&cates) return } // GetCate 获取单个栏目 func GetCate(where *Cate) (cate Cate) { db.Where(where).First(&cate) return } // AddCate 栏目创建 func AddCate(cate *Cate) int64 { return db.Create(cate).RowsAffected } // EditCate 栏目编辑 func EditCate(id int, name string, state int) bool { db.Model(&Cate{}).Where("id = ?", id).Updates(Cate{Name: name, State: state}) return true } // DelCate 栏目删除 func DelCate(id int) bool { db.Where("id = ?", id).Delete(&Cate{}) return true } func (tag *Cate) BeforeCreate(scope *gorm.Scope) error { scope.SetColumn("CreatedOn", time.Now().Unix()) return nil } func (tag *Cate) BeforeUpdate(scope *gorm.Scope) error { scope.SetColumn("ModifiedOn", time.Now().Unix()) return nil }
package server import ( "errors" "net/http" "golang.org/x/net/context" "github.com/Sirupsen/logrus" "github.com/bryanl/dolb/dao" "github.com/bryanl/dolb/do" "github.com/bryanl/dolb/dolbutil" "github.com/bryanl/dolb/kvs" "github.com/bryanl/dolb/pkg/app" "github.com/bryanl/dolb/service" "github.com/gorilla/mux" ) // Config is configuration for the load balancer service. type Config struct { BaseDomain string ClusterOpsFactory func() ClusterOps Context context.Context DBSession dao.Session KVS kvs.KVS ServerURL string DigitalOceanFactory func(token string, config *Config) do.DigitalOcean OauthClientID string OauthClientSecret string OauthCallback string LBUpdateChan chan *dao.LoadBalancer logger *logrus.Entry } // NewConfig creates a Config. func NewConfig(bd, su string, sess dao.Session) *Config { return &Config{ BaseDomain: bd, ClusterOpsFactory: NewClusterOps, Context: context.Background(), DBSession: sess, ServerURL: su, DigitalOceanFactory: func(token string, config *Config) do.DigitalOcean { client := do.GodoClientFactory(token) return do.NewLiveDigitalOcean(client, config.BaseDomain) }, LBUpdateChan: make(chan *dao.LoadBalancer, 10), logger: app.DefaultLogger(), } } // SetLogger sets a logger for config. func (c *Config) SetLogger(l *logrus.Entry) { c.logger = l } // GetLogger returns the config's logger. func (c *Config) GetLogger() *logrus.Entry { return c.logger } // IDGen returns a new random id. func (c *Config) IDGen() string { id := dolbutil.GenerateRandomID() return dolbutil.TruncateID(id) } // DigitalOcean returns a new instance of do.DigitalOcean. func (c *Config) DigitalOcean(token string) do.DigitalOcean { return c.DigitalOceanFactory(token, c) } // API is a the load balancer API. type API struct { Mux http.Handler } // New creates an instance of API. 
func New(config *Config) (*API, error) { mux := mux.NewRouter() a := &API{ Mux: mux, } if config.ServerURL == "" { return nil, errors.New("missing ServerURL") } mux.Handle("/api/lb", service.Handler{Config: config, F: LBListHandler}).Methods("GET") mux.Handle("/api/lb", service.Handler{Config: config, F: LBCreateHandler}).Methods("POST") mux.Handle("/api/lb/{lb_id}", service.Handler{Config: config, F: LBRetrieveHandler}).Methods("GET") mux.Handle("/api/lb/{lb_id}", service.Handler{Config: config, F: LBDeleteHandler}).Methods("DELETE") mux.Handle("/api/user", service.Handler{Config: config, F: UserRetrieveHandler}).Methods("GET") mux.Handle(service.PingPath, service.Handler{Config: config, F: PingHandler}).Methods("POST") mux.Handle("/api/lb/{lb_id}/services", service.Handler{Config: config, F: ServiceCreateHandler}).Methods("POST") mux.Handle("/api/lb/{lb_id}/services", service.Handler{Config: config, F: ServiceListHandler}).Methods("GET") return a, nil }
package httpserver import ( "context" "encoding/json" "io/ioutil" "log" "net/http" "strconv" "github.com/ekotlikoff/gochess/internal/model" matchserver "github.com/ekotlikoff/gochess/internal/server/backend/match" gateway "github.com/ekotlikoff/gochess/internal/server/frontend" ) // HTTPBackend handles http requests type HTTPBackend struct { MatchServer *matchserver.MatchingServer BasePath string Port int } // Serve the http server func (backend *HTTPBackend) Serve() { bp := backend.BasePath if len(bp) > 0 && (bp[len(bp)-1:] == "/" || bp[0:1] != "/") { panic("Invalid gateway base path") } mux := http.NewServeMux() mux.Handle(bp+"/http/match", makeSearchForMatchHandler(backend.MatchServer)) mux.Handle(bp+"/http/sync", makeSyncHandler()) mux.Handle(bp+"/http/async", makeAsyncHandler()) log.Println("HTTP server listening on port", backend.Port, "...") http.ListenAndServe(":"+strconv.Itoa(backend.Port), mux) } // SetQuiet logging func SetQuiet() { log.SetOutput(ioutil.Discard) } func makeSearchForMatchHandler( matchServer *matchserver.MatchingServer, ) http.Handler { handler := func(w http.ResponseWriter, r *http.Request) { player := gateway.GetSession(w, r) if player == nil { return } else if !player.GetSearchingForMatch() { player.Reset() player.SetSearchingForMatch(true) matchServer.MatchPlayer(player) } ctx, cancel := context.WithTimeout(context.Background(), matchserver.PollingDefaultTimeout) defer cancel() if player.HasMatchStarted(ctx) { player.SetSearchingForMatch(false) } else { // Return HTTP 202 until match starts. w.WriteHeader(http.StatusAccepted) } } return http.HandlerFunc(handler) } func makeSyncHandler() http.Handler { handler := func(w http.ResponseWriter, r *http.Request) { player := gateway.GetSession(w, r) if player == nil { return } switch r.Method { case "GET": pieceMove := player.GetSyncUpdate() if pieceMove != nil { json.NewEncoder(w).Encode(*pieceMove) return } // Return HTTP 204 if no update. 
w.WriteHeader(http.StatusNoContent) case "POST": var moveRequest model.MoveRequest err := json.NewDecoder(r.Body).Decode(&moveRequest) if err != nil { log.Println("Failed to parse move body ", err) w.WriteHeader(http.StatusBadRequest) return } success := player.MakeMove(moveRequest) if !success { w.WriteHeader(http.StatusBadRequest) return } } } return http.HandlerFunc(handler) } func makeAsyncHandler() http.Handler { handler := func(w http.ResponseWriter, r *http.Request) { player := gateway.GetSession(w, r) if player == nil { return } switch r.Method { case "GET": asyncUpdate := player.GetAsyncUpdate() if asyncUpdate == nil { // Return HTTP 204 if no update. w.WriteHeader(http.StatusNoContent) return } w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(asyncUpdate); err != nil { w.WriteHeader(http.StatusInternalServerError) return } if asyncUpdate.GameOver { player.WaitForMatchOver() player.Reset() } case "POST": var requestAsync matchserver.RequestAsync err := json.NewDecoder(r.Body).Decode(&requestAsync) if err != nil { log.Println("Bad request", err) w.WriteHeader(http.StatusBadRequest) return } player.RequestAsync(requestAsync) } } return http.HandlerFunc(handler) }
package responses import ( "encoding/json" "testing" "github.com/stretchr/testify/assert" ) func TestDecodeAccountsPendingResponse(t *testing.T) { encoded := "{\n \"blocks\" : {\n \"nano_1111111111111111111111111111111111111111111111111117353trpda\": [\"142A538F36833D1CC78B94E11C766F75818F8B940771335C6C1B8AB880C5BB1D\"],\n \"nano_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3\": [\"4C1FEEF0BEA7F50BE35489A1233FE002B212DEA554B55B1B470D78BD8F210C74\"]\n }\n}" var decoded AccountsPendingResponse json.Unmarshal([]byte(encoded), &decoded) blocks := *decoded.Blocks assert.Len(t, blocks, 2) assert.Equal(t, "4C1FEEF0BEA7F50BE35489A1233FE002B212DEA554B55B1B470D78BD8F210C74", blocks["nano_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3"][0]) assert.Equal(t, "142A538F36833D1CC78B94E11C766F75818F8B940771335C6C1B8AB880C5BB1D", blocks["nano_1111111111111111111111111111111111111111111111111117353trpda"][0]) } func TestDecodeAccountsPendingResponseError(t *testing.T) { encoded := "{\"error\": \"Account not found\"}" var decoded AccountsPendingResponse json.Unmarshal([]byte(encoded), &decoded) assert.Nil(t, decoded.Blocks) }
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// pairRE splits a string into consecutive two-character chunks; compiled
// once at package scope instead of on every ControlCode call.
var pairRE = regexp.MustCompile(`.{2}`)

func main() {
	fmt.Println(ControlCode(
		"7904006306693",
		"876814",
		"1665979",
		"20080519",
		"35959",
		"zZ7Z]xssKqkEf_6K9uH(EcV+%x+u[Cca9T%+_$kiLjT8(zr3T9b5Fx2xG-D+_EBS",
	))
	//output: 7B-F3-48-A8
}

// verhoeff appends `times` Verhoeff check digits to the decimal string
// num; each new digit is computed over the string including previously
// appended digits.
func verhoeff(num string, times int) string {
	// d: Verhoeff multiplication table (dihedral group D5).
	d := [10][10]int{
		{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
		{1, 2, 3, 4, 0, 6, 7, 8, 9, 5},
		{2, 3, 4, 0, 1, 7, 8, 9, 5, 6},
		{3, 4, 0, 1, 2, 8, 9, 5, 6, 7},
		{4, 0, 1, 2, 3, 9, 5, 6, 7, 8},
		{5, 9, 8, 7, 6, 0, 4, 3, 2, 1},
		{6, 5, 9, 8, 7, 1, 0, 4, 3, 2},
		{7, 6, 5, 9, 8, 2, 1, 0, 4, 3},
		{8, 7, 6, 5, 9, 3, 2, 1, 0, 4},
		{9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
	}
	// p: position-dependent permutation table (cycle length 8).
	p := [8][10]int{
		{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
		{1, 5, 7, 6, 2, 8, 3, 0, 9, 4},
		{5, 8, 0, 3, 7, 9, 6, 1, 4, 2},
		{8, 9, 1, 6, 0, 4, 3, 5, 2, 7},
		{9, 4, 5, 3, 1, 2, 6, 8, 7, 0},
		{4, 2, 8, 6, 5, 7, 3, 9, 0, 1},
		{2, 7, 9, 3, 8, 0, 6, 4, 1, 5},
		{7, 0, 4, 6, 9, 1, 3, 2, 5, 8},
	}
	// inv: maps the final checksum to its check digit.
	inv := []int{0, 4, 3, 2, 1, 5, 6, 7, 8, 9}
	for ; times > 0; times-- {
		c := 0
		for i := len(num) - 1; i >= 0; i-- {
			c = d[c][p[(len(num)-i)%8][num[i]-'0']]
		}
		num += strconv.FormatInt(int64(inv[c]), 10)
	}
	return num
}

// arc4 RC4-encrypts msg with key and returns the ciphertext as
// uppercase hex.
func arc4(msg string, key string) string {
	// Key-scheduling algorithm.
	var state [256]int
	for i := 0; i < 256; i++ {
		state[i] = i
	}
	j := 0
	for i := 0; i < 256; i++ {
		j = (j + state[i] + int(key[i%len(key)])) % 256
		state[i], state[j] = state[j], state[i]
	}
	// Keystream generation; FIX: the original built the output with
	// string += in a loop (quadratic) and a final ToUpper pass — use a
	// Builder and the %02X verb instead (same bytes, one pass).
	x, y := 0, 0
	var out strings.Builder
	out.Grow(2 * len(msg))
	for i := 0; i < len(msg); i++ {
		x = (x + 1) % 256
		y = (state[x] + y) % 256
		state[x], state[y] = state[y], state[x]
		fmt.Fprintf(&out, "%02X", int(msg[i])^state[(state[x]+state[y])%256])
	}
	return out.String()
}

// base64 renders a positive integer in the custom 64-character alphabet.
// NOTE(review): returns "" for input 0, matching the original behavior —
// confirm callers never pass 0.
func base64(number int) string {
	const dic = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/"
	result := ""
	for number > 0 {
		result = string(dic[number%64]) + result
		number /= 64 // FIX: plain integer division replaces the redundant math.Floor round-trip
	}
	return result
}

// ControlCode computes the invoice control code from the authorization
// number, invoice number, NIT, date, total and dosage key.
// FIX: removed a leftover debug fmt.Println of the intermediate cipher.
func ControlCode(auth string, number string, nit string, date string, total string, key string) string {
	// Extend each numeric input with two Verhoeff check digits.
	number = verhoeff(number, 2)
	nit = verhoeff(nit, 2)
	date = verhoeff(date, 2)
	total = verhoeff(total, 2)

	parseInt := func(n string) int64 {
		i, _ := strconv.ParseInt(n, 10, 64)
		return i
	}
	// Five Verhoeff digits over the sum of the four extended numbers.
	vf := verhoeff(strconv.FormatInt(
		parseInt(number)+parseInt(nit)+parseInt(date)+parseInt(total), 10), 5)
	vf = vf[len(vf)-5:]

	// Interleave each input with a key fragment whose length is driven
	// by the matching vf digit.
	code := ""
	input := []string{auth, number, nit, date, total}
	idx := 0
	for i := 0; i < 5; i++ {
		code += input[i] + key[idx:idx+1+int(vf[i]-'0')]
		idx += 1 + int(vf[i]-'0')
	}
	code = arc4(code, key+vf)

	// Byte sums over the ciphertext, bucketed by position mod 5.
	finalSum := 0
	totalSum := 0
	partialSums := []int{0, 0, 0, 0, 0}
	for i := 0; i < len(code); i++ {
		partialSums[i%5] += int(code[i])
		totalSum += int(code[i])
	}
	for i := 0; i < 5; i++ {
		// All operands are non-negative, so integer division equals the
		// original math.Floor over the same (already-integer) quotient.
		finalSum += totalSum * partialSums[i] / (1 + int(vf[i]-'0'))
	}

	// Encrypt the base64 of the final sum and hyphenate in pairs.
	return strings.Join(pairRE.FindAllString(arc4(base64(finalSum), key+vf), -1), "-")
}
package context

import (
	"log"

	"github.com/jinzhu/gorm"
	"github.com/kivutar/chainz/model"
	// Side effect import of postgres
	_ "github.com/jinzhu/gorm/dialects/postgres"
)

// OpenDB creates the connection to the database using config.DBURL.
//
// WARNING(review): every call drops the Author and Book tables before
// re-creating them via AutoMigrate — all existing data is lost on each
// startup. Confirm this is intended outside development environments.
func OpenDB(config *Config) (*gorm.DB, error) {
	log.Println("Database is connecting... ")
	db, err := gorm.Open("postgres", config.DBURL)
	if err != nil {
		return nil, err
	}
	// Destructive schema reset, then re-creation with SQL logging
	// enabled (Debug).
	db.DropTableIfExists(&model.Author{}, &model.Book{})
	db.Debug().AutoMigrate(&model.Author{}, &model.Book{})
	log.Println("Database is connected ")
	return db, nil
}
package register

import "github.com/MintegralTech/juno/operation"

// FieldMap maps a field name to its registered operation.
// NOTE(review): this is package-global mutable state, and NewRegister
// replaces it wholesale — constructing a second Register discards every
// previously registered operation. Confirm only one Register is created.
var FieldMap map[string]operation.Operation

// Register registers operations by field name into the global FieldMap.
type Register struct {
}

// NewRegister allocates a fresh FieldMap (capacity hint 16) and returns
// a new Register.
func NewRegister() *Register {
	FieldMap = make(map[string]operation.Operation, 16)
	return &Register{}
}

// Register stores e as the operation associated with fieldName.
func (r *Register) Register(fieldName string, e operation.Operation) {
	FieldMap[fieldName] = e
}
package main import ( "context" "encoding/json" "fmt" "log" "net/http" "os" "os/exec" "strings" "time" "github.com/docker/docker/client" "github.com/gorilla/mux" ) var PORT string func init() { PORT = os.Getenv("PORT") if len(PORT) == 0 { PORT = "8080" } } func main() { // Create Server and Route Handlers r := mux.NewRouter() r.HandleFunc("/deploy", handleDeploy) srv := &http.Server{ Handler: r, Addr: ":" + PORT, // ReadTimeout: 20 * time.Second, // WriteTimeout: 20 * time.Second, } log.Println("Starting Server on port - " + PORT) if err := srv.ListenAndServe(); err != nil { log.Fatal(err) } } func handleDeploy(w http.ResponseWriter, r *http.Request) { (w).Header().Set("Access-Control-Allow-Origin", "*") repo := r.FormValue("repo") port := r.FormValue("port") if len(repo) == 0 { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode("no 'repo' provided") return } addr, err := generateContainer(repo, port) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(err.Error()) return } fmt.Println("running on address " + addr) fmt.Println("complete...") w.WriteHeader(http.StatusOK) json.NewEncoder(w).Encode(map[string]string{"port": addr}) } func generateContainer(repo, port string) (string, error) { // get container name ss := strings.Split(repo, "/") name := strings.Split(ss[len(ss)-1], ".")[0] // init docker client fmt.Println("initializing docker client...") cli, err := client.NewClientWithOpts(client.WithVersion("1.37")) if err != nil { return "", err } // remove container if already exists fmt.Println("removing container if already exists...") if container, err := cli.ContainerInspect(context.Background(), name); err == nil { cmd := exec.Command("docker", "rm", "-f", "/"+container.Name) _, err := cmd.CombinedOutput() if err != nil { return "", err } } // generate project container fail := make(chan error) go func() { fmt.Println("generating container...") cmd := exec.Command("bash", "build.sh", repo, port) out, err := 
cmd.CombinedOutput() if err != nil { fail <- fmt.Errorf(string(out)) } }() // get project address fmt.Println("getting container address...") for { select { case f := <-fail: return "", f default: container, err := cli.ContainerInspect(context.Background(), name) if err != nil { continue } for _, ports := range container.ContainerJSONBase.HostConfig.PortBindings { fmt.Println("accessing address...") return ports[0].HostPort, nil } } time.Sleep(2 * time.Second) } }
package handlers

import (
	"time"

	cors "github.com/rs/cors/wrapper/gin"

	"api-gaming/internal/config"

	"github.com/gin-gonic/gin"

	"context"

	"github.com/shaj13/go-guardian/auth"
	"github.com/shaj13/go-guardian/auth/strategies/bearer"
	"github.com/shaj13/go-guardian/store"
)

// Package-level server state: the gin router plus the go-guardian
// authenticator and its token cache (set up by setupGoGuardian).
var router = gin.Default()
var authenticator auth.Authenticator
var cache store.Cache

// Run will start the server
func Run() {
	getHandlers()
	// Listen on port 9990; any startup error from gin is not checked
	// here (NOTE(review): consider handling router.Run's return value).
	router.Run(":9990")
}

/* Setup GoGuardian - A simple clean, and idomatic way
 * to create a powerful modern API and web authentication.
 * Sole purpoose is to authenticate requests, which it does
 * through an extensible set of authentication methods known
 * as strategies.
 */
func setupGoGuardian() {
	authenticator = auth.New()
	// Token cache with a 5-minute FIFO eviction.
	cache = store.NewFIFO(context.Background(), time.Minute*5)
	// Bearer-token strategy backed by verifyToken (defined elsewhere in
	// this package).
	tokenStrategy := bearer.New(verifyToken, cache)
	authenticator.EnableStrategy(bearer.CachedStrategyKey, tokenStrategy)
}

/*func handlerMiddleware() gin.HandlerFunc {
}*/

// Get handlers will create our routes of our entire application
// this way every group of routes can be defined in their own file
// so this one won't be so messy
func getHandlers() {
	// Permissive CORS: all origins/headers, common methods, 12h max age.
	corsConfig := cors.New(cors.Options{
		AllowedOrigins: []string{"*"},
		AllowedMethods: []string{"PUT", "PATCH", "GET", "POST", "DELETE", "OPTIONS"},
		AllowedHeaders: []string{"*"},
		MaxAge:         int(12 * time.Hour),
	})

	// Set up application middlewares
	router.Use(corsConfig)
	router.Use(config.DBMiddleware(config.DBCONN))

	setupGoGuardian()

	// Route groups defined in their own files.
	addUserHandlers()
	addVideoHandler()
}
package caaa

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document00400104 is the XML envelope for the caaa.004.001.04
// AcceptorCompletionAdviceResponse message.
type Document00400104 struct {
	XMLName xml.Name                             `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.004.001.04 Document"`
	Message *AcceptorCompletionAdviceResponseV04 `xml:"AccptrCmpltnAdvcRspn"`
}

// AddMessage allocates the document's message payload and returns it for
// further population.
func (d *Document00400104) AddMessage() *AcceptorCompletionAdviceResponseV04 {
	d.Message = new(AcceptorCompletionAdviceResponseV04)
	return d.Message
}

// The AcceptorCompletionAdviceResponse message is sent by the acquirer (or its agent) to acknowledge the acceptor (or its agent) of the outcome of the payment transaction, and the transfer the financial data of the transaction contained in the completion advice.
type AcceptorCompletionAdviceResponseV04 struct {

	// Completion advice response message management information.
	Header *iso20022.Header11 `xml:"Hdr"`

	// Information related to the completion advice response.
	CompletionAdviceResponse *iso20022.AcceptorCompletionAdviceResponse4 `xml:"CmpltnAdvcRspn"`

	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType11 `xml:"SctyTrlr"`
}

// AddHeader allocates and returns the message management header.
func (a *AcceptorCompletionAdviceResponseV04) AddHeader() *iso20022.Header11 {
	a.Header = new(iso20022.Header11)
	return a.Header
}

// AddCompletionAdviceResponse allocates and returns the advice response
// body.
func (a *AcceptorCompletionAdviceResponseV04) AddCompletionAdviceResponse() *iso20022.AcceptorCompletionAdviceResponse4 {
	a.CompletionAdviceResponse = new(iso20022.AcceptorCompletionAdviceResponse4)
	return a.CompletionAdviceResponse
}

// AddSecurityTrailer allocates and returns the MAC trailer.
func (a *AcceptorCompletionAdviceResponseV04) AddSecurityTrailer() *iso20022.ContentInformationType11 {
	a.SecurityTrailer = new(iso20022.ContentInformationType11)
	return a.SecurityTrailer
}
package common // Skips duplicate messages (based on .ID) func Dedup(messageChan chan LogMessage) chan LogMessage { resultChan := make(chan LogMessage) idCache := make(map[string]bool) go func() { for message := range messageChan { if !idCache[message.ID] { resultChan <- message idCache[message.ID] = true } } close(resultChan) }() return resultChan }
// NOTE(review): this snippet is not valid Go — there is no package
// clause, `a` and `b` are undefined, `z` is unused, `fac` is called with
// the wrong arity/argument types below, and a bare block follows the
// function at top level. It looks like deliberately malformed input for
// a parser/type-checker test, so the code is left unchanged.
func fac(x int, y int) int {
	if x < 1 {
		return 1
	} else {
		// y here shadows the parameter with the recursive result.
		y := fac(x - 1);
		z := a * b;
		return x * y
	}
}

{
	x := fac(1, false);
	y := fac()
}
package util

import "encoding/binary"

// HostTo2Net converts a uint16 from host byte order to network byte
// order (big-endian), returned as a 2-byte slice.
func HostTo2Net(n uint16) []byte {
	b := make([]byte, 2)
	binary.BigEndian.PutUint16(b, n)
	return b
}

// HostTo4Net converts a uint32 from host byte order to network byte
// order (big-endian), returned as a 4-byte slice.
func HostTo4Net(n uint32) []byte {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, n)
	return b
}

// HostTo8Net converts a uint64 from host byte order to network byte
// order (big-endian), returned as an 8-byte slice.
func HostTo8Net(n uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, n)
	return b
}
package repository; // UserRepository handles user manipulations in the database type UserRepository struct { } // ProvideUserRepository is the provider for UserRepository func ProvideUserRepository() (*UserRepository, error) { return &UserRepository{}, nil }
/* * Work on parallel processing * * */ package main import ( "bufio" "fmt" "os" "strings" "strconv" ) func main() { // Printing to the command line // fmt.Println(buildCorrectedGFF("incorrectFormat.gff")) // Call buildCorrectedGFF buildCorrectedGFF("inccorectlyFormated.gff") } /* * This function will attempt to correct a given GFF files format, according to the GFF3 specifications maintained by The Sequence Oncology Project * @params: a string representing the path for the gff file to be formatted */ func buildCorrectedGFF(path string) { // Run function readlines to read in data data, err := readLines(path) // Print error if it arises if err != nil { fmt.Println(err) } // A slice to hold our corrected data var correctedLines []string // Counter to give lines unique ID's if neccecary IDCounter := 0 // A for each loop to move through each line of the GFF for _, lines := range data { // Split each line at its tab splitAtTab := strings.Split(lines, "\t") // Stores the element of each line element := splitAtTab[2] // Iterate through ever line. Save each one in memory that is not a "start_codon" or "stop_codon" item. if element == "gene" || element == "CDS" || element == "exon" || element == "mRNA"|| element == "UTR" || element == "rRNA" { // Split up the attributes at each semicolon attributes := strings.Split(splitAtTab[8], "; ") // Add back in the semicolon for each attribute for index, attribute := range attributes { attributes[index] = attribute + ";" } /*THIS WAS WRITTEN LIKE THIS TO SAVE TIME. ASSUMES THAT THE GFF HAS AN INCORRECTLY FORMATED NAME ATTRIBUTE*/ /*TO HANDLE MORE CASES, IF STATEMENTS LIKE THE ONE BELOW THAT CHECKS FOR AN ID SHOULD BE WRITTEN*/ // Fixes the name attribute name_attribute := strings.Split(attributes[0], " ") attributes[0] = "Name=" + name_attribute[1] // Checks if attributes have an ID. 
If not prepends it to attributes if !(stringInSlice("ID", attributes)) { // Need a counter for each of the element types IDCounter += 1 // Prepend unique ID to attributes attributes = append([]string{"ID=" + element + strconv.Itoa(IDCounter) +";"}, attributes...) } // Loops through attributes and replaces all spaces with = signs for index, _ := range attributes { attributes[index] = strings.Replace(attributes[index], " ", "=", 1) } /*THIS IS WHERE THE CORRECTED LINES ARE APPENED TO A SLICE*/ // This will append each line of data, from coloums 0 to 7, and then the fixed attributes, followed by a newline, to corrected lines correctedLines = append(correctedLines, strings.Join(append(splitAtTab[:8]), "\t")) correctedLines = append(correctedLines,"\t") correctedLines = append(correctedLines, attributes...) correctedLines = append(correctedLines,"\n") } else {fmt.Println("Line element does not follow GFF3 convention: \n" + lines)} } // If the GFF file has no GFF version, this will append a directive to the beginning of the file. if !(strings.Contains(correctedLines[0], "##gff-version")) { correctedLines = append([]string{"##gff-version\t3\n"}, correctedLines...) } // Write the corrected lines to a file writeFile(path, correctedLines) } /* * This function will return true if any elements in a input slice of strings, list, contains a substring, a, and false otherwise * @params: a: substring to search for in the slice of strings * @params list: a slice of strings to query * @return true if any element of list[] contains string a. 
*/ func stringInSlice(a string, list []string) bool { for _, b := range list { if strings.Contains(b,a) { return true } } return false } /* * This function will write the corrected GFF file, as returned by buildCorrectedGFF as a string splice, to a created file named * <name of input file>_CORRECTED.gff" in the same directory the program was ran * @params: a string representing the path for the GFF file to be formatted * @return any error returned by the WriteString function. */ func writeFile(path string, sliceToWrite []string) (int, error) { // From the file path, produces a string with its name.gff changed to name_CORRECTED.gff // Savs this as newFilenameString newFilenameSlice := strings.Split(path, "/") newFilenameSlice = strings.Split(newFilenameSlice[len(newFilenameSlice)-1], ".") newFilenameSlice[0] = newFilenameSlice[0] + "_FORMATFIXED" newFilenameString := strings.Join(append(newFilenameSlice), ".") f, err := os.Create(newFilenameString) // Defer closing of the file defer f.Close() // Create new Writer object writer := bufio.NewWriter(f) // Write each line of the returned []string array containing the corrected lines of the GFF file at path n4, err := writer.WriteString(strings.Join(append(sliceToWrite), "")) return n4, err } /* * Function will open GFF file and store it in memory * @params: a string representing the path for the GFF file to be formatted * @return: a string splice containing each line of the GFF file, and any errors reported by the compiler */ func readLines(path string) ([]string, error) { // Open the file file, err := os.Open(path) // Catch any erros if err != nil { return nil, err } // Close the file only if it was opened successfully and there are no errors defer file.Close() // Declare a splice to store each line of the GFF file var lines []string // Create new scanner object for the file scanner := bufio.NewScanner(file) // Scan the file using our scanner object. 
Append each line to splice lines for scanner.Scan() { lines = append(lines, scanner.Text()) } return lines, scanner.Err() }
package frida_go

import (
	"github.com/a97077088/frida-go/cfrida"
	"unsafe"
)

// Relay kinds, mirroring frida's TURN relay transport variants.
const (
	RELAY_KIND_TURN_UDP = iota
	RELAY_KIND_TURN_TCP
	RELAY_KIND_TURN_TLS
)

// RelayKind identifies the transport used by a TURN relay.
type RelayKind int

// Relay wraps a native frida relay object.
type Relay struct {
	CObj
}

// Free releases the underlying native object reference.
func (r *Relay) Free() {
	cfrida.G_object_unref(r.instance)
}

// NewRelay creates a Relay for the given TURN server address,
// credentials and relay kind. A finalizer releases the native object
// when the Go value is collected.
func NewRelay(address string, username string, password string, kind RelayKind) *Relay {
	rel := &Relay{}
	rel.instance = cfrida.Frida_relay_new(address, username, password, int(kind))
	rel.ptr = unsafe.Pointer(rel.instance)
	setFinalizer(rel, (*Relay).Free)
	return rel
}
package main

import "log"
import "net/http"

// checkLinks probes every URL concurrently over HTTPS and logs how
// many of them could not be reached.
func checkLinks(urls []string) {
	c := make(chan string)
	for _, url := range urls {
		go checkLink(url, c)
	}

	reportFails := 0
	for i := 0; i < len(urls); i++ {
		if <-c != "success" {
			reportFails++
		}
	}
	if reportFails > 0 {
		log.Printf("Encountered %d failures.\n", reportFails)
	}
}

// checkLink fetches https://<url> and reports "success" or "failed"
// on c.
// NOTE(review): http.Get uses the default client, which has no
// timeout — consider a client with a Timeout for unreachable hosts.
func checkLink(url string, c chan string) {
	log.Printf("Checking %s\n", url)

	httpsUrl := "https://" + url
	resp, err := http.Get(httpsUrl)
	if err != nil {
		log.Println(httpsUrl, "could not be reached.")
		c <- "failed"
		return
	}
	// Fixed: the response body was previously discarded without being
	// closed, leaking the underlying connection.
	resp.Body.Close()
	log.Println(httpsUrl, "is online.")
	c <- "success"
}
package main

import (
	"fmt"
	"net"
	"io"
	"flag"
)

// handleConn bridges an accepted client connection with a fresh TCP
// connection to raddr, copying bytes in both directions and closing
// both sockets once both directions have finished.
func handleConn(conn *net.TCPConn, raddr *net.TCPAddr) {
	remote, err := net.DialTCP("tcp", nil, raddr)
	if err != nil {
		fmt.Println("dial remote fail. err:", err)
		conn.Close()
		return
	}

	// Buffered with capacity 2 so neither pipe blocks when signalling.
	finish := make(chan bool, 2)
	go pipe(conn, remote, finish)
	pipe(remote, conn, finish)

	// Wait for both copy directions to complete before closing.
	// NOTE(review): each io.Copy only returns once its source side
	// closes or errors, so shutdown relies on the peers closing their
	// ends — confirm this is the intended lifecycle.
	<-finish
	<-finish
	conn.Close()
	remote.Close()
}

// pipe copies everything from src to dst, then signals completion on
// finish. Copy errors are intentionally ignored (best-effort proxy).
func pipe(src, dst *net.TCPConn, finish chan bool) {
	defer func() {
		//src.CloseRead()
		//dst.CloseWrite()
		finish <- true
	}()
	io.Copy(dst, src)
}

// main parses the -l/-r flags, listens on the local TCP address and
// proxies every accepted connection to the remote address.
func main() {
	var laddr string
	var raddr string
	flag.StringVar(&laddr, "l", "", "local address")
	flag.StringVar(&raddr, "r", "", "remote address")
	flag.Parse()

	if laddr == "" || raddr == "" {
		fmt.Printf("-l localAddress -r remoteAddress\n")
		return
	}

	a, err := net.ResolveTCPAddr("tcp", laddr)
	if err != nil {
		fmt.Println(err)
		return
	}
	b, err := net.ResolveTCPAddr("tcp", raddr)
	if err != nil {
		fmt.Println(err)
		return
	}

	ln, err := net.ListenTCP("tcp", a)
	if err != nil {
		fmt.Println("listen error:", err)
		return
	}
	defer ln.Close()

	fmt.Println("listening on ", ln.Addr())
	fmt.Println("remote address: ", raddr)

	// Accept loop: each connection is proxied on its own goroutine.
	for {
		conn, err := ln.AcceptTCP()
		if err != nil {
			fmt.Println("accept err:", err)
			continue
		}
		go handleConn(conn, b)
	}
}
package rest

import (
	"io/ioutil"
	"net/http"

	"github.com/cosmos/cosmos-sdk/client/context"
	"github.com/cosmos/cosmos-sdk/client/utils"
	"github.com/cosmos/cosmos-sdk/codec"
	"github.com/cosmos/cosmos-sdk/crypto/keys/keyerror"
	"github.com/cosmos/cosmos-sdk/x/auth"
	authtxb "github.com/cosmos/cosmos-sdk/x/auth/client/txbuilder"
)

// SignBody defines the properties of a sign request's body.
type SignBody struct {
	Tx               auth.StdTx `json:"tx"`
	LocalAccountName string     `json:"name"`
	Password         string     `json:"password"`
	ChainID          string     `json:"chain_id"`
	AccountNumber    uint64     `json:"account_number"`
	Sequence         uint64     `json:"sequence"`
	AppendSig        bool       `json:"append_sig"`
}

// nolint: unparam
// SignTxRequestHandlerFn returns an HTTP handler that signs the
// transaction supplied in the request body using the named local key.
func SignTxRequestHandlerFn(cdc *codec.Codec, cliCtx context.CLIContext) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			utils.WriteErrorResponse(w, http.StatusBadRequest, err.Error())
			return
		}

		var req SignBody
		if err := cdc.UnmarshalJSON(body, &req); err != nil {
			utils.WriteErrorResponse(w, http.StatusBadRequest, err.Error())
			return
		}

		txBldr := authtxb.TxBuilder{
			ChainID:       req.ChainID,
			AccountNumber: req.AccountNumber,
			Sequence:      req.Sequence,
		}

		signedTx, err := txBldr.SignStdTx(req.LocalAccountName, req.Password, req.Tx, req.AppendSig)
		switch {
		case keyerror.IsErrKeyNotFound(err):
			utils.WriteErrorResponse(w, http.StatusBadRequest, err.Error())
			return
		case keyerror.IsErrWrongPassword(err):
			utils.WriteErrorResponse(w, http.StatusUnauthorized, err.Error())
			return
		case err != nil:
			utils.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
			return
		}

		utils.PostProcessResponse(w, cdc, signedTx, cliCtx.Indent)
	}
}
//宣告程式屬於哪個package package main //引入套件 import ( "fmt" ) //常數宣告 const ip string = "127.0.0.1" var ip2 string = "" //主程式 func main(){ //使用:= 簡化變數宣告 var word string = "Hello World!!" word := "Hello World!!" //使用fmt 套件印出字串word fmt.Println(word) fmt.Println("MyIp:"+ip) //change my ip const can't change | var can change ip2 = "192.168.0.1" fmt.Println("change ip2 :"+ip2) }
// Package syncutil contains methods for working with sync code. package syncutil import ( "sync" ) // A OnceMap is a collection sync.Onces accessible by a key. The zero value is usable. type OnceMap[T comparable] struct { mu sync.Mutex m map[T]*sync.Once } // Do runs f once. func (o *OnceMap[T]) Do(key T, f func()) { o.mu.Lock() if o.m == nil { o.m = make(map[T]*sync.Once) } oo, ok := o.m[key] if !ok { oo = new(sync.Once) o.m[key] = oo } o.mu.Unlock() oo.Do(f) }
package main

import (
	"database/sql"
	"fmt"
	"os"

	_ "github.com/lib/pq"

	"net/http"
	"encoding/json"
)

// main registers the handler and starts the HTTP server on PORT
// (default 9100).
func main() {
	http.HandleFunc("/", getAll)
	PORT := getenv("PORT", "9100")
	http.ListenAndServe(":"+PORT, nil)
}

// getenv returns the value of the environment variable key, or
// fallback when it is unset.
func getenv(key, fallback string) string {
	if value, ok := os.LookupEnv(key); ok {
		return value
	}
	return fallback
}

// User mirrors one row of the users table.
type User struct {
	Id        int
	Age       int
	FirstName string
	Username  string
	Email     string
}

// getAll connects to Postgres, reads every row of the users table and
// writes the result as JSON.
// NOTE(review): a new connection pool is opened per request; consider
// opening it once in main and sharing it.
func getAll(w http.ResponseWriter, r *http.Request) {
	// Build the connection string from the environment.
	psqlInfo := fmt.Sprintf(
		"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable",
		getenv("DB_HOST", "app_db"),
		getenv("DB_PORT", "5432"),
		getenv("DB_USER", "dbUser"),
		getenv("DB_PASSWORD", "dbPassword"),
		getenv("DB_NAME", "dbName"),
	)
	db, err := sql.Open("postgres", psqlInfo)
	if err != nil {
		// Fixed: panicking in a handler tore down the connection;
		// report a proper 500 instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer db.Close()

	if err = db.Ping(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Println("DB has been successfully connected!")

	// request
	fmt.Println("Cool, we received request")

	// Fixed: the local variable was named `sql`, shadowing the
	// database/sql package import.
	query := "SELECT * FROM users;"
	rows, err := db.Query(query)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer rows.Close()

	users := make([]User, 0)
	for rows.Next() {
		var user User
		if err = rows.Scan(&user.Id, &user.Age, &user.FirstName, &user.Username, &user.Email); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		users = append(users, user)
	}
	// Surface any error encountered during iteration.
	if err = rows.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(users)
}
package components

import (
	"html/template"

	"github.com/GoAdminGroup/go-admin/plugins/admin/modules"
	"github.com/GoAdminGroup/go-admin/template/types"
)

// ImgAttribute renders an image component, optionally wrapped in a
// modal dialog.
type ImgAttribute struct {
	Name     string
	Width    string
	Height   string
	Uuid     string
	HasModal bool
	Src      template.URL
	types.Attribute
}

// SetWidth sets the image width; returns the component for chaining.
func (img *ImgAttribute) SetWidth(value string) types.ImgAttribute {
	img.Width = value
	return img
}

// SetHeight sets the image height; returns the component for chaining.
func (img *ImgAttribute) SetHeight(value string) types.ImgAttribute {
	img.Height = value
	return img
}

// WithModal marks the image as modal-wrapped and assigns it a fresh
// UUID for the modal markup.
func (img *ImgAttribute) WithModal() types.ImgAttribute {
	img.HasModal = true
	img.Uuid = modules.Uuid()
	return img
}

// SetSrc sets the image source URL; returns the component for chaining.
func (img *ImgAttribute) SetSrc(value template.HTML) types.ImgAttribute {
	img.Src = template.URL(value)
	return img
}

// GetContent renders the component through the "image" template.
func (img *ImgAttribute) GetContent() template.HTML {
	return ComposeHtml(img.TemplateList, img.Separation, *img, "image")
}
package rateutil

import (
	"math"
	"sync"
	"sync/atomic"
	"time"
)

// Rate keeps per-second request counters and response-time sums.
// Index 1 of each slice accumulates the current (in-progress) second;
// index 0 holds the last completed second, which readers report on.
type Rate struct {
	requestsCount   []int64
	responseTimeSum []int64
}

var rate *Rate
var once sync.Once

// GetRateCounter returns the process-wide Rate singleton, creating it
// and starting the once-per-second swap goroutine on first use.
// NOTE(review): the ticker goroutine runs for the process lifetime and
// is never stopped — confirm this is intended for the singleton.
func GetRateCounter() *Rate {
	once.Do(func() {
		rate = &Rate{
			requestsCount:   make([]int64, 2),
			responseTimeSum: make([]int64, 2),
		}
		go func() {
			timer := time.NewTicker(1 * time.Second)
			for {
				<-timer.C
				rate.swap()
			}
		}()
	})
	return rate
}

// swap promotes the in-progress second (index 1) to the reported slot
// (index 0) and resets the accumulators.
// NOTE(review): the load and the following store are individually
// atomic but not one atomic exchange, so hits landing mid-swap can be
// lost — acceptable for approximate metrics; verify if exactness matters.
func (rate *Rate) swap() {
	atomic.StoreInt64(&rate.requestsCount[0], atomic.LoadInt64(&rate.requestsCount[1]))
	atomic.StoreInt64(&rate.requestsCount[1], 0)
	atomic.StoreInt64(&rate.responseTimeSum[0], atomic.LoadInt64(&rate.responseTimeSum[1]))
	atomic.StoreInt64(&rate.responseTimeSum[1], 0)
}

// HitRequest records one request in the current second.
func (rate *Rate) HitRequest() {
	atomic.AddInt64(&rate.requestsCount[1], 1)
}

// RequestsPerSecond reports the number of requests counted during the
// last completed second.
func (rate *Rate) RequestsPerSecond() float64 {
	return float64(atomic.LoadInt64(&rate.requestsCount[0]))
}

// CommitResponseTime adds the time elapsed since requestStart, in
// microseconds, to the current second's response-time sum.
func (rate *Rate) CommitResponseTime(requestStart time.Time) {
	responseTime := time.Since(requestStart)
	atomic.AddInt64(&rate.responseTimeSum[1], responseTime.Microseconds())
}

// AverageResponseTime returns the mean response time of the last
// completed second in milliseconds, rounded to two decimal places, or
// 0 when no requests were recorded.
func (rate *Rate) AverageResponseTime() float64 {
	if rate.RequestsPerSecond() > 0 {
		val := float64(atomic.LoadInt64(&rate.responseTimeSum[0])) / rate.RequestsPerSecond() / 1000
		return math.Round(val*100) / 100
	}
	return 0
}
package main

import "fmt"

// maxOf returns the largest value in nums; ok is false when nums is
// empty. Extracted from main so the logic is reusable and testable.
func maxOf(nums []int) (max int, ok bool) {
	if len(nums) == 0 {
		return 0, false
	}
	max = nums[0]
	for _, n := range nums[1:] {
		if n > max {
			max = n
		}
	}
	return max, true
}

// main prints the maximum of a sample slice.
func main() {
	arrays := []int{1, 2, 232, 454, 2323, 354, 5656, 434354}
	if maximum, ok := maxOf(arrays); ok {
		fmt.Println(maximum)
	}
}
package admin

import (
	"github.com/apache/thrift/lib/go/thrift"
	"github.com/go-xe2/x/type/t"
	"github.com/go-xe2/xthrift/pdl"
)

// RegSvcUpdateResultArgs is the argument struct for the
// reg_svc_update_result call, readable and writable both as a static
// thrift.TStruct and as a pdl.DynamicStruct.
type RegSvcUpdateResultArgs struct {
	*pdl.TDynamicStructBase
	RegId int32  `thrift:"reg_id,1,required" json:"reg_id"`
	ParId int32  `thrift:"par_id,2,required" json:"par_id"`
	Name  string `thrift:"name,3,required" json:"name"`
	// fieldNameMaps maps both wire names and Go names to the canonical
	// field key used in fields.
	fieldNameMaps map[string]string
	// fields holds per-field id/type metadata plus a setter callback.
	fields map[string]*pdl.TStructFieldInfo
}

// Compile-time interface conformance checks.
var _ pdl.DynamicStruct = (*RegSvcUpdateResultArgs)(nil)
var _ thrift.TStruct = (*RegSvcUpdateResultArgs)(nil)

// NewRegSvcUpdateResultArgs allocates the struct and registers its
// dynamic field metadata.
func NewRegSvcUpdateResultArgs() *RegSvcUpdateResultArgs {
	inst := &RegSvcUpdateResultArgs{
		fieldNameMaps: make(map[string]string),
		fields:        make(map[string]*pdl.TStructFieldInfo),
	}
	inst.TDynamicStructBase = pdl.NewBasicStruct(inst)
	return inst.init()
}

// init registers, for each field, its name aliases (Go name and wire
// name) and a typed setter used by the dynamic-struct machinery.
func (p *RegSvcUpdateResultArgs) init() *RegSvcUpdateResultArgs {
	p.fieldNameMaps["RegId"] = "RegId"
	p.fieldNameMaps["reg_id"] = "RegId"
	p.fields["RegId"] = pdl.NewStructFieldInfo(1, thrift.I32, func(obj pdl.DynamicStruct, val interface{}) bool {
		thisObj := obj.(*RegSvcUpdateResultArgs)
		n32 := t.Int32(val)
		thisObj.RegId = n32
		return true
	})
	p.fieldNameMaps["ParId"] = "ParId"
	p.fieldNameMaps["par_id"] = "ParId"
	p.fields["ParId"] = pdl.NewStructFieldInfo(2, thrift.I32, func(obj pdl.DynamicStruct, val interface{}) bool {
		thisObj := obj.(*RegSvcUpdateResultArgs)
		n32 := t.Int32(val)
		thisObj.ParId = n32
		return true
	})
	p.fieldNameMaps["Name"] = "Name"
	p.fieldNameMaps["name"] = "Name"
	p.fields["Name"] = pdl.NewStructFieldInfo(3, thrift.STRING, func(obj pdl.DynamicStruct, val interface{}) bool {
		thisObj := obj.(*RegSvcUpdateResultArgs)
		s := t.String(val)
		thisObj.Name = s
		return true
	})
	return p
}

// Read deserializes the struct from the protocol. Fields are matched
// by field id when the protocol supplies one (> 0), otherwise by wire
// name; unknown fields are skipped.
func (p *RegSvcUpdateResultArgs) Read(in thrift.TProtocol) error {
	_, err := in.ReadStructBegin()
	if err != nil {
		return err
	}
	// Guard against corrupt streams that never send thrift.STOP:
	// bail out after a bounded number of field iterations.
	var nMaxLoop = 512
	nLoop := 0
	var isMatch bool
	for {
		nLoop++
		if nLoop >= nMaxLoop {
			_ = in.Skip(thrift.STRUCT)
			return nil
		}
		isMatch = false
		fdName, fdType, fdId, err := in.ReadFieldBegin()
		if err != nil {
			return err
		}
		if fdType == thrift.STOP {
			break
		}
		if fdType == thrift.VOID {
			if err := in.ReadFieldEnd(); err != nil {
				return err
			}
			continue
		}
		// Field 1 / "reg_id": require I32 when matched by id.
		if (fdId > 0 && fdId == 1) || (fdId <= 0 && fdName == "reg_id") {
			if fdId > 0 && fdType != thrift.I32 {
				if err := in.Skip(fdType); err != nil {
					return err
				}
				if err := in.ReadFieldEnd(); err != nil {
					return err
				}
				continue
			}
			isMatch = true
			n, err := in.ReadI32()
			if err != nil {
				return err
			}
			p.RegId = n
		}
		// Field 2 / "par_id": require I32 when matched by id.
		if (fdId > 0 && fdId == 2) || (fdId <= 0 && fdName == "par_id") {
			if fdId > 0 && fdType != thrift.I32 {
				if err := in.Skip(fdType); err != nil {
					return err
				}
				if err := in.ReadFieldEnd(); err != nil {
					return err
				}
				continue
			}
			isMatch = true
			n, err := in.ReadI32()
			if err != nil {
				return err
			}
			p.ParId = n
		}
		// Field 3 / "name": require STRING when matched by id.
		if (fdId > 0 && fdId == 3) || (fdId <= 0 && fdName == "name") {
			if fdId > 0 && fdType != thrift.STRING {
				if err := in.Skip(fdType); err != nil {
					return err
				}
				if err := in.ReadFieldEnd(); err != nil {
					return err
				}
				continue
			}
			isMatch = true
			s, err := in.ReadString()
			if err != nil {
				return err
			}
			p.Name = s
		}
		// Unrecognised field: skip its payload.
		if !isMatch {
			if err := in.Skip(fdType); err != nil {
				return err
			}
		}
		if err := in.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := in.ReadStructEnd(); err != nil {
		return err
	}
	return nil
}

// Write serializes the three fields, in id order, followed by a field
// stop marker.
func (p *RegSvcUpdateResultArgs) Write(out thrift.TProtocol) error {
	if err := out.WriteStructBegin("reg_svc_update_result_args"); err != nil {
		return err
	}
	if err := out.WriteFieldBegin("reg_id", thrift.I32, 1); err != nil {
		return err
	}
	if err := out.WriteI32(p.RegId); err != nil {
		return err
	}
	if err := out.WriteFieldEnd(); err != nil {
		return err
	}
	if err := out.WriteFieldBegin("par_id", thrift.I32, 2); err != nil {
		return err
	}
	if err := out.WriteI32(p.ParId); err != nil {
		return err
	}
	if err := out.WriteFieldEnd(); err != nil {
		return err
	}
	if err := out.WriteFieldBegin("name", thrift.STRING, 3); err != nil {
		return err
	}
	if err := out.WriteString(p.Name); err != nil {
		return err
	}
	if err := out.WriteFieldEnd(); err != nil {
		return err
	}
	if err := out.WriteFieldStop(); err != nil {
		return err
	}
	if err := out.WriteStructEnd(); err != nil {
		return err
	}
	return nil
}

// NewInstance returns a fresh, fully initialized instance; part of the
// pdl.DynamicStruct interface.
func (p *RegSvcUpdateResultArgs) NewInstance() pdl.DynamicStruct {
	return NewRegSvcUpdateResultArgs()
}

// AllFields exposes the registered field metadata.
func (p *RegSvcUpdateResultArgs) AllFields() map[string]*pdl.TStructFieldInfo {
	return p.fields
}

// FieldNameMaps exposes the name-alias table.
func (p *RegSvcUpdateResultArgs) FieldNameMaps() map[string]string {
	return p.fieldNameMaps
}
package types

import (
	"bytes"
	"encoding/json"
	"errors"
	"time"

	"github.com/shopspring/decimal"
)

// Layouts used by the Date and Datetime wrappers.
const (
	DateFormat     = "2006-01-02"
	DatetimeFormat = "2006-01-02 15:04:05"
)

// Date wraps time.Time with a day-granularity format. Empty records
// whether an empty textual value was decoded.
type Date struct {
	time.Time
	Empty bool
}

// UnmarshalJSON parses a quoted "2006-01-02" value; JSON null leaves
// the zero value in place.
// NOTE(review): unlike UnmarshalText, this never sets Empty — confirm
// whether null should mark the date as empty.
func (t *Date) UnmarshalJSON(data []byte) error {
	if string(data) == "null" {
		return nil
	}
	var err error
	t.Time, err = time.Parse(`"`+DateFormat+`"`, string(data))
	return err
}

// UnmarshalText parses an unquoted "2006-01-02" value; an empty input
// sets Empty and leaves the time untouched.
func (t *Date) UnmarshalText(data []byte) error {
	if len(data) == 0 {
		t.Empty = true
		return nil
	}
	var err error
	t.Time, err = time.Parse(DateFormat, string(data))
	return err
}

// String renders the date in DateFormat, or "" when Empty.
func (t Date) String() string {
	if t.Empty {
		return ""
	}
	return t.Format(DateFormat)
}

// GoString makes %#v output match String.
func (t Date) GoString() string {
	return t.String()
}

// Datetime wraps time.Time with second granularity.
type Datetime struct {
	time.Time
}

// UnmarshalJSON parses a quoted "2006-01-02 15:04:05" value; JSON null
// leaves the zero value in place.
func (t *Datetime) UnmarshalJSON(data []byte) error {
	if string(data) == "null" {
		return nil
	}
	var err error
	t.Time, err = time.Parse(`"`+DatetimeFormat+`"`, string(data))
	return err
}

// UnmarshalText parses an unquoted "2006-01-02 15:04:05" value.
func (t *Datetime) UnmarshalText(data []byte) error {
	var err error
	t.Time, err = time.Parse(DatetimeFormat, string(data))
	return err
}

// String renders the value in DatetimeFormat.
func (t Datetime) String() string {
	return t.Format(DatetimeFormat)
}

// GoString makes %#v output match String.
func (t Datetime) GoString() string {
	return t.String()
}

// MarketingConsent is a tri-state consent flag encoded on the wire as
// "" (unset), "0" (refused) or "1" (granted), optionally quoted.
type MarketingConsent int8

// NOTE(review): abc appears unused in this file — verify before removing.
const abc = 1

const (
	UNSET MarketingConsent = iota
	REFUSED
	GRANTED
)

// UnmarshalText decodes the consent value. Accepted inputs, by length:
// 3 bytes `"0"`/`"1"` (quoted digit), 1 byte `0`/`1`, 2 bytes `""`
// (quoted empty => UNSET), 0 bytes (empty => UNSET). Anything else is
// an error.
func (m *MarketingConsent) UnmarshalText(data []byte) error {
	switch len(data) {
	case 3:
		// Strip the surrounding quotes, then fall through to the
		// single-digit case.
		if data[0] == '"' && data[2] == '"' {
			data = data[1:2]
		}
		fallthrough
	case 1:
		switch data[0] {
		case '0':
			*m = REFUSED
		case '1':
			*m = GRANTED
		default:
			return errors.New("invalid marketing consent")
		}
	case 2:
		// Only `""` is acceptable at length 2; treat as unset.
		if data[0] != '"' || data[1] != '"' {
			return errors.New("invalid marketing consent")
		}
		fallthrough
	case 0:
		*m = UNSET
	default:
		return errors.New("invalid marketing consent")
	}
	return nil
}

// UnmarshalJSON reuses the text decoding (quotes are handled there).
func (m *MarketingConsent) UnmarshalJSON(data []byte) error {
	return m.UnmarshalText(data)
}

// MarshalText encodes the consent back to its wire form.
func (m MarketingConsent) MarshalText() ([]byte, error) {
	return []byte(m.String()), nil
}

// String returns "0", "1" or "" (for UNSET and any other value).
func (m MarketingConsent) String() string {
	switch m {
	case REFUSED:
		return "0"
	case GRANTED:
		return "1"
	}
	return ""
}

// PhpBool renders a boolean the way PHP serializes it: "1" / "0".
type PhpBool bool

var (
	phpTrue  = "1"
	phpFalse = "0"
)

// String returns "1" for true and "0" for false.
func (b *PhpBool) String() string {
	if *b {
		return phpTrue
	}
	return phpFalse
}

// CurrencyValue is a decimal that remembers how many fractional digits
// its textual source had, so it re-renders with the same precision.
type CurrencyValue struct {
	decimal.Decimal
	// fixed is the number of digits after the decimal point observed
	// when unmarshalling.
	fixed int32
}

// String renders the value with the recorded number of decimals.
func (c *CurrencyValue) String() string {
	return c.StringFixed(c.fixed)
}

// MarshalText re-encodes the value with its original precision.
func (c *CurrencyValue) MarshalText() ([]byte, error) {
	return []byte(c.String()), nil
}

// UnmarshalJSON expects a JSON string and delegates to UnmarshalText.
func (c *CurrencyValue) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	return c.UnmarshalText([]byte(s))
}

// UnmarshalText records the fractional-digit count (from the position
// of the last '.') and then parses the decimal itself.
func (c *CurrencyValue) UnmarshalText(b []byte) error {
	fixed := bytes.LastIndexByte(b, byte('.'))
	if fixed > 0 {
		c.fixed = int32(len(b[fixed+1:]))
	}
	return c.Decimal.UnmarshalText(b)
}
// Copyright 2018 The go-bindata Authors. All rights reserved. // Use of this source code is governed by a CC0 1.0 Universal (CC0 1.0) // Public Domain Dedication license that can be found in the LICENSE file. package bindata import ( "os" "path/filepath" "unicode" ) // asset holds information about a single asset to be processed. type asset struct { // path contains full file path. path string // name contains key used in TOC -- name by which asset is referenced. name string // Function name for the procedure returning the asset contents. funcName string // fi field contains the file information (to minimize calling os.Stat // on the same file while processing). fi os.FileInfo } func normalize(in string) (out string) { up := true for _, r := range in { if unicode.IsLetter(r) || unicode.IsDigit(r) { if up { out += string(unicode.ToUpper(r)) up = false } else { out += string(r) } continue } if r == '/' || r == '.' { up = true } } return out } // newAsset will create, initialize, and return new asset based on file // path or real path if its symlink. func newAsset(cfg *Config, path, name, realPath string, fi os.FileInfo) (ast *asset) { ast = &asset{ path: path, name: filepath.ToSlash(name), fi: fi, } if len(realPath) == 0 { ast.funcName = cfg.AssetPrefix + normalize(name) } else { ast.funcName = cfg.AssetPrefix + normalize(realPath) } return ast }
package repository

import (
	"database/sql"
	"fmt"

	"github.com/jinzhu/gorm"
	"github.com/radyatamaa/loyalti-go-echo/src/database"
	"github.com/radyatamaa/loyalti-go-echo/src/domain/model"
	"github.com/sirupsen/logrus"
)

// SpecialProgramRepository describes CRUD operations on special
// programs.
type SpecialProgramRepository interface {
	CreateSpecial(special *model.SpecialProgram) error
	UpdateSpecial(special *model.SpecialProgram) error
	DeleteSpecial(special *model.SpecialProgram) error
}

// special_repo implements SpecialProgramRepository.
// NOTE(review): the DB field is never used — every method opens its
// own connection via database.ConnectionDB(); confirm which is intended.
type special_repo struct {
	DB *gorm.DB
}

// CreateSpecialRepository returns a SpecialProgramRepository backed by
// the given gorm handle.
func CreateSpecialRepository(db *gorm.DB) SpecialProgramRepository {
	return &special_repo{
		DB: db,
	}
}

// CreateSpecial inserts a copy of the given special program.
func (p *special_repo) CreateSpecial(special *model.SpecialProgram) error {
	db := database.ConnectionDB()
	specialObj := *special
	err := db.Create(&specialObj).Error
	return err
}

// UpdateSpecial updates the row matching the program name.
func (p *special_repo) UpdateSpecial(special *model.SpecialProgram) error {
	db := database.ConnectionDB()
	err := db.Model(&special).Where("program_name = ?", special.ProgramName).Update(&special).Error
	return err
}

// DeleteSpecial soft-deletes by clearing the active flag on the row
// matching the program name.
func (p *special_repo) DeleteSpecial(special *model.SpecialProgram) error {
	db := database.ConnectionDB()
	err := db.Model(&special).Where("program_name = ?", special.ProgramName).Update("active", false).Error
	return err
}

// The block below is an earlier, unfinished draft of the repository
// kept for reference.
//type SpecialRepository interface {
//	CreateSpecial (newspecial *model.SpecialProgram) error
//	UpdateSpecial (newspecial *model.SpecialProgram) error
//	DeleteSpecial (newspecial *model.SpecialProgram) error
//}
//
//type special_repo struct {
//	DB *gorm.DB
//}
//
//func CreateSpecialRepository (db *gorm.DB) SpecialRepository {
//	return &special_repo{
//		DB:db,
//	}
//}
//
//func (p *special_repo) CreateSpecial (newspecial *model.SpecialProgram) error {
//	db:= database.ConnectionDB()
//	err := db.Create(&newspecial).Error
//	if err == nil {
//		fmt.Println("Tidak ada Error!")
//	}
//	return err
//}
//
//func (p *special_repo) UpdateSpecial (newspecial *model.SpecialProgram) error {
//	db := database.ConnectionDB()
//
//}

// CreateSpecial is a package-level variant that inserts a copy of the
// program and returns its name; database errors are ignored.
func CreateSpecial(special *model.SpecialProgram) string {
	db := database.ConnectionDB()
	specialObj := *special
	db.Create(&specialObj)
	return specialObj.ProgramName
}

// UpdateSpecial is a package-level variant that updates by program
// name and returns the name; database errors are ignored.
func UpdateSpecial(special *model.SpecialProgram) string {
	db := database.ConnectionDB()
	db.Model(&special).Where("program_name = ?", special.ProgramName).Update(&special)
	return special.ProgramName
}

// DeleteSpecial is a package-level variant that clears the active flag
// and returns a status string; database errors are ignored.
func DeleteSpecial(special *model.SpecialProgram) string {
	db := database.ConnectionDB()
	db.Model(&special).Where("program_name = ?", special.ProgramName).Update("active", false)
	return "berhasil dihapus"
}

// GetSpecialProgram lists special programs. page/size paginate, sort
// selects the ordering (1: created asc, 2: created desc, 3: name asc,
// 4: name desc; nil defaults to created desc when paginated), category
// filters by category_id, and id — when non-nil — overrides everything
// with a single-row lookup. Query errors panic.
func GetSpecialProgram(page *int, size *int, sort *int, category *int, id *int) []model.SpecialProgram {
	db := database.ConnectionDB()
	//db := database.ConnectPostgre()
	var program []model.SpecialProgram
	var rows *sql.Rows
	var err error
	// NOTE(review): Count(total) passes the int by value; gorm's Count
	// expects a pointer, so this count likely does nothing — verify.
	var total int
	if sort != nil {
		switch *sort {
		case 1:
			// created ascending.
			if page != nil && size != nil && category == nil {
				rows, err = db.Find(&program).Order("created asc").Count(total).Limit(*size).Offset(*page).Rows()
				fmt.Println("test")
				if err != nil {
					panic(err)
				}
			}
			if category != nil && page != nil && size != nil {
				rows, err = db.Where("category_id = ?", category).Find(&program).Order("created asc").Count(total).Limit(*size).Offset(*page).Rows()
				fmt.Println("apakah masuk")
				if err != nil {
					panic(err)
				}
			}
		case 2:
			// created descending.
			if page != nil && size != nil && category == nil {
				rows, err = db.Find(&program).Order("created desc").Count(total).Limit(*size).Offset(*page).Rows()
				if err != nil {
					panic(err)
				}
			}
			if category != nil && page != nil && size != nil {
				rows, err = db.Where("category_id = ?", category).Find(&program).Order("created desc").Count(total).Limit(*size).Offset(*page).Rows()
				if err != nil {
					panic(err)
				}
			}
		case 3:
			// program name ascending.
			if page != nil && size != nil && category == nil {
				rows, err = db.Find(&program).Order("program_name asc").Count(total).Limit(*size).Offset(*page).Rows()
				if err != nil {
					panic(err)
				}
			}
			if category != nil && page != nil && size != nil {
				rows, err = db.Where("category_id = ?", category).Find(&program).Order("program_name asc").Count(total).Limit(*size).Offset(*page).Rows()
				if err != nil {
					panic(err)
				}
			}
		case 4:
			// program name descending.
			if page != nil && size != nil && category == nil {
				rows, err = db.Find(&program).Order("program_name desc").Count(total).Limit(*size).Offset(*page).Rows()
				if err != nil {
					panic(err)
				}
				// NOTE(review): rows is closed here yet iterated below;
				// this branch most likely yields no data — verify.
				rows.Close()
			}
			if category != nil && page != nil && size != nil {
				rows, err = db.Where("category_id = ?", category).Find(&program).Order("program_name desc").Count(total).Limit(*size).Offset(*page).Rows()
				if err != nil {
					panic(err)
				}
			}
		}
	} else {
		if page != nil && size != nil {
			rows, err = db.Find(&program).Order("created desc").Count(total).Limit(*size).Offset(*page).Rows()
			if err != nil {
				panic(err)
			}
		} else {
			fmt.Println("masuk ga")
			rows, err = db.Find(&program).Rows()
			if err != nil {
				panic(err)
			}
		}
	}
	// Single-row lookup overrides any listing performed above.
	if id != nil {
		rows, err = db.Where("id = ?", id).First(&program).Rows()
		if err != nil {
			panic(err)
		}
	}
	result := make([]model.SpecialProgram, 0)
	for rows.Next() {
		t := new(model.SpecialProgram)
		fmt.Println(t)
		err = rows.Scan(
			&t.Id,
			&t.Created,
			&t.CreatedBy,
			&t.Modified,
			&t.ModifiedBy,
			&t.Active,
			&t.IsDeleted,
			&t.Deleted,
			&t.Deleted_by,
			&t.ProgramName,
			&t.ProgramImage,
			&t.ProgramStartDate,
			&t.ProgramEndDate,
			&t.ProgramDescription,
			&t.Card,
			&t.OutletID,
			&t.MerchantId,
			&t.CategoryId,
			&t.Benefit,
			&t.TermsAndCondition,
			&t.Tier,
			&t.RedeemRules,
			&t.RewardTarget,
			&t.QRCodeId,
		)
		// Look up the merchant name for the row's merchant id.
		// NOTE(review): the fetched merchant is currently unused (the
		// assignment below is commented out) — verify intent.
		merchant := new(model.Merchant)
		db.Table("merchants").
			Select("merchants.merchant_name").
			Where("id = ?", t.MerchantId).
			First(&merchant)
		//t.MerchantName = merchant.MerchantName
		if err != nil {
			logrus.Error(err)
			return nil
		}
		result = append(result, *t)
	}
	db.Close()
	return result
}
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// main fetches https://www.baidu.com and prints the response body.
func main() {
	res, err := http.Get("https://www.baidu.com")
	if err != nil {
		fmt.Println("http.Get err", err)
		return
	}
	// Fixed: the response body was never closed, leaking the
	// underlying connection.
	defer res.Body.Close()

	buf, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("ioutil.ReadAll err", err)
		return
	}
	fmt.Println(string(buf[:]))
}
package pg

import (
	"github.com/kyleconroy/sqlc/internal/sql/ast"
)

// RangeSubselect represents a subquery appearing in a FROM clause
// (optionally LATERAL), mirroring PostgreSQL's RangeSubselect node.
type RangeSubselect struct {
	Lateral  bool
	Subquery ast.Node
	Alias    *Alias
}

// Pos implements the ast.Node position method; source positions are
// not tracked for this node, so it always returns 0.
func (n *RangeSubselect) Pos() int {
	return 0
}
package main // START OMIT func (cp *connectionPool) GetWithTimeout(d time.Duration) (rv *memcached.Client, err error) { // short-circuit available connetions select { case rv, isopen := <-cp.connections: if !isopen { return nil, errClosedPool } return rv, nil default: } // END OMIT // START P2 OMIT // create a very short timer, 1ms t := time.NewTimer(ConnPoolAvailWaitTime) defer t.Stop() select { case rv, isopen := <-cp.connections: // connection became available if !isopen { return nil, errClosedPool } return rv, nil case <-t.C: // waited 1ms } } // END P2 OMIT func f() { select { case rv, isopen := <-cp.connections: // connection became available if !isopen { return nil, errClosedPool } return rv, nil case <-t.C: // START P3 OMIT t.Reset(d) // reuse original timer for full timeout select { case rv, isopen := <-cp.connections: // keep trying to get connection from main pool if !isopen { return nil, errClosedPool } return rv, nil case cp.createsem <- true: // create a new connection rv, err := cp.mkConn(cp.host, cp.auth) if err != nil { <-cp.createsem // buffer only allows poolSize + poolOverflow } return rv, err case <-t.C: // exceeded caller provided timeout return nil, ErrTimeout // END P3 OMIT } } }
package common

// Shared constant values used across the service.
const (
	// GreetingFormat is the fmt format string for greeting a user by name.
	GreetingFormat = "hello,%s!\n"
	// Subject names.
	Math    = "math"
	English = "english"
	Chinese = "chinese"
)
package mppuma

import (
	"flag"

	mp "github.com/mackerelio/go-mackerel-plugin"
)

// PumaPlugin mackerel plugin for Puma
type PumaPlugin struct {
	Prefix string
	Host   string
	Port   string
	Sock   string
	Token  string
	Single bool
	WithGC bool
}

// merge returns a new map containing every entry of m1 and m2, with
// m2 winning on key collisions. Neither input is modified.
func merge(m1, m2 map[string]float64) map[string]float64 {
	ans := make(map[string]float64, len(m1)+len(m2))
	for k, v := range m1 {
		ans[k] = v
	}
	for k, v := range m2 {
		ans[k] = v
	}
	return ans
}

// FetchMetrics interface for mackerelplugin: gathers the stats API
// metrics and, when enabled, merges in the GC stats.
func (p PumaPlugin) FetchMetrics() (map[string]float64, error) {
	stats, err := p.getStatsAPI()
	if err != nil {
		return nil, err
	}
	ret := p.fetchStatsMetrics(stats)

	if !p.WithGC {
		return ret, nil
	}

	gcStats, err := p.getGCStatsAPI()
	if err != nil {
		return nil, err
	}
	// NOTE(review): the GC-metrics conversion error was already being
	// discarded; kept as best-effort to preserve behavior.
	gcStatsMetrics, _ := p.fetchGCStatsMetrics(gcStats)

	return merge(ret, gcStatsMetrics), nil
}

// GraphDefinition interface for mackerelplugin: picks the single- or
// cluster-mode graphs and appends the GC graphs when enabled.
func (p PumaPlugin) GraphDefinition() map[string]mp.Graphs {
	graphdef := graphdefStats
	if p.Single {
		graphdef = graphdefStatsSingle
	}
	if !p.WithGC {
		return graphdef
	}

	// Fixed: the original inserted the GC graphs directly into the
	// package-level map, mutating shared state on every call; build a
	// fresh merged map instead.
	merged := make(map[string]mp.Graphs, len(graphdef)+len(graphdefGC))
	for k, v := range graphdef {
		merged[k] = v
	}
	for k, v := range graphdefGC {
		merged[k] = v
	}
	return merged
}

// MetricKeyPrefix interface for PluginWithPrefix; defaults to "puma".
func (p PumaPlugin) MetricKeyPrefix() string {
	if p.Prefix == "" {
		p.Prefix = "puma"
	}
	return p.Prefix
}

// Do the plugin: parse flags, build the plugin and hand it to the
// mackerel helper.
func Do() {
	var (
		optPrefix   = flag.String("metric-key-prefix", "puma", "Metric key prefix")
		optHost     = flag.String("host", "127.0.0.1", "The bind url to use for the control server")
		optPort     = flag.String("port", "9293", "The bind port to use for the control server")
		optSock     = flag.String("sock", "", "The bind socket to use for the control server")
		optToken    = flag.String("token", "", "The token to use as authentication for the control server")
		optSingle   = flag.Bool("single", false, "Puma in single mode")
		optWithGC   = flag.Bool("with-gc", false, "Output include GC stats for Puma 3.10.0~")
		optTempfile = flag.String("tempfile", "", "Temp file name")
	)
	flag.Parse()

	var puma PumaPlugin
	puma.Prefix = *optPrefix
	puma.Host = *optHost
	puma.Port = *optPort
	puma.Sock = *optSock
	puma.Token = *optToken
	puma.Single = *optSingle
	puma.WithGC = *optWithGC

	helper := mp.NewMackerelPlugin(puma)
	helper.Tempfile = *optTempfile
	helper.Run()
}
package main

import (
	"crypto/x509"
	"fmt"
)

// main parses a raw DER-encoded SubjectPublicKeyInfo blob and prints the
// resulting public key, or the parse error.
func main() {
	// Raw PKIX (SubjectPublicKeyInfo) bytes. The embedded OID looks like an
	// SM2/Chinese-national-curve key, which the Go standard library does not
	// support — TODO confirm; in that case ParsePKIXPublicKey returns an error.
	pubB := []byte{48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 129, 28, 207, 85, 1, 130, 45, 3, 66, 0, 4, 233, 114, 166, 49, 76, 93, 192, 127, 172, 110, 4, 122, 99, 20, 48, 45, 11, 46, 233, 77, 11, 54, 86, 19, 235, 137, 78, 117, 34, 23, 193, 45, 178, 83, 160, 44, 110, 166, 53, 216, 224, 80, 122, 184, 111, 82, 26, 110, 117, 253, 90, 129, 89, 9, 46, 19, 125, 189, 114, 43, 163, 25, 113, 54}

	pubKey, err := x509.ParsePKIXPublicKey(pubB)
	if err != nil {
		// Report the error and stop; the original fell through and also
		// printed a nil key after a failed parse.
		fmt.Println(err)
		return
	}
	fmt.Println(pubKey)
}
package coinmarketcap_go

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/drankou/coinmarketcap-go/types"
	"github.com/google/go-querystring/query"
	log "github.com/sirupsen/logrus"
	"golang.org/x/time/rate"
	"io/ioutil"
	"net/http"
	"os"
)

// CoinMarketCap API base URLs.
const (
	API_URL     = "https://pro-api.coinmarketcap.com"
	SANDBOX_URL = "https://sandbox-api.coinmarketcap.com"
)

// CoinmarketcapClient is a rate-limited HTTP client for the CoinMarketCap Pro API.
type CoinmarketcapClient struct {
	client  *http.Client
	limiter *rate.Limiter
}

// Init prepares the client with a rate limiter matching the given API plan.
func (c *CoinmarketcapClient) Init(plan types.ApiPlan) error {
	c.client = &http.Client{}
	c.limiter = rate.NewLimiter(types.APIRateLimits[plan], 1)

	return nil
}

// ------ Cryptocurrency ------ //

// CryptocurrencyIdMap returns a mapping of all cryptocurrencies to unique CoinMarketCap ids.
// https://pro.coinmarketcap.com/api/v1/#operation/getV1CryptocurrencyMap
func (c *CoinmarketcapClient) CryptocurrencyIdMap(request *types.CryptocurrencyMapRequest) ([]types.Cryptocurrency, error) {
	httpRequest, err := http.NewRequest("GET", API_URL+"/v1/cryptocurrency/map", nil)
	if err != nil {
		// Don't continue with a nil request on construction failure.
		log.Error(err)
		return nil, err
	}

	err = prepareHttpRequest(httpRequest, request)
	if err != nil {
		return nil, err
	}

	resp, err := c.performHttpRequest(httpRequest)
	if err != nil {
		return nil, err
	}
	// Close the body so the underlying connection can be reused.
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, errors.New(fmt.Sprintf("CryptocurrencyIdMap: %d: %s", resp.StatusCode, resp.Status))
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var cmcIdMapResponse types.CryptocurrencyMapResponse
	err = json.Unmarshal(respBody, &cmcIdMapResponse)
	if err != nil {
		return nil, err
	}
	return cmcIdMapResponse.Data, nil
}

// Returns all static metadata available for one or more cryptocurrencies.
// https://pro.coinmarketcap.com/api/v1/#operation/getV1CryptocurrencyInfo func (c *CoinmarketcapClient) CryptocurrencyInfo(request *types.CryptocurrencyInfoRequest) (map[string]*types.CryptocurrencyInfo, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/cryptocurrency/info", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } err = c.limiter.Wait(context.Background()) if err != nil { return nil, err } resp, err := c.client.Do(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.CryptocurrencyInfoResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("CryptocurrencyInfo: %d: %s", resp.StatusCode, resp.Status)) } } // Returns a ranked and sorted list of all cryptocurrencies for a historical UTC date. 
// https://pro.coinmarketcap.com/api/v1/#operation/getV1CryptocurrencyListingsHistorical func (c *CoinmarketcapClient) CryptocurrencyListingsHistorical(request *types.CryptocurrencyListingsHistoricalRequest) ([]types.CryptocurrencyListing, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/cryptocurrency/listings/historical", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.CryptocurrencyListingsHistoricalResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("CryptocurrencyListingsHistorical: %d: %s:", resp.StatusCode, resp.Status)) } } // Returns a paginated list of all active cryptocurrencies with latest market data. 
// https://pro.coinmarketcap.com/api/v1/#operation/getV1CryptocurrencyListingsLatest func (c *CoinmarketcapClient) CryptocurrencyListingsLatest(request *types.CryptocurrencyListingsLatestRequest) ([]types.CryptocurrencyListing, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/cryptocurrency/listings/latest", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.CryptocurrencyListingsLatestResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("CryptocurrencyListingsLatest: %d: %s", resp.StatusCode, resp.Status)) } } func (c *CoinmarketcapClient) CryptocurrencyOHLCVHistorical(request *types.CryptocurrencyOHLCVHistoricalRequest) (map[string]*types.OHLCVHistoricalResult, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/cryptocurrency/ohlcv/historical", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.CryptocurrencyOHLCVHistoricalResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("CryptocurrencyOHLCVHistorical: %d: %s", resp.StatusCode, resp.Status)) } } // Returns the latest OHLCV (Open, High, Low, Close, Volume) market values for one or more cryptocurrencies for the current UTC day. 
// https://pro.coinmarketcap.com/api/v1/#operation/getV1CryptocurrencyOhlcvLatest func (c *CoinmarketcapClient) CryptocurrencyOHLCVLatest(request *types.CryptocurrencyOHLCVLatestRequest) (map[string]*types.CryptocurrencyOHLCV, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/cryptocurrency/ohlcv/latest", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.CryptocurrencyOHLCVLatestResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("CryptocurrencyOHLCVLatest: %d: %s", resp.StatusCode, resp.Status)) } } func (c *CoinmarketcapClient) CryptocurrencyQuotesLatest(request *types.CryptocurrencyQuotesLatestRequest) (map[string]types.CryptocurrencyQuote, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/cryptocurrency/quotes/latest", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.CryptocurrencyQuotesLatestResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("CryptocurrencyQuotesLatest: %d: %s", resp.StatusCode, resp.Status)) } } func (c *CoinmarketcapClient) CryptocurrencyPricePerformanceStats(request *types.CryptocurrencyPricePerformanceStatsRequest) (map[string]*types.PricePerformanceStats, error) { httpRequest, err := 
http.NewRequest("GET", API_URL+"/v1/cryptocurrency/price-performance-stats/latest", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.CryptocurrencyPricePerformanceStatsResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("CryptocurrencyPricePerformanceStats: %d: %s", resp.StatusCode, resp.Status)) } } // ------ Fiat ------ // // Returns a mapping of all supported fiat currencies to unique CoinMarketCap ids. // https://pro.coinmarketcap.com/api/v1/#operation/getV1FiatMap func (c *CoinmarketcapClient) FiatMap(request *types.FiatMapRequest) ([]types.Fiat, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/fiat/map", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.FiatMapResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("FiatMap: %d: %s", resp.StatusCode, resp.Status)) } } // ------ Exchange ------ // // Returns all static metadata for one or more exchanges. 
// https://pro.coinmarketcap.com/api/v1/#operation/getV1ExchangeInfo func (c *CoinmarketcapClient) ExchangeInfo(request *types.ExchangeInfoRequest) (map[string]*types.ExchangeInfo, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/exchange/info", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.ExchangeInfoResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("ExchangeInfo: %d: %s", resp.StatusCode, resp.Status)) } } // Returns a paginated list of all cryptocurrency exchanges by CoinMarketCap ID. // By default listing_status=active // https://pro.coinmarketcap.com/api/v1/#operation/getV1ExchangeMap func (c *CoinmarketcapClient) ExchangeIdMap(request *types.ExchangeIdMapRequest) ([]types.Exchange, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/exchange/map", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.ExchangeIdMapResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("ExchangeInfo: %d: %s", resp.StatusCode, resp.Status)) } } // ------ Global-Metrics ------ // // Returns the latest global cryptocurrency market metrics. 
// https://pro.coinmarketcap.com/api/v1/#operation/getV1GlobalmetricsQuotesLatest func (c *CoinmarketcapClient) GlobalMetricsQuotesLatest(request *types.GlobalMetricsQuotesLatestRequest) (*types.GlobalMetricsQuotesLatest, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/global-metrics/quotes/latest", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.GlobalMetricsQuotesLatestResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return &cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("GlobalMetricsQuotesLatest: %d: %s", resp.StatusCode, resp.Status)) } } // Returns an interval of historical global cryptocurrency market metrics based on time and interval parameters. 
// https://pro.coinmarketcap.com/api/v1/#operation/getV1GlobalmetricsQuotesHistorical func (c *CoinmarketcapClient) GlobalMetricsQuotesHistorical(request *types.GlobalMetricsQuotesHistoricalRequest) ([]types.AggregatedMarketQuote, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/global-metrics/quotes/historical", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.GlobalMetricsQuotesHistoricalResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data.Quotes, nil } else { return nil, errors.New(fmt.Sprintf("GlobalMetricsQuotesHistorical: %d: %s", resp.StatusCode, resp.Status)) } } // ------ Partners ------ // func (c *CoinmarketcapClient) PartnersFCASListingsLatest(request *types.FCASListingsLatestRequest) ([]types.FCASRating, error) { httpRequest, err := http.NewRequest("GET", API_URL+"/v1/partners/flipside-crypto/fcas/listings/latest", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.FCASListingsLatestResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("PartnersFCASListingsLatest: %d: %s", resp.StatusCode, resp.Status)) } } func (c *CoinmarketcapClient) PartnersFCASQuotesLatest(request *types.FCASQuotesLatestRequest) (map[string]*types.FCASRating, error) { httpRequest, err := http.NewRequest("GET", 
API_URL+"/v1/partners/flipside-crypto/fcas/quotes/latest", nil) if err != nil { log.Error(err) } err = prepareHttpRequest(httpRequest, request) if err != nil { return nil, err } resp, err := c.performHttpRequest(httpRequest) if err != nil { return nil, err } if resp.StatusCode == http.StatusOK { respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var cmcIdMapResponse types.FCASQuotesLatestResponse err = json.Unmarshal(respBody, &cmcIdMapResponse) if err != nil { return nil, err } return cmcIdMapResponse.Data, nil } else { return nil, errors.New(fmt.Sprintf("PartnersFCASQuotesLatest: %d: %s", resp.StatusCode, resp.Status)) } } func prepareHttpRequest(httpRequest *http.Request, request interface{}) error { values, err := query.Values(request) if err != nil { return err } httpRequest.Header.Set("Accepts", "application/json") httpRequest.Header.Add("X-CMC_PRO_API_KEY", os.Getenv("CMC_PRO_API_KEY")) httpRequest.URL.RawQuery = values.Encode() return nil } func (c *CoinmarketcapClient) performHttpRequest(req *http.Request) (*http.Response, error) { err := c.limiter.Wait(context.Background()) if err != nil { return nil, err } resp, err := c.client.Do(req) if err != nil { return nil, err } return resp, nil }
package Proxy

import (
	"fmt"
)

// ITask describes something that can rent out a house.
type ITask interface {
	RentHouse(desc string, price int)
}

// Task is the real subject that performs the renting.
type Task struct {
}

// RentHouse prints the rental address and price.
func (t *Task) RentHouse(desc string, price int) {
	// Printf with %d replaces the old Println(Sprintf(..., strconv.Itoa(price)))
	// round-trip; the output is identical.
	fmt.Printf("租房地址%s,价格%d\n", desc, price)
}

// AgentTask is the proxy: it forwards rental requests to the real Task.
type AgentTask struct {
	task *Task
}

// NewAgentTask builds a proxy wired to a fresh Task.
func NewAgentTask() *AgentTask {
	return &AgentTask{task: &Task{}}
}

// RentHouse delegates to the wrapped Task.
func (t *AgentTask) RentHouse(desc string, price int) {
	t.task.RentHouse(desc, price)
}
package frac // Div divides two fractions. func Div(frac1 Frac, frac2 Frac) Frac { // Adjust fractions to common denominator. num := frac1.Num * frac2.Den den := frac1.Den * frac2.Num // Yield result. return Frac{num, den} }
package eglm

import (
	"bytes"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/GameWith/gwlog"
	"github.com/GameWith/gwlog/formatter"
	"github.com/labstack/echo/v4"
)

// newTestEcho returns an echo instance whose gwlog logger writes into the
// returned buffer, so tests can inspect the emitted log lines.
func newTestEcho() (*echo.Echo, *bytes.Buffer) {
	e := echo.New()
	buf := new(bytes.Buffer)
	logger := gwlog.GetLogger()
	logger.SetOutput(buf)
	e.Logger = logger
	return e, buf
}

// okHandler responds 200 OK; shared by most test routes.
func okHandler(c echo.Context) error {
	return c.String(http.StatusOK, "OK")
}

func TestMiddleware(t *testing.T) {
	e, buf := newTestEcho()
	e.Use(Middleware(&Config{}))
	e.GET("/", okHandler)

	status, _ := request(http.MethodGet, "/", e)
	if status != http.StatusOK {
		t.Errorf("got = %v, want = %v", status, http.StatusOK)
	}
	// An access log line must have been emitted.
	if !strings.Contains(buf.String(), "type=ACCESS") {
		t.Errorf("invalid buf: %v", buf.String())
	}
}

func TestMiddleware_skipper(t *testing.T) {
	e, buf := newTestEcho()
	e.Use(Middleware(&Config{
		Skipper: func(c echo.Context) bool {
			return c.Path() == "/"
		},
	}))
	e.GET("/", okHandler)

	status, _ := request(http.MethodGet, "/", e)
	if status != http.StatusOK {
		t.Errorf("got = %v, want = %v", status, http.StatusOK)
	}
	// Skipped requests must produce no log output at all.
	if buf.Len() != 0 {
		t.Errorf("invalid buf: %v", buf.String())
	}
}

func TestMiddleware_loggingFunc(t *testing.T) {
	e, buf := newTestEcho()
	e.Use(Middleware(&Config{
		LoggingFunc: func(logger gwlog.Logger, _ *Parameter, _ echo.Context) error {
			logger.Print("a")
			return nil
		},
	}))
	e.GET("/", okHandler)

	status, _ := request(http.MethodGet, "/", e)
	if status != http.StatusOK {
		t.Errorf("got = %v, want = %v", status, http.StatusOK)
	}
	// The custom logging func replaces the default access line.
	if !strings.Contains(buf.String(), "msg=a") {
		t.Errorf("invalid buf: %v", buf.String())
	}
}

func TestMiddleware_error(t *testing.T) {
	e, buf := newTestEcho()
	e.Use(Middleware(&Config{}))
	e.GET("/", func(c echo.Context) error {
		return fmt.Errorf("ERROR")
	})

	status, _ := request(http.MethodGet, "/", e)
	if status != http.StatusInternalServerError {
		t.Errorf("got = %v, want = %v", status, http.StatusInternalServerError)
	}
	if !strings.Contains(buf.String(), "error=ERROR") {
		t.Errorf("log does not contain error=ERROR: %v", buf.String())
	}
}

func TestMiddleware_json(t *testing.T) {
	// This test needs the JSON formatter, so it wires the logger inline
	// instead of using newTestEcho.
	e := echo.New()
	buf := new(bytes.Buffer)
	logger := gwlog.GetLogger()
	logger.SetFormatter(&formatter.JSONFormatter{})
	logger.SetOutput(buf)
	e.Logger = logger
	e.Use(Middleware(&Config{}))
	e.GET("/", okHandler)

	status, _ := request(http.MethodGet, "/", e)
	if status != http.StatusOK {
		t.Errorf("got = %v, want = %v", status, http.StatusOK)
	}
	if !strings.Contains(buf.String(), `"message":"GET /"`) {
		t.Errorf("log does not contain JSON message field: %v", buf.String())
	}
}

func TestMiddleware_defaultLogger(t *testing.T) {
	// With echo's default logger (not gwlog), the middleware must reject the request.
	e := echo.New()
	e.Use(Middleware(&Config{}))
	e.GET("/", okHandler)

	status, body := request(http.MethodGet, "/", e)
	if status != http.StatusInternalServerError {
		t.Errorf("got = %v, want = %v", status, http.StatusInternalServerError)
	}
	bw := `{"message":"invalid instance type. only gwlog.Logger"}` + "\n"
	if body != bw {
		t.Errorf("got = %v, want = %v", body, bw)
	}
}

// request performs an in-memory HTTP request against e and returns the
// recorded status code and body.
func request(method, path string, e *echo.Echo) (int, string) {
	req := httptest.NewRequest(method, path, nil)
	rec := httptest.NewRecorder()
	e.ServeHTTP(rec, req)
	return rec.Code, rec.Body.String()
}
package first

import (
	"fmt"
	"go/ast"
	"reflect"
)

// DumpVisitor is an ast.Visitor that prints the dynamic type of every
// node it is handed while walking a syntax tree.
type DumpVisitor struct {
}

// Visit prints the concrete type of node and returns the visitor itself,
// so ast.Walk continues into the node's children.
func (v DumpVisitor) Visit(node ast.Node) ast.Visitor {
	nodeType := reflect.TypeOf(node)
	fmt.Println(nodeType)
	return v
}
package service import ( "github.com/roberthafner/bpmn-engine/domain/model" "github.com/roberthafner/bpmn-engine/domain/model/command" ) type DeploymentService interface { CreateDeployment(d model.DeploymentEntity) model.DeploymentEntity } func NewDeploymentService(ce command.CommandExecutor) DeploymentService { return deploymentService { commandExecutor: ce, } } type deploymentService struct { commandExecutor command.CommandExecutor } func (ds deploymentService) CreateDeployment(d model.DeploymentEntity) model.DeploymentEntity { cmd := command.NewDeployCmd(d) return ds.commandExecutor.Execute(cmd).(model.DeploymentEntity) }
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

var config = sarama.NewConfig()
var localKafka = []string{"127.0.0.1:9093"}

func init() {
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Return.Successes = true
}

func main() {
	send()
}

// send publishes a single test message and waits for the broker's ack (or error).
func send() {
	msg := &sarama.ProducerMessage{
		Topic: "my-topic",
		Value: sarama.StringEncoder("this is a test"),
	}

	client, err := sarama.NewAsyncProducer(localKafka, config)
	if err != nil {
		// Include the underlying error; the original discarded it.
		log.Fatalf("connect failed: %v", err)
	}
	defer client.Close()

	client.Input() <- msg

	// Because Producer.Return.Successes is enabled, the Successes and Errors
	// channels MUST be consumed; the original never read them, so the ack was
	// dropped and Close could race with (or deadlock on) the in-flight message.
	select {
	case <-client.Successes():
		log.Println("message sent")
	case perr := <-client.Errors():
		log.Printf("send failed: %v", perr.Err)
	}
}
// Package main demonstrates how deferred functions observe a named return
// value, depending on whether the value is passed by copy, by pointer, or
// captured by closure.
package main

import (
	"fmt"
)

// captured registers the defer with i passed BY VALUE: j is a copy taken at
// defer time, so the deferred print shows 1 even though i becomes 2.
// Returns 2.
func captured() (i int) {
	i = 1
	defer func(j int) {
		// j was copied when the defer statement executed: prints 1.
		fmt.Println("captured defer:", j)
	}(i)
	i++
	return
}

// pointer passes &i to the deferred function, so dereferencing at defer-run
// time sees the final value: prints 2. Returns 2.
func pointer() (i int) {
	i = 1
	defer func(j *int) {
		// *j reads i after the increment: prints 2.
		fmt.Println("pointer defer:", *j)
	}(&i)
	i++
	return
}

// latest captures i by closure (no argument), so the deferred call reads the
// named return variable itself: prints 2. Returns 2.
func latest() (i int) {
	i = 1
	defer func() {
		// Closure reads the current value of i: prints 2.
		fmt.Println("latest defer:", i)
	}()
	i++
	return
}

// main runs all three variants; each function returns 2, only the deferred
// output differs.
func main() {
	fmt.Println("captured:", captured())
	fmt.Println("pointer:", pointer())
	fmt.Println("latest:", latest())
}
package arima

import (
	"math"

	"github.com/DoOR-Team/goutils/log"
	mtx "github.com/DoOR-Team/timeseries_forecasting/arima/matrix"
	"github.com/DoOR-Team/timeseries_forecasting/arima/utils"
)

// maxIterationForHannanRissanen bounds the least-squares refinement loop in
// the Hannan-Rissanen ARMA estimation.
const maxIterationForHannanRissanen = 5

type Solver struct {
}

func newSolver() *Solver {
	return &Solver{}
}

// forecastARMA forecasts an ARMA process over [startIndex, endIndex).
// dataStationary is assumed already differenced and centered; indices below
// startIndex are treated as training data used to build up the error terms.
func forecastARMA(params Config, dataStationary []float64, startIndex int, endIndex int) []float64 {
	trainLen := startIndex
	totalLen := endIndex
	errors := make([]float64, totalLen)
	data := make([]float64, totalLen)
	copy(data, dataStationary)

	forecastLen := endIndex - startIndex
	forecasts := make([]float64, forecastLen)
	_dp := params.getDegreeP()
	_dq := params.getDegreeQ()
	// The model needs max(p, q) past points before it can produce a value.
	startIdx := int(math.Max(float64(_dp), float64(_dq)))

	for j := 0; j < startIndex; j++ {
		errors[j] = 0
	}

	// Populate errors over the training region.
	for j := startIdx; j < trainLen; j++ {
		forecast := params.forecastOnePointARMA(data, errors, j)
		dataError := data[j] - forecast
		errors[j] = dataError
	}

	// Forecast: each predicted point is fed back as input, with zero error.
	for j := trainLen; j < totalLen; j++ {
		forecast := params.forecastOnePointARMA(data, errors, j)
		data[j] = forecast
		errors[j] = 0
		forecasts[j-trainLen] = forecast
	}

	return forecasts
}

// forecastARIMA differences the input series, forecasts the stationary part
// with ARMA, and integrates the forecast back to the original scale. It
// returns the forecast over [forecastStartIndex, forecastEndIndex).
func forecastARIMA(params Config, data []float64, forecastStartIndex int, forecastEndIndex int) *Result {
	if !checkARIMADataLength(params, data, forecastStartIndex, forecastEndIndex) {
		initialConditionSize := params.d + params.D*params.m
		log.Fatalf("not enough data for ARIMA. needed at least %d, have %d, startindex=%d, endindex = %d",
			initialConditionSize, len(data), forecastStartIndex, forecastEndIndex)
	}

	forecast_length := forecastEndIndex - forecastStartIndex
	forecast := make([]float64, forecast_length)

	// Train only on the prefix that precedes the forecast window.
	data_train := make([]float64, forecastStartIndex)
	copy(data_train, data)

	// DIFFERENTIATE
	hasSeasonalI := params.D > 0 && params.m > 0
	hasNonSeasonalI := params.d > 0
	data_stationary := differentiate(params, data_train, hasSeasonalI, hasNonSeasonalI) // currently un-centered

	// CENTERING
	mean_stationary := utils.ComputeMean(data_stationary)
	utils.Shift(data_stationary, (-1)*mean_stationary)
	dataVariance := utils.ComputeVariance(data_stationary)

	// FORECAST on the centered stationary series, then append the forecast
	// to the training data so it can be integrated as one sequence.
	forecast_stationary := forecastARMA(params, data_stationary, len(data_stationary), len(data_stationary)+forecast_length)
	data_forecast_stationary := make([]float64, len(data_stationary)+forecast_length)
	copy(data_forecast_stationary, data_stationary)
	copy(data_forecast_stationary[len(data_stationary):], forecast_stationary)

	// UN-CENTERING
	utils.Shift(data_forecast_stationary, mean_stationary)

	// INTEGRATE back to the original (undifferenced) scale.
	forecast_merged := integrate(params, data_forecast_stationary, hasSeasonalI, hasNonSeasonalI)

	copy(forecast, forecast_merged[forecastStartIndex:])
	return NewResult(forecast, dataVariance)
}

// estimateARIMA fits ARMA coefficients on the training prefix of data and
// returns the resulting model. The forecast window only sizes the validation
// horizon of the Hannan-Rissanen iteration.
func estimateARIMA(params Config, data []float64, forecastStartIndex int, forecastEndIndex int) *Model {
	if !checkARIMADataLength(params, data, forecastStartIndex, forecastEndIndex) {
		initialConditionSize := params.d + params.D*params.m
		log.Fatalf("not enough data for ARIMA. needed at least %d, have %d, startindex=%d, endindex = %d",
			initialConditionSize, len(data), forecastStartIndex, forecastEndIndex)
	}

	forecast_length := forecastEndIndex - forecastStartIndex
	data_train := make([]float64, forecastStartIndex)
	copy(data_train, data)

	hasSeasonalI := params.D > 0 && params.m > 0
	hasNonSeasonalI := params.d > 0
	data_stationary := differentiate(params, data_train, hasSeasonalI, hasNonSeasonalI) // currently un-centered

	// CENTERING
	mean_stationary := utils.ComputeMean(data_stationary)
	utils.Shift(data_stationary, (-1)*mean_stationary)

	// Estimate ARMA coefficients on the centered stationary series
	// (mutates params in place).
	estimateARMA(data_stationary, &params, forecast_length, maxIterationForHannanRissanen)

	return &Model{Params: params, data: data, trainDataSize: forecastStartIndex}
}

// differentiate applies the seasonal and/or non-seasonal differencing
// configured in params and returns the (un-centered) stationary series.
func differentiate(params Config, trainingData []float64, hasSeasonalI bool, hasNonSeasonalI bool) []float64 {
	var dataStationary []float64 // currently un-centered
	if hasSeasonalI && hasNonSeasonalI {
		params.differentiateSeasonal(trainingData)
		params.differentiateNonSeasonal(params.getLastDifferenceSeasonal())
		dataStationary = params.getLastDifferenceNonSeasonal()
	} else if hasSeasonalI {
		params.differentiateSeasonal(trainingData)
		dataStationary = params.getLastDifferenceSeasonal()
	} else if hasNonSeasonalI {
		params.differentiateNonSeasonal(trainingData)
		dataStationary = params.getLastDifferenceNonSeasonal()
	} else {
		// No differencing configured: return an independent copy.
		dataStationary = make([]float64, len(trainingData))
		copy(dataStationary, trainingData)
	}
	return dataStationary
}

// integrate is the inverse of differentiate: it undoes the configured
// seasonal and/or non-seasonal differencing.
func integrate(params Config, dataForecastStationary []float64, hasSeasonalI bool, hasNonSeasonalI bool) []float64 {
	var forecastMerged []float64
	if hasSeasonalI && hasNonSeasonalI {
		params.getIntegrateSeasonal(dataForecastStationary)
		params.getIntegrateNonSeasonal(params.getLastIntegrateSeasonal())
		forecastMerged = params.getLastIntegrateNonSeasonal()
	} else if hasSeasonalI {
		params.getIntegrateSeasonal(dataForecastStationary)
		forecastMerged = params.getLastIntegrateSeasonal()
	} else if hasNonSeasonalI {
		params.getIntegrateNonSeasonal(dataForecastStationary)
		forecastMerged = params.getLastIntegrateNonSeasonal()
	} else {
		// No integration needed: return an independent copy.
		forecastMerged = make([]float64, len(dataForecastStationary))
		copy(forecastMerged, dataForecastStationary)
	}
	return forecastMerged
}

// computeRMSE returns the root-mean-square error between left (shifted by
// leftIndexOffset) and right over [startIndex, endIndex).
func computeRMSE(left []float64, right []float64, leftIndexOffset, startIndex, endIndex int) float64 {
	len_left := len(left)
	len_right := len(right)
	if startIndex >= endIndex || startIndex < 0 || len_right < endIndex ||
		len_left+leftIndexOffset < 0 || len_left+leftIndexOffset < endIndex {
		log.Fatalf("invalid arguments: startIndex=%d, endIndex=%d, len_left=%d, len_right=%d, leftOffset=%d",
			startIndex, endIndex, len_left, len_right, leftIndexOffset)
	}

	square_sum := 0.0
	for i := startIndex; i < endIndex; i++ {
		dataerror := left[i+leftIndexOffset] - right[i]
		square_sum += dataerror * dataerror
	}
	return math.Sqrt(square_sum / float64(endIndex-startIndex))
}

// computeRMSEValidation holds out the trailing testDataPercentage of data,
// fits the model on the rest, and returns the RMSE on the hold-out set.
func computeRMSEValidation(data []float64, testDataPercentage float64, params Config) float64 {
	testDataLength := int(float64(len(data)) * testDataPercentage)
	trainingDataEndIndex := len(data) - testDataLength
	result := estimateARIMA(params, data, trainingDataEndIndex, len(data))
	forecast := result.forecast(testDataLength).GetForecast()
	return computeRMSE(data, forecast, trainingDataEndIndex, 0, len(forecast))
}

// setSigma2AndPredicationInterval attaches the 95% confidence interval to
// forecastResult, derived from the psi-weights of the fitted ARMA model.
func setSigma2AndPredicationInterval(params Config, forecastResult *Result, forecastSize int) float64 {
	coeffs_AR := params.getCurrentARCoefficients()
	coeffs_MA := params.getCurrentMACoefficients()
	return forecastResult.SetConfInterval(confidence_constant_95pct,
		getCumulativeSumOfCoeff(ARMAtoMA(coeffs_AR, coeffs_MA, forecastSize)))
}

// checkARIMADataLength reports whether data is long enough to supply the
// initial conditions required by the configured differencing orders.
func checkARIMADataLength(params Config, data []float64, startIndex, endIndex int) bool {
	result := true
	initialConditionSize := int(params.d + params.D*params.m)
	if len(data) < initialConditionSize || startIndex < initialConditionSize || endIndex <= startIndex {
		result = false
	}
	return result
}

// estimateARMA implements the Hannan-Rissanen algorithm for estimating ARMA
// parameters; the best parameter vector found is written back into params.
func estimateARMA(data_orig []float64, params *Config, forecast_length, maxIteration int) {
	data := make([]float64, len(data_orig))
	total_length := len(data)
	copy(data, data_orig)

	r := int(math.Max(float64(1+params.getDegreeP()), float64(1+params.getDegreeQ())))
	length := total_length - forecast_length
	size := length - r
	if length < (2 * r) {
		// Fixed: the original format string was missing the %d verb for r.
		log.Fatalf("not enough data points: length=%d, r=%d", length, r)
	}

	// step 1: apply Yule-Walker method and estimate AR(r) model on input data
	errors := make([]float64, length)
	_ = applyYuleWalkerAndGetInitialErrors(data, r, length, errors)
	for j := 0; j < r; j++ {
		errors[j] = 0
	}

	// step 2: iterate Least-Square fitting until the parameters converge.
	// Instantiate the Z-matrix once and reuse it across iterations.
	matrix := make([][]float64, params.getNumParamsP()+params.getNumParamsQ())
	for i := range matrix {
		matrix[i] = make([]float64, size)
	}

	bestRMSE := float64(-1) // initial value
	remainIteration := maxIteration
	var bestParams *mtx.InsightsVector
	for remainIteration >= 0 {
		estimatedParams := iterationStep(*params, data, errors, matrix, r, length, size)
		params.setParamsFromVector(estimatedParams)

		// forecast for validation data and compute RMSE
		forecasts := forecastARMA(*params, data, length, len(data))
		anotherRMSE := computeRMSE(data, forecasts, length, 0, forecast_length)

		// update errors from the in-sample forecasts
		train_forecasts := forecastARMA(*params, data, r, len(data))
		for j := 0; j < size; j++ {
			errors[j+r] = data[j+r] - train_forecasts[j]
		}
		if bestRMSE < 0 || anotherRMSE < bestRMSE {
			bestParams = estimatedParams
			bestRMSE = anotherRMSE
		}
		remainIteration--
	}
	params.setParamsFromVector(bestParams)
}

// applyYuleWalkerAndGetInitialErrors fits an AR(r) model via Yule-Walker and
// fills errors with the initial residual estimates; it returns the AR
// coefficients (lag-1 coefficient at index 0).
func applyYuleWalkerAndGetInitialErrors(data []float64, r, length int, errors []float64) []float64 {
	yuleWalker := Fit(data, r)
	bsYuleWalker := NewBackShift(r, true)
	bsYuleWalker.initializeParams(false)

	// The array returned by Yule-Walker has the lag-1 coefficient at index 0,
	// hence the lag index is shifted by one when copied into the BackShift operator.
	for j := 0; j < r; j++ {
		bsYuleWalker.setParam(j+1, yuleWalker[j])
	}

	m := 0
	// The initial r elements are set to zero.
	for m < r {
		errors[m] = 0
		m++
	}
	// From then on, the initial estimate of the error terms is
	// Z_t = X_t - \phi_1 X_{t-1} - \cdots - \phi_r X_{t-r}
	for m < length {
		errors[m] = data[m] - bsYuleWalker.getLinearCombinationFrom(data, m)
		m++
	}
	return yuleWalker
}

// iterationStep performs one least-squares fitting step of the
// Hannan-Rissanen iteration and returns the estimated parameter vector.
func iterationStep(
	params Config,
	data []float64, errors []float64,
	matrix [][]float64,
	r, length, size int) *mtx.InsightsVector {

	rowIdx := 0

	// copy over shifted timeseries data into matrix
	offsetsAR := params.getOffsetsAR()
	for _, pIdx := range offsetsAR {
		copy(matrix[rowIdx][:size], data[r-pIdx:])
		rowIdx++
	}
	// copy over shifted errors into matrix
	offsetsMA := params.getOffsetsMA()
	for _, qIdx := range offsetsMA {
		copy(matrix[rowIdx][:size], errors[r-qIdx:])
		rowIdx++
	}

	// instantiate matrix to perform least squares algorithm
	zt := mtx.NewInsightsMatrixWithData(matrix, false)

	// instantiate target vector
	vector := make([]float64, size)
	copy(vector[:size], data[r:])
	x := mtx.NewInsightVectorWithData(vector, false)

	// obtain least squares solution
	ztx := zt.TimesVector(x)
	ztz := zt.ComputeAAT()
	estimatedVector := ztz.SolveSPDIntoVector(ztx, maxConditionNumber)

	return estimatedVector
}
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expression

import (
	"github.com/pingcap/tidb/parser/ast"
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/zap"
)

// specialFoldHandler stores functions for special UDF to constant fold
var specialFoldHandler = map[string]func(*ScalarFunction) (Expression, bool){}

func init() {
	// These functions need special folding because not all of their
	// arguments are evaluated: only the taken branch must be folded.
	specialFoldHandler = map[string]func(*ScalarFunction) (Expression, bool){
		ast.If:     ifFoldHandler,
		ast.Ifnull: ifNullFoldHandler,
		ast.Case:   caseWhenHandler,
		ast.IsNull: isNullHandler,
	}
}

// FoldConstant does constant folding optimization on an expression excluding deferred ones.
func FoldConstant(expr Expression) Expression {
	e, _ := foldConstant(expr)
	// keep the original coercibility, charset, collation and repertoire values after folding
	e.SetCoercibility(expr.Coercibility())
	charset, collate := expr.GetType().GetCharset(), expr.GetType().GetCollate()
	e.GetType().SetCharset(charset)
	e.GetType().SetCollate(collate)
	e.SetRepertoire(expr.Repertoire())
	return e
}

// isNullHandler folds IS NULL. The second return value reports whether the
// folded result depends on a deferred expression or plan-cache parameter.
func isNullHandler(expr *ScalarFunction) (Expression, bool) {
	arg0 := expr.GetArgs()[0]
	if constArg, isConst := arg0.(*Constant); isConst {
		isDeferredConst := constArg.DeferredExpr != nil || constArg.ParamMarker != nil
		value, err := expr.Eval(chunk.Row{})
		if err != nil {
			// Failed to fold this expr to a constant, print the DEBUG log and
			// return the original expression to let the error to be evaluated
			// again, in that time, the error is returned to the client.
			logutil.BgLogger().Debug("fold expression to constant", zap.String("expression", expr.ExplainInfo()), zap.Error(err))
			return expr, isDeferredConst
		}
		if isDeferredConst {
			return &Constant{Value: value, RetType: expr.RetType, DeferredExpr: expr}, true
		}
		return &Constant{Value: value, RetType: expr.RetType}, false
	}
	// A NOT NULL column can never be NULL, so IS NULL folds to false (0)
	// even though the argument itself is not constant.
	if mysql.HasNotNullFlag(arg0.GetType().GetFlag()) {
		return NewZero(), false
	}
	return expr, false
}

// ifFoldHandler folds IF(cond, a, b): only the branch selected by a constant
// condition is folded, so the untaken branch is never evaluated.
func ifFoldHandler(expr *ScalarFunction) (Expression, bool) {
	args := expr.GetArgs()
	foldedArg0, _ := foldConstant(args[0])
	if constArg, isConst := foldedArg0.(*Constant); isConst {
		arg0, isNull0, err := constArg.EvalInt(expr.Function.getCtx(), chunk.Row{})
		if err != nil {
			// Failed to fold this expr to a constant, print the DEBUG log and
			// return the original expression to let the error to be evaluated
			// again, in that time, the error is returned to the client.
			logutil.BgLogger().Debug("fold expression to constant", zap.String("expression", expr.ExplainInfo()), zap.Error(err))
			return expr, false
		}
		if !isNull0 && arg0 != 0 {
			return foldConstant(args[1])
		}
		return foldConstant(args[2])
	}
	// if the condition is not const, which branch is unknown to run, so directly return.
	return expr, false
}

// ifNullFoldHandler folds IFNULL(a, b): if a folds to a non-NULL constant it
// is the result; if it folds to NULL the fallback b is folded instead.
func ifNullFoldHandler(expr *ScalarFunction) (Expression, bool) {
	args := expr.GetArgs()
	foldedArg0, isDeferred := foldConstant(args[0])
	if constArg, isConst := foldedArg0.(*Constant); isConst {
		// Only check constArg.Value here. Because deferred expression is
		// evaluated to constArg.Value after foldConstant(args[0]), it's not
		// needed to be checked.
		if constArg.Value.IsNull() {
			return foldConstant(args[1])
		}
		return constArg, isDeferred
	}
	// if the condition is not const, which branch is unknown to run, so directly return.
	return expr, false
}

// caseWhenHandler folds CASE WHEN. Conditions are inspected left to right;
// folding stops at the first non-constant condition because later branches
// may or may not run.
func caseWhenHandler(expr *ScalarFunction) (Expression, bool) {
	args, l := expr.GetArgs(), len(expr.GetArgs())
	var isDeferred, isDeferredConst bool
	// Arguments come in (condition, result) pairs; a trailing odd argument
	// is the ELSE body.
	for i := 0; i < l-1; i += 2 {
		expr.GetArgs()[i], isDeferred = foldConstant(args[i])
		isDeferredConst = isDeferredConst || isDeferred
		if _, isConst := expr.GetArgs()[i].(*Constant); !isConst {
			// for no-const, here should return directly, because the following branches are unknown to be run or not
			return expr, false
		}
		// If the condition is const and true, and the previous conditions
		// has no expr, then the folded execution body is returned, otherwise
		// the arguments of the casewhen are folded and replaced.
		val, isNull, err := args[i].EvalInt(expr.GetCtx(), chunk.Row{})
		if err != nil {
			return expr, false
		}
		if val != 0 && !isNull {
			foldedExpr, isDeferred := foldConstant(args[i+1])
			isDeferredConst = isDeferredConst || isDeferred
			if _, isConst := foldedExpr.(*Constant); isConst {
				foldedExpr.GetType().SetDecimal(expr.GetType().GetDecimal())
				return foldedExpr, isDeferredConst
			}
			return foldedExpr, isDeferredConst
		}
	}
	// If the number of arguments in casewhen is odd, and the previous conditions
	// is false, then the folded else execution body is returned. otherwise
	// the execution body of the else are folded and replaced.
	if l%2 == 1 {
		foldedExpr, isDeferred := foldConstant(args[l-1])
		isDeferredConst = isDeferredConst || isDeferred
		if _, isConst := foldedExpr.(*Constant); isConst {
			foldedExpr.GetType().SetDecimal(expr.GetType().GetDecimal())
			return foldedExpr, isDeferredConst
		}
		return foldedExpr, isDeferredConst
	}
	return expr, isDeferredConst
}

// foldConstant recursively folds expr. The boolean result reports whether
// the folded constant derives from a deferred expression or a plan-cache
// parameter (in which case callers must keep DeferredExpr populated).
func foldConstant(expr Expression) (Expression, bool) {
	switch x := expr.(type) {
	case *ScalarFunction:
		if _, ok := unFoldableFunctions[x.FuncName.L]; ok {
			return expr, false
		}
		if function := specialFoldHandler[x.FuncName.L]; function != nil && !MaybeOverOptimized4PlanCache(x.GetCtx(), []Expression{expr}) {
			return function(x)
		}

		args := x.GetArgs()
		sc := x.GetCtx().GetSessionVars().StmtCtx
		argIsConst := make([]bool, len(args))
		hasNullArg := false
		allConstArg := true
		isDeferredConst := false
		for i := 0; i < len(args); i++ {
			switch x := args[i].(type) {
			case *Constant:
				isDeferredConst = isDeferredConst || x.DeferredExpr != nil || x.ParamMarker != nil
				argIsConst[i] = true
				hasNullArg = hasNullArg || x.Value.IsNull()
			default:
				allConstArg = false
			}
		}
		if !allConstArg {
			// try to optimize on the situation when not all arguments are const
			// for most functions, if one of the arguments are NULL, the result can be a constant (NULL or something else)
			//
			// NullEQ and ConcatWS are excluded, because they could have different value when the non-constant value is
			// 1 or NULL. For example, concat_ws(NULL, NULL) gives NULL, but concat_ws(1, NULL) gives ''
			if !hasNullArg || !sc.InNullRejectCheck || x.FuncName.L == ast.NullEQ || x.FuncName.L == ast.ConcatWS {
				return expr, isDeferredConst
			}
			// Substitute NewOne() for the non-constant arguments and probe
			// whether the result is NULL/false regardless of their values.
			constArgs := make([]Expression, len(args))
			for i, arg := range args {
				if argIsConst[i] {
					constArgs[i] = arg
				} else {
					constArgs[i] = NewOne()
				}
			}
			dummyScalarFunc, err := NewFunctionBase(x.GetCtx(), x.FuncName.L, x.GetType(), constArgs...)
			if err != nil {
				return expr, isDeferredConst
			}
			value, err := dummyScalarFunc.Eval(chunk.Row{})
			if err != nil {
				return expr, isDeferredConst
			}
			if value.IsNull() {
				// This Constant is created to compose the result expression of EvaluateExprWithNull when InNullRejectCheck
				// is true. We just check whether the result expression is null or false and then let it die. Basically,
				// the constant is used once briefly and will not be retained for a long time. Hence setting DeferredExpr
				// of Constant to nil is ok.
				return &Constant{Value: value, RetType: x.RetType}, false
			}
			if isTrue, err := value.ToBool(sc); err == nil && isTrue == 0 {
				// This Constant is created to compose the result expression of EvaluateExprWithNull when InNullRejectCheck
				// is true. We just check whether the result expression is null or false and then let it die. Basically,
				// the constant is used once briefly and will not be retained for a long time. Hence setting DeferredExpr
				// of Constant to nil is ok.
				return &Constant{Value: value, RetType: x.RetType}, false
			}
			return expr, isDeferredConst
		}
		value, err := x.Eval(chunk.Row{})
		retType := x.RetType.Clone()
		if !hasNullArg {
			// set right not null flag for constant value
			switch value.Kind() {
			case types.KindNull:
				retType.DelFlag(mysql.NotNullFlag)
			default:
				retType.AddFlag(mysql.NotNullFlag)
			}
		}
		if err != nil {
			logutil.BgLogger().Debug("fold expression to constant", zap.String("expression", x.ExplainInfo()), zap.Error(err))
			return expr, isDeferredConst
		}
		if isDeferredConst {
			return &Constant{Value: value, RetType: retType, DeferredExpr: x}, true
		}
		return &Constant{Value: value, RetType: retType}, false
	case *Constant:
		if x.ParamMarker != nil {
			return &Constant{
				Value:        x.ParamMarker.GetUserVar(),
				RetType:      x.RetType,
				DeferredExpr: x.DeferredExpr,
				ParamMarker:  x.ParamMarker,
			}, true
		} else if x.DeferredExpr != nil {
			value, err := x.DeferredExpr.Eval(chunk.Row{})
			if err != nil {
				logutil.BgLogger().Debug("fold expression to constant", zap.String("expression", x.ExplainInfo()), zap.Error(err))
				return expr, true
			}
			return &Constant{Value: value, RetType: x.RetType, DeferredExpr: x.DeferredExpr}, true
		}
	}
	return expr, false
}
package handlers

import (
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"net/http"

	"github.com/sirupsen/logrus"
	"github.com/vitorfhc/heimdall/auth"
	"github.com/vitorfhc/heimdall/gql"
)

// loginJSON is the expected shape of the AuthHandler request body.
type loginJSON struct {
	Username string
	Password string
}

// AuthHandler authenticates an employer from a JSON {Username, Password}
// POST body. On success it sets the "pipo-token" cookie with a signed token
// and responds 200; otherwise it responds 405 (non-POST), 400 (bad body),
// 401 (wrong credentials) or 500 (token signing failure).
func AuthHandler(w http.ResponseWriter, req *http.Request) {
	logrus.WithFields(logrus.Fields{
		"handler": "AuthHandler",
		"method":  req.Method,
		"uri":     req.RequestURI,
	}).Info("Request received")

	if req.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		_, err := w.Write([]byte("The authentication must be done through a POST"))
		if err != nil {
			logrus.WithField("handler", "AuthHandler").Error("Error writing response: ", err)
		}
		return
	}

	jsonDecoder := json.NewDecoder(req.Body)
	loginData := loginJSON{}
	err := jsonDecoder.Decode(&loginData)
	if err != nil {
		logrus.WithField("handler", "AuthHandler").Error("Error trying to decode JSON from request body: ", err)
		// A malformed request body is a client error, not a server failure
		// (was StatusInternalServerError).
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	employer := gql.GetEmployer(loginData.Username)

	// SECURITY(review): unsalted MD5 is unsuitable for password storage;
	// migrate the stored hashes and this comparison to bcrypt/scrypt/argon2.
	// Kept as-is here because the stored credentials are MD5 digests.
	hashBytes := md5.Sum([]byte(loginData.Password))
	hash := hex.EncodeToString(hashBytes[:])
	if employer.Password != hash {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	tokenString, err := auth.GenerateToken(employer)
	if err != nil {
		logrus.WithField("handler", "AuthHandler").Error("Error trying to sign token: ", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	http.SetCookie(w, &http.Cookie{
		Name:     "pipo-token",
		Value:    tokenString,
		HttpOnly: true,
		// Uncomment below on production
		// Secure: true,
		// Domain: "domain.com",
	})

	w.Header().Add("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
}
package main

import (
	"github.com/gin-contrib/sessions"
	"github.com/gin-gonic/gin"
	// "log"
	"fmt"
	"net/http"
	//"time"
)

// mwAdmin is a placeholder middleware for admin routes; it currently
// performs no checks and just passes the request through.
func mwAdmin() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next()
	}
}

// mwIsUser redirects requests to /user/ when a "user_id" session value is
// already present (i.e. the visitor is logged in).
// NOTE(review): c.Next() still runs after c.Abort() returns here because
// Abort only stops *pending* handlers — confirm this is intended.
func mwIsUser() gin.HandlerFunc {
	return func(c *gin.Context) {
		session := sessions.Default(c)
		v := session.Get("user_id")
		fmt.Println("DEBUG:mwIsUser():", c.Request.URL.Path, "v:", v) // debug output; consider removing for production
		if v != nil /*&& v.(int64) > 0*/ {
			fmt.Println("DEBUG:mwIsUser() Now user logged:v.(int64)= ", v)
			c.Redirect(http.StatusMovedPermanently, "/user/")
			c.Abort()
		}
		c.Next()
	}
}

// mwIsNotUser redirects requests to the password-recovery page when no
// "user_id" session value exists (i.e. the visitor is not logged in).
func mwIsNotUser() gin.HandlerFunc {
	return func(c *gin.Context) {
		session := sessions.Default(c)
		v := session.Get("user_id")
		fmt.Println("DEBUG:mwIsNotUser()", c.Request.URL.Path, "v:", v) // debug output; consider removing for production
		if v == nil /*&& v.(int64) > 0*/ {
			fmt.Println("DEBUG:mwIsNotUser() v=nil")
			c.Redirect(http.StatusMovedPermanently, "/auth/passwordrecover/")
			c.Abort()
		}
		c.Next()
	}
}

// confCORS sets the Content-Security-Policy header on every response and
// answers CORS preflight (OPTIONS) requests by echoing the requested headers.
func confCORS(c *gin.Context) {
	// c.Header("server", WEBSERV_NAME)
	// Content-Security-Policy:
	// default-src 'self';
	// connect-src 'self' https://sentry.prod.mozaws.net;
	// font-src 'self' https://addons.cdn.mozilla.net;
	// frame-src 'self' https://ic.paypal.com https://paypal.com
	// img-src 'self' data: blob: https://www.paypal.com https://ssl.google-analytics.com
	// media-src https://videos.cdn.mozilla.net;
	// object-src 'none';
	// script-src 'self' https://addons.mozilla.org
	// style-src 'self' 'unsafe-inline' https://addons.cdn.mozilla.net;
	// report-uri /__cspreport__
	c.Header("Content-Security-Policy", `
	default-src 'self';
	connect-src 'self';
	font-src 'self' https://fonts.gstatic.com;
	frame-src 'self' https://www.google.com/recaptcha/ https://www.google.com/maps/;
	img-src 'self' https://lh3.googleusercontent.com/ https://images.unsplash.com data: blob: 'self' https://source.unsplash.com;
	object-src 'self';
	script-src 'self' 'unsafe-inline' 'unsafe-eval';
	style-src 'self' 'unsafe-inline' https://fonts.googleapis.com;
	`)
	if c.Request.Method == "OPTIONS" {
		if len(c.Request.Header["Access-Control-Request-Headers"]) > 0 {
			c.Header("Access-Control-Allow-Headers", c.Request.Header["Access-Control-Request-Headers"][0])
		}
		c.AbortWithStatus(http.StatusOK)
	}
}

// mwCaptcha is a placeholder middleware for captcha verification; the actual
// captcha serving code is still commented out.
func mwCaptcha() gin.HandlerFunc {
	return func(c *gin.Context) {
		// var w http.ResponseWriter = c.Writer
		// var req *http.Request = c.Req
		// captcha.Server(captcha.StdWidth, captcha.StdHeight)
		// before request
		c.Next()
		// after request
	}
}
// Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"errors"
	"fmt"
	"log"
	"sort"

	"github.com/pelletier/go-toml"
	"github.com/spf13/afero"
	"github.com/spf13/viper"
)

//go:generate mockgen -destination=../mocks/mock_profile.go -package=mocks github.com/mongodb/mongocli/internal/config Setter,Saver,SetSaver,Getter,Config

// profile is a named set of configuration values stored under that name in
// the viper-backed config file.
type profile struct {
	name      *string  // active profile name
	configDir string   // directory holding the TOML config file
	fs        afero.Fs // filesystem abstraction, swappable in tests
}

// Properties returns every configuration key a profile may contain.
func Properties() []string {
	return []string{
		projectID,
		orgID,
		service,
		publicAPIKey,
		privateAPIKey,
		opsManagerURL,
		baseURL,
		opsManagerCACertificate,
		opsManagerSkipVerify,
	}
}

// p is the package-wide default profile used by the package-level helpers.
var p = newProfile()

// Setter writes a single configuration value.
type Setter interface {
	Set(string, string)
}

// Saver persists the configuration to disk.
type Saver interface {
	Save() error
}

// SetSaver both writes and persists configuration values.
type SetSaver interface {
	Setter
	Saver
}

// Getter reads a single configuration value.
type Getter interface {
	GetString(string) string
}

// Config is the full read/write interface over a profile.
type Config interface {
	Setter
	Getter
	Saver
	Service() string
	PublicAPIKey() string
	PrivateAPIKey() string
	OpsManagerURL() string
	OpsManagerCACertificate() string
	OpsManagerSkipVerify() string
}

// Default returns the package-wide default profile.
func Default() Config {
	return p
}

// List returns the names of available profiles
func List() []string {
	m := viper.AllSettings()
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	// keys in maps are non deterministic, trying to give users a consistent output
	sort.Strings(keys)
	return keys
}

// Exists returns true if there are any set settings for the profile name.
// Exists reports whether any settings are stored under the given profile name.
func Exists(name string) bool {
	return len(viper.GetStringMap(name)) > 0
}

// newProfile builds the default profile; it aborts the process when the
// config home directory cannot be determined.
func newProfile() *profile {
	configDir, err := configHome()
	if err != nil {
		log.Fatal(err)
	}
	name := DefaultProfile
	np := &profile{
		name:      &name,
		configDir: configDir,
		fs:        afero.NewOsFs(),
	}
	return np
}

// Name returns the active profile name.
func Name() string { return p.Name() }
func (p *profile) Name() string {
	return *p.name
}

// SetName switches the active profile name.
// NOTE(review): the caller's pointer is retained, so later mutation of the
// pointed-to string changes the active profile — confirm this is intended.
func SetName(name *string) { p.SetName(name) }
func (p *profile) SetName(name *string) {
	p.name = name
}

// Set stores a value under "<profile>.<name>" in viper.
func Set(name, value string) { p.Set(name, value) }
func (p *profile) Set(name, value string) {
	viper.Set(fmt.Sprintf("%s.%s", *p.name, name), value)
}

// GetString looks the key up globally first (e.g. environment override) and
// falls back to the profile-scoped "<profile>.<name>" key.
func GetString(name string) string { return p.GetString(name) }
func (p *profile) GetString(name string) string {
	if viper.IsSet(name) && viper.GetString(name) != "" {
		return viper.GetString(name)
	}
	if p.name != nil {
		return viper.GetString(fmt.Sprintf("%s.%s", *p.name, name))
	}
	return ""
}

// Service get configured service, defaulting to CloudService when unset.
func Service() string { return p.Service() }
func (p *profile) Service() string {
	if viper.IsSet(service) {
		return viper.GetString(service)
	}
	serviceKey := fmt.Sprintf("%s.%s", *p.name, service)
	if viper.IsSet(serviceKey) {
		return viper.GetString(serviceKey)
	}
	return CloudService
}

// SetService set configured service
func SetService(v string) { p.SetService(v) }
func (p *profile) SetService(v string) {
	p.Set(service, v)
}

// PublicAPIKey get configured public api key
func PublicAPIKey() string { return p.PublicAPIKey() }
func (p *profile) PublicAPIKey() string {
	return p.GetString(publicAPIKey)
}

// SetPublicAPIKey set configured publicAPIKey
func SetPublicAPIKey(v string) { p.SetPublicAPIKey(v) }
func (p *profile) SetPublicAPIKey(v string) {
	p.Set(publicAPIKey, v)
}

// PrivateAPIKey get configured private api key
func PrivateAPIKey() string { return p.PrivateAPIKey() }
func (p *profile) PrivateAPIKey() string {
	return p.GetString(privateAPIKey)
}

// SetPrivateAPIKey set configured private api key
func SetPrivateAPIKey(v string) {
	p.SetPrivateAPIKey(v)
}
func (p *profile) SetPrivateAPIKey(v string) {
	p.Set(privateAPIKey, v)
}

// OpsManagerURL get configured ops manager base url
func OpsManagerURL() string { return p.OpsManagerURL() }
func (p *profile) OpsManagerURL() string {
	return p.GetString(opsManagerURL)
}

// SetOpsManagerURL set configured ops manager base url
func SetOpsManagerURL(v string) { p.SetOpsManagerURL(v) }
func (p *profile) SetOpsManagerURL(v string) {
	p.Set(opsManagerURL, v)
}

// OpsManagerCACertificate get configured ops manager CA certificate location
func OpsManagerCACertificate() string { return p.OpsManagerCACertificate() }
func (p *profile) OpsManagerCACertificate() string {
	return p.GetString(opsManagerCACertificate)
}

// OpsManagerSkipVerify gets whether TLS verification against the Ops Manager
// host should be skipped.
func OpsManagerSkipVerify() string { return p.OpsManagerSkipVerify() }
func (p *profile) OpsManagerSkipVerify() string {
	return p.GetString(opsManagerSkipVerify)
}

// ProjectID get configured project ID
func ProjectID() string { return p.ProjectID() }
func (p *profile) ProjectID() string {
	return p.GetString(projectID)
}

// SetProjectID sets the global project ID
func SetProjectID(v string) { p.SetProjectID(v) }
func (p *profile) SetProjectID(v string) {
	p.Set(projectID, v)
}

// OrgID get configured organization ID
func OrgID() string { return p.OrgID() }
func (p *profile) OrgID() string {
	return p.GetString(orgID)
}

// SetOrgID sets the global organization ID
func SetOrgID(v string) { p.SetOrgID(v) }
func (p *profile) SetOrgID(v string) {
	p.Set(orgID, v)
}

// IsAccessSet return true if API keys have been set up.
// For Ops Manager we also check for the base URL.
func IsAccessSet() bool { return p.IsAccessSet() }
func (p *profile) IsAccessSet() bool {
	isSet := p.PublicAPIKey() != "" && p.PrivateAPIKey() != ""
	if p.Service() == OpsManagerService {
		isSet = isSet && p.OpsManagerURL() != ""
	}
	return isSet
}

// GetConfigDescription returns a map describing the configuration,
// with API key values masked so they are safe to display.
func GetConfigDescription() map[string]string { return p.GetConfigDescription() }
func (p *profile) GetConfigDescription() map[string]string {
	settings := viper.GetStringMapString(p.Name())
	newSettings := make(map[string]string, len(settings))
	for k, v := range settings {
		if k == privateAPIKey || k == publicAPIKey {
			newSettings[k] = "redacted"
		} else {
			newSettings[k] = v
		}
	}
	return newSettings
}

// Delete deletes an existing configuration. The profiles are reloaded afterwards, as
// this edits the file directly.
func Delete() error { return p.Delete() }
func (p *profile) Delete() error {
	// Configuration needs to be deleted from toml, as viper doesn't support this yet.
	// FIXME :: change when https://github.com/spf13/viper/pull/519 is merged.
	configurationAfterDelete := viper.AllSettings()

	t, err := toml.TreeFromMap(configurationAfterDelete)
	if err != nil {
		return err
	}

	// Delete from the toml manually
	err = t.Delete(p.Name())
	if err != nil {
		return err
	}

	s := t.String()

	// NOTE(review): if fileFlags does not include O_TRUNC, writing content
	// shorter than the existing file leaves stale trailing bytes — confirm
	// against the fileFlags definition.
	f, err := p.fs.OpenFile(fmt.Sprintf("%s/%s.toml", p.configDir, ToolName), fileFlags, 0600)
	if err != nil {
		return err
	}

	if _, err := f.WriteString(s); err != nil {
		return err
	}

	// Force reload, so that viper has the new configuration
	return p.Load(true)
}

// Rename replaces the profile to a new profile name, overwriting any profile that existed before.
func Rename(newProfileName string) error { return p.Rename(newProfileName) }
func (p *profile) Rename(newProfileName string) error {
	// Configuration needs to be deleted from toml, as viper doesn't support this yet.
	// FIXME :: change when https://github.com/spf13/viper/pull/519 is merged.
	configurationAfterDelete := viper.AllSettings()

	t, err := toml.TreeFromMap(configurationAfterDelete)
	if err != nil {
		return err
	}

	t.Set(newProfileName, t.Get(p.Name()))

	err = t.Delete(p.Name())
	if err != nil {
		return err
	}

	s := t.String()

	// NOTE(review): same possible missing-truncation issue as Delete above.
	f, err := p.fs.OpenFile(fmt.Sprintf("%s/%s.toml", p.configDir, ToolName), fileFlags, 0600)
	if err != nil {
		return err
	}

	if _, err := f.WriteString(s); err != nil {
		return err
	}

	// Force reload, so that viper has the new configuration
	return p.Load(true)
}

// Load loads the configuration from disk
func Load() error { return p.Load(true) }
func (p *profile) Load(readEnvironmentVars bool) error {
	viper.SetConfigType(configType)
	viper.SetConfigName(ToolName)
	viper.SetConfigPermissions(0600)
	viper.AddConfigPath(p.configDir)

	if readEnvironmentVars {
		viper.SetEnvPrefix(EnvPrefix)
		viper.AutomaticEnv()
	}

	// TODO: review why this is not working as expected
	viper.RegisterAlias(baseURL, opsManagerURL)

	// If a config file is found, read it in.
	if err := viper.ReadInConfig(); err != nil {
		// ignore if it doesn't exists
		var e viper.ConfigFileNotFoundError
		if errors.As(err, &e) {
			return nil
		}
		return err
	}
	return nil
}

// Save the configuration to disk
func Save() error { return p.Save() }
func (p *profile) Save() error {
	exists, err := afero.DirExists(p.fs, p.configDir)
	if err != nil {
		return err
	}
	if !exists {
		// Create the config directory lazily on first save.
		err := p.fs.MkdirAll(p.configDir, 0700)
		if err != nil {
			return err
		}
	}
	configFile := fmt.Sprintf("%s/%s.toml", p.configDir, ToolName)
	return viper.WriteConfigAs(configFile)
}
package BLC

import "fmt"

// getBalance opens the blockchain, looks up the balance held by address and
// prints it to stdout.
func (cli *Cli) getBalance(address string) {
	fmt.Println("")
	chain := BlockChainObject()
	defer chain.DB.Close()
	balance := chain.GetBalance(address)
	fmt.Println(balance)
}
package creator

import (
	"context"
	"regexp"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/argoproj/argo/server/auth"
	"github.com/argoproj/argo/util/labels"
	"github.com/argoproj/argo/workflow/common"
)

// invalidLabelCharRe matches any character that is not allowed in the label
// value. Compiled once at package scope so Label does not recompile the
// pattern on every call.
var invalidLabelCharRe = regexp.MustCompile("[^-_.a-z0-9A-Z]")

// Label records the authenticated creator of obj as the creator label.
// The claim subject is sanitised (disallowed characters replaced with "-")
// and truncated to the 63-character label-value limit, keeping the suffix.
// If the context carries no claims, obj is left untouched.
func Label(ctx context.Context, obj metav1.Object) {
	claims := auth.GetClaimSet(ctx)
	if claims == nil {
		return
	}
	value := invalidLabelCharRe.ReplaceAllString(claims.Sub, "-")
	if len(value) > 63 {
		// Keep the last 63 characters; the suffix of a subject claim is
		// typically the more distinguishing part.
		value = value[len(value)-63:]
	}
	labels.Label(obj, common.LabelKeyCreator, value)
}
package main

import (
	"fmt"
	"io/ioutil"
	"itops/hpmsa_exporter/collector"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/common/log"
	"github.com/prometheus/common/version"
	kingpin "gopkg.in/alecthomas/kingpin.v2"
	yaml "gopkg.in/yaml.v2"
)

const (
	//VERSION of the exporter
	VERSION = "0.2.0"
)

var (
	listenAddress = kingpin.Flag(
		"web.listen-address",
		"Address to listen on for web interface and telemetry.",
	).Default(":9114").String()
	metricsPath = kingpin.Flag(
		"web.telemetry-path",
		"Path under which to expose metrics.",
	).Default("/metrics").String()
	configPath = kingpin.Flag(
		"config.file",
		"Path to the config file.",
	).Default("config.yml").String()
)

// main parses flags, wires up the landing page and the metrics handler, and
// serves until the listener fails.
func main() {
	log.AddFlags(kingpin.CommandLine)
	kingpin.Version(version.Print("hpmsa_exporter"))
	kingpin.HelpFlag.Short('h')
	kingpin.Parse()
	log.Infoln("Starting HPMSA exporter", version.Info())
	log.Infoln("Build context", version.BuildContext())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
	<head><title>HPMSA Exporter</title></head>
	<body>
	<h1> Exporter</h1>
	<p><a href="` + *metricsPath + `">Metrics</a></p>
	</body>
	</html>`))
	})
	http.HandleFunc(
		*metricsPath,
		prometheus.InstrumentHandlerFunc("metrics", handler),
	)
	log.Infoln("Listening on", *listenAddress)
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}

// handler serves a single scrape: it reads the mandatory "target" and
// optional "config" query parameters, loads the config and collects metrics
// from the target array.
func handler(w http.ResponseWriter, r *http.Request) {
	target := r.URL.Query()["target"]
	if len(target) == 0 {
		// Previously target[0] below panicked with index-out-of-range when
		// the parameter was missing.
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Missing 'target' query parameter"))
		return
	}
	config := r.URL.Query()["config"]
	// Use a request-local path rather than mutating the global configPath,
	// which was a data race between concurrent scrapes.
	path := *configPath
	if len(config) > 0 {
		path = config[0]
	}
	log.Debugln("collect query:", target)
	cfg, err := readCfg(path)
	if err != nil {
		// A bad config for one scrape must not kill the whole exporter
		// (this used to be log.Fatalf).
		log.Errorf("Could not read config file: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(fmt.Sprintf("Could not read config file: %s", err)))
		return
	}
	nc, err := collector.New(target[0], cfg.Username, cfg.Password, cfg.Metrics)
	if err != nil {
		log.Warnln("Couldn't create", err)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(fmt.Sprintf("Couldn't create %s", err)))
		return
	}
	registry := prometheus.NewRegistry()
	err = registry.Register(nc)
	if err != nil {
		log.Errorln("Couldn't register collector:", err)
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(fmt.Sprintf("Couldn't register collector: %s", err)))
		return
	}
	gatherers := prometheus.Gatherers{
		prometheus.DefaultGatherer,
		registry,
	}
	h := promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{})
	h.ServeHTTP(w, r)
}

// config is the YAML configuration for reaching the MSA arrays.
type config struct {
	Username string            `yaml:"username"`
	Password string            `yaml:"password"`
	Metrics  *collector.Metric `yaml:"metrics"`
}

// readCfg reads and unmarshals the YAML config file at path.
func readCfg(path string) (*config, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	c := &config{}
	err = yaml.Unmarshal(b, c)
	if err != nil {
		return nil, err
	}
	return c, nil
}

// filter reports whether name passes: with a non-empty filter set the name
// must be present in it; otherwise flag alone decides.
func filter(filters map[string]bool, name string, flag bool) bool {
	if len(filters) > 0 {
		return flag && filters[name]
	}
	return flag
}

func init() {
	prometheus.MustRegister(version.NewCollector("hpmsa_exporter"))
}
/* * @lc app=leetcode.cn id=1030 lang=golang * * [1030] 距离顺序排列矩阵单元格 */ package solution // @lc code=start // BFS func allCellsDistOrder(R int, C int, r0 int, c0 int) (ans [][]int) { dirs := [][]int{{-1, 0}, {1, 0}, {0, -1}, {0, 1}} // up, down, left, right q := [][]int{} q = append(q, []int{r0, c0}) visited := make([][]bool, R) for i := range visited { visited[i] = make([]bool, C) } visited[r0][c0] = true for len(q) > 0 { x, y := q[0][0], q[0][1] ans = append(ans, []int{x, y}) for _, dir := range dirs { newX, newY := x+dir[0], y+dir[1] if newX >= 0 && newX < R && newY >= 0 && newY < C && !visited[newX][newY] { q = append(q, []int{newX, newY}) visited[newX][newY] = true } } q = q[1:] } return ans } // @lc code=end
package cli

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"testing"

	"github.com/scaleway/scaleway-cli/pkg/commands"
	. "github.com/smartystreets/goconvey/convey"
)

// testHelpOutput asserts that the top-level `scw help` output contains the
// expected header, footer, options and public commands, and that no secret
// command is listed. err is the captured stderr, expected to be empty.
func testHelpOutput(out string, err string) {
	// headers & footers
	So(out, ShouldContainSubstring, "Usage: scw [OPTIONS] COMMAND [arg...]")
	So(out, ShouldContainSubstring, "Interact with Scaleway from the command line.")
	So(out, ShouldContainSubstring, "Run 'scw COMMAND --help' for more information on a command.")

	// options
	So(out, ShouldContainSubstring, "Options:")
	for _, option := range publicOptions {
		So(out, ShouldContainSubstring, " "+option)
	}

	// public commands
	So(out, ShouldContainSubstring, "Commands:")
	for _, command := range publicCommands {
		So(out, ShouldContainSubstring, " "+command)
	}

	// secret commands
	for _, command := range secretCommands {
		So(out, ShouldNotContainSubstring, " "+command)
	}

	// :lipstick:
	/*
		for _, line := range strings.Split(out, "\n") {
			So(line, shouldFitInTerminal)
		}
	*/

	// FIXME: count amount of options/commands, and panic if amount is different

	// Testing stderr
	So(err, ShouldEqual, "")
}

// testHelpCommandOutput asserts the per-command `scw help COMMAND` output:
// usage header and options section. err is currently unused.
func testHelpCommandOutput(command string, out string, err string) {
	// Header
	So(out, ShouldContainSubstring, fmt.Sprintf("Usage: scw %s", command))
	// FIXME: test description
	// FIXME: test parameters

	// Options
	So(out, ShouldContainSubstring, "Options:")
	So(out, ShouldContainSubstring, " -h, --help=false")
	// FIXME: test other options

	// Examples
	// FIXME: todo
	//So(out, ShouldContainSubstring, "Examples:")

	// :lipstick:
	/*
		for _, line := range strings.Split(out, "\n") {
			So(line, shouldFitInTerminal)
		}
	*/
}

// TestHelp exercises the help system twice: in-process via Start(), and
// through the built `scw` binary via os/exec.
func TestHelp(t *testing.T) {
	Convey("Testing golang' `start(\"help\", ...)`", t, func() {
		Convey("start(\"help\")", func() {
			stdout := bytes.Buffer{}
			stderr := bytes.Buffer{}
			streams := commands.Streams{
				Stdin:  os.Stdin,
				Stdout: &stdout,
				Stderr: &stderr,
			}
			ec, err := Start([]string{"help"}, &streams)
			So(ec, ShouldEqual, 0)
			So(err, ShouldBeNil)
			testHelpOutput(stdout.String(), stderr.String())
		})

		cmds := append(publicCommands, secretCommands...)
		// NOTE(review): `command` is captured by the Convey closures below;
		// on Go < 1.22 a deferred execution of those closures would see the
		// last loop value — confirm goconvey runs them synchronously.
		for _, command := range cmds {
			// FIXME: test 'start(COMMAND, "--help")'
			if command == "help" {
				continue
			}
			Convey(fmt.Sprintf("start(\"help\", \"%s\")", command), func() {
				stdout := bytes.Buffer{}
				stderr := bytes.Buffer{}
				streams := commands.Streams{
					Stdin:  os.Stdin,
					Stdout: &stdout,
					Stderr: &stderr,
				}
				ec, err := Start([]string{"help", command}, &streams)
				So(ec, ShouldEqual, 1)
				So(err, ShouldBeNil)
				testHelpCommandOutput(command, stdout.String(), stderr.String())

				// secondary help usage
				// FIXME: should check for 'scw login' first
				/*
					if command != "help" { // FIXME: this should works for "help" too
						secondaryStdout := bytes.Buffer{}
						secondaryStderr := bytes.Buffer{}
						secondaryStreams := commands.Streams{
							Stdin:  os.Stdin,
							Stdout: &secondaryStdout,
							Stderr: &secondaryStderr,
						}
						secondEc, secondErr := Start([]string{command, "--help"}, &secondaryStreams)
						So(ec, ShouldEqual, secondEc)
						//So(outStdout, ShouldEqual, secondOut)
						So(fmt.Sprintf("%v", err), ShouldEqual, fmt.Sprintf("%v", secondErr))
					}
				*/
			})
		}
	})

	Convey("Testing shell' `scw help`", t, func() {
		Convey("scw help", func() {
			cmd := exec.Command(scwcli, "help")
			out, ec, err := runCommandWithOutput(cmd)
			stderr := "" // FIXME: get real stderr

			// exit code
			So(ec, ShouldEqual, 0)
			So(err, ShouldBeNil)

			// streams
			testHelpOutput(out, stderr)
		})

		cmds := append(publicCommands, secretCommands...)
		for _, command := range cmds {
			// FIXME: test 'scw COMMAND --help'
			Convey(fmt.Sprintf("scw help %s", command), func() {
				cmd := exec.Command(scwcli, "help", command)
				out, ec, err := runCommandWithOutput(cmd)
				stderr := "" // FIXME: get real stderr

				// exit code
				So(ec, ShouldEqual, 1)
				So(fmt.Sprintf("%s", err), ShouldEqual, "exit status 1")

				// streams
				testHelpCommandOutput(command, out, stderr)

				// secondary help usage
				// FIXME: should check for 'scw login' first
				/*
					if command != "help" { // FIXME: this should works for "help" too
						secondCmd := exec.Command(scwcli, command, "--help")
						secondOut, secondEc, secondErr := runCommandWithOutput(secondCmd)
						So(out, ShouldEqual, secondOut)
						So(ec, ShouldEqual, secondEc)
						So(fmt.Sprintf("%v", err), ShouldEqual, fmt.Sprintf("%v", secondErr))
					}
				*/
			})
		}

		Convey("Unknown command", func() {
			cmd := exec.Command(scwcli, "boogie")
			out, ec, err := runCommandWithOutput(cmd)
			So(out, ShouldContainSubstring, "scw: unknown subcommand boogie")
			So(ec, ShouldEqual, 1)
			So(fmt.Sprintf("%s", err), ShouldEqual, "exit status 1")
		})
	})
}
package main

import (
	"flag"
	"fmt"
	"os"
)

// usage prints the full command-line help text (usage line, options, input
// file format and output column description) to the flag package's
// configured output.
func usage() {
	// pf writes a formatted line; a must be spread with `...`, otherwise
	// Fprintf receives the whole []interface{} as a single argument and the
	// program name is printed as "[name]".
	pf := func(format string, a ...interface{}) {
		fmt.Fprintf(flag.CommandLine.Output(), format, a...)
	}
	// pln writes a plain line.
	pln := func(s string) {
		fmt.Fprintln(flag.CommandLine.Output(), s)
	}
	pf("Usage of %s [options] <filename>\n\n", os.Args[0])
	pln("Options:\n")
	flag.PrintDefaults()
	pln("")
	pln("  Read more about date and time layouts here: https://golang.org/pkg/time/#Parse")
	pln("")
	pln("File format:\n")
	pln("  Each non-empty line of the input file specified by <filename> represents an entry.")
	pln("  An entry consists of a date and a duration, separated by a space.")
	pln("  The duration is either an absolute value or a time range.")
	pln("    Absolute value: 6h12m0s")
	pln("    Time range:     0810-1422")
	pln("")
	pln("  Special case:")
	pln("    The last entry of a file can be an open range (e.g. `0930-`) and trk")
	pln("    will assume you are still working, using the current time to calculate")
	pln("    the duration.")
	pln("")
	pln("  Comments:")
	pln("    Everything following a number sign (#) on the same line is ignored.")
	pln("")
	pln("  Examples:")
	pln("    19-9-25 0950-1830")
	pln("    19-9-26 8h # took a day off")
	pln("    19-9-27 1015-")
	pln("")
	pln("Output:\n")
	pln("  Output consists of seven columns:\n")
	pln("    Date\tDate of the entry")
	pln("    From\tStart time of the entry")
	pln("    To\tEnd time of the entry")
	pln("    Dur.\tDuration of the entry")
	pln("    Day\tHow much you worked that day (see special case above)")
	pln("    Week\tHow much you worked that week")
	pln("    Total\tIn combination with -weekly option, keeps a running")
	pln("    \ttotal of your overtime")
	pln("")
}
package concurrent

import (
	"container/list"
	"sync"
)

// LinkedQueue is a goroutine-safe FIFO queue backed by a doubly linked list.
type LinkedQueue struct {
	store *list.List
	// BUG FIX: was `mutex *sync.Mutex`, which NewLinkedQueue never
	// initialized — every Push/Pop panicked on the nil pointer. A value
	// Mutex is usable in its zero state.
	mutex sync.Mutex
}

// NewLinkedQueue creates an empty queue.
func NewLinkedQueue() *LinkedQueue {
	return &LinkedQueue{store: list.New()}
}

// Push appends e at the tail of the queue. It never fails; the error
// return is kept for interface compatibility with callers.
func (q *LinkedQueue) Push(e interface{}) error {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	q.store.PushBack(e)
	return nil
}

// Pop removes and returns the element at the head of the queue, or nil
// when the queue is empty.
// BUG FIX: previously the head element was returned but never removed,
// and an empty queue caused a nil dereference.
func (q *LinkedQueue) Pop() interface{} {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	front := q.store.Front()
	if front == nil {
		return nil
	}
	q.store.Remove(front)
	return front.Value
}

// Size returns the number of queued elements.
func (q *LinkedQueue) Size() int64 {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	return int64(q.store.Len())
}
package Problem0483

import (
	"math"
	"strconv"
)

// smallestGoodBase returns (as a decimal string) the smallest base k >= 2
// in which n is written as all ones, i.e. num = k^m + k^(m-1) + ... + k + 1.
func smallestGoodBase(n string) string {
	num, _ := strconv.ParseUint(n, 10, 64)

	// num = k^m + k^(m-1) + ... + k + 1.
	// We want the smallest k.
	// As k gets smaller, m gets larger.
	// The smallest possible k is 2 (binary),
	// so when k == 2, m reaches its maximum mMax.
	mMax := int(math.Log2(float64(num)))

	// Check downward from mMax whether some k fits the equation for that m.
	for m := mMax; m >= 1; m-- {
		// k^m < num = k^m + k^(m-1) + ... + k + 1.
		// Expanding (k+1)^m shows
		// (k+1)^m > num = k^m + k^(m-1) + ... + k + 1.
		// Combining both:
		// k^m < num < (k+1)^m, i.e.
		// k < num^(1/m) < k+1, i.e.
		// k == int(num^(1/m)).
		k := uint64(math.Pow(float64(num), 1.0/float64(m)))
		// k is now pinned down; just verify it satisfies the equation.
		if isFound(num, k, m) {
			return strconv.FormatUint(k, 10)
		}
	}

	// m == 1 always works with k = num-1 (num = (num-1) + 1).
	return strconv.FormatUint(num-1, 10)
}

// isFound reports whether num == k^m + k^(m-1) + ... + k + 1.
func isFound(num, k uint64, m int) bool {
	sum := uint64(1)
	a := uint64(1)
	for i := 1; i <= m; i++ {
		a *= k
		sum += a
	}
	return sum == num
}
package uikit import ( "github.com/maxence-charriere/go-app/v7/pkg/app" ) // UIAccordionItem is a component type UIAccordionItem interface { app.UI // Class adds a CSS class to the section. Class(c string) UIAccordionItem // Title defines and styles the toggle for accordion item Title(v string) UIAccordionItem // Content sets the main content for accordion item. Content(elems ...app.UI) UIAccordionItem } type accordionItem struct { app.Compo Iclass string Ititle string Icontent []app.UI } func (a *accordionItem) Class(c string) UIAccordionItem { if a.Iclass != "" { a.Iclass += " " } a.Iclass += c return a } func (a *accordionItem) Title(v string) UIAccordionItem { a.Ititle = v return a } func (a *accordionItem) Content(elems ...app.UI) UIAccordionItem { a.Icontent = app.FilterUIElems(elems...) return a } // AccordionItem returns a UI Accordion Item func AccordionItem() UIAccordionItem { return &accordionItem{} } func (a *accordionItem) Render() app.UI { return app.Li(). Class(a.Iclass). Body( app.A(). Class("uk-accordion-title"). Href("#"). // TODO fix this Text(a.Ititle), app.Div(). Class("uk-accordion-content"). Body( a.Icontent..., ), ) }
package bytemap type valuesIF interface { get(idx int) interface{} } type interfaceValues []interface{} func (iv interfaceValues) get(idx int) interface{} { return iv[idx] } type floatValues []float64 func (fv floatValues) get(idx int) interface{} { return fv[idx] }
package kvraft

import (
	"../labgob"
	"../labrpc"
	"log"
	"../raft"
	"sync"
	"sync/atomic"
	"bytes"
	"time"
)

const Debug = 0

// DPrintf logs via log.Printf only when the package-level Debug flag is non-zero.
func DPrintf(format string, a ...interface{}) (n int, err error) {
	if Debug > 0 {
		log.Printf(format, a...)
	}
	return
}

// Supported operation methods.
const (
	GET    = "Get"
	PUT    = "Put"
	APPEND = "Append"
)

// Op is a single client operation replicated through Raft.
type Op struct {
	Method     string // GET, PUT or APPEND
	Key        string
	Value      string
	ClientId   int64 // issuing client, for duplicate detection
	MsgId      int64 // per-client message id, for duplicate detection
	RequestSeq int64 // random id keying the notification channel for this request
}

// KVServer applies replicated operations to the store and is meant to
// detect duplicate client requests.
type KVServer struct {
	mu      sync.Mutex
	me      int
	rf      *raft.Raft
	applyCh chan raft.ApplyMsg
	dead    int32 // set by Kill()

	maxraftstate int // snapshot if log grows this big

	data        map[string]string
	persister   *raft.Persister
	lastApplied map[int64]int64 // most recent MsgId seen per client
	notifyData  map[int64]chan NotifyMsg
}

// SendOpToRaft submits op to Raft and blocks until the op has been applied
// (notified via the per-request channel) or WaitTimeout elapses.
func (kv *KVServer) SendOpToRaft(op Op) (res NotifyMsg) {
	waitTimer := time.NewTimer(WaitTimeout)
	defer waitTimer.Stop()
	_, _, isLeader := kv.rf.Start(op)
	if !isLeader {
		DPrintf("%v isn't leader, fail start ...\n", kv.me)
		res.Err = ErrWrongLeader
		return
	}
	kv.mu.Lock()
	// NOTE(review): this channel is unbuffered; WaitApplyCh sends on it
	// while holding kv.mu. If this waiter times out between the lookup and
	// the send, the applier blocks forever holding the lock — a buffered
	// channel (cap 1) would avoid the deadlock. Verify against the tests.
	ch := make(chan NotifyMsg)
	kv.notifyData[op.RequestSeq] = ch
	kv.mu.Unlock()
	select {
	case res = <-ch:
		DPrintf("%v finish apply %v ...\n", kv.me, op)
		kv.mu.Lock()
		delete(kv.notifyData, op.RequestSeq)
		kv.mu.Unlock()
		return
	case <-waitTimer.C:
		DPrintf("%v wait timeout...\n", kv.me)
		kv.mu.Lock()
		delete(kv.notifyData, op.RequestSeq)
		kv.mu.Unlock()
		res.Err = ErrTimeout
		return
	}
}

// Get serves a client read by replicating the read through Raft so the
// value reflects all previously committed writes.
// Original note: updates the log submitted by clerks; if requests are not
// consecutive, do not update.
func (kv *KVServer) Get(args *GetArgs, reply *GetReply) {
	DPrintf("%v receive Get : %v from %v \n", kv.me, args.Key, args.ClerkId)
	_, isLeader := kv.rf.GetState()
	if !isLeader {
		reply.Err = ErrWrongLeader
		reply.IsLeader = false
		return
	}
	operation := Op{
		Method:     GET,
		Key:        args.Key,
		ClientId:   args.ClerkId,
		MsgId:      args.MsgId,
		RequestSeq: nrand(),
	}
	res := kv.SendOpToRaft(operation)
	// NOTE(review): res.Value is never copied into the reply — if GetReply
	// carries a Value field the fetched value is dropped here. Also, the
	// condition below clears IsLeader on every non-ErrWrongLeader result,
	// which looks inverted; confirm against the clerk's retry logic.
	reply.Err = res.Err
	if res.Err != ErrWrongLeader {
		reply.IsLeader = false
	}
	// todo: 当 kvserver 不属于多数服务器,则不应该完成 Get()
}

// PutAppend serves a client write (Put or Append) by replicating it
// through Raft; leadership is checked implicitly by rf.Start.
func (kv *KVServer) PutAppend(args *PutAppendArgs, reply *PutAppendReply) {
	DPrintf("%v receive PutAppend : %v from %v \n", kv.me, args.Key, args.ClerkId)
	// Only reads perform an explicit leader check; writes rely on
	// SendOpToRaft's Start() returning !isLeader.
	operation := Op{
		Method:     args.Op,
		Key:        args.Key,
		Value:      args.Value,
		ClientId:   args.ClerkId,
		MsgId:      args.MsgId,
		RequestSeq: nrand(),
	}
	res := kv.SendOpToRaft(operation)
	reply.Err = res.Err
}

// WaitApplyCh is the apply loop: it consumes committed messages from Raft,
// applies them to the in-memory store, snapshots when the log grows large,
// and notifies any RPC handler waiting on the request.
func (kv *KVServer) WaitApplyCh() {
	DPrintf("%v is waiting for applyCh ... \n", kv.me)
	for msg := range kv.applyCh {
		// A non-command message carries a snapshot to install.
		if !msg.CommandValid {
			kv.mu.Lock()
			kv.ApplySnapshot(kv.persister.ReadSnapshot())
			kv.mu.Unlock()
			continue
		}
		operation := msg.Command.(Op)
		var repeated bool
		// NOTE(review): kv.lastApplied is read here without holding kv.mu,
		// and it is never written after an op is applied (only
		// ApplySnapshot populates it), so duplicate detection likely never
		// fires between snapshots. Verify and move the check under the
		// lock, recording operation.MsgId after a successful apply.
		if lastMsgId, ok := kv.lastApplied[operation.ClientId]; ok {
			if lastMsgId == operation.MsgId {
				DPrintf("%v get receive repeated msg ... \n", kv.me)
				repeated = true
			}
		} else {
			repeated = false
		}
		kv.mu.Lock()
		if !repeated {
			if operation.Method == PUT {
				kv.data[operation.Key] = operation.Value
			} else if operation.Method == APPEND {
				kv.data[operation.Key] += operation.Value
			}
		}
		kv.SaveSnapshot(msg.CommandIndex)
		if ch, ok := kv.notifyData[operation.RequestSeq]; ok {
			// NOTE(review): unbuffered send while holding kv.mu — see the
			// deadlock note in SendOpToRaft.
			ch <- NotifyMsg{
				Err:   OK,
				Value: kv.data[operation.Key],
			}
		}
		kv.mu.Unlock()
	}
}

//
// the tester calls Kill() when a KVServer instance won't
// be needed again. for your convenience, we supply
// code to set rf.dead (without needing a lock),
// and a killed() method to test rf.dead in
// long-running loops. you can also add your own
// code to Kill(). you're not required to do anything
// about this, but it may be convenient (for example)
// to suppress debug output from a Kill()ed instance.
//
func (kv *KVServer) Kill() {
	atomic.StoreInt32(&kv.dead, 1)
	kv.rf.Kill()
	// Your code here, if desired.
}

// killed reports whether Kill() has been called on this server.
func (kv *KVServer) killed() bool {
	z := atomic.LoadInt32(&kv.dead)
	return z == 1
}

//
// Snapshotting (Lab 3b).
//

// SaveSnapshot serializes the store and dedup table and asks Raft to
// discard log entries up to idx, once the Raft state exceeds maxraftstate.
func (kv *KVServer) SaveSnapshot(idx int) {
	// maxraftstate == -1 disables snapshotting entirely.
	if kv.persister.RaftStateSize() < kv.maxraftstate || kv.maxraftstate == -1 {
		return
	}
	DPrintf("%v start saving snapshot, raft size is %v", kv.me, kv.persister.RaftStateSize())
	w := new(bytes.Buffer)
	e := labgob.NewEncoder(w)
	if err := e.Encode(kv.data); err != nil {
		panic(err)
	}
	if err := e.Encode(kv.lastApplied); err != nil {
		panic(err)
	}
	kv.rf.DiscardPreviousLog(idx, w.Bytes())
}

// ApplySnapshot restores the store and dedup table from a serialized
// snapshot; an empty snapshot is ignored.
func (kv *KVServer) ApplySnapshot(data []byte) {
	if len(data) < 1 || data == nil {
		return
	}
	DPrintf("%v start reading snapshot ...", kv.me)
	r := bytes.NewBuffer(data)
	d := labgob.NewDecoder(r)
	var lastApplied map[int64]int64
	var kvdata map[string]string
	// Decode order must match the encode order in SaveSnapshot.
	if d.Decode(&kvdata) != nil ||
		d.Decode(&lastApplied) != nil {
		panic("read snapshot err")
	} else {
		kv.lastApplied = lastApplied
		kv.data = kvdata
	}
}

//
// servers[] contains the ports of the set of
// servers that will cooperate via Raft to
// form the fault-tolerant key/value service.
// me is the index of the current server in servers[].
// the k/v server should store snapshots through the underlying Raft
// implementation, which should call persister.SaveStateAndSnapshot() to
// atomically save the Raft state along with the snapshot.
// the k/v server should snapshot when Raft's saved state exceeds maxraftstate bytes,
// in order to allow Raft to garbage-collect its log. if maxraftstate is -1,
// you don't need to snapshot.
// StartKVServer() must return quickly, so it should start goroutines
// for any long-running work.
//
func StartKVServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister, maxraftstate int) *KVServer {
	// call labgob.Register on structures you want
	// Go's RPC library to marshall/unmarshall.
	labgob.Register(Op{})

	kv := new(KVServer)
	kv.me = me
	kv.maxraftstate = maxraftstate
	kv.applyCh = make(chan raft.ApplyMsg)
	kv.rf = raft.Make(servers, me, persister, kv.applyCh)
	kv.dead = 0
	kv.data = make(map[string]string)
	kv.persister = persister
	kv.lastApplied = map[int64]int64{}
	kv.notifyData = map[int64]chan NotifyMsg{}
	DPrintf("reading snapshot ...")
	kv.ApplySnapshot(kv.persister.ReadSnapshot())
	go kv.WaitApplyCh()
	return kv
}
package log

import (
	"bytes"
	"io"
	"strings"
	"testing"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zaptest/observer"
)

// TestLog verifies that the plain-text logger writes Printf output to the
// writer configured via SetOutput.
func TestLog(t *testing.T) {
	f := &bytes.Buffer{}
	SetLogJSON(false)
	SetOutput(f)
	Printf("hello %v", "everyone")
	if !strings.HasSuffix(f.String(), "hello everyone\n") {
		t.Fatal("fail")
	}
}

// TestLogJSON table-drives every level and printf-style entry point of the
// JSON logger against an in-memory zap observer core, asserting both the
// rendered message and the zap level of the observed entry.
func TestLogJSON(t *testing.T) {
	SetLogJSON(true)
	Build("")

	// tcase describes one logging entry point under test.
	type tcase struct {
		level  int    // verbosity configured via SetLevel before the call
		format string // non-empty selects the printf-style variant (fops)
		args   string // single argument passed to the call
		ops    func(...interface{})         // plain variant under test
		fops   func(string, ...interface{}) // printf variant under test
		expMsg string        // expected rendered message
		expLvl zapcore.Level // expected zap level of the entry
	}

	// fn builds a subtest that installs a fresh observer logger, invokes
	// the entry point, and checks the first observed record.
	fn := func(tc tcase) func(*testing.T) {
		return func(t *testing.T) {
			observedZapCore, observedLogs := observer.New(zap.DebugLevel)
			Set(zap.New(observedZapCore).Sugar())
			SetLevel(tc.level)
			if tc.format != "" {
				tc.fops(tc.format, tc.args)
			} else {
				tc.ops(tc.args)
			}
			if observedLogs.Len() < 1 {
				t.Fatal("fail")
			}
			allLogs := observedLogs.All()
			if allLogs[0].Message != tc.expMsg {
				t.Fatal("fail")
			}
			if allLogs[0].Level != tc.expLvl {
				t.Fatal("fail")
			}
		}
	}

	tests := map[string]tcase{
		"Print": {
			level: 1,
			args:  "Print json logger",
			ops: func(args ...interface{}) {
				Print(args...)
			},
			expMsg: "Print json logger",
			expLvl: zapcore.InfoLevel,
		},
		"Printf": {
			level:  1,
			format: "Printf json %v",
			args:   "logger",
			fops: func(format string, args ...interface{}) {
				Printf(format, args...)
			},
			expMsg: "Printf json logger",
			expLvl: zapcore.InfoLevel,
		},
		"Info": {
			level: 1,
			args:  "Info json logger",
			ops: func(args ...interface{}) {
				Info(args...)
			},
			expMsg: "Info json logger",
			expLvl: zapcore.InfoLevel,
		},
		"Infof": {
			level:  1,
			format: "Infof json %v",
			args:   "logger",
			fops: func(format string, args ...interface{}) {
				Infof(format, args...)
			},
			expMsg: "Infof json logger",
			expLvl: zapcore.InfoLevel,
		},
		"Debug": {
			level: 3,
			args:  "Debug json logger",
			ops: func(args ...interface{}) {
				Debug(args...)
			},
			expMsg: "Debug json logger",
			expLvl: zapcore.DebugLevel,
		},
		"Debugf": {
			level:  3,
			format: "Debugf json %v",
			args:   "logger",
			fops: func(format string, args ...interface{}) {
				Debugf(format, args...)
			},
			expMsg: "Debugf json logger",
			expLvl: zapcore.DebugLevel,
		},
		"Warn": {
			level: 2,
			args:  "Warn json logger",
			ops: func(args ...interface{}) {
				Warn(args...)
			},
			expMsg: "Warn json logger",
			expLvl: zapcore.WarnLevel,
		},
		"Warnf": {
			level:  2,
			format: "Warnf json %v",
			args:   "logger",
			fops: func(format string, args ...interface{}) {
				Warnf(format, args...)
			},
			expMsg: "Warnf json logger",
			expLvl: zapcore.WarnLevel,
		},
		"Error": {
			level: 1,
			args:  "Error json logger",
			ops: func(args ...interface{}) {
				Error(args...)
			},
			expMsg: "Error json logger",
			expLvl: zapcore.ErrorLevel,
		},
		"Errorf": {
			level:  1,
			format: "Errorf json %v",
			args:   "logger",
			fops: func(format string, args ...interface{}) {
				Errorf(format, args...)
			},
			expMsg: "Errorf json logger",
			expLvl: zapcore.ErrorLevel,
		},
		"Http": {
			level: 1,
			args:  "Http json logger",
			ops: func(args ...interface{}) {
				HTTP(args...)
			},
			expMsg: "Http json logger",
			expLvl: zapcore.InfoLevel,
		},
		"Httpf": {
			level:  1,
			format: "Httpf json %v",
			args:   "logger",
			fops: func(format string, args ...interface{}) {
				HTTPf(format, args...)
			},
			expMsg: "Httpf json logger",
			expLvl: zapcore.InfoLevel,
		},
	}

	for name, tc := range tests {
		t.Run(name, fn(tc))
	}
}

// BenchmarkLogPrintf measures the plain-text Printf path with output discarded.
func BenchmarkLogPrintf(t *testing.B) {
	SetLogJSON(false)
	SetLevel(1)
	SetOutput(io.Discard)
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		Printf("X %s", "Y")
	}
}

// BenchmarkLogJSONPrintf measures the JSON Printf path against a discard sink.
func BenchmarkLogJSONPrintf(t *testing.B) {
	SetLogJSON(true)
	SetLevel(1)
	ec := zap.NewProductionEncoderConfig()
	ec.EncodeDuration = zapcore.NanosDurationEncoder
	ec.EncodeTime = zapcore.EpochNanosTimeEncoder
	enc := zapcore.NewJSONEncoder(ec)
	logger := zap.New(
		zapcore.NewCore(
			enc,
			zapcore.AddSync(io.Discard),
			zap.DebugLevel,
		)).Sugar()
	Set(logger)
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		Printf("X %s", "Y")
	}
}
package streamdal

import (
	"encoding/json"
	"fmt"

	"github.com/pkg/errors"
)

// DestinationOutput is used for displaying destinations as a table
type DestinationOutput struct {
	Name     string `json:"name" header:"Name"`
	ID       string `json:"id" header:"Destination ID"`
	Type     string `json:"type" header:"Type"`
	Archived bool   `json:"archived" header:"Is Archived"`
}

var (
	errDestinationsFailed      = errors.New("unable to get list of destinations")
	errNoDestinations          = errors.New("you have no destinations")
	errCreateDestinationFailed = errors.New("failed to create destination")
)

// ListDestinations lists all of an account's replay destinations
func (b *Streamdal) ListDestinations() error {
	output, err := b.listDestinations()
	if err != nil {
		return err
	}

	b.Printer(output)

	return nil
}

// listDestinations fetches and decodes the account's destinations,
// returning errNoDestinations when the account has none.
func (b *Streamdal) listDestinations() ([]DestinationOutput, error) {
	res, _, err := b.Get("/v1/destination", nil)
	if err != nil {
		return nil, errDestinationsFailed
	}

	output := make([]DestinationOutput, 0)

	err = json.Unmarshal(res, &output)
	if err != nil {
		return nil, errDestinationsFailed
	}

	if len(output) == 0 {
		return nil, errNoDestinations
	}

	return output, nil
}

// createDestination posts a new destination of dstType built from CLI
// options and returns the decoded API response.
func (b *Streamdal) createDestination(dstType string) (*DestinationOutput, error) {
	p := map[string]interface{}{
		"type":     b.Opts.Streamdal.Create.Destination.XApiDestinationType,
		"name":     b.Opts.Streamdal.Create.Destination.Name,
		"notes":    b.Opts.Streamdal.Create.Destination.Notes,
		"metadata": b.getDestinationMetadata(dstType),
	}

	res, code, err := b.Post("/v1/destination", p)
	if err != nil {
		return nil, errCreateDestinationFailed
	}

	if code > 299 {
		errResponse := &BlunderErrorResponse{}
		if err := json.Unmarshal(res, errResponse); err != nil {
			return nil, errCreateDestinationFailed
		}

		// Log each field-level validation error before failing.
		for _, e := range errResponse.Errors {
			err := fmt.Errorf("%s: '%s' %s", errCreateDestinationFailed, e.Field, e.Message)
			b.Log.Error(err)
		}

		return nil, fmt.Errorf("received a non-200 response (%d) from API", code)
	}

	createdDestination := &DestinationOutput{}
	if err := json.Unmarshal(res, createdDestination); err != nil {
		// BUG FIX: previously returned errCreateCollectionFailed — a
		// copy-paste from the collections code path; this is the
		// destination creation path.
		return nil, errCreateDestinationFailed
	}

	return createdDestination, nil
}

// CreateDestination converts the CLI destination type to the API type,
// creates the destination, and logs the created ID.
func (b *Streamdal) CreateDestination(dstType string) error {
	apiDestinationType, err := convertDestinationType(dstType)
	if err != nil {
		return errors.Wrap(err, "unable to convert destination type")
	}

	b.Opts.Streamdal.Create.Destination.XApiDestinationType = apiDestinationType

	destination, err := b.createDestination(dstType)
	if err != nil {
		return err
	}

	b.Log.Infof("Created %s destination %s!\n", b.Opts.Streamdal.Create.Destination.XApiDestinationType, destination.ID)

	return nil
}

// convertDestinationType maps CLI destination names to API type names.
func convertDestinationType(dstType string) (string, error) {
	switch dstType {
	case "kafka":
		return "kafka", nil
	case "http":
		return "http", nil
	case "aws-sqs":
		return "sqs", nil
	case "rabbit":
		return "rmq", nil
	default:
		return "", fmt.Errorf("unrecognized destination type '%s'", dstType)
	}
}

// getDestinationMetadata dispatches to the per-type metadata builder;
// returns nil for unknown types.
func (b *Streamdal) getDestinationMetadata(destType string) map[string]interface{} {
	switch destType {
	case "kafka":
		return b.getDestinationMetadataKafka()
	case "http":
		return b.getDestinationMetadataHTTP()
	case "aws-sqs":
		return b.getDestinationMetadataSQS()
	case "rabbit":
		return b.getDestinationMetadataRabbitMQ()
	}

	return nil
}

// getDestinationMetadataKafka builds Kafka connection metadata.
// NOTE(review): Topics[0] panics if no topic flag was provided — presumably
// validated upstream by the CLI parser; confirm.
func (b *Streamdal) getDestinationMetadataKafka() map[string]interface{} {
	return map[string]interface{}{
		"topic":        b.Opts.Streamdal.Create.Destination.Kafka.Args.Topics[0],
		"address":      b.Opts.Streamdal.Create.Destination.Kafka.XConn.Address,
		"use_tls":      b.Opts.Streamdal.Create.Destination.Kafka.XConn.UseTls,
		"insecure_tls": b.Opts.Streamdal.Create.Destination.Kafka.XConn.TlsSkipVerify,
		"sasl_type":    b.Opts.Streamdal.Create.Destination.Kafka.XConn.SaslType,
		"username":     b.Opts.Streamdal.Create.Destination.Kafka.XConn.SaslUsername,
		"password":     b.Opts.Streamdal.Create.Destination.Kafka.XConn.SaslPassword,
	}
}

// getDestinationMetadataHTTP builds HTTP replay metadata, converting the
// header map into a list of single-entry maps as the API expects.
func (b *Streamdal) getDestinationMetadataHTTP() map[string]interface{} {
	headers := make([]map[string]string, 0)
	for k, v := range b.Opts.Streamdal.Create.Destination.Http.Headers {
		headers = append(headers, map[string]string{k: v})
	}

	return map[string]interface{}{
		"url":     b.Opts.Streamdal.Create.Destination.Http.Url,
		"headers": headers,
	}
}

// getDestinationMetadataSQS builds AWS SQS replay metadata.
func (b *Streamdal) getDestinationMetadataSQS() map[string]interface{} {
	return map[string]interface{}{
		"aws_account_id": b.Opts.Streamdal.Create.Destination.AwsSqs.Args.RemoteAccountId,
		"queue_name":     b.Opts.Streamdal.Create.Destination.AwsSqs.Args.QueueName,
	}
}

// getDestinationMetadataRabbitMQ builds RabbitMQ replay metadata.
func (b *Streamdal) getDestinationMetadataRabbitMQ() map[string]interface{} {
	return map[string]interface{}{
		"dsn":                  b.Opts.Streamdal.Create.Destination.Rabbit.XConn.Address,
		"exchange":             b.Opts.Streamdal.Create.Destination.Rabbit.Args.ExchangeName,
		"routing_key":          b.Opts.Streamdal.Create.Destination.Rabbit.Args.RoutingKey,
		"exchange_type":        b.Opts.Streamdal.Create.Destination.Rabbit.Args.ExchangeType,
		"exchange_declare":     b.Opts.Streamdal.Create.Destination.Rabbit.Args.ExchangeDeclare,
		"exchange_durable":     b.Opts.Streamdal.Create.Destination.Rabbit.Args.ExchangeDurable,
		"exchange_auto_delete": b.Opts.Streamdal.Create.Destination.Rabbit.Args.ExchangeAutoDelete,
	}
}
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"time"

	"github.com/pandada8/logd/lib/common"
	"github.com/pandada8/logd/lib/dumper"

	"github.com/DataDog/zstd"
	"github.com/go-redis/redis"
	"github.com/spf13/viper"
)

// DumperBridge drains Redis list keys into zstd-compressed dump files and
// hands them to the dumper configured for each key.
type DumperBridge struct {
	redis        *redis.Client
	redisCluster *redis.ClusterClient
	isCluster    bool
	limit        int // backlog threshold that triggers a dump
	worker       *common.Worker
	dumpers      map[string]*dumper.Dumper
}

// NewDumperBridge builds a bridge from viper configuration, connecting to
// a single Redis node or a cluster depending on the shape of "redis".
func NewDumperBridge() *DumperBridge {
	var (
		isCluster          bool
		redisClient        *redis.Client
		redisClusterClient *redis.ClusterClient
	)
	switch viper.Get("redis").(type) {
	case string:
		isCluster = false
		redisClient = redis.NewClient(&redis.Options{
			Addr:       viper.GetString("redis"),
			Password:   "",
			DB:         0,
			MaxRetries: 2,
		})
		err := redisClient.Ping().Err()
		if err != nil {
			// Ping failure is logged but not fatal; later calls will retry.
			log.Println("failed to ping redis")
		}
	case []interface{}:
		isCluster = true
		redisClusterClient = redis.NewClusterClient(&redis.ClusterOptions{
			Addrs:      viper.GetStringSlice("redis"),
			Password:   "",
			MaxRetries: 2,
		})
		err := redisClusterClient.Ping().Err()
		if err != nil {
			log.Println("failed to ping redis cluster")
		}
	}
	return &DumperBridge{
		isCluster:    isCluster,
		redis:        redisClient,
		redisCluster: redisClusterClient,
		limit:        viper.GetInt("limit"),
		worker:       common.NewWorker(viper.GetInt("dumper_concurrency")),
	}
}

// Count returns the length of the Redis list stored at key.
func (d *DumperBridge) Count(key string) (int, error) {
	var (
		num int64
		err error
	)
	if d.isCluster {
		num, err = d.redisCluster.LLen(key).Result()
	} else {
		num, err = d.redis.LLen(key).Result()
	}
	return int(num), err
}

// Range returns the list elements in [start, end].
// NOTE(review): errors are swallowed and an empty slice is returned, so
// GenerateFile cannot distinguish "empty" from "failed" — see the loop
// note there.
func (d *DumperBridge) Range(key string, start int, end int) []string {
	var (
		result []string
		err    error
	)
	if d.isCluster {
		result, err = d.redisCluster.LRange(key, int64(start), int64(end)).Result()
	} else {
		result, err = d.redis.LRange(key, int64(start), int64(end)).Result()
	}
	if err != nil {
		return []string{}
	}
	return result
}

// GenerateFile drains up to d.limit entries of key into a zstd-compressed
// temp file, trims them from the list, and returns the file's path.
func (d *DumperBridge) GenerateFile(key string) (string, error) {
	var (
		dumped int
	)
	stepSize := viper.GetInt("step_size")
	file, err := ioutil.TempFile("", "dumper")
	// NOTE(review): these defers are registered before err is checked; if
	// TempFile fails, file is nil and file.Name() in the cleanup closure
	// panics. Check err before deferring.
	defer file.Close()
	// Remove the partial file if we bail out with an error below.
	defer func() {
		if err != nil {
			os.Remove(file.Name())
		}
	}()
	if err != nil {
		return "", err
	}
	zfile := zstd.NewWriterLevel(file, viper.GetInt("compress_level"))
	defer zfile.Close()
	// NOTE(review): if Range keeps returning an empty slice (e.g. a
	// persistent Redis error), dumped never advances and this loop spins
	// forever; zfile.Write errors are also ignored.
	for dumped = 0; dumped < d.limit; {
		result := d.Range(key, dumped, dumped+stepSize)
		for _, line := range result {
			zfile.Write([]byte(line + "\n"))
		}
		dumped += len(result)
	}
	// Drop the dumped prefix from the Redis list.
	if d.isCluster {
		err = d.redisCluster.LTrim(key, int64(dumped), -1).Err()
	} else {
		err = d.redis.LTrim(key, int64(dumped), -1).Err()
	}
	if err != nil {
		return "", err
	}
	return file.Name(), nil
}

// DumpKey splits the backlog of key into limit-sized dump files and feeds
// each to the dumper registered for the key.
func (d *DumperBridge) DumpKey(key string) (err error) {
	d.worker.Run()
	defer d.worker.Done()
	count, _ := d.Count(key)
	if count <= d.limit {
		return
	}
	for i := count / d.limit; i > 0; i-- {
		newName := fmt.Sprintf("%s.%d.json.zstd", key, time.Now().UnixNano())
		log.Printf("dumping %s", newName)
		// NOTE(review): := shadows the named return err, so callers never
		// see these failures; the deferred Removes also accumulate until
		// the function returns rather than firing per iteration.
		dumped, err := d.GenerateFile(key)
		defer os.Remove(dumped)
		if err != nil {
			log.Println("???", err)
		}
		err = (*d.dumpers[key]).HandleFile(dumped, newName)
		if err != nil {
			log.Println("???", err)
		}
	}
	return
}

// ShouldDump returns the keys whose backlog exceeds the configured limit.
func (d *DumperBridge) ShouldDump() []string {
	ret := []string{}
	for key, _ := range d.dumpers {
		num, err := d.Count(key)
		if err != nil {
			log.Printf("Error when checking, given up: %s", err)
			break
		}
		if num > d.limit {
			ret = append(ret, key)
		}
	}
	return ret
}

// LoadDumpers builds the key→dumper map from the "output" config section;
// the first configured output also becomes the "default" dumper.
func (d *DumperBridge) LoadDumpers() {
	d.dumpers = map[string]*dumper.Dumper{}
	output := viper.Get("output").([]interface{})
	for n, i := range output {
		cfg := i.(map[interface{}]interface{})
		name := cfg["name"].(string)
		dtype := cfg["type"].(string)
		dumper := dumper.GetDumper(dtype, cfg)
		if n == 0 {
			d.dumpers["default"] = &dumper
		}
		d.dumpers[name] = &dumper
	}
}

// Start runs the periodic dump loop until a "quit" control message arrives.
func (d *DumperBridge) Start() {
	d.LoadDumpers()
	ctlChan := ctlSig.Recv()
	var ctl string
	defer func() {
		ctlSig.Clean(ctlChan)
	}()
	// NOTE(review): ctl is written by this goroutine and read by the loop
	// below without synchronization — a data race; use a channel select or
	// an atomic value instead.
	go func() {
		for {
			ctl = <-*ctlChan
		}
	}()
	t := time.Now()
	waitTime := 1 * time.Second
	for {
		t = time.Now()
		for key, _ := range d.dumpers {
			go d.DumpKey(key)
		}
		d.worker.Wait()
		if ctl == "quit" {
			return
		}
		// update the checkInterval dynamically
		if time.Now().Sub(t) < waitTime {
			time.Sleep(waitTime - time.Now().Sub(t))
		}
	}
}
package isogen

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"opendev.org/airship/airshipctl/pkg/bootstrap/cloudinit"
	"opendev.org/airship/airshipctl/pkg/container"
	"opendev.org/airship/airshipctl/pkg/document"
	"opendev.org/airship/airshipctl/pkg/errors"
	"opendev.org/airship/airshipctl/pkg/log"
	"opendev.org/airship/airshipctl/pkg/util"

	"sigs.k8s.io/kustomize/v3/pkg/fs"
)

const (
	// builderConfigFileName is the name of the builder config file placed
	// into the shared volume for the ISO builder container.
	builderConfigFileName = "builder-conf.yaml"
)

// GenerateBootstrapIso will generate data for cloud init and start ISO builder container
func GenerateBootstrapIso(settings *Settings, args []string) error {
	if settings.IsogenConfigFile == "" {
		log.Print("Reading config file location from global settings is not supported")
		return errors.ErrNotImplemented{}
	}

	ctx := context.Background()
	cfg := &Config{}

	if err := util.ReadYAMLFile(settings.IsogenConfigFile, &cfg); err != nil {
		return err
	}

	if err := verifyInputs(cfg, args); err != nil {
		return err
	}

	docBundle, err := document.NewBundle(fs.MakeRealFS(), args[0], "")
	if err != nil {
		return err
	}

	log.Print("Creating ISO builder container")
	builder, err := container.NewContainer(
		&ctx, cfg.Container.ContainerRuntime,
		cfg.Container.Image)
	if err != nil {
		return err
	}

	err = generateBootstrapIso(docBundle, builder, cfg, settings.Debug)
	if err != nil {
		return err
	}

	log.Print("Checking artifacts")
	return verifyArtifacts(cfg)
}

// verifyInputs validates the CLI arguments and isogen config, normalizing
// the container volume spec to hostPath:containerPath form.
func verifyInputs(cfg *Config, args []string) error {
	if len(args) == 0 {
		log.Print("Specify path to document model. Config param from global settings is not supported")
		return errors.ErrNotImplemented{}
	}

	if cfg.Container.Volume == "" {
		log.Print("Specify volume bind for ISO builder container")
		return errors.ErrWrongConfig{}
	}

	if (cfg.Builder.UserDataFileName == "") || (cfg.Builder.NetworkConfigFileName == "") {
		log.Print("UserDataFileName or NetworkConfigFileName are not specified in ISO builder config")
		return errors.ErrWrongConfig{}
	}

	vols := strings.Split(cfg.Container.Volume, ":")
	switch {
	case len(vols) == 1:
		// A bare host path is mounted at the same path inside the container.
		cfg.Container.Volume = fmt.Sprintf("%s:%s", vols[0], vols[0])
	case len(vols) > 2:
		log.Print("Bad container volume format. Use hostPath:contPath")
		return errors.ErrWrongConfig{}
	}
	return nil
}

// getContainerCfg maps host-side file paths to the cloud-init payloads and
// serialized builder config that must be written into the shared volume.
func getContainerCfg(cfg *Config, userData []byte, netConf []byte) (map[string][]byte, error) {
	hostVol := strings.Split(cfg.Container.Volume, ":")[0]

	fls := make(map[string][]byte)
	fls[filepath.Join(hostVol, cfg.Builder.UserDataFileName)] = userData
	fls[filepath.Join(hostVol, cfg.Builder.NetworkConfigFileName)] = netConf
	builderData, err := cfg.ToYAML()
	if err != nil {
		return nil, err
	}
	fls[filepath.Join(hostVol, builderConfigFileName)] = builderData
	return fls, nil
}

// verifyArtifacts checks that the builder produced the expected output
// metadata file in the shared host volume.
func verifyArtifacts(cfg *Config) error {
	hostVol := strings.Split(cfg.Container.Volume, ":")[0]
	metadataPath := filepath.Join(hostVol, cfg.Builder.OutputMetadataFileName)
	_, err := os.Stat(metadataPath)
	return err
}

// generateBootstrapIso renders cloud-init data for the ephemeral cluster,
// writes the builder inputs to the shared volume, and runs the builder
// container, removing it afterwards unless debug is set.
func generateBootstrapIso(
	docBundle document.Bundle,
	builder container.Container,
	cfg *Config,
	debug bool,
) error {
	cntVol := strings.Split(cfg.Container.Volume, ":")[1]
	log.Print("Creating cloud-init for ephemeral K8s")
	userData, netConf, err := cloudinit.GetCloudData(docBundle, EphemeralClusterAnnotation)
	if err != nil {
		return err
	}

	fls, err := getContainerCfg(cfg, userData, netConf)
	// BUG FIX: this error was previously overwritten by the WriteFiles
	// call without ever being checked.
	if err != nil {
		return err
	}
	if err = util.WriteFiles(fls, 0600); err != nil {
		return err
	}

	vols := []string{cfg.Container.Volume}
	builderCfgLocation := filepath.Join(cntVol, builderConfigFileName)
	log.Printf("Running default container command. Mounted dir: %s", vols)
	if err := builder.RunCommand(
		[]string{},
		nil,
		vols,
		[]string{fmt.Sprintf("BUILDER_CONFIG=%s", builderCfgLocation)},
		debug,
	); err != nil {
		return err
	}

	log.Print("ISO successfully built.")
	if !debug {
		log.Print("Removing container.")
		return builder.RmContainer()
	}

	log.Debugf("Debug flag is set. Container %s stopped but not deleted.", builder.GetId())
	return nil
}
package internal

var (
	// PostgresVersionKey is the query's store key used to set the postgres server version.
	PostgresVersionKey = pgversion{}
	// IncrementorKey is the scope's context key used to save current incrementor value.
	IncrementorKey = incrementorKey{}
)

// pgversion is the unexported type behind PostgresVersionKey; a distinct
// empty struct type avoids collisions with other store/context keys.
type pgversion struct{}

// incrementorKey is the unexported type behind IncrementorKey.
type incrementorKey struct{}
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
)

// firstRepeatedFrequency replays changes cyclically, starting from 0, and
// returns the first running total that is reached twice (AoC 2018 day 1,
// part 2). The starting frequency 0 counts as already seen.
// BUG FIX: the original never recorded the initial 0, so an input like
// [+1, -1] answered 1 instead of the correct 0.
// Returns 0 for an empty change list instead of looping forever.
func firstRepeatedFrequency(changes []int) int {
	if len(changes) == 0 {
		return 0
	}
	seen := map[int]bool{0: true}
	frequency := 0
	for {
		for _, change := range changes {
			frequency += change
			if seen[frequency] {
				return frequency
			}
			seen[frequency] = true
		}
	}
}

// main reads one integer change per line from input.txt and prints the
// first frequency reached twice.
func main() {
	f, _ := os.Open("input.txt")
	defer f.Close()
	scanner := bufio.NewScanner(f)
	freqChanges := make([]int, 0)
	for scanner.Scan() {
		change, _ := strconv.Atoi(scanner.Text())
		freqChanges = append(freqChanges, change)
	}
	fmt.Println(firstRepeatedFrequency(freqChanges))
}
package database

import (
	"database/sql"
	"fmt"
	"log"
)

// Connect opens a handle to the named MySQL database on localhost:3306 and
// terminates the process if the DSN cannot be parsed.
//
// NOTE(review): credentials are hardcoded ("root:pass") — move them to
// configuration or the environment.
// NOTE(review): the "mysql" driver must be registered via a blank import
// (e.g. github.com/go-sql-driver/mysql); it is not imported in this file,
// so presumably another file in the build does it — verify.
// NOTE(review): sql.Open does not actually dial the server, so "connected"
// is printed without any connection check; consider db.Ping().
func Connect(databasename string) *sql.DB {
	db, err := sql.Open("mysql", "root:pass@(localhost:3306)/"+databasename)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected")
	return db
}
package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"
)

// support for reloading configuration without restarting Redwood
var configRequests = make(chan chan *config)

// getConfig returns the current configuration.
func getConfig() *config {
	ch := make(chan *config)
	configRequests <- ch
	return <-ch
}

// listenerChan registers newly opened network listeners with manageConfig
// so they can be closed on shutdown.
var listenerChan = make(chan net.Listener)

// activeConnections tracks in-flight client connections so SIGTERM can
// drain them before exiting.
var activeConnections sync.WaitGroup

// manageConfig manages Redwood's configuration, reloading it when SIGHUP is received.
// It owns the config value and the CSV log writers; all access is funneled
// through channels handled by the single select loop below, so no locking
// is needed.
func manageConfig() {
	conf, err := loadConfiguration()
	if err != nil {
		log.Fatal(err)
	}

	hupChan := make(chan os.Signal, 1)
	signal.Notify(hupChan, syscall.SIGHUP)
	termChan := make(chan os.Signal, 1)
	signal.Notify(termChan, syscall.SIGTERM)

	accessLog := NewCSVLog(conf.AccessLog)
	tlsLog := NewCSVLog(conf.TLSLog)

	conf.startWebServer()

	// listeners is a list of all currently-open network listeners.
	var listeners []net.Listener

	for {
		select {
		case req := <-configRequests:
			// Serve a getConfig request with the current snapshot.
			req <- conf

		case data := <-accessLogChan:
			accessLog.Log(data)

		case data := <-tlsLogChan:
			tlsLog.Log(data)

		case <-hupChan:
			// Reload the configuration in place; on failure keep the old
			// config and log writers.
			log.Println("Received SIGHUP")
			newConf, err := loadConfiguration()
			if err != nil {
				log.Println("Error reloading configuration:", err)
				break
			}
			conf = newConf
			// Reopen the CSV logs so new paths from the config take effect.
			accessLog.Close()
			tlsLog.Close()
			accessLog = NewCSVLog(conf.AccessLog)
			tlsLog = NewCSVLog(conf.TLSLog)
			conf.startWebServer()

		case l := <-listenerChan:
			listeners = append(listeners, l)

		case <-termChan:
			// Graceful shutdown: stop accepting, clean up the PID file,
			// then wait for connections to drain (bounded at 20 seconds).
			log.Println("Received SIGTERM")
			for _, l := range listeners {
				l.Close()
			}
			if conf.PIDFile != "" {
				os.Remove(conf.PIDFile)
			}
			go func() {
				// Allow 20 seconds for active connections to finish.
				time.Sleep(20 * time.Second)
				os.Exit(0)
			}()
			// Or exit when all active connections have finished.
			activeConnections.Wait()
			os.Exit(0)
		}
	}
}
package main

import "fmt"

// main1001 demonstrates that copy duplicates slice elements: after copying,
// the two slices keep independent backing arrays, so mutating one never
// affects the other.
func main1001() {
	dst := []int{1, 2, 3, 4, 5}
	src := []int{6, 7, 8, 9}
	copy(dst, src)
	fmt.Println(src)
	// Distinct backing-array addresses confirm the slices are independent.
	fmt.Printf("%p\n", dst)
	fmt.Printf("%p\n", src)
	src[2] = 123
	fmt.Println(dst)
	fmt.Println(src)
	dst = append(dst, 6, 7, 8, 9)
	fmt.Println(dst)
	fmt.Println(src)
}

// main bubble-sorts a slice in descending order and prints the result.
func main() {
	nums := []int{8, 7, 5, 6, 4, 2, 3, 1, 9}
	// Outer loop: one pass per element; inner loop: bubble the smallest
	// remaining value toward the end of the unsorted region. Using `<`
	// yields descending order (`>` would sort ascending).
	for pass := 0; pass < len(nums)-1; pass++ {
		for k := 0; k < len(nums)-1-pass; k++ {
			if nums[k] < nums[k+1] {
				nums[k], nums[k+1] = nums[k+1], nums[k]
			}
		}
	}
	fmt.Println(nums)
}
package common

import "errors"

// Sentinel errors shared across the application. Each wraps the matching
// ErrorMessage* constant so callers can compare with errors.Is.
var (
	// Database Related Error
	ErrNoConnectionProvider  = errors.New(ErrorMessageNoConnectionProvider)
	ErrNoTransactionFunction = errors.New(ErrorMessageNoTransactionFunction)
	ErrNotExist              = errors.New(ErrorMessageNotExist)
	ErrAlreadyExist          = errors.New(ErrorMessageAlreadyExist)

	// Validation and authorization errors.
	ErrInvalidParameter = errors.New(ErrorMessageInvalidParameter)
	ErrInvalidStatus    = errors.New(ErrorMessageInvalidStatus)
	ErrInternalError    = errors.New(ErrorMessageInternalError)
	ErrNoPrivileges     = errors.New(ErrorMessageNoPrivileges)

	// Token and session errors.
	ErrRefreshTokenExpired = errors.New(ErrorMessageRefreshTokenExpired)
	ErrTokenExpired        = errors.New(ErrorMessageTokenExpired)
	ErrNoDeviceInformation = errors.New(ErrorMessageNoDeviceInformation)
	ErrLimitExceed         = errors.New(ErrorMessageLimitExceed)
)
package ratelimiters

import (
	"sync"
	"time"

	"github.com/corverroos/ratelimit"
)

// NewCoffee returns a Coffee limiter allowing `limit` requests per resource
// within each period-aligned fixed window. Coffee rate limiter is a WIP.
func NewCoffee(period time.Duration, limit int) *Coffee {
	return &Coffee{
		period:  period,
		limit:   limit,
		nowFunc: time.Now,
		mm:      newMapMutex(),
		counts:  map[string]burst{},
	}
}

// burst counts the requests observed in the window starting at t.
type burst struct {
	t     time.Time
	count int
}

// Coffee is a per-resource, fixed-window rate limiter.
type Coffee struct {
	period  time.Duration
	limit   int
	counts  map[string]burst
	mm      *mapMutex
	nowFunc func() time.Time // injectable for tests
}

// mapMutex provides one mutex per resource name. A resource's channel
// holds a token when the resource lock is free; an empty channel means
// "locked". The first locker creates the channel empty and proceeds.
type mapMutex struct {
	locks map[string]chan struct{}
	mu    sync.Mutex // guards the locks map itself
}

func newMapMutex() *mapMutex {
	return &mapMutex{
		locks: make(map[string]chan struct{}),
	}
}

// lock acquires the lock for res, creating it on first use.
func (m *mapMutex) lock(res string) {
	m.mu.Lock()
	ch, ok := m.locks[res]
	if !ok {
		// First locker: install an empty (i.e. locked) channel and proceed.
		m.locks[res] = make(chan struct{}, 1)
		m.mu.Unlock()
		return
	}
	m.mu.Unlock()
	// BUG FIX: the map was previously re-read here (and in unlock) after
	// releasing mu, racing with the map write above from other goroutines.
	// The channel is now captured while mu is held.
	<-ch
}

// unlock releases the lock for res by depositing the token.
func (m *mapMutex) unlock(res string) {
	m.mu.Lock()
	ch := m.locks[res]
	m.mu.Unlock()
	ch <- struct{}{}
}

// Request records one request against resource and reports whether the
// request is within the limit for the current window.
func (c *Coffee) Request(resource string) bool {
	c.mm.lock(resource)
	defer c.mm.unlock(resource)

	window := c.nowFunc().Truncate(c.period)
	i, ok := c.counts[resource]
	// BUG FIX: times are compared with Equal instead of != — the == family
	// on time.Time also compares Location pointers and monotonic readings,
	// which is not what window equality means.
	if !ok || !i.t.Equal(window) {
		// New window: reset the counter.
		c.counts[resource] = burst{
			t:     window,
			count: 1,
		}
		return true
	}

	c.counts[resource] = burst{
		t:     i.t,
		count: i.count + 1,
	}
	return i.count+1 <= c.limit
}

var _ ratelimit.RateLimiter = (*Coffee)(nil)
// Package bootstrap provides a cluster-destroyer for Bootstrap node package bootstrap
package main

import (
	"log"
	"net/http"
	"os"
	"text/template"

	"github.com/gorilla/sessions"
	_ "github.com/lib/pq"
)

var tmpl *template.Template

var (
	port         = ":80"
	certFilePath = ""
	keyFilePath  = ""
	appDir       = ""

	// logFile is the optional log destination opened in init.
	// FIX: this was `os.File{}` and init assigned it with `:=`, shadowing
	// the package variable — main's deferred Close acted on a zero-value
	// file and the real handle was never reachable here. It is now a
	// *os.File assigned with `=` in init (nil when no logfile is set).
	logFile *os.File

	// NOTE(review): hard-coded session key; consider loading it from
	// configuration instead of source code.
	key   = []byte("087736079f8d9e4c7fc7b642bb4c7afa")
	store = sessions.NewCookieStore(key)

	// datetime form layout
	dateSettingsLayout = "Monday 02 January 2006"
	dtLayout           = "02.01.2006 - 15:04"
	dateLayout         = "02.01.2006"
	dbTimeLayout       = "2006-01-02 15:04:00"
)

// logging wraps a handler and logs remote address, path, and method of
// every request before dispatching it.
func logging(f http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s [INFO] %s: %s\n", r.RemoteAddr, r.URL.Path, r.Method)
		f(w, r)
	}
}

// init parses command-line arguments, wires up logging, database,
// templates, port/TLS settings, and ensures a local master API key exists.
func init() {
	data, err := getCmdLineArgs(os.Args)
	if !err.Empty() {
		log.Fatalln(err)
	}

	// Setup log file.
	if val, ok := data["logfile"]; ok {
		// FIX: assign the package-level logFile (was shadowed with `:=`).
		var fileErr error
		logFile, fileErr = os.OpenFile(val, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if fileErr != nil {
			log.Fatalf("[FATAL] init(): Error opening log file: %s\n%s\n", data["logfile"], fileErr)
		}
		log.SetOutput(logFile)
	}

	// Set up database.
	db = dbInit(data["dbhost"], data["dbuser"], data["dbpassword"], data["dbdatabase"], data["dbport"])

	// Set app dir and load templates.
	appDir = data["app_dir"]
	tmpl = template.Must(template.ParseGlob(appDir + "/templates/*"))

	// Set port.
	if val, ok := data["port"]; ok {
		port = ":" + val
	}

	// Setup SSL.
	if val, ok := data["certfile"]; ok {
		certFilePath = val
	}
	if val, ok := data["keyfile"]; ok {
		keyFilePath = val
	}

	// Check if API access exists for this application; create a master key
	// on first run.
	apiAccess, err := GetLocalAPIKeys(db)
	if !err.Empty() {
		log.Fatalf("[FATAL] init(): Error while getting api local keys:\n%s\n", err)
	}

	if len(apiAccess) == 0 {
		a := API{
			Name:         "Accounting Master Key",
			Active:       true,
			AccessRights: GetAllAccessRights(),
			LocalKey:     true,
		}
		a.GenerateAPIKey()
		if err = a.Create(db); !err.Empty() {
			log.Fatalf("[FATAL] init(): Error while creating master api key:\n%s\n", err)
		}
	}
}

// main registers all HTTP routes and serves, with TLS when certificate and
// key paths were configured.
func main() {
	defer db.Close()
	defer func() {
		// logFile is nil when no logfile argument was given.
		if logFile != nil {
			logFile.Close()
		}
	}()

	http.Handle(
		"/static/",
		http.StripPrefix("/static/",
			http.FileServer(http.Dir(appDir+"/static/")),
		),
	)

	var api APIHandler
	http.Handle("/api/", api)

	// General
	http.HandleFunc("/", logging(handleRoot))
	http.HandleFunc("/settings/", logging(handleSettings))
	http.HandleFunc("/settings/api/", logging(handleAPISettingsOverview))
	http.HandleFunc("/settings/api/form/", logging(handleAPISettings))
	http.HandleFunc("/login/", logging(handleLogin))
	http.HandleFunc("/logout/", logging(handleLogout))

	// Accounts
	http.HandleFunc("/accounts/", logging(handleAccountOverview))
	http.HandleFunc("/accounts/form/", logging(handleAccountForm))

	// Transactions
	http.HandleFunc("/transactions/", logging(handleTransactionOverview))
	http.HandleFunc("/transactions/form/", logging(handleTransactionForm))
	http.HandleFunc("/transactions/delete/{id}/", logging(handleTransactionDeletion))

	// Statistics
	http.HandleFunc("/statistics/", logging(handleStatisticsOverview))

	// Categories
	http.HandleFunc("/categories/", logging(handleCategoryOverview))

	if certFilePath != "" && keyFilePath != "" {
		log.Fatalln(http.ListenAndServeTLS(port, certFilePath, keyFilePath, nil))
	} else {
		log.Fatalln(http.ListenAndServe(port, nil))
	}
}
package main

import (
	"flag"
	"fmt"
	"net/http"
	"os"
	"os/exec"
	"time"

	"github.com/gin-gonic/gin"
)

// main starts a small HTTP service that drives nmon monitoring runs:
// /start launches a run, /stop kills all nmon processes, /report serves
// generated reports, /close exits the program.
func main() {
	port := flag.String("port", "", "默认监听端口8080, 设置监听端口示例:\r\n\t./monitor -port 9999\r\n")
	// The flags below exist only so their usage text appears in -h output.
	flag.String("启动监控", "", "参数n:name 生成报告的文件名\r\n\t参数t:time 监控时长,单位分钟\r\n\tget示例:http://192.168.x.x:8080/start?n=test&t=30\r\n")
	flag.String("杀掉所有监控任务", "", "get示例:http://192.168.x.x:8080/stop\r\n")
	flag.String("查看报告", "", "get示例:http://192.168.x.x:8080/report\r\n")
	flag.String("退出程序", "", "get示例:http://192.168.x.x:8080/close\r\n")
	flag.Parse()

	gin.SetMode(gin.ReleaseMode)
	r := gin.Default()

	// Serve generated reports.
	r.StaticFS("/report", http.Dir("./report/"))

	// Format: ?n=name&t=time; &t may be omitted and defaults to 30 minutes.
	r.GET("/start", func(c *gin.Context) {
		name := c.DefaultQuery("n", "name")
		// FIX: renamed from `time`, which shadowed the time package.
		duration := c.DefaultQuery("t", "30")

		// FIX: pass the user-supplied values as arguments to the script
		// instead of splicing them into a `sh -c` command line, which
		// allowed shell command injection through the query string.
		lsCmd := exec.Command("/bin/sh", "./nmonCTL.sh", name, duration)
		go func() {
			if err := lsCmd.Run(); err != nil {
				fmt.Println(err)
			}
		}()
		c.JSON(200, gin.H{
			"message": "已执行" + name + "场景监控,持续时间" + duration + "分钟",
		})
	})

	r.GET("/close", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "结束程序!",
		})
		// Give the response time to flush before exiting.
		go func() {
			time.Sleep(time.Second * 2)
			os.Exit(0)
		}()
	})

	r.GET("/stop", func(c *gin.Context) {
		lsCmd := exec.Command("/bin/sh", "-c", "ps -ef|grep nmon|grep -v grep|awk {'print $2'}|xargs kill -9")
		if err := lsCmd.Start(); err != nil {
			fmt.Println(err)
		}
		c.JSON(200, gin.H{
			"message": "已结束所有监听任务!",
		})
	})

	sport := ":"
	sport += *port
	if *port == "" {
		sport += "8080"
	}
	r.Run(sport) // listen and serve on 0.0.0.0:8080
}
package app

import "regexp"

// pattern matches runs of characters that are not ASCII letters or digits.
//
// FIX: the original class was "[^a-zA-z0-9]+" — the range A-z accidentally
// spans the punctuation characters between 'Z' and 'a' ([ \ ] ^ _ and the
// backtick), so e.g. underscores were never stripped. Also replaced the
// init()/log.Fatal dance with MustCompile, the idiom for a constant,
// known-valid pattern.
var pattern = regexp.MustCompile("[^a-zA-Z0-9]+")

// StripURL removes every non-alphanumeric character from url and sends the
// result on the result channel.
func StripURL(url string, result chan string) {
	result <- pattern.ReplaceAllString(url, "")
}
package main

import "testing"

// TestIsLongPressedName verifies a known long-pressed-name case
// ("aaleex" is "alex" with some characters long-pressed).
//
// FIX: the original printed the result with fmt.Println instead of
// asserting it, so the test could never fail.
func TestIsLongPressedName(t *testing.T) {
	if !IsLongPressedName("alex", "aaleex") {
		t.Errorf(`IsLongPressedName("alex", "aaleex") = false, want true`)
	}
}
package users

import (
	"strconv"

	"github.com/jakewitcher/pos-server/graph/model"
)

// UserEntity is the persistence-layer representation of a user account.
type UserEntity struct {
	Id         int64  `json:"id"`
	EmployeeId int64  `json:"employee_id"`
	Username   string `json:"username"`
	Password   string `json:"password"`
}

// ToDTO converts the entity into its GraphQL model. Numeric ids are
// rendered as decimal strings; the password field is not copied over.
func (u *UserEntity) ToDTO() *model.User {
	userID := strconv.FormatInt(u.Id, 10)
	employeeID := strconv.FormatInt(u.EmployeeId, 10)

	dto := model.User{
		ID:         userID,
		EmployeeID: employeeID,
		Username:   u.Username,
	}
	return &dto
}
package remote

import (
	"fmt"
	"reflect"
	"regexp"
)

// placeholderPattern matches {name}-style placeholders.
// FIX: compiled once at package init; the original called
// regexp.MustCompile inside Format on every invocation.
var placeholderPattern = regexp.MustCompile(`({[\w\d]+})`)

// Format replaces {key} placeholders in str with values derived from data.
// data may be (a pointer to) a map (keys stringified via fmt.Sprint), a
// slice/array (keys are element indices "0", "1", …), or a struct (keys
// are exported field names). Placeholders with no matching key are left
// unchanged.
func Format(str string, data interface{}) string {
	var par = map[string]string{}
	var val = reflect.ValueOf(data)
	val = reflect.Indirect(val)

	switch val.Kind() {
	case reflect.Map:
		for _, k := range val.MapKeys() {
			var v = val.MapIndex(k)
			par[fmt.Sprint(k.Interface())] = fmt.Sprint(v.Interface())
		}
	case reflect.Array, reflect.Slice:
		for i := 0; i != val.Len(); i++ {
			var v = val.Index(i)
			par[fmt.Sprint(i)] = fmt.Sprint(v.Interface())
		}
	case reflect.Struct:
		var typ = val.Type()
		for i := 0; i != typ.NumField(); i++ {
			var f = typ.Field(i)
			var k = f.Name
			var v = val.FieldByName(k)
			// Unexported fields cannot be read via Interface(); skip them.
			if v.CanInterface() {
				par[k] = fmt.Sprint(v.Interface())
			}
		}
	}

	return placeholderPattern.ReplaceAllStringFunc(str, func(s string) string {
		// s includes the braces; look up the inner name.
		var d, ok = par[s[1:len(s)-1]]
		if ok {
			return d
		}
		return s
	})
}
package ciolite

// Api functions that support: users/email_accounts/folders/messages

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/url"
)

// GetUserEmailAccountsFolderMessageParams query values data struct.
// Optional: Delimiter, IncludeBody, BodyType, IncludeHeaders, IncludeFlags,
// and (for GetUserEmailAccountsFolderMessages only) Limit, Offset.
type GetUserEmailAccountsFolderMessageParams struct {
	// Optional:
	Delimiter    string `json:"delimiter,omitempty"`
	BodyType     string `json:"body_type,omitempty"`
	IncludeBody  bool   `json:"include_body,omitempty"`
	IncludeFlags bool   `json:"include_flags,omitempty"`

	// IncludeHeaders can be "0", "1", or "raw"
	IncludeHeaders string `json:"include_headers,omitempty"`

	// Optional for GetUserEmailAccountsFolderMessages (not used by GetUserEmailAccountFolderMessage):
	Limit  int `json:"limit,omitempty"`
	Offset int `json:"offset,omitempty"`
}

// GetUsersEmailAccountFolderMessagesResponse data struct.
// One message as returned by the folder-messages endpoints.
type GetUsersEmailAccountFolderMessagesResponse struct {
	MessageID   string `json:"message_id,omitempty"`
	Subject     string `json:"subject,omitempty"`
	InReplyTo   string `json:"in_reply_to,omitempty"`
	ResourceURL string `json:"resource_url,omitempty"`

	Folders         []string `json:"folders,omitempty"`
	References      []string `json:"references,omitempty"`
	ReceivedHeaders []string `json:"received_headers,omitempty"`

	ListHeaders ListHeaders                                `json:"list_headers,omitempty"`
	Addresses   GetUsersEmailAccountFolderMessageAddresses `json:"addresses,omitempty"`
	PersonInfo  PersonInfo                                 `json:"person_info,omitempty"`

	Attachments []UsersEmailAccountFolderMessageAttachment `json:"attachments,omitempty"`
	Bodies      []UsersEmailAccountFolderMessageBody       `json:"bodies,omitempty"`

	// NOTE(review): presumably epoch-second timestamps — confirm against
	// the Context.io Lite API documentation.
	SentAt     int `json:"sent_at,omitempty"`
	ReceivedAt int `json:"received_at,omitempty"`
}

// UsersEmailAccountFolderMessageAttachment embedded data struct within GetUsersEmailAccountFolderMessagesResponse
type UsersEmailAccountFolderMessageAttachment struct {
	Type               string `json:"type,omitempty"`
	FileName           string `json:"file_name,omitempty"`
	BodySection        string `json:"body_section,omitempty"`
	ContentDisposition string `json:"content_disposition,omitempty"`
	MessageID          string `json:"message_id,omitempty"`
	XAttachmentID      string `json:"x_attachment_id,omitempty"`
	Size               int    `json:"size,omitempty"`
	AttachmentID       int    `json:"attachment_id,omitempty"`
}

// UsersEmailAccountFolderMessageBody embedded data struct within GetUsersEmailAccountFolderMessagesResponse
type UsersEmailAccountFolderMessageBody struct {
	BodySection string `json:"body_section,omitempty"`
	Type        string `json:"type,omitempty"`
	Encoding    string `json:"encoding,omitempty"`
	Content     string `json:"content,omitempty"`
	Size        int    `json:"size,omitempty"`
}

// ListHeaders embedded data struct within GetUsersEmailAccountFolderMessagesResponse
type ListHeaders map[string]string

// UnmarshalJSON is here because the empty state is an array in the json,
// and is a object/map when populated.
func (m *ListHeaders) UnmarshalJSON(b []byte) error {
	if bytes.Equal([]byte(`[]`), b) {
		// its the empty array, set an empty map
		*m = make(map[string]string)
		return nil
	}
	mp := make(map[string]string)
	err := json.Unmarshal(b, &mp)
	if err != nil {
		return err
	}
	*m = mp
	return nil
}

// PersonInfo embedded data struct within GetUsersEmailAccountFolderMessagesResponse and WebhookMessageData
type PersonInfo map[string]map[string]string

// UnmarshalJSON is here because the empty state is an array in the json,
// and is a object/map when populated.
func (m *PersonInfo) UnmarshalJSON(b []byte) error {
	if bytes.Equal([]byte(`[]`), b) {
		// its the empty array, set an empty map
		*m = make(map[string]map[string]string)
		return nil
	}
	mp := make(map[string]map[string]string)
	err := json.Unmarshal(b, &mp)
	if err != nil {
		return err
	}
	*m = mp
	return nil
}

// Address embedded data struct within GetUsersEmailAccountFolderMessageAddresses and WebhookMessageDataAddresses
type Address struct {
	Email string `json:"email,omitempty"`
	Name  string `json:"name,omitempty"`
}

// UnmarshalJSON is here because the empty state is an array in the json,
// and is a object/map when populated.
func (m *Address) UnmarshalJSON(b []byte) error {
	if bytes.Equal([]byte(`[]`), b) {
		// its the empty array, set an empty struct
		*m = Address{}
		return nil
	}
	// Decode through an alias type so the custom UnmarshalJSON is not
	// invoked recursively.
	type addressTemp Address
	var tmp addressTemp
	if err := json.Unmarshal(b, &tmp); err != nil {
		return err
	}
	*m = Address(tmp)
	return nil
}

// GetUsersEmailAccountFolderMessageAddresses data struct within GetUsersEmailAccountFolderMessagesResponse
type GetUsersEmailAccountFolderMessageAddresses struct {
	From    []Address `json:"from,omitempty"`
	To      []Address `json:"to,omitempty"`
	Cc      []Address `json:"cc,omitempty"`
	Bcc     []Address `json:"bcc,omitempty"`
	Sender  []Address `json:"sender,omitempty"`
	ReplyTo []Address `json:"reply_to,omitempty"`
}

// UnmarshalJSON is here because the empty state is an array in the json,
// and is a object/map when populated.
func (m *GetUsersEmailAccountFolderMessageAddresses) UnmarshalJSON(b []byte) error {
	if bytes.Equal([]byte(`[]`), b) {
		// its the empty array, set an empty struct
		*m = GetUsersEmailAccountFolderMessageAddresses{}
		return nil
	}
	// Decode through an alias type so the custom UnmarshalJSON is not
	// invoked recursively.
	type getUsersEmailAccountFolderMessageAddressesTemp GetUsersEmailAccountFolderMessageAddresses
	var tmp getUsersEmailAccountFolderMessageAddressesTemp
	if err := json.Unmarshal(b, &tmp); err != nil {
		return err
	}
	*m = GetUsersEmailAccountFolderMessageAddresses(tmp)
	return nil
}

// MoveUserEmailAccountFolderMessageParams form values data struct.
// Requires: NewFolderID, and may optionally contain Delimiter.
type MoveUserEmailAccountFolderMessageParams struct {
	// Required:
	NewFolderID string `json:"new_folder_id"`

	// Optional:
	Delimiter string `json:"delimiter,omitempty"`
}

// MoveUserEmailAccountFolderMessageResponse data struct
type MoveUserEmailAccountFolderMessageResponse struct {
	Success bool `json:"success,omitempty"`
}

// GetUserEmailAccountsFolderMessages gets listings of email messages for a user.
// queryValues may optionally contain Delimiter, IncludeBody, BodyType,
// IncludeHeaders, IncludeFlags, Limit, Offset
func (cioLite CioLite) GetUserEmailAccountsFolderMessages(userID string, label string, folder string, queryValues GetUserEmailAccountsFolderMessageParams) ([]GetUsersEmailAccountFolderMessagesResponse, error) {

	// Make request; the folder name may contain characters that are not
	// URL-safe, hence the QueryEscape.
	request := clientRequest{
		Method:       "GET",
		Path:         fmt.Sprintf("/lite/users/%s/email_accounts/%s/folders/%s/messages", userID, label, url.QueryEscape(folder)),
		QueryValues:  queryValues,
		UserID:       userID,
		AccountLabel: label,
	}

	// Make response
	var response []GetUsersEmailAccountFolderMessagesResponse

	// Request
	err := cioLite.doFormRequest(request, &response)

	return response, err
}

// GetUserEmailAccountFolderMessage gets file, contact and other information about a given email message.
// queryValues may optionally contain Delimiter, IncludeBody, BodyType, IncludeHeaders, IncludeFlags
func (cioLite CioLite) GetUserEmailAccountFolderMessage(userID string, label string, folder string, messageID string, queryValues GetUserEmailAccountsFolderMessageParams) (GetUsersEmailAccountFolderMessagesResponse, error) {

	// Make request
	request := clientRequest{
		Method:       "GET",
		Path:         fmt.Sprintf("/lite/users/%s/email_accounts/%s/folders/%s/messages/%s", userID, label, url.QueryEscape(folder), url.QueryEscape(messageID)),
		QueryValues:  queryValues,
		UserID:       userID,
		AccountLabel: label,
	}

	// Make response
	var response GetUsersEmailAccountFolderMessagesResponse

	// Request
	err := cioLite.doFormRequest(request, &response)

	return response, err
}

// MoveUserEmailAccountFolderMessage moves a message.
// formValues requires NewFolderID, and may optionally contain Delimiter
//
// NOTE(review): the move parameters are passed through QueryValues even
// though they are documented as form values — confirm doFormRequest encodes
// QueryValues as the request body for PUTs.
func (cioLite CioLite) MoveUserEmailAccountFolderMessage(userID string, label string, folder string, messageID string, queryValues MoveUserEmailAccountFolderMessageParams) (MoveUserEmailAccountFolderMessageResponse, error) {

	// Make request
	request := clientRequest{
		Method:       "PUT",
		Path:         fmt.Sprintf("/lite/users/%s/email_accounts/%s/folders/%s/messages/%s", userID, label, url.QueryEscape(folder), url.QueryEscape(messageID)),
		QueryValues:  queryValues,
		UserID:       userID,
		AccountLabel: label,
	}

	// Make response
	var response MoveUserEmailAccountFolderMessageResponse

	// Request
	err := cioLite.doFormRequest(request, &response)

	return response, err
}
package store

import (
	"fmt"

	"github.com/go-redis/redis"
)

// Redis lazily provides a shared redis client built from configuration.
type Redis struct {
	conf *Config
}

// Get returns the process-wide redis client, creating it on first call.
// Initialization is guarded by the package-level redisOnce, so concurrent
// callers share a single connection pool.
func (s *Redis) Get() *redis.Client {
	redisOnce.Do(func() {
		cfg := s.conf.Get()
		addr := fmt.Sprintf("%s:%d", cfg.Redis.Host, cfg.Redis.Port)
		redisClient = redis.NewClient(&redis.Options{
			Addr:     addr,
			Password: cfg.Redis.Pass,
			DB:       0,
		})
	})
	return redisClient
}

// GetSendEmailKey builds the cache key used to track an email sent to the
// given address from the given sender context.
func (s *Redis) GetSendEmailKey(email, from string) string {
	return fmt.Sprintf("class:email_%s:%s", from, email)
}
package domain

import (
	"time"

	"github.com/gofrs/uuid"
)

// Domain events emitted by the farm aggregate.

// FarmCreated is raised when a new farm is registered.
type FarmCreated struct {
	UID         uuid.UUID
	Name        string
	Type        string
	Latitude    string
	Longitude   string
	Country     string
	City        string
	IsActive    bool
	CreatedDate time.Time
}

// FarmNameChanged is raised when a farm is renamed.
type FarmNameChanged struct {
	FarmUID uuid.UUID
	Name    string
}

// FarmTypeChanged is raised when a farm's type is updated.
type FarmTypeChanged struct {
	FarmUID uuid.UUID
	Type    string
}

// FarmGeolocationChanged is raised when a farm's coordinates are updated.
type FarmGeolocationChanged struct {
	FarmUID   uuid.UUID
	Latitude  string
	Longitude string
}

// FarmRegionChanged is raised when a farm's country/city is updated.
type FarmRegionChanged struct {
	FarmUID uuid.UUID
	Country string
	City    string
}
// Copyright 2020 The Kubernetes Authors. // SPDX-License-Identifier: Apache-2.0 package v1alpha1 import ( "k8s.io/apimachinery/pkg/runtime" "regexp" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "kmodules.xyz/client-go/apiextensions" "x-helm.dev/apimachinery/apis/shared" "x-helm.dev/apimachinery/crds" ) const ( ResourceKindAppRelease = "AppRelease" ResourceAppRelease = "apprelease" ResourceAppReleases = "appreleases" ) // Descriptor defines the Metadata and informations about the AppRelease. type Descriptor struct { // Type is the type of the appRelease (e.g. WordPress, MySQL, Cassandra). Type string `json:"type,omitempty"` // Version is an optional version indicator for the AppRelease. Version string `json:"version,omitempty"` // Description is a brief string description of the AppRelease. Description string `json:"description,omitempty"` // Icons is an optional list of icons for an appRelease. Icon information includes the source, size, // and mime type. Icons []shared.ImageSpec `json:"icons,omitempty"` // Maintainers is an optional list of maintainers of the appRelease. The maintainers in this list maintain the // the source code, images, and package for the appRelease. Maintainers []shared.ContactData `json:"maintainers,omitempty"` // Owners is an optional list of the owners of the installed appRelease. The owners of the appRelease should be // contacted in the event of a planned or unplanned disruption affecting the appRelease. Owners []shared.ContactData `json:"owners,omitempty"` // Keywords is an optional list of key words associated with the appRelease (e.g. MySQL, RDBMS, database). Keywords []string `json:"keywords,omitempty"` // Links are a list of descriptive URLs intended to be used to surface additional documentation, dashboards, etc. Links []shared.Link `json:"links,omitempty"` // Notes contain a human readable snippets intended as a quick start for the users of the AppRelease. 
// CommonMark markdown syntax may be used for rich text representation. Notes string `json:"notes,omitempty"` } // AppReleaseSpec defines the specification for an AppRelease. type AppReleaseSpec struct { // Descriptor regroups information and metadata about an appRelease. Descriptor Descriptor `json:"descriptor,omitempty"` // Release regroups information and metadata about a Helm release. Release ReleaseInfo `json:"release,omitempty"` // Components is a list of Kinds for AppRelease's components (e.g. Deployments, Pods, Services, CRDs). It // can be used in conjunction with the AppRelease's Selector to list or watch the AppReleases components. Components []metav1.GroupVersionKind `json:"components,omitempty"` // Selector is a label query over kinds that created by the appRelease. It must match the component objects' labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors Selector *metav1.LabelSelector `json:"selector,omitempty"` Editor *metav1.GroupVersionResource `json:"editor,omitempty"` // +optional ResourceKeys []string `json:"resourceKeys,omitempty"` // +optional FormKeys []string `json:"formKeys,omitempty"` } type ReleaseInfo struct { Name string `json:"name"` Version string `json:"version,omitempty"` Status string `json:"status,omitempty"` FirstDeployed *metav1.Time `json:"firstDeployed,omitempty"` LastDeployed *metav1.Time `json:"lastDeployed,omitempty"` ModifiedAt *metav1.Time `json:"modified-at,omitempty"` Form *runtime.RawExtension `json:"form,omitempty"` } // ComponentList is a generic status holder for the top level resource type ComponentList struct { // Object status array for all matching objects Objects []ObjectStatus `json:"components,omitempty"` } // ObjectStatus is a generic status holder for objects type ObjectStatus struct { // Link to object Link string `json:"link,omitempty"` // Name of object Name string `json:"name,omitempty"` // Kind of object Kind string `json:"kind,omitempty"` // 
Object group Group string `json:"group,omitempty"` // Status. Values: InProgress, Ready, Unknown Status string `json:"status,omitempty"` } // AppReleaseStatus defines controller's the observed state of AppRelease type AppReleaseStatus struct { // ObservedGeneration is the most recent generation observed. It corresponds to the // Object's generation, which is updated on mutation by the API Server. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` // Conditions represents the latest state of the object // +optional // +patchMergeKey=type // +patchStrategy=merge Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Resources embeds a list of object statuses // +optional ComponentList `json:",inline,omitempty"` // ComponentsReady: status of the components in the format ready/total // +optional ComponentsReady string `json:"componentsReady,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Type",type=string,description="The type of the appRelease",JSONPath=`.spec.descriptor.type`,priority=0 // +kubebuilder:printcolumn:name="Version",type=string,description="The creation date",JSONPath=`.spec.descriptor.version`,priority=0 // +kubebuilder:printcolumn:name="Age",type=date,description="The creation date",JSONPath=`.metadata.creationTimestamp`,priority=0 // AppRelease is the Schema for the appReleases API type AppRelease struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec AppReleaseSpec `json:"spec,omitempty"` Status AppReleaseStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true // AppReleaseList contains a list of AppRelease type AppReleaseList struct 
{ metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []AppRelease `json:"items"` } func init() { SchemeBuilder.Register(&AppRelease{}, &AppReleaseList{}) } func (_ AppRelease) CustomResourceDefinition() *apiextensions.CustomResourceDefinition { return crds.MustCustomResourceDefinition(GroupVersion.WithResource(ResourceAppReleases)) } // StripVersion the version part of gv func StripVersion(gv string) string { if gv == "" { return gv } re := regexp.MustCompile(`^[vV][0-9].*`) // If it begins with only version, (group is nil), return empty string which maps to core group if re.MatchString(gv) { return "" } return strings.Split(gv, "/")[0] }
package client

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
)

// ApiClient is a small bearer-token HTTP client for the configured server.
type ApiClient struct {
	Http   *http.Client
	config *Config
}

// New validates config and returns a ready-to-use ApiClient.
func New(config *Config) (*ApiClient, error) {
	h := &http.Client{}
	if err := validateConfig(config); err != nil {
		return nil, err
	}
	a := &ApiClient{Http: h, config: config}
	return a, nil
}

// Token returns the current bearer token.
func (a *ApiClient) Token() string {
	return a.config.Token
}

// SetToken replaces the bearer token used for subsequent requests.
func (a *ApiClient) SetToken(tok string) {
	a.config.Token = tok
}

// makeReq builds a request with the Authorization header set; PUT/POST
// requests also get a content-type (defaulting to application/json).
func (a *ApiClient) makeReq(method, url string, body io.Reader, contentType ...string) (*http.Request, error) {
	request, err := http.NewRequest(method, url, body)
	if err != nil {
		return nil, err
	}
	request.Header.Set("Authorization", joinStr("Bearer ", a.Token()))
	if method == "PUT" || method == "POST" {
		request.Header.Set("content-type", setContentType(contentType))
	}
	return request, nil
}

// makeURL joins the configured base path and url into an absolute URL.
func (a *ApiClient) makeURL(url string) string {
	url = joinStr(a.config.Base, url)
	url = path.Clean(url)
	if !path.IsAbs(url) {
		url = joinStr("/", url)
	}
	return joinStr(a.serverStr(), url)
}

// serverStr renders scheme://host[:port], omitting default ports.
func (a *ApiClient) serverStr() string {
	var port string
	if !((a.config.Protocol == "http" && a.config.Port == "80") ||
		(a.config.Protocol == "https" && a.config.Port == "443")) {
		port = joinStr(":", a.config.Port)
	}
	return joinStr(a.config.Protocol, "://", a.config.Host, port)
}

// joinStr concatenates its arguments.
func joinStr(str ...string) string {
	return strings.Join(str, "")
}

// basicReq performs a body-less request and decodes the JSON response.
func (a *ApiClient) basicReq(method, url string) (map[string]interface{}, error) {
	req, err := a.makeReq(method, a.makeURL(url), nil)
	if err != nil {
		return nil, err
	}
	jsonData, err := a.executeReq(req)
	if err != nil {
		return nil, err
	}
	return jsonData, nil
}

// Get performs a GET request against url.
func (a *ApiClient) Get(url string) (map[string]interface{}, error) {
	return a.basicReq("GET", url)
}

// Delete performs a DELETE request against url.
func (a *ApiClient) Delete(url string) (map[string]interface{}, error) {
	return a.basicReq("DELETE", url)
}

// Post performs a body-less POST request against url.
// FIX: the original body was empty — the function was missing its return
// statement and did not compile.
func (a *ApiClient) Post(url string) (map[string]interface{}, error) {
	return a.basicReq("POST", url)
}

// Put sends data as a URL-encoded body in a PUT request.
// FIX: the original referenced an undefined `method`, passed a string
// where an io.Reader was expected, forwarded the variadic contentType
// without `...`, and never returned — none of it compiled.
func (a *ApiClient) Put(url string, data url.Values,
	contentType ...string) (map[string]interface{}, error) {
	bodyStr := data.Encode()
	req, err := a.makeReq("PUT", a.makeURL(url), strings.NewReader(bodyStr), contentType...)
	if err != nil {
		return nil, err
	}
	return a.executeReq(req)
}

// PutParams PUTs the given params as a form-encoded body.
// FIX: the original referenced an undefined `data` instead of `values`.
func (a *ApiClient) PutParams(url string, params map[string]string) (map[string]interface{}, error) {
	values := makeValues(params)
	return a.Put(url, values, "application/x-www-form-urlencoded")
}

// makeValues converts a plain string map into url.Values.
func makeValues(params map[string]string) url.Values {
	values := make(url.Values)
	for k, v := range params {
		values.Set(k, v)
	}
	return values
}

// executeReq runs the request, checks the status, and decodes the JSON body.
func (a *ApiClient) executeReq(req *http.Request) (map[string]interface{}, error) {
	resp, err := a.Http.Do(req)
	if err != nil {
		return nil, err
	}
	// FIX: the response body was never closed, leaking connections.
	defer resp.Body.Close()

	if err = a.checkRespErr(resp); err != nil {
		return nil, err
	}
	jsonData, err := parseJSON(resp.Body)
	if err != nil {
		return nil, err
	}
	return jsonData, nil
}

// parseJSON decodes a JSON object from data.
func parseJSON(data io.ReadCloser) (map[string]interface{}, error) {
	respData := make(map[string]interface{})
	dec := json.NewDecoder(data)
	if err := dec.Decode(&respData); err != nil {
		return nil, err
	}
	return respData, nil
}

// checkRespErr converts non-2xx responses into an error built from the
// server's "msg" and "detail" fields.
func (a *ApiClient) checkRespErr(resp *http.Response) error {
	if resp.StatusCode >= 300 {
		rdata, err := parseJSON(resp.Body)
		if err != nil {
			return err
		}
		// FIX: guard both type assertions; an absent "msg" used to panic.
		msg, _ := rdata["msg"].(string)
		detail, _ := rdata["detail"].(string)
		return fmt.Errorf("%s :: %s %s", resp.Status, msg, detail)
	}
	return nil
}

// setContentType returns the first supplied content type, or JSON default.
func setContentType(contentType []string) string {
	var cType string
	if contentType != nil {
		cType = contentType[0]
	} else {
		cType = "application/json"
	}
	return cType
}

// location functions

// monitor functions
package cmds

import (
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/appscode/go/log"
	stringz "github.com/appscode/go/strings"
	"github.com/appscode/kutil/tools/analytics"
	pcm "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
	cs "github.com/kubedb/apimachinery/client/clientset/versioned/typed/kubedb/v1alpha1"
	snapc "github.com/kubedb/apimachinery/pkg/controller/snapshot"
	"github.com/kubedb/postgres/pkg/controller"
	"github.com/kubedb/postgres/pkg/docker"
	"github.com/spf13/cobra"
	crd_cs "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// opt holds operator defaults; individual fields are overridable via the
// command-line flags registered in NewCmdRun.
var (
	opt = controller.Options{
		Docker: docker.Docker{
			Registry:    "kubedb",
			ExporterTag: "canary",
		},
		OperatorNamespace: namespace(),
		GoverningService:  "kubedb",
		Address:           ":8080",
		EnableRbac:        false,
		EnableAnalytics:   true,
		AnalyticsClientID: analytics.ClientID(),
	}
)

// NewCmdRun returns the cobra "run" command that starts the Postgres
// operator: it builds the Kubernetes clients, starts the snapshot cron
// controller, ensures CRDs, and runs the controller loop until crash/exit.
func NewCmdRun(version string) *cobra.Command {
	var (
		masterURL          string
		kubeconfigPath     string
		prometheusCrdGroup = pcm.Group
		prometheusCrdKinds = pcm.DefaultCrdKinds
	)

	cmd := &cobra.Command{
		Use:               "run",
		Short:             "Run Postgres in Kubernetes",
		DisableAutoGenTag: true,
		Run: func(cmd *cobra.Command, args []string) {
			config, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfigPath)
			if err != nil {
				log.Fatalf("Could not get kubernetes config: %s", err)
			}
			// raise throttling time. ref: https://github.com/appscode/voyager/issues/640
			config.Burst = 100
			config.QPS = 100

			client := kubernetes.NewForConfigOrDie(config)
			apiExtKubeClient := crd_cs.NewForConfigOrDie(config)
			extClient := cs.NewForConfigOrDie(config)

			promClient, err := pcm.NewForConfig(&prometheusCrdKinds, prometheusCrdGroup, config)
			if err != nil {
				log.Fatalln(err)
			}

			cronController := snapc.NewCronController(client, extClient)
			// Start Cron
			cronController.StartCron()
			// Stop Cron
			defer cronController.StopCron()

			w := controller.New(client, apiExtKubeClient, extClient, promClient, cronController, opt)
			defer runtime.HandleCrash()

			// Ensuring Custom Resource Definitions
			err = w.Setup()
			if err != nil {
				log.Fatalln(err)
			}

			fmt.Println("Starting operator...")
			w.RunAndHold()
		},
	}

	// operator flags
	cmd.Flags().StringVar(&masterURL, "master", masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
	cmd.Flags().StringVar(&kubeconfigPath, "kubeconfig", kubeconfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
	cmd.Flags().StringVar(&opt.GoverningService, "governing-service", opt.GoverningService, "Governing service for database statefulset")
	cmd.Flags().StringVar(&opt.Docker.Registry, "docker-registry", opt.Docker.Registry, "User provided docker repository")
	cmd.Flags().StringVar(&opt.Docker.ExporterTag, "exporter-tag", stringz.Val(version, opt.Docker.ExporterTag), "Tag of kubedb/operator used as exporter")
	cmd.Flags().StringVar(&opt.Address, "address", opt.Address, "Address to listen on for web interface and telemetry.")
	cmd.Flags().BoolVar(&opt.EnableRbac, "rbac", opt.EnableRbac, "Enable RBAC for database workloads")

	// Prometheus flags are registered on a Go flag set and then merged into
	// the cobra flags.
	fs := flag.NewFlagSet("prometheus", flag.ExitOnError)
	fs.StringVar(&prometheusCrdGroup, "prometheus-crd-apigroup", prometheusCrdGroup, "prometheus CRD  API group name")
	fs.Var(&prometheusCrdKinds, "prometheus-crd-kinds", " - EXPERIMENTAL (could be removed in future releases) - customize CRD kind names")
	cmd.Flags().AddGoFlagSet(fs)

	return cmd
}

// namespace returns the operator's namespace: the OPERATOR_NAMESPACE env
// var if set, else the in-cluster service-account namespace file, else the
// cluster default namespace.
func namespace() string {
	if ns := os.Getenv("OPERATOR_NAMESPACE"); ns != "" {
		return ns
	}
	if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
		if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
			return ns
		}
	}
	return metav1.NamespaceDefault
}
package config

import (
	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/fiber/v2/middleware/session"
	"github.com/gofiber/storage/dynamodb"
	"github.com/gofiber/storage/memcache"
	"github.com/gofiber/storage/memory"
	"github.com/gofiber/storage/mongodb"
	"github.com/gofiber/storage/mysql"
	"github.com/gofiber/storage/postgres"
	"github.com/gofiber/storage/redis"
	"github.com/gofiber/storage/sqlite3"
)

// GetSessionConfig builds a fiber session.Config whose backing storage is
// selected by the SESSION_PROVIDER setting; connection details come from
// the SESSION_* settings. Unknown or unset providers fall back to
// in-memory storage.
func (config *Config) GetSessionConfig() session.Config {
	var storage fiber.Storage
	switch config.GetString("SESSION_PROVIDER") {
	case "dynamodb":
		storage = dynamodb.New(dynamodb.Config{
			Table: config.GetString("SESSION_TABLENAME"),
		})
	case "memcache":
		// memcache takes a single "host:port" server string.
		storage = memcache.New(memcache.Config{
			Servers: config.GetString("SESSION_HOST") + ":" + config.GetString("SESSION_PORT"),
		})
	case "mongodb":
		storage = mongodb.New(mongodb.Config{
			Host:       config.GetString("SESSION_HOST"),
			Port:       config.GetInt("SESSION_PORT"),
			Database:   config.GetString("SESSION_DATABASE"),
			Collection: config.GetString("SESSION_TABLENAME"),
		})
	case "mysql":
		storage = mysql.New(mysql.Config{
			Host:     config.GetString("SESSION_HOST"),
			Port:     config.GetInt("SESSION_PORT"),
			Username: config.GetString("SESSION_USERNAME"),
			Password: config.GetString("SESSION_PASSWORD"),
			Database: config.GetString("SESSION_DATABASE"),
			Table:    config.GetString("SESSION_TABLENAME"),
		})
	case "postgres":
		storage = postgres.New(postgres.Config{
			Host:     config.GetString("SESSION_HOST"),
			Port:     config.GetInt("SESSION_PORT"),
			Database: config.GetString("SESSION_DATABASE"),
			Table:    config.GetString("SESSION_TABLENAME"),
		})
	case "redis":
		// Note: for redis, SESSION_DATABASE is the numeric DB index.
		storage = redis.New(redis.Config{
			Host:     config.GetString("SESSION_HOST"),
			Port:     config.GetInt("SESSION_PORT"),
			Username: config.GetString("SESSION_USERNAME"),
			Password: config.GetString("SESSION_PASSWORD"),
			Database: config.GetInt("SESSION_DATABASE"),
		})
	case "sqlite3":
		storage = sqlite3.New(sqlite3.Config{
			Database: config.GetString("SESSION_DATABASE"),
			Table:    config.GetString("SESSION_TABLENAME"),
		})
	default:
		storage = memory.New()
	}

	return session.Config{
		Expiration:     config.GetDuration("SESSION_EXPIRATION"),
		Storage:        storage,
		CookieHTTPOnly: true,
	}
}
package hello_service type HelloService struct { } // Hello 方法的输入参数和 输出参数均改用 protobuf 定义的String类型表示 func (p *HelloService) Hello(request *String ,reply *String)error { reply.Value="Hello:-->"+request.Value return nil }
package server

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"errors"
	"fmt"
	"io"
	"math/big"
	"net"
	"testing"
	"time"

	"github.com/dkorittki/loago/pkg/api/v1"
	"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/test/bufconn"
)

const (
	// secret is the shared auth token used by the authenticated test servers.
	secret = "foobar"
	// authScheme is the authorization scheme sent in the request metadata.
	authScheme = "basic"
)

var (
	// testRequest is the canonical Run request used by every test.
	testRequest = &api.RunRequest{
		Endpoints: []*api.RunRequest_Endpoint{
			{
				Url:    "http://foo.bar",
				Weight: 1,
			},
		},
		Amount:      1,
		Type:        api.RunRequest_FAKE,
		MinWaitTime: 1000,
		MaxWaitTime: 2000,
	}
)

// MockHandler is a stub Worker handler: Run streams three fixed results,
// Ping is unimplemented.
type MockHandler struct{}

// Run sends three EndpointResult messages with HTTP status 200.
func (h *MockHandler) Run(_ *api.RunRequest, srv api.Worker_RunServer) error {
	for i := 0; i < 3; i++ {
		r := &api.EndpointResult{HttpStatusCode: 200}
		err := srv.Send(r)
		if err != nil {
			return err
		}
	}

	return nil
}

// Ping always fails; none of the tests exercise it.
func (h *MockHandler) Ping(_ context.Context, _ *api.PingRequest) (*api.PingResponse, error) {
	// unimplemented
	return nil, errors.New("unimplemented")
}

// generateBufDialer returns a gRPC dialer that connects through the given
// in-memory bufconn listener instead of a real network.
func generateBufDialer(lis *bufconn.Listener) func(context.Context, string) (net.Conn, error) {
	return func(ctx context.Context, s string) (net.Conn, error) {
		return lis.Dial()
	}
}

// ctxWithSecret attaches an "authorization: <scheme> <token>" header to the
// outgoing context metadata.
func ctxWithSecret(ctx context.Context, scheme string, token string) context.Context {
	md := metadata.Pairs("authorization", fmt.Sprintf("%s %v", scheme, token))
	nCtx := metautils.NiceMD(md).ToOutgoing(ctx)

	return nCtx
}

// handleConnClose closes a client connection, logging (not failing) on error.
func handleConnClose(t *testing.T, conn *grpc.ClientConn) {
	err := conn.Close()
	if err != nil {
		t.Logf("error on closing connection: %v", err)
	}
}

// generateTLSCert creates a self-signed RSA certificate for "localhost",
// valid for 24 hours, suitable only for tests.
func generateTLSCert() (tls.Certificate, error) {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return tls.Certificate{}, err
	}

	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName: "localhost",
		},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(time.Hour * 24),
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}

	cert, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
	if err != nil {
		return tls.Certificate{}, err
	}

	return tls.Certificate{Certificate: [][]byte{cert}, PrivateKey: priv}, nil
}

// startTestServer boots a worker server with the MockHandler on an in-memory
// listener and registers its shutdown with t.Cleanup. cert may be nil (no TLS)
// and authSecret may be empty (no authentication).
func startTestServer(t *testing.T, cert *tls.Certificate, authSecret string) *bufconn.Listener {
	t.Helper()

	lis := bufconn.Listen(1024)
	errChan := make(chan error)

	s, err := newWorkerServer(cert, authSecret, &MockHandler{}, lis)
	require.NoError(t, err)

	go func() {
		errChan <- s.Serve()
	}()
	t.Cleanup(s.Stop)

	// Give the server a moment to come up; a fast error means startup failed.
	select {
	case <-time.After(500 * time.Millisecond):
	case err := <-errChan:
		t.Fatalf("cannot start server: %v", err)
	}

	return lis
}

// dialTestServer opens a client connection over the bufconn listener,
// optionally with (unverified) TLS, and registers its close with t.Cleanup.
func dialTestServer(ctx context.Context, t *testing.T, lis *bufconn.Listener, withTLS bool) api.WorkerClient {
	t.Helper()

	opts := []grpc.DialOption{
		grpc.WithContextDialer(generateBufDialer(lis)),
		grpc.WithBlock(),
	}
	if withTLS {
		// The test certificate is self-signed, so skip verification.
		creds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	conn, err := grpc.DialContext(ctx, "localhost", opts...)
	require.NoError(t, err)
	t.Cleanup(func() { handleConnClose(t, conn) })

	return api.NewWorkerClient(conn)
}

// runAndCollect issues the canonical Run request and drains the result stream.
func runAndCollect(ctx context.Context, t *testing.T, cl api.WorkerClient) []*api.EndpointResult {
	t.Helper()

	stream, err := cl.Run(ctx, testRequest)
	require.NoError(t, err)

	var responds []*api.EndpointResult
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return responds
		}
		if err != nil {
			t.Fatalf("unknown error received while receiving gRPC message: %v", err)
		}

		responds = append(responds, resp)
	}
}

// assertRunResults checks the results match what MockHandler.Run streams:
// three responses, each with HTTP status 200.
func assertRunResults(t *testing.T, responds []*api.EndpointResult) {
	t.Helper()

	assert.Len(t, responds, 3)
	for _, v := range responds {
		assert.Equal(t, int32(200), v.HttpStatusCode)
	}
}

// assertRunRejected issues the canonical Run request and expects the first
// Recv to fail with the given gRPC status code.
func assertRunRejected(ctx context.Context, t *testing.T, cl api.WorkerClient, wantCode codes.Code) {
	t.Helper()

	stream, err := cl.Run(ctx, testRequest)
	require.NoError(t, err)

	resp, err := stream.Recv()
	require.Error(t, err)
	assert.Nil(t, resp)

	grpcErr, ok := status.FromError(err)
	require.True(t, ok)
	assert.Equal(t, wantCode, grpcErr.Code())
}

// TestServer_NoTLS_NoSecret: a plaintext, unauthenticated server serves results.
func TestServer_NoTLS_NoSecret(t *testing.T) {
	lis := startTestServer(t, nil, "")

	ctx := context.Background()
	cl := dialTestServer(ctx, t, lis, false)

	assertRunResults(t, runAndCollect(ctx, t, cl))
}

// TestServer_withTLS_NoSecret: a TLS server without authentication serves results.
func TestServer_withTLS_NoSecret(t *testing.T) {
	cert, err := generateTLSCert()
	require.NoError(t, err)

	lis := startTestServer(t, &cert, "")

	ctx := context.Background()
	cl := dialTestServer(ctx, t, lis, true)

	assertRunResults(t, runAndCollect(ctx, t, cl))
}

// TestServer_withTLS_Unauthenticated: omitting credentials yields Unauthenticated.
func TestServer_withTLS_Unauthenticated(t *testing.T) {
	cert, err := generateTLSCert()
	require.NoError(t, err)

	lis := startTestServer(t, &cert, secret)

	ctx := context.Background()
	cl := dialTestServer(ctx, t, lis, true)

	assertRunRejected(ctx, t, cl, codes.Unauthenticated)
}

// TestServer_withTLS_withInvalidSecret: a wrong token yields PermissionDenied.
func TestServer_withTLS_withInvalidSecret(t *testing.T) {
	cert, err := generateTLSCert()
	require.NoError(t, err)

	lis := startTestServer(t, &cert, secret)

	ctx := ctxWithSecret(context.Background(), authScheme, secret+"invalid")
	cl := dialTestServer(ctx, t, lis, true)

	assertRunRejected(ctx, t, cl, codes.PermissionDenied)
}

// TestServer_withTLS_withValidSecret: the correct token is accepted and
// results are served.
func TestServer_withTLS_withValidSecret(t *testing.T) {
	cert, err := generateTLSCert()
	require.NoError(t, err)

	lis := startTestServer(t, &cert, secret)

	ctx := ctxWithSecret(context.Background(), authScheme, secret)
	cl := dialTestServer(ctx, t, lis, true)

	assertRunResults(t, runAndCollect(ctx, t, cl))
}
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package charset

import (
	"bytes"
	"fmt"
	"reflect"
	"strings"
	"unsafe"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/parser/terror"
	"golang.org/x/text/encoding"
	"golang.org/x/text/transform"
)

// ErrInvalidCharacterString returns when the string is invalid in the specific charset.
var ErrInvalidCharacterString = terror.ClassParser.NewStd(mysql.ErrInvalidCharacterString)

// encodingBase defines some generic functions shared by concrete charset
// implementations. `enc` is the underlying golang.org/x/text codec; `self`
// is the concrete Encoding embedding this base, used so that overridable
// methods (Foreach, Peek, Name) dispatch to the concrete type.
type encodingBase struct {
	enc  encoding.Encoding
	self Encoding
}

// MbLen always reports 0 in the base implementation.
// NOTE(review): presumably "length of a multi-byte character prefix";
// confirm against the Encoding interface's definition — concrete
// encodings appear expected to override this.
func (encodingBase) MbLen(_ string) int {
	return 0
}

// ToUpper upper-cases src using the generic Unicode rules.
func (encodingBase) ToUpper(src string) string {
	return strings.ToUpper(src)
}

// ToLower lower-cases src using the generic Unicode rules.
func (encodingBase) ToLower(src string) string {
	return strings.ToLower(src)
}

// IsValid reports whether every character of src converts cleanly from
// UTF-8 into this charset. Iteration stops at the first invalid character.
func (b encodingBase) IsValid(src []byte) bool {
	isValid := true
	b.self.Foreach(src, opFromUTF8, func(from, to []byte, ok bool) bool {
		isValid = ok
		return ok
	})
	return isValid
}

// Transform converts src according to the direction and error-handling
// policy encoded in op, collecting output into dest (allocated lazily if
// nil; always Reset first). On an invalid character:
//   - unless opSkipError is set, the first failure is recorded as the
//     returned error (conversion may still continue);
//   - opTruncateTrim stops the conversion at the failure point;
//   - opTruncateReplace emits '?' in place of the bad character.
// opCollectFrom copies the source bytes of each character, opCollectTo the
// converted bytes. The returned slice aliases dest's internal buffer.
func (b encodingBase) Transform(dest *bytes.Buffer, src []byte, op Op) (result []byte, err error) {
	if dest == nil {
		dest = &bytes.Buffer{}
		dest.Grow(len(src))
	}
	dest.Reset()
	b.self.Foreach(src, op, func(from, to []byte, ok bool) bool {
		if !ok {
			if err == nil && (op&opSkipError == 0) {
				// Remember only the first invalid sequence.
				err = generateEncodingErr(b.self.Name(), from)
			}
			if op&opTruncateTrim != 0 {
				return false
			}
			if op&opTruncateReplace != 0 {
				dest.WriteByte('?')
				return true
			}
		}
		if op&opCollectFrom != 0 {
			dest.Write(from)
		} else if op&opCollectTo != 0 {
			dest.Write(to)
		}
		return true
	})
	return dest.Bytes(), err
}

// Foreach walks src one character at a time, converting each with either
// the encoder (opFromUTF8: UTF-8 -> charset, stepping by UTF-8 widths) or
// the decoder (charset -> UTF-8, stepping by this charset's own widths via
// self.Peek). For each character it calls fn with the source bytes, the
// converted bytes, and whether the conversion succeeded; fn returning
// false stops the walk. A decode that produces the U+FFFD replacement
// char also counts as a failure when opToUTF8 is set.
func (b encodingBase) Foreach(src []byte, op Op, fn func(from, to []byte, ok bool) bool) {
	var tfm transform.Transformer
	var peek func([]byte) []byte
	if op&opFromUTF8 != 0 {
		tfm = b.enc.NewEncoder()
		peek = EncodingUTF8Impl.Peek
	} else {
		tfm = b.enc.NewDecoder()
		peek = b.self.Peek
	}
	// Per-character scratch output. NOTE(review): assumes no single
	// character converts to more than 4 bytes — confirm for all charsets.
	var buf [4]byte
	for i, w := 0, 0; i < len(src); i += w {
		w = len(peek(src[i:]))
		nDst, _, err := tfm.Transform(buf[:], src[i:i+w], false)
		meetErr := err != nil || (op&opToUTF8 != 0 && beginWithReplacementChar(buf[:nDst]))
		if !fn(src[i:i+w], buf[:nDst], !meetErr) {
			return
		}
	}
}

// replacementBytes are bytes for the replacement rune 0xfffd.
var replacementBytes = []byte{0xEF, 0xBF, 0xBD}

// beginWithReplacementChar check if dst has the prefix '0xEFBFBD'
// (the UTF-8 encoding of U+FFFD, emitted by decoders on invalid input).
func beginWithReplacementChar(dst []byte) bool {
	return bytes.HasPrefix(dst, replacementBytes)
}

// generateEncodingErr generates an invalid string in charset error,
// rendering the offending bytes in hex.
func generateEncodingErr(name string, invalidBytes []byte) error {
	arg := fmt.Sprintf("%X", invalidBytes)
	return ErrInvalidCharacterString.FastGenByArgs(name, arg)
}

// HackSlice converts string to slice without copy by aliasing the string's
// backing storage. The result MUST NOT be mutated (strings are immutable).
// Use at your own risk.
func HackSlice(s string) (b []byte) {
	pBytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	pString := (*reflect.StringHeader)(unsafe.Pointer(&s))
	pBytes.Data = pString.Data
	pBytes.Len = pString.Len
	pBytes.Cap = pString.Len
	return
}

// HackString converts slice to string without copy by aliasing the slice's
// backing array. The input must not be mutated afterwards.
// Use it at your own risk.
func HackString(b []byte) (s string) {
	if len(b) == 0 {
		return ""
	}
	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
	pstring.Data = pbytes.Data
	pstring.Len = pbytes.Len
	return
}
package main import "fmt" func main() { inputArray := []int{1, 2, 3, 4, 5, 6, 7, 8} count := 1 for len(inputArray) != 1 { if count%2 == 0 { var temp []int for i := 0; i < len(inputArray)-1; i += 2 { temp = append(temp, inputArray[i]*inputArray[i+1]) } inputArray = make([]int, len(temp)) copy(inputArray, temp) } else { var temp []int for i := 0; i < len(inputArray)-1; i += 2 { temp = append(temp, inputArray[i]+inputArray[i+1]) } inputArray = make([]int, len(temp)) copy(inputArray, temp) } //fmt.Println(inputArray) count++ } fmt.Println(inputArray) }
package main import "fmt" /* Adding comments */ func main() { fmt.Println("Hi There!") }
package main import ( "bufio" "fmt" "io" "log" "os" "strings" ) func main() { if len(os.Args) < 2 { fmt.Printf("Usage: %s <file>\n", os.Args[0]) return } in, err := os.Open(os.Args[1]) if err != nil { log.Fatal(err) } defer in.Close() br := bufio.NewReader(in) for { line, c := br.ReadString('\n') if c == io.EOF { line = strings.TrimSuffix(line, "\n") if line != "" { fmt.Println(line) } break } if c != nil { log.Fatal(c) } fmt.Println(strings.TrimSuffix(line, "\n")) } }
package flatten func Flatten(list interface{}) []interface{} { if list == nil { return []interface{}{} } else if _, ok := list.([]interface{}); !ok { return []interface{}{list} } collection := make([]interface{}, 0) for _, element := range list.([]interface{}) { collection = append(collection, Flatten(element)...) } return collection }