text
stringlengths
11
4.05M
// Package errors defines shared log-message constants for user CRUD failures.
package errors

// Messages used when logging user CRUD failures (named for use with zap).
const (
	ZapUserCreateError = "error creating user"
	ZapUserFetchError  = "error fetching user"
	ZapUserUpdateError = "error updating user"
	ZapUserDeleteError = "error deleting user"
)
/* * Copyright 2017 - 2019 KB Kontrakt LLC - All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package auth //go:generate mockgen -source=acl_rep.go -package=auth -destination=acl_rep_mocks.go import ( "errors" "github.com/kbkontrakt/hlfabric-ccdevkit/extstub" ) type ( // ACLRepository . ACLRepository interface { Get() (ACL, error) Save(acl ACL) error } aclRepositoryImpl struct { aclKey string stub extstub.MarshalStub } aclDocument struct { ACList ACL `json:"acl"` } ) const aclDefaultKeyName = "ACL" func (rep *aclRepositoryImpl) Save(acl ACL) error { return rep.stub.WriteState(rep.aclKey, aclDocument{acl}) } func (rep *aclRepositoryImpl) Get() (ACL, error) { var acl aclDocument err := rep.stub.ReadState(rep.aclKey, &acl) if err == extstub.ErrNotFound { return nil, errors.New("ACL not found") } if err != nil { return nil, err } return acl.ACList, nil } // NewACLRepositoryImpl creates default acl implementation func NewACLRepositoryImpl(stub extstub.MarshalStub) ACLRepository { return &aclRepositoryImpl{ aclDefaultKeyName, stub, } }
package main import ( "encoding/hex" "io" "net" "os" "go.uber.org/zap" cli "gopkg.in/urfave/cli.v2" "github.com/rssllyn/go-raknet" ) var Logger *zap.Logger func init() { Logger, _ = zap.NewDevelopment() } func main() { app := &cli.App{ Name: "raknet test server", Flags: []cli.Flag{ &cli.StringFlag{ Name: "listen", Value: ":8711", Usage: "listening address:port", }, &cli.StringFlag{ Name: "log-mode", Value: "development", Usage: "log mode, can be development or production", }, &cli.StringFlag{ Name: "log-file", Value: "", Usage: "log file path, only has effect on production log mode. leave empty to disable file logging", }, }, Action: func(ctx *cli.Context) error { l, err := raknet.Listen(ctx.String("listen"), 1000) if err != nil { Logger.Warn("failed to listen server address") } for true { conn, err := l.Accept() if err != nil { Logger.Warn("failed to accept client connection", zap.Error(err)) continue } go handleClient(conn) } return nil }, } app.Run(os.Args) } func handleClient(conn net.Conn) { buff := make([]byte, 100) for true { n, err := conn.Read(buff) if err == io.EOF { Logger.Debug("connection closed by remote peer") return } else if err != nil { Logger.Warn("failed to read from client", zap.Error(err)) return } Logger.Debug("data received from client", zap.String("hex", hex.EncodeToString(buff[:n])), zap.String("string", string(buff[:n]))) if _, err := conn.Write(buff[:n]); err != nil { Logger.Warn("failed to send to client", zap.Error(err)) return } } }
package lapdata_test

import (
	"testing"

	"github.com/matryer/is"

	"go.jlucktay.dev/golang-workbench/jam-gp/lapdata"
)

// loadEventData loads the event fixture via lapdata.NewEvent, failing the
// calling test on any error or a nil event.
func loadEventData(t *testing.T) *lapdata.Event {
	is := is.New(t)
	is.Helper()
	e, err := lapdata.NewEvent()
	is.NoErr(err)
	is.True(e != nil)
	return e
}

// TestNewEvent checks that the fixture loads at all.
func TestNewEvent(t *testing.T) {
	t.Parallel()
	loadEventData(t)
}

// TestCompetitorsHaveLapData checks every competitor has at least one lap.
func TestCompetitorsHaveLapData(t *testing.T) {
	t.Parallel()
	is := is.New(t)
	e := loadEventData(t)
	is.True(len(e.Session.Competitors) > 0) // no competitor data
	for i := range e.Session.Competitors {
		is.True(len(e.Session.Competitors[i].Laps) > 0) // no lap data for competitor
	}
}

// TestLapDataTotalTime verifies each competitor's summed lap times reach the
// minimum race duration and match the stored running total on the final lap.
func TestLapDataTotalTime(t *testing.T) {
	t.Parallel()
	is := is.New(t)
	e := loadEventData(t)
	const hourInMilliseconds = 60 * 60 * 1000
	for i := range e.Session.Competitors {
		totalTime := 0
		for j := range e.Session.Competitors[i].Laps {
			totalTime += e.Session.Competitors[i].Laps[j].LapTime
		}
		is.True(totalTime >= hourInMilliseconds*1.75) // each competitor should have at least 1h45m of total lap times
		lastLapIndex := len(e.Session.Competitors[i].Laps) - 1
		is.Equal(totalTime, e.Session.Competitors[i].Laps[lastLapIndex].TotalTime) // stored total time != calculated
	}
}

// TestTykLapDataHasFiveSegments checks exactly one competitor matches
// name "Tyks of Hazzard" AND number "3", and that its laps have 5 segments.
func TestTykLapDataHasFiveSegments(t *testing.T) {
	t.Parallel()
	is := is.New(t)
	e := loadEventData(t)
	competitorsChecked := 0
	for i := range e.Session.Competitors {
		// Skip unless both the name and the number match.
		if e.Session.Competitors[i].Name != "Tyks of Hazzard" ||
			e.Session.Competitors[i].Number != "3" {
			continue
		}
		competitorsChecked++
		is.Equal(e.Session.Competitors[i].Laps.Segments(), 5)
	}
	is.Equal(competitorsChecked, 1)
}
// go_05
package main

import (
	"fmt"
	"math"
)

// Go supports anonymous functions, which can act as closures: they may read
// and modify variables from the enclosing function without redeclaring them.

// getSequence returns a closure that yields 1, 2, 3, ... on successive calls.
func getSequence() func() int {
	counter := 0
	return func() int {
		counter++
		return counter
	}
}

// Circle is a simple struct with a radius field.
type Circle struct {
	radius float64
}

func main() {
	a_var, b_var := 100, 101

	fmt.Printf("max_value = %d\n", find_max(a_var, b_var))
	fmt.Println(string_swap("yangna", "natasha"))

	swap_value(a_var, b_var)
	fmt.Printf("swap_value = %d, %d\n", a_var, b_var) // unchanged: 100, 101

	swap_reference(&a_var, &b_var)
	fmt.Printf("swap_reference = %d, %d\n", a_var, b_var) // swapped: 101, 100

	swap_reference_or(&a_var, &b_var)
	fmt.Printf("swap_reference_or = %d, %d\n", a_var, b_var) // swapped back: 100, 101

	// A function value assigned to a local variable.
	getSquareRoot := func(x float64) float64 {
		return math.Sqrt(x)
	}
	fmt.Println(getSquareRoot(9)) // 3

	next := getSequence() // the closure starts counting from zero
	fmt.Println(next())   // 1
	fmt.Println(next())   // 2

	var c Circle
	c.radius = 5.00
	fmt.Println("圆的面积是:", c.getArea()) // 78.5
}

// find_max returns the larger of the two integers.
func find_max(num1, num2 int) int {
	if num1 > num2 {
		return num1
	}
	return num2
}

// string_swap returns its arguments in reverse order.
func string_swap(x, y string) (string, string) {
	return y, x
}

// swap_value receives copies, so the caller's variables are unaffected.
func swap_value(x, y int) {
	x, y = y, x
}

// swap_reference swaps through pointers using multiply/divide arithmetic
// (kept as-is: it deliberately demonstrates the arithmetic-swap trick).
func swap_reference(x *int, y *int) {
	*x = *x * *y
	*y = *x / *y
	*x = *x / *y
}

// swap_reference_or swaps through pointers using the XOR-swap trick.
func swap_reference_or(x *int, y *int) {
	*x = *x ^ *y
	*y = *x ^ *y
	*x = *x ^ *y
}

// getArea returns the circle's area using π ≈ 3.14.
// (A pointer receiver would also work here.)
func (c Circle) getArea() float64 {
	return 3.14 * c.radius * c.radius
}
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"
)

// SpaceX models the subset of the SpaceX v3 capsules API response we consume.
type SpaceX []struct {
	CapsuleSerial      string    `json:"capsule_serial"`
	CapsuleID          string    `json:"capsule_id"`
	Status             string    `json:"status"`
	OriginalLaunch     time.Time `json:"original_launch"`
	OriginalLaunchUnix int       `json:"original_launch_unix"`
	Missions           []struct {
		Name   string `json:"name"`
		Flight int    `json:"flight"`
	} `json:"missions"`
	Landings   int    `json:"landings"`
	Type       string `json:"type"`
	Details    string `json:"details"`
	ReuseCount int    `json:"reuse_count"`
}

var (
	url = "https://api.spacexdata.com/v3/capsules"
)

// main fetches the capsule list and prints every record with its index.
func main() {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		fmt.Println("request failed")
		return
	}

	// Give the outbound call a timeout so a stalled server cannot hang us.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("error in client call")
		return
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()

	var record SpaceX
	if err := json.NewDecoder(resp.Body).Decode(&record); err != nil {
		log.Println(err)
	}

	for launchNo, launchData := range record {
		fmt.Println("Capsule Record =\n", launchData) // fixed "Capsult" typo
		fmt.Println("Record Number =", launchNo)
	}
}
package main

import (
	"flag"
	"fmt"
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/server"
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof handlers on the default mux
	"os"
	"os/signal"
	"runtime"
	"syscall"
)

// Command-line flags. A non-empty flag overrides the corresponding value
// loaded from the config file.
var configFile = flag.String("config", "", "ledisdb config file")
var addr = flag.String("addr", "", "ledisdb listen address")
var dataDir = flag.String("data_dir", "", "ledisdb base data dir")
var dbName = flag.String("db_name", "", "select a db to use, it will overwrite the config's db name")
var usePprof = flag.Bool("pprof", false, "enable pprof")
var pprofPort = flag.Int("pprof_port", 6060, "pprof http port")
var slaveof = flag.String("slaveof", "", "make the server a slave of another instance")
var readonly = flag.Bool("readonly", false, "set readonly mode, salve server is always readonly")
var rpl = flag.Bool("rpl", false, "enable replication or not, slave server is always enabled")
var rplSync = flag.Bool("rpl_sync", false, "enable sync replication or not")

// main loads configuration (default or from -config), applies flag overrides,
// starts the ledisdb server, optionally serves pprof, and closes the app
// cleanly on SIGHUP/SIGINT/SIGTERM/SIGQUIT.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	var cfg *config.Config
	var err error

	if len(*configFile) == 0 {
		println("no config set, using default config")
		cfg = config.NewConfigDefault()
	} else {
		cfg, err = config.NewConfigWithFile(*configFile)
	}

	if err != nil {
		println(err.Error())
		return
	}

	if len(*addr) > 0 {
		cfg.Addr = *addr
	}

	if len(*dataDir) > 0 {
		cfg.DataDir = *dataDir
	}

	if len(*dbName) > 0 {
		cfg.DBName = *dbName
	}

	// A slave is forced readonly with replication on; otherwise the
	// readonly/replication flags apply directly.
	if len(*slaveof) > 0 {
		cfg.SlaveOf = *slaveof
		cfg.Readonly = true
		cfg.UseReplication = true
	} else {
		cfg.Readonly = *readonly
		cfg.UseReplication = *rpl
		cfg.Replication.Sync = *rplSync
	}

	var app *server.App
	app, err = server.NewApp(cfg)
	if err != nil {
		println(err.Error())
		return
	}

	// Close the app when any of the handled signals arrives.
	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)

	go func() {
		<-sc
		app.Close()
	}()

	if *usePprof {
		go func() {
			log.Println(http.ListenAndServe(fmt.Sprintf(":%d", *pprofPort), nil))
		}()
	}

	// Blocks until the app is closed.
	app.Run()
}
package main

// main demonstrates that indexing through a pointer to an array mutates
// the underlying array itself: printing a[1] shows the updated value (12).
func main() {
	values := [...]int{
		1,
		2,
	}
	ptr := &values
	ptr[1] = ptr[1] + 10
	println(values[1])
}
package sys import ( "fmt" "strconv" "strings" ) func KillProcessByCmdline(cmdline string) error { cmdline = strings.TrimSpace(cmdline) if cmdline == "" { return fmt.Errorf("cmdline is blank") } pids := PidsByCmdline(cmdline) for i := 0; i < len(pids); i++ { out, err := CmdOutTrim("kill", "-9", strconv.Itoa(pids[i])) if err != nil { return fmt.Errorf("kill -9 %d fail: %v, output: %s", pids[i], err, out) } } return nil }
package openshift import ( "context" configv1 "github.com/openshift/api/config/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" ) func NewClusterOperator(name string) *ClusterOperator { co := &ClusterOperator{ClusterOperator: &configv1.ClusterOperator{}} co.SetName(name) return co } type ClusterOperator struct { *configv1.ClusterOperator } func (c *ClusterOperator) GetOperatorVersion() string { for _, v := range c.Status.Versions { if v.Name == "operator" { return v.Version } } return "" } func (c *ClusterOperator) GetCondition(conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition { for _, cond := range c.Status.Conditions { if cond.Type == conditionType { return &cond } } return nil } func (c *ClusterOperator) SetCondition(condition *configv1.ClusterOperatorStatusCondition) { // Filter dups conditions := []configv1.ClusterOperatorStatusCondition{} for _, c := range c.Status.Conditions { if c.Type != condition.Type { conditions = append(conditions, c) } } c.Status.Conditions = append(conditions, *condition) } type Mutator interface { Mutate(context.Context, *ClusterOperator) error } type MutateFunc func(context.Context, *ClusterOperator) error func (m MutateFunc) Mutate(ctx context.Context, co *ClusterOperator) error { return m(ctx, co) } type SerialMutations []Mutator func (s SerialMutations) Mutate(ctx context.Context, co *ClusterOperator) error { var errs []error for _, m := range s { if err := m.Mutate(ctx, co); err != nil { errs = append(errs, err) } } return utilerrors.NewAggregate(errs) }
package mpnifcloudrdb import ( "errors" "flag" "log" "strconv" "strings" "sync" "time" "github.com/alice02/nifcloud-sdk-go/nifcloud" "github.com/alice02/nifcloud-sdk-go/nifcloud/credentials" "github.com/alice02/nifcloud-sdk-go/nifcloud/session" "github.com/alice02/nifcloud-sdk-go/service/rdb" mp "github.com/mackerelio/go-mackerel-plugin" ) const layout = "2006-01-02T15:04:05Z" // RDBPlugin mackerel plugin for NIFCLOUD RDB type RDBPlugin struct { Region string AccessKeyID string SecretAccessKey string Identifier string Engine string Prefix string LabelPrefix string } func getLastPoint(client *rdb.Rdb, dimension *rdb.RequestDimensionsStruct, metricName string) (float64, error) { now := time.Now().In(time.UTC) response, err := client.NiftyGetMetricStatistics(&rdb.NiftyGetMetricStatisticsInput{ Dimensions: []*rdb.RequestDimensionsStruct{dimension}, StartTime: nifcloud.Time(now.Add(time.Duration(180) * time.Second * -1)), // 3 min (to fetch at least 1 data-point) EndTime: nifcloud.Time(now), MetricName: nifcloud.String(metricName), }) if err != nil { return 0, err } datapoints := response.Datapoints if len(datapoints) == 0 { return 0, errors.New("fetched no datapoints") } latest := new(time.Time) var latestVal float64 for _, dp := range datapoints { ts, _ := time.Parse(layout, *dp.Timestamp) if ts.Before(*latest) { continue } latest = &ts sum, _ := strconv.ParseFloat(*dp.Sum, 64) count, _ := strconv.ParseFloat(*dp.SampleCount, 64) latestVal = sum / count } return latestVal, nil } func (p RDBPlugin) rdbMetrics() (metrics []string) { for _, v := range p.GraphDefinition() { for _, vv := range v.Metrics { metrics = append(metrics, vv.Name) } } return } // FetchMetrics interface for mackerel-plugin func (p RDBPlugin) FetchMetrics() (map[string]float64, error) { sess, err := session.NewSession() if err != nil { return nil, err } config := nifcloud.NewConfig() if p.AccessKeyID != "" && p.SecretAccessKey != "" { config = 
config.WithCredentials(credentials.NewStaticCredentials(p.AccessKeyID, p.SecretAccessKey, "")) } if p.Region != "" { config = config.WithRegion(p.Region) } client := rdb.New(sess, config) stat := make(map[string]float64) perInstance := &rdb.RequestDimensionsStruct{ Name: nifcloud.String("DBInstanceIdentifier"), Value: nifcloud.String(p.Identifier), } var wg sync.WaitGroup for _, met := range p.rdbMetrics() { wg.Add(1) go func(met string) { defer wg.Done() v, err := getLastPoint(client, perInstance, met) if err == nil { stat[met] = v } else { log.Printf("%s: %s", met, err) } }(met) } wg.Wait() return stat, nil } // GraphDefinition interface for mackerel plugin func (p RDBPlugin) GraphDefinition() map[string]mp.Graphs { return map[string]mp.Graphs{ p.Prefix + ".BinLogDiskUsage": { Label: p.LabelPrefix + " BinLogDiskUsage", Unit: "bytes", Metrics: []mp.Metrics{ {Name: "BinLogDiskUsage", Label: "Usage"}, }, }, p.Prefix + ".CPUUtilization": { Label: p.LabelPrefix + " CPU Utilization", Unit: "percentage", Metrics: []mp.Metrics{ {Name: "CPUUtilization", Label: "CPUUtilization"}, }, }, p.Prefix + ".DatabaseConnections": { Label: p.LabelPrefix + " Database Connections", Unit: "float", Metrics: []mp.Metrics{ {Name: "DatabaseConnections", Label: "DatabaseConnections"}, }, }, p.Prefix + ".DiskQueueDepth": { Label: p.LabelPrefix + " DiskQueueDepth", Unit: "bytes", Metrics: []mp.Metrics{ {Name: "DiskQueueDepth", Label: "Depth"}, }, }, p.Prefix + ".FreeableMemory": { Label: p.LabelPrefix + " Freeable Memory", Unit: "bytes", Metrics: []mp.Metrics{ {Name: "FreeableMemory", Label: "FreeableMemory"}, }, }, p.Prefix + ".FreeStorageSpace": { Label: p.LabelPrefix + " Free Storage Space", Unit: "bytes", Metrics: []mp.Metrics{ {Name: "FreeStorageSpace", Label: "FreeStorageSpace"}, }, }, p.Prefix + ".SwapUsage": { Label: p.LabelPrefix + " Swap Usage", Unit: "bytes", Metrics: []mp.Metrics{ {Name: "SwapUsage", Label: "SwapUsage"}, }, }, p.Prefix + ".IOPS": { Label: p.LabelPrefix + " IOPS", 
Unit: "iops", Metrics: []mp.Metrics{ {Name: "ReadIOPS", Label: "Read"}, {Name: "WriteIOPS", Label: "Write"}, }, }, p.Prefix + ".Throughput": { Label: p.LabelPrefix + " Throughput", Unit: "bytes/sec", Metrics: []mp.Metrics{ {Name: "ReadThroughput", Label: "Read"}, {Name: "WriteThroughput", Label: "Write"}, }, }, } } func (p RDBPlugin) MetricKeyPrefix() string { if p.Prefix == "" { p.Prefix = "rdb" } return p.Prefix } func Do() { optRegion := flag.String("region", "", "Region") optAccessKeyID := flag.String("access-key-id", "", "Access Key ID") optSecretAccessKey := flag.String("secret-access-key", "", "Secret Access Key") optIdentifier := flag.String("identifier", "", "DB Instance Identifier") optPrefix := flag.String("metric-key-prefix", "rdb", "Metric key prefix") optLabelPrefix := flag.String("metric-label-prefix", "", "Metric Label prefix") optTempfile := flag.String("tempfile", "", "Temp file name") flag.Parse() rdb := RDBPlugin{ Prefix: *optPrefix, } if *optLabelPrefix == "" { if *optPrefix == "rdb" { rdb.LabelPrefix = "RDB" } else { rdb.LabelPrefix = strings.Title(*optPrefix) } } else { rdb.LabelPrefix = *optLabelPrefix } rdb.Region = *optRegion rdb.Identifier = *optIdentifier rdb.AccessKeyID = *optAccessKeyID rdb.SecretAccessKey = *optSecretAccessKey helper := mp.NewMackerelPlugin(rdb) helper.Tempfile = *optTempfile helper.Run() }
package generators import ( "github.com/almerlucke/kallos" ) // Combinator combines generators in a single generator type Combinator struct { Generators []kallos.Generator } // NewCombinator returns a new combinator func NewCombinator(gens ...kallos.Generator) *Combinator { return &Combinator{ Generators: gens, } } // GenerateValue by combining all values generated by the different generators, // this can be used to form chords func (c *Combinator) GenerateValue() kallos.Value { vs := kallos.Value{} for _, g := range c.Generators { tv := g.GenerateValue() vs = append(vs, tv...) } return vs } // IsContinuous for combinator returns false if any of the gens is not continuous func (c *Combinator) IsContinuous() bool { isContinuous := true for _, g := range c.Generators { isContinuous = g.IsContinuous() if !isContinuous { break } } return isContinuous } // Done return true if any of the gens returns true func (c *Combinator) Done() bool { done := false for _, g := range c.Generators { done = g.Done() if done { break } } return done } // Reset calls reset of gens func (c *Combinator) Reset() { for _, g := range c.Generators { g.Reset() } }
package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

// printChar is an unbuffered channel used to ping-pong control between the
// two goroutines: each send blocks until the other goroutine receives.
var printChar chan int

// prinNums sends a token to printChars, then blocks until it gets one back,
// twice. The order of sends/receives here pairs exactly with printChars;
// changing it would deadlock.
func prinNums() {
	defer wg.Done()
	for i := 0; i < 2; i++ {
		fmt.Println("prinNums", i)
		printChar <- 1111
		fmt.Println("printnum", <-printChar)
	}
}

// printChars is the mirror goroutine: it first receives the token from
// prinNums, then sends one back, twice.
func printChars() {
	defer wg.Done()
	for i := 0; i < 2; i++ {
		fmt.Println("printchar阻1", i)
		fmt.Println("printChars", <-printChar)
		fmt.Println("printchar阻2", i)
		fmt.Println("出来1")
		printChar <- 1222
		fmt.Println("出来2")
	}
}

// main wires up the unbuffered channel, starts both goroutines, and waits
// for both to finish their two-round exchange.
func main() {
	printChar = make(chan int)
	wg.Add(2)
	go prinNums()
	go printChars()
	wg.Wait()
}
package main // contains the code for logging to the android syslog // borrowed from go.mobile/app /* #cgo LDFLAGS: -llog #include <android/log.h> #include <string.h> */ import "C" import ( "fmt" "log" "unsafe" ) type infoWriter struct{} var ( ctagLog = C.CString("SensuClient") ) func (infoWriter) Write(p []byte) (n int, err error) { fmt.Print(string(p)) cstr := C.CString(string(p)) C.__android_log_write(C.ANDROID_LOG_INFO, ctagLog, cstr) C.free(unsafe.Pointer(cstr)) return len(p), nil } func init() { log.SetOutput(infoWriter{}) // android logcat includes all of log.LstdFlags log.SetFlags(log.Flags() &^ log.LstdFlags) logOutput = infoWriter{} }
package main import ( "fmt" "log" "os" homedir "github.com/mitchellh/go-homedir" ) func main() { if len(os.Args) > 2 { fmt.Printf("Usage: command <filename>\n") os.Exit(1) } path, err := homedir.Expand(os.Args[1]) if err != nil { log.Fatal(err) } fmt.Println(path) path, err = homedir.Dir() if err != nil { log.Fatal(err) } fmt.Println(path) }
package mysqlenv import "testing" func TestBuildMySQLDSN(t *testing.T) { t.Run("NoPass", func(t *testing.T) { dsn := DSN("root", "", "127.0.0.1:3306", "stratex") if dsn != "root@(127.0.0.1:3306)/stratex?parseTime=true" { t.Errorf("DSN == %v ", dsn) } }) t.Run("Pass", func(t *testing.T) { dsn := DSN("root", "dogsownus", "127.0.0.1:3306", "stratex") if dsn != "root:dogsownus@(127.0.0.1:3306)/stratex?parseTime=true" { t.Errorf("DSN == %v ", dsn) } }) t.Run("No Port", func(t *testing.T) { dsn := DSN("root", "dogsownus", "127.0.0.1", "stratex") if dsn != "root:dogsownus@(127.0.0.1:3306)/stratex?parseTime=true" { t.Errorf("DSN == %v ", dsn) } }) }
package main

import (
	"bufio"
	"fmt"
	"log"
	"math"
	"os"
	"strconv"
	"strings"
)

// main sums the ribbon needed for every present (one "LxWxH" line) in
// ../input and prints the total.
func main() {
	file, err := os.Open("../input")
	if err != nil {
		log.Fatalln("Cannot open file", err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	var feet int64
	for scanner.Scan() {
		feet += howMuchRibbon(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		log.Fatalln("Cannot scan", err)
	}
	fmt.Println(feet)
}

// howMuchRibbon returns the total ribbon for one present: the perimeter of
// the smallest face (to wrap) plus the volume (for the bow). Malformed input
// yields 0.
func howMuchRibbon(dimensions string) int64 {
	dArr := strings.Split(dimensions, "x")
	// ROBUSTNESS FIX: a line without exactly three fields used to panic on
	// the slice indexing below.
	if len(dArr) != 3 {
		return 0
	}

	// Parse failures leave the dimension at 0, matching the old behavior.
	l, _ := strconv.ParseInt(dArr[0], 10, 64)
	w, _ := strconv.ParseInt(dArr[1], 10, 64)
	h, _ := strconv.ParseInt(dArr[2], 10, 64)

	// Find the two smallest of the three dimensions.
	min1, min2 := func(nums ...int64) (int64, int64) {
		min1 := int64(math.MaxInt64)
		min2 := int64(math.MaxInt64)
		for _, v := range nums {
			if v <= min1 {
				min2 = min1
				min1 = v
			}
			if v > min1 && v < min2 {
				min2 = v
			}
		}
		return min1, min2
	}(l, w, h)

	perimeter := min1*2 + min2*2 // ribbon around the smallest face
	bow := l * w * h             // extra ribbon for the bow equals the volume
	return perimeter + bow
}
package main

// test is an unused package-level variable (leftover from experimentation).
var test int

const (
	// mutexLocked mirrors sync.Mutex's private locked flag (1 << 0 = 1);
	// mutexLocked2 continues the iota sequence (1 << 1 = 2).
	mutexLocked = 1 << iota
	mutexLocked2
)

// main allocates a map with an enormous size hint.
// NOTE(review): the hint (~1.1e14 entries) will attempt a huge bucket
// allocation and is likely to fail/OOM at runtime — confirm this is the
// intended experiment.
func main() {
	mm := make(map[string]string, 111111111111121)
	mm["2"] = "2"
	//fmt.Println(mm)
	//for i:= 0; i < 100000; i++ {
	//	mm[i]=i
	//}
	//test1()
	//test2()
	//test3(&mm)
}

//func test1() *int{
//	aa := 0
//	return &aa
//}
//func test2() int{
//	bb := 0
//	return bb
//}
//func test3(c *map[string]int) {
//	fmt.Println(c)
//}
package main

import (
	// Blank import: kept only for its side effects (demonstrates the `_` form).
	_ "archive/zip"
	// fmt aliased as "format" to demonstrate import aliasing.
	format "fmt"
)

// main prints a fixed test string via the aliased fmt package.
func main() {
	format.Println("test")
}
package main

import (
	"bufio"
	"fmt"
	"net"
	"os"
	"strings"
)

// main runs a single-connection TCP chat peer on :8085: it accepts one
// client, then concurrently prints incoming lines and forwards stdin lines.
func main() {
	fmt.Println("Launching server...")

	// listen on all interfaces
	// BUG FIX: the listen/accept errors were discarded before, producing a
	// nil-pointer panic when e.g. the port was already taken.
	ln, err := net.Listen("tcp", ":8085")
	if err != nil {
		fmt.Println("cannot listen:", err)
		os.Exit(1)
	}

	// accept connection on port
	conn, err := ln.Accept()
	if err != nil {
		fmt.Println("cannot accept:", err)
		os.Exit(1)
	}
	fmt.Println("Server launched!")

	go messageListener(conn)
	messageSender(conn)
}

// messageListener prints every newline-terminated message from the peer.
func messageListener(conn net.Conn) {
	// BUG FIX: a new bufio.Reader was previously built on every iteration,
	// which can silently drop bytes already buffered by the old reader, and
	// read errors were ignored, causing a busy loop after disconnect.
	reader := bufio.NewReader(conn)
	for {
		message, err := reader.ReadString('\n')
		if err != nil {
			fmt.Println("connection closed:", err)
			return
		}
		// erase the current line and output message received underneath
		fmt.Print("\033[2K", "\nMessage Received:", message)
	}
}

// messageSender forwards non-empty stdin lines to the peer.
// (Renamed from the misspelled "messegeSender".)
func messageSender(conn net.Conn) {
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Print("Text to send: ")
		text, err := reader.ReadString('\n')
		if err != nil {
			return
		}
		if strings.Compare(text, "\n") != 0 {
			// NOTE(review): text already ends in '\n', so appending another
			// sends a trailing blank line — confirm the peer expects that.
			conn.Write([]byte(text + "\n"))
		}
	}
}
package main import ( "github.com/mailgun/cli" ) func NewUpstreamCommand() cli.Command { return cli.Command{ Name: "upstream", Flags: flags(), Usage: "Operations with vulcan upstreams", } } func NewUpstreamSubcommands() []cli.Command { return []cli.Command{ { Name: "add", Usage: "Add a new upstream to vulcan", Action: addUpstreamAction, Flags: []cli.Flag{ cli.StringFlag{"id", "", "upstream id"}, }, }, { Name: "rm", Usage: "Remove upstream from vulcan", Action: deleteUpstreamAction, Flags: []cli.Flag{ cli.StringFlag{"id", "", "upstream id"}, }, }, { Name: "ls", Usage: "List upstreams", Action: listUpstreamsAction, }, } } func addUpstreamAction(c *cli.Context) { printStatus(client(c).AddUpstream(c.String("id"))) } func deleteUpstreamAction(c *cli.Context) { printStatus(client(c).DeleteUpstream(c.String("id"))) } func listUpstreamsAction(c *cli.Context) { out, err := client(c).GetUpstreams() if err != nil { printError(err) } else { printUpstreams(out) } }
package wordmaker

import (
	"bytes"
	"fmt"

	R "github.com/jmcvetta/randutil"
)

// Parse builds a Config named name from the config lines. Each line is lexed;
// a class header adds a choice class, a pattern header adds a pattern, and
// any other header aborts parsing with an error.
func Parse(name string, input []string, dropoff float64) (*Config, error) {
	cfg := NewConfig(name)
	for _, line := range input {
		_, items := Lex(name, line)
		header := <-items
		switch header.typ {
		case itemClass:
			if err := cfg.AddChoiceClass(MakeChoices(header.val, items, dropoff)); err != nil {
				return nil, err
			}
		case itemPattern:
			if err := cfg.AddPattern(MakePattern(items, dropoff)); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("Invalid config: %v", header)
		}
	}
	return cfg, nil
}

// chooser is anything that can produce one random choice and enumerate items.
type chooser interface {
	Choose() (string, error)
	Items() []string
}

// Choice is a leaf chooser: a single literal string value.
type Choice struct {
	value string
}

// ChoiceList is a weighted set of alternative choosers.
type ChoiceList struct {
	Name    string
	choices []R.Choice
}

// Pattern is an ordered sequence of choosers.
type Pattern struct {
	steps []chooser
}

// Run streams the pattern's output character by character on the returned
// channel, closing it when all steps are exhausted.
// NOTE(review): a failing step panics inside the goroutine — confirm intended.
func (p *Pattern) Run() chan string {
	ch := make(chan string)
	go func() {
		defer close(ch)
		for _, step := range p.steps {
			Debugf("step %+v", step)
			choice, err := step.Choose()
			if err != nil {
				panic(err)
			}
			for _, c := range choice {
				ch <- string(c)
			}
		}
	}()
	return ch
}

// Choose concatenates one realized choice from every step into a string.
func (p *Pattern) Choose() (string, error) {
	buf := bytes.Buffer{}
	for _, choice := range p.Items() {
		buf.WriteString(choice)
	}
	return buf.String(), nil
}

// Items returns the streamed output as a slice of one-character strings.
func (p *Pattern) Items() []string {
	items := []string{}
	for choice := range p.Run() {
		items = append(items, choice)
	}
	return items
}

// Append adds a step to the pattern; it always succeeds.
func (p *Pattern) Append(ch chooser) error {
	p.steps = append(p.steps, ch)
	return nil
}

// String renders the pattern as "<< step + step + ... >>" for debugging.
func (p *Pattern) String() string {
	buf := bytes.Buffer{}
	buf.WriteString("<< ")
	for _, step := range p.steps {
		buf.WriteString(fmt.Sprintf("%v + ", step))
	}
	buf.WriteString(">>")
	return buf.String()
}

// Choose picks one weighted alternative and delegates to its Choose.
func (c *ChoiceList) Choose() (string, error) {
	var ch interface{}
	Debugf("Choose among %v", c.choices)
	ch, err := R.WeightedChoice(c.choices)
	if err != nil {
		return "", err
	}
	return ch.(R.Choice).Item.(chooser).Choose()
}

// Items realizes one choice per alternative (choice errors are ignored).
func (c *ChoiceList) Items() []string {
	out := []string{}
	for _, item := range c.choices {
		val, _ := item.Item.(chooser).Choose()
		out = append(out, val)
	}
	return out
}

// String renders the list as "(- a | b | -)" for debugging.
func (c *ChoiceList) String() string {
	buf := bytes.Buffer{}
	buf.WriteString("(- ")
	for _, item := range c.choices {
		buf.WriteString(fmt.Sprintf("%v | ", item))
	}
	buf.WriteString("-)")
	return buf.String()
}

// Choose returns the literal value; it never fails.
func (c Choice) Choose() (string, error) {
	return c.value, nil
}

// String returns the literal value.
func (c Choice) String() string {
	return c.value
}

// Items returns the literal value as a one-element slice.
func (c Choice) Items() []string {
	return []string{c.value}
}

// Choices builds a weighted ChoiceList from raw values: strings become leaf
// Choices, nested slices recurse, and existing choosers pass through. The
// first value gets weight 1000; each subsequent weight is scaled by dropoff.
func Choices(name string, values []interface{}, dropoff float64) *ChoiceList {
	cl := &ChoiceList{Name: name}
	weight := 1000
	// Debugf("CHOICES %q", values)
	for _, v := range values {
		switch v.(type) {
		case string:
			nc := R.Choice{Weight: weight, Item: Choice{value: v.(string)}}
			cl.choices = append(cl.choices, nc)
		case []interface{}:
			nc := R.Choice{Weight: weight, Item: Choices("", v.([]interface{}), dropoff)}
			cl.choices = append(cl.choices, nc)
		case chooser:
			nc := R.Choice{Weight: weight, Item: v}
			cl.choices = append(cl.choices, nc)
		default:
			panic(fmt.Sprintf("Unknown choice type %T (%q)", v, v))
			// Debugf("WTF IS a %q (%T)", v, v)
		}
		weight = int(float64(weight) * dropoff)
	}
	Debugf("---> Choices(%v)", cl)
	return cl
}

// MakeChoices consumes lexer items up to the matching right paren and builds
// a ChoiceList; "/" separates alternatives and nested parens recurse.
func MakeChoices(name string, items chan item, dropoff float64) *ChoiceList {
	var pat *Pattern
	choices := []interface{}{}
	Debug("MakeChoices")
Loop:
	for i := range items {
		Debugf(" mc %v", i)
		switch i.typ {
		case itemChoice:
			Debugf(" mc a choice :%v:", i.val)
			if pat == nil {
				pat = NewPattern()
			}
			pat.Append(&Choice{value: i.val})
		case itemSlash:
			// append choice
			Debugf(" mc slash /")
			if pat != nil {
				choices = append(choices, pat)
				pat = nil
			}
		case itemLeftParen:
			Debugf(" mc ->")
			if pat == nil {
				pat = NewPattern()
			}
			pat.Append(MakeChoices("", items, dropoff))
		case itemRightParen:
			Debugf(" mc <-")
			break Loop
		}
	}
	// append choice if not nil
	if pat != nil {
		choices = append(choices, pat)
		pat = nil
	}
	return Choices(name, choices, dropoff)
}

// NewPattern returns an empty Pattern.
func NewPattern() *Pattern {
	return &Pattern{steps: []chooser{}}
}

// MakePattern consumes lexer items up to the matching right paren and builds
// a Pattern of literal steps and nested choice lists.
func MakePattern(items chan item, dropoff float64) *Pattern {
	Debug("MakePattern")
	pat := NewPattern()
Loop:
	for i := range items {
		Debugf("pat item %q", i)
		switch i.typ {
		case itemChoice:
			pat.steps = append(pat.steps, Choice{value: i.val})
		case itemLeftParen:
			pat.steps = append(pat.steps, MakeChoices("", items, dropoff))
		case itemRightParen:
			break Loop
		}
	}
	return pat
}
package user

import (
	"database/sql/driver"

	"github.com/jinzhu/gorm"

	"github.com/charlesfan/go-api/repository"
)

// UUID is a string-backed identifier stored in a database uuid column.
type UUID string

// Value implements driver.Valuer so gorm can persist UUID values as strings.
func (u UUID) Value() (driver.Value, error) {
	return string(u), nil
}

// User is the gorm model for the "user" table.
type User struct {
	UUID     UUID   `gorm:"column:uuid;unique;type:uuid;primary_key"`
	Email    string `gorm:"column:email;unique;not null"`
	Password string `gorm:"column:password;not null"`
}

// TableName tells gorm which table this model maps to.
func (User) TableName() string {
	return "user"
}

// Repository defines the persistence operations available for User records,
// plus transaction support via repository.Transactionser.
type Repository interface {
	Get(id UUID) (*User, error)
	Create(p *User) (*User, error)
	Delete(id UUID) error
	FindAll() (*[]User, error)
	Query(query interface{}, args ...interface{}) *gorm.DB
	repository.Transactionser
}
// Copyright (C) 2019. Vaultex, Inc - All rights reserved.
//
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
//
// Written by The Vaultex Engineers <engineers@vaultex.net>

// Package realip extracts a client's real public IP from request headers.
package realip

import (
	"net"
	"net/http"
	"strings"
)

// cidrs holds the parsed private/link-local blocks, built once in init.
var cidrs []*net.IPNet

func init() {
	maxCidrBlocks := []string{
		"127.0.0.1/8",    // localhost
		"10.0.0.0/8",     // 24-bit block
		"172.16.0.0/12",  // 20-bit block
		"192.168.0.0/16", // 16-bit block
		"169.254.0.0/16", // link local address
		"::1/128",        // localhost IPv6
		"fc00::/7",       // unique local address IPv6
		"fe80::/10",      // link local address IPv6
	}

	cidrs = make([]*net.IPNet, len(maxCidrBlocks))
	for i, maxCidrBlock := range maxCidrBlocks {
		_, cidr, _ := net.ParseCIDR(maxCidrBlock)
		cidrs[i] = cidr
	}
}

// isPrivateAddress works by checking if the address is under private CIDR blocks.
// List of private CIDR blocks can be seen on :
//
// https://en.wikipedia.org/wiki/Private_network
//
// https://en.wikipedia.org/wiki/Link-local_address
func isPrivateAddress(address string) bool {
	ipAddress := net.ParseIP(address)
	if ipAddress == nil {
		return false
	}

	for i := range cidrs {
		if cidrs[i].Contains(ipAddress) {
			// BUG FIX: this previously returned false, and the loop also
			// returned false unconditionally on its first iteration, so every
			// address was classified as public.
			return true
		}
	}
	return false
}

// From return client's real public IP address from http request headers.
func From(r *http.Request) string {
	var lastSeen = "x.x.x.x"
	for _, h := range []string{"X-Forwarded-For", "X-Real-Ip"} {
		addresses := strings.Split(r.Header.Get(h), ",")
		// march from right to left until we get a public address
		// that will be the address right before our proxy.
		// NOTE(review): despite the comment, this iterates left-to-right —
		// confirm the desired precedence.
		for _, ip := range addresses {
			// header can contain spaces too, strip those out.
			ip = strings.TrimSpace(ip)
			realIP := net.ParseIP(ip)
			if !realIP.IsGlobalUnicast() || isPrivateAddress(realIP.String()) {
				if ip != "" {
					lastSeen = ip
				}
				// bad address, go to next
				continue
			}
			return ip
		}
	}
	return lastSeen
}
package target

// VMWareTargetOptions holds the output filename and vSphere connection and
// placement settings for a VMWare upload target.
type VMWareTargetOptions struct {
	Filename   string `json:"filename"`
	Host       string `json:"host"`
	Username   string `json:"username"`
	Password   string `json:"password"`
	Datacenter string `json:"datacenter"`
	Cluster    string `json:"cluster"`
	Datastore  string `json:"datastore"`
}

// isTargetOptions marks VMWareTargetOptions as a TargetOptions implementation.
func (VMWareTargetOptions) isTargetOptions() {}

// NewVMWareTarget builds a Target for the "org.osbuild.vmware" backend.
func NewVMWareTarget(options *VMWareTargetOptions) *Target {
	return newTarget("org.osbuild.vmware", options)
}
package main

import (
	"testing"

	"github.com/corymurphy/adventofcode/shared"
)

// Test_Part1 runs part1 over the sample input file and checks the known answer.
func Test_Part1(t *testing.T) {
	expected := -3
	input := shared.ReadInput("input_test")
	actual := part1(input)
	shared.AssertEqual(t, expected, actual)
}

// Test_Part2 checks part2 against the inline example "()())" (expected 5).
func Test_Part2(t *testing.T) {
	expected := 5
	input := []string{"()())"}
	actual := part2(input)
	shared.AssertEqual(t, expected, actual)
}
package routers

import "Go-Websocket/servers/websocket"

// WebsocketInit registers the websocket message handlers by message type.
func WebsocketInit() {
	websocket.Register("addGroup", websocket.AddGroupController)
	websocket.Register("heartbeat", websocket.HeartbeatController)
}
/* * Copyright (c) 2021 - present Kurtosis Technologies LLC. * All Rights Reserved. */ package files_artifact_mounting_test import ( "github.com/kurtosis-tech/kurtosis-go/lib/networks" "github.com/kurtosis-tech/kurtosis-go/lib/services" "github.com/kurtosis-tech/kurtosis-go/lib/testsuite" "github.com/kurtosis-tech/kurtosis-go/testsuite/services_impl/nginx_static" "github.com/palantir/stacktrace" "time" ) const ( fileServerServiceId services.ServiceID = "file-server" waitForStartupTimeBetweenPolls = 1 * time.Second waitForStartupMaxRetries = 5 testFilesArtifactId services.FilesArtifactID = "test-files-artifact" testFilesArtifactUrl = "https://kurtosis-public-access.s3.us-east-1.amazonaws.com/test-artifacts/static-fileserver-files.tgz" // Filenames & contents for the files stored in the files artifact file1Filename = "file1.txt" file2Filename = "file2.txt" expectedFile1Contents = "file1" expectedFile2Contents = "file2" ) type FilesArtifactMountingTest struct {} func (f FilesArtifactMountingTest) GetTestConfiguration() testsuite.TestConfiguration { return testsuite.TestConfiguration{ FilesArtifactUrls: map[services.FilesArtifactID]string{ testFilesArtifactId: testFilesArtifactUrl, }, } } func (f FilesArtifactMountingTest) Setup(networkCtx *networks.NetworkContext) (networks.Network, error) { nginxStaticInitializer := nginx_static.NewNginxStaticContainerInitializer(testFilesArtifactId) _, availabilityChecker, err := networkCtx.AddService(fileServerServiceId, nginxStaticInitializer) if err != nil { return nil, stacktrace.Propagate(err, "An error occurred adding the file server service") } if err := availabilityChecker.WaitForStartup(waitForStartupTimeBetweenPolls, waitForStartupMaxRetries); err != nil { return nil, stacktrace.Propagate(err, "An error occurred waiting for the file server service to start") } return networkCtx, nil } func (f FilesArtifactMountingTest) Run(network networks.Network, testCtx testsuite.TestContext) { // Only necessary because Go doesn't have 
generics castedNetwork := network.(*networks.NetworkContext) uncastedService, err := castedNetwork.GetService(fileServerServiceId) if err != nil { testCtx.Fatal(stacktrace.Propagate(err, "An error occurred retrieving the fileserver service")) } // Only necessary because Go doesn't have generics castedService, castErrOccurred := uncastedService.(*nginx_static.NginxStaticService) if castErrOccurred { testCtx.Fatal(stacktrace.Propagate(err, "An error occurred casting the file server service API")) } file1Contents, err := castedService.GetFileContents(file1Filename) if err != nil { testCtx.Fatal(stacktrace.Propagate(err, "An error occurred getting file 1's contents")) } testCtx.AssertTrue( file1Contents == expectedFile1Contents, stacktrace.NewError("Actual file 1 contents '%v' != expected file 1 contents '%v'", file1Contents, expectedFile1Contents, ), ) file2Contents, err := castedService.GetFileContents(file2Filename) if err != nil { testCtx.Fatal(stacktrace.Propagate(err, "An error occurred getting file 2's contents")) } testCtx.AssertTrue( file2Contents == expectedFile2Contents, stacktrace.NewError("Actual file 2 contents '%v' != expected file 2 contents '%v'", file2Contents, expectedFile2Contents, ), ) } func (f FilesArtifactMountingTest) GetExecutionTimeout() time.Duration { return 60 * time.Second } func (f FilesArtifactMountingTest) GetSetupTeardownBuffer() time.Duration { return 30 * time.Second }
package hooks import ( "database/sql" "io/ioutil" "os" "path/filepath" "testing" "github.com/square/p2/pkg/logging" ) func initSQLiteAuditLogger(t *testing.T) (*SQLiteAuditLogger, string, *sql.DB) { tempDir, err := ioutil.TempDir("", "hooks_audit_log") if err != nil { t.Fatalf("Could not set up for hook audit logger test.") } dbPath := filepath.Join(tempDir, "hooks.db") logger := logging.TestLogger() auditLogger, err := NewSQLiteAuditLogger(dbPath, &logger) if err != nil { t.Fatalf("error: %v", err) } db, err := sql.Open("sqlite3", dbPath) if err != nil { t.Fatalf("error: %v", err) } return auditLogger, tempDir, db } func TestSQLiteAuditLogger(t *testing.T) { al, tempDir, db := initSQLiteAuditLogger(t) defer os.RemoveAll(tempDir) al.LogFailure(&HookExecContext{ Name: "sky", env: HookExecutionEnvironment{ HookedPodIDEnvVar: "pod", HookedPodUniqueKeyEnvVar: "deadbeef", HookEventEnvVar: "before_install"}, }, nil) rows, err := db.Query("SELECT COUNT(*) FROM hook_results") if err != nil { t.Fatalf("unable to query sqlite database: %v", err) } var count int rows.Next() err = rows.Scan(&count) if err != nil { t.Fatalf("couldn't scan the DB result: %v", err) } if count < 1 { t.Fatal("Found no hook results in the DB") } } func TestSQLiteAuditLoggerPruning(t *testing.T) { var count int al, tempDir, db := initSQLiteAuditLogger(t) defer os.RemoveAll(tempDir) al.LogFailure(&HookExecContext{ Name: "sky", env: HookExecutionEnvironment{ HookedPodIDEnvVar: "pod", HookedPodUniqueKeyEnvVar: "deadbeef", HookEventEnvVar: "before_install"}, }, nil) rows, err := db.Query("SELECT COUNT(*) FROM hook_results") if err != nil { t.Fatalf("unable to query sqlite database: %v", err) } rows.Next() err = rows.Scan(&count) if err != nil { t.Fatalf("couldn't scan the DB result: %v", err) } if count < 1 { t.Fatal("Found no hook results in the DB") } err = rows.Close() if err != nil { t.Fatalf("unable to close db result handle: %v", err) } oldTTL := *auditLogTTL *auditLogTTL = 0 defer func() { 
*auditLogTTL = oldTTL }() al.trimStaleRecords() rows, err = db.Query("SELECT COUNT(*) FROM hook_results") if err != nil { t.Fatalf("unable to query sqlite database: %v", err) } rows.Next() err = rows.Scan(&count) if err != nil { t.Fatalf("couldn't scan the DB result: %v", err) } err = rows.Close() if err != nil { t.Fatalf("couldn't close the result handle: %v", err) } if count > 0 { t.Fatalf("Expected to find no results, but found some. Pruning has failed.%d ", count) } }
package appbase

import (
	"testing"
	"os"
	"github.com/urfave/cli"
	"fmt"
)

// Test_flags exercises flag-group registration and the overridden help
// templates by running the app with several synthetic argument lists.
// It has no assertions; it passes as long as nothing panics.
func Test_flags(T *testing.T) {
	app := create()
	app.Action = act
	app.Commands = []cli.Command{
		versionCommand,
	}

	// Register a "TEST" flag group with one bool and one string flag.
	fg := NewFlags("TEST")
	fg.Add(&cli.BoolFlag{
		Name:  "booltest",
		Usage: "this is booltest",
	})
	fg.Add(&cli.StringFlag{
		Name:  "stringtest",
		Usage: "this is stringtest",
		Value: "AAddaa",
	})
	AddFlagGroup(*fg)

	// An empty "MISC" group.
	fg = NewFlags("MISC")
	AddFlagGroup(*fg)

	overrideHelpTemplates()

	// Flatten every grouped flag onto the app before running it.
	for _, fg := range appHelpFlagGroups {
		for _, f := range fg.Flags {
			app.Flags = append(app.Flags, f)
		}
	}

	// Run errors are deliberately ignored: "-asd" is an unknown flag and
	// is expected to fail; the point is exercising the help/flag paths.
	app.Run([]string{os.Args[0], "-asd"})
	app.Run([]string{os.Args[0], "version", "-h"})
	app.Run([]string{os.Args[0], "--booltest"})
	app.Run([]string{os.Args[0], ""})
}

// act is the app action under test; it prints the parsed flag values.
func act(ctx *cli.Context) error {
	fmt.Printf("args==========: %v\n", ctx.Bool("booltest"))
	fmt.Printf("args==========: %v\n", ctx.String("stringtest"))
	return nil
}
package sort // InsertSort .. func InsertSort(data []int) { var len int = len(data) for i := 1; i < len; i++ { var tmp int = data[i] j := i - 1 for ; j >= 0; j-- { var aim int = data[j] if aim > tmp { data[j+1] = data[j] } else { break } } data[j+1] = tmp } } // BubbleSort .. func BubbleSort(data []int) { var len int = len(data) for i := 0; i < len; i++ { var isChange bool for j := 0; j < len-i-1; j++ { if data[j] > data[j+1] { data[j], data[j+1] = data[j+1], data[j] isChange = true } } if false == isChange { break } } } // QuikSort .. func QuikSort(data []int) { } func partition(data []int, start, end int) { var midNum = data[start] start++ for start >= end { for data[start] < midNum && start < end { start++ } for data[end] > midNum && start < end { end-- } if start < end { data[start], data[end] = data[end], data[start] } } }
// Package hash implements hash-table data structures and related algorithms.
package hash

import (
	"github.com/carney520/go-algorithm/data-structure/list"
)

// Match compares two keys for equality.
type Match func(a, b interface{}) bool

// Hasher is the interface of hashable values.
// Every key must implement this interface.
type Hasher interface {
	hash() int
}

// Tabler lists the methods every hash-table variant must implement.
type Tabler interface {
	Get(key Hasher) (val interface{}, ok bool)
	Set(key Hasher, val interface{}) error
	Len() int
	Delete(key Hasher)
}

// ChHash is a chained (separate-chaining) hash table.
type ChHash struct {
	count int          // number of buckets
	table []*list.List // bucket list
	len   int          // number of stored items
}

// item is a single key/value entry stored in a bucket.
type item struct {
	key Hasher
	val interface{}
}

// lookup finds the entry for key. It returns the entry (nil when absent),
// the list node holding it, and the bucket the key hashes into (always
// non-nil).
// NOTE(review): keys are compared with ==, not with Match, so pointer
// keys match by identity — confirm this is intended.
func (h *ChHash) lookup(key Hasher) (entry *item, node *list.Node, bucket *list.List) {
	// Locate the bucket index.
	i := key.hash() % h.count
	bucket = h.table[i]
	var data *item
	bucket.Each(func(n *list.Node, index int) bool {
		val, ok := n.Data.(*item)
		if ok {
			if val.key == key {
				data = val
				node = n
				// Returning true presumably stops the iteration —
				// TODO confirm against list.Each's contract.
				return true
			}
		}
		return false
	})
	return data, node, bucket
}

// Get retrieves the value stored under key; ok reports whether it exists.
func (h *ChHash) Get(key Hasher) (val interface{}, ok bool) {
	i, _, _ := h.lookup(key)
	if i != nil {
		return i.val, true
	}
	return nil, false
}

// Len returns the number of items stored in the hash table.
func (h *ChHash) Len() int {
	return h.len
}

// Delete removes the given key; it is a no-op when the key is absent.
func (h *ChHash) Delete(key Hasher) {
	i, node, bucket := h.lookup(key)
	if i != nil {
		bucket.Remove(node)
		h.len--
	}
}

// Set stores a value, overwriting any existing value for key.
// The returned error is always nil in this implementation.
func (h *ChHash) Set(key Hasher, val interface{}) (err error) {
	i, _, _ := h.lookup(key)
	if i != nil {
		// Key already present: update in place, length unchanged.
		i.val = val
		return
	}
	id := key.hash() % h.count
	bucket := h.table[id]
	i = new(item)
	i.key = key
	i.val = val
	bucket.Append(i)
	h.len++
	return
}

// NewChHash creates a chained hash table with size buckets.
// NOTE(review): size == 0 would make lookup divide by zero — callers
// must pass a positive size.
func NewChHash(size int) *ChHash {
	h := &ChHash{
		count: size,
		table: make([]*list.List, size),
	}
	for i := 0; i < size; i++ {
		h.table[i] = list.New()
	}
	return h
}

// StringHash implements Hasher for computing string hashes.
type StringHash string

// hash computes a PJW/ELF-style string hash; the high-nibble fold keeps
// the result non-negative.
func (s StringHash) hash() int {
	var val int
	for i := 0; i < len(s); i++ {
		val = (val << 4) + int(s[i])
		if tmp := val & 0xf0000000; tmp != 0 {
			val = val ^ (tmp >> 24)
			val = val ^ tmp
		}
	}
	return val
}

// StringHashFunc converts a string into a StringHash key.
func StringHashFunc(str string) StringHash {
	return StringHash(str)
}
/*
Problem (Northeastern Europe 2006, Far-Eastern Subregion):
Lorries may only transport logs of exactly L meters; when a tree is sawed
into logs the remainder is wasted. Trees grow exactly 1 meter per year, so
the company can reduce the waste tax by waiting. Given N tree heights
(1 <= N <= 30000, 1 <= L, ik <= 30000), output the minimal number of
years to wait to achieve the smallest possible tax. If there is more than
one answer, output the earliest one.

Samples:
  N=3 L=1, heights 10 15 11 -> 0
  N=3 L=2, heights  5  3  6 -> 1
*/

package main

// main checks years against the two provided samples.
func main() {
	assert(years([]int{10, 15, 11}, 1) == 0)
	assert(years([]int{5, 3, 6}, 2) == 1)
}

// assert panics when the condition does not hold.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}

// years returns the minimal number of years to wait so the tax is
// smallest: the largest "growth still needed" (l - h mod l) over all
// trees whose height is not already a multiple of l.
// NOTE: a is reduced modulo l in place.
func years(a []int, l int) int {
	if l == 0 {
		return 0
	}
	wait := 0
	for i, h := range a {
		rem := h % l
		a[i] = rem
		if rem != 0 && l-rem > wait {
			wait = l - rem
		}
	}
	return wait
}
package quic

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Ginkgo specs for the client-side packet-conn multiplexer.
var _ = Describe("Client Multiplexer", func() {
	// Registering a fresh packet conn must succeed.
	It("adds a new packet conn ", func() {
		conn := newMockPacketConn()
		_, err := getMultiplexer().AddConn(conn, 8)
		Expect(err).ToNot(HaveOccurred())
	})

	// Re-registering the same conn with a different connection ID length
	// must be rejected.
	It("errors when adding an existing conn with a different connection ID length", func() {
		conn := newMockPacketConn()
		_, err := getMultiplexer().AddConn(conn, 5)
		Expect(err).ToNot(HaveOccurred())
		_, err = getMultiplexer().AddConn(conn, 6)
		// NOTE(review): "connction" (sic) mirrors the exact error string
		// the multiplexer produces; keep the two in sync if either is fixed.
		Expect(err).To(MatchError("cannot use 6 byte connection IDs on a connection that is already using 5 byte connction IDs"))
	})
})
package main

import (
	"testing"
	"time"
)

// BenchmarkSleepWith benchmarks SleepWith, keeping the setup sleep out
// of the measured time via StopTimer/StartTimer.
//
// BUG FIX: the original body called SleepWith exactly once instead of
// b.N times, so the reported ns/op was meaningless.
func BenchmarkSleepWith(b *testing.B) {
	b.StopTimer()
	// Setup phase (simulated); excluded from timing.
	time.Sleep(time.Second * 2)
	b.StartTimer()

	// Function under test, run b.N times as the harness requires.
	for i := 0; i < b.N; i++ {
		SleepWith()
	}
}
package middle // GameFinderVFSList performs BrowserVFSList and converts the results to GameLocations. func GameFinderVFSList(vfsPath string) []GameLocation { vfsEntries := []GameLocation{} for _, fi := range BrowserVFSList(vfsPath) { if fi.Dir { cgl := CheckGameLocation(fi.Location) cgl.Drive = fi.Drive vfsEntries = append(vfsEntries, cgl) } } return vfsEntries }
package acl import ( "encoding/json" "fmt" "net/url" "strings" api "github.com/uhppoted/uhppoted-lib/acl" "github.com/uhppoted/uhppoted-lib/uhppoted" "github.com/uhppoted/uhppoted-mqtt/common" ) func (a *ACL) Upload(impl uhppoted.IUHPPOTED, request []byte) (interface{}, error) { body := struct { URL *string `json:"url"` }{} if err := json.Unmarshal(request, &body); err != nil { return common.MakeError(StatusBadRequest, "Cannot parse request", err), fmt.Errorf("%w: %v", uhppoted.ErrBadRequest, err) } if body.URL == nil { return common.MakeError(StatusBadRequest, "Missing/invalid upload URI", nil), fmt.Errorf("missing/invalid upload URI") } uri, err := url.Parse(*body.URL) if err != nil { return common.MakeError(StatusBadRequest, "Missing/invalid upload URI", err), fmt.Errorf("invalid upload URL '%v' (%w)", body.URL, err) } acl, errors := api.GetACL(a.UHPPOTE, a.Devices) if len(errors) > 0 { err := fmt.Errorf("%v", errors) return common.MakeError(StatusInternalServerError, "Error retrieving ACL", err), err } if acl == nil { return common.MakeError(StatusInternalServerError, "Error retrieving card access permissions", nil), fmt.Errorf("<nil> response to GetCard request") } for k, l := range acl { infof("acl:upload", "%v Retrieved %v records", k, len(l)) } var w strings.Builder if err := api.MakeTSV(acl, a.Devices, &w); err != nil { return common.MakeError(StatusInternalServerError, "Error reformatting card access permissions", err), err } if err = a.store("acl:upload", uri.String(), "uhppoted.acl", []byte(w.String())); err != nil { return common.MakeError(StatusBadRequest, "Error uploading ACL", err), err } return struct { Uploaded string `json:"uploaded"` }{ Uploaded: uri.String(), }, nil }
package main import ( "fmt" "os" "sync" "os/exec" "context" "log" "bufio" "strings" "net/http" "io/ioutil" "path/filepath" "encoding/json" ) type Playlist struct { Name string `mapstructure:"name" bson:"name" json:"name"` } type Track struct { EId string `mapstructure:"eId" bson:"eId" json:"eId"` Name string `mapstructure:"name" bson:"name" json:"name"` Playlist *Playlist `mapstructure:"pl" bson:"pl" json:"pl"` UserName string `mapstructure:"uNm" bson:"uNm" json:"uNm"` } func getTracks(path string) []*Track { resp, err := http.Get(path) if err != nil { log.Fatal(err.Error()) } defer resp.Body.Close() content, err := ioutil.ReadAll(resp.Body) if err != nil { log.Fatal(err.Error()) } tracks := []*Track{} err = json.Unmarshal(content, &tracks) if err != nil { log.Fatal(err.Error()) } return tracks } func loadDownloadedTracks(archiveFilePath string) map[string]struct{} { downloaded := map[string]struct{}{} file, err := os.Open(archiveFilePath) if err != nil { // file will be created by youtube-dl return downloaded } defer file.Close() scanner := bufio.NewScanner(file) for scanner.Scan() { line := scanner.Text() l := strings.Split(line, " ") origin := l[0] eid := l[1] if origin == "youtube" { downloaded[eid] = struct{}{} } } if err := scanner.Err(); err != nil { log.Fatal(err) } return downloaded } func downloadTrack(ctx context.Context, track *Track) { if track.Playlist == nil { track.Playlist = &Playlist{ Name: "Default", } } playlistpath := filepath.Join(track.UserName, strings.TrimSpace(track.Playlist.Name)) archiveFilePath := filepath.Join(playlistpath, "downloaded.txt") // TODO : dont reload the file each time, share it between workers downloaded := loadDownloadedTracks(archiveFilePath) eidSplit := strings.Split(track.EId, "/") if eidSplit[1] != "yt" { return } trackYtID := eidSplit[2] if _, ok := downloaded[trackYtID]; ok { println("Tracks already downloaded : ", track.Name) return } fullTrackPath := filepath.Join(playlistpath, "%(title)s.%(ext)s") 
os.MkdirAll(playlistpath, 0700) url := fmt.Sprintf("https://www.youtube.com/watch?v=%s", trackYtID) options := []string{"--download-archive", archiveFilePath, "--no-post-overwrites", "-i", "-x", "-o", fullTrackPath, "--audio-format", "mp3", "--audio-quality", "320K", url} cmd := exec.CommandContext(ctx, "youtube-dl", options...) // cmd.Stdout = os.Stdout // cmd.Stderr = os.Stdout cmd.Stdout = ioutil.Discard cmd.Stderr = ioutil.Discard if err := cmd.Run(); err != nil { log.Printf("Error on %s : %s", track.Name, err.Error()) } println("Track downloaded : ", track.Name) } func worker(parentCtx context.Context, wg *sync.WaitGroup, ch chan *Track) { ctx, cancel := context.WithCancel(parentCtx) defer cancel() for { select { case track :=<-ch: wg.Add(1) downloadTrack(ctx, track) wg.Done() case <-ctx.Done(): return } } } func main() { if len(os.Args) < 2 { fmt.Printf("usage: ./whyd2HD USER-ID (example : 5095275a7e91c862b2a83f49)") // 52e2620f7e91c862b2b3f66a (mr rien) os.Exit(-1) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() // user := os.Args[1] user := "rien" playlist_id := "171" // url := fmt.Sprintf("https://openwhyd.org/u/%s?format=json&limit=10000000000", user) url := fmt.Sprintf("https://openwhyd.org/%s/playlist/%s?format=json&limit=1000000000000000000", user, playlist_id) tracks := getTracks(url) ch := make(chan *Track) wg := sync.WaitGroup{} for i := 0; i < 5; i++ { go worker(ctx, &wg, ch) } for _, track := range tracks { if track.Name != "" { ch <- track } } wg.Wait() }
package chip8

import "testing"

// TestBitExtraction verifies that extractReferenceBits splits the opcode
// 0x9F4D into the standard CHIP-8 operand fields:
// nnn (low 12 bits), n (low 4 bits), x (bits 8-11), y (bits 4-7),
// kk (low 8 bits).
func TestBitExtraction(t *testing.T) {
	c := NewChip8(nil, validTestROM)
	opcode := uint16(0x9F4D)
	nnn, n, x, y, kk := c.extractReferenceBits(opcode)
	if nnn != 0xF4D {
		t.Errorf("nnn was not parsed correctly: %v", nnn)
	}
	if n != 0xD {
		t.Errorf("n was not parsed correctly: %v", n)
	}
	if x != 0xF {
		t.Errorf("x was not parsed correctly: %v", x)
	}
	if y != 0x4 {
		t.Errorf("y was not parsed correctly: %v", y)
	}
	if kk != 0x4D {
		t.Errorf("kk was not parsed correctly: %v", kk)
	}
}

// TestOpRET pushes three return addresses, executes RET (0x00EE), and
// checks that the PC is set to the most recently pushed address and the
// stack pointer is decremented.
func TestOpRET(t *testing.T) {
	c := NewChip8(nil, validTestROM)
	c.Memory.PushStack(0x026F)
	c.Memory.PushStack(0x031D)
	c.Memory.PushStack(0x0754)
	c.ExecOp(0x00EE)
	if c.Memory.registers.pc != 0x0754 {
		t.Errorf("Failed to set the PC on RET operation")
	}
	if c.Memory.registers.sp != 2 {
		t.Errorf("Failed to decrement the Stack Pointer on RET operation")
	}
}
package filters import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/neuronlabs/neuron-core/query" ) // TestBasicSQLizer test the basic sqlizer functions func TestBasicSQLizer(t *testing.T) { t.Run("Single", func(t *testing.T) { s := getScope(t) fv := &query.OperatorValues{Values: []interface{}{12345}} fv.Operator = query.OpEqual queries, err := BasicSQLizer(s, s.Struct().Primary(), fv) require.NoError(t, err) require.Len(t, queries, 1) assert.Equal(t, "query_models.id = $1", queries[0].Query) if assert.Len(t, queries[0].Values, 1) { assert.Equal(t, 12345, queries[0].Values[0]) } }) t.Run("Multiple", func(t *testing.T) { s := getScope(t) fv := &query.OperatorValues{Values: []interface{}{12345, 6789}} fv.Operator = query.OpEqual queries, err := BasicSQLizer(s, s.Struct().Primary(), fv) require.NoError(t, err) require.Len(t, queries, 2) assert.Equal(t, "query_models.id = $1", queries[0].Query) if assert.Len(t, queries[0].Values, 1) { assert.Equal(t, 12345, queries[0].Values[0]) } assert.Equal(t, "query_models.id = $2", queries[1].Query) if assert.Len(t, queries[1].Values, 1) { assert.Equal(t, 6789, queries[1].Values[0]) } }) } // TestInSQLizer tests the INSQLizer function func TestInSQLizer(t *testing.T) { t.Run("Single", func(t *testing.T) { s := getScope(t) fv := &query.OperatorValues{Values: []interface{}{12345}} fv.Operator = query.OpIn queries, err := InSQLizer(s, s.Struct().Primary(), fv) require.NoError(t, err) require.Len(t, queries, 1) assert.Equal(t, "query_models.id IN ($1)", queries[0].Query) if assert.Len(t, queries[0].Values, 1) { assert.Equal(t, 12345, queries[0].Values[0]) } }) t.Run("Multiple", func(t *testing.T) { s := getScope(t) fv := &query.OperatorValues{Values: []interface{}{12345, 6789}} fv.Operator = query.OpIn queries, err := InSQLizer(s, s.Struct().Primary(), fv) require.NoError(t, err) require.Len(t, queries, 1) assert.Equal(t, "query_models.id IN ($1,$2)", queries[0].Query) if 
assert.Len(t, queries[0].Values, 2) { assert.Equal(t, 12345, queries[0].Values[0]) assert.Equal(t, 6789, queries[0].Values[1]) } }) } // TestStringOperatorsSQLizer test the string value sqlizers func TestStringOperatorsSQLizer(t *testing.T) { t.Run("Contains", func(t *testing.T) { s := getScope(t) fv := &query.OperatorValues{Values: []interface{}{"name"}} fv.Operator = query.OpContains queries, err := StringOperatorsSQLizer(s, s.Struct().Primary(), fv) require.NoError(t, err) require.Len(t, queries, 1) assert.Equal(t, "query_models.id LIKE $1", queries[0].Query) if assert.Len(t, queries[0].Values, 1) { assert.Equal(t, "%name%", queries[0].Values[0]) } }) t.Run("StartsWith", func(t *testing.T) { s := getScope(t) fv := &query.OperatorValues{Values: []interface{}{"name"}} fv.Operator = query.OpStartsWith queries, err := StringOperatorsSQLizer(s, s.Struct().Primary(), fv) require.NoError(t, err) require.Len(t, queries, 1) assert.Equal(t, "query_models.id LIKE $1", queries[0].Query) if assert.Len(t, queries[0].Values, 1) { assert.Equal(t, "name%", queries[0].Values[0]) } }) t.Run("EndsWith", func(t *testing.T) { s := getScope(t) fv := &query.OperatorValues{Values: []interface{}{"name"}} fv.Operator = query.OpEndsWith queries, err := StringOperatorsSQLizer(s, s.Struct().Primary(), fv) require.NoError(t, err) require.Len(t, queries, 1) assert.Equal(t, "query_models.id LIKE $1", queries[0].Query) if assert.Len(t, queries[0].Values, 1) { assert.Equal(t, "%name", queries[0].Values[0]) } }) t.Run("Multiple", func(t *testing.T) { s := getScope(t) fv := &query.OperatorValues{Values: []interface{}{"name", "surname"}} fv.Operator = query.OpContains queries, err := StringOperatorsSQLizer(s, s.Struct().Primary(), fv) require.NoError(t, err) require.Len(t, queries, 2) assert.Equal(t, "query_models.id LIKE $1", queries[0].Query) if assert.Len(t, queries[0].Values, 1) { assert.Equal(t, "%name%", queries[0].Values[0]) } assert.Equal(t, "query_models.id LIKE $2", queries[1].Query) if 
assert.Len(t, queries[1].Values, 1) { assert.Equal(t, "%surname%", queries[1].Values[0]) } }) }
/* This file holds types and functions supporting command-related activity in DVID. These Command types bundle operation specification and data payloads for use in RPC and HTTP APIs. */ package dvid import ( "fmt" "strings" ) // Keys for setting various arguments within the command line via "key=value" strings. const ( KeyUuid = "uuid" KeyPlane = "plane" ) var setKeys = map[string]bool{ "plane": true, "uuid": true, } // Response provides a few string fields to pass information back from // a remote operation. type Response struct { ContentType string Text string Status string } // Command supports command-line interaction with DVID. // The first item in the string slice is the command, which may be "help" // or the name of DVID data name ("grayscale8"). If the first item is the name // of a data type, the second item will have a type-specific command like "get". // The other arguments are command arguments or optional settings of the form // "<key>=<value>". type Command []string // String returns a space-separated command line func (cmd Command) String() string { return strings.Join([]string(cmd), " ") } // Name returns the first argument of the command (in lower case) which is assumed to // be the name of the command. func (cmd Command) Name() string { if len(cmd) == 0 { return "" } return strings.ToLower(cmd[0]) } // TypeCommand returns the name of a type-specific command (in lower case). func (cmd Command) TypeCommand() string { if len(cmd) < 2 { return "" } return strings.ToLower(cmd[1]) } // Parameter scans a command for any "key=value" argument and returns // the value of the passed 'key'. 
func (cmd Command) Parameter(key string) (value string, found bool) { if len(cmd) > 1 { for _, arg := range cmd[1:] { elems := strings.Split(arg, "=") if len(elems) == 2 && elems[0] == key { value = elems[1] found = true return } } } return } // CommandArgs sets a variadic argument set of string pointers to data // command arguments, ignoring setting arguments of the form "<key>=<value>". // If there aren't enough arguments to set a target, the target is set to the // empty string. It returns an 'overflow' slice that has all arguments // beyond those needed for targets. // // Example: Given the command string "add param1 param2 42 data/*.png" // // var s1, s2, s3, s4 string // filenames := CommandArgs(0, &s1, &s2, &s3, &s4) // fmt.Println(filenames) // fmt.Println(s1) // fmt.Println(s2, s3) // fmt.Println(s4) // // Would print out: // ["data/foo-1.png", "data/foo-2.png", "data/foo-3.png"] // add // param1 param2 // 42 func (cmd Command) CommandArgs(startPos int, targets ...*string) (overflow []string) { overflow = getArgs(cmd, startPos, targets...) return } func getArgs(cmd Command, startPos int, targets ...*string) (overflow []string) { overflow = make([]string, 0, len(cmd)) for _, target := range targets { *target = "" } if len(cmd) > startPos { numTargets := len(targets) curTarget := 0 for _, arg := range cmd[startPos:] { optionalSet := false elems := strings.Split(arg, "=") if len(elems) == 2 { _, optionalSet = setKeys[elems[0]] } if !optionalSet { if curTarget >= numTargets { overflow = append(overflow, arg) } else { *(targets[curTarget]) = arg } curTarget++ } } } return } // PointStr is a n-dimensional coordinate in string format "x,y,z,..." // where each coordinate is a 32-bit integer. 
type PointStr string

// VoxelCoord parses s as "x,y,z" (three 32-bit integers) into a VoxelCoord.
// err is non-nil when s does not match the expected format.
func (s PointStr) VoxelCoord() (coord VoxelCoord, err error) {
	_, err = fmt.Sscanf(string(s), "%d,%d,%d", &coord[0], &coord[1], &coord[2])
	return
}

// Point3d parses s as "x,y,z" (three 32-bit integers) into a Point3d.
func (s PointStr) Point3d() (coord Point3d, err error) {
	_, err = fmt.Sscanf(string(s), "%d,%d,%d", &coord[0], &coord[1], &coord[2])
	return
}

// Point2d parses s as "x,y" (two 32-bit integers) into a Point2d.
func (s PointStr) Point2d() (point Point2d, err error) {
	_, err = fmt.Sscanf(string(s), "%d,%d", &point[0], &point[1])
	return
}

// VectorStr is a n-dimensional coordinate in string format "x,y,z,..."
// where each coordinate is a 32-bit float.
type VectorStr string

// Vector3d parses s as "x,y,z" (three 32-bit floats) into a Vector3d.
func (s VectorStr) Vector3d() (v Vector3d, err error) {
	_, err = fmt.Sscanf(string(s), "%f,%f,%f", &v[0], &v[1], &v[2])
	return
}
package formats

import (
	"context"
	"fmt"
	"os"

	"github.com/olivere/elastic/v7"
	"gopkg.in/cheggaaa/pb.v2"
)

// JSON writes each search hit's raw _source as one JSON document per line.
// NOTE(review): the exported field name "ProgessBar" (sic) is part of the
// public interface; renaming it would break callers.
type JSON struct {
	Outfile    *os.File
	ProgessBar *pb.ProgressBar
}

// Run consumes hits until the channel is closed or ctx is cancelled,
// writing each hit's source to Outfile followed by a newline and
// advancing the progress bar. It returns ctx.Err() on cancellation,
// nil when the channel is drained.
func (j JSON) Run(ctx context.Context, hits <-chan *elastic.SearchHit) error {
	for hit := range hits {
		fmt.Fprintln(j.Outfile, string(hit.Source))
		j.ProgessBar.Increment()
		// Bail out promptly between hits if the caller cancelled.
		if err := ctx.Err(); err != nil {
			return err
		}
	}
	return nil
}
package vminterface import ( com "github.com/cryptokass/levm/common" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/ethdb" ) // NewStateDB - Create a new StateDB using levelDB instead of RAM func NewStateDB(root common.Hash, dbPath string) (*state.StateDB, ethdb.Database) { // open ethdb /*edb, err := ethdb.NewLDBDatabase(dbPath, 100, 100) db := state.NewDatabase(edb) com.PanicErr(err) */ edb, _ := rawdb.NewLevelDBDatabase(dbPath, 100, 100, "") //edb := rawdb.NewMemoryDatabase() db := state.NewDatabase(edb) // make statedb stateDB, err := state.New(root, db) com.PanicErr(err) return stateDB, edb }
// Copyright (c) 2014-2017 The btcsuite developers // Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package waddrmgr import ( "bytes" "crypto/sha256" "encoding/binary" "errors" "fmt" "time" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcwallet/walletdb" ) const ( // MaxReorgDepth represents the maximum number of block hashes we'll // keep within the wallet at any given point in order to recover from // long reorgs. MaxReorgDepth = 10000 ) var ( // LatestMgrVersion is the most recent manager version. LatestMgrVersion = getLatestVersion() // latestMgrVersion is the most recent manager version as a variable so // the tests can change it to force errors. latestMgrVersion = LatestMgrVersion ) // ObtainUserInputFunc is a function that reads a user input and returns it as // a byte stream. It is used to accept data required during upgrades, for e.g. // wallet seed and private passphrase. type ObtainUserInputFunc func() ([]byte, error) // maybeConvertDbError converts the passed error to a ManagerError with an // error code of ErrDatabase if it is not already a ManagerError. This is // useful for potential errors returned from managed transaction an other parts // of the walletdb database. func maybeConvertDbError(err error) error { // When the error is already a ManagerError, just return it. if _, ok := err.(ManagerError); ok { return err } return managerError(ErrDatabase, err.Error(), err) } // syncStatus represents a address synchronization status stored in the // database. type syncStatus uint8 // These constants define the various supported sync status types. // // NOTE: These are currently unused but are being defined for the possibility // of supporting sync status on a per-address basis. 
const ( ssNone syncStatus = 0 // not iota as they need to be stable for db ssPartial syncStatus = 1 // nolint:varcheck,deadcode ssFull syncStatus = 2 ) // addressType represents a type of address stored in the database. type addressType uint8 // These constants define the various supported address types. const ( adtChain addressType = 0 adtImport addressType = 1 // not iota as they need to be stable for db adtScript addressType = 2 adtWitnessScript addressType = 3 adtTaprootScript addressType = 4 ) // accountType represents a type of address stored in the database. type accountType uint8 // These constants define the various supported account types. const ( // accountDefault is the current "default" account type within the // database. This is an account that re-uses the key derivation schema // of BIP0044-like accounts. accountDefault accountType = 0 // not iota as they need to be stable // accountWatchOnly is the account type used for storing watch-only // accounts within the database. This is an account that re-uses the key // derivation schema of BIP0044-like accounts and does not store private // keys. accountWatchOnly accountType = 1 ) // dbAccountRow houses information stored about an account in the database. type dbAccountRow struct { acctType accountType rawData []byte // Varies based on account type field. } // dbDefaultAccountRow houses additional information stored about a default // BIP0044-like account in the database. type dbDefaultAccountRow struct { dbAccountRow pubKeyEncrypted []byte privKeyEncrypted []byte nextExternalIndex uint32 nextInternalIndex uint32 name string } // dbWatchOnlyAccountRow houses additional information stored about a watch-only // account in the databse. 
type dbWatchOnlyAccountRow struct { dbAccountRow pubKeyEncrypted []byte masterKeyFingerprint uint32 nextExternalIndex uint32 nextInternalIndex uint32 name string addrSchema *ScopeAddrSchema } // dbAddressRow houses common information stored about an address in the // database. type dbAddressRow struct { addrType addressType account uint32 addTime uint64 syncStatus syncStatus rawData []byte // Varies based on address type field. } // dbChainAddressRow houses additional information stored about a chained // address in the database. type dbChainAddressRow struct { dbAddressRow branch uint32 index uint32 } // dbImportedAddressRow houses additional information stored about an imported // public key address in the database. type dbImportedAddressRow struct { dbAddressRow encryptedPubKey []byte encryptedPrivKey []byte } // dbImportedAddressRow houses additional information stored about a script // address in the database. type dbScriptAddressRow struct { dbAddressRow encryptedHash []byte encryptedScript []byte } // dbWitnessScriptAddressRow houses additional information stored about a // witness script address in the database. type dbWitnessScriptAddressRow struct { dbAddressRow // witnessVersion is the version of the witness script. witnessVersion byte // isSecretScript denotes whether the script is considered to be // "secret" and encrypted with the script encryption key or "public" and // therefore only encrypted with the public encryption key. isSecretScript bool encryptedHash []byte // encryptedScript is the actual payload of the script address and // represents the script itself. The encoding of the script is up to the // actual implementation, it is not parsed or interpreted in any way by // the database code. So it can be a plain script or a TLV encoded MAST. encryptedScript []byte } // Key names for various database fields. var ( // nullVall is null byte used as a flag value in a bucket entry nullVal = []byte{0} // Bucket names. 
// scopeSchemaBucket is the name of the bucket that maps a particular // manager scope to the type of addresses that should be derived for // particular branches during key derivation. scopeSchemaBucketName = []byte("scope-schema") // scopeBucketNme is the name of the top-level bucket within the // hierarchy. It maps: purpose || coinType to a new sub-bucket that // will house a scoped address manager. All buckets below are a child // of this bucket: // // scopeBucket -> scope -> acctBucket // scopeBucket -> scope -> addrBucket // scopeBucket -> scope -> usedAddrBucket // scopeBucket -> scope -> addrAcctIdxBucket // scopeBucket -> scope -> acctNameIdxBucket // scopeBucket -> scope -> acctIDIdxBucketName // scopeBucket -> scope -> metaBucket // scopeBucket -> scope -> metaBucket -> lastAccountNameKey // scopeBucket -> scope -> coinTypePrivKey // scopeBucket -> scope -> coinTypePubKey scopeBucketName = []byte("scope") // coinTypePrivKeyName is the name of the key within a particular scope // bucket that stores the encrypted cointype private keys. Each scope // within the database will have its own set of coin type keys. coinTypePrivKeyName = []byte("ctpriv") // coinTypePrivKeyName is the name of the key within a particular scope // bucket that stores the encrypted cointype public keys. Each scope // will have its own set of coin type public keys. coinTypePubKeyName = []byte("ctpub") // acctBucketName is the bucket directly below the scope bucket in the // hierarchy. This bucket stores all the information and indexes // relevant to an account. acctBucketName = []byte("acct") // addrBucketName is the name of the bucket that stores a mapping of // pubkey hash to address type. This will be used to quickly determine // if a given address is under our control. 
addrBucketName = []byte("addr") // addrAcctIdxBucketName is used to index account addresses Entries in // this index may map: // * addr hash => account id // * account bucket -> addr hash => null // // To fetch the account of an address, lookup the value using the // address hash. // // To fetch all addresses of an account, fetch the account bucket, // iterate over the keys and fetch the address row from the addr // bucket. // // The index needs to be updated whenever an address is created e.g. // NewAddress addrAcctIdxBucketName = []byte("addracctidx") // acctNameIdxBucketName is used to create an index mapping an account // name string to the corresponding account id. The index needs to be // updated whenever the account name and id changes e.g. RenameAccount // // string => account_id acctNameIdxBucketName = []byte("acctnameidx") // acctIDIdxBucketName is used to create an index mapping an account id // to the corresponding account name string. The index needs to be // updated whenever the account name and id changes e.g. RenameAccount // // account_id => string acctIDIdxBucketName = []byte("acctididx") // usedAddrBucketName is the name of the bucket that stores an // addresses hash if the address has been used or not. usedAddrBucketName = []byte("usedaddrs") // meta is used to store meta-data about the address manager // e.g. last account number metaBucketName = []byte("meta") // lastAccountName is used to store the metadata - last account // in the manager lastAccountName = []byte("lastaccount") // mainBucketName is the name of the bucket that stores the encrypted // crypto keys that encrypt all other generated keys, the watch only // flag, the master private key (encrypted), the master HD private key // (encrypted), and also versioning information. mainBucketName = []byte("main") // masterHDPrivName is the name of the key that stores the master HD // private key. This key is encrypted with the master private crypto // encryption key. 
This resides under the main bucket. masterHDPrivName = []byte("mhdpriv") // masterHDPubName is the name of the key that stores the master HD // public key. This key is encrypted with the master public crypto // encryption key. This reside under the main bucket. masterHDPubName = []byte("mhdpub") // syncBucketName is the name of the bucket that stores the current // sync state of the root manager. syncBucketName = []byte("sync") // Db related key names (main bucket). mgrVersionName = []byte("mgrver") mgrCreateDateName = []byte("mgrcreated") // Crypto related key names (main bucket). masterPrivKeyName = []byte("mpriv") masterPubKeyName = []byte("mpub") cryptoPrivKeyName = []byte("cpriv") cryptoPubKeyName = []byte("cpub") cryptoScriptKeyName = []byte("cscript") watchingOnlyName = []byte("watchonly") // Sync related key names (sync bucket). syncedToName = []byte("syncedto") startBlockName = []byte("startblock") birthdayName = []byte("birthday") birthdayBlockName = []byte("birthdayblock") birthdayBlockVerifiedName = []byte("birthdayblockverified") ) // uint32ToBytes converts a 32 bit unsigned integer into a 4-byte slice in // little-endian order: 1 -> [1 0 0 0]. func uint32ToBytes(number uint32) []byte { buf := make([]byte, 4) binary.LittleEndian.PutUint32(buf, number) return buf } // stringToBytes converts a string into a variable length byte slice in // little-endian order: "abc" -> [3 0 0 0 61 62 63] func stringToBytes(s string) []byte { // The serialized format is: // <size><string> // // 4 bytes string size + string size := len(s) buf := make([]byte, 4+size) copy(buf[0:4], uint32ToBytes(uint32(size))) copy(buf[4:4+size], s) return buf } // scopeKeySize is the size of a scope as stored within the database. 
const scopeKeySize = 8 // scopeToBytes transforms a manager's scope into the form that will be used to // retrieve the bucket that all information for a particular scope is stored // under func scopeToBytes(scope *KeyScope) [scopeKeySize]byte { var scopeBytes [scopeKeySize]byte binary.LittleEndian.PutUint32(scopeBytes[:], scope.Purpose) binary.LittleEndian.PutUint32(scopeBytes[4:], scope.Coin) return scopeBytes } // scopeSchemaToBytes encodes the passed scope schema as a set of bytes // suitable for storage within the database. func scopeSchemaToBytes(schema *ScopeAddrSchema) []byte { var schemaBytes [2]byte schemaBytes[0] = byte(schema.InternalAddrType) schemaBytes[1] = byte(schema.ExternalAddrType) return schemaBytes[:] } // scopeSchemaFromBytes decodes a new scope schema instance from the set of // serialized bytes. func scopeSchemaFromBytes(schemaBytes []byte) *ScopeAddrSchema { return &ScopeAddrSchema{ InternalAddrType: AddressType(schemaBytes[0]), ExternalAddrType: AddressType(schemaBytes[1]), } } // fetchScopeAddrSchema will attempt to retrieve the address schema for a // particular manager scope stored within the database. These are used in order // to properly type each address generated by the scope address manager. 
func fetchScopeAddrSchema(ns walletdb.ReadBucket, scope *KeyScope) (*ScopeAddrSchema, error) { schemaBucket := ns.NestedReadBucket(scopeSchemaBucketName) if schemaBucket == nil { str := "unable to find scope schema bucket" return nil, managerError(ErrScopeNotFound, str, nil) } scopeKey := scopeToBytes(scope) schemaBytes := schemaBucket.Get(scopeKey[:]) if schemaBytes == nil { str := fmt.Sprintf("unable to find scope %v", scope) return nil, managerError(ErrScopeNotFound, str, nil) } return scopeSchemaFromBytes(schemaBytes), nil } func fetchReadScopeBucket(ns walletdb.ReadBucket, scope *KeyScope) (walletdb.ReadBucket, error) { rootScopeBucket := ns.NestedReadBucket(scopeBucketName) scopeKey := scopeToBytes(scope) scopedBucket := rootScopeBucket.NestedReadBucket(scopeKey[:]) if scopedBucket == nil { str := fmt.Sprintf("unable to find scope %v", scope) return nil, managerError(ErrScopeNotFound, str, nil) } return scopedBucket, nil } func fetchWriteScopeBucket(ns walletdb.ReadWriteBucket, scope *KeyScope) (walletdb.ReadWriteBucket, error) { rootScopeBucket := ns.NestedReadWriteBucket(scopeBucketName) scopeKey := scopeToBytes(scope) scopedBucket := rootScopeBucket.NestedReadWriteBucket(scopeKey[:]) if scopedBucket == nil { str := fmt.Sprintf("unable to find scope %v", scope) return nil, managerError(ErrScopeNotFound, str, nil) } return scopedBucket, nil } // fetchManagerVersion fetches the current manager version from the database. func fetchManagerVersion(ns walletdb.ReadBucket) (uint32, error) { mainBucket := ns.NestedReadBucket(mainBucketName) verBytes := mainBucket.Get(mgrVersionName) if verBytes == nil { str := "required version number not stored in database" return 0, managerError(ErrDatabase, str, nil) } version := binary.LittleEndian.Uint32(verBytes) return version, nil } // putManagerVersion stores the provided version to the database. 
func putManagerVersion(ns walletdb.ReadWriteBucket, version uint32) error { bucket := ns.NestedReadWriteBucket(mainBucketName) verBytes := uint32ToBytes(version) err := bucket.Put(mgrVersionName, verBytes) if err != nil { str := "failed to store version" return managerError(ErrDatabase, str, err) } return nil } // fetchMasterKeyParams loads the master key parameters needed to derive them // (when given the correct user-supplied passphrase) from the database. Either // returned value can be nil, but in practice only the private key params will // be nil for a watching-only database. func fetchMasterKeyParams(ns walletdb.ReadBucket) ([]byte, []byte, error) { bucket := ns.NestedReadBucket(mainBucketName) // Load the master public key parameters. Required. val := bucket.Get(masterPubKeyName) if val == nil { str := "required master public key parameters not stored in " + "database" return nil, nil, managerError(ErrDatabase, str, nil) } pubParams := make([]byte, len(val)) copy(pubParams, val) // Load the master private key parameters if they were stored. var privParams []byte val = bucket.Get(masterPrivKeyName) if val != nil { privParams = make([]byte, len(val)) copy(privParams, val) } return pubParams, privParams, nil } // putMasterKeyParams stores the master key parameters needed to derive them to // the database. Either parameter can be nil in which case no value is // written for the parameter. 
func putMasterKeyParams(ns walletdb.ReadWriteBucket, pubParams, privParams []byte) error { bucket := ns.NestedReadWriteBucket(mainBucketName) if privParams != nil { err := bucket.Put(masterPrivKeyName, privParams) if err != nil { str := "failed to store master private key parameters" return managerError(ErrDatabase, str, err) } } if pubParams != nil { err := bucket.Put(masterPubKeyName, pubParams) if err != nil { str := "failed to store master public key parameters" return managerError(ErrDatabase, str, err) } } return nil } // fetchCoinTypeKeys loads the encrypted cointype keys which are in turn used // to derive the extended keys for all accounts. Each cointype key is // associated with a particular manager scoped. func fetchCoinTypeKeys(ns walletdb.ReadBucket, scope *KeyScope) ([]byte, []byte, error) { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return nil, nil, err } coinTypePubKeyEnc := scopedBucket.Get(coinTypePubKeyName) if coinTypePubKeyEnc == nil { str := "required encrypted cointype public key not stored in database" return nil, nil, managerError(ErrDatabase, str, nil) } coinTypePrivKeyEnc := scopedBucket.Get(coinTypePrivKeyName) if coinTypePrivKeyEnc == nil { str := "required encrypted cointype private key not stored in database" return nil, nil, managerError(ErrDatabase, str, nil) } return coinTypePubKeyEnc, coinTypePrivKeyEnc, nil } // putCoinTypeKeys stores the encrypted cointype keys which are in turn used to // derive the extended keys for all accounts. Either parameter can be nil in // which case no value is written for the parameter. Each cointype key is // associated with a particular manager scope. 
func putCoinTypeKeys(ns walletdb.ReadWriteBucket, scope *KeyScope, coinTypePubKeyEnc []byte, coinTypePrivKeyEnc []byte) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } if coinTypePubKeyEnc != nil { err := scopedBucket.Put(coinTypePubKeyName, coinTypePubKeyEnc) if err != nil { str := "failed to store encrypted cointype public key" return managerError(ErrDatabase, str, err) } } if coinTypePrivKeyEnc != nil { err := scopedBucket.Put(coinTypePrivKeyName, coinTypePrivKeyEnc) if err != nil { str := "failed to store encrypted cointype private key" return managerError(ErrDatabase, str, err) } } return nil } // putMasterHDKeys stores the encrypted master HD keys in the top level main // bucket. These are required in order to create any new manager scopes, as // those are created via hardened derivation of the children of this key. func putMasterHDKeys(ns walletdb.ReadWriteBucket, masterHDPrivEnc, masterHDPubEnc []byte) error { // As this is the key for the root manager, we don't need to fetch any // particular scope, and can insert directly within the main bucket. bucket := ns.NestedReadWriteBucket(mainBucketName) // Now that we have the main bucket, we can directly store each of the // relevant keys. If we're in watch only mode, then some or all of // these keys might not be available. if masterHDPrivEnc != nil { err := bucket.Put(masterHDPrivName, masterHDPrivEnc) if err != nil { str := "failed to store encrypted master HD private key" return managerError(ErrDatabase, str, err) } } if masterHDPubEnc != nil { err := bucket.Put(masterHDPubName, masterHDPubEnc) if err != nil { str := "failed to store encrypted master HD public key" return managerError(ErrDatabase, str, err) } } return nil } // fetchMasterHDKeys attempts to fetch both the master HD private and public // keys from the database. If this is a watch only wallet, then it's possible // that the master private key isn't stored. 
func fetchMasterHDKeys(ns walletdb.ReadBucket) ([]byte, []byte) { bucket := ns.NestedReadBucket(mainBucketName) var masterHDPrivEnc, masterHDPubEnc []byte // First, we'll try to fetch the master private key. If this database // is watch only, or the master has been neutered, then this won't be // found on disk. key := bucket.Get(masterHDPrivName) if key != nil { masterHDPrivEnc = make([]byte, len(key)) copy(masterHDPrivEnc, key) } key = bucket.Get(masterHDPubName) if key != nil { masterHDPubEnc = make([]byte, len(key)) copy(masterHDPubEnc, key) } return masterHDPrivEnc, masterHDPubEnc } // fetchCryptoKeys loads the encrypted crypto keys which are in turn used to // protect the extended keys, imported keys, and scripts. Any of the returned // values can be nil, but in practice only the crypto private and script keys // will be nil for a watching-only database. func fetchCryptoKeys(ns walletdb.ReadBucket) ([]byte, []byte, []byte, error) { bucket := ns.NestedReadBucket(mainBucketName) // Load the crypto public key parameters. Required. val := bucket.Get(cryptoPubKeyName) if val == nil { str := "required encrypted crypto public not stored in database" return nil, nil, nil, managerError(ErrDatabase, str, nil) } pubKey := make([]byte, len(val)) copy(pubKey, val) // Load the crypto private key parameters if they were stored. var privKey []byte val = bucket.Get(cryptoPrivKeyName) if val != nil { privKey = make([]byte, len(val)) copy(privKey, val) } // Load the crypto script key parameters if they were stored. var scriptKey []byte val = bucket.Get(cryptoScriptKeyName) if val != nil { scriptKey = make([]byte, len(val)) copy(scriptKey, val) } return pubKey, privKey, scriptKey, nil } // putCryptoKeys stores the encrypted crypto keys which are in turn used to // protect the extended and imported keys. Either parameter can be nil in // which case no value is written for the parameter. 
func putCryptoKeys(ns walletdb.ReadWriteBucket, pubKeyEncrypted, privKeyEncrypted, scriptKeyEncrypted []byte) error { bucket := ns.NestedReadWriteBucket(mainBucketName) if pubKeyEncrypted != nil { err := bucket.Put(cryptoPubKeyName, pubKeyEncrypted) if err != nil { str := "failed to store encrypted crypto public key" return managerError(ErrDatabase, str, err) } } if privKeyEncrypted != nil { err := bucket.Put(cryptoPrivKeyName, privKeyEncrypted) if err != nil { str := "failed to store encrypted crypto private key" return managerError(ErrDatabase, str, err) } } if scriptKeyEncrypted != nil { err := bucket.Put(cryptoScriptKeyName, scriptKeyEncrypted) if err != nil { str := "failed to store encrypted crypto script key" return managerError(ErrDatabase, str, err) } } return nil } // fetchWatchingOnly loads the watching-only flag from the database. func fetchWatchingOnly(ns walletdb.ReadBucket) (bool, error) { bucket := ns.NestedReadBucket(mainBucketName) buf := bucket.Get(watchingOnlyName) if len(buf) != 1 { str := "malformed watching-only flag stored in database" return false, managerError(ErrDatabase, str, nil) } return buf[0] != 0, nil } // putWatchingOnly stores the watching-only flag to the database. func putWatchingOnly(ns walletdb.ReadWriteBucket, watchingOnly bool) error { bucket := ns.NestedReadWriteBucket(mainBucketName) var encoded byte if watchingOnly { encoded = 1 } if err := bucket.Put(watchingOnlyName, []byte{encoded}); err != nil { str := "failed to store watching only flag" return managerError(ErrDatabase, str, err) } return nil } // deserializeAccountRow deserializes the passed serialized account information. // This is used as a common base for the various account types to deserialize // the common parts. 
func deserializeAccountRow(accountID []byte, serializedAccount []byte) (*dbAccountRow, error) { // The serialized account format is: // <acctType><rdlen><rawdata> // // 1 byte acctType + 4 bytes raw data length + raw data // Given the above, the length of the entry must be at a minimum // the constant value sizes. if len(serializedAccount) < 5 { str := fmt.Sprintf("malformed serialized account for key %x", accountID) return nil, managerError(ErrDatabase, str, nil) } row := dbAccountRow{} row.acctType = accountType(serializedAccount[0]) rdlen := binary.LittleEndian.Uint32(serializedAccount[1:5]) row.rawData = make([]byte, rdlen) copy(row.rawData, serializedAccount[5:5+rdlen]) return &row, nil } // serializeAccountRow returns the serialization of the passed account row. func serializeAccountRow(row *dbAccountRow) []byte { // The serialized account format is: // <acctType><rdlen><rawdata> // // 1 byte acctType + 4 bytes raw data length + raw data rdlen := len(row.rawData) buf := make([]byte, 5+rdlen) buf[0] = byte(row.acctType) binary.LittleEndian.PutUint32(buf[1:5], uint32(rdlen)) copy(buf[5:5+rdlen], row.rawData) return buf } // deserializeDefaultAccountRow deserializes the raw data from the passed // account row as a BIP0044-like account. func deserializeDefaultAccountRow(accountID []byte, row *dbAccountRow) (*dbDefaultAccountRow, error) { // The serialized BIP0044 account raw data format is: // <encpubkeylen><encpubkey><encprivkeylen><encprivkey><nextextidx> // <nextintidx><namelen><name> // // 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes encrypted // privkey len + encrypted privkey + 4 bytes next external index + // 4 bytes next internal index + 4 bytes name len + name // Given the above, the length of the entry must be at a minimum // the constant value sizes. 
if len(row.rawData) < 20 { str := fmt.Sprintf("malformed serialized bip0044 account for "+ "key %x", accountID) return nil, managerError(ErrDatabase, str, nil) } retRow := dbDefaultAccountRow{ dbAccountRow: *row, } pubLen := binary.LittleEndian.Uint32(row.rawData[0:4]) retRow.pubKeyEncrypted = make([]byte, pubLen) copy(retRow.pubKeyEncrypted, row.rawData[4:4+pubLen]) offset := 4 + pubLen privLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4]) offset += 4 retRow.privKeyEncrypted = make([]byte, privLen) copy(retRow.privKeyEncrypted, row.rawData[offset:offset+privLen]) offset += privLen retRow.nextExternalIndex = binary.LittleEndian.Uint32(row.rawData[offset : offset+4]) offset += 4 retRow.nextInternalIndex = binary.LittleEndian.Uint32(row.rawData[offset : offset+4]) offset += 4 nameLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4]) offset += 4 retRow.name = string(row.rawData[offset : offset+nameLen]) return &retRow, nil } // serializeDefaultAccountRow returns the serialization of the raw data field // for a BIP0044-like account. 
func serializeDefaultAccountRow(encryptedPubKey, encryptedPrivKey []byte,
	nextExternalIndex, nextInternalIndex uint32, name string) []byte {

	// The serialized BIP0044 account raw data format is:
	//   <encpubkeylen><encpubkey><encprivkeylen><encprivkey><nextextidx>
	//   <nextintidx><namelen><name>
	//
	// 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes encrypted
	// privkey len + encrypted privkey + 4 bytes next external index +
	// 4 bytes next internal index + 4 bytes name len + name
	pubLen := uint32(len(encryptedPubKey))
	privLen := uint32(len(encryptedPrivKey))
	nameLen := uint32(len(name))

	// 20 fixed bytes: five 4-byte fields (pubLen, privLen,
	// nextExternalIndex, nextInternalIndex, nameLen).
	rawData := make([]byte, 20+pubLen+privLen+nameLen)
	binary.LittleEndian.PutUint32(rawData[0:4], pubLen)
	copy(rawData[4:4+pubLen], encryptedPubKey)
	offset := 4 + pubLen
	binary.LittleEndian.PutUint32(rawData[offset:offset+4], privLen)
	offset += 4
	copy(rawData[offset:offset+privLen], encryptedPrivKey)
	offset += privLen
	binary.LittleEndian.PutUint32(rawData[offset:offset+4], nextExternalIndex)
	offset += 4
	binary.LittleEndian.PutUint32(rawData[offset:offset+4], nextInternalIndex)
	offset += 4
	binary.LittleEndian.PutUint32(rawData[offset:offset+4], nameLen)
	offset += 4
	copy(rawData[offset:offset+nameLen], name)

	return rawData
}

// deserializeWatchOnlyAccountRow deserializes the raw data from the passed
// account row as a watch-only account.
func deserializeWatchOnlyAccountRow(accountID []byte,
	row *dbAccountRow) (*dbWatchOnlyAccountRow, error) {

	// The serialized BIP0044 watch-only account raw data format is:
	//   <encpubkeylen><encpubkey><masterkeyfingerprint><nextextidx>
	//   <nextintidx><namelen><name>
	//
	// 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes master key
	// fingerprint + 4 bytes next external index + 4 bytes next internal
	// index + 4 bytes name len + name + 1 byte addr schema exists + 2 bytes
	// addr schema (if exists)

	// Given the above, the length of the entry must be at a minimum
	// the constant value sizes (5 x 4-byte fields + 1 flag byte = 21).
	if len(row.rawData) < 21 {
		str := fmt.Sprintf("malformed serialized watch-only account "+
			"for key %x", accountID)
		return nil, managerError(ErrDatabase, str, nil)
	}

	retRow := dbWatchOnlyAccountRow{
		dbAccountRow: *row,
	}

	// Decode field by field with a reader; each binary.Read consumes the
	// exact byte width of its destination, so the read order below must
	// mirror the write order in serializeWatchOnlyAccountRow.
	r := bytes.NewReader(row.rawData)

	var pubLen uint32
	err := binary.Read(r, binary.LittleEndian, &pubLen)
	if err != nil {
		return nil, err
	}
	// Reading into the pre-sized slice consumes exactly pubLen bytes.
	retRow.pubKeyEncrypted = make([]byte, pubLen)
	err = binary.Read(r, binary.LittleEndian, &retRow.pubKeyEncrypted)
	if err != nil {
		return nil, err
	}

	err = binary.Read(r, binary.LittleEndian, &retRow.masterKeyFingerprint)
	if err != nil {
		return nil, err
	}

	err = binary.Read(r, binary.LittleEndian, &retRow.nextExternalIndex)
	if err != nil {
		return nil, err
	}
	err = binary.Read(r, binary.LittleEndian, &retRow.nextInternalIndex)
	if err != nil {
		return nil, err
	}

	var nameLen uint32
	err = binary.Read(r, binary.LittleEndian, &nameLen)
	if err != nil {
		return nil, err
	}
	name := make([]byte, nameLen)
	err = binary.Read(r, binary.LittleEndian, &name)
	if err != nil {
		return nil, err
	}
	retRow.name = string(name)

	// The address schema is optional; a 1-byte boolean flag records
	// whether the trailing 2-byte schema encoding is present.
	var addrSchemaExists bool
	err = binary.Read(r, binary.LittleEndian, &addrSchemaExists)
	if err != nil {
		return nil, err
	}
	if addrSchemaExists {
		var addrSchemaBytes [2]byte
		err = binary.Read(r, binary.LittleEndian, &addrSchemaBytes)
		if err != nil {
			return nil, err
		}
		retRow.addrSchema = scopeSchemaFromBytes(addrSchemaBytes[:])
	}

	return &retRow, nil
}

// serializeWatchOnlyAccountRow returns the serialization of the raw data field
// for a watch-only account.
func serializeWatchOnlyAccountRow(encryptedPubKey []byte, masterKeyFingerprint,
	nextExternalIndex, nextInternalIndex uint32, name string,
	addrSchema *ScopeAddrSchema) ([]byte, error) {

	// The serialized BIP0044 account raw data format is:
	//   <encpubkeylen><encpubkey><masterkeyfingerprint><nextextidx>
	//   <nextintidx><namelen><name>
	//
	// 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes master key
	// fingerprint + 4 bytes next external index + 4 bytes next internal
	// index + 4 bytes name len + name + 1 byte addr schema exists + 2 bytes
	// addr schema (if exists)
	pubLen := uint32(len(encryptedPubKey))
	nameLen := uint32(len(name))

	// The schema bytes are only appended when a schema was provided; the
	// 1-byte existence flag below tells the deserializer whether to
	// expect them.
	addrSchemaExists := addrSchema != nil
	var addrSchemaBytes []byte
	if addrSchemaExists {
		addrSchemaBytes = scopeSchemaToBytes(addrSchema)
	}

	// 21 fixed bytes: five 4-byte fields plus the 1-byte schema flag.
	// The buffer is pre-sized so the writes below do not reallocate.
	bufLen := 21 + pubLen + nameLen + uint32(len(addrSchemaBytes))
	buf := bytes.NewBuffer(make([]byte, 0, bufLen))

	// The write order below must mirror the read order in
	// deserializeWatchOnlyAccountRow.
	err := binary.Write(buf, binary.LittleEndian, pubLen)
	if err != nil {
		return nil, err
	}
	err = binary.Write(buf, binary.LittleEndian, encryptedPubKey)
	if err != nil {
		return nil, err
	}

	err = binary.Write(buf, binary.LittleEndian, masterKeyFingerprint)
	if err != nil {
		return nil, err
	}

	err = binary.Write(buf, binary.LittleEndian, nextExternalIndex)
	if err != nil {
		return nil, err
	}
	err = binary.Write(buf, binary.LittleEndian, nextInternalIndex)
	if err != nil {
		return nil, err
	}

	err = binary.Write(buf, binary.LittleEndian, nameLen)
	if err != nil {
		return nil, err
	}
	err = binary.Write(buf, binary.LittleEndian, []byte(name))
	if err != nil {
		return nil, err
	}

	err = binary.Write(buf, binary.LittleEndian, addrSchemaExists)
	if err != nil {
		return nil, err
	}
	if addrSchemaExists {
		err = binary.Write(buf, binary.LittleEndian, addrSchemaBytes)
		if err != nil {
			return nil, err
		}
	}

	return buf.Bytes(), nil
}

// forEachKeyScope calls the given function for each known manager scope
// within the set of scopes known by the root manager.
func forEachKeyScope(ns walletdb.ReadBucket, fn func(KeyScope) error) error { bucket := ns.NestedReadBucket(scopeBucketName) return bucket.ForEach(func(k, v []byte) error { // skip non-bucket if len(k) != 8 { return nil } scope := KeyScope{ Purpose: binary.LittleEndian.Uint32(k), Coin: binary.LittleEndian.Uint32(k[4:]), } return fn(scope) }) } // forEachAccount calls the given function with each account stored in the // manager, breaking early on error. func forEachAccount(ns walletdb.ReadBucket, scope *KeyScope, fn func(account uint32) error) error { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return err } acctBucket := scopedBucket.NestedReadBucket(acctBucketName) return acctBucket.ForEach(func(k, v []byte) error { // Skip buckets. if v == nil { return nil } return fn(binary.LittleEndian.Uint32(k)) }) } // fetchLastAccount retrieves the last account from the database. // If no accounts, returns twos-complement representation of -1, so that the next account is zero func fetchLastAccount(ns walletdb.ReadBucket, scope *KeyScope) (uint32, error) { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return 0, err } metaBucket := scopedBucket.NestedReadBucket(metaBucketName) val := metaBucket.Get(lastAccountName) if val == nil { return (1 << 32) - 1, nil } if len(val) != 4 { str := fmt.Sprintf("malformed metadata '%s' stored in database", lastAccountName) return 0, managerError(ErrDatabase, str, nil) } account := binary.LittleEndian.Uint32(val[0:4]) return account, nil } // fetchAccountName retrieves the account name given an account number from the // database. 
func fetchAccountName(ns walletdb.ReadBucket, scope *KeyScope, account uint32) (string, error) { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return "", err } acctIDxBucket := scopedBucket.NestedReadBucket(acctIDIdxBucketName) val := acctIDxBucket.Get(uint32ToBytes(account)) if val == nil { str := fmt.Sprintf("account %d not found", account) return "", managerError(ErrAccountNotFound, str, nil) } offset := uint32(0) nameLen := binary.LittleEndian.Uint32(val[offset : offset+4]) offset += 4 acctName := string(val[offset : offset+nameLen]) return acctName, nil } // fetchAccountByName retrieves the account number given an account name from // the database. func fetchAccountByName(ns walletdb.ReadBucket, scope *KeyScope, name string) (uint32, error) { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return 0, err } idxBucket := scopedBucket.NestedReadBucket(acctNameIdxBucketName) val := idxBucket.Get(stringToBytes(name)) if val == nil { str := fmt.Sprintf("account name '%s' not found", name) return 0, managerError(ErrAccountNotFound, str, nil) } return binary.LittleEndian.Uint32(val), nil } // fetchAccountInfo loads information about the passed account from the // database. 
func fetchAccountInfo(ns walletdb.ReadBucket, scope *KeyScope, account uint32) (interface{}, error) { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return nil, err } acctBucket := scopedBucket.NestedReadBucket(acctBucketName) accountID := uint32ToBytes(account) serializedRow := acctBucket.Get(accountID) if serializedRow == nil { str := fmt.Sprintf("account %d not found", account) return nil, managerError(ErrAccountNotFound, str, nil) } row, err := deserializeAccountRow(accountID, serializedRow) if err != nil { return nil, err } switch row.acctType { case accountDefault: return deserializeDefaultAccountRow(accountID, row) case accountWatchOnly: return deserializeWatchOnlyAccountRow(accountID, row) } str := fmt.Sprintf("unsupported account type '%d'", row.acctType) return nil, managerError(ErrDatabase, str, nil) } // deleteAccountNameIndex deletes the given key from the account name index of the database. func deleteAccountNameIndex(ns walletdb.ReadWriteBucket, scope *KeyScope, name string) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(acctNameIdxBucketName) // Delete the account name key err = bucket.Delete(stringToBytes(name)) if err != nil { str := fmt.Sprintf("failed to delete account name index key %s", name) return managerError(ErrDatabase, str, err) } return nil } // deleteAccounIdIndex deletes the given key from the account id index of the database. 
func deleteAccountIDIndex(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(acctIDIdxBucketName) // Delete the account id key err = bucket.Delete(uint32ToBytes(account)) if err != nil { str := fmt.Sprintf("failed to delete account id index key %d", account) return managerError(ErrDatabase, str, err) } return nil } // putAccountNameIndex stores the given key to the account name index of the // database. func putAccountNameIndex(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32, name string) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(acctNameIdxBucketName) // Write the account number keyed by the account name. err = bucket.Put(stringToBytes(name), uint32ToBytes(account)) if err != nil { str := fmt.Sprintf("failed to store account name index key %s", name) return managerError(ErrDatabase, str, err) } return nil } // putAccountIDIndex stores the given key to the account id index of the database. func putAccountIDIndex(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32, name string) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(acctIDIdxBucketName) // Write the account number keyed by the account id. err = bucket.Put(uint32ToBytes(account), stringToBytes(name)) if err != nil { str := fmt.Sprintf("failed to store account id index key %s", name) return managerError(ErrDatabase, str, err) } return nil } // putAddrAccountIndex stores the given key to the address account index of the // database. 
func putAddrAccountIndex(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32, addrHash []byte) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(addrAcctIdxBucketName) // Write account keyed by address hash err = bucket.Put(addrHash, uint32ToBytes(account)) if err != nil { return nil } bucket, err = bucket.CreateBucketIfNotExists(uint32ToBytes(account)) if err != nil { return err } // In account bucket, write a null value keyed by the address hash err = bucket.Put(addrHash, nullVal) if err != nil { str := fmt.Sprintf("failed to store address account index key %s", addrHash) return managerError(ErrDatabase, str, err) } return nil } // putAccountRow stores the provided account information to the database. This // is used a common base for storing the various account types. func putAccountRow(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32, row *dbAccountRow) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(acctBucketName) // Write the serialized value keyed by the account number. err = bucket.Put(uint32ToBytes(account), serializeAccountRow(row)) if err != nil { str := fmt.Sprintf("failed to store account %d", account) return managerError(ErrDatabase, str, err) } return nil } // putDefaultAccountInfo stores the provided default account information to the // database. func putDefaultAccountInfo(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32, encryptedPubKey, encryptedPrivKey []byte, nextExternalIndex, nextInternalIndex uint32, name string) error { rawData := serializeDefaultAccountRow( encryptedPubKey, encryptedPrivKey, nextExternalIndex, nextInternalIndex, name, ) // TODO(roasbeef): pass scope bucket directly?? 
acctRow := dbAccountRow{ acctType: accountDefault, rawData: rawData, } return putAccountInfo(ns, scope, account, &acctRow, name) } // putWatchOnlyAccountInfo stores the provided watch-only account information to // the database. func putWatchOnlyAccountInfo(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32, encryptedPubKey []byte, masterKeyFingerprint, nextExternalIndex, nextInternalIndex uint32, name string, addrSchema *ScopeAddrSchema) error { rawData, err := serializeWatchOnlyAccountRow( encryptedPubKey, masterKeyFingerprint, nextExternalIndex, nextInternalIndex, name, addrSchema, ) if err != nil { return err } // TODO(roasbeef): pass scope bucket directly?? acctRow := dbAccountRow{ acctType: accountWatchOnly, rawData: rawData, } return putAccountInfo(ns, scope, account, &acctRow, name) } // putAccountInfo stores the provided account information to the database. func putAccountInfo(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32, acctRow *dbAccountRow, name string) error { if err := putAccountRow(ns, scope, account, acctRow); err != nil { return err } // Update account id index. if err := putAccountIDIndex(ns, scope, account, name); err != nil { return err } // Update account name index. return putAccountNameIndex(ns, scope, account, name) } // putLastAccount stores the provided metadata - last account - to the // database. func putLastAccount(ns walletdb.ReadWriteBucket, scope *KeyScope, account uint32) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(metaBucketName) err = bucket.Put(lastAccountName, uint32ToBytes(account)) if err != nil { str := fmt.Sprintf("failed to update metadata '%s'", lastAccountName) return managerError(ErrDatabase, str, err) } return nil } // deserializeAddressRow deserializes the passed serialized address // information. This is used as a common base for the various address types to // deserialize the common parts. 
func deserializeAddressRow(serializedAddress []byte) (*dbAddressRow, error) { // The serialized address format is: // <addrType><account><addedTime><syncStatus><rawdata> // // 1 byte addrType + 4 bytes account + 8 bytes addTime + 1 byte // syncStatus + 4 bytes raw data length + raw data // Given the above, the length of the entry must be at a minimum // the constant value sizes. if len(serializedAddress) < 18 { str := "malformed serialized address" return nil, managerError(ErrDatabase, str, nil) } row := dbAddressRow{} row.addrType = addressType(serializedAddress[0]) row.account = binary.LittleEndian.Uint32(serializedAddress[1:5]) row.addTime = binary.LittleEndian.Uint64(serializedAddress[5:13]) row.syncStatus = syncStatus(serializedAddress[13]) rdlen := binary.LittleEndian.Uint32(serializedAddress[14:18]) row.rawData = make([]byte, rdlen) copy(row.rawData, serializedAddress[18:18+rdlen]) return &row, nil } // serializeAddressRow returns the serialization of the passed address row. func serializeAddressRow(row *dbAddressRow) []byte { // The serialized address format is: // <addrType><account><addedTime><syncStatus><commentlen><comment> // <rawdata> // // 1 byte addrType + 4 bytes account + 8 bytes addTime + 1 byte // syncStatus + 4 bytes raw data length + raw data rdlen := len(row.rawData) buf := make([]byte, 18+rdlen) buf[0] = byte(row.addrType) binary.LittleEndian.PutUint32(buf[1:5], row.account) binary.LittleEndian.PutUint64(buf[5:13], row.addTime) buf[13] = byte(row.syncStatus) binary.LittleEndian.PutUint32(buf[14:18], uint32(rdlen)) copy(buf[18:18+rdlen], row.rawData) return buf } // deserializeChainedAddress deserializes the raw data from the passed address // row as a chained address. 
func deserializeChainedAddress(row *dbAddressRow) (*dbChainAddressRow, error) { // The serialized chain address raw data format is: // <branch><index> // // 4 bytes branch + 4 bytes address index if len(row.rawData) != 8 { str := "malformed serialized chained address" return nil, managerError(ErrDatabase, str, nil) } retRow := dbChainAddressRow{ dbAddressRow: *row, } retRow.branch = binary.LittleEndian.Uint32(row.rawData[0:4]) retRow.index = binary.LittleEndian.Uint32(row.rawData[4:8]) return &retRow, nil } // serializeChainedAddress returns the serialization of the raw data field for // a chained address. func serializeChainedAddress(branch, index uint32) []byte { // The serialized chain address raw data format is: // <branch><index> // // 4 bytes branch + 4 bytes address index rawData := make([]byte, 8) binary.LittleEndian.PutUint32(rawData[0:4], branch) binary.LittleEndian.PutUint32(rawData[4:8], index) return rawData } // deserializeImportedAddress deserializes the raw data from the passed address // row as an imported address. func deserializeImportedAddress(row *dbAddressRow) (*dbImportedAddressRow, error) { // The serialized imported address raw data format is: // <encpubkeylen><encpubkey><encprivkeylen><encprivkey> // // 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes encrypted // privkey len + encrypted privkey // Given the above, the length of the entry must be at a minimum // the constant value sizes. 
if len(row.rawData) < 8 { str := "malformed serialized imported address" return nil, managerError(ErrDatabase, str, nil) } retRow := dbImportedAddressRow{ dbAddressRow: *row, } pubLen := binary.LittleEndian.Uint32(row.rawData[0:4]) retRow.encryptedPubKey = make([]byte, pubLen) copy(retRow.encryptedPubKey, row.rawData[4:4+pubLen]) offset := 4 + pubLen privLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4]) offset += 4 retRow.encryptedPrivKey = make([]byte, privLen) copy(retRow.encryptedPrivKey, row.rawData[offset:offset+privLen]) return &retRow, nil } // serializeImportedAddress returns the serialization of the raw data field for // an imported address. func serializeImportedAddress(encryptedPubKey, encryptedPrivKey []byte) []byte { // The serialized imported address raw data format is: // <encpubkeylen><encpubkey><encprivkeylen><encprivkey> // // 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes encrypted // privkey len + encrypted privkey pubLen := uint32(len(encryptedPubKey)) privLen := uint32(len(encryptedPrivKey)) rawData := make([]byte, 8+pubLen+privLen) binary.LittleEndian.PutUint32(rawData[0:4], pubLen) copy(rawData[4:4+pubLen], encryptedPubKey) offset := 4 + pubLen binary.LittleEndian.PutUint32(rawData[offset:offset+4], privLen) offset += 4 copy(rawData[offset:offset+privLen], encryptedPrivKey) return rawData } // deserializeScriptAddress deserializes the raw data from the passed address // row as a script address. func deserializeScriptAddress(row *dbAddressRow) (*dbScriptAddressRow, error) { // The serialized script address raw data format is: // <encscripthashlen><encscripthash><encscriptlen><encscript> // // 4 bytes encrypted script hash len + encrypted script hash + 4 bytes // encrypted script len + encrypted script // Given the above, the length of the entry must be at a minimum // the constant value sizes. 
if len(row.rawData) < 8 { str := "malformed serialized script address" return nil, managerError(ErrDatabase, str, nil) } retRow := dbScriptAddressRow{ dbAddressRow: *row, } hashLen := binary.LittleEndian.Uint32(row.rawData[0:4]) retRow.encryptedHash = make([]byte, hashLen) copy(retRow.encryptedHash, row.rawData[4:4+hashLen]) offset := 4 + hashLen scriptLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4]) offset += 4 retRow.encryptedScript = make([]byte, scriptLen) copy(retRow.encryptedScript, row.rawData[offset:offset+scriptLen]) return &retRow, nil } // serializeScriptAddress returns the serialization of the raw data field for // a script address. func serializeScriptAddress(encryptedHash, encryptedScript []byte) []byte { // The serialized script address raw data format is: // <encscripthashlen><encscripthash><encscriptlen><encscript> // // 4 bytes encrypted script hash len + encrypted script hash + 4 bytes // encrypted script len + encrypted script hashLen := uint32(len(encryptedHash)) scriptLen := uint32(len(encryptedScript)) rawData := make([]byte, 8+hashLen+scriptLen) binary.LittleEndian.PutUint32(rawData[0:4], hashLen) copy(rawData[4:4+hashLen], encryptedHash) offset := 4 + hashLen binary.LittleEndian.PutUint32(rawData[offset:offset+4], scriptLen) offset += 4 copy(rawData[offset:offset+scriptLen], encryptedScript) return rawData } // deserializeWitnessScriptAddress deserializes the raw data from the passed // address row as a witness script address. 
func deserializeWitnessScriptAddress( row *dbAddressRow) (*dbWitnessScriptAddressRow, error) { // The serialized witness script address raw data format is: // <witness_version><is_secret_script><encscripthashlen> // <encscripthash><encscriptlen><encscript> // // 1 byte witness version + 1 byte boolean + 4 bytes encrypted script // hash len + encrypted script hash + 4 bytes encrypted script len + // encrypted script const minLength = 1 + 1 + 4 + 4 // Given the above, the length of the entry must be at a minimum // the constant value sizes. if len(row.rawData) < minLength { str := "malformed serialized witness script address" return nil, managerError(ErrDatabase, str, nil) } retRow := dbWitnessScriptAddressRow{ dbAddressRow: *row, witnessVersion: row.rawData[0], isSecretScript: row.rawData[1] == 1, } hashLen := binary.LittleEndian.Uint32(row.rawData[2:6]) retRow.encryptedHash = make([]byte, hashLen) copy(retRow.encryptedHash, row.rawData[6:6+hashLen]) offset := 6 + hashLen scriptLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4]) offset += 4 retRow.encryptedScript = make([]byte, scriptLen) copy(retRow.encryptedScript, row.rawData[offset:offset+scriptLen]) return &retRow, nil } // serializeWitnessScriptAddress returns the serialization of the raw data field // for a witness script address. 
func serializeWitnessScriptAddress(witnessVersion uint8, isSecretScript bool,
	encryptedHash, encryptedScript []byte) []byte {

	// The serialized witness script address raw data format is:
	//   <witness_version><is_secret_script><encscripthashlen>
	//   <encscripthash><encscriptlen><encscript>
	//
	// 1 byte witness version + 1 byte boolean + 4 bytes encrypted script
	// hash len + encrypted script hash + 4 bytes encrypted script len +
	// encrypted script
	hashLen := uint32(len(encryptedHash))
	scriptLen := uint32(len(encryptedScript))

	// Encode the boolean as a single byte (1 = secret, 0 = public).
	var secretFlag byte
	if isSecretScript {
		secretFlag = 1
	}

	// Assemble the record by appending each field in order; the slice is
	// pre-sized so no reallocation occurs.
	var lenBuf [4]byte
	serialized := make([]byte, 0, 10+hashLen+scriptLen)
	serialized = append(serialized, witnessVersion, secretFlag)
	binary.LittleEndian.PutUint32(lenBuf[:], hashLen)
	serialized = append(serialized, lenBuf[:]...)
	serialized = append(serialized, encryptedHash...)
	binary.LittleEndian.PutUint32(lenBuf[:], scriptLen)
	serialized = append(serialized, lenBuf[:]...)
	serialized = append(serialized, encryptedScript...)
	return serialized
}

// fetchAddressByHash loads address information for the provided address hash
// from the database. The returned value is one of the address rows for the
// specific address type. The caller should use type assertions to ascertain
// the type. The caller should prefix the error message with the address hash
// which caused the failure.
func fetchAddressByHash(ns walletdb.ReadBucket, scope *KeyScope, addrHash []byte) (interface{}, error) { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return nil, err } bucket := scopedBucket.NestedReadBucket(addrBucketName) serializedRow := bucket.Get(addrHash) if serializedRow == nil { str := "address not found" return nil, managerError(ErrAddressNotFound, str, nil) } row, err := deserializeAddressRow(serializedRow) if err != nil { return nil, err } switch row.addrType { case adtChain: return deserializeChainedAddress(row) case adtImport: return deserializeImportedAddress(row) case adtScript: return deserializeScriptAddress(row) case adtWitnessScript: return deserializeWitnessScriptAddress(row) case adtTaprootScript: // A taproot script address is just a normal script address that // TLV encodes more stuff in the raw script part. But in the // database we store the same fields. return deserializeWitnessScriptAddress(row) } str := fmt.Sprintf("unsupported address type '%d'", row.addrType) return nil, managerError(ErrDatabase, str, nil) } // fetchAddressUsed returns true if the provided address id was flagged as used. func fetchAddressUsed(ns walletdb.ReadBucket, scope *KeyScope, addressID []byte) bool { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return false } bucket := scopedBucket.NestedReadBucket(usedAddrBucketName) addrHash := sha256.Sum256(addressID) return bucket.Get(addrHash[:]) != nil } // markAddressUsed flags the provided address id as used in the database. 
func markAddressUsed(ns walletdb.ReadWriteBucket, scope *KeyScope, addressID []byte) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(usedAddrBucketName) addrHash := sha256.Sum256(addressID) val := bucket.Get(addrHash[:]) if val != nil { return nil } err = bucket.Put(addrHash[:], []byte{0}) if err != nil { str := fmt.Sprintf("failed to mark address used %x", addressID) return managerError(ErrDatabase, str, err) } return nil } // fetchAddress loads address information for the provided address id from the // database. The returned value is one of the address rows for the specific // address type. The caller should use type assertions to ascertain the type. // The caller should prefix the error message with the address which caused the // failure. func fetchAddress(ns walletdb.ReadBucket, scope *KeyScope, addressID []byte) (interface{}, error) { addrHash := sha256.Sum256(addressID) return fetchAddressByHash(ns, scope, addrHash[:]) } // putAddress stores the provided address information to the database. This is // used a common base for storing the various address types. func putAddress(ns walletdb.ReadWriteBucket, scope *KeyScope, addressID []byte, row *dbAddressRow) error { scopedBucket, err := fetchWriteScopeBucket(ns, scope) if err != nil { return err } bucket := scopedBucket.NestedReadWriteBucket(addrBucketName) // Write the serialized value keyed by the hash of the address. The // additional hash is used to conceal the actual address while still // allowed keyed lookups. addrHash := sha256.Sum256(addressID) err = bucket.Put(addrHash[:], serializeAddressRow(row)) if err != nil { str := fmt.Sprintf("failed to store address %x", addressID) return managerError(ErrDatabase, str, err) } // Update address account index return putAddrAccountIndex(ns, scope, row.account, addrHash[:]) } // putChainedAddress stores the provided chained address information to the // database. 
func putChainedAddress(ns walletdb.ReadWriteBucket, scope *KeyScope,
	addressID []byte, account uint32, status syncStatus,
	branch, index uint32, addrType addressType) error {

	scopedBucket, err := fetchWriteScopeBucket(ns, scope)
	if err != nil {
		return err
	}

	addrRow := dbAddressRow{
		addrType:   addrType,
		account:    account,
		addTime:    uint64(time.Now().Unix()),
		syncStatus: status,
		rawData:    serializeChainedAddress(branch, index),
	}
	if err := putAddress(ns, scope, addressID, &addrRow); err != nil {
		return err
	}

	// Update the next index for the appropriate internal or external
	// branch.
	accountID := uint32ToBytes(account)
	bucket := scopedBucket.NestedReadWriteBucket(acctBucketName)
	serializedAccount := bucket.Get(accountID)

	// Deserialize the account row.
	row, err := deserializeAccountRow(accountID, serializedAccount)
	if err != nil {
		return err
	}

	// Both account types carry their own next-index counters, so the
	// update below is performed per account type. Only row.rawData is
	// replaced; the generic row header is preserved.
	switch row.acctType {
	case accountDefault:
		arow, err := deserializeDefaultAccountRow(accountID, row)
		if err != nil {
			return err
		}

		// Increment the appropriate next index depending on whether the
		// branch is internal or external.
		nextExternalIndex := arow.nextExternalIndex
		nextInternalIndex := arow.nextInternalIndex
		if branch == InternalBranch {
			nextInternalIndex = index + 1
		} else {
			nextExternalIndex = index + 1
		}

		// Reserialize the account with the updated index and store it.
		row.rawData = serializeDefaultAccountRow(
			arow.pubKeyEncrypted, arow.privKeyEncrypted,
			nextExternalIndex, nextInternalIndex, arow.name,
		)

	case accountWatchOnly:
		arow, err := deserializeWatchOnlyAccountRow(accountID, row)
		if err != nil {
			return err
		}

		// Increment the appropriate next index depending on whether the
		// branch is internal or external.
		nextExternalIndex := arow.nextExternalIndex
		nextInternalIndex := arow.nextInternalIndex
		if branch == InternalBranch {
			nextInternalIndex = index + 1
		} else {
			nextExternalIndex = index + 1
		}

		// Reserialize the account with the updated index and store it.
		row.rawData, err = serializeWatchOnlyAccountRow(
			arow.pubKeyEncrypted, arow.masterKeyFingerprint,
			nextExternalIndex, nextInternalIndex, arow.name,
			arow.addrSchema,
		)
		if err != nil {
			return err
		}
	}

	err = bucket.Put(accountID, serializeAccountRow(row))
	if err != nil {
		str := fmt.Sprintf("failed to update next index for "+
			"address %x, account %d", addressID, account)
		return managerError(ErrDatabase, str, err)
	}
	return nil
}

// putImportedAddress stores the provided imported address information to the
// database.
func putImportedAddress(ns walletdb.ReadWriteBucket, scope *KeyScope,
	addressID []byte, account uint32, status syncStatus,
	encryptedPubKey, encryptedPrivKey []byte) error {

	rawData := serializeImportedAddress(encryptedPubKey, encryptedPrivKey)
	addrRow := dbAddressRow{
		addrType:   adtImport,
		account:    account,
		addTime:    uint64(time.Now().Unix()),
		syncStatus: status,
		rawData:    rawData,
	}
	return putAddress(ns, scope, addressID, &addrRow)
}

// putScriptAddress stores the provided script address information to the
// database.
func putScriptAddress(ns walletdb.ReadWriteBucket, scope *KeyScope,
	addressID []byte, account uint32, status syncStatus,
	encryptedHash, encryptedScript []byte) error {

	rawData := serializeScriptAddress(encryptedHash, encryptedScript)
	addrRow := dbAddressRow{
		addrType:   adtScript,
		account:    account,
		addTime:    uint64(time.Now().Unix()),
		syncStatus: status,
		rawData:    rawData,
	}
	if err := putAddress(ns, scope, addressID, &addrRow); err != nil {
		return err
	}

	return nil
}

// putWitnessScriptAddress stores the provided witness script address
// information to the database.
func putWitnessScriptAddress(ns walletdb.ReadWriteBucket, scope *KeyScope, addressID []byte, account uint32, status syncStatus, witnessVersion uint8, isSecretScript bool, encryptedHash, encryptedScript []byte) error { rawData := serializeWitnessScriptAddress( witnessVersion, isSecretScript, encryptedHash, encryptedScript, ) addrType := adtWitnessScript if witnessVersion == witnessVersionV1 { // A taproot script stores a TLV encoded blob of data in the // raw data field. So we only really need to use a different // storage type since all other fields stay the same. addrType = adtTaprootScript } addrRow := dbAddressRow{ addrType: addrType, account: account, addTime: uint64(time.Now().Unix()), syncStatus: status, rawData: rawData, } if err := putAddress(ns, scope, addressID, &addrRow); err != nil { return err } return nil } // existsAddress returns whether or not the address id exists in the database. func existsAddress(ns walletdb.ReadBucket, scope *KeyScope, addressID []byte) bool { scopedBucket, err := fetchReadScopeBucket(ns, scope) if err != nil { return false } bucket := scopedBucket.NestedReadBucket(addrBucketName) addrHash := sha256.Sum256(addressID) return bucket.Get(addrHash[:]) != nil } // fetchAddrAccount returns the account to which the given address belongs to. // It looks up the account using the addracctidx index which maps the address // hash to its corresponding account id. 
func fetchAddrAccount(ns walletdb.ReadBucket, scope *KeyScope,
	addressID []byte) (uint32, error) {

	scopedBucket, err := fetchReadScopeBucket(ns, scope)
	if err != nil {
		return 0, err
	}

	bucket := scopedBucket.NestedReadBucket(addrAcctIdxBucketName)

	// The index is keyed by the SHA-256 of the address id and stores the
	// little-endian account number.
	addrHash := sha256.Sum256(addressID)
	val := bucket.Get(addrHash[:])
	if val == nil {
		str := "address not found"
		return 0, managerError(ErrAddressNotFound, str, nil)
	}
	return binary.LittleEndian.Uint32(val), nil
}

// forEachAccountAddress calls the given function with each address of the
// given account stored in the manager, breaking early on error.
func forEachAccountAddress(ns walletdb.ReadBucket, scope *KeyScope,
	account uint32, fn func(rowInterface interface{}) error) error {

	scopedBucket, err := fetchReadScopeBucket(ns, scope)
	if err != nil {
		return err
	}

	bucket := scopedBucket.NestedReadBucket(addrAcctIdxBucketName).
		NestedReadBucket(uint32ToBytes(account))

	// If index bucket is missing the account, there hasn't been any
	// address entries yet
	if bucket == nil {
		return nil
	}

	err = bucket.ForEach(func(k, v []byte) error {
		// Skip buckets.
		if v == nil {
			return nil
		}
		addrRow, err := fetchAddressByHash(ns, scope, k)
		if err != nil {
			// Annotate manager errors with the offending address
			// hash so the caller can identify the bad entry.
			if merr, ok := err.(*ManagerError); ok {
				desc := fmt.Sprintf("failed to fetch address hash '%s': %v",
					k, merr.Description)
				merr.Description = desc
				return merr
			}
			return err
		}

		return fn(addrRow)
	})
	if err != nil {
		return maybeConvertDbError(err)
	}
	return nil
}

// forEachActiveAddress calls the given function with each active address
// stored in the manager, breaking early on error.
func forEachActiveAddress(ns walletdb.ReadBucket, scope *KeyScope,
	fn func(rowInterface interface{}) error) error {

	scopedBucket, err := fetchReadScopeBucket(ns, scope)
	if err != nil {
		return err
	}

	bucket := scopedBucket.NestedReadBucket(addrBucketName)

	err = bucket.ForEach(func(k, v []byte) error {
		// Skip buckets.
		if v == nil {
			return nil
		}

		// Deserialize the address row first to determine the field
		// values.
		addrRow, err := fetchAddressByHash(ns, scope, k)
		// Annotate manager errors with the offending address hash so
		// the caller can identify the bad entry. (The type assertion is
		// a no-op when err is nil.)
		if merr, ok := err.(*ManagerError); ok {
			desc := fmt.Sprintf("failed to fetch address hash '%s': %v",
				k, merr.Description)
			merr.Description = desc
			return merr
		}
		if err != nil {
			return err
		}

		return fn(addrRow)
	})
	if err != nil {
		return maybeConvertDbError(err)
	}

	return nil
}

// deletePrivateKeys removes all private key material from the database.
//
// NOTE: Care should be taken when calling this function. It is primarily
// intended for use in converting to a watching-only copy. Removing the private
// keys from the main database without also marking it watching-only will result
// in an unusable database. It will also make any imported scripts and private
// keys unrecoverable unless there is a backup copy available.
func deletePrivateKeys(ns walletdb.ReadWriteBucket) error {
	bucket := ns.NestedReadWriteBucket(mainBucketName)

	// Delete the master private key params and the crypto private and
	// script keys.
	if err := bucket.Delete(masterPrivKeyName); err != nil {
		str := "failed to delete master private key parameters"
		return managerError(ErrDatabase, str, err)
	}
	if err := bucket.Delete(cryptoPrivKeyName); err != nil {
		str := "failed to delete crypto private key"
		return managerError(ErrDatabase, str, err)
	}
	if err := bucket.Delete(cryptoScriptKeyName); err != nil {
		str := "failed to delete crypto script key"
		return managerError(ErrDatabase, str, err)
	}
	if err := bucket.Delete(masterHDPrivName); err != nil {
		str := "failed to delete master HD priv key"
		return managerError(ErrDatabase, str, err)
	}

	// With the master key and meta encryption keys deleted, we'll need to
	// delete the keys for all known scopes as well.
	scopeBucket := ns.NestedReadWriteBucket(scopeBucketName)
	err := scopeBucket.ForEach(func(scopeKey, _ []byte) error {
		// Scope keys are exactly 8 bytes; skip anything else (such as
		// non-scope entries that may share the bucket).
		if len(scopeKey) != 8 {
			return nil
		}

		managerScopeBucket := scopeBucket.NestedReadWriteBucket(scopeKey)

		if err := managerScopeBucket.Delete(coinTypePrivKeyName); err != nil {
			str := "failed to delete cointype private key"
			return managerError(ErrDatabase, str, err)
		}

		// Delete the account extended private key for all accounts.
		bucket = managerScopeBucket.NestedReadWriteBucket(acctBucketName)
		err := bucket.ForEach(func(k, v []byte) error {
			// Skip buckets.
			if v == nil {
				return nil
			}

			// Deserialize the account row first to determine the type.
			row, err := deserializeAccountRow(k, v)
			if err != nil {
				return err
			}

			switch row.acctType {
			case accountDefault:
				arow, err := deserializeDefaultAccountRow(k, row)
				if err != nil {
					return err
				}

				// Reserialize the account without the private key and
				// store it. Passing nil for the encrypted private key
				// strips it from the stored row.
				row.rawData = serializeDefaultAccountRow(
					arow.pubKeyEncrypted, nil,
					arow.nextExternalIndex, arow.nextInternalIndex,
					arow.name,
				)
				err = bucket.Put(k, serializeAccountRow(row))
				if err != nil {
					str := "failed to delete account private key"
					return managerError(ErrDatabase, str, err)
				}

			// Watch-only accounts don't contain any private keys.
			case accountWatchOnly:
			}

			return nil
		})
		if err != nil {
			return maybeConvertDbError(err)
		}

		// Delete the private key for all imported addresses.
		bucket = managerScopeBucket.NestedReadWriteBucket(addrBucketName)
		err = bucket.ForEach(func(k, v []byte) error {
			// Skip buckets.
			if v == nil {
				return nil
			}

			// Deserialize the address row first to determine the field
			// values.
			row, err := deserializeAddressRow(v)
			if err != nil {
				return err
			}

			switch row.addrType {
			case adtImport:
				irow, err := deserializeImportedAddress(row)
				if err != nil {
					return err
				}

				// Reserialize the imported address without the private
				// key and store it.
				// Passing nil for the encrypted private key strips it
				// from the stored row while keeping the public key.
				row.rawData = serializeImportedAddress(
					irow.encryptedPubKey, nil)
				err = bucket.Put(k, serializeAddressRow(row))
				if err != nil {
					str := "failed to delete imported private key"
					return managerError(ErrDatabase, str, err)
				}

			case adtScript:
				srow, err := deserializeScriptAddress(row)
				if err != nil {
					return err
				}

				// Reserialize the script address without the script
				// and store it.
				row.rawData = serializeScriptAddress(srow.encryptedHash,
					nil)
				err = bucket.Put(k, serializeAddressRow(row))
				if err != nil {
					str := "failed to delete imported script"
					return managerError(ErrDatabase, str, err)
				}

			case adtWitnessScript:
				srow, err := deserializeWitnessScriptAddress(row)
				if err != nil {
					return err
				}

				// If the script is considered to be public, we
				// don't need to do anything.
				if !srow.isSecretScript {
					return nil
				}

				// Re-serialize the script address without the
				// script and store it.
				row.rawData = serializeWitnessScriptAddress(
					srow.witnessVersion, srow.isSecretScript,
					srow.encryptedHash, nil,
				)
				err = bucket.Put(k, serializeAddressRow(row))
				if err != nil {
					str := "failed to delete imported script"
					return managerError(ErrDatabase, str, err)
				}
			}

			return nil
		})
		if err != nil {
			return maybeConvertDbError(err)
		}

		return nil
	})
	if err != nil {
		return maybeConvertDbError(err)
	}

	return nil
}

// fetchSyncedTo loads the block stamp the manager is synced to from the
// database.
func fetchSyncedTo(ns walletdb.ReadBucket) (*BlockStamp, error) { bucket := ns.NestedReadBucket(syncBucketName) // The serialized synced to format is: // <blockheight><blockhash><timestamp> // // 4 bytes block height + 32 bytes hash length buf := bucket.Get(syncedToName) if len(buf) < 36 { str := "malformed sync information stored in database" return nil, managerError(ErrDatabase, str, nil) } var bs BlockStamp bs.Height = int32(binary.LittleEndian.Uint32(buf[0:4])) copy(bs.Hash[:], buf[4:36]) if len(buf) == 40 { bs.Timestamp = time.Unix( int64(binary.LittleEndian.Uint32(buf[36:])), 0, ) } return &bs, nil } // PutSyncedTo stores the provided synced to blockstamp to the database. func PutSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error { errStr := fmt.Sprintf("failed to store sync information %v", bs.Hash) // If the block height is greater than zero, check that the previous // block height exists. This prevents reorg issues in the future. We use // BigEndian so that keys/values are added to the bucket in order, // making writes more efficient for some database backends. if bs.Height > 0 { // We'll only check the previous block height exists if we've // determined our birthday block. This is needed as we'll no // longer store _all_ block hashes of the chain, so we only // expect the previous block to exist once our initial sync has // completed, which is dictated by our birthday block being set. if _, err := FetchBirthdayBlock(ns); err == nil { _, err := fetchBlockHash(ns, bs.Height-1) if err != nil { return managerError(ErrBlockNotFound, errStr, err) } } } // Store the block hash by block height. if err := addBlockHash(ns, bs.Height, bs.Hash); err != nil { return managerError(ErrDatabase, errStr, err) } // Remove the stale height if any, as we should only store MaxReorgDepth // block hashes at any given point. 
staleHeight := staleHeight(bs.Height) if staleHeight > 0 { if err := deleteBlockHash(ns, staleHeight); err != nil { return managerError(ErrDatabase, errStr, err) } } // Finally, we can update the syncedTo value. if err := updateSyncedTo(ns, bs); err != nil { return managerError(ErrDatabase, errStr, err) } return nil } // fetchBlockHash loads the block hash for the provided height from the // database. func fetchBlockHash(ns walletdb.ReadBucket, height int32) (*chainhash.Hash, error) { bucket := ns.NestedReadBucket(syncBucketName) errStr := fmt.Sprintf("failed to fetch block hash for height %d", height) heightBytes := make([]byte, 4) binary.BigEndian.PutUint32(heightBytes, uint32(height)) hashBytes := bucket.Get(heightBytes) if hashBytes == nil { err := errors.New("block not found") return nil, managerError(ErrBlockNotFound, errStr, err) } if len(hashBytes) != 32 { err := fmt.Errorf("couldn't get hash from database") return nil, managerError(ErrDatabase, errStr, err) } var hash chainhash.Hash if err := hash.SetBytes(hashBytes); err != nil { return nil, managerError(ErrDatabase, errStr, err) } return &hash, nil } // addBlockHash adds a block hash entry to the index within the syncBucket. func addBlockHash(ns walletdb.ReadWriteBucket, height int32, hash chainhash.Hash) error { var rawHeight [4]byte binary.BigEndian.PutUint32(rawHeight[:], uint32(height)) bucket := ns.NestedReadWriteBucket(syncBucketName) if err := bucket.Put(rawHeight[:], hash[:]); err != nil { errStr := fmt.Sprintf("failed to add hash %v", hash) return managerError(ErrDatabase, errStr, err) } return nil } // deleteBlockHash deletes the block hash entry within the syncBucket for the // given height. 
func deleteBlockHash(ns walletdb.ReadWriteBucket, height int32) error { var rawHeight [4]byte binary.BigEndian.PutUint32(rawHeight[:], uint32(height)) bucket := ns.NestedReadWriteBucket(syncBucketName) if err := bucket.Delete(rawHeight[:]); err != nil { errStr := fmt.Sprintf("failed to delete hash for height %v", height) return managerError(ErrDatabase, errStr, err) } return nil } // updateSyncedTo updates the value behind the syncedToName key to the given // block. func updateSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error { // The serialized synced to format is: // <blockheight><blockhash><timestamp> // // 4 bytes block height + 32 bytes hash length + 4 byte timestamp length var serializedStamp [40]byte binary.LittleEndian.PutUint32(serializedStamp[0:4], uint32(bs.Height)) copy(serializedStamp[4:36], bs.Hash[0:32]) binary.LittleEndian.PutUint32( serializedStamp[36:], uint32(bs.Timestamp.Unix()), ) bucket := ns.NestedReadWriteBucket(syncBucketName) if err := bucket.Put(syncedToName, serializedStamp[:]); err != nil { errStr := "failed to update synced to value" return managerError(ErrDatabase, errStr, err) } return nil } // staleHeight returns the stale height for the given height. The stale height // indicates the height we should remove in order to maintain a maximum of // MaxReorgDepth block hashes. func staleHeight(height int32) int32 { return height - MaxReorgDepth } // FetchStartBlock loads the start block stamp for the manager from the // database. 
func FetchStartBlock(ns walletdb.ReadBucket) (*BlockStamp, error) { bucket := ns.NestedReadBucket(syncBucketName) // The serialized start block format is: // <blockheight><blockhash> // // 4 bytes block height + 32 bytes hash length buf := bucket.Get(startBlockName) if len(buf) != 36 { str := "malformed start block stored in database" return nil, managerError(ErrDatabase, str, nil) } var bs BlockStamp bs.Height = int32(binary.LittleEndian.Uint32(buf[0:4])) copy(bs.Hash[:], buf[4:36]) return &bs, nil } // putStartBlock stores the provided start block stamp to the database. func putStartBlock(ns walletdb.ReadWriteBucket, bs *BlockStamp) error { bucket := ns.NestedReadWriteBucket(syncBucketName) // The serialized start block format is: // <blockheight><blockhash> // // 4 bytes block height + 32 bytes hash length buf := make([]byte, 36) binary.LittleEndian.PutUint32(buf[0:4], uint32(bs.Height)) copy(buf[4:36], bs.Hash[0:32]) err := bucket.Put(startBlockName, buf) if err != nil { str := fmt.Sprintf("failed to store start block %v", bs.Hash) return managerError(ErrDatabase, str, err) } return nil } // fetchBirthday loads the manager's bithday timestamp from the database. func fetchBirthday(ns walletdb.ReadBucket) (time.Time, error) { var t time.Time bucket := ns.NestedReadBucket(syncBucketName) birthdayTimestamp := bucket.Get(birthdayName) if len(birthdayTimestamp) != 8 { str := "malformed birthday stored in database" return t, managerError(ErrDatabase, str, nil) } t = time.Unix(int64(binary.BigEndian.Uint64(birthdayTimestamp)), 0) return t, nil } // putBirthday stores the provided birthday timestamp to the database. 
func putBirthday(ns walletdb.ReadWriteBucket, t time.Time) error { var birthdayTimestamp [8]byte binary.BigEndian.PutUint64(birthdayTimestamp[:], uint64(t.Unix())) bucket := ns.NestedReadWriteBucket(syncBucketName) if err := bucket.Put(birthdayName, birthdayTimestamp[:]); err != nil { str := "failed to store birthday" return managerError(ErrDatabase, str, err) } return nil } // FetchBirthdayBlock retrieves the birthday block from the database. // // The block is serialized as follows: // [0:4] block height // [4:36] block hash // [36:44] block timestamp func FetchBirthdayBlock(ns walletdb.ReadBucket) (BlockStamp, error) { var block BlockStamp bucket := ns.NestedReadBucket(syncBucketName) birthdayBlock := bucket.Get(birthdayBlockName) if birthdayBlock == nil { str := "birthday block not set" return block, managerError(ErrBirthdayBlockNotSet, str, nil) } if len(birthdayBlock) != 44 { str := "malformed birthday block stored in database" return block, managerError(ErrDatabase, str, nil) } block.Height = int32(binary.BigEndian.Uint32(birthdayBlock[:4])) copy(block.Hash[:], birthdayBlock[4:36]) t := int64(binary.BigEndian.Uint64(birthdayBlock[36:])) block.Timestamp = time.Unix(t, 0) return block, nil } // DeleteBirthdayBlock removes the birthday block from the database. // // NOTE: This does not alter the birthday block verification state. func DeleteBirthdayBlock(ns walletdb.ReadWriteBucket) error { bucket := ns.NestedReadWriteBucket(syncBucketName) if err := bucket.Delete(birthdayBlockName); err != nil { str := "failed to remove birthday block" return managerError(ErrDatabase, str, err) } return nil } // PutBirthdayBlock stores the provided birthday block to the database. // // The block is serialized as follows: // [0:4] block height // [4:36] block hash // [36:44] block timestamp // // NOTE: This does not alter the birthday block verification state. 
func PutBirthdayBlock(ns walletdb.ReadWriteBucket, block BlockStamp) error { var birthdayBlock [44]byte binary.BigEndian.PutUint32(birthdayBlock[:4], uint32(block.Height)) copy(birthdayBlock[4:36], block.Hash[:]) binary.BigEndian.PutUint64(birthdayBlock[36:], uint64(block.Timestamp.Unix())) bucket := ns.NestedReadWriteBucket(syncBucketName) if err := bucket.Put(birthdayBlockName, birthdayBlock[:]); err != nil { str := "failed to store birthday block" return managerError(ErrDatabase, str, err) } return nil } // fetchBirthdayBlockVerification retrieves the bit that determines whether the // wallet has verified that its birthday block is correct. func fetchBirthdayBlockVerification(ns walletdb.ReadBucket) bool { bucket := ns.NestedReadBucket(syncBucketName) verifiedValue := bucket.Get(birthdayBlockVerifiedName) // If there is no verification status, we can assume it has not been // verified yet. if verifiedValue == nil { return false } // Otherwise, we'll determine if it's verified by the value stored. verified := binary.BigEndian.Uint16(verifiedValue) return verified != 0 } // putBirthdayBlockVerification stores a bit that determines whether the // birthday block has been verified by the wallet to be correct. func putBirthdayBlockVerification(ns walletdb.ReadWriteBucket, verified bool) error { // Convert the boolean to an integer in its binary representation as // there is no way to insert a boolean directly as a value of a // key/value pair. verifiedValue := uint16(0) if verified { verifiedValue = 1 } var verifiedBytes [2]byte binary.BigEndian.PutUint16(verifiedBytes[:], verifiedValue) bucket := ns.NestedReadWriteBucket(syncBucketName) err := bucket.Put(birthdayBlockVerifiedName, verifiedBytes[:]) if err != nil { str := "failed to store birthday block verification" return managerError(ErrDatabase, str, err) } return nil } // managerExists returns whether or not the manager has already been created // in the given database namespace. 
func managerExists(ns walletdb.ReadBucket) bool { if ns == nil { return false } mainBucket := ns.NestedReadBucket(mainBucketName) return mainBucket != nil } // createScopedManagerNS creates the namespace buckets for a new registered // manager scope within the top level bucket. All relevant sub-buckets that a // ScopedManager needs to perform its duties are also created. func createScopedManagerNS(ns walletdb.ReadWriteBucket, scope *KeyScope) error { // First, we'll create the scope bucket itself for this particular // scope. scopeKey := scopeToBytes(scope) scopeBucket, err := ns.CreateBucket(scopeKey[:]) if err != nil { str := "failed to create sync bucket" return managerError(ErrDatabase, str, err) } _, err = scopeBucket.CreateBucket(acctBucketName) if err != nil { str := "failed to create account bucket" return managerError(ErrDatabase, str, err) } _, err = scopeBucket.CreateBucket(addrBucketName) if err != nil { str := "failed to create address bucket" return managerError(ErrDatabase, str, err) } // usedAddrBucketName bucket was added after manager version 1 release _, err = scopeBucket.CreateBucket(usedAddrBucketName) if err != nil { str := "failed to create used addresses bucket" return managerError(ErrDatabase, str, err) } _, err = scopeBucket.CreateBucket(addrAcctIdxBucketName) if err != nil { str := "failed to create address index bucket" return managerError(ErrDatabase, str, err) } _, err = scopeBucket.CreateBucket(acctNameIdxBucketName) if err != nil { str := "failed to create an account name index bucket" return managerError(ErrDatabase, str, err) } _, err = scopeBucket.CreateBucket(acctIDIdxBucketName) if err != nil { str := "failed to create an account id index bucket" return managerError(ErrDatabase, str, err) } _, err = scopeBucket.CreateBucket(metaBucketName) if err != nil { str := "failed to create a meta bucket" return managerError(ErrDatabase, str, err) } return nil } // createManagerNS creates the initial namespace structure needed for all of 
// the manager data. This includes things such as all of the buckets as well
// as the version and creation date. In addition to creating the key space for
// the root address manager, we'll also create internal scopes for all the
// default manager scope types.
func createManagerNS(ns walletdb.ReadWriteBucket,
	defaultScopes map[KeyScope]ScopeAddrSchema) error {

	// First, we'll create all the relevant buckets that stem off of the
	// main bucket.
	mainBucket, err := ns.CreateBucket(mainBucketName)
	if err != nil {
		str := "failed to create main bucket"
		return managerError(ErrDatabase, str, err)
	}
	_, err = ns.CreateBucket(syncBucketName)
	if err != nil {
		str := "failed to create sync bucket"
		return managerError(ErrDatabase, str, err)
	}

	// We'll also create the two top-level scope related buckets as
	// preparation for the operations below.
	scopeBucket, err := ns.CreateBucket(scopeBucketName)
	if err != nil {
		str := "failed to create scope bucket"
		return managerError(ErrDatabase, str, err)
	}
	scopeSchemas, err := ns.CreateBucket(scopeSchemaBucketName)
	if err != nil {
		str := "failed to create scope schema bucket"
		return managerError(ErrDatabase, str, err)
	}

	// Next, we'll create the namespace for each of the relevant default
	// manager scopes.
	for scope, scopeSchema := range defaultScopes {
		// Shadow the loop variables: their addresses are taken below,
		// so each iteration needs its own copies.
		scope, scopeSchema := scope, scopeSchema

		// Before we create the entire namespace of this scope, we'll
		// update the schema mapping to note what types of addresses it
		// prefers.
		scopeKey := scopeToBytes(&scope)
		schemaBytes := scopeSchemaToBytes(&scopeSchema)
		err := scopeSchemas.Put(scopeKey[:], schemaBytes)
		if err != nil {
			return err
		}

		err = createScopedManagerNS(scopeBucket, &scope)
		if err != nil {
			return err
		}

		err = putLastAccount(ns, &scope, DefaultAccountNum)
		if err != nil {
			return err
		}
	}

	if err := putManagerVersion(ns, latestMgrVersion); err != nil {
		return err
	}

	// NOTE(review): the creation date is stored little-endian while the
	// other sync values above use big-endian — presumably historical;
	// confirm before changing.
	createDate := uint64(time.Now().Unix())
	var dateBytes [8]byte
	binary.LittleEndian.PutUint64(dateBytes[:], createDate)
	err = mainBucket.Put(mgrCreateDateName, dateBytes[:])
	if err != nil {
		str := "failed to store database creation time"
		return managerError(ErrDatabase, str, err)
	}

	return nil
}
package conclusion import ( "sort" "testing" "github.com/stretchr/testify/require" ) func TestTreeNode(t *testing.T) { n := &TreeNode{ Val: 1, Right: &TreeNode{ Val: 3, Left: &TreeNode{ Val: 2, }, }, } assert := require.New(t) assert.Equal("[1,null,3,2]", n.String()) } func Test_generateTrees(t *testing.T) { type args struct { n int } tests := []struct { name string args args want []*TreeNode }{ { name: "example 1", args: args{n:3}, want: []*TreeNode{ { Val: 1, Right: &TreeNode{ Val: 3, Left: &TreeNode{ Val: 2, }, }, }, { Val: 1, Right: &TreeNode{ Val: 2, Right: &TreeNode{ Val: 3, }, }, }, { Val: 2, Left: &TreeNode{ Val: 1, }, Right: &TreeNode{ Val: 3, }, }, { Val: 3, Left: &TreeNode{ Val: 2, Left: &TreeNode{ Val: 1, }, }, }, { Val: 3, Left: &TreeNode{ Val: 1, Right: &TreeNode{ Val: 2, }, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert := require.New(t) var wantStr = make([]string, len(tt.want)) for i := range tt.want { wantStr[i] = tt.want[i].String() } sort.Strings(wantStr) got := generateTrees(tt.args.n) var gotStr = make([]string, len(got)) for i := range got { gotStr[i] = got[i].String() } sort.Strings(gotStr) assert.Equal(gotStr, wantStr) }) } }
package usage_test

import (
	"fmt"
	"io/ioutil"
	"time"

	_ "github.com/manishrjain/gocrud/drivers/leveldb"
	_ "github.com/manishrjain/gocrud/drivers/memsearch"
	"github.com/manishrjain/gocrud/indexer"
	"github.com/manishrjain/gocrud/req"
	"github.com/manishrjain/gocrud/search"
	"github.com/manishrjain/gocrud/store"
	"github.com/manishrjain/gocrud/x"
)

var log = x.Log("usage")

// ExampleStore demonstrates writing an entity to the leveldb-backed store and
// reading it back. The // Output block is checked by `go test`.
func ExampleStore() {
	path, err := ioutil.TempDir("", "gocrudldb_")
	if err != nil {
		x.LogErr(log, err).Fatal("Opening file")
		return
	}
	store.Get().Init(path) // leveldb

	// Update some data.
	c := req.NewContext(10) // 62^10 permutations
	err = store.NewUpdate("Root", "bigbang").SetSource("author").
		Set("when", "13.8 billion years ago").Set("explosive", true).Execute(c)
	if err != nil {
		x.LogErr(log, err).Fatal("Commiting update")
		return
	}

	// Retrieve that data
	result, err := store.NewQuery("bigbang").Run()
	if err != nil {
		x.LogErr(log, err).Fatal("While querying store")
		return
	}
	fmt.Println(result.Kind) // Root
	fmt.Println(result.Id)   // bigbang
	data := result.ToMap()
	{
		val, ok := data["explosive"]
		if !ok {
			log.Fatal("creator should be set")
			return
		}
		fmt.Println(val) // true
	}
	{
		val, ok := data["when"]
		if !ok {
			log.Fatal("creator should be set")
			return
		}
		fmt.Println(val)
	}

	// Output:
	// Root
	// bigbang
	// true
	// 13.8 billion years ago
}

// SimpleIndexer is a minimal indexer.Indexer implementation used by
// ExampleSearch: it re-indexes exactly the entity that changed.
type SimpleIndexer struct {
}

// OnUpdate marks the updated entity itself as needing regeneration.
func (si SimpleIndexer) OnUpdate(e x.Entity) (result []x.Entity) {
	result = append(result, e)
	return
}

// Regenerate rebuilds the search document for e from the current store state.
func (si SimpleIndexer) Regenerate(e x.Entity) (rdoc x.Doc) {
	rdoc.Id = e.Id
	rdoc.Kind = e.Kind
	rdoc.NanoTs = time.Now().UnixNano()

	result, err := store.NewQuery(e.Id).Run()
	if err != nil {
		x.LogErr(log, err).Fatal("While querying store")
		return
	}
	data := result.ToMap()
	rdoc.Data = data
	return
}

// particles are sample payload values; index 5 ("strange") is deliberately
// deleted in ExampleSearch.
var particles = [...]string{
	"up", "charm", "top", "gluon",
	"down", "strange", "bottom", "photon",
	"boson", "higgs boson",
}

// ExampleSearch demonstrates real-time indexing: children are written to the
// store, picked up by the indexer, and then queried (sorted descending by
// position) from the in-memory search engine. The deleted child (pos 5) must
// not appear.
func ExampleSearch() {
	path, err := ioutil.TempDir("", "gocrudldb_")
	if err != nil {
		x.LogErr(log, err).Fatal("Opening file")
		return
	}
	store.Get().Init(path) // leveldb
	search.Get().Init()    // memsearch

	// Run indexer to update entities in search engine in real time.
	c := req.NewContextWithUpdates(10, 100)
	indexer.Register("Child", SimpleIndexer{})
	indexer.Run(c, 2)

	u := store.NewUpdate("Root", "bigbang").SetSource("author")
	for i := 0; i < 10; i++ {
		child := u.AddChild("Child").Set("pos", i).Set("particle", particles[i])
		if i == 5 {
			child.MarkDeleted() // This shouldn't be retrieved anymore.
		}
	}
	if err = u.Execute(c); err != nil {
		x.LogErr(log, err).Fatal("While updating")
		return
	}
	indexer.WaitForDone(c) // Block until indexing is done.

	docs, err := search.Get().NewQuery("Child").Order("-data.pos").Run()
	if err != nil {
		x.LogErr(log, err).Fatal("While searching")
		return
	}
	fmt.Println("docs:", len(docs))
	for _, doc := range docs {
		m := doc.Data.(map[string]interface{})
		fmt.Println(m["pos"], m["particle"])
	}

	// Output:
	// docs: 9
	// 9 higgs boson
	// 8 boson
	// 7 photon
	// 6 bottom
	// 4 down
	// 3 gluon
	// 2 top
	// 1 charm
	// 0 up
}
package util import ( "time" log "github.com/Sirupsen/logrus" ) type Scheduler struct { task Task interval time.Duration quit chan int } type Task func() error func NewScheduler(t Task, interval time.Duration) *Scheduler { return &Scheduler{task: t, interval: interval, quit: make(chan int)} } func (s Scheduler) Run() { for { if err := s.task(); err != nil { log.Error(err) } select { case <-s.quit: log.Debug("Schedualer Stopped") return case <-time.After(s.interval): log.Debugf("Finish watting %v", s.interval) } } } func (s Scheduler) Stop() { s.quit <- 1 }
package api import ( "WAF/middlewares" "WAF/models" "WAF/utils" "time" "github.com/dgrijalva/jwt-go" "github.com/gin-gonic/gin" ) // @Tags User // @Summary 用户登录 // accept json // produce json // @Param name query string true "用户名" // @Param passwd query string true "密码" // @Success 200 {string} string "ok" // @Router /login/ [post] func Login(c *gin.Context) { var user models.User var user2 models.User var err error err = c.ShouldBindJSON(&user) utils.Initvalidate() err = utils.Validate.Struct(user) if err != nil { utils.HandleError(err, c) return } user2, err = models.GetUserByName(user.Name) if user2.Passwd != models.EncryptPass(user.Passwd) { utils.FailMessage("用户名或密码不对", c) return } // 修改登录状态 user2.State = 1 models.UpdateUser(user2) token, ExpiresAt := CreateToken(c, &user2) utils.OkData(gin.H{ "token": token, "user": user2, "expires_at": ExpiresAt, }, c) } func CreateToken(c *gin.Context, user *models.User) (string, int64) { //自定义claim claim := middlewares.UserClaim{ Id: user.Id, Name: user.Name, StandardClaims: jwt.StandardClaims{ NotBefore: int64(time.Now().Unix() - 1000), // 签名生效时间 ExpiresAt: int64(time.Now().Unix() + 60*60*24*7), // 过期时间 一周 }, } token := jwt.NewWithClaims(jwt.SigningMethodHS256, claim) tokenString, _ := token.SignedString([]byte("12345678")) return tokenString, claim.StandardClaims.ExpiresAt }
package openvswitch import ( "testing" "github.com/joatmon08/ovs_exporter/utils" "encoding/json" "reflect" ) func TestParseStatisticsFromData(t *testing.T) { var test []map[string]interface{} expected := map[string]float64{ "collisions": 0, "rx_bytes": 1026, "rx_crc_err": 0, "rx_dropped": 0, "rx_errors": 0, "rx_frame_err": 0, "rx_over_err": 0, "rx_packets": 0, "tx_bytes": 1096, "tx_dropped": 0, "tx_errors": 0, "tx_packets": 14, } stats, err := utils.ReadTestDataToBytes("statistics.json") if err != nil { t.Error(err) } if err := json.Unmarshal(stats, &test); err != nil { t.Error(err) } result, err := ParseStatisticsFromInterfaces(test) if err != nil { t.Error(err) } if len(result) != 6 { t.Errorf("Expected %d, got %d", 6, len(result)) } for _, r := range result { if r.Name == "1c1988eb903b4_l" { if r.UUID != "aa713415-8566-458b-b8ef-58e550af8a91" { t.Errorf("Expected %s, got %s", "aa713415-8566-458b-b8ef-58e550af8a91", r.UUID) } if reflect.DeepEqual(r.Statistics, expected) { t.Errorf("Expected %v, got %v", expected, r.Statistics) } } } } func TestParsePortsFromBridges(t *testing.T) { var test []map[string]interface{} expected := Bridge{ Name: "ovsbr0", Ports: []Port{ {UUID: "a5956ae0-25fd-46b2-a881-a6f63c8014d9"}, {UUID: "dfb05de8-617b-4127-91aa-e1f3bfd7ab60"}, }, } stats, err := utils.ReadTestDataToBytes("bridges.json") if err != nil { t.Error(err) } if err := json.Unmarshal(stats, &test); err != nil { t.Error(err) } result, err := ParsePortsFromBridges(test) if err != nil { t.Error(err) } if len(result) != 3 { t.Errorf("Expected %d, got %d", 3, len(result)) } for _, r := range result { if r.Name == "ovsbr0" { if !reflect.DeepEqual(expected, r) { t.Errorf("Expected %v, got %v", expected, r) } } } }
/*
Odd prime numbers are either in the form of 4k+1 or 4k+3 where k is a
non-negative integer. If we divide the set of odd prime numbers into two such
groups like this:

4k+3 | 3   7 11    19 23    31       43 47    59 67 71 |
4k+1 |   5      13       29    37 41       53       61    73

we can see that the two groups are kind of racing with each other. Sometimes
the so-called 'upper' group wins and sometimes the 'lower' one is on track. In
fact, Chebyshev discovered that in this race, the upper group wins slightly
more often.

The problem: assume that we are interested in knowing the shape of this race
track up to a certain number. The upper and lower horizontal lines indicate
that the next prime stays in the same group, while the slanted lines indicate
a 'jump' from one group to the other.

Now assume that the underline character _ represents a lower horizontal line
and the overline character ‾ (U+203E) represents an upper one. The slanted
lines are represented by slash / or backslash \ characters.

Challenge: write a program or function that gets a number N as input, and
draws this prime race track up to N, in a kind of ASCII-art-form described as
above.

Rules:
  - N is an integer and not necessarily a prime number.
  - Draw the race path for the primes up to, (and maybe including) N.
  - For a valid N as input, the output shall only be composed of these four
    characters ‾_/\.

Examples:
  N < 5    no output
  N = 5    \
  N = 20   \/‾\_/
  N = 100  \/‾\_/‾\/\_/‾\/\/‾\/‾\_

See https://www.makethebrainhappy.com/2018/10/chebyshev-bias.html
*/
package main

import (
	"bytes"
	"math/big"
)

func main() {
	assert(primetrack(4) == ``)
	assert(primetrack(5) == `\`)
	assert(primetrack(20) == `\/‾\_/`)
	assert(primetrack(100) == `\/‾\_/‾\/\_/‾\/\/‾\/‾\_`)
}

// assert panics when its condition does not hold.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}

// primetrack renders the Chebyshev prime race for all odd primes <= n.
// Candidates 1, 3, 5, 7, ... are visited in order, alternating between the
// 4k+1 group (drawn low) and the 4k+3 group (drawn high); each prime after
// the first contributes one character describing the move from the previous
// prime's group to its own.
func primetrack(n int) string {
	var track bytes.Buffer
	// prev is the group of the previous prime: 0 for 4k+1, 1 for 4k+3,
	// and -1 before the first prime has been seen.
	prev := -1
	for k, parity := 0, 0; ; {
		candidate, group := 4*k+1, 0
		if parity == 1 {
			// Switch to the 4k+3 member and advance k for next time.
			candidate += 2
			k++
			group = 1
		}
		if candidate > n {
			break
		}
		parity ^= 1
		if !isprime(candidate) {
			continue
		}
		switch {
		case prev < 0:
			// First prime: nothing to draw yet.
		case prev < group:
			track.WriteString("/")
		case prev > group:
			track.WriteString("\\")
		case group == 1:
			track.WriteString("‾")
		default:
			track.WriteString("_")
		}
		prev = group
	}
	return track.String()
}

// isprime reports whether n is prime. Per the math/big documentation,
// ProbablyPrime is 100% accurate for inputs below 2^64, so this is exact for
// the candidates used here.
func isprime(n int) bool {
	return big.NewInt(int64(n)).ProbablyPrime(2)
}
package main

import (
	"github.com/ziutek/mymysql/autorc"
	_ "github.com/ziutek/mymysql/native"
	"log"
	"os"
	"os/signal"
	"strings"
	"syscall"
	"time"
	"unicode"
)

// Package-level state shared between main, the inputs and the logging
// helpers.
var (
	ins         []*Input
	logFileName string
	smsd        *SMSd
)

// parseList splits a whitespace-separated config value into its fields.
func parseList(l string) []string {
	var a []string
	for {
		n := strings.IndexFunc(l, unicode.IsSpace)
		if n == -1 {
			a = append(a, l)
			break
		}
		a = append(a, l[:n])
		l = strings.TrimLeftFunc(l[n:], unicode.IsSpace)
	}
	return a
}

// main loads the config file given as the single CLI argument, starts the
// SMS daemon and its listeners, and then waits for signals: SIGHUP reopens
// the log file, SIGTERM/SIGINT shut everything down via the deferred Stops.
func main() {
	if len(os.Args) != 2 {
		log.Printf("Usage: %s CONFIG_FILE\n", os.Args[0])
		os.Exit(1)
	}
	db, cfg, err := autorc.NewFromCF(os.Args[1])
	if err != nil {
		log.Println("Error:", err)
		os.Exit(1)
	}
	logFileName, _ = cfg["LogFile"]
	setupLogging()

	c, ok := cfg["Listen"]
	if !ok {
		log.Println("There is no 'Listen' option in config file")
		os.Exit(1)
	}
	listen := parseList(c)

	c, ok = cfg["Source"]
	if !ok {
		log.Println("There is no 'Source' option in config file")
		os.Exit(1)
	}
	source := parseList(c)

	// Default poll interval; 15s proved too aggressive for some phones.
	pullInt := 17 * time.Second
	c, _ = cfg["PullInt"]
	if c != "" {
		pullInt, err = time.ParseDuration(c)
		if err != nil {
			log.Printf("Wrong value for 'PullInt' option: '%s'", c)
			os.Exit(1)
		}
	}
	numId, _ := cfg["NumId"]
	filter, _ := cfg["Filter"]

	smsd = NewSMSd(db, numId, filter, pullInt)

	// One Input per listen address; addresses without a ':' are treated as
	// unix socket paths.
	ins = make([]*Input, len(listen))
	for i, a := range listen {
		proto := "tcp"
		if strings.IndexRune(a, ':') == -1 {
			proto = "unix"
		}
		ins[i] = NewInput(smsd, proto, a, db.Clone(), source)
	}

	smsd.Start()
	defer smsd.Stop()

	for _, in := range ins {
		in.Start()
		defer in.Stop()
	}

	// Block on signals: SIGHUP re-opens the log file (for rotation), any
	// other registered signal breaks the loop and triggers the deferred
	// shutdown.
	sc := make(chan os.Signal, 2)
	signal.Notify(sc, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP)
	for sig := range sc {
		if sig == syscall.SIGHUP {
			setupLogging()
		} else {
			break
		}
	}
}

// logFile is the currently open log destination; nil until setupLogging has
// run with a configured LogFile.
var logFile *os.File

// setupLogging (re)opens the configured log file, points the standard logger
// at it and closes the previously open file. Safe to call on SIGHUP for log
// rotation. A missing LogFile option disables file logging.
func setupLogging() {
	if logFileName == "" {
		return
	}
	newFile, err := os.OpenFile(
		logFileName,
		os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0620,
	)
	if err != nil {
		log.Println(err)
		os.Exit(1)
	}
	prevFile := logFile
	logFile = newFile
	log.SetOutput(logFile)
	log.Println("Start logging to file:", logFileName)
	// Close the old file only after the logger has switched to the new one.
	if prevFile != nil {
		err = prevFile.Close()
		if err != nil {
			log.Println(err)
			return
		}
	}
}
package invoker import ( "context" v1 "github.com/mee6aas/zeep/pkg/api/invoker/v1" ) // Invoke request to invoke the specified activity name with the arg. func Invoke(ctx context.Context, actName string, arg string) (rst string, e error) { res, e := client.Invoke(ctx, &v1.InvokeRequest{ ActName: actName, Arg: arg, }) if e != nil { return } rst = res.GetResult() return }
package info import ( "fmt" "io/ioutil" "github.com/bwmarrin/discordgo" ) var fileList map[string]bool var fileListString string func RefreshFileList() { files, err := ioutil.ReadDir("./config/messages") if err != nil { fmt.Println(err) return } fileList = make(map[string]bool) fileListString = "**List of info topics:** \n" + "```\n" for _, file := range files { fmt.Println("Loaded file: " + file.Name()) fileListString += file.Name()[:len(file.Name())-4] + "\n" fileList[file.Name()[:len(file.Name())-4]] = true } fileListString = fileListString + "```" } func InfoFile(s *discordgo.Session, c string, file string) { b, err := ioutil.ReadFile("./config/messages/" + file + ".txt") if err != nil { s.ChannelMessageSend(c, "Error finding info, try again later.") return } fileOutput := string(b) s.ChannelMessageSend(c, fileOutput) } func List(s *discordgo.Session, c string) { RefreshFileList() s.ChannelMessageSend(c, fileListString) } func SafeInfoFIle(s *discordgo.Session, c string, file string) { fmt.Println("FILE: !" + file + "!") if file == "list" { List(s, c) } else if fileList[file] == true { InfoFile(s, c, file) } else { fmt.Println(fileList[file]) s.ChannelMessageSend(c, "Invalid topic") } }
package main import ( "flysnow/utils" "fmt" "log" "net/http" ) func main() { utils.FSConfig.SetMod("sys") if utils.FSConfig.IntDefault("web", 0) == 1 { port := utils.FSConfig.StringDefault("web.port", "22259") http.HandleFunc("/", defaultHandler) http.HandleFunc("/configs", configHandler) log.Fatal(http.ListenAndServe(":"+port, nil)) } } func defaultHandler(resp http.ResponseWriter, req *http.Request) { resp.Write([]byte("hello default")) } func configHandler(resp http.ResponseWriter, req *http.Request) { fmt.Println(req.PostForm) fmt.Println(req.Form) fmt.Println(req.Method) fmt.Println(req.ContentLength) fmt.Println(req.URL) fmt.Println((req.URL.Query())) resp.Write([]byte("hello config")) }
// Command average prints the arithmetic mean of the float values given on the
// command line, e.g.: average 1.5 2.5 3.0
package main

import (
	"fmt"
	"os"
	"strconv"
)

// average returns the arithmetic mean of vals; it returns 0 for an empty
// slice so callers need no special case.
func average(vals []float64) float64 {
	if len(vals) == 0 {
		return 0
	}
	sum := 0.0
	for _, v := range vals {
		sum += v
	}
	return sum / float64(len(vals))
}

// main parses every command-line argument as a float64 and prints their
// average; an unparsable argument aborts with a message on stderr.
func main() {
	args := os.Args[1:]
	vals := make([]float64, 0, len(args))
	for _, a := range args {
		v, err := strconv.ParseFloat(a, 64)
		if err != nil {
			fmt.Fprintf(os.Stderr, "invalid number %q: %v\n", a, err)
			os.Exit(1)
		}
		vals = append(vals, v)
	}
	fmt.Println(average(vals))
}
package main // List here all required micro plugins // Go here: https://github.com/micro/go-plugins import ( _ "github.com/micro/go-plugins/registry/consul/v2" )
/* * @lc app=leetcode.cn id=2 lang=golang * * [2] 两数相加 */ package solution // @lc code=start func addTwoNumbers(l1 *ListNode, l2 *ListNode) (ans *ListNode) { p, p1, p2, carry := &ListNode{}, l1, l2, 0 ans = new(ListNode) p.Next = ans for p1 != nil || p2 != nil { var n1, n2 int if p1 != nil { n1, p1 = p1.Val, p1.Next } if p2 != nil { n2, p2 = p2.Val, p2.Next } sum := n1 + n2 + carry carry, sum = sum/10, sum%10 p.Next.Val, p.Next.Next = sum, new(ListNode) p = p.Next } if carry != 0 { p.Next.Val = carry p = p.Next } p.Next = nil return } // @lc code=end
// Go support for Protocol Buffers RPC which compatiable with https://github.com/Baidu-ecom/Jprotobuf-rpc-socket // // Copyright 2002-2007 the original author or authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pbrpc import "time" var NANO_IN_SECONDS = 1000000000.0 // get time took in seconds func TimetookInSeconds(currentNano int64) float64 { c := time.Now().UnixNano() return float64(c-currentNano) / NANO_IN_SECONDS } type AnyFunc func() type SafeLoopFuncControl struct { stop chan bool } func (s *SafeLoopFuncControl) LoopGoSafty(f AnyFunc) { if s.stop == nil { s.stop = make(chan bool, 1) } for { select { case <-s.stop: return default: f() } } }
package main import ( "flag" "net/http" "time" klog "k8s.io/klog/v2" ) func main() { // initialize klog klog.InitFlags(nil) flag.Parse() // upgrade limits to the maximum possible, the proxy use a lot of files... setLimits() // disable proxy configuration in env variables noProxyDefaultTransport := http.DefaultTransport.(*http.Transport) noProxyDefaultTransport.Proxy = nil handler := &ProxyHandler{ httpClient: http.Client{ Timeout: 5 * time.Hour, Transport: noProxyDefaultTransport, }, } // start the proxy klog.Infof("starting proxy...") klog.Fatal(http.ListenAndServe(":8080", handler)) }
/*
Anthony is participating in a programming contest today! He's excellent at
algorithms; he can design and implement the solution to even the hardest
problems in the programming contest in minutes. Unfortunately, parsing inputs
is his greatest weakness. Specifically, he really struggles with problems
which require him to convert strings to lowercase. When Anthony is given a
string, he only converts the first character of the string to lowercase.

You somehow gained access to the test data for every problem in today's
contest. There are P problems in the contest, and every problem in the contest
has exactly T test cases. Each test case consists of a single string with only
English characters, and every problem requires converting the entire string to
lowercase as a preprocessing step.

As Anthony is a master problem solver, you know that he is able to design and
implement the algorithm for every problem within the contest's time
constraint. Specifically, Anthony's program will pass a test case C if and
only if every character in the string corresponding to C is converted to
lowercase. Now you wonder: how many problems will he solve in the contest?
Note that a problem is considered solved if all test cases passed.

Input: the first line contains two integers P and T, 1<=P,T<=50. The next PT
lines contain the test data, one nonempty string of English characters per
line, grouped by problem (T lines per problem). The total number of characters
does not exceed 50000.

Output: a single integer denoting the number of problems Anthony will solve
during the contest.
*/
package main

import "unicode"

func main() {
	assert(solved([][]string{
		{"abc", "Def"},
		{"DDG", "add"},
	}) == 1)
}

// assert panics when its condition does not hold.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}

// solved returns how many problems Anthony passes: a problem counts only when
// every one of its test-case strings passes.
func solved(s [][]string) int {
	total := 0
	for _, problem := range s {
		if solvesProblem(problem) {
			total++
		}
	}
	return total
}

// solvesProblem reports whether every test-case string is already lowercase
// after its first character — the only character Anthony converts himself.
func solvesProblem(cases []string) bool {
	for _, tc := range cases {
		for i, r := range tc {
			if i > 0 && !unicode.IsLower(r) {
				return false
			}
		}
	}
	return true
}
package rtrserver

import (
	"bytes"
	"encoding/binary"
	"errors"
	"time"

	"github.com/cpusoft/goutil/belogs"
	"github.com/cpusoft/goutil/iputil"
	"github.com/cpusoft/goutil/jsonutil"
	model "rpstir2-model"
)

// ParseToResetQuery parses the remainder of a Reset Query PDU (after the
// version/type header) from buf: a 16-bit zero field and a 32-bit length
// that must equal 8. Any parse or validation failure is returned as an
// RtrError carrying the Corrupt Data error code.
func ParseToResetQuery(buf *bytes.Reader, protocolVersion uint8) (rtrPduModel RtrPduModel, err error) {
	var zero16 uint16
	var length uint32

	// get zero16
	err = binary.Read(buf, binary.BigEndian, &zero16)
	if err != nil {
		belogs.Error("ParseToResetQuery(): PDU_TYPE_RESET_QUERY get zero fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get zero")
		return rtrPduModel, rtrError
	}

	// get length
	err = binary.Read(buf, binary.BigEndian, &length)
	if err != nil {
		belogs.Error("ParseToResetQuery(): PDU_TYPE_RESET_QUERY get length fail, buf:", buf, err)
		rtrError := NewRtrError(
			err,
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get length")
		return rtrPduModel, rtrError
	}
	// A Reset Query PDU has a fixed total length of 8 octets.
	if length != 8 {
		belogs.Error("ParseToResetQuery():PDU_TYPE_RESET_QUERY, length must be 8, buf:", buf, " length:", length)
		rtrError := NewRtrError(
			errors.New("pduType is RESET QUERY, length must be 8"),
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get length")
		return rtrPduModel, rtrError
	}
	rq := NewRtrResetQueryModel(protocolVersion)
	belogs.Debug("ParseToResetQuery():get PDU_TYPE_RESET_QUERY, buf:", buf, " rq:", jsonutil.MarshalJson(rq))
	return rq, nil
}

// ProcessResetQuery loads the full RTR data set (ROA and ASA) plus the
// current session/serial from the database and assembles the complete
// response PDU sequence for a Reset Query.
func ProcessResetQuery(rtrPduModel RtrPduModel) (resetResponses []RtrPduModel, err error) {
	rtrFulls, rtrAsaFulls, sessionId, serialNumber, err := getRtrFullAndSessionIdAndSerialNumberDb()
	if err != nil {
		belogs.Error("ProcessResetQuery(): GetRtrFullAndSerialNumAndSessionId fail: ", err)
		return resetResponses, err
	}
	belogs.Debug("ProcessResetQuery(): len(rtrFulls):", len(rtrFulls), " sessionId:", sessionId, " serialNumber: ", serialNumber)

	rtrPduModels, err := assembleResetResponses(rtrFulls, rtrAsaFulls, rtrPduModel.GetProtocolVersion(),
		sessionId, serialNumber)
	if err != nil {
		belogs.Error("ProcessResetQuery(): assembleResetResponses fail: ", err)
		return resetResponses, err
	}
	return rtrPduModels, nil
}

// when len(rtrFull)==0, it is an error with no_data_available
//
// assembleResetResponses builds the full Cache Response / payload / End Of
// Data PDU sequence for the given protocol version. Versions 0/1 carry only
// prefix (ROA) data; version 2 additionally carries ASA data. With no data
// at all, v0/v1 answer with a No Data Available error report, while v2
// answers with just an End Of Data sequence.
func assembleResetResponses(rtrFulls []model.LabRpkiRtrFull, rtrAsaFulls []model.LabRpkiRtrAsaFull, protocolVersion uint8,
	sessionId uint16, serialNumber uint32) (rtrPduModels []RtrPduModel, err error) {
	belogs.Info("assembleResetResponses(): len(rtrFulls):", len(rtrFulls), " len(rtrAsaFulls):", len(rtrAsaFulls),
		" protocolVersion:", protocolVersion, " sessionId:", sessionId, " serialNumber:", serialNumber)

	rtrPduModels = make([]RtrPduModel, 0)
	//rtr full from roa rtr
	if protocolVersion == PDU_PROTOCOL_VERSION_0 || protocolVersion == PDU_PROTOCOL_VERSION_1 {
		if len(rtrFulls) > 0 {
			belogs.Debug("assembleResetResponses(): protocolVersion=0 or 1, len(rtrFulls)>0, len(rtrFulls): ", len(rtrFulls),
				" protocolVersion:", protocolVersion, " sessionId:", sessionId, " serialNumber:", serialNumber)

			// start response
			cacheResponseModel := NewRtrCacheResponseModel(protocolVersion, sessionId)
			rtrPduModels = append(rtrPduModels, cacheResponseModel)
			belogs.Debug("assembleResetResponses(): protocolVersion=0 or 1, cacheResponseModel : ", jsonutil.MarshalJson(cacheResponseModel))

			// rtr full to response
			rtrFullPduModels, err := convertRtrFullsToRtrPduModels(rtrFulls, protocolVersion)
			if err != nil {
				belogs.Error("assembleResetResponses(): protocolVersion=0 or 1, convertRtrIncrementalsToRtrPduModels fail: ", err)
				return nil, err
			}
			rtrPduModels = append(rtrPduModels, rtrFullPduModels...)
			belogs.Debug("assembleResetResponses(): protocolVersion=0 or 1, len(rtrFullPduModels) : ", len(rtrFullPduModels))

			// end response
			endOfDataModel := assembleEndOfDataResponse(protocolVersion, sessionId, serialNumber)
			rtrPduModels = append(rtrPduModels, endOfDataModel)
			belogs.Debug("assembleResetResponses(): protocolVersion=0 or 1, endOfDataModel : ", jsonutil.MarshalJson(endOfDataModel))

			belogs.Info("assembleResetResponses(): protocolVersion=0 or 1, will send will send Cache Response of all rtr,",
				", receive protocolVersion:", protocolVersion, ", sessionId:", sessionId, ", serialNumber:", serialNumber,
				", len(rtrFulls): ", len(rtrFulls), ", len(rtrPduModels):", len(rtrPduModels))
			belogs.Debug("assembleResetResponses(): protocolVersion=0 or 1, rtrPduModels:", jsonutil.MarshalJson(rtrPduModels))
			return rtrPduModels, nil
		} else {
			// No data at all: v0/v1 must answer with No Data Available.
			errorReportModel := NewRtrErrorReportModel(protocolVersion, PDU_TYPE_ERROR_CODE_NO_DATA_AVAILABLE, nil, nil)
			rtrPduModels = append(rtrPduModels, errorReportModel)
			belogs.Info("assembleResetResponses(): protocolVersion=0 or 1,there is no rtr this time, will send errorReport with not_data_available, ",
				", receive protocolVersion:", protocolVersion, ", sessionId:", sessionId, ", serialNumber:", serialNumber,
				", rtrPduModels:", jsonutil.MarshalJson(rtrPduModels))
			return rtrPduModels, nil
		}
	} else if protocolVersion == PDU_PROTOCOL_VERSION_2 {
		//rtr full from asa rtr
		if len(rtrFulls) > 0 || len(rtrAsaFulls) > 0 {
			belogs.Debug("assembleResetResponses(): protocolVersion=2, len(rtrFulls):", len(rtrFulls), " len(rtrAsaFulls): ", len(rtrAsaFulls),
				" protocolVersion:", protocolVersion, " sessionId:", sessionId, " serialNumber:", serialNumber)

			// start response
			cacheResponseModel := NewRtrCacheResponseModel(protocolVersion, sessionId)
			rtrPduModels = append(rtrPduModels, cacheResponseModel)
			belogs.Debug("assembleResetResponses(): cacheResponseModel : ", jsonutil.MarshalJson(cacheResponseModel))

			// from rtr full
			rtrFullPduModels, err := convertRtrFullsToRtrPduModels(rtrFulls, protocolVersion)
			if err != nil {
				belogs.Error("assembleResetResponses(): convertRtrFullsToRtrPduModels fail: ", err)
				return nil, err
			}
			rtrPduModels = append(rtrPduModels, rtrFullPduModels...)
			belogs.Debug("assembleResetResponses(): len(rtrFullPduModels) : ", len(rtrFullPduModels))

			// rtr asa full to response
			rtrAsaFullPduModels, err := convertRtrAsaFullsToRtrPduModels(rtrAsaFulls, protocolVersion)
			if err != nil {
				belogs.Error("assembleResetResponses(): convertRtrAsaFullsToRtrPduModels fail: ", err)
				return nil, err
			}
			rtrPduModels = append(rtrPduModels, rtrAsaFullPduModels...)
			belogs.Debug("assembleResetResponses(): len(rtrAsaFullPduModels) : ", len(rtrAsaFullPduModels))

			// end response
			endOfDataModel := assembleEndOfDataResponse(protocolVersion, sessionId, serialNumber)
			rtrPduModels = append(rtrPduModels, endOfDataModel)
			belogs.Debug("assembleResetResponses(): will send all rtrPduModels : ", jsonutil.MarshalJson(rtrPduModels))

			belogs.Info("assembleResetResponses(): protocolVersion=2, will send will send Cache Response of all rtr,",
				", receive protocolVersion:", protocolVersion, ", sessionId:", sessionId, ", serialNumber:", serialNumber,
				", len(rtrFulls): ", len(rtrFulls), ", len(rtrAsaFulls): ", len(rtrAsaFulls), ", len(rtrPduModels):", len(rtrPduModels))
			belogs.Debug("assembleResetResponses(): protocolVersion=2, rtrPduModels:", jsonutil.MarshalJson(rtrPduModels))
			return rtrPduModels, nil
		} else {
			// v2 with no data: send only the End Of Data sequence rather
			// than an error report.
			belogs.Debug("assembleResetResponses(): protocolVersion=2, len(rtrAsaFulls)==0 : just send endofdata,",
				" protocolVersion:", protocolVersion, " sessionId:", sessionId, " serialNumber:", serialNumber)
			rtrPduModels = assembleEndOfDataResponses(protocolVersion, sessionId, serialNumber)
			belogs.Info("assembleResetResponses(): protocolVersion=2,there is no rtr this time, will send errorReport with not_data_available, ",
				", receive protocolVersion:", protocolVersion, ", sessionId:", sessionId, ", serialNumber:", serialNumber,
				", rtrPduModels:", jsonutil.MarshalJson(rtrPduModels))
			return rtrPduModels, nil
		}
	}
	belogs.Error("assembleResetResponses(): not support protocolVersion, fail: ", protocolVersion)
	return nil, errors.New("protocolVersion is not support")
}

// convertRtrFullToRtrPduModel converts one database prefix row into the
// matching IPv4 or IPv6 announce-prefix PDU model.
func convertRtrFullToRtrPduModel(rtrFull *model.LabRpkiRtrFull, protocolVersion uint8) (rtrPduModel RtrPduModel, err error) {
	// NOTE(review): err from AddressToRtrFormatByte is not checked before
	// ipType is used — on failure ipType presumably matches neither branch
	// and the generic error below is returned; confirm this is intended.
	ipHex, ipType, err := iputil.AddressToRtrFormatByte(rtrFull.Address)
	if ipType == iputil.Ipv4Type {
		ipv4 := [4]byte{0x00}
		copy(ipv4[:], ipHex[:])
		rtrIpv4PrefixModel := NewRtrIpv4PrefixModel(protocolVersion, PDU_FLAG_ANNOUNCE, uint8(rtrFull.PrefixLength),
			uint8(rtrFull.MaxLength), ipv4, uint32(rtrFull.Asn))
		return rtrIpv4PrefixModel, nil
	} else if ipType == iputil.Ipv6Type {
		ipv6 := [16]byte{0x00}
		copy(ipv6[:], ipHex[:])
		rtrIpv6PrefixModel := NewRtrIpv6PrefixModel(protocolVersion, PDU_FLAG_ANNOUNCE, uint8(rtrFull.PrefixLength),
			uint8(rtrFull.MaxLength), ipv6, uint32(rtrFull.Asn))
		return rtrIpv6PrefixModel, nil
	}
	return rtrPduModel, errors.New("convert to rtr format, error ipType")
}

// convertRtrFullsToRtrPduModels converts every prefix row to its PDU model,
// failing fast on the first conversion error.
func convertRtrFullsToRtrPduModels(rtrFulls []model.LabRpkiRtrFull, protocolVersion uint8) (rtrPduModels []RtrPduModel, err error) {
	rtrPduModels = make([]RtrPduModel, 0)
	for i := range rtrFulls {
		rtrPduModel, err := convertRtrFullToRtrPduModel(&rtrFulls[i], protocolVersion)
		if err != nil {
			belogs.Error("convertRtrFullsToRtrPduModels(): convertRtrFullToRtrPduModel fail: ", err)
			return nil, err
		}
		rtrPduModels = append(rtrPduModels, rtrPduModel)
	}
	belogs.Debug("convertRtrFullsToRtrPduModels(): len(rtrFulls): ", len(rtrFulls), "  len(rtrPduModels):", len(rtrPduModels))
	return rtrPduModels, nil
}

// convertRtrAsaFullsToRtrPduModels converts ASA rows to PDU models, grouping
// rows that share the same customer ASN and address family into a single ASA
// PDU that accumulates all provider ASNs.
func convertRtrAsaFullsToRtrPduModels(rtrAsaFulls []model.LabRpkiRtrAsaFull, protocolVersion uint8) (rtrAsaPduModels []RtrPduModel, err error) {
	belogs.Debug("convertRtrAsaFullsToRtrPduModels(): len(rtrAsaFulls): ", len(rtrAsaFulls), "  protocolVersion:", protocolVersion)
	start := time.Now()

	// Keyed by customer-ASN/address-family so provider ASNs merge into one
	// PDU per key.
	sameCustomerAsnAfi := make(map[string]*RtrAsaModel, 0)
	rtrAsaPduModels = make([]RtrPduModel, 0)
	for i := range rtrAsaFulls {
		rtrPduModel := NewRtrAsaModelFromDb(protocolVersion, PDU_FLAG_ANNOUNCE,
			rtrAsaFulls[i].AddressFamily, uint32(rtrAsaFulls[i].CustomerAsn))
		key := rtrPduModel.GetKey()
		belogs.Debug("convertRtrAsaFullsToRtrPduModels(): will add key:", key)
		if v, ok := sameCustomerAsnAfi[key]; ok {
			v.AddProviderAsn(uint32(rtrAsaFulls[i].ProviderAsn))
			sameCustomerAsnAfi[key] = v
		} else {
			rtrPduModel.AddProviderAsn(uint32(rtrAsaFulls[i].ProviderAsn))
			sameCustomerAsnAfi[key] = rtrPduModel
		}
	}
	for _, v := range sameCustomerAsnAfi {
		rtrAsaPduModels = append(rtrAsaPduModels, v)
		belogs.Debug("convertRtrAsaFullsToRtrPduModels(): v: ", jsonutil.MarshalJson(v))
	}
	belogs.Info("convertRtrAsaFullsToRtrPduModels(): len(rtrAsaFulls): ", len(rtrAsaFulls),
		"  len(rtrAsaPduModels):", len(rtrAsaPduModels), "  time(s):", time.Since(start))
	return rtrAsaPduModels, nil
}
package main

import (
	"log"

	"github.com/gin-gonic/gin"
	"github.com/jimmiepr/Gin-TDD/internal/service"
)

// main starts the HTTP API: a single versioned route group exposing
// GET /api/v1/getdata, served on port 3000.
func main() {
	r := gin.Default()

	v1 := r.Group("/api/v1")
	{
		v1.GET("/getdata", service.GetData)
	}

	// BUG FIX: the error returned by Run was previously discarded; a failed
	// bind (e.g. port already in use) exited silently with status 0.
	if err := r.Run(":3000"); err != nil {
		log.Fatal(err)
	}
}
package main

import (
	"C"
	"encoding/json"
	"fmt"

	. "github.com/matiassequeira/lorawan"
	log "github.com/sirupsen/logrus"
	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

// TEST MESSAGES
// {"mhdr":{"mType":"JoinRequest","major":"LoRaWANR1"},"macPayload":{"joinEUI":"55d239ac716f234d","devEUI":"b827eb891cf50003","devNonce":51639},"mic":"7005c4a5"}
// {"mhdr":{"mType":"JoinAccept","major":"LoRaWANR1"},"macPayload":{"bytes":"HWxw2bAlEDfZF8xu"},"mic":"fc1ede82"}
// {"mhdr":{"mType":"UnconfirmedDataUp","major":"LoRaWANR1"},"macPayload":{"fhdr":{"devAddr":"017fc1c4","fCtrl":{"adr":true,"adrAckReq":false,"ack":false,"fPending":false,"classB":false},"fCnt":17,"fOpts":[{"cid":"LinkADRReq","payload":{"channelMaskAck":true,"dataRateAck":false,"powerAck":true}}]},"fPort":93,"frmPayload":[{"bytes":"/2EyELe4m4F5txMSp93Gi+Od7uT0wI/xFFPlKA=="}]},"mic":"7934d552"}
// {"mhdr":{"mType":"UnconfirmedDataDown","major":"LoRaWANR1"},"macPayload":{"fhdr":{"devAddr":"017fc1c4","fCtrl":{"adr":true,"adrAckReq":false,"ack":false,"fPending":false,"classB":false},"fCnt":55,"fOpts":[{"cid":"LinkADRReq","payload":{"dataRate":2,"txPower":4,"chMask":[true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true],"redundancy":{"chMaskCntl":0,"nbRep":1}}}]},"fPort":null,"frmPayload":null},"mic":"be4d8cbf"}

// parseJSONtoPHY converts a LoRaWAN PHYPayload described as JSON (with
// human-readable mType/major names, see TEST MESSAGES above) into the
// text-marshaled (base64) wire form produced by PHYPayload.MarshalText.
//
// Parameters:
//   - jsonPHY: the JSON document to convert.
//   - key: AppKey (for JoinAccept encryption/MIC or JoinRequest signing) or
//     AppSKey (to encrypt a data packet's FRMPayload). Optional ("").
//   - nwkskey: NwkSKey used to (re)compute a data packet's MIC. Optional ("").
//
// The function rewrites the symbolic mType/major/cid names in the JSON to
// their numeric wire values before unmarshaling, then optionally encrypts
// and signs depending on which keys were supplied. It panics on
// unrecognized names and on MAC commands the underlying library does not
// implement.
func parseJSONtoPHY(jsonPHY string, key string, nwkskey string) string {
	// phyAlias is a defined type over *PHYPayload — presumably declared to
	// sidestep a custom JSON unmarshaler on PHYPayload so the struct fields
	// are decoded directly. TODO confirm against the lorawan package.
	type phyAlias *PHYPayload
	phy := PHYPayload{}
	var is_data_packet bool
	var is_uplink bool
	var fOpts []Payload
	is_decrypted_join_accept := false
	is_decrypted_data_packet := false

	// setLogLevel is defined elsewhere in this package.
	setLogLevel()

	// Rewrite the protocol major version name to its numeric value.
	switch major := gjson.Get(jsonPHY, "mhdr.major"); major.String() {
	case "LoRaWANR1":
		jsonPHY, _ = sjson.Set(jsonPHY, "mhdr.major", 0)
	default:
		panic("Error: major not recognized")
	}

	// Rewrite the message type name to its numeric value and install the
	// matching concrete MACPayload implementation for the later Unmarshal.
	mType := gjson.Get(jsonPHY, "mhdr.mType").String()
	switch mType {
	case "JoinRequest":
		jsonPHY, _ = sjson.Set(jsonPHY, "mhdr.mType", 0)
		phy.MACPayload = &JoinRequestPayload{}
	case "JoinAccept":
		jsonPHY, _ = sjson.Set(jsonPHY, "mhdr.mType", 1)
		// If macPayload.bytes was given, it means the JoinAccept data is encrypted. We don't need the AppKey in this case
		if gjson.Get(jsonPHY, "macPayload.bytes").Exists() {
			if key != "" {
				log.Debug("AppKey is not needed because the JoinAccept is already encrypted. Note that the MIC won't be re-generated. If you'd like to provide a modified JoinAccept, use the following format: \n{\"mhdr\":{\"mType\":\"JoinAccept\",\"major\":\"LoRaWANR1\"},\"macPayload\":{\"joinNonce\":11,\"homeNetID\":\"000000\",\"devAddr\":\"AABBCCDD\",\"dlSettings\":\"08\",\"rxDelay\":1,\"cFlist\":null},\"mic\":\"CCDDEEFF\"}")
			}
			phy.MACPayload = &DataPayload{}
		} else {
			// Decrypted JoinAccept: the AppKey is mandatory to encrypt it
			// and regenerate its MIC further below.
			if key == "" {
				panic("AppKey must be provided in order to encrypt JoinAccept and generate its MIC.")
			} else {
				log.Debug("Received a decrypted JoinAccept and its AppKey. The MIC will be re-generated.")
				phy.MACPayload = &JoinAcceptPayload{}
				is_decrypted_join_accept = true
			}
		}
	case "UnconfirmedDataUp":
		jsonPHY, _ = sjson.Set(jsonPHY, "mhdr.mType", 2)
		phy.MACPayload = &MACPayload{}
		is_data_packet = true
		is_uplink = true
	case "UnconfirmedDataDown":
		jsonPHY, _ = sjson.Set(jsonPHY, "mhdr.mType", 3)
		phy.MACPayload = &MACPayload{}
		is_data_packet = true
		is_uplink = false
	case "ConfirmedDataUp":
		jsonPHY, _ = sjson.Set(jsonPHY, "mhdr.mType", 4)
		phy.MACPayload = &MACPayload{}
		is_data_packet = true
		is_uplink = true
	case "ConfirmedDataDown":
		jsonPHY, _ = sjson.Set(jsonPHY, "mhdr.mType", 5)
		phy.MACPayload = &MACPayload{}
		is_data_packet = true
		is_uplink = false
	default:
		panic("Error: mType not recognized")
	}

	if is_data_packet {
		if key != "" {
			is_decrypted_data_packet = true
			log.Debug("Received packet AppSKey. Will be used to encrypt its FRMPayload.")
		}
		if frPl := gjson.Get(jsonPHY, "macPayload.frmPayload"); frPl.Exists() {
			phy.MACPayload.(*MACPayload).FRMPayload = []Payload{&DataPayload{}}
		}
		// Rewrite the (single, index-0) MAC command name in fOpts to its
		// numeric CID and install the matching payload type. The direction
		// (uplink/downlink) selects the Req/Ans (or Ind/Conf) variant.
		// NOTE(review): only fOpts[0] is handled — additional MAC commands
		// in the same frame appear to be ignored; confirm this is intended.
		if frameOpts := gjson.Get(jsonPHY, "macPayload.fhdr.fOpts"); frameOpts.Exists() {
			switch cid := gjson.Get(jsonPHY, "macPayload.fhdr.fOpts.0.cid"); cid.String() {
			case "ResetInd", "ResetConf":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 1)
				if is_uplink {
					fOpts = []Payload{&MACCommand{CID: ResetInd, Payload: &ResetIndPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: ResetConf, Payload: &ResetConfPayload{}}}
				}
			case "LinkCheckReq", "LinkCheckAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 2)
				if is_uplink {
					panic("This MACCommand is not implemented by github.com/brocaar/lorawan")
					//fOpts= []Payload{&MACCommand{CID: LinkCheckReq, Payload: &LinkCheckReqPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: LinkCheckAns, Payload: &LinkCheckAnsPayload{}}}
				}
			case "LinkADRReq", "LinkADRAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 3)
				if is_uplink {
					fOpts = []Payload{&MACCommand{CID: LinkADRAns, Payload: &LinkADRAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: LinkADRReq, Payload: &LinkADRReqPayload{}}}
				}
			case "DutyCycleReq", "DutyCycleAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 4)
				if is_uplink {
					panic("This MACCommand is not implemented by github.com/brocaar/lorawan")
					//fOpts= []Payload{&MACCommand{CID: DutyCycleAns, Payload: &DutyCycleAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: DutyCycleReq, Payload: &DutyCycleReqPayload{}}}
				}
			case "RXParamSetupReq", "RXParamSetupAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 5)
				if is_uplink {
					fOpts = []Payload{&MACCommand{CID: RXParamSetupAns, Payload: &RXParamSetupAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: RXParamSetupReq, Payload: &RXParamSetupReqPayload{}}}
				}
			case "DevStatusReq", "DevStatusAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 6)
				if is_uplink {
					fOpts = []Payload{&MACCommand{CID: DevStatusAns, Payload: &DevStatusAnsPayload{}}}
				} else {
					panic("This MACCommand is not implemented by github.com/brocaar/lorawan")
					//fOpts= []Payload{&MACCommand{CID: DevStatusReq, Payload: &DevStatusReqPayload{}}}
				}
			case "NewChannelReq", "NewChannelAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 7)
				if is_uplink {
					fOpts = []Payload{&MACCommand{CID: NewChannelAns, Payload: &NewChannelAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: NewChannelReq, Payload: &NewChannelReqPayload{}}}
				}
			case "RXTimingSetupReq", "RXTimingSetupAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 8)
				if is_uplink {
					panic("This MACCommand is not implemented by github.com/brocaar/lorawan")
					//fOpts= []Payload{&MACCommand{CID: RXTimingSetupAns, Payload: &RXTimingSetupAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: RXTimingSetupReq, Payload: &RXTimingSetupReqPayload{}}}
				}
			case "TXParamSetupReq", "TXParamSetupAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 9)
				if is_uplink {
					panic("This MACCommand is not implemented by github.com/brocaar/lorawan")
					//fOpts= []Payload{&MACCommand{CID: TXParamSetupAns, Payload: &TXParamSetupAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: TXParamSetupReq, Payload: &TXParamSetupReqPayload{}}}
				}
			case "DLChannelReq", "DLChannelAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 10)
				if is_uplink {
					fOpts = []Payload{&MACCommand{CID: DLChannelAns, Payload: &DLChannelAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: DLChannelReq, Payload: &DLChannelReqPayload{}}}
				}
			case "RekeyInd", "RekeyConf":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 11)
				if is_uplink {
					fOpts = []Payload{&MACCommand{CID: RekeyInd, Payload: &RekeyIndPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: RekeyConf, Payload: &RekeyConfPayload{}}}
				}
			case "ADRParamSetupReq", "ADRParamSetupAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 12)
				if is_uplink {
					panic("This MACCommand is not implemented by github.com/brocaar/lorawan")
					//fOpts= []Payload{&MACCommand{CID: ADRParamSetupAns, Payload: &ADRParamSetupAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: ADRParamSetupReq, Payload: &ADRParamSetupReqPayload{}}}
				}
			case "DeviceTimeReq", "DeviceTimeAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 13)
				panic("This MACCommand is not implemented by github.com/brocaar/lorawan")
			case "ForceRejoinReq":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 14)
				panic("This MACCommand is not implemented by github.com/brocaar/lorawan")
				//fOpts= []Payload{&MACCommand{CID: ForceRejoinReqPayload, Payload: &ForceRejoinReqPayload{}}}
			case "RejoinParamSetupReq", "RejoinParamSetupAns":
				jsonPHY, _ = sjson.Set(jsonPHY, "macPayload.fhdr.fOpts.0.cid", 15)
				if is_uplink {
					fOpts = []Payload{&MACCommand{CID: RejoinParamSetupAns, Payload: &RejoinParamSetupAnsPayload{}}}
				} else {
					fOpts = []Payload{&MACCommand{CID: RejoinParamSetupReq, Payload: &RejoinParamSetupReqPayload{}}}
				}
			}
			phy.MACPayload.(*MACPayload).FHDR.FOpts = fOpts
		}
	}

	//fmt.Printf("Modified JSON is %s\n", jsonPHY)

	// Decode the rewritten JSON into the PHYPayload structure.
	if err := json.Unmarshal([]byte(jsonPHY), phyAlias(&phy)); err != nil {
		fmt.Println("Error unmarshaling json: ", err)
	}

	if is_decrypted_join_accept {
		// Re-generate the MIC and encrypt the JoinAccept with the AppKey.
		var key_obj AES128Key
		if err := key_obj.UnmarshalText([]byte(key)); err != nil {
			log.Error("Unmarshall error with key: ", key)
			panic(err)
		}
		// The EUI and devnonce fields aren't used but required
		if err := phy.SetDownlinkJoinMIC(JoinRequestType, EUI64{}, DevNonce(0), key_obj); err != nil {
			log.Error("An error ocurred when trying to set the MIC to JoinAccept")
			panic(err)
		}
		if err := phy.EncryptJoinAcceptPayload(key_obj); err != nil {
			log.Error("An error ocurred when encrypting the JoinAccept")
			panic(err)
		}
	} else if is_decrypted_data_packet {
		// Encrypt the data packet's FRMPayload with the AppSKey.
		var key_obj AES128Key
		if err := key_obj.UnmarshalText([]byte(key)); err != nil {
			log.Error("Unmarshall error with key: ", key)
			panic(err)
		}
		if err := phy.EncryptFRMPayload(key_obj); err != nil {
			log.Error("Error encrypting FRMPayload")
			panic(err)
		}
	}

	// Debug lines
	//fmt.Printf("Unmarshaled PHY is %v\n", phy)
	// phyJSON, err := phy.MarshalJSON()
	// if err != nil {
	// 	fmt.Println("Error marshaling PHY")
	// 	panic(err)
	// }
	//fmt.Printf("Marshaled PHY: %s\n", string(phyJSON))

	// MarshalText yields the final base64-encoded wire representation.
	str, err := phy.MarshalText()
	if err != nil {
		panic(err)
	}
	result := string(str)

	// If nwkSKey provided, sign data packet.
	// signPacket is defined elsewhere in this package.
	if is_data_packet && nwkskey != "" {
		log.Debug("Received packet NwkSKey. Signing data packet")
		result = signPacket(result, nwkskey, "")
		// If it's a JoinRequest and key provided, sign it
	} else if mType == "JoinRequest" && key != "" {
		result = signPacket(result, key, "")
	}
	return result
}
package etcd

import (
	"testing"
	"time"
)

// TestJoinNotifiers verifies that joinNotifiers merges two notification
// channels into one: a signal on either input (or on both) must surface on
// the joined channel. The first select expects a notification before any
// input has fired — presumably joinNotifiers emits an initial notification
// on startup; confirm against its implementation.
func TestJoinNotifiers(t *testing.T) {
	t.Parallel()
	a := make(chan struct{})
	b := make(chan struct{})
	c := joinNotifiers(a, b)
	// NOTE(review): time.Tick leaks its ticker and shares one 30s cadence
	// across all selects below; the usual pattern is a fresh time.After per
	// select. Left unchanged to avoid altering the test's timing semantics.
	timeout := time.Tick(30 * time.Second)

	// Initial notification, before either input fires.
	select {
	case <-c:
	case <-timeout:
		t.FailNow()
	}

	// Notification triggered by the first input alone.
	a <- struct{}{}
	select {
	case <-c:
	case <-timeout:
		t.FailNow()
	}

	// Notification triggered by the second input alone.
	b <- struct{}{}
	select {
	case <-c:
	case <-timeout:
		t.FailNow()
	}

	// Signals on both inputs; at least one joined notification must arrive
	// (they may coalesce).
	a <- struct{}{}
	b <- struct{}{}
	select {
	case <-c:
	case <-timeout:
		t.FailNow()
	}
}
package server

import (
	"io/ioutil"
	"net/http"

	"github.com/ItsJimi/casa/logger"
	"github.com/jmoiron/sqlx"
)

// User structure in database
type User struct {
	ID        string `db:"id" json:"id"`
	Firstname string `db:"firstname" json:"firstname"`
	Lastname  string `db:"lastname" json:"lastname"`
	Email     string `db:"email" json:"email"`
	Password  string `db:"password" json:"-"` // never serialized to JSON responses
	Birthdate string `db:"birthdate" json:"birthdate"`
	CreatedAt string `db:"created_at" json:"createdAt"`
	UpdatedAt string `db:"updated_at" json:"updatedAt"`
}

// Token structure in database
type Token struct {
	ID        string `db:"id" json:"id"`
	UserID    string `db:"user_id" json:"userId"`
	Type      string `db:"type" json:"type"`
	IP        string `db:"ip" json:"ip"`
	UserAgent string `db:"user_agent" json:"userAgent"`
	Read      bool   `db:"read" json:"read"`
	Write     bool   `db:"write" json:"write"`
	Manage    bool   `db:"manage" json:"manage"`
	Admin     bool   `db:"admin" json:"admin"`
	CreatedAt string `db:"created_at" json:"createdAt"`
	UpdatedAt string `db:"updated_at" json:"updatedAt"`
	ExpireAt  string `db:"expire_at" json:"expireAt"`
}

// Gateway structure in database
type Gateway struct {
	ID        string `db:"id" json:"id"`
	HomeID    string `db:"home_id" json:"homeId"`
	Name      string `db:"name" json:"name"`
	Model     string `db:"model" json:"model"`
	CreatedAt string `db:"created_at" json:"createdAt"`
	UpdatedAt string `db:"updated_at" json:"updatedAt"`
	CreatorID string `db:"creator_id" json:"creatorId"`
}

// Plugin structure in database
type Plugin struct {
	ID        string `db:"id" json:"id"`
	GatewayID string `db:"gateway_id" json:"gatewayId"`
	Name      string `db:"name" json:"name"`
	Config    string `db:"config" json:"config"`
	CreatedAt string `db:"created_at" json:"createdAt"`
	UpdatedAt string `db:"updated_at" json:"updatedAt"`
}

// Home structure in database
type Home struct {
	ID        string `db:"id" json:"id"`
	Name      string `db:"name" json:"name"`
	Address   string `db:"address" json:"address"`
	WifiSSID  string `db:"wifi_ssid" json:"wifiSsid"`
	CreatedAt string `db:"created_at" json:"createdAt"`
	UpdatedAt string `db:"updated_at" json:"updatedAt"`
	CreatorID string `db:"creator_id" json:"creatorId"`
}

// Room structure in database
type Room struct {
	ID        string `db:"id" json:"id"`
	Name      string `db:"name" json:"name"`
	Icon      string `db:"icon" json:"icon"`
	HomeID    string `db:"home_id" json:"homeId"`
	CreatedAt string `db:"created_at" json:"createdAt"`
	UpdatedAt string `db:"updated_at" json:"updatedAt"`
	CreatorID string `db:"creator_id" json:"creatorId"`
}

// Device structure in database
type Device struct {
	ID           string `db:"id" json:"id"`
	GatewayID    string `db:"gateway_id" json:"gatewayId"`
	Name         string `db:"name" json:"name"`
	Icon         string `db:"icon" json:"icon"`
	PhysicalID   string `db:"physical_id" json:"physicalId"`
	PhysicalName string `db:"physical_name" json:"physicalName"`
	Config       string `db:"config" json:"config"`
	Plugin       string `db:"plugin" json:"plugin"`
	RoomID       string `db:"room_id" json:"roomId"`
	CreatedAt    string `db:"created_at" json:"createdAt"`
	UpdatedAt    string `db:"updated_at" json:"updatedAt"`
	CreatorID    string `db:"creator_id" json:"creatorId"`
}

// DeviceJSONSelect string to bypass json tag
const DeviceJSONSelect = "json_build_object('id', id, 'name', name, 'gatewayId', gateway_id, 'icon', icon, 'physicalid', physical_id, 'physicalname', physical_name, 'config', config, 'plugin', plugin, 'roomid', room_id, 'createdat', created_at, 'updatedat', updated_at, 'creatorid', creator_id)"

// Permission structure in database
type Permission struct {
	ID        string `db:"id" json:"id"`
	UserID    string `db:"user_id" json:"userId"`
	Type      string `db:"type" json:"type"` //home, room, device
	TypeID    string `db:"type_id" json:"typeId"`
	Read      bool   `db:"read" json:"read"`
	Write     bool   `db:"write" json:"write"`
	Manage    bool   `db:"manage" json:"manage"`
	Admin     bool   `db:"admin" json:"admin"`
	CreatedAt string `db:"created_at" json:"createdAt"`
	UpdatedAt string `db:"updated_at" json:"updatedAt"`
}

// Automation struct in database
type Automation struct {
	ID              string   `db:"id" json:"id"`
	HomeID          string   `db:"home_id" json:"homeId"`
	Name            string   `db:"name" json:"name"`
	Trigger         []string `db:"trigger" json:"trigger"`
	TriggerKey      []string `db:"trigger_key" json:"triggerKey"`
	TriggerValue    []string `db:"trigger_value" json:"triggerValue"`
	TriggerOperator []string `db:"trigger_operator" json:"triggerOperator"`
	Action          []string `db:"action" json:"action"`
	ActionCall      []string `db:"action_call" json:"actionCall"`
	ActionValue     []string `db:"action_value" json:"actionValue"`
	Status          bool     `db:"status" json:"status"`
	CreatedAt       string   `db:"created_at" json:"createdAt"`
	UpdatedAt       string   `db:"updated_at" json:"updatedAt"`
	CreatorID       string   `db:"creator_id" json:"creatorId"`
}

// Datas struct in database
type Datas struct {
	ID        string  `db:"id" json:"id"`
	DeviceID  string  `db:"device_id" json:"deviceId"`
	Field     string  `db:"field" json:"field"`
	ValueNbr  float64 `db:"value_nbr" json:"valueNbr"`
	ValueStr  string  `db:"value_str" json:"valueStr"`
	ValueBool bool    `db:"value_bool" json:"valueBool"`
	CreatedAt string  `db:"created_at" json:"createdAt"`
}

// Logs struct in database
type Logs struct {
	ID        string `db:"id" json:"id"`
	Type      string `db:"type" json:"type"` // automation, device
	TypeID    string `db:"type_id" json:"typeId"`
	Value     string `db:"value" json:"value"` // {"function": "toggle", "params": ""}
	CreatedAt string `db:"created_at" json:"createdAt"`
}

// DB define the database object
var DB *sqlx.DB

// InitDB check and create tables
// It connects to the default database, best-effort creates "casadb"
// (failure is only logged, e.g. when the database already exists),
// replays database.sql, then fetches and executes the pgulid extension
// script from GitHub.
//
// NOTE(review): connection credentials are hard-coded; they should come
// from configuration/environment.
// NOTE(review): executing SQL fetched over the network at startup is a
// supply-chain risk — consider vendoring pgulid.sql.
func InitDB() {
	var err error

	connStr := "postgres://postgres:password@localhost/?sslmode=disable"
	db, err := sqlx.Open("postgres", connStr)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDIDB001"}).Panicf("%s", err.Error())
	}

	// Best-effort create: logged as an error but not fatal, so reruns
	// against an existing database proceed.
	_, err = db.Exec("CREATE database casadb")
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDIDB002"}).Errorf("%s", err.Error())
	}
	db.Close()

	// Reconnect directly to casadb to run the schema scripts.
	connStr = "postgres://postgres:password@localhost/casadb?sslmode=disable"
	db, err = sqlx.Open("postgres", connStr)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDIDB003"}).Panicf("%s", err.Error())
	}

	// Apply the local schema file.
	file, err := ioutil.ReadFile("database.sql")
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDIDB004"}).Panicf("%s", err.Error())
	}
	_, err = db.Exec(string(file))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDIDB005"}).Errorf("%s", err.Error())
	}

	// Fetch and apply the pgulid helper (ULID generation in PostgreSQL).
	resp, err := http.Get("https://raw.githubusercontent.com/geckoboard/pgulid/master/pgulid.sql")
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDIDB006"}).Panicf("%s", err.Error())
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDIDB007"}).Panicf("%s", err.Error())
	}
	_, err = db.Exec(string(body))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDIDB008"}).Errorf("%s", err.Error())
	}
	db.Close()
}

// StartDB start the database to use it in server
// It opens the long-lived handle stored in the package-level DB variable.
func StartDB() {
	var err error

	connStr := "postgres://postgres:password@localhost/casadb?sslmode=disable"
	DB, err = sqlx.Open("postgres", connStr)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSDSDB001"}).Panicf("%s", err.Error())
	}
}
package bardo

import (
	"strings"
)

// getAllTables returns the name of every public base table in the
// database, excluding goose's migration bookkeeping table.
func (db *Database) getAllTables() ([]string, error) {
	var rows []struct {
		Name string `db:"table_name"`
	}
	query := `
		SELECT table_name
		FROM information_schema.tables
		WHERE table_schema = 'public'
		AND table_type = 'BASE TABLE'
		AND table_name != 'goose_db_version'
	`
	if err := db.Select(&rows, query); err != nil {
		return nil, err
	}
	var names []string
	for _, row := range rows {
		names = append(names, row.Name)
	}
	return names, nil
}

// Truncate empties every public base table with CASCADE, leaving the
// schema (and goose's version table) intact.
// TODO: Should accept exclusion list
func (db *Database) Truncate() error {
	names, err := db.getAllTables()
	if err != nil {
		return err
	}
	quoted := make([]string, 0, len(names))
	for _, name := range names {
		quoted = append(quoted, `"`+name+`"`)
	}
	if _, err = db.Exec(`TRUNCATE TABLE ` + strings.Join(quoted, ", ") + ` CASCADE`); err != nil {
		return err
	}
	return nil
}
package transport

import (
	"crypto/tls"
	"errors"
	"fmt"
	"log"
	"net"
	"net/http"
	"net/url"
	"strings"

	"github.com/gorilla/websocket"
)

// The Dialer handles connecting to a server and creating a connection.
type Dialer struct {
	// TLSConfig is used for "tls"/"mqtts" and "wss" connections.
	TLSConfig *tls.Config

	// RequestHeader is sent with the WebSocket handshake.
	RequestHeader http.Header

	// Default ports applied when the URL omits one.
	DefaultTCPPort string
	DefaultTLSPort string
	DefaultWSPort  string
	DefaultWSSPort string

	webSocketDialer *websocket.Dialer

	// Ips holds candidate local IPv4 source addresses, keyed 0..len-1;
	// IpIdx is the rotating cursor advanced after each failed dial.
	Ips   map[int]net.IP
	IpIdx int
}

// NewDialer returns a new Dialer.
func NewDialer() *Dialer {
	return &Dialer{
		DefaultTCPPort: "1883",
		DefaultTLSPort: "8883",
		DefaultWSPort:  "80",
		DefaultWSSPort: "443",
		webSocketDialer: &websocket.Dialer{
			Proxy:        http.ProxyFromEnvironment,
			Subprotocols: []string{"mqtt"},
		},
		Ips:   make(map[int]net.IP),
		IpIdx: 0,
	}
}

var sharedDialer *Dialer

// init builds the shared dialer and collects the host's non-loopback
// 192.168.x.x IPv4 addresses as candidate local source addresses.
func init() {
	sharedDialer = NewDialer()
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		log.Println("init ", err)
	}
	idx := 0
	for _, address := range addrs {
		if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
			if ipnet.IP.To4() != nil && strings.HasPrefix(ipnet.IP.String(), "192.168.") {
				sharedDialer.Ips[idx] = ipnet.IP
				idx++
				log.Println(idx, ipnet.IP.String())
			}
		}
	}
}

// Dial is a shorthand function.
func Dial(urlString string) (Conn, error) {
	return sharedDialer.Dial(urlString)
}

// Dial initiates a connection based in information extracted from an URL.
// Supported schemes: tcp/mqtt, tls/mqtts, ws, wss.
func (d *Dialer) Dial(urlString string) (Conn, error) {
	urlParts, err := url.ParseRequestURI(urlString)
	if err != nil {
		return nil, err
	}

	host, port, err := net.SplitHostPort(urlParts.Host)
	if err != nil {
		// No port in the URL; fall back to the scheme's default below.
		host = urlParts.Host
		port = ""
	}

	switch urlParts.Scheme {
	case "tcp", "mqtt":
		if port == "" {
			port = d.DefaultTCPPort
		}
		if len(d.Ips) == 0 {
			return nil, errors.New("no local ip available")
		}
		// BUG FIX: the previous goto-based retry incremented IpIdx without
		// bound, eventually indexing past the map (nil IP) and looping
		// forever when the host is unreachable. Try each local address at
		// most once, wrapping the cursor with modulo.
		var conn net.Conn
		for attempts := 0; ; attempts++ {
			if attempts >= len(d.Ips) {
				return nil, fmt.Errorf("dial %s: all %d local addresses failed", net.JoinHostPort(host, port), len(d.Ips))
			}
			localaddr := &net.TCPAddr{IP: d.Ips[d.IpIdx%len(d.Ips)]}
			dl := net.Dialer{LocalAddr: localaddr}
			c, err := dl.Dial("tcp", net.JoinHostPort(host, port))
			if err == nil {
				conn = c
				break
			}
			d.IpIdx++
			log.Println(d.IpIdx, "change local address")
		}
		return NewNetConn(conn), nil
	case "tls", "mqtts":
		if port == "" {
			port = d.DefaultTLSPort
		}
		conn, err := tls.Dial("tcp", net.JoinHostPort(host, port), d.TLSConfig)
		if err != nil {
			return nil, err
		}
		return NewNetConn(conn), nil
	case "ws":
		if port == "" {
			port = d.DefaultWSPort
		}
		wsURL := fmt.Sprintf("ws://%s:%s%s", host, port, urlParts.Path)
		conn, _, err := d.webSocketDialer.Dial(wsURL, d.RequestHeader)
		if err != nil {
			return nil, err
		}
		return NewWebSocketConn(conn), nil
	case "wss":
		if port == "" {
			port = d.DefaultWSSPort
		}
		wsURL := fmt.Sprintf("wss://%s:%s%s", host, port, urlParts.Path)
		// NOTE(review): mutating the shared webSocketDialer here is racy if
		// Dial is called concurrently; confirm single-threaded use.
		d.webSocketDialer.TLSClientConfig = d.TLSConfig
		conn, _, err := d.webSocketDialer.Dial(wsURL, d.RequestHeader)
		if err != nil {
			return nil, err
		}
		return NewWebSocketConn(conn), nil
	}

	return nil, ErrUnsupportedProtocol
}
// Copyright 2014 Marc-Antoine Ruel. All rights reserved. // Use of this source code is governed under the Apache License, Version 2.0 // that can be found in the LICENSE file. package main import ( "fmt" "github.com/maruel/subcommands" ) var cmdAskApple = &subcommands.Command{ UsageLine: "apple <options>", ShortDesc: "asks for an apple", LongDesc: "Asks for an apple.", CommandRun: func() subcommands.CommandRun { c := &askAppleRun{} c.Init() c.Flags.BoolVar(&c.bare, "bare", false, "Shows only the bot id, no meta data") return c }, } type askAppleRun struct { CommonFlags bare bool } func (c *askAppleRun) main(a askApplication) error { // This command ignores -verbose. if err := c.Parse(a, true); err != nil { return err } fmt.Fprintf(a.GetOut(), "TODO: Implement 'ask apple'!\n") return nil } func (c *askAppleRun) Run(a subcommands.Application, args []string, env subcommands.Env) int { if len(args) != 0 { fmt.Fprintf(a.GetErr(), "%s: Unknown arguments.\n", a.GetName()) return 1 } d := a.(askApplication) if err := c.main(d); err != nil { fmt.Fprintf(a.GetErr(), "%s: %s\n", a.GetName(), err) return 1 } return 0 }
package controller

import "github.com/therecipe/qt/core"

// topController is the top-level Qt controller object; it embeds
// core.QObject (the therecipe/qt base object type) and currently adds no
// members of its own.
type topController struct {
	core.QObject
}
package sshmux

import (
	"fmt"
	"io"
	"net"
	"sync"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// proxy shuttles payload data and out-of-band requests between two SSH
// channels until either side closes or one request stream terminates.
// Both channels are closed exactly once on return.
func proxy(reqs1, reqs2 <-chan *ssh.Request, channel1, channel2 ssh.Channel) {
	var closer sync.Once
	closeFunc := func() {
		channel1.Close()
		channel2.Close()
	}
	defer closer.Do(closeFunc)

	closerChan := make(chan bool, 1)

	// Bidirectional payload copy; whichever direction finishes first
	// signals shutdown through closerChan.
	go func() {
		io.Copy(channel1, channel2)
		closerChan <- true
	}()
	go func() {
		io.Copy(channel2, channel1)
		closerChan <- true
	}()

	for {
		select {
		case req := <-reqs1:
			// nil means the request channel was closed.
			if req == nil {
				return
			}
			b, err := channel2.SendRequest(req.Type, req.WantReply, req.Payload)
			if err != nil {
				return
			}
			req.Reply(b, nil)
		case req := <-reqs2:
			if req == nil {
				return
			}
			b, err := channel1.SendRequest(req.Type, req.WantReply, req.Payload)
			if err != nil {
				return
			}
			req.Reply(b, nil)
		case <-closerChan:
			return
		}
	}
}

// channelOpenDirectMsg is the extra data of a direct-tcpip channel open
// request: target address/port followed by originator address/port.
type channelOpenDirectMsg struct {
	RAddr string
	RPort uint32
	LAddr string
	LPort uint32
}

// ChannelForward establishes a secure channel forward (ssh -W) to the server
// requested by the user, assuming it is a permitted host.
func (s *Server) ChannelForward(session *Session, newChannel ssh.NewChannel) {
	var msg channelOpenDirectMsg
	ssh.Unmarshal(newChannel.ExtraData(), &msg)
	address := fmt.Sprintf("%s:%d", msg.RAddr, msg.RPort)

	// Only targets whitelisted for this session may be reached.
	permitted := false
	for _, remote := range session.Remotes {
		if remote == address {
			permitted = true
			break
		}
	}
	if !permitted {
		newChannel.Reject(ssh.Prohibited, "remote host access denied for user")
		return
	}

	conn, err := net.Dial("tcp", address)
	if err != nil {
		newChannel.Reject(ssh.ConnectionFailed, fmt.Sprintf("error: %v", err))
		return
	}

	channel, reqs, err := newChannel.Accept()
	if err != nil {
		// BUG FIX: the Accept error was previously ignored, leaving channel
		// nil (panicking in the copy goroutines) and leaking conn.
		conn.Close()
		return
	}
	go ssh.DiscardRequests(reqs)

	// Pipe bytes both ways; first side to finish closes both endpoints once.
	var closer sync.Once
	closeFunc := func() {
		channel.Close()
		conn.Close()
	}
	go func() {
		io.Copy(channel, conn)
		closer.Do(closeFunc)
	}()
	go func() {
		io.Copy(conn, channel)
		closer.Do(closeFunc)
	}()
}

// rw bundles independent reader and writer halves into one io.ReadWriter.
type rw struct {
	io.Reader
	io.Writer
}

// SessionForward performs a regular forward, providing the user with an
// interactive remote host selection if necessary. This forwarding type
// requires agent forwarding in order to work.
func (s *Server) SessionForward(session *Session, newChannel ssh.NewChannel, chans <-chan ssh.NewChannel) {
	// Okay, we're handling this as a regular session
	sesschan, sessReqs, err := newChannel.Accept()
	if err != nil {
		return
	}
	stderr := sesschan.Stderr()

	// Select the destination: none configured, exactly one, or ask the user.
	remote := ""
	switch len(session.Remotes) {
	case 0:
		sesschan.Close()
		return
	case 1:
		remote = session.Remotes[0]
	default:
		comm := rw{Reader: sesschan, Writer: stderr}
		if s.Interactive == nil {
			remote, err = DefaultInteractive(comm, session)
		} else {
			remote, err = s.Interactive(comm, session)
		}
		if err != nil {
			sesschan.Close()
			return
		}
	}

	fmt.Fprintf(stderr, "Connecting to %s\r\n", remote)

	// Set up the agent
	agentChan, agentReqs, err := session.Conn.OpenChannel("auth-agent@openssh.com", nil)
	if err != nil {
		fmt.Fprintf(stderr, "\r\n====== sshmux ======\r\n")
		fmt.Fprintf(stderr, "sshmux requires either agent forwarding or secure channel forwarding.\r\n")
		fmt.Fprintf(stderr, "Either enable agent forwarding (-A), or use a ssh -W proxy command.\r\n")
		fmt.Fprintf(stderr, "For more info, see the sshmux wiki.\r\n")
		sesschan.Close()
		return
	}
	defer agentChan.Close()
	go ssh.DiscardRequests(agentReqs)

	// Set up the client, authenticating with the user's forwarded agent.
	// NOTE(review): clientConfig has no HostKeyCallback; recent versions of
	// x/crypto/ssh refuse to dial without one — confirm the vendored version.
	ag := agent.NewClient(agentChan)
	clientConfig := &ssh.ClientConfig{
		User: session.Conn.User(),
		Auth: []ssh.AuthMethod{
			ssh.PublicKeysCallback(ag.Signers),
		},
	}
	client, err := ssh.Dial("tcp", remote, clientConfig)
	if err != nil {
		fmt.Fprintf(stderr, "Connect failed: %v\r\n", err)
		sesschan.Close()
		return
	}

	// Handle all incoming channel requests by opening the equivalent
	// channel on the remote and proxying the pair.
	go func() {
		for newChannel = range chans {
			if newChannel == nil {
				return
			}
			channel2, reqs2, err := client.OpenChannel(newChannel.ChannelType(), newChannel.ExtraData())
			if err != nil {
				x, ok := err.(*ssh.OpenChannelError)
				if ok {
					newChannel.Reject(x.Reason, x.Message)
				} else {
					newChannel.Reject(ssh.Prohibited, "remote server denied channel request")
				}
				continue
			}
			channel, reqs, err := newChannel.Accept()
			if err != nil {
				channel2.Close()
				continue
			}
			go proxy(reqs, reqs2, channel, channel2)
		}
	}()

	// Forward the session channel
	channel2, reqs2, err := client.OpenChannel("session", []byte{})
	if err != nil {
		fmt.Fprintf(stderr, "Remote session setup failed: %v\r\n", err)
		sesschan.Close()
		return
	}

	// Proxy the session channel and its requests, filtering out
	// agent-forwarding requests (the agent channel is already set up above).
	maskedReqs := make(chan *ssh.Request, 1)
	go func() {
		for req := range sessReqs {
			if req.Type == "auth-agent-req@openssh.com" {
				continue
			}
			maskedReqs <- req
		}
		// BUG FIX: maskedReqs was never closed, so proxy could not observe
		// the end of the client's request stream via its nil-request check.
		close(maskedReqs)
	}()
	proxy(maskedReqs, reqs2, sesschan, channel2)
}
package main

import "go_learn/day06/mylogger"

// main exercises the mylogger package: it builds a logger at "debug"
// level and emits a single debug-level message.
func main() {
	mylogger.NewLog("debug").Debug("这是一条debug日志")
}
package main

import (
	"log"

	"github.com/gin-gonic/gin"
	"github.com/micro/go-micro/web"
)

// main wraps a gin router in a go-micro web service listening on :8000.
// GET / returns a JSON object with an (initially empty) "data" array.
func main() {
	//consul服务注册 (service registration)
	ginRouter := gin.Default()
	data := make([]interface{}, 0)
	ginRouter.Handle("GET", "/", func(context *gin.Context) {
		context.JSON(200, gin.H{
			"data": data,
		})
	})

	server := web.NewService(
		web.Address(":8000"),
		web.Handler(ginRouter),
	)

	// BUG FIX: the error from Run was previously discarded; a failed bind
	// or registration exited silently with status 0.
	if err := server.Run(); err != nil {
		log.Fatal(err)
	}
}
// Package main is the service entry point: it constructs the application
// and terminates the process with a non-zero status if it exits in error.
package main

import (
	"log"

	"github.com/Eric-WangHaitao/Go-0712/Week04/internal"
)

// main builds the app and blocks until it stops; any returned error is
// logged fatally.
func main() {
	log.Fatal(internal.NewApp().Run())
}
/* * @lc app=leetcode.cn id=95 lang=golang * * [95] 不同的二叉搜索树 II */ package solution // @lc code=start type anchor struct { start, end int } func generateTrees(n int) []*TreeNode { max := func(x, y int) int { if x > y { return x } return y } // Calculate the number of binary search trees can be generated with n nums := make([]int, n+1) for i := 1; i <= n; i++ { acc := 0 for j := 1; j <= i; j++ { l := max(1, nums[j-1]) r := max(1, nums[i-j]) acc += (l * r) } nums[i] = acc } // Generate binary search trees recursively treeMap := make(map[anchor][]*TreeNode) res := generate(1, n, nums, treeMap) return res } func generate(start, end int, nums []int, treeMap map[anchor][]*TreeNode) []*TreeNode { if start > end { return nil } if treeMap[anchor{start, end}] != nil { return treeMap[anchor{start, end}] } size := end - start + 1 index, res := 0, make([]*TreeNode, nums[size]) // Generate tree lists that uses i as the head node for i := start; i <= end; i++ { lLists := generate(start, i-1, nums, treeMap) rLists := generate(i+1, end, nums, treeMap) if lLists == nil { lLists = []*TreeNode{nil} } if rLists == nil { rLists = []*TreeNode{nil} } for _, left := range lLists { for _, right := range rLists { head := new(TreeNode) head.Val = i head.Left = left head.Right = right res[index] = head index++ } } } treeMap[anchor{start, end}] = res return res } // @lc code=end
package hpke

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestSeal round-trips a message: sealed by k1 for k2's public key, then
// opened by k2 against k1's public key, recovering the plaintext intact.
func TestSeal(t *testing.T) {
	k1, err := GeneratePrivateKey()
	require.NoError(t, err)
	k2, err := GeneratePrivateKey()
	require.NoError(t, err)

	sealed, err := Seal(k1, k2.PublicKey(), []byte("HELLO WORLD"))
	assert.NoError(t, err)
	assert.NotEmpty(t, sealed)

	message, err := Open(k2, k1.PublicKey(), sealed)
	assert.NoError(t, err)
	assert.Equal(t, []byte("HELLO WORLD"), message)
}

// TestDerivePrivateKey checks that derivation is deterministic in the seed
// (same seed ⇒ same key) and that distinct seeds yield distinct keys.
func TestDerivePrivateKey(t *testing.T) {
	k1a := DerivePrivateKey([]byte("KEY 1"))
	k1b := DerivePrivateKey([]byte("KEY 1"))
	k2 := DerivePrivateKey([]byte("KEY 2"))
	assert.Equal(t, k1a.String(), k1b.String())
	assert.NotEqual(t, k1a.String(), k2.String())
}
package main

import (
	"encoding/hex"
	"flag"
	"fmt"
	"log"

	"dfnpf/examples/iniths"
)

// main drives one round of a Noise handshake: the initiator writes the
// first handshake message, the responder reads it, and the raw message is
// printed hex-encoded to stdout.
//
// Expects 4 to 11 positional arguments (protocol name, initiator
// static/ephemeral keys, responder static/ephemeral keys, remote key,
// prologue, preshared key, message), parsed by iniths.InitHandshake.
func main() {
	flag.Parse()
	if len(flag.Args()) < 4 || len(flag.Args()) > 11 {
		log.Fatalln("Please provide Noise Protocol name, initiator's static and ephemeral keys,",
			"\n responder's static and ephemeral keys, remote key, prologue, preshared key, message")
	}
	handshInit, handshResp, payload := iniths.InitHandshake(flag.Args())

	// BUG FIX: the error from WriteMessage was previously discarded with a
	// blank identifier, so a failed write silently fed a bad message to
	// ReadMessage. The two cipher-state results remain unused here.
	msg, _, _, err := handshInit.WriteMessage(nil, payload)
	if err != nil {
		panic(err)
	}
	if _, _, _, err = handshResp.ReadMessage(nil, msg); err != nil {
		panic(err)
	}
	fmt.Printf("%s", hex.EncodeToString(msg))
}
package models

// ResultChallenge stores a player's result for a challenge run, persisted
// in MongoDB (bson) and exposed over the API (json). Fields without an
// explicit bson tag use the driver's default lowercased field name, which
// matches the json name here.
type ResultChallenge struct {
	Time      int64 `json:"time"` // NOTE(review): units/meaning (duration vs timestamp) not evident here — confirm against callers
	HighScore int64 `json:"high_score" bson:"high_score"`
	Combo     int   `json:"combo"`
	BestCombo int   `json:"best_combo" bson:"best_combo"`
}
package main

import (
	"fmt"
	"time"
)

// badEcho documents why you cannot send on a receive-only channel.
// BUG FIX: the original contained an actual send on `in`, which does not
// compile:
//
//	invalid operation: in <- "bad bad" (send to receive-only type <-chan string)
//
// The offending statement is preserved as a comment so the file builds,
// and the function receives instead — the only direction the type allows.
func badEcho(in <-chan string) {
	// in <- "bad bad" // compile error: send to receive-only channel
	fmt.Println("badEcho rx:", <-in)
}

// testEcho receives one string from in, logs it, and echoes it to out.
func testEcho(in <-chan string, out chan<- string) {
	inStr := <-in
	fmt.Println("routine rx:", inStr)
	out <- inStr
}

// main demonstrates directional channel parameters with a simple echo
// round-trip, then launches badEcho on a fresh channel.
func main() {
	inChan := make(chan string)
	outChan := make(chan string)
	go testEcho(outChan, inChan)

	fmt.Println("main tx a message")
	outChan <- "All of the dust and dirt in the ground at some point"

	time.Sleep(time.Second * 2)
	backMsg := <-inChan
	fmt.Println("main rx a message: " + backMsg)

	// badEcho blocks on its receive; main exits immediately afterwards, so
	// the goroutine is simply abandoned (as in the original).
	badChan := make(chan string)
	go badEcho(badChan)
}
package main

import (
	"context"
	"fmt"

	"github.com/serverless/better/lib/model"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider/cognitoidentityprovideriface"

	"github.com/aws/aws-lambda-go/lambda"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
	"github.com/serverless/better/lib/cognito"
)

// SignOutInput is the Lambda request payload carrying the user's
// Cognito access token.
type SignOutInput struct {
	AccessToken string `json:"token"`
}

// Response is the Lambda result payload.
type Response struct {
	Message string `json:"result"`
}

// deps holds the handler's injectable dependencies; the interface type
// allows the Cognito client to be mocked in tests.
type deps struct {
	cognito cognitoidentityprovideriface.CognitoIdentityProviderAPI
}

// HandleRequest signs the user out of all devices via Cognito's
// GlobalSignOut. It maps Cognito error codes onto HTTP-style
// model.ResponseError values for the caller.
func (d *deps) HandleRequest(ctx context.Context, signOutInput SignOutInput) (Response, error) {
	// validate input
	if signOutInput.AccessToken == "" {
		return Response{}, model.ResponseError{
			Code:    400,
			Message: "You must provide a valid access token",
		}
	}

	// get cognito session (lazily, so tests can pre-populate d.cognito)
	if d.cognito == nil {
		d.cognito = cognito.GetCognitoService()
	}

	// create sign out input
	input := &cognitoidentityprovider.GlobalSignOutInput{
		AccessToken: aws.String(signOutInput.AccessToken),
	}

	// initiate sign out
	_, err := d.cognito.GlobalSignOut(input)

	// handle possible exceptions, translating known Cognito error codes
	// into specific status codes and everything else into a generic 500
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case cognitoidentityprovider.ErrCodeResourceNotFoundException:
				return Response{}, model.ResponseError{
					Code:    404,
					Message: "The access token provided is invalid",
				}
			case cognitoidentityprovider.ErrCodeTooManyRequestsException:
				return Response{}, model.ResponseError{
					Code:    500,
					Message: "Too many request made to validate the code",
				}
			default:
				fmt.Println(aerr.Error())
				return Response{}, model.ResponseError{
					Code:    500,
					Message: "Problem signing out user",
				}
			}
		} else {
			// non-AWS error (e.g. transport failure)
			fmt.Println(err.Error())
			return Response{}, model.ResponseError{
				Code:    500,
				Message: "Problem signing out user",
			}
		}
	}

	return Response{Message: "Successfully signed out user."}, nil
}

func main() {
	d := deps{}
	lambda.Start(d.HandleRequest)
}
package main

import "fmt"

// main demonstrates the basic fmt output helpers: Print, Println,
// Sprint and Printf.
func main() {
	// Print emits no trailing newline; Println does.
	fmt.Print("Same")
	fmt.Print("Line. ")
	fmt.Println("New")
	fmt.Println("Line")

	value := 3.141516

	// Sprint formats a value into a string without printing it.
	asText := fmt.Sprint(value)
	fmt.Println("X = " + asText)
	fmt.Println("X = ", value)
	fmt.Printf("X = %.2f.", value)

	var (
		num  = 1
		flag = false
		word = "opa"
	)
	fmt.Printf("\n%d %t %s", num, flag, word)
}
package main

import (
	"fmt"
	"math/cmplx"
)

// Package-level variables can be grouped into a single declaration
// block, just like imports; each one here carries an explicit type.
var (
	ToBe   bool       = false
	MaxInt uint64     = 1<<64 - 1
	z      complex128 = cmplx.Sqrt(-5 + 12i)
)

func main() {
	// %T prints the dynamic type, %v the value.
	const format = "%T(%v)\n"
	fmt.Printf(format, ToBe, ToBe)
	fmt.Printf(format, MaxInt, MaxInt)
	fmt.Printf(format, z, z)
	// Output:
	//   bool(false)
	//   uint64(18446744073709551615)
	//   complex128((2+3i))
}

/*
Go's basic types:

	bool string
	int int8 int16 int32 int64
	uint uint8 uint16 uint32 uint64 uintptr
	byte — alias for uint8
	rune — alias for int32, represents one Unicode code point
	float32 float64
	complex64 complex128

This example shows variables of several different types; as with
imports, the declarations are packed into a single block.

int, uint and uintptr are generally 32 bits wide on 32-bit systems;
use a fixed-size or unsigned integer type only when there is a
specific reason to.
*/
package controllers import ( "github.com/revel/revel" ) type ProjectController struct { *revel.Controller } func (c * ProjectController) NewProject() revel.Result{ result := RenderMap(200, "ok") return c.RenderJson(result) }
package storage

// taskSchema is the DDL for the tasks table.
//
// Fixes over the previous revision: PostgreSQL spells the
// timestamp-with-time-zone type "timestamptz" (not "timestampz"), and
// "string" is not a SQL type — the duration column is stored as text.
// (If durations should be computable in SQL, consider the "interval"
// type instead.)
var taskSchema = `
CREATE TABLE tasks (
	id int8 PRIMARY KEY,
	created_at timestamptz NOT NULL,
	name text,
	priority int4 NOT NULL,
	start timestamptz,
	duration text
);
`
package main

import (
	"fmt"
)

func main() {
	fmt.Println(shortestPathBinaryMatrix([][]int{
		{0, 0, 0},
		{1, 1, 0},
		{1, 1, 1},
	}))
	// test63
	fmt.Println(14 == shortestPathBinaryMatrix([][]int{
		{0, 1, 0, 0, 0, 0},
		{0, 1, 0, 1, 1, 0},
		{0, 1, 1, 0, 1, 0},
		{0, 0, 0, 0, 1, 0},
		{1, 1, 1, 1, 1, 0},
		{1, 1, 1, 1, 1, 0},
	}))
	fmt.Println(shortestPathBinaryMatrix([][]int{
		{0, 0, 1, 1, 0, 0},
		{0, 0, 0, 0, 1, 1},
		{1, 0, 1, 1, 0, 0},
		{0, 0, 1, 1, 0, 0},
		{0, 0, 0, 0, 0, 0},
		{0, 0, 1, 0, 0, 0},
	}))
	//
	fmt.Println(shortestPathBinaryMatrix([][]int{
		{0, 1, 1, 0, 0, 0},
		{0, 1, 0, 1, 1, 0},
		{0, 1, 1, 0, 1, 0},
		{0, 0, 0, 1, 1, 0},
		{1, 1, 1, 1, 1, 0},
		{1, 1, 1, 1, 1, 0},
	}))
	fmt.Println(shortestPathBinaryMatrix([][]int{
		{0, 1},
		{1, 0},
	}))
	fmt.Println(shortestPathBinaryMatrix([][]int{
		{0, 0, 0},
		{1, 1, 0},
		{1, 1, 0},
	}))
	fmt.Println(shortestPathBinaryMatrix([][]int{
		{1, 0, 0},
		{1, 1, 0},
		{1, 1, 0},
	}))
}

// shortestPathBinaryMatrix returns the length (cell count) of the
// shortest clear path from the top-left to the bottom-right cell of
// grid, moving in any of the eight directions, or -1 when no such path
// exists. Visited cells are marked in place with 1 at enqueue time, so
// the grid is consumed by the call.
func shortestPathBinaryMatrix(grid [][]int) int {
	rows, cols := len(grid), len(grid[0])
	if grid[0][0] != 0 {
		return -1
	}

	type node struct{ r, c, dist int }
	queue := []node{{0, 0, 1}}
	grid[0][0] = 1

	deltas := [8][2]int{{-1, -1}, {-1, 0}, {-1, 1}, {0, -1}, {0, 1}, {1, -1}, {1, 0}, {1, 1}}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if cur.r == rows-1 && cur.c == cols-1 {
			return cur.dist
		}
		for _, d := range deltas {
			nr, nc := cur.r+d[0], cur.c+d[1]
			if nr < 0 || nr >= rows || nc < 0 || nc >= cols || grid[nr][nc] != 0 {
				continue
			}
			grid[nr][nc] = 1
			queue = append(queue, node{nr, nc, cur.dist + 1})
		}
	}
	return -1
}

// shortestPathBinaryMatrix2 is an alternative BFS that marks cells as
// visited when they are dequeued rather than when they are enqueued;
// the result is the same, at the cost of possible duplicate queue
// entries. The grid is mutated in place.
func shortestPathBinaryMatrix2(grid [][]int) int {
	rows, cols := len(grid), len(grid[0])
	if grid[0][0] != 0 {
		return -1
	}

	type node struct{ r, c, dist int }
	queue := []node{{0, 0, 1}}

	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if cur.r == rows-1 && cur.c == cols-1 {
			return cur.dist
		}
		grid[cur.r][cur.c] = 1
		for dr := -1; dr <= 1; dr++ {
			for dc := -1; dc <= 1; dc++ {
				if dr == 0 && dc == 0 {
					continue
				}
				nr, nc := cur.r+dr, cur.c+dc
				if nr >= 0 && nr < rows && nc >= 0 && nc < cols && grid[nr][nc] == 0 {
					queue = append(queue, node{nr, nc, cur.dist + 1})
				}
			}
		}
	}
	return -1
}
package 模拟

// findDiagonalOrder returns the elements of matrix in diagonal order:
// diagonals are visited by increasing index sum (row+col), walking
// up-right on even sums and down-left on odd ones.
func findDiagonalOrder(matrix [][]int) []int {
	if len(matrix) == 0 {
		return []int{}
	}
	rows, cols := len(matrix), len(matrix[0])
	result := make([]int, 0, rows*cols)

	// There are rows+cols-1 diagonals; diagonal d contains exactly the
	// cells whose row+col == d.
	for d := 0; d < rows+cols-1; d++ {
		var r, c int
		if d%2 == 0 {
			// Even diagonal: start at the bottom-most cell of the diagonal.
			r = min(d, rows-1)
			c = d - r
		} else {
			// Odd diagonal: start at the right-most cell of the diagonal.
			c = min(d, cols-1)
			r = d - c
		}
		for r >= 0 && r < rows && c >= 0 && c < cols {
			result = append(result, matrix[r][c])
			if d%2 == 0 {
				r--
				c = d - r
			} else {
				c--
				r = d - c
			}
		}
	}
	return result
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
package main import ( "go-kemas/config" "go-kemas/models" "go-kemas/routes" ) func main() { db := config.SetupDB() db.AutoMigrate(&models.Task{}) db.AutoMigrate(&models.Program{}) db.AutoMigrate(&models.Admin{}) db.AutoMigrate(&models.User{}) r := routes.SetupRoutes(db) r.Run() }
package main

import (
	"fmt"
)

// AppName: GoSpider
// Author : Sven Liu
// Gmail  : whoamsven@gmail.com

// main prints the application greeting.
// Fix: the previous greeting read " hell world ! ..." — corrected the
// typo and the stray spacing.
func main() {
	fmt.Println("hello world! My name is GoSpider!")
}
package fsutils

import (
	"io/ioutil"
	"path"
)

// fsutils groups filesystem helpers for locating theme assets, layouts
// and blocks folders.
//
// NOTE(review): the methods below reference `settingsgetter`, which is
// not imported in this file — presumably it is a dot-import sibling or
// this does not compile as-is; verify.
type fsutils struct{}

// New returns a new fsutils helper.
func New() *fsutils {
	return &fsutils{}
}

// GetFilesList returns the names of all entries in dir.
// On error the returned slice is nil and the ReadDir error is passed
// through unchanged.
func (fsu *fsutils) GetFilesList(dir string) ([]string, error) {
	files, err := ioutil.ReadDir(dir)
	var fileList []string
	if err == nil {
		for _, file := range files {
			fileList = append(fileList, file.Name())
		}
	}
	return fileList, err
}

// GetAssetsPath returns the assets folder of the currently configured theme.
func (fsu *fsutils) GetAssetsPath() string {
	return path.Join("_themes", settingsgetter.Get("THEME"), "assets")
}

// GetLayoutsFolder returns the layouts folder of the currently configured theme.
func (fsu *fsutils) GetLayoutsFolder() string {
	return path.Join("_themes", settingsgetter.Get("THEME"), "layouts")
}

// GetBlocksFolder returns the blocks folder of the currently configured theme.
func (fsu *fsutils) GetBlocksFolder() string {
	return path.Join("_themes", settingsgetter.Get("THEME"), "blocks")
}

// GetAssetsDestPath returns the destination folder for built assets.
func (fsu *fsutils) GetAssetsDestPath() string {
	return path.Join(settingsgetter.Get("DIST_FOLDER"))
}
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
	"sync"

	"github.com/hdlopez/go-talks/2019/gopherconuk/examples/context"
)

// NOTE: this file backs a slide deck; the START/END "OMIT" markers
// delimit the regions shown on individual slides, so the code between
// each pair of markers is kept exactly as presented.

// START 1 OMIT

// Private, available only from within the package
type header struct {
}

// Public, available from other packages
func Export() {
}

// Private, available only from within the package
func doExport(h header) {
}

// END 1 OMIT

// START 2 OMIT

type Reader interface {
	Read(p []byte) (n int, err error)
}

type Writer interface {
	Write(p []byte) (n int, err error)
}

// ReadWriter is the interface that combines the Reader and Writer interfaces.
type ReadWriter interface {
	Reader
	Writer
}

// END 2 OMIT

// START 3.1 OMIT

type File struct {
	sync.Mutex
	rw io.ReadWriter
}

// END 3.1 OMIT

// file exercises the snippets shown on slides 3.2 and 4.
func file() {
	// START 3.2 OMIT
	f := File{}
	f.Lock()
	// END 3.2 OMIT

	// START 4 OMIT
	var r io.Reader
	r = bytes.NewBufferString("hello")

	buf := make([]byte, 2048)
	if _, err := r.Read(buf); err != nil {
		log.Fatal(err)
	}
	// END 4 OMIT
}

// START 5 OMIT

type geometry interface {
	area() float64
	perim() float64
}

// rect implements geometry interface // HL
type rect struct {
	width, height float64
}

func (r rect) area() float64 {
	return r.width * r.height
}

func (r rect) perim() float64 {
	return 2*r.width + 2*r.height
}

// END 5 OMIT

// constructors shows the conventional constructor naming from slide 6.
func constructors() {
	var reader io.Reader
	// START 6 OMIT
	context.New() // means new instance of "Context" type

	http.NewRequest("GET", "/users/1", reader) // another standard way to declare a constructor
	// END 6 OMIT
}

// START 7.1 OMIT

type Event struct{}

func (e *Event) Log(msg string) {
	if e == nil {
		return
	}
	// Log the msg on the event...
}

// END 7.1 OMIT

// useEvent demonstrates that a method with a nil receiver is callable.
func useEvent() {
	// START 7.2 OMIT
	var e *Event
	e.Log("this is a message")
	// END 7.2 OMIT
}

// START 8.1 OMIT

type MyFakeClass struct {
	attribute1 string
}

// END 8.1 OMIT

// START 8.2 OMIT

func (mc MyFakeClass) printMyAttribute() {
	fmt.Println(mc.attribute1)
}

// END 8.2 OMIT

// START 8.3 OMIT

func printMyAttribute(mc MyFakeClass) {
	fmt.Println(mc.attribute1)
}

// END 8.3 OMIT
/* Go Language Raspberry Pi Interface (c) Copyright David Thorpe 2019 All Rights Reserved Documentation http://djthorpe.github.io/gopi/ For Licensing and Usage information, please see LICENSE.md */ package sensordb import ( "fmt" "strconv" "strings" "time" // Frameworks gopi "github.com/djthorpe/gopi" sensors "github.com/djthorpe/sensors" ) //////////////////////////////////////////////////////////////////////////////// // TYPES type SensorDB struct { Path string InfluxAddr string InfluxTimeout time.Duration InfluxDatabase string } type sensordb struct { log gopi.Logger // Config and Influxdb config influxdb } type sensor struct { Namespace_ string `json:"ns"` Key_ string `json:"key"` Description_ string `json:"description"` TimeCreated_ time.Time `json:"ts_created"` TimeSeen_ time.Time `json:"ts_seen"` } //////////////////////////////////////////////////////////////////////////////// // OPEN AND CLOSE func (config SensorDB) Open(log gopi.Logger) (gopi.Driver, error) { log.Debug("<sensordb>Open{ config=%+v }", config) this := new(sensordb) this.log = log if err := this.config.Init(config, log); err != nil { return nil, err } if err := this.influxdb.Init(config, log); err != nil { return nil, err } // Return success return this, nil } func (this *sensordb) Close() error { this.log.Debug("<sensordb>Close{ config=%v influxdb=%v }", this.config.String(), this.influxdb.String()) if err := this.influxdb.Destroy(); err != nil { return err } if err := this.config.Destroy(); err != nil { return err } // Success return nil } //////////////////////////////////////////////////////////////////////////////// // STRINGIFY func (this *sensordb) String() string { return fmt.Sprintf("<sensordb>{ config=%v influxdb=%v }", this.config.String(), this.influxdb.String()) } //////////////////////////////////////////////////////////////////////////////// // DATABASE IMPLEMENTATION // Return an array of all sensors func (this *sensordb) Sensors() []sensors.Sensor { sensors := 
make([]sensors.Sensor, len(this.config.Sensors)) for i, sensor := range this.config.Sensors { sensors[i] = sensor } return sensors } // Register a sensor from a message, recording sensor details // as necessary func (this *sensordb) Register(message sensors.Message) (sensors.Sensor, error) { this.log.Debug2("<sensordb>Register{ message=%v }", message) // Return ns and key if ns, key, description, err := decode_sensor(message); err != nil { return nil, err } else if sensor_ := this.config.GetSensorByName(ns, key); sensor_ == nil { // Create a new sensor record if sensor_ := NewSensor(ns, key, description); sensor_ == nil { this.log.Warn("NewSensor: Failed") return nil, gopi.ErrAppError } else if err := this.config.AddSensor(sensor_); err != nil { this.log.Warn("NewSensor: Failed: %v", err) return nil, err } else { return sensor_, nil } } else if err := this.config.PingSensor(sensor_); err != nil { this.log.Warn("PingSensor: Failed: %v", err) return nil, err } else { return sensor_, nil } } // Lookup an existing sensor based on namespace and key, or nil if not found func (this *sensordb) Lookup(ns, key string) sensors.Sensor { this.log.Debug2("<sensordb>Lookup{ ns=%v key=%v }", strconv.Quote(ns), strconv.Quote(key)) if ns == "" || key == "" { return nil } return this.config.GetSensorByName(ns, key) } // Write out a message to the database func (this *sensordb) Write(sensor sensors.Sensor, message sensors.Message) error { this.log.Debug2("<sensordb>Write{ message=%v }", message) return this.influxdb.Write(sensor, message) } //////////////////////////////////////////////////////////////////////////////// // PRIVATE METHODS // decode_sensor converts a message into a namespace and key func decode_sensor(message sensors.Message) (string, string, string, error) { if message == nil { return "", "", "", gopi.ErrBadParameter } else if message_, ok := message.(sensors.OOKMessage); ok { if product := decode_ook_socket(message_); product == sensors.MIHOME_PRODUCT_NONE { return 
"", "", "", fmt.Errorf("Invalid or unknown product for message: %v", message_) } else { return message_.Name(), fmt.Sprintf("%02X:%06X", product, message_.Addr()), "Switch", nil } } else if message_, ok := message.(sensors.OTMessage); ok { product := fmt.Sprintf("%v", sensors.MiHomeProduct(message_.Product())) if strings.HasPrefix(product, "MIHOME_PRODUCT_") { product = strings.TrimPrefix(product, "MIHOME_PRODUCT_") } return message_.Name(), fmt.Sprintf("%02X:%06X", message_.Product(), message_.Sensor()), product, nil } else { return "", "", "", sensors.ErrUnexpectedResponse } } func decode_ook_socket(message sensors.OOKMessage) sensors.MiHomeProduct { switch message.Socket() { case 0: return sensors.MIHOME_PRODUCT_CONTROL_ALL case 1: return sensors.MIHOME_PRODUCT_CONTROL_ONE case 2: return sensors.MIHOME_PRODUCT_CONTROL_TWO case 3: return sensors.MIHOME_PRODUCT_CONTROL_THREE case 4: return sensors.MIHOME_PRODUCT_CONTROL_FOUR default: return sensors.MIHOME_PRODUCT_NONE } }
package main // Display the character and string repeatedly 5 times. func main() { display1 := NewCharDisplay("H") // Create an instance of the CharDisplay display2 := NewStringDisplay("Hello world.") // Create an instance of the StringDisplay display3 := NewStringDisplay("Nice to meet you.") // Create an instance of the StringDisplay // Any instance can be called with the same method name display1.Output(display1) display2.Output(display2) display3.Output(display3) }
package main

import "fmt"

// main contrasts maps holding struct values with maps holding struct
// pointers: value entries must be copied out, mutated and written back,
// while pointer entries can be updated in place.
func main() {
	type user struct {
		name string
		age  byte
	}

	byValue := map[int]user{
		1: {"Tom", 19},
	}
	// Map values are not addressable: copy, modify, store back.
	entry := byValue[1]
	entry.age++
	byValue[1] = entry
	fmt.Println(byValue[1])

	byPointer := map[int]*user{
		1: {"Jack", 20},
	}
	// Pointer entries can be mutated directly through the map.
	byPointer[1].age++
	fmt.Println(*byPointer[1])
}
package main

// testLanguage is a placeholder for language-operator exercises.
// Fix: the previous revision was not gofmt-clean (space before the
// parameter list).
func testLanguage() {
}

// testCondition is a placeholder for conditional-statement exercises.
func testCondition() {
}

func main() {
}
package models import ( "github.com/jinzhu/gorm" "golang.org/x/crypto/bcrypt" "errors" log "github.com/sirupsen/logrus" ) type User struct { gorm.Model Name string `json:"name"` Email string `gorm:"type:varchar(100);unique_index"` Password string `json:"password"` } type userDTO struct { Name string `json:"name"` Email string `json:"email"` } func (u *User) BeforeCreate(scope *gorm.Scope) error { log.Info(u.Password) hashedPassword, err := hashPassword(u.Password) if err != nil { return errors.New("cannot hash password") } scope.SetColumn("password", hashedPassword) return nil } func hashPassword(password string) (string, error) { bytes, err := bcrypt.GenerateFromPassword([]byte(password), 14) return string(bytes), err } func CreateUser(user User) (*userDTO, error) { var err error err = db.Create(&user).Error if err != nil { return nil, err } createdUser := userDTO{ Name: user.Name, Email: user.Email, } return &createdUser, nil } func FindUserByEmail(email string) (*userDTO, error) { var user User err := db.Where("email = ?", email).First(&user).Error if err != nil { return nil, err } return &userDTO{ Name: user.Name, Email: user.Email, }, nil }
package main import ( DB "LivingPointAPI/database/database" "context" "log" "time" "google.golang.org/grpc" ) func main() { conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure()) if err != nil { log.Fatal("did not connect: ", err) } defer conn.Close() c := DB.NewDatabaseClient(conn) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() q := make(map[string]string) q["login"] = "test1" r, err := c.Get(ctx, &DB.TableRequest{HasQueries: true, Queries: q, Table: "users"}) if err != nil { log.Fatalf("[error] GetUsers: %v", err) } log.Printf("GetUsers: %s", r.Fields[0].Map["Login"]) }
package main import ( "crypto/ecdsa" "encoding/hex" "flag" "fmt" "math" "os" "regexp" "strconv" "strings" "github.com/ethereum/go-ethereum/crypto" ) // The following code takes inspiration from and generalizes the code at https://github.com/chrsow/geth-vanitygen // Command line flag parsing type stringsFlag struct { set bool value []string } // Set checks variable for the existence of a flag func (sf *stringsFlag) Set(x string) error { sf.value = strings.Split(x, ",") sf.set = true return nil } func (sf *stringsFlag) String() string { return strings.Join(sf.value, ",") } type IntFlag struct { set bool value int } func (intFlag *IntFlag) Set(x string) error { value, error := strconv.Atoi(x) if error != nil { return error } intFlag.value = value intFlag.set = true return nil } func (intFlag *IntFlag) String() string { return string(intFlag.value) } // Word validation code func validateWord(word string) { // Only accept lowercase to avoid the upper/lower case mismatches. r, _ := regexp.MatchString(`^[0-9a-f]+$`, word) if !r { fmt.Printf("[-] %s: is not a valid hexadecimal.\n", word) os.Exit(1) } else if len(word) > 40 { fmt.Println("[-] You can't generate matching Ethereum address for more than 40 characters (20 bytes).") os.Exit(1) } } func generateAccount() (string, *ecdsa.PrivateKey) { // 1. generate private key, ECDSA(private key) => public key key, _ := crypto.GenerateKey() pubKey := key.PublicKey // 2. 
public key => address address := crypto.PubkeyToAddress(pubKey) addressHex := hex.EncodeToString(address[:]) return addressHex, key } func searchAddress(prefix []string, suffix []string) (string, string) { n := len(prefix) if len(prefix) != len(suffix) { fmt.Printf("Length of prefix and suffix arrays doesn't match: %v %v\n", prefix, suffix); os.Exit(1) } prefixLengths := make([]int, len(prefix)) // Small optimization when all prefixes have same length allPrefixesHaveSameLengths := true commonPrefixLength := len(prefix[0]) for i, _ := range prefixLengths { prefixLengths[i] = len(prefix[i]) if prefixLengths[i] != commonPrefixLength { allPrefixesHaveSameLengths = false } } suffixLengths := make([]int, len(suffix)) // Small optimization when all suffixes have same length allSuffixesHaveSameLengths := true commonSuffixLength := len(suffix[0]) for i, _ := range prefixLengths { suffixLengths[i] = len(suffix[i]) if suffixLengths[i] != commonSuffixLength { allSuffixesHaveSameLengths = false } } found := false var address string var key *ecdsa.PrivateKey count := 0 for !found { if (count % 50000) == 0 { fmt.Printf("Attempt: %d\n", count) } count++ address, key = generateAccount() var addressPrefix string var addressSuffix string if allPrefixesHaveSameLengths { addressPrefix = address[:commonPrefixLength] } if allSuffixesHaveSameLengths { addressSuffix = address[40-commonSuffixLength:] } for i := 0; i < n; i++ { if !allPrefixesHaveSameLengths { addressPrefix = address[:prefixLengths[i]] } if !allSuffixesHaveSameLengths { addressSuffix = address[40-suffixLengths[i]:] } //fmt.Printf("Checking \"%s (%s, %s)\" against \"%s\" and \"%s\"\n", // address, // addressPrefix, // addressSuffix, // prefix[i], // suffix[i]) if addressPrefix == prefix[i] && addressSuffix == suffix[i] { fmt.Printf("[+] Address with prefix \"%s\" and suffix \"%s\" found.\n", prefix[i], suffix[i]) found = true break } } } privateKey := hex.EncodeToString(crypto.FromECDSA(key)) // fmt.Printf("Converting key: 
\"%s\" to privateKey: \"%s\"\n", key, privateKey) return address, privateKey } func foundAddress(address string, privateKey string) { fmt.Printf("Address: 0x%s\n", address) fmt.Printf("PrivateKey: \"%s\"\n\n", privateKey) } // prefix, suffix from cli var prefixes stringsFlag var suffixes stringsFlag var threadCount IntFlag const defaultThreadCount = 16 func init() { flag.Var(&prefixes, "p", "Comma-separated list of prefixes") flag.Var(&suffixes, "s", "Comma-separated list of suffixes") flag.Var(&threadCount, "t", fmt.Sprintf("Num threads (default: %d)", defaultThreadCount)) } // Usage: -p 12,13,14 -s 89,678,56 -> This will try to find an address with // 1. prefix = 12 and suffix = 89 or, // 2. prefix = 13 and suffix = 678 or, // 3. prefix = 14 and suffix = 56 // At least one of the -p and -s should be provided. // If both are provided then they should have same number of elements. // The program execution stops at the first match. func main() { flag.Parse() ch := make(chan bool) for i, _ := range prefixes.value { prefixes.value[i] = strings.ToLower(prefixes.value[i]) validateWord(prefixes.value[i]) } for i, _ := range suffixes.value { suffixes.value[i] = strings.ToLower(suffixes.value[i]) validateWord(suffixes.value[i]) } // If prefixes are provided but not suffixes then init suffixes as empty array if prefixes.set && !suffixes.set { suffixes.value = make([]string, len(prefixes.value)) } // If suffixes are provided but not suffixes then init prefixes as empty array if suffixes.set && !prefixes.set { prefixes.value = make([]string, len(suffixes.value)) } fmt.Printf("Finding matches with prefixes = %v and suffixes = %v\n", prefixes.value, suffixes.value) numThreads := defaultThreadCount if threadCount.set { numThreads = threadCount.value } printAttemptEstimates(prefixes.value, suffixes.value, numThreads) for i := 0; i < numThreads; i++ { go findTheMatch(prefixes.value, suffixes.value, ch) } <-ch } func findTheMatch(prefixes []string, suffixes []string, ch chan bool) 
{ address, privateKey := searchAddress(prefixes, suffixes) foundAddress(address, privateKey) ch <- true } func printAttemptEstimates(prefixes []string, suffixes []string, threadCount int) { harmonicSum := 0.0 for i, _ := range prefixes { numNibbles := len(prefixes[i]) + len(suffixes[i]) numBits := 4 * numNibbles numAttempts := math.Pow(2, float64(numBits)) harmonicSum += 1/float64(numAttempts) fmt.Printf( "It will take %.1f attempts for finding a ETH address matching (prefix: \"%s\",suffix: \"%s\") " + "with 100%% probability.\n\t%.1f attempts suffice for 50%% probability of finding a match.\n", numAttempts, prefixes[i], suffixes[i], numAttempts / 2) } numAttempts := int(1.0 / harmonicSum) / threadCount fmt.Printf("Overall number of attempts across all pairs is %d for 100%% probability and %d for 50%% probability\n", numAttempts, numAttempts / 2) }
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// main issues a GET request to https://www.baidu.com and prints the
// response status line.
func main() {
	// Build the GET request.
	req, err := http.NewRequest("GET", "https://www.baidu.com", nil)
	if err != nil {
		log.Fatalf("could not create request: %v", err)
	}

	// Fix: use a client with an explicit timeout — http.DefaultClient has
	// none, so a stalled server would hang this program forever.
	client := &http.Client{Timeout: 10 * time.Second}

	// Execute the request.
	res, err := client.Do(req)
	if err != nil {
		log.Fatalf("http request failed: %v", err)
	}
	// Fix: the response body was never closed, leaking the connection.
	defer res.Body.Close()

	// Print the response status.
	fmt.Println(res.Status)
}
package configuration

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/authelia/authelia/v4/internal/utils"
)

// TestShouldHaveSameChecksumForBothTemplates guards against the two
// copies of the configuration template drifting apart: the
// repository-root config.template.yml and the copy kept in this package
// must remain byte-for-byte identical, verified via SHA-256 checksums.
func TestShouldHaveSameChecksumForBothTemplates(t *testing.T) {
	// Checksum of the template at the repository root.
	sumRoot, err := utils.HashSHA256FromPath("../../config.template.yml")
	assert.NoError(t, err)

	// Checksum of the template shipped inside this package.
	sumInternal, err := utils.HashSHA256FromPath("./config.template.yml")
	assert.NoError(t, err)

	assert.Equal(t, sumRoot, sumInternal, "Ensure both ./config.template.yml and ./internal/configuration/config.template.yml are exactly the same.")
}
/*
Copyright 2021 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_multicluster_test

import (
	"context"
	"fmt"
	"os/exec"
	"time"

	"github.com/kubevela/pkg/util/k8s"
	"github.com/kubevela/pkg/util/rand"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)

// End-to-end specs for the `vela adopt` command: adopting native
// resources, helm releases and resources across clusters.
var _ = Describe("Test adopt commands", func() {
	Context("Test vela adopt commands", func() {
		var ns string

		// Each spec gets its own randomized namespace, cleaned up afterwards.
		BeforeEach(func() {
			ns = "test-adopt-" + rand.RandomString(4)
			Expect(k8s.EnsureNamespace(context.Background(), k8sClient, ns)).Should(Succeed())
		})

		AfterEach(func() {
			Expect(k8s.ClearNamespace(context.Background(), k8sClient, ns)).Should(Succeed())
		})

		// Read-only adoption: deleting the adopting Application must leave
		// the adopted resources in place.
		It("Test vela adopt native resources with read-only mode", func() {
			ctx := context.Background()
			Expect(k8sClient.Create(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "adopt-cm", Namespace: ns}})).Should(Succeed())
			Expect(k8sClient.Create(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "adopt-secret", Namespace: ns}})).Should(Succeed())
			_, err := execCommand("adopt", "configmap/adopt-cm", "secret/adopt-secret", "--app-name=adopt-test", "-n="+ns, "--apply")
			Expect(err).Should(Succeed())
			app := &v1beta1.Application{}
			Eventually(func(g Gomega) {
				g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "adopt-test", Namespace: ns}, app)).Should(Succeed())
				g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
			}).WithTimeout(20 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
			Expect(k8sClient.Delete(ctx, app)).Should(Succeed())
			// Both adopted resources should survive the Application deletion.
			// NOTE(review): the second check repeats "adopt-cm"; presumably
			// "adopt-secret" was intended — verify.
			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "adopt-cm", Namespace: ns}, &corev1.ConfigMap{})).Should(Succeed())
			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "adopt-cm", Namespace: ns}, &corev1.ConfigMap{})).Should(Succeed())
		})

		// Take-over adoption with --recycle: deleting the Application must
		// also delete the resources installed by the helm release.
		It("Test vela adopt helm chart with take-over mode", func() {
			ctx, fn := context.WithTimeout(context.Background(), time.Second*60)
			defer fn()
			bs, err := exec.CommandContext(ctx, "helm", "install", "vela-test", "./testdata/chart/test", "-n", ns).CombinedOutput()
			_, _ = fmt.Fprintf(GinkgoWriter, "%s\n", string(bs))
			Expect(err).Should(Succeed())
			_, err = execCommand("adopt", "vela-test", "--mode=take-over", "--type=helm", "-n="+ns, "--apply", "--recycle")
			Expect(err).Should(Succeed())
			app := &v1beta1.Application{}
			Eventually(func(g Gomega) {
				g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "vela-test", Namespace: ns}, app)).Should(Succeed())
				g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
				g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "vela-test", Namespace: ns}, &corev1.ConfigMap{})).Should(Succeed())
			}).WithTimeout(20 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
			Expect(k8sClient.Delete(ctx, app)).Should(Succeed())
			// With --recycle, the helm-installed ConfigMap must be removed.
			Eventually(func(g Gomega) {
				g.Expect(errors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: "vela-test", Namespace: ns}, &corev1.ConfigMap{}))).Should(BeTrue())
			}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
		})

		// Multi-cluster adoption: resources live in the hub ("local") and a
		// worker cluster, addressed as type/cluster/namespace/name.
		It("Test vela adopt resources from multiple cluster", func() {
			hubCtx, workerCtx, _ns := initializeContextAndNamespace()
			Expect(k8sClient.Create(hubCtx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "adopt-cm", Namespace: _ns}})).Should(Succeed())
			Expect(k8sClient.Create(workerCtx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "adopt-secret", Namespace: _ns}})).Should(Succeed())
			_, err := execCommand("adopt", fmt.Sprintf("configmap/local/%s/adopt-cm", _ns), fmt.Sprintf("secret/%s/%s/adopt-secret", WorkerClusterName, _ns), "--app-name=adopt-test", "-n="+_ns, "--apply")
			Expect(err).Should(Succeed())
			app := &v1beta1.Application{}
			Eventually(func(g Gomega) {
				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Name: "adopt-test", Namespace: _ns}, app)).Should(Succeed())
				g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
			}).WithTimeout(20 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
			Expect(k8sClient.Delete(hubCtx, app)).Should(Succeed())
		})
	})
})
package module /** * 底层redis 连接池 * @author guojun-s@360.cn * */ import ( "github.com/garyburd/redigo/redis" "time" ) const ( PROTOCOL = "tcp" //connection protocol ) var ( MaxIdle int = 100 MaxActive int = 1000 IdleTimeout time.Duration = time.Duration(28 * time.Second) ) // //var RedisPool *redis.Pool //var Rp = &Pool{} // //type Pool struct{} // //func (p *Pool) Init(server string, password string, IdleTimeout time.Duration, MaxIdle, MaxActive int) { // RedisPool = NewPool(server, password, IdleTimeout, MaxIdle, MaxActive) //} /** * Redis Pool * * serverAddr the server address 127.0.0.1:6379 * password password 127.0.0.1:6379:password * IdleTimeout 超时 * MaxIdle 连接池最大容量 * MaxActive 最大活跃数量 * dbno 选择db127.0.0.1:6379:password:1 * */ func NewPool(serverAddr string, password string, IdleTimeout time.Duration, MaxIdle, MaxActive int) *redis.Pool { return &redis.Pool{ MaxIdle: MaxIdle, MaxActive: MaxActive, IdleTimeout: IdleTimeout, Dial: func() (redis.Conn, error) { c, err := redis.Dial(PROTOCOL, serverAddr) if err != nil { return nil, err } //校验密码 if _, err := c.Do("AUTH", password); err != nil { c.Close() return nil, err } return c, err }, TestOnBorrow: func(c redis.Conn, t time.Time) error { _, err := c.Do("PING") return err }, } }