text
stringlengths
11
4.05M
package main import ( "lesson/lesson13-ready-spider/engine" "lesson/lesson13-ready-spider/zhenai/parser" "lesson/lesson13-ready-spider/scheduler" ) // 获取并打印 城市第一页的用户信息 // 入口函数 func main() { url := "http://www.zhenai.com/zhenghun" e := engine.ConcurrentEngine{ Scheduler: &scheduler.SimpleScheduler{}, WorkerCount:3, } e.Run(engine.Request{ Url: url, ParseFunc: parser.ParseCityList, }) }
package dushengchen /** Submission: https://leetcode.com/submissions/detail/740781943/ Runtime: 6 ms, faster than 67.71% of Go online submissions for Diameter of Binary Tree. Memory Usage: 4.7 MB, less than 5.00% of Go online submissions for Diameter of Binary Tree. */ func maximalSquare(matrix [][]byte) int { maxArea := 0 for i := range matrix { for j := range matrix[i] { if matrix[i][j] == '0' { continue } r := 1 for { } } } return maxArea }
package cmd

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/bitmaelum/bitmaelum-suite/internal/container"
	"github.com/bitmaelum/bitmaelum-suite/internal/parse"
	"github.com/bitmaelum/bitmaelum-suite/pkg/address"
	"github.com/spf13/cobra"
)

// jsonOut is a generic JSON object used when the --json flag is set.
type jsonOut map[string]interface{}

// inviteCmd represents the invite command
var inviteCmd = &cobra.Command{
	Use:   "invite",
	Short: "Invite a new user onto your server",
	Long:  `This command will generate an invitation token that must be used for registering an account on your server. Only the specified address can register the account`,
	Run: func(cmd *cobra.Command, args []string) {
		s, _ := cmd.Flags().GetString("address")
		d, _ := cmd.Flags().GetString("duration")
		asJSON, _ := cmd.Flags().GetBool("json")

		addr, err := address.New(s)
		if err != nil {
			outError("incorrect address specified", asJSON)
			return
		}

		duration, err := parse.ValidDuration(d)
		if err != nil {
			outError("incorrect duration specified", asJSON)
			return
		}

		inviteRepo := container.GetInviteRepo()

		// A successful Get means a token already exists for this address.
		token, err := inviteRepo.Get(addr.Hash())
		if err == nil {
			msg := fmt.Sprintf("'%s' already allowed to register with token: %s\n", addr.String(), token)
			outError(msg, asJSON)
			return
		}

		token, err = inviteRepo.Create(addr.Hash(), duration)
		if err != nil {
			msg := fmt.Sprintf("error while inviting address: %s", err)
			outError(msg, asJSON)
			return
		}

		if asJSON {
			output := jsonOut{
				"address":     addr.String(),
				"token":       token,
				"valid_until": time.Now().Add(duration),
			}
			out, _ := json.Marshal(output)
			fmt.Printf("%s", out)
		} else {
			fmt.Printf("'%s' is allowed to register on our server until %s.\n", addr.String(), time.Now().Add(duration).Format(time.RFC822))
			fmt.Printf("The invitation token is: %s\n", token)
		}
	},
}

// outError prints msg as plain text, or as a {"error": msg} JSON object when
// asJSON is set.
func outError(msg string, asJSON bool) {
	if !asJSON {
		fmt.Print(msg)
		return
	}
	out, _ := json.Marshal(jsonOut{"error": msg})
	fmt.Printf("%s", out)
}

func init() {
	rootCmd.AddCommand(inviteCmd)

	inviteCmd.Flags().String("address", "", "Address to register")
	// BUGFIX: corrected user-visible typo "NUmber" -> "Number" in help text.
	inviteCmd.Flags().String("duration", "30", "Number of days (or duration like 1w2d3h4m6s) allowed for registration")
	inviteCmd.Flags().Bool("json", false, "Return JSON response when set")

	_ = inviteCmd.MarkFlagRequired("address")
}
package main

// Node is a node of an AVL tree. Height is the height of the subtree rooted
// here (0 for a leaf; a nil subtree counts as -1) and Size is the number of
// nodes in that subtree.
type Node struct {
	Val    int
	Left   *Node
	Right  *Node
	Height int
	Size   int
}

// height returns the height of a possibly-nil subtree (-1 for nil).
// BUGFIX: the original computed balance as root.Left.Height-root.Right.Height,
// which panics whenever either child is nil.
func height(n *Node) int {
	if n == nil {
		return -1
	}
	return n.Height
}

// size returns the node count of a possibly-nil subtree.
func size(n *Node) int {
	if n == nil {
		return 0
	}
	return n.Size
}

// update recomputes n.Height and n.Size from n's children.
func update(n *Node) {
	h := height(n.Left)
	if rh := height(n.Right); rh > h {
		h = rh
	}
	n.Height = h + 1
	n.Size = size(n.Left) + size(n.Right) + 1
}

// AvlInsert inserts val into the AVL tree rooted at root and returns the new
// root. Values equal to an existing value go to the right subtree, matching
// the original insertion rule.
func AvlInsert(root *Node, val int) *Node {
	if root == nil {
		return &Node{Val: val, Height: 0, Size: 1}
	}
	if val < root.Val {
		root.Left = AvlInsert(root.Left, val)
	} else {
		root.Right = AvlInsert(root.Right, val)
	}
	update(root)

	balance := height(root.Left) - height(root.Right)
	if balance > 1 {
		if height(root.Left.Left) >= height(root.Left.Right) {
			// LL: single right rotation.
			return rotateRight(root)
		}
		// LR: rotate left child left, then rotate right.
		root.Left = rotateLeft(root.Left)
		return rotateRight(root)
	}
	if balance < -1 {
		if height(root.Right.Left) >= height(root.Right.Right) {
			// RL: rotate right child right, then rotate left.
			root.Right = rotateRight(root.Right)
			return rotateLeft(root)
		}
		// RR: single left rotation.
		return rotateLeft(root)
	}
	return root
}

// rotateLeft rotates node's right child up and returns the new subtree root.
// BUGFIX: the original rotation bodies were empty (missing return values, so
// the file did not even compile) and never maintained Height/Size.
func rotateLeft(node *Node) *Node {
	pivot := node.Right
	node.Right = pivot.Left
	pivot.Left = node
	update(node)
	update(pivot)
	return pivot
}

// rotateRight rotates node's left child up and returns the new subtree root.
func rotateRight(node *Node) *Node {
	pivot := node.Left
	node.Left = pivot.Right
	pivot.Right = node
	update(node)
	update(pivot)
	return pivot
}
package xmbs

// Rule is one entry of the rules list in the YAML configuration. Each field
// maps directly to its yaml tag; the exact semantics of process/resource/
// amount/when are defined by the code that evaluates rules, which is not in
// this file — confirm against the rule engine before relying on them.
type Rule struct {
	Process  string `yaml:"process,omitempty"`
	Resource string `yaml:"resource,omitempty"`
	Amount   string `yaml:"amount,omitempty"`
	When     string `yaml:"when,omitempty"`
}

// Config is the top-level YAML document: a list of rules under the "rules" key.
type Config struct {
	Rules []Rule `yaml:"rules"`
}
package example

import (
	"net/http"
	"time"

	"github.com/Hexilee/gotten"
)

type (
	// SimpleParams carries the path/query parameters for GetItems.
	SimpleParams struct {
		Id   int
		Page int
	}

	// Item is one element of the expected response payload.
	Item struct {
		TypeId      int
		IId         int
		Name        string
		Description string
	}

	ExpectResult []*Item

	// ObjectNotFound models the service's error payload.
	ObjectNotFound struct {
		Key         string
		Reason      string
		Description string
	}

	// SimpleService declares the remote endpoints implemented by gotten.
	SimpleService struct {
		// BUGFIX: struct-tag key/value pairs must be separated by spaces, not
		// by ';' — reflect.StructTag cannot parse `method:"GET";path:"..."`,
		// so the path key was unreadable by gotten's tag inspection.
		GetItems func(SimpleParams) (gotten.Response, error) `method:"GET" path:"itemType/{id}"`
	}
)

var (
	// creator is the shared gotten client builder for the sample API.
	creator, err = gotten.NewBuilder().
		SetBaseUrl("https://api.sample.com").
		AddCookie(&http.Cookie{Name: "clientcookieid", Value: "121", Expires: time.Now().Add(111 * time.Second)}).
		Build()

	// simpleServiceImpl is populated by creator.Impl in init.
	simpleServiceImpl = new(SimpleService)
)

func init() {
	// Wire the declared endpoints to real HTTP calls; panic on mis-declared tags.
	err := creator.Impl(simpleServiceImpl)
	if err != nil {
		panic(err)
	}
}
package messages

// postRequestModel models the JSON body of a POST request in the messages
// package: the message text plus the id of the user it belongs to.
type postRequestModel struct {
	Content string `json:"content"` // message text
	UserID  string `json:"user_id"` // id of the associated user
}
package dictd /* Server encapsulation. * * This contains a bundle of useful helpers, as well as a few data structures * to handle registered Databases and Commands. */ type Server struct { Name string Info string commands map[string]func(*Session, Command) } /* GetHandler returns a Command handler for the given dict.Command `command` */ func (s *Server) GetHandler(command *Command) func(*Session, Command) { name := command.Command if value, ok := s.commands[name]; ok { return value } return nil } /* Register a Command `handler` under name `name`. */ func (s *Server) RegisterHandler( name string, handler func(*Session, Command), ) { s.commands[name] = handler } /* Create a new server by name `name`. */ func NewServer(name string) Server { server := Server{ Name: name, Info: "", commands: map[string]func(*Session, Command){}, } registerDefaultHandlers(&server) return server }
package sse

import (
	"fmt"
	"net/http"
	"strings"
)

// Event is a single server-sent event. Id is optional (omitted when nil).
type Event struct {
	Id   *int
	Name string
	Data string
}

// Marshal renders the event in the text/event-stream wire format: an optional
// "id:" line, an "event:" line, one "data:" line per line of Data, and a
// terminating blank line.
func (e Event) Marshal() []byte {
	var b strings.Builder
	if e.Id != nil {
		fmt.Fprintf(&b, "id:%d\n", *e.Id)
	}
	b.WriteString("event:" + e.Name + "\n")
	for _, line := range strings.Split(e.Data, "\n") {
		b.WriteString("data:" + line + "\n")
	}
	b.WriteString("\n")
	return []byte(b.String())
}

// Connection is a stream of pre-marshalled SSE frames destined for one client.
type Connection chan []byte

// ServeHTTP streams frames received on the connection channel to the client
// until the channel is closed or the client disconnects. The channel is
// closed on exit (as in the original design — the sender must not close it).
func (c *Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	flusher, _ := w.(http.Flusher)
	defer close(*c)

	w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	if flusher != nil {
		flusher.Flush()
	}

	for {
		select {
		case event, ok := <-*c:
			if !ok {
				return
			}
			// Best-effort write: a failed write is followed by request-context
			// cancellation, which terminates the loop below.
			_, _ = w.Write(event)
			if flusher != nil {
				flusher.Flush()
			}
		case <-r.Context().Done():
			// BUGFIX: replaces the deprecated http.CloseNotifier type
			// assertion, which panics on ResponseWriters that don't implement
			// it (e.g. HTTP/2). The request context is cancelled when the
			// client goes away.
			return
		}
	}
}
/* * Copyright 2017 StreamSets Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package expression import ( "github.com/streamsets/datacollector-edge/api" "github.com/streamsets/datacollector-edge/container/common" "github.com/streamsets/datacollector-edge/container/creation" "github.com/streamsets/datacollector-edge/container/execution/runner" "strings" "testing" ) const ( EXPRESSION_PROCESSOR_CONFIGS = "expressionProcessorConfigs" HEADER_ATTRIBUTE_CONFIGS = "headerAttributeConfigs" FIELD_TO_SET = "fieldToSet" ATTRIBUTE_TO_SET = "attributeToSet" ) func getStageContext() (*common.StageContextImpl, *common.ErrorSink) { stageConfig := common.StageConfiguration{} stageConfig.Library = LIBRARY stageConfig.StageName = STAGE_NAME stageConfig.InstanceName = "expr1" stageConfig.Configuration = make([]common.Config, 2) fieldValueConfigs := []interface{}{} fieldValueConfigs = append(fieldValueConfigs, map[string]interface{}{ FIELD_TO_SET: "/d", EXPRESSION: "${math:ceil(record:value('/a'))}", }) fieldValueConfigs = append(fieldValueConfigs, map[string]interface{}{ FIELD_TO_SET: "/e", EXPRESSION: "${math:floor(record:value('/b'))}", }) headerAttributeConfigs := []interface{}{} headerAttributeConfigs = append(headerAttributeConfigs, map[string]interface{}{ ATTRIBUTE_TO_SET: "eval", EXPRESSION: "${str:toUpper(record:value('/c'))}", }) stageConfig.Configuration[0] = common.Config{ Name: EXPRESSION_PROCESSOR_CONFIGS, Value: fieldValueConfigs, } stageConfig.Configuration[1] = common.Config{ 
Name: HEADER_ATTRIBUTE_CONFIGS, Value: headerAttributeConfigs, } errorSink := common.NewErrorSink() return &common.StageContextImpl{ StageConfig: &stageConfig, Parameters: nil, ErrorSink: errorSink, ErrorRecordPolicy: common.ErrorRecordPolicyStage, }, errorSink } func TestExpressionProcessor_Success(t *testing.T) { stageContext, errSink := getStageContext() stageBean, err := creation.NewStageBean(stageContext.StageConfig, stageContext.Parameters) if err != nil { t.Fatal(err) } stageInstance := stageBean.Stage.(*ExpressionProcessor) if stageInstance == nil { t.Fatal("Failed to create stage instance") } issues := stageInstance.Init(stageContext) if len(issues) != 0 { t.Error(issues[0].Message) } defer stageInstance.Destroy() records := make([]api.Record, 1) records[0], _ = stageContext.CreateRecord("abc", map[string]interface{}{"a": float64(2.55), "b": float64(3.55), "c": "random"}) batch := runner.NewBatchImpl("random", records, "randomOffset") batchMaker := runner.NewBatchMakerImpl(runner.StagePipe{}) err = stageInstance.Process(batch, batchMaker) if err != nil { t.Fatal("Error when processing batch " + err.Error()) } records = batchMaker.GetStageOutput() record := records[0] dValue, err := record.Get("/d") if err != nil { t.Error("Error when getting value of /d " + err.Error()) } if dValue.Value.(float64) != float64(3) { t.Errorf("Error in expression processor when evaluating /d, Expected : 6. Actual:%d", dValue.Value) } eValue, err := record.Get("/e") if err != nil { t.Error("Error when getting value of /e " + err.Error()) } if eValue.Value.(float64) != float64(3) { t.Errorf("Error in expression processor when evaluating /e, Expected : 5. Actual:%d", eValue.Value) } headers := record.GetHeader().GetAttributes() header, ok := headers["eval"] if !ok || strings.Compare(header, "RANDOM") != 0 { t.Errorf("Error in expression processor when evaluating header eval, Expected : random. 
Actual:%s", header) } if errSink.GetTotalErrorRecords() != 0 { t.Fatal("There should be no error records in error sink") } } func TestExpressionProcessor_Error(t *testing.T) { stageContext, errSink := getStageContext() stageContext.StageConfig.Configuration[1] = common.Config{ Name: HEADER_ATTRIBUTE_CONFIGS, Value: []interface{}{map[string]interface{}{ ATTRIBUTE_TO_SET: "eval", EXPRESSION: "${unsupport:unsupported()}", }}, } stageBean, err := creation.NewStageBean(stageContext.StageConfig, stageContext.Parameters) stageInstance := stageBean.Stage.(*ExpressionProcessor) if stageInstance == nil { t.Fatal("Failed to create stage instance") } issues := stageInstance.Init(stageContext) if len(issues) != 0 { t.Error(issues[0].Message) } defer stageInstance.Destroy() records := make([]api.Record, 1) records[0], _ = stageContext.CreateRecord("abc", map[string]interface{}{"a": float64(2.55), "b": float64(3.55), "c": "random"}) batch := runner.NewBatchImpl("random", records, "randomOffset") batchMaker := runner.NewBatchMakerImpl(runner.StagePipe{}) err = stageInstance.Process(batch, batchMaker) if err != nil { t.Fatal("Error when processing batch " + err.Error()) } if len(batchMaker.GetStageOutput()) != 0 { t.Fatal("The record should not be in batch maker and should have router to error") } if errSink.GetTotalErrorRecords() != 1 { t.Fatal("There should be one error record in error sink") } }
// Licensed to Apache Software Foundation (ASF) under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Apache Software Foundation (ASF) licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // package util import ( "fmt" "path/filepath" "testing" ) func TestResolveAbs(t *testing.T) { configAbsPath, _ := filepath.Abs("config.go") tests := []struct { CfgPath string Path string Exists bool }{ { configAbsPath, "./config.go", true, }, { "not_exist/e2e.yaml", configAbsPath, true, }, { "config.go", "./config.go", true, }, { "./config.go", "config.go", true, }, { "./config.go", "go.mod", false, }, { "./config.go", "", false, }, { "../../examples/compose/e2e.yaml", "env", true, }, { "../../examples/compose/e2e.yaml", "./env", true, }, { "not_exists/e2e.yaml", "./config.go", false, }, { "not_exists/e2e.yaml", "config.go", false, }, } for _, tt := range tests { t.Run(fmt.Sprintf("cfg path: %s, verify path: %s", tt.CfgPath, tt.Path), func(t *testing.T) { CfgFile = tt.CfgPath result := ResolveAbs(tt.Path) if PathExist(result) != tt.Exists { t.Errorf("path %s not exists", result) } }) } }
// testjson2 project doc.go

/*
testjson2 document.

This file exists only to hold package-level documentation for the testjson2
command; it intentionally contains no code.
*/
package main
package bridge

import (
	"fmt"
	"testing"
)

// TestBridgeStep1 exercises the bridge between person types (worker,
// student) and cloth types (uniform, shirt): each person is dressed and
// undressed with the shared cloth implementations.
func TestBridgeStep1(t *testing.T) {
	uniform := NewUniform()
	shirt := NewShirt()

	fmt.Println("-----Start------")
	worker := NewWorker()
	worker.SetCloth(uniform)
	worker.Dress()
	worker.Undress()

	worker.SetCloth(shirt)
	worker.Dress()
	worker.Undress()
	fmt.Println("-----End------")

	fmt.Println("-----Start------")
	student := NewStudent()
	student.SetCloth(uniform)
	student.Dress()
	student.Undress()
	fmt.Println("-----End------")
}
// Copyright © 2018 Inanc Gumus // Learn Go Programming Course // License: https://creativecommons.org/licenses/by-nc-sa/4.0/ // // For more tutorials : https://learngoprogramming.com // In-person training : https://www.linkedin.com/in/inancgumus/ // Follow me on twitter: https://twitter.com/inancgumus package main import ( "fmt" "os" ) // STEPS: // // Compile it by typing: // go build -o myprogram // // Then run it by typing: // ./myprogram // // If you're on Windows, then type: // myprogram func main() { fmt.Println(os.Args[0]) }
package user

import "fmt"

// UseCase is the user-validation contract consumed by callers.
type UseCase interface {
	ValidateUser(email, password string) error
}

// Service is the default UseCase implementation.
type Service struct{}

// NewService returns a ready-to-use Service.
func NewService() *Service {
	return &Service{}
}

// ValidateUser checks the given credentials.
//
// NOTE(review): the current stub only rejects the hard-coded email when the
// password is wrong; every other email passes validation — presumably
// intentional for the tutorial, but worth confirming.
func (s *Service) ValidateUser(email, password string) error {
	//@TODO create validation rules, using databases or something else
	if email != "eminetto@gmail.com" || password == "1234567" {
		return nil
	}
	return fmt.Errorf("Invalid user")
}
package pkg_test

import (
	"fmt"
	"reflect"
	"sync"
	"testing"
	"unsafe"
)

// StringToByteUnsafe reinterprets the string's backing memory as a []byte
// without copying. The returned slice aliases the (supposedly immutable)
// string data, which is exactly the hazard this test file demonstrates.
//
// NOTE(review): reflect.StringHeader/SliceHeader are deprecated since
// Go 1.20 in favour of unsafe.String/unsafe.SliceData — consider migrating.
func StringToByteUnsafe(s string) []byte {
	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	var sh reflect.SliceHeader
	sh.Data = strh.Data
	sh.Len = strh.Len
	sh.Cap = strh.Len
	return *(*[]byte)(unsafe.Pointer(&sh))
}

// Convert round-trips a byte slice through a string and back via the unsafe
// conversion; the string([]byte) conversion copies, the way back does not.
func Convert(s []byte) []byte {
	return StringToByteUnsafe(string(s))
}

// T wraps a byte slice so it can be passed around by value while still
// sharing its backing array.
type T struct {
	S []byte
}

// Copy wraps s in a T; the slice header is copied, the backing array is not.
func Copy(s []byte) T {
	return T{S: s}
}

// Mid re-slices b from a goroutine (synchronized by a WaitGroup) and prints
// the pointers/contents of a and b before and after, showing that the
// re-slice only affects the local slice header.
func Mid(a []byte, b []byte) []byte {
	fmt.Printf("%p %s %p %s\n", a, a, b, b)
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		b = b[1:2]
		wg.Done()
	}()
	wg.Wait()
	fmt.Printf("%p %s %p %s\n", a, a, b, b)
	return b
}

// TestSomething wires the helpers together: convert a string-backed slice,
// wrap it, and pass both views through Mid.
func TestSomething(t *testing.T) {
	str := "123"
	a := Convert([]byte(str))
	b := Copy(a)
	Mid(a, b.S)
}
package mssql

import (
	"database/sql"
)

// Repository is the SQL Server implementation of the repository, backed by a
// shared database/sql connection pool.
type Repository struct {
	// Connection is the database handle used for all of the repository's queries.
	Connection *sql.DB
}
package main

import "sync"

// Broadcaster fans every value sent on Sender out to all registered channels.
type Broadcaster struct {
	Registered []chan int
	Sender     chan int

	// mu guards Registered.
	// BUGFIX: Register appends to Registered while the fan-out goroutine
	// iterates over it — an unsynchronized read/write data race in the
	// original code.
	mu sync.Mutex
}

// NewBroadcaster starts the fan-out goroutine and returns a ready Broadcaster.
func NewBroadcaster() *Broadcaster {
	b := new(Broadcaster)
	b.Registered = make([]chan int, 0)
	b.Sender = make(chan int)
	go func() {
		for value := range b.Sender {
			// Snapshot the subscriber list under the lock so Register can run
			// concurrently with delivery.
			b.mu.Lock()
			subscribers := make([]chan int, len(b.Registered))
			copy(subscribers, b.Registered)
			b.mu.Unlock()
			for _, ch := range subscribers {
				// Deliver asynchronously so one slow receiver cannot block
				// delivery to the others (as in the original design).
				go func(c chan int) {
					c <- value
				}(ch)
			}
		}
	}()
	return b
}

// Register adds a new subscription and returns its receive channel.
func (b *Broadcaster) Register() <-chan int {
	ch := make(chan int)
	b.mu.Lock()
	b.Registered = append(b.Registered, ch)
	b.mu.Unlock()
	return ch
}

// Send returns the send side of the broadcaster.
func (b *Broadcaster) Send() chan<- int {
	return b.Sender
}
package main

import (
	"fmt"
	"time"

	tm "github.com/buger/goterm"
	"github.com/goburrow/modbus"
	"github.com/influxdata/influxdb/client/v2"
	log "github.com/sirupsen/logrus"
)

const (
	tsdb       = "powermeterdb"
	tsuser     = "poweruser"
	tspassword = "P@ssw0rd"

	// dtLayout is the Go reference layout matching the meter's date string.
	dtLayout = "2006-01-02 15:04:05"
)

var normal chan bool

// EnergyMeter is one sample of readings taken from the power meter.
type EnergyMeter struct {
	MODEL string  `json:"model,omitempty"`
	SN    string  `json:"sn,omitempty"`
	Freq  float64 `json:"freq,omitempty"`
	Uavg  float64 `json:"uavg,omitempty"`
	Iavg  float64 `json:"iavg,omitempty"`
	Psum  int32   `json:"psum,omitempty"`
	Qsum  int32   `json:"qsum,omitempty"`
	Ssum  int32   `json:"ssum,omitempty"`
	PFavg float64 `json:"pfavg,omitempty"`
	EA    float64 `json:"ea,omitempty"`
	ER    float64 `json:"er,omitempty"`
	CO2   float64 `json:"co2,omitempty"`
}

// readU16 reads one holding register and returns it as a 16-bit value.
func readU16(c modbus.Client, addr uint16) (int32, error) {
	r, err := c.ReadHoldingRegisters(addr, 1)
	if err != nil {
		return 0, err
	}
	return int32(r[0])<<8 | int32(r[1]), nil
}

// readS32 reads two consecutive holding registers and combines them
// big-endian into a signed 32-bit value.
func readS32(c modbus.Client, addr uint16) (int32, error) {
	r, err := c.ReadHoldingRegisters(addr, 2)
	if err != nil {
		return 0, err
	}
	return int32(r[0])<<24 | int32(r[1])<<16 | int32(r[2])<<8 | int32(r[3]), nil
}

// main polls the meter over Modbus RTU, renders the readings to the terminal
// and writes each sample to InfluxDB.
func main() {
	BKK, err := time.LoadLocation("Asia/Bangkok")
	if err != nil {
		log.Error(err)
		return
	}

	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     "http://localhost:8086",
		Username: tsuser,
		Password: tspassword,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	normal = make(chan bool, 1)

	SlaveId := byte(1)
	addr := uint16(0x59) // date/time block: 6 registers (Y M D h m s)

	// handler := modbus.NewTCPClientHandler("127.0.0.1:502")
	handler := modbus.NewRTUClientHandler("/dev/ttyUSB0")
	handler.BaudRate = 9600
	handler.DataBits = 8
	handler.Parity = "N"
	handler.StopBits = 2
	handler.SlaveId = SlaveId
	handler.Timeout = 1 * time.Second

	err = handler.Connect()
	if err != nil {
		fmt.Println(err)
	}
	defer handler.Close()

	mb := modbus.NewClient(handler)

	for {
		results, err := mb.ReadHoldingRegisters(addr, 6)
		if err != nil {
			fmt.Println(err)
			break
		}
		year := int32(results[0])<<8 | int32(results[1])
		month := int32(results[2])<<8 | int32(results[3])
		date := int32(results[4])<<8 | int32(results[5])
		hour := int32(results[6])<<8 | int32(results[7])
		minute := int32(results[8])<<8 | int32(results[9])
		second := int32(results[10])<<8 | int32(results[11])
		dtString := fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d", year, month, date, hour, minute, second)
		output := dtString

		freq, err := readU16(mb, 0x01)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n Freq   : %0.2f Hz", float64(freq)/100)

		avgU, err := readS32(mb, 0x02)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n U avg  : %0.1f V", float64(avgU)/10)

		avgLU, err := readS32(mb, 0x04)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n UL avg : %0.1f V", float64(avgLU)/10)

		avgI, err := readS32(mb, 0x06)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n I avg  : %0.3f A", float64(avgI)/1000)

		In, err := readS32(mb, 0x08)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n In     : %0.3f A", float64(In)/1000)

		Psum, err := readS32(mb, 0x0A)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n Psum   : %d W", Psum)

		Qsum, err := readS32(mb, 0x0C)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n Qsum   : %d VAR", Qsum)

		// BUGFIX: Ssum previously re-read 0x0C (the Qsum registers), so
		// apparent power always equalled reactive power. The register map
		// pattern (0x0A, 0x0C, ..., 0x10) points to 0x0E — TODO confirm
		// against the CPM meter's register documentation.
		Ssum, err := readS32(mb, 0x0E)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n Ssum   : %d VA", Ssum)

		PFavg, err := readS32(mb, 0x10)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n PF avg : %.3f VA", float64(PFavg)/1000)

		ea, err := readS32(mb, 0x12)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n Ea     : %0.1f kWh", float64(ea)/10)

		er, err := readS32(mb, 0x14)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n Er     : %0.1f kVARh", float64(er)/10)

		// Cost register (0x16) intentionally skipped, as in the original.

		co2, err := readS32(mb, 0x18)
		if err != nil {
			log.Println(err)
			break
		}
		output += fmt.Sprintf("\n CO2    : %0.1f kg", float64(co2)/10)

		// BUGFIX: ParseInLocation takes (layout, value, loc); the original
		// passed (value, layout, loc), so parsing always failed and every
		// sample was stamped with time.Now().
		datetime, err := time.ParseInLocation(dtLayout, dtString, BKK)
		if err != nil {
			datetime = time.Now()
		}

		data := EnergyMeter{
			MODEL: "CPM-20-A5V6-ADH",
			SN:    "1410170013-3500",
			Freq:  float64(freq) / 100,
			Uavg:  float64(avgU) / 10,
			Iavg:  float64(avgI) / 1000,
			Psum:  Psum,
			Qsum:  Qsum,
			Ssum:  Ssum,
			PFavg: float64(PFavg) / 1000,
			EA:    float64(ea) / 10,
			ER:    float64(er) / 10,
			CO2:   float64(co2) / 10,
		}
		writePoints(c, datetime, data)

		tm.Clear() // Clear current screen
		tm.MoveCursor(1, 1)
		tm.Print(output)
		tm.Flush() // Call it every time at the end of rendering
		// time.Sleep(time.Second)
	}
}

// writePoints writes one EnergyMeter sample as an "energylog" point tagged
// with the meter's model and serial number.
func writePoints(c client.Client, dt time.Time, data EnergyMeter) {
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  tsdb,
		Precision: "s",
	})
	if err != nil {
		log.Error(err)
	}
	fields := map[string]interface{}{
		"Freq":  data.Freq,
		"Uavg":  data.Uavg,
		"Iavg":  data.Iavg,
		"Psum":  data.Psum,
		"Qsum":  data.Qsum,
		"Ssum":  data.Ssum, // BUGFIX: Ssum was missing from the fields map
		"PFavg": data.PFavg,
		"EA":    data.EA,
		"ER":    data.ER,
		"CO2":   data.CO2,
	}
	tags := map[string]string{
		"Model": data.MODEL,
		"SN":    data.SN,
	}
	pt, err := client.NewPoint(
		"energylog",
		tags,
		fields,
		dt,
	)
	if err != nil {
		log.Fatal(err)
	}
	bp.AddPoint(pt)
	if err := c.Write(bp); err != nil {
		log.Fatal(err)
	}
}
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package cmd import ( "fmt" "strings" "github.com/spf13/cobra" "github.com/aws-controllers-k8s/dev-tools/pkg/config" "github.com/aws-controllers-k8s/dev-tools/pkg/repository" "github.com/aws-controllers-k8s/dev-tools/pkg/util" ) var ( optAddRepoType string ) func init() { addRepositoryCmd.PersistentFlags().StringVarP(&optAddRepoType, "type", "t", "controller", "repository type") } var addRepositoryCmd = &cobra.Command{ Use: "repository <service> ...", Aliases: []string{"repo", "repos", "repository", "repositories"}, RunE: addRepository, Args: cobra.MinimumNArgs(1), } func addRepository(cmd *cobra.Command, args []string) error { cfg, err := config.Load(ackConfigPath) if err != nil { return err } repoManager, err := repository.NewManager(cfg) if err != nil { return err } for _, service := range args { service = strings.ToLower(service) // Check it doesn't already exist in the configuration if util.InStrings(service, cfg.Repositories.Services) { fmt.Printf("repository for service %s has already been added\n", service) continue } _, err := repoManager.AddRepository(service, repository.GetRepositoryTypeFromString(optAddRepoType)) if err != nil { return err } ctx := cmd.Context() if err := repoManager.EnsureRepository(ctx, service); err != nil { return err } cfg.Repositories.Services = append(cfg.Repositories.Services, service) if err := config.Save(cfg, ackConfigPath); err != nil { return err } } return nil }
package upload

import (
	"os"
	"os/exec"

	"github.com/spf13/cobra"
	flag "github.com/spf13/pflag"

	"github.com/xeha-gmbh/homelab/proxmox/upload/api"
	"github.com/xeha-gmbh/homelab/shared"
)

var (
	output shared.MessagePrinter
)

// NewProxmoxUploadCommand builds the `upload` sub-command, which pushes a
// file to a Proxmox storage device. Flag parsing and the curl dependency
// check happen in PreRunE; the upload itself runs in RunE.
func NewProxmoxUploadCommand() *cobra.Command {
	payload := &ProxmoxUploadRequest{}

	cmd := &cobra.Command{
		Use:   "upload",
		Short: "upload file to Proxmox storage device",
		PreRunE: func(cmd *cobra.Command, args []string) error {
			cmd.SetOutput(os.Stdout)
			if err := cmd.ParseFlags(args); err != nil {
				return err
			}
			output = shared.WithConfig(cmd, &payload.ExtraArgs)
			if err := checkCurlIsOnPath(); err != nil {
				output.Fatal(shared.ErrDependency.ExitCode,
					"Dependency unmet. Cause: {{index .cause}}",
					map[string]interface{}{
						"event": "pre_failed",
						"cause": err.Error(),
					})
				return shared.ErrDependency
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			if err := payload.Upload(); err != nil {
				output.Fatal(shared.ErrOp.ExitCode,
					"Upload file {{index .file}} failed. Cause: {{index .cause}}",
					map[string]interface{}{
						"event": "upload_failed",
						"file":  payload.File,
						"cause": err.Error(),
					})
				return shared.ErrOp
			}
			output.Info("Upload file {{index .file}} is successful.", map[string]interface{}{
				"event": "upload_success",
				"file":  payload.File,
			})
			return nil
		},
	}

	payload.InjectExtraArgs(cmd)
	addProxmoxLoginCommandFlags(cmd.PersistentFlags(), payload)
	markProxmoxUploadCommandRequiredFlags(cmd)
	return cmd
}

// Mark required upload command flags
func markProxmoxUploadCommandRequiredFlags(cmd *cobra.Command) {
	for _, name := range []string{
		api.FlagNode,
		api.FlagFile,
	} {
		cmd.MarkPersistentFlagRequired(name)
		cmd.MarkFlagRequired(name)
	}
}

// Bind proxmox upload command flags to ProxmoxUploadRequest structure.
func addProxmoxLoginCommandFlags(flagSet *flag.FlagSet, payload *ProxmoxUploadRequest) {
	flagSet.StringVar(
		&payload.Node,
		api.FlagNode,
		"",
		"The Proxmox cluster node that the upload operation targets. Required.",
	)
	flagSet.StringVar(
		&payload.Storage,
		api.FlagStorage,
		"",
		"The storage device label to upload file to. "+
			"If not set, command will query the node specified by --node to match the first storage device that accepts the file format --format.",
	)
	flagSet.StringVar(
		&payload.File,
		api.FlagFile,
		"",
		"The absolute path to the file to upload. Required.",
	)
	flagSet.StringVar(
		&payload.Format,
		api.FlagFormat,
		api.DefaultFormat,
		"The format of the file specified.",
	)
}

// checkCurlIsOnPath verifies that the curl binary is reachable on $PATH.
func checkCurlIsOnPath() error {
	if _, err := exec.LookPath("curl"); err != nil {
		return err
	}
	return nil
}
/* * @lc app=leetcode.cn id=46 lang=golang * * [46] 全排列 */ package main import ( "fmt" ) // @lc code=start func backtracking(nums, res []int, ans *[][]int, used *[]bool) { numsLen := len(nums) if len(res) == numsLen { *ans = append(*ans, append([]int{}, res...)) } for i := 0; i < numsLen; i++ { if !(*used)[i] { (*used)[i] = true res = append(res, nums[i]) backtracking(nums, res, ans, used) res = res[:len(res)-1] (*used)[i] = false } } } func permute(nums []int) [][]int { ans := make([][]int, 0) used := make([]bool, len(nums)) backtracking(nums, []int{}, &ans, &used) return ans } // @lc code=end func main() { fmt.Println(permute([]int{1, 2, 3})) }
package envoy import ( core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/golang/protobuf/ptypes/wrappers" ) func headersToAdd(headers map[string]string) []*core.HeaderValueOption { var res []*core.HeaderValueOption for headerName, headerVal := range headers { header := &core.HeaderValueOption{ Header: &core.HeaderValue{ Key: headerName, Value: headerVal, }, Append: &wrappers.BoolValue{ // In Knative Serving, headers are set instead of appended. // Ref: https://github.com/knative/serving/pull/6366 Value: false, }, } res = append(res, header) } return res }
package config // package provides global config parameters in case I ever want to change them const TileSize = 40
package caaa

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document00100103 is the XML document wrapper for the ISO 20022
// caaa.001.001.03 message (AcceptorAuthorisationRequest).
type Document00100103 struct {
	XMLName xml.Name                         `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.001.001.03 Document"`
	Message *AcceptorAuthorisationRequestV03 `xml:"AccptrAuthstnReq"`
}

// AddMessage allocates the document's message payload and returns it so the
// caller can populate it.
func (d *Document00100103) AddMessage() *AcceptorAuthorisationRequestV03 {
	d.Message = new(AcceptorAuthorisationRequestV03)
	return d.Message
}

// The AcceptorAuthorisationRequest message is sent by an acceptor (or its agent) to the acquirer (or its agent) , to check with the issuer (or its agent) that the account associated to the card has the resources to fund the payment. This checking will include validation of the card data and any additional transaction data provided.
type AcceptorAuthorisationRequestV03 struct {

	// Authorisation request message management information.
	Header *iso20022.Header7 `xml:"Hdr"`

	// Information related to the authorisation request.
	AuthorisationRequest *iso20022.AcceptorAuthorisationRequest3 `xml:"AuthstnReq"`

	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType8 `xml:"SctyTrlr"`
}

// AddHeader allocates and returns the message-management header.
func (a *AcceptorAuthorisationRequestV03) AddHeader() *iso20022.Header7 {
	a.Header = new(iso20022.Header7)
	return a.Header
}

// AddAuthorisationRequest allocates and returns the authorisation request body.
func (a *AcceptorAuthorisationRequestV03) AddAuthorisationRequest() *iso20022.AcceptorAuthorisationRequest3 {
	a.AuthorisationRequest = new(iso20022.AcceptorAuthorisationRequest3)
	return a.AuthorisationRequest
}

// AddSecurityTrailer allocates and returns the MAC security trailer.
func (a *AcceptorAuthorisationRequestV03) AddSecurityTrailer() *iso20022.ContentInformationType8 {
	a.SecurityTrailer = new(iso20022.ContentInformationType8)
	return a.SecurityTrailer
}
// Copyright (c) 2019 Zededa, Inc. // SPDX-License-Identifier: Apache-2.0 package types import ( "github.com/satori/go.uuid" "github.com/stretchr/testify/assert" "testing" "time" ) var underlayUUID = uuid.UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} var overlayUUID = uuid.UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0xd4, 0xd4, 0xd4, 0xd4, 0x30, 0xc8} var appNetworkConfig = AppNetworkConfig{ OverlayNetworkList: []OverlayNetworkConfig{ {Network: overlayUUID}, }, UnderlayNetworkList: []UnderlayNetworkConfig{ {Network: underlayUUID}, }, } func TestIsIPv6(t *testing.T) { testMatrix := map[string]struct { config NetworkInstanceConfig expectedValue bool }{ "AddressTypeIPV6": { config: NetworkInstanceConfig{IpType: AddressTypeIPV6}, expectedValue: true, }, "AddressTypeCryptoIPV6": { config: NetworkInstanceConfig{IpType: AddressTypeCryptoIPV6}, expectedValue: true, }, "AddressTypeIPV4": { config: NetworkInstanceConfig{IpType: AddressTypeIPV4}, expectedValue: false, }, "AddressTypeCryptoIPV4": { config: NetworkInstanceConfig{IpType: AddressTypeCryptoIPV4}, expectedValue: false, }, "AddressTypeNone": { config: NetworkInstanceConfig{IpType: AddressTypeNone}, expectedValue: false, }, "AddressTypeLast": { config: NetworkInstanceConfig{IpType: AddressTypeLast}, expectedValue: false, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) isIPv6 := test.config.IsIPv6() assert.IsType(t, test.expectedValue, isIPv6) } } func TestGetOverlayConfig(t *testing.T) { testMatrix := map[string]struct { network uuid.UUID config AppNetworkConfig }{ "Overlay UUID": { network: overlayUUID, config: appNetworkConfig, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) config := test.config.getOverlayConfig(test.network) assert.IsType(t, test.config.OverlayNetworkList[0], *config) } } func TestGetUnderlayConfig(t *testing.T) { testMatrix := map[string]struct 
{ network uuid.UUID config AppNetworkConfig }{ "Underlay UUID": { network: underlayUUID, config: appNetworkConfig, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) config := test.config.getUnderlayConfig(test.network) assert.IsType(t, test.config.UnderlayNetworkList[0], *config) } } func TestIsNetworkUsed(t *testing.T) { var otherUUID = uuid.UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0xb8, 0xd4, 0x30, 0xc8} testMatrix := map[string]struct { network uuid.UUID expectedValue bool config AppNetworkConfig }{ "Overlay UUID": { network: overlayUUID, expectedValue: true, config: appNetworkConfig, }, "Underlay UUID": { network: underlayUUID, expectedValue: true, config: appNetworkConfig, }, "Other UUID": { network: otherUUID, expectedValue: false, config: appNetworkConfig, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) networkUsed := test.config.IsNetworkUsed(test.network) assert.Equal(t, test.expectedValue, networkUsed) } } // Make sure IsDPCUsable passes var usablePort = NetworkPortConfig{ IfName: "eth0", Name: "eth0", IsMgmt: true, DhcpConfig: DhcpConfig{Dhcp: DT_CLIENT}, } var usablePorts = []NetworkPortConfig{usablePort} var unusablePort1 = NetworkPortConfig{ IfName: "eth0", Name: "eth0", IsMgmt: false, DhcpConfig: DhcpConfig{Dhcp: DT_CLIENT}, } var unusablePorts1 = []NetworkPortConfig{unusablePort1} var unusablePort2 = NetworkPortConfig{ IfName: "eth0", Name: "eth0", IsMgmt: true, DhcpConfig: DhcpConfig{Dhcp: DT_NONE}, } var unusablePorts2 = []NetworkPortConfig{unusablePort2} var mixedPorts = []NetworkPortConfig{usablePort, unusablePort1, unusablePort2} func TestIsDPCUsable(t *testing.T) { n := time.Now() testMatrix := map[string]struct { devicePortConfig DevicePortConfig expectedValue bool }{ "Management and DT_CLIENT": { devicePortConfig: DevicePortConfig{ LastFailed: time.Time{}, LastSucceeded: n, Ports: usablePorts, }, expectedValue: true, }, "Mixture of 
usable and unusable ports": { devicePortConfig: DevicePortConfig{ LastFailed: time.Time{}, LastSucceeded: n, Ports: mixedPorts, }, expectedValue: true, }, "Not management and DT_CLIENT": { devicePortConfig: DevicePortConfig{ LastFailed: time.Time{}, LastSucceeded: n, Ports: unusablePorts1, }, expectedValue: false, }, "Management and DT_NONE": { devicePortConfig: DevicePortConfig{ LastFailed: time.Time{}, LastSucceeded: n, Ports: unusablePorts2, }, expectedValue: false, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) value := test.devicePortConfig.IsDPCUsable() assert.Equal(t, test.expectedValue, value) } } func TestIsDPCTestable(t *testing.T) { n := time.Now() testMatrix := map[string]struct { devicePortConfig DevicePortConfig expectedValue bool }{ "Difference is exactly 60 seconds": { devicePortConfig: DevicePortConfig{ LastFailed: n.Add(time.Second * 60), LastSucceeded: n, Ports: usablePorts, }, expectedValue: false, }, "Difference is 61 seconds": { devicePortConfig: DevicePortConfig{ LastFailed: n.Add(time.Second * 61), LastSucceeded: n, Ports: usablePorts, }, expectedValue: false, }, "Difference is 59 seconds": { devicePortConfig: DevicePortConfig{ LastFailed: n.Add(time.Second * 59), LastSucceeded: n, Ports: usablePorts, }, expectedValue: false, }, "LastFailed is 0": { devicePortConfig: DevicePortConfig{ LastFailed: time.Time{}, LastSucceeded: n, Ports: usablePorts, }, expectedValue: true, }, "Last Succeded is after Last Failed": { devicePortConfig: DevicePortConfig{ LastFailed: n, LastSucceeded: n.Add(time.Second * 61), Ports: usablePorts, }, expectedValue: true, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) value := test.devicePortConfig.IsDPCTestable() assert.Equal(t, test.expectedValue, value) } } func TestIsDPCUntested(t *testing.T) { n := time.Now() testMatrix := map[string]struct { devicePortConfig DevicePortConfig expectedValue bool }{ "Last failed and Last Succesed are 0": 
{ devicePortConfig: DevicePortConfig{ LastFailed: time.Time{}, LastSucceeded: time.Time{}, Ports: usablePorts, }, expectedValue: true, }, "Last Succesed is not 0": { devicePortConfig: DevicePortConfig{ LastFailed: time.Time{}, LastSucceeded: n, Ports: usablePorts, }, expectedValue: false, }, "Last failed is not 0": { devicePortConfig: DevicePortConfig{ LastFailed: time.Time{}, LastSucceeded: n, Ports: usablePorts, }, expectedValue: false, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) value := test.devicePortConfig.IsDPCUntested() assert.Equal(t, test.expectedValue, value) } } func TestWasDPCWorking(t *testing.T) { n := time.Now() testMatrix := map[string]struct { devicePortConfig DevicePortConfig expectedValue bool }{ "LastSucceeded is 0": { devicePortConfig: DevicePortConfig{ LastFailed: n, LastSucceeded: time.Time{}, Ports: usablePorts, }, expectedValue: false, }, "Last Succeded is after Last Failed": { devicePortConfig: DevicePortConfig{ LastFailed: n, LastSucceeded: n.Add(time.Second * 60), Ports: usablePorts, }, expectedValue: true, }, "Last Failed is after Last Succeeded": { devicePortConfig: DevicePortConfig{ LastFailed: n.Add(time.Second * 60), LastSucceeded: n, Ports: usablePorts, }, expectedValue: false, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) value := test.devicePortConfig.WasDPCWorking() assert.Equal(t, test.expectedValue, value) } } func TestGetPortByName(t *testing.T) { testMatrix := map[string]struct { deviceNetworkStatus DeviceNetworkStatus port string expectedValue NetworkPortStatus }{ "Test name is port one": { deviceNetworkStatus: DeviceNetworkStatus{ Ports: []NetworkPortStatus{ {Name: "port one"}, }, }, port: "port one", expectedValue: NetworkPortStatus{ Name: "port one", }, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) value := test.deviceNetworkStatus.GetPortByName(test.port) assert.Equal(t, test.expectedValue, 
*value) } } func TestGetPortByIfName(t *testing.T) { testMatrix := map[string]struct { deviceNetworkStatus DeviceNetworkStatus port string expectedValue NetworkPortStatus }{ "Test IfnName is port one": { deviceNetworkStatus: DeviceNetworkStatus{ Ports: []NetworkPortStatus{ {IfName: "port one"}, }, }, port: "port one", expectedValue: NetworkPortStatus{ IfName: "port one", }, }, } for testname, test := range testMatrix { t.Logf("Running test case %s", testname) value := test.deviceNetworkStatus.GetPortByIfName(test.port) assert.Equal(t, test.expectedValue, *value) } }
// DRUNKWATER TEMPLATE(add description and prototypes) // Question Title and Description on leetcode.com // Function Declaration and Function Prototypes on leetcode.com //105. Construct Binary Tree from Preorder and Inorder Traversal //Given preorder and inorder traversal of a tree, construct the binary tree. //Note: //You may assume that duplicates do not exist in the tree. //For example, given //preorder = [3,9,20,15,7] //inorder = [9,3,15,20,7] //Return the following binary tree: // 3 // / \ // 9 20 // / \ // 15 7 ///** // * Definition for a binary tree node. // * type TreeNode struct { // * Val int // * Left *TreeNode // * Right *TreeNode // * } // */ //func buildTree(preorder []int, inorder []int) *TreeNode { //} // Time Is Money
package controllers

import (
	"encoding/json"
	"fmt"
	"net/http"

	"../models"

	"github.com/gorilla/mux"
)

// CreateIngredient decodes an Ingredient from the JSON request body,
// persists it, and adds its calories to the owning Meal's running total.
//
// NOTE(review): the Decode error is ignored, and the meal total is updated
// before the Create error is checked — a failed insert still inflates the
// meal's calories. Confirm whether this ordering is intentional.
func CreateIngredient(w http.ResponseWriter, r *http.Request) {
	ingredient := &models.Ingredient{}
	json.NewDecoder(r.Body).Decode(ingredient)
	createdIngredient := db.Create(ingredient)
	var errMessage = createdIngredient.Error

	// Keep the parent meal's calorie total in sync with its ingredients.
	var meal models.Meal
	db.First(&meal, ingredient.MealID)
	// currentMealCalories := meal.Calories
	meal.Calories += ingredient.Calories
	db.Save(&meal) // update meal calories

	if createdIngredient.Error != nil {
		fmt.Println(errMessage) // logged only; the client still receives the result below
	}
	json.NewEncoder(w).Encode(createdIngredient)
}

// UpdateIngredient overwrites the ingredient identified by the {id} route
// parameter with fields decoded from the JSON request body, then returns
// the saved record.
//
// NOTE(review): unlike Create/Delete, this does not re-adjust the parent
// meal's calorie total when Calories changes — verify this asymmetry.
func UpdateIngredient(w http.ResponseWriter, r *http.Request) {
	ingredient := &models.Ingredient{}
	params := mux.Vars(r)
	var id = params["id"]
	db.First(&ingredient, id)
	json.NewDecoder(r.Body).Decode(ingredient)
	db.Save(&ingredient)
	json.NewEncoder(w).Encode(&ingredient)
}

// DeleteIngredient removes the ingredient identified by the {id} route
// parameter and subtracts its calories from the owning meal's total.
func DeleteIngredient(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	var id = params["id"]
	var ingredient models.Ingredient
	db.First(&ingredient, id)

	// Reverse this ingredient's calorie contribution before deleting it.
	var meal models.Meal
	db.First(&meal, ingredient.MealID)
	meal.Calories -= ingredient.Calories
	db.Save(&meal)

	db.Delete(&ingredient)
	json.NewEncoder(w).Encode("Ingredient deleted")
}

// GetIngredient returns the ingredient identified by the {id} route
// parameter as JSON.
func GetIngredient(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	var id = params["id"]
	var ingredient models.Ingredient
	db.First(&ingredient, id)
	json.NewEncoder(w).Encode(&ingredient)
}
package main

import "fmt"

// main0101 demonstrates array declaration, element assignment, and the
// different ways of iterating over an array.
func main0101() {
	// An array is a fixed-length sequence of values that all share one type.
	// Declaration form: var name [length]Type
	var a [10]int
	//fmt.Println(len(a))
	fmt.Println(a) // elements default to the zero value (0 for int)

	// The following forms do not compile:
	//var arr int = 10
	//var a [n]int // the length must be a constant

	// Element assignment by index:
	//a[0] = 1
	//a[1] = 2
	//a[2] = 3
	//a[3] = 4
	//fmt.Println(a)
	//fmt.Println(a[0])
	//fmt.Println(a[1])

	// Indexing past the end is an out-of-range error:
	//fmt.Println(a[10])

	// Fill and print the array with a classic for loop:
	for i := 0; i < 10; i++ {
		a[i] = i + 1
	}
	fmt.Println(a)
	for i := 0; i < 10; i++ {
		fmt.Println(a[i])
	}
	// range with the blank identifier discards the index:
	for _, v := range a {
		fmt.Println(v)
	}
	// Arrays may hold other element types; a freshly declared array prints
	// its zero values:
	//var a [10]float64 // prints zeros
	//var a [10]string // prints empty strings
	//var a [10]bool // prints false
}

// main0102 demonstrates the various array initialization forms.
func main0102() {
	// 1. Full initialization:
	var a [5]int = [5]int{1, 2, 3, 4, 5}
	fmt.Println(a)
	// Type inference:
	b := [5]int{1, 2, 3, 4, 5}
	fmt.Println(b)
	// Partial initialization — the remaining elements default to zero:
	c := [5]int{1, 2, 3}
	fmt.Println(c)
	// Initializing specific indices only:
	d := [5]int{2: 10, 4: 20}
	fmt.Println(d)
	// ... lets the initializer determine the length:
	f := [...]int{1, 2, 3}
	fmt.Println(len(f))
}

// main demonstrates common pitfalls: constant lengths, index bounds,
// assignment between arrays, and the array/first-element address identity.
func main() {
	// The array length must be a constant.
	var arr [5]int = [5]int{1, 2, 3, 4, 5}

	// Out-of-range indices do not compile:
	//arr[6] = 123 //err
	//arr[-1] = 123 //err

	// The array name denotes the whole array, not an assignable scalar:
	//arr = 123 //err

	// Two arrays of identical type and length are assignable:
	//arr1 := arr
	//
	//fmt.Println(arr)
	//fmt.Println(arr1)
	//
	//fmt.Printf("%T\n",arr)

	// The array's address equals the address of its first element;
	// subsequent elements are laid out contiguously after it.
	fmt.Printf("%p\n", &arr)
	fmt.Printf("%p\n", &arr[0])
	fmt.Printf("%p\n", &arr[1])
	fmt.Printf("%p\n", &arr[2])
}
/*
Copyright 2016 Stanislav Liberman

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flyweight

import (
	"testing"

	"github.com/lirm/aeron-go/aeron/atomic"
)

// StringFly is a test flyweight containing a single string field.
type StringFly struct {
	FWBase
	s StringField
}

// Wrap maps the flyweight onto buf starting at offset and records the
// number of bytes consumed via SetSize.
func (m *StringFly) Wrap(buf *atomic.Buffer, offset int) Flyweight {
	pos := offset
	pos += m.s.Wrap(buf, pos, m, false)
	m.SetSize(pos - offset)
	return m
}

// TestStringFlyweight verifies that a string set through StringField
// occupies len(str)+4 bytes (presumably a 4-byte length prefix — confirm in
// StringField) and reads back unchanged.
func TestStringFlyweight(t *testing.T) {
	str := "Hello worlds!"
	buf := atomic.MakeBuffer(make([]byte, 128), 128)

	// TODO Test aligned reads
	var fw StringFly
	fw.Wrap(buf, 0)
	fw.s.Set(str)
	t.Logf("%v", fw)
	if 4+len(str) != fw.Size() {
		t.Error("Expected length", 4+len(str), "have", fw.Size())
	}
	if str != fw.s.Get() {
		t.Error("Got", fw.s.Get(), "instead of", str)
	}
}

// PaddedFly mixes fixed-width integer fields with explicit Padding fields.
type PaddedFly struct {
	FWBase
	l1   Int64Field
	i1   Int32Field
	pad  Padding
	i2   Int32Field
	pad2 Padding
}

// Wrap maps the flyweight onto buf; the Padding fields reserve space
// according to the size/alignment arguments passed to Padding.Wrap
// (semantics defined by the flyweight package).
func (m *PaddedFly) Wrap(buf *atomic.Buffer, offset int) Flyweight {
	pos := offset
	pos += m.l1.Wrap(buf, pos)
	pos += m.i1.Wrap(buf, pos)
	pos += m.pad.Wrap(buf, pos, 64, 64)
	pos += m.i2.Wrap(buf, pos)
	pos += m.pad2.Wrap(buf, pos, 128, 64)
	m.SetSize(pos - offset)
	return m
}

// TestPadding_Wrap checks that the padded layout occupies exactly 192 bytes.
func TestPadding_Wrap(t *testing.T) {
	buf := atomic.MakeBuffer(make([]byte, 256), 256)

	var fw PaddedFly
	fw.Wrap(buf, 0)
	if fw.Size() != 192 {
		t.Logf("fw size: %d", fw.size)
		t.Fail()
	}
}
package ydySqlParser import ( "fmt" "github.com/youtube/vitess/go/vt/sqlparser" "strings" ) func BuildNewSql(sql string) string { stmt, err := sqlparser.Parse(sql) if err != nil { fmt.Println(err) return sql } switch v := stmt.(type) { case *sqlparser.Select: new_subquery := &sqlparser.Subquery{Select: v} new_subquery = Subquery(new_subquery) // buf1 := sqlparser.NewTrackedBuffer(nil) new_subquery.Select.Format(buf1) return buf1.String() } return "" } func Subquery(v *sqlparser.Subquery) *sqlparser.Subquery { //range select field for i, vv := range v.Select.(*sqlparser.Select).SelectExprs { switch vvv := vv.(type) { case *sqlparser.StarExpr: //如果是 * 不再支持 v.Select.(*sqlparser.Select).SelectExprs[i] = &sqlparser.AliasedExpr{Expr: sqlparser.NewStrVal([]byte("invalid field *")), As: sqlparser.NewColIdent("")} case *sqlparser.AliasedExpr: // switch e := vvv.Expr.(type) { case *sqlparser.Subquery: e = Subquery(e) case *sqlparser.FuncExpr: //字段使用方法 //如果不是insert 就要过滤下 if strings.ToLower(e.Name.String()) != "insert" { // e = FuncExpr(e) } e = FuncExpr(e) case *sqlparser.ColName: // fmt.Printf("--val %#v \n", e.Name.String()) //关键字段不能使用 As if keywordsFilter(e.Name.String()) && len(vvv.As.String()) > 0 { // // // v.Select.(*sqlparser.Select).SelectExprs[i] = &sqlparser.AliasedExpr{Expr: sqlparser.NewStrVal([]byte("invalid field *")), As: sqlparser.NewColIdent("")} // vvv.As = sqlparser.NewColIdent("") }else{ e = ColName(e) } default: fmt.Printf("--val %#v \n", e) } } } //range from sql for _, vv := range v.Select.(*sqlparser.Select).From { switch vvv := vv.(type) { case *sqlparser.AliasedTableExpr: // fmt.Printf("vvv %#v", vvv) switch e := vvv.Expr.(type) { case *sqlparser.Subquery: e = Subquery(e) // // case *sqlparser.FuncExpr: // // e = FuncExpr(e) // // case *sqlparser.ColName: // // e = ColName(e) // default: // fmt.Printf("--val %#v \n", e) } // buf2 := sqlparser.NewTrackedBuffer(nil) // vvv.Expr.Format(buf2) // // source_table := buf2.String() // // target_table, _ := 
sqlparser.Parse("select * from test") // // new_select := target_table.(*sqlparser.Select) // // new_subquery := &sqlparser.Subquery{Select: new_select} // // vvv.Expr = new_subquery } } return v } func FuncExpr(e *sqlparser.FuncExpr) *sqlparser.FuncExpr { // fun_name := strings.ToLower(e.Name.String()) // // //禁止字段使用以下函数 // if t := func(str string) bool { // funlist := []string{"left", "right", "elt", "replace", "insert", "substring", "CONCAT", "BIN", "oct", "hex", "ASCII"} // for _, f := range funlist { // if f == fun_name { // return true // } // } // return false // }(fun_name); t { // // } for i, ee := range e.Exprs { // fmt.Printf("ee : %#v \n", ee) switch eee := ee.(type) { case *sqlparser.AliasedExpr: switch eeee := eee.Expr.(type) { case *sqlparser.Subquery: eeee = Subquery(eeee) case *sqlparser.FuncExpr: eeee = FuncExpr(eeee) case *sqlparser.ColName: eeee = ColName(eeee) // fmt.Printf("ee, %s, %s \n", e.Name.CompliantName(), eeee.Name.String()) //禁止字段使用 函数 if keywordsFilter(eeee.Name.String()) { e.Exprs[i] = &sqlparser.AliasedExpr{Expr: sqlparser.NewStrVal([]byte(eeee.Name.String() + " field not use func")), As: sqlparser.NewColIdent("")} } default: } // case *sqlparser.JoinTableExpr: } } return e } func ColName(c *sqlparser.ColName) *sqlparser.ColName { // fmt.Printf("===> %#v ,\n", c.Name) // if c.Name.String() == "abc" { // // _colident := sqlparser.NewColIdent("md5(abcd)") // // c.Name = _colident // } return c } func keywordsFilter(str string) bool { fieldList := []string{ "mobile", "u_mobile", "b_mobile", "bu_mobile", "link_mobile", "link2_mobile", "emergency_mobile", "link2_mate_mobile", "customer_verification", // "id_card", "b_id_card", "link_id_card", "link2_id_card", "link2_mate_id_card", // "bank_card_one", "bank_card_two", "bank_card", "b_bank_card", } for _, _f := range fieldList { if str == _f { return true } } return false }
// gofun.go
package main

// int a;
// typedef void (*cb)(char* data);
// extern void callCb(cb callback, char* extra, char* arg);
import "C" // "C" is a pseudo-package: the comment block directly above is the cgo preamble, compiled as C; its names are reachable from Go with the C. prefix (see the cgo docs).

import "time"

// hello returns "hello peter:::" followed by the C string argument.
// The caller owns (and must free) the C string allocated by C.CString.
//
//export hello
func hello(arg *C.char) *C.char {
	//name := gjson.Get(arg, "name")
	//return "hello" + name.String()
	return C.CString("hello peter:::" + C.GoString(arg))
}

// helloP invokes the supplied C callback twice ("one", then — after a
// one-second pause — "two") and finally returns a greeting built from arg.
// The //export annotation below exposes this function in the shared library.
//
//export helloP
func helloP(arg *C.char, cb C.cb, extra *C.char) *C.char {
	C.callCb(cb, extra, C.CString("one"))
	time.Sleep(time.Second)
	C.callCb(cb, extra, C.CString("two"))
	return C.CString("hello peter:::" + C.GoString(arg))
}

// main is required for package main to build (e.g. as a C shared library);
// here it only prints a marker.
func main() {
	println("go main func")
}
package middlewares import ( "regexp" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/authelia/authelia/v4/internal/configuration/schema" ) func TestNewPasswordPolicyProvider(t *testing.T) { testCases := []struct { desc string have schema.PasswordPolicy expected PasswordPolicyProvider }{ { desc: "ShouldReturnUnconfiguredProvider", have: schema.PasswordPolicy{}, expected: &StandardPasswordPolicyProvider{}, }, { desc: "ShouldReturnProviderWhenZxcvbn", have: schema.PasswordPolicy{ZXCVBN: schema.PasswordPolicyZXCVBN{Enabled: true, MinScore: 10}}, expected: &ZXCVBNPasswordPolicyProvider{minScore: 10}, }, { desc: "ShouldReturnConfiguredProviderWithMin", have: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 8}}, expected: &StandardPasswordPolicyProvider{min: 8}, }, { desc: "ShouldReturnConfiguredProviderWitHMinMax", have: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 8, MaxLength: 100}}, expected: &StandardPasswordPolicyProvider{min: 8, max: 100}, }, { desc: "ShouldReturnConfiguredProviderWithMinLowercase", have: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 8, RequireLowercase: true}}, expected: &StandardPasswordPolicyProvider{min: 8, patterns: []regexp.Regexp{*regexp.MustCompile(`[a-z]+`)}}, }, { desc: "ShouldReturnConfiguredProviderWithMinLowercaseUppercase", have: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 8, RequireLowercase: true, RequireUppercase: true}}, expected: &StandardPasswordPolicyProvider{min: 8, patterns: []regexp.Regexp{*regexp.MustCompile(`[a-z]+`), *regexp.MustCompile(`[A-Z]+`)}}, }, { desc: "ShouldReturnConfiguredProviderWithMinLowercaseUppercaseNumber", have: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 8, RequireLowercase: true, RequireUppercase: true, RequireNumber: true}}, expected: 
&StandardPasswordPolicyProvider{min: 8, patterns: []regexp.Regexp{*regexp.MustCompile(`[a-z]+`), *regexp.MustCompile(`[A-Z]+`), *regexp.MustCompile(`[0-9]+`)}}, }, { desc: "ShouldReturnConfiguredProviderWithMinLowercaseUppercaseSpecial", have: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 8, RequireLowercase: true, RequireUppercase: true, RequireSpecial: true}}, expected: &StandardPasswordPolicyProvider{min: 8, patterns: []regexp.Regexp{*regexp.MustCompile(`[a-z]+`), *regexp.MustCompile(`[A-Z]+`), *regexp.MustCompile(`[^a-zA-Z0-9]+`)}}, }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { actual := NewPasswordPolicyProvider(tc.have) assert.Equal(t, tc.expected, actual) }) } } func TestPasswordPolicyProvider_Validate(t *testing.T) { testCases := []struct { desc string config schema.PasswordPolicy have []string expected []error }{ { desc: "ShouldValidateAllPasswords", config: schema.PasswordPolicy{}, have: []string{"a", "1", "a really str0ng pass12nm3kjl12word@@#4"}, expected: []error{nil, nil, nil}, }, { desc: "ShouldValidatePasswordMinLength", config: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 8}}, have: []string{"a", "b123", "1111111", "aaaaaaaa", "1o23nm1kio2n3k12jn"}, expected: []error{errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, nil, nil}, }, { desc: "ShouldValidatePasswordMaxLength", config: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MaxLength: 30}}, have: []string{ "a1234567894654wkjnkjasnskjandkjansdkjnas", "012345678901234567890123456789a", "0123456789012345678901234567890123456789", "012345678901234567890123456789", "1o23nm1kio2n3k12jn", }, expected: []error{errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, nil, nil}, }, { desc: "ShouldValidatePasswordAdvancedLowerUpperMin8", config: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 
8, RequireLowercase: true, RequireUppercase: true}}, have: []string{"a", "b123", "1111111", "aaaaaaaa", "1o23nm1kio2n3k12jn", "ANJKJQ@#NEK!@#NJK!@#", "qjik2nkjAkjlmn123"}, expected: []error{errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, nil}, }, { desc: "ShouldValidatePasswordAdvancedAllMax100Min8", config: schema.PasswordPolicy{Standard: schema.PasswordPolicyStandard{Enabled: true, MinLength: 8, MaxLength: 100, RequireLowercase: true, RequireUppercase: true, RequireNumber: true, RequireSpecial: true}}, have: []string{ "a", "b123", "1111111", "aaaaaaaa", "1o23nm1kio2n3k12jn", "ANJKJQ@#NEK!@#NJK!@#", "qjik2nkjAkjlmn123", "qjik2n@jAkjlmn123", "qjik2n@jAkjlmn123qjik2n@jAkjlmn123qjik2n@jAkjlmn123qjik2n@jAkjlmn123qjik2n@jAkjlmn123qjik2n@jAkjlmn123", }, expected: []error{ errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, errPasswordPolicyNoMet, nil, errPasswordPolicyNoMet, }, }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { require.Equal(t, len(tc.have), len(tc.expected)) for i := 0; i < len(tc.have); i++ { provider := NewPasswordPolicyProvider(tc.config) t.Run(tc.have[i], func(t *testing.T) { assert.Equal(t, tc.expected[i], provider.Check(tc.have[i])) }) } }) } }
package gobot

// doTest runs a crawl of golang.org: it builds a Site rooted at that base
// URL and walks it with BreadthFirst, using the package's Crawl function to
// fetch each page. Site, BreadthFirst and Crawl are defined elsewhere in
// this package.
func doTest() {
	site := Site{BaseUrl: "http://golang.org"}
	urls := []string{site.BaseUrl}
	site.BreadthFirst(Crawl, urls)
}
package sheets import ( "bufio" "os" "fmt" "io" "strings" retry "github.com/avast/retry-go" "github.com/pkg/errors" "google.golang.org/api/googleapi" sheets "google.golang.org/api/sheets/v4" ) type Spreadsheet struct { Client *Client *sheets.Spreadsheet } type Sheet struct { *sheets.Sheet Spreadsheet *Spreadsheet Client *Client } func (s *Spreadsheet) Id() string { return s.SpreadsheetId } func (s *Spreadsheet) Url() string { return s.SpreadsheetUrl } func (s *Spreadsheet) GetSheet(title string) *Sheet { query := strings.ToLower(title) for _, sheet := range s.Sheets { lowerTitle := strings.ToLower(sheet.Properties.Title) if lowerTitle == query { return &Sheet{sheet, s, s.Client} } } return nil } func (s *Spreadsheet) DeleteSheet(title string) error { query := strings.ToLower(title) for _, sheet := range s.Sheets { lowerTitle := strings.ToLower(sheet.Properties.Title) if lowerTitle == query { _, err := s.DoBatch(&sheets.Request{ DeleteSheet: &sheets.DeleteSheetRequest{ SheetId: sheet.Properties.SheetId, }, }) return err } } return errors.New("sheet does not exist") } func (s *Spreadsheet) DuplicateSheet(title, newTitle string) (*Sheet, error) { origin := s.GetSheet(title) if origin == nil { return nil, errors.New("origin sheet does not exist") } alreadyExists := s.GetSheet(newTitle) if alreadyExists != nil { return nil, errors.New("destination sheet already exist") } var maxIndex int64 for _, sheet := range s.Sheets { if sheet.Properties.Index > maxIndex { maxIndex = sheet.Properties.Index } } _, err := s.DoBatch(&sheets.Request{ DuplicateSheet: &sheets.DuplicateSheetRequest{ InsertSheetIndex: maxIndex + 1, NewSheetName: newTitle, SourceSheetId: origin.Properties.SheetId, }, }) if err != nil { if !isFakeDuplicateSheetError(err) { return nil, errors.Wrap(err, "couldn't duplicate sheet") } // Need to make sure that we've got the latest state of the sheet currentSheet, err := s.Client.GetSpreadsheet(s.Id()) if err != nil { return nil, errors.Wrap(err, "error refreshing 
spreadsheet after fake duplicate error") } s.Spreadsheet = currentSheet.Spreadsheet } duplicate := s.GetSheet(newTitle) if duplicate == nil { return nil, errors.New("duplicate sheet does not exist") } return duplicate, nil } func isFakeDuplicateSheetError(err error) bool { rerr, ok := err.(retry.Error) if !ok { return false } var ( firstErrorIsNotDuplicate = true hasSubsequentDuplicate = false ) for i, e := range rerr.WrappedErrors() { if gerr, ok := e.(*googleapi.Error); ok { if gerr.Code == 400 && strings.Contains(gerr.Message, "duplicateSheet") { if i == 0 { firstErrorIsNotDuplicate = false } else { hasSubsequentDuplicate = true } } } if e != nil { fmt.Fprintf(os.Stderr, "%d - %v\n", i, e) } } return firstErrorIsNotDuplicate && hasSubsequentDuplicate } func (s *Spreadsheet) AddProtectedRange(req *sheets.AddProtectedRangeRequest) error { _, err := s.DoBatch(&sheets.Request{ AddProtectedRange: req, }) if err != nil { if !isFakeProtectedRangeError(err) { return errors.Wrap(err, "couldn't add protected range to sheet") } } return nil } func isFakeProtectedRangeError(err error) bool { rerr, ok := err.(retry.Error) if !ok { return false } var ( firstErrorIsNotBadRequest = true hasSubsequentBadRequest = false ) for i, e := range rerr.WrappedErrors() { if gerr, ok := e.(*googleapi.Error); ok { if gerr.Code == 400 && strings.Contains(gerr.Message, "addProtectedRange") { if i == 0 { firstErrorIsNotBadRequest = false } else { hasSubsequentBadRequest = true } } } if e != nil { fmt.Fprintf(os.Stderr, "%d - %v\n", i, e) } } return firstErrorIsNotBadRequest && hasSubsequentBadRequest } func (s *Sheet) Title() string { return s.Properties.Title } func (s *Sheet) TopLeft() CellPos { return CellPos{0, 0} } func (s *Sheet) BottomRight() CellPos { if len(s.Data) == 0 { return s.TopLeft() } rows := 0 cols := 0 if len(s.Data[0].RowData) > 0 { rows = len(s.Data[0].RowData) - 1 if len(s.Data[0].RowData[0].Values) > 0 { cols = len(s.Data[0].RowData[0].Values) - 1 } } return CellPos{Row: 
rows, Col: cols} } func (s *Sheet) DataRange() SheetRange { return SheetRange{ SheetName: s.Properties.Title, Range: CellRange{ Start: s.TopLeft(), End: s.BottomRight(), }, } } func (s *Sheet) Update(data [][]string) error { return s.UpdateFromPosition(data, s.TopLeft()) } func (s *Sheet) GetContents() ([][]string, error) { if s.Data == nil { return nil, fmt.Errorf("No data fetched, only callable on sheets fetched with GetSpreadsheetWithData TODO: fetch!") } // Not sure where there would be multiple data data := s.Data[0] matrix := make([][]string, len(data.RowData)) for rowNum, rowData := range data.RowData { row := make([]string, len(rowData.Values)) for colIdx, value := range rowData.Values { if value.EffectiveValue != nil && value.EffectiveValue.StringValue != nil { row[colIdx] = *value.EffectiveValue.StringValue } else { row[colIdx] = "" } } matrix[rowNum] = row } return matrix, nil } func (s *Sheet) UpdateFromPosition(data [][]string, start CellPos) error { // Convert to interfaces to satisfy the Google API converted := make([][]interface{}, 0) for _, row := range data { converted = append(converted, strToInterface(row)) } return s.UpdateFromPositionIface(converted, start) } func (s *Sheet) UpdateFromPositionIface(data [][]interface{}, start CellPos) error { cellRange := start.RangeForData(data) sheetRange := fmt.Sprintf("%s!%s", s.Title(), cellRange.String()) // TODO: Resize sheet vRange := &sheets.ValueRange{ Range: sheetRange, Values: data, } req := s.Client.Sheets.Spreadsheets.Values.Update(s.Spreadsheet.Id(), sheetRange, vRange) req.ValueInputOption("USER_ENTERED") return googleRetry(func() error { _, err := req.Do(s.Client.options...) 
return err }) } type ValueUpdateRequest struct { Start CellPos Data [][]interface{} } func (s *Sheet) BatchUpdateFromPositionIface(requests ...*ValueUpdateRequest) error { if len(requests) == 0 { return nil } updates := sheets.BatchUpdateValuesRequest{ ValueInputOption: "USER_ENTERED", } for i := range requests { updates.Data = append(updates.Data, &sheets.ValueRange{ Range: fmt.Sprintf("%s!%s", s.Title(), requests[i].Start.RangeForData(requests[i].Data).String()), Values: requests[i].Data, }) } return googleRetry(func() error { _, err := s.Client.Sheets.Spreadsheets.Values.BatchUpdate(s.Spreadsheet.Id(), &updates).Do(s.Client.options...) return err }) } func (s *Sheet) Append(data [][]interface{}) error { req := s.Client.Sheets.Spreadsheets.Values.Append( s.Spreadsheet.Id(), s.DataRange().String(), &sheets.ValueRange{ Values: data, }, ) req.ValueInputOption("USER_ENTERED") return googleRetry(func() error { _, err := req.Do(s.Client.options...) return err }) } func (s *Spreadsheet) DoBatch(requests ...*sheets.Request) (*sheets.BatchUpdateSpreadsheetResponse, error) { if len(requests) == 0 { return nil, nil } batchUpdateReq := sheets.BatchUpdateSpreadsheetRequest{ Requests: requests, IncludeSpreadsheetInResponse: true, } var resp *sheets.BatchUpdateSpreadsheetResponse err := googleRetry(func() error { var rerr error resp, rerr = s.Client.Sheets.Spreadsheets.BatchUpdate(s.Id(), &batchUpdateReq).Do(s.Client.options...) 
return rerr }) if err != nil { return nil, err } s.Spreadsheet = resp.UpdatedSpreadsheet return resp, nil } func (s *Spreadsheet) AddSheet(title string) (*Sheet, error) { sheet := s.GetSheet(title) if sheet != nil { return sheet, nil } props := sheets.SheetProperties{Title: title} addReq := sheets.Request{AddSheet: &sheets.AddSheetRequest{Properties: &props}} _, err := s.DoBatch(&addReq) if err != nil { return nil, err } sheet = s.GetSheet(title) if sheet == nil { return nil, fmt.Errorf("Unable to get sheet after adding it: %s", title) } return sheet, nil } func (s *Spreadsheet) Share(email string) error { return s.Client.ShareFile(s.Id(), email) } func (s *Spreadsheet) ShareNotify(email string) error { return s.Client.ShareFileNotify(s.Id(), email) } func (s *Spreadsheet) ShareWithAnyone() error { return s.Client.ShareWithAnyone(s.Id()) } func TsvToArr(reader io.Reader, delimiter string) [][]string { scanner := bufio.NewScanner(reader) data := make([][]string, 0) for scanner.Scan() { pieces := strings.Split(scanner.Text(), delimiter) data = append(data, pieces) } return data } func strToInterface(strs []string) []interface{} { arr := make([]interface{}, len(strs)) for i, s := range strs { arr[i] = s } return arr }
package main

import (
	"fmt"
	"math/bits"
)

// main demonstrates bits.Len: the minimum number of bits needed to
// represent a value (31 needs 5 bits, 32 needs 6).
// Fix: the single-letter import aliases (`bit`, `f`) obscured the
// standard packages; use the packages' own names and gofmt layout.
func main() {
	var a uint = 31
	fmt.Printf("bits.Len(%d)=%d\n", a, bits.Len(a))
	a++
	fmt.Printf("bits.Len(%d)=%d\n", a, bits.Len(a))
}
package sol

import (
	"reflect"
	"testing"
)

// TestSol checks productExceptSelf2 against a table of fixed cases,
// including zero-containing and minimal-length inputs.
func TestSol(t *testing.T) {
	cases := []struct {
		in   []int
		want []int
	}{
		{in: []int{1, 2, 3, 4}, want: []int{24, 12, 8, 6}},
		{in: []int{-1, 1, 0, -3, 3}, want: []int{0, 0, 9, 0, 0}},
		{in: []int{1, 1}, want: []int{1, 1}},
		{in: []int{0, 0}, want: []int{0, 0}},
	}
	for _, c := range cases {
		got := productExceptSelf2(c.in)
		if !reflect.DeepEqual(c.want, got) {
			t.Fatalf("it should be: %v, but got: %v", c.want, got)
		}
	}
}
package agency // UserAgent represents the results from a call to Scan(). type UserAgent struct { Browser struct { Type string Name string } Device struct { Type string } OS struct { Name string Version string } }
package rank

import (
	"container/heap"
	"time"

	"Barracks/data"
)

// RankInfo bundles everything needed to compute and serve a contest
// scoreboard: accumulated per-user rows, a scratch heap used when
// assigning ranks, and the contest's problem list.
type RankInfo struct {
	RankData *rankData
	RankHeap *rankHeap
	Problems []data.Problem
}

// newUserRow builds an empty scoreboard row for one user with one
// problemStatus slot per contest problem. Rank starts at 1.
func newUserRow(user *data.User, problems *[]data.Problem) (u userRow) {
	u = userRow{
		Rank:            1,
		StrId:           (*user).StrId,
		ID:              (*user).ID,
		ProblemStatuses: make([]problemStatus, len(*problems)),
	}
	return
}

// newRankData builds the rankData skeleton for a contest: index maps from
// user/problem IDs to slice positions plus one empty row per user.
func newRankData(contest *data.Contest, users *[]data.User, problems *[]data.Problem) (r *rankData) {
	r = &rankData{
		CalcAt:         time.Now(),
		ContestInfo:    contest,
		UserRows:       make([]userRow, len(*users)),
		UserMap:        make(map[uint]uint),
		ProblemMap:     make(map[uint]uint),
		ProblemCodeMap: make(map[uint]string),
	}
	for index, problem := range *problems {
		r.ProblemMap[problem.ID] = uint(index)
		r.ProblemCodeMap[problem.ID] = problem.Code
	}
	for index, user := range *users {
		r.UserMap[user.ID] = uint(index)
		r.UserRows[index] = newUserRow(&user, problems)
	}
	return
}

// NewRankInfo creates a RankInfo for the given contest, users and problems.
func NewRankInfo(contest *data.Contest, users *[]data.User, problems *[]data.Problem) (r *RankInfo) {
	r = &RankInfo{}
	r.RankData = newRankData(contest, users, problems)
	r.RankHeap = &rankHeap{}
	heap.Init(r.RankHeap)
	r.Problems = *problems
	return
}

// calcRanks recomputes every user's Rank. All rows are pushed onto the
// heap and popped in scoreboard order; rows with identical
// (TotalScore, Penalty) share a rank and the rank value advances by one
// per distinct group (dense ranking).
func (r RankInfo) calcRanks() {
	for index, row := range r.RankData.UserRows {
		heap.Push(r.RankHeap, rankNode{Penalty: row.Penalty, UserIndex: uint(index), TotalScore: row.TotalScore})
	}
	rankValue := uint(1)
	var prev *rankNode
	for r.RankHeap.Len() > 0 {
		node := heap.Pop(r.RankHeap).(rankNode)
		if prev != nil && (prev.TotalScore != node.TotalScore || prev.Penalty != node.Penalty) {
			rankValue++
		}
		r.RankData.UserRows[node.UserIndex].Rank = rankValue
		prev = &node
	}
}

// analyzeSubmissions folds a batch of submissions into the rows.
// For each submission by a known user: the first accepted submission of a
// problem adds the problem's score plus a penalty of elapsed contest time
// and 20 minutes per prior wrong try; wrong submissions before the first
// accept increment that problem's wrong counter; anything after a problem
// is accepted is ignored.
func (r RankInfo) analyzeSubmissions(submissions []data.Submission) {
	contestInfo := r.RankData.ContestInfo
	for _, submission := range submissions {
		// Skip submissions from users not present in this ranking.
		userIdx, ok := r.RankData.UserMap[submission.UserID]
		if !ok {
			continue
		}
		userRow := &r.RankData.UserRows[userIdx]
		problemIdx := r.RankData.ProblemMap[submission.ProblemID]
		problemStatus := &userRow.ProblemStatuses[problemIdx]
		if data.IsAccepted(submission.Result) {
			if !problemStatus.Accepted {
				penalty := submission.CreatedAt.Sub(contestInfo.Start) + time.Duration(problemStatus.WrongCount)*20*time.Minute
				userRow.TotalScore += r.Problems[problemIdx].Score
				userRow.Penalty += penalty
				problemStatus.Accepted = true
				userRow.AcceptedCnt++
			}
		} else {
			if !problemStatus.Accepted {
				problemStatus.WrongCount++
			}
		}
	}
}

// AddSubmissions merges new submissions into the scoreboard and then
// recomputes all ranks.
func (r RankInfo) AddSubmissions(submissions []data.Submission) {
	r.analyzeSubmissions(submissions)
	r.calcRanks()
}

// GetUserProblemStatusSummary returns one summary entry per problem for
// the given user, or nil when the user is unknown. Map iteration order is
// random, so entry order is not deterministic.
func (r RankInfo) GetUserProblemStatusSummary(userId uint) (summary []problemStatusSummary) {
	mappedId, ok := r.RankData.UserMap[userId]
	if !ok {
		summary = nil
		return
	}
	userRowRef := &r.RankData.UserRows[mappedId]
	summary = make([]problemStatusSummary, len(r.RankData.ProblemMap))
	idx := 0
	for problemID, problemIdx := range r.RankData.ProblemMap {
		status := userRowRef.ProblemStatuses[problemIdx]
		summary[idx] = problemStatusSummary{
			ProblemCode: r.RankData.ProblemCodeMap[problemID],
			ProblemId:   problemID,
			Accepted:    status.Accepted,
			// BUG FIX: the original read summary[idx].Accepted inside this
			// composite literal, which is always the zero value (false), so
			// Wrong was reported even for already-accepted problems that had
			// earlier wrong tries. Use the actual problem status instead.
			Wrong: !status.Accepted && status.WrongCount > 0,
			Score: r.Problems[problemIdx].Score,
		}
		idx++
	}
	return
}

// GetUserSummary returns one user's rank summary tagged with the id of
// the last submission processed (subId), or nil for unknown users.
func (r RankInfo) GetUserSummary(userId uint, subId uint) (summary *UserRankSummary) {
	mappedId, ok := r.RankData.UserMap[userId]
	if !ok {
		summary = nil
		return
	}
	userRowRef := &r.RankData.UserRows[mappedId]
	summary = &UserRankSummary{
		LastSubId:     subId,
		Penalty:       userRowRef.Penalty,
		StrId:         userRowRef.StrId,
		UserId:        userId,
		AcceptedCnt:   userRowRef.AcceptedCnt,
		Rank:          userRowRef.Rank,
		ProblemStatus: r.GetUserProblemStatusSummary(userId),
		TotalScore:    userRowRef.TotalScore,
	}
	return
}

// GetRanking returns the full scoreboard rows.
func (r RankInfo) GetRanking() (summary []userRow) {
	return r.RankData.UserRows
}
package light

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"reflect"
	"strings"
	"sync"
)

// handlerMap maps a handler struct's package path to the reflect.Value
// of the registered handler instance. Guarded by lock.
var handlerMap map[string]reflect.Value = make(map[string]reflect.Value)
var lock sync.Mutex

// Light is a tiny reflection-based HTTP router: a request path of the
// form "/pkg/path/method" is dispatched to the Method of the handler
// registered under package path "pkg/path".
//
// NOTE(review): response/request are stored on the shared receiver, so a
// single *Light registered with net/http is mutated by concurrent
// requests — confirm this is only used single-threaded, or move this
// per-request state off the receiver.
type Light struct {
	response http.ResponseWriter
	request  *http.Request
	Parm     string
}

// ServeHTTP resolves the handler and method from the URL path, then runs
// the optional Before hook, the target method, and the optional After
// hook. Any panic along the way is recovered and answered with 404.
func (this *Light) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println(err)
			http.NotFound(w, r)
		}
	}()
	this.response = w
	this.request = r
	h, m, err := getHandle(this.request)
	if err != nil {
		panic(err)
	}
	beforeFun := h.Elem().MethodByName("Before")
	this.invoke(beforeFun)
	mfun := h.Elem().MethodByName(m)
	this.invoke(mfun)
	afterFun := h.Elem().MethodByName("After")
	this.invoke(afterFun)
}

// invoke calls mfun with arguments chosen by its arity: zero-arg methods
// are called directly, one-arg methods receive *Light, and anything else
// receives (http.ResponseWriter, *http.Request). Return values are
// rendered via outValue. Invalid (missing) methods are skipped.
func (this *Light) invoke(mfun reflect.Value) {
	if !mfun.IsValid() {
		return
	}
	var in []reflect.Value
	switch mfun.Type().NumIn() {
	case 0:
		outValue(mfun.Call(nil), this.response)
	case 1:
		in = append(in, reflect.ValueOf(this))
		// BUG FIX: the original called mfun.Call(nil) here, ignoring the
		// prepared argument list and panicking for every one-argument
		// method (which the recover above then turned into a 404).
		outValue(mfun.Call(in), this.response)
	default:
		in = append(in, reflect.ValueOf(this.response))
		in = append(in, reflect.ValueOf(this.request))
		outValue(mfun.Call(in), this.response)
	}
}

// outValue renders method return values: ints become the HTTP status
// code, strings are written verbatim, anything else is JSON-encoded
// (encoding errors are silently dropped).
func outValue(v []reflect.Value, w http.ResponseWriter) {
	for _, out := range v {
		switch out.Kind() {
		case reflect.Int:
			w.WriteHeader(int(out.Int()))
		case reflect.String:
			w.Write([]byte(out.String()))
		default:
			if o, err := json.Marshal(out.Interface()); err == nil {
				w.Write(o)
			}
		}
	}
}

// getHandle splits the trimmed URL path into a package path and a
// capitalized method name, then looks the handler up in handlerMap.
func getHandle(r *http.Request) (h reflect.Value, m string, e error) {
	path := strings.Trim(r.URL.Path, "/")
	var pkg string
	for i := len(path) - 1; i >= 0; i-- {
		if path[i] == '/' {
			pkg = path[:i]
			m = strings.Title(path[i+1:])
			break
		}
	}
	lock.Lock()
	h, ok := handlerMap[pkg]
	lock.Unlock()
	if ok {
		return h, m, nil
	}
	return reflect.Value{}, "", errors.New("handler not found")
}

// Handler is a marker interface; any pointer-to-struct may be registered.
type Handler interface{}

// Add registers h in handlerMap under its struct type's package path.
func (this *Light) Add(h Handler) {
	rv := reflect.ValueOf(h)
	verify(rv)
	pkgPath := rv.Elem().Type().PkgPath()
	lock.Lock()
	handlerMap[pkgPath] = rv
	lock.Unlock()
}

// verify panics unless rv is a pointer to a struct — the only shape that
// Add and ServeHTTP (both of which call Elem()) can work with.
func verify(rv reflect.Value) {
	// BUG FIX: the original required the value to be neither Ptr nor
	// Struct before panicking (&&), so a plain struct value passed the
	// check and Add's rv.Elem() panicked later with an opaque message.
	if rv.Kind() != reflect.Ptr || rv.Elem().Kind() != reflect.Struct {
		panic("添加路由错误:" + rv.String())
	}
}

// Run serves l on :8080 (ListenAndServe error is discarded, as before).
func Run(l *Light) {
	http.ListenAndServe(":8080", l)
}
package integers // Add adds two integers returning the result func Add(a, b int) int { return a + b }
/*
Copyright 2021 RadonDB.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package syncer

import (
	"github.com/presslabs/controller-util/syncer"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/radondb/radondb-mysql-kubernetes/cluster"
	"github.com/radondb/radondb-mysql-kubernetes/utils"
)

// NewServiceAccountSyncer returns serviceAccount syncer.
func NewServiceAccountSyncer(cli client.Client, c *cluster.Cluster) syncer.Interface {
	// Desired object: a ServiceAccount named for this cluster, in the
	// cluster's namespace, carrying the cluster's standard labels.
	serviceAccount := &corev1.ServiceAccount{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ServiceAccount",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      c.GetNameForResource(utils.ServiceAccount),
			Namespace: c.Namespace,
			Labels:    c.GetLabels(),
		},
	}

	// The mutate callback is a no-op: nothing beyond the object spec
	// above needs to be reconciled on updates.
	return syncer.NewObjectSyncer("ServiceAccount", c.Unwrap(), serviceAccount, cli, func() error {
		return nil
	})
}
package metadata

import (
	"incognito-chain/common"
)

// IssuingEVMResponse is the response metadata paired with an issuing-EVM
// request transaction, identified by RequestedTxID.
type IssuingEVMResponse struct {
	MetadataBase
	RequestedTxID   common.Hash `json:"RequestedTxID"`
	UniqTx          []byte      `json:"UniqETHTx"` // note: JSON key differs from the field name
	ExternalTokenID []byte      `json:"ExternalTokenID"`
	SharedRandom    []byte      `json:"SharedRandom,omitempty"`
}

// NewIssuingEVMResponse builds an IssuingEVMResponse with the given
// request tx id, unique external tx bytes, external token id and
// metadata type. SharedRandom is left empty; set it via SetSharedRandom.
func NewIssuingEVMResponse(
	requestedTxID common.Hash,
	uniqTx []byte,
	externalTokenID []byte,
	metaType int,
) *IssuingEVMResponse {
	metadataBase := MetadataBase{
		Type: metaType,
	}
	return &IssuingEVMResponse{
		RequestedTxID:   requestedTxID,
		UniqTx:          uniqTx,
		ExternalTokenID: externalTokenID,
		MetadataBase:    metadataBase,
	}
}

// Hash returns the hash of the concatenation of RequestedTxID, UniqTx,
// ExternalTokenID and the base metadata hash. SharedRandom is
// deliberately NOT part of the hash input.
func (iRes IssuingEVMResponse) Hash() *common.Hash {
	record := iRes.RequestedTxID.String()
	record += string(iRes.UniqTx)
	record += string(iRes.ExternalTokenID)
	record += iRes.MetadataBase.Hash().String()

	// final hash
	hash := common.HashH([]byte(record))
	return &hash
}

// SetSharedRandom stores the shared random bytes on the response.
func (iRes *IssuingEVMResponse) SetSharedRandom(r []byte) {
	iRes.SharedRandom = r
}
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"net"
	"os"
	"os/exec"

	"github.com/songgao/water"
	"golang.org/x/net/ipv4"
)

const (
	// BUFFERSIZE represents a size of read buffer.
	BUFFERSIZE = 1500
	// MTU represents a maximum transmission unit.
	MTU = "1300"
)

// runIP shells out to /sbin/ip with the given arguments, wiring the
// process's stdio through, and aborts the program on failure.
func runIP(args ...string) {
	cmd := exec.Command("/sbin/ip", args...)
	cmd.Stderr = os.Stderr
	cmd.Stdout = os.Stdout
	cmd.Stdin = os.Stdin
	err := cmd.Run()
	if nil != err {
		log.Fatalln("Error running /sbin/ip:", err)
	}
}

// Tunneling represents the information for tunneling.
type Tunneling struct {
	RemoteIP string `json:"RemoteIP"`
	Port     int    `json:"Port"`
}

// LoadConfig represents a function to read tunneling information from json file.
func LoadConfig() (Tunneling, error) {
	var config Tunneling
	file, err := os.Open("tunneling.json")
	if err != nil {
		// BUG FIX: the original registered the Close deferral BEFORE this
		// check, so a missing config file made the deferred Close run on a
		// nil *os.File and die with the misleading "can't close" message.
		log.Fatal(err)
	}
	defer func() {
		if errClose := file.Close(); errClose != nil {
			log.Fatal("can't close the file", errClose)
		}
	}()
	decoder := json.NewDecoder(file)
	err = decoder.Decode(&config)
	if err != nil {
		log.Fatal(err)
	}
	return config, err
}

func main() {
	//var CBNetAgent = poc_cb_net.NewCBNetworkAgent()
	//temp := CBNetAgent.GetNetworkInterface()
	//var localIP, remoteIP *string
	//var port *int
	//for _, networkInterface := range temp {
	//	if networkInterface.Name == "eth0" || networkInterface.Name == "ens4" {
	//		fmt.Println(networkInterface)
	//		for _, IP := range networkInterface.IPs {
	//			if IP.Version == "IPv4" {
	//				pieces := strings.Split(IP.CLADNetID, "/")
	//				prefix := pieces[1]
	//				IPAddressWithPrefix := IP.IPAddress + "/" + prefix
	//				fmt.Println(IPAddressWithPrefix)
	//				localIP = flag.String("local", IPAddressWithPrefix, "Local tun interface IP/MASK like 192.168.3.3⁄24")
	//				break
	//			}
	//		}
	//	}
	//}
	//config, err := LoadConfig()

	var (
		localIP  = flag.String("local", "192.168.7.1/24", "Local tun interface IP/MASK like 192.168.3.3⁄24")
		remoteIP = flag.String("remote", "3.128.34.227", "Remote server (external) IP like 8.8.8.8")
		port     = flag.Int("port", 20000, "UDP port for communication")
	)
	flag.Parse()

	// check if we have anything
	if "" == *localIP {
		flag.Usage()
		log.Fatalln("\nlocal ip is not specified")
	}
	if "" == *remoteIP {
		flag.Usage()
		log.Fatalln("\nremote server is not specified")
	}

	// create TUN interface
	iface, err := water.New(water.Config{
		DeviceType: water.TUN,
	})
	if nil != err {
		log.Fatalln("Unable to allocate TUN interface:", err)
	}
	log.Println("Interface allocated:", iface.Name())

	// set interface parameters
	runIP("link", "set", "dev", iface.Name(), "mtu", MTU)
	runIP("addr", "add", *localIP, "dev", iface.Name())
	runIP("link", "set", "dev", iface.Name(), "up")

	// resolve remote addr
	remoteAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%v", *remoteIP, *port))
	if nil != err {
		log.Fatalln("Unable to resolve remote addr:", err)
	}

	// listen to local socket
	lstnAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%v", *port))
	if nil != err {
		log.Fatalln("Unable to get UDP socket:", err)
	}
	lstnConn, err := net.ListenUDP("udp", lstnAddr)
	if nil != err {
		log.Fatalln("Unable to listen on UDP socket:", err)
	}
	defer lstnConn.Close()

	// recv in separate thread: socket -> TUN
	go func() {
		buf := make([]byte, BUFFERSIZE)
		for {
			n, addr, err := lstnConn.ReadFromUDP(buf)
			// BUG FIX: the original parsed and printed the header BEFORE
			// checking the read error, logging stale buffer contents on
			// failed reads. Check the error first.
			if err != nil || n == 0 {
				fmt.Println("Error: ", err)
				continue
			}
			// just debug
			header, _ := ipv4.ParseHeader(buf[:n])
			fmt.Printf("Received %d bytes from %v: %+v", n, addr, header)
			// write to TUN interface
			nWrite, errWrite := iface.Write(buf[:n])
			if errWrite != nil || nWrite == 0 {
				fmt.Printf("Error(%d len): %s", nWrite, errWrite)
			}
		}
	}()

	// and one more loop: TUN -> socket
	packet := make([]byte, BUFFERSIZE)
	for {
		plen, err := iface.Read(packet)
		if err != nil {
			break
		}
		// debug :)
		header, _ := ipv4.ParseHeader(packet[:plen])
		fmt.Printf("Sending to remote: %+v (%+v)", header, err)
		// real send
		// NOTE(review): WriteToUDP error is ignored — confirm best-effort
		// sending is intended for this PoC.
		lstnConn.WriteToUDP(packet[:plen], remoteAddr)
	}
}
/*
* Copyright © 2019-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
 */

package adabas

import (
	"fmt"
	"testing"
	"time"

	"github.com/SoftwareAG/adabas-go-api/adatypes"
	"github.com/stretchr/testify/assert"
)

// TestMapRepository registers repository "24,4" by reference string, then
// searches a map by name and checks the repository resolves to dbid 24.
func TestMapRepository(t *testing.T) {
	initTestLogWithFile(t, "map_repositories.log")
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	ada, _ := NewAdabas(24)
	defer ada.Close()
	AddGlobalMapRepositoryReference("24,4")
	// NOTE(review): Add* above takes a reference string while Del* takes
	// (ada, 4) — confirm both address the same repository entry.
	defer DelGlobalMapRepository(ada, 4)
	id := NewAdabasID()
	defer id.Close()
	adabasMap, rep, err := SearchMapRepository(id, "EMPLOYEES-NAT-DDM")
	assert.NoError(t, err)
	assert.NotNil(t, adabasMap)
	assert.NotNil(t, rep)
	assert.Equal(t, Dbid(24), rep.DatabaseURL.URL.Dbid)
	//assert.Equal(t, "", rep.DatabaseURL.URL.Host)
}

// TestGlobalMapRepository registers file 4 on two databases (23 and 24)
// and verifies global map and map-name enumeration return results.
func TestGlobalMapRepository(t *testing.T) {
	initTestLogWithFile(t, "map_repositories.log")
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	ada, _ := NewAdabas(23)
	defer ada.Close()
	AddGlobalMapRepository(ada.URL, 4)
	defer DelGlobalMapRepository(ada.URL, 4)
	// Re-point the same Adabas handle at dbid 24 and register again.
	ada.SetDbid(24)
	AddGlobalMapRepository(ada.URL, 4)
	defer DelGlobalMapRepository(ada.URL, 4)
	ada2, _ := NewAdabas(1)
	defer ada2.Close()
	adabasMaps, err := AllGlobalMaps(ada2)
	assert.NoError(t, err)
	assert.NotNil(t, adabasMaps)
	for _, m := range adabasMaps {
		fmt.Printf("%s -> %d\n", m.Name, m.Isn)
	}
	listMaps, lerr := AllGlobalMapNames(ada2)
	assert.NoError(t, lerr)
	assert.NotNil(t, listMaps)
	for _, m := range listMaps {
		fmt.Printf("%s\n", m)
	}
}

// TestGlobalMapConnectionString resolves the map named in the connection
// string ("acj;map=EMPLOYEES") through the global repository and reads a
// personnel-id range.
func TestGlobalMapConnectionString(t *testing.T) {
	initTestLogWithFile(t, "map_repositories.log")
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	ada, _ := NewAdabas(24)
	defer ada.Close()
	AddGlobalMapRepository(ada.URL, 4)
	defer DelGlobalMapRepository(ada.URL, 4)

	connection, cerr := NewConnection("acj;map=EMPLOYEES")
	if !assert.NoError(t, cerr) {
		return
	}
	defer connection.Close()
	request, rerr := connection.CreateReadRequest()
	if !assert.NoError(t, rerr) {
		return
	}
	request.QueryFields("name,personnel-id")
	result, err := request.ReadLogicalWith("personnel-id=[11100301:11100303]")
	if !assert.NoError(t, err) {
		return
	}
	_ = result.DumpValues()
}

// TestGlobalMapConnectionDirect uses a map-less connection string
// ("acj;map") and names the map at request-creation time instead.
func TestGlobalMapConnectionDirect(t *testing.T) {
	initTestLogWithFile(t, "map_repositories.log")
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	ada, _ := NewAdabas(24)
	defer ada.Close()
	AddGlobalMapRepository(ada.URL, 4)
	// NOTE(review): Add* passes ada.URL but Del* passes ada — confirm the
	// two signatures address the same entry.
	defer DelGlobalMapRepository(ada, 4)

	connection, cerr := NewConnection("acj;map")
	if !assert.NoError(t, cerr) {
		return
	}
	defer connection.Close()
	request, rerr := connection.CreateMapReadRequest("EMPLOYEES")
	if !assert.NoError(t, rerr) {
		return
	}
	request.QueryFields("name,personnel-id")
	result, err := request.ReadLogicalWith("personnel-id=[11100301:11100303]")
	if !assert.NoError(t, err) {
		return
	}
	_ = result.DumpValues()
}

// TestThreadMapCache verifies the asynchronous map cache: a lookup fails
// before the repository is registered, and succeeds after the background
// cache has had time (60s sleep — this test is deliberately slow) to pick
// up the newly added repository.
func TestThreadMapCache(t *testing.T) {
	initTestLogWithFile(t, "global_map_repositories.log")
	DumpGlobalMapRepositories()
	CleanGlobalMapRepository()
	StartAsynchronousMapCache(10)
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	ada, _ := NewAdabas(23)
	defer ada.Close()
	m, _, err := SearchMapRepository(ada.ID, "VEHICLESGo")
	assert.Nil(t, m)
	if !assert.Error(t, err) {
		fmt.Println("Map got:", m)
		return
	}
	fmt.Println("Search failed: ", err)
	AddGlobalMapRepository(ada.URL, 250)
	defer DelGlobalMapRepository(ada, 250)
	// Give the asynchronous cache time to refresh.
	time.Sleep(60 * time.Second)
	m, _, err = SearchMapRepository(ada.ID, "VEHICLESGo")
	if !assert.NoError(t, err) {
		return
	}
	fmt.Println("Map names found: ", m.Name)
	assert.Equal(t, "VEHICLESGo", m.Name)
}
package db

import (
	"regexp"
	"sync"
)

// DatabaseIndex is the in memory index of a collection of conversations, and their tags.
// Exported functions are goroutine safe while un-exported functions assume the caller will use the appropriate locks
type DatabaseIndex struct {
	// in memory metadata index, built on load and updated when new series come in
	mu            sync.RWMutex
	conversations map[string]*Conversation // map conversations key to the Conversations object
	names         []string                 // sorted list of the conversations names
	lastID        uint64                   // last used conversations ID. They're in memory only for this shard
}

// NewDatabaseIndex creates the in memory index
func NewDatabaseIndex() *DatabaseIndex {
	return &DatabaseIndex{
		conversations: make(map[string]*Conversation),
		names:         make([]string, 0),
	}
}

// Conversation returns the conversation object from the index by the name.
func (db *DatabaseIndex) Conversation(name string) *Conversation {
	db.mu.RLock()
	defer db.mu.RUnlock()
	return db.conversations[name]
}

// ConversationsCount returns the number of conversations currently indexed by the database.
// Useful for reporting and monitoring.
func (db *DatabaseIndex) ConversationsCount() (nConversations int) {
	db.mu.RLock()
	defer db.mu.RUnlock()
	nConversations = len(db.conversations)
	return
}

// createConversationIndexIfNotExists adds the conversation to the index keyed by
// conversation.Key and assigns it the next in-memory id, or returns the existing
// conversation object if one is already indexed under that key.
// Unexported: the caller is expected to hold the write lock (db.mu).
func (db *DatabaseIndex) createConversationIndexIfNotExists(name string, conversation *Conversation) *Conversation {
	// if there is a conversation for this key, it's already been added
	cc := db.conversations[conversation.Key]
	if cc != nil {
		return cc
	}

	// set the in memory ID for query processing on this shard
	conversation.id = db.lastID + 1
	db.lastID++

	db.conversations[conversation.Key] = conversation
	return conversation
}

// conversationsByRegex returns the conversations that match the regex.
func (db *DatabaseIndex) conversationsByRegex(re *regexp.Regexp) Conversations { var matches Conversations for _, c := range db.conversations { if re.MatchString(c.Name) { matches = append(matches, c) } } return matches } // Conversations returns a list of all conversations. func (db *DatabaseIndex) Conversations() Conversations { conversations := make(Conversations, 0, len(db.conversations)) for _, m := range db.conversations { conversations = append(conversations, m) } return conversations } func (db *DatabaseIndex) DropConversation(name string) { db.mu.Lock() defer db.mu.Unlock() c := db.conversations[name] if c == nil { return } delete(db.conversations, name) var names []string for _, n := range db.names { if n != name { names = append(names, n) } } db.names = names }
package repository

import (
	"context"
	"log"
	"math/rand"
	"strconv"

	"cloud.google.com/go/firestore"
	firebase "firebase.google.com/go"
	"github.com/b1g2h3/todoapp/entity"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

// TodoRepository abstracts persistence of todo lists and their tasks.
type TodoRepository interface {
	GetLists(list *entity.List) ([]entity.List, error)
	AddList(list *entity.List) (*entity.List, error)
	GetTasks(ListID string) ([]entity.Task, error)
	GetTask(ListID, TaskID string) ([]entity.Task, error)
	AddTask(task *entity.Task) (*entity.Task, error)
	UpdateTask(task *entity.Task) (*entity.Task, error)
	DestroyTask(Name string)
}

type repo struct{}

// c is the shared Firestore client, created once in init.
// BUG FIX: the package previously also kept global `lists` and `tasks`
// slices that every Get* call appended to, so results accumulated across
// calls (and raced under concurrent use); each method now builds a local
// slice instead.
var c *firestore.Client

// init creates the Firestore client.
// NOTE(review): the credentials path is hard-coded to a developer
// machine — move it to configuration.
func init() {
	ctx := context.Background()
	opt := option.WithCredentialsFile("C:/Users/vlast/Desktop/credentionals.json")
	config := &firebase.Config{ProjectID: "todo-3840c"}
	app, err := firebase.NewApp(ctx, config, opt)
	if err != nil {
		log.Fatalf("error initializing app: %v\n", err)
	}
	c, err = app.Firestore(ctx)
	if err != nil {
		log.Fatalf("app.Firestore: %v", err)
	}
}

// GetLists returns all lists owned by list.UID.
func (*repo) GetLists(list *entity.List) ([]entity.List, error) {
	ctx := context.Background()
	iter := c.Collection("lists").Where("UID", "==", list.UID).Documents(ctx)
	var lists []entity.List
	for {
		doc, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("Failed to iterate: %v", err)
		}
		lists = append(lists, entity.List{
			ID:   doc.Data()["ID"].(string),
			UID:  doc.Data()["UID"].(string),
			Name: doc.Data()["Name"].(string),
		})
	}
	return lists, nil
}

// AddList stores a new list under a random numeric ID.
func (*repo) AddList(list *entity.List) (*entity.List, error) {
	ctx := context.Background()
	// BUG FIX: strconv.Itoa returns a single value; the original
	// `list.ID, _ = strconv.Itoa(...)` two-value assignment did not compile.
	list.ID = strconv.Itoa(rand.Intn(10000))
	_, _, err := c.Collection("lists").Add(ctx, list)
	if err != nil {
		log.Fatalf("Failed add list: %v", err)
	}
	return list, nil
}

// GetTasks returns all tasks belonging to the given list.
func (*repo) GetTasks(ListID string) ([]entity.Task, error) {
	ctx := context.Background()
	iter := c.Collection("task").Where("ListID", "==", ListID).Documents(ctx)
	var tasks []entity.Task
	for {
		doc, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("Failed to iterate: %v", err)
		}
		tasks = append(tasks, entity.Task{
			ID:     doc.Data()["ID"].(string),
			ListID: doc.Data()["ListID"].(string),
			UID:    doc.Data()["UID"].(string),
			Name:   doc.Data()["Name"].(string),
		})
	}
	return tasks, nil
}

// GetTask returns the task(s) in ListID matching TaskID.
func (*repo) GetTask(ListID, TaskID string) ([]entity.Task, error) {
	ctx := context.Background()
	iter := c.Collection("task").Where("ListID", "==", ListID).Where("ID", "==", TaskID).Documents(ctx)
	var tasks []entity.Task
	for {
		doc, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("Failed to iterate: %v", err)
		}
		tasks = append(tasks, entity.Task{
			ID:     doc.Data()["ID"].(string),
			ListID: doc.Data()["ListID"].(string),
			UID:    doc.Data()["UID"].(string),
			Name:   doc.Data()["Name"].(string),
		})
	}
	return tasks, nil
}

// AddTask stores a new task with a random numeric ID; the document name
// is the concatenation of the task ID and the list ID.
func (*repo) AddTask(task *entity.Task) (*entity.Task, error) {
	ctx := context.Background()
	task.ID = strconv.Itoa(rand.Intn(10000))
	Name := task.ID + task.ListID
	_, err := c.Collection("task").Doc(Name).Set(ctx, task)
	if err != nil {
		log.Fatalf("Failed add task: %v", err)
	}
	return task, nil
}

// UpdateTask overwrites the task document named by task.ID + task.ListID.
func (*repo) UpdateTask(task *entity.Task) (*entity.Task, error) {
	ctx := context.Background()
	Name := task.ID + task.ListID
	_, err := c.Collection("task").Doc(Name).Set(ctx, task)
	if err != nil {
		log.Fatalf("Failed Update Task: %v", err)
	}
	return task, nil
}

// DestroyTask deletes the task document with the given name.
func (*repo) DestroyTask(Name string) {
	ctx := context.Background()
	_, err := c.Collection("task").Doc(Name).Delete(ctx)
	if err != nil {
		log.Fatalf("Failed destroy Task: %v", err)
	}
}

// NewTodoRepository init func for repo
func NewTodoRepository() TodoRepository {
	return &repo{}
}
package main

import "fmt"

// main prints the zero values of a string, a bool and an int:
// "" (empty), false and 0 respectively.
func main() {
	var (
		ruby   bool
		java   int
		golang string
	)
	fmt.Println(golang, ruby, java)
}
package cmd

import (
	"github.com/spf13/cobra"
)

// init registers the uninstall subcommand on the root command.
func init() {
	RootCmd.AddCommand(uninstallCommand)
}

// uninstallCommand is the "uninstall" subcommand. Its Run handler is
// currently a no-op placeholder.
var uninstallCommand = &cobra.Command{
	Use:   "uninstall",
	Short: "Uninstall a node",
	Run:   func(cmd *cobra.Command, args []string) {},
}
package cfile

import (
	"io"
	"os"
)

// Writer returns f.WriteAt(-1).
func (f *File) Writer() *Writer {
	return f.WriterAt(-1)
}

// WriterAt acquires a write-lock, seeks to the given offset and returns a writer.
// if off is < 0, it seeks to the end of the file, otherwise it seeks to the off value.
// The lock (and the wait-group slot) is held until the returned Writer is Closed.
func (f *File) WriterAt(off int64) *Writer {
	f.mux.Lock()
	f.wg.Add(1)
	if off < 0 {
		off, _ = f.f.Seek(0, io.SeekEnd)
	} else {
		// NOTE(review): the Seek error is discarded — confirm callers can
		// tolerate a writer positioned at an unexpected offset on failure.
		f.f.Seek(off, io.SeekStart)
	}

	return &Writer{
		f:   f,
		off: off,
	}
}

// Writer implements `io.Writer`, `io.WriterAt`, `io.Seeker`` and `io.Closer`.
// A nil f field marks the writer as closed.
type Writer struct {
	f   *File
	off int64
}

// Write implements `io.Writer`.
func (w *Writer) Write(b []byte) (n int, err error) {
	if w.f == nil {
		return 0, os.ErrClosed
	}
	n, err = w.f.f.Write(b)
	w.off += int64(n) // track the current position for Seek/offset users
	return
}

// WriteAt implements `io.WriterAt`. It does not move the writer's
// tracked offset.
func (w *Writer) WriteAt(b []byte, off int64) (n int, err error) {
	if w.f == nil {
		return 0, os.ErrClosed
	}
	return w.f.WriteAt(b, off)
}

// Seek implements `io.Seeker`.
func (w *Writer) Seek(offset int64, whence int) (n int64, err error) {
	if w.f == nil {
		return 0, os.ErrClosed
	}
	n, err = w.f.f.Seek(offset, whence)
	w.off = n
	return
}

// Close releases the parent's write-lock.
// It optionally syncs the file, refreshes the parent's cached size, and
// only then (via the defer) releases the lock and marks w closed.
func (w *Writer) Close() (err error) {
	if w.f == nil {
		return os.ErrClosed
	}
	// Runs after the sync/size logic below; w.f is still valid until then.
	defer func() {
		w.f.wg.Done()
		w.f.mux.Unlock()
		w.f = nil
	}()
	if w.f.SyncAfterWriterClose {
		if err = w.f.f.Sync(); err != nil {
			return
		}
	}
	var sz int64
	if sz, err = getSize(w.f.f); err != nil {
		return
	}
	w.f.sz.Store(sz)
	return
}
package main

import (
	"fmt"

	"github.com/jackytck/projecteuler/tools"
)

// isBouncy reports whether n's digit sequence both increases and
// decreases somewhere, i.e. n is neither non-decreasing nor
// non-increasing.
func isBouncy(n int) bool {
	digits := tools.Digits(n)
	increased, decreased := false, false
	for i := 1; i < len(digits); i++ {
		switch {
		case digits[i] > digits[i-1]:
			increased = true
		case digits[i] < digits[i-1]:
			decreased = true
		}
		if increased && decreased {
			return true
		}
	}
	return increased && decreased
}

// solve returns the least number at which the proportion of bouncy
// numbers reaches percent%. Counting starts at 99, below which no
// number is bouncy.
func solve(percent int) int {
	bouncy, total := 0, 99
	for total*percent > 100*bouncy {
		total++
		if isBouncy(total) {
			bouncy++
		}
	}
	// fmt.Println(bouncy, total, float64(bouncy)/float64(total))
	return total
}

func main() {
	fmt.Println(solve(50))
	fmt.Println(solve(90))
	fmt.Println(solve(99))
}

// Find the least number for which the proportion of bouncy numbers is exactly
// 99%.
// Note:
// Straightforward brute force.
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/anihouse/bot"
	"github.com/anihouse/bot/app"
	"github.com/anihouse/bot/config"
	"github.com/anihouse/bot/db"
	"github.com/anihouse/bot/tpl"
	"github.com/urfave/cli"
)

// run boots the bot: loads configuration from the --config path, then
// initializes templates, database, bot core, app and bot modules in that
// order, and finally blocks until a termination signal arrives.
func run(c *cli.Context) error {
	fmt.Println("Bot is running. Press Ctrl + C to exit.")
	config.Load(c.GlobalString("config"))
	tpl.Init()
	db.Init()
	bot.Init()
	app.Init()
	bot.Modules.Init()
	sc := make(chan os.Signal, 1)
	// NOTE(review): os.Kill (SIGKILL) cannot be caught or ignored, so
	// listing it here has no effect; SIGINT/SIGTERM/Interrupt suffice.
	signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
	<-sc // block until a signal is delivered
	return nil
}
package main

import (
	"database/sql"
	"fmt"
	"gorm.io/driver/mysql"
	"gorm.io/gorm"
	"time"
)

// Demonstrates GORM auto-migration and the Migrator interface methods.

// Model definition.
// NOTE(review): gorm's soft-delete convention expects gorm.DeletedAt;
// as a plain time.Time, DeletedAt here is just an ordinary column —
// confirm that is intended.
type Student struct {
	ID        uint
	Name      string
	Age       uint
	Email     string
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt time.Time
}

var (
	sqlDB  *sql.DB
	gormDB *gorm.DB
)

// InitDB opens a database/sql connection to MySQL, configures its pool,
// and initializes the package-level *gorm.DB on top of that existing
// connection. On any failure it prints the error and returns, leaving
// gormDB nil (main's recover handles the resulting panic).
func InitDB() {
	// driverName
	driverName := "mysql"
	// DSN components
	dbUser := "root"
	dbPassword := "root"
	protocol := "tcp"
	dbHost := "127.0.0.1"
	dbPort := "3306"
	dbName := "blog"
	parseTime := true
	loc := "Local"
	charset := "utf8mb4"
	dataSourceName := fmt.Sprintf("%s:%s@%s(%s:%s)/%s?charset=%s&parseTime=%t&loc=%s", dbUser, dbPassword, protocol, dbHost, dbPort, dbName, charset, parseTime, loc)
	// Open the database connection (only once).
	if sqlDB == nil {
		sqlDB, _ = sql.Open(driverName, dataSourceName)
	}
	err := sqlDB.Ping()
	if err != nil {
		fmt.Printf("sqlDB.Ping() err:%s\n", err)
		return
	}
	// gorm reuses database/sql's connection pool.
	sqlDB.SetMaxOpenConns(10)           // max open connections in the pool
	sqlDB.SetMaxIdleConns(5)            // max idle connections in the pool
	sqlDB.SetConnMaxLifetime(time.Hour) // max reusable lifetime of a connection
	// Initialize *gorm.DB from the existing connection.
	// gorm configuration:
	gormDB, err = gorm.Open(
		mysql.New(
			mysql.Config{
				Conn: sqlDB,
			},
		),
		&gorm.Config{
			SkipDefaultTransaction: true, // disable the default transaction around writes
			DisableAutomaticPing:   true, // disable the automatic ping
		},
	)
	if err != nil {
		fmt.Printf("gorm.Open() err:%s\n", err)
		return
	}
	// gorm also supports advanced mysql-driver configuration and custom drivers.
}

// main runs AutoMigrate for Student and demonstrates (mostly via
// commented-out examples) the Migrator interface: table, column and
// index operations.
func main() {
	defer func() {
		if err := recover(); err != nil {
			fmt.Printf("panic() err:%s\n", err)
			return
		}
	}()
	// Initialize the database.
	InitDB()
	// Auto-migration.
	err := gormDB.AutoMigrate(&Student{})
	if err != nil {
		fmt.Printf("自动迁移失败,err:%s\n", err)
		return
	}
	// Migrator interface methods.
	// Return the name of the currently connected database:
	// currentDBName := gormDB.Migrator().CurrentDatabase()
	// fmt.Printf("当前操作的数据库名:%s\n", currentDBName)
	// Table operations.
	// Check whether the table already exists:
	isExist := gormDB.Migrator().HasTable(&Student{})
	// isExist := gormDB.Migrator().HasTable("students")
	// fmt.Printf("数据库表是否存在:%t\n", isExist)
	// Create the table:
	if isExist == false {
		// Table does not exist yet.
		// By default GORM uses ID as the primary key; other fields can be made
		// primary with the `primaryKey` tag, and tagging several fields builds a
		// composite primary key. Integer primary keys default to AutoIncrement;
		// disable with the tag autoIncrement:false.
		// GORM pluralizes the struct name as the table name. To override it,
		// implement the Tabler interface — note the result is cached, so it
		// cannot vary dynamically; use Scopes for dynamic table names (not
		// covered here).
		// GORM uses the struct field name as the column name; override with the
		// `column` tag.
		/*err := gormDB.Migrator().CreateTable(&Student{})
		if err != nil {
			fmt.Printf("创建数据库表失败,错误:%s\n", err)
			return
		}
		fmt.Println("创建数据库表成功")*/
	} else {
		// Table already exists.
		// Rename the table:
		// newName := "stu_" + time.Now().Format("2006-01-02 15:04:05")
		// gormDB.Migrator().RenameTable("students", newName)
		// gormDB.Migrator().RenameTable("students", "stu")
		// gormDB.Migrator().RenameTable(&Student{}, &Stu{})
		// Drop the table:
		// gormDB.Migrator().DropTable("students")
		// gormDB.Migrator().DropTable(&Student{})
	}
	// Column operations.
	// Add a column:
	/*type Student struct {
		Score uint
	}
	err := gormDB.Migrator().AddColumn(&Student{}, "Score")
	if err != nil {
		fmt.Printf("添加字段错误,err:%s\n", err)
		return
	}*/
	// Drop a column:
	// gormDB.Migrator().DropColumn(&Student{}, "email")
	// Rename a column:
	/*type Student struct{
		Name string
		UserName string
	}
	gormDB.Migrator().RenameColumn(&Student{}, "name", "user_name")*/
	// Check whether a column exists:
	// isExistField := gormDB.Migrator().HasColumn(&Student{}, "name")
	// fmt.Printf("字段是否存在:%t\n", isExistField)
	// Index operations (local Student type shadows the model above and
	// carries index tags).
	type Student struct {
		Name     string `gorm:"index:idx_name"`
		UserName string `gorm:"index:idx_user_name"`
	}
	// Create an index:
	/*err = gormDB.Migrator().CreateIndex(&Student{}, "Name")
	if err != nil {
		fmt.Printf("创建索引失败1,err:%s\n", err)
		return
	}*/
	/*err = gormDB.Migrator().CreateIndex(&Student{}, "idx_name")
	if err != nil {
		fmt.Printf("创建索引失败2,err:%s\n", err)
		return
	}*/
	// Drop an index:
	// gormDB.Migrator().DropIndex(&Student{}, "idx_name")
	// gormDB.Migrator().DropIndex(&Student{}, "UserName")
	// Rename an index:
	err = gormDB.Migrator().RenameIndex(&Student{}, "UserName", "Name")
	if err != nil {
		fmt.Printf("修改索引名称失败,err:%s\n", err)
		return
	}
	// gormDB.Migrator().RenameIndex(&Student{}, "idx_name", "idx_user_name")
	// Check whether an index exists:
	// isExistIndex := gormDB.Migrator().HasIndex(&Student{}, "Name")
	// isExistIndex := gormDB.Migrator().HasIndex(&Student{}, "idx_name")
	// fmt.Printf("查询索引是否存在:%t\n", isExistIndex)
	// Constraint operations (not demonstrated).
}
package fileutil

import (
	"io/ioutil"
	"os"

	"github.com/pkg/errors"
)

// Utility constants for managing files
const (
	DefaultFilePerm = 0644
	DefaultDirPerm  = 0755
)

// Copy copies a file from a source to a destination
func Copy(source string, destination string) error {
	data, err := ioutil.ReadFile(source)
	if err != nil {
		return errors.Wrapf(err, "Error reading file %s", source)
	}
	if err := ioutil.WriteFile(destination, data, DefaultFilePerm); err != nil {
		return errors.Wrapf(err, "Error writing file %s", destination)
	}
	return nil
}

// Exist returns a boolean if the file exists
func Exist(filename string) bool {
	if _, err := os.Stat(filename); err != nil {
		return false
	}
	return true
}
package backup_test /* import ( "bytes" "net/http" "net/http/httptest" "os" "strings" "testing" "github.com/messagedb/messagedb" "github.com/messagedb/messagedb/cmd/influxd" ) // Ensure the backup can download from the server and save to disk. func TestBackupCommand(t *testing.T) { // Mock the backup endpoint. s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/data/snapshot" { t.Fatalf("unexpected url path: %s", r.URL.Path) } // Write a simple snapshot to the buffer. sw := messagedb.NewSnapshotWriter() sw.Snapshot = &messagedb.Snapshot{Files: []messagedb.SnapshotFile{ {Name: "meta", Size: 5, Index: 10}, }} sw.FileWriters["meta"] = messagedb.NopWriteToCloser(bytes.NewBufferString("55555")) if _, err := sw.WriteTo(w); err != nil { t.Fatal(err) } })) defer s.Close() // Create a temp path and remove incremental backups at the end. path := tempfile() defer os.Remove(path) defer os.Remove(path + ".0") defer os.Remove(path + ".1") // Execute the backup against the mock server. for i := 0; i < 3; i++ { if err := NewBackupCommand().Run("-host", s.URL, path); err != nil { t.Fatal(err) } } // Verify snapshot and two incremental snapshots were written. if _, err := os.Stat(path); err != nil { t.Fatalf("snapshot not found: %s", err) } else if _, err = os.Stat(path + ".0"); err != nil { t.Fatalf("incremental snapshot(0) not found: %s", err) } else if _, err = os.Stat(path + ".1"); err != nil { t.Fatalf("incremental snapshot(1) not found: %s", err) } } // Ensure the backup command returns an error if flags cannot be parsed. func TestBackupCommand_ErrFlagParse(t *testing.T) { cmd := NewBackupCommand() if err := cmd.Run("-bad-flag"); err == nil || err.Error() != `flag provided but not defined: -bad-flag` { t.Fatal(err) } else if !strings.Contains(cmd.Stderr.String(), "usage") { t.Fatal("usage message not displayed") } } // Ensure the backup command returns an error if the host cannot be parsed. 
func TestBackupCommand_ErrInvalidHostURL(t *testing.T) { if err := NewBackupCommand().Run("-host", "http://%f"); err == nil || err.Error() != `parse host url: parse http://%f: hexadecimal escape in host` { t.Fatal(err) } } // Ensure the backup command returns an error if the output path is not specified. func TestBackupCommand_ErrPathRequired(t *testing.T) { if err := NewBackupCommand().Run("-host", "//localhost"); err == nil || err.Error() != `snapshot path required` { t.Fatal(err) } } // Ensure the backup returns an error if it cannot connect to the server. func TestBackupCommand_ErrConnectionRefused(t *testing.T) { // Start and immediately stop a server so we have a dead port. s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) s.Close() // Execute the backup command. path := tempfile() defer os.Remove(path) if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || !(strings.Contains(err.Error(), `connection refused`) || strings.Contains(err.Error(), `No connection could be made`)) { t.Fatal(err) } } // Ensure the backup returns any non-200 status codes. func TestBackupCommand_ErrServerError(t *testing.T) { s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) })) defer s.Close() // Execute the backup command. path := tempfile() defer os.Remove(path) if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || err.Error() != `download: snapshot error: status=500` { t.Fatal(err) } } // BackupCommand is a test wrapper for main.BackupCommand. type BackupCommand struct { *main.BackupCommand Stderr bytes.Buffer } // NewBackupCommand returns a new instance of BackupCommand. func NewBackupCommand() *BackupCommand { cmd := &BackupCommand{BackupCommand: main.NewBackupCommand()} cmd.BackupCommand.Stderr = &cmd.Stderr return cmd } */
/** * Definition for a binary tree node. * type TreeNode struct { * Val int * Left *TreeNode * Right *TreeNode * } */ func kthSmallest(root *TreeNode, k int) int { _, res := r(root, 0, k) return res } func r(node *TreeNode, c, k int) (int, int) { cc := c res := 0 if node.Left != nil { cc, res = r(node.Left, cc, k) if cc == k { return cc, res } } cc++ if cc == k { return cc, node.Val } if node.Right != nil { cc, res = r(node.Right, cc, k) if cc == k { return cc, res } } return cc, res }
package isset

import (
	"fmt"
)

// Bool wraps a bool together with a flag recording whether a value has
// actually been assigned ("set") or not (conceptually nil).
type Bool struct {
	value bool
	valid bool
}

// IsValid returns whether a value has been set
func (b *Bool) IsValid() bool {
	return b.valid
}

// Set stores nb and marks the value as set.
func (b *Bool) Set(nb bool) {
	b.value = nb
	b.valid = true
}

// Unset clears the set flag, like assigning nil; the stored value is
// left untouched but becomes unreadable through Get.
func (b *Bool) Unset() {
	b.valid = false
}

// Get returns the contained value, or an error when nothing is set.
func (b *Bool) Get() (bool, error) {
	if !b.valid {
		return false, fmt.Errorf("runtime error: attempt to get value of Bool which is set to nil")
	}
	return b.value, nil
}

// IsSetBool2Int encodes the state as an int: 1 for set-true, 0 for
// set-false, -1 for unset.
func (b *Bool) IsSetBool2Int() int {
	switch {
	case !b.valid:
		return -1
	case b.value:
		return 1
	default:
		return 0
	}
}

// Reset returns the Bool to its zero state: unset, with value false.
func (b *Bool) Reset() {
	*b = Bool{}
}

// IsSetInt2Bool decodes i back into the Bool (1 -> set true, 0 -> set
// false, -1 -> unset with value false; other values are ignored) and
// returns a copy of the result.
func (b *Bool) IsSetInt2Bool(i int) Bool {
	switch i {
	case 1:
		b.Set(true)
	case 0:
		b.Set(false)
	case -1:
		*b = Bool{}
	}
	return *b
}
package led

import (
	"fmt"
	"log"
	"strconv"
	"time"

	"github.com/water78813/iot/manager"
	"gobot.io/x/gobot/platforms/firmata"
)

// ledModule blinks an LED attached to a firmata device over TCP.
type ledModule struct {
	ledState  int           // last level written to the pin: 0 = low, 1 = high
	pin       string        // firmata pin identifier
	ip        string        // host (and port) of the firmata TCP adaptor
	interval  string        // blink half-period in seconds, kept as the raw string from the request
	funcState string        // lifecycle state: "init", "start" or "stop"
	stopCh    chan struct{} // buffered(1); Stop signals Run's loop through it
}

// LedAccessor handles a control request described by m (keys: "pin",
// "host", "interval", "status"). It lazily registers a led module with
// the global manager on first use, then starts, stops or removes it
// according to "status".
//
// NOTE(review): the manager is used both as mng.AddMod(...) and as
// (*mng).GetMod(...) — presumably GetMng returns a pointer whose
// methods partly have value receivers; confirm against the manager
// package.
func LedAccessor(m map[string]string) error {
	pin := m["pin"]
	host := m["host"]
	interval := m["interval"]
	status := m["status"]
	mng := manager.GetMng()
	led, err := (*mng).GetMod("led")
	if err != nil {
		// Not registered yet: create and register a fresh module.
		lm := &ledModule{
			funcState: "init",
			ledState:  0,
			pin:       pin,
			ip:        host,
			interval:  interval,
			stopCh:    make(chan struct{}, 1),
		}
		mng.AddMod("led", lm)
		if led, err = (*mng).GetMod("led"); err != nil {
			return err
		}
	}
	fmt.Println("in accessor")
	if status == "on" {
		(*led).Start()
		mng.ModReload()
	} else if status == "off" {
		(*led).Stop()
	} else if status == "remove" {
		// NOTE(review): Stop is only called when the module is NOT
		// running ("funcState != on"), which looks inverted — confirm
		// the intended semantics before relying on this path.
		if (*led).GetFuncState() != "on" {
			(*led).Stop()
		}
		mng.RemoveMod("led")
	}
	return nil
}

// Start marks the module as started; the actual blinking loop is
// driven by Run (presumably invoked by the manager — confirm).
func (lm *ledModule) Start() {
	fmt.Println("start")
	lm.funcState = "start"
}

// Run connects to the firmata adaptor and toggles the pin forever,
// sleeping lm.interval seconds between toggles, until a signal arrives
// on stopCh. On exit it drives the pin low and disconnects.
//
// NOTE(review): when interval parses to 0 the loop never selects on
// stopCh and cannot be stopped — verify callers always pass a positive
// interval.
func (lm *ledModule) Run() {
	interg, _ := strconv.Atoi(lm.interval) // parse error silently yields 0
	interval := time.Duration(interg) * time.Second
	next := time.Now()
	adaptor := firmata.NewTCPAdaptor(lm.ip)
	err := adaptor.Connect()
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		// Before stopping the program, restore the pin to its original
		// (low) state.
		err := adaptor.DigitalWrite(lm.pin, 0)
		if err != nil {
			log.Fatal(err)
		}
		// Wait for the esp8266 to receive the signal.
		time.Sleep(time.Second)
		fmt.Println("End")
		adaptor.Disconnect()
	}()
	for {
		// Toggle the pin level.
		if lm.ledState == 0 {
			err := adaptor.DigitalWrite(lm.pin, 1)
			if err != nil {
				log.Fatal(err)
			}
			lm.ledState = 1
		} else {
			err := adaptor.DigitalWrite(lm.pin, 0)
			if err != nil {
				log.Fatal(err)
			}
			lm.ledState = 0
		}
		if interval > 0 {
			// Tick on a fixed schedule anchored at `next`; if we fell
			// behind, re-anchor at now + interval instead of bursting.
			now := time.Now()
			next = next.Add(interval)
			if next.Before(now) {
				next = now.Add(interval)
			}
			select {
			case <-lm.stopCh:
				return
			case <-time.After(next.Sub(now)):
			}
		}
	}
}

// Stop marks the module stopped and signals Run's loop to exit. The
// buffered channel makes a single Stop non-blocking even when Run is
// not currently selecting.
func (lm *ledModule) Stop() {
	lm.funcState = "stop"
	lm.stopCh <- struct{}{}
}

// GetFuncState returns the current lifecycle state string.
func (lm *ledModule) GetFuncState() string {
	return lm.funcState
}

// SetFuncState overrides the lifecycle state string.
func (lm *ledModule) SetFuncState(s string) {
	lm.funcState = s
}
package smaller

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// TestCountSmaller drives countSmaller through a table of inputs and
// diffs each result against the hand-checked expectation.
func TestCountSmaller(t *testing.T) {
	cases := map[string]struct {
		input []int
		want  []int
	}{
		"Example 1": {
			input: []int{5, 2, 6, 1},
			// To the right of 5 there are two smaller elements (2, 1);
			// to the right of 2 there is one (1); to the right of 6
			// there is one (1); to the right of 1 there are none.
			want: []int{2, 1, 1, 0},
		},
		"Example 2": {
			input: []int{-1},
			want:  []int{0},
		},
		"Example 3": {
			input: []int{-1, -1},
			want:  []int{0, 0},
		},
	}

	for name, c := range cases {
		c := c
		t.Run(name, func(t *testing.T) {
			got := countSmaller(c.input)
			if diff := cmp.Diff(c.want, got); diff != "" {
				t.Errorf("countSmaller(%d) mismatch (-want +got):\n%s", c.input, diff)
			}
		})
	}
}
/* A biquadratic number is a number that is the fourth power of another integer, for example: 3^4 = 3*3*3*3 = 81 Given an integer as input, output the closest biquadratic number. Here are the first 15 double-squares: 1, 16, 81, 256, 625, 1296, 2401, 4096, 6561, 10000, 14641, 20736, 28561, 38416, 50625 This is code-golf so fewest bytes in each language wins This is OEIS A000583 */ package main import ( "fmt" "sort" ) func main() { for i := 0; i <= 10000; i++ { fmt.Println(i, biquadratic(i)) } } func biquadratic(n int) int { i := sort.Search(n, func(x int) bool { return ipow4(x) >= n }) x := ipow4(i) y := ipow4(i - 1) if abs(x-n) > abs(y-n) { return y } return x } // https://oeis.org/A000583 func ipow4(n int) int { return n * n * n * n } func abs(x int) int { if x < 0 { x = -x } return x }
package git

/*
#include <git2.h>

extern void _go_git_populate_stash_apply_callbacks(git_stash_apply_options *opts);
extern int _go_git_stash_foreach(git_repository *repo, void *payload);
*/
import "C"

import (
	"runtime"
	"unsafe"
)

// StashFlag are flags that affect the stash save operation.
type StashFlag int

const (
	// StashDefault represents no option, default.
	StashDefault StashFlag = C.GIT_STASH_DEFAULT

	// StashKeepIndex leaves all changes already added to the
	// index intact in the working directory.
	StashKeepIndex StashFlag = C.GIT_STASH_KEEP_INDEX

	// StashIncludeUntracked means all untracked files are also
	// stashed and then cleaned up from the working directory.
	StashIncludeUntracked StashFlag = C.GIT_STASH_INCLUDE_UNTRACKED

	// StashIncludeIgnored means all ignored files are also
	// stashed and then cleaned up from the working directory.
	StashIncludeIgnored StashFlag = C.GIT_STASH_INCLUDE_IGNORED
)

// StashCollection represents the possible operations that can be
// performed on the collection of stashes for a repository.
type StashCollection struct {
	doNotCompare
	repo *Repository
}

// Save saves the local modifications to a new stash.
//
// Stasher is the identity of the person performing the stashing.
// Message is the optional description along with the stashed state.
// Flags control the stashing process and are given as bitwise OR.
func (c *StashCollection) Save(
	stasher *Signature, message string, flags StashFlag) (*Oid, error) {

	oid := new(Oid)

	stasherC, err := stasher.toC()
	if err != nil {
		return nil, err
	}
	defer C.git_signature_free(stasherC)

	messageC := C.CString(message)
	defer C.free(unsafe.Pointer(messageC))

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_stash_save(
		oid.toC(), c.repo.ptr,
		stasherC, messageC,
		C.uint32_t(flags))
	runtime.KeepAlive(c)
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	return oid, nil
}

// StashApplyFlag are flags that affect the stash apply operation.
type StashApplyFlag int

const (
	// StashApplyDefault is the default.
	StashApplyDefault StashApplyFlag = C.GIT_STASH_APPLY_DEFAULT

	// StashApplyReinstateIndex will try to reinstate not only the
	// working tree's changes, but also the index's changes.
	StashApplyReinstateIndex StashApplyFlag = C.GIT_STASH_APPLY_REINSTATE_INDEX
)

// StashApplyProgress are flags describing the progress of the apply operation.
type StashApplyProgress int

const (
	// StashApplyProgressNone means no progress has been reported yet.
	StashApplyProgressNone StashApplyProgress = C.GIT_STASH_APPLY_PROGRESS_NONE

	// StashApplyProgressLoadingStash means the stashed data is being
	// loaded from the object store.
	StashApplyProgressLoadingStash StashApplyProgress = C.GIT_STASH_APPLY_PROGRESS_LOADING_STASH

	// StashApplyProgressAnalyzeIndex means the stored index is being analyzed.
	StashApplyProgressAnalyzeIndex StashApplyProgress = C.GIT_STASH_APPLY_PROGRESS_ANALYZE_INDEX

	// StashApplyProgressAnalyzeModified means the modified files are being analyzed.
	StashApplyProgressAnalyzeModified StashApplyProgress = C.GIT_STASH_APPLY_PROGRESS_ANALYZE_MODIFIED

	// StashApplyProgressAnalyzeUntracked means the untracked and ignored files are being analyzed.
	StashApplyProgressAnalyzeUntracked StashApplyProgress = C.GIT_STASH_APPLY_PROGRESS_ANALYZE_UNTRACKED

	// StashApplyProgressCheckoutUntracked means the untracked files are being written to disk.
	StashApplyProgressCheckoutUntracked StashApplyProgress = C.GIT_STASH_APPLY_PROGRESS_CHECKOUT_UNTRACKED

	// StashApplyProgressCheckoutModified means the modified files are being written to disk.
	StashApplyProgressCheckoutModified StashApplyProgress = C.GIT_STASH_APPLY_PROGRESS_CHECKOUT_MODIFIED

	// StashApplyProgressDone means the stash was applied successfully.
	StashApplyProgressDone StashApplyProgress = C.GIT_STASH_APPLY_PROGRESS_DONE
)

// StashApplyProgressCallback is the apply operation notification callback.
type StashApplyProgressCallback func(progress StashApplyProgress) error

// stashApplyProgressCallbackData couples the user callback with the
// error slot the wrapper functions report through.
type stashApplyProgressCallbackData struct {
	callback    StashApplyProgressCallback
	errorTarget *error
}

//export stashApplyProgressCallback
func stashApplyProgressCallback(progress C.git_stash_apply_progress_t, handle unsafe.Pointer) C.int {
	// The handle is an opaque token registered with pointerHandles so
	// that no Go pointer crosses into C (cgo pointer-passing rules).
	payload := pointerHandles.Get(handle)
	data, ok := payload.(*stashApplyProgressCallbackData)
	if !ok {
		panic("could not retrieve data for handle")
	}
	if data == nil || data.callback == nil {
		return C.int(ErrorCodeOK)
	}

	err := data.callback(StashApplyProgress(progress))
	if err != nil {
		// Stash the Go error for the caller and tell libgit2 to abort.
		*data.errorTarget = err
		return C.int(ErrorCodeUser)
	}

	return C.int(ErrorCodeOK)
}

// StashApplyOptions represents options to control the apply operation.
type StashApplyOptions struct {
	Flags            StashApplyFlag
	CheckoutOptions  CheckoutOptions            // options to use when writing files to the working directory
	ProgressCallback StashApplyProgressCallback // optional callback to notify the consumer of application progress
}

// DefaultStashApplyOptions initializes the structure with default values.
func DefaultStashApplyOptions() (StashApplyOptions, error) {
	optsC := C.git_stash_apply_options{}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ecode := C.git_stash_apply_options_init(&optsC, C.GIT_STASH_APPLY_OPTIONS_VERSION)
	if ecode < 0 {
		return StashApplyOptions{}, MakeGitError(ecode)
	}
	return StashApplyOptions{
		Flags:           StashApplyFlag(optsC.flags),
		CheckoutOptions: checkoutOptionsFromC(&optsC.checkout_options),
	}, nil
}

// populateStashApplyOptions translates opts into the C options struct,
// registering the progress callback payload with pointerHandles.
// Errors raised by the Go callback are delivered through errorTarget.
func populateStashApplyOptions(copts *C.git_stash_apply_options, opts *StashApplyOptions, errorTarget *error) *C.git_stash_apply_options {
	C.git_stash_apply_options_init(copts, C.GIT_STASH_APPLY_OPTIONS_VERSION)
	if opts == nil {
		return nil
	}
	copts.flags = C.uint32_t(opts.Flags)
	populateCheckoutOptions(&copts.checkout_options, &opts.CheckoutOptions, errorTarget)
	if opts.ProgressCallback != nil {
		progressData := &stashApplyProgressCallbackData{
			callback:    opts.ProgressCallback,
			errorTarget: errorTarget,
		}
		C._go_git_populate_stash_apply_callbacks(copts)
		copts.progress_payload = pointerHandles.Track(progressData)
	}
	return copts
}

// freeStashApplyOptions releases the resources that
// populateStashApplyOptions allocated or registered.
func freeStashApplyOptions(copts *C.git_stash_apply_options) {
	if copts == nil {
		return
	}
	if copts.progress_payload != nil {
		pointerHandles.Untrack(copts.progress_payload)
	}
	freeCheckoutOptions(&copts.checkout_options)
}

// Apply applies a single stashed state from the stash list.
//
// If local changes in the working directory conflict with changes in the
// stash then ErrorCodeConflict will be returned. In this case, the index
// will always remain unmodified and all files in the working directory will
// remain unmodified. However, if you are restoring untracked files or
// ignored files and there is a conflict when applying the modified files,
// then those files will remain in the working directory.
//
// If passing the StashApplyReinstateIndex flag and there would be conflicts
// when reinstating the index, the function will return ErrorCodeConflict
// and both the working directory and index will be left unmodified.
//
// Note that a minimum checkout strategy of 'CheckoutSafe' is implied.
//
// 'index' is the position within the stash list. 0 points to the most
// recent stashed state.
//
// Returns error code ErrorCodeNotFound if there's no stashed state for the given
// index, error code ErrorCodeConflict if local changes in the working directory
// conflict with changes in the stash, the user returned error from the
// StashApplyProgressCallback, if any, or other error code.
//
// Error codes can be interrogated with IsErrorCode(err, ErrorCodeNotFound).
func (c *StashCollection) Apply(index int, opts StashApplyOptions) error {
	// err receives any error produced by the Go progress callback.
	var err error
	optsC := populateStashApplyOptions(&C.git_stash_apply_options{}, &opts, &err)
	defer freeStashApplyOptions(optsC)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_stash_apply(c.repo.ptr, C.size_t(index), optsC)
	runtime.KeepAlive(c)
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}

// StashCallback is called per entry when iterating over all
// the stashed states.
//
// 'index' is the position of the current stash in the stash list,
// 'message' is the message used when creating the stash and 'id'
// is the commit id of the stash.
type StashCallback func(index int, message string, id *Oid) error

// stashCallbackData couples the user callback with the error slot the
// foreach wrapper reports through.
type stashCallbackData struct {
	callback    StashCallback
	errorTarget *error
}

//export stashForeachCallback
func stashForeachCallback(index C.size_t, message *C.char, id *C.git_oid, handle unsafe.Pointer) C.int {
	payload := pointerHandles.Get(handle)
	data, ok := payload.(*stashCallbackData)
	if !ok {
		panic("could not retrieve data for handle")
	}

	err := data.callback(int(index), C.GoString(message), newOidFromC(id))
	if err != nil {
		*data.errorTarget = err
		return C.int(ErrorCodeUser)
	}

	return C.int(ErrorCodeOK)
}

// Foreach loops over all the stashed states and calls the callback
// for each one.
//
// If callback returns an error, this will stop looping.
func (c *StashCollection) Foreach(callback StashCallback) error {
	var err error
	data := stashCallbackData{
		callback:    callback,
		errorTarget: &err,
	}

	handle := pointerHandles.Track(&data)
	defer pointerHandles.Untrack(handle)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C._go_git_stash_foreach(c.repo.ptr, handle)
	runtime.KeepAlive(c)
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}

// Drop removes a single stashed state from the stash list.
//
// 'index' is the position within the stash list. 0 points
// to the most recent stashed state.
//
// Returns error code ErrorCodeNotFound if there's no stashed
// state for the given index.
func (c *StashCollection) Drop(index int) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_stash_drop(c.repo.ptr, C.size_t(index))
	runtime.KeepAlive(c)
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}

// Pop applies a single stashed state from the stash list
// and removes it from the list if successful.
//
// 'index' is the position within the stash list. 0 points
// to the most recent stashed state.
//
// 'opts' controls how stashes are applied.
//
// Returns error code ErrorCodeNotFound if there's no stashed
// state for the given index.
func (c *StashCollection) Pop(index int, opts StashApplyOptions) error {
	var err error
	optsC := populateStashApplyOptions(&C.git_stash_apply_options{}, &opts, &err)
	defer freeStashApplyOptions(optsC)

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_stash_pop(c.repo.ptr, C.size_t(index), optsC)
	runtime.KeepAlive(c)
	if ret == C.int(ErrorCodeUser) && err != nil {
		return err
	}
	if ret < 0 {
		return MakeGitError(ret)
	}
	return nil
}
package nutanix

import (
	"encoding/json"

	"github.com/pkg/errors"

	machinev1 "github.com/openshift/api/machine/v1"
	nutanixtypes "github.com/openshift/installer/pkg/types/nutanix"
)

// config is the JSON shape handed to Terraform as tfvars for the
// Nutanix platform.
type config struct {
	PrismCentralAddress            string            `json:"nutanix_prism_central_address"`
	Port                           string            `json:"nutanix_prism_central_port"`
	Username                       string            `json:"nutanix_username"`
	Password                       string            `json:"nutanix_password"`
	MemoryMiB                      int64             `json:"nutanix_control_plane_memory_mib"`
	DiskSizeMiB                    int64             `json:"nutanix_control_plane_disk_mib"`
	NumCPUs                        int64             `json:"nutanix_control_plane_num_cpus"`
	NumCoresPerSocket              int64             `json:"nutanix_control_plane_cores_per_socket"`
	ProjectUUID                    string            `json:"nutanix_control_plane_project_uuid"`
	Categories                     map[string]string `json:"nutanix_control_plane_categories"`
	PrismElementUUID               string            `json:"nutanix_prism_element_uuid"`
	SubnetUUID                     string            `json:"nutanix_subnet_uuid"`
	Image                          string            `json:"nutanix_image"`
	ImageURI                       string            `json:"nutanix_image_uri"`
	BootstrapIgnitionImage         string            `json:"nutanix_bootstrap_ignition_image"`
	BootstrapIgnitionImageFilePath string            `json:"nutanix_bootstrap_ignition_image_filepath"`
}

// TFVarsSources contains the parameters to be converted into Terraform variables
type TFVarsSources struct {
	PrismCentralAddress string
	Port                string
	Username            string
	Password            string
	ImageURI            string

	BootstrapIgnitionData string
	ClusterID             string

	ControlPlaneConfigs []*machinev1.NutanixMachineProviderConfig
}

// TFVars generate Nutanix-specific Terraform variables
//
// It builds the bootstrap ignition ISO, then derives all VM sizing and
// placement values from the FIRST control-plane machine config.
//
// NOTE(review): this assumes ControlPlaneConfigs is non-empty and that
// Cluster.UUID, Subnets[0].UUID and Image.Name are non-nil (validated
// upstream, presumably) — confirm against the callers; a nil here
// would panic.
func TFVars(sources TFVarsSources) ([]byte, error) {
	bootstrapIgnitionImagePath, err := nutanixtypes.CreateBootstrapISO(sources.ClusterID, sources.BootstrapIgnitionData)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create bootstrap ignition iso")
	}

	bootstrapIgnitionImageName := nutanixtypes.BootISOImageName(sources.ClusterID)
	controlPlaneConfig := sources.ControlPlaneConfigs[0]
	cfg := &config{
		Port:                sources.Port,
		PrismCentralAddress: sources.PrismCentralAddress,
		Username:            sources.Username,
		Password:            sources.Password,
		// Quantities are stored in bytes; Terraform expects MiB.
		MemoryMiB:                      controlPlaneConfig.MemorySize.Value() / (1024 * 1024),
		DiskSizeMiB:                    controlPlaneConfig.SystemDiskSize.Value() / (1024 * 1024),
		NumCPUs:                        int64(controlPlaneConfig.VCPUSockets),
		NumCoresPerSocket:              int64(controlPlaneConfig.VCPUsPerSocket),
		PrismElementUUID:               *controlPlaneConfig.Cluster.UUID,
		SubnetUUID:                     *controlPlaneConfig.Subnets[0].UUID,
		Image:                          *controlPlaneConfig.Image.Name,
		ImageURI:                       sources.ImageURI,
		BootstrapIgnitionImage:         bootstrapIgnitionImageName,
		BootstrapIgnitionImageFilePath: bootstrapIgnitionImagePath,
	}

	// The project is optional and only forwarded when referenced by UUID.
	if controlPlaneConfig.Project.Type == machinev1.NutanixIdentifierUUID {
		cfg.ProjectUUID = *controlPlaneConfig.Project.UUID
	}

	cfg.Categories = make(map[string]string, len(controlPlaneConfig.Categories))
	for _, category := range controlPlaneConfig.Categories {
		cfg.Categories[category.Key] = category.Value
	}

	return json.MarshalIndent(cfg, "", "  ")
}
package dp

import (
	"github.com/numacci/go-algorithm/stl/function"
)

// KnapsackDP solves the 0/1 knapsack problem by dynamic programming:
// given n items with weights w and values v, choose a subset whose
// total weight does not exceed W so that the total value is maximised,
// and return that maximum value.
//
// Because the remaining capacity has to be tracked, the DP table is
// defined as:
//   dp[i+1][j] = the maximum total value achievable using only the
//                first i+1 items with total weight at most j.
// Choosing nothing yields value zero, so the base case is
//   dp[0][j] = 0 for every j.
func KnapsackDP(n, W int, w, v []int) int {
	dp := make([][]int, n+1)
	for i := 0; i <= n; i++ {
		dp[i] = make([]int, W+1)
	}
	for i := 0; i < n; i++ {
		for j := 0; j <= W; j++ {
			if j < w[i] {
				// Item i is heavier than the remaining capacity j, so it
				// cannot be taken: carry the previous row forward.
				dp[i+1][j] = dp[i][j]
			} else {
				// Item i fits: keep the better of skipping it and
				// taking it (its value plus the best for the remaining
				// capacity).
				dp[i+1][j] = function.Max(dp[i][j], dp[i][j-w[i]]+v[i])
			}
		}
	}
	return dp[n][W]
}
package usecases

import (
	"time"

	"github.com/michaldziurowski/tech-challenge-time/server/timetracking/domain"
)

// EventStore persists session events and reads them back by user and
// time range.
type EventStore interface {
	// AddEvent records a single session event.
	AddEvent(event domain.SessionEvent) error
	// GetEventsByRange returns the events stored for userId whose
	// timestamps fall between from and to.
	GetEventsByRange(userId string, from time.Time, to time.Time) ([]domain.SessionEvent, error)
}

// Repository stores tracking sessions and allows them to be renamed
// and toggled between running and stopped.
type Repository interface {
	// AddSession stores a new session and returns its generated id.
	AddSession(session domain.Session) (int64, error)
	// GetSession fetches the session identified by sessionId.
	GetSession(sessionId int64) (domain.Session, error)
	// SetSessionName renames the session identified by sessionId.
	SetSessionName(sessionId int64, name string) error
	// ToggleSessionState flips the session's running/stopped state.
	ToggleSessionState(sessionId int64) error
}

// DateProvider abstracts obtaining the current time so it can be
// stubbed out in tests.
type DateProvider interface {
	// GetCurrent returns the current time.
	GetCurrent() time.Time
}
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha3 import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" capierrors "sigs.k8s.io/cluster-api/errors" ) const ( // ClusterFinalizer allows BareMetalClusterReconciler to clean up resources associated with BareMetalCluster before // removing it from the apiserver. ClusterFinalizer = "baremetalcluster.infrastructure.cluster.x-k8s.io" ) // BareMetalClusterSpec defines the desired state of BareMetalCluster. type BareMetalClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint"` NoCloudProvider bool `json:"noCloudProvider,omitempty"` } // IsValid returns an error if the object is not valid, otherwise nil. The // string representation of the error is suitable for human consumption. func (s *BareMetalClusterSpec) IsValid() error { missing := []string{} if s.ControlPlaneEndpoint.Host == "" { missing = append(missing, "ControlPlaneEndpoint.Host") } if s.ControlPlaneEndpoint.Port == 0 { missing = append(missing, "ControlPlaneEndpoint.Host") } if len(missing) > 0 { return fmt.Errorf("Missing fields from Spec: %v", missing) } return nil } // BareMetalClusterStatus defines the observed state of BareMetalCluster. type BareMetalClusterStatus struct { // LastUpdated identifies when this status was last observed. 
// +optional LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` // FailureReason indicates that there is a fatal problem reconciling the // state, and will be set to a token value suitable for // programmatic interpretation. // +optional FailureReason *capierrors.ClusterStatusError `json:"failureReason,omitempty"` // FailureMessage indicates that there is a fatal problem reconciling the // state, and will be set to a descriptive error message. // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Ready denotes that the baremetal cluster (infrastructure) is ready. In // Baremetal case, it does not mean anything for now as no infrastructure // steps need to be performed. Required by Cluster API. Set to True by the // BaremetalCluster controller after creation. Ready bool `json:"ready"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:path=baremetalclusters,scope=Namespaced,categories=cluster-api,shortName=bmc;bmcluster // +kubebuilder:storageversion // +kubebuilder:subresource:status // +kubebuilder:object:root=true // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="BaremetalCluster is Ready" // +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.failureReason",description="Most recent error" // +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this BMCluster belongs" // +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint",description="Control plane endpoint" // BareMetalCluster is the Schema for the baremetalclusters API type BareMetalCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec BareMetalClusterSpec `json:"spec,omitempty"` Status BareMetalClusterStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true // BareMetalClusterList contains a 
list of BareMetalCluster type BareMetalClusterList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []BareMetalCluster `json:"items"` } func init() { SchemeBuilder.Register(&BareMetalCluster{}, &BareMetalClusterList{}) }
package main

import "fmt"

func main() {
	x := "192304"
	y := "92304"
	fmt.Println(multiply(x, y))
}

// multiply returns the decimal product of two non-negative integers
// given as digit strings, using schoolbook long multiplication so that
// arbitrarily long inputs work without overflow.
func multiply(num1 string, num2 string) string {
	if num1 == "0" || num2 == "0" {
		return "0"
	}
	// Work on little-endian digit slices.
	top := []byte(num1)
	bottom := []byte(num2)
	reverse(top)
	reverse(bottom)
	var total []byte
	for i, d := range bottom {
		// Multiply by one digit of the bottom operand, shifted into
		// its place value, and accumulate.
		total = add(total, mul(top, d, i+1))
	}
	reverse(total)
	return string(total)
}

// mul multiplies the little-endian digit string a by the single ASCII
// digit b, prefixing digit-1 zero digits as the place-value shift.
func mul(a []byte, b byte, digit int) []byte {
	out := make([]byte, digit-1, len(a)+digit)
	for i := range out {
		out[i] = '0'
	}
	carry := 0
	for _, d := range a {
		prod := int(d-'0')*int(b-'0') + carry
		out = append(out, byte(prod%10)+'0')
		carry = prod / 10
	}
	if carry > 0 {
		out = append(out, byte(carry)+'0')
	}
	return out
}

// add sums two little-endian ASCII digit strings.
func add(a []byte, b []byte) []byte {
	longest := len(a)
	if len(b) > longest {
		longest = len(b)
	}
	out := make([]byte, 0, longest+1)
	carry := 0
	for i := 0; i < longest; i++ {
		sum := carry
		if i < len(a) {
			sum += int(a[i] - '0')
		}
		if i < len(b) {
			sum += int(b[i] - '0')
		}
		out = append(out, byte(sum%10)+'0')
		carry = sum / 10
	}
	if carry > 0 {
		out = append(out, byte(carry)+'0')
	}
	return out
}

// reverse flips a byte slice in place.
func reverse(s []byte) {
	for l, r := 0, len(s)-1; l < r; l, r = l+1, r-1 {
		s[l], s[r] = s[r], s[l]
	}
}
package main import ( "github.com/therecipe/qt/widgets" ) func itemWidgets() { //List Widget listWidget := widgets.NewQListWidget(nil) listWidget.SetWindowTitle("List Widget") list := []string{"This", "Is", "A", "List", "View"} listWidget.AddItems(list) addWidget(listWidget) //Tree Widget treeWidget := widgets.NewQTreeWidget(nil) treeWidget.SetWindowTitle("Tree Widget") rootNode := treeWidget.InvisibleRootItem() americaItem := widgets.NewQTreeWidgetItem2([]string{"America"}, 0) mexicoItem := widgets.NewQTreeWidgetItem2([]string{"Canada"}, 0) usaItem := widgets.NewQTreeWidgetItem2([]string{"USA"}, 0) bostonItem := widgets.NewQTreeWidgetItem2([]string{"Boston"}, 0) europeItem := widgets.NewQTreeWidgetItem2([]string{"Europe"}, 0) italyItem := widgets.NewQTreeWidgetItem2([]string{"Italy"}, 0) romeItem := widgets.NewQTreeWidgetItem2([]string{"Rome"}, 0) veronaItem := widgets.NewQTreeWidgetItem2([]string{"Verona"}, 0) rootNode.AddChild(americaItem) rootNode.AddChild(europeItem) americaItem.AddChild(mexicoItem) americaItem.AddChild(usaItem) usaItem.AddChild(bostonItem) europeItem.AddChild(italyItem) italyItem.AddChild(romeItem) italyItem.AddChild(veronaItem) treeWidget.ExpandAll() addWidget(treeWidget) //Table Widget tableWidget := widgets.NewQTableWidget(nil) tableWidget.SetWindowTitle("Table Widget") table := [][]string{ 0: {"This", "Is", "The", "First", "Row"}, 1: {"This", "Is", "The", "Second", "Row"}, 2: {"This", "Is", "The", "Third", "Row"}, } tableWidget.SetRowCount(len(table)) tableWidget.SetColumnCount(len(table[0])) for row := 0; row < tableWidget.RowCount(); row++ { for column := 0; column < tableWidget.ColumnCount(); column++ { tableWidget.SetItem(row, column, widgets.NewQTableWidgetItem2(table[row][column], 0)) } } addWidget(tableWidget) }
package main

import (
	"errors"
	"net/http"
	"time"

	health "github.com/docker/go-healthcheck"
	"github.com/gorilla/mux"
)

// main wires two periodic health checks into a /health endpoint and
// serves it on :8080.
func main() {
	// Each check runs every 5 seconds and must fail 3 consecutive
	// times before the reported status flips to unhealthy.
	health.RegisterPeriodicThresholdFunc("postgresql", time.Second*5, 3, postgresqlCheck)
	health.RegisterPeriodicThresholdFunc("gateway", time.Second*5, 3, gatewayCheck)

	r := mux.NewRouter()
	r.HandleFunc("/health", health.StatusHandler)

	srv := &http.Server{
		Handler:     r,
		Addr:        "0.0.0.0:8080",
		ReadTimeout: 15 * time.Second,
	}

	// Bug fix: the ListenAndServe error was previously discarded,
	// hiding bind failures; surface it instead.
	if err := srv.ListenAndServe(); err != nil {
		panic(err)
	}
}

// postgresqlCheck reports the PostgreSQL dependency as always healthy
// (placeholder implementation).
func postgresqlCheck() error {
	return nil
}

// gatewayCheck reports the gateway dependency as always unhealthy
// (placeholder implementation). Renamed from gateWayCheck for
// idiomatic Go naming.
func gatewayCheck() error {
	return errors.New("no connection")
}
package raftkv

import "labrpc"
import "crypto/rand"
import "math/big"
import "sync"

// Clerk is the client-side handle for the replicated key/value
// service. Every RPC is tagged with a (clientId, requestId) pair so
// the servers can de-duplicate retried requests.
type Clerk struct {
	mu            sync.Mutex // guards nextRequestId
	servers       []*labrpc.ClientEnd
	clientId      int64 // random identity distinguishing this client
	nextRequestId int64 // per-client monotonically increasing sequence number
}

// nrand returns a random 62-bit non-negative integer, used as a
// (probabilistically unique) client identity.
func nrand() int64 {
	max := big.NewInt(int64(1) << 62)
	bigx, _ := rand.Int(rand.Reader, max)
	x := bigx.Int64()
	return x
}

// MakeClerk builds a Clerk over the given server endpoints, assigning
// it a fresh random client id and starting its request sequence at 1.
func MakeClerk(servers []*labrpc.ClientEnd) *Clerk {
	ck := new(Clerk)
	ck.servers = servers
	ck.clientId = nrand()
	ck.nextRequestId = 1
	return ck
}

// fetch the current value for a key.
// returns "" if the key does not exist.
// keeps trying forever in the face of all other errors.
//
// you can send an RPC with code like this:
// ok := ck.servers[i].Call("KVServer.Get", &args, &reply)
//
// the types of args and reply (including whether they are pointers)
// must match the declared types of the RPC handler function's
// arguments. and reply must be passed as a pointer.
func (ck *Clerk) Get(key string) string {
	// Reserve a request id under the lock; the same id is reused for
	// every retry so the servers can recognise duplicates.
	ck.mu.Lock()
	requestId := ck.nextRequestId
	ck.nextRequestId += 1
	ck.mu.Unlock()

	// Retry round-robin over all servers until some leader answers OK.
	for {
		for i, _ := range ck.servers {
			// DPrintf("sending request %d to server %d", requestId, i)
			args := GetArgs{key, ck.clientId, requestId}
			var reply GetReply
			ok := ck.servers[i].Call("KVServer.Get", &args, &reply)
			if ok && reply.Err == OK && !reply.WrongLeader {
				return reply.Value
			}
		}
	}
}

// shared by Put and Append.
//
// you can send an RPC with code like this:
// ok := ck.servers[i].Call("KVServer.PutAppend", &args, &reply)
//
// the types of args and reply (including whether they are pointers)
// must match the declared types of the RPC handler function's
// arguments. and reply must be passed as a pointer.
func (ck *Clerk) PutAppend(key string, value string, op string) {
	ck.mu.Lock()
	requestId := ck.nextRequestId
	ck.nextRequestId += 1
	ck.mu.Unlock()

	for {
		for i, _ := range ck.servers {
			// DPrintf("sending request %d to server %d", requestId, i)
			args := PutAppendArgs{key, value, op, ck.clientId, requestId}
			// NOTE(review): the reply is declared as GetReply rather
			// than a PutAppend-specific reply type — verify that the
			// KVServer.PutAppend handler really takes *GetReply,
			// otherwise labrpc will never deliver a reply here.
			var reply GetReply
			ok := ck.servers[i].Call("KVServer.PutAppend", &args, &reply)
			if ok && reply.Err == OK && !reply.WrongLeader {
				return
			}
		}
	}
}

// Put replaces the value stored at key.
func (ck *Clerk) Put(key string, value string) {
	ck.PutAppend(key, value, "Put")
}

// Append appends value to the value stored at key.
func (ck *Clerk) Append(key string, value string) {
	ck.PutAppend(key, value, "Append")
}
/*
==================== What are goroutines? ====================
A goroutine is a lightweight thread managed by the Go runtime. Using
goroutines we can turn a sequential program into a concurrent program.

==================== Goroutines vs threads ====================
1. Goroutines are managed by the Go runtime; OS threads are managed by the kernel.
2. Goroutines are not hardware dependent; threads are hardware dependent.
3. Goroutines have an easy communication medium (channels); threads do not.
4. Because of channels, one goroutine can communicate with another with low
   latency; lacking such a medium, inter-thread communication happens with
   high latency.
5. Goroutines have no ID because Go has no Thread Local Storage; threads have
   unique IDs because they do have Thread Local Storage.
6. Goroutines are cheaper than threads.
7. Goroutines are cooperatively scheduled; threads are preemptively scheduled.
8. Goroutines have a faster startup time than threads.
9. Goroutines have growable, segmented stacks; threads do not.

==================== Why are goroutines lightweight? ====================
A goroutine is created with an initial stack of only 2KB. Every Go function
already checks whether more stack is needed, and the stack can be copied to
another region in memory with twice the original size. This makes goroutines
very light on resources.

==================== Why are goroutines hardware independent? ====================
Goroutines exist only in the virtual space of the Go runtime, not in the OS.

==================== Blocking in goroutines ====================
A Go runtime scheduler is needed to manage their lifecycle.
Go Runtime maintains three C structs for this purpose: 1.The G Struct : This represents a single go routine with it’s properties such as stack pointer, base of stack, it’s ID, it’s cache and it’s status 2.The M Struct : This represents an OS thread. It also contains a pointer to the global queue of runnable goroutines, the current running goroutine and the reference to the scheduler 3.The Sched Struct : It is a global struct and contains the queues free and waiting goroutines as well as threads. =====================Goroutines c structs If a goroutine blocks on system call, it blocks it’s running thread. But another thread is taken from the waiting queue of Scheduler (the Sched struct) and used for other runnable goroutines. However, if you communicate using channels in go which exists only in virtual space, the OS doesn’t block the thread. Such goroutines simply go in the waiting state and other runnable goroutine (from the M struct) is scheduled in it’s place. */ package main import ( "fmt" "time" ) func number(num int) { for i := 0; i < num; i++ { // wait function which will wait for 1 sec before printing time.Sleep(time.Second) fmt.Println(i) } } func main() { //can make a function goroutine and concurrent by adding go keyword as prefix to a function call go number(5) go number(5) //it is added so that our main function do not terminate before execution of our go routines fmt.Scanln() }
/* Copyright (c) 2014-2015 Jason Ish
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

package appcontext

import (
	"github.com/jasonish/evebox/core"
	"github.com/jasonish/evebox/elasticsearch"
	"github.com/jasonish/evebox/geoip"
	"github.com/jasonish/evebox/sqlite/configdb"
)

// Config holds application configuration that is read from flags or a
// configuration file (as opposed to the configuration database).
type Config struct {
	// Http groups the HTTP server settings.
	Http struct {
		TlsEnabled     bool   // serve HTTPS instead of HTTP
		TlsCertificate string // path to the TLS certificate
		TlsKey         string // path to the TLS private key
		ReverseProxy   bool   // trust reverse-proxy headers
		RequestLogging bool   // log each HTTP request
	}

	// LetsEncryptHostname, when set, enables automatic certificates for
	// this hostname.
	LetsEncryptHostname string

	// Authentication groups the login settings.
	Authentication struct {
		// Required indicates whether clients must authenticate.
		Required bool

		// Username or Usernamepassword.
		Type string

		// LoginMessage is shown to users on the login page.
		LoginMessage string
	}
}

// AppContext bundles the shared services and configuration that are
// passed around the application.
type AppContext struct {
	// Configuration data that is not held in the configuration database.
	Config Config

	ConfigDB *configdb.ConfigDB

	Userstore core.UserStore

	// The interface to the underlying datastore.
	DataStore core.Datastore

	ElasticSearch *elasticsearch.ElasticSearch
	ReportService core.ReportService
	GeoIpService  *geoip.GeoIpService

	// Features enabled for this instance; populated via SetFeature.
	Features map[core.Feature]bool

	// A default time range to send to a client. Mainly useful for oneshot
	// server mode where we want to set a better time range.
	DefaultTimeRange string

	// Tell the client to ignore any locally stored configuration of the
	// default time range.
	ForceDefaultTimeRange bool
}

// SetFeature marks the given feature as enabled, lazily allocating the
// Features map on first use.
func (c *AppContext) SetFeature(feature core.Feature) {
	if c.Features == nil {
		c.Features = map[core.Feature]bool{}
	}
	c.Features[feature] = true
}
package charts import ( "github.com/go-echarts/go-echarts/v2/opts" "github.com/go-echarts/go-echarts/v2/render" "github.com/go-echarts/go-echarts/v2/types" ) // Graph represents a graph chart. type Graph struct { BaseConfiguration BaseActions } // Type returns the chart type. func (*Graph) Type() string { return types.ChartGraph } // NewGraph creates a new graph chart. func NewGraph() *Graph { chart := new(Graph) chart.initBaseConfiguration() chart.Renderer = render.NewChartRender(chart, chart.Validate) return chart } // AddSeries adds the new series. func (c *Graph) AddSeries(name string, nodes []opts.GraphNode, links []opts.GraphLink, options ...SeriesOpts) *Graph { series := SingleSeries{Name: name, Type: types.ChartGraph, Links: links, Data: nodes} series.ConfigureSeriesOpts(options...) c.MultiSeries = append(c.MultiSeries, series) return c } // SetGlobalOptions sets options for the Graph instance. func (c *Graph) SetGlobalOptions(options ...GlobalOpts) *Graph { c.BaseConfiguration.setBaseGlobalOptions(options...) return c } // SetDispatchActions sets actions for the Graph instance. func (c *Graph) SetDispatchActions(actions ...GlobalActions) *Graph { c.BaseActions.setBaseGlobalActions(actions...) return c } // Validate validates the given configuration. func (c *Graph) Validate() { // If there is no layout setting, default layout is set to "force". for i := 0; i < len(c.MultiSeries); i++ { if c.MultiSeries[i].Layout == "" { c.MultiSeries[i].Layout = "force" } } c.Assets.Validate(c.AssetsHost) }
package rados

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"testing"
	"time"
)

// errorOnError records a non-fatal test error when e is non-nil.
func errorOnError(t *testing.T, e error, message string, parameters ...interface{}) {
	if e != nil {
		t.Errorf("%v : %v", e, fmt.Sprintf(message, parameters...))
	}
}

// fatalOnError aborts the test when e is non-nil.
func fatalOnError(t *testing.T, e error, message string, parameters ...interface{}) {
	if e != nil {
		t.Fatalf("%v : %v", e, fmt.Sprintf(message, parameters...))
	}
}

// poolName returns a pool name unique per test run and process.
func poolName() string {
	return fmt.Sprintf("rados.go.test.%d.%d", time.Now().Unix(), os.Getpid())
}

// radosTest bundles the per-test cluster handle and its dedicated pool.
type radosTest struct {
	t        *testing.T
	rados    *Rados
	poolName string
}

// setup connects to the cluster with default settings and creates a
// dedicated test pool. Pair with teardown.
func setup(t *testing.T) *radosTest {
	var rados *Rados
	var err error
	rados, err = NewDefault()
	fatalOnError(t, err, "Setup: New")
	poolName := poolName()
	err = rados.CreatePool(poolName)
	fatalOnError(t, err, "Setup: CreatePool")
	return &radosTest{
		rados:    rados,
		poolName: poolName,
	}
}

// teardown deletes the test pool and releases the cluster handle.
func teardown(t *testing.T, test *radosTest) {
	var err error
	err = test.rados.DeletePool(test.poolName)
	fatalOnError(t, err, "Teardown: DeletePool")
	err = test.rados.Release()
	fatalOnError(t, err, "Teardown: Release")
}

func Test_RadosNew(t *testing.T) {
	var rados *Rados
	var err error
	rados, err = NewDefault()
	fatalOnError(t, err, "New")
	err = rados.Release()
	fatalOnError(t, err, "Release")
	if rados, err = New("path that does not exist"); err == nil {
		t.Errorf("New should have failed")
		rados.Release()
	}
}

func Test_RadosCreateDeletePool(t *testing.T) {
	var rados *Rados
	var err error
	rados, err = NewDefault()
	fatalOnError(t, err, "New")
	defer rados.Release()
	poolName := poolName()
	err = rados.CreatePool(poolName)
	fatalOnError(t, err, "CreatePool")
	err = rados.DeletePool(poolName)
	fatalOnError(t, err, "DeletePool")
}

func Test_RadosContext(t *testing.T) {
	test := setup(t)
	defer teardown(t, test)
	ctx, err := test.rados.NewContext(test.poolName)
	fatalOnError(t, err, "NewContext")
	ctx.Release()
	if ctx, err = test.rados.NewContext("pool that does not exist"); err == nil {
		t.Errorf("NewContext should have failed")
		ctx.Release()
	}
}

// Test basic object operations.
func Test_RadosObject(t *testing.T) {
	test := setup(t)
	defer teardown(t, test)
	ctx, err := test.rados.NewContext(test.poolName)
	fatalOnError(t, err, "NewContext")
	defer ctx.Release()

	name := "test-object"
	name2 := "test-object2"
	data := []byte("test data")

	// Create an object
	_, err = ctx.Create(name)
	fatalOnError(t, err, "Create")

	// Make sure it's there
	objInfo, err := ctx.Stat(name)
	fatalOnError(t, err, "Stat")
	if objInfo.Size() != int64(0) {
		// BUG fix: the original used %s verbs for integer arguments here,
		// which go vet flags and which rendered garbage in the message.
		t.Errorf("Object size mismatch, was %d, expected %d", objInfo.Size(), 0)
	}

	// Put data in the object
	err = ctx.Put(name, data)
	fatalOnError(t, err, "Put")

	// Make sure everything looks right
	objInfo, err = ctx.Stat(name)
	fatalOnError(t, err, "Stat")
	if objInfo.Name() != name {
		t.Errorf("Object name mismatch, was %s, expected %s", objInfo.Name(), name)
	}
	if objInfo.Size() != int64(len(data)) {
		t.Errorf("Object size mismatch, was %d, expected %d", objInfo.Size(), len(data))
	}

	// Get the data back
	data2, err := ctx.Get(name)
	fatalOnError(t, err, "Get")
	// It better be the same
	if !bytes.Equal(data, data2) {
		t.Errorf("Object data mismatch, was %s, expected %s", data2, data)
	}

	// Open an existing object
	obj, err := ctx.Open(name)
	fatalOnError(t, err, "Open")
	// Make sure everything looks right
	if obj.Name() != name {
		t.Errorf("Object name mismatch, was %s, expected %s", obj.Name(), name)
	}
	if obj.Size() != int64(len(data)) {
		t.Errorf("Object size mismatch, was %d, expected %d", obj.Size(), len(data))
	}

	// Open a new object
	obj, err = ctx.Open(name2)
	fatalOnError(t, err, "Open")
	// Make sure it's there
	objInfo, err = ctx.Stat(name2)
	fatalOnError(t, err, "Stat")
	if objInfo.Size() != int64(0) {
		t.Errorf("Object size mismatch, was %d, expected %d", objInfo.Size(), 0)
	}

	// Remove the objects
	err = ctx.Remove(name)
	errorOnError(t, err, "Remove")
	err = ctx.Remove(name2)
	errorOnError(t, err, "Remove")

	// They should be gone
	objInfo, err = ctx.Stat(name)
	if err == nil {
		t.Errorf("Object %s should have been deleted but Stat returned success", name)
	}
	objInfo, err = ctx.Stat(name2)
	if err == nil {
		t.Errorf("Object %s should have been deleted but Stat returned success", name2)
	}
}

func Test_ReadAtWriteAt(t *testing.T) {
	test := setup(t)
	defer teardown(t, test)
	ctx, err := test.rados.NewContext(test.poolName)
	fatalOnError(t, err, "NewContext")
	defer ctx.Release()

	name := "test-object"
	data := make([]byte, 5)

	// Create a new object
	obj, err := ctx.Create(name)
	fatalOnError(t, err, "Create")

	// Try to Read the first byte (expect EOF).
	n, err := obj.ReadAt(data, 0)
	if err == nil {
		t.Errorf("Expected non-nil error on ReadAt() of empty object.")
	}
	if err != io.EOF {
		t.Errorf("Expected EOF for ReadAt() of empty object, got %s", err)
	}
	if n != 0 {
		t.Errorf("Expected 0 bytes read for ReadAt() of empty object, got %d", n)
	}

	// Write some data to the beginning
	data = []byte("12345")
	n, err = obj.WriteAt(data, 0)
	fatalOnError(t, err, "WriteAt")
	if n != len(data) {
		t.Errorf("Expected to have %d bytes written but was %d", len(data), n)
	}

	// Read the third byte
	data = make([]byte, 1)
	n, err = obj.ReadAt(data, 2)
	fatalOnError(t, err, "ReadAt")
	if n != len(data) {
		t.Errorf("Expected to have %d bytes read but was %d", len(data), n)
	}
	if data[0] != '3' {
		t.Errorf("Expected to have read 3 but was %v", data[0])
	}

	// Write the third byte with something new
	data[0] = 'C'
	n, err = obj.WriteAt(data, 2)
	fatalOnError(t, err, "WriteAt")
	if n != len(data) {
		t.Errorf("Expected to have %d bytes written but was %d", len(data), n)
	}

	// Make sure it's correct
	data = make([]byte, 1)
	n, err = obj.ReadAt(data, 2)
	fatalOnError(t, err, "ReadAt")
	if n != len(data) {
		t.Errorf("Expected to have %d bytes read but was %d", len(data), n)
	}
	if data[0] != 'C' {
		t.Errorf("Expected to have read C but was %v", data[0])
	}

	// Try to read past the end
	data = make([]byte, 2)
	n, err = obj.ReadAt(data, 4)
	if err == nil {
		t.Errorf("Expected non-nil error on ReadAt() reading past end of object")
	}
	if err != io.EOF {
		t.Errorf("Expected EOF for ReadAt() reading past end of object, got %s", err)
	}
	if n != 1 {
		t.Errorf("Expected 1 bytes read for ReadAt() past end of object, got %d", n)
	}
	if data[0] != '5' {
		t.Errorf("Expected to have read 5 but was %v", data[0])
	}

	// Try to write past the end
	data[0] = 'E'
	data[1] = 'F'
	n, err = obj.WriteAt(data, 4)
	fatalOnError(t, err, "WriteAt")
	if n != len(data) {
		t.Errorf("Expected to have %d bytes written but was %d", len(data), n)
	}

	// Read the whole object and make sure the data is correct
	data = []byte("12C4EF")
	data2, err := ctx.Get(name)
	fatalOnError(t, err, "Get")
	// It better be the same
	if !bytes.Equal(data, data2) {
		t.Errorf("Object data mismatch, was %s, expected %s", data2, data)
	}
}

func Test_Append(t *testing.T) {
	test := setup(t)
	defer teardown(t, test)
	ctx, err := test.rados.NewContext(test.poolName)
	fatalOnError(t, err, "NewContext")
	defer ctx.Release()

	name := "test-object"
	data := []byte("0123456789")

	// Append data to new object
	err = ctx.Append(name, data[:5])
	fatalOnError(t, err, "Append")
	// Read object back
	data2, err := ctx.Get(name)
	fatalOnError(t, err, "Get")
	// Check data integrity
	if !bytes.Equal(data[:5], data2) {
		t.Errorf("Object data mismatch, was %s, expected %s", data2, data[:5])
	}

	// Append more data to new object
	err = ctx.Append(name, data[5:])
	fatalOnError(t, err, "Append")
	// Read object back
	data2, err = ctx.Get(name)
	fatalOnError(t, err, "Get")
	// Check data integrity
	if !bytes.Equal(data, data2) {
		t.Errorf("Object data mismatch, was %s, expected %s", data2, data)
	}
}

func Test_ListPools(t *testing.T) {
	test := setup(t)
	defer teardown(t, test)

	// List the pools
	pools, err := test.rados.ListPools()
	fatalOnError(t, err, "ListPools")

	// Make sure we find our test pool.
	// BUG fix: the original checked `i == len(pools)` after the range
	// loop, which can never be true (i never exceeds len(pools)-1), so a
	// missing pool was never reported. Track the result explicitly.
	found := false
	for i := range pools {
		if pools[i] == test.poolName {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Expected to find pool %s but it was not present.", test.poolName)
	}
}

// TODO: make this a real test
func Test_ClusterStat(t *testing.T) {
	test := setup(t)
	defer teardown(t, test)
	err := test.rados.Stat()
	fatalOnError(t, err, "Stat")
}

// TODO: make this a real test
func Test_PoolStat(t *testing.T) {
	test := setup(t)
	defer teardown(t, test)
	ctx, err := test.rados.NewContext(test.poolName)
	fatalOnError(t, err, "NewContext")
	defer ctx.Release()
	_, err = ctx.PoolStat()
	fatalOnError(t, err, "PoolStat")
}
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.

// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris

/*
Package pprof configures export of runtime/pprof data.

Usage:
	import _ "go-nfr/pprof"

This creates a unix domain socket that will serve a HTTP server that
exports pprof data in a fashion analogous to the net/pprof package. The
server listens at /tmp/.go_pid<N>.
*/
package pprof

import (
	"expvar"
	"fmt"
	"log"
	"net"
	"net/http"
	"net/http/pprof"
	"os"
	"path/filepath"
)

// init starts the pprof socket server in the background as soon as the
// package is imported.
func init() {
	s := &server{
		path: listenPath(),
	}
	go s.serve()
}

// listenPath returns the per-process socket path, e.g. /tmp/.go_pid1234.
func listenPath() string {
	dir := os.TempDir()
	file := fmt.Sprintf(".go_pid%d", os.Getpid())
	return filepath.Join(dir, file)
}

// server serves net/http/pprof over a socket file.
type server struct {
	path string
}

// serve claims the socket path (removing a stale socket if no one
// answers on it), then serves pprof, /proc and expvar data over it.
// It runs until the listener fails; errors are logged, not returned.
func (s *server) serve() {
	// Ensure existing socket is not listening.
	// If the dial succeeds another live process owns the socket, so back
	// off; if it fails the socket file is stale and safe to remove.
	conn, err := net.Dial("unix", s.path)
	if err != nil {
		os.Remove(s.path)
	} else {
		log.Printf("nfr: pprof: %s: already listening", s.path)
		conn.Close()
		return
	}

	// Export pprof.
	mux := http.NewServeMux()
	mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
	mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
	mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
	mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))

	// Export /proc.
	fs := http.FileServer(http.Dir("/proc/self"))
	mux.Handle("/debug/proc/", http.StripPrefix("/debug/proc/", fs))

	// Export debugging vars.
	mux.Handle("/debug/vars", http.HandlerFunc(expvarHandler))

	// Serve the handlers.
	l, err := net.Listen("unix", s.path)
	if err != nil {
		log.Printf("nfr: pprof: %s", err)
		return
	}
	// NOTE(review): the Chmod error is ignored — the server still works,
	// just possibly with restrictive socket permissions.
	os.Chmod(s.path, 0666)
	server := &http.Server{
		Handler: mux,
	}
	err = server.Serve(l)
	log.Printf("nfr: pprof: server exited: %s", err)
}

// expvarHandler writes all published expvar variables as a single JSON
// object (mirrors the unexported handler in the expvar package).
func expvarHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprintf(w, "{\n")
	first := true
	expvar.Do(func(kv expvar.KeyValue) {
		if !first {
			fmt.Fprintf(w, ",\n")
		}
		first = false
		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
	})
	fmt.Fprintf(w, "\n}\n")
}
// Copyright 2019 Radiation Detection and Imaging (RDI), LLC
// Use of this source code is governed by the BSD 3-clause
// license that can be found in the LICENSE file.

package plot

import (
	"math"
	"strconv"

	"gonum.org/v1/plot"
)

// FuncScale is a plot normalizer that maps axis values through an
// arbitrary monotonic function before scaling them into [0, 1].
type FuncScale struct {
	Func func(float64) float64
}

// Normalize maps x in [min, max] to [0, 1] through s.Func.
// Panics if s.Func is unset.
func (s *FuncScale) Normalize(min, max, x float64) float64 {
	if s.Func == nil {
		panic("s.Func is nil")
	}
	fMin := s.Func(min)
	return (s.Func(x) - fMin) / (s.Func(max) - fMin)
}

// Log10Min3 is log10 clamped so that values at or below 1e-3 map to -3,
// keeping the function defined for non-positive inputs.
func Log10Min3(x float64) float64 {
	if x <= 0.001 {
		return -3
	}
	return math.Log10(x)
}

// Log10Min15 is log10 clamped so that values at or below 1e-15 map to
// -15, keeping the function defined for non-positive inputs.
func Log10Min15(x float64) float64 {
	if x <= 1e-15 {
		return -15
	}
	return math.Log10(x)
}

// RollTicks computes "rolled" major/minor axis ticks, aiming for about
// NSuggestedTicks labeled major ticks across the axis range.
type RollTicks struct {
	NSuggestedTicks int // desired number of labeled ticks; 0 means 4
}

// Ticks returns the major (labeled) and minor (unlabeled) ticks for the
// range [min, max]. Panics if max <= min.
// Note: t is a value receiver, so the NSuggestedTicks default below does
// not mutate the caller's struct.
func (t RollTicks) Ticks(min, max float64) []plot.Tick {
	if t.NSuggestedTicks == 0 {
		t.NSuggestedTicks = 4
	}
	if max <= min {
		panic("illegal range")
	}
	// Start from the power of ten covering the range, then shrink the
	// step until at least NSuggestedTicks-1 steps fit.
	tens := math.Pow10(int(math.Floor(math.Log10(max - min))))
	n := (max - min) / tens
	for n < float64(t.NSuggestedTicks)-1 {
		tens /= 10
		n = (max - min) / tens
	}
	majorMult := int(n / float64(t.NSuggestedTicks-1))
	// Avoid awkward multiples: prefer 6 over 7 and 8 over 9.
	switch majorMult {
	case 7:
		majorMult = 6
	case 9:
		majorMult = 8
	}
	majorDelta := float64(majorMult) * tens
	// Snap the first candidate below min so ticks land on round values.
	val := math.Floor(min/majorDelta) * majorDelta
	// Makes a list of non-truncated y-values.
	var labels []float64
	for val <= max {
		if val >= min {
			labels = append(labels, val)
		}
		val += majorDelta
	}
	// Precision needed so rounded labels still distinguish adjacent ticks.
	prec := int(math.Ceil(math.Log10(val)) - math.Floor(math.Log10(majorDelta)))
	// Makes a list of big ticks.
	var ticks []plot.Tick
	for _, v := range labels {
		vRounded := round(v, prec)
		ticks = append(ticks, plot.Tick{Value: vRounded, Label: formatFloatTick(vRounded, -1)})
	}
	minorDelta := majorDelta / 2
	// Drop a trailing major tick that would collide with the axis end.
	if ticks[len(ticks)-1].Value > max-minorDelta {
		ticks = ticks[:len(ticks)-1]
	}
	// Choose a minor step that divides the major step evenly.
	switch majorMult {
	case 3, 6:
		minorDelta = majorDelta / 3
	case 5:
		minorDelta = majorDelta / 5
	}
	// Fill in unlabeled minor ticks, skipping positions already taken by
	// major ticks.
	val = math.Floor(min/minorDelta) * minorDelta
	for val <= max {
		found := false
		for _, t := range ticks {
			if t.Value == val {
				found = true
			}
		}
		if val >= min && val <= max && !found {
			ticks = append(ticks, plot.Tick{Value: val})
		}
		val += minorDelta
	}
	return ticks
}

// round rounds x to prec decimal digits, half away from zero.
func round(x float64, prec int) float64 {
	if x == 0 {
		// Make sure zero is returned
		// without the negative bit set.
		return 0
	}
	// Fast path for positive precision on integers.
	if prec >= 0 && x == math.Trunc(x) {
		return x
	}
	pow := math.Pow10(prec)
	intermed := x * pow
	if math.IsInf(intermed, 0) {
		return x
	}
	if x < 0 {
		x = math.Ceil(intermed - 0.5)
	} else {
		x = math.Floor(intermed + 0.5)
	}
	if x == 0 {
		return 0
	}
	return x / pow
}

// LogTicks produces decade-spaced labeled ticks with unlabeled ticks at
// each multiple 2..9 within every decade.
type LogTicks struct{}

// Ticks returns logarithmic ticks covering [min, max], clamping via
// Log10Min15 so non-positive bounds do not panic.
func (LogTicks) Ticks(min, max float64) []plot.Tick {
	val := math.Pow10(int(Log10Min15(min)))
	max = math.Pow10(int(math.Ceil(Log10Min15(max))))
	var ticks []plot.Tick
	for val < max {
		for i := 1; i < 10; i++ {
			if i == 1 {
				ticks = append(ticks, plot.Tick{Value: val, Label: formatFloatTick(val, 5)})
			}
			ticks = append(ticks, plot.Tick{Value: val * float64(i)})
		}
		val *= 10
	}
	ticks = append(ticks, plot.Tick{Value: val, Label: formatFloatTick(val, 5)})
	return ticks
}

// formatFloatTick renders a tick value in compact %g form.
func formatFloatTick(v float64, prec int) string {
	return strconv.FormatFloat(v, 'g', prec, 64)
}
package repositories

import (
	"database/sql"
	"fmt"
	"sync"

	_ "github.com/lib/pq" // registers the "postgres" driver with database/sql
)

var (
	db    *sql.DB
	errDB error
	once  sync.Once
)

// Connection parameters.
// NOTE(review): these are hard-coded; consider loading them from the
// environment or a configuration file.
const (
	host     = "172.18.0.2"
	port     = 5432
	user     = "postgres"
	password = "postgres"
	dbname   = "postgres"
)

// GetInstanceDB returns the process-wide database handle, opening it on
// first use, and verifies the connection with a ping on every call.
func GetInstanceDB() (*sql.DB, error) {
	once.Do(func() {
		// BUG fix: the error from configInstanceDB was discarded
		// (`db, _ = ...`), which could leave db nil and panic on Ping.
		db, errDB = configInstanceDB()
	})
	if errDB != nil {
		return nil, errDB
	}
	if err := db.Ping(); err != nil {
		return nil, err
	}
	return db, nil
}

// configInstanceDB opens the PostgreSQL connection pool described by the
// package-level constants. sql.Open only validates the DSN; the actual
// connection is established lazily (checked via Ping above).
func configInstanceDB() (*sql.DB, error) {
	psqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
		"password=%s dbname=%s sslmode=disable",
		host, port, user, password, dbname)
	return sql.Open("postgres", psqlInfo)
}
package db

import (
	"strings"
	"testing"

	s "github.com/thedevelopnik/netplan/pkg/models"
)

// testSubnet returns a subnet fixture with the given name; all other
// fields use a fixed, valid configuration.
func testSubnet(name string) s.Subnet {
	return s.Subnet{
		Name:      name,
		Access:    "public",
		Location:  "us-east4",
		Provider:  "GCP",
		Env:       "dev",
		CidrBlock: "192.168.0.0/16",
		VPCID:     1,
	}
}

func TestCreateSubnet(t *testing.T) {
	sn := testSubnet("create-test-subnet")
	conn, err := Conn()
	if err != nil {
		t.Error(err)
	}
	repo := New(conn)
	err = repo.CreateSubnet(&sn)
	if err != nil {
		t.Error(err)
	}
	if sn.ID == 0 {
		t.Error("subnet did not have valid ID")
	}
	if strings.Compare(sn.Name, "create-test-subnet") != 0 {
		t.Error("subnet name was not correct")
	}
	err = conn.Close()
	if err != nil {
		t.Error(err)
	}
}

func TestUpdateSubnet(t *testing.T) {
	sn := testSubnet("original-test-subnet")
	conn, err := Conn()
	if err != nil {
		t.Error(err)
	}
	repo := New(conn)
	err = repo.CreateSubnet(&sn)
	if err != nil {
		t.Error(err)
	}
	updater := s.Subnet{}
	updater.ID = sn.ID
	updater.Name = "update-test-subnet"
	updater.CidrBlock = "10.10.0.0/26"
	usn, err := repo.UpdateSubnet(&updater)
	// BUG fix: the error from UpdateSubnet was never checked, so a failed
	// update would surface only as confusing assertion failures below.
	if err != nil {
		t.Error(err)
	}
	if strings.Compare(usn.Name, "update-test-subnet") != 0 {
		t.Error("updated subnet does not have correct name")
	}
	if strings.Compare(usn.CidrBlock, "10.10.0.0/26") != 0 {
		t.Error("updated subnet does not have correct cidr block")
	}
	if usn.ID != sn.ID {
		t.Error("updated subnet does not represent the correct db record")
	}
	err = conn.Close()
	if err != nil {
		t.Error(err)
	}
}

func TestDeleteSubnet(t *testing.T) {
	sn := testSubnet("delete-test-subnet")
	conn, err := Conn()
	if err != nil {
		t.Error(err)
	}
	repo := New(conn)
	err = repo.CreateSubnet(&sn)
	if err != nil {
		t.Error(err)
	}
	err = repo.DeleteSubnet(sn.ID)
	if err != nil {
		t.Error(err)
	}
	// The record should no longer be retrievable.
	var dsn s.Subnet
	err = conn.Where("id = ?", sn.ID).First(&dsn).Error
	if err == nil {
		t.Error("found a subnet that should have been deleted")
	}
	err = conn.Close()
	if err != nil {
		t.Error(err)
	}
}
package main import ( "github.com/Skactor/bypass-detection/config" "github.com/Skactor/bypass-detection/logger" "github.com/Skactor/bypass-detection/server" ) func main() { err := logger.InitLogger() if err != nil { logger.Logger.Fatalf("Failed to init logger: %s", err.Error()) } cfg, err := config.Parse("./config.yaml") if err != nil { logger.Logger.Fatalf("Failed to parse configuration file: %s", err.Error()) return } server.StartServer(&cfg.Server) }
package view

import (
	"net/http"

	"go.sancus.dev/cms"
	"go.sancus.dev/web/errors"
)

// View serves pages out of a cms.Directory according to a cms.ViewConfig.
type View struct {
	config cms.ViewConfig
	server cms.Directory
}

// NewView builds a View over the given directory and applies the
// configuration defaults before returning it.
func NewView(s cms.Directory, cfg cms.ViewConfig) (cms.View, error) {
	v := &View{
		config: cfg,
		server: s,
	}
	if err := v.SetDefaults(); err != nil {
		return nil, err
	}
	return v, nil
}

// SetDefaults fills in unset configuration values.
func (v *View) SetDefaults() error {
	return v.config.SetDefaults()
}

// Middleware returns an http middleware mounted at prefix.
func (v *View) Middleware(prefix string) func(http.Handler) http.Handler {
	return v.NewMiddleware(prefix).Middleware
}

// ServeHTTP serves the request, routing any error through the
// configured error handler.
func (v *View) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if err := v.TryServeHTTP(w, r); err != nil {
		v.config.ErrorHandler(w, r, err)
	}
}

// TryServeHTTP serves the request if a page matches it, and reports
// ErrNotFound otherwise.
func (v *View) TryServeHTTP(w http.ResponseWriter, r *http.Request) error {
	p, ok := v.pageInfo(r)
	if !ok {
		return errors.ErrNotFound
	}
	return p.TryServeHTTP(w, r)
}

// PageInfo exposes the page lookup for a request.
func (v *View) PageInfo(r *http.Request) (interface{}, bool) {
	return v.pageInfo(r)
}

// Config returns a pointer to the view's configuration.
func (v *View) Config() *cms.ViewConfig {
	return &v.config
}
package cacheutil

import (
	"balansir/internal/logutil"
	"encoding/gob"
	"fmt"
	"os"
	"sync/atomic"
	"time"
)

const (
	snapshotPath        = ".snapshot.gob"
	actionsThreshold1m  = 100
	actionsThreshold15m = 1
)

//BackupManager counts cache actions and persists snapshots of the cache
//to disk at a frequency that adapts to how busy the cache is.
type BackupManager struct {
	ActionsCount int64 // accessed atomically
}

//Snapshot is the on-disk representation of the cache cluster state.
type Snapshot struct {
	ShardAmount int
	ShardSize   int
	Hits        int64
	Misses      int64
	Shards      []*Shard
	KsHashMap   map[uint64]string
}

//Hit atomically records one cache action.
func (bm *BackupManager) Hit() {
	atomic.AddInt64(&bm.ActionsCount, 1)
}

//Reset atomically zeroes the action counter.
func (bm *BackupManager) Reset() {
	atomic.StoreInt64(&bm.ActionsCount, 0)
}

//GetHitsCount atomically reads the action counter.
func (bm *BackupManager) GetHitsCount() int64 {
	return atomic.LoadInt64(&bm.ActionsCount)
}

//PersistCache runs forever, snapshotting the cache more often the more
//actions have accumulated: every minute when busy (>= 100 actions),
//every 5 minutes at moderate load, every 15 minutes when nearly idle.
//Intended to be run in its own goroutine.
func (bm *BackupManager) PersistCache() {
	ticker1m := time.NewTicker(1 * time.Minute)
	ticker5m := time.NewTicker(5 * time.Minute)
	ticker15m := time.NewTicker(15 * time.Minute)
	for {
		select {
		case <-ticker1m.C:
			actions := bm.GetHitsCount()
			if actions >= actionsThreshold1m {
				bm.takeCacheSnapshot()
			}
		case <-ticker5m.C:
			actions := bm.GetHitsCount()
			if actions > actionsThreshold15m && actions <= actionsThreshold1m {
				bm.takeCacheSnapshot()
			}
		case <-ticker15m.C:
			actions := bm.GetHitsCount()
			if actions <= actionsThreshold15m {
				bm.takeCacheSnapshot()
			}
		}
	}
}

// takeCacheSnapshot gob-encodes the current shards and key storage to
// the snapshot file, truncating any previous snapshot, and resets the
// action counter. Errors are logged, not returned.
func (bm *BackupManager) takeCacheSnapshot() {
	cache := GetCluster()
	cache.Mux.Lock()
	defer cache.Mux.Unlock()

	file, err := os.OpenFile(snapshotPath, os.O_CREATE|os.O_RDWR, 0660)
	if err != nil {
		logutil.Warning(fmt.Sprintf("failed to create/open cache snapshot file: %v", err))
		return
	}
	defer file.Close()

	// Overwrite the previous snapshot from the start of the file.
	err = file.Truncate(0)
	if err != nil {
		logutil.Warning(fmt.Sprintf("Error while saving cache on disk: %v", err))
		return
	}
	_, err = file.Seek(0, 0)
	if err != nil {
		logutil.Warning(fmt.Sprintf("Error while saving cache on disk: %v", err))
		return
	}

	snapshot := &Snapshot{
		Shards:    cluster.shards,
		KsHashMap: cluster.updater.keyStorage.hashmap,
	}
	bm.Reset()
	encoder := gob.NewEncoder(file)
	err = encoder.Encode(snapshot)
	if err != nil {
		logutil.Warning(fmt.Sprintf("Error while saving cache on disk: %v", err))
	}
}

//GetSnapshot opens (creating if necessary) the snapshot file and decodes
//it. The caller is responsible for closing the returned file. A decode
//failure is deliberately ignored and yields an empty snapshot.
func GetSnapshot() (*Snapshot, *os.File, error) {
	file, err := os.OpenFile(snapshotPath, os.O_CREATE|os.O_RDWR, 0660)
	if err != nil {
		return &Snapshot{}, nil, fmt.Errorf("failed to create/open cache snapshot file: %w", err)
	}
	decoder := gob.NewDecoder(file)
	snapshot := Snapshot{}
	decoder.Decode(&snapshot) //nolint
	return &snapshot, file, nil
}

//RestoreCache loads the snapshot from disk, if any, into the cluster.
func RestoreCache() {
	cache := GetCluster()
	cache.Mux.Lock()
	defer cache.Mux.Unlock()

	snapshot, file, err := GetSnapshot()
	// BUG fix: Close was previously deferred before the error check,
	// operating on a nil *os.File when GetSnapshot failed.
	if err != nil {
		logutil.Warning(err)
		return
	}
	defer file.Close()

	stats, err := file.Stat()
	if err != nil {
		// BUG fix: the original only warned here and then dereferenced
		// the nil stats below, panicking; bail out instead.
		logutil.Warning(err)
		return
	}

	// An empty file means there is no snapshot to restore.
	if stats.Size() == 0 {
		return
	}

	cluster.shards = snapshot.Shards
	cluster.updater.keyStorage.hashmap = snapshot.KsHashMap
	logutil.Notice("Cache loaded from disk")
}
package main import ( "./solutions" "fmt" ) /* * @problem: https://leetcode.com/problems/longest-substring-without-repeating-characters/ * */ func main() { testStr := "pwwkew" result := solutions.LongestSubString(testStr) fmt.Println(result) }
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE: this file follows the DCL generated-adapter pattern: it converts
// between the typed dclService.Membership resource and the generic
// unstructured.Resource representation, and wires the CRUD entry points
// into the unstructured registry. Edit with care — the field handling is
// intentionally mechanical and symmetric between the two directions.
package cloudidentity

import (
	"context"
	"fmt"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
)

// Membership is the registry hook type for the cloudidentity (beta)
// Membership resource; all state lives in the unstructured.Resource values.
type Membership struct{}

// MembershipToUnstructured converts a typed Membership into its generic
// map-based representation. Nil fields are omitted; "Empty" sentinel
// pointers (dclService.EmptyMembership*) are treated the same as nil.
func MembershipToUnstructured(r *dclService.Membership) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "cloudidentity",
			Version: "beta",
			Type:    "Membership",
		},
		Object: make(map[string]interface{}),
	}
	if r.CreateTime != nil {
		u.Object["createTime"] = *r.CreateTime
	}
	if r.DeliverySetting != nil {
		u.Object["deliverySetting"] = string(*r.DeliverySetting)
	}
	if r.DisplayName != nil && r.DisplayName != dclService.EmptyMembershipDisplayName {
		rDisplayName := make(map[string]interface{})
		if r.DisplayName.FamilyName != nil {
			rDisplayName["familyName"] = *r.DisplayName.FamilyName
		}
		if r.DisplayName.FullName != nil {
			rDisplayName["fullName"] = *r.DisplayName.FullName
		}
		if r.DisplayName.GivenName != nil {
			rDisplayName["givenName"] = *r.DisplayName.GivenName
		}
		u.Object["displayName"] = rDisplayName
	}
	if r.Group != nil {
		u.Object["group"] = *r.Group
	}
	if r.MemberKey != nil && r.MemberKey != dclService.EmptyMembershipMemberKey {
		rMemberKey := make(map[string]interface{})
		if r.MemberKey.Id != nil {
			rMemberKey["id"] = *r.MemberKey.Id
		}
		if r.MemberKey.Namespace != nil {
			rMemberKey["namespace"] = *r.MemberKey.Namespace
		}
		u.Object["memberKey"] = rMemberKey
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.PreferredMemberKey != nil && r.PreferredMemberKey != dclService.EmptyMembershipPreferredMemberKey {
		rPreferredMemberKey := make(map[string]interface{})
		if r.PreferredMemberKey.Id != nil {
			rPreferredMemberKey["id"] = *r.PreferredMemberKey.Id
		}
		if r.PreferredMemberKey.Namespace != nil {
			rPreferredMemberKey["namespace"] = *r.PreferredMemberKey.Namespace
		}
		u.Object["preferredMemberKey"] = rPreferredMemberKey
	}
	// Roles is a repeated nested message; each role may carry an expiry
	// detail and restriction evaluations, flattened the same way.
	var rRoles []interface{}
	for _, rRolesVal := range r.Roles {
		rRolesObject := make(map[string]interface{})
		if rRolesVal.ExpiryDetail != nil && rRolesVal.ExpiryDetail != dclService.EmptyMembershipRolesExpiryDetail {
			rRolesValExpiryDetail := make(map[string]interface{})
			if rRolesVal.ExpiryDetail.ExpireTime != nil {
				rRolesValExpiryDetail["expireTime"] = *rRolesVal.ExpiryDetail.ExpireTime
			}
			rRolesObject["expiryDetail"] = rRolesValExpiryDetail
		}
		if rRolesVal.Name != nil {
			rRolesObject["name"] = *rRolesVal.Name
		}
		if rRolesVal.RestrictionEvaluations != nil && rRolesVal.RestrictionEvaluations != dclService.EmptyMembershipRolesRestrictionEvaluations {
			rRolesValRestrictionEvaluations := make(map[string]interface{})
			if rRolesVal.RestrictionEvaluations.MemberRestrictionEvaluation != nil && rRolesVal.RestrictionEvaluations.MemberRestrictionEvaluation != dclService.EmptyMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation {
				rRolesValRestrictionEvaluationsMemberRestrictionEvaluation := make(map[string]interface{})
				if rRolesVal.RestrictionEvaluations.MemberRestrictionEvaluation.State != nil {
					rRolesValRestrictionEvaluationsMemberRestrictionEvaluation["state"] = string(*rRolesVal.RestrictionEvaluations.MemberRestrictionEvaluation.State)
				}
				rRolesValRestrictionEvaluations["memberRestrictionEvaluation"] = rRolesValRestrictionEvaluationsMemberRestrictionEvaluation
			}
			rRolesObject["restrictionEvaluations"] = rRolesValRestrictionEvaluations
		}
		rRoles = append(rRoles, rRolesObject)
	}
	u.Object["roles"] = rRoles
	if r.Type != nil {
		u.Object["type"] = string(*r.Type)
	}
	if r.UpdateTime != nil {
		u.Object["updateTime"] = *r.UpdateTime
	}
	return u
}

// UnstructuredToMembership is the inverse of MembershipToUnstructured.
// Every present key is type-asserted; a wrong dynamic type yields an error
// naming the offending field.
func UnstructuredToMembership(u *unstructured.Resource) (*dclService.Membership, error) {
	r := &dclService.Membership{}
	if _, ok := u.Object["createTime"]; ok {
		if s, ok := u.Object["createTime"].(string); ok {
			r.CreateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.CreateTime: expected string")
		}
	}
	if _, ok := u.Object["deliverySetting"]; ok {
		if s, ok := u.Object["deliverySetting"].(string); ok {
			r.DeliverySetting = dclService.MembershipDeliverySettingEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.DeliverySetting: expected string")
		}
	}
	if _, ok := u.Object["displayName"]; ok {
		if rDisplayName, ok := u.Object["displayName"].(map[string]interface{}); ok {
			r.DisplayName = &dclService.MembershipDisplayName{}
			if _, ok := rDisplayName["familyName"]; ok {
				if s, ok := rDisplayName["familyName"].(string); ok {
					r.DisplayName.FamilyName = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.DisplayName.FamilyName: expected string")
				}
			}
			if _, ok := rDisplayName["fullName"]; ok {
				if s, ok := rDisplayName["fullName"].(string); ok {
					r.DisplayName.FullName = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.DisplayName.FullName: expected string")
				}
			}
			if _, ok := rDisplayName["givenName"]; ok {
				if s, ok := rDisplayName["givenName"].(string); ok {
					r.DisplayName.GivenName = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.DisplayName.GivenName: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.DisplayName: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["group"]; ok {
		if s, ok := u.Object["group"].(string); ok {
			r.Group = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Group: expected string")
		}
	}
	if _, ok := u.Object["memberKey"]; ok {
		if rMemberKey, ok := u.Object["memberKey"].(map[string]interface{}); ok {
			r.MemberKey = &dclService.MembershipMemberKey{}
			if _, ok := rMemberKey["id"]; ok {
				if s, ok := rMemberKey["id"].(string); ok {
					r.MemberKey.Id = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.MemberKey.Id: expected string")
				}
			}
			if _, ok := rMemberKey["namespace"]; ok {
				if s, ok := rMemberKey["namespace"].(string); ok {
					r.MemberKey.Namespace = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.MemberKey.Namespace: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.MemberKey: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["preferredMemberKey"]; ok {
		if rPreferredMemberKey, ok := u.Object["preferredMemberKey"].(map[string]interface{}); ok {
			r.PreferredMemberKey = &dclService.MembershipPreferredMemberKey{}
			if _, ok := rPreferredMemberKey["id"]; ok {
				if s, ok := rPreferredMemberKey["id"].(string); ok {
					r.PreferredMemberKey.Id = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.PreferredMemberKey.Id: expected string")
				}
			}
			if _, ok := rPreferredMemberKey["namespace"]; ok {
				if s, ok := rPreferredMemberKey["namespace"].(string); ok {
					r.PreferredMemberKey.Namespace = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.PreferredMemberKey.Namespace: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.PreferredMemberKey: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["roles"]; ok {
		if s, ok := u.Object["roles"].([]interface{}); ok {
			for _, o := range s {
				if objval, ok := o.(map[string]interface{}); ok {
					var rRoles dclService.MembershipRoles
					if _, ok := objval["expiryDetail"]; ok {
						if rRolesExpiryDetail, ok := objval["expiryDetail"].(map[string]interface{}); ok {
							rRoles.ExpiryDetail = &dclService.MembershipRolesExpiryDetail{}
							if _, ok := rRolesExpiryDetail["expireTime"]; ok {
								if s, ok := rRolesExpiryDetail["expireTime"].(string); ok {
									rRoles.ExpiryDetail.ExpireTime = dcl.String(s)
								} else {
									return nil, fmt.Errorf("rRoles.ExpiryDetail.ExpireTime: expected string")
								}
							}
						} else {
							return nil, fmt.Errorf("rRoles.ExpiryDetail: expected map[string]interface{}")
						}
					}
					if _, ok := objval["name"]; ok {
						if s, ok := objval["name"].(string); ok {
							rRoles.Name = dcl.String(s)
						} else {
							return nil, fmt.Errorf("rRoles.Name: expected string")
						}
					}
					if _, ok := objval["restrictionEvaluations"]; ok {
						if rRolesRestrictionEvaluations, ok := objval["restrictionEvaluations"].(map[string]interface{}); ok {
							rRoles.RestrictionEvaluations = &dclService.MembershipRolesRestrictionEvaluations{}
							if _, ok := rRolesRestrictionEvaluations["memberRestrictionEvaluation"]; ok {
								if rRolesRestrictionEvaluationsMemberRestrictionEvaluation, ok := rRolesRestrictionEvaluations["memberRestrictionEvaluation"].(map[string]interface{}); ok {
									rRoles.RestrictionEvaluations.MemberRestrictionEvaluation = &dclService.MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation{}
									if _, ok := rRolesRestrictionEvaluationsMemberRestrictionEvaluation["state"]; ok {
										if s, ok := rRolesRestrictionEvaluationsMemberRestrictionEvaluation["state"].(string); ok {
											rRoles.RestrictionEvaluations.MemberRestrictionEvaluation.State = dclService.MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnumRef(s)
										} else {
											return nil, fmt.Errorf("rRoles.RestrictionEvaluations.MemberRestrictionEvaluation.State: expected string")
										}
									}
								} else {
									return nil, fmt.Errorf("rRoles.RestrictionEvaluations.MemberRestrictionEvaluation: expected map[string]interface{}")
								}
							}
						} else {
							return nil, fmt.Errorf("rRoles.RestrictionEvaluations: expected map[string]interface{}")
						}
					}
					r.Roles = append(r.Roles, rRoles)
				}
			}
		} else {
			return nil, fmt.Errorf("r.Roles: expected []interface{}")
		}
	}
	if _, ok := u.Object["type"]; ok {
		if s, ok := u.Object["type"].(string); ok {
			r.Type = dclService.MembershipTypeEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.Type: expected string")
		}
	}
	if _, ok := u.Object["updateTime"]; ok {
		if s, ok := u.Object["updateTime"].(string); ok {
			r.UpdateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.UpdateTime: expected string")
		}
	}
	return r, nil
}

// GetMembership fetches the resource described by u and returns it in
// unstructured form.
func GetMembership(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToMembership(u)
	if err != nil {
		return nil, err
	}
	r, err = c.GetMembership(ctx, r)
	if err != nil {
		return nil, err
	}
	return MembershipToUnstructured(r), nil
}

// ListMembership lists all memberships of the given group, following
// pagination until exhausted.
func ListMembership(ctx context.Context, config *dcl.Config, group string) ([]*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	l, err := c.ListMembership(ctx, group)
	if err != nil {
		return nil, err
	}
	var resources []*unstructured.Resource
	for {
		for _, r := range l.Items {
			resources = append(resources, MembershipToUnstructured(r))
		}
		if !l.HasNext() {
			break
		}
		if err := l.Next(ctx, c); err != nil {
			return nil, err
		}
	}
	return resources, nil
}

// ApplyMembership creates or updates the resource, honoring an optional
// state hint (prior state) carried in opts.
func ApplyMembership(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToMembership(u)
	if err != nil {
		return nil, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToMembership(ush)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	r, err = c.ApplyMembership(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	return MembershipToUnstructured(r), nil
}

// MembershipHasDiff dry-runs an apply with all lifecycle changes blocked;
// an ApplyInfeasibleError means a change would be required, i.e. a diff.
func MembershipHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToMembership(u)
	if err != nil {
		return false, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToMembership(ush)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	_, err = c.ApplyMembership(ctx, r, opts...)
	if err != nil {
		if _, ok := err.(dcl.ApplyInfeasibleError); ok {
			return true, nil
		}
		return false, err
	}
	return false, nil
}

// DeleteMembership deletes the resource described by u.
func DeleteMembership(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	c := dclService.NewClient(config)
	r, err := UnstructuredToMembership(u)
	if err != nil {
		return err
	}
	return c.DeleteMembership(ctx, r)
}

// MembershipID returns the canonical identity string of the resource.
func MembershipID(u *unstructured.Resource) (string, error) {
	r, err := UnstructuredToMembership(u)
	if err != nil {
		return "", err
	}
	return r.ID()
}

// STV identifies this adapter in the unstructured registry.
func (r *Membership) STV() unstructured.ServiceTypeVersion {
	return unstructured.ServiceTypeVersion{
		"cloudidentity", "Membership", "beta",
	}
}

// IAM policy operations are not supported for Membership resources.
func (r *Membership) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Membership) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Membership) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return unstructured.ErrNoSuchMethod
}

func (r *Membership) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Membership) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Membership) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

// The following methods delegate to the package-level CRUD helpers above.
func (r *Membership) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetMembership(ctx, config, resource)
}

func (r *Membership) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyMembership(ctx, config, resource, opts...)
}

func (r *Membership) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return MembershipHasDiff(ctx, config, resource, opts...)
}

func (r *Membership) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteMembership(ctx, config, resource)
}

func (r *Membership) ID(resource *unstructured.Resource) (string, error) {
	return MembershipID(resource)
}

func init() {
	unstructured.Register(&Membership{})
}
package dialer import ( "k0s.io/k0s/pkg/agent" ) var ( _ agent.Dialer = (*dialr)(nil) ) func New(c agent.Config) agent.Dialer { return &dialr{ c: c, } } type dialr struct { c agent.Config }
package terminal_test import ( "bytes" "io" "github.com/docker/docker/pkg/term" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/cloudfoundry-incubator/ltc/terminal" "github.com/cloudfoundry-incubator/ltc/terminal/mocks" ) type fakeStdin struct { io.Reader } func (s *fakeStdin) Fd() uintptr { return 42 } var _ = Describe("PasswordReader", func() { var ( fakeTerm *mocks.FakeTerm inBuffer *bytes.Buffer outBuffer *bytes.Buffer passwordReader terminal.PasswordReader ) BeforeEach(func() { fakeTerm = &mocks.FakeTerm{} inBuffer = bytes.NewBufferString("secret\n") outBuffer = &bytes.Buffer{} passwordReader = &terminal.TermPasswordReader{ Term: fakeTerm, Stdin: &fakeStdin{inBuffer}, Stdout: outBuffer, } }) Context("#PromptForPassword", func() { It("should disable echo and prompt for a password", func() { termState := &term.State{} fakeTerm.SaveStateReturns(termState, nil) Expect(passwordReader.PromptForPassword("P%s%s", "ro", "mpt")).To(Equal("secret")) Expect(outBuffer.String()).To(Equal("Prompt: \n")) Expect(fakeTerm.DisableEchoCallCount()).To(Equal(1)) fd, state := fakeTerm.DisableEchoArgsForCall(0) Expect(fd).To(Equal(uintptr(42))) Expect(state == termState).To(BeFalse()) Expect(fakeTerm.RestoreTerminalCallCount()).To(Equal(1)) fd, state = fakeTerm.RestoreTerminalArgsForCall(0) Expect(fd).To(Equal(uintptr(42))) Expect(state == termState).To(BeTrue()) }) }) })
// Port of the Box2D-Lite demo scenes, rendered with the amore engine.
// Keys 1-9 switch scenes; Escape quits.
package main

import (
	"fmt"
	"math"
	"math/rand"

	b2d "github.com/neguse/go-box2d-lite/box2dlite"
	"github.com/tanema/amore"
	"github.com/tanema/amore/gfx"
	"github.com/tanema/amore/keyboard"
	"github.com/tanema/amore/timer"
)

// timeStep is the nominal fixed step used to derive joint tuning constants
// (softness/bias) in Demo7 and Demo9.
const timeStep = 1.0 / 60

var (
	gravity    = b2d.Vec2{0.0, -10.0}
	iterations = 10
	world      = b2d.NewWorld(gravity, iterations)
	title      = ""
)

func main() {
	Demo1()
	amore.Start(update, draw)
}

// update handles scene-switch keys and advances the physics world.
// NOTE(review): the world is stepped with the variable frame deltaTime
// while the joint constants above assume the fixed timeStep — confirm
// this is intentional.
func update(deltaTime float32) {
	if keyboard.IsDown(keyboard.KeyEscape) {
		amore.Quit()
	}

	if keyboard.IsDown(keyboard.Key1) {
		Demo1()
	} else if keyboard.IsDown(keyboard.Key2) {
		Demo2()
	} else if keyboard.IsDown(keyboard.Key3) {
		Demo3()
	} else if keyboard.IsDown(keyboard.Key4) {
		Demo4()
	} else if keyboard.IsDown(keyboard.Key5) {
		Demo5()
	} else if keyboard.IsDown(keyboard.Key6) {
		Demo6()
	} else if keyboard.IsDown(keyboard.Key7) {
		Demo7()
	} else if keyboard.IsDown(keyboard.Key8) {
		Demo8()
	} else if keyboard.IsDown(keyboard.Key9) {
		Demo9()
	}

	world.Step(float64(deltaTime))
}

// renderBody draws a body's oriented box outline. World coordinates are
// mapped to screen space via offset o (screen center-ish) and scale S
// (20 px per unit, Y flipped so +Y is up).
func renderBody(b *b2d.Body) {
	R := b2d.Mat22ByAngle(b.Rotation)
	x := b.Position
	h := b2d.MulSV(0.5, b.Width) // half-extents

	o := b2d.Vec2{400, 400}
	S := b2d.Mat22{b2d.Vec2{20.0, 0.0}, b2d.Vec2{0.0, -20.0}}

	// Four rotated corners, then world->screen transform.
	v1 := o.Add(S.MulV(x.Add(R.MulV(b2d.Vec2{-h.X, -h.Y}))))
	v2 := o.Add(S.MulV(x.Add(R.MulV(b2d.Vec2{h.X, -h.Y}))))
	v3 := o.Add(S.MulV(x.Add(R.MulV(b2d.Vec2{h.X, h.Y}))))
	v4 := o.Add(S.MulV(x.Add(R.MulV(b2d.Vec2{-h.X, h.Y}))))

	gfx.Line(
		float32(v1.X), float32(v1.Y),
		float32(v2.X), float32(v2.Y),
		float32(v3.X), float32(v3.Y),
		float32(v4.X), float32(v4.Y),
		float32(v1.X), float32(v1.Y))
}

// renderJoint draws the two segments linking each body's center to the
// joint's anchor on that body, using the same world->screen transform
// as renderBody.
func renderJoint(j *b2d.Joint) {
	b1 := j.Body1
	b2 := j.Body2

	R1 := b2d.Mat22ByAngle(b1.Rotation)
	R2 := b2d.Mat22ByAngle(b2.Rotation)

	x1 := b1.Position
	p1 := x1.Add(R1.MulV(j.LocalAnchor1))

	x2 := b2.Position
	p2 := x2.Add(R2.MulV(j.LocalAnchor2))

	o := b2d.Vec2{400, 400}
	S := b2d.Mat22{b2d.Vec2{20.0, 0.0}, b2d.Vec2{0.0, -20.0}}

	x1 = o.Add(S.MulV(x1))
	p1 = o.Add(S.MulV(p1))
	x2 = o.Add(S.MulV(x2))
	p2 = o.Add(S.MulV(p2))

	gfx.Line(float32(x1.X), float32(x1.Y), float32(p1.X), float32(p1.Y))
	gfx.Line(float32(x2.X), float32(x2.Y), float32(p2.X), float32(p2.Y))
}

// draw renders every body and joint plus the HUD text.
func draw() {
	gfx.SetLineWidth(2)
	for _, b := range world.Bodies {
		renderBody(b)
	}
	for _, j := range world.Joints {
		renderJoint(j)
	}
	gfx.Print(fmt.Sprintf("fps: %v", timer.GetFPS()))
	gfx.Print("Press numbers 1 -> 9 to see different demos", 0, 15)
	gfx.Print(title, 100, 100)
}

// Single box
func Demo1() {
	title = "Single Box"
	world.Clear()

	var b1, b2 b2d.Body

	// Static ground: math.MaxFloat64 mass means immovable in box2d-lite.
	b1.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
	b1.Position = b2d.Vec2{0.0, -0.5 * b1.Width.Y}
	world.AddBody(&b1)

	b2.Set(&b2d.Vec2{1.0, 1.0}, 200.0)
	b2.Position = b2d.Vec2{0.0, 4.0}
	world.AddBody(&b2)
}

// A simple pendulum
func Demo2() {
	title = "Single Pendulum"
	world.Clear()

	var b2, b1 b2d.Body
	var j b2d.Joint

	b1.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
	b1.Friction = 0.2
	b1.Position = b2d.Vec2{0.0, -0.5 * b1.Width.Y}
	b1.Rotation = 0.0
	world.AddBody(&b1)

	b2.Set(&b2d.Vec2{1.0, 1.0}, 100.0)
	b2.Friction = 0.2
	b2.Position = b2d.Vec2{9.0, 11.0}
	b2.Rotation = 0.0
	world.AddBody(&b2)

	// Pin the box to the ground at the pivot point.
	j.Set(&b1, &b2, &b2d.Vec2{0.0, 11.0})
	world.AddJoint(&j)
}

// Varying friction coefficients
func Demo3() {
	title = "Varying friction coefficients"
	world.Clear()

	// Ground plus an alternating series of static ramps and lips.
	{
		var b b2d.Body
		b.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
		b.Position = b2d.Vec2{0.0, -0.5 * b.Width.Y}
		world.AddBody(&b)
	}
	{
		var b b2d.Body
		b.Set(&b2d.Vec2{13.0, 0.25}, math.MaxFloat64)
		b.Position = b2d.Vec2{-2.0, 11.0}
		b.Rotation = -0.25
		world.AddBody(&b)
	}
	{
		var b b2d.Body
		b.Set(&b2d.Vec2{0.25, 1.0}, math.MaxFloat64)
		b.Position = b2d.Vec2{5.25, 9.5}
		world.AddBody(&b)
	}
	{
		var b b2d.Body
		b.Set(&b2d.Vec2{13.0, 0.25}, math.MaxFloat64)
		b.Position = b2d.Vec2{2.0, 7.0}
		b.Rotation = 0.25
		world.AddBody(&b)
	}
	{
		var b b2d.Body
		b.Set(&b2d.Vec2{0.25, 1.0}, math.MaxFloat64)
		b.Position = b2d.Vec2{-5.25, 5.5}
		world.AddBody(&b)
	}

	// Five boxes with decreasing friction dropped onto the top ramp.
	frictions := []float64{0.75, 0.5, 0.35, 0.1, 0.0}
	for i := 0; i < 5; i++ {
		var b b2d.Body
		b.Set(&b2d.Vec2{0.5, 0.5}, 25.0)
		b.Friction = frictions[i]
		b.Position = b2d.Vec2{-7.5 + 2.0*float64(i), 14.0}
		world.AddBody(&b)
	}
}

// A vertical stack
func Demo4() {
	title = "A vertical stack"
	world.Clear()

	{
		var b b2d.Body
		b.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
		b.Friction = 0.2
		b.Position = b2d.Vec2{0.0, -0.5 * b.Width.Y}
		b.Rotation = 0.0
		world.AddBody(&b)
	}

	for i := 0; i < 10; i++ {
		var b b2d.Body
		b.Set(&b2d.Vec2{1.0, 1.0}, 1.0)
		b.Friction = 0.2
		// Small random x-jitter keeps the stack from being perfectly
		// aligned, which makes the demo more interesting.
		x := rand.Float64()*0.2 - 0.1
		b.Position = b2d.Vec2{x, 0.51 + 1.05*float64(i)}
		world.AddBody(&b)
	}
}

// A pyramid
func Demo5() {
	title = "A pyramid"
	world.Clear()

	{
		var b b2d.Body
		b.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
		b.Friction = 0.2
		b.Position = b2d.Vec2{0.0, -0.5 * b.Width.Y}
		b.Rotation = 0.0
		world.AddBody(&b)
	}

	// Rows shrink by one box each level; x shifts half a box per row.
	x := b2d.Vec2{-6.0, 0.75}
	for i := 0; i < 12; i++ {
		y := x
		for j := i; j < 12; j++ {
			var b b2d.Body
			b.Set(&b2d.Vec2{1.0, 1.0}, 10.0)
			b.Friction = 0.2
			b.Position = y
			world.AddBody(&b)
			y = y.Add(b2d.Vec2{1.125, 0.0})
		}
		x = x.Add(b2d.Vec2{0.5625, 2.0})
	}
}

// A teeter
func Demo6() {
	title = "A teeter"
	world.Clear()

	var b1, b2, b3, b4, b5 b2d.Body

	// Ground, plank (b2), two small boxes on one end, and a heavy box
	// dropped on the other end.
	b1.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
	b1.Position = b2d.Vec2{0.0, -0.5 * b1.Width.Y}
	world.AddBody(&b1)

	b2.Set(&b2d.Vec2{12.0, 0.25}, 100)
	b2.Position = b2d.Vec2{0.0, 1.0}
	world.AddBody(&b2)

	b3.Set(&b2d.Vec2{0.5, 0.5}, 25.0)
	b3.Position = b2d.Vec2{-5.0, 2.0}
	world.AddBody(&b3)

	b4.Set(&b2d.Vec2{0.5, 0.5}, 25.0)
	b4.Position = b2d.Vec2{-5.5, 2.0}
	world.AddBody(&b4)

	b5.Set(&b2d.Vec2{1.0, 1.0}, 100)
	b5.Position = b2d.Vec2{5.5, 15.0}
	world.AddBody(&b5)

	{
		var j b2d.Joint
		j.Set(&b1, &b2, &b2d.Vec2{0.0, 1.0})
		world.AddJoint(&j)
	}
}

// A suspension bridge
func Demo7() {
	title = "A suspension bridge"
	world.Clear()

	// ba[0] is the ground; ba[1..numPlunks] are the planks.
	var ba []*b2d.Body

	{
		var b b2d.Body
		b.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
		b.Friction = 0.2
		b.Position = b2d.Vec2{0.0, -0.5 * b.Width.Y}
		b.Rotation = 0.0
		world.AddBody(&b)
		ba = append(ba, &b)
	}

	const numPlunks = 15
	const mass = 50.0

	for i := 0; i < numPlunks; i++ {
		var b b2d.Body
		b.Set(&b2d.Vec2{1.0, 0.25}, mass)
		b.Friction = 0.2
		b.Position = b2d.Vec2{-8.5 + 1.25*float64(i), 5.0}
		world.AddBody(&b)
		ba = append(ba, &b)
	}

	// Tuning
	const frequencyHz = 2.0
	const dampingRatio = 0.7
	// frequency in radians
	const omega = 2.0 * math.Pi * frequencyHz
	// damping coefficient
	const d = 2.0 * mass * dampingRatio * omega
	// spring stifness
	const k = mass * omega * omega
	// magic formulas (soft-constraint parameters derived from the fixed
	// timeStep, as in the original Box2D-Lite demo)
	const softness = 1.0 / (d + timeStep*k)
	const biasFactor = timeStep * k / (d + timeStep*k)

	// numPlunks+1 joints: plank-to-plank, with the modulo wrapping the
	// last joint back to the ground body (ba[0]) to anchor both ends.
	for i := 0; i <= numPlunks; i++ {
		var j b2d.Joint
		j.Set(ba[i], ba[(i+1)%(numPlunks+1)], &b2d.Vec2{-9.125 + 1.25*float64(i), 5.0})
		j.Softness = softness
		j.BiasFactor = biasFactor
		world.AddJoint(&j)
	}
}

// Dominos
func Demo8() {
	title = "Dominos"
	world.Clear()

	var b1 b2d.Body
	b1.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
	b1.Position = b2d.Vec2{0.0, -0.5 * b1.Width.Y}
	world.AddBody(&b1)

	// Shelf holding the dominos.
	{
		var b b2d.Body
		b.Set(&b2d.Vec2{12.0, 0.5}, math.MaxFloat64)
		b.Position = b2d.Vec2{-1.5, 10.0}
		world.AddBody(&b)
	}

	for i := 0; i < 10; i++ {
		var b b2d.Body
		b.Set(&b2d.Vec2{0.2, 2.0}, 10.0)
		b.Position = b2d.Vec2{-6.0 + 1.0*float64(i), 11.25}
		b.Friction = 0.1
		world.AddBody(&b)
	}

	// Tilted ramp the last domino falls onto.
	{
		var b b2d.Body
		b.Set(&b2d.Vec2{14.0, 0.5}, math.MaxFloat64)
		b.Position = b2d.Vec2{1.0, 6.0}
		b.Rotation = 0.3
		world.AddBody(&b)
	}

	var b2 b2d.Body
	b2.Set(&b2d.Vec2{0.5, 3.0}, math.MaxFloat64)
	b2.Position = b2d.Vec2{-7.0, 4.0}
	world.AddBody(&b2)

	var b3 b2d.Body
	b3.Set(&b2d.Vec2{12.0, 0.25}, 20.0)
	b3.Position = b2d.Vec2{-0.9, 1.0}
	world.AddBody(&b3)

	{
		var j b2d.Joint
		j.Set(&b1, &b3, &b2d.Vec2{-2.0, 1.0})
		world.AddJoint(&j)
	}

	var b4 b2d.Body
	b4.Set(&b2d.Vec2{0.5, 0.5}, 10.0)
	b4.Position = b2d.Vec2{-10.0, 15.0}
	world.AddBody(&b4)

	{
		var j b2d.Joint
		j.Set(&b2, &b4, &b2d.Vec2{-7.0, 15.0})
		world.AddJoint(&j)
	}

	var b5 b2d.Body
	b5.Set(&b2d.Vec2{2.0, 2.0}, 20.0)
	b5.Position = b2d.Vec2{6.0, 2.5}
	b5.Friction = 0.1
	world.AddBody(&b5)

	{
		var j b2d.Joint
		j.Set(&b1, &b5, &b2d.Vec2{6.0, 2.6})
		world.AddJoint(&j)
	}

	var b6 b2d.Body
	b6.Set(&b2d.Vec2{2.0, 0.2}, 10.0)
	b6.Position = b2d.Vec2{6.0, 3.6}
	world.AddBody(&b6)

	{
		var j b2d.Joint
		j.Set(&b5, &b6, &b2d.Vec2{7.0, 3.5})
		world.AddJoint(&j)
	}
}

// A multi-pendulum
func Demo9() {
	title = "A multi-pendulum"
	world.Clear()

	var b1 *b2d.Body
	{
		var b b2d.Body
		b.Set(&b2d.Vec2{100.0, 20.0}, math.MaxFloat64)
		b.Position = b2d.Vec2{0.0, -0.5 * b.Width.Y}
		world.AddBody(&b)
		b1 = &b
	}

	const mass = 10.0

	// Tuning
	const frequencyHz = 4.0
	const dampingRatio = 0.7
	// frequency in radians
	const omega = 2.0 * math.Pi * frequencyHz
	// damping coefficient
	const d = 2.0 * mass * dampingRatio * omega
	// spring stiffness
	const k = mass * omega * omega
	// magic formulas
	const softness = 1.0 / (d + timeStep*k)
	const biasFactor = timeStep * k / (d + timeStep*k)

	const y = 12.0

	// Chain each new link to the previous one (b1 starts as the ground).
	for i := 0; i < 15; i++ {
		x := b2d.Vec2{0.5 + float64(i), y}
		var b b2d.Body
		b.Set(&b2d.Vec2{0.75, 0.25}, mass)
		b.Friction = 0.2
		b.Position = x
		b.Rotation = 0.0
		world.AddBody(&b)

		var j b2d.Joint
		j.Set(b1, &b, &b2d.Vec2{float64(i), y})
		j.Softness = softness
		j.BiasFactor = biasFactor
		world.AddJoint(&j)

		b1 = &b
	}
}
package sql_test

import (
	"errors"
	"reflect"
	"testing"

	sql "github.com/ndilsou/go-rdbms-playground"
)

// TestPlanner_Plan exercises the planner against a catalog containing a
// single relation "t1" with columns a, b, c (see testRelations below).
func TestPlanner_Plan(t *testing.T) {
	tests := []struct {
		name    string
		stmt    sql.Stmt
		want    sql.PlanNode
		wantErr bool
	}{
		{
			// Unknown relation must be rejected.
			name: "no relation",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "first_name"}, {Name: "last_name"}, {Name: "age"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "my_table"},
				},
			},
			wantErr: true,
		},
		{
			// Empty projection list must be rejected.
			name: "no column",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
			},
			wantErr: true,
		},
		{
			name: "simple plan",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
			},
			want: &sql.ProjectionNode{
				Columns: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: &sql.TableScanNode{
					RelationName: "t1",
				},
			},
		},
		{
			// WHERE becomes a FilterNode between projection and scan.
			name: "plan with where",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
				Where: &sql.WhereClause{
					Predicate: &sql.BinaryExpr{
						LHS: &sql.Ident{Name: "a"},
						Op:  sql.EQ,
						RHS: &sql.BasicLit{Kind: sql.INT, Value: "1"},
					},
				},
			},
			want: &sql.ProjectionNode{
				Columns: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: &sql.FilterNode{
					Filter: &sql.BinaryExpr{
						LHS: &sql.Ident{Name: "a"},
						Op:  sql.EQ,
						RHS: &sql.BasicLit{Kind: sql.INT, Value: "1"},
					},
					From: &sql.TableScanNode{
						RelationName: "t1",
					},
				},
			},
		},
		{
			name: "plan with limit",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
				Limit: &sql.LimitClause{
					Value: 10,
				},
			},
			want: &sql.LimitNode{
				Value: 10,
				From: &sql.ProjectionNode{
					Columns: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
					From: &sql.TableScanNode{
						RelationName: "t1",
					},
				},
			},
		},
		{
			// Node order from the root: Limit -> Offset -> Sort -> Projection.
			name: "plan with limit and offset and order by",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
				Limit: &sql.LimitClause{
					Value: 10,
				},
				Offset:  &sql.OffsetClause{Value: 5},
				OrderBy: &sql.OrderByClause{Fields: []*sql.Ident{{Name: "a"}}},
			},
			want: &sql.LimitNode{
				Value: 10,
				From: &sql.OffsetNode{
					Value: 5,
					From: &sql.SortNode{
						Keys: []string{"a"},
						From: &sql.ProjectionNode{
							Columns: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
							From: &sql.TableScanNode{
								RelationName: "t1",
							},
						},
					},
				},
			},
		},
		{
			name: "plan with order by",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
				OrderBy: &sql.OrderByClause{Fields: []*sql.Ident{{Name: "a"}}},
			},
			want: &sql.SortNode{
				Keys: []string{"a"},
				From: &sql.ProjectionNode{
					Columns: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
					From: &sql.TableScanNode{
						RelationName: "t1",
					},
				},
			},
		},
		{
			// Predicate referencing a column absent from the catalog.
			name: "plan with filter and unknown column",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
				Where: &sql.WhereClause{
					Predicate: &sql.BinaryExpr{
						LHS: &sql.Ident{Name: "first_name"},
						Op:  sql.EQ,
						RHS: &sql.BasicLit{Kind: sql.INT, Value: "1"},
					},
				},
			},
			wantErr: true,
		},
		{
			name: "plan with offset and no limit",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
				Offset: &sql.OffsetClause{Value: 10},
			},
			wantErr: true,
		},
		{
			name: "plan with limit and no projection",
			stmt: &sql.SelectStmt{
				Limit: &sql.LimitClause{Value: 10},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := sql.NewPlanner(&mockCatalog{})
			got, err := p.Plan(tt.stmt)
			if (err != nil) != tt.wantErr {
				t.Errorf("Plan() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Plan() got = %v, want %v", got, tt.want)
			}
		})
	}
}

// testRelations is the fixture catalog: one relation "t1" with columns
// a (INTEGER), b (REAL) and c (TEXT).
var testRelations = map[string]sql.Relation{
	"t1": {
		Name:     "t1",
		Location: nil,
		Schema: map[string]sql.Column{
			"a": {
				Name:     "a",
				Type:     sql.INTEGER,
				Location: nil,
			},
			"b": {
				Name:     "b",
				Type:     sql.REAL,
				Location: nil,
			},
			"c": {
				// BUGFIX: was Name: "b" (copy-paste error) — the column
				// keyed "c" must also be named "c".
				Name:     "c",
				Type:     sql.TEXT,
				Location: nil,
			},
		},
	},
}

// mockCatalog serves testRelations to the planner under test.
type mockCatalog struct {
}

// HasColumn reports whether any fixture relation contains columnName.
func (m *mockCatalog) HasColumn(columnName string) bool {
	for _, v := range testRelations {
		if v.HasColumn(columnName) {
			return true
		}
	}
	return false
}

// GetRelation looks up a fixture relation by name.
func (m *mockCatalog) GetRelation(s string) (sql.Relation, error) {
	r, ok := testRelations[s]
	if !ok {
		return sql.Relation{}, errors.New("no relation")
	}
	return r, nil
}

// BenchmarkPlanner_Plan measures planning cost for representative
// statements; results are sunk via ignoreBenchResult to defeat dead-code
// elimination.
func BenchmarkPlanner_Plan(b *testing.B) {
	benchmarks := []struct {
		name string
		stmt sql.Stmt
	}{
		{
			name: "simple plan",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
			},
		},
		{
			name: "plan with limit and offset and order by",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
				Limit: &sql.LimitClause{
					Value: 10,
				},
				Offset:  &sql.OffsetClause{Value: 5},
				OrderBy: &sql.OrderByClause{Fields: []*sql.Ident{{Name: "a"}}},
			},
		},
		{
			name: "plan with filter and unknown column",
			stmt: &sql.SelectStmt{
				Fields: []sql.Ident{{Name: "a"}, {Name: "b"}, {Name: "c"}},
				From: sql.FromClause{
					TableName: &sql.Ident{Name: "t1"},
				},
				Where: &sql.WhereClause{
					Predicate: &sql.BinaryExpr{
						LHS: &sql.Ident{Name: "first_name"},
						Op:  sql.EQ,
						RHS: &sql.BasicLit{Kind: sql.INT, Value: "1"},
					},
				},
			},
		},
	}
	var plan sql.PlanNode
	var err error
	for _, bm := range benchmarks {
		b.Run(bm.name, func(b *testing.B) {
			p := sql.NewPlanner(&mockCatalog{})
			for i := 0; i < b.N; i++ {
				plan, err = p.Plan(bm.stmt)
			}
		})
	}
	ignoreBenchResult(plan, err)
}
package controllers

import (
	// BUG FIX: json.Unmarshal is used throughout this file but
	// "encoding/json" was missing from the import block.
	"encoding/json"
	"net/url"
	"strconv"
	"time"

	"github.com/astaxie/beego/logs"

	"github.com/canghai908/zbxtable/models"
)

// ExpController operations for Group: serves the export endpoints that
// produce xlsx attachments (item trend, item history, inspection report).
type ExpController struct {
	BaseController
}

// ExpRes is the shared JSON response payload for export endpoints.
// NOTE(review): this is package-level mutable state shared by all requests;
// concurrent handlers may race on it — consider a per-request local instead.
var ExpRes models.ExpList

// URLMapping registers the controller actions.
func (c *ExpController) URLMapping() {
	c.Mapping("GetItemTrend", c.GetItemTrend)
	c.Mapping("GetItemHistory", c.GetItemHistory)
	c.Mapping("Inspect", c.Inspect)
}

// parsePeriod converts a ["start", "end"] pair formatted as
// "2006-01-02 15:04:05" (local time) into Unix timestamps. When the pair is
// missing or blank it defaults to the week (168h) ending now.
//
// BUG FIX: the original computed the one-week default and then
// unconditionally indexed v.Period[0]/v.Period[1] anyway, which both
// discarded the default and panicked on an empty period slice.
func parsePeriod(period []string) (start, end int64) {
	if len(period) < 2 || period[0] == "" || period[1] == "" {
		now := time.Now()
		return now.Add(-168 * time.Hour).Unix(), now.Unix()
	}
	const timeLayout = "2006-01-02 15:04:05"
	loc, _ := time.LoadLocation("Local")
	st, _ := time.ParseInLocation(timeLayout, period[0], loc)
	en, _ := time.ParseInLocation(timeLayout, period[1], loc)
	return st.Unix(), en.Unix()
}

// serveError fills ExpRes with code/msg and writes it as the JSON response.
// Callers must return immediately afterwards.
func (c *ExpController) serveError(code int, msg string) {
	ExpRes.Code = code
	ExpRes.Message = msg
	c.Data["json"] = ExpRes
	c.ServeJSON()
}

// serveXLSX streams data to the client as a gzip-enabled xlsx attachment
// named name (URL-escaped for the Content-Disposition header).
func (c *ExpController) serveXLSX(name string, data []byte) {
	filename := url.QueryEscape(name)
	c.Ctx.Output.Header("Content-Type", "application/octet-stream")
	c.Ctx.Output.Header("Content-Disposition", "attachment; filename="+filename)
	c.Ctx.Output.Header("Content-Transfer-Encoding", "binary")
	c.Ctx.Output.Header("Access-Control-Expose-Headers", "Content-Disposition")
	c.Ctx.Output.Status = 200
	c.Ctx.Output.EnableGzip = true
	c.Ctx.Output.Context.Output.Body(data)
}

// GetItemTrend export
// @Title Export item trend data
// @Description Export trend data as an xlsx file by item ID
// @Param X-Token header string true "x-token in header"
// @Param body body models.ListQueryAll true "body for Host content"
// @Success 200 {object} models.Group
// @Failure 403 is empty
// @router /trend [post]
func (c *ExpController) GetItemTrend() {
	var v models.ListQueryAll
	if err := json.Unmarshal(c.Ctx.Input.RequestBody, &v); err != nil {
		c.serveError(500, err.Error())
		return
	}
	start, end := parsePeriod(v.Period)
	iodata, err := models.GetTrenDataFileName(v, start, end)
	if err != nil {
		// Preserved original behavior: model errors are reported with code 200.
		c.serveError(200, err.Error())
		return
	}
	c.serveXLSX(v.Host.Name+"_"+v.Item.Name+"_trend"+".xlsx", iodata)
}

// GetItemHistory export
// @Title Export item history data
// @Description Export detailed history data as an xlsx file by item ID
// @Param X-Token header string true "x-token in header"
// @Param body body models.ListQueryAll true "body for Host content"
// @Success 200 {object} models.Group
// @Failure 403 is empty
// @router /history [post]
func (c *ExpController) GetItemHistory() {
	var v models.ListQueryAll
	if err := json.Unmarshal(c.Ctx.Input.RequestBody, &v); err != nil {
		c.serveError(500, err.Error())
		return
	}
	start, end := parsePeriod(v.Period)
	iodata, err := models.GetHistoryDataFileName(v, start, end)
	if err != nil {
		c.serveError(200, err.Error())
		return
	}
	c.serveXLSX(v.Host.Name+"_"+v.Item.Name+"_history"+".xlsx", iodata)
}

// Inspect ITm
// @Title Export inspection report
// @Description Export the inspection sheet for a host group
// @Param X-Token header string true "x-token in header"
// @Param body body models.ListQueryNew true "body for Host content"
// @Success 200 {object} models.Group
// @Failure 403 is empty
// @router /inspect [post]
func (c *ExpController) Inspect() {
	var v models.HostGroupsPlist
	if err := json.Unmarshal(c.Ctx.Input.RequestBody, &v); err != nil {
		c.serveError(200, err.Error())
		return
	}
	hostdata, err := models.GetHostsByGroupIDList(v.GroupID)
	if err != nil {
		// BUG FIX: the original served the error response and then fell
		// through to use the nil host data; every error path below now
		// returns after responding.
		c.serveError(200, err.Error())
		return
	}
	// ss collects the raw metric values for one host:
	// V1 total memory, V2 available memory, V3 CPU load %, V4 mem ratio.
	type ss struct {
		V1 float64 `json:"v1"`
		V2 float64 `json:"v2"`
		V3 float64 `json:"v3"`
		V4 float64 `json:"v4"`
	}
	tt := make([]ss, len(hostdata))
	yy := make([]models.Insp, len(hostdata))
	for kk, host := range hostdata {
		cpuIdle, err := models.GetItemByKey(host.HostID, "system.cpu.util[,idle]")
		if err != nil {
			logs.Error(err)
			c.serveError(200, err.Error())
			return
		}
		memTotal, err := models.GetItemByKey(host.HostID, "vm.memory.size[total]")
		if err != nil {
			logs.Error(err)
			c.serveError(200, err.Error())
			return
		}
		memAvail, err := models.GetItemByKey(host.HostID, "vm.memory.size[available]")
		if err != nil {
			logs.Error(err)
			c.serveError(200, err.Error())
			return
		}
		for _, item := range memTotal {
			f, _ := strconv.ParseFloat(item.Lastvalue, 64)
			tt[kk].V1 = f
		}
		for _, item := range memAvail {
			f, _ := strconv.ParseFloat(item.Lastvalue, 64)
			tt[kk].V2 = f
		}
		for _, item := range cpuIdle {
			f, _ := strconv.ParseFloat(item.Lastvalue, 64)
			if f != 0 {
				// CPU load is 100 - idle%.
				tt[kk].V3 = models.Round(100-f, 2)
			} else {
				tt[kk].V3 = 0
			}
			// NOTE: kept inside this loop as in the original, so V4 stays 0
			// when the host has no CPU idle item at all.
			if tt[kk].V1 != 0 {
				tt[kk].V4 = models.Round(tt[kk].V2/tt[kk].V1, 2)
			} else {
				tt[kk].V4 = 0
			}
		}
		yy[kk].HostName = host.Name
		yy[kk].CPULoad = tt[kk].V3
		yy[kk].MemPct = tt[kk].V4
	}
	ByteData, err := models.ExpInspect(v.Name, yy)
	if err != nil {
		c.serveError(200, err.Error())
		return
	}
	c.serveXLSX(v.Name+".xlsx", ByteData)
}
package memrepo import ( "github.com/scjalliance/drivestream/commit" "github.com/scjalliance/drivestream/resource" ) var _ commit.Sequence = (*Commits)(nil) // Commits accesses a sequence of commits in an in-memory repository. type Commits struct { repo *Repository drive resource.ID } // Next returns the sequence number to use for the next commit. func (ref Commits) Next() (n commit.SeqNum, err error) { drv, ok := ref.repo.drives[ref.drive] if !ok { return 0, nil } return commit.SeqNum(len(drv.Commits)), nil } // Read reads commit data for a range of commits // starting at the given sequence number. Up to len(p) entries will // be returned in p. The number of entries is returned as n. func (ref Commits) Read(start commit.SeqNum, p []commit.Data) (n int, err error) { drv, ok := ref.repo.drives[ref.drive] if !ok { return 0, commit.NotFound{Drive: ref.drive, Commit: start} } length := commit.SeqNum(len(drv.Commits)) if start >= length { return 0, commit.NotFound{Drive: ref.drive, Commit: start} } for n < len(p) && start+commit.SeqNum(n) < length { p[n] = drv.Commits[start+commit.SeqNum(n)].Data n++ } return n, nil } // Ref returns a commit reference. func (ref Commits) Ref(c commit.SeqNum) commit.Reference { return Commit{ repo: ref.repo, drive: ref.drive, commit: c, } }
package middleware

import (
	"encoding/json"
	"fmt"
	"io"
)

// Usuario represents an application user record (JSON-serializable).
type Usuario struct {
	ID             int    `json:"id"`
	Login          string `json:"login"`
	Senha          string `json:"senha"`
	Email          string `json:"email"`
	DataCriacao    string `json:"dataCriacao"`
	IDGrupoUsuario int    `json:"idGrupoUsuario"`
}

// isValid checks every mandatory field of a complete user record,
// returning the first missing-field error encountered.
func (u Usuario) isValid() error {
	switch {
	case u.ID == 0:
		return fmt.Errorf("Preencha o id do usuário")
	case u.Login == "":
		return fmt.Errorf("Preencha o login do usuário")
	case u.Senha == "":
		return fmt.Errorf("Preencha o senha do usuário")
	case u.Email == "":
		return fmt.Errorf("Preencha o email do usuário")
	case u.IDGrupoUsuario == 0:
		return fmt.Errorf("Preencha o id do grupo do usuário")
	}
	return nil
}

// isValidLogin checks only the credential fields (login and password).
func (u Usuario) isValidLogin() error {
	switch {
	case u.Login == "":
		return fmt.Errorf("Preencha o login do usuário")
	case u.Senha == "":
		return fmt.Errorf("Preencha o senha do usuário")
	}
	return nil
}

// NewUsuario builds a fully-populated Usuario and validates all
// mandatory fields before returning it.
func NewUsuario(id int, login string, senha string, email string, dataCriacao string, idGrupoUsuario int) (*Usuario, error) {
	u := &Usuario{
		ID:             id,
		Login:          login,
		Senha:          senha,
		Email:          email,
		DataCriacao:    dataCriacao,
		IDGrupoUsuario: idGrupoUsuario,
	}
	if err := u.isValid(); err != nil {
		return nil, err
	}
	return u, nil
}

// NewFromJson decodes a user from a JSON request body and validates
// its login credentials.
func NewFromJson(body io.ReadCloser) (*Usuario, error) {
	usuario := new(Usuario)
	if err := json.NewDecoder(body).Decode(usuario); err != nil {
		return nil, err
	}
	if err := usuario.isValidLogin(); err != nil {
		return nil, err
	}
	return usuario, nil
}
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// main reads register instructions from input.txt and prints the largest
// register value: the final maximum by default, or the maximum ever held
// when invoked with the "part2" argument.
func main() {
	// Select the puzzle part from the command line; part 1 is the default.
	part := 1
	if len(os.Args) > 1 && os.Args[1] == "part2" {
		part = 2
	}

	inFile, _ := os.Open("input.txt")
	defer inFile.Close()

	registers := make(map[string]int)
	best := 0

	scanner := bufio.NewScanner(inFile)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		// Instruction shape: <reg> <inc|dec> <amount> if <reg> <op> <value>
		fields := strings.Fields(scanner.Text())
		if executeOp(fields[4], fields[6], fields[5], registers) {
			registers[fields[0]] = executeAction(registers, fields[0], fields[2], fields[1])
		}
		// Part 2 tracks the running maximum after every instruction.
		if part == 2 {
			best = getMax(registers, best)
		}
	}
	// Part 1 only needs the maximum of the final register state.
	if part == 1 {
		best = getMax(registers, best)
	}
	fmt.Println(best)
}

// executeOp evaluates "<register a> <op> <literal b>" against the current
// register values; a missing register reads as zero.
func executeOp(a string, b string, op string, set map[string]int) bool {
	lhs := set[a]
	rhs, _ := strconv.Atoi(b)
	switch op {
	case "<":
		return lhs < rhs
	case "<=":
		return lhs <= rhs
	case ">":
		return lhs > rhs
	case ">=":
		return lhs >= rhs
	case "==":
		return lhs == rhs
	case "!=":
		return lhs != rhs
	}
	fmt.Println("Unsupported op:", op)
	return false
}

// executeAction returns register a's value after applying "inc" (add) or
// any other action (subtract) by the literal amount b.
func executeAction(set map[string]int, a string, b string, act string) int {
	delta, _ := strconv.Atoi(b)
	if act != "inc" {
		delta = -delta
	}
	return set[a] + delta
}

// getMax returns the larger of crtMax and the maximum register value,
// where an empty (or all-negative) register set contributes zero —
// matching the original accumulator semantics.
func getMax(set map[string]int, crtMax int) int {
	best := 0
	for _, v := range set {
		if v > best {
			best = v
		}
	}
	if best > crtMax {
		return best
	}
	return crtMax
}
package protocol

import (
	"bytes"
	"strings"
)

// OpQuery models a query wire message; the field layout (flags, full
// collection name, skip/return counts, query document, optional selector)
// matches the MongoDB legacy OP_QUERY format — confirm against the Op/Header
// definitions elsewhere in this package.
type OpQuery struct {
	*Op
	Flags                int32
	FullCollectionName   string
	NumberToSkip         int32
	NumberToReturn       int32
	Query                Document
	ReturnFieldsSelector Document
}

// TableName splits "db.collection" into a TableName. The second return is
// false when the full collection name is not exactly two dot-separated parts.
func (p *OpQuery) TableName() (*TableName, bool) {
	sp := strings.Split(p.FullCollectionName, ".")
	if len(sp) == 2 {
		return &TableName{sp[0], sp[1]}, true
	}
	return nil, false
}

// Encode serializes the message (header included) into a fresh byte slice.
func (p *OpQuery) Encode() ([]byte, error) {
	bf := bytes.Buffer{}
	if _, err := p.Append(&bf); err == nil {
		return bf.Bytes(), nil
	} else {
		return nil, err
	}
}

// Decode parses bs into the receiver. It validates that the header's
// MessageLength equals len(bs), then reads each field sequentially,
// advancing offset by the exact encoded size of the previous field.
// The ReturnFieldsSelector document is optional and only read when bytes
// remain after the Query document. Fields are assigned to the receiver
// only after the whole message has parsed successfully.
func (p *OpQuery) Decode(bs []byte) error {
	v0 := &Header{}
	if err := v0.Decode(bs); err != nil {
		return err
	}
	totals := len(bs)
	if int(v0.MessageLength) != totals {
		return &errMessageLength{int(v0.MessageLength), totals}
	}
	offset := HeaderLength
	v1 := readInt32(bs, offset) // flags
	offset += 4
	v2 := readString(bs, offset) // full collection name (+1 for NUL terminator)
	offset += len(v2) + 1
	v3 := readInt32(bs, offset) // number to skip
	offset += 4
	v4 := readInt32(bs, offset) // number to return
	offset += 4
	v5, size, err := readDocument(bs, offset) // query document
	if err != nil {
		return err
	}
	offset += size
	// Optional trailing document: only present if bytes remain.
	var v6 Document
	if offset < totals {
		v6, size, err = readDocument(bs, offset)
		if err != nil {
			return err
		}
		offset += size
	}
	// The message must be fully consumed; trailing garbage is an error.
	if offset != totals {
		return &errMessageOffset{offset, totals}
	}
	p.OpHeader = v0
	p.Flags = v1
	p.FullCollectionName = v2
	p.NumberToSkip = v3
	p.NumberToReturn = v4
	p.Query = v5
	p.ReturnFieldsSelector = v6
	return nil
}

// Append serializes the message into buffer and returns the total number of
// bytes written (header included). The body is staged in a scratch buffer
// first so the header's MessageLength can be computed before it is written;
// the receiver's MessageLength is restored afterwards via defer, keeping
// Append side-effect-free on the receiver.
func (p *OpQuery) Append(buffer *bytes.Buffer) (int, error) {
	cache := &bytes.Buffer{}
	wrote, err := newWriter(cache).
		writeInt32(p.Flags).
		writeString(p.FullCollectionName).
		writeInt32(p.NumberToSkip).
		writeInt32(p.NumberToReturn).
		writeDocument(p.Query).
		writeDocument(p.ReturnFieldsSelector).
		end()
	if err != nil {
		return 0, err
	}
	old := p.OpHeader.MessageLength
	wrote += HeaderLength
	p.OpHeader.MessageLength = int32(wrote)
	defer func() { p.OpHeader.MessageLength = old }()
	bf := &bytes.Buffer{}
	if _, err := p.OpHeader.Append(bf); err != nil {
		return 0, err
	}
	if _, err := cache.WriteTo(bf); err != nil {
		return 0, err
	}
	if _, err := bf.WriteTo(buffer); err != nil {
		return 0, err
	}
	return wrote, nil
}

// NewOpQuery returns an OpQuery with an initialized (zero-valued) Op.
func NewOpQuery() *OpQuery {
	return &OpQuery{
		Op: &Op{},
	}
}
// +build !what,!whathappens

// Package what provides debug-trace hooks. This file holds the no-op stubs
// compiled when neither the "what" nor the "whathappens" build tag is set,
// so trace calls cost nothing in normal builds. The real implementations are
// presumably in a tag-guarded sibling file — confirm there.
package what

// Happens is the no-op stub of the trace call (format string plus args).
func Happens(fmt string, args ...interface{}) {}

// If is the no-op stub of the conditional trace call
// (condition, format string, args).
func If(bool, string, ...interface{}) {}
/* Return the number of times that the string "hi" appears anywhere in the given string. */ package main import ( "fmt" ) func count_hi(s string) int { var n int = 0 for i := 0; i < len(s)-1; i++ { if s[i:i+2] == "hi" { n++ } } return n } func main(){ var status int = 0 if count_hi("abc hi ho") == 1 { status += 1 } if count_hi("ABChi hi") == 2 { status += 1 } if count_hi("hihi") == 2 { status += 1 } if count_hi("a") == 0 { status += 1 } if status == 4 { fmt.Println("OK") } else { fmt.Println("NOT OK") } }
// solveNQueens returns every placement of n non-attacking queens on an
// n x n board, each board rendered as n strings of '.' and 'Q'.
func solveNQueens(n int) [][]string {
	// queens[r] holds the column of the queen on row r, -1 while unplaced.
	queens := make([]int, n)
	for i := range queens {
		queens[i] = -1
	}
	boards := [][]string{}
	placeRow(queens, 0, &boards)
	return boards
}

// placeRow tries every safe column for row, recursing row by row and
// appending a rendered board once all rows are filled.
func placeRow(queens []int, row int, out *[][]string) {
	n := len(queens)
	if row == n {
		*out = append(*out, render(queens))
		return
	}
	for col := 0; col < n; col++ {
		if safe(queens, row, col) {
			queens[row] = col
			placeRow(queens, row+1, out)
			queens[row] = -1 // backtrack
		}
	}
}

// render converts a complete column assignment into board strings.
func render(queens []int) []string {
	n := len(queens)
	board := make([]string, n)
	for r, c := range queens {
		cells := make([]byte, n)
		for i := range cells {
			cells[i] = '.'
		}
		cells[c] = 'Q'
		board[r] = string(cells)
	}
	return board
}

// safe reports whether a queen at (row, col) conflicts with any queen
// already placed on an earlier row (same column or same diagonal).
func safe(queens []int, row int, col int) bool {
	for r := 0; r < row; r++ {
		d := queens[r] - col
		if d == 0 || d == row-r || d == -(row-r) {
			return false
		}
	}
	return true
}
package graphs

import (
	"bufio"
	"bytes"
	"image/color"
	"time"

	"github.com/gonum/plot"
	"github.com/gonum/plot/plotter"
	"github.com/gonum/plot/vg"
	"github.com/gonum/plot/vg/draw"
	"github.com/gonum/plot/vg/vgsvg"

	"github.com/spazbite187/sensornet"
)

// Canvas dimensions in inches.
const (
	xSize = 7
	ySize = 3
)

// GetTempGraph renders the temperature history of the given sensor data as
// an SVG image.
//
// BUG FIX: every error path in this file previously returned
// `[]byte{}, nil`, silently discarding the error; failures are now
// propagated to the caller.
func GetTempGraph(input []*sensornet.SensorData) ([]byte, error) {
	data, err := tempPoints(input)
	if err != nil {
		return nil, err
	}
	return renderGraph("Temp (F)", data)
}

// GetSignalGraph renders the signal-strength history of the given sensor
// data as an SVG image.
func GetSignalGraph(input []*sensornet.SensorData) ([]byte, error) {
	data, err := signalPoints(input)
	if err != nil {
		return nil, err
	}
	return renderGraph("Signal (dBm)", data)
}

// renderGraph builds the styled time-series line plot shared by both graph
// types and returns it as SVG bytes.
func renderGraph(yLabel string, data plotter.XYs) ([]byte, error) {
	// xticks defines how we convert and display time.Time values.
	xticks := plot.TimeTicks{Format: "15:04\n2006-01-02"}
	xticks.Time = plot.UnixTimeIn(time.Now().Location())

	p, err := plot.New()
	if err != nil {
		return nil, err
	}
	p.X.Label.Text = "Time"
	p.Y.Label.Text = yLabel
	p.X.Tick.Marker = xticks

	line, points, err := plotter.NewLinePoints(data)
	if err != nil {
		return nil, err
	}
	white := color.RGBA{R: 255, G: 255, B: 255, A: 255}
	points.Shape = draw.CircleGlyph{}
	points.GlyphStyle.Radius = 0
	points.Color = white
	line.Color = white

	setColors(p)
	p.Add(plotter.NewGrid())
	p.Add(line, points)

	// Draw onto an SVG canvas and extract the bytes.
	c := vgsvg.New(xSize*vg.Inch, ySize*vg.Inch)
	p.Draw(draw.New(c))
	return getSVGBytes(c)
}

// tempPoints returns x, y points (Unix time, temperature F) based on
// SensorData.
func tempPoints(data []*sensornet.SensorData) (plotter.XYs, error) {
	pts := make(plotter.XYs, len(data))
	for k, v := range data {
		lastUpdate, err := time.Parse(time.ANSIC, v.LastUpdate)
		if err != nil {
			return pts, err
		}
		pts[k].X = float64(lastUpdate.Unix())
		pts[k].Y = v.TempF
	}
	return pts, nil
}

// signalPoints returns x, y points (Unix time, signal dBm) based on
// SensorData.
func signalPoints(data []*sensornet.SensorData) (plotter.XYs, error) {
	pts := make(plotter.XYs, len(data))
	for k, v := range data {
		lastUpdate, err := time.Parse(time.ANSIC, v.LastUpdate)
		if err != nil {
			return pts, err
		}
		pts[k].X = float64(lastUpdate.Unix())
		pts[k].Y = float64(v.Signal)
	}
	return pts, nil
}

// getSVGBytes serializes the canvas into a byte slice.
func getSVGBytes(canvas *vgsvg.Canvas) ([]byte, error) {
	var b bytes.Buffer
	buffer := bufio.NewWriter(&b)
	if _, err := canvas.WriteTo(buffer); err != nil {
		return nil, err
	}
	// BUG FIX: the bufio.Writer was never flushed, so any bytes still
	// buffered were missing from the returned slice.
	if err := buffer.Flush(); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

// setColors applies the shared dark theme to the plot.
func setColors(p *plot.Plot) {
	p.X.Label.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.Y.Label.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.X.Tick.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.Y.Tick.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.X.Tick.LineStyle.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.Y.Tick.LineStyle.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.X.Tick.Label.Color = color.RGBA{R: 200, G: 200, B: 200, A: 255}
	p.Y.Tick.Label.Color = color.RGBA{R: 200, G: 200, B: 200, A: 255}
	p.BackgroundColor = color.RGBA{R: 55, G: 58, B: 60, A: 255}
	p.Title.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.Title.TextStyle.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.X.LineStyle.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.Y.LineStyle.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
	p.Legend.Color = color.RGBA{R: 255, G: 255, B: 255, A: 255}
}
package server // Notification is a JSONRPC notification type Notification struct { Method string `json:"method"` Params interface{} `json:"params"` } // Subscribable describes a type which can send notifications type Subscribable interface { Subscribe(id uint64) chan Notification Unsubscribe(id uint64) }
package kvstore

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// store is the single Store instance shared by every test; each test calls
// store.Clear() at the end to keep the shared state from leaking between
// tests.
var (
	store Store
)

// init builds the shared store once for the whole test binary.
func init() {
	store = Initialize()
}

// TestGet verifies a set value can be read back.
func TestGet(t *testing.T) {
	store.Set("xx", "yy")
	expectedValue := "yy"
	actualValue, actualError := store.Get("xx")
	assert.Nil(t, actualError)
	assert.Equal(t, expectedValue, actualValue)
	store.Clear()
}

// TestGet_UpdatedValue verifies a second Set overwrites the first.
func TestGet_UpdatedValue(t *testing.T) {
	store.Set("xx", "yy")
	store.Set("xx", "zz")
	expectedValue := "zz"
	actualValue, actualError := store.Get("xx")
	assert.Nil(t, actualError)
	assert.Equal(t, expectedValue, actualValue)
	store.Clear()
}

// TestGet_NoValue verifies Get fails for a key whose Set was rejected
// (empty values are not stored).
func TestGet_NoValue(t *testing.T) {
	_ = store.Set("xx", "")
	expectedValue := ""
	expectedError := errors.New("value does not exist")
	actualValue, actualError := store.Get("xx")
	assert.Equal(t, expectedError, actualError)
	assert.Equal(t, expectedValue, actualValue)
	store.Clear()
}

// TestGet_NoKey verifies Get rejects an empty key.
func TestGet_NoKey(t *testing.T) {
	expectedValue := ""
	expectedError := errors.New("empty key provided")
	actualValue, actualError := store.Get("")
	assert.Equal(t, expectedError, actualError)
	assert.Equal(t, expectedValue, actualValue)
	store.Clear()
}

// TestSet_NoValue verifies Set rejects an empty value.
func TestSet_NoValue(t *testing.T) {
	expectedError := errors.New("empty value provided")
	actualError := store.Set("xx", "")
	assert.Equal(t, expectedError, actualError)
	store.Clear()
}

// TestClear verifies Clear removes previously stored keys.
func TestClear(t *testing.T) {
	store.Set("xx1", "yy")
	store.Set("xx2", "yy")
	store.Clear()
	expectedValue := ""
	expectedError := errors.New("value does not exist")
	actualValue, actualError := store.Get("xx1")
	assert.Equal(t, expectedError, actualError)
	assert.Equal(t, expectedValue, actualValue)
	store.Clear()
}

// TestClear_EmptyStore verifies Clear on an empty store is harmless.
func TestClear_EmptyStore(t *testing.T) {
	store.Clear()
	expectedValue := ""
	expectedError := errors.New("value does not exist")
	actualValue, actualError := store.Get("xx1")
	assert.Equal(t, expectedError, actualError)
	assert.Equal(t, expectedValue, actualValue)
	store.Clear()
}

// TestDelete verifies a deleted key can no longer be read.
func TestDelete(t *testing.T) {
	store.Set("xx", "yy")
	store.Delete("xx")
	expectedValue := ""
	expectedError := errors.New("value does not exist")
	actualValue, actualError := store.Get("xx")
	assert.Equal(t, expectedError, actualError)
	assert.Equal(t, expectedValue, actualValue)
	store.Clear()
}

// TestDelete_InvalidKey verifies deleting an absent key reports an error.
func TestDelete_InvalidKey(t *testing.T) {
	store.Set("xx", "yy")
	expectedError := errors.New("value does not exist")
	actualError := store.Delete("zz")
	assert.Equal(t, expectedError, actualError)
	store.Clear()
}

// TestDelete_EmptyKey verifies Delete rejects an empty key.
func TestDelete_EmptyKey(t *testing.T) {
	store.Set("xx", "yy")
	expectedError := errors.New("empty key provided")
	actualError := store.Delete("")
	assert.Equal(t, expectedError, actualError)
	store.Clear()
}

// TestDelete_EmptyStore verifies the empty-key check also fires on an
// empty store.
func TestDelete_EmptyStore(t *testing.T) {
	expectedError := errors.New("empty key provided")
	actualError := store.Delete("")
	assert.Equal(t, expectedError, actualError)
	store.Clear()
}
package models

import (
	"encoding/json"
	"errors"
	"fmt"

	"github.com/gomodule/redigo/redis"
)

const (
	sqlForCreateLawsuit = "INSERT INTO lawsuits(id,company_id,points,name,type,date) VALUES(?,?,?,?,?,?);"
)

// Lawsuit is a company lawsuit record; ids are sequential per company.
type Lawsuit struct {
	ID        int    `orm:"column(id)"`
	CompanyID int    `orm:"column(company_id)"`
	Points    int    `orm:"column(points)"`
	Name      string `orm:"column(name)"`
	Date      string `orm:"column(date)"`
	Type      string `orm:"column(type)"`
}

// TableName returns the ORM table name for Lawsuit.
func (ls *Lawsuit) TableName() string {
	return "lawsuits"
}

// getLawsuitsKey returns the redis cache key for a company's lawsuit list.
func getLawsuitsKey(cid int) string {
	return fmt.Sprintf("/companies/%d/lawsuits", cid)
}

// getLawsuitKey returns the redis cache key for a single lawsuit.
func getLawsuitKey(cid, id int) string {
	return fmt.Sprintf("/companies/%d/lawsuits/%d", cid, id)
}

// getLawsuitsFromRedis loads a company's cached lawsuit list.
func getLawsuitsFromRedis(cid int) (*[]Lawsuit, error) {
	conn := RedisPool.Get()
	defer conn.Close()
	b, err := redis.Bytes(conn.Do("GET", getLawsuitsKey(cid)))
	if err != nil {
		return nil, err
	}
	var ls []Lawsuit
	err = json.Unmarshal(b, &ls)
	return &ls, err
}

// getLawsuitFromRedis loads a single cached lawsuit.
func getLawsuitFromRedis(cid, id int) (*Lawsuit, error) {
	conn := RedisPool.Get()
	defer conn.Close()
	b, err := redis.Bytes(conn.Do("GET", getLawsuitKey(cid, id)))
	if err != nil {
		return nil, err
	}
	var l Lawsuit
	err = json.Unmarshal(b, &l)
	return &l, err
}

// setLawsuitsToRedis caches a company's lawsuit list with expiry EXTime.
func setLawsuitsToRedis(cid int, ls *[]Lawsuit) error {
	conn := RedisPool.Get()
	defer conn.Close()
	b, err := json.Marshal(ls)
	if err != nil {
		return err
	}
	_, err = conn.Do("SET", getLawsuitsKey(cid), b, "EX", EXTime)
	return err
}

// setLawsuitToRedis caches a single lawsuit with expiry EXTime.
func setLawsuitToRedis(l *Lawsuit) error {
	conn := RedisPool.Get()
	defer conn.Close()
	b, err := json.Marshal(l)
	if err != nil {
		return err
	}
	_, err = conn.Do("SET", getLawsuitKey(l.CompanyID, l.ID), b, "EX", EXTime)
	return err
}

// GetLawsuitsByCompanyID returns a company's lawsuits, trying the redis
// cache first and falling back to the database on a cache miss (the DB
// result is then cached best-effort).
func GetLawsuitsByCompanyID(cid int) (*[]Lawsuit, error) {
	ls, err := getLawsuitsFromRedis(cid)
	if err != nil {
		var lss []Lawsuit
		_, err := O.Raw("SELECT * FROM lawsuits WHERE company_id = ?;", cid).QueryRows(&lss)
		if err == nil {
			// Best-effort cache fill; a redis failure must not fail the read.
			setLawsuitsToRedis(cid, &lss)
		}
		return &lss, err
	}
	return ls, err
}

// GetLawsuitByIDAndCompanyID returns one lawsuit, trying the redis cache
// first and falling back to the database on a cache miss.
func GetLawsuitByIDAndCompanyID(id, cid int) (*Lawsuit, error) {
	l, err := getLawsuitFromRedis(cid, id)
	if err != nil {
		var ls Lawsuit
		err := O.Raw("SELECT * FROM lawsuits WHERE id = ? AND company_id = ?;", id, cid).QueryRow(&ls)
		if err == nil {
			// Best-effort cache fill.
			setLawsuitToRedis(&ls)
		}
		return &ls, err
	}
	return l, err
}

// CreateLawsuit inserts a lawsuit with the next per-company id and deducts
// its points from the company, all inside one transaction.
func CreateLawsuit(ls *Lawsuit) error {
	if ls == nil {
		return errors.New("arg ls is nil")
	}
	o := NewOrm()
	if err := o.Begin(); err != nil {
		return err
	}
	var maxID int
	// Best-effort: when the company has no lawsuits yet, MAX(id) is NULL and
	// the scan may fail; maxID then stays 0 and the first lawsuit gets id 1.
	// The error is intentionally ignored for that reason.
	_ = o.Raw("SELECT MAX(id) FROM lawsuits WHERE company_id = ?;", ls.CompanyID).QueryRow(&maxID)
	_, err := o.Raw(sqlForCreateLawsuit, maxID+1, ls.CompanyID, ls.Points,
		ls.Name, ls.Type, ls.Date).Exec()
	if err != nil {
		o.Rollback()
		return err
	}
	err = UpdateCompanyPoints(ls.CompanyID, -ls.Points, o)
	if err != nil {
		o.Rollback()
		return err
	}
	return o.Commit()
}

// DeleteLawsuitByIDAndCompanyID removes a lawsuit and refunds its points to
// the company inside one transaction.
func DeleteLawsuitByIDAndCompanyID(id, cid int) error {
	o := NewOrm()
	if err := o.Begin(); err != nil {
		return err
	}
	ls, err := GetLawsuitByIDAndCompanyID(id, cid)
	if err != nil {
		// BUG FIX: the original returned here without rolling back,
		// leaking the transaction opened above.
		o.Rollback()
		return err
	}
	err = UpdateCompanyPoints(cid, ls.Points, o)
	if err != nil {
		o.Rollback()
		return err
	}
	_, err = o.Raw("DELETE FROM lawsuits WHERE id = ? AND company_id = ?;", id, cid).Exec()
	if err != nil {
		o.Rollback()
		return err
	}
	return o.Commit()
}
package config //MYSQL配置 const ( //mysql 连接cdn地址 MYSQL_DATA_SOURCE_NAME = "root:nihaoma123?@tcp(192.168.3.235:3306)/blog?charset=utf8&parseTime=true" //连接池最大链接数 MYSQL_SET_MAX_OPEN_CONNS = 400 //最大闲置链接数 MYSQL_SET_MAX_IDLE_CONNS = 200 //数据表前缀 MYSQL_TABLE_PREFIX = "b_" //关闭表名复数例如"type table struct"表名为 "tables" 关闭 为 "table" MYSQL_TABLE_SINGULAR = true )
// Scratch/demo entry point: the entire experiment below is commented out.
// When enabled, it cloned an example serverless project, read its manifest,
// and built a Docker image for the first declared function.
package main

// import (
// 	"encoding/json"
// 	"fmt"
// 	"io/ioutil"
// 	"os"
// 	"strings"

// 	"github.com/evanxg852000/eserveless/internal/data"
// 	"github.com/evanxg852000/eserveless/internal/helpers"
// 	"github.com/gofrs/uuid"
// )

// func main() {
// 	projectDir, err := ioutil.TempDir("", "repos-")
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer os.RemoveAll(projectDir)

// 	hash, err := helpers.CloneProjectRepo(projectDir, "https://github.com/evanxg852000/node-eserveless-example")
// 	if err != nil {
// 		panic(err)
// 	}

// 	manifest, err := helpers.ReadProjectManifest(projectDir)
// 	if err != nil {
// 		panic(err)
// 	}

// 	var functions []*data.Function
// 	for _, f := range manifest.Functions {
// 		meta, _ := json.Marshal(f.Meta)
// 		functions = append(functions, &data.Function{
// 			Name:      f.Name,
// 			Image:     fmt.Sprintf("%s-%s:latest", uuid.Must(uuid.NewV4()), strings.ToLower(f.Name)),
// 			Runtime:   manifest.Runtime,
// 			Handler:   f.Type,
// 			Schedule:  f.Schedule,
// 			Meta:      string(meta),
// 			ProjectID: 0,
// 		})
// 	}

// 	f := functions[0]
// 	buildDir, err := helpers.PrepareDockerImage("./runtimes", projectDir, f)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer os.RemoveAll(buildDir)

// 	logs, err := helpers.BuildDockerImage(buildDir, f)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(logs)

// 	fmt.Println("server", hash, manifest, functions)
// }