text
stringlengths
11
4.05M
// Copyright 2018 The adeia authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package domain import ( "encoding/json" "net/http" "github.com/pkg/errors" ) //go:generate counterfeiter -o ../mocks/domain_client.go --fake-name DomainClient . client type client interface { Get(string) (*http.Response, error) } // Fetcher gets all domains from URL using Client. type Fetcher struct { Client client URL string } // Fetch domains from remote json endpoint. func (f *Fetcher) Fetch() ([]Domain, error) { if len(f.URL) < 1 { return nil, errors.New("invalid URL") } resp, err := f.Client.Get(f.URL) if err != nil { return nil, err } if resp == nil { return nil, errors.New("received empty response") } result := []Domain{} if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { return nil, errors.Wrap(err, "parse json failed") } return result, nil }
package stormpath

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestGetAPIKey verifies that a freshly created API key can be
// retrieved back by its Href and matches the created key.
func TestGetAPIKey(t *testing.T) {
	t.Parallel()

	application := createTestApplication(t)
	defer application.Purge()
	account := createTestAccount(application, t)

	// The error from CreateAPIKey was previously discarded; a creation
	// failure would surface later as a confusing nil-deref or 404.
	apiKey, err := account.CreateAPIKey()
	assert.NoError(t, err)

	k, err := GetAPIKey(apiKey.Href, MakeAPIKeyCriteria())

	assert.NoError(t, err)
	assert.Equal(t, apiKey, k)
}

// TestDeleteAPIKey verifies that a deleted API key can no longer be
// fetched and that the lookup fails with HTTP 404.
func TestDeleteAPIKey(t *testing.T) {
	t.Parallel()

	application := createTestApplication(t)
	defer application.Purge()
	account := createTestAccount(application, t)

	apiKey, err := account.CreateAPIKey()
	assert.NoError(t, err)

	err = apiKey.Delete()
	assert.NoError(t, err)

	k, err := GetAPIKey(apiKey.Href, MakeAPIKeyCriteria())

	assert.Error(t, err)
	assert.Nil(t, k)
	assert.Equal(t, http.StatusNotFound, err.(Error).Status)
}

// TestUpdateAPIKey verifies that a status change persists through
// Update and is visible on a subsequent fetch.
func TestUpdateAPIKey(t *testing.T) {
	t.Parallel()

	application := createTestApplication(t)
	defer application.Purge()
	account := createTestAccount(application, t)

	apiKey, err := account.CreateAPIKey()
	assert.NoError(t, err)

	apiKey.Status = Disabled
	err = apiKey.Update()
	assert.NoError(t, err)

	updatedAPIKey, err := GetAPIKey(apiKey.Href, MakeAPIKeyCriteria())
	assert.NoError(t, err)
	assert.Equal(t, Disabled, updatedAPIKey.Status)
}
package main

import (
	"fmt"
	"log"

	"github.com/josetom/go-chain/config"
	"github.com/josetom/go-chain/constants"
	"github.com/spf13/cobra"
)

// main loads configuration, builds the root CLI command, registers all
// sub-commands and dispatches to whichever one the user invoked.
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC | log.Lshortfile)

	config.Load("config.yaml")

	rootCmd := &cobra.Command{
		Use:   constants.CliName,
		Short: constants.BlockChainName + " CLI",
		// Running the root command without a sub-command is an error.
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return incorrectUsageErr()
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}

	for _, sub := range []*cobra.Command{
		versionCmd,
		runCmd,
		initCmd,
		configCmd,
		balancesCmd(),
		txCmd(),
		walletCmd(),
	} {
		rootCmd.AddCommand(sub)
	}

	if err := rootCmd.Execute(); err != nil {
		log.Fatalln(err)
	}
}

// incorrectUsageErr is returned when the CLI is invoked without a
// valid sub-command.
func incorrectUsageErr() error {
	return fmt.Errorf("incorrect_usage")
}
package db

import (
	"context"
	"errors"

	"github.com/i-hate-nicknames/redeamtask/pkg/book"
)

// BookRecord represents a single record in the book database
type BookRecord struct {
	// ID is the database-assigned record identifier.
	ID int // todo maybe uint
	// Embedded book payload; its fields are promoted onto the record.
	book.Book
}

// BookDB is a generic book database interface
type BookDB interface {
	// Create stores a new record and returns the stored version.
	Create(context.Context, BookRecord) (BookRecord, error)
	// Update overwrites an existing record.
	Update(context.Context, BookRecord) error
	// Get returns the record with the given ID.
	Get(context.Context, int) (BookRecord, error)
	// GetAll returns every record in the database.
	GetAll(context.Context) ([]BookRecord, error)
	// Delete removes the record with the given ID.
	Delete(context.Context, int) error
	// Close releases the underlying database resources.
	Close(context.Context) error
	// Migrate applies any pending schema migrations.
	Migrate(context.Context) error
}

// ErrBookNotFound is returned when requested book is not present in the database
var ErrBookNotFound = errors.New("book not found")
package fakes

import (
	"sync"

	gcpcompute "google.golang.org/api/compute/v1"
)

// InstanceGroupsClient is a hand-rolled test fake. Each *Call struct
// records how the method was invoked (CallCount, Receives), lets the
// test choose the result (Returns) and optionally delegates to a Stub.
// The embedded sync.Mutex makes the fake safe for concurrent calls.
type InstanceGroupsClient struct {
	DeleteInstanceGroupCall struct {
		sync.Mutex
		CallCount int
		Receives  struct {
			Zone          string
			InstanceGroup string
		}
		Returns struct {
			Error error
		}
		Stub func(string, string) error
	}
	ListInstanceGroupsCall struct {
		sync.Mutex
		CallCount int
		Receives  struct {
			Zone string
		}
		Returns struct {
			InstanceGroupSlice []*gcpcompute.InstanceGroup
			Error              error
		}
		Stub func(string) ([]*gcpcompute.InstanceGroup, error)
	}
}

// DeleteInstanceGroup records the call, then returns the Stub result
// when a stub is configured, otherwise the canned Returns.Error.
func (f *InstanceGroupsClient) DeleteInstanceGroup(param1 string, param2 string) error {
	f.DeleteInstanceGroupCall.Lock()
	defer f.DeleteInstanceGroupCall.Unlock()
	f.DeleteInstanceGroupCall.CallCount++
	f.DeleteInstanceGroupCall.Receives.Zone = param1
	f.DeleteInstanceGroupCall.Receives.InstanceGroup = param2
	if f.DeleteInstanceGroupCall.Stub != nil {
		return f.DeleteInstanceGroupCall.Stub(param1, param2)
	}
	return f.DeleteInstanceGroupCall.Returns.Error
}

// ListInstanceGroups records the call, then returns the Stub result
// when a stub is configured, otherwise the canned Returns values.
func (f *InstanceGroupsClient) ListInstanceGroups(param1 string) ([]*gcpcompute.InstanceGroup, error) {
	f.ListInstanceGroupsCall.Lock()
	defer f.ListInstanceGroupsCall.Unlock()
	f.ListInstanceGroupsCall.CallCount++
	f.ListInstanceGroupsCall.Receives.Zone = param1
	if f.ListInstanceGroupsCall.Stub != nil {
		return f.ListInstanceGroupsCall.Stub(param1)
	}
	return f.ListInstanceGroupsCall.Returns.InstanceGroupSlice, f.ListInstanceGroupsCall.Returns.Error
}
package main

import (
	"fmt"
	"strconv"
	"time"
)

/* How many ways can I get from the top left to the bottom right. */

func main() {
	defer timeTrack(time.Now(), "gridTravelerMem")
	fmt.Printf("gridTravelerMem(18, 18) = %d\n", gridTravelerMem(18, 18, make(map[string]int))) //2333606220
}

// gridTraveler counts the distinct down/right paths across an m x n
// grid using plain exponential recursion (no memoization).
func gridTraveler(m int, n int) int {
	switch {
	case m == 0 || n == 0:
		return 0
	case m == 1 && n == 1:
		return 1
	default:
		return gridTraveler(m-1, n) + gridTraveler(m, n-1)
	}
}

// gridTravelerMem is the memoized variant; results are cached in memo
// under the key "m,n". The grid is symmetric, so a hit under the
// reversed key "n,m" is reused as well.
func gridTravelerMem(m int, n int, memo map[string]int) int {
	key := strconv.Itoa(m) + "," + strconv.Itoa(n)
	if cached, hit := memo[key]; hit {
		return cached
	}
	if cached, hit := memo[strconv.Itoa(n)+","+strconv.Itoa(m)]; hit {
		return cached
	}
	if m == 1 && n == 1 {
		return 1
	}
	if m == 0 || n == 0 {
		return 0
	}
	memo[key] = gridTravelerMem(m-1, n, memo) + gridTravelerMem(m, n-1, memo)
	return memo[key]
}

// timeTrack logs how long has elapsed since start; intended to be
// deferred at the top of the function being measured.
func timeTrack(start time.Time, name string) {
	fmt.Printf("%s took %s\n", name, time.Since(start))
}
package paw

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// UserResource bundles the HTTP handlers for the user endpoints with
// the application state (database handle) they need.
type UserResource struct {
	App *App
}

// CreateUser decodes a User from the request body, persists it and
// responds 201 with the stored record, or 400 on a malformed body.
func (r *UserResource) CreateUser(c *gin.Context) {
	var user User
	if !c.Bind(&user) {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Unable to decode request"})
		return
	}
	r.App.Database.Save(&user)
	c.JSON(http.StatusCreated, user)
}

// ListUsers responds 200 with all users, newest (highest id) first.
func (r *UserResource) ListUsers(c *gin.Context) {
	var users []User
	r.App.Database.Order("id desc").Find(&users)
	c.JSON(http.StatusOK, users)
}

// ShowUser responds 200 with the user identified by the :id path
// parameter, or 404 when no such record exists.
func (r *UserResource) ShowUser(c *gin.Context) {
	var userID = c.Params.ByName("id")
	var existing User
	if r.App.Database.First(&existing, userID).RecordNotFound() {
		c.JSON(http.StatusNotFound, gin.H{"error": "Not found"})
	} else {
		c.JSON(http.StatusOK, existing)
	}
}

// UpdateUser decodes a User from the request body and copies its name
// fields onto the record identified by :id. Responds 400 on a bad
// body, 404 when the record does not exist, 200 with the updated row.
func (r *UserResource) UpdateUser(c *gin.Context) {
	var userID = c.Params.ByName("id")
	var user User
	if !c.Bind(&user) {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Unable to decode request"})
		return
	}
	var existing User
	if r.App.Database.First(&existing, userID).RecordNotFound() {
		c.JSON(http.StatusNotFound, gin.H{"error": "Not found"})
	} else {
		// Transfer attributes explicitly
		// (only the name fields are client-updatable; everything else
		// on the record is left untouched).
		existing.FirstName = user.FirstName
		existing.MiddleName = user.MiddleName
		existing.LastName = user.LastName
		r.App.Database.Save(&existing)
		c.JSON(http.StatusOK, existing)
	}
}
package tree_traversal

import "container/list"

// PostOrder returns the post-order traversal (left, right, root) of
// the tree rooted at root, computed recursively. Returns nil for an
// empty tree.
func PostOrder(root *Tree) []int {
	var result []int
	if root == nil {
		return nil
	}
	result = append(result, PostOrder(root.Left)...)
	result = append(result, PostOrder(root.Right)...)
	result = append(result, root.Val)
	return result
}

// PostOrderNonRecursive returns the same post-order traversal using an
// explicit stack. A node is emitted only once its right subtree has
// been fully visited; lastVisit tracks the most recently emitted node
// so each right subtree is descended into exactly once.
func PostOrderNonRecursive(root *Tree) []int {
	var result []int
	if root == nil {
		return result
	}
	stack := list.New()
	p := root
	// lastVisit starts as a dummy node distinct from every real node.
	lastVisit := new(Tree)
	for p != nil || stack.Len() != 0 {
		// Push the entire left spine of the current subtree.
		for p != nil {
			stack.PushBack(p)
			p = p.Left
		}
		if stack.Len() != 0 {
			node := stack.Back()
			// Descend right only if that subtree hasn't been visited yet.
			if node.Value.(*Tree).Right != nil && lastVisit != node.Value.(*Tree).Right {
				p = node.Value.(*Tree).Right
				continue
			}
			result = append(result, node.Value.(*Tree).Val)
			stack.Remove(node)
			lastVisit = node.Value.(*Tree)
		}
	}
	return result
}
package euler func FilterIntChannel(predicate func(int) bool, in chan int) chan int { out := make(chan int) go func() { for { if v := <-in; predicate(v) { out <- v } } }() return out } func CapIntChannel(in chan int, limit int) chan int { out := make(chan int) go func() { for { v := <-in if v >= limit { close(out) break } out <- v } }() return out } func Fibonaccis() chan int { ch := make(chan int) go func() { for i, j := 0, 1; ; i, j = i+j, i { ch <- i } }() return ch } func Triangles() chan int { ch := make(chan int) go func() { n := 0 for i := 1; ; i++ { n += i ch <- n } }() return ch }
package dbsearch

import (
	"reflect"
	"testing"
)

// Test_Array drives every array-decoding sub-test against the shared
// test database; each helper rebuilds public.test with the column
// types it needs. Skipped entirely when init_test_data returns nil
// (no database available).
func Test_Array(t *testing.T) {
	s := init_test_data()
	if s != nil {
		_01_array_int(t, s)
		_02_array_int64(t, s)
		_03_array_uint64(t, s)
		_04_array_uint(t, s)
		_11_array_bool(t, s)
		_12_array_bool(t, s)
		_21_array_string(t, s)
		_31_array_float32(t, s)
		_32_array_float64(t, s)
	}
	//t.Fatal("Success [no error] test")
}

// array_main_f_test_table (re)creates public.test with one column per
// PostgreSQL array type and inserts one populated row plus one
// all-NULL row (to exercise null -> nil decoding).
func array_main_f_test_table(s *Searcher) {
	sql_create := " CREATE TABLE public.test " +
		"(col1 int, col2 bigint[], col3 smallint[], col4 integer[], " +
		" col7 text[], col8 varchar(50)[], col9 char(10)[], " +
		" col11 real[], col12 double precision[], col13 numeric[], col14 decimal[], " +
		" col15 money[], col16 boolean[] " +
		") "
	sql_cols := "INSERT INTO test(col1, col2, col3, col4, col7, col8, col9, col11, col12, col13, col14, col15, col16 ) "
	sql_vals := []string{
		" VALUES (1, '{9223372036854775807,3,2}'::bigint[], '{18,28,33}'::smallint[], '{884,-121}'::int[], '{\"10\",\"Малененький текст\"}'::text[], " +
			" '{\" varchar \", \" varchar next\", \"654\"}','{\" char[] \",\" char2[] \",\"123.56234\"}', '{-12.13,13.13213409,-12.130909}', '{14.105,-15.015,-15.1500}', '{-16.17,16.1600,-16.173244}', " +
			" '{18.00,18.18,1800.180999234}'::decimal[], '{21.21,22.22,23.23}'::money[], '{TRUE,FALSE,TRUE,FALSE}'::boolean[] ) ",
		" VALUES (2, null, null, null, null, null, null, null, null, null, null, null, null ) ", // check null - nil
	}
	make_t_table(s, sql_create, sql_cols, sql_vals)
}

/* []int test */
// array_int_TestPlace maps every array column into []int, checking
// cross-type coercion (text/varchar/bool -> int).
type array_int_TestPlace struct {
	Col1  int   `db:"col1" type:"int"`
	Col2  []int `db:"col2" type:"[]bigint"`
	Col3  []int `db:"col3" type:"[]smallint"`
	Col4  []int `db:"col4" type:"[]int"`
	Col7  []int `db:"col7" type:"[]text"`
	Col8  []int `db:"col8" type:"[]varchar"`
	Col9  []int `db:"col9" type:"[]char"`
	Col11 []int `db:"col11" type:"[]real"`
	Col12 []int `db:"col12" type:"[]double"`
	Col13 []int `db:"col13" type:"[]numeric"`
	Col14 []int `db:"col14" type:"[]decimal"`
	Col15 []int `db:"col15" type:"[]money"`
	Col16 []int `db:"col16" type:"[]bool"`
}

var array_int_mTestType *AllRows = &AllRows{
	SType: reflect.TypeOf(array_int_TestPlace{}),
}

// _01_array_int checks decoding of array columns into []int fields.
func _01_array_int(t *testing.T, s *Searcher) {
	array_main_f_test_table(s)
	p := []array_int_TestPlace{}
	s.Get(array_int_mTestType, &p, "SELECT * FROM public.test ORDER BY 1")
	if p[0].Col2[0] != 9223372036854775807 {
		t.Fatal("Error array_int_TestPlace.Col2; []int <= []bigint")
	}
	if p[0].Col3[0] != 18 {
		t.Fatal("Error array_int_TestPlace.Col3; []int <= []smallint")
	}
	if p[0].Col4[0] != 884 {
		t.Fatal("Error array_int_TestPlace.Col4; []int <= []int")
	}
	if p[0].Col7[0] != 10 {
		t.Fatal("Error array_int_TestPlace.Col7; []int <= []text")
	}
	if p[0].Col8[1] != 0 {
		t.Fatal("Error array_int_TestPlace.Col8; []int <= []varchar")
	}
	if p[0].Col16[0] != 1 {
		t.Fatal("Error array_int_TestPlace.Col16; []int <= []bool")
	}
}

/* int64 test */
type array_int64_TestPlace struct {
	Col1  int     `db:"col1" type:"int"`
	Col2  []int64 `db:"col2" type:"[]bigint"`
	Col3  []int64 `db:"col3" type:"[]smallint"`
	Col4  []int64 `db:"col4" type:"[]int"`
	Col7  []int64 `db:"col7" type:"[]text"`
	Col8  []int64 `db:"col8" type:"[]varchar"`
	Col9  []int64 `db:"col9" type:"[]char"`
	Col11 []int64 `db:"col11" type:"[]real"`
	Col12 []int64 `db:"col12" type:"[]double"`
	Col13 []int64 `db:"col13" type:"[]numeric"`
	Col14 []int64 `db:"col14" type:"[]decimal"`
	Col15 []int64 `db:"col15" type:"[]money"`
	Col16 []int64 `db:"col16" type:"[]bool"`
}

var array_int64_mTestType *AllRows = &AllRows{
	SType: reflect.TypeOf(array_int64_TestPlace{}),
}

// _02_array_int64 checks decoding of array columns into []int64 fields.
func _02_array_int64(t *testing.T, s *Searcher) {
	array_main_f_test_table(s)
	p := []array_int64_TestPlace{}
	s.Get(array_int64_mTestType, &p, "SELECT * FROM public.test ORDER BY 1")
	if p[0].Col2[0] != 9223372036854775807 {
		t.Fatal("Error array_int_TestPlace.Col2; []int <= []bigint")
	}
	if p[0].Col3[0] != 18 {
		t.Fatal("Error array_int_TestPlace.Col3; []int <= []smallint")
	}
	if p[0].Col4[0] != 884 {
		t.Fatal("Error array_int_TestPlace.Col4; []int <= []int")
	}
	if p[0].Col7[0] != 10 {
		t.Fatal("Error array_int_TestPlace.Col7; []int <= []text")
	}
	if p[0].Col8[1] != 0 {
		t.Fatal("Error array_int_TestPlace.Col8; []int <= []varchar")
	}
	if p[0].Col16[0] != 1 {
		t.Fatal("Error array_int_TestPlace.Col16; []int <= []bool")
	}
}

/* uint64 test */
type array_uint64_TestPlace struct {
	Col1  int      `db:"col1" type:"int"`
	Col2  []uint64 `db:"col2" type:"[]bigint"`
	Col3  []uint64 `db:"col3" type:"[]smallint"`
	Col4  []uint64 `db:"col4" type:"[]int"`
	Col7  []uint64 `db:"col7" type:"[]text"`
	Col8  []uint64 `db:"col8" type:"[]varchar"`
	Col9  []uint64 `db:"col9" type:"[]char"`
	Col11 []uint64 `db:"col11" type:"[]real"`
	Col12 []uint64 `db:"col12" type:"[]double"`
	Col13 []uint64 `db:"col13" type:"[]numeric"`
	Col14 []uint64 `db:"col14" type:"[]decimal"`
	Col15 []uint64 `db:"col15" type:"[]money"`
	Col16 []uint64 `db:"col16" type:"[]bool"`
}

var array_uint64_mTestType *AllRows = &AllRows{
	SType: reflect.TypeOf(array_uint64_TestPlace{}),
}

// _03_array_uint64 checks decoding of array columns into []uint64 fields.
func _03_array_uint64(t *testing.T, s *Searcher) {
	array_main_f_test_table(s)
	p := []array_uint64_TestPlace{}
	s.Get(array_uint64_mTestType, &p, "SELECT * FROM public.test ORDER BY 1")
	if p[0].Col2[0] != 9223372036854775807 {
		t.Fatal("Error array_int_TestPlace.Col2; []int <= []bigint")
	}
	if p[0].Col3[0] != 18 {
		t.Fatal("Error array_int_TestPlace.Col3; []int <= []smallint")
	}
	if p[0].Col4[0] != 884 {
		t.Fatal("Error array_int_TestPlace.Col4; []int <= []int")
	}
	if p[0].Col7[0] != 10 {
		t.Fatal("Error array_int_TestPlace.Col7; []int <= []text")
	}
	if p[0].Col8[1] != 0 {
		t.Fatal("Error array_int_TestPlace.Col8; []int <= []varchar")
	}
	if p[0].Col16[0] != 1 {
		t.Fatal("Error array_int_TestPlace.Col16; []int <= []bool")
	}
}

/* uint test */
type array_uint_TestPlace struct {
	Col1  int    `db:"col1" type:"int"`
	Col2  []uint `db:"col2" type:"[]bigint"`
	Col3  []uint `db:"col3" type:"[]smallint"`
	Col4  []uint `db:"col4" type:"[]int"`
	Col7  []uint `db:"col7" type:"[]text"`
	Col8  []uint `db:"col8" type:"[]varchar"`
	Col9  []uint `db:"col9" type:"[]char"`
	Col11 []uint `db:"col11" type:"[]real"`
	Col12 []uint `db:"col12" type:"[]double"`
	Col13 []uint `db:"col13" type:"[]numeric"`
	Col14 []uint `db:"col14" type:"[]decimal"`
	Col15 []uint `db:"col15" type:"[]money"`
	Col16 []uint `db:"col16" type:"[]bool"`
}

var array_uint_mTestType *AllRows = &AllRows{
	SType: reflect.TypeOf(array_uint_TestPlace{}),
}

// _04_array_uint checks decoding of array columns into []uint fields.
func _04_array_uint(t *testing.T, s *Searcher) {
	array_main_f_test_table(s)
	p := []array_uint_TestPlace{}
	s.Get(array_uint_mTestType, &p, "SELECT * FROM public.test ORDER BY 1")
	if p[0].Col2[0] != 9223372036854775807 {
		t.Fatal("Error array_int_TestPlace.Col2; []int <= []bigint")
	}
	if p[0].Col3[0] != 18 {
		t.Fatal("Error array_int_TestPlace.Col3; []int <= []smallint")
	}
	if p[0].Col4[0] != 884 {
		t.Fatal("Error array_int_TestPlace.Col4; []int <= []int")
	}
	if p[0].Col7[0] != 10 {
		t.Fatal("Error array_int_TestPlace.Col7; []int <= []text")
	}
	if p[0].Col8[1] != 0 {
		t.Fatal("Error array_int_TestPlace.Col8; []int <= []varchar")
	}
	if p[0].Col16[0] != 1 {
		t.Fatal("Error array_int_TestPlace.Col16; []int <= []bool")
	}
}

/* boolean test */
// array_bool_TestPlace decodes boolean[] columns into a spread of Go
// element types to check boolean -> numeric/string coercion.
type array_bool_TestPlace struct {
	Col1  int       `db:"col1" type:"int"`
	Col2  []string  `db:"col2" type:"[]boolean"`
	Col3  []int64   `db:"col3" type:"[]boolean"`
	Col4  []float32 `db:"col4" type:"[]boolean"`
	Col5  []float64 `db:"col5" type:"[]bool"`
	Col6  []bool    `db:"col6" type:"[]bool"`
	Col7  []int     `db:"col7" type:"[]bool"`
	Col8  []uint8   `db:"col8" type:"[]bool"`
	Col9  []uint64  `db:"col9" type:"[]bool"`
	Col10 []uint    `db:"col10" type:"[]bool"`
}

var array_bool_mTestType *AllRows = &AllRows{
	SType: reflect.TypeOf(array_bool_TestPlace{}),
}

// _11_array_bool builds an all-boolean[] table and checks decoding
// into string/int/float/bool/uint element types.
func _11_array_bool(t *testing.T, s *Searcher) {
	sql_create := " CREATE TABLE public.test " +
		"(col1 int, col2 boolean[], col3 boolean[], col4 boolean[], " +
		"col5 boolean[], col6 boolean[], col7 boolean[], col8 boolean[], col9 boolean[], col10 boolean[]) "
	sql_cols := "INSERT INTO test(col1, col2, col3, col4, col5, col6, col7, col8, col9, col10 ) "
	sql_vals := []string{
		"VALUES (1, '{TRUE,FALSE}'::bool[], '{TRUE,FALSE}'::bool[], '{TRUE,FALSE}'::bool[], '{TRUE,FALSE}'::bool[], '{TRUE,FALSE}'::bool[], '{TRUE,FALSE}'::bool[], '{TRUE,FALSE}'::bool[],'{TRUE,FALSE}'::bool[],'{TRUE,FALSE}'::bool[] )",
		"VALUES (2, '{FALSE,TRUE}'::bool[], '{FALSE,TRUE}'::bool[], '{FALSE,TRUE}'::bool[], '{FALSE,TRUE}'::bool[], '{FALSE,TRUE}'::bool[], '{FALSE,TRUE}'::bool[], '{FALSE,TRUE}'::bool[], '{FALSE,TRUE}'::bool[], '{FALSE,TRUE}'::bool[])",
		"VALUES (3, null, null, null, null, null, null, null, null, null)", // check null - nil
	}
	make_t_table(s, sql_create, sql_cols, sql_vals)
	p := []array_bool_TestPlace{}
	s.Get(array_bool_mTestType, &p, "SELECT * FROM public.test ORDER BY 1")
	if p[0].Col2[0] != "1" {
		t.Fatal("Error array_int_TestPlace.Col2; []string <= []boolean")
	}
	if p[0].Col3[0] != 1 {
		t.Fatal("Error array_int_TestPlace.Col3; []int64 <= []boolean")
	}
	// NOTE(review): the float32/float64 checks expect 0 for a TRUE
	// cell, unlike the integer checks above — looks intentional for
	// this driver's coercion rules, but worth confirming.
	if p[0].Col4[0] != 0 {
		t.Fatal("Error array_int_TestPlace.Col4; []float32 <= []boolean")
	}
	if p[0].Col5[0] != 0 {
		t.Fatal("Error array_int_TestPlace.Col5; []float64 <= []boolean")
	}
	if p[0].Col6[0] != true {
		t.Fatal("Error array_int_TestPlace.Col6; []bool <= []boolean")
	}
	if p[0].Col7[0] != 1 {
		t.Fatal("Error array_int_TestPlace.Col7; []int <= []boolean")
	}
	if p[0].Col8[1] != 0 {
		t.Fatal("Error array_int_TestPlace.Col8; []uint8 <= []boolean")
	}
	if p[0].Col9[0] != 1 {
		t.Fatal("Error array_int_TestPlace.Col16; []uint64 <= []boolean")
	}
}

/* boolean test */
// array_bool2_TestPlace decodes every array column into []bool to
// exercise the reverse coercion (numeric/string -> bool).
type array_bool2_TestPlace struct {
	Col1  int    `db:"col1" type:"int"`
	Col2  []bool `db:"col2" type:"[]bigint"`
	Col3  []bool `db:"col3" type:"[]smallint"`
	Col4  []bool `db:"col4" type:"[]int"`
	Col5  []bool `db:"col5" type:"[]text"`
	Col6  []bool `db:"col6" type:"[]varchar"`
	Col7  []bool `db:"col7" type:"[]char"`
	Col8  []bool `db:"col8" type:"[]real"`
	Col9  []bool `db:"col9" type:"[]double"`
	Col10 []bool `db:"col10" type:"[]numeric"`
	Col11 []bool `db:"col11" type:"[]decimal"`
	Col12 []bool `db:"col12" type:"[]money"`
	Col13 []bool `db:"col13" type:"[]bool"`
}

// check_bool_result fails the test unless the decoded slice p matches
// the expected slice col, element by element.
func check_bool_result(t *testing.T, col, p []bool, name string) {
	if len(col) != len(p) {
		t.Fatalf("\n%s: %#v\np: %#v\n, Error _12_array_bool Bad len for %s [%d <=> %d] ", name, col, p, name, len(col), len(p))
	}
	for i := range col {
		if p[i] != col[i] {
			t.Fatalf("\n%s: %#v\np: %#v\nError _12_array_bool Bad value for %s[%d] [%s <=> %s]", name, col, p, name, i, p[i], col[i])
		}
	}
}

// _12_array_bool builds a mixed-type table and checks decoding of
// every column into []bool (zero / empty string => false).
func _12_array_bool(t *testing.T, s *Searcher) {
	sql_create := " CREATE TABLE public.test " +
		"(col1 int, col2 bigint[], col3 smallint[], col4 integer[], " +
		" col5 text[], col6 varchar(50)[], col7 char(10)[], " +
		" col8 real[], col9 double precision[], col10 numeric[], col11 decimal[], " +
		" col12 money[], col13 boolean[] " +
		") "
	sql_cols := "INSERT INTO test(col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13 ) "
	sql_vals := []string{" VALUES (1, " +
		// cols 2,3,4
		"'{9223372036854775807,3,2,0}'::bigint[], '{18,28,0,33}'::smallint[], '{884,-121,0}'::int[], " +
		// cols 5,6
		" '{\"10\",\"\",\"Малененький текст\"}'::text[], '{\"\", \" varchar next\", \"654\"}', " +
		// cols 7,8
		"'{\" char[] \",\"\",\" char2[] \",\"123.56234\",\"\"}', '{0,-12.13,13.13213409,0,-12.130909,0}'," +
		// cols 9, 10
		"'{14.105,0,-15.015,-15.1500}', '{0,-16.17,0.0,16.1600,0,-16.173244,0}', " +
		// cols 11,12
		"'{18.00,18.18,1800.180999234,0.0}'::decimal[], '{21.21,22.22,0.0,0,23.23}'::money[]," +
		// cols 13
		" '{TRUE,FALSE,TRUE,FALSE}'::boolean[] ) ",
		" VALUES (2, null, null, null, null, null, null, null, null, null, null, null, null ) ", // check null - nil
	}
	make_t_table(s, sql_create, sql_cols, sql_vals)
	var array_bool_TestPlace *AllRows = &AllRows{
		SType: reflect.TypeOf(array_bool2_TestPlace{}),
	}
	// Expected []bool values for each column of row 1.
	col2 := []bool{true, true, true, false}
	col3 := []bool{true, true, false, true}
	col4 := []bool{true, true, false}
	col5 := []bool{true, false, true}
	col6 := []bool{false, true, true}
	col7 := []bool{true, false, true, true, false}
	col8 := []bool{false, true, true, false, true, false}
	col9 := []bool{true, false, true, true}
	col10 := []bool{false, true, false, true, false, true, false}
	col11 := []bool{true, true, true, false}
	col12 := []bool{true, true, false, false, true}
	col13 := []bool{true, false, true, false}
	p := []array_bool2_TestPlace{}
	s.Get(array_bool_TestPlace, &p, "SELECT * FROM public.test ORDER BY 1")
	check_bool_result(t, col2, p[0].Col2, "col2")
	check_bool_result(t, col3, p[0].Col3, "col3")
	check_bool_result(t, col4, p[0].Col4, "col4")
	check_bool_result(t, col5, p[0].Col5, "col5")
	check_bool_result(t, col6, p[0].Col6, "col6")
	check_bool_result(t, col7, p[0].Col7, "col7")
	check_bool_result(t, col8, p[0].Col8, "col8")
	check_bool_result(t, col9, p[0].Col9, "col9")
	check_bool_result(t, col10, p[0].Col10, "col10")
	check_bool_result(t, col11, p[0].Col11, "col11")
	check_bool_result(t, col12, p[0].Col12, "col12")
	check_bool_result(t, col13, p[0].Col13, "col13")
}

/* string test */
type array_string_TestPlace struct {
	Col1  int      `db:"col1" type:"int"`
	Col2  []string `db:"col2" type:"[]bigint"`
	Col3  []string `db:"col3" type:"[]smallint"`
	Col4  []string `db:"col4" type:"[]integer"`
	Col7  []string `db:"col7" type:"[]text"`
	Col8  []string `db:"col8" type:"[]varchar"`
	Col9  []string `db:"col9" type:"[]char"`
	Col11 []string `db:"col11" type:"[]real"`
	Col12 []string `db:"col12" type:"[]double"`
	Col13 []string `db:"col13" type:"[]numeric"`
	Col14 []string `db:"col14" type:"[]decimal"`
	Col15 []string `db:"col15" type:"[]money"`
	Col16 []string `db:"col16" type:"[]bool"`
}

var array_string_mTestType *AllRows = &AllRows{
	SType: reflect.TypeOf(array_string_TestPlace{}),
}

// _21_array_string checks decoding of array columns into []string fields.
func _21_array_string(t *testing.T, s *Searcher) {
	array_main_f_test_table(s)
	p := []array_string_TestPlace{}
	s.Get(array_string_mTestType, &p, "SELECT * FROM public.test ORDER BY 1")
	if p[0].Col2[0] != "9223372036854775807" {
		t.Fatal("Error array_int_TestPlace.Col2; []string <= []bigint")
	}
	if p[0].Col3[0] != "18" {
		t.Fatal("Error array_int_TestPlace.Col3; []string <= []smallint")
	}
	if p[0].Col4[0] != "884" {
		t.Fatal("Error array_int_TestPlace.Col4; []string <= []int")
	}
	if p[0].Col7[0] != "10" {
		t.Fatal("Error array_int_TestPlace.Col7; []string <= []text")
	}
	if p[0].Col8[1] != " varchar next" {
		t.Fatal("Error array_int_TestPlace.Col8; []string <= []varchar")
	}
	if p[0].Col16[0] != "1" {
		t.Fatal("Error array_int_TestPlace.Col16; []string <= []bool")
	}
}

/* float32 test */
type array_float32_TestPlace struct {
	Col1  int       `db:"col1" type:"int"`
	Col2  []float32 `db:"col2" type:"[]bigint"`
	Col3  []float32 `db:"col3" type:"[]smallint"`
	Col4  []float32 `db:"col4" type:"[]integer"`
	Col7  []float32 `db:"col7" type:"[]text"`
	Col8  []float32 `db:"col8" type:"[]varchar"`
	Col9  []float32 `db:"col9" type:"[]char"`
	Col11 []float32 `db:"col11" type:"[]real"`
	Col12 []float32 `db:"col12" type:"[]double"`
	Col13 []float32 `db:"col13" type:"[]numeric"`
	Col14 []float32 `db:"col14" type:"[]decimal"`
	Col15 []float32 `db:"col15" type:"[]money"`
	Col16 []float32 `db:"col16" type:"[]bool"`
}

var array_float32_mTestType *AllRows = &AllRows{
	SType: reflect.TypeOf(array_float32_TestPlace{}),
}

// _31_array_float32 checks decoding of array columns into []float32 fields.
func _31_array_float32(t *testing.T, s *Searcher) {
	array_main_f_test_table(s)
	p := []array_float32_TestPlace{}
	s.Get(array_float32_mTestType, &p, "SELECT * FROM public.test ORDER BY 1")
	if p[0].Col2[0] != 9223372036854775807 {
		t.Fatal("Error array_int_TestPlace.Col2; []float32 <= []bigint")
	}
	if p[0].Col3[0] != 18 {
		t.Fatal("Error array_int_TestPlace.Col3; []float32 <= []smallint")
	}
	if p[0].Col4[0] != 884 {
		t.Fatal("Error array_int_TestPlace.Col4; []float32 <= []int")
	}
	if p[0].Col7[0] != 10 {
		t.Fatal("Error array_int_TestPlace.Col7; []float32 <= []text")
	}
	if p[0].Col8[0] != 0 {
		t.Fatal("Error array_int_TestPlace.Col8; []float32 <= []varchar")
	}
	if p[0].Col16[0] != 0 {
		t.Fatal("Error array_int_TestPlace.Col16; []float32 <= []bool")
	}
}

/* float64 test */
type array_float64_TestPlace struct {
	Col1  int       `db:"col1" type:"int"`
	Col2  []float64 `db:"col2" type:"[]bigint"`
	Col3  []float64 `db:"col3" type:"[]smallint"`
	Col4  []float64 `db:"col4" type:"[]integer"`
	Col7  []float64 `db:"col7" type:"[]text"`
	Col8  []float64 `db:"col8" type:"[]varchar"`
	Col9  []float64 `db:"col9" type:"[]char"`
	Col11 []float64 `db:"col11" type:"[]real"`
	Col12 []float64 `db:"col12" type:"[]double"`
	Col13 []float64 `db:"col13" type:"[]numeric"`
	Col14 []float64 `db:"col14" type:"[]decimal"`
	Col15 []float64 `db:"col15" type:"[]money"`
	Col16 []float64 `db:"col16" type:"[]bool"`
}

var array_float64_mTestType *AllRows = &AllRows{
	SType: reflect.TypeOf(array_float64_TestPlace{}),
}

// _32_array_float64 checks decoding of array columns into []float64 fields.
func _32_array_float64(t *testing.T, s *Searcher) {
	array_main_f_test_table(s)
	p := []array_float64_TestPlace{}
	s.Get(array_float64_mTestType, &p, "SELECT * FROM public.test ORDER BY 1")
	if p[0].Col2[0] != 9223372036854775807 {
		t.Fatal("Error array_int_TestPlace.Col2; []float64 <= []bigint")
	}
	if p[0].Col3[0] != 18 {
		t.Fatal("Error array_int_TestPlace.Col3; []float64 <= []smallint")
	}
	if p[0].Col4[0] != 884 {
		t.Fatal("Error array_int_TestPlace.Col4; []float64 <= []int")
	}
	if p[0].Col7[0] != 10 {
		t.Fatal("Error array_int_TestPlace.Col7; []float64 <= []text")
	}
	if p[0].Col8[0] != 0 {
		t.Fatal("Error array_int_TestPlace.Col8; []float64 <= []varchar")
	}
	if p[0].Col16[0] != 0 {
		t.Fatal("Error array_int_TestPlace.Col16; []float64 <= []bool")
	}
}
package response

import "github.com/madneal/gshark/model"

// ExaCustomerResponse wraps a single customer record for JSON API
// responses under the "customer" key.
type ExaCustomerResponse struct {
	Customer model.ExaCustomer `json:"customer"`
}
package database

import (
	"fmt"
	"strings"
	"time"

	"github.com/covista/commons/proto"
)

// given a request for downloading DiagnosisKeys, build the SQL query that returns
// the keys that match the filter. We know because of 'checkGetKeyRequest' that
// there is at least one filter defined in 'request'.
// Returns the query text, the positional parameter values, and any
// date-parsing error.
func buildQuery(request *proto.GetKeyRequest) (string, []interface{}, error) {
	var query_values []interface{}
	var clauses []string
	var joins string

	// if health authority identifier is provided, join through the
	// authorization tables so authority_id is available to filter on.
	if len(request.AuthorityId) > 0 {
		query_values = append(query_values, request.AuthorityId)
		clauses = append(clauses, fmt.Sprintf("authority_id = $%d", len(query_values)))
		joins += ` JOIN authorization_keys USING (authorization_key) JOIN health_authorities USING (api_key)`
	}

	// if an ENIN is provided, round to the nearest day and default to [ENIN, ENIN + 1 day]
	if request.ENIN > 0 {
		start := eninToTimestamp(request.ENIN)
		end := start.Add(24 * time.Hour)
		query_values = append(query_values, start)
		clauses = append(clauses, fmt.Sprintf("enin >= $%d", len(query_values)))
		query_values = append(query_values, end)
		clauses = append(clauses, fmt.Sprintf("enin <= $%d", len(query_values)))
	}

	// if historical range [start, end] dates are provided, use that range.
	// if historical range [days] is provided, generate filter for the last N days
	// starting at [start_date].
	// Guard against a nil Hrange message — only the presence of *some*
	// filter is guaranteed by checkGetKeyRequest, not this one.
	if request.Hrange != nil && (len(request.Hrange.StartDate) > 0 || request.Hrange.Days > 0) {
		// default to current date if start_date not defined
		var start, end time.Time
		var err error
		if len(request.Hrange.StartDate) == 0 {
			end = time.Now()
		} else {
			end, err = time.Parse(time.RFC3339, request.Hrange.StartDate)
			if err != nil {
				return "", query_values, err
			}
		}
		end = end.UTC().Truncate(24 * time.Hour)
		num_days := max(request.Hrange.Days, 1)
		start = end.Add(time.Duration(num_days) * -24 * time.Hour)
		query_values = append(query_values, start)
		clauses = append(clauses, fmt.Sprintf("enin >= $%d", len(query_values)))
		query_values = append(query_values, end)
		clauses = append(clauses, fmt.Sprintf("enin <= $%d", len(query_values)))
	}

	// JOIN clauses must appear between FROM and WHERE. The previous
	// implementation appended them after the WHERE conditions, which
	// produced invalid SQL whenever AuthorityId was set.
	query := fmt.Sprintf("SELECT TEK, ENIN FROM reported_keys%s WHERE %s",
		joins, strings.Join(clauses, " AND "))
	return query, query_values, nil
}
package main

import (
	"fmt"
	"log"
)

// phbstartNode boots a blockchain node. When a miner address is given
// it is validated first; an invalid address aborts with a panic, a
// valid one enables mining with rewards paid to that address.
func (cli *PHBCLI) phbstartNode(nodeID, minerAddress string) {
	fmt.Printf("Starting node %s\n", nodeID)

	if len(minerAddress) > 0 {
		if !PHBValidateAddress(minerAddress) {
			log.Panic("Wrong miner address!")
		}
		fmt.Println("Mining is on. Address to receive rewards: ", minerAddress)
	}

	PHBStartServer(nodeID, minerAddress)
}
package acceptance import ( "context" "fmt" "os" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/databrickslabs/terraform-provider-databricks/common" . "github.com/databrickslabs/terraform-provider-databricks/compute" "github.com/databrickslabs/terraform-provider-databricks/internal/acceptance" "github.com/databrickslabs/terraform-provider-databricks/qa" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestAwsAccJobsCreate(t *testing.T) { if _, ok := os.LookupEnv("CLOUD_ENV"); !ok { t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set") } client := common.NewClientFromEnvironment() jobsAPI := NewJobsAPI(context.Background(), client) clustersAPI := NewClustersAPI(context.Background(), client) sparkVersion := clustersAPI.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true, LongTermSupport: true}) jobSettings := JobSettings{ NewCluster: &Cluster{ NumWorkers: 2, SparkVersion: sparkVersion, SparkConf: nil, AwsAttributes: &AwsAttributes{ Availability: "ON_DEMAND", }, NodeTypeID: clustersAPI.GetSmallestNodeType(NodeTypeRequest{ LocalDisk: true, }), }, NotebookTask: &NotebookTask{ NotebookPath: "/tf-test/demo-terraform/demo-notebook", }, Name: "1-test-job", Libraries: []Library{ { Maven: &Maven{ Coordinates: "org.jsoup:jsoup:1.7.2", }, }, }, EmailNotifications: &EmailNotifications{ OnStart: []string{}, OnSuccess: []string{}, OnFailure: []string{}, }, TimeoutSeconds: 3600, MaxRetries: 1, Schedule: &CronSchedule{ QuartzCronExpression: "0 15 22 ? 
* *", TimezoneID: "America/Los_Angeles", }, MaxConcurrentRuns: 1, } job, err := jobsAPI.Create(jobSettings) require.NoError(t, err, err) id := job.ID() defer func() { err := jobsAPI.Delete(id) assert.NoError(t, err, err) }() t.Log(id) job, err = jobsAPI.Read(id) assert.NoError(t, err, err) assert.True(t, job.Settings.NewCluster.SparkVersion == sparkVersion, "Something is wrong with spark version") newSparkVersion := clustersAPI.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true}) jobSettings.NewCluster.SparkVersion = newSparkVersion err = jobsAPI.Update(id, jobSettings) assert.NoError(t, err, err) job, err = jobsAPI.Read(id) assert.NoError(t, err, err) assert.True(t, job.Settings.NewCluster.SparkVersion == newSparkVersion, "Something is wrong with spark version") } func TestPreviewAccJobTasks(t *testing.T) { acceptance.Test(t, []acceptance.Step{ { Template: ` data "databricks_current_user" "me" {} data "databricks_spark_version" "latest" {} data "databricks_node_type" "smallest" { local_disk = true } resource "databricks_notebook" "this" { path = "${data.databricks_current_user.me.home}/Terraform{var.RANDOM}" language = "PYTHON" content_base64 = base64encode(<<-EOT # created from ${abspath(path.module)} display(spark.range(10)) EOT ) } resource "databricks_job" "this" { name = "{var.RANDOM}" task { task_key = "a" new_cluster { num_workers = 1 spark_version = data.databricks_spark_version.latest.id node_type_id = data.databricks_node_type.smallest.id } notebook_task { notebook_path = databricks_notebook.this.path } } task { task_key = "b" depends_on { task_key = "a" } new_cluster { num_workers = 8 spark_version = data.databricks_spark_version.latest.id node_type_id = data.databricks_node_type.smallest.id } notebook_task { notebook_path = databricks_notebook.this.path } } task { task_key = "c" depends_on { task_key = "b" } new_cluster { num_workers = 20 spark_version = data.databricks_spark_version.latest.id node_type_id = 
data.databricks_node_type.smallest.id } notebook_task { notebook_path = databricks_notebook.this.path } } }`, }, }) } func TestAccJobResource(t *testing.T) { if _, ok := os.LookupEnv("CLOUD_ENV"); !ok { t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set") } clustersAPI := NewClustersAPI(context.Background(), common.CommonEnvironmentClient()) sparkVersion := clustersAPI.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true, LongTermSupport: true}) acceptance.AccTest(t, resource.TestCase{ Steps: []resource.TestStep{ { Config: fmt.Sprintf(`resource "databricks_job" "this" { new_cluster { autoscale { min_workers = 2 max_workers = 3 } instance_pool_id = "%s" spark_version = "%s" } notebook_task { notebook_path = "/Production/MakeFeatures" } email_notifications { no_alert_for_skipped_runs = true } name = "%s" timeout_seconds = 3600 max_retries = 1 max_concurrent_runs = 1 }`, CommonInstancePoolID(), sparkVersion, qa.RandomLongName()), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object acceptance.ResourceCheck("databricks_job.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { job, err := NewJobsAPI(ctx, client).Read(id) assert.NoError(t, err) assert.NotNil(t, job.Settings) assert.NotNil(t, job.Settings.NewCluster) assert.NotNil(t, job.Settings.NewCluster.Autoscale) assert.NotNil(t, job.Settings.NotebookTask) assert.Equal(t, 2, int(job.Settings.NewCluster.Autoscale.MinWorkers)) assert.Equal(t, 3, int(job.Settings.NewCluster.Autoscale.MaxWorkers)) assert.Equal(t, sparkVersion, job.Settings.NewCluster.SparkVersion) assert.Equal(t, "/Production/MakeFeatures", job.Settings.NotebookTask.NotebookPath) assert.Equal(t, 3600, int(job.Settings.TimeoutSeconds)) assert.Equal(t, 1, int(job.Settings.MaxRetries)) assert.Equal(t, 1, int(job.Settings.MaxConcurrentRuns)) return nil }), ), }, }, }) } func 
TestAwsAccJobResource_NoInstancePool(t *testing.T) { if _, ok := os.LookupEnv("CLOUD_ENV"); !ok { t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set") } clustersAPI := NewClustersAPI(context.Background(), common.CommonEnvironmentClient()) sparkVersion := clustersAPI.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true, LongTermSupport: true}) randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) instanceProfileARN := fmt.Sprintf("arn:aws:iam::999999999999:instance-profile/tf-test-%s", randomStr) acceptance.AccTest(t, resource.TestCase{ Steps: []resource.TestStep{ { Config: fmt.Sprintf(`resource "databricks_job" "this" { new_cluster { num_workers = 1 aws_attributes { zone_id = "eu-central-1" spot_bid_price_percent = "100" instance_profile_arn = "%s" first_on_demand = 1 ebs_volume_type = "GENERAL_PURPOSE_SSD" ebs_volume_count = 1 ebs_volume_size = 32 } node_type_id = "m4.large" spark_version = "%s" } notebook_task { notebook_path = "/Production/MakeFeatures" } library { pypi { package = "networkx" } } email_notifications { no_alert_for_skipped_runs = true } name = "%s" timeout_seconds = 3600 max_retries = 1 max_concurrent_runs = 1 }`, instanceProfileARN, sparkVersion, qa.RandomLongName()), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object acceptance.ResourceCheck("databricks_job.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { job, err := NewJobsAPI(ctx, client).Read(id) assert.NoError(t, err) assert.NotNil(t, job.Settings) assert.NotNil(t, job.Settings.NewCluster) assert.NotNil(t, job.Settings.NewCluster.AwsAttributes) return nil }), ), }, }, }) }
package spec

// Surface is an interface that should hide concrete drawing implementations
// from controls. Using this interface should allow us to reasonably easily
// swap rendering backends (e.g., NanoVg, Cairo, Skia, HTML Canvas, etc.)
type Surface interface {
	// Init prepares the surface for use before any drawing takes place.
	Init()
	// Arc draws an arc from the x,y point along angle 1 and 2 at the provided radius.
	Arc(xc, yc, radius, angle1, angle2 float64)
	// BeginPath starts a new stroke or fill path.
	BeginPath()
	// BeginFrame initiates a new frame rendering.
	BeginFrame()
	// EndFrame ends the previously-begun frame rendering.
	EndFrame()
	// Close closes the surface for further operations.
	Close()
	// DebugDumpPathCache will print the current Path cache to log.
	DebugDumpPathCache()
	// Fill will fill the previously drawn shape.
	Fill()
	// Rect draws a rectangle from x and y to width and height.
	Rect(x, y, width, height float64)
	// RoundedRect draws a rectangle with rounded corners from x and y to width and height.
	RoundedRect(x, y, width, height, radius float64)
	// SetStrokeWidth configures the width in pixels of the next shape.
	SetStrokeWidth(width float64)
	// SetFillColor configures the fill color as an RGBA hex value (0xffcc00ff)
	SetFillColor(color uint)
	// SetStrokeColor configures the stroke color as an RGBA hex value (0xffcc00ff)
	SetStrokeColor(color uint)
	// Stroke draws a stroke around the previous shape.
	Stroke()
	// GetOffsetSurfaceFor provides offset surface for nested controls so that
	// they can use local coordinates for positioning.
	// GetOffsetSurfaceFor(d Reader) Surface

	// AddFont registers the font at the given file path under the given name.
	AddFont(name string, path string)
	// SetFontSize configures the font size for subsequent text operations.
	SetFontSize(size float64)
	// SetFontFace configures the font face for subsequent text operations.
	SetFontFace(face string)
	// Text draws the provided text at the x,y location.
	Text(x float64, y float64, text string)
	// TextBounds returns the x, y, width and height that the provided text
	// occupies when rendered with the given face and size.
	TextBounds(face string, size float64, text string) (x, y, w, h float64)
	// SetWidth sets the horizontal size of the surface.
	SetWidth(w float64)
	// Width returns the horizontal size of the surface.
	Width() float64
	// SetHeight sets the vertical size of the surface.
	SetHeight(h float64)
	// Height returns the vertical size of the surface.
	Height() float64
}
package main

import (
	"flag"
	"fmt"

	"github.com/containerd/containerd/namespaces"
	units "github.com/docker/go-units"
	"github.com/genuinetools/img/client"
	"github.com/moby/buildkit/identity"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/util/appcontext"
)

// pullHelp is both the short and long help text for the pull command.
const pullHelp = `Pull an image or a repository from a registry.`

// Name returns the subcommand name.
func (cmd *pullCommand) Name() string { return "pull" }

// Args describes the positional arguments accepted by the command.
func (cmd *pullCommand) Args() string { return "[OPTIONS] NAME[:TAG|@DIGEST]" }

// ShortHelp returns the one-line help text.
func (cmd *pullCommand) ShortHelp() string { return pullHelp }

// LongHelp returns the full help text.
func (cmd *pullCommand) LongHelp() string { return pullHelp }

// Hidden reports whether the command is hidden from help output.
func (cmd *pullCommand) Hidden() bool { return false }

// Register adds command-specific flags; pull defines none.
func (cmd *pullCommand) Register(fs *flag.FlagSet) {}

// pullCommand pulls an image or a repository from a registry.
type pullCommand struct {
	// image is the image reference to pull, taken from the first argument.
	image string
}

// Run pulls the image named by args[0], then prints the resulting snapshot
// reference and its size. It returns an error if no image argument is given
// or if any client operation fails.
func (cmd *pullCommand) Run(args []string) (err error) {
	if len(args) < 1 {
		return fmt.Errorf("must pass an image or repository to pull")
	}

	// Get the specified image.
	cmd.image = args[0]

	// Create the context, tagged with a fresh session identity and the
	// default containerd namespace.
	ctx := appcontext.Context()
	id := identity.NewID()
	ctx = session.NewContext(ctx, id)
	ctx = namespaces.WithNamespace(ctx, namespaces.Default)

	// Create the client.
	// NOTE(review): stateDir and backend are package-level values defined
	// elsewhere in this package.
	c, err := client.New(stateDir, backend, nil)
	if err != nil {
		return err
	}
	defer c.Close()

	fmt.Printf("Pulling %s...\n", cmd.image)
	ref, err := c.Pull(ctx, cmd.image)
	if err != nil {
		return err
	}

	fmt.Printf("Snapshot ref: %s\n", ref.ID())

	// Get the size and print it in a human-readable form.
	size, err := ref.Size(ctx)
	if err != nil {
		return err
	}

	fmt.Printf("Size: %s\n", units.BytesSize(float64(size)))

	return nil
}
package fml import ( . "github.com/fipress/fiputil" "strings" "unicode" ) //Skip spaces and comments func skipLeft(input []byte) (skip int) { i := 0 for i < len(input) { if input[i] == '#' { i += skipComments(input[i:]) } else if IsSpaceOrLineEnd(input[i]) { i++ } else { return i } } return i } //Skip comments or spaces until line end func skipRest(input []byte) (skip int) { i := 0 for i < len(input) { if input[i] == '#' { i += skipComments(input[i:]) } else if IsSpace(input[i]) { i++ } else if IsLineEnd(input[i]) { return i + 1 } else { return i } } return i } func skipComments(input []byte) int { return SkipUntilFunc(input, IsLineEnd, true) } //A node end by blank lines. //A blank line means it contains only spaces or comments func isBlankLine(input []byte) (bool, int) { i := 0 for ; i < len(input); i++ { switch input[i] { case '\n', '\r', '\f': return true, i + 1 case ' ', '\t': case '#': return true, i + skipComments(input[i:]) default: return false, 0 } } return true, i } //A block end by blank lines, or next line starts a list block func isKeyValueBlockEnd(input []byte) (bool, int) { isList, _ := isListPrefix(input) if isList { return true, 0 } return isBlockEnd(input) } func isBlockEnd(input []byte) (bool, int) { i := 0 for ; i < len(input); i++ { switch input[i] { case '\n', '\r', '\f': return true, i + 1 case ' ', '\t': case '#': return true, i + skipComments(input[i:]) default: return false, 0 } } return true, i } func isListPrefix(input []byte) (bool, int) { idx := SkipSpace(input) if idx+2 < len(input) && input[idx] == '-' && IsSpace(input[idx+1]) { return true, idx + 2 } return false, 0 } //value end at line end or comment func skipUntilValueEnd(input []byte) int { i := 0 for ; i < len(input); i++ { if input[i] == '#' && IsSpace(input[i-1]) { return i - 1 } if IsLineEnd(input[i]) { return i } } return i } //get value in string func getRawValue(input []byte) (string, int) { start := SkipSpace(input) end := start + skipUntilValueEnd(input[start:]) val := 
string(input[start:end]) val = strings.TrimRightFunc(val, unicode.IsSpace) idx := end + skipRest(input[end:]) return val, idx }
package main import ( "fmt" "math/rand" "strings" "time" "github.com/fatih/color" ) var ASCI = "QWERTYUIOPLKJHGFDSAZXCVBNMmnbvcxzasdfghjklopiuytrewq7869543210" var Pass = []string{} var Sign = "#!@&()_-][><" func strung() []string { for i := 0; i < 10; i++ { Pass = append(Pass, string(ASCI[rand.Intn(len(ASCI))])) } for i := 0; i < rand.Intn(3); i++ { Pass[rand.Intn(10)] = string(Sign[rand.Intn(len(Sign))]) } return Pass } func main() { rand.Seed(time.Now().UnixNano()) fmt.Println(strings.Join(strung(), "")) fmt.Println(len(strung())) fmt.Println(strung()) color.Red("") }
package gen

import (
	"fmt"
	"go/ast"
	"go/printer"
	"go/token"
	"strings"
)

// Struct is an alias for ast.StructType
type Struct ast.StructType

// NewStruct creates a new struct definition
func NewStruct() *Struct {
	return (*Struct)(&ast.StructType{
		Fields: &ast.FieldList{List: make([]*ast.Field, 0, 1)},
	})
}

// AST returns the cast go/ast StructType of s
func (s *Struct) AST() *ast.StructType {
	return (*ast.StructType)(s)
}

// AddField adds a Field to the Struct
func (s *Struct) AddField(f *Field) {
	// Lazily create the field list so a zero-value Struct is still usable.
	if s.Fields == nil {
		s.Fields = &ast.FieldList{List: make([]*ast.Field, 0, 1)}
	}
	s.Fields.List = append(s.Fields.List, f.AST())
}

// String renders the struct type as Go source via go/printer.
func (s *Struct) String() string {
	b := &strings.Builder{}
	structType := (*ast.StructType)(s)
	fs := token.NewFileSet()
	err := printer.Fprint(b, fs, structType)
	if err != nil {
		// Printing an in-memory AST is not expected to fail; treat it as a bug.
		panic(err)
	}
	return b.String()
}

// StructTag represents a struct tag
type StructTag struct {
	// props maps tag name -> key -> value.
	props map[string]map[string]string
	// propKeys records tag insertion order so String() output is deterministic.
	propKeys []string
	// propSubKeys records key insertion order per tag, for the same reason.
	propSubKeys map[string][]string
}

// NewStructTag initializes a struct tag
func NewStructTag() *StructTag {
	return &StructTag{
		props:       make(map[string]map[string]string),
		propKeys:    make([]string, 0, 1),
		propSubKeys: make(map[string][]string),
	}
}

// WithValue sets "key" to "value" on tag "tag", e.g.:
// `s.WithValue("json", "omitempty", "")`. Re-setting an existing tag/key
// overwrites the value while preserving the original insertion order.
func (s *StructTag) WithValue(tag, key, value string) *StructTag {
	if _, exists := s.props[tag]; !exists {
		s.props[tag] = make(map[string]string)
		s.propKeys = append(s.propKeys, tag)
	}
	if _, exists := s.propSubKeys[tag]; !exists {
		s.propSubKeys[tag] = make([]string, 0, 1)
	}
	if _, exists := s.props[tag][key]; !exists {
		s.propSubKeys[tag] = append(s.propSubKeys[tag], key)
	}
	s.props[tag][key] = value
	return s
}

// String renders all tags in insertion order as `tag:"key=value, key"` pairs
// joined by single spaces. Keys with empty values are emitted bare.
func (s *StructTag) String() string {
	tags := make([]string, 0, len(s.props))
	for _, propKey := range s.propKeys {
		values := make([]string, 0, len(s.props[propKey]))
		for _, propSubKey := range s.propSubKeys[propKey] {
			value := s.props[propKey][propSubKey]
			if len(value) > 0 {
				values = append(values, fmt.Sprintf("%s=%s", propSubKey, value))
			} else {
				values = append(values, propSubKey)
			}
		}
		// NOTE(review): values are joined with ", " (comma plus space);
		// conventional Go struct tags (e.g. json) use a bare "," — confirm
		// this separator is intentional for consumers of this output.
		tags = append(tags, fmt.Sprintf("%s:%q", propKey, strings.Join(values, ", ")))
	}
	return strings.Join(tags, " ")
}

// AST returns the *ast.BasicLit representation of the struct tag
func (s *StructTag) AST() *ast.BasicLit {
	// NOTE(review): Value is the bare tag text; if this literal is printed
	// as Go source it may need surrounding backquotes — confirm with callers.
	return &ast.BasicLit{
		Kind:  token.STRING,
		Value: s.String(),
	}
}
package client import ( "encoding/json" "fmt" "github.com/gojektech/heimdall/v6/httpclient" "github.com/pkg/errors" "io/ioutil" "log" "net/http" "strings" "time" ) type Memo struct { Content string Tag string Api string } type Payload struct { Content string `json:"content"` } func (m *Memo) Submit(verbose bool) (*string, error) { content := strings.TrimSpace(m.Content) if m.Api == "" || content == "" { return nil, errors.New("lack of necessary arguments") } if m.Tag != "" { content += fmt.Sprintf("\n\n#%s", m.Tag) } timeout := 3000 * time.Millisecond client := httpclient.NewClient(httpclient.WithHTTPTimeout(timeout)) payloadJSON, _ := json.Marshal(Payload{ content, }) body := ioutil.NopCloser(strings.NewReader(string(payloadJSON))) headers := http.Header{} headers.Set("Content-Type", "application/json") if verbose { log.Printf("Raw content: %s", content) log.Printf("Payload JSON: %s", payloadJSON) } response, err := client.Post(m.Api, body, headers) if err != nil { return nil, errors.Wrap(err, "failed to make a request to server") } defer response.Body.Close() responseBody, err := ioutil.ReadAll(response.Body) if err != nil { return nil, errors.Wrap(err, "failed to read response body") } var responseData map[string]interface{} if err := json.Unmarshal(responseBody, &responseData); err != nil { return nil, err } if verbose { log.Printf("Response Body: %v", responseData) } statusCode := response.StatusCode if statusCode >= 200 && statusCode < 400 { message := responseData["message"].(string) return &message, nil } else if statusCode >= 400 && statusCode < 500 { return nil, &ResponseError{ Err: errors.New("request is not valid"), StatusCode: statusCode, } } else { return nil, &ResponseError{ Err: errors.New("response error"), StatusCode: statusCode, } } }
package e4 import ( "io" "testing" ) func TestInfo(t *testing.T) { TestWrapFunc(t, NewInfo("foo")) info := NewInfo("foo %s", "bar")(io.EOF) if info.Error() != "foo bar\nEOF" { t.Fatalf("got %s", info.Error()) } if !is(info, io.EOF) { t.Fatal() } }
package cli import ( "os" "github.com/codegangsta/cli" orderconstant "github.com/xozrc/cqrs/eventsourcing/examples/order/constant" ) var ( bus string = orderconstant.OrderCommandBusAddr topic string = orderconstant.OrderCommandTopic ) var ( commands []cli.Command ) func appendCmd(cmd cli.Command) { commands = append(commands, cmd) } func Start() { app := cli.NewApp() app.Name = "order client" app.Usage = "orderclient [global options] command [command options] [arguments...]." app.Author = "" app.Email = "" app.Flags = []cli.Flag{ cli.StringFlag{ Name: "bus,b", Value: bus, Usage: "bus address for order client to publish", Destination: &bus, }, cli.StringFlag{ Name: "topic,t", Value: topic, Usage: "topic for order client to publish", Destination: &topic, }, } app.Commands = commands app.Run(os.Args) }
package nmea import ( "github.com/stretchr/testify/assert" "testing" ) //$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74 //$GPGSV,3,2,11,14,25,170,00,16,57,208,39,18,67,296,40,19,40,246,00*74 //$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D //$GPGSV,1,1,13,02,02,213,,03,-3,000,,11,00,121,,14,13,172,05*67 func TestGPGSVGoodSentence(t *testing.T) { goodMsg := "$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D" sentence, err := Parse(goodMsg) assert.NoError(t, err, "Unexpected error parsing good sentence") // Attributes of the parsed sentence, and their expected values. expected := GPGSV{ Sentence: Sentence{ Type: "GPGSV", Fields: []string{"3", "3", "11", "22", "42", "067", "42", "24", "14", "311", "43", "27", "05", "244", "00", "", "", "", ""}, Checksum: "4D", Raw: "$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D", }, TotalNumberOfMessages: 3, NumberOFMessage: 3, TotalNumberOfSVs: 11, SVList: []SatelliteView{ { SVPRNNumber: "22", ElevationInDegrees: "42", Azimuth: "067", SNR: "42", }, { SVPRNNumber: "24", ElevationInDegrees: "14", Azimuth: "311", SNR: "43", }, { SVPRNNumber: "27", ElevationInDegrees: "05", Azimuth: "244", SNR: "00", }, { SVPRNNumber: "", ElevationInDegrees: "", Azimuth: "", SNR: "", }, }, } assert.EqualValues(t, expected, sentence, "Sentence values do not match") }
package pulsepoint import ( "encoding/json" "testing" "github.com/prebid/prebid-server/openrtb_ext" ) func TestValidParams(t *testing.T) { validator, err := openrtb_ext.NewBidderParamsValidator("../../static/bidder-params") if err != nil { t.Fatalf("Failed to fetch the json-schemas. %v", err) } for _, validParam := range validParams { if err := validator.Validate(openrtb_ext.BidderPulsepoint, json.RawMessage(validParam)); err != nil { t.Errorf("Schema rejected pulsepoint params: %s \n Error: %s", validParam, err) } } } // TestInvalidParams makes sure that the pubmatic schema rejects all the imp.ext fields we don't support. func TestInvalidParams(t *testing.T) { validator, err := openrtb_ext.NewBidderParamsValidator("../../static/bidder-params") if err != nil { t.Fatalf("Failed to fetch the json-schemas. %v", err) } for _, invalidParam := range invalidParams { if err := validator.Validate(openrtb_ext.BidderPulsepoint, json.RawMessage(invalidParam)); err == nil { t.Errorf("Schema allowed unexpected pulsepoint params: %s", invalidParam) } } } var validParams = []string{ `{"cp":1000, "ct": 2000}`, `{"cp":1001, "ct": 2001}`, `{"cp":1001, "ct": 2001, "cf": "1x1"}`, } var invalidParams = []string{ ``, `null`, `true`, `5`, `4.2`, `[]`, `{}`, `{"cp":"1000"}`, `{"ct":"1000"}`, `{"cp":1000}`, `{"ct":1000}`, `{"cp":1000, "ct":"1000"}`, `{"cp":1000, "ct": "abcd"}`, `{"cp":"abcd", "ct": 1000}`, `{"cp":"1000.2", "ct": "1000.1"}`, }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tpcc import ( "context" gosql "database/sql" "fmt" "net/url" "strconv" "strings" "sync" "time" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/histogram" "github.com/cockroachdb/cockroach/pkg/workload/workloadimpl" "github.com/cockroachdb/errors" "github.com/jackc/pgx" "github.com/spf13/pflag" "golang.org/x/exp/rand" "golang.org/x/sync/errgroup" ) type tpcc struct { flags workload.Flags connFlags *workload.ConnFlags seed uint64 warehouses int activeWarehouses int interleaved bool nowString []byte numConns int idleConns int // Used in non-uniform random data generation. cLoad is the value of C at load // time. cCustomerID is the value of C for the customer id generator. cItemID // is the value of C for the item id generator. See 2.1.6. cLoad, cCustomerID, cItemID int mix string waitFraction float64 workers int fks bool separateColumnFamilies bool // deprecatedFKIndexes adds in foreign key indexes that are no longer needed // due to origin index restrictions being lifted. deprecatedFkIndexes bool dbOverride string txInfos []txInfo // deck contains indexes into the txInfos slice. 
deck []int auditor *auditor reg *histogram.Registry split bool scatter bool partitions int clientPartitions int affinityPartitions []int wPart *partitioner zoneCfg zoneConfig usePostgres bool serializable bool txOpts *pgx.TxOptions expensiveChecks bool replicateStaticColumns bool randomCIDsCache struct { syncutil.Mutex values [][]int } localsPool *sync.Pool } type waitSetter struct { val *float64 } // Set implements the pflag.Value interface. func (w *waitSetter) Set(val string) error { switch strings.ToLower(val) { case "true", "on": *w.val = 1.0 case "false", "off": *w.val = 0.0 default: f, err := strconv.ParseFloat(val, 64) if err != nil { return err } if f < 0 { return errors.New("cannot set --wait to a negative value") } *w.val = f } return nil } // Type implements the pflag.Value interface func (*waitSetter) Type() string { return "0.0/false - 1.0/true" } // String implements the pflag.Value interface. func (w *waitSetter) String() string { switch *w.val { case 0: return "false" case 1: return "true" default: return fmt.Sprintf("%f", *w.val) } } func init() { workload.Register(tpccMeta) } // FromWarehouses returns a tpcc generator pre-configured with the specified // number of warehouses. 
func FromWarehouses(warehouses int) workload.Generator { return workload.FromFlags(tpccMeta, fmt.Sprintf(`--warehouses=%d`, warehouses)) } var tpccMeta = workload.Meta{ Name: `tpcc`, Description: `TPC-C simulates a transaction processing workload` + ` using a rich schema of multiple tables`, Version: `2.2.0`, PublicFacing: true, New: func() workload.Generator { g := &tpcc{} g.flags.FlagSet = pflag.NewFlagSet(`tpcc`, pflag.ContinueOnError) g.flags.Meta = map[string]workload.FlagMeta{ `db`: {RuntimeOnly: true}, `mix`: {RuntimeOnly: true}, `partitions`: {RuntimeOnly: true}, `client-partitions`: {RuntimeOnly: true}, `partition-affinity`: {RuntimeOnly: true}, `partition-strategy`: {RuntimeOnly: true}, `zones`: {RuntimeOnly: true}, `active-warehouses`: {RuntimeOnly: true}, `scatter`: {RuntimeOnly: true}, `serializable`: {RuntimeOnly: true}, `split`: {RuntimeOnly: true}, `wait`: {RuntimeOnly: true}, `workers`: {RuntimeOnly: true}, `conns`: {RuntimeOnly: true}, `expensive-checks`: {RuntimeOnly: true, CheckConsistencyOnly: true}, } g.flags.Uint64Var(&g.seed, `seed`, 1, `Random number generator seed`) g.flags.IntVar(&g.warehouses, `warehouses`, 1, `Number of warehouses for loading`) g.flags.BoolVar(&g.fks, `fks`, true, `Add the foreign keys`) g.flags.BoolVar(&g.deprecatedFkIndexes, `deprecated-fk-indexes`, false, `Add deprecated foreign keys (needed when running against v20.1 or below clusters)`) g.flags.BoolVar(&g.interleaved, `interleaved`, false, `Use interleaved tables`) if err := g.Flags().MarkHidden("interleaved"); err != nil { panic(errors.Wrap(err, "no interleaved flag?")) } g.flags.StringVar(&g.mix, `mix`, `newOrder=10,payment=10,orderStatus=1,delivery=1,stockLevel=1`, `Weights for the transaction mix. 
The default matches the TPCC spec.`) g.waitFraction = 1.0 g.flags.Var(&waitSetter{&g.waitFraction}, `wait`, `Wait mode (include think/keying sleeps): 1/true for tpcc-standard wait, 0/false for no waits, other factors also allowed`) g.flags.StringVar(&g.dbOverride, `db`, ``, `Override for the SQL database to use. If empty, defaults to the generator name`) g.flags.IntVar(&g.workers, `workers`, 0, fmt.Sprintf( `Number of concurrent workers. Defaults to --warehouses * %d`, numWorkersPerWarehouse, )) g.flags.IntVar(&g.numConns, `conns`, 0, fmt.Sprintf( `Number of connections. Defaults to --warehouses * %d (except in nowait mode, where it defaults to --workers`, numConnsPerWarehouse, )) g.flags.IntVar(&g.idleConns, `idle-conns`, 0, `Number of idle connections. Defaults to 0`) g.flags.IntVar(&g.partitions, `partitions`, 1, `Partition tables`) g.flags.IntVar(&g.clientPartitions, `client-partitions`, 0, `Make client behave as if the tables are partitioned, but does not actually partition underlying data. Requires --partition-affinity.`) g.flags.IntSliceVar(&g.affinityPartitions, `partition-affinity`, nil, `Run load generator against specific partition (requires partitions). `+ `Note that if one value is provided, the assumption is that all urls are associated with that partition. In all other cases the assumption `+ `is that the URLs are distributed evenly over the partitions`) g.flags.Var(&g.zoneCfg.strategy, `partition-strategy`, `Partition tables according to which strategy [replication, leases]`) g.flags.StringSliceVar(&g.zoneCfg.zones, "zones", []string{}, "Zones for partitioning, the number of zones should match the number of partitions and the zones used to start cockroach.") g.flags.IntVar(&g.activeWarehouses, `active-warehouses`, 0, `Run the load generator against a specific number of warehouses. 
Defaults to --warehouses'`) g.flags.BoolVar(&g.scatter, `scatter`, false, `Scatter ranges`) g.flags.BoolVar(&g.serializable, `serializable`, false, `Force serializable mode`) g.flags.BoolVar(&g.split, `split`, false, `Split tables`) g.flags.BoolVar(&g.expensiveChecks, `expensive-checks`, false, `Run expensive checks`) g.flags.BoolVar(&g.separateColumnFamilies, `families`, false, `Use separate column families for dynamic and static columns`) g.flags.BoolVar(&g.replicateStaticColumns, `replicate-static-columns`, false, "Create duplicate indexes for all static columns in district, items and warehouse tables, such that each zone or rack has them locally.") g.connFlags = workload.NewConnFlags(&g.flags) // Hardcode this since it doesn't seem like anyone will want to change // it and it's really noisy in the generated fixture paths. g.nowString = []byte(`2006-01-02 15:04:05`) return g }, } // Meta implements the Generator interface. func (*tpcc) Meta() workload.Meta { return tpccMeta } // Flags implements the Flagser interface. func (w *tpcc) Flags() workload.Flags { return w.flags } // Hooks implements the Hookser interface. func (w *tpcc) Hooks() workload.Hooks { return workload.Hooks{ Validate: func() error { if w.warehouses < 1 { return errors.Errorf(`--warehouses must be positive`) } if w.activeWarehouses > w.warehouses { return errors.Errorf(`--active-warehouses needs to be less than or equal to warehouses`) } else if w.activeWarehouses == 0 { w.activeWarehouses = w.warehouses } if w.partitions < 1 { return errors.Errorf(`--partitions must be positive`) } if w.clientPartitions > 0 { if w.partitions > 1 { return errors.Errorf(`cannot specify both --partitions and --client-partitions; --partitions actually partitions underlying data. --client-partitions only modifies client behavior to access a subset of warehouses. 
Must be used with --partition-affinity`) } if len(w.affinityPartitions) == 0 { return errors.Errorf(`--client-partitions must be used with --partition-affinity.`) } for _, p := range w.affinityPartitions { if p >= w.clientPartitions { return errors.Errorf(`--partition-affinity %d in %v out of bounds of --client-partitions`, p, w.affinityPartitions) } } } else { for _, p := range w.affinityPartitions { if p < 0 || p >= w.partitions { return errors.Errorf(`--partition-affinity out of bounds of --partitions`) } } if len(w.zoneCfg.zones) > 0 && (len(w.zoneCfg.zones) != w.partitions) { return errors.Errorf(`--zones should have the sames length as --partitions.`) } } w.initNonUniformRandomConstants() if w.workers == 0 { w.workers = w.activeWarehouses * numWorkersPerWarehouse } if w.numConns == 0 { // If we're not waiting, open up a connection for each worker. If we are // waiting, we only use up to a set number of connections per warehouse. // This isn't mandated by the spec, but opening a connection per worker // when they each spend most of their time waiting is wasteful. if w.waitFraction == 0 { w.numConns = w.workers } else { w.numConns = w.activeWarehouses * numConnsPerWarehouse } } if w.waitFraction > 0 && w.workers != w.activeWarehouses*numWorkersPerWarehouse { return errors.Errorf(`--wait > 0 and --warehouses=%d requires --workers=%d`, w.activeWarehouses, w.warehouses*numWorkersPerWarehouse) } if w.serializable { w.txOpts = &pgx.TxOptions{IsoLevel: pgx.Serializable} } w.auditor = newAuditor(w.activeWarehouses) // Create a partitioner to help us partition the warehouses. The base-case is // where w.warehouses == w.activeWarehouses and w.partitions == 1. var err error if w.clientPartitions > 0 { // This partitioner will not actually be used to partiton the data, but instead // is only used to limit the warehouses the client attempts to manipulate. 
w.wPart, err = makePartitioner(w.warehouses, w.activeWarehouses, w.clientPartitions) } else { w.wPart, err = makePartitioner(w.warehouses, w.activeWarehouses, w.partitions) } if err != nil { return errors.Wrap(err, "error creating partitioner") } return initializeMix(w) }, PostLoad: func(db *gosql.DB) error { if w.fks { // We avoid validating foreign keys because we just generated // the data set and don't want to scan over the entire thing // again. Unfortunately, this means that we leave the foreign // keys unvalidated for the duration of the test, so the SQL // optimizer can't use them. // TODO(lucy-zhang): expose an internal knob to validate fk // relations without performing full validation. See #38833. fkStmts := []string{ `alter table district add foreign key (d_w_id) references warehouse (w_id) not valid`, `alter table customer add foreign key (c_w_id, c_d_id) references district (d_w_id, d_id) not valid`, `alter table history add foreign key (h_c_w_id, h_c_d_id, h_c_id) references customer (c_w_id, c_d_id, c_id) not valid`, `alter table history add foreign key (h_w_id, h_d_id) references district (d_w_id, d_id) not valid`, `alter table "order" add foreign key (o_w_id, o_d_id, o_c_id) references customer (c_w_id, c_d_id, c_id) not valid`, `alter table new_order add foreign key (no_w_id, no_d_id, no_o_id) references "order" (o_w_id, o_d_id, o_id) not valid`, `alter table stock add foreign key (s_w_id) references warehouse (w_id) not valid`, `alter table stock add foreign key (s_i_id) references item (i_id) not valid`, `alter table order_line add foreign key (ol_w_id, ol_d_id, ol_o_id) references "order" (o_w_id, o_d_id, o_id) not valid`, `alter table order_line add foreign key (ol_supply_w_id, ol_i_id) references stock (s_w_id, s_i_id) not valid`, } for _, fkStmt := range fkStmts { if _, err := db.Exec(fkStmt); err != nil { const duplFKErr = "columns cannot be used by multiple foreign key constraints" const idxErr = "foreign key requires an existing index on 
columns" switch { case strings.Contains(err.Error(), idxErr): fmt.Println(errors.WithHint(err, "try using the --deprecated-fk-indexes flag")) // If the statement failed because of a missing FK index, suggest // to use the deprecated-fks flag. return errors.WithHint(err, "try using the --deprecated-fk-indexes flag") case strings.Contains(err.Error(), duplFKErr): // If the statement failed because the fk already exists, // ignore it. Return the error for any other reason. default: return err } } } } return w.partitionAndScatterWithDB(db) }, PostRun: func(startElapsed time.Duration) error { w.auditor.runChecks() const totalHeader = "\n_elapsed_______tpmC____efc__avg(ms)__p50(ms)__p90(ms)__p95(ms)__p99(ms)_pMax(ms)" fmt.Println(totalHeader) const newOrderName = `newOrder` w.reg.Tick(func(t histogram.Tick) { if newOrderName == t.Name { tpmC := float64(t.Cumulative.TotalCount()) / startElapsed.Seconds() * 60 fmt.Printf("%7.1fs %10.1f %5.1f%% %8.1f %8.1f %8.1f %8.1f %8.1f %8.1f\n", startElapsed.Seconds(), tpmC, 100*tpmC/(SpecWarehouseFactor*float64(w.activeWarehouses)), time.Duration(t.Cumulative.Mean()).Seconds()*1000, time.Duration(t.Cumulative.ValueAtQuantile(50)).Seconds()*1000, time.Duration(t.Cumulative.ValueAtQuantile(90)).Seconds()*1000, time.Duration(t.Cumulative.ValueAtQuantile(95)).Seconds()*1000, time.Duration(t.Cumulative.ValueAtQuantile(99)).Seconds()*1000, time.Duration(t.Cumulative.ValueAtQuantile(100)).Seconds()*1000, ) } }) return nil }, CheckConsistency: func(ctx context.Context, db *gosql.DB) error { for _, check := range AllChecks() { if !w.expensiveChecks && check.Expensive { continue } start := timeutil.Now() err := check.Fn(db, "" /* asOfSystemTime */) log.Infof(ctx, `check %s took %s`, check.Name, timeutil.Since(start)) if err != nil { return errors.Wrapf(err, `check failed: %s`, check.Name) } } return nil }, } } // Tables implements the Generator interface. 
func (w *tpcc) Tables() []workload.Table { aCharsInit := workloadimpl.PrecomputedRandInit(rand.New(rand.NewSource(w.seed)), precomputedLength, aCharsAlphabet) lettersInit := workloadimpl.PrecomputedRandInit(rand.New(rand.NewSource(w.seed)), precomputedLength, lettersAlphabet) numbersInit := workloadimpl.PrecomputedRandInit(rand.New(rand.NewSource(w.seed)), precomputedLength, numbersAlphabet) if w.localsPool == nil { w.localsPool = &sync.Pool{ New: func() interface{} { return &generateLocals{ rng: tpccRand{ Rand: rand.New(rand.NewSource(uint64(timeutil.Now().UnixNano()))), // Intentionally wait until here to initialize the precomputed rands // so a caller of Tables that only wants schema doesn't compute // them. aChars: aCharsInit(), letters: lettersInit(), numbers: numbersInit(), }, } }, } } // splits is a convenience method for constructing table splits that returns // a zero value if the workload does not have splits enabled. splits := func(t workload.BatchedTuples) workload.BatchedTuples { if w.split { return t } return workload.BatchedTuples{} } // numBatches is a helper to calculate how many split batches exist exist given // the total number of rows and the desired number of rows per split. 
numBatches := func(total, per int) int { batches := total / per if total%per == 0 { batches-- } return batches } warehouse := workload.Table{ Name: `warehouse`, Schema: maybeAddColumnFamiliesSuffix( w.separateColumnFamilies, tpccWarehouseSchema, tpccWarehouseColumnFamiliesSuffix, ), InitialRows: workload.BatchedTuples{ NumBatches: w.warehouses, FillBatch: w.tpccWarehouseInitialRowBatch, }, Splits: splits(workload.Tuples( numBatches(w.warehouses, numWarehousesPerRange), func(i int) []interface{} { return []interface{}{(i + 1) * numWarehousesPerRange} }, )), Stats: w.tpccWarehouseStats(), } district := workload.Table{ Name: `district`, Schema: maybeAddInterleaveSuffix( w.interleaved, maybeAddColumnFamiliesSuffix( w.separateColumnFamilies, tpccDistrictSchemaBase, tpccDistrictColumnFamiliesSuffix, ), tpccDistrictSchemaInterleaveSuffix, ), InitialRows: workload.BatchedTuples{ NumBatches: numDistrictsPerWarehouse * w.warehouses, FillBatch: w.tpccDistrictInitialRowBatch, }, Splits: splits(workload.Tuples( numBatches(w.warehouses, numWarehousesPerRange), func(i int) []interface{} { return []interface{}{(i + 1) * numWarehousesPerRange, 0} }, )), Stats: w.tpccDistrictStats(), } customer := workload.Table{ Name: `customer`, Schema: maybeAddInterleaveSuffix( w.interleaved, maybeAddColumnFamiliesSuffix( w.separateColumnFamilies, tpccCustomerSchemaBase, tpccCustomerColumnFamiliesSuffix, ), tpccCustomerSchemaInterleaveSuffix, ), InitialRows: workload.BatchedTuples{ NumBatches: numCustomersPerWarehouse * w.warehouses, FillBatch: w.tpccCustomerInitialRowBatch, }, Stats: w.tpccCustomerStats(), } history := workload.Table{ Name: `history`, Schema: maybeAddFkSuffix( w.deprecatedFkIndexes, tpccHistorySchemaBase, deprecatedTpccHistorySchemaFkSuffix, ), InitialRows: workload.BatchedTuples{ NumBatches: numHistoryPerWarehouse * w.warehouses, FillBatch: w.tpccHistoryInitialRowBatch, }, Splits: splits(workload.Tuples( numBatches(w.warehouses, numWarehousesPerRange), func(i int) []interface{} 
{ return []interface{}{(i + 1) * numWarehousesPerRange} }, )), Stats: w.tpccHistoryStats(), } order := workload.Table{ Name: `order`, Schema: maybeAddInterleaveSuffix( w.interleaved, tpccOrderSchemaBase, tpccOrderSchemaInterleaveSuffix, ), InitialRows: workload.BatchedTuples{ NumBatches: numOrdersPerWarehouse * w.warehouses, FillBatch: w.tpccOrderInitialRowBatch, }, Stats: w.tpccOrderStats(), } newOrder := workload.Table{ Name: `new_order`, Schema: tpccNewOrderSchema, InitialRows: workload.BatchedTuples{ NumBatches: numNewOrdersPerWarehouse * w.warehouses, FillBatch: w.tpccNewOrderInitialRowBatch, }, Stats: w.tpccNewOrderStats(), } item := workload.Table{ Name: `item`, Schema: tpccItemSchema, InitialRows: workload.BatchedTuples{ NumBatches: numItems, FillBatch: w.tpccItemInitialRowBatch, }, Splits: splits(workload.Tuples( numBatches(numItems, numItemsPerRange), func(i int) []interface{} { return []interface{}{numItemsPerRange * (i + 1)} }, )), Stats: w.tpccItemStats(), } stock := workload.Table{ Name: `stock`, Schema: maybeAddInterleaveSuffix( w.interleaved, maybeAddFkSuffix( w.deprecatedFkIndexes, tpccStockSchemaBase, deprecatedTpccStockSchemaFkSuffix, ), tpccStockSchemaInterleaveSuffix, ), InitialRows: workload.BatchedTuples{ NumBatches: numStockPerWarehouse * w.warehouses, FillBatch: w.tpccStockInitialRowBatch, }, Stats: w.tpccStockStats(), } orderLine := workload.Table{ Name: `order_line`, Schema: maybeAddInterleaveSuffix( w.interleaved, maybeAddFkSuffix( w.deprecatedFkIndexes, tpccOrderLineSchemaBase, deprecatedTpccOrderLineSchemaFkSuffix, ), tpccOrderLineSchemaInterleaveSuffix, ), InitialRows: workload.BatchedTuples{ NumBatches: numOrdersPerWarehouse * w.warehouses, FillBatch: w.tpccOrderLineInitialRowBatch, }, Stats: w.tpccOrderLineStats(), } return []workload.Table{ warehouse, district, customer, history, order, newOrder, item, stock, orderLine, } } // Ops implements the Opser interface. 
func (w *tpcc) Ops( ctx context.Context, urls []string, reg *histogram.Registry, ) (workload.QueryLoad, error) { // It would be nice to remove the need for this and to require that // partitioning and scattering occurs only when the PostLoad hook is // run, but to maintain backward compatibility, it's easiest to allow // partitioning and scattering during `workload run`. if err := w.partitionAndScatter(urls); err != nil { return workload.QueryLoad{}, err } sqlDatabase, err := workload.SanitizeUrls(w, w.dbOverride, urls) if err != nil { return workload.QueryLoad{}, err } parsedURL, err := url.Parse(urls[0]) if err != nil { return workload.QueryLoad{}, err } w.reg = reg w.usePostgres = parsedURL.Port() == "5432" // We can't use a single MultiConnPool because we want to implement partition // affinity. Instead we have one MultiConnPool per server. cfg := workload.MultiConnPoolCfg{ MaxTotalConnections: (w.numConns + len(urls) - 1) / len(urls), // round up // Limit the number of connections per pool (otherwise preparing statements // at startup can be slow). MaxConnsPerPool: 50, } fmt.Printf("Initializing %d connections...\n", w.numConns) dbs := make([]*workload.MultiConnPool, len(urls)) var g errgroup.Group for i := range urls { i := i g.Go(func() error { var err error dbs[i], err = workload.NewMultiConnPool(cfg, urls[i]) return err }) } if err := g.Wait(); err != nil { return workload.QueryLoad{}, err } var partitionDBs [][]*workload.MultiConnPool if w.clientPartitions > 0 { // Client partitons simply emulates the behavior of data partitions // w/r/t database connections, though all of the connections will // be for the same partition. partitionDBs = make([][]*workload.MultiConnPool, w.clientPartitions) } else { // Assign each DB connection pool to a local partition. This assumes that // dbs[i] is a machine that holds partition "i % *partitions". If we have an // affinity partition, all connections will be for the same partition. 
partitionDBs = make([][]*workload.MultiConnPool, w.partitions) } // If there is only one affinityPartition then we assume all of the URLs are // associated with that partition. if len(w.affinityPartitions) == 1 { // All connections are for our local partitions. partitionDBs[w.affinityPartitions[0]] = dbs } else { // This is making some assumptions about how racks are handed out. // If we have more than one affinityPartion then we assume that the URLs // are mapped to partitions in a round-robin fashion. // Imagine there are 5 partitions and 15 urls, this code assumes that urls // 0, 5, and 10 correspond to the 0th partition. for i, db := range dbs { p := i % w.partitions partitionDBs[p] = append(partitionDBs[p], db) } for i := range partitionDBs { // Possible if we have more partitions than DB connections. if partitionDBs[i] == nil { partitionDBs[i] = dbs } } } fmt.Printf("Initializing %d idle connections...\n", w.idleConns) var conns []*pgx.Conn for i := 0; i < w.idleConns; i++ { for _, url := range urls { connConfig, err := pgx.ParseURI(url) if err != nil { return workload.QueryLoad{}, err } conn, err := pgx.Connect(connConfig) if err != nil { return workload.QueryLoad{}, err } conns = append(conns, conn) } } fmt.Printf("Initializing %d workers and preparing statements...\n", w.workers) ql := workload.QueryLoad{SQLDatabase: sqlDatabase} ql.WorkerFns = make([]func(context.Context) error, 0, w.workers) var group errgroup.Group // Determines whether a partition is in the local workload's set of affinity // partitions. isMyPart := func(p int) bool { for _, ap := range w.affinityPartitions { if p == ap { return true } } // If nothing is mine, then everything is mine. return len(w.affinityPartitions) == 0 } // Limit the amount of workers we initialize in parallel, to avoid running out // of memory (#36897). 
sem := make(chan struct{}, 100) for workerIdx := 0; workerIdx < w.workers; workerIdx++ { workerIdx := workerIdx warehouse := w.wPart.totalElems[workerIdx%len(w.wPart.totalElems)] p := w.wPart.partElemsMap[warehouse] // This isn't part of our local partition. if !isMyPart(p) { continue } dbs := partitionDBs[p] db := dbs[warehouse%len(dbs)] // NB: ql.WorkerFns is sized so this never re-allocs. ql.WorkerFns = append(ql.WorkerFns, nil) idx := len(ql.WorkerFns) - 1 sem <- struct{}{} group.Go(func() error { worker, err := newWorker(ctx, w, db, reg.GetHandle(), warehouse) if err == nil { ql.WorkerFns[idx] = worker.run } <-sem return err }) } if err := group.Wait(); err != nil { return workload.QueryLoad{}, err } // Preregister all of the histograms so they always print. for _, tx := range allTxs { reg.GetHandle().Get(tx.name) } // Close idle connections. ql.Close = func(context context.Context) { for _, conn := range conns { if err := conn.Close(); err != nil { log.Warningf(ctx, "%v", err) } } } return ql, nil } func (w *tpcc) partitionAndScatter(urls []string) error { db, err := gosql.Open(`cockroach`, strings.Join(urls, ` `)) if err != nil { return err } defer db.Close() return w.partitionAndScatterWithDB(db) } func (w *tpcc) partitionAndScatterWithDB(db *gosql.DB) error { if w.partitions > 1 { // Repartitioning can take upwards of 10 minutes, so determine if // the dataset is already partitioned before launching the operation // again. if parts, err := partitionCount(db); err != nil { return errors.Wrapf(err, "could not determine if tables are partitioned") } else if parts == 0 { if err := partitionTables(db, w.zoneCfg, w.wPart, w.replicateStaticColumns); err != nil { return errors.Wrapf(err, "could not partition tables") } } else if parts != w.partitions { return errors.Errorf("tables are not partitioned %d way(s). 
"+ "Pass the --partitions flag to 'workload init' or 'workload fixtures'.", w.partitions) } } if w.scatter { if err := scatterRanges(db); err != nil { return errors.Wrapf(err, "could not scatter ranges") } } return nil }
package sink

import (
	"testing"

	"github.com/google/go-cmp/cmp"

	"github.com/pragkent/slackwork/wework"
)

// TestTranslate exercises the Slack-payload -> WeWork-message translation:
// an attachment carrying a TitleLink becomes a "textcard" request, while a
// payload with top-level text collapses into a single plain "text" request.
func TestTranslate(t *testing.T) {
	tests := []struct {
		payload *Payload
		want    []wework.SendChatMessageRequest
	}{
		{
			// Attachment with TitleLink -> textcard message.
			payload: &Payload{
				Channel: "#haha",
				Parse:   "full",
				Attachments: []Attachment{
					{
						Color:    "#D63232",
						Fallback: "[Alerting] Test notification",
						Fields: []AttachmentField{
							{
								Short: true,
								Title: "High value",
								Value: "null",
							},
							{
								Short: true,
								Title: "Higher Value",
								Value: "200",
							},
							{
								Short: false,
								Title: "Error",
								Value: "This is only a test",
							},
						},
						Footer:     "Grafana v4.4.1",
						FooterIcon: "https://grafana.com/assets/img/fav32.png",
						ImageURL:   "http://grafana.org/assets/img/blog/mixed_styles.png",
						Text:       "@haha Someone is testing the alert notification within grafana.",
						Title:      "[Alerting] Test notification",
						TitleLink:  "https://grafana.com/",
					},
				},
			},
			want: []wework.SendChatMessageRequest{
				{
					ChatID: "haha",
					Type:   "textcard",
					TextCard: &wework.TextCard{
						Title:       "[Alerting] Test notification",
						URL:         "https://grafana.com/",
						Description: "<div class=\"normal\">@haha Someone is testing the alert notification within grafana.\n</div><div class=\"gray\">High value</div><div class=\"highlight\">null</div><div class=\"gray\">Higher Value</div><div class=\"highlight\">200</div><div class=\"gray\">Error</div><div class=\"highlight\">This is only a test</div>",
					},
				},
			},
		},
		{
			// Payload-level text plus attachment text/fields -> plain text message.
			payload: &Payload{
				Channel: "#haha",
				Parse:   "full",
				Attachments: []Attachment{
					{
						Color:    "#D63232",
						Fallback: "[Alerting] Test notification",
						Fields: []AttachmentField{
							{
								Short: true,
								Title: "High value",
								Value: "null",
							},
						},
						Text: "Attachment Text",
					},
				},
				Text: "Payload Text",
			},
			want: []wework.SendChatMessageRequest{
				{
					ChatID: "haha",
					Type:   "text",
					Text: &wework.Text{
						Content: "Payload Text\n\nAttachment Text\nHigh value: null",
					},
				},
			},
		},
	}

	ws := &WeWorkSink{
		wc:      wework.NewAgentClient("1001", "2002", 12345),
		AgentID: 12345,
	}

	for _, tt := range tests {
		got := ws.Translate(tt.payload)
		// Report one readable diff per case instead of two overlapping
		// t.Errorf calls for the same failure (the original printed both
		// cmp.Diff output and a %#v dump).
		if diff := cmp.Diff(got, tt.want); diff != "" {
			t.Errorf("WeWorkSink.Translate() mismatch (-got +want):\n%s", diff)
		}
	}
}
package main

import (
	"flag"

	"github.com/go-redis/redis/v7"
	"github.com/qubard/claack-go/lib/microservice"
	"github.com/qubard/claack-go/websocket/socket"
)

// main connects to Redis, builds a websocket edge server and runs the race
// pool microservice on top of it. The Redis address is taken from the
// -redis flag ("ip:port").
func main() {
	var addr string
	flag.StringVar(&addr, "redis", "", "The address (ip:port) of a redis instance")
	flag.Parse()

	// Renamed from "redis" to avoid shadowing the imported package.
	client := redis.NewClient(&redis.Options{
		Addr:     addr,
		Password: "",
		DB:       0,
	})

	// Fail fast: verify connectivity before wiring up any services
	// (previously the edge server was constructed even when the ping had
	// already failed, only to panic afterwards).
	if _, err := client.Ping().Result(); err != nil {
		panic(err)
	}

	edgeServer := socket.CreateSimpleEdgeServer(client)
	pool := microservice.CreateRacePool(client, edgeServer, "racepool", "enq", "deq")
	pool.Run()
}
package db

import (
	"log"
	"time"

	"github.com/jelinden/stock-portfolio/app/service"
)

// GetHistory returns the stored close prices for every stock in the given
// portfolio, limited to prices dated at or after the moment each stock was
// added to the portfolio. On failure the error is logged and an empty
// slice is returned.
func GetHistory(portfolioid string) []service.ClosePrice {
	timeFrom := time.Now()
	var closePrices = []service.ClosePrice{}
	rows, err := mdb.Query(`SELECT h.symbol, h.closePrice, h.epoch FROM history AS h, (SELECT symbol, amount, epoch as minDate FROM portfoliostocks WHERE portfolioid = $1) AS p WHERE h.symbol = p.symbol AND h.epoch >= p.minDate`, portfolioid)
	if err != nil {
		log.Printf("failed with '%v'\n", err)
		return closePrices
	}
	defer rows.Close()
	for rows.Next() {
		closePrice := service.ClosePrice{}
		if err := rows.Scan(
			&closePrice.Symbol,
			&closePrice.ClosePrice,
			&closePrice.Epoch); err != nil {
			// Fixed: a failed scan previously still appended a zero-value
			// row to the result.
			log.Println("scanning row failed", err.Error())
			continue
		}
		closePrices = append(closePrices, closePrice)
	}
	// Fixed: surface errors that terminated iteration early (previously
	// rows.Err() was never checked).
	if err := rows.Err(); err != nil {
		log.Println("iterating history rows failed", err.Error())
	}
	log.Println("get history took", time.Since(timeFrom))
	return closePrices
}

// SaveHistory inserts each close price that is not already stored for its
// symbol/date pair. Rows with unparseable dates (expected "01/02/2006")
// are logged and skipped; insert failures are logged per symbol.
func SaveHistory(closePrices []service.ClosePrice) {
	for _, c := range closePrices {
		if !isClosePrice(c.Symbol, c.ClosePriceDate) {
			d, err := time.Parse("01/02/2006", c.ClosePriceDate)
			if err != nil {
				log.Println("SaveHistory failed", err)
			} else {
				// epoch stored as milliseconds (Unix seconds * 1000).
				err := exec(`INSERT INTO history (symbol, closePrice, closePriceDate, epoch) VALUES ($1,$2,$3,$4);`,
					c.Symbol,
					c.ClosePrice,
					c.ClosePriceDate,
					d.Unix()*1000,
				)
				if err != nil {
					log.Printf("failed with '%s' %s\n", err.Error(), c.Symbol)
				}
			}
		}
	}
}

// isClosePrice reports whether a close price row already exists for the
// given symbol and date. Query failures are treated as "not present".
// NOTE(review): this uses the `db` handle while GetHistory uses `mdb` —
// confirm both point at the intended database.
func isClosePrice(symbol string, date string) bool {
	row, err := db.Query(`select symbol from history where symbol = $1 and closePriceDate = $2;`, symbol, date)
	if err != nil {
		log.Println(err)
		return false
	}
	defer row.Close()
	var s = ""
	if row.Next() {
		row.Scan(&s)
	}
	return s != ""
}
package schema

// EndpointsData is the JSON message describing the address endpoints
// tracked for a single currency.
// NOTE(review): field semantics inferred from names — confirm with the
// producer/consumer of this message.
type EndpointsData struct {
	Currency  string   `json:"currency"`  // currency identifier
	Addresses []string `json:"addresses"` // addresses being tracked
	Stopped   []string `json:"stopped"`   // presumably addresses no longer tracked — verify
}
package main

import (
	"bufio"
	"fmt"
	"go-sns-test/snsclient"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

// usage:
// go run main.go
//
// Interactive CLI: prompts for an aws-cli profile, region and SNS topic
// ARN on stdin, then publishes each entered line to the topic until the
// user types ":q".
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	fmt.Println("You need to have configured the AWS credentials")
	fmt.Println("Enter aws-cli profile:")
	scanner.Scan()
	profile := scanner.Text()
	fmt.Println("Enter AWS Region:")
	scanner.Scan()
	region := scanner.Text()
	fmt.Println("Enter AWS SNS Topic ARN:")
	scanner.Scan()
	topicArn := scanner.Text()
	// NOTE(review): scanner errors/EOF are ignored; empty answers fall
	// through to the SDK and fail there.
	// SDK will use to load credentials from the shared credentials file ~/.aws/credentials.
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile: profile,
		Config: aws.Config{
			Region: aws.String(region),
		},
	})
	if err != nil {
		fmt.Println("NewSession error:", err)
		return
	}
	client := snsclient.SNS{
		Client:   sns.New(sess),
		TopicArn: topicArn,
	}
	// Publish loop: each non-":q" line becomes one SNS message; publish
	// errors are reported but do not stop the loop.
	for {
		fmt.Println("Enter message to send or ':q' to quit")
		scanner.Scan()
		msg := scanner.Text()
		if msg == ":q" {
			break
		}
		result, err := client.Publish(msg)
		if err != nil {
			fmt.Println("Publish error:", err)
		} else {
			fmt.Println(result)
		}
	}
}
package main

import (
	"errors"
	"strconv"
	"strings"
)

// Degree API student-v1 degree response message.
type Degree struct {
	ID               string `json:"id"`
	StudentDegNbr    string `json:"studentDegNbr"`
	Code             string `json:"degreeCode"`
	Desc             string `json:"degreeDesc"`
	AcadCareer       string `json:"degAcadCareer"`
	ConferDate       string `json:"degreeConferDate"` // presumably ISO-8601 timestamp; only the date part is used downstream
	HonorsPrefix     string `json:"honorsPrefix"`
	HonorsSuffix     string `json:"honorsSuffix"`
	AcadDegreeStatus string `json:"degAcadDegreeStatus"`
	ProspectusCode   string `json:"prospectusCode"`
	// Plans lists the academic plans attached to this degree.
	Plans []struct {
		AcadPlanCode        string `json:"acadPlanCode"`
		AcadPlanDesc        string `json:"acadPlanDesc"`
		DgpAcadCareer       string `json:"dgpAcadCareer"`
		StudentCareerNbr    int    `json:"studentCareerNbr"`
		DgpAcadDegreeStatus string `json:"dgpAcadDegreeStatus"`
		DegreeStatusDate    string `json:"degreeStatusDate"`
		AcadProgCode        string `json:"acadProgCode"`
		AcadProgGroupCode   int    `json:"acadProgGroupCode"`
		AcadProgGroup       string `json:"acadProgGroup"`
		AcadProgLevelCode   string `json:"acadProgLevelCode"`
		AcadProgLevel       string `json:"acadProgLevel"`
		AcadOrgCode         string `json:"acadOrgCode"`
		AcadGroupDesc       string `json:"acadGroupDesc"`
	} `json:"degreePlans"`
}

// Degrees - array of degrees
type Degrees []Degree

// Qualification - external organisations-qualification entry
type Qualification struct {
	Type        string `json:"type"`
	Code        string `json:"code"`
	Description string `json:"description"`
	Country     string `json:"country"`
}

// Qualifications - array of qualifications
type Qualifications []Qualification

// propagateToHub adds degree/education records to the current affiliation task.
func (degrees Degrees) propagateToHub(email, orcid string) (count int, err error) { count = len(degrees) if count == 0 { return 0, errors.New("no degree entry") } records := make([]Record, count) for i, d := range degrees { degreeName, ok := qualifications[d.Code] if !ok { degreeName, ok = degreeCodes[strings.ToUpper(d.Desc)] if !ok { degreeName = d.Desc } } date := strings.Split(d.ConferDate, "T")[0] records[i] = Record{ AffiliationType: "education", EndDate: date, LocalID: d.ID + "/" + d.StudentDegNbr, Email: email, Orcid: orcid, Role: degreeName, IsActive: true, } } // Make sure the task set-up is comlete var task Task err = oh.patch("api/v1/affiliations/"+strconv.Itoa(taskID), Task{ID: taskID, Records: records}, &task) if err != nil { log.Error("failed to update the taks: ", err) return } taskRecordCountMutex.Lock() taskRecordCount += count taskRecordCountMutex.Unlock() return }
package main

import (
	"fmt"
	"log"
	"net/http"
)

// main demonstrates defer ordering, deferred-argument capture, and
// panic/recover behavior.
func main() {
	/** Defer function */
	fmt.Println("start")
	// LIFO order
	defer fmt.Println("middle")
	fmt.Println("end")
	a := "hi"
	// Deferred call arguments are evaluated at defer time, so this prints
	// "hi" even though a is reassigned below.
	defer fmt.Println(a)
	a = "hello"
	/** Panic function */
	http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
		writer.Write([]byte("Hello GO!"))
	})
	// ListenAndServe blocks while serving and only returns on failure, so
	// on error we panic here...
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		panic(err.Error())
	}
	// ...which makes the statements below unreachable in practice
	// (NOTE(review): dead code kept as-is; presumably a teaching artifact).
	fmt.Println("Start Panic")
	panicker()
	fmt.Println("End Panic")
}

// panicker shows that recover only has an effect while a panic is in
// flight: the deferred closure runs at normal function exit, so recover()
// returns nil and the closure's own panic propagates to the caller.
func panicker() {
	fmt.Println("About to panic")
	defer func() {
		// recover() is nil here because nothing has panicked yet when
		// this deferred closure runs.
		if err := recover(); err != nil {
			log.Println("Error: ", err)
			panic(err)
		}
		panic("something bad happened")
		// Unreachable: the panic above never returns.
		fmt.Println("Panicking done")
	}()
}
package service

import (
	"errors"
	"net/http"
	"time"

	"github.com/chidam1994/happyfox/group"
	"github.com/chidam1994/happyfox/models"
	"github.com/chidam1994/happyfox/utils"
	"github.com/google/uuid"
)

// groupService implements group.Service on top of a group.Repository.
type groupService struct {
	repo group.Repository
}

// NewService returns a group.Service backed by the given repository.
func NewService(r group.Repository) group.Service {
	return &groupService{
		repo: r,
	}
}

// SaveGroup assigns a fresh id and timestamps to the group (and its
// members) and persists it. Creating a group whose name already exists is
// rejected with a 409 conflict.
func (svc *groupService) SaveGroup(group *models.Group) (groupId uuid.UUID, err error) {
	group.Id = uuid.New()
	group.CreatedAt = time.Now()
	group.UpdatedAt = time.Now()
	beforeSave(group, group.Id)
	existingGroup, err := svc.repo.FindByName(group.Name)
	if err != nil {
		return groupId, err
	}
	if existingGroup != nil {
		return groupId, utils.GetAppError(errors.New("Group with the specified name already exists"), "Unable to create group", http.StatusConflict)
	}
	return svc.repo.Save(group)
}

// DeleteGroup removes the group with the given id; deleting a missing
// group yields a 400 error.
func (svc *groupService) DeleteGroup(groupId uuid.UUID) error {
	group, err := svc.repo.FindById(groupId)
	if err != nil {
		return err
	}
	if group == nil {
		return utils.GetAppError(errors.New("The group you're trying to delete doesnt exist"), "Unable to Delete group", http.StatusBadRequest)
	}
	return svc.repo.Delete(groupId)
}

// GetGroup returns the group with the given id (nil if absent).
func (svc *groupService) GetGroup(groupId uuid.UUID) (*models.Group, error) {
	return svc.repo.FindById(groupId)
}

// AddMembers adds the given contacts to the group. Adding to a missing
// group, or adding a contact that is already a member, is rejected.
func (svc *groupService) AddMembers(groupId uuid.UUID, memberIds []uuid.UUID) error {
	group, err := svc.repo.FindById(groupId)
	if err != nil {
		return err
	}
	if group == nil {
		return utils.GetAppError(errors.New("The group you're trying to add members to doesnt exist"), "Unable to add members", http.StatusBadRequest)
	}
	num, err := svc.repo.GetMembersCount(groupId, memberIds)
	if err != nil {
		return err
	}
	if num > 0 {
		return utils.GetAppError(errors.New("some of the contacts you are trying to add are already members of the group"), "Unable to Add members", http.StatusBadRequest)
	}
	return svc.repo.AddMembers(groupId, getMembers(memberIds, groupId))
}

// RemMembers removes the given contacts from the group. Removing from a
// missing group, or removing a contact that is not a member, is rejected.
func (svc *groupService) RemMembers(groupId uuid.UUID, memberIds []uuid.UUID) error {
	group, err := svc.repo.FindById(groupId)
	if err != nil {
		return err
	}
	if group == nil {
		// Fixed copy-pasted "add members" wording: this is the remove path.
		return utils.GetAppError(errors.New("The group you're trying to remove members from doesnt exist"), "Unable to remove members", http.StatusBadRequest)
	}
	num, err := svc.repo.GetMembersCount(groupId, memberIds)
	if err != nil {
		return err
	}
	if num < len(memberIds) {
		return utils.GetAppError(errors.New("some of the contacts you are trying to remove are not members of the group"), "Unable to remove members", http.StatusBadRequest)
	}
	return svc.repo.RemMembers(groupId, memberIds)
}

// RenameGroup updates the group's name and bumps its UpdatedAt timestamp.
func (svc *groupService) RenameGroup(groupId uuid.UUID, name string) error {
	group, err := svc.repo.FindById(groupId)
	if err != nil {
		return err
	}
	if group == nil {
		return utils.GetAppError(errors.New("The group you're trying to rename doesnt exist"), "Unable to rename group", http.StatusBadRequest)
	}
	group.UpdatedAt = time.Now()
	group.Name = name
	return svc.repo.RenameGroup(group)
}

// beforeSave stamps every member with the owning group id and
// creation/update timestamps prior to the initial save.
func beforeSave(group *models.Group, groupId uuid.UUID) {
	now := time.Now()
	for i := range group.Members {
		group.Members[i].GroupId = groupId
		group.Members[i].CreatedAt = now
		group.Members[i].UpdatedAt = now
	}
}

// getMembers builds Member rows linking each contact id to the group.
func getMembers(memberIds []uuid.UUID, groupId uuid.UUID) []*models.Member {
	now := time.Now()
	result := make([]*models.Member, len(memberIds))
	for i := range memberIds {
		result[i] = &models.Member{
			MemberId:  memberIds[i],
			GroupId:   groupId,
			CreatedAt: now,
			UpdatedAt: now,
		}
	}
	return result
}
/* Copyright 2014 Huawei Technologies Co., Ltd. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package unittest import ( "io/ioutil" "path/filepath" "runtime" "testing" "github.com/fernet/fernet-go" "github.com/stretchr/testify/assert" "github.com/containerops/dockyard/utils" ) // TestEncryptMethod func TestEncryptMethod(t *testing.T) { cases := []struct { data string expected utils.EncryptMethod }{ {"rsa", utils.EncryptRSA}, {"", utils.EncryptNone}, {"anyother", utils.EncryptNotSupported}, } for _, c := range cases { assert.Equal(t, utils.NewEncryptMethod(c.data), c.expected, "Fail to get encrypt method") } } // TestRSAGenerateEnDe func TestRSAGenerateEnDe(t *testing.T) { privBytes, pubBytes, err := utils.GenerateRSAKeyPair(1024) assert.Nil(t, err, "Fail to genereate RSA Key Pair") testData := []byte("This is the testdata for encrypt and decryp") encrypted, err := utils.RSAEncrypt(pubBytes, testData) assert.Nil(t, err, "Fail to encrypt data") decrypted, err := utils.RSADecrypt(privBytes, encrypted) assert.Nil(t, err, "Fail to decrypt data") assert.Equal(t, testData, decrypted, "Fail to get correct data after en/de") } // TestSHA256Sign func TestSHA256Sign(t *testing.T) { _, path, _, _ := runtime.Caller(0) dir := filepath.Join(filepath.Dir(path), "testdata") testPrivFile := filepath.Join(dir, "rsa_private_key.pem") testContentFile := filepath.Join(dir, "hello.txt") testSignFile := filepath.Join(dir, "hello.sig") privBytes, _ := ioutil.ReadFile(testPrivFile) signBytes, _ := 
ioutil.ReadFile(testSignFile) contentBytes, _ := ioutil.ReadFile(testContentFile) testBytes, err := utils.SHA256Sign(privBytes, contentBytes) assert.Nil(t, err, "Fail to sign") assert.Equal(t, testBytes, signBytes, "Fail to get valid sign data ") } // TestSHA256Verify func TestSHA256Verify(t *testing.T) { _, path, _, _ := runtime.Caller(0) dir := filepath.Join(filepath.Dir(path), "testdata") testPubFile := filepath.Join(dir, "rsa_public_key.pem") testContentFile := filepath.Join(dir, "hello.txt") testSignFile := filepath.Join(dir, "hello.sig") pubBytes, _ := ioutil.ReadFile(testPubFile) signBytes, _ := ioutil.ReadFile(testSignFile) contentBytes, _ := ioutil.ReadFile(testContentFile) err := utils.SHA256Verify(pubBytes, contentBytes, signBytes) assert.Nil(t, err, "Fail to verify valid signed data") err = utils.SHA256Verify(pubBytes, []byte("Invalid content data"), signBytes) assert.NotNil(t, err, "Fail to verify invalid signed data") } func TestTokenMarshalUnmarshal(t *testing.T) { var fkey fernet.Key fkey.Generate() key := string(fkey.Encode()) invalidKey := "invalidKey" var retInt int var testInt int testInt = 1024 intResult, err := utils.TokenMarshal(testInt, invalidKey) assert.NotNil(t, err, "Fail to marshal int with invalid key") err = utils.TokenUnmarshal(string(intResult), key, &retInt) assert.NotNil(t, err, "Fail to unmarshal int with invalid key") intResult, err = utils.TokenMarshal(testInt, key) assert.Nil(t, err, "Fail to marshal int") err = utils.TokenUnmarshal(string(intResult), key, &retInt) assert.Nil(t, err, "Fail to unmarshal int") assert.Equal(t, testInt, retInt, "Fail to get the original int data") var retStr string var testStr string testStr = "hello, world" strResult, err := utils.TokenMarshal(testStr, key) assert.Nil(t, err, "Fail to marshal string") err = utils.TokenUnmarshal(string(strResult), key, &retStr) assert.Nil(t, err, "Fail to unmarshal string") assert.Equal(t, testStr, retStr, "Fail to get the original string data") }
// Copyright 2018, Shulhan <ms@kilabit.info>. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package net import ( "net" "testing" "github.com/shuLhan/share/lib/test" ) func TestIsTypeUDP(t *testing.T) { cases := []struct { desc string netw string exp bool }{{ desc: "Empty network", }, { desc: "Network is tcp", netw: "tcp", }, { desc: "Network is tcp4", netw: "tcp4", }, { desc: "Network is tcp6", netw: "tcp6", }, { desc: "Network is udp", netw: "udp", exp: true, }, { desc: "Network is udp4", netw: "udp4", exp: true, }, { desc: "Network is udp6", netw: "udp6", exp: true, }, { desc: "Network is ip", netw: "ip", }, { desc: "Network is ip4", netw: "ip4", }, { desc: "Network is ip6", netw: "ip6", }, { desc: "Network is unix", netw: "unix", }, { desc: "Network is unixgram", netw: "unixgram", }, { desc: "Network is unixpacket", netw: "unixpacket", }} for _, c := range cases { t.Log(c.desc) netType := ConvertStandard(c.netw) got := IsTypeUDP(netType) test.Assert(t, "IsTypeUDP", c.exp, got) } } func TestIsTypeTCP(t *testing.T) { cases := []struct { desc string netw string exp bool }{{ desc: "Empty network", }, { desc: "Network is tcp", netw: "tcp", exp: true, }, { desc: "Network is tcp4", netw: "tcp4", exp: true, }, { desc: "Network is tcp6", netw: "tcp6", exp: true, }, { desc: "Network is udp", netw: "udp", }, { desc: "Network is udp4", netw: "udp4", }, { desc: "Network is udp6", netw: "udp6", }, { desc: "Network is ip", netw: "ip", }, { desc: "Network is ip4", netw: "ip4", }, { desc: "Network is ip6", netw: "ip6", }, { desc: "Network is unix", netw: "unix", }, { desc: "Network is unixgram", netw: "unixgram", }, { desc: "Network is unixpacket", netw: "unixpacket", }} for _, c := range cases { t.Log(c.desc) netType := ConvertStandard(c.netw) got := IsTypeTCP(netType) test.Assert(t, "IsTypeTCP", c.exp, got) } } func TestIsTypeTransport(t *testing.T) { cases := []struct { desc string netw string exp 
bool }{{ desc: "Empty network", }, { desc: "Network is tcp", netw: "tcp", exp: true, }, { desc: "Network is tcp4", netw: "tcp4", exp: true, }, { desc: "Network is tcp6", netw: "tcp6", exp: true, }, { desc: "Network is udp", netw: "udp", exp: true, }, { desc: "Network is udp4", netw: "udp4", exp: true, }, { desc: "Network is udp6", netw: "udp6", exp: true, }, { desc: "Network is ip", netw: "ip", }, { desc: "Network is ip4", netw: "ip4", }, { desc: "Network is ip6", netw: "ip6", }, { desc: "Network is unix", netw: "unix", }, { desc: "Network is unixgram", netw: "unixgram", }, { desc: "Network is unixpacket", netw: "unixpacket", }} for _, c := range cases { t.Log(c.desc) netType := ConvertStandard(c.netw) got := IsTypeTransport(netType) test.Assert(t, "IsTypeTransport", c.exp, got) } } func TestToDotIPv6(t *testing.T) { cases := []struct { ip net.IP exp []byte }{{ ip: net.ParseIP("2001:db8::68"), exp: []byte("2.0.0.1.0.d.b.8.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.6.8"), }, { ip: net.ParseIP("::1"), exp: []byte("0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1"), }} for _, c := range cases { got := ToDotIPv6(c.ip) test.Assert(t, "ToDotIPv6", c.exp, got) } }
package cmd

import (
	"fmt"

	"github.com/khushmeeet/vc/vc"
	"github.com/spf13/cobra"
)

// commitCmd represents the commit command
var commitCmd = &cobra.Command{
	Use:   "commit",
	Short: "A brief description of your command",
	Long: `A longer description that spans multiple lines and likely contains examples
and usage of using your command. For example:

Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
	Args: cobra.NoArgs,
	Run: func(cmd *cobra.Command, args []string) {
		message, err := cmd.Flags().GetString("message")
		if err != nil {
			fmt.Printf("Error in flag - %v", err)
			// Fixed: previously execution fell through and committed with
			// an empty message even after the flag lookup failed.
			return
		}
		vc.DoCommit(message)
	},
}

func init() {
	rootCmd.AddCommand(commitCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	commitCmd.Flags().StringP("message", "m", "", "DoCommit message")
	_ = commitCmd.MarkFlagRequired("message")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// commitCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
package _6_RainOfReason

import "fmt"

// main prints the shifted form of a sample word.
func main() {
	fmt.Println(alphabeticShift("crazy"))
}

// alphabeticShift replaces every rune with the next code point, wrapping
// 'z' around to 'a'. Runes other than 'z' (including non-letters) are
// simply advanced by one, matching the original behavior.
func alphabeticShift(inputString string) string {
	shifted := []rune(inputString)
	for i, r := range shifted {
		if r == 'z' {
			r = 'a'
		} else {
			r++
		}
		shifted[i] = r
	}
	return string(shifted)
}
package compute_test import ( "errors" "github.com/genevieve/leftovers/gcp/compute" "github.com/genevieve/leftovers/gcp/compute/fakes" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("InstanceTemplate", func() { var ( client *fakes.InstanceTemplatesClient name string instanceTemplate compute.InstanceTemplate ) BeforeEach(func() { client = &fakes.InstanceTemplatesClient{} name = "banana" instanceTemplate = compute.NewInstanceTemplate(client, name) }) Describe("Delete", func() { It("deletes the instance template", func() { err := instanceTemplate.Delete() Expect(err).NotTo(HaveOccurred()) Expect(client.DeleteInstanceTemplateCall.CallCount).To(Equal(1)) Expect(client.DeleteInstanceTemplateCall.Receives.Template).To(Equal(name)) }) Context("when the client fails to delete", func() { BeforeEach(func() { client.DeleteInstanceTemplateCall.Returns.Error = errors.New("the-error") }) It("returns the error", func() { err := instanceTemplate.Delete() Expect(err).To(MatchError("Delete: the-error")) }) }) }) Describe("Name", func() { It("returns the name", func() { Expect(instanceTemplate.Name()).To(Equal(name)) }) }) Describe("Type", func() { It("returns the type", func() { Expect(instanceTemplate.Type()).To(Equal("Instance Template")) }) }) })
package cache import ( "bytes" "fmt" "sync" "time" "github.com/Skipor/memcached/internal/tag" "github.com/Skipor/memcached/log" ) type lru struct { lock sync.RWMutex table map[string]*node queues []*queue limits limits log log.Logger } func newLRU(l log.Logger, conf Config) *lru { c := &lru{ log: l, table: make(map[string]*node), limits: limits{ total: conf.Size, hot: conf.Size * (hotCap * 100) / 100, warm: conf.Size * (warmCap * 100) / 100, }, } for i := 0; i < temps; i++ { queue := newQueue() queue.onExpire = c.onExpire c.queues = append(c.queues, queue) } c.hot().onActive = attachAsInactive c.warm().onActive = attachAsInactive c.cold().onActive = moveTo(c.warm()) c.hot().onInactive = moveTo(c.cold()) c.warm().onInactive = moveTo(c.cold()) c.cold().onInactive = c.onEvict return c } type temp uint8 const ( cold temp = iota warm hot temps = 3 hotCap = 0.32 warmCap = 0.32 ) type limits struct { total int64 hot int64 warm int64 } func (c *lru) set(i Item) { defer c.checkInvariants() now := nowUnix() expired := i.expired(now) if expired { c.log.Warn("Set expired item.") } n, ok := c.table[i.Key] var wasActive bool if ok { c.log.Debugf("Remove old item %s value.", i.Key) wasActive = n.isActive() n.detach() c.deleteDetached(n) } if expired { c.log.Warn("Skip add of expired item.") i.Data.Recycle() return } c.log.Debugf("Add %s.", i.Key) n = newNode(i) c.table[i.Key] = n c.queues[hot].push(n) if wasActive { n.active = active } if n.size() > c.limits.hot { c.log.Panic("Too large item. Size %v, limit %v", n.size(), c.limits.hot) } if c.hotOverflow() || c.totalOverflow() { // TODO do this in background goroutine. That improves latency. c.fixOverflows() } } func (c *lru) get(keys ...[]byte) (views []ItemView) { c.log.Debugf("Get %s", keysPrinter{keys}) now := time.Now().Unix() for _, key := range keys { if n, ok := c.table[string(key)]; ok { // No allocation. 
if !n.expired(now) { n.setActive() views = append(views, n.NewView()) } } } return } func (c *lru) touch(keys ...[]byte) { c.log.Debugf("Touch %s", keysPrinter{keys}) for _, key := range keys { if n, ok := c.table[string(key)]; ok { // No allocation. n.setActive() } } return } func (c *lru) delete(key []byte) (deleted bool) { defer c.checkInvariants() c.log.Debugf("Delete %s", key) n, ok := c.table[string(key)] // No allocation. if !ok { return false } n.detach() c.deleteDetached(n) return true } func (c *lru) fixOverflows() { c.log.Debug("Fixing overflows") now := time.Now().Unix() if c.hotOverflow() { c.log.Debug("Hot overflow.") c.hot().shrinkWhile(c.hotOverflow, now) } if !c.totalOverflow() { return } c.log.Debug("Total overflow.") c.cold().shrinkWhile(func() bool { return !c.cold().empty() && c.totalOverflow() }, now) if c.warmOverflow() { // Some active cold become warm now. c.log.Debug("Warm overflow.") c.warm().shrinkWhile(c.warmOverflow, now) } if !c.totalOverflow() { return } c.log.Debug("Total overflow not fixed yet. Evict previous warm inactive items.") c.cold().shrinkWhile(c.totalOverflow, now) if c.totalOverflow() { panic("Overflow after cache eviction. Should not happen.") } } func (c *lru) onEvict(n *node) { c.log.Debugf("Item %s evicted.", n.Key) c.deleteDetached(n) } func (c *lru) onExpire(n *node) { c.log.Debugf("Item %s expired.", n.Key) c.deleteDetached(n) } // delete removes owned but detached node. 
func (c *lru) deleteDetached(n *node) { n.disown() n.Data.Recycle() delete(c.table, string(n.Key)) if tag.Debug { n.next = nil n.prev = nil n.owner = nil n.Data = nil } } func (c *lru) hot() *queue { return c.queues[hot] } func (c *lru) warm() *queue { return c.queues[warm] } func (c *lru) cold() *queue { return c.queues[cold] } func (c *lru) free() int64 { return c.limits.total - c.size() } func (c *lru) hotOverflow() bool { return c.hot().size > c.limits.hot } func (c *lru) warmOverflow() bool { return c.warm().size > c.limits.warm } func (c *lru) totalOverflow() bool { return c.free() < 0 } func (c *lru) itemsNum() int { return len(c.table) } func (c *lru) size() int64 { var size int64 for i := range c.queues { size += c.queues[i].size } return size } type keysPrinter struct{ keys [][]byte } func (p keysPrinter) String() string { buf := &bytes.Buffer{} for _, k := range p.keys { buf.WriteString(fmt.Sprintf(" %q", k)) } return buf.String() } func nowUnix() int64 { return time.Now().Unix() }
// Package main is a minimal program that identifies itself as the child
// process when executed.
package main

import "fmt"

// main prints a fixed identification message to stdout.
func main() {
	const msg = "I am the child"
	fmt.Println(msg)
}
// Package zrpc defines the JSON envelopes exchanged between RPC peers.
package zrpc

import "encoding/json"

// Input is the request envelope: which service and method to invoke, with
// what parameters, correlated by ID. (The original comment labeled this
// "output parameters"; this struct is the input side of the exchange.)
type Input struct {
	Service string      `json:"service"` // target service name
	Method  string      `json:"method"`  // method to invoke on the service
	Params  interface{} `json:"params"`  // method arguments, left opaque
	ID      int         `json:"id"`      // request correlation ID
	Address string      `json:"address"` // remote address of the service
}

// Output is the response envelope carrying the result (or error) for the
// request with the matching ID.
type Output struct {
	Result interface{} `json:"result"` // method return value on success
	Error  error       `json:"error"`  // non-nil on failure
	ID     int         `json:"id"`     // echoes the request ID
}

// ErrorDetail describes an error with a machine-readable code, a
// human-readable message, and optional extra data.
type ErrorDetail struct {
	Code    string      `json:"code"`
	Message string      `json:"message"`
	Data    interface{} `json:"data"`
}

// Error implements the built-in error interface by rendering the
// ErrorDetail as its JSON encoding; if marshaling fails, the marshal
// error's own message is returned instead.
func (e ErrorDetail) Error() string {
	errMsg, err := json.Marshal(e)
	if err != nil {
		return err.Error()
	}
	return string(errMsg)
}
package function import ( "bytes" "context" "encoding/json" "errors" dataBaseClient "github.com/dgraph-io/dgo" dataBaseAPI "github.com/dgraph-io/dgo/protos/api" "github.com/hecatoncheir/Storage" "google.golang.org/grpc" "log" "os" "testing" "text/template" ) func TestExecutor_DeleteEntityByID(t *testing.T) { t.Skip("Database must be started") DatabaseGateway := os.Getenv("DatabaseGateway") if DatabaseGateway == "" { DatabaseGateway = "localhost:9080" } databaseClient, err := connectToDatabase(DatabaseGateway) if err != nil { t.Fatalf(err.Error()) } schema := ` path: string @index(term) . pageInPaginationSelector: string @index(term) . previewImageOfSelector: string @index(term) . pageParamPath: string @index(term) . pageCityPath: string @index(term) . itemSelector: string @index(term) . nameOfItemSelector: string @index(term) . priceOfItemSelector: string @index(term) . cityInCookieKey: string @index(term) . cityIdForCookie: string @index(term) . ` err = setUpCompanySchema(schema, databaseClient) entityID := "" executor := Executor{Store: &storage.Store{DatabaseGateway: DatabaseGateway}} err = executor.DeleteEntityByID(entityID) if err != ErrEntityCanNotBeWithoutID { t.Fatalf(err.Error()) } FakeEntityID := "0x12" err = executor.DeleteEntityByID(FakeEntityID) if err != nil { t.Fatalf(err.Error()) } entityForCreate := storage.PageInstruction{ Path: "//"} createdEntityID, err := createEntity(entityForCreate, databaseClient) if err != nil { t.Fatalf(err.Error()) } if createdEntityID == "" { t.Fatalf("Created entity id is empty") } entityFromStore, err := readEntityByID(createdEntityID, databaseClient) if err != nil { t.Fatalf(err.Error()) } if entityFromStore.ID == "" { t.Fatalf("Created entity not founded by id") } if entityFromStore.ID != createdEntityID { t.Fatalf("Founded entity id: %v not created entity id: %v", entityFromStore.ID, createdEntityID) } err = executor.DeleteEntityByID(createdEntityID) if err != nil { t.Fatalf(err.Error()) } entityFromStore, err = 
readEntityByID(createdEntityID, databaseClient) if err.Error() != "entity by id not found" { t.Fatalf(err.Error()) } err = deleteEntityByID(createdEntityID, databaseClient) if err != nil { t.Fatalf(err.Error()) } entityFromStore, err = readEntityByID(createdEntityID, databaseClient) if err.Error() != "entity by id not found" { t.Fatalf(err.Error()) } } func connectToDatabase(databaseGateway string) (*dataBaseClient.Dgraph, error) { conn, err := grpc.Dial(databaseGateway, grpc.WithInsecure()) if err != nil { return nil, err } baseConnection := dataBaseAPI.NewDgraphClient(conn) databaseClient := dataBaseClient.NewDgraphClient(baseConnection) return databaseClient, nil } func setUpCompanySchema(schema string, databaseClient *dataBaseClient.Dgraph) error { operation := &dataBaseAPI.Operation{Schema: schema} err := databaseClient.Alter(context.Background(), operation) if err != nil { return err } return nil } func createEntity(entityForCreate storage.PageInstruction, databaseClient *dataBaseClient.Dgraph) (string, error) { encodedEntity, err := json.Marshal(entityForCreate) if err != nil { return "", err } mutation := &dataBaseAPI.Mutation{ SetJson: encodedEntity, CommitNow: true} transaction := databaseClient.NewTxn() assigned, err := transaction.Mutate(context.Background(), mutation) if err != nil { return "", nil } uid := assigned.Uids["blank-0"] return uid, nil } func readEntityByID(entityID string, databaseClient *dataBaseClient.Dgraph) (entity storage.PageInstruction, err error) { var ( ErrEntityByIDCanNotBeFound = errors.New("entity by id can not be found") ErrEntityDoesNotExist = errors.New("entity by id not found") ) variables := struct { PageInstructionID string }{ PageInstructionID: entityID} queryTemplate, err := template.New("ReadPageInstructionByID").Parse(`{ pageInstructions(func: uid("{{.PageInstructionID}}")) @filter(has(path)) { uid path pageInPaginationSelector pageParamPath cityParamPath itemSelector nameOfItemSelector priceOfItemSelector } }`) 
entity = storage.PageInstruction{ID: entityID} if err != nil { log.Println(err) return entity, ErrEntityByIDCanNotBeFound } queryBuf := bytes.Buffer{} err = queryTemplate.Execute(&queryBuf, variables) if err != nil { log.Println(err) return entity, ErrEntityByIDCanNotBeFound } transaction := databaseClient.NewTxn() response, err := transaction.Query(context.Background(), queryBuf.String()) if err != nil { log.Println(err) return entity, ErrEntityByIDCanNotBeFound } type entitiesInStore struct { Entities []storage.PageInstruction `json:"pageInstructions"` } var foundedEntities entitiesInStore err = json.Unmarshal(response.GetJson(), &foundedEntities) if err != nil { log.Println(err) return entity, ErrEntityByIDCanNotBeFound } if len(foundedEntities.Entities) == 0 { return entity, ErrEntityDoesNotExist } return foundedEntities.Entities[0], nil } func deleteEntityByID(entityID string, databaseClient *dataBaseClient.Dgraph) error { deleteEntityData, err := json.Marshal(map[string]string{"uid": entityID}) if err != nil { return err } mutation := dataBaseAPI.Mutation{ DeleteJson: deleteEntityData, CommitNow: true} transaction := databaseClient.NewTxn() _, err = transaction.Mutate(context.Background(), &mutation) if err != nil { return err } return nil }
// Copyright 2020 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tcp import "gvisor.dev/gvisor/pkg/tcpip/seqnum" // sackRecovery stores the variables related to TCP SACK loss recovery // algorithm. // // +stateify savable type sackRecovery struct { s *sender } func newSACKRecovery(s *sender) *sackRecovery { return &sackRecovery{s: s} } // handleSACKRecovery implements the loss recovery phase as described in RFC6675 // section 5, step C. // +checklocks:sr.s.ep.mu func (sr *sackRecovery) handleSACKRecovery(limit int, end seqnum.Value) (dataSent bool) { snd := sr.s snd.SetPipe() if smss := int(snd.ep.scoreboard.SMSS()); limit > smss { // Cap segment size limit to s.smss as SACK recovery requires // that all retransmissions or new segments send during recovery // be of <= SMSS. limit = smss } nextSegHint := snd.writeList.Front() for snd.Outstanding < snd.SndCwnd { var nextSeg *segment var rescueRtx bool nextSeg, nextSegHint, rescueRtx = snd.NextSeg(nextSegHint) if nextSeg == nil { return dataSent } if !snd.isAssignedSequenceNumber(nextSeg) || snd.SndNxt.LessThanEq(nextSeg.sequenceNumber) { // New data being sent. // Step C.3 described below is handled by // maybeSendSegment which increments sndNxt when // a segment is transmitted. // // Step C.3 "If any of the data octets sent in // (C.1) are above HighData, HighData must be // updated to reflect the transmission of // previously unsent data." 
// // We pass s.smss as the limit as the Step 2) requires that // new data sent should be of size s.smss or less. if sent := snd.maybeSendSegment(nextSeg, limit, end); !sent { return dataSent } dataSent = true snd.Outstanding++ snd.updateWriteNext(nextSeg.Next()) continue } // Now handle the retransmission case where we matched either step 1,3 or 4 // of the NextSeg algorithm. // RFC 6675, Step C.4. // // "The estimate of the amount of data outstanding in the network // must be updated by incrementing pipe by the number of octets // transmitted in (C.1)." snd.Outstanding++ dataSent = true snd.sendSegment(nextSeg) segEnd := nextSeg.sequenceNumber.Add(nextSeg.logicalLen()) if rescueRtx { // We do the last part of rule (4) of NextSeg here to update // RescueRxt as until this point we don't know if we are going // to use the rescue transmission. snd.FastRecovery.RescueRxt = snd.FastRecovery.Last } else { // RFC 6675, Step C.2 // // "If any of the data octets sent in (C.1) are below // HighData, HighRxt MUST be set to the highest sequence // number of the retransmitted segment unless NextSeg () // rule (4) was invoked for this retransmission." snd.FastRecovery.HighRxt = segEnd - 1 } } return dataSent } // +checklocks:sr.s.ep.mu func (sr *sackRecovery) DoRecovery(rcvdSeg *segment, fastRetransmit bool) { snd := sr.s if fastRetransmit { snd.resendSegment() } // We are in fast recovery mode. Ignore the ack if it's out of range. if ack := rcvdSeg.ackNumber; !ack.InRange(snd.SndUna, snd.SndNxt+1) { return } // RFC 6675 recovery algorithm step C 1-5. end := snd.SndUna.Add(snd.SndWnd) dataSent := sr.handleSACKRecovery(snd.MaxPayloadSize, end) snd.postXmit(dataSent, true /* shouldScheduleProbe */) }
// Package loadbalancer provides methods of finding available backends. package loadbalancer import ( "fmt" "net" "reflect" "sync" "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/wait" watchtypes "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/klog" ) const ( RemoteReady = "ready" RemoteNotReady = "not_ready" ReadBuffer = 4096000 DialTimeOut = time.Second * 10 HealthCheckPeriod = time.Second * 2 ) type LbConnection map[net.Conn]struct{} type LoadBalancer struct { mutex sync.RWMutex tcpConnection map[string]LbConnection serverHealthStatus map[string]bool originalServerAddress string nextServerIndex int currentServerAddress string ServerAddresses []string randomServers []string remoteEnable bool config *Config } type Config struct { LbAddress string ServerAddress string KubeClient kubernetes.Interface HealthChan chan bool StopChan chan struct{} } // Start finds available backend servers and return a loadbalancer. func Start(config *Config) (_lb *LoadBalancer, _err error) { lb := &LoadBalancer{ config: config, originalServerAddress: config.ServerAddress, remoteEnable: false, tcpConnection: make(map[string]LbConnection), serverHealthStatus: make(map[string]bool), } lb.setServers([]string{lb.originalServerAddress}) go lb.proxyStart() lb.findBackend() return lb, nil } // update sets the backend server list in load balancer. func (lb *LoadBalancer) update(serverAddress []string) { if lb == nil { return } if !lb.setServers(serverAddress) { return } klog.V(2).Infof("Updating load balancer server address -> %v", lb.randomServers) } // IsRemoteEnable returns whether backend server is available. func (lb *LoadBalancer) IsRemoteEnable() bool { return lb.remoteEnable } // findBackend searchs the available backend servers. 
func (lb *LoadBalancer) findBackend() error { addresses := []string{} endpoint, _ := lb.config.KubeClient.CoreV1().Endpoints("default").Get("kubernetes", metav1.GetOptions{}) if endpoint != nil { addresses = getAddresses(endpoint) lb.update(addresses) } go func() { connect: for { time.Sleep(5 * time.Second) watch, err := lb.config.KubeClient.CoreV1().Endpoints("default").Watch(metav1.ListOptions{ FieldSelector: fields.Set{"metadata.name": "kubernetes"}.String(), ResourceVersion: "0", }) if err != nil { klog.Errorf("Unable to watch for loadbalancer endpoints: %v", err) continue connect } watching: for { select { case ev, ok := <-watch.ResultChan(): if !ok || ev.Type == watchtypes.Error { if ok { klog.Errorf("loadbalancer endpoint watch channel closed: %v", ev) } watch.Stop() continue connect } endpoint, ok := ev.Object.(*v1.Endpoints) if !ok { klog.Errorf("loadbalancer could not event object to endpoint: %v", ev) continue watching } newAddresses := getAddresses(endpoint) if reflect.DeepEqual(newAddresses, addresses) { continue watching } addresses = newAddresses klog.Infof("loadbalancer endpoint watch event: %v", addresses) lb.update(addresses) } } } }() return nil } func (lb *LoadBalancer) sendSignalToSyncer(signal string) { switch signal { case RemoteReady: if !lb.remoteEnable { lb.config.HealthChan <- true lb.remoteEnable = true } case RemoteNotReady: if lb.remoteEnable { lb.remoteEnable = false lb.config.HealthChan <- false } default: klog.Errorf("unsupported signal type") } } func (lb *LoadBalancer) proxyStart() { lbListener, err := net.Listen("tcp", lb.config.LbAddress) if err != nil { klog.Errorf("Unable to listen on: %s, error: %v", lb.config.LbAddress, err) return } defer lbListener.Close() go wait.Until(lb.healthCheck, HealthCheckPeriod, lb.config.StopChan) for { proxyConn, err := lbListener.Accept() if err != nil { klog.Errorf("Unable to accept a request: %v", err) continue } targetAddr, targetConn, err := lb.getAvailableBackend() if err != nil { 
klog.Errorf("Unable to connect to: %s, error: %v", lb.currentServerAddress, err) proxyConn.Close() continue } lb.addTcpConnection(targetAddr, proxyConn, targetConn) go lb.proxyRequest(targetAddr, proxyConn, targetConn) go lb.proxyRequest(targetAddr, targetConn, proxyConn) } } // proxyRequest forwards all requests from r to w func (lb *LoadBalancer) proxyRequest(ip string, r net.Conn, w net.Conn) { defer func() { r.Close() w.Close() lb.deleteTcpConnection(ip, r, w) }() var buffer = make([]byte, ReadBuffer) for { n, err := r.Read(buffer) if err != nil { klog.Errorf("Unable to read from input: %v", err) break } n, err = w.Write(buffer[:n]) if err != nil { klog.Errorf("Unable to write to output: %v", err) break } } } func (lb *LoadBalancer) healthCheck() { isRemoteHealth := false for _, addr := range lb.getCurrentServerList() { if _, err := net.DialTimeout("tcp", addr, DialTimeOut); err != nil { for c := range lb.getTcpConnection(addr) { lb.deleteTcpConnection(addr, c) } lb.setHealthStatus(addr, false) klog.V(2).Infof("%s is not health", addr) } else { lb.setHealthStatus(addr, true) isRemoteHealth = true klog.V(2).Infof("%s is health", addr) } } if !isRemoteHealth { lb.sendSignalToSyncer(RemoteNotReady) } else { lb.sendSignalToSyncer(RemoteReady) } } func (lb *LoadBalancer) getAvailableBackend() (string, net.Conn, error) { startIndex := 0 for { if startIndex == len(lb.randomServers) { return "", nil, fmt.Errorf("all servers failed") } startIndex++ targetServer := lb.currentServerAddress // if current server is not health, then changes to the next server. 
if !lb.getHealthStatus(targetServer) { if _, err := lb.nextServer(targetServer); err != nil { return "", nil, err } continue } conn, err := net.DialTimeout("tcp", targetServer, DialTimeOut) if err == nil { klog.V(2).Infof("current request connect to: %s", targetServer) // the next request will uses the next server in randomServers lb.nextServer(targetServer) return targetServer, conn, nil } klog.Errorf("Dial error from load balancer: %v", err) // if current server couldn't connect, then uses the next server. if _, err = lb.nextServer(targetServer); err != nil { return "", nil, err } } } func (lb *LoadBalancer) addTcpConnection(addr string, conn ...net.Conn) { defer lb.mutex.Unlock() lb.mutex.Lock() if _, ok := lb.tcpConnection[addr]; !ok { c := make(LbConnection) lb.tcpConnection[addr] = c } for _, c := range conn { lb.tcpConnection[addr][c] = struct{}{} } } func (lb *LoadBalancer) deleteTcpConnection(addr string, conn ...net.Conn) { defer lb.mutex.Unlock() lb.mutex.Lock() for _, c := range conn { if c != nil { c.Close() } delete(lb.tcpConnection[addr], c) } } func (lb *LoadBalancer) getTcpConnection(addr string) LbConnection { defer lb.mutex.RUnlock() lb.mutex.RLock() if conn, ok := lb.tcpConnection[addr]; ok { return conn } return LbConnection{} } func (lb *LoadBalancer) setHealthStatus(addr string, status bool) { defer lb.mutex.Unlock() lb.mutex.Lock() lb.serverHealthStatus[addr] = status } func (lb *LoadBalancer) getHealthStatus(addr string) bool { defer lb.mutex.RUnlock() lb.mutex.RLock() if status, ok := lb.serverHealthStatus[addr]; ok { return status } return false } func (lb *LoadBalancer) getCurrentServerList() []string { defer lb.mutex.RUnlock() lb.mutex.RLock() return lb.randomServers }
// Package fakes contains hand-written test doubles for GCP API clients.
package fakes

import (
	"sync"

	gcpdns "google.golang.org/api/dns/v1"
)

// ManagedZonesClient is a test double for the Cloud DNS managed-zones API.
// Each call records its invocation count and arguments, then returns either
// the canned values in Returns or, when set, the result of Stub. The
// embedded mutexes make the fake safe for concurrent use.
type ManagedZonesClient struct {
	DeleteManagedZoneCall struct {
		sync.Mutex
		CallCount int
		Receives  struct {
			Zone string
		}
		Returns struct {
			Error error
		}
		Stub func(string) error
	}

	ListManagedZonesCall struct {
		sync.Mutex
		CallCount int
		Returns   struct {
			ManagedZonesListResponse *gcpdns.ManagedZonesListResponse
			Error                    error
		}
		Stub func() (*gcpdns.ManagedZonesListResponse, error)
	}
}

// DeleteManagedZone records the zone it was asked to delete and returns the
// canned error, delegating to Stub when one is provided.
func (f *ManagedZonesClient) DeleteManagedZone(param1 string) error {
	f.DeleteManagedZoneCall.Lock()
	defer f.DeleteManagedZoneCall.Unlock()
	f.DeleteManagedZoneCall.CallCount++
	f.DeleteManagedZoneCall.Receives.Zone = param1
	if f.DeleteManagedZoneCall.Stub != nil {
		return f.DeleteManagedZoneCall.Stub(param1)
	}
	return f.DeleteManagedZoneCall.Returns.Error
}

// ListManagedZones returns the canned list response and error, delegating
// to Stub when one is provided.
func (f *ManagedZonesClient) ListManagedZones() (*gcpdns.ManagedZonesListResponse, error) {
	f.ListManagedZonesCall.Lock()
	defer f.ListManagedZonesCall.Unlock()
	f.ListManagedZonesCall.CallCount++
	if f.ListManagedZonesCall.Stub != nil {
		return f.ListManagedZonesCall.Stub()
	}
	return f.ListManagedZonesCall.Returns.ManagedZonesListResponse, f.ListManagedZonesCall.Returns.Error
}
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Kathy Spradlin (kathyspradlin@gmail.com)

package storage

import (
	"fmt"
	"sync"

	"github.com/cockroachdb/cockroach/gossip"
	"github.com/cockroachdb/cockroach/proto"
)

// FindStoreFunc finds the disks in a datacenter that have the requested
// attributes.
type FindStoreFunc func(proto.Attributes) ([]*StoreDescriptor, error)

// stringSet is a set of strings, represented as a map from string to the
// empty struct.
type stringSet map[string]struct{}

// StoreFinder provides the data necessary to find stores with particular
// attributes.
type StoreFinder struct {
	finderMu     sync.Mutex
	cond         *sync.Cond
	capacityKeys stringSet // Tracks gossip keys used for capacity
	gossip       *gossip.Gossip
}

// newStoreFinder creates a StoreFinder. The condition variable shares
// finderMu so that waiters in WaitForNodes are woken by
// capacityGossipUpdate's Broadcast.
func newStoreFinder(g *gossip.Gossip) *StoreFinder {
	sf := &StoreFinder{gossip: g}
	sf.cond = sync.NewCond(&sf.finderMu)
	return sf
}

// capacityGossipUpdate is a gossip callback triggered whenever capacity
// information is gossiped. It just tracks keys used for capacity gossip;
// contentsChanged is ignored because only the set of keys matters here.
func (sf *StoreFinder) capacityGossipUpdate(key string, contentsChanged bool) {
	sf.finderMu.Lock()
	defer sf.finderMu.Unlock()
	if sf.capacityKeys == nil {
		// Lazily initialize so a zero-value StoreFinder still works.
		sf.capacityKeys = stringSet{}
	}
	sf.capacityKeys[key] = struct{}{}
	// Wake anyone blocked in WaitForNodes on the new key count.
	sf.cond.Broadcast()
}

// WaitForNodes blocks until at least the given number of nodes are present in the
// capacity map. Used for tests.
func (sf *StoreFinder) WaitForNodes(n int) {
	sf.finderMu.Lock()
	defer sf.finderMu.Unlock()
	for len(sf.capacityKeys) < n {
		sf.cond.Wait()
	}
}

// findStores is the Store's implementation of a StoreFinder. It returns a list
// of stores with attributes that are a superset of the required attributes. It
// never returns an error.
//
// If it cannot retrieve a StoreDescriptor from the Store's gossip, it garbage
// collects the failed key.
//
// TODO(embark, spencer): consider using a reverse index map from Attr->stores,
// for efficiency. Ensure that entries in this map still have an opportunity
// to be garbage collected.
func (sf *StoreFinder) findStores(required proto.Attributes) ([]*StoreDescriptor, error) {
	sf.finderMu.Lock()
	defer sf.finderMu.Unlock()
	var stores []*StoreDescriptor
	for key := range sf.capacityKeys {
		storeDesc, err := storeDescFromGossip(key, sf.gossip)
		if err != nil {
			// We can no longer retrieve this key from the gossip store,
			// perhaps it expired.
			delete(sf.capacityKeys, key)
		} else if required.IsSubset(storeDesc.Attrs) {
			stores = append(stores, storeDesc)
		}
	}
	return stores, nil
}

// storeDescFromGossip retrieves a StoreDescriptor from the specified capacity
// gossip key. Returns an error if the gossip doesn't exist or is not
// a StoreDescriptor.
func storeDescFromGossip(key string, g *gossip.Gossip) (*StoreDescriptor, error) {
	info, err := g.GetInfo(key)
	if err != nil {
		return nil, err
	}
	storeDesc, ok := info.(StoreDescriptor)
	if !ok {
		return nil, fmt.Errorf("gossiped info is not a StoreDescriptor: %+v", info)
	}
	// Take the address of the local copy extracted from the gossip value.
	return &storeDesc, nil
}
// Copyright 2022 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "bytes" "fmt" "os" "os/exec" "path/filepath" "strings" "sync" "testing" "time" "github.com/cenkalti/backoff" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" "gvisor.dev/gvisor/pkg/fd" "gvisor.dev/gvisor/pkg/sentry/seccheck" pb "gvisor.dev/gvisor/pkg/sentry/seccheck/points/points_go_proto" "gvisor.dev/gvisor/pkg/sentry/seccheck/sinks/remote/test" "gvisor.dev/gvisor/pkg/sentry/seccheck/sinks/remote/wire" "gvisor.dev/gvisor/pkg/test/testutil" ) func waitForFile(path string) error { return testutil.Poll(func() error { if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { return err } return &backoff.PermanentError{Err: err} } return nil }, 5*time.Second) } type syncBuffer struct { mu sync.Mutex // +checklocks:mu buf bytes.Buffer } func (s *syncBuffer) Write(p []byte) (n int, err error) { s.mu.Lock() defer s.mu.Unlock() return s.buf.Write(p) } func (s *syncBuffer) String() string { s.mu.Lock() defer s.mu.Unlock() return s.buf.String() } type exampleServer struct { path string cmd *exec.Cmd out syncBuffer } func newExampleServer(quiet bool) (*exampleServer, error) { exe, err := testutil.FindFile("examples/seccheck/server_cc") if err != nil { return nil, fmt.Errorf("error finding server_cc: %v", err) } dir, err := os.MkdirTemp(os.TempDir(), "remote") if err != nil { return nil, fmt.Errorf("Setup(%q): %v", dir, err) } server 
:= &exampleServer{path: filepath.Join(dir, "remote.sock")} server.cmd = exec.Command(exe, server.path) if quiet { server.cmd.Args = append(server.cmd.Args, "-q") } server.cmd.Stdout = &server.out server.cmd.Stderr = &server.out if err := server.cmd.Start(); err != nil { os.RemoveAll(dir) return nil, fmt.Errorf("error running %q: %v", exe, err) } if err := waitForFile(server.path); err != nil { server.stop() return nil, fmt.Errorf("error waiting for server file %q: %w", server.path, err) } return server, nil } func (s *exampleServer) stop() { _ = s.cmd.Process.Kill() _ = s.cmd.Wait() _ = os.Remove(s.path) } func TestBasic(t *testing.T) { server, err := test.NewServer() if err != nil { t.Fatalf("newServer(): %v", err) } defer server.Close() endpoint, err := setup(server.Endpoint) if err != nil { t.Fatalf("setup(): %v", err) } endpointFD, err := fd.NewFromFile(endpoint) if err != nil { _ = endpoint.Close() t.Fatalf("NewFromFile(): %v", err) } _ = endpoint.Close() r, err := new(nil, endpointFD) if err != nil { t.Fatalf("New(): %v", err) } info := &pb.ExitNotifyParentInfo{ExitStatus: 123} if err := r.ExitNotifyParent(nil, seccheck.FieldSet{}, info); err != nil { t.Fatalf("ExitNotifyParent: %v", err) } server.WaitForCount(1) pt := server.GetPoints()[0] if want := pb.MessageType_MESSAGE_SENTRY_EXIT_NOTIFY_PARENT; pt.MsgType != want { t.Errorf("wrong message type, want: %v, got: %v", want, pt.MsgType) } got := &pb.ExitNotifyParentInfo{} if err := proto.Unmarshal(pt.Msg, got); err != nil { t.Errorf("proto.Unmarshal(ExitNotifyParentInfo): %v", err) } if !proto.Equal(info, got) { t.Errorf("Received point is different, want: %+v, got: %+v", info, got) } // Check that no more points were received. 
if want, got := 1, server.Count(); want != got { t.Errorf("wrong number of points, want: %d, got: %d", want, got) } } func TestVersionUnsupported(t *testing.T) { server, err := test.NewServer() if err != nil { t.Fatalf("newServer(): %v", err) } defer server.Close() server.SetVersion(0) _, err = setup(server.Endpoint) if err == nil || !strings.Contains(err.Error(), "remote version") { t.Fatalf("Wrong error: %v", err) } } func TestVersionNewer(t *testing.T) { server, err := test.NewServer() if err != nil { t.Fatalf("newServer(): %v", err) } defer server.Close() server.SetVersion(wire.CurrentVersion + 10) endpoint, err := setup(server.Endpoint) if err != nil { t.Fatalf("setup(): %v", err) } _ = endpoint.Close() } // Test that the example C++ server works. It's easier to test from here and // also changes that can break it will likely originate here. func TestExample(t *testing.T) { server, err := newExampleServer(false) if err != nil { t.Fatalf("newExampleServer(): %v", err) } defer server.stop() endpoint, err := setup(server.path) if err != nil { t.Fatalf("setup(): %v", err) } endpointFD, err := fd.NewFromFile(endpoint) if err != nil { _ = endpoint.Close() t.Fatalf("NewFromFile(): %v", err) } _ = endpoint.Close() r, err := new(nil, endpointFD) if err != nil { t.Fatalf("New(): %v", err) } info := pb.ExitNotifyParentInfo{ExitStatus: 123} if err := r.ExitNotifyParent(nil, seccheck.FieldSet{}, &info); err != nil { t.Fatalf("ExitNotifyParent: %v", err) } check := func() error { gotRaw := server.out.String() // Collapse whitespace. 
got := strings.Join(strings.Fields(gotRaw), " ") if !strings.Contains(got, "ExitNotifyParentInfo => exit_status: 123") { return fmt.Errorf("ExitNotifyParentInfo point didn't get to the server, out: %q, raw: %q", got, gotRaw) } return nil } if err := testutil.Poll(check, time.Second); err != nil { t.Errorf(err.Error()) } } func TestConfig(t *testing.T) { for _, tc := range []struct { name string config map[string]any want *remote err string }{ { name: "default", config: map[string]any{}, want: &remote{ retries: 0, initialBackoff: 25 * time.Microsecond, maxBackoff: 10 * time.Millisecond, }, }, { name: "all", config: map[string]any{ "retries": float64(10), "backoff": "1s", "backoff_max": "10s", }, want: &remote{ retries: 10, initialBackoff: time.Second, maxBackoff: 10 * time.Second, }, }, { name: "bad-retries", config: map[string]any{ "retries": "10", }, err: "retries", }, { name: "bad-backoff", config: map[string]any{ "backoff": "wrong", }, err: "invalid duration", }, { name: "bad-backoff-max", config: map[string]any{ "backoff_max": 10, }, err: "is not an string", }, { name: "bad-invalid-backoffs", config: map[string]any{ "retries": float64(10), "backoff": "10s", "backoff_max": "1s", }, err: "cannot be larger than max", }, } { t.Run(tc.name, func(t *testing.T) { var endpoint fd.FD sink, err := new(tc.config, &endpoint) if len(tc.err) == 0 { if err != nil { t.Fatalf("new(%q): %v", tc.config, err) } got := sink.(*remote) got.endpoint = nil if *got != *tc.want { t.Errorf("wrong remote: want: %+v, got: %+v", tc.want, got) } } else if err == nil || !strings.Contains(err.Error(), tc.err) { t.Errorf("wrong error: want: %v, got: %v", tc.err, err) } }) } } func BenchmarkSmall(t *testing.B) { // Run server in a separate process just to isolate it as much as possible. 
server, err := newExampleServer(false) if err != nil { t.Fatalf("newExampleServer(): %v", err) } defer server.stop() endpoint, err := setup(server.path) if err != nil { t.Fatalf("setup(): %v", err) } endpointFD, err := fd.NewFromFile(endpoint) if err != nil { _ = endpoint.Close() t.Fatalf("NewFromFile(): %v", err) } _ = endpoint.Close() r, err := new(nil, endpointFD) if err != nil { t.Fatalf("New(): %v", err) } t.ResetTimer() t.RunParallel(func(sub *testing.PB) { for sub.Next() { info := pb.ExitNotifyParentInfo{ExitStatus: 123} if err := r.ExitNotifyParent(nil, seccheck.FieldSet{}, &info); err != nil { t.Fatalf("ExitNotifyParent: %v", err) } } }) } func BenchmarkProtoAny(t *testing.B) { info := &pb.ExitNotifyParentInfo{ExitStatus: 123} t.ResetTimer() t.RunParallel(func(sub *testing.PB) { for sub.Next() { any, err := anypb.New(info) if err != nil { t.Fatal(err) } if _, err := proto.Marshal(any); err != nil { t.Fatal(err) } } }) } func BenchmarkProtoEnum(t *testing.B) { info := &pb.ExitNotifyParentInfo{ExitStatus: 123} t.ResetTimer() t.RunParallel(func(sub *testing.PB) { for sub.Next() { if _, err := proto.Marshal(info); err != nil { t.Fatal(err) } } }) }
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"time"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

// serveHttp starts the HTTP server on the port given by the HTTP_PORT
// environment variable. It serves the block API under /blocks and static
// files from ./www, blocks until the server stops, and returns its error.
func serveHttp() error {
	p := os.Getenv("HTTP_PORT")

	r := mux.NewRouter()
	r.PathPrefix("/blocks").HandlerFunc(blockHandler).Methods("GET")
	fileServe(r, "/", "./www")

	srv := &http.Server{
		Handler: handlers.CompressHandler(r),
		Addr:    fmt.Sprintf(":%s", p),
		// Timeouts guard against slow clients holding connections open.
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}
	log.Printf("Started http server, serving at %s", p)
	return srv.ListenAndServe()
}

// blockHandler writes the package-level block list as a JSON response.
func blockHandler(w http.ResponseWriter, r *http.Request) {
	resp, err := json.Marshal(blocks)
	if err != nil {
		// Previously the marshal error was discarded, which produced an
		// empty 200 response on failure. Surface it as a 500 instead.
		log.Printf("blockHandler: marshal blocks: %v", err)
		http.Error(w, "failed to encode blocks", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	if _, err := w.Write(resp); err != nil {
		// The response is already committed; we can only log the failure.
		log.Printf("blockHandler: write response: %v", err)
	}
}

// fileServe registers a static file server for directory under prefix on
// the given router.
func fileServe(router *mux.Router, prefix string, directory string) {
	router.PathPrefix(prefix).
		Handler(http.StripPrefix(prefix, http.FileServer(http.Dir(directory)))).
		Methods("GET")
	// NOTE(review): this also registers the router on http.DefaultServeMux.
	// The server started in serveHttp uses its own handler, so this looks
	// redundant — kept to preserve existing behavior; confirm before removing.
	http.Handle(prefix, router)
}
package controllers

import (
	"github.com/astaxie/beego"
	"github.com/astaxie/beego/logs"
	"homework/common/encrypt"
	"homework/models/datamodels"
	"homework/models/services"
	"strconv"
)

// pageSize is the number of products shown per list page.
const pageSize = 12

// ProductController serves the product list and product detail pages.
type ProductController struct {
	beego.Controller
	ProductService services.IProductService
}

// ProductDetail is the view model for a single product page.
type ProductDetail struct {
	IsLogin bool
	Product *datamodels.Product
}

// ProductList is the view model for the paginated product list.
type ProductList struct {
	ProductInfo map[int]map[string]string
	PageInfo    *PageInfo
	IsLogin     bool
}

// PageInfo carries pagination state for templates.
type PageInfo struct {
	Count      int
	IndexPage  int
	NextPage   int
	PrePage    int
	ArrayPages []int
	EndPage    int
}

// checkLogin validates the uid/sign cookie pair by decrypting the sign
// cookie and comparing it with uid. On mismatch the stale cookies are
// cleared. Returns whether the visitor is considered logged in.
//
// This consolidates the cookie check previously duplicated in GetList,
// GetDetail and GetTestDetail.
func (c *ProductController) checkLogin() bool {
	uid := c.Ctx.GetCookie("uid")
	sign := c.Ctx.GetCookie("sign")
	signbytes, err := encrypt.DePwdCode(sign)
	if err == nil && uid == string(signbytes) {
		return true
	}
	// Invalid or missing signature: drop the stale cookies.
	c.Ctx.SetCookie("uid", "", -1)
	c.Ctx.SetCookie("sign", "", -1)
	return false
}

// GetList renders the paginated product list page. The query parameter
// "pagenum" selects the page; invalid or missing values default to page 1.
func (c *ProductController) GetList() {
	indexpage, err := strconv.Atoi(c.GetString("pagenum"))
	if err != nil || indexpage < 1 {
		indexpage = 1
	}

	arr, count, e := c.ProductService.GetAllProductInfo(indexpage, pageSize)
	// Check the service error before using its results (previously the
	// results were logged and consumed ahead of the error check).
	if e != nil {
		logs.Error(e)
		c.Abort("501")
	}
	logs.Info(arr)

	endpage := count/pageSize + 1

	// Build a window of up to 5 page links starting at the current
	// 5-page bucket, never running past the last page.
	arrpages := []int{(indexpage-1)/5*5 + 1}
	for i := 1; i < 5; i++ {
		if arrpages[i-1]+1 > endpage {
			break
		}
		arrpages = append(arrpages, arrpages[i-1]+1)
	}

	c.Data["Count"] = count
	c.Data["IndexPage"] = indexpage
	c.Data["NextPage"] = indexpage + 1
	c.Data["PrePage"] = indexpage - 1
	c.Data["ArrayPages"] = arrpages
	c.Data["EndPage"] = endpage
	c.Data["IsLogin"] = c.checkLogin()
	c.Data["products"] = arr
	c.TplName = "product/listview.html"
}

// renderDetail loads the product identified by the "id" query parameter
// and renders it with the given template. Shared by GetDetail and
// GetTestDetail, which previously duplicated this logic.
func (c *ProductController) renderDetail(tplName string) {
	id, err := strconv.Atoi(c.GetString("id"))
	if err != nil {
		// Malformed or missing id: nothing to look up (previously the
		// parse error was ignored and id 0 produced a 500 instead).
		c.Abort("404")
	}
	product, err := c.ProductService.GetProductByID(int64(id))
	if err != nil {
		c.Abort("500")
	}
	c.Data["IsLogin"] = c.checkLogin()
	c.Data["product"] = product
	c.TplName = tplName
}

// GetDetail renders the product detail page.
func (c *ProductController) GetDetail() {
	c.renderDetail("product/view.html")
}

// GetTestDetail renders the test variant of the product detail page.
func (c *ProductController) GetTestDetail() {
	c.renderDetail("product/view_test.html")
}
package s3cli import ( "context" "encoding/json" "errors" "io" "io/ioutil" "os" "path" "path/filepath" "sort" "strings" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/astaxie/beego" "github.com/qiaogw/pkg/filemanager" "github.com/qiaogw/pkg/logs" "github.com/qiaogw/pkg/tools" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" _ "github.com/aws/aws-sdk-go/service/s3/s3manager" ) //Svc s3 管理 type Svc struct { svc *s3.S3 bucketName string EditableMaxSize int64 conf *S3Config } // S3Config 配置 type S3Config struct { Endpoint string `label:"地址"` // 地址 AccessKeyID string `label:"AccessKeyID"` // 地址 SecretAccessKey string `label:"SecretAccessKey"` // 地址 Region string `label:"对象存储的region"` // 对象存储的region Bucket string `label:"对象存储的Bucket"` // 对象存储的Bucket Secure bool `label:"true代表使用HTTPS"` // true代表使用HTTPS Ignore string `label:"隐藏文件,S3不支持空目录"` // 地址 LifeDay int64 `label:"存储周期,天"` // 地址 DefautRestorePath string `label:"默认恢复文件前缀"` TaskTime string `label:"删除超期文件时间"` TempDir string `label:"临时文件夹"` MountDir string `label:"mount文件夹"` // 地址 CacheDir string `label:"Cache文件夹"` // 地址 MountConfigFile string `label:"mountConfigFile地址"` // 地址 LogFile string `label:"LogFile地址"` // 地址 } // NewSvc 新的svc func NewSvc(s3Conf *S3Config) *Svc { //sess = nil accessKeyID := s3Conf.AccessKeyID secretAccessKey := s3Conf.SecretAccessKey endPoint := s3Conf.Endpoint //endpoint设置,不要动 sess, _ := session.NewSession(&aws.Config{ Credentials: credentials.NewStaticCredentials(accessKeyID, secretAccessKey, ""), Endpoint: aws.String(endPoint), Region: aws.String("us-east-1"), DisableSSL: aws.Bool(true), S3ForcePathStyle: aws.Bool(false), //virtual-host style方式,不要修改 }) svc := new(Svc) svc.svc = s3.New(sess) svc.bucketName = s3Conf.Bucket return svc } // ListBuckets 查看S3中包含的bucket func (s *Svc) ListBuckets() ([]*s3.Bucket, error) { //svc := 
s3.New(sess) result, err := s.svc.ListBuckets(nil) if err != nil { logs.Error("Unable to list buckets, %v", err) return nil, err } return result.Buckets, nil } // BucketName 查看默认bucket func (s *Svc) BucketName() string { return s.bucketName } // CreateBucket 创建bucket func (s *Svc) CreateBucket(bucketName string) (err error) { _, err = s.svc.CreateBucket(&s3.CreateBucketInput{ Bucket: aws.String(bucketName), }) if err != nil { logs.Errorf("Unable to create bucket %q, %v", bucketName, err) return err } err = s.svc.WaitUntilBucketExists(&s3.HeadBucketInput{ Bucket: aws.String(bucketName), }) return //return s.svc.CreateBucketWithContext(ctx, bucketName, minio.MakeBucketOptions{Region: m.Region}) } // RemoveBucket Remove bucket func (s *Svc) RemoveBucket(bucket string) (err error) { _, err = s.svc.DeleteBucket(&s3.DeleteBucketInput{ Bucket: aws.String(bucket), }) if err != nil { logs.Errorf("Unable to delete bucket %q, %v", bucket, err) return err } // Wait until bucket is deleted before finishing err = s.svc.WaitUntilBucketNotExists(&s3.HeadBucketInput{ Bucket: aws.String(bucket), }) return } // ListObjects 查看某个bucket中包含的文件/文件夹 func (s *Svc) ListObjects(bucket, prefix string, maxKeys int64, isDelimiter bool) (dirs []os.FileInfo) { params := &s3.ListObjectsInput{ Bucket: aws.String(bucket), Prefix: aws.String(prefix), //MaxKeys: aws.Int64(maxKeys), Marker: aws.String(""), } if isDelimiter { params.Delimiter = aws.String("/") } var mtime time.Time class := "" pages := int64(0) ctx, cancel := context.WithTimeout(context.Background(), time.Duration(3000)*time.Second) defer cancel() err := s.svc.ListObjectsPagesWithContext(ctx, params, func(output *s3.ListObjectsOutput, b bool) bool { for _, content := range output.Contents { if len(prefix) > 0 { *content.Key = strings.TrimPrefix(*content.Key, prefix) } mtime = *content.LastModified class = *content.StorageClass obj := NewFileInfo(*content) dirs = append(dirs, obj) } pages++ if maxKeys > 0 { return pages <= maxKeys } 
return true }) if err != nil { beego.Error(err) return } if prefix == "" { prefix = "/" } class = "" err = s.GetDir(&dirs, bucket, prefix, class, mtime) return } // GetDir 获取文件夹树 func (s *Svc) GetDir(dirs *[]os.FileInfo, bucket, prefix, class string, mtime time.Time) (err error) { var ld []FilePrefix dirfile := filepath.Join(s.conf.TempDir, bucket+".json") jdata, _ := ioutil.ReadFile(dirfile) err = json.Unmarshal(jdata, &ld) if err != nil { return } for _, v := range ld { if v.Pid == prefix { vs := s3.Object{ Key: aws.String(v.Key), Size: aws.Int64(0), LastModified: aws.Time(mtime), StorageClass: aws.String(class), } obj := NewFileInfo(vs) *dirs = append(*dirs, obj) } } return } // GetDirInfo list某个bucket中包含的文件夹 func (s *Svc) GetDirInfo(bucket, pptath string) (dirs []os.FileInfo) { params := &s3.ListObjectsInput{ Bucket: aws.String(bucket), Prefix: aws.String(pptath), } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(300000)*time.Second) defer cancel() pageNum := 0 var ld []FilePrefix err := s.svc.ListObjectsPagesWithContext(ctx, params, func(output *s3.ListObjectsOutput, b bool) bool { for _, content := range output.Contents { ld = getFilePrefix(path.Dir(*content.Key), pptath, ld) pageNum++ return pageNum <= 50 } return true }) if err != nil { return } nj, _ := json.Marshal(ld) dirFile := filepath.Join(s.conf.TempDir, bucket+".json") err = tools.WriteFileByte(dirFile, nj, true) if err != nil { beego.Error(err) } return } //List 列出文件对象包括文件夹 func (s *Svc) List(bucket, ppath string, isDelimiter bool, sortBy ...string) (err error, exit bool, dirs []os.FileInfo) { objectPrefix := strings.TrimPrefix(ppath, `/`) words := len(objectPrefix) var forceDir bool if words == 0 { forceDir = true } else { if strings.HasSuffix(objectPrefix, `/`) { forceDir = true } else { objectPrefix += `/` } } dirs = s.ListObjects(bucket, objectPrefix, 0, isDelimiter) // dirs = s.GetDirInfo(bucket, objectPrefix) if !forceDir && len(dirs) == 0 && err != nil { return } if 
len(sortBy) > 0 { switch sortBy[0] { case `time`: sort.Sort(filemanager.SortByModTime(dirs)) case `-time`: sort.Sort(filemanager.SortByModTimeDesc(dirs)) case `name`: case `-name`: sort.Sort(filemanager.SortByNameDesc(dirs)) case `type`: fallthrough default: sort.Sort(filemanager.SortByFileType(dirs)) } } else { sort.Sort(filemanager.SortByFileType(dirs)) } return } // RemoveObject 删除某个bucket中的对象文件 func (s *Svc) RemoveObject(bucket string, itemName string) (err error) { //name := aws.StringValue(item) dp := &s3.DeleteObjectInput{ Bucket: aws.String(bucket), Key: aws.String(itemName), } _, err = s.svc.DeleteObject(dp) if err != nil { logs.Error("Unable to delete object ", itemName, "from bucket", bucket, err) } logs.Info("successfully deleted", itemName) return } // RemoveObject 恢复某个bucket中的冷存储对象文件 func (s *Svc) RestoreObject(bucket, itemName string, days int64) (err error) { rparams := &s3.RestoreObjectInput{ Bucket: aws.String(bucket), Key: aws.String(itemName), RestoreRequest: &s3.RestoreRequest{ Days: aws.Int64(days), GlacierJobParameters: &s3.GlacierJobParameters{ Tier: aws.String("Expedited"), // 取回选项,支持三种取 //值:[Expedited|Standard| //Bulk]。 //Expedited表示快速取回对 //象,取回耗时1~5 min, //Standard表示标准取回对 //象,取回耗时3~5 h, //Bulk表示批量取回对象, //取回耗时5~12 h。 //默认取值为Standard。 }, }, } _, err = s.svc.RestoreObject(rparams) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeObjectAlreadyInActiveTierError: logs.Error(s3.ErrCodeObjectAlreadyInActiveTierError, aerr.Error()) default: logs.Error(aerr.Error()) } } else { logs.Error(err) } } return } // RemoveObjectsLife 删除某个Bucket重的超期对象文件 func (s *Svc) RemoveObjectsLife() (count int, err error) { logs.Info("开始删除超期文件。。。") //svc := s3.New(sess) //bucket := s3Conf.Bucket params := &s3.ListObjectsInput{ Bucket: aws.String(s.bucketName), Prefix: aws.String("StoragePath/"), } logs.Info(*s.svc.Config.Endpoint) ctx, cancel := context.WithTimeout(context.Background(), time.Duration(3000)*time.Second) defer 
cancel() now := time.Now() lifeDay := float64(s.conf.LifeDay - 1) err = s.svc.ListObjectsPagesWithContext(ctx, params, func(output *s3.ListObjectsOutput, b bool) bool { for _, content := range output.Contents { t := now.Sub(*content.LastModified).Hours() if t > lifeDay*24 { count++ s.RemoveObject(s.bucketName, *content.Key) } } return true }) if err != nil { logs.Error(err) } logs.Infof("共删除超期文件 %v 份!!!", count) return } // RestoreObjectsLife 恢复某个Bucket中的所有冷存储对象文件 func (s *Svc) RestoreObjectsLife(bucketName, ppath string, days int64) (count int, err error) { logs.Info("开始恢复文件。。。") bucket := bucketName params := &s3.ListObjectsInput{ Bucket: aws.String(bucket), Prefix: aws.String(ppath), } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(3000)*time.Second) defer cancel() err = s.svc.ListObjectsPagesWithContext(ctx, params, func(output *s3.ListObjectsOutput, b bool) bool { for _, content := range output.Contents { if *content.StorageClass == "GLACIER" { count++ s.RestoreObject(bucket, *content.Key, days) } } return true }) if err != nil { logs.Error(err) } logs.Infof("共恢复文件 %v 份!!!", count) return } // RemoveDir 删除文件夹 func (s *Svc) RemoveDir(prefix string) (err error) { objectName := strings.TrimPrefix(prefix, `/`) if !strings.HasSuffix(objectName, `/`) { objectName += `/` } if objectName == `/` { return s.clear(s.bucketName) } params := &s3.ListObjectsInput{ Bucket: aws.String(s.bucketName), Prefix: aws.String(objectName), } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(3000)*time.Second) defer cancel() err = s.svc.ListObjectsPagesWithContext(ctx, params, func(output *s3.ListObjectsOutput, b bool) bool { for _, content := range output.Contents { if *content.StorageClass == "GLACIER" { s.RemoveObject(s.bucketName, *content.Key) } } return true }) return nil } func (s *Svc) clear(bucket string) (err error) { iter := s3manager.NewDeleteListIterator(s.svc, &s3.ListObjectsInput{ Bucket: aws.String(bucket), }) if err := 
s3manager.NewBatchDeleteWithClient(s.svc).Delete(aws.BackgroundContext(), iter); err != nil { logs.Errorf("Unable to delete objects from bucket %q, %v", bucket, err) return err } return } // Put 提交数据 func (s *Svc) Put(reader io.Reader, objectName string) (err error) { input := &s3.PutObjectInput{ //ACL: aws.String("authenticated-read"), Body: aws.ReadSeekCloser(reader), Bucket: aws.String(s.bucketName), Key: aws.String(objectName), } logs.Debug(objectName, s.bucketName) _, err = s.svc.PutObject(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { default: logs.Error(aerr.Error()) } } else { // Print the error, cast err to awserr.Error to get the Code and // Message from an error. logs.Errorf(err.Error()) } return } return } // // Get 获取数据 func (s *Svc) Get(bucket, ppath string) (io.Reader, error) { objectName := strings.TrimPrefix(ppath, `/`) beego.Info(objectName) input := &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(objectName), } state, err := s.Stat(s.bucketName, ppath) if *state.StorageClass == "GLACIER" { if state.Restore == nil { s.RestoreObject(s.bucketName, ppath, 7) return nil, errors.New("对象为冷存储,开始恢复,请在5分钟后重试") } if strings.Index(*state.Restore, `ongoing-request="false"`) < 0 { return nil, errors.New("对象为冷存储,恢复处理中,请在5分钟后重试") } } result, err := s.svc.GetObject(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeNoSuchKey: logs.Error(s3.ErrCodeNoSuchKey, aerr.Error()) case s3.ErrCodeInvalidObjectState: logs.Error(s3.ErrCodeInvalidObjectState, aerr.Error()) default: logs.Error(aerr.Error()) } } else { // Print the error, cast err to awserr.Error to get the Code and // Message from an error. 
logs.Error(err.Error()) } return nil, err } return result.Body, nil } // // Stat 获取对象信息 func (s *Svc) Stat(bucket, name string) (*s3.HeadObjectOutput, error) { input := &s3.HeadObjectInput{ Bucket: aws.String(bucket), Key: aws.String(name), } result, err := s.svc.HeadObject(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { default: logs.Error(aerr.Error()) } } else { // Print the error, cast err to awserr.Error to get the Code and // Message from an error. logs.Error(err.Error()) } return nil, err } return result, nil } // // Exists 对象是否存在 func (s *Svc) Exists(ppath string) (bool, error) { _, err := s.Stat(s.bucketName, ppath) if err != nil { return false, err } return true, err } //
package main

import (
	"../foo" // NOTE(review): relative import path — only resolves in legacy GOPATH mode, not with Go modules; confirm the build setup
	"fmt"
	"math"
)

// main prints math.Pi followed by the package-level value foo.Baz.
func main() {
	fmt.Println(math.Pi)
	fmt.Println(foo.Baz)
}
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.

package wire

import (
	"encoding/json"
	"strings"
	"unicode/utf8"
)

// CurrencyInstructedAmount is the currency instructed amount
type CurrencyInstructedAmount struct {
	// tag identifies this record type; fixed to TagCurrencyInstructedAmount
	tag string
	// SwiftFieldTag is the SWIFT field tag for this record
	SwiftFieldTag string `json:"swiftFieldTag"`
	// Amount is the instructed amount
	// Amount Must begin with at least one numeric character (0-9) and contain only one decimal comma marker
	// (e.g., $1,234.56 should be entered as 1234,56 and $0.99 should be entered as 0,99)
	Amount string `json:"amount"`

	// validator is composed for data validation
	validator
	// converters is composed for WIRE to GoLang Converters
	converters
}

// NewCurrencyInstructedAmount returns a new CurrencyInstructedAmount with
// its tag preset; all other fields start at their zero values.
func NewCurrencyInstructedAmount() *CurrencyInstructedAmount {
	cia := &CurrencyInstructedAmount{
		tag: TagCurrencyInstructedAmount,
	}
	return cia
}

// Parse takes the input string and parses the CurrencyInstructedAmount values
//
// Parse provides no guarantee about all fields being filled in. Callers should make a Validate() call to confirm
// successful parsing and data validity.
func (cia *CurrencyInstructedAmount) Parse(record string) error { if utf8.RuneCountInString(record) < 25 { return NewTagMinLengthErr(25, len(record)) } cia.tag = record[:6] length := 6 value, read, err := cia.parseVariableStringField(record[length:], 5) if err != nil { return fieldError("SwiftFieldTag", err) } cia.SwiftFieldTag = value length += read if len(record) < length+18 { return fieldError("Amount", ErrValidLength) } cia.Amount = cia.parseStringField(record[length : length+18]) length += 18 if err := cia.verifyDataWithReadLength(record, length); err != nil { return NewTagMaxLengthErr(err) } return nil } func (cia *CurrencyInstructedAmount) UnmarshalJSON(data []byte) error { type Alias CurrencyInstructedAmount aux := struct { *Alias }{ (*Alias)(cia), } if err := json.Unmarshal(data, &aux); err != nil { return err } cia.tag = TagCurrencyInstructedAmount return nil } // String returns a fixed-width CurrencyInstructedAmount record func (cia *CurrencyInstructedAmount) String() string { return cia.Format(FormatOptions{ VariableLengthFields: false, }) } // Format returns a CurrencyInstructedAmount record formatted according to the FormatOptions func (cia *CurrencyInstructedAmount) Format(options FormatOptions) string { var buf strings.Builder buf.Grow(29) buf.WriteString(cia.tag) buf.WriteString(cia.FormatSwiftFieldTag(options)) buf.WriteString(cia.AmountField()) return buf.String() } // Validate performs WIRE format rule checks on CurrencyInstructedAmount and returns an error if not Validated // The first error encountered is returned and stops that parsing. 
func (cia *CurrencyInstructedAmount) Validate() error { if cia.tag != TagCurrencyInstructedAmount { return fieldError("tag", ErrValidTagForType, cia.tag) } if err := cia.isAlphanumeric(cia.SwiftFieldTag); err != nil { return fieldError("SwiftFieldTag", err, cia.SwiftFieldTag) } if err := cia.isAmount(cia.Amount); err != nil { return fieldError("Amount", err, cia.Amount) } return nil } // SwiftFieldTagField gets a string of the SwiftFieldTag field func (cia *CurrencyInstructedAmount) SwiftFieldTagField() string { return cia.alphaField(cia.SwiftFieldTag, 5) } // ToDo: The spec isn't clear if this is padded with zeros or not, so for now it is // AmountField gets a string of the AmountTag field func (cia *CurrencyInstructedAmount) AmountField() string { return cia.numericStringField(cia.Amount, 18) } // FormatSwiftFieldTag returns SwiftFieldTag formatted according to the FormatOptions func (cia *CurrencyInstructedAmount) FormatSwiftFieldTag(options FormatOptions) string { return cia.formatAlphaField(cia.SwiftFieldTag, 5, options) }
package event import ( "sync/atomic" "testing" "time" "github.com/stretchr/testify/require" "github.com/iotaledger/hive.go/runtime/workerpool" ) func Benchmark(b *testing.B) { testEvent := New1[int]() testEvent.Hook(func(int) {}) b.ResetTimer() for i := 0; i < b.N; i++ { testEvent.Trigger(i) } } func TestTrigger_PreTrigger(t *testing.T) { var triggerCount atomic.Uint64 var hookCount atomic.Uint64 var preTriggerCount atomic.Uint64 testEvent := New1[int](WithPreTriggerFunc(func(i int) { preTriggerCount.Add(1) })) testEvent.Hook(func(int) { hookCount.Add(1) }) testEvent.Hook(func(int) { hookCount.Add(1) }) for i := 0; i < 10; i++ { triggerCount.Add(1) testEvent.Trigger(i) } require.Equal(t, uint64(10), triggerCount.Load()) require.Equal(t, uint64(20), preTriggerCount.Load()) require.Equal(t, uint64(20), hookCount.Load()) } func TestTrigger_LinkTo_PreTrigger(t *testing.T) { var triggerCount atomic.Uint64 var hook1Count atomic.Uint64 var hook2Count atomic.Uint64 var preTriggerCount atomic.Uint64 testEvent := New1[int](WithPreTriggerFunc(func(i int) { preTriggerCount.Add(1) })) testEvent2 := New1[int]() testEvent.Hook(func(int) { hook1Count.Add(1) }) testEvent2.Hook(func(int) { hook2Count.Add(1) }) testEvent2.LinkTo(testEvent) for i := 0; i < 10; i++ { triggerCount.Add(1) testEvent.Trigger(i) } require.Equal(t, uint64(10), triggerCount.Load()) require.Equal(t, uint64(20), preTriggerCount.Load()) require.Equal(t, uint64(10), hook1Count.Load()) require.Equal(t, uint64(10), hook2Count.Load()) } func TestTriggerSettings_MaxTriggerCount(t *testing.T) { var triggerCount atomic.Uint64 testEvent := New1[int](WithMaxTriggerCount(3)) testEvent.Hook(func(int) { triggerCount.Add(1) }) for i := 0; i < 10; i++ { go testEvent.Trigger(i) } require.Eventually(t, func() bool { return triggerCount.Load() == 3 }, 1*time.Second, 10*time.Millisecond) time.Sleep(1 * time.Second) require.Equal(t, uint64(3), triggerCount.Load()) } func TestTriggerSettings_Hook_MaxTriggerCount(t *testing.T) { var 
triggerCount atomic.Uint64 testEvent := New1[int]() testEvent.Hook(func(int) { triggerCount.Add(1) }, WithMaxTriggerCount(3)) for i := 0; i < 10; i++ { go testEvent.Trigger(i) } require.Eventually(t, func() bool { return triggerCount.Load() == 3 }, 1*time.Second, 10*time.Millisecond) time.Sleep(1 * time.Second) require.Equal(t, uint64(3), triggerCount.Load()) } func TestEvent1_Hook_WorkerPool(t *testing.T) { workerPool := workerpool.New(t.Name()).Start() var eventFired atomic.Bool testEvent := New1[int]() hook := testEvent.Hook(func(int) { time.Sleep(1 * time.Second) eventFired.Store(true) }, WithWorkerPool(workerPool)) require.Equal(t, workerPool, hook.WorkerPool()) require.False(t, testEvent.WasTriggered()) require.False(t, hook.WasTriggered()) testEvent.Trigger(0) require.True(t, testEvent.WasTriggered()) require.Equal(t, 1, testEvent.TriggerCount()) require.Equal(t, testEvent.MaxTriggerCount(), 0) require.False(t, testEvent.MaxTriggerCountReached()) require.True(t, hook.WasTriggered()) require.False(t, eventFired.Load()) require.Eventually(t, eventFired.Load, 5*time.Second, 100*time.Millisecond) require.True(t, hook.WasTriggered()) } func TestEvent1_WithoutWorkerPool(t *testing.T) { var eventFired atomic.Bool testEvent := New1[int](WithWorkerPool(nil)) testEvent.Hook(func(int) { time.Sleep(1 * time.Second) eventFired.Store(true) }) require.Equal(t, (*workerpool.WorkerPool)(nil), testEvent.WorkerPool()) testEvent.Trigger(0) require.True(t, eventFired.Load()) } func TestEvent1_Hook_WithoutWorkerPool(t *testing.T) { workerPool := workerpool.New(t.Name()).Start() var eventFired atomic.Bool testEvent := New1[int](WithWorkerPool(workerPool)) hook := testEvent.Hook(func(int) { time.Sleep(1 * time.Second) eventFired.Store(true) }, WithWorkerPool(nil)) require.Nil(t, hook.WorkerPool()) testEvent.Trigger(0) require.True(t, eventFired.Load()) } func TestLink(t *testing.T) { sourceEvents := NewEvents() eventTriggered := 0 subEventTriggered := 0 linkedEvents := 
NewEvents(sourceEvents) linkedEvents.Event.Hook(func(int) { eventTriggered++ }) linkedEvents.SubEvents.Event.Hook(func(error) { subEventTriggered++ }) sourceEvents.Event.Trigger(7) require.Equal(t, eventTriggered, 1) require.Equal(t, subEventTriggered, 0) sourceEvents.SubEvents.Event.Trigger(nil) require.Equal(t, eventTriggered, 1) require.Equal(t, subEventTriggered, 1) linkedEvents.LinkTo(nil) sourceEvents.Event.Trigger(7) sourceEvents.SubEvents.Event.Trigger(nil) require.Equal(t, eventTriggered, 1) require.Equal(t, subEventTriggered, 1) linkedEvents.LinkTo(sourceEvents) sourceEvents.Event.Trigger(7) sourceEvents.SubEvents.Event.Trigger(nil) require.Equal(t, eventTriggered, 2) require.Equal(t, subEventTriggered, 2) } type Events struct { Event *Event1[int] SubEvents *SubEvents Group[Events, *Events] } var NewEvents = CreateGroupConstructor(func() *Events { return &Events{ Event: New1[int](), SubEvents: NewSubEvents(), } }) type SubEvents struct { Event *Event1[error] Group[SubEvents, *SubEvents] } var NewSubEvents = CreateGroupConstructor(func() *SubEvents { return &SubEvents{ Event: New1[error](), } })
package main

import (
	"bufio"
	"fmt"
	"math/rand"
	"os"
	"strconv"
	"time"
)

// main plays a single round of blackjack between the player (stdin-driven)
// and a dealer that draws to 17.
func main() {
	fmt.Println("Start game")
	deck := initDeck()
	fmt.Println(deck)

	var player Player
	var dealer Dealer
	var drawed Card

	// Deal two cards each; the dealer's second card stays hidden.
	drawed = deck.Pop()
	fmt.Printf("You: first %s\n", drawed)
	player.AddCard(drawed)
	drawed = deck.Pop()
	fmt.Printf("You: second %s\n", drawed)
	player.AddCard(drawed)

	drawed = deck.Pop()
	fmt.Printf("CPU: first %s\n", drawed)
	dealer.AddCard(drawed)
	drawed = deck.Pop()
	fmt.Printf("CPU: second card is not known\n")
	dealer.AddCard(drawed)

	fmt.Printf("Your total: %d\n", player.Total)
	player.DrawCards(&deck)
	if player.IsBurst {
		fmt.Println("Burst!! you lose...")
		os.Exit(0)
	}
	dealer.DrawCards(&deck)

	fmt.Printf("Your total: %d\n", player.Total)
	fmt.Printf("CPU total: %d\n", dealer.Total)
	if dealer.IsBurst {
		fmt.Println("Burst!! you win!!!")
		os.Exit(0)
	}
	// NOTE(review): a tie counts as a loss for the player; confirm whether
	// a push ("draw") outcome is wanted instead.
	if player.Total > dealer.Total {
		fmt.Println("You win!!!")
	} else {
		fmt.Println("You lose...")
	}
}

// Card is a playing card: Number 1-13 and a suit Mark.
type Card struct {
	Number int
	Mark   string
}

// String renders the card as "suit: rank", with J/Q/K for 11/12/13.
func (c Card) String() string {
	var number string
	switch c.Number {
	case 11:
		number = "J"
	case 12:
		number = "Q"
	case 13:
		number = "K"
	default:
		number = strconv.Itoa(c.Number)
	}
	return fmt.Sprintf("%s: %s", c.Mark, number)
}

// Deck is an ordered pile of cards; Pop draws from the front.
type Deck struct {
	Cards []Card
}

// Pop removes and returns the top card of the deck.
func (d *Deck) Pop() Card {
	c, cs := d.Cards[0], d.Cards[1:]
	d.Cards = cs
	return c
}

// Total computes the best blackjack value of cards: face cards count 10,
// aces count 1 or 11 — one ace is upgraded to 11 only when doing so does
// not bust the hand.
//
// BUG FIX: the previous version fixed each ace at 11 or 1 greedily in draw
// order, so e.g. A,K,K scored 31 (bust) instead of the correct 21.
func Total(cards []Card) (total int) {
	aces := 0
	for _, c := range cards {
		switch {
		case c.Number > 10:
			total += 10
		case c.Number == 1:
			aces++
			total++ // count every ace as 1 first
		default:
			total += c.Number
		}
	}
	// Upgrade at most one ace from 1 to 11 if the hand can absorb it.
	if aces > 0 && total+10 <= 21 {
		total += 10
	}
	return
}

// Player is the human participant's hand state.
type Player struct {
	Cards   []Card
	Total   int
	IsBurst bool
}

// AddCard appends c to the hand and refreshes Total/IsBurst.
func (p *Player) AddCard(c Card) {
	p.Cards = append(p.Cards, c)
	p.Total = Total(p.Cards)
	p.IsBurst = p.Total > 21
}

// DrawCards interactively asks the player whether to hit, drawing from
// deck until they stand, reach 21, or bust.
func (p *Player) DrawCards(deck *Deck) {
	for {
		fmt.Println("Draw card?: [y/n]")
		stdin := bufio.NewScanner(os.Stdin)
		stdin.Scan()
		if stdin.Text() != "y" {
			break
		}
		drawed := deck.Pop()
		fmt.Printf("You card: %s\n", drawed)
		p.AddCard(drawed)
		fmt.Printf("Your total: %d\n", p.Total)
		if p.Total >= 21 {
			break
		}
	}
}

// Dealer is the house hand state.
type Dealer struct {
	Cards   []Card
	Total   int
	IsBurst bool
}

// AddCard appends c to the hand and refreshes Total/IsBurst.
func (d *Dealer) AddCard(c Card) {
	d.Cards = append(d.Cards, c)
	d.Total = Total(d.Cards)
	d.IsBurst = d.Total > 21
}

// DrawCards draws from deck until the dealer's total reaches 17 or more
// (standard house rule).
func (d *Dealer) DrawCards(deck *Deck) {
	for {
		if d.Total >= 17 {
			break
		}
		drawed := deck.Pop()
		fmt.Printf("CPU: card %s\n", drawed)
		d.AddCard(drawed)
	}
}

// initDeck builds a shuffled 52-card deck.
func initDeck() (deck Deck) {
	var cards []Card
	for _, m := range [4]string{"ハート", "ダイヤ", "クラブ", "スペード"} {
		for _, n := range [13]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} {
			cards = append(cards, Card{n, m})
		}
	}
	// Shuffle the cards.
	rand.Seed(time.Now().Unix())
	rand.Shuffle(len(cards), func(i, j int) { cards[i], cards[j] = cards[j], cards[i] })
	deck.Cards = cards
	return
}
package lexers import ( . "github.com/alecthomas/chroma/v2" // nolint ) // Chapel lexer. var Chapel = Register(MustNewLexer( &Config{ Name: "Chapel", Aliases: []string{"chapel", "chpl"}, Filenames: []string{"*.chpl"}, MimeTypes: []string{}, }, func() Rules { return Rules{ "root": { {`\n`, TextWhitespace, nil}, {`\s+`, TextWhitespace, nil}, {`\\\n`, Text, nil}, {`//(.*?)\n`, CommentSingle, nil}, {`/(\\\n)?[*](.|\n)*?[*](\\\n)?/`, CommentMultiline, nil}, {Words(``, `\b`, `config`, `const`, `in`, `inout`, `out`, `param`, `ref`, `type`, `var`), KeywordDeclaration, nil}, {Words(``, `\b`, `false`, `nil`, `none`, `true`), KeywordConstant, nil}, {Words(``, `\b`, `bool`, `bytes`, `complex`, `imag`, `int`, `locale`, `nothing`, `opaque`, `range`, `real`, `string`, `uint`, `void`), KeywordType, nil}, {Words(``, `\b`, `atomic`, `single`, `sync`, `borrowed`, `owned`, `shared`, `unmanaged`, `align`, `as`, `begin`, `break`, `by`, `catch`, `cobegin`, `coforall`, `continue`, `defer`, `delete`, `dmapped`, `do`, `domain`, `else`, `enum`, `except`, `export`, `extern`, `for`, `forall`, `foreach`, `forwarding`, `if`, `implements`, `import`, `index`, `init`, `inline`, `label`, `lambda`, `let`, `lifetime`, `local`, `new`, `noinit`, `on`, `only`, `otherwise`, `override`, `pragma`, `primitive`, `private`, `prototype`, `public`, `reduce`, `require`, `return`, `scan`, `select`, `serial`, `sparse`, `subdomain`, `then`, `this`, `throw`, `throws`, `try`, `use`, `when`, `where`, `while`, `with`, `yield`, `zip`), Keyword, nil}, {`(iter)(\s+)`, ByGroups(Keyword, TextWhitespace), Push("procname")}, {`(proc)(\s+)`, ByGroups(Keyword, TextWhitespace), Push("procname")}, {`(operator)(\s+)`, ByGroups(Keyword, TextWhitespace), Push("procname")}, {`(class|interface|module|record|union)(\s+)`, ByGroups(Keyword, TextWhitespace), Push("classname")}, {`\d+i`, LiteralNumber, nil}, {`\d+\.\d*([Ee][-+]\d+)?i`, LiteralNumber, nil}, {`\.\d+([Ee][-+]\d+)?i`, LiteralNumber, nil}, {`\d+[Ee][-+]\d+i`, LiteralNumber, 
nil}, {`(\d*\.\d+)([eE][+-]?[0-9]+)?i?`, LiteralNumberFloat, nil}, {`\d+[eE][+-]?[0-9]+i?`, LiteralNumberFloat, nil}, {`0[bB][01]+`, LiteralNumberBin, nil}, {`0[xX][0-9a-fA-F]+`, LiteralNumberHex, nil}, {`0[oO][0-7]+`, LiteralNumberOct, nil}, {`[0-9]+`, LiteralNumberInteger, nil}, {`"(\\\\|\\"|[^"])*"`, LiteralString, nil}, {`'(\\\\|\\'|[^'])*'`, LiteralString, nil}, {`(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|<=>|<~>|\.\.|by|#|\.\.\.|&&|\|\||!|&|\||\^|~|<<|>>|==|!=|<=|>=|<|>|[+\-*/%]|\*\*)`, Operator, nil}, {`[:;,.?()\[\]{}]`, Punctuation, nil}, {`[a-zA-Z_][\w$]*`, NameOther, nil}, }, "classname": { {`[a-zA-Z_][\w$]*`, NameClass, Pop(1)}, }, "procname": { {`([a-zA-Z_][.\w$]*|\~[a-zA-Z_][.\w$]*|[+*/!~%<>=&^|\-:]{1,2})`, NameFunction, Pop(1)}, {`\(`, Punctuation, Push("receivertype")}, {`\)+\.`, Punctuation, nil}, }, "receivertype": { {Words(``, `\b`, `atomic`, `single`, `sync`, `borrowed`, `owned`, `shared`, `unmanaged`), Keyword, nil}, {Words(``, `\b`, `bool`, `bytes`, `complex`, `imag`, `int`, `locale`, `nothing`, `opaque`, `range`, `real`, `string`, `uint`, `void`), KeywordType, nil}, {`[^()]*`, NameOther, Pop(1)}, }, } }, ))
// Package audit provides helpers that write user-event entries to the
// audit log via CreateLogEntry.
package audit

import (
	"github.com/google/uuid"
	"github.com/jrapoport/gothic/core/context"
	"github.com/jrapoport/gothic/models/account"
	"github.com/jrapoport/gothic/models/auditlog"
	"github.com/jrapoport/gothic/models/types"
	"github.com/jrapoport/gothic/models/types/key"
	"github.com/jrapoport/gothic/models/user"
	"github.com/jrapoport/gothic/store"
)

// LogLogin records a login event for userID in the audit log.
func LogLogin(ctx context.Context, conn *store.Connection, userID uuid.UUID) error {
	_, err := CreateLogEntry(ctx, conn, auditlog.Login, userID, nil)
	return err
}

// LogLogout records a logout event for userID in the audit log.
func LogLogout(ctx context.Context, conn *store.Connection, userID uuid.UUID) error {
	_, err := CreateLogEntry(ctx, conn, auditlog.Logout, userID, nil)
	return err
}

// LogPasswordChange records a password-change event for userID in the audit log.
func LogPasswordChange(ctx context.Context, conn *store.Connection, userID uuid.UUID) error {
	_, err := CreateLogEntry(ctx, conn, auditlog.Password, userID, nil)
	return err
}

// LogEmailChange records an email-change event for userID in the audit log.
func LogEmailChange(ctx context.Context, conn *store.Connection, userID uuid.UUID) error {
	_, err := CreateLogEntry(ctx, conn, auditlog.Email, userID, nil)
	return err
}

// LogUserUpdated records a profile-update event for userID in the audit log.
func LogUserUpdated(ctx context.Context, conn *store.Connection, userID uuid.UUID) error {
	_, err := CreateLogEntry(ctx, conn, auditlog.Updated, userID, nil)
	return err
}

// LogChangeRole records a role-change event for userID in the audit log,
// storing the new role name under the role key.
func LogChangeRole(ctx context.Context, conn *store.Connection, userID uuid.UUID, r user.Role) error {
	_, err := CreateLogEntry(ctx, conn, auditlog.ChangeRole, userID, types.Map{
		key.Role: r.String(),
	})
	return err
}

// LogLinked records that external account la was linked to userID,
// storing the account type, provider, and external account ID.
func LogLinked(ctx context.Context, conn *store.Connection, userID uuid.UUID, la *account.Account) error {
	data := types.Map{
		key.Type:      la.Type.String(),
		key.Provider:  la.Provider,
		key.AccountID: la.AccountID,
	}
	_, err := CreateLogEntry(ctx, conn, auditlog.Linked, userID, data)
	return err
}
package commands

import (
	"io"
	"os"

	"github.com/sonm-io/core/cmd/cli/task_config"
	pb "github.com/sonm-io/core/proto"
	"github.com/sonm-io/core/util"
	"github.com/spf13/cobra"
)

func init() {
	nodeTaskRootCmd.AddCommand(
		nodeTaskListCmd,
		nodeTaskStartCmd,
		nodeTaskStatusCmd,
		nodeTaskLogsCmd,
		nodeTaskStopCmd,
	)
}

// nodeTaskRootCmd is the parent "tasks" command; all task subcommands hang off it.
var nodeTaskRootCmd = &cobra.Command{
	Use:   "tasks",
	Short: "Manage tasks",
}

// nodeTaskListCmd lists active tasks, optionally restricted to a single hub.
var nodeTaskListCmd = &cobra.Command{
	Use:    "list [hub_addr]",
	Short:  "Show active tasks",
	PreRun: loadKeyStoreWrapper,
	Run: func(cmd *cobra.Command, args []string) {
		node, err := NewTasksInteractor(nodeAddressFlag, timeoutFlag)
		if err != nil {
			showError(cmd, "Cannot connect to Node", err)
			os.Exit(1)
		}

		// Hub address is optional; left empty when not provided.
		var hubAddr string
		if len(args) > 0 {
			hubAddr = args[0]
		}

		list, err := node.List(hubAddr)
		if err != nil {
			showError(cmd, "Cannot get task list", err)
			os.Exit(1)
		}

		showJSON(cmd, list)
	},
}

// nodeTaskStartCmd starts a task described by a YAML definition within an existing deal.
var nodeTaskStartCmd = &cobra.Command{
	Use:    "start <deal_id> <task.yaml>",
	Short:  "Start task",
	PreRun: loadKeyStoreWrapper,
	Args:   cobra.MinimumNArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		node, err := NewTasksInteractor(nodeAddressFlag, timeoutFlag)
		if err != nil {
			showError(cmd, "Cannot connect to Node", err)
			os.Exit(1)
		}

		dealID := args[0]
		taskFile := args[1]

		taskDef, err := task_config.LoadConfig(taskFile)
		if err != nil {
			showError(cmd, "Cannot load task definition", err)
			os.Exit(1)
		}

		// The buyer is derived from the locally loaded session key.
		deal := &pb.Deal{
			Id:      dealID,
			BuyerID: util.PubKeyToAddr(sessionKey.PublicKey),
		}

		req := &pb.HubStartTaskRequest{
			Deal:          deal,
			Image:         taskDef.GetImageName(),
			Registry:      taskDef.GetRegistryName(),
			Auth:          taskDef.GetRegistryAuth(),
			PublicKeyData: taskDef.GetSSHKey(),
			Env:           taskDef.GetEnvVars(),
		}

		reply, err := node.Start(req)
		if err != nil {
			showError(cmd, "Cannot start task", err)
			os.Exit(1)
		}

		showJSON(cmd, reply)
	},
}

// nodeTaskStatusCmd shows the current status of a single task on a hub.
var nodeTaskStatusCmd = &cobra.Command{
	Use:    "status <hub_addr> <task_id>",
	Short:  "Show task status",
	PreRun: loadKeyStoreWrapper,
	Args:   cobra.MinimumNArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		node, err := NewTasksInteractor(nodeAddressFlag, timeoutFlag)
		if err != nil {
			showError(cmd, "Cannot connect to Node", err)
			os.Exit(1)
		}

		hubAddr := args[0]
		taskID := args[1]

		status, err := node.Status(taskID, hubAddr)
		if err != nil {
			showError(cmd, "Cannot get task status", err)
			os.Exit(1)
		}

		showJSON(cmd, status)
	},
}

// nodeTaskLogsCmd streams a task's logs, printing each received chunk until EOF.
var nodeTaskLogsCmd = &cobra.Command{
	Use:    "logs <hub_addr> <task_id>",
	Short:  "Retrieve task logs",
	PreRun: loadKeyStoreWrapper,
	Args:   cobra.MinimumNArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		node, err := NewTasksInteractor(nodeAddressFlag, timeoutFlag)
		if err != nil {
			showError(cmd, "Cannot connect to Node", err)
			os.Exit(1)
		}

		hubAddr := args[0]
		taskID := args[1]

		req := &pb.TaskLogsRequest{
			Id:            taskID,
			HubAddr:       hubAddr,
			Since:         since,
			AddTimestamps: addTimestamps,
			Follow:        follow,
			Tail:          tail,
			Details:       details,
		}

		logClient, err := node.Logs(req)
		if err != nil {
			showError(cmd, "Cannot get task logs", err)
			os.Exit(1)
		}

		for {
			chunk, err := logClient.Recv()
			// io.EOF marks the normal end of the server-side stream.
			if err == io.EOF {
				return
			}
			// BUG FIX: the error check was nested inside a duplicate
			// `if err != nil` block; a single check is sufficient.
			if err != nil {
				showError(cmd, "Cannot fetch log chunk", err)
				os.Exit(1)
			}

			cmd.Print(string(chunk.Data))
		}
	},
}

// nodeTaskStopCmd stops a running task on a hub.
var nodeTaskStopCmd = &cobra.Command{
	Use:    "stop <hub_addr> <task_id>",
	Short:  "Stop task",
	PreRun: loadKeyStoreWrapper,
	// BUG FIX: the handler reads args[0] and args[1], so two positional
	// arguments are required; MinimumNArgs(1) allowed a panic on args[1].
	Args: cobra.MinimumNArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		node, err := NewTasksInteractor(nodeAddressFlag, timeoutFlag)
		if err != nil {
			showError(cmd, "Cannot connect to Node", err)
			os.Exit(1)
		}

		hubAddr := args[0]
		taskID := args[1]

		status, err := node.Stop(taskID, hubAddr)
		if err != nil {
			// BUG FIX: message previously read "Cannot stop status".
			showError(cmd, "Cannot stop task", err)
			os.Exit(1)
		}

		showJSON(cmd, status)
	},
}
/* Copyright 2014 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package client import ( "net/http" "net/url" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" ) type HTTPClientFunc func(*http.Request) (*http.Response, error) func (f HTTPClientFunc) Do(req *http.Request) (*http.Response, error) { return f(req) } // FakeRESTClient provides a fake RESTClient interface. type FakeRESTClient struct { Client HTTPClient Codec runtime.Codec Legacy bool Req *http.Request Resp *http.Response Err error } func (c *FakeRESTClient) Get() *Request { return NewRequest(c, "GET", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec, c.Legacy, c.Legacy) } func (c *FakeRESTClient) Put() *Request { return NewRequest(c, "PUT", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec, c.Legacy, c.Legacy) } func (c *FakeRESTClient) Post() *Request { return NewRequest(c, "POST", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec, c.Legacy, c.Legacy) } func (c *FakeRESTClient) Delete() *Request { return NewRequest(c, "DELETE", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec, c.Legacy, c.Legacy) } func (c *FakeRESTClient) Do(req *http.Request) (*http.Response, error) { c.Req = req if c.Client != HTTPClient(nil) { return c.Client.Do(req) } return c.Resp, c.Err }
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

// Package deleter removes every service instance (and its bindings and
// service keys) belonging to a given service offering, then polls Cloud
// Foundry until all deletions have completed.
package deleter

import (
	"fmt"
	"log"
	"time"

	"github.com/pivotal-cf/on-demand-service-broker/cf"
	"github.com/pivotal-cf/on-demand-service-broker/config"
)

//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate

// CloudFoundryClient is the subset of Cloud Foundry API operations the
// Deleter needs: listing instances, inspecting their last operation, and
// deleting bindings, service keys and the instances themselves.
//counterfeiter:generate -o fakes/fake_cloud_foundry_client.go . CloudFoundryClient
type CloudFoundryClient interface {
	GetServiceInstances(filter cf.GetInstancesFilter, logger *log.Logger) ([]cf.Instance, error)
	GetLastOperationForInstance(instanceGUID string, logger *log.Logger) (cf.LastOperation, error)
	GetBindingsForInstance(instanceGUID string, logger *log.Logger) ([]cf.Binding, error)
	DeleteBinding(binding cf.Binding, logger *log.Logger) error
	GetServiceKeysForInstance(instanceGUID string, logger *log.Logger) ([]cf.ServiceKey, error)
	DeleteServiceKey(serviceKey cf.ServiceKey, logger *log.Logger) error
	DeleteServiceInstance(instanceGUID string, logger *log.Logger) error
}

// Sleeper abstracts time.Sleep so polling can be faked in tests.
//counterfeiter:generate -o fakes/fake_sleeper.go . Sleeper
type Sleeper interface {
	Sleep(d time.Duration)
}

// Config is the YAML configuration consumed by the deleter tool.
// Polling values are expressed in whole seconds.
type Config struct {
	ServiceCatalog             ServiceCatalog `yaml:"service_catalog"`
	DisableSSLCertVerification bool           `yaml:"disable_ssl_cert_verification"` // TODO use the CF.disable_ssl_cert_verification field
	CF                         config.CF      `yaml:"cf"`
	PollingInterval            int            `yaml:"polling_interval"`
	PollingInitialOffset       int            `yaml:"polling_initial_offset"`
}

// ServiceCatalog identifies the service offering whose instances are deleted.
type ServiceCatalog struct {
	ID string `yaml:"id"`
}

// Deleter orchestrates the deletion of all instances of a service offering.
type Deleter struct {
	logger               *log.Logger
	pollingInitialOffset time.Duration
	pollingInterval      time.Duration
	cfClient             CloudFoundryClient
	sleeper              Sleeper
}

// New builds a Deleter. pollingInitialOffset and pollingInterval are given
// in seconds and converted to durations here.
func New(cfClient CloudFoundryClient, sleeper Sleeper, pollingInitialOffset int, pollingInterval int, logger *log.Logger) *Deleter {
	return &Deleter{
		logger:               logger,
		pollingInitialOffset: time.Duration(pollingInitialOffset) * time.Second,
		pollingInterval:      time.Duration(pollingInterval) * time.Second,
		cfClient:             cfClient,
		sleeper:              sleeper,
	}
}

// DeleteAllServiceInstances deletes every instance of the service offering
// identified by serviceUniqueID: for each instance it removes bindings and
// service keys, issues the delete (unless one is already in progress), and
// polls until the instance is gone. It finishes by re-listing instances and
// failing if any remain.
func (d *Deleter) DeleteAllServiceInstances(serviceUniqueID string) error {
	// NOTE(review): "polling_intial_offset" typo below is in the emitted log
	// line; correcting it would change runtime output.
	d.logger.Printf("Deleter Configuration: polling_intial_offset: %v, polling_interval: %v.", d.pollingInitialOffset.Seconds(), d.pollingInterval.Seconds())

	instancesFilter := cf.GetInstancesFilter{ServiceOfferingID: serviceUniqueID}
	serviceInstances, err := d.cfClient.GetServiceInstances(instancesFilter, d.logger)
	if err != nil {
		return err
	}

	if len(serviceInstances) == 0 {
		d.logger.Println("No service instances found.")
		return nil
	}

	for _, instance := range serviceInstances {
		err = d.deleteBindings(instance.GUID)
		if err != nil {
			return err
		}

		err = d.deleteServiceKeys(instance.GUID)
		if err != nil {
			return err
		}

		// Best effort: if we cannot tell whether a delete is already in
		// progress, deleteInProgress is false and we attempt the delete anyway.
		deleteInProgress, err := d.deleteInProgress(instance.GUID)
		if err != nil {
			d.logger.Printf("could not retrieve information about service instance %s, will try to delete", instance.GUID)
		}

		if deleteInProgress {
			d.logger.Printf("service instance %s is being deleted, will skip sending the delete request", instance.GUID)
		} else {
			if err = d.deleteServiceInstance(instance.GUID); err != nil {
				return err
			}
		}

		d.logger.Printf("Waiting for service instance %s to be deleted", instance.GUID)

		err = d.pollInstanceDeleteStatus(instance.GUID)
		if err != nil {
			return err
		}
	}

	// Final verification pass: the offering should now have zero instances.
	serviceInstances, err = d.cfClient.GetServiceInstances(instancesFilter, d.logger)
	if err != nil {
		return err
	}

	if len(serviceInstances) != 0 {
		return fmt.Errorf("expected 0 instances for service offering with unique ID: %s. Got %d instance(s).", serviceUniqueID, len(serviceInstances))
	}

	return nil
}

// deleteBindings deletes every app binding of the instance. A
// ResourceNotFoundError from the listing call means the instance is already
// gone and is treated as success.
func (d Deleter) deleteBindings(instanceGUID string) error {
	bindings, err := d.cfClient.GetBindingsForInstance(instanceGUID, d.logger)
	switch err.(type) {
	case cf.ResourceNotFoundError:
		return nil
	case error:
		return err
	}
	// A nil error matches neither case above, so execution falls through here.

	for _, binding := range bindings {
		d.logger.Printf("Deleting binding %s of service instance %s to app %s\n", binding.GUID, instanceGUID, binding.AppGUID)
		err = d.cfClient.DeleteBinding(binding, d.logger)
		if err != nil {
			return err
		}
	}

	return nil
}

// deleteServiceKeys deletes every service key of the instance. As with
// bindings, a ResourceNotFoundError is treated as success.
func (d Deleter) deleteServiceKeys(instanceGUID string) error {
	serviceKeys, err := d.cfClient.GetServiceKeysForInstance(instanceGUID, d.logger)
	switch err.(type) {
	case cf.ResourceNotFoundError:
		return nil
	case error:
		return err
	}

	for _, serviceKey := range serviceKeys {
		d.logger.Printf("Deleting service key %s of service instance %s\n", serviceKey.GUID, instanceGUID)
		err = d.cfClient.DeleteServiceKey(serviceKey, d.logger)
		if err != nil {
			return err
		}
	}

	return nil
}

// deleteServiceInstance issues the delete request for a single instance.
func (d Deleter) deleteServiceInstance(instanceGUID string) error {
	d.logger.Printf("Deleting service instance %s\n", instanceGUID)
	return d.cfClient.DeleteServiceInstance(instanceGUID, d.logger)
}

// pollInstanceDeleteStatus blocks until the instance disappears (success),
// or the last operation indicates the delete failed or was superseded.
// It sleeps pollingInitialOffset once, then pollingInterval per attempt.
func (d Deleter) pollInstanceDeleteStatus(instanceGUID string) error {
	d.sleeper.Sleep(d.pollingInitialOffset)
	for {
		d.sleeper.Sleep(d.pollingInterval)

		lastOperation, err := d.cfClient.GetLastOperationForInstance(instanceGUID, d.logger)
		switch err.(type) {
		case cf.ResourceNotFoundError:
			// The instance no longer exists: deletion completed.
			d.logger.Printf("Result: deleted service instance %s", instanceGUID)
			return nil
		case cf.UnauthorizedError, cf.ForbiddenError, cf.InvalidResponseError:
			return fmt.Errorf("Result: failed to delete service instance %s. Error: %s.", instanceGUID, err)
		case error:
			// Any other error is treated as transient; `continue` targets the
			// enclosing for loop, retrying on the next tick.
			continue
		}

		if !lastOperation.IsDelete() {
			return fmt.Errorf(
				"Result: failed to delete service instance %s. Unexpected operation type: '%s'.",
				instanceGUID,
				lastOperation.Type,
			)
		}

		if lastOperation.OperationFailed() {
			return fmt.Errorf("Result: failed to delete service instance %s. Delete operation failed.", instanceGUID)
		}
	}
}

// deleteInProgress reports whether the instance's last operation is already
// a delete (i.e. a delete request was previously accepted by CF).
func (d Deleter) deleteInProgress(instanceGUID string) (bool, error) {
	lastOperation, err := d.cfClient.GetLastOperationForInstance(instanceGUID, d.logger)
	if err != nil {
		return false, err
	}

	return lastOperation.IsDelete(), nil
}
// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package deqp provides functions for running dEQP, as well as loading and storing the results.
package deqp

import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"swiftshader.googlesource.com/SwiftShader/tests/regres/cov"
	"swiftshader.googlesource.com/SwiftShader/tests/regres/shell"
	"swiftshader.googlesource.com/SwiftShader/tests/regres/testlist"
	"swiftshader.googlesource.com/SwiftShader/tests/regres/util"
)

// dataVersion is the serialization version of Results; LoadResults rejects
// files written with any other version.
const dataVersion = 1

var (
	// Regular expression to parse the output of a dEQP test.
	deqpRE = regexp.MustCompile(`(Fail|Pass|NotSupported|CompatibilityWarning|QualityWarning|InternalError) \(([^\)]*)\)`)
	// Regular expression to parse a test that failed due to UNIMPLEMENTED()
	unimplementedRE = regexp.MustCompile(`[^\n]*UNIMPLEMENTED:[^\n]*`)
	// Regular expression to parse a test that failed due to UNSUPPORTED()
	unsupportedRE = regexp.MustCompile(`[^\n]*UNSUPPORTED:[^\n]*`)
	// Regular expression to parse a test that failed due to UNREACHABLE()
	unreachableRE = regexp.MustCompile(`[^\n]*UNREACHABLE:[^\n]*`)
	// Regular expression to parse a test that failed due to ASSERT()
	assertRE = regexp.MustCompile(`[^\n]*ASSERT\([^\)]*\)[^\n]*`)
	// Regular expression to parse a test that failed due to ABORT()
	abortRE = regexp.MustCompile(`[^\n]*ABORT:[^\n]*`)
	// Regular expression to parse individual test names and output
	// NOTE(review): this is a non-raw string, so the two trailing dots are
	// regex wildcards — presumably matching the two characters dEQP prints
	// after the quoted test name; confirm against actual dEQP output.
	caseOutputRE = regexp.MustCompile("Test case '([^']*)'..")
)

// Config contains the inputs required for running dEQP on a group of test lists.
type Config struct {
	ExeEgl           string // Path to the EGL dEQP executable.
	ExeGles2         string // Path to the GLES2 dEQP executable.
	ExeGles3         string // Path to the GLES3 dEQP executable.
	ExeVulkan        string // Path to the Vulkan dEQP executable.
	TempDir          string // Directory for temporary log files, coverage output.
	TestLists        testlist.Lists
	Env              []string          // Environment for the dEQP child processes.
	LogReplacements  map[string]string // Substrings to rewrite in captured output.
	NumParallelTests int               // Number of test-runner goroutines per API.
	MaxTestsPerProc  int               // Max tests handed to one dEQP process.
	CoverageEnv      *cov.Env          // Non-nil enables coverage collection.
	TestTimeout      time.Duration     // Per-test timeout (multiplied by batch size).
	ValidationLayer  bool              // Enables the Vulkan validation layer.
}

// Results holds the results of tests across all APIs.
// The Results structure may be serialized to cache results.
type Results struct {
	Version  int // Serialization version; compared against dataVersion on load.
	Error    string
	Tests    map[string]TestResult
	Coverage *cov.Tree
	Duration time.Duration
}

// TestResult holds the results of a single dEQP test.
type TestResult struct {
	Test      string
	Status    testlist.Status
	TimeTaken time.Duration
	Err       string `json:",omitempty"`
	Coverage  *cov.Coverage
}

// String returns "name: status" with the error appended in parentheses when present.
func (r TestResult) String() string {
	if r.Err != "" {
		return fmt.Sprintf("%s: %s (%s)", r.Test, r.Status, r.Err)
	}
	return fmt.Sprintf("%s: %s", r.Test, r.Status)
}

// LoadResults loads cached test results from disk.
func LoadResults(path string) (*Results, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open '%s' for loading test results: %w", path, err)
	}
	defer f.Close()

	var out Results
	if err := json.NewDecoder(f).Decode(&out); err != nil {
		return nil, err
	}

	// Reject caches written by an incompatible version of this package.
	if out.Version != dataVersion {
		return nil, errors.New("Data is from an old version")
	}

	return &out, nil
}

// Save saves (caches) test results to disk.
func (r *Results) Save(path string) error {
	if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
		return fmt.Errorf("failed to make '%s' for saving test results: %w", filepath.Dir(path), err)
	}

	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("failed to open '%s' for saving test results: %w", path, err)
	}
	defer f.Close()

	enc := json.NewEncoder(f)
	enc.SetIndent("", " ")
	if err := enc.Encode(r); err != nil {
		return fmt.Errorf("failed to encode test results: %w", err)
	}

	return nil
}

// Run runs all the tests.
func (c *Config) Run() (*Results, error) {
	start := time.Now()

	if c.TempDir == "" {
		dir, err := ioutil.TempDir("", "deqp")
		if err != nil {
			return nil, fmt.Errorf("failed to generate temporary directory: %w", err)
		}
		c.TempDir = dir
	}

	// Wait group that completes once all the tests have finished.
	wg := sync.WaitGroup{}
	results := make(chan TestResult, 256)

	numTests := 0
	goroutineIndex := 0

	// For each API that we are testing
	for _, list := range c.TestLists {
		// Resolve the test runner
		exe, supportsCoverage := "", false
		switch list.API {
		case testlist.EGL:
			exe = c.ExeEgl
		case testlist.GLES2:
			exe = c.ExeGles2
		case testlist.GLES3:
			exe = c.ExeGles3
		case testlist.Vulkan:
			exe, supportsCoverage = c.ExeVulkan, true
		default:
			return nil, fmt.Errorf("Unknown API '%v'", list.API)
		}
		if !util.IsFile(exe) {
			return nil, fmt.Errorf("failed to find dEQP executable at '%s'", exe)
		}

		// Build a chan for the test names to be run.
		tests := make(chan string, len(list.Tests))

		numParallelTests := c.NumParallelTests
		if list.API != testlist.Vulkan {
			// OpenGL tests attempt to open lots of X11 display connections,
			// which may cause us to run out of handles. This maximum was
			// determined experimentally on a 72-core system.
			maxParallelGLTests := 16
			if numParallelTests > maxParallelGLTests {
				numParallelTests = maxParallelGLTests
			}
		}

		// Start a number of go routines to run the tests.
		// goroutineIndex is globally unique across all APIs, so each routine
		// gets its own coverage/log file names.
		wg.Add(numParallelTests)
		for i := 0; i < numParallelTests; i++ {
			go func(index int) {
				c.TestRoutine(exe, tests, results, index, supportsCoverage)
				wg.Done()
			}(goroutineIndex)
			goroutineIndex++
		}

		// Shuffle the test list.
		// This attempts to mix heavy-load tests with lighter ones.
		// The fixed seed keeps the shuffle deterministic across runs.
		shuffled := make([]string, len(list.Tests))
		for i, j := range rand.New(rand.NewSource(42)).Perm(len(list.Tests)) {
			shuffled[i] = list.Tests[j]
		}

		// Hand the tests to the TestRoutines.
		for _, t := range shuffled {
			tests <- t
		}

		// Close the tests chan to indicate that there are no more tests to run.
		// The TestRoutine functions will return once all tests have been
		// run.
		close(tests)

		numTests += len(list.Tests)
	}

	out := Results{
		Version: dataVersion,
		Tests:   map[string]TestResult{},
	}

	if c.CoverageEnv != nil {
		out.Coverage = &cov.Tree{}
		out.Coverage.Add(cov.Path{}, c.CoverageEnv.AllSourceFiles())
	}

	// Collect the results.
	finished := make(chan struct{})
	lastUpdate := time.Now()
	go func() {
		// Note: `start` shadows the outer start; the completion estimate is
		// based on when result collection began.
		start, i := time.Now(), 0
		for r := range results {
			i++
			if time.Since(lastUpdate) > time.Minute {
				lastUpdate = time.Now()
				remaining := numTests - i
				log.Printf("Ran %d/%d tests (%v%%). Estimated completion in %v.\n",
					i, numTests, util.Percent(i, numTests),
					(time.Since(start)/time.Duration(i))*time.Duration(remaining))
			}
			out.Tests[r.Test] = r
			if r.Coverage != nil {
				path := strings.Split(r.Test, ".")
				out.Coverage.Add(cov.Path(path), r.Coverage)
				r.Coverage = nil // Free memory
			}
		}
		close(finished)
	}()

	wg.Wait()      // Block until all the deqpTestRoutines have finished.
	close(results) // Signal no more results.
	<-finished     // And wait for the result collecting go-routine to finish.

	out.Duration = time.Since(start)

	return &out, nil
}

// TestRoutine repeatedly runs the dEQP test executable exe with the tests
// taken from tests. The output of the dEQP test is parsed, and the test result
// is written to results.
// TestRoutine only returns once the tests chan has been closed.
// TestRoutine does not close the results chan.
func (c *Config) TestRoutine(exe string, tests <-chan string, results chan<- TestResult, goroutineIndex int, supportsCoverage bool) {
	// Context for the GCOV_PREFIX environment variable:
	// If you compile SwiftShader with gcc and the --coverage flag, the build will contain coverage instrumentation.
	// We can use this to get the code coverage of SwiftShader from running dEQP.
	// The coverage instrumentation reads the existing coverage files on start-up (at a hardcoded path alongside the
	// SwiftShader build), updates coverage info as the programs runs, then (over)writes the coverage files on exit.
	// Thus, multiple parallel processes will race when updating coverage information. The GCOV_PREFIX environment
	// variable adds a prefix to the hardcoded paths.
	// E.g. Given GCOV_PREFIX=/tmp/coverage, the hardcoded path /ss/build/a.gcno becomes /tmp/coverage/ss/build/a.gcno.
	// This is mainly intended for running the target program on a different machine where the hardcoded paths don't
	// make sense. It can also be used to avoid races. It would be trivial to avoid races if the GCOV_PREFIX variable
	// supported macro variables like the Clang code coverage "%p" variable that expands to the process ID; in this
	// case, we could use GCOV_PREFIX=/tmp/coverage/%p to avoid races. Unfortunately, gcc does not support this.
	// Furthermore, processing coverage information from many directories can be slow; we start a lot of dEQP child
	// processes, each of which will likely get a unique process ID. In practice, we only need one directory per go
	// routine.

	// If GCOV_PREFIX is in Env, replace occurrences of "PROC_ID" in GCOV_PREFIX with goroutineIndex.
	// This avoids races between parallel child processes reading and writing coverage output files.
	// For example, GCOV_PREFIX="/tmp/gcov_output/PROC_ID" becomes GCOV_PREFIX="/tmp/gcov_output/1" in the first go routine.
	// You might expect PROC_ID to be the process ID of some process, but the only real requirement is that
	// it is a unique ID between the *parallel* child processes.
	env := make([]string, 0, len(c.Env))
	for _, v := range c.Env {
		if strings.HasPrefix(v, "GCOV_PREFIX=") {
			v = strings.ReplaceAll(v, "PROC_ID", strconv.Itoa(goroutineIndex))
		}
		env = append(env, v)
	}

	coverageFile := filepath.Join(c.TempDir, fmt.Sprintf("%v.profraw", goroutineIndex))
	if supportsCoverage {
		if c.CoverageEnv != nil {
			env = cov.AppendRuntimeEnv(env, coverageFile)
		}
	}

	logPath := "/dev/null" // TODO(bclayton): Try "nul" on windows.
	if !util.IsFile(logPath) {
		logPath = filepath.Join(c.TempDir, fmt.Sprintf("%v.log", goroutineIndex))
	}

	// Batch tests into groups of at most MaxTestsPerProc per dEQP process.
	testNames := []string{}
	for name := range tests {
		testNames = append(testNames, name)
		if len(testNames) >= c.MaxTestsPerProc {
			c.PerformTests(exe, env, coverageFile, logPath, testNames, supportsCoverage, results)
			// Clear list of test names
			testNames = testNames[:0]
		}
	}
	// Flush the final, partially-filled batch.
	if len(testNames) > 0 {
		c.PerformTests(exe, env, coverageFile, logPath, testNames, supportsCoverage, results)
	}
}

// PerformTests runs a single dEQP process over testNames, splits the combined
// output into per-test-case chunks, and writes one TestResult per test to
// results. If the process appears to have crashed, the batch is re-run one
// test at a time to isolate the crashing test.
func (c *Config) PerformTests(exe string, env []string, coverageFile string, logPath string, testNames []string, supportsCoverage bool, results chan<- TestResult) {
	// log.Printf("Running test(s) '%s'\n", testNames)

	start := time.Now()

	// Set validation layer according to flag.
	validation := "disable"
	if c.ValidationLayer {
		validation = "enable"
	}

	// The list of test names will be passed to stdin, since the deqp-stdin-caselist option is used
	stdin := strings.Join(testNames, "\n") + "\n"

	numTests := len(testNames)
	// The overall timeout scales with the number of tests in the batch.
	timeout := c.TestTimeout * time.Duration(numTests)
	outRaw, deqpErr := shell.Exec(timeout, exe, filepath.Dir(exe), env, stdin,
		"--deqp-validation="+validation,
		"--deqp-surface-type=pbuffer",
		"--deqp-shadercache=disable",
		"--deqp-log-images=disable",
		"--deqp-log-shader-sources=disable",
		"--deqp-log-decompiled-spirv=disable",
		"--deqp-log-empty-loginfo=disable",
		"--deqp-log-flush=disable",
		"--deqp-log-filename="+logPath,
		"--deqp-stdin-caselist")
	duration := time.Since(start)
	out := string(outRaw)
	// Scrub machine-specific strings from the captured output.
	out = strings.ReplaceAll(out, exe, "<dEQP>")
	for k, v := range c.LogReplacements {
		out = strings.ReplaceAll(out, k, v)
	}

	var coverage *cov.Coverage
	if c.CoverageEnv != nil && supportsCoverage {
		var covErr error
		coverage, covErr = c.CoverageEnv.Import(coverageFile)
		if covErr != nil {
			log.Printf("Warning: Failed to process test coverage for test '%v'. %v", testNames, covErr)
		}
		os.Remove(coverageFile)
	}

	if numTests > 1 {
		// Separate output per test case
		caseOutputs := caseOutputRE.Split(out, -1)

		// If the output isn't as expected, a crash may have happened
		isCrash := (len(caseOutputs) != (numTests + 1))

		// Verify the exit code to see if a crash has happened
		var exitErr *exec.ExitError
		if errors.As(deqpErr, &exitErr) {
			if exitErr.ExitCode() == 255 {
				isCrash = true
			}
		}

		// If a crash has happened, re-run tests separately
		if isCrash {
			// Re-run tests one by one
			for _, testName := range testNames {
				singleTest := []string{testName}
				c.PerformTests(exe, env, coverageFile, logPath, singleTest, supportsCoverage, results)
			}
		} else {
			caseOutputs = caseOutputs[1:] // Ignore text up to first "Test case '...'"
			caseNameMatches := caseOutputRE.FindAllStringSubmatch(out, -1)
			caseNames := make([]string, len(caseNameMatches))
			for i, m := range caseNameMatches {
				caseNames[i] = m[1]
			}
			// Per-test timing is unavailable in batch mode; attribute an even
			// share of the batch duration to each test.
			averageDuration := duration / time.Duration(numTests)
			for i, caseOutput := range caseOutputs {
				results <- c.AnalyzeOutput(caseNames[i], caseOutput, averageDuration, coverage, deqpErr)
			}
		}
	} else {
		results <- c.AnalyzeOutput(testNames[0], out, duration, coverage, deqpErr)
	}
}

// AnalyzeOutput converts the raw dEQP output for a single test into a
// TestResult: known failure signatures (UNIMPLEMENTED, ASSERT, ...) are
// checked first, then the process error is classified, and finally the
// "Status (detail)" line emitted by dEQP is parsed.
func (c *Config) AnalyzeOutput(name string, out string, duration time.Duration, coverage *cov.Coverage, err error) TestResult {
	for _, test := range []struct {
		re *regexp.Regexp
		s  testlist.Status
	}{
		{unimplementedRE, testlist.Unimplemented},
		{unsupportedRE, testlist.Unsupported},
		{unreachableRE, testlist.Unreachable},
		{assertRE, testlist.Assert},
		{abortRE, testlist.Abort},
	} {
		if s := test.re.FindString(out); s != "" {
			return TestResult{
				Test:      name,
				Status:    test.s,
				TimeTaken: duration,
				Err:       s,
				Coverage:  coverage,
			}
		}
	}

	// Don't treat non-zero error codes as crashes.
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		if exitErr.ExitCode() != 255 {
			out += fmt.Sprintf("\nProcess terminated with code %d", exitErr.ExitCode())
			err = nil
		}
	}

	// default = any remaining non-nil error is treated as a crash.
	switch err.(type) {
	default:
		return TestResult{
			Test:      name,
			Status:    testlist.Crash,
			TimeTaken: duration,
			Err:       out,
			Coverage:  coverage,
		}
	case shell.ErrTimeout:
		log.Printf("Timeout for test '%v'\n", name)
		return TestResult{
			Test:      name,
			Status:    testlist.Timeout,
			TimeTaken: duration,
			Coverage:  coverage,
		}
	case nil:
		toks := deqpRE.FindStringSubmatch(out)
		if len(toks) < 3 {
			err := fmt.Sprintf("Couldn't parse test '%v' output:\n%s", name, out)
			log.Println("Warning: ", err)
			return TestResult{Test: name, Status: testlist.Fail, Err: err, Coverage: coverage}
		}

		switch toks[1] {
		case "Pass":
			return TestResult{Test: name, Status: testlist.Pass, TimeTaken: duration, Coverage: coverage}
		case "NotSupported":
			return TestResult{Test: name, Status: testlist.NotSupported, TimeTaken: duration, Coverage: coverage}
		case "CompatibilityWarning":
			return TestResult{Test: name, Status: testlist.CompatibilityWarning, TimeTaken: duration, Coverage: coverage}
		case "QualityWarning":
			return TestResult{Test: name, Status: testlist.QualityWarning, TimeTaken: duration, Coverage: coverage}
		case "Fail":
			// Keep the detail only when it adds information beyond "Fail".
			var err string
			if toks[2] != "Fail" {
				err = toks[2]
			}
			return TestResult{Test: name, Status: testlist.Fail, Err: err, TimeTaken: duration, Coverage: coverage}
		case "InternalError":
			var err string
			if toks[2] != "InternalError" {
				err = toks[2]
			}
			return TestResult{Test: name, Status: testlist.InternalError, Err: err, TimeTaken: duration, Coverage: coverage}
		default:
			err := fmt.Sprintf("Couldn't parse test output:\n%s", out)
			log.Println("Warning: ", err)
			return TestResult{Test: name, Status: testlist.Fail, Err: err, TimeTaken: duration, Coverage: coverage}
		}
	}
}
package domain

import (
	"time"

	"github.com/traPtitech/trap-collection-server/src/domain/values"
)

// LauncherSession
// Domain object representing the session created after a launcher
// authenticates with its product key.
// The session becomes invalid once a fixed period has elapsed since the
// product-key authentication, at which point re-authentication is required.
// The expiry time cannot be extended.
type LauncherSession struct {
	id          values.LauncherSessionID
	accessToken values.LauncherSessionAccessToken
	expiresAt   time.Time
}

// NewLauncherSession constructs a LauncherSession with the given id,
// access token, and expiry time.
func NewLauncherSession(
	id values.LauncherSessionID,
	accessToken values.LauncherSessionAccessToken,
	expiresAt time.Time,
) *LauncherSession {
	return &LauncherSession{
		id:          id,
		accessToken: accessToken,
		expiresAt:   expiresAt,
	}
}

// GetID returns the session's id.
func (ls *LauncherSession) GetID() values.LauncherSessionID {
	return ls.id
}

// GetAccessToken returns the session's access token.
func (ls *LauncherSession) GetAccessToken() values.LauncherSessionAccessToken {
	return ls.accessToken
}

// GetExpiresAt returns the session's expiry time.
func (ls *LauncherSession) GetExpiresAt() time.Time {
	return ls.expiresAt
}

// IsExpired reports whether the session's expiry time has passed
// (true when expired).
func (ls *LauncherSession) IsExpired() bool {
	return time.Now().After(ls.expiresAt)
}
// SPDX-License-Identifier: MIT // Package site 用于生成网站内容 // // 包括网站的基本信息,以及文档的翻译内容等。 package site import ( "encoding/xml" "io/ioutil" "os" "github.com/issue9/errwrap" "golang.org/x/text/language/display" "github.com/caixw/apidoc/v7/core" "github.com/caixw/apidoc/v7/internal/ast" "github.com/caixw/apidoc/v7/internal/docs" "github.com/caixw/apidoc/v7/internal/lang" "github.com/caixw/apidoc/v7/internal/locale" ) const ( siteFilename = "site.xml" // 配置文件的文件名 docBasename = "locale." // 翻译文档文件名的前缀部分,一般格式为 docBasename.{locale}.xml ) type site struct { XMLName struct{} `xml:"site"` Name string `xml:"name"` Version string `xml:"version"` Repo string `xml:"repo"` URL string `xml:"url"` Languages []language `xml:"languages>language"` Locales []loc `xml:"locales>locale"` } type language struct { ID string `xml:"id,attr"` Name string `xml:",chardata"` } type loc struct { ID string `xml:"id,attr"` Href string `xml:"href,attr"` Title string `xml:"title,attr"` Doc string `xml:"doc,attr"` } type doc struct { XMLName struct{} `xml:"locale"` Spec []*spec `xml:"spec>type"` Commands []*command `xml:"commands>command"` Config []*item `xml:"config>item"` } type spec struct { Name string `xml:"name,attr,omitempty"` Usage innerXML `xml:"usage,omitempty"` Items []*item `xml:"item,omitempty"` } type innerXML struct { Text string `xml:",innerxml"` } type item struct { Name string `xml:"name,attr"` // 变量名 Type string `xml:"type,attr"` // 变量的类型 Array bool `xml:"array,attr"` Required bool `xml:"required,attr"` Usage string `xml:",innerxml"` } type command struct { Name string `xml:"name,attr"` Usage string `xml:",innerxml"` } // Write 输出站点中所有需要自动生成的内容 func Write(target core.URI) error { site, d, err := gen() if err != nil { return err } if err := writeXML(target.Append(siteFilename), site, "\t"); err != nil { return err } for filename, dd := range d { if err := writeXML(target.Append(filename), dd, "\t"); err != nil { return err } } return nil } func writeXML(uri core.URI, v any, indent string) 
error { data, err := xml.MarshalIndent(v, "", indent) if err != nil { return err } path, err := uri.File() if err != nil { return err } var w errwrap.Buffer w.WString(xml.Header).WString("\n"). WString("<!-- ").WString(docs.FileHeader).WString(" -->\n\n"). WBytes(data). WString("\n") // 统一代码风格,文件末尾加一空行。 if w.Err != nil { return w.Err } return ioutil.WriteFile(path, w.Bytes(), os.ModePerm) } func gen() (*site, map[string]*doc, error) { site := &site{ Name: core.Name, Version: ast.Version, Repo: core.RepoURL, URL: core.OfficialURL, Languages: make([]language, 0, len(lang.Langs())), Locales: make([]loc, 0, len(locale.Tags())), } for _, lang := range lang.Langs() { site.Languages = append(site.Languages, language{ ID: lang.ID, Name: lang.DisplayName, }) } tags := locale.Tags() docs := make(map[string]*doc, len(tags)) for _, tag := range tags { locale.SetTag(tag) id := tag.String() docFilename := buildDocFilename(id) href := "index.xml" if id != locale.DefaultLocaleID { href = "index." + id + ".xml" } site.Locales = append(site.Locales, loc{ ID: id, Href: href, Title: display.Self.Name(tag), Doc: docFilename, }) dd, err := genDoc() if err != nil { return nil, nil, err } docs[docFilename] = dd } return site, docs, nil } func genDoc() (*doc, error) { doc := &doc{} if err := doc.newCommands(); err != nil { return nil, err } if err := doc.newConfig(); err != nil { return nil, err } if err := doc.newSpec(&ast.APIDoc{}); err != nil { return nil, err } return doc, nil } func buildDocFilename(id string) string { return docBasename + id + ".xml" }
package backend

import (
	"context"
	"errors"
	"io"
	"time"

	"github.com/travis-ci/worker/config"
)

// init registers the "fake" provider along with its configuration knobs.
func init() {
	Register("fake", "Fake", map[string]string{
		"LOG_OUTPUT": "faked log output to write",
		"RUN_SLEEP":  "faked runtime sleep duration",
		"ERROR":      "error out all jobs (useful for testing requeue storms)",
	}, newFakeProvider)
}

// fakeProvider is a Provider stub whose behavior is driven entirely by its
// provider configuration.
type fakeProvider struct {
	cfg *config.ProviderConfig
}

func newFakeProvider(cfg *config.ProviderConfig) (Provider, error) {
	return &fakeProvider{cfg: cfg}, nil
}

// SupportsProgress reports that the fake provider emits no progress updates.
func (p *fakeProvider) SupportsProgress() bool {
	return false
}

// StartWithProgress ignores the Progresser and simply delegates to Start.
func (p *fakeProvider) StartWithProgress(ctx context.Context, startAttributes *StartAttributes, _ Progresser) (Instance, error) {
	return p.Start(ctx, startAttributes)
}

// Start returns a fake instance, optionally carrying a simulated startup
// duration parsed from the STARTUP_DURATION config value.
func (p *fakeProvider) Start(ctx context.Context, _ *StartAttributes) (Instance, error) {
	var startupDur time.Duration

	if p.cfg.IsSet("STARTUP_DURATION") {
		parsed, err := time.ParseDuration(p.cfg.Get("STARTUP_DURATION"))
		if err != nil {
			return nil, err
		}
		startupDur = parsed
	}

	return &fakeInstance{p: p, startupDuration: startupDur}, nil
}

// Setup is a no-op for the fake provider.
func (p *fakeProvider) Setup(ctx context.Context) error {
	return nil
}

// fakeInstance pretends to run a job, writing canned log output.
type fakeInstance struct {
	p               *fakeProvider
	startupDuration time.Duration
}

func (i *fakeInstance) Warmed() bool {
	return false
}

func (i *fakeInstance) SupportsProgress() bool {
	return false
}

// UploadScript accepts any script without storing it.
func (i *fakeInstance) UploadScript(ctx context.Context, script []byte) error {
	return nil
}

// RunScript simulates a job run: it can be forced to fail (ERROR), can sleep
// (RUN_SLEEP), and always writes LOG_OUTPUT to the supplied writer.
func (i *fakeInstance) RunScript(ctx context.Context, writer io.Writer) (*RunResult, error) {
	if i.p.cfg.Get("ERROR") == "true" {
		return &RunResult{Completed: false}, errors.New("fake provider is configured to error all jobs")
	}

	if i.p.cfg.IsSet("RUN_SLEEP") {
		sleepDur, err := time.ParseDuration(i.p.cfg.Get("RUN_SLEEP"))
		if err != nil {
			return &RunResult{Completed: false}, err
		}
		time.Sleep(sleepDur)
	}

	if _, err := writer.Write([]byte(i.p.cfg.Get("LOG_OUTPUT"))); err != nil {
		return &RunResult{Completed: false}, err
	}

	return &RunResult{Completed: true}, nil
}

func (i *fakeInstance) DownloadTrace(ctx context.Context) ([]byte, error) {
	return nil, ErrDownloadTraceNotImplemented
}

func (i *fakeInstance) Stop(ctx context.Context) error {
	return nil
}

func (i *fakeInstance) ID() string {
	return "fake"
}

func (i *fakeInstance) ImageName() string {
	return "fake"
}

func (i *fakeInstance) StartupDuration() time.Duration {
	return i.startupDuration
}
package persistent

import (
	"log"
	"strconv"
	"time"

	"github.com/EventStore/EventStore-Client-Go/position"
	"github.com/EventStore/EventStore-Client-Go/protos/persistent"
	"github.com/EventStore/EventStore-Client-Go/protos/shared"
	system_metadata "github.com/EventStore/EventStore-Client-Go/systemmetadata"
	"github.com/gofrs/uuid"
)

// eventIDFromProto extracts the event ID from a recorded-event proto.
// A malformed ID string yields uuid.Nil (FromStringOrNil never errors).
func eventIDFromProto(recordedEvent *persistent.ReadResp_ReadEvent_RecordedEvent) uuid.UUID {
	id := recordedEvent.GetId()
	idString := id.GetString_()

	return uuid.FromStringOrNil(idString)
}

// toProtoUUID wraps a uuid.UUID in the proto UUID message, using the
// string-representation variant.
func toProtoUUID(id uuid.UUID) *shared.UUID {
	return &shared.UUID{
		Value: &shared.UUID_String_{
			String_: id.String(),
		},
	}
}

// getContentTypeFromProto returns the event's content type as stored in the
// system metadata map.
func getContentTypeFromProto(recordedEvent *persistent.ReadResp_ReadEvent_RecordedEvent) string {
	return recordedEvent.Metadata[system_metadata.SystemMetadataKeysContentType]
}

// createdFromProto parses the event creation timestamp out of the system
// metadata and returns it as a UTC time.Time.
//
// NOTE(review): log.Fatalf terminates the whole process on a malformed
// timestamp; returning an error would be friendlier for library code.
func createdFromProto(recordedEvent *persistent.ReadResp_ReadEvent_RecordedEvent) time.Time {
	timeSinceEpoch, err := strconv.ParseInt(
		recordedEvent.Metadata[system_metadata.SystemMetadataKeysCreated], 10, 64)
	if err != nil {
		log.Fatalf("Failed to parse created date as int from %+v",
			recordedEvent.Metadata[system_metadata.SystemMetadataKeysCreated])
	}
	// The metadata contains the number of .NET "ticks" (100ns increments) since the UNIX epoch
	return time.Unix(0, timeSinceEpoch*100).UTC()
}

// positionFromProto builds a commit/prepare Position from the proto fields.
func positionFromProto(recordedEvent *persistent.ReadResp_ReadEvent_RecordedEvent) position.Position {
	return position.Position{
		Commit:  recordedEvent.GetCommitPosition(),
		Prepare: recordedEvent.GetPreparePosition(),
	}
}
package metas

import (
	"fmt"
	"strings"

	"github.com/daiguadaidai/haechi/utils"
)

// Partition describes a table partitioning scheme: its type, the partitioned
// columns and, depending on Type, a LIST or RANGE partition definition.
type Partition struct {
	Type           string          `json:"type" form:"type"`
	ColumnNames    []string        `json:"column_names" form:"column_names"`
	ListPartition  *ListPartition  `json:"list_partition" form:"list_partition"`
	RangePartition *RangePartition `json:"range_partition" form:"range_partition"`
}

// ListPartition holds the items of a LIST partition definition.
type ListPartition struct {
	ListPartitionItems []*ListPartitionItem `json:"list_partition_items" form:"list_partition_items"`
}

func NewListPartition() *ListPartition {
	return &ListPartition{
		ListPartitionItems: make([]*ListPartitionItem, 0, 5),
	}
}

// AddPartitionItem appends an item and returns the receiver for chaining.
func (this *ListPartition) AddPartitionItem(partitionItem *ListPartitionItem) *ListPartition {
	this.ListPartitionItems = append(this.ListPartitionItems, partitionItem)
	return this
}

// GetMetaStr renders every item as an indented "PARTITION ..." clause,
// joined by ",\n". It fails on the first invalid item.
func (this *ListPartition) GetMetaStr() (string, error) {
	parts := make([]string, 0, 3)
	for _, item := range this.ListPartitionItems {
		partStr, err := item.GetMetaStr()
		if err != nil {
			return "", err
		}
		partStr = fmt.Sprintf("    %s", partStr)
		parts = append(parts, partStr)
	}
	return strings.Join(parts, ",\n"), nil
}

// ListPartitionItem is a single LIST partition: a name and its value set.
type ListPartitionItem struct {
	Name   string        `json:"name" form:"name"`
	Values []interface{} `json:"values" form:"values"`
}

func NewListPartitionItem(name string, values []interface{}) *ListPartitionItem {
	return &ListPartitionItem{
		Name:   name,
		Values: values,
	}
}

// GetMetaStr renders "PARTITION <name> VALUES IN (...)", validating that a
// name and at least one value are present.
func (this *ListPartitionItem) GetMetaStr() (string, error) {
	if strings.TrimSpace(this.Name) == "" {
		return "", fmt.Errorf("List 分区必须要有分区名. %v", this.Values)
	}
	if len(this.Values) < 1 { // no values specified
		return "", fmt.Errorf("List 分区必须要有指定值. %s", this.Name)
	}

	value := utils.WarpInterfaceStrs(this.Values, "%#v", ", ")
	return fmt.Sprintf("PARTITION %s VALUES IN (%s)", this.Name, value), nil
}

// RangePartition holds the items of a RANGE partition definition.
type RangePartition struct {
	RangePartitionItems []*RangePartitionItem `json:"range_partition_items" form:"range_partition_items"`
}

func NewRangePartition() *RangePartition {
	return &RangePartition{
		RangePartitionItems: make([]*RangePartitionItem, 0, 5),
	}
}

// AddPartitionItem appends an item and returns the receiver for chaining.
func (this *RangePartition) AddPartitionItem(partitionItem *RangePartitionItem) *RangePartition {
	this.RangePartitionItems = append(this.RangePartitionItems, partitionItem)
	return this
}

// GetMetaStr renders every item as an indented "PARTITION ..." clause,
// joined by ",\n". It fails on the first invalid item.
func (this *RangePartition) GetMetaStr() (string, error) {
	parts := make([]string, 0, 3)
	for _, item := range this.RangePartitionItems {
		partStr, err := item.GetMetaStr()
		if err != nil {
			return "", err
		}
		partStr = fmt.Sprintf("    %s", partStr)
		parts = append(parts, partStr)
	}
	return strings.Join(parts, ",\n"), nil
}

// RangePartitionItem is a single RANGE partition: a name, the upper-bound
// values and whether it is the MAXVALUE partition.
type RangePartitionItem struct {
	Name   string        `json:"name" form:"name"`
	Values []interface{} `json:"values" form:"values"`
	// BUG FIX: the second tag was a duplicate `json:"is_max_value"`; it is
	// the form tag here, matching every other field in this file.
	IsMaxValue bool `json:"is_max_value" form:"is_max_value"`
}

func NewRangePartitionItem(name string, values []interface{}, isMaxValue bool) *RangePartitionItem {
	return &RangePartitionItem{
		Name:       name,
		Values:     values,
		IsMaxValue: isMaxValue,
	}
}

// GetMetaStr renders "PARTITION <name> VALUES LESS THAN (...)", emitting one
// MAXVALUE placeholder per column when IsMaxValue is set.
func (this *RangePartitionItem) GetMetaStr() (string, error) {
	if strings.TrimSpace(this.Name) == "" {
		return "", fmt.Errorf("Range 分区必须要有分区名. %v", this.Values)
	}
	if len(this.Values) < 1 { // no values specified
		return "", fmt.Errorf("Range 分区必须要有指定范围值. %s", this.Name)
	}

	prePart := fmt.Sprintf("PARTITION %s VALUES LESS THAN", this.Name)

	// MAXVALUE partition: one MAXVALUE placeholder per value column.
	// (`for range` replaces the non-idiomatic `for _, _ = range`.)
	if this.IsMaxValue {
		values := make([]string, 0, len(this.Values))
		for range this.Values {
			values = append(values, "MAXVALUE")
		}
		valuePart := utils.WarpStrs(values, "%s", ", ")
		return fmt.Sprintf("%s (%s)", prePart, valuePart), nil
	}

	// Regular RANGE partition. (The original comment said "LIST" here — a
	// copy-paste slip.)
	valuePart := utils.WarpInterfaceStrs(this.Values, "%#v", ", ")
	return fmt.Sprintf("%s (%s)", prePart, valuePart), nil
}
package configutils

import (
	"encoding/json"
	"io"
	"io/ioutil"
	"net/http"
	"os"

	flags "github.com/jessevdk/go-flags"
	yaml "gopkg.in/yaml.v2"
)

// LoadFromYaml reads the YAML file at configPath and unmarshals it into out.
func LoadFromYaml(configPath string, out interface{}) error {
	f, err := os.Open(configPath)
	if err != nil {
		return err
	}
	defer f.Close()

	data, err := ioutil.ReadAll(f)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(data, out)
}

// LoadFromRemote performs an HTTP request and JSON-decodes the response body
// into out. headers are set verbatim on the request.
//
// NOTE(review): non-2xx responses are still decoded as JSON; add a status
// check if callers need to distinguish error payloads.
func LoadFromRemote(method string, url string, body io.Reader, headers map[string]string, out interface{}) error {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		return err
	}
	for k, v := range headers {
		req.Header.Set(k, v)
	}

	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		// BUG FIX: this previously returned nil, silently swallowing
		// transport errors and leaving out untouched.
		return err
	}
	defer resp.Body.Close()

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, out)
}

// LoadFromCommandLine args can be os.Args, or rest args after parsed
func LoadFromCommandLine(args []string, out interface{}) (restArgs []string, err error) {
	restArgs, err = flags.NewParser(out, flags.HelpFlag|flags.PrintErrors|flags.PassDoubleDash|flags.IgnoreUnknown).ParseArgs(args)
	// A bare -h/--help is not treated as an error for our callers.
	if flagErr, ok := err.(*flags.Error); ok && flagErr.Type == flags.ErrHelp {
		return restArgs, nil
	}
	return restArgs, err
}
package model

import (
	"app/internal/config"
	"app/internal/log"
	"errors"

	_ "github.com/go-sql-driver/mysql"
	"github.com/jmoiron/sqlx"
)

// _db is the package-level database handle, set by Initialize.
var _db *sqlx.DB

// Initialize opens (and pings, via sqlx.Connect) the MySQL connection
// described by the configured DSN.
func Initialize() error {
	dsn := config.GetMySQLDSN()
	if dsn == "" {
		// Error strings are lowercase per Go convention.
		return errors.New("empty MySQL DSN")
	}

	db, err := sqlx.Connect("mysql", dsn)
	if err != nil {
		return err
	}

	_db = db
	return nil
}

// Terminate closes the database handle if one was opened. It is now safe to
// call multiple times; subsequent calls are no-ops.
func Terminate() {
	if _db == nil {
		return
	}

	err := _db.Close()
	_db = nil // allow repeated Terminate calls and make stale use fail fast
	if err != nil {
		log.Logger().Error("Terminate database error: ", err)
	}
}
package main

import (
	"fmt"
)

// main demonstrates Go's basic value types: booleans, integers (with their
// arithmetic and bitwise operators), floating point numbers, complex
// numbers, strings and runes.
func main() {
	/** Declaring boolean value */
	var n bool = true // explicitly initialized to true (the zero value of bool is false)
	fmt.Printf("%v, %T\n", n, n)

	/** Integers - Comparing values */
	i := 1 == 1
	j := 2 == 1
	fmt.Printf("\n%v, %T\n", i, i)
	fmt.Printf("%v, %T\n", j, j)

	/** Declaring int values */
	k := 33 // signed integer
	fmt.Printf("\n%v, %T\n", k, k)
	var l uint = 22 // unsigned integer
	fmt.Printf("%v, %T\n", l, l)

	/** Arithmetic Operations - Integer - I */
	a := 10 // 1010 - converted to binary
	b := 3  // 0011 - converted to binary
	fmt.Println("")
	fmt.Println(a + b)
	fmt.Println(a - b)
	fmt.Println(a * b)
	fmt.Println(a / b) // integer division truncates: 10/3 == 3
	fmt.Println(a % b)
	fmt.Println("")

	/** Arithmetic Operations - Integer - II */
	var c int = 10
	var d int8 = 3
	//fmt.Println(c + d) // error (mismatched types int and int8)
	fmt.Println(c + int(d)) // convert type
	fmt.Println("")

	/** Arithmetic Operations - Integer - III (bitwise operators) */
	fmt.Println(a & b)  // AND - 0010 = 2
	fmt.Println(a | b)  // OR - 1011 = 11
	fmt.Println(a ^ b)  // XOR - 1001 = 9
	fmt.Println(a &^ b) // AND NOT (bit clear) - 1000 = 8
	fmt.Println("")

	/** Bit shifting */
	e := 8              // 2^3
	fmt.Println(e << 4) // 2^3 * 2^4 = 2^7 = 128
	fmt.Println(e >> 2) // 2^3 / 2^2 = 2^1 = 2
	fmt.Println("")

	/** Floating point numbers - Declaration */
	var f float64 = 3.14
	f = 13.7e72 // reassigned below; only the final value (2.1e10) is printed
	f = 2.1E10
	fmt.Printf("%v, %T\n", f, f)
	fmt.Println("")

	/** Arithmetic Operations - Float */
	g := 10.4
	h := 3.6
	fmt.Println(g + h)
	fmt.Println(g - h)
	fmt.Println(g * h)
	fmt.Println(g / h)
	fmt.Printf("%v, %T\n", g+h, g+h) // float64 + float64 = float64
	fmt.Println("")

	/** Complex numbers - Declaration */
	var x complex128 = 1 + 2i
	fmt.Printf("%v, %T\n", real(x), real(x)) // real part, float64
	fmt.Printf("%v, %T\n", imag(x), imag(x)) // imaginary part, float64
	fmt.Println("")

	/** Complex function */
	var y complex128 = complex(5, 10)
	fmt.Printf("%v, %T\n", y, y)
	fmt.Println("")

	/** String - UTF8 - Declaration */
	var s string = "Hello There"
	fmt.Printf("%v, %T\n", s, s)
	fmt.Println("")

	/** String concatenation */
	s2 := "Hello World"
	fmt.Printf("%v, %T\n", s+s2, s+s2)
	fmt.Println("")

	/** Rune - Declaration */
	var r rune = 'a' // rune is an alias for int32; prints the code point 97
	fmt.Printf("%v, %T\n", r, r)
}
package flutter import ( "encoding/json" "fmt" "log" "runtime" "time" "unsafe" "github.com/go-flutter-desktop/go-flutter/embedder" "github.com/go-gl/glfw/v3.2/glfw" "github.com/pkg/errors" ) // dpPerInch defines the amount of display pixels per inch as defined for Flutter. const dpPerInch = 160.0 // Run executes a flutter application with the provided options. // given limitations this method must be called by the main function directly. func Run(options ...Option) (err error) { var ( window *glfw.Window c config ) // The Windows Title Handler and the TextInput handler come by default options = append(options, addHandlerWindowTitle()) options = append(options, addHandlerTextInput()) options = append(options, addHandlerClipboard()) c = c.merge(options...) err = glfw.Init() if err != nil { return errors.Wrap(err, "glfw init") } defer glfw.Terminate() window, err = glfw.CreateWindow(c.WindowDimension.x, c.WindowDimension.y, "Loading..", nil, nil) if err != nil { return errors.Wrap(err, "creating glfw window") } defer window.Destroy() if c.WindowIconProvider != nil { images, err := c.WindowIconProvider() if err != nil { return errors.Wrap(err, "getting images from icon provider") } window.SetIcon(images) } if c.WindowInitializerDeprecated != nil { err = c.WindowInitializerDeprecated(window) if err != nil { return errors.Wrap(err, "executing window initializer") } } flu := runFlutter(window, c) defer flu.Shutdown() for !window.ShouldClose() { // glfw.WaitEvents() glfw.PollEvents() embedder.FlutterEngineFlushPendingTasksNow() } return nil } // GLFW callbacks to the Flutter Engine func glfwCursorPositionCallbackAtPhase( window *glfw.Window, phase embedder.PointerPhase, x float64, y float64, ) { winWidth, _ := window.GetSize() frameBuffWidth, _ := window.GetFramebufferSize() contentScale := float64(frameBuffWidth / winWidth) event := embedder.PointerEvent{ Phase: phase, X: x * contentScale, Y: y * contentScale, Timestamp: time.Now().UnixNano() / int64(time.Millisecond), } 
index := *(*int)(window.GetUserPointer()) flutterEngine := embedder.FlutterEngineByIndex(index) flutterEngine.SendPointerEvent(event) } func glfwMouseButtonCallback(window *glfw.Window, key glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { if key == glfw.MouseButton1 { x, y := window.GetCursorPos() // recalculate x and y from screen cordinates to pixels widthPx, _ := window.GetFramebufferSize() width, _ := window.GetSize() pixelsPerScreenCoordinate := float64(widthPx) / float64(width) x = x * pixelsPerScreenCoordinate y = y * pixelsPerScreenCoordinate if action == glfw.Press { glfwCursorPositionCallbackAtPhase(window, embedder.KDown, x, y) window.SetCursorPosCallback(func(window *glfw.Window, x float64, y float64) { x = x * pixelsPerScreenCoordinate y = y * pixelsPerScreenCoordinate glfwCursorPositionCallbackAtPhase(window, embedder.KMove, x, y) }) } if action == glfw.Release { glfwCursorPositionCallbackAtPhase(window, embedder.KUp, x, y) window.SetCursorPosCallback(nil) } } } var state = textModel{} func glfwKey(keyboardLayout KeyboardShortcuts) func(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { var modifierKey glfw.ModifierKey var wordTravellerKey int var wordTravellerKeyShift int switch runtime.GOOS { case "darwin": modifierKey = glfw.ModSuper wordTravellerKey = ModAlt wordTravellerKeyShift = ModShiftAlt default: modifierKey = glfw.ModControl wordTravellerKey = ModControl wordTravellerKeyShift = ModShiftControl } return func(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { var modsIsModfifier = false var modsIsShift = false var modsIsWordModifierShift = false var modsIsWordModifier = false switch { case int(mods) == wordTravellerKeyShift: modsIsWordModifierShift = true case int(mods) == wordTravellerKey: modsIsWordModifier = true case mods == modifierKey: modsIsModfifier = true case int(mods) == ModShift: modsIsShift = true } if key == glfw.KeyEscape && action == 
glfw.Press { w.SetShouldClose(true) } if action == glfw.Repeat || action == glfw.Press { if state.clientID != 0 { switch key { case glfw.KeyEnter: if mods == modifierKey { performAction(w, "done") } else { state.addChar([]rune{'\n'}) performAction(w, "newline") } case glfw.KeyHome: state.MoveCursorHome(modsIsModfifier, modsIsShift, modsIsWordModifierShift, modsIsWordModifier) case glfw.KeyEnd: state.MoveCursorEnd(modsIsModfifier, modsIsShift, modsIsWordModifierShift, modsIsWordModifier) case glfw.KeyLeft: state.MoveCursorLeft(modsIsModfifier, modsIsShift, modsIsWordModifierShift, modsIsWordModifier) case glfw.KeyRight: state.MoveCursorRight(modsIsModfifier, modsIsShift, modsIsWordModifierShift, modsIsWordModifier) case glfw.KeyDelete: state.Delete(modsIsModfifier, modsIsShift, modsIsWordModifierShift, modsIsWordModifier) case glfw.KeyBackspace: state.Backspace(modsIsModfifier, modsIsShift, modsIsWordModifierShift, modsIsWordModifier) case keyboardLayout.SelectAll: if mods == modifierKey { state.SelectAll() } case keyboardLayout.Copy: if mods == modifierKey && state.isSelected() { _, _, selectedContent := state.GetSelectedText() w.SetClipboardString(selectedContent) } case keyboardLayout.Cut: if mods == modifierKey && state.isSelected() { _, _, selectedContent := state.GetSelectedText() w.SetClipboardString(selectedContent) state.RemoveSelectedText() } case keyboardLayout.Paste: if mods == modifierKey { var clpString, err = w.GetClipboardString() if err != nil { log.Printf("unable to get the clipboard content: %v\n", err) } else { state.addChar([]rune(clpString)) } } } } } } } // newGLFWFramebufferSizeCallback creates a func that is called on framebuffer resizes. // When pixelRatio is set, the pixelRatio communicated to the Flutter embedder is not calculated. 
// newGLFWFramebufferSizeCallback returns the callback invoked on framebuffer
// resizes; it forwards the new metrics (and the effective pixel ratio) to the
// Flutter engine bound to the window.
func newGLFWFramebufferSizeCallback(pixelRatio float64, monitorScreenCoordinatesPerInch float64) func(*glfw.Window, int, int) {
	return func(window *glfw.Window, widthPx int, heightPx int) {
		// The engine index was stored in the window's user pointer by runFlutter.
		index := *(*int)(window.GetUserPointer())
		flutterEngine := embedder.FlutterEngineByIndex(index)

		// calculate pixelRatio when it has not been forced.
		if pixelRatio == 0 {
			width, _ := window.GetSize()
			pixelsPerScreenCoordinate := float64(widthPx) / float64(width)
			dpi := pixelsPerScreenCoordinate * monitorScreenCoordinatesPerInch
			pixelRatio = dpi / dpPerInch

			// Limit the ratio to 1 to avoid rendering a smaller UI in standard resolution monitors.
			if pixelRatio < 1.0 {
				fmt.Println("calculated pixelRatio limited to a minimum of 1.0")
				pixelRatio = 1.0
			}
		}

		event := embedder.WindowMetricsEvent{
			Width:      widthPx,
			Height:     heightPx,
			PixelRatio: pixelRatio,
		}
		flutterEngine.SendWindowMetricsEvent(event)
	}
}

// glfwCharCallback forwards printable character input to the active text
// input client, if any.
func glfwCharCallback(w *glfw.Window, char rune) {
	if state.clientID != 0 {
		state.addChar([]rune{char})
	}
}

// Flutter Engine

// runFlutter configures and starts a Flutter engine bound to the given GLFW
// window: it wires the render and platform-message callbacks, stores the
// engine index in the window's user pointer, and installs the GLFW input
// callbacks. It panics when the engine fails to launch.
func runFlutter(window *glfw.Window, c config) *embedder.FlutterEngine {
	flutterEngine := embedder.NewFlutterEngine()

	// Engine arguments
	flutterEngine.AssetsPath = c.AssetsPath
	flutterEngine.IcuDataPath = c.ICUDataPath

	// Render callbacks
	flutterEngine.FMakeCurrent = func(v unsafe.Pointer) bool {
		w := glfw.GoWindow(v)
		w.MakeContextCurrent()
		return true
	}
	flutterEngine.FClearCurrent = func(v unsafe.Pointer) bool {
		glfw.DetachCurrentContext()
		return true
	}
	flutterEngine.FPresent = func(v unsafe.Pointer) bool {
		w := glfw.GoWindow(v)
		w.SwapBuffers()
		return true
	}
	flutterEngine.FFboCallback = func(v unsafe.Pointer) int32 {
		// 0 = render to the window's default framebuffer.
		return 0
	}
	flutterEngine.FMakeResourceCurrent = func(v unsafe.Pointer) bool {
		return false
	}

	// PlatformMessage
	flutterEngine.FPlatfromMessage = func(platMessage *embedder.PlatformMessage, window unsafe.Pointer) bool {
		windows := glfw.GoWindow(window)

		hasDispatched := false

		// Dispatch the message from the Flutter Engine, to all of the PluginReceivers
		// having the same embedder.PlatformMessage.Channel name
		for _, receivers := range c.PlatformMessageReceivers[platMessage.Channel] {
			hasDispatched = receivers(platMessage, flutterEngine, windows) || hasDispatched
		}

		return hasDispatched
	}

	// Push local text-input state changes back to the engine.
	state.notifyState = func() {
		// log.Printf("Text: Sending to the flutter engine %v", state)
		updateEditingState(window)
	}

	// Store the engine index so GLFW callbacks can recover the engine from
	// the window's user pointer.
	flutterEngineIndex := flutterEngine.Index()
	window.SetUserPointer(unsafe.Pointer(&flutterEngineIndex))
	result := flutterEngine.Run(window.GLFWWindow(), c.VMArguments)
	if result != embedder.KSuccess {
		window.Destroy()
		panic("Couldn't launch the FlutterEngine")
	}

	// Send the initial window metrics before any resize happens.
	glfwFramebufferSizeCallback := newGLFWFramebufferSizeCallback(c.ForcePixelRatio, getScreenCoordinatesPerInch())
	width, height := window.GetFramebufferSize()
	glfwFramebufferSizeCallback(window, width, height)
	var glfwKeyCallback func(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey)
	if c.KeyboardLayout != nil {
		glfwKeyCallback = glfwKey(*c.KeyboardLayout)
	} else {
		glfwKeyCallback = glfwKey(KeyboardQwertyLayout)
	}

	window.SetKeyCallback(glfwKeyCallback)
	window.SetFramebufferSizeCallback(glfwFramebufferSizeCallback)
	window.SetMouseButtonCallback(glfwMouseButtonCallback)
	window.SetCharCallback(glfwCharCallback)
	return flutterEngine
}

// getScreenCoordinatesPerInch returns the number of screen coordinates per
// inch for the main monitor. If the information is unavailable it returns
// a default value that assumes that a screen coordinate is one dp.
func getScreenCoordinatesPerInch() float64 { // TODO: multi-monitor support (#74) primaryMonitor := glfw.GetPrimaryMonitor() if primaryMonitor == nil { return dpPerInch } primaryMonitorMode := primaryMonitor.GetVideoMode() if primaryMonitorMode == nil { return dpPerInch } primaryMonitorWidthMM, _ := primaryMonitor.GetPhysicalSize() if primaryMonitorWidthMM == 0 { return dpPerInch } return float64(primaryMonitorMode.Width) / (float64(primaryMonitorWidthMM) / 25.4) } // Update the TextInput with the current state func updateEditingState(window *glfw.Window) { editingState := argsEditingState{ Text: string(state.word), SelectionAffinity: "TextAffinity.downstream", SelectionBase: state.selectionBase, SelectionExtent: state.selectionExtent, SelectionIsDirectional: false, } editingStateMarchalled, _ := json.Marshal([]interface{}{ state.clientID, editingState, }) message := embedder.Message{ Args: editingStateMarchalled, Method: textUpdateStateMethod, } var mess = &embedder.PlatformMessage{ Channel: textInputChannel, Message: message, } index := *(*int)(window.GetUserPointer()) flutterEngine := embedder.FlutterEngineByIndex(index) flutterEngine.SendPlatformMessage(mess) } func performAction(window *glfw.Window, action string) { actionArgs, _ := json.Marshal([]interface{}{ state.clientID, "TextInputAction." + action, }) message := embedder.Message{ Args: actionArgs, Method: "TextInputClient.performAction", } var mess = &embedder.PlatformMessage{ Channel: textInputChannel, Message: message, } index := *(*int)(window.GetUserPointer()) flutterEngine := embedder.FlutterEngineByIndex(index) flutterEngine.SendPlatformMessage(mess) }
package renderer

import (
	"errors"
	"image"

	"github.com/driusan/de/demodel"
	"golang.org/x/image/math/fixed"
)

// ErrNoCharacter is returned when no character lies under the queried point.
// Error string lowercased without trailing punctuation per Go convention;
// callers compare against the sentinel, not the text.
var ErrNoCharacter = errors.New("no character under the mouse cursor")

// ImageLoc maps a rendered bounding box to a character index in the buffer.
type ImageLoc struct {
	Loc fixed.Rectangle26_6
	Idx uint
}

// ImageMap associates rendered character rectangles with their indices in
// the backing character buffer.
type ImageMap struct {
	IMap []ImageLoc
	Buf  *demodel.CharBuffer
}

// inRectangle reports whether the integer point (x, y) lies inside the
// fixed-point rectangle r (inclusive on all edges).
func inRectangle(x, y int, r fixed.Rectangle26_6) bool {
	return x >= r.Min.X.Ceil() && x <= r.Max.X.Floor() && y >= r.Min.Y.Ceil() && y <= r.Max.Y.Floor()
}

// At returns the index of the character rendered at pixel (x, y), or
// ErrNoCharacter when the point hits no character box.
func (im ImageMap) At(x, y int) (uint, error) {
	for _, m := range im.IMap {
		if inRectangle(x, y, m.Loc) {
			return m.Idx, nil
		}
	}
	return 0, ErrNoCharacter
}

// Get returns the bounding rectangle for the character at index idx
// in the character buffer, or ErrNoCharacter if idx was never rendered.
func (im ImageMap) Get(idx uint) (image.Rectangle, error) {
	for _, chr := range im.IMap {
		if chr.Idx == idx {
			return image.Rectangle{
				Min: image.Point{chr.Loc.Min.X.Ceil(), chr.Loc.Min.Y.Ceil()},
				Max: image.Point{chr.Loc.Max.X.Ceil(), chr.Loc.Max.Y.Ceil()},
			}, nil
		}
	}
	// image.Rectangle{} replaces the deprecated image.ZR (same zero value).
	return image.Rectangle{}, ErrNoCharacter
}
package main

import (
	"fmt"
	"math/rand"
	"net"
	"time"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

// maxHops bounds the number of TTL increments before the trace gives up.
const maxHops = 64

// protocolICMP is the IANA protocol number for ICMPv4 (used by ParseMessage).
const protocolICMP = 1

// Hop stores information about a single traceroute hop.
type Hop struct {
	Number  int
	Addr    net.Addr
	Rtt     time.Duration
	Type    icmp.Type
	Success bool
}

// TraceRoute returns a channel of hop information values. Hops are produced
// until the destination replies with an echo reply, maxHops is exceeded, or
// an error is sent on the error channel.
func TraceRoute(host string) (<-chan Hop, <-chan error) {
	errc := make(chan error, 1)
	dest, err := net.ResolveIPAddr("ip4", host)
	if err != nil {
		errc <- fmt.Errorf("%s is invalid", host)
		defer close(errc)
		return nil, errc
	}
	ttl := 1
	timeout := time.Second
	out := make(chan Hop)
	go func() {
		defer close(out)
		defer close(errc)
		for {
			hop, err := sendEcho(dest, ttl, ttl, timeout)
			if err != nil {
				errc <- err
				break
			}
			out <- hop
			ttl++
			if hop.Success {
				if hop.Type == ipv4.ICMPTypeEchoReply {
					// The destination itself answered: trace complete.
					break
				}
				// Adapt the timeout to the observed round-trip time.
				timeout = hop.Rtt*3 + time.Millisecond*50
			}
			if ttl > maxHops {
				return
			}
		}
	}()
	return out, errc
}

// sendEcho sends a single ICMP echo with the given TTL and waits up to
// timeout for a reply. A timeout or unparseable reply yields an
// unsuccessful Hop with a nil error; only setup/send failures return errors.
func sendEcho(dest net.Addr, seq, ttl int, timeout time.Duration) (hop Hop, err error) {
	conn, err := icmp.ListenPacket("ip4:icmp", "0.0.0.0")
	if err != nil {
		return Hop{}, err
	}
	defer conn.Close()

	echo, err := createICMPEcho(seq)
	if err != nil {
		return Hop{}, err
	}

	// BUG FIX: the SetTTL error was previously ignored; a failed TTL set
	// would silently probe with the default TTL.
	if err := conn.IPv4PacketConn().SetTTL(ttl); err != nil {
		return Hop{}, err
	}

	start := time.Now()
	if _, err = conn.WriteTo(echo, dest); err != nil {
		return Hop{}, err
	}

	reply := make([]byte, 1500)
	if err = conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
		return Hop{}, err
	}
	n, peer, err := conn.ReadFrom(reply)
	if err != nil {
		// Read timeout: report an unsuccessful hop rather than an error.
		return Hop{Number: ttl, Success: false}, nil
	}
	rtt := time.Since(start)

	// BUG FIX: parse only the n bytes actually received; previously the full
	// 1500-byte buffer (including trailing zeros) was handed to ParseMessage.
	message, err := icmp.ParseMessage(protocolICMP, reply[:n])
	if err != nil {
		return Hop{Number: ttl, Success: false}, nil
	}

	return Hop{Number: ttl, Addr: peer, Rtt: rtt, Type: message.Type, Success: true}, nil
}

// createICMPEcho marshals an ICMPv4 echo request with a random ID and the
// given sequence number.
func createICMPEcho(seq int) (request []byte, err error) {
	message := icmp.Message{
		Type: ipv4.ICMPTypeEcho,
		Code: 0,
		Body: &icmp.Echo{
			ID:   rand.Int(),
			Seq:  seq,
			Data: []byte(""),
		}}
	return message.Marshal(nil)
}
package handlers

import (
	"encoding/json"
	"log"
	"net/http"
	"path"
)

const (
	ApiError            = "api_error"
	InvalidRequestError = "invalid_request_error"
)

// ErrorMessage type holds API error related information.
// It is typically serialized to JSON then returned to the client.
type ErrorMessage struct {
	ErrorType string `json:"type"`    // Represents the error classification
	Message   string `json:"message"` // Summary of the error
}

// BadRequest renders a 400 bad request response with a JSON error body.
func BadRequest(w http.ResponseWriter, errorType, message string) {
	errMsg := ErrorMessage{ErrorType: errorType, Message: message}
	renderError(w, http.StatusBadRequest, &errMsg)
}

// NotFound renders a 404 not found response with no body.
func NotFound(w http.ResponseWriter) {
	renderError(w, http.StatusNotFound, nil)
}

// renderError writes the status code and, when errMsg is non-nil, a JSON body.
func renderError(w http.ResponseWriter, status int, errMsg *ErrorMessage) {
	if errMsg == nil {
		w.WriteHeader(status)
		return
	}
	// BUG FIX: headers must be set BEFORE WriteHeader; the original called
	// Header().Set afterwards, so Content-Type was silently dropped.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	if err := json.NewEncoder(w).Encode(errMsg); err != nil {
		log.Print(err)
	}
}

// decodeError parses a JSON error payload; on malformed input it returns the
// zero ErrorMessage (the parse error is deliberately ignored, best-effort).
func decodeError(message string) ErrorMessage {
	errMsg := ErrorMessage{}
	json.Unmarshal([]byte(message), &errMsg)
	return errMsg
}

// repoPath builds the on-disk location of a repository under datadir.
// NOTE(review): path.Join (slash paths) is used for a filesystem path; on
// Windows filepath.Join would be correct — confirm target platforms.
func repoPath(datadir, repoName string) string {
	return path.Join(datadir, "repos/", repoName)
}
// Copyright 2020 The Moov Authors // Use of this source code is governed by an Apache License // license that can be found in the LICENSE file. package wire import ( "bytes" "encoding/json" "fmt" ) // File contains the structures of a parsed WIRE File. type File struct { ID string `json:"id"` FEDWireMessage FEDWireMessage `json:"fedWireMessage"` isIncoming bool `json:"-"` } // NewFile constructs a file template func NewFile(opts ...FilePropertyFunc) *File { f := &File{} for _, opt := range opts { opt(f) } return f } // SetValidation stores ValidateOpts on the FEDWireMessage's validation rules func (f *File) SetValidation(opts *ValidateOpts) { if f == nil || opts == nil { return } f.FEDWireMessage.ValidateOptions = opts } // GetValidation returns validation rules of FEDWireMessage func (f *File) GetValidation() *ValidateOpts { if f == nil || f.FEDWireMessage.ValidateOptions == nil { return nil } return f.FEDWireMessage.ValidateOptions } // AddFEDWireMessage appends a FEDWireMessage to the File func (f *File) AddFEDWireMessage(fwm FEDWireMessage) FEDWireMessage { f.FEDWireMessage = fwm return f.FEDWireMessage } // Create will tabulate and assemble an WIRE file into a valid state. // // Create implementations are free to modify computable fields in a file and should // call the Validate() function at the end of their execution. func (f *File) Create() error { return nil } // Validate will never modify the file. func (f *File) Validate() error { if err := f.FEDWireMessage.verify(f.isIncoming); err != nil { return err } return nil } // FileFromJSON attempts to return a *File object assuming the input is valid JSON. // // Callers should always check for a nil-error before using the returned file. // // The File returned may not be valid and callers should confirm with Validate(). Invalid files may // be rejected by other Financial Institutions or ACH tools. 
func FileFromJSON(bs []byte) (*File, error) { if len(bs) == 0 { //return nil, errors.New("no JSON data provided") return nil, nil } file := NewFile() if err := json.NewDecoder(bytes.NewReader(bs)).Decode(file); err != nil { return nil, fmt.Errorf("problem reading File: %v", err) } return file, nil } type FilePropertyFunc func(*File) // OutgoingFile specify that the file is for outgoing func OutgoingFile() FilePropertyFunc { return func(f *File) { f.isIncoming = false } } // IncomingFile specify that the file is for incoming func IncomingFile() FilePropertyFunc { return func(f *File) { f.isIncoming = true } }
package main

import (
	"flag"
	"fmt"
	"os"
)

// main prints the raw command-line arguments, then parses two demo flags
// (-s string, -i int) and echoes their values.
func main() {
	for idx, arg := range os.Args {
		fmt.Printf("%d : %s\n", idx, arg)
	}

	strFlag := flag.String("s", "hello s", "input string!!!")
	intFlag := flag.Int("i", 0, "int value")
	//clone :=flag.NewFlagSet("clone",flag.ExitOnError)
	flag.Parse()

	fmt.Printf("s : %s\n", *strFlag)
	fmt.Printf("i : %d\n", *intFlag)
}
package experiment // import ( // "fmt" // "testing" // "gopkg.in/ddspog/mspec.v1/bdd" // ) // // Feature Create Data with functional Getters // // - As a developer, // // - I want to be able to create a Data object and get values with its // // getters, // // - So that I could use these getters to manipulate and read data. // func Test_Create_Data_with_functional_Getters(t *testing.T) { // given, like, s := bdd.Sentences() // given(t, "a Data d with Columns = '%[1]s' and Rows = %[2]v", func(when bdd.When, args ...interface{}) { // tcolumns := args[0].([]string) // tvalues := args[1].([][]interface{}) // var d Dater = &Data{ // columns: tcolumns, // values: tvalues, // } // when("d.Columns() is called", func(it bdd.It) { // it(fmt.Sprintf("should return %d", len(tcolumns)), func(assert bdd.Assert) { // assert.Equal(len(tcolumns), d.Columns()) // }) // }) // for i := 0; i < len(tcolumns); i++ { // when(fmt.Sprintf("d.Column(%d) is called", i), func(it bdd.It) { // it(fmt.Sprintf("should return %s", tcolumns[i]), func(assert bdd.Assert) { // assert.Equal(tcolumns[i], d.Column(i)) // }) // }) // } // when("d.Rows() is called", func(it bdd.It) { // it(fmt.Sprintf("should return %d", len(tvalues)), func(assert bdd.Assert) { // assert.Equal(len(tvalues), d.Rows()) // }) // }) // for i := 0; i < len(tvalues); i++ { // for j := 0; j < len(tcolumns); j++ { // when(fmt.Sprintf("d.Value(%d, %d) is called", i, j), func(it bdd.It) { // it(fmt.Sprintf("should return %d", tvalues[i][j]), func(assert bdd.Assert) { // assert.Equal(tvalues[i][j], d.Value(i, j)) // }) // }) // } // } // }, like( // s([]string{"a", "b"}, [][]interface{}{{1, 2}, {2, 3}}), // s([]string{"a", "b", "c"}, [][]interface{}{{1, 2, 3}, {2, 3, 5}}), // s([]string{"a"}, [][]interface{}{{1}, {2}}), // )) // }
package utils

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestVector2_Add verifies component-wise addition of two Vector2 values.
func TestVector2_Add(t *testing.T) {
	lhs := Vector2{X: 1, Y: 2}
	rhs := Vector2{X: 3, Y: 4}

	sum := lhs.Add(rhs)

	assert.Equal(t, Vector2{X: 4, Y: 6}, sum)
}
package representation

import (
	"fmt"
	"math/big"
	"testing"
)

// TestMulImpl multiplies the maximum 256-bit value (all limbs 2^64-1) by
// itself and checks the full 512-bit product against its known hex string:
// (2^256-1)^2 = 2^512 - 2^257 + 1.
func TestMulImpl(t *testing.T) {
	var a U256
	var b U256
	uint64max := ^uint64(0)
	a[0] = uint64max
	a[1] = uint64max
	a[2] = uint64max
	a[3] = uint64max
	b[0] = uint64max
	b[1] = uint64max
	b[2] = uint64max
	b[3] = uint64max
	fmt.Println(a.String()) // debug output only
	mulRes := a.MulImpl(&b)
	if mulRes.String() != "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0000000000000000000000000000000000000000000000000000000000000001" {
		t.Fail()
	}
}

// TestSquareImpl checks that SquareImpl agrees with MulImpl(self) for the
// maximum 256-bit value, and that both match the known (2^256-1)^2 constant.
func TestSquareImpl(t *testing.T) {
	var a U256
	var b U256
	uint64max := ^uint64(0)
	a[0] = uint64max
	a[1] = uint64max
	a[2] = uint64max
	a[3] = uint64max
	b[0] = uint64max
	b[1] = uint64max
	b[2] = uint64max
	b[3] = uint64max
	fmt.Println(a.String()) // debug output only
	mulRes := a.MulImpl(&b)
	squareRes := a.SquareImpl()
	if mulRes != squareRes {
		t.Fail()
	}
	if mulRes.String() != "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0000000000000000000000000000000000000000000000000000000000000001" {
		t.Fail()
	}
}

// TestMulImplByLimb multiplies the maximum 256-bit value by a single-limb
// operand (2^64-1) and checks the 320-bit-wide product.
func TestMulImplByLimb(t *testing.T) {
	var a U256
	var b U256
	uint64max := ^uint64(0)
	a[0] = uint64max
	a[1] = uint64max
	a[2] = uint64max
	a[3] = uint64max
	b[0] = uint64max
	fmt.Println(a.String())
	fmt.Println(b.String())
	mulRes := a.MulImpl(&b)
	fmt.Println(mulRes.String())
	t.Log(mulRes)
	if mulRes.String() != "0x000000000000000000000000000000000000000000000000fffffffffffffffeffffffffffffffffffffffffffffffffffffffffffffffff0000000000000001" {
		t.Fail()
	}
}

// TestMulImplByTwoLimbs multiplies the maximum 256-bit value by a two-limb
// operand (2^128-1) and checks the 384-bit-wide product.
func TestMulImplByTwoLimbs(t *testing.T) {
	var a U256
	var b U256
	uint64max := ^uint64(0)
	a[0] = uint64max
	a[1] = uint64max
	a[2] = uint64max
	a[3] = uint64max
	b[0] = uint64max
	b[1] = uint64max
	fmt.Println(a.String())
	fmt.Println(b.String())
	mulRes := a.MulImpl(&b)
	fmt.Println(mulRes.String())
	t.Log(mulRes)
	if mulRes.String() != "0x00000000000000000000000000000000fffffffffffffffffffffffffffffffeffffffffffffffffffffffffffffffff00000000000000000000000000000001" {
		t.Fail()
	}
}

// TestMAC: with b*c == 0 the multiply-accumulate degenerates to just a, so
// res == a and no carry. (The expected values in these three tests are
// consistent with mac_with_carry computing a + b*c + carry over 128 bits.)
func TestMAC(t *testing.T) {
	uint64max := ^uint64(0)
	a := uint64max
	b := uint64max
	c := uint64(0)
	carry := uint64(0)
	res, carry := mac_with_carry(a, b, c, carry)
	if res != uint64max || carry != 0 {
		t.Fail()
	}
}

// TestMAC2: 0 + (2^64-1)*(2^64-1) + 0 = 2^128 - 2^65 + 1,
// i.e. low limb 1, high limb 2^64-2.
func TestMAC2(t *testing.T) {
	uint64max := ^uint64(0)
	a := uint64(0)
	b := uint64max
	c := uint64max
	carry := uint64(0)
	res, carry := mac_with_carry(a, b, c, carry)
	if res != 1 || carry != uint64max-1 {
		t.Fail()
	}
}

// TestMAC3: (2^64-1) + (2^64-1)*(2^64-1) + 0 = 2^128 - 2^64,
// i.e. low limb 0, high limb 2^64-1.
func TestMAC3(t *testing.T) {
	uint64max := ^uint64(0)
	a := uint64max
	b := uint64max
	c := uint64max
	carry := uint64(0)
	res, carry := mac_with_carry(a, b, c, carry)
	if res != 0 || carry != uint64max {
		t.Fail()
	}
}

// TestBN254BaseField builds Montgomery parameters for the BN254 base field
// prime p and checks that 2 * 3 == 6 in Montgomery form.
func TestBN254BaseField(t *testing.T) {
	// p = BN254 base field modulus.
	modulus := big.NewInt(0)
	modulus.SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)
	montBits := uint(256)
	// montR = 2^256 mod p, montR2 = R^2 mod p.
	montR := big.NewInt(1)
	montR = big.NewInt(0).Lsh(montR, montBits)
	montR = big.NewInt(0).Mod(montR, modulus)
	montR2 := big.NewInt(0).Mul(montR, montR)
	montR2 = big.NewInt(0).Mod(montR2, modulus)
	// Decompose the big.Ints into 64-bit limbs, least significant first.
	var u256Modulus U256
	var u256R U256
	var u256R2 U256
	for i := 0; i < M; i++ {
		u256Modulus[i] = modulus.Uint64()
		u256R[i] = montR.Uint64()
		u256R2[i] = montR2.Uint64()
		modulus = modulus.Rsh(modulus, 64)
		montR = montR.Rsh(montR, 64)
		montR2 = montR2.Rsh(montR2, 64)
	}
	// inv = -p^{-1} mod 2^64, computed by iterated lifting
	// (each pass doubles the number of correct low bits), then negated.
	inv := uint64(1)
	for i := 0; i < 63; i++ {
		inv = inv * inv
		inv = inv * u256Modulus[0]
	}
	inv = (^inv) + 1
	var two U256
	two[0] = 2
	var three U256
	three[0] = 3
	params := FieldParams{
		modulus: &u256Modulus,
		montR:   &u256R,
		montR2:  &u256R2,
		montInv: inv,
	}
	// Convert into Montgomery form, multiply, convert back, expect 6.
	fe_two := two.IntoFp(&params)
	fe_three := three.IntoFp(&params)
	fe_two.MulAssign(&fe_three)
	result := fe_two.IntoRepr()
	res := result.(*U256)
	if res[0] != 6 {
		t.Fail()
	}
}

// TestBN254BaseFieldPow checks Fermat's little theorem in the BN254 base
// field: 3^(p-1) == 1 mod p.
func TestBN254BaseFieldPow(t *testing.T) {
	modulus := big.NewInt(0)
	modulus.SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)
	montBits := uint(256)
	montR := big.NewInt(1)
	montR = big.NewInt(0).Lsh(montR, montBits)
	montR = big.NewInt(0).Mod(montR, modulus)
	montR2 := big.NewInt(0).Mul(montR, montR)
	montR2 = big.NewInt(0).Mod(montR2, modulus)
	var u256Modulus U256
	var u256R U256
	var u256R2 U256
	for i := 0; i < M; i++ {
		u256Modulus[i] = modulus.Uint64()
		u256R[i] = montR.Uint64()
		u256R2[i] = montR2.Uint64()
		modulus = modulus.Rsh(modulus, 64)
		montR = montR.Rsh(montR, 64)
		montR2 = montR2.Rsh(montR2, 64)
	}
	inv := uint64(1)
	for i := 0; i < 63; i++ {
		inv = inv * inv
		inv = inv * u256Modulus[0]
	}
	inv = (^inv) + 1
	var three U256
	three[0] = 3
	params := FieldParams{
		modulus: &u256Modulus,
		montR:   &u256R,
		montR2:  &u256R2,
		montInv: inv,
	}
	fe_three := three.IntoFp(&params)
	// exponent = p - 1.
	var exponentRepr U256
	exponent := big.NewInt(0)
	exponent.SetString("21888242871839275222246405745257275088696311157297823662689037894645226208582", 10)
	for i := 0; i < M; i++ {
		exponentRepr[i] = exponent.Uint64()
		exponent = exponent.Rsh(exponent, 64)
	}
	p := fe_three.Pow(exponentRepr[:])
	result := p.IntoRepr()
	res := result.(*U256)
	if res[0] != 1 {
		t.Fail()
	}
}

// BenchmarkBN254BaseFieldPow measures the cost of a full 256-bit field
// exponentiation (same setup as TestBN254BaseFieldPow).
func BenchmarkBN254BaseFieldPow(b *testing.B) {
	modulus := big.NewInt(0)
	modulus.SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)
	montBits := uint(256)
	montR := big.NewInt(1)
	montR = big.NewInt(0).Lsh(montR, montBits)
	montR = big.NewInt(0).Mod(montR, modulus)
	montR2 := big.NewInt(0).Mul(montR, montR)
	montR2 = big.NewInt(0).Mod(montR2, modulus)
	var u256Modulus U256
	var u256R U256
	var u256R2 U256
	for i := 0; i < M; i++ {
		u256Modulus[i] = modulus.Uint64()
		u256R[i] = montR.Uint64()
		u256R2[i] = montR2.Uint64()
		modulus = modulus.Rsh(modulus, 64)
		montR = montR.Rsh(montR, 64)
		montR2 = montR2.Rsh(montR2, 64)
	}
	inv := uint64(1)
	for i := 0; i < 63; i++ {
		inv = inv * inv
		inv = inv * u256Modulus[0]
	}
	inv = (^inv) + 1
	var three U256
	three[0] = 3
	params := FieldParams{
		modulus: &u256Modulus,
		montR:   &u256R,
		montR2:  &u256R2,
		montInv: inv,
	}
	fe_three := three.IntoFp(&params)
	var exponentRepr U256
	exponent := big.NewInt(0)
	exponent.SetString("21888242871839275222246405745257275088696311157297823662689037894645226208582", 10)
	for i := 0; i < M; i++ {
		exponentRepr[i] = exponent.Uint64()
		exponent = exponent.Rsh(exponent, 64)
	}
	for i := 0; i < b.N; i++ {
		_ = fe_three.Pow(exponentRepr[:])
	}
}
package requirements /** isEmpty receives a string s and returns true if it is empty As there are no nulls in go, only the length of the string was checked */ func isEmpty(s string) bool { return len(s) == 0 }
package main

import (
	"log"
	"net/http"

	"github.com/gin-gonic/gin"
)

// FormHandler reads the "name" and "city" POST form fields, substituting
// the defaults when a field is absent, and echoes both back as JSON.
func FormHandler(c *gin.Context) {
	name := c.DefaultPostForm("name", "张三")
	city := c.DefaultPostForm("city", "雄安")
	c.JSON(http.StatusOK, gin.H{
		"name": name,
		"city": city,
	})
}

// PathHandler echoes the ":action" path parameter back as JSON.
func PathHandler(c *gin.Context) {
	action := c.Param("action")
	c.JSON(http.StatusOK, gin.H{
		"action": action,
	})
}

// main registers the routes and serves HTTP on :8001.
func main() {
	engine := gin.Default()
	engine.POST("/form", FormHandler)
	engine.GET("/book/:action", PathHandler)
	// Run blocks until the server stops; the original discarded its error,
	// hiding startup failures such as a port already in use.
	if err := engine.Run(":8001"); err != nil {
		log.Fatal(err)
	}
}
package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

// main walks through the OAuth2 authorization-code flow against the
// Microsoft Live endpoints: it prints the consent URL, reads the code the
// user pastes back, exchanges it for a token, and builds an authenticated
// HTTP client.
func main() {
	conf := &oauth2.Config{
		ClientID:     "",
		ClientSecret: "-hJmkRnmsPLZ",
		Scopes:       []string{"wl.offline_access", "onedrive.readwrite"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://login.live.com/oauth20_authorize.srf",
			TokenURL: "https://login.live.com/oauth20_token.srf",
		},
	}

	// Redirect user to consent page to ask for permission
	// for the scopes specified above.
	url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
	fmt.Printf("Visit the URL for the auth dialog: %v", url)

	// Use the authorization code that is pushed to the redirect URL.
	// NewTransportWithCode will do the handshake to retrieve
	// an access token and initiate a Transport that is
	// authorized and authenticated by the retrieved token.
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		// BUG FIX: "log" was used below but never imported, and the
		// resulting client was declared but unused — both compile errors.
		log.Fatal(err)
	}
	tok, err := conf.Exchange(oauth2.NoContext, code)
	if err != nil {
		log.Fatal(err)
	}

	client := conf.Client(oauth2.NoContext, tok)
	_ = client // TODO: use client to call the protected API.
}
package keeper

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/log"

	"github.com/cosmos/cosmos-sdk/codec"
	sdk "github.com/cosmos/cosmos-sdk/types"
	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
	paramstypes "github.com/cosmos/cosmos-sdk/x/params/types"

	"github.com/irismod/service/types"
)

// Keeper defines the service keeper
type Keeper struct {
	storeKey sdk.StoreKey   // key used to access this module's KVStore
	cdc      codec.Marshaler // codec for (un)marshaling stored objects

	accountKeeper types.AccountKeeper

	// The bankKeeper to reduce the supply of the network
	bankKeeper types.BankKeeper

	tokenKeeper types.TokenKeeper

	paramSpace paramstypes.Subspace // subspace holding the module's parameters

	// name of the FeeCollector ModuleAccount
	feeCollectorName string

	// used to map the module name to response callback
	respCallbacks map[string]types.ResponseCallback

	// used to map the module name to state callback
	stateCallbacks map[string]types.StateCallback

	// used to map the module name to module service
	moduleServices map[string]*types.ModuleService
}

// NewKeeper creates a new service Keeper instance.
// It panics if the deposit or request module accounts have not been
// registered with the account keeper, and lazily installs the module's
// parameter key table when missing.
func NewKeeper(
	cdc codec.Marshaler,
	key sdk.StoreKey,
	accountKeeper types.AccountKeeper,
	bankKeeper types.BankKeeper,
	tokenKeeper types.TokenKeeper,
	paramSpace paramstypes.Subspace,
	feeCollectorName string,
) Keeper {
	// ensure service module accounts are set
	if addr := accountKeeper.GetModuleAddress(types.DepositAccName); addr == nil {
		panic(fmt.Sprintf("%s module account has not been set", types.DepositAccName))
	}

	if addr := accountKeeper.GetModuleAddress(types.RequestAccName); addr == nil {
		panic(fmt.Sprintf("%s module account has not been set", types.RequestAccName))
	}

	// set KeyTable if it has not already been set
	if !paramSpace.HasKeyTable() {
		paramSpace = paramSpace.WithKeyTable(ParamKeyTable())
	}

	keeper := Keeper{
		storeKey:         key,
		cdc:              cdc,
		accountKeeper:    accountKeeper,
		bankKeeper:       bankKeeper,
		tokenKeeper:      tokenKeeper,
		feeCollectorName: feeCollectorName,
		paramSpace:       paramSpace,
	}

	// callback/module-service registries start empty; modules register
	// themselves after construction.
	keeper.respCallbacks = make(map[string]types.ResponseCallback)
	keeper.stateCallbacks = make(map[string]types.StateCallback)
	keeper.moduleServices = make(map[string]*types.ModuleService)

	return keeper
}

// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
	return ctx.Logger().With("module", fmt.Sprintf("irismod/%s", types.ModuleName))
}

// GetServiceDepositAccount returns the service deposit ModuleAccount
func (k Keeper) GetServiceDepositAccount(ctx sdk.Context) authtypes.ModuleAccountI {
	return k.accountKeeper.GetModuleAccount(ctx, types.DepositAccName)
}

// GetServiceRequestAccount returns the service request ModuleAccount
func (k Keeper) GetServiceRequestAccount(ctx sdk.Context) authtypes.ModuleAccountI {
	return k.accountKeeper.GetModuleAccount(ctx, types.RequestAccName)
}
package dashrates

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"strconv"
	"time"
)

// LiquidAPI implements the RateAPI interface and contains info necessary for
// calling to the public Liquid price ticker API.
type LiquidAPI struct {
	BaseAPIURL          string
	PriceTickerEndpoint string
}

// NewLiquidAPI is a constructor for LiquidAPI.
func NewLiquidAPI() *LiquidAPI {
	return &LiquidAPI{
		BaseAPIURL:          "https://api.liquid.com",
		PriceTickerEndpoint: "/products/116",
	}
}

// DisplayName returns the exchange display name. It is part of the RateAPI
// interface implementation.
func (a *LiquidAPI) DisplayName() string {
	return "Liquid"
}

// FetchRate gets the Dash exchange rate from the Liquid API.
//
// This is part of the RateAPI interface implementation.
func (a *LiquidAPI) FetchRate() (*RateInfo, error) {
	httpResp, err := http.Get(a.BaseAPIURL + a.PriceTickerEndpoint)
	if err != nil {
		return nil, err
	}
	defer httpResp.Body.Close()

	fetchedAt := time.Now()

	raw, err := ioutil.ReadAll(httpResp.Body)
	if err != nil {
		return nil, err
	}

	// Decode the ticker payload and convert its string fields to numbers.
	var ticker liquidPubTickerResp
	if err = json.Unmarshal(raw, &ticker); err != nil {
		return nil, err
	}
	parsed, err := ticker.Normalize()
	if err != nil {
		return nil, err
	}

	return &RateInfo{
		BaseCurrency:    "DASH",
		QuoteCurrency:   "BTC",
		LastPrice:       parsed.LastPrice,
		BaseAssetVolume: parsed.Volume24h,
		FetchTime:       fetchedAt,
	}, nil
}

// liquidPubTickerData is used in parsing the Liquid API response only.
type liquidPubTickerData struct {
	ID        int64
	LastPrice float64
	Volume24h float64
}

// liquidPubTickerResp is used in parsing the Liquid API response only.
type liquidPubTickerResp struct {
	ID        string `json:"id"`
	LastPrice string `json:"last_traded_price"`
	Volume24h string `json:"volume_24h"`
}

// Normalize parses the fields in liquidPubTickerResp and returns a
// liquidPubTickerData with proper data types.
func (resp *liquidPubTickerResp) Normalize() (*liquidPubTickerData, error) { id, err := strconv.ParseInt(resp.ID, 10, 64) if err != nil { return nil, err } lastPrice, err := strconv.ParseFloat(resp.LastPrice, 64) if err != nil { return nil, err } volume24h, err := strconv.ParseFloat(resp.Volume24h, 64) if err != nil { return nil, err } return &liquidPubTickerData{ ID: id, LastPrice: lastPrice, Volume24h: volume24h, }, nil }
package app

import (
	"io/ioutil"
	"strings"

	"github.com/pkg/errors"
)

// fileReader loads a newline-separated list of executable names from a file.
type fileReader struct {
	file string
}

// newFileReader constructs a fileReader for the given path.
func newFileReader(f string) *fileReader {
	return &fileReader{file: f}
}

// ReadExecutables returns the trimmed, non-blank lines of the underlying
// file. It errors when the file is unreadable or holds no entries.
func (r *fileReader) ReadExecutables() ([]string, error) {
	raw, err := ioutil.ReadFile(r.file)
	if err != nil {
		return nil, errors.Wrap(err, "Can't read file "+r.file)
	}

	executables := clean(strings.Split(string(raw), "\n"))
	if len(executables) == 0 {
		return nil, errors.New("No executables in " + r.file)
	}
	return executables, nil
}

// clean trims surrounding whitespace from every entry and drops the empty ones.
func clean(data []string) []string {
	res := make([]string, 0)
	for _, entry := range data {
		if trimmed := strings.TrimSpace(entry); trimmed != "" {
			res = append(res, trimmed)
		}
	}
	return res
}
package model

import "github.com/jinzhu/gorm"

// Announce is a persisted announcement record.
// The embedded gorm.Model supplies the ID, CreatedAt, UpdatedAt and
// DeletedAt columns.
type Announce struct {
	gorm.Model
	Title   string `gorm:"type:varchar(50);not null"` // headline; required, up to 50 chars
	Content string `gorm:"type:varchar(200);"`        // optional body text, up to 200 chars
	Url     string `gorm:"type:varchar(100);"`        // optional link target, up to 100 chars
}
package pkg

import (
	"path/filepath"

	"github.com/appscode/go/flags"
	"github.com/appscode/go/log"
	"github.com/spf13/cobra"
	"k8s.io/client-go/tools/clientcmd"
	"kmodules.xyz/client-go/tools/backup"
	"stash.appscode.dev/stash/pkg/restic"
	"stash.appscode.dev/stash/pkg/util"
)

const (
	// JobClusterBackup is the metrics job name used for cluster backups.
	JobClusterBackup = "stash-cluster-backup"
)

// clusterBackupOptions collects every flag-backed setting of the
// backup-cluster command.
type clusterBackupOptions struct {
	sanitize       bool   // strip decorators from dumped YAML
	backupDir      string // where cluster resources are dumped before backup
	masterURL      string
	kubeconfigPath string
	context        string // kubeconfig context; auto-detected when empty
	outputDir      string // where output.json is written (optional)

	backupOpt restic.BackupOptions
	setupOpt  restic.SetupOptions
	metrics   restic.MetricsOptions
}

// NewCmdBackup creates the "backup-cluster" command, which dumps all cluster
// resource YAML to a scratch directory and backs that directory up with
// restic.
//
// Fix over the original: the user-facing help strings spelled "whether" as
// "weather" in four flags.
func NewCmdBackup() *cobra.Command {
	opt := &clusterBackupOptions{
		setupOpt: restic.SetupOptions{
			ScratchDir:  restic.DefaultScratchDir,
			EnableCache: false,
		},
		backupOpt: restic.BackupOptions{
			Host: restic.DefaultHost,
		},
		metrics: restic.MetricsOptions{
			JobName: JobClusterBackup,
		},
		backupDir: filepath.Join(restic.DefaultScratchDir, "cluster-resources"),
		sanitize:  false,
	}

	cmd := &cobra.Command{
		Use:               "backup-cluster",
		Short:             "Takes a backup of Cluster's resources YAML",
		DisableAutoGenTag: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			flags.EnsureRequiredFlags(cmd, "provider", "secret-dir")

			err := opt.runClusterBackup()
			if err != nil {
				log.Errorln(err)
				// Record the failure in output.json/metrics before exiting.
				return util.HandleResticError(opt.outputDir, restic.DefaultOutputFileName, err)
			}
			return nil
		},
	}

	cmd.Flags().StringVar(&opt.masterURL, "master", "", "URL of master node")
	cmd.Flags().StringVar(&opt.kubeconfigPath, "kubeconfig", opt.kubeconfigPath, "kubeconfig file pointing at the 'core' kubernetes server")
	cmd.Flags().StringVar(&opt.context, "context", "", "Context to use from kubeconfig file")
	cmd.Flags().BoolVar(&opt.sanitize, "sanitize", false, "Cleanup decorators from dumped YAML files")

	cmd.Flags().StringVar(&opt.setupOpt.Provider, "provider", opt.setupOpt.Provider, "Backend provider (i.e. gcs, s3, azure etc)")
	cmd.Flags().StringVar(&opt.setupOpt.Bucket, "bucket", opt.setupOpt.Bucket, "Name of the cloud bucket/container (keep empty for local backend)")
	cmd.Flags().StringVar(&opt.setupOpt.Endpoint, "endpoint", opt.setupOpt.Endpoint, "Endpoint for s3/s3 compatible backend")
	cmd.Flags().StringVar(&opt.setupOpt.URL, "rest-server-url", opt.setupOpt.URL, "URL for rest backend")
	cmd.Flags().StringVar(&opt.setupOpt.Path, "path", opt.setupOpt.Path, "Directory inside the bucket where backup will be stored")
	cmd.Flags().StringVar(&opt.setupOpt.SecretDir, "secret-dir", opt.setupOpt.SecretDir, "Directory where storage secret has been mounted")
	cmd.Flags().StringVar(&opt.setupOpt.ScratchDir, "scratch-dir", opt.setupOpt.ScratchDir, "Temporary directory")
	cmd.Flags().BoolVar(&opt.setupOpt.EnableCache, "enable-cache", opt.setupOpt.EnableCache, "Specify whether to enable caching for restic")
	cmd.Flags().IntVar(&opt.setupOpt.MaxConnections, "max-connections", opt.setupOpt.MaxConnections, "Specify maximum concurrent connections for GCS, Azure and B2 backend")

	cmd.Flags().StringVar(&opt.backupOpt.Host, "hostname", opt.backupOpt.Host, "Name of the host machine")

	cmd.Flags().IntVar(&opt.backupOpt.RetentionPolicy.KeepLast, "retention-keep-last", opt.backupOpt.RetentionPolicy.KeepLast, "Specify value for retention strategy")
	cmd.Flags().IntVar(&opt.backupOpt.RetentionPolicy.KeepHourly, "retention-keep-hourly", opt.backupOpt.RetentionPolicy.KeepHourly, "Specify value for retention strategy")
	cmd.Flags().IntVar(&opt.backupOpt.RetentionPolicy.KeepDaily, "retention-keep-daily", opt.backupOpt.RetentionPolicy.KeepDaily, "Specify value for retention strategy")
	cmd.Flags().IntVar(&opt.backupOpt.RetentionPolicy.KeepWeekly, "retention-keep-weekly", opt.backupOpt.RetentionPolicy.KeepWeekly, "Specify value for retention strategy")
	cmd.Flags().IntVar(&opt.backupOpt.RetentionPolicy.KeepMonthly, "retention-keep-monthly", opt.backupOpt.RetentionPolicy.KeepMonthly, "Specify value for retention strategy")
	cmd.Flags().IntVar(&opt.backupOpt.RetentionPolicy.KeepYearly, "retention-keep-yearly", opt.backupOpt.RetentionPolicy.KeepYearly, "Specify value for retention strategy")
	cmd.Flags().StringSliceVar(&opt.backupOpt.RetentionPolicy.KeepTags, "retention-keep-tags", opt.backupOpt.RetentionPolicy.KeepTags, "Specify value for retention strategy")
	cmd.Flags().BoolVar(&opt.backupOpt.RetentionPolicy.Prune, "retention-prune", opt.backupOpt.RetentionPolicy.Prune, "Specify whether to prune old snapshot data")
	cmd.Flags().BoolVar(&opt.backupOpt.RetentionPolicy.DryRun, "retention-dry-run", opt.backupOpt.RetentionPolicy.DryRun, "Specify whether to test retention policy without deleting actual data")

	cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)")

	cmd.Flags().BoolVar(&opt.metrics.Enabled, "metrics-enabled", opt.metrics.Enabled, "Specify whether to export Prometheus metrics")
	cmd.Flags().StringVar(&opt.metrics.PushgatewayURL, "metrics-pushgateway-url", opt.metrics.PushgatewayURL, "Pushgateway URL where the metrics will be pushed")
	cmd.Flags().StringVar(&opt.metrics.MetricFileDir, "metrics-dir", opt.metrics.MetricFileDir, "Directory where to write metric.prom file (keep empty if you don't want to write metric in a text file)")
	cmd.Flags().StringSliceVar(&opt.metrics.Labels, "metrics-labels", opt.metrics.Labels, "Labels to apply in exported metrics")

	return cmd
}

// runClusterBackup dumps all cluster resources as YAML into opt.backupDir
// and then backs that directory up with restic, honoring the configured
// retention policy, metrics and output-file settings.
func (opt *clusterBackupOptions) runClusterBackup() error {
	config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, opt.kubeconfigPath)
	if err != nil {
		return err
	}

	// if no explicit context is provided then try to detect context from kubeconfig file.
	if opt.context == "" {
		cfg, err := clientcmd.LoadFromFile(opt.kubeconfigPath)
		if err == nil {
			opt.context = cfg.CurrentContext
		} else {
			// using in-cluster config. so no context. use default.
			opt.context = "default"
		}
	}

	// backup cluster resources yaml into opt.backupDir
	mgr := backup.NewBackupManager(opt.context, config, opt.sanitize)
	_, err = mgr.BackupToDir(opt.backupDir)
	if err != nil {
		return err
	}

	// apply nice, ionice settings from env
	opt.setupOpt.Nice, err = util.NiceSettingsFromEnv()
	if err != nil {
		return util.HandleResticError(opt.outputDir, restic.DefaultOutputFileName, err)
	}
	opt.setupOpt.IONice, err = util.IONiceSettingsFromEnv()
	if err != nil {
		return util.HandleResticError(opt.outputDir, restic.DefaultOutputFileName, err)
	}

	// init restic wrapper
	resticWrapper, err := restic.NewResticWrapper(opt.setupOpt)
	if err != nil {
		return err
	}

	// Now backup the directory where dumped YAML is stored
	opt.backupOpt.BackupDirs = []string{opt.backupDir}
	backupOutput, backupErr := resticWrapper.RunBackup(opt.backupOpt)

	// If metrics are enabled then generate metrics
	if opt.metrics.Enabled {
		err := backupOutput.HandleMetrics(&opt.metrics, backupErr)
		if err != nil {
			return err
		}
	}
	if backupErr != nil {
		return backupErr
	}

	// If output directory specified, then write the output in "output.json" file in the specified directory
	if opt.outputDir != "" {
		return backupOutput.WriteOutput(filepath.Join(opt.outputDir, restic.DefaultOutputFileName))
	}
	return nil
}
package logx

import "io"

// Option is the common type of functions that set options
type Option func(*options)

type options struct {
	marshaler               Marshaler
	writer                  io.Writer
	level                   Level
	withoutTime             bool
	withoutFileInfo         bool
	additionalFileSkipLevel int
}

// MarshalerOpt is an option that changes the log marshaler.
func MarshalerOpt(marshaler Marshaler) Option {
	return func(opts *options) { opts.marshaler = marshaler }
}

// WriterOpt is an option that changes the log writer.
func WriterOpt(writer io.Writer) Option {
	return func(opts *options) { opts.writer = writer }
}

// LevelOpt is an option that changes the log level.
func LevelOpt(level Level) Option {
	return func(opts *options) { opts.level = level }
}

// WithoutTimeOpt is an option that removes time logging for testing purposes.
func WithoutTimeOpt() Option {
	return func(opts *options) { opts.withoutTime = true }
}

// WithoutFileInfo is an option that disables logging the file and line where the log was called.
func WithoutFileInfo() Option {
	return func(opts *options) { opts.withoutFileInfo = true }
}

// AdditionalFileSkipLevel is an option that lets you go more levels up to find the file & line doing the log.
func AdditionalFileSkipLevel(levels int) Option {
	return func(opts *options) { opts.additionalFileSkipLevel = levels }
}
package eventsourcing

import (
	"context"

	"github.com/caos/zitadel/internal/errors"
	es_models "github.com/caos/zitadel/internal/eventstore/models"
	org_model "github.com/caos/zitadel/internal/org/model"
	"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
)

// OrgByIDQuery builds a search query for the org aggregate with the given id,
// returning events after latestSequence. Fails when id is empty.
func OrgByIDQuery(id string, latestSequence uint64) (*es_models.SearchQuery, error) {
	if id == "" {
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-dke74", "id should be filled")
	}
	return OrgQuery(latestSequence).
		AggregateIDFilter(id), nil
}

// OrgDomainUniqueQuery builds a query for the newest event of the
// domain-uniqueness aggregate identified by the domain name itself.
func OrgDomainUniqueQuery(domain string) *es_models.SearchQuery {
	return es_models.NewSearchQuery().
		AggregateTypeFilter(model.OrgDomainAggregate).
		AggregateIDFilter(domain).
		OrderDesc().
		SetLimit(1)
}

// OrgNameUniqueQuery builds a query for the newest event of the
// name-uniqueness aggregate identified by the org name itself.
func OrgNameUniqueQuery(name string) *es_models.SearchQuery {
	return es_models.NewSearchQuery().
		AggregateTypeFilter(model.OrgNameAggregate).
		AggregateIDFilter(name).
		OrderDesc().
		SetLimit(1)
}

// OrgQuery builds the base query for org aggregate events after
// latestSequence.
func OrgQuery(latestSequence uint64) *es_models.SearchQuery {
	return es_models.NewSearchQuery().
		AggregateTypeFilter(model.OrgAggregate).
		LatestSequenceFilter(latestSequence)
}

// OrgAggregate creates a bare org aggregate at the given sequence.
func OrgAggregate(ctx context.Context, aggCreator *es_models.AggregateCreator, id string, sequence uint64) (*es_models.Aggregate, error) {
	return aggCreator.NewAggregate(ctx, id, model.OrgAggregate, model.OrgVersion, sequence)
}

// orgCreatedAggregates produces all aggregates needed to create an org:
// the org aggregate itself (OrgAdded + domain events), plus the uniqueness
// reservation aggregates for its domains and its name. The org aggregate is
// appended last.
func orgCreatedAggregates(ctx context.Context, aggCreator *es_models.AggregateCreator, org *model.Org) (_ []*es_models.Aggregate, err error) {
	if org == nil {
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-kdie7", "Errors.Internal")
	}
	// The org is its own resource owner.
	agg, err := aggCreator.NewAggregate(ctx, org.AggregateID, model.OrgAggregate, model.OrgVersion, org.Sequence, es_models.OverwriteResourceOwner(org.AggregateID))
	if err != nil {
		return nil, err
	}
	agg, err = agg.AppendEvent(model.OrgAdded, org)
	if err != nil {
		return nil, err
	}
	aggregates := make([]*es_models.Aggregate, 0)
	aggregates, err = addDomainAggregateAndEvents(ctx, aggCreator, agg, aggregates, org)
	if err != nil {
		return nil, err
	}
	nameAggregate, err := reservedUniqueNameAggregate(ctx, aggCreator, org.AggregateID, org.Name)
	if err != nil {
		return nil, err
	}
	aggregates = append(aggregates, nameAggregate)
	return append(aggregates, agg), nil
}

// addDomainAggregateAndEvents appends the domain-related events
// (added/verified/primary) for each of the org's domains to orgAggregate and
// collects a uniqueness reservation aggregate per verified domain.
func addDomainAggregateAndEvents(ctx context.Context, aggCreator *es_models.AggregateCreator, orgAggregate *es_models.Aggregate, aggregates []*es_models.Aggregate, org *model.Org) ([]*es_models.Aggregate, error) {
	for _, domain := range org.Domains {
		// NOTE(review): ':=' creates a loop-scoped orgAggregate shadowing the
		// parameter on every iteration; this is only correct if AppendEvent
		// mutates and returns the same *Aggregate — confirm against
		// es_models.Aggregate.AppendEvent.
		orgAggregate, err := orgAggregate.AppendEvent(model.OrgDomainAdded, domain)
		if err != nil {
			return nil, err
		}
		if domain.Verified {
			domainAggregate, err := reservedUniqueDomainAggregate(ctx, aggCreator, org.AggregateID, domain.Domain)
			if err != nil {
				return nil, err
			}
			aggregates = append(aggregates, domainAggregate)
			orgAggregate, err = orgAggregate.AppendEvent(model.OrgDomainVerified, domain)
			if err != nil {
				return nil, err
			}
		}
		if domain.Primary {
			orgAggregate, err = orgAggregate.AppendEvent(model.OrgDomainPrimarySet, domain)
			if err != nil {
				return nil, err
			}
		}
	}
	return aggregates, nil
}

// OrgUpdateAggregates computes the change set between existing and updated
// and produces the org change aggregate; when the name changed, it also
// reserves the new name and releases the old one.
func OrgUpdateAggregates(ctx context.Context, aggCreator *es_models.AggregateCreator, existing *model.Org, updated *model.Org) ([]*es_models.Aggregate, error) {
	if existing == nil {
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-dk83d", "Errors.Internal")
	}
	if updated == nil {
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-dhr74", "Errors.Internal")
	}
	changes := existing.Changes(updated)
	if len(changes) == 0 {
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-E0hc5", "Errors.NoChangesFound")
	}

	aggregates := make([]*es_models.Aggregate, 0, 3)

	if name, ok := changes["name"]; ok {
		nameAggregate, err := reservedUniqueNameAggregate(ctx, aggCreator, "", name.(string))
		if err != nil {
			return nil, err
		}
		aggregates = append(aggregates, nameAggregate)
		nameReleasedAggregate, err := releasedUniqueNameAggregate(ctx, aggCreator, "", existing.Name)
		if err != nil {
			return nil, err
		}
		aggregates = append(aggregates, nameReleasedAggregate)
	}

	orgAggregate, err := OrgAggregate(ctx, aggCreator, existing.AggregateID, existing.Sequence)
	if err != nil {
		return nil, err
	}
	orgAggregate, err = orgAggregate.AppendEvent(model.OrgChanged, changes)
	if err != nil {
		return nil, err
	}
	aggregates = append(aggregates, orgAggregate)

	return aggregates, nil
}

// orgDeactivateAggregate returns a deferred builder for the OrgDeactivated
// event; it rejects nil orgs and orgs that are already inactive.
func orgDeactivateAggregate(aggCreator *es_models.AggregateCreator, org *model.Org) func(ctx context.Context) (*es_models.Aggregate, error) {
	return func(ctx context.Context) (*es_models.Aggregate, error) {
		if org == nil {
			return nil, errors.ThrowPreconditionFailed(nil, "EVENT-R03z8", "Errors.Internal")
		}
		if org.State == int32(org_model.OrgStateInactive) {
			return nil, errors.ThrowInvalidArgument(nil, "EVENT-mcPH0", "Errors.Internal.AlreadyDeactivated")
		}
		agg, err := OrgAggregate(ctx, aggCreator, org.AggregateID, org.Sequence)
		if err != nil {
			return nil, err
		}
		return agg.AppendEvent(model.OrgDeactivated, nil)
	}
}

// orgReactivateAggregate returns a deferred builder for the OrgReactivated
// event; it rejects nil orgs and orgs that are already active.
func orgReactivateAggregate(aggCreator *es_models.AggregateCreator, org *model.Org) func(ctx context.Context) (*es_models.Aggregate, error) {
	return func(ctx context.Context) (*es_models.Aggregate, error) {
		if org == nil {
			return nil, errors.ThrowPreconditionFailed(nil, "EVENT-cTHLd", "Errors.Internal")
		}
		if org.State == int32(org_model.OrgStateActive) {
			return nil, errors.ThrowInvalidArgument(nil, "EVENT-pUSMs", "Errors.Org.AlreadyActive")
		}
		agg, err := OrgAggregate(ctx, aggCreator, org.AggregateID, org.Sequence)
		if err != nil {
			return nil, err
		}
		return agg.AppendEvent(model.OrgReactivated, nil)
	}
}

// reservedUniqueDomainAggregate builds the aggregate that reserves a domain
// name globally. Its precondition rejects the reservation when the newest
// event for that domain is already a reservation.
func reservedUniqueDomainAggregate(ctx context.Context, aggCreator *es_models.AggregateCreator, resourceOwner, domain string) (*es_models.Aggregate, error) {
	// The domain string itself is the aggregate id of the uniqueness aggregate.
	aggregate, err := aggCreator.NewAggregate(ctx, domain, model.OrgDomainAggregate, model.OrgVersion, 0)
	if resourceOwner != "" {
		// Recreate with an explicit resource owner; the first result is discarded.
		aggregate, err = aggCreator.NewAggregate(ctx, domain, model.OrgDomainAggregate, model.OrgVersion, 0, es_models.OverwriteResourceOwner(resourceOwner))
	}
	if err != nil {
		return nil, err
	}
	aggregate, err = aggregate.AppendEvent(model.OrgDomainReserved, nil)
	if err != nil {
		return nil, err
	}
	return aggregate.SetPrecondition(OrgDomainUniqueQuery(domain), isEventValidation(aggregate, model.OrgDomainReserved)), nil
}

// releasedUniqueDomainAggregate builds the aggregate that releases a domain
// reservation. Its precondition rejects the release when the newest event for
// that domain is already a release.
func releasedUniqueDomainAggregate(ctx context.Context, aggCreator *es_models.AggregateCreator, resourceOwner, domain string) (*es_models.Aggregate, error) {
	aggregate, err := aggCreator.NewAggregate(ctx, domain, model.OrgDomainAggregate, model.OrgVersion, 0)
	if resourceOwner != "" {
		aggregate, err = aggCreator.NewAggregate(ctx, domain, model.OrgDomainAggregate, model.OrgVersion, 0, es_models.OverwriteResourceOwner(resourceOwner))
	}
	if err != nil {
		return nil, err
	}
	aggregate, err = aggregate.AppendEvent(model.OrgDomainReleased, nil)
	if err != nil {
		return nil, err
	}
	return aggregate.SetPrecondition(OrgDomainUniqueQuery(domain), isEventValidation(aggregate, model.OrgDomainReleased)), nil
}

// reservedUniqueNameAggregate builds the aggregate that reserves an org name
// globally, mirroring reservedUniqueDomainAggregate for names.
func reservedUniqueNameAggregate(ctx context.Context, aggCreator *es_models.AggregateCreator, resourceOwner, name string) (aggregate *es_models.Aggregate, err error) {
	aggregate, err = aggCreator.NewAggregate(ctx, name, model.OrgNameAggregate, model.OrgVersion, 0)
	if resourceOwner != "" {
		aggregate, err = aggCreator.NewAggregate(ctx, name, model.OrgNameAggregate, model.OrgVersion, 0, es_models.OverwriteResourceOwner(resourceOwner))
	}
	if err != nil {
		return nil, err
	}
	aggregate, err = aggregate.AppendEvent(model.OrgNameReserved, nil)
	if err != nil {
		return nil, err
	}
	return aggregate.SetPrecondition(OrgNameUniqueQuery(name), isEventValidation(aggregate, model.OrgNameReserved)), nil
}

// releasedUniqueNameAggregate builds the aggregate that releases an org name
// reservation, mirroring releasedUniqueDomainAggregate for names.
func releasedUniqueNameAggregate(ctx context.Context, aggCreator *es_models.AggregateCreator, resourceOwner, name string) (aggregate *es_models.Aggregate, err error) {
	aggregate, err = aggCreator.NewAggregate(ctx, name, model.OrgNameAggregate, model.OrgVersion, 0)
	if resourceOwner != "" {
		aggregate, err = aggCreator.NewAggregate(ctx, name, model.OrgNameAggregate, model.OrgVersion, 0, es_models.OverwriteResourceOwner(resourceOwner))
	}
	if err != nil {
		return nil, err
	}
	aggregate, err = aggregate.AppendEvent(model.OrgNameReleased, nil)
	if err != nil {
		return nil, err
	}
	return aggregate.SetPrecondition(OrgNameUniqueQuery(name), isEventValidation(aggregate, model.OrgNameReleased)), nil
}

// OrgDomainAddedAggregate returns a deferred builder appending an
// OrgDomainAdded event to the existing org's aggregate.
func OrgDomainAddedAggregate(aggCreator *es_models.AggregateCreator, existing *model.Org, domain *model.OrgDomain) func(ctx context.Context) (*es_models.Aggregate, error) {
	return func(ctx context.Context) (*es_models.Aggregate, error) {
		if domain == nil {
			return nil, errors.ThrowPreconditionFailed(nil, "EVENT-OSid3", "Errors.Internal")
		}
		agg, err := OrgAggregate(ctx, aggCreator, existing.AggregateID, existing.Sequence)
		if err != nil {
			return nil, err
		}
		return agg.AppendEvent(model.OrgDomainAdded, domain)
	}
}

// OrgDomainVerifiedAggregate returns a deferred builder producing both the
// OrgDomainVerified event on the org and the global uniqueness reservation
// for the verified domain. The org aggregate is appended last.
func OrgDomainVerifiedAggregate(aggCreator *es_models.AggregateCreator, existing *model.Org, domain *model.OrgDomain) func(ctx context.Context) ([]*es_models.Aggregate, error) {
	return func(ctx context.Context) ([]*es_models.Aggregate, error) {
		if domain == nil {
			return nil, errors.ThrowPreconditionFailed(nil, "EVENT-DHs7s", "Errors.Internal")
		}
		agg, err := OrgAggregate(ctx, aggCreator, existing.AggregateID, existing.Sequence)
		if err != nil {
			return nil, err
		}
		aggregates := make([]*es_models.Aggregate, 0, 2)
		agg, err = agg.AppendEvent(model.OrgDomainVerified, domain)
		if err != nil {
			return nil, err
		}
		domainAgregate, err := reservedUniqueDomainAggregate(ctx, aggCreator, existing.AggregateID, domain.Domain)
		if err != nil {
			return nil, err
		}
		aggregates = append(aggregates, domainAgregate)
		return append(aggregates, agg), nil
	}
}

// OrgDomainSetPrimaryAggregate returns a deferred builder appending an
// OrgDomainPrimarySet event to the existing org's aggregate.
func OrgDomainSetPrimaryAggregate(aggCreator *es_models.AggregateCreator, existing *model.Org, domain *model.OrgDomain) func(ctx context.Context) (*es_models.Aggregate, error) {
	return func(ctx context.Context) (*es_models.Aggregate, error) {
		if domain == nil {
			return nil, errors.ThrowPreconditionFailed(nil, "EVENT-PSw3j", "Errors.Internal")
		}
		agg, err := OrgAggregate(ctx, aggCreator, existing.AggregateID, existing.Sequence)
		if err != nil {
			return nil, err
		}
		return agg.AppendEvent(model.OrgDomainPrimarySet, domain)
	}
}

// OrgDomainRemovedAggregate produces the OrgDomainRemoved event on the org
// plus the aggregate that releases the domain's uniqueness reservation.
func OrgDomainRemovedAggregate(ctx context.Context, aggCreator *es_models.AggregateCreator, existing *model.Org, domain *model.OrgDomain) ([]*es_models.Aggregate, error) {
	if domain == nil {
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-si8dW", "Errors.Internal")
	}
	aggregates := make([]*es_models.Aggregate, 0, 2)
	agg, err := OrgAggregate(ctx, aggCreator, existing.AggregateID, existing.Sequence)
	if err != nil {
		return nil, err
	}
	agg, err = agg.AppendEvent(model.OrgDomainRemoved, domain)
	if err != nil {
		return nil, err
	}
	aggregates = append(aggregates, agg)
	domainAgregate, err := releasedUniqueDomainAggregate(ctx, aggCreator, existing.AggregateID, domain.Domain)
	if err != nil {
		return nil, err
	}
	return append(aggregates, domainAgregate), nil
}

// isEventValidation builds a precondition callback: it fails when the newest
// stored event already has the given type (the reservation/release would be a
// duplicate), and otherwise records the observed sequence on the aggregate.
// NOTE(review): the error message says "user" although this guards org
// name/domain uniqueness — likely copied from the user aggregate.
func isEventValidation(aggregate *es_models.Aggregate, eventType es_models.EventType) func(...*es_models.Event) error {
	return func(events ...*es_models.Event) error {
		if len(events) == 0 {
			// No prior event: start from sequence 0.
			aggregate.PreviousSequence = 0
			return nil
		}
		if events[0].Type == eventType {
			return errors.ThrowPreconditionFailedf(nil, "EVENT-eJQqe", "user is already %v", eventType)
		}
		aggregate.PreviousSequence = events[0].Sequence
		return nil
	}
}
package rest

import (
	"net/http"
	"net/url"
	"strconv"

	"github.com/HDIOES/su4na-API-main/models"
	"github.com/pkg/errors"
)

//StudioHandler handles HTTP requests for listing studios, delegating
//persistence to the wrapped StudioDAO.
type StudioHandler struct {
	Dao *models.StudioDAO
}

//ServeHTTP lists studios. Optional query parameters "limit" and "offset"
//(32-bit integers) page the result. Responds with a JSON array of StudioRo
//on success, 400 on invalid input and 500 on serialization failure.
func (g *StudioHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	requestBody, rawQuery, headers, err := GetRequestData(r)
	if err != nil {
		HandleErr(errors.Wrap(err, ""), w, 400, "Request cannot be read")
		return
	}
	if err := LogHTTPRequest(r.URL.String(), r.Method, headers, requestBody); err != nil {
		HandleErr(errors.Wrap(err, ""), w, 400, "Request cannot be logged")
		return
	}
	vars, parseErr := url.ParseQuery(*rawQuery)
	if parseErr != nil {
		HandleErr(errors.Wrap(parseErr, ""), w, 400, "URL not valid")
		// FIX: previously execution fell through with an unusable query.
		return
	}
	studioSQLBuilder := models.StudioQueryBuilder{}
	if limit, limitOk := vars["limit"]; limitOk {
		limitInt64, parseErr := strconv.ParseInt(limit[0], 10, 32)
		if parseErr != nil {
			HandleErr(errors.Wrap(parseErr, ""), w, 400, "Not valid limit")
			return
		}
		// FIX: the parsed limit was previously passed to SetOffset.
		studioSQLBuilder.SetLimit(int32(limitInt64))
	}
	if offset, offsetOk := vars["offset"]; offsetOk {
		offsetInt64, parseErr := strconv.ParseInt(offset[0], 10, 32)
		if parseErr != nil {
			HandleErr(errors.Wrap(parseErr, ""), w, 400, "Not valid offset")
			return
		}
		studioSQLBuilder.SetOffset(int32(offsetInt64))
	}
	if studiosDtos, stErr := g.Dao.FindByFilter(studioSQLBuilder); stErr != nil {
		HandleErr(stErr, w, 400, "Error")
	} else {
		studios := []StudioRo{}
		for _, dto := range studiosDtos {
			// FIX: pin a per-iteration copy so &dto.ID is unique per studio
			// (pre-Go 1.22 the loop variable is shared across iterations).
			dto := dto
			studios = append(studios, StudioRo{
				ID:           &dto.ID,
				Name:         dto.Name,
				FilteredName: dto.FilteredStudioName,
			})
		}
		if err := ReturnResponseAsJSON(w, studios, 200); err != nil {
			HandleErr(err, w, 500, "Error")
		}
	}
}

//StudioRo is the JSON response object describing one studio.
type StudioRo struct {
	ID           *int64  `json:"id"`
	Name         *string `json:"name"`
	FilteredName *string `json:"filtered_name"`
}
package block

import (
	"strconv"
	"time"

	"blockchain/pkg/cryptoAPI"
)

// Block is one link in the chain; Hash covers PrevHash, Timestamp, Index and
// Data.
type Block struct {
	Index     int
	Timestamp string
	Data      int
	Hash      string
	PrevHash  string
}

// BlockChain holds the current canonical chain.
var BlockChain []Block

// hashPayload builds the canonical pre-image hashed for a block.
// FIX: the original used string(Index)/string(Data), which converts an int to
// a single rune (flagged by go vet), not to its decimal representation.
func hashPayload(prevHash, timestamp string, index, data int) string {
	return prevHash + timestamp + strconv.Itoa(index) + strconv.Itoa(data)
}

// CreateBlock derives the successor of oldBlock carrying data.
func CreateBlock(oldBlock Block, data int) Block {
	b := Block{
		Index:     oldBlock.Index + 1,
		Timestamp: time.Now().String(),
		Data:      data,
		PrevHash:  oldBlock.Hash,
	}
	b.Hash = cryptoAPI.GenerateSHA256Hash(hashPayload(b.PrevHash, b.Timestamp, b.Index, b.Data))
	return b
}

// IsValid reports whether block is a well-formed successor of prevBlock:
// consecutive index, matching previous hash, and a correct own hash.
func IsValid(block Block, prevBlock Block) bool {
	if prevBlock.Index+1 != block.Index {
		return false
	}
	if prevBlock.Hash != block.PrevHash {
		return false
	}
	if cryptoAPI.GenerateSHA256Hash(hashPayload(block.PrevHash, block.Timestamp, block.Index, block.Data)) != block.Hash {
		return false
	}
	return true
}

// ReplaceChain adopts chain when it is strictly longer than the current one
// (longest-chain rule).
func ReplaceChain(chain []Block) {
	if len(chain) > len(BlockChain) {
		BlockChain = chain
	}
}
package domain // Alerm : type Alerm struct { ID string `json:"_id"` UserID string `json:"userId"` Stop bool `json:"stop"` }
package steps

import (
	"errors"
	"strings"

	s "github.com/pganalyze/collector/setup/state"
	"github.com/pganalyze/collector/setup/util"
)

// ConfirmAutoExplainAvailable verifies that the auto_explain contrib module
// can be loaded into the current server. The step never changes anything
// itself: when the module is missing, Run only reports that as an error.
var ConfirmAutoExplainAvailable = &s.Step{
	Kind:        s.AutomatedExplainStep,
	ID:          "aemod_check_auto_explain_available",
	Description: "Confirm the auto_explain contrib module is available",
	Check: func(state *s.SetupState) (bool, error) {
		logExplain, err := util.UsingLogExplain(state.CurrentSection)
		if err != nil {
			return logExplain, err
		}
		if logExplain {
			// Log-based EXPLAIN is configured; auto_explain is not needed.
			return true, nil
		}
		loadErr := state.QueryRunner.Exec("LOAD 'auto_explain'")
		switch {
		case loadErr == nil:
			return true, nil
		case strings.Contains(loadErr.Error(), "No such file or directory"):
			// Module simply not installed: the check fails without raising,
			// giving Run the chance to surface a readable message.
			return false, nil
		default:
			return false, loadErr
		}
	},
	Run: func(_ *s.SetupState) error {
		return errors.New("contrib module auto_explain is not available")
	},
}
// Copyright 2016 Zhang Peihao <zhangpeihao@gmail.com>

/*
Package plaintext implements the plain-text command format.

A command is split into multiple lines:
	line 1: protocol version (plain text: first character 't' followed by the version number)
	line 2: App ID the command belongs to
	line 3: command name
	line 4: command data
	line 5: command payload
*/
package plaintext

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/golang/glog"
	"github.com/zhangpeihao/zim/pkg/define"
	"github.com/zhangpeihao/zim/pkg/protocol"
	"github.com/zhangpeihao/zim/pkg/protocol/serialize"
)

const (
	// Version is the protocol version string.
	Version = "t1"
	// ProbeByte is the first byte identifying this protocol.
	ProbeByte byte = 't'
)

var (
	// CommandSep separates command fields.
	CommandSep = []byte{'\n'}
)

const (
	// CommandVersionLine is the index of the version line.
	CommandVersionLine = 0
	// CommandAppIDLine is the index of the App ID line.
	CommandAppIDLine = 1
	// CommandNameLine is the index of the command-name line.
	CommandNameLine = 2
	// CommandDataLine is the index of the command-data line.
	CommandDataLine = 3
	// CommandPayloadLine is the index of the payload line.
	CommandPayloadLine = 4
	// CommandLines is the total number of lines per command.
	CommandLines = 5
)

var (
	serializer = &serialize.Serializer{
		Version:        Version,
		ProbeByte:      ProbeByte,
		NewParseEngine: NewParseEngine,
		Compose:        Compose,
	}
)

// engine accumulates the (possibly partial) lines of one command between
// Parse calls.
type engine struct {
	lines      [CommandLines][]byte // buffered content per command line
	linesIndex int                  // index of the line currently being read
}

func init() {
	// Register this format with the serializer registry.
	serialize.Register(serializer)
}

// NewParseEngine creates a new parse engine.
func NewParseEngine() serialize.ParseEngine {
	return &engine{}
}

// Parse reads one command from br. Partial reads keep their state in the
// engine, so a command may arrive in fragments across multiple calls.
func (e *engine) Parse(br *bufio.Reader) (cmd *protocol.Command, err error) {
	for e.linesIndex < CommandLines {
		// Read line by line.
		var line []byte
		line, err = br.ReadBytes('\n')
		if err != nil {
			glog.Warningln("protocol::serialize::plaintext::Parse() error:", err)
			return
		}
		// Append to the current slot; a short read keeps the partial line.
		e.lines[e.linesIndex] = append(e.lines[e.linesIndex], line...)
		// The line is complete once it ends with '\n'.
		if line[len(line)-1] == '\n' {
			// complete
			e.linesIndex++
		}
	}
	defer e.reset()
	cmd = &protocol.Command{
		Version: strings.Trim(string(e.lines[CommandVersionLine]), "\r\t\n "),
		AppID:   strings.Trim(string(e.lines[CommandAppIDLine]), "\r\t\n "),
		Name:    strings.Trim(string(e.lines[CommandNameLine]), "\r\t\n "),
	}
	// The version line must start with the protocol probe byte.
	if len(cmd.Version) == 0 || cmd.Version[0] != ProbeByte {
		err = define.ErrUnsupportProtocol
		fmt.Println("define.ErrUnsupportProtocol")
		return
	}
	cmd.Payload = bytes.Trim(e.lines[CommandPayloadLine], "\r\n")
	if err = cmd.ParseData(e.lines[CommandDataLine]); err != nil {
		fmt.Println("cmd.ParseData error:", err)
		return
	}
	return
}

// Close releases the engine's buffered state.
func (e *engine) Close() error {
	e.reset()
	return nil
}

// reset clears all buffered lines so the next Parse starts a fresh command.
func (e *engine) reset() {
	for i := 0; i < CommandLines; i++ {
		e.lines[i] = nil
	}
	e.linesIndex = 0
}

// Compose encodes a command into its wire format (see the package comment
// for the line layout).
func Compose(cmd *protocol.Command) ([]byte, error) {
	buf := bytes.NewBufferString(Version)
	buf.WriteByte('\n')
	buf.WriteString(cmd.AppID)
	buf.WriteByte('\n')
	buf.WriteString(cmd.Name)
	buf.WriteByte('\n')
	if cmd.Data != nil {
		// json.Encoder.Encode appends a trailing '\n', which doubles as the
		// data-line separator.
		// NOTE(review): the Encode error is silently dropped — confirm
		// cmd.Data is always serializable.
		enc := json.NewEncoder(buf)
		enc.Encode(cmd.Data)
	} else {
		buf.WriteByte('\n')
	}
	buf.Write(cmd.Payload)
	buf.WriteByte('\n')
	return buf.Bytes(), nil
}
package hosts import ( "github.com/jrapoport/gothic/core" "github.com/jrapoport/gothic/hosts/rpc" "github.com/jrapoport/gothic/hosts/rpc/system" ) const rpcName = "rpc" // NewRPCHost creates a new rpc host. func NewRPCHost(a *core.API, address string) core.Hosted { return rpc.NewHost(a, rpcName, address, []rpc.RegisterServer{ system.RegisterServer, }) }
package v1

import "bytes"

// NopCloser adapts a *bytes.Reader so tests can use it where a
// multipart.File (which also requires Close) is expected; Close is a no-op.
type NopCloser struct {
	*bytes.Reader
}

// Close implements io.Closer and always succeeds.
func (r *NopCloser) Close() error {
	return nil
}
package main

import (
	"fmt"

	ellipse "github.com/diversemix/learn-golang/projects/ellipse"
)

// main computes and prints the eccentricity of an ellipse with
// semi-axes 9 and 2.
func main() {
	// Initialise Init with the values of A and B.
	e := ellipse.Init{
		9,
		2,
	}
	// This will print 0.9749960430435691.
	fmt.Println(e.GetEccentricity())
}
package dbmigrate

import (
	"database/sql"
	"fmt"
	"strings"
	"time"

	"github.com/pkg/errors"
)

var (
	// errDBNameNotProvided is returned when settings lack a database name.
	errDBNameNotProvided = errors.New("database name is not provided")
	// errUserNotProvided is returned when settings lack a user.
	errUserNotProvided = errors.New("user is not provided")
)

// dbWrapper encapsulates all database access operations
type dbWrapper struct {
	*Settings
	db *sql.DB
	// provider and placeholdersProvider are embedded interfaces, so their
	// names are promoted as fields (w.provider, w.placeholdersProvider).
	provider
	placeholdersProvider
}

// executor is an interface to exec sql so we could pass db instance as well as tx one
type executor interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
}

// migrationData holds info about migration from migrations table
type migrationData struct {
	version   time.Time // migration version timestamp
	appliedAt time.Time // when the migration was applied
}

// newDBWrapper creates new dbWrapper instance
func newDBWrapper(settings *Settings, provider provider) *dbWrapper {
	w := &dbWrapper{
		Settings: settings,
		provider: provider,
	}
	// Cache the optional placeholders capability of the provider up front.
	if pp, ok := w.provider.(placeholdersProvider); ok {
		w.placeholdersProvider = pp
	}
	return w
}

// open creates new database connection
func (w *dbWrapper) open() error {
	dsn, err := w.provider.dsn(w.Settings)
	if err != nil {
		return err
	}
	w.db, err = sql.Open(w.provider.driver(), dsn)
	if err != nil {
		return errors.Wrap(err, "can't open database")
	}
	return nil
}

// close shuts down database connection
func (w *dbWrapper) close() error {
	err := w.db.Close()
	if err != nil {
		return errors.Wrap(err, "can't close db")
	}
	return nil
}

// setPlaceholders calls placeholdersProvider's placeholdersProvider if it is provided;
// otherwise the query is returned unchanged.
func (w *dbWrapper) setPlaceholders(s string) string {
	if w.placeholdersProvider == nil {
		return s
	}
	return w.placeholdersProvider.setPlaceholders(s)
}

// hasMigrationsTable checks if the table with applied migrations data already exists
func (w *dbWrapper) hasMigrationsTable() (bool, error) {
	var table string
	err := w.db.QueryRow(w.setPlaceholders(w.provider.hasTableQuery()), w.MigrationsTable).Scan(&table)
	if err == sql.ErrNoRows {
		// No row means the table does not exist; not an error.
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

// createMigrationsTable
// creates new table for applied migrations data.
func (w *dbWrapper) createMigrationsTable() error {
	_, err := w.db.Exec(fmt.Sprintf(
		"CREATE TABLE %s (version VARCHAR(14) NOT NULL, applied_at VARCHAR(14) NOT NULL, PRIMARY KEY(version));",
		w.MigrationsTable))
	if err != nil {
		return errors.Wrap(err, "can't create migrations table")
	}
	return nil
}

// latestMigrationVersion returns a timestamp for latest migration version
func (w *dbWrapper) latestMigrationVersion() (time.Time, error) {
	version, err := w.getAttrOrderedBy("version", "version DESC")
	if err != nil {
		return time.Time{}, errors.Wrap(err, "can't select latest migration version from database")
	}
	return version, nil
}

// lastAppliedMigrationVersion returns a latest applied migration version timestamp
func (w *dbWrapper) lastAppliedMigrationVersion() (time.Time, error) {
	version, err := w.getAttrOrderedBy("version", "applied_at DESC, version DESC")
	if err != nil {
		return time.Time{}, errors.Wrap(err, "can't select last applied migration version from database")
	}
	return version, nil
}

// getAttrOrderedBy returns first attr ordered by order
func (w *dbWrapper) getAttrOrderedBy(attr string, order string) (time.Time, error) {
	var result string
	err := w.db.QueryRow(fmt.Sprintf("SELECT %s FROM %s ORDER BY %s LIMIT 1", attr, w.MigrationsTable, order)).Scan(&result)
	if err == sql.ErrNoRows {
		// Empty table: report the zero time.
		return time.Time{}, nil
	}
	if err != nil {
		return time.Time{}, err
	}
	// NOTE(review): the parse error is discarded; a malformed row silently
	// yields the zero time — confirm this is intended.
	ts, _ := time.Parse(TimestampFormat, result)
	return ts, nil
}

// appliedMigrationsData returns all data from migrations table ordered by provided order variable
func (w *dbWrapper) appliedMigrationsData(order string) ([]*migrationData, error) {
	rows, err := w.db.Query(fmt.Sprintf("SELECT version, applied_at FROM %s ORDER BY %s", w.MigrationsTable, order))
	if err != nil {
		return nil, errors.Wrap(err, "can't get applied migrations versions")
	}
	defer rows.Close()

	var mds []*migrationData
	var version, appliedAt string
	for rows.Next() {
		err = rows.Scan(&version,
&appliedAt) if err != nil { return nil, errors.Wrap(err, "can't scan migrations table's row") } md := &migrationData{} md.version, _ = time.Parse(TimestampFormat, version) md.appliedAt, _ = time.Parse(TimestampFormat, appliedAt) mds = append(mds, md) } return mds, nil } // insertMigrationData inserts data for applied migration func (w *dbWrapper) insertMigrationData(version time.Time, appliedAtTs time.Time, executor executor) error { if executor == nil { executor = w.db } _, err := executor.Exec(w.setPlaceholders(fmt.Sprintf("INSERT INTO %s (version, applied_at) VALUES (?, ?)", w.MigrationsTable)), version.UTC().Format(TimestampFormat), appliedAtTs.UTC().Format(TimestampFormat)) if err != nil { return errors.Wrap(err, "can't insert migration") } return nil } // countMigrationsInLastBatch returns number of migrations which were applied during the last database operation func (w *dbWrapper) countMigrationsInLastBatch() (int, error) { var count int err := w.db.QueryRow(w.setPlaceholders("SELECT COUNT(*) FROM migrations GROUP BY applied_at ORDER BY applied_at DESC LIMIT 1")).Scan(&count) if err == sql.ErrNoRows { return 0, nil } if err != nil { return 0, errors.Wrapf(err, "can't count migrations for last batch") } return count, nil } // deleteMigrationVersion removes database row with given migration version func (w *dbWrapper) deleteMigrationVersion(version time.Time, executor executor) error { if executor == nil { executor = w.db } _, err := executor.Exec(w.setPlaceholders(fmt.Sprintf( "DELETE FROM %s WHERE version = ?", w.MigrationsTable)), version.UTC().Format(TimestampFormat)) if err != nil { return errors.Wrap(err, "can't delete migration") } return nil } // execMigrationQueries executes queries from the migration file, calling func after func (w *dbWrapper) execMigrationQueries(query string, afterFunc func(tx *sql.Tx) error) error { // using transactions, although only postgres supports supports DDL ones tx, err := w.db.Begin() if err != nil { return 
errors.Wrap(err, "can't begin transaction")
	}

	// split queries and exec them one by one, because mysql driver can't exec multiple queries using one Exec call
	queries := strings.Split(query, ";")
	for _, q := range queries {
		q := strings.TrimSpace(q)
		if q != "" {
			_, err := tx.Exec(q + ";")
			if err != nil {
				// Rollback error intentionally ignored; the exec error wins.
				tx.Rollback()
				return errors.Wrapf(err, "can't execute query %s", q)
			}
		}
	}

	// afterFunc runs inside the same transaction (e.g. to record the applied
	// migration atomically with the schema change).
	err = afterFunc(tx)
	if err != nil {
		tx.Rollback()
		return err
	}

	err = tx.Commit()
	if err != nil {
		return errors.Wrap(err, "can't commit transaction")
	}

	return nil
}
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package cli

import (
	"context"
	"fmt"
	"strings"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/cli/exit"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/log/channel"
	"github.com/cockroachdb/cockroach/pkg/util/log/severity"
	"github.com/cockroachdb/errors"
	"github.com/lib/pq"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestOutputError exercises cliOutputError's rendering of severity,
// SQLSTATE, hint, detail and source location for various error shapes.
func TestOutputError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	errBase := errors.New("woo")
	// Reference location string used by the verbose-output expectations.
	file, line, fn, _ := errors.GetOneLineSource(errBase)
	refLoc := fmt.Sprintf("%s, %s:%d", fn, file, line)
	testData := []struct {
		err                   error
		showSeverity, verbose bool
		exp                   string
	}{
		// Check the basic with/without severity.
		{errBase, false, false, "woo"},
		{errBase, true, false, "ERROR: woo"},
		{pgerror.WithCandidateCode(errBase, pgcode.Syntax), false, false, "woo\nSQLSTATE: " + pgcode.Syntax.String()},
		// Check the verbose output. This includes the uncategorized sqlstate.
		{errBase, false, true, "woo\nSQLSTATE: " + pgcode.Uncategorized.String() + "\nLOCATION: " + refLoc},
		{errBase, true, true, "ERROR: woo\nSQLSTATE: " + pgcode.Uncategorized.String() + "\nLOCATION: " + refLoc},
		// Check the same over pq.Error objects.
		{&pq.Error{Message: "woo"}, false, false, "woo"},
		{&pq.Error{Message: "woo"}, true, false, "ERROR: woo"},
		{&pq.Error{Message: "woo"}, false, true, "woo"},
		{&pq.Error{Message: "woo"}, true, true, "ERROR: woo"},
		// A pq-provided severity replaces the default "ERROR" prefix.
		{&pq.Error{Severity: "W", Message: "woo"}, false, false, "woo"},
		{&pq.Error{Severity: "W", Message: "woo"}, true, false, "W: woo"},
		// Check hint printed after message.
		{errors.WithHint(errBase, "hello"), false, false, "woo\nHINT: hello"},
		// Check sqlstate printed before hint, location after hint.
		{errors.WithHint(errBase, "hello"), false, true,
			"woo\nSQLSTATE: " + pgcode.Uncategorized.String() + "\nHINT: hello\nLOCATION: " + refLoc},
		// Check detail printed after message.
		{errors.WithDetail(errBase, "hello"), false, false, "woo\nDETAIL: hello"},
		// Check hint/detail collection, hint printed after detail.
		{errors.WithHint(
			errors.WithDetail(
				errors.WithHint(errBase, "a"),
				"b"),
			"c"), false, false,
			"woo\nDETAIL: b\nHINT: a\n--\nc"},
		{errors.WithDetail(
			errors.WithHint(
				errors.WithDetail(errBase, "a"),
				"b"),
			"c"), false, false,
			"woo\nDETAIL: a\n--\nc\nHINT: b"},
		// Check sqlstate printed before detail, location after hint.
		{errors.WithDetail(
			errors.WithHint(errBase, "a"),
			"b"), false, true,
			"woo\nSQLSTATE: " + pgcode.Uncategorized.String() + "\nDETAIL: b\nHINT: a\nLOCATION: " + refLoc},
	}

	for _, tc := range testData {
		var buf strings.Builder
		cliOutputError(&buf, tc.err, tc.showSeverity, tc.verbose)
		// cliOutputError terminates its output with a newline.
		assert.Equal(t, tc.exp+"\n", buf.String())
	}
}

// TestFormatLocation checks "<function>, <file>:<line>" formatting for every
// combination of missing components.
func TestFormatLocation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	testData := []struct {
		file, line, fn string
		exp            string
	}{
		{"", "", "", ""},
		{"a.b", "", "", "a.b"},
		{"", "123", "", "<unknown>:123"},
		{"", "", "abc", "abc"},
		{"a.b", "", "abc", "abc, a.b"},
		{"a.b", "123", "", "a.b:123"},
		{"", "123", "abc", "abc, <unknown>:123"},
	}

	for _, tc := range testData {
		r := formatLocation(tc.file, tc.line, tc.fn)
		assert.Equal(t, tc.exp, r)
	}
}

// logger is a test double that captures the single error logged through it.
type logger struct {
	TB       testing.TB
	Severity log.Severity
	Channel  log.Channel
	Err      error
}

// Log records the severity and the single, error-typed argument.
func (l *logger) Log(_ context.Context, sev log.Severity, msg string, args ...interface{}) {
	require.Equal(l.TB, 1, len(args), "expected to log one item")
	err, ok := args[0].(error)
	require.True(l.TB, ok, "expected to log an error")
	l.Severity = sev
	l.Channel = channel.SESSIONS
	l.Err = err
}

// TestErrorReporting checks checkAndMaybeShoutTo's severity selection and
// cause unwrapping for plain, nested and wrapped cliErrors.
func TestErrorReporting(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	tests := []struct {
		desc         string
		err          error
		wantSeverity log.Severity
		wantCLICause bool // should the cause be a *cliError?
	}{
		{
			desc:         "plain",
			err:          errors.New("boom"),
			wantSeverity: severity.ERROR, // default severity for non-cliErrors
			wantCLICause: false,
		},
		{
			desc: "single cliError",
			err: &cliError{
				exitCode: exit.UnspecifiedError(),
				severity: severity.INFO,
				cause:    errors.New("routine"),
			},
			wantSeverity: severity.INFO,
			wantCLICause: false,
		},
		{
			desc: "double cliError",
			err: &cliError{
				exitCode: exit.UnspecifiedError(),
				severity: severity.INFO,
				cause: &cliError{
					exitCode: exit.UnspecifiedError(),
					severity: severity.ERROR,
					cause:    errors.New("serious"),
				},
			},
			wantSeverity: severity.INFO, // should only unwrap one layer
			wantCLICause: true,
		},
		{
			desc: "wrapped cliError",
			err: fmt.Errorf("some context: %w", &cliError{
				exitCode: exit.UnspecifiedError(),
				severity: severity.INFO,
				cause:    errors.New("routine"),
			}),
			wantSeverity: severity.INFO,
			wantCLICause: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			got := &logger{TB: t}
			checked := checkAndMaybeShoutTo(tt.err, got.Log)
			assert.Equal(t, tt.err, checked, "should return error unchanged")
			assert.Equal(t, tt.wantSeverity, got.Severity, "wrong severity log")
			assert.Equal(t, channel.SESSIONS, got.Channel, "wrong channel")
			gotCLI := errors.HasType(got.Err, (*cliError)(nil))
			if tt.wantCLICause {
				assert.True(t, gotCLI, "logged cause should be *cliError, got %T", got.Err)
			} else {
				assert.False(t, gotCLI, "logged cause shouldn't be *cliError, got %T", got.Err)
			}
		})
	}
}
package main

import "fmt"

// maxSlidingWindow returns the maximum of every contiguous window of size k
// in nums, using a monotonically decreasing deque of candidates; O(len(nums)).
// It returns an empty slice when k <= 0 or nums has fewer than k elements.
func maxSlidingWindow(nums []int, k int) []int {
	res := []int{}
	if k <= 0 || len(nums) < k {
		return res
	}
	vals := make([]int, 0, len(nums)) // deque of values, strictly decreasing
	idxs := make([]int, 0, len(nums)) // indices matching vals
	for i, n := range nums {
		// Drop the front while it falls outside the window ending at i.
		for len(idxs) > 0 && i-idxs[0] >= k {
			vals, idxs = vals[1:], idxs[1:]
		}
		// Drop smaller-or-equal candidates from the back.
		for len(vals) > 0 && vals[len(vals)-1] <= n {
			vals, idxs = vals[:len(vals)-1], idxs[:len(idxs)-1]
		}
		vals = append(vals, n)
		idxs = append(idxs, i)
		// BUG FIX: the original read s[0] from the deque *before* pushing
		// nums[i]; when nums[i] was a new maximum the deque was empty at
		// that moment and indexing it panicked (the sample input in main
		// triggers this at i=5). Record the max after the push instead.
		if i >= k-1 {
			res = append(res, vals[0])
		}
	}
	return res
}

func main() {
	fmt.Println(maxSlidingWindow([]int{1, 3, 1, 2, 0, 5}, 3))
}
package lib_gc_cache_helpers // Cache Object metadata fields const METADATA_VERSION = "VERSION" const METADATA_STATUS = "STATUS" const ( CACHEOBJECTSTATUS_ENABLED int16 = 0 CACHEOBJECTSTATUS_DISABLED int16 = 1 CACHEOBJECTSTATUS_DELETED int16 = 2 )
package models

import (
	"gopkg.in/mgo.v2"
)

var (
	// session is the lazily created root MongoDB session.
	session *mgo.Session
	// url is the MongoDB address dialed on first use.
	url string = "127.0.0.1:27017"
	// basename is the default database name.
	// NOTE(review): unused within this chunk — verify against other files.
	basename string = "test"
)

// GetSession returns a clone of the shared MongoDB session, dialing the
// server on first call. It panics when the initial dial fails.
// NOTE(review): the lazy initialisation is not synchronised; concurrent
// first calls could race on session — confirm callers serialise startup.
func GetSession() *mgo.Session {
	if session == nil {
		var err error
		session, err = mgo.Dial(url)
		if err != nil {
			panic(err)
		}
	}
	return session.Clone()
}