text
stringlengths
11
4.05M
// Package coremain holds build-identifying constants for the server.
package coremain

const (
	// coreName is the user-visible name of the server.
	coreName = "CoreDNS"

	// CoreVersion is the current version of CoreDNS.
	CoreVersion = "1.0.4"

	// serverType identifies this server as a DNS server.
	serverType = "dns"
)
// Copyright 2020 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package certificate import ( "crypto/x509" "encoding/json" "encoding/pem" "reflect" "strings" "testing" "time" "chromiumos/tast/errors" ) func pemDecode(s string) ([]byte, error) { block, rest := pem.Decode([]byte(s)) if block == nil { return nil, errors.New("couldn't decode Cert PEM") } if len(rest) != 0 { return nil, errors.Errorf("found trailing data in cert: %q", string(rest)) } return block.Bytes, nil } func x509ParseCert(certStr string) (*x509.Certificate, error) { // Parse certificate. It should be X-509 certificates in PEM format. pem, err := pemDecode(certStr) if err != nil { return nil, errors.Wrap(err, "failed to decode PEM") } cert, err := x509.ParseCertificate(pem) if err != nil { return nil, errors.Wrap(err, "failed to parse certificate") } return cert, err } // validateCertSignature checks that cert is signed by its parent. Note that we allow MD5-based signatures for now // (crbug.com/1047146), and because Golang's x509 library rejects this weak crypto, we can't easily verify signatures // properly. func validateCertSignature(cert, parent *x509.Certificate) error { err := cert.CheckSignatureFrom(parent) if err != nil { // TODO(crbug.com/1047146): MD5 certificates are rejected by Golang x509. We're still allowing them for now. var insecureErr x509.InsecureAlgorithmError if !errors.As(err, &insecureErr) { return err } } return nil } func validatePrivateKey(privateKey string, cert *x509.Certificate) error { // Parse private key. It should be a PKCS#1 key in PEM format. 
pem, err := pemDecode(privateKey) if err != nil { return err } key, err := x509.ParsePKCS1PrivateKey(pem) if err != nil { return errors.Wrap(err, "failed to parse private key") } if err = key.Validate(); err != nil { return errors.Wrap(err, "private key failed validation") } if !reflect.DeepEqual(&key.PublicKey, cert.PublicKey) { return errors.New("public key does not match") } return nil } func TestCertificate(t *testing.T) { now := time.Now() isExpired := func(cert *x509.Certificate) bool { return now.Before(cert.NotBefore) || now.After(cert.NotAfter) } for testi, testcase := range []CertStore{TestCert1(), TestCert2(), TestCert3()} { caCert, err := x509ParseCert(testcase.CACred.Cert) if err != nil { t.Fatalf("Test %d: CACert: %v", testi, err) } if err := validateCertSignature(caCert, caCert); err != nil { t.Errorf("Test %d: unexpeted: CA cert isn't self-signed", testi) } testCred := func(cred Credential, expectedExpired bool) error { cert, err := x509ParseCert(cred.Cert) if err != nil { return err } // Verify expiry. if expired := isExpired(cert); expired != expectedExpired { return errors.Errorf("failed cert expiry check got %t, want %t", expired, expectedExpired) } // Validate private keys. if err := validatePrivateKey(cred.PrivateKey, cert); err != nil { return errors.Wrap(err, "failed private key check") } // Check cert signatures. 
if err := validateCertSignature(cert, caCert); err != nil { return errors.Wrap(err, "failed CA cert check") } return nil } if err := testCred(testcase.CACred, false); err != nil { t.Errorf("Test %d: CACred: %v", testi, err) } if err := testCred(testcase.ServerCred, false); err != nil { t.Errorf("Test %d: ServerCred: %v", testi, err) } if err := testCred(testcase.ClientCred, false); err != nil { t.Errorf("Test %d: ClientCred: %v", testi, err) } if err := testCred(testcase.ExpiredServerCred, true); err != nil { t.Errorf("Test %d: ExpiredServerCred: %v", testi, err) } } } // TestAltSubjectMatch test that the entries in TestCert3AltSubjectMatch are exactly what TestCert3 contains. func TestAltSubjectMatch(t *testing.T) { // Get the entries in TestCert3AltSubjectMatch(). expectedDNSNames := make(map[string]bool) expectedEmailAddresses := make(map[string]bool) for _, altStr := range TestCert3AltSubjectMatch() { var alt struct { Type string Value string } if err := json.Unmarshal([]byte(altStr), &alt); err != nil { t.Fatalf("failed to unmarshal altsubject match string: %s", altStr) } switch alt.Type { case "DNS": expectedDNSNames[alt.Value] = true case "EMAIL": expectedEmailAddresses[alt.Value] = true default: t.Errorf("unexpected Type in altsubject match: %s", alt.Type) } } for testi, testcert := range []string{TestCert3().ServerCred.Cert, TestCert3().ExpiredServerCred.Cert} { // Get the entries of the cert. 
cert, err := x509ParseCert(testcert) if err != nil { t.Fatal(err) } dnsNames := make(map[string]bool) for _, d := range cert.DNSNames { dnsNames[d] = true } emailAddresses := make(map[string]bool) for _, e := range cert.EmailAddresses { emailAddresses[e] = true } if !reflect.DeepEqual(dnsNames, expectedDNSNames) { t.Errorf("Test %d: DNS names not match, got %v, want %v", testi, dnsNames, expectedDNSNames) } if !reflect.DeepEqual(emailAddresses, expectedEmailAddresses) { t.Errorf("Test %d: email addresses not match, got %v, want %v", testi, emailAddresses, expectedEmailAddresses) } } } // TestDomainSuffixMatch test that the domain specified by TestCert3DomainSuffixMatch() is found in TestCert3. func TestDomainSuffixMatch(t *testing.T) { // Get the entries in TestCert3DomainSuffixMatch(). expectedDomainSuffixMatch := TestCert3DomainSuffixMatch() for testi, testcert := range []string{TestCert3().ServerCred.Cert, TestCert3().ExpiredServerCred.Cert} { // Get the entries of the cert. cert, err := x509ParseCert(testcert) if err != nil { t.Fatal(err) } for _, d := range cert.DNSNames { match := false for _, s := range expectedDomainSuffixMatch { match = match || strings.HasSuffix(d, s) } if !match { t.Errorf("Test %d: the domain does not match, got %v, want %v", testi, cert.DNSNames, expectedDomainSuffixMatch) } } } } func TestCADifference(t *testing.T) { // Check that TestCert1 and TestCert2 are using different CAs. if TestCert1().CACred.Cert == TestCert2().CACred.Cert { t.Error("TestCert1 and TestCert2 are using the same CA") } }
package main

import (
	"fmt"
	"time"
)

// using_select demonstrates multiplexing two producer goroutines with select.
// It runs forever: c1 produces every 5 seconds, c2 once a minute, and a
// one-second timeout prints progress dots between events.
func using_select() {
	fmt.Println("-------------- Multithreading select -------------------")

	c1 := make(chan string)
	c2 := make(chan string)

	// Producer 1: signals every 5 seconds (message content unused).
	go func() {
		for {
			time.Sleep(time.Second * 5)
			c1 <- ""
		}
	}()

	// Producer 2: sends a message once per minute.
	go func() {
		for {
			time.Sleep(time.Minute)
			c2 <- "the minute is up"
		}
	}()

	// c accumulates seconds since the last minute marker.
	c := 5
	for {
		select {
		case <-c1:
			if c > 0 {
				fmt.Printf("%d", c)
			}
			c += 5
		case msg := <-c2:
			fmt.Printf("%d %s \n", c, msg)
			c = 0
		case <-time.After(time.Second):
			// No event within a second: print a progress dot.
			fmt.Print(".")
		}
	}
	// BUG FIX: the original had `var input string; fmt.Scanln(&input)` here,
	// after the infinite loop — unreachable code (flagged by go vet); removed.
}
/* Command line tool to try evaluating JSonnet. Demos: echo "{ a: 1, b: 2 }" | go run jsonnet_main/main.go /dev/stdin go run jsonnet_main/main.go test1.j go run jsonnet_main/main.go test2.j echo 'std.extVar("a") + "bar"' | go run jsonnet_main/main.go /dev/stdin a=foo */ package main import "github.com/strickyak/jsonnet_cgo" import ( "flag" "fmt" "io/ioutil" "log" "path/filepath" "strings" ) var stringOutput = flag.Bool( "string_output", false, "If set, will expect a string and output it verbatim") func importFunc(base, rel string) (result string, path string, err error) { filename := filepath.Join(base, rel) contents, err := ioutil.ReadFile(filename) if err != nil { return "", "", err } return string(contents), filename, nil } func main() { flag.Parse() vm := jsonnet.Make() vm.ImportCallback(importFunc) if stringOutput != nil { vm.StringOutput(*stringOutput) } args := flag.Args() if len(args) < 1 { log.Fatal("Usage: jsonnet_main filename key1=val1 key2=val2...") } for i := 1; i < len(args); i++ { kv := strings.SplitN(args[i], "=", 2) if len(kv) != 2 { log.Fatalf("Error in jsonnet_main: Expected arg to be 'key=value': %q", args[i]) } vm.ExtVar(kv[0], kv[1]) } z, err := vm.EvaluateFile(args[0]) if err != nil { log.Fatalf("Error in jsonnet_main: %s", err) } fmt.Print(z) vm.Destroy() }
package models type SSGAResponse struct { Data struct { FundType []struct { Key string `json:"key"` Name string `json:"name"` Size int `json:"size"` } `json:"fundType"` Funds struct { Etfs struct { ViewBy struct { Overview struct { Name string `json:"name"` } `json:"overview"` Performance struct { Name string `json:"name"` Children struct { MonthEnd string `json:"monthEnd"` QuarterEnd string `json:"quarterEnd"` } `json:"children"` } `json:"performance"` Pricing struct { Name string `json:"name"` } `json:"pricing"` Documents struct { Name string `json:"name"` } `json:"documents"` } `json:"viewBy"` Labels []struct { Key string `json:"key"` Name string `json:"name"` Tab []string `json:"tab"` Type string `json:"type,omitempty"` SubLabels []struct { Key string `json:"key"` Name string `json:"name"` } `json:"subLabels,omitempty"` NoSort bool `json:"noSort,omitempty"` } `json:"labels"` Datas []struct { Domicile string `json:"domicile"` FundName string `json:"fundName"` FundTicker string `json:"fundTicker"` FundURI string `json:"fundUri"` Ter []interface{} `json:"ter"` Nav []interface{} `json:"nav"` Aum []interface{} `json:"aum"` AsOfDate []string `json:"asOfDate"` PerfAsOf []string `json:"PerfAsOf"` Mo1 []interface{} `json:"mo1"` Qtd []interface{} `json:"qtd"` Ytd []interface{} `json:"ytd"` Yr1 []interface{} `json:"yr1"` Yr3 []interface{} `json:"yr3"` Yr5 []interface{} `json:"yr5"` Yr10 []interface{} `json:"yr10"` SinceInception []interface{} `json:"sinceInception"` InceptionDate []string `json:"inceptionDate"` PerfAsOf1 []string `json:"PerfAsOf_1"` Mo11 []interface{} `json:"mo1_1"` Qtd1 []interface{} `json:"qtd_1"` Ytd1 []interface{} `json:"ytd_1"` Yr11 []interface{} `json:"yr1_1"` Yr31 []interface{} `json:"yr3_1"` Yr51 []interface{} `json:"yr5_1"` Yr101 []interface{} `json:"yr10_1"` SinceInception1 []interface{} `json:"sinceInception_1"` PrimaryExchange string `json:"primaryExchange"` ClosePrice []interface{} `json:"closePrice"` BidAsk []interface{} `json:"bidAsk"` 
PremiumDiscount []interface{} `json:"premiumDiscount"` DocumentPdf []struct { DocType string `json:"docType"` Docs []struct { Language string `json:"language"` Name string `json:"name"` Path string `json:"path"` CanDownload bool `json:"canDownload"` } `json:"docs"` } `json:"documentPdf"` FundFilter string `json:"fundFilter"` PopUp bool `json:"popUp"` Keywords string `json:"keywords"` PerfIndex []struct { FundName string `json:"fundName"` FundTicker string `json:"fundTicker"` Ter string `json:"ter"` PerfAsOf string `json:"PerfAsOf"` Mo1 string `json:"mo1"` Qtd string `json:"qtd"` Ytd string `json:"ytd"` Yr1 string `json:"yr1"` Yr3 string `json:"yr3"` Yr5 string `json:"yr5"` Yr10 string `json:"yr10"` SinceInception string `json:"sinceInception"` InceptionDate string `json:"inceptionDate"` PerfAsOf1 string `json:"PerfAsOf_1"` Mo11 string `json:"mo1_1"` Qtd1 string `json:"qtd_1"` Ytd1 string `json:"ytd_1"` Yr11 string `json:"yr1_1"` Yr31 string `json:"yr3_1"` Yr51 string `json:"yr5_1"` Yr101 string `json:"yr10_1"` SinceInception1 string `json:"sinceInception_1"` Num int `json:"num"` } `json:"perfIndex,omitempty"` } `json:"datas"` Categories []struct { Key string `json:"key"` Name string `json:"name"` SubCategories []struct { Key string `json:"key"` Name string `json:"name"` SubCategories []struct { Key string `json:"key"` Name string `json:"name"` CheckBox bool `json:"checkBox"` SubCategories []struct { Key string `json:"key"` Name string `json:"name"` Funds string `json:"funds"` Size int `json:"size"` } `json:"subCategories"` Funds string `json:"funds"` Size int `json:"size"` } `json:"subCategories,omitempty"` Funds string `json:"funds"` Size int `json:"size"` } `json:"subCategories"` Funds string `json:"funds"` Size int `json:"size"` } `json:"categories"` QuickLinks []struct { Name string `json:"name"` Path string `json:"path"` IsExternal bool `json:"isExternal"` Target bool `json:"target"` } `json:"quickLinks"` ExpenseRatio string `json:"expenseRatio"` FormatInfo 
struct { DecimalPoint string `json:"decimalPoint"` ThousandsSep string `json:"thousandsSep"` } `json:"formatInfo"` } `json:"etfs"` } `json:"funds"` } `json:"data"` Msg string `json:"msg"` Status int `json:"status"` }
package ifcli import ( "github.com/c-bata/go-prompt" ) var ( additionalSugKey = map[string]bool{} suggestions = []prompt.Suggest{ // A {Text: "ALTER", Description: "..."}, // B // C {Text: "CREATE", Description: "..."}, // D {Text: "DATABASE", Description: "..."}, {Text: "DATABASES", Description: "..."}, {Text: "DELETE", Description: "..."}, {Text: "DROP", Description: "..."}, {Text: "DURATION", Description: "..."}, // E // F {Text: "FROM", Description: "..."}, {Text: "FIELD", Description: "..."}, // G {Text: "GROUP", Description: "..."}, // H // I // J // K {Text: "KEYS", Description: "..."}, {Text: "KEY", Description: "..."}, // L {Text: "LIMIT", Description: "..."}, // M {Text: "MEASUREMENTS", Description: "..."}, // N {Text: "NAME", Description: "..."}, // O {Text: "ON", Description: "..."}, {Text: "OFFSET", Description: "..."}, // P {Text: "POLICIES", Description: "..."}, {Text: "POLICY", Description: "..."}, // Q // R {Text: "RETENTION", Description: "..."}, {Text: "REPLICATION", Description: "..."}, // S {Text: "SHOW", Description: "..."}, {Text: "SELECT", Description: "..."}, {Text: "SHARD", Description: "..."}, {Text: "SERIES", Description: "..."}, // T {Text: "TAG", Description: "..."}, // U // V {Text: "VALUES", Description: "..."}, // W {Text: "WHERE", Description: "..."}, {Text: "WITH", Description: "..."}, // X // Y // Z // self key words {Text: "ENABLE_NIL", Description: "..."}, {Text: "DISABLE_NIL", Description: "..."}, {Text: "RESET_SUG", Description: "..."}, // remove suggestions {Text: "USE", Description: "..."}, // switch databases // additional suggestions } ) func AddSug(key string) { if ok, _ := additionalSugKey[key]; !ok { suggestions = append(suggestions, prompt.Suggest{ Text: key, Description: "---", }) additionalSugKey[key] = true } } // remove additional suggestions func ResetSug() { sug := []prompt.Suggest{} for _, s := range suggestions { if ok, _ := additionalSugKey[s.Text]; !ok { sug = append(sug, s) } } additionalSugKey = 
map[string]bool{} suggestions = suggestions[:] suggestions = sug } func SugCompleter(d prompt.Document) []prompt.Suggest { w := d.GetWordBeforeCursor() if w == "" { return []prompt.Suggest{} } return prompt.FilterHasPrefix(suggestions, w, true) }
package gemini import ( "net/http" "reflect" "testing" "time" ) func TestNewClient(t *testing.T) { tests := []struct { name string want *Client }{ { name: "valid", want: &Client{ BaseURL: "https://api.gemini.com", HTTPClient: &http.Client{ Timeout: 3 * time.Second, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := NewClient(); !reflect.DeepEqual(got, tt.want) { t.Errorf("NewClient() = %v, want %v", got, tt.want) } }) } }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tpcc import ( "bytes" gosql "database/sql" "fmt" "strings" "github.com/cockroachdb/errors" "golang.org/x/exp/rand" ) type partitionStrategy int const ( // The partitionedReplication strategy constrains replication for a given // partition to within a single zone. It does so by requiring that all // replicas of each range in a partition are stored in the same zone. // // Example of 9 warehouses partitioned over 3 zones: // partitions = [0,1,2], [3,4,5], [6,7,8] // w = warehouse # // L = leaseholder // // us-east1-b: // n1 = [w0(L), w1, w2 ] // n2 = [w0, w1(L), w2 ] // n3 = [w0, w1, w2(L)] // // us-west1-b: // n4 = [w3(L), w4, w5 ] // n5 = [w3, w4, w5 ] // n6 = [w3, w4(L), w5(L)] // // europe-west2-b: // n7 = [w6, w7, w8(L)] // n8 = [w6, w7(L), w8 ] // n9 = [w6(L), w7, w8 ] // // NOTE: the lease for a range is randomly scattered within the zone // that contains all replicas of the range. // partitionedReplication partitionStrategy = iota // The partitionedLeases strategy collocates read leases for a given // partition to within a single zone. It does so by configuring lease // preferences on each range in a partition to prefer the same zone. // Unlike the partitioned replication strategy, it does not prevent // cross-zone replication. 
// // Example of 9 warehouses partitioned over 3 zones: // partitions = [0,1,2], [3,4,5], [6,7,8] // w = warehouse # // L = leaseholder // // us-east1-b: // n1 = [w0(L), w3, w6] // n2 = [w1(L), w4, w7] // n3 = [w2(L), w5, w8] // // us-west1-b: // n4 = [w0, w1, w2 ] // n5 = [w3(L), w4(L), w5(L)] // n6 = [w6, w7, w8 ] // // europe-west2-b: // n7 = [w2, w5, w8(L)] // n8 = [w1, w4, w7(L)] // n9 = [w0, w3, w6(L)] // // NOTE: a copy of each range is randomly scattered within each zone. // partitionedLeases ) // Part of pflag's Value interface. func (ps partitionStrategy) String() string { switch ps { case partitionedReplication: return "replication" case partitionedLeases: return "leases" } panic("unexpected") } // Part of pflag's Value interface. func (ps *partitionStrategy) Set(value string) error { switch value { case "replication": *ps = partitionedReplication return nil case "leases": *ps = partitionedLeases return nil } return errors.Errorf("unknown partition strategy %q", value) } // Part of pflag's Value interface. func (ps partitionStrategy) Type() string { return "partitionStrategy" } type zoneConfig struct { zones []string strategy partitionStrategy } // partitioner encapsulates all logic related to partitioning discrete numbers // of warehouses into disjoint sets of roughly equal sizes. Partitions are then // evenly assigned "active" warehouses, which allows for an even split of live // warehouses across partitions without the need to repartition when the active // count is changed. type partitioner struct { total int // e.g. the total number of warehouses active int // e.g. 
the active number of warehouses parts int // the number of partitions to break `total` into partBounds []int // the boundary points between partitions partElems [][]int // the elements active in each partition partElemsMap map[int]int // mapping from element to partition index totalElems []int // all active elements } func makePartitioner(total, active, parts int) (*partitioner, error) { if total <= 0 { return nil, errors.Errorf("total must be positive; %d", total) } if active <= 0 { return nil, errors.Errorf("active must be positive; %d", active) } if parts <= 0 { return nil, errors.Errorf("parts must be positive; %d", parts) } if active > total { return nil, errors.Errorf("active > total; %d > %d", active, total) } if parts > total { return nil, errors.Errorf("parts > total; %d > %d", parts, total) } // Partition boundary points. // // bounds contains the boundary points between partitions, where each point // in the slice corresponds to the exclusive end element of one partition // and and the inclusive start element of the next. // // total = 20 // parts = 3 // bounds = [0, 6, 13, 20] // bounds := make([]int, parts+1) for i := range bounds { bounds[i] = (i * total) / parts } // Partition sizes. // // sizes contains the number of elements that are active in each partition. // // active = 10 // parts = 3 // sizes = [3, 3, 4] // sizes := make([]int, parts) for i := range sizes { s := (i * active) / parts e := ((i + 1) * active) / parts sizes[i] = e - s } // Partitions. // // partElems enumerates the active elements in each partition. // // total = 20 // active = 10 // parts = 3 // partElems = [[0, 1, 2], [6, 7, 8], [13, 14, 15, 16]] // partElems := make([][]int, parts) for i := range partElems { partAct := make([]int, sizes[i]) for j := range partAct { partAct[j] = bounds[i] + j } partElems[i] = partAct } // Partition reverse mapping. // // partElemsMap maps each active element to its partition index. 
// // total = 20 // active = 10 // parts = 3 // partElemsMap = {0:0, 1:0, 2:0, 6:1, 7:1, 8:1, 13:2, 14:2, 15:2, 16:2} // partElemsMap := make(map[int]int) for p, elems := range partElems { for _, elem := range elems { partElemsMap[elem] = p } } // Total elements. // // totalElems aggregates all active elements into a single slice. // // total = 20 // active = 10 // parts = 3 // totalElems = [0, 1, 2, 6, 7, 8, 13, 14, 15, 16] // var totalElems []int for _, elems := range partElems { totalElems = append(totalElems, elems...) } return &partitioner{ total: total, active: active, parts: parts, partBounds: bounds, partElems: partElems, partElemsMap: partElemsMap, totalElems: totalElems, }, nil } // randActive returns a random active element. func (p *partitioner) randActive(rng *rand.Rand) int { return p.totalElems[rng.Intn(len(p.totalElems))] } // configureZone sets up zone configs for previously created partitions. By // default it adds constraints/preferences in terms of racks, but if the zones // flag is passed into tpcc, it will set the constraints/preferences based on // the geographic zones provided. func configureZone(db *gosql.DB, cfg zoneConfig, table, partition string, partIdx int) error { var kv string if len(cfg.zones) > 0 { kv = fmt.Sprintf("zone=%s", cfg.zones[partIdx]) } else { kv = fmt.Sprintf("rack=%d", partIdx) } var opts string switch cfg.strategy { case partitionedReplication: // Place all replicas in the zone. opts = fmt.Sprintf(`constraints = '[+%s]'`, kv) case partitionedLeases: // Place one replica in the zone and give that replica lease preference. 
opts = fmt.Sprintf(`num_replicas = COPY FROM PARENT, constraints = '{"+%s":1}', lease_preferences = '[[+%s]]'`, kv, kv) default: panic("unexpected") } sql := fmt.Sprintf(`ALTER PARTITION %s OF TABLE %s CONFIGURE ZONE USING %s`, partition, table, opts) if _, err := db.Exec(sql); err != nil { return errors.Wrapf(err, "Couldn't exec %q", sql) } return nil } // partitionObject partitions the specified object (TABLE or INDEX) with the // provided name, given the partitioning. Callers of the function must specify // the associated table and the partition's number. func partitionObject( db *gosql.DB, cfg zoneConfig, p *partitioner, obj, name, col, table string, idx int, ) error { var buf bytes.Buffer fmt.Fprintf(&buf, "ALTER %s %s PARTITION BY RANGE (%s) (\n", obj, name, col) for i := 0; i < p.parts; i++ { fmt.Fprintf(&buf, " PARTITION p%d_%d VALUES FROM (%d) to (%d)", idx, i, p.partBounds[i], p.partBounds[i+1]) if i+1 < p.parts { buf.WriteString(",") } buf.WriteString("\n") } buf.WriteString(")\n") if _, err := db.Exec(buf.String()); err != nil { return errors.Wrapf(err, "Couldn't exec %q", buf.String()) } for i := 0; i < p.parts; i++ { if err := configureZone(db, cfg, table, fmt.Sprintf("p%d_%d", idx, i), i); err != nil { return err } } return nil } func partitionTable( db *gosql.DB, cfg zoneConfig, p *partitioner, table, col string, idx int, ) error { return partitionObject(db, cfg, p, "TABLE", table, col, table, idx) } func partitionIndex( db *gosql.DB, cfg zoneConfig, p *partitioner, table, index, col string, idx int, ) error { indexStr := fmt.Sprintf("%s@%s", table, index) if exists, err := indexExists(db, table, index); err != nil { return err } else if !exists { return errors.Errorf("could not find index %q", indexStr) } return partitionObject(db, cfg, p, "INDEX", indexStr, col, table, idx) } func partitionWarehouse(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { return partitionTable(db, cfg, wPart, "warehouse", "w_id", 0) } func partitionDistrict(db 
*gosql.DB, cfg zoneConfig, wPart *partitioner) error { return partitionTable(db, cfg, wPart, "district", "d_w_id", 0) } func partitionNewOrder(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { return partitionTable(db, cfg, wPart, "new_order", "no_w_id", 0) } func partitionOrder(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { if err := partitionTable(db, cfg, wPart, `"order"`, "o_w_id", 0); err != nil { return err } return partitionIndex(db, cfg, wPart, `"order"`, "order_idx", "o_w_id", 1) } func partitionOrderLine(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { return partitionTable(db, cfg, wPart, "order_line", "ol_w_id", 0) } func partitionStock(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { return partitionTable(db, cfg, wPart, "stock", "s_w_id", 0) } func partitionCustomer(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { if err := partitionTable(db, cfg, wPart, "customer", "c_w_id", 0); err != nil { return err } return partitionIndex(db, cfg, wPart, "customer", "customer_idx", "c_w_id", 1) } func partitionHistory(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { return partitionTable(db, cfg, wPart, "history", "h_w_id", 0) } // replicateColumns creates covering replicated indexes for a given table // for each of the zones provided. // // It is recommended to do this for columns that are immutable as it allows // lookups on those columns to be local within the provided zone. If there are // no zones, it assumes that each partition corresponds to a rack. 
func replicateColumns( db *gosql.DB, cfg zoneConfig, wPart *partitioner, name string, pkColumns []string, storedColumns []string, ) error { constraints := synthesizeConstraints(cfg, wPart) for i, constraint := range constraints { if _, err := db.Exec( fmt.Sprintf(`CREATE UNIQUE INDEX %[1]s_idx_%[2]d ON %[1]s (%[3]s) STORING (%[4]s)`, name, i, strings.Join(pkColumns, ","), strings.Join(storedColumns, ",")), ); err != nil { return err } if _, err := db.Exec(fmt.Sprintf( `ALTER INDEX %[1]s@%[1]s_idx_%[2]d CONFIGURE ZONE USING num_replicas = COPY FROM PARENT, constraints='{"%[3]s": 1}', lease_preferences='[[%[3]s]]'`, name, i, constraint)); err != nil { return err } } return nil } func replicateWarehouse(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { return replicateColumns(db, cfg, wPart, "warehouse", []string{"w_id"}, []string{"w_tax"}) } func replicateDistrict(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { return replicateColumns(db, cfg, wPart, "district", []string{"d_w_id", "d_id"}, []string{"d_name", "d_street_1", "d_street_2", "d_city", "d_state", "d_zip"}) } func replicateItem(db *gosql.DB, cfg zoneConfig, wPart *partitioner) error { return replicateColumns(db, cfg, wPart, "item", []string{"i_id"}, []string{"i_im_id", "i_name", "i_price", "i_data"}) } func synthesizeConstraints(cfg zoneConfig, wPart *partitioner) []string { var constraints []string if len(cfg.zones) > 0 { for _, zone := range cfg.zones { constraints = append(constraints, "+zone="+zone) } } else { // Assume we have parts number of racks which are zero indexed. 
for i := 0; i < wPart.parts; i++ { constraints = append(constraints, fmt.Sprintf("+rack=%d", i)) } } return constraints } func partitionTables( db *gosql.DB, cfg zoneConfig, wPart *partitioner, replicateStaticColumns bool, ) error { if err := partitionWarehouse(db, cfg, wPart); err != nil { return err } if err := partitionDistrict(db, cfg, wPart); err != nil { return err } if err := partitionNewOrder(db, cfg, wPart); err != nil { return err } if err := partitionOrder(db, cfg, wPart); err != nil { return err } if err := partitionOrderLine(db, cfg, wPart); err != nil { return err } if err := partitionStock(db, cfg, wPart); err != nil { return err } if err := partitionCustomer(db, cfg, wPart); err != nil { return err } if err := partitionHistory(db, cfg, wPart); err != nil { return err } if replicateStaticColumns { if err := replicateDistrict(db, cfg, wPart); err != nil { return err } if err := replicateWarehouse(db, cfg, wPart); err != nil { return err } } return replicateItem(db, cfg, wPart) } func partitionCount(db *gosql.DB) (int, error) { var count int if err := db.QueryRow(` SELECT count(*) FROM crdb_internal.tables t JOIN crdb_internal.partitions p USING (table_id) WHERE t.name = 'warehouse' AND p.name ~ 'p0_\d+' `).Scan(&count); err != nil { return 0, err } return count, nil } func indexExists(db *gosql.DB, table, index string) (bool, error) { // Strip any quotes around the table name. table = strings.ReplaceAll(table, `"`, ``) var exists bool if err := db.QueryRow(` SELECT count(*) > 0 FROM information_schema.statistics WHERE table_name = $1 AND index_name = $2 `, table, index).Scan(&exists); err != nil { return false, err } return exists, nil }
// Package stmt (this file): named-parameter resolution from struct fields
// via reflection.
package stmt

import (
	"reflect"

	"github.com/junhwong/goost/apm"
	"github.com/junhwong/goost/runtime"
)

// ParamterFilter transforms or validates a resolved parameter value before
// it is returned from Get. It receives the parameter name and current value.
// (Identifier spelling "Paramter" kept as-is; renaming would break callers.)
type ParamterFilter func(string, interface{}) (interface{}, error)

// structedParams resolves named parameters from the fields of a struct.
type structedParams struct {
	names   map[string]int   // parameter name -> struct field index
	val     reflect.Value    // the (pointer-dereferenced) struct value
	filters []ParamterFilter // applied in order to each resolved value
}

var (
	// apm.NewErrorf returns two values here — presumably an error
	// constructor plus a sentinel error value (NOTE(review): confirm in the
	// apm package docs).
	newParameterInvalidErr, ParameterInvalidErr = apm.NewErrorf("sqlx_param_invalid", 500, "Parameter is missing or invalid")
)

// NewStructedParams wraps obj (a struct, or a pointer to one, which is
// dereferenced) so its fields can be looked up by name via the names map.
// Optional filters post-process each value returned by Get.
func NewStructedParams(obj interface{}, names map[string]int, filters ...ParamterFilter) (*structedParams, error) {
	v := reflect.ValueOf(obj)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil, newParameterInvalidErr("Must be a struct instace")
	}
	return &structedParams{val: v, names: names, filters: filters}, nil
}

// Get resolves the parameter named key: it looks up the field index in
// names, reads that struct field, then runs the value through each non-nil
// filter in order. Unknown keys yield a parameter-invalid error.
func (params *structedParams) Get(key string) (val interface{}, err error) {
	index, ok := params.names[key]
	if !ok {
		err = newParameterInvalidErr("%q undefined", key)
		return
	}
	// Reflective field access can panic (e.g. bad index or unexported
	// field); convert any panic into the returned error.
	defer runtime.HandleCrash(func(ex error) { err = ex })
	val = params.val.Field(index).Interface()
	for _, filter := range params.filters {
		if filter != nil {
			val, err = filter(key, val)
			if err != nil {
				return
			}
		}
	}
	return
}
package main

import "fmt"

func main() {
	s := "babad"
	fmt.Println(longestPalindrome(s))
}

// longestPalindrome returns the longest palindromic substring of s using
// O(n^2) dynamic programming, where dp[i][j] reports whether s[i..j] is a
// palindrome. Among equal-length palindromes the last one found wins
// (matching the original implementation's behavior).
func longestPalindrome(s string) string {
	// FIX: the original named this variable `len`, shadowing the builtin.
	n := len(s)
	if n <= 1 {
		return s
	}
	// Start index and length of the best palindrome found so far.
	start := 0
	max := 1

	// Allocate the n x n DP table directly instead of append-growing it.
	dp := make([][]bool, n)
	for i := range dp {
		dp[i] = make([]bool, n)
	}

	// Base cases: every single character is a palindrome; adjacent equal
	// characters form a length-2 palindrome.
	for i := 0; i < n; i++ {
		dp[i][i] = true
		if i < n-1 && s[i] == s[i+1] {
			dp[i][i+1] = true
			max = 2
			start = i
		}
	}

	// l is the candidate substring length; extend outward using the
	// transition dp[i][j] = (s[i] == s[j]) && dp[i+1][j-1].
	for l := 3; l <= n; l++ {
		for i := 0; i+l-1 < n; i++ {
			j := i + l - 1 // end index of the candidate
			if s[i] == s[j] && dp[i+1][j-1] {
				dp[i][j] = true
				start = i
				max = l
			}
		}
	}
	return s[start : start+max]
}
/** * Copyright (c) 2018-present, MultiVAC Foundation. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ package sync import ( "github.com/multivactech/MultiVAC/configs/config" "github.com/multivactech/MultiVAC/model/shard" "github.com/multivactech/MultiVAC/p2p/connection" ) const ( maxSyncCandidates = 10 ) // A simple sync peer management class per worker. // NOTE(huangsz): Better to be handled in a centralized place when p2p refactor is done. type syncPeerManager struct { shardIdx shard.Index curSyncPeer *syncPeer candidates map[*connection.ConnPeer]*syncPeer } type syncPeer struct { cp *connection.ConnPeer nodeType config.NodeType } func (p *syncPeer) equals(a *syncPeer) bool { if a == nil { return p == a } return p.cp == a.cp && p.nodeType == a.nodeType } func newSyncPeerManager(shardIdx shard.Index) *syncPeerManager { return &syncPeerManager{shardIdx: shardIdx, candidates: make(map[*connection.ConnPeer]*syncPeer, maxSyncCandidates)} } func (mgr *syncPeerManager) addSyncPeerCandidate(cp *connection.ConnPeer, t config.NodeType) { mgr.candidates[cp] = &syncPeer{cp: cp, nodeType: t} } // Returns one peer for sync. // Will choose one when available if there isn't one yet. // Returns nil if there is no sync peer. func (mgr *syncPeerManager) getSyncPeer() *syncPeer { if mgr.curSyncPeer == nil { for _, p := range mgr.candidates { shards := p.cp.GetShards() for _, shardID := range shards { if mgr.shardIdx == shardID { mgr.curSyncPeer = p break } } } } return mgr.curSyncPeer } // Returns whether or not the removed peer is the current sync peer. 
func (mgr *syncPeerManager) removePeerCandidate(cp *connection.ConnPeer, t config.NodeType) bool { delete(mgr.candidates, cp) m := &syncPeer{cp: cp, nodeType: t} if mgr.isCurSyncPeer(m) { mgr.curSyncPeer = nil return true } return false } func (mgr *syncPeerManager) isCurSyncPeer(sp *syncPeer) bool { return mgr.curSyncPeer != nil && mgr.curSyncPeer.cp == sp.cp && mgr.curSyncPeer.nodeType == sp.nodeType }
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. // See LICENSE.txt for license information. // package main import "github.com/spf13/cobra" func newCmdInstallationOperation() *cobra.Command { cmd := &cobra.Command{ Use: "operation", Short: "Manipulate installation operations managed by the provisioning server.", } cmd.AddCommand(newCmdInstallationRestorationOperation()) cmd.AddCommand(newCmdInstallationDBMigrationOperation()) return cmd }
package user

import "time"

// User represents an application user persisted via go-pg; the pg struct tags
// configure the primary key and zero-value persistence.
type User struct {
	Username  string    `json:"username" pg:",use_zero"`
	DiscordId string    `json:"discord_id" pg:",pk,use_zero"` // primary key
	Birthday  time.Time `json:"birthday"`
	Anilist   string    `json:"anilist"`
	Waifu     string    `json:"waifu"`
	Admin     bool      `json:"admin" pg:",use_zero"`
}

// UserStore defines database operations for a user.
type UserStore interface {
	// Get returns the user identified by userId.
	Get(userId string) (*User, error)
	// GetAll returns every stored user.
	GetAll() ([]*User, error)
	// Create persists a new user.
	Create(user User) error
	// Update overwrites an existing user.
	Update(user User) error
	// Delete removes the user identified by userId.
	Delete(userId string) error
}
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//

package utility

import (
	"strings"

	"github.com/mattermost/mattermost-cloud/model"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

// cloudprober manages the cloudprober Helm chart on a cluster: the target
// cluster, the kubeconfig used to reach it, and the actual vs. desired chart
// versions.
type cloudprober struct {
	cluster        *model.Cluster
	kubeconfigPath string
	logger         log.FieldLogger
	actualVersion  *model.HelmUtilityVersion
	desiredVersion *model.HelmUtilityVersion
}

// newCloudproberOrUnmanagedHandle returns an unmanaged handle when the
// cluster marks cloudprober as unmanaged; otherwise it returns a validated
// cloudprober utility handle.
func newCloudproberOrUnmanagedHandle(cluster *model.Cluster, kubeconfigPath string, logger log.FieldLogger) (Utility, error) {
	desired := cluster.DesiredUtilityVersion(model.CloudproberCanonicalName)
	actual := cluster.ActualUtilityVersion(model.CloudproberCanonicalName)

	if model.UtilityIsUnmanaged(desired, actual) {
		return newUnmanagedHandle(model.CloudproberCanonicalName, logger), nil
	}

	cloudprober := newCloudproberHandle(desired, cluster, kubeconfigPath, logger)
	err := cloudprober.validate()
	if err != nil {
		return nil, errors.Wrap(err, "cloudprober utility config is invalid")
	}

	return cloudprober, nil
}

// newCloudproberHandle constructs a cloudprober handle; the actual version is
// read from the cluster's utility metadata.
func newCloudproberHandle(desiredVersion *model.HelmUtilityVersion, cluster *model.Cluster, kubeconfigPath string, logger log.FieldLogger) *cloudprober {
	return &cloudprober{
		cluster:        cluster,
		logger:         logger.WithField("cluster-utility", model.CloudproberCanonicalName),
		kubeconfigPath: kubeconfigPath,
		desiredVersion: desiredVersion,
		actualVersion:  cluster.UtilityMetadata.ActualVersions.Cloudprober,
	}
}

// validate checks that the handle has the inputs required to run Helm.
func (c *cloudprober) validate() error {
	if c.kubeconfigPath == "" {
		return errors.New("kubeconfig path cannot be empty")
	}
	if c.cluster == nil {
		return errors.New("cluster cannot be nil")
	}

	return nil
}

// CreateOrUpgrade installs or upgrades the cloudprober chart, then refreshes
// the recorded actual version from what Helm reports.
func (c *cloudprober) CreateOrUpgrade() error {
	logger := c.logger.WithField("cloudprober-action", "upgrade")
	h := c.newHelmDeployment(logger)

	err := h.Update()
	if err != nil {
		return err
	}

	err = c.updateVersion(h)
	return err
}

// Name returns the canonical utility name for cloudprober.
func (c *cloudprober) Name() string {
	return model.CloudproberCanonicalName
}

// Destroy deletes the cloudprober Helm release.
func (c *cloudprober) Destroy() error {
	helm := c.newHelmDeployment(c.logger)
	return helm.Delete()
}

// Migrate is a no-op for cloudprober.
func (c *cloudprober) Migrate() error {
	return nil
}

// DesiredVersion returns the chart version this utility should be running.
func (c *cloudprober) DesiredVersion() *model.HelmUtilityVersion {
	return c.desiredVersion
}

// ActualVersion returns the currently-deployed chart version with the
// "cloudprober-" chart-name prefix stripped, or nil when unknown.
func (c *cloudprober) ActualVersion() *model.HelmUtilityVersion {
	if c.actualVersion == nil {
		return nil
	}
	return &model.HelmUtilityVersion{
		Chart:      strings.TrimPrefix(c.actualVersion.Version(), "cloudprober-"),
		ValuesPath: c.actualVersion.Values(),
	}
}

// newHelmDeployment builds the Helm deployment description for the
// chartmuseum/cloudprober chart ("cloudprober" release and namespace).
func (c *cloudprober) newHelmDeployment(logger log.FieldLogger) *helmDeployment {
	return newHelmDeployment(
		"chartmuseum/cloudprober",
		"cloudprober",
		"cloudprober",
		c.kubeconfigPath,
		c.desiredVersion,
		defaultHelmDeploymentSetArgument,
		logger,
	)
}

// ValuesPath returns the values file path of the desired version, or "" when
// no desired version is set.
func (c *cloudprober) ValuesPath() string {
	if c.desiredVersion == nil {
		return ""
	}
	return c.desiredVersion.Values()
}

// updateVersion records the version Helm reports as actually deployed.
func (c *cloudprober) updateVersion(h *helmDeployment) error {
	actualVersion, err := h.Version()
	if err != nil {
		return err
	}

	c.actualVersion = actualVersion
	return nil
}
package candyjs

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	. "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// CandySuite drives the candyjs duktape-binding tests. Each test gets a
// fresh Context (SetUpTest) with a JS-visible `store` function that captures
// a value into s.stored for assertions on the Go side.
type CandySuite struct {
	ctx    *Context
	stored interface{}
}

var _ = Suite(&CandySuite{})

func (s *CandySuite) SetUpTest(c *C) {
	s.ctx = NewContext()
	s.stored = nil
	s.ctx.PushGlobalGoFunction("store", func(value interface{}) {
		s.stored = value
	})
}

func (s *CandySuite) TestPushGlobalCandyJSObject(c *C) {
	c.Assert(s.ctx.PevalString(`store(CandyJS._functions.toString())`), IsNil)
	c.Assert(s.stored, Equals, "[object Object]")

	c.Assert(s.ctx.PevalString(`store(CandyJS._call.toString())`), IsNil)
	c.Assert(s.stored, Equals, "function () { [ecmascript code] }")

	c.Assert(s.ctx.PevalString(`store(CandyJS.proxy.toString())`), IsNil)
	c.Assert(s.stored, Equals, "function () { [ecmascript code] }")

	c.Assert(s.ctx.PevalString(`store(CandyJS.require.toString())`), IsNil)
	c.Assert(s.stored, Equals, "function () { [native code] }")
}

func (s *CandySuite) TestPushGlobalCandyJSObject_Require(c *C) {
	fn := func(ctx *Context) {
		ctx.PushString("qux")
	}

	RegisterPackagePusher("foo", fn)
	c.Assert(s.ctx.PevalString(`store(CandyJS.require("foo"))`), IsNil)
	c.Assert(s.stored, Equals, "qux")
}

func (s *CandySuite) TestSetRequireFunction(c *C) {
	s.ctx.SetRequireFunction(func(id string, a ...interface{}) string {
		return fmt.Sprintf(`exports.store = function () { store("%s"); };`, id)
	})

	c.Assert(s.ctx.PevalString("require('foo').store()"), IsNil)
	c.Assert(s.stored, Equals, "foo")
}

func (s *CandySuite) TestPushType(c *C) {
	s.ctx.PushGlobalObject()
	s.ctx.PushObject()
	s.ctx.PushType(MyStruct{})
	s.ctx.PutPropString(-2, "MyStruct")
	s.ctx.PutPropString(-2, "foo")
	s.ctx.Pop()

	c.Assert(s.ctx.PevalString(`
		obj = new foo.MyStruct()
		obj.int = 42
		store(obj)
	`), IsNil)

	c.Assert(s.stored.(*MyStruct).Int, Equals, 42)
}

func (s *CandySuite) TestGlobalPushType(c *C) {
	s.ctx.PushGlobalType("MyStruct", MyStruct{})
	c.Assert(s.ctx.PevalString(`
		obj = new MyStruct()
		obj.int = 42
		store(obj)
	`), IsNil)

	c.Assert(s.stored.(*MyStruct).Int, Equals, 42)
}

func (s *CandySuite) TestPushProxy(c *C) {
	s.ctx.PushGlobalObject()
	s.ctx.PushObject()
	s.ctx.PushProxy(&MyStruct{Int: 142})
	s.ctx.PutPropString(-2, "obj")
	s.ctx.PutPropString(-2, "foo")
	s.ctx.Pop()

	err := s.ctx.PevalString(`store(foo.obj.int)`)
	c.Assert(err, IsNil)
	c.Assert(s.stored, Equals, 142.0)
}

// Proxy get/set/has tests: numeric values round-trip through JS as float64.
func (s *CandySuite) TestPushGlobalProxy_GetMap(c *C) {
	s.ctx.PushGlobalProxy("test", &map[string]int{"foo": 42})
	s.ctx.PevalString(`store(test.foo)`)
	c.Assert(s.stored, Equals, 42.0)
}

func (s *CandySuite) TestPushGlobalProxy_GetPtr(c *C) {
	s.ctx.PushGlobalProxy("test", &MyStruct{Int: 42})
	s.ctx.PevalString(`store(test.int)`)
	c.Assert(s.stored, Equals, 42.0)

	s.ctx.PevalString(`try { x = test.baz; } catch(err) { store(true); }`)
	c.Assert(s.stored, Equals, true)
}

func (s *CandySuite) TestPushGlobalProxy_Set(c *C) {
	s.ctx.PushGlobalProxy("test", &MyStruct{Int: 42})
	s.ctx.PevalString(`test.int = 21; store(test.int)`)
	c.Assert(s.stored, Equals, 21.0)

	s.ctx.PevalString(`try { test.baz = 21; } catch(err) { store(true); }`)
	c.Assert(s.stored, Equals, true)
}

func (s *CandySuite) TestPushGlobalProxy_Has(c *C) {
	s.ctx.PushGlobalProxy("test", &MyStruct{})
	s.ctx.PevalString(`store("int" in test)`)
	c.Assert(s.stored, Equals, true)

	s.ctx.PevalString(`store("qux" in test)`)
	c.Assert(s.stored, Equals, false)
}

func (s *CandySuite) TestPushGlobalProxy_Nested(c *C) {
	s.ctx.PushGlobalProxy("test", &MyStruct{
		Int:     42,
		Float64: 21.0,
		Nested:  &MyStruct{Int: 21},
	})

	c.Assert(s.ctx.PevalString(`store([
		test.int,
		test.multiply(2),
		test.nested.int,
		test.nested.multiply(3)
	])`), IsNil)

	c.Assert(s.stored, DeepEquals, []interface{}{42.0, 84.0, 21.0, 63.0})
}

func (s *CandySuite) TestPushGlobalProxy_Integration(c *C) {
	now := time.Now()
	after := now.Add(time.Millisecond)

	s.ctx.PushGlobalProxy("a", now)
	s.ctx.PushGlobalProxy("b", after)

	// time.Time.Sub returns a Duration in nanoseconds: 1ms == 1e6.
	s.ctx.PevalString(`store(b.sub(a))`)
	c.Assert(s.stored, Equals, 1000000.0)
}

func (s *CandySuite) TestPushGlobalInterface(c *C) {
	s.ctx.PushGlobalInterface("int", 42)

	c.Assert(s.ctx.PevalString(`store(int)`), IsNil)
	c.Assert(s.stored, Equals, 42.0)
}

func (s *CandySuite) TestPushGlobalStruct(c *C) {
	s.ctx.PushGlobalStruct("test", &MyStruct{
		Int:     42,
		Float64: 21.0,
		Nested:  &MyStruct{Int: 21},
	})

	c.Assert(s.ctx.PevalString(`store([
		test.int,
		test.multiply(2),
		test.nested.int,
		test.nested.multiply(3)
	])`), IsNil)

	c.Assert(s.stored, DeepEquals, []interface{}{42.0, 84.0, 21.0, 63.0})
}

// pushGlobalValue tests: conversion of individual reflect.Values into JS.
func (s *CandySuite) TestPushGlobalValueInt(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf(42))
	c.Assert(s.ctx.PevalString(`store(test)`), IsNil)
	c.Assert(s.stored, Equals, 42.0)
}

func (s *CandySuite) TestPushGlobalValueUint(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf(uint(42)))
	c.Assert(s.ctx.PevalString(`store(test)`), IsNil)
	c.Assert(s.stored, Equals, 42.0)
}

func (s *CandySuite) TestPushGlobalValueFloat(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf(42.2))
	c.Assert(s.ctx.PevalString(`store(test)`), IsNil)
	c.Assert(s.stored, Equals, 42.2)
}

func (s *CandySuite) TestPushGlobalValueString(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf("foo"))
	c.Assert(s.ctx.PevalString(`store(test)`), IsNil)
	c.Assert(s.stored, Equals, "foo")
}

func (s *CandySuite) TestPushGlobalValueStruct(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf(MyStruct{Int: 42}))
	c.Assert(s.ctx.PevalString(`store(test.int)`), IsNil)
	c.Assert(s.stored, Equals, 42.0)
}

func (s *CandySuite) TestPushGlobalValueStructPtr(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf(&MyStruct{Int: 42}))
	c.Assert(s.ctx.PevalString(`store(test.int)`), IsNil)
	c.Assert(s.stored, Equals, 42.0)
}

func (s *CandySuite) TestPushGlobalValueNil(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf(nil))
	c.Assert(s.ctx.PevalString(`store(test)`), IsNil)
	c.Assert(s.stored, Equals, nil)
}

func (s *CandySuite) TestPushGlobalValueDefault(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf([]string{"foo", "bar"}))
	c.Assert(s.ctx.PevalString(`store(test)`), IsNil)
	c.Assert(s.stored, DeepEquals, []interface{}{"foo", "bar"})
}

func (s *CandySuite) TestPushGlobalValueStringPtr(c *C) {
	foo := "foo"
	s.ctx.pushGlobalValue("test", reflect.ValueOf(&foo))
	c.Assert(s.ctx.PevalString(`store(test)`), IsNil)
	c.Assert(s.stored, Equals, "foo")
}

// PendingTest prefix: not picked up by gocheck (deliberately disabled).
func (s *CandySuite) PendingTestPushGlobalValueWithMethods(c *C) {
	s.ctx.pushGlobalValue("test", reflect.ValueOf(time.Duration(1e5)))
	c.Assert(s.ctx.PevalString(`store(test.string())`), IsNil)
	c.Assert(s.stored, Equals, 42.0)
}

func (s *CandySuite) TestPushGlobalValues(c *C) {
	s.ctx.pushGlobalValues("test", []reflect.Value{
		reflect.ValueOf("foo"), reflect.ValueOf("qux"),
	})

	c.Assert(s.ctx.PevalString(`store(test)`), IsNil)
	c.Assert(s.stored, DeepEquals, []interface{}{"foo", "qux"})
}

// PushGlobalGoFunction argument-conversion tests (JS -> Go).
func (s *CandySuite) TestPushGlobalGoFunction_String(c *C) {
	var called interface{}
	s.ctx.PushGlobalGoFunction("test_in_string", func(s string) {
		called = s
	})

	s.ctx.EvalString("test_in_string('foo')")
	c.Assert(called, Equals, "foo")
}

func (s *CandySuite) TestPushGlobalGoFunction_Int(c *C) {
	var ri, ri8, ri16, ri32, ri64 interface{}
	s.ctx.PushGlobalGoFunction("test_in_int", func(i int, i8 int8, i16 int16, i32 int32, i64 int64) {
		ri = i
		ri8 = i8
		ri16 = i16
		ri32 = i32
		ri64 = i64
	})

	s.ctx.EvalString("test_in_int(42, 8, 16, 32, 64)")
	c.Assert(ri, Equals, 42)
	c.Assert(ri8, Equals, int8(8))
	c.Assert(ri16, Equals, int16(16))
	c.Assert(ri32, Equals, int32(32))
	c.Assert(ri64, Equals, int64(64))
}

func (s *CandySuite) TestPushGlobalGoFunction_Uint(c *C) {
	var ri, ri8, ri16, ri32, ri64 interface{}
	s.ctx.PushGlobalGoFunction("test_in_uint", func(i uint, i8 uint8, i16 uint16, i32 uint32, i64 uint64) {
		ri = i
		ri8 = i8
		ri16 = i16
		ri32 = i32
		ri64 = i64
	})

	s.ctx.EvalString("test_in_uint(42, 8, 16, 32, 64)")
	c.Assert(ri, Equals, uint(42))
	c.Assert(ri8, Equals, uint8(8))
	c.Assert(ri16, Equals, uint16(16))
	c.Assert(ri32, Equals, uint32(32))
	c.Assert(ri64, Equals, uint64(64))
}

func (s *CandySuite) TestPushGlobalGoFunction_Float(c *C) {
	var called64 interface{}
	var called32 interface{}
	s.ctx.PushGlobalGoFunction("test_in_float", func(f64 float64, f32 float32) {
		called64 = f64
		called32 = f32
	})

	s.ctx.EvalString("test_in_float(42, 42)")
	c.Assert(called64, Equals, 42.0)
	c.Assert(called32, Equals, float32(42.0))
}

func (s *CandySuite) TestPushGlobalGoFunction_Bool(c *C) {
	var called interface{}
	s.ctx.PushGlobalGoFunction("test_in_bool", func(b bool) {
		called = b
	})

	s.ctx.EvalString("test_in_bool(true)")
	c.Assert(called, Equals, true)
}

func (s *CandySuite) TestPushGlobalGoFunction_Interface(c *C) {
	var called interface{}
	s.ctx.PushGlobalGoFunction("test", func(i interface{}) {
		called = i
	})

	s.ctx.EvalString("test('qux')")
	c.Assert(called, Equals, "qux")
}

func (s *CandySuite) TestPushGlobalGoFunction_Struct(c *C) {
	var called *MyStruct
	s.ctx.PushGlobalGoFunction("test", func(m *MyStruct) {
		called = m
	})

	s.ctx.EvalString("test({'int':42})")
	c.Assert(called.Int, Equals, 42)
}

func (s *CandySuite) TestPushGlobalGoFunction_Slice(c *C) {
	var called interface{}
	s.ctx.PushGlobalGoFunction("test_in_slice", func(s []interface{}) {
		called = s
	})

	s.ctx.EvalString("test_in_slice(['foo', 42])")
	c.Assert(called, DeepEquals, []interface{}{"foo", 42.0})
}

func (s *CandySuite) TestPushGlobalGoFunction_Map(c *C) {
	var called interface{}
	s.ctx.PushGlobalGoFunction("test_in_map", func(s map[string]interface{}) {
		called = s
	})

	s.ctx.EvalString("test_in_map({foo: 42, qux: {bar: 'bar'}})")
	c.Assert(called, DeepEquals, map[string]interface{}{
		"foo": 42.0,
		"qux": map[string]interface{}{"bar": "bar"},
	})
}

// JS null arguments become the Go zero value of each parameter type.
func (s *CandySuite) TestPushGlobalGoFunction_Nil(c *C) {
	var cm, cs, ci, cst interface{}
	s.ctx.PushGlobalGoFunction("test_nil", func(m map[string]interface{}, s []interface{}, i int, st string) {
		cm = m
		cs = s
		ci = i
		cst = st
	})

	s.ctx.EvalString("test_nil(null, null, null, null)")
	c.Assert(cm, DeepEquals, map[string]interface{}(nil))
	c.Assert(cs, DeepEquals, []interface{}(nil))
	c.Assert(ci, DeepEquals, 0)
	c.Assert(cst, DeepEquals, "")
}

func (s *CandySuite) TestPushGlobalGoFunction_Date(c *C) {
	var called interface{}
	s.ctx.PushGlobalGoFunction("test_in_date", func(d time.Time) {
		called = d
	})

	// JS Date months are 0-based (9 == October).
	s.ctx.EvalString("test_in_date(new Date(Date.UTC(1999,9,19)))")
	c.Assert(called, Equals, time.Date(1999, 10, 19, 0, 0, 0, 0, time.UTC))
}

// Omitted arguments behave like null: each becomes the zero value.
func (s *CandySuite) TestPushGlobalGoFunction_Optional(c *C) {
	var cm, cs, ci, cst interface{}
	s.ctx.PushGlobalGoFunction("test_optional", func(m map[string]interface{}, s []interface{}, i int, st string) {
		cm = m
		cs = s
		ci = i
		cst = st
	})

	s.ctx.EvalString("test_optional()")
	c.Assert(cm, DeepEquals, map[string]interface{}(nil))
	c.Assert(cs, DeepEquals, []interface{}(nil))
	c.Assert(ci, DeepEquals, 0)
	c.Assert(cst, DeepEquals, "")
}

func (s *CandySuite) TestPushGlobalGoFunction_Variadic(c *C) {
	var calledA interface{}
	var calledB interface{}
	s.ctx.PushGlobalGoFunction("test_in_variadic", func(s string, is ...int) {
		calledA = s
		calledB = is
	})

	s.ctx.EvalString("test_in_variadic('foo', 21, 42)")
	c.Assert(calledA, DeepEquals, "foo")
	c.Assert(calledB, DeepEquals, []int{21, 42})
}

func (s *CandySuite) TestPushGlobalGoFunction_EmptyVariadic(c *C) {
	var calledA interface{}
	var calledB interface{}
	s.ctx.PushGlobalGoFunction("test_in_variadic", func(s string, is ...int) {
		calledA = s
		calledB = is
	})

	s.ctx.EvalString("test_in_variadic('foo')")
	c.Assert(calledA, DeepEquals, "foo")
	c.Assert(calledB, DeepEquals, []int{})
}

// Multiple non-error return values surface in JS as an array.
func (s *CandySuite) TestPushGlobalGoFunction_ReturnMultiple(c *C) {
	s.ctx.PushGlobalGoFunction("test", func() (int, int, error) {
		return 2, 4, nil
	})

	c.Assert(s.ctx.PevalString("store(test())"), IsNil)
	c.Assert(s.stored, HasLen, 2)
	c.Assert(s.stored.([]interface{})[0], Equals, 2.0)
	c.Assert(s.stored.([]interface{})[1], Equals, 4.0)
}

func (s *CandySuite) TestPushGlobalGoFunction_ReturnStruct(c *C) {
	s.ctx.PushGlobalGoFunction("test", func() *MyStruct {
		return &MyStruct{Int: 42}
	})

	c.Assert(s.ctx.PevalString("store(test().multiply(3))"), IsNil)
	c.Assert(s.stored, Equals, 126.0)
}

func (s *CandySuite) TestPushGlobalGoFunction_Function(c *C) {
	s.ctx.PushGlobalGoFunction("test", func(fn func(int, int) int) {
		s.stored = fn
	})

	c.Assert(s.ctx.PevalString(`
		test(CandyJS.proxy(function(a, b) { return a * b; }));
	`), IsNil)

	c.Assert(s.stored.(func(int, int) int)(10, 5), Equals, 50)
}

func (s *CandySuite) TestPushGlobalGoFunction_FunctionMultiple(c *C) {
	s.ctx.PushGlobalGoFunction("test", func(fn func(int, int) (int, int)) {
		s.stored = fn
	})

	c.Assert(s.ctx.PevalString(`
		test(CandyJS.proxy(function(a, b) { return [b, a]; }));
	`), IsNil)

	a, b := s.stored.(func(int, int) (int, int))(10, 5)
	c.Assert(a, Equals, 5)
	c.Assert(b, Equals, 10)
}

// A non-nil Go error return becomes a JS exception.
func (s *CandySuite) TestPushGlobalGoFunction_Error(c *C) {
	s.ctx.PushGlobalGoFunction("test", func() (string, error) {
		return "foo", fmt.Errorf("foo")
	})

	c.Assert(s.ctx.PevalString(`
		try {
			test();
		} catch(err) {
			store(true);
		}
	`), IsNil)

	c.Assert(s.stored, Equals, true)
}

func (s *CandySuite) TestJsonEncode(c *C) {
	ms := &MyStruct{Date: time.Date(1984, 12, 24, 1, 2, 3, 456*int(time.Millisecond), time.UTC),
		Int:     142,
		Float64: 3.141596254}
	s.ctx.PushGlobalProxy("test", ms)

	cases := []string{
		// serialize proxy directly
		`test`,
		// serialize object containing nested proxies
		`({ date: test.date, int: test.int, float64: test.float64 })`,
		// serialize proxy directly.
		`(function() {
			test.date = new Date(Date.UTC(1984,11,24,1,2,3,456))
			return test
		})()`}

	for _, cs := range cases {
		c.Assert(s.ctx.PevalString(cs), IsNil)
		js := s.ctx.JsonEncode(-1)
		s.ctx.Pop()
		c.Assert(js, Matches, ".*\"date\":\"1984-12-24T01:02:03.456Z\".*")
		c.Assert(js, Matches, ".*\"int\":142.*")
		c.Assert(js, Matches, ".*\"float64\":3.141596254.*")
	}
}

func (s *CandySuite) TestCustomProxy(c *C) {
	customProxy := &myCustomProxy{values: map[string]interface{}{
		"name":     "John",
		"shoeSize": 40,
		"dob":      time.Date(1980, 7, 31, 1, 2, 3, 456*int(time.Millisecond), time.UTC),
	}}
	s.ctx.PushGlobalProxy("customProxy", customProxy)

	c.Assert(s.ctx.PevalString(`
		customProxy.name += " Doe"
		customProxy.shoeSize += 2.5
		var d = customProxy.dob;
		d.setFullYear(1984)
		customProxy.dob = d
	`), IsNil)

	c.Assert(customProxy.calls, DeepEquals, []string{
		"get(name)", "set(name,John Doe)",
		"get(shoeSize)", "set(shoeSize,42.5)",
		"get(dob)", "set(dob,1984-07-31T01:02:03.456Z)"})
	c.Assert(customProxy.values["name"], Equals, "John Doe")
	c.Assert(customProxy.values["shoeSize"], Equals, 42.5)
	c.Assert(customProxy.values["dob"], Equals, "1984-07-31T01:02:03.456Z")
}

// MyTimeStruct and MyTime exercise marshaling of a time.Time type alias.
type MyTimeStruct struct {
	MyTime MyTime `json:"myTime"`
}

type MyTime time.Time

func (t MyTime) MarshalJSON() ([]byte, error) {
	return time.Time(t).MarshalJSON()
}

func (t *MyTime) UnmarshalJSON(data []byte) error {
	tt := time.Time{}
	if err := tt.UnmarshalJSON(data); err != nil {
		return err
	}
	*t = MyTime(tt)
	return nil
}

func (s *CandySuite) TestGlobalStructTimeAlias(c *C) {
	m := &MyTimeStruct{
		MyTime: MyTime(time.Date(1984, 12, 24, 1, 2, 3, 456*int(time.Millisecond), time.UTC)),
	}
	s.ctx.PushGlobalProxy("struct", m)

	// time alias can be used like a normal date in js
	err := s.ctx.PevalString(`store(struct.myTime.getFullYear())`)
	c.Assert(err, IsNil)
	c.Assert(s.stored, Equals, 1984.0)

	// setting a new date directly on the struct works
	// note: setting it on the struct requires UnmarshalJSON on the type alias
	err = s.ctx.PevalString(`struct.myTime = new Date("2014-10-30T09:03:34.141Z")`)
	c.Assert(err, IsNil)
	c.Assert(time.Time(m.MyTime).Year(), Equals, 2014)
	c.Assert(time.Time(m.MyTime).Day(), Equals, 30)

	// updating the value in js and setting it on the struct works
	// note: setting it on the struct requires UnmarshalJSON on the type alias
	err = s.ctx.PevalString(`
		var d = struct.myTime;
		d.setFullYear(2000);
		struct.myTime = d;
	`)
	c.Assert(err, IsNil)
	c.Assert(time.Time(m.MyTime).Year(), Equals, 2000)
}

func (s *CandySuite) TestCallback(c *C) {
	s.ctx.PushGlobalGoFunction(
		"fnWithCallback",
		func(cb func(s string) string) string {
			return "Hello " + cb("world")
		})
	c.Assert(s.ctx.PevalString(`store(fnWithCallback(
		function(s) {
			return s + "!";
		}));`), IsNil)
	c.Assert(s.stored, Equals, "Hello world!")
}

// TestCallbackThrows verifies that a JS exception thrown inside a callback
// surfaces to Go as the callback's error return value.
func (s *CandySuite) TestCallbackThrows(c *C) {
	s.ctx.PushGlobalGoFunction(
		"fnWithCallbackError",
		func(cb func(s string) error) string {
			err := cb("foo")
			if err != nil {
				return err.Error()
			}
			return ""
		})
	s.ctx.PushGlobalGoFunction(
		"fnWithCallbackStringError",
		func(cb func(s string) (string, error)) string {
			s, err := cb("foo")
			if err != nil {
				return err.Error()
			}
			return s
		})
	s.ctx.PushGlobalGoFunction(
		"fnWithCallbackStringString",
		func(cb func(s string) (string, string)) string {
			s1, s2 := cb("foo")
			return s1 + " " + s2
		})
	s.ctx.PushGlobalGoFunction(
		"fnWithCallbackStringErrorMultiple",
		func(cb func(s string) (string, error)) string {
			str := ""
			for {
				var err error
				str, err = cb(str)
				if err != nil {
					return err.Error()
				}
			}
		})

	c.Assert(s.ctx.PevalString(`store(fnWithCallbackError(
		function(s) {
			throw new Error(s);
		}));`), IsNil)
	c.Assert(s.stored, Equals, "Error: foo")

	c.Assert(s.ctx.PevalString(`store(fnWithCallbackStringError(
		function(s) {
			throw new Error(s);
		}));`), IsNil)
	c.Assert(s.stored, Equals, "Error: foo")

	c.Assert(s.ctx.PevalString(`store(fnWithCallbackStringString(
		function(s) {
			return ["foo", "bar"]
		}));`), IsNil)
	c.Assert(s.stored, Equals, "foo bar")

	// call a function multiple times before throwing an error
	c.Assert(s.ctx.PevalString(`store(fnWithCallbackStringErrorMultiple(
		function(s) {
			if (s === "aaa") {
				throw new Error(s);
			}
			return s + "a";
		}));`), IsNil)
	c.Assert(s.stored, Equals, "Error: aaa")

	// call a function inside another function before throwing an error
	c.Assert(s.ctx.PevalString(`store(fnWithCallbackStringErrorMultiple(
		function(s) {
			if (s === "foo bar/foo bar/foo bar") {
				throw new Error(s);
			}
			var result = fnWithCallbackStringString(function(s) {
				return [s, "bar"]
			});
			return (s ? s + "/" : "") + result;
		}));`), IsNil)
	c.Assert(s.stored, Equals, "Error: foo bar/foo bar/foo bar")
}

func (s *CandySuite) TearDownTest(c *C) {
	s.ctx.DestroyHeap()
}

// Fixture types used across the suite.
type MyNestedStruct struct {
	Name string `json:"name"`
}

func (m *MyNestedStruct) SayHello() string {
	return fmt.Sprintf("Hello %s!", m.Name)
}

// MyStruct covers every field kind the bridge supports, plus an unexported
// field/method to verify they stay invisible to JS.
type MyStruct struct {
	Bool        bool                      `json:"bool"`
	Int         int                       `json:"int"`
	Int8        int8                      `json:"int8"`
	Int16       int16                     `json:"int16"`
	Int32       int32                     `json:"int32"`
	Int64       int64                     `json:"int64"`
	UInt        uint                      `json:"uInt"`
	UInt8       uint8                     `json:"uInt8"`
	UInt16      uint16                    `json:"uInt16"`
	UInt32      uint32                    `json:"uInt32"`
	UInt64      uint64                    `json:"uInt64"`
	String      string                    `json:"string"`
	Bytes       []byte                    `json:"bytes"`
	Float32     float32                   `json:"float32"`
	Float64     float64                   `json:"float64"`
	Date        time.Time                 `json:"date"`
	Empty       *MyStruct                 `json:"empty"`
	Nested      *MyStruct                 `json:"nested"`
	Slice       []int                     `json:"slice"`
	StructSlice []MyNestedStruct          `json:"structSlice"`
	StructMap   map[string]MyNestedStruct `json:"structMap"`
	private     int
}

func (m *MyStruct) Multiply(x int) int {
	return m.Int * x
}

func (m *MyStruct) privateMethod() int {
	return 1
}

// myCustomProxy records every trap invocation in calls for assertions.
type myCustomProxy struct {
	values map[string]interface{}
	calls  []string
	Name   string `json:"name"`
}

func (p *myCustomProxy) Has(t interface{}, k string) bool {
	p.calls = append(p.calls, fmt.Sprintf("has(%s)", k))
	return true
}

func (p *myCustomProxy) Get(t interface{}, k string, recv interface{}) (interface{}, error) {
	p.calls = append(p.calls, fmt.Sprintf("get(%s)", k))
	return p.values[k], nil
}

func (p *myCustomProxy) Set(t interface{}, k string, v, recv interface{}) (bool, error) {
	p.calls = append(p.calls, fmt.Sprintf("set(%s,%v)", k, v))
	p.values[k] = v
	return true, nil
}

func (p *myCustomProxy) Enumerate(t interface{}) (interface{}, error) {
	keys := []string{}
	for key := range p.values {
		keys = append(keys, key)
	}

	p.calls = append(p.calls, "enumerate()")
	return keys, nil
}

func (s *CandySuite) TestErrorFactory(c *C) {
	s.ctx.SetErrorFactory(
		func(ctx *Context, index int) error {
			return fmt.Errorf(">%s<", ctx.SafeToString(index))
		})

	var actualErr error
	s.ctx.PushGlobalGoFunction("test", func(cb func() error) {
		actualErr = cb()
	})
	c.Assert(s.ctx.PevalString(`test(function() { throw new Error("Deliberate error"); });`), IsNil)
	c.Assert(actualErr.Error(), Equals, ">Error: Deliberate error<")
}
package api import ( "encoding/json" "io/ioutil" "log" "net/http" "github.com/def4ultx/mv-restapi/models" ) const httpURI = "https://s3-ap-southeast-1.amazonaws.com/ysetter/media/video-search.json" // RequestVideo get video metadata from httpURI and return SearchResponse func RequestVideo() (*models.SearchResponse, error) { var body string res, err := http.Get(httpURI) if err != nil { log.Println(err) return nil, err } defer res.Body.Close() if res.StatusCode == http.StatusOK { bodyBytes, _ := ioutil.ReadAll(res.Body) body = string(bodyBytes) } // fmt.Println(body) // io.Copy(os.Stdout, res.Body) metadata := &models.SearchResponse{} err = json.Unmarshal([]byte(body), metadata) if err != nil { log.Println(err) return nil, err } return metadata, nil // video := &models.Video{} // err = json.Unmarshal([]byte(metadata.Item[0]), video) // if err != nil { // log.Println(err) // } // fmt.Println(metadata.Item[0].Snippet.Title) } // GetVideoByID find video by ETAG and return func GetVideoByID(id string) *models.Video { metadata, err := RequestVideo() if err != nil { return nil } for _, v := range metadata.Item { if v.ID.VideoID == id { video := v return &video } } return nil } // GetVideoByTitle find video by ETAG and return // func GetVideoByTitle(title string) []models.Video { // metadata, err := RequestVideo() // if err != nil { // return nil // } // var items []models.Video // for _, v := range metadata.Item { // title := strings.ToLower(v.Snippet.Title) // if strings.Contains(title, title) { // items = append(items, v) // } // } // return items // }
package main

import (
	"hello/handler"
	"hello/subscriber"
	"time"

	"github.com/micro/go-micro/v2/registry"

	"github.com/micro/go-micro/v2"
	log "github.com/micro/go-micro/v2/logger"
	"github.com/micro/go-micro/v2/registry/etcd"

	hello "hello/proto/hello"
)

// etcdReg is the etcd-backed service registry shared with the service built
// in main.
var etcdReg registry.Registry

// init wires up the etcd registry at the default local address before main
// constructs the service.
func init() {
	etcdReg = etcd.NewRegistry(
		registry.Addrs("127.0.0.1:2379"),
		registry.Timeout(10*time.Second),
	)
}

// main builds, initialises and runs the go-micro "hello" service, registering
// its RPC handler and topic subscriber.
func main() {
	// New Service
	service := micro.NewService(
		micro.Name("go.micro.service.hello"),
		micro.Version("latest"),
		micro.Registry(etcdReg),
	)

	// Initialise service
	service.Init()

	// Register Handler
	hello.RegisterHelloHandler(service.Server(), new(handler.Hello))

	// Register Struct as Subscriber
	micro.RegisterSubscriber("go.micro.service.hello", service.Server(), new(subscriber.Hello))

	// Run service
	if err := service.Run(); err != nil {
		log.Fatal(err)
	}
}
package main

import (
	"fmt"
)

// main picks a greeting based on the hard-coded name, prints it, and then
// prints the name itself.
func main() {
	nome := "Joao"

	var mensagem string
	switch nome {
	case "Ana":
		mensagem = "É a Ana"
	case "Joao":
		mensagem = "É o João"
	default:
		mensagem = "Não conheço"
	}

	fmt.Println(mensagem)
	fmt.Println(nome)
}
package leaderboard import ( "reflect" "testing" ) type testCase struct { name string scores []int32 alice []int32 ans []int32 } var testCases = []testCase{ {"1", []int32{100, 100, 50, 40, 40, 20, 10}, []int32{5, 25, 50, 120}, []int32{6, 4, 2, 1}}, {"2", []int32{100, 90, 90, 80, 75, 60}, []int32{50, 65, 77, 90, 102}, []int32{6, 5, 4, 2, 1}}, } func TestClimbing(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { got := climbingLeaderboard(tc.scores, tc.alice) // test list equality if !reflect.DeepEqual(got, tc.ans) { t.Errorf("Scores were %v\n\t\t\t expected %v", got, tc.ans) } }) } }
package injector import ( "log" "strconv" "strings" corev1 "k8s.io/api/core/v1" ) const ( eventstoreEnabledKey = "eventstore/enabled" eventstorePortKey = "eventstore/port" eventstoreNames = "eventstore/names" evenstoreDefaultPort = 5600 ) func (i *injector) isEventstoreEnabled(pod *corev1.Pod) bool { val, ok := pod.Annotations[eventstoreEnabledKey] if !ok { return false } str := strings.ToLower(val) switch str { case "y", "yes", "true", "1", "on": return true default: return false } } func (i *injector) getEvenstorePort(pod *corev1.Pod) int { val, ok := pod.Annotations[eventstorePortKey] if !ok { return evenstoreDefaultPort } port, err := strconv.Atoi(val) if err != nil { log.Printf("value of port annotation '%s' can't be converted to integer, using %d default port", val, evenstoreDefaultPort) return evenstoreDefaultPort } return port } func (i *injector) getEvenstoreNames(pod *corev1.Pod) string { val, ok := pod.Annotations[eventstoreNames] if !ok { log.Println("no evenstore names specified in pod annotation") return "" } return val }
// Package socketClient provides a small helper for one-shot request/response
// exchanges over a unix domain socket.
package socketClient

import (
	"context"
	"fmt"
	"io"
	"net"
)

// Request will send a request to the socket and return the bytes sent back.
// It will close the socket at the end.
// It takes a timeout value which will be used to wait for the output of the
// socket. This is a read timeout (via ctx cancellation/deadline).
func Request(ctx context.Context, socketFile, requestText string) ([]byte, error) {
	c, err := net.Dial("unix", socketFile)
	if err != nil {
		return []byte{}, err
	}
	defer c.Close()

	// Fix: the original assigned the Write error but never checked it, so a
	// failed send silently proceeded to wait for a response.
	if _, err = c.Write([]byte(requestText)); err != nil {
		return []byte{}, err
	}

	type readData struct {
		b   []byte
		err error
	}

	// Buffered with capacity 1 so the reader goroutine can always deliver its
	// result and exit even after we have returned on ctx cancellation; the
	// original unbuffered channel leaked the goroutine in that case.
	readChan := make(chan *readData, 1)
	go func() {
		// We limit the size of the requests here to 5120 bytes. Anything more
		// is most likely an error.
		buf := make([]byte, 5120)
		nbytes, err := c.Read(buf)
		if err == nil && nbytes == len(buf) {
			err = fmt.Errorf("buffer at maximum capacity of %d, likely oversaturation", len(buf))
		}
		if err == io.EOF {
			// We get EOF on a closed socket. This is expected behaviour.
			// So reset the error.
			err = nil
		}
		readChan <- &readData{b: buf[0:nbytes], err: err}
	}()

	select {
	case <-ctx.Done():
		return []byte{}, fmt.Errorf("timed out waiting for response from socket")
	case out := <-readChan:
		return out.b, out.err
	}
}
package httpsrv

import (
	"context"

	"github.com/k81/kate"
)

// HelloHandler serves a static "hello world" payload. It embeds BaseHandler
// for the shared response helpers (OKData).
type HelloHandler struct {
	BaseHandler
}

// ServeHTTP replies to every request with an OK envelope wrapping the string
// "hello world".
func (h *HelloHandler) ServeHTTP(ctx context.Context, w kate.ResponseWriter, r *kate.Request) {
	h.OKData(ctx, w, "hello world")
}
// Package mailer contains a utility to send an smtp package mailer import ( "crypto/tls" "fmt" "log" "net/smtp" "strings" ) // Mail contains the information related to email. type Mail struct { SenderID string Password string ToIds []string CcIds []string BccIds []string Subject string Body string } // SMTPServer contains information related to smtp-server. type SMTPServer struct { Host string Port string } func (s *SMTPServer) serverName() string { return s.Host + ":" + s.Port } func (mail *Mail) buildMessage() string { mime := "1.0;" contenType := "text/html; charset=\"UTF-8\";" message := "" message += fmt.Sprintf("MIME-version: %s\r\n", mime) message += fmt.Sprintf("Content-Type: %s\r\n", contenType) message += fmt.Sprintf("From: %s\r\n", mail.SenderID) if len(mail.ToIds) > 0 { message += fmt.Sprintf("To: %s\r\n", strings.Join(mail.ToIds, ";")) } if len(mail.CcIds) > 0 { message += fmt.Sprintf("Cc: %s\r\n", strings.Join(mail.CcIds, ";")) } message += fmt.Sprintf("Subject: %s\r\n", mail.Subject) message += "\r\n" + mail.Body return message } // Send expects a Mail struct and SMTPServer struct func Send(mail Mail, smtpServer SMTPServer) { messageBody := mail.buildMessage() //build an auth auth := smtp.PlainAuth("", mail.SenderID, mail.Password, smtpServer.Host) // Gmail will reject connection if it's not secure // TLS config tlsconfig := &tls.Config{ InsecureSkipVerify: true, ServerName: smtpServer.Host, } conn, err := tls.Dial("tcp", smtpServer.serverName(), tlsconfig) if err != nil { log.Panic(err) } client, err := smtp.NewClient(conn, smtpServer.Host) if err != nil { log.Panic(err) } // step 1: Use Auth if err = client.Auth(auth); err != nil { log.Panic(err) } // step 2: add all from and to if err = client.Mail(mail.SenderID); err != nil { log.Panic(err) } // for _, k := range mail.ToIds { // if err = client.Rcpt(k); err != nil { // log.Panic(err) // } // } receivers := append(mail.ToIds, mail.CcIds...) receivers = append(receivers, mail.BccIds...) 
for _, k := range receivers { log.Println("sending to: ", k) if err = client.Rcpt(k); err != nil { log.Panic(err) } } // Data w, err := client.Data() if err != nil { log.Panic(err) } _, err = w.Write([]byte(messageBody)) if err != nil { log.Panic(err) } err = w.Close() if err != nil { log.Panic(err) } client.Quit() log.Println("Mail sent successfully") }
package controllers import ( "database/sql" "github.com/go-gorp/gorp" _ "github.com/mattn/go-sqlite3" r "github.com/revel/revel" "RecipeHosting/app/models" "fmt" "golang.org/x/crypto/bcrypt" ) // Global variable that stored the database object map var ( Dbm *gorp.DbMap ) // Initialize the Database by importing the sqlite database file, and setting up the Columns. // NOTE: This is where admins and MCP database info can be defined. func InitDB() { // Base path for reading files bp := r.BasePath //Get Sqlite database file db, err := sql.Open("sqlite3", bp+"/tmpDb.bin") if err != nil { fmt.Println("Sql Open Fail") panic(err) } // set the Databse map Dbm = &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}} // Function to set the columns for our Table setColumnSizes := func(t *gorp.TableMap, colSizes map[string]int) { for col, size := range colSizes { t.ColMap(col).MaxSize = size } } // Add the user table to the Database t := Dbm.AddTable(models.User{}).SetKeys(true, "UserId") // Ensure the password is transient (we DONT save it) t.ColMap("Password").Transient = true // Set the column sizes for the username and name setColumnSizes(t, map[string]int{ "FirstName": 100, //changed from Username:20 "LastName": 100, "Email": 50, "Recipes": 100000, // 100,000 * 32 bit = 400 kB of data. Not bad. }) // Set up database tracing for errors - commented our for production Dbm.TraceOn("[gorp]", r.INFO) // Create the Table Dbm.CreateTables() // Should for whatever reason, the SQLite file is deleted, these need to be called to initialize the database with // the mcp data and testing user's. //Create a test user. 
bcryptPassword, _ := bcrypt.GenerateFromPassword( []byte("trust"), bcrypt.DefaultCost) trustMe := &models.User{ UserId:1, FirstName:"trustF", LastName:"trustL", Email:"trust", Password:"trust", HashedPassword:bcryptPassword, Recipes:"", } if err := Dbm.Insert(trustMe); err != nil { panic(err) } } //Gorp Controller that extends the revel controller and allows us to use gorp transactions with the DB type GorpController struct { *r.Controller Txn *gorp.Transaction } //Start the database transaction and return the revel result (should be nil) func (c *GorpController) Begin() r.Result { txn, err := Dbm.Begin() if err != nil { panic(err) } c.Txn = txn return nil } //Commit the changes to the database and return the revel result (should be nil) func (c *GorpController) Commit() r.Result { if c.Txn == nil { return nil } if err := c.Txn.Commit(); err != nil && err != sql.ErrTxDone { panic(err) } c.Txn = nil return nil } //Rollback the changes to the databases and return the revel result (should be nil) func (c *GorpController) Rollback() r.Result { if c.Txn == nil { return nil } if err := c.Txn.Rollback(); err != nil && err != sql.ErrTxDone { panic(err) } c.Txn = nil return nil } func (c *GorpController) GetUser (email string) *models.User { //Select from our database users, err := Dbm.Select(models.User{}, `select * from User where Email = ?`, email) //Check for error if err != nil { panic(err) } //Check to see if we got any results if len(users) == 0 { println("NO USER " + email) // if none, then they don't exist return nil } return users[0].(*models.User) } func (c *GorpController) SaveNewUser (user *models.User) error { if err := Dbm.Insert(user); err != nil { return err } return nil } func (c *GorpController) UpdateUser (user *models.User) error { if _, err := Dbm.Update(user); err != nil { return err } return nil } func (c *GorpController) UpdateRecipe (user *models.User, recipe string) error { user.Recipes = recipe return c.UpdateUser(user) } // Function to load a 
recipe. //func (c *GorpController) LoadRecipe(title, author string) *models.Recipe{ // // TODO: load the recipe data file and return it. // filepath := models.GetRecipeFilePath(title, author) // body, _ := ioutil.ReadFile(filepath) // data := strings.Split(string(body), ":://::") // ing := data[0] // dir := data[1] // return &models.Recipe{ // Title:title, // Author:author, // Directions:dir, // Ingredients:ing, // Filepath:filepath, // } //}
// Copyright (c) 2017 Kuguar <licenses@kuguar.io> Author: Adrian P.K. <apk@kuguar.io> // // MIT License // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
package controllers import ( "fmt" htmlTemplate "html/template" "github.com/arschles/go-bindata-html-template" "github.com/gorilla/mux" "github.com/gorilla/schema" "net/http" "net/url" "path" "github.com/adrianpk/fundacja/app" "github.com/adrianpk/fundacja/bootstrap" "github.com/adrianpk/fundacja/logger" "github.com/adrianpk/fundacja/models" "github.com/adrianpk/fundacja/repo" _ "github.com/lib/pq" // Import pq without side effects ) var ( permissionAssetsBase string permissionExtAssetsBase string permissionTemplates map[string]*template.Template permissionExtTemplates map[string]*htmlTemplate.Template permissionIndex = "/permissions" permissionNew = "/permissions/new" permissionEdit = "/permissions/edit/%s" permissionShow = "/permissions/%s" permissionDelete = "/permissions/delete/%s" ) // InitializePermission - Initialize the controller func InitializePermission() { if permissionTemplates == nil { permissionTemplates = make(map[string]*template.Template) } if permissionExtTemplates == nil { permissionExtTemplates = make(map[string]*htmlTemplate.Template) } parsePermissionAssets() parsePermissionExtAssets() } // IndexPermissions - Returns a collection containing all permissions. 
// Handler for HTTP Get - "/permissions" func IndexPermissions(w http.ResponseWriter, r *http.Request) { logger.Debug("IndexPermissions...") // Check permissions // defer func() { // recover() // showPermissionError(w, r, indexView, layoutView, nil, app.ErrEntitySelect, warningAlert, nil) // }() // services.IsAllowed("f254cfe5", loggedInUserID(r)) // Get ID vars := mux.Vars(r) orgid := vars["organization"] // Get Organization organization, err := getOrganization(orgid) if err != nil { showPermissionError(w, r, indexView, layoutView, nil, app.ErrEntityNotFound, warningAlert, err) return } permissionRepo, err := repo.MakePermissionRepository() if err != nil { showPermissionError(w, r, indexView, layoutView, nil, app.ErrEntitySelect, warningAlert, err) return } // Select permissions, err := permissionRepo.GetAll(orgid) if err != nil { showPermissionError(w, r, indexView, layoutView, nil, app.ErrEntitySelect, warningAlert, err) return } pageModel := makePage(makeParentChild(organization, permissions), nil) renderPermissionTemplate(w, r, indexView, layoutView, pageModel) } // NewPermission - Presents new permission form. // Handler for HTTP Get - "/permissions/new" func NewPermission(w http.ResponseWriter, r *http.Request) { logger.Debug("NewPermission...") vars := mux.Vars(r) orgid := vars["organization"] // Get Organization organization, err := getOrganization(orgid) if err != nil { showPermissionError(w, r, indexView, layoutView, nil, app.ErrEntityNotFound, warningAlert, err) return } pageModel := makePage(makeParentChild(organization, nil), nil) renderPermissionTemplate(w, r, newView, layoutView, pageModel) } // CreatePermission - Creates a new Permission. 
// Handler for HTTP Post - "/permissions/create" func CreatePermission(w http.ResponseWriter, r *http.Request) { logger.Debug("CreatePermission...") vars := mux.Vars(r) orgid := vars["organization"] // Parse err := r.ParseForm() if err != nil { showPermissionError(w, r, newView, layoutView, nil, app.ErrEntityCreate, warningAlert, err) return } // Decode var permission models.Permission err = schema.NewDecoder().Decode(&permission, r.Form) if err != nil { showPermissionError(w, r, newView, layoutView, permission, app.ErrEntityCreate, warningAlert, err) return } // Get Organization organization, err := getOrganization(orgid) if err != nil { showPermissionError(w, r, indexView, layoutView, nil, app.ErrEntityNotFound, warningAlert, err) return } // Get repo permissionRepo, err := repo.MakePermissionRepository() if err != nil { showPermissionError(w, r, newView, layoutView, permission, app.ErrEntityCreate, warningAlert, err) return } // Set values permission.OrganizationName = organization.Name permission.OrganizationID = organization.ID // Persist permissionRepo.Create(&permission) if err != nil { showPermissionError(w, r, newView, layoutView, permission, app.ErrDataAccess, warningAlert, err) return } // Respond indexURL := fmt.Sprintf("/organizations/%s/permissions", organization.ID.String) redirectTo(w, r, indexURL, makePageAlert("Permission created", infoAlert)) } // ShowPermission - Returns a single Permission by its id or permissionname. // Handler for HTTP Get - "/permissions/{permission}" func ShowPermission(w http.ResponseWriter, r *http.Request) { logger.Debug("ShowPermission...") ShowPermissionByID(w, r) } // ShowPermissionByID - Returns a single Permission by its id. 
// Handler for HTTP Get - "/permissions/{permission}" func ShowPermissionByID(w http.ResponseWriter, r *http.Request) { logger.Debug("ShowPermissionByID...") // Get ID vars := mux.Vars(r) id := vars["permission"] // Get repo permissionRepo, err := repo.MakePermissionRepository() if err != nil { redirectTo(w, r, permissionIndex, makePageAlert(app.ErrDataStore.Error(), warningAlert)) return } // Select permission, err := permissionRepo.Get(id) if err != nil { redirectTo(w, r, permissionIndex, makePageAlert(app.ErrDataAccess.Error(), warningAlert)) return } renderPermissionTemplate(w, r, showView, layoutView, makePage(permission, nil)) } // EditPermission - Presents edit permission form. // Handler for HTTP Get - "/permissions/edit" func EditPermission(w http.ResponseWriter, r *http.Request) { logger.Debug("EditPermission...") // Get ID vars := mux.Vars(r) id := vars["permission"] // Get repo permissionRepo, err := repo.MakePermissionRepository() if err != nil { redirectTo(w, r, permissionIndex, makePageAlert(app.ErrDataStore.Error(), warningAlert)) return } // Select permission, err := permissionRepo.Get(id) if err != nil { redirectTo(w, r, permissionIndex, makePageAlert(app.ErrDataAccess.Error(), warningAlert)) return } renderPermissionTemplate(w, r, editView, layoutView, makePage(permission, nil)) return } // UpdatePermission - Update an existing Permission. 
// Handler for HTTP Put - "/permissions/{permission}" func UpdatePermission(w http.ResponseWriter, r *http.Request) { logger.Debug("UpdatePermission...") // Get ID vars := mux.Vars(r) id := vars["permission"] // Parse err := r.ParseForm() if err != nil { showPermissionError(w, r, signupView, layoutView, nil, app.ErrRegistration, warningAlert, err) return } // Decode var permission models.Permission err = schema.NewDecoder().Decode(&permission, r.Form) if err != nil { showPermissionError(w, r, editView, layoutView, permission, app.ErrRegistration, warningAlert, err) return } permission.ID = models.ToNullsString(id) // Get repo permissionRepo, err := repo.MakePermissionRepository() if err != nil { showPermissionError(w, r, editView, layoutView, permission, app.ErrEntityUpdate, warningAlert, err) return } // Check against current permission currentPermission, err := permissionRepo.Get(id) if err != nil { showPermissionError(w, r, editView, layoutView, permission, app.ErrEntityUpdate, warningAlert, err) return } // Avoid ID spoofing err = verifyID(permission.IdentifiableModel, currentPermission.IdentifiableModel) if err != nil { showPermissionError(w, r, editView, layoutView, currentPermission, app.ErrEntityUpdate, warningAlert, err) return } // Update err = permissionRepo.Update(&permission) if err != nil { showPermissionError(w, r, editView, layoutView, currentPermission, app.ErrEntityUpdate, warningAlert, err) return } // Respond redirectTo(w, r, permissionIndex, makePageAlert("Permission updated", warningAlert)) } // InitDeletePermission - Show permission deletion page. 
// Handler for HTTP Get - "/permissions/init-delete/{permission}" func InitDeletePermission(w http.ResponseWriter, r *http.Request) { logger.Debug("InitDeletePermission...") // Get ID vars := mux.Vars(r) id := vars["permission"] // Get repo permissionRepo, err := repo.MakePermissionRepository() if err != nil { redirectTo(w, r, permissionIndex, makePageAlert(app.ErrDataStore.Error(), warningAlert)) return } // Select permission, err := permissionRepo.Get(id) if err != nil { redirectTo(w, r, permissionIndex, makePageAlert(app.ErrDataAccess.Error(), warningAlert)) return } renderPermissionTemplate(w, r, deleteView, layoutView, makePage(permission, nil)) } // DeletePermission - Deletes an existing Permission // Handler for HTTP Delete - "/permissions/{id}" func DeletePermission(w http.ResponseWriter, r *http.Request) { logger.Debug("DeletePermission...") // Get ID vars := mux.Vars(r) id := vars["permission"] // Get repo permissionRepo, err := repo.MakePermissionRepository() if err != nil { showPermissionError(w, r, indexView, layoutView, nil, app.ErrEntitySelect, warningAlert, err) return } // Delete err = permissionRepo.Delete(id) if err != nil { showPermissionError(w, r, indexView, layoutView, nil, app.ErrEntityDelete, warningAlert, err) return } // Respond redirectTo(w, r, permissionIndex, makePageAlert("Permission updated", warningAlert)) } func permissionIDfromURL(r *http.Request) string { u, _ := url.Parse(r.URL.Path) dir := path.Dir(u.Path) id := path.Base(dir) logger.Debugf("Permission id in url is %s", id) return id } func permissionnameFromURL(r *http.Request) string { u, _ := url.Parse(r.URL.Path) dir := path.Dir(u.Path) permissionname := path.Base(dir) logger.Debugf("PermissionName in url is %s", permissionname) return permissionname } func parsePermissionAssets() { //logger.Debug("Parsing permission assets...") assetNames := []string{indexView, newView, showView, editView, deleteView} parseAssets(&permissionAssetsBase, "layouts", "permission", layoutView, 
assetNames, permissionTemplates) } func parsePermissionExtAssets() { assetNames := []string{indexView, newView, showView, editView, deleteView} parseExtAssets(&permissionExtAssetsBase, "layouts", "permission", layoutView, assetNames, permissionExtTemplates) } func renderPermissionTemplate(w http.ResponseWriter, r *http.Request, groupName string, name string, page *Page) { if useExtTemplates { renderExtPermissionTemplate(w, r, groupName, name, page) return } renderIntPermissionTemplate(w, r, groupName, name, page) } // Render templates for the given name, template definition and data object func renderIntPermissionTemplate(w http.ResponseWriter, r *http.Request, groupName string, name string, page *Page) { renderTemplate(w, r, permissionAssetsBase, groupName, name, permissionTemplates, page) } // Render templates for the given name, template definition and data object func renderExtPermissionTemplate(w http.ResponseWriter, r *http.Request, groupName string, name string, page *Page) { //logger.Debugf("Autoreload: %t", bootstrap.AppConfig.IsAutoreloadOn()) if bootstrap.AppConfig.IsAutoreloadOn() { logger.Debug("Reloading templates.") permissionExtTemplates = make(map[string]*htmlTemplate.Template) parsePermissionExtAssets() } renderExtTemplate(w, r, permissionExtAssetsBase, groupName, name, permissionExtTemplates, page) } func showPermissionError(w http.ResponseWriter, r *http.Request, page string, layout string, model interface{}, err error, alertKind string, cause error) { logger.Dump(cause) pageModel := makePage(model, makePageAlert(err.Error(), alertKind)) renderPermissionTemplate(w, r, page, layoutView, pageModel) }
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package geomfn

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/geo"
	"github.com/cockroachdb/cockroach/pkg/geo/geopb"
	"github.com/stretchr/testify/require"
)

// TestSubdivide checks Subdivide against a table of geometries, then
// covers the minimum-vertex-count error and the max-recursion-depth
// behavior of the underlying subdivideRecursive helper.
func TestSubdivide(t *testing.T) {
	// args bundles the input geometry with the per-part vertex budget.
	type args struct {
		g           geo.Geometry
		maxVertices int
	}
	tests := []struct {
		name string
		args args
		want []geo.Geometry // expected subdivision parts, in order
	}{
		{
			"empty geometry",
			args{geo.MustParseGeometry("POLYGON EMPTY"), 5},
			[]geo.Geometry{geo.MustParseGeometry("POLYGON EMPTY")},
		},
		{
			"width and height bounds equal to zero",
			args{geo.MustParseGeometry("POINT(1 10)"), 5},
			[]geo.Geometry{geo.MustParseGeometry("POINT (1 10)")},
		},
		{
			"single geometry, no. of vertices under tolerance",
			args{geo.MustParseGeometry("LINESTRING(0 0, 10 10, 0 10, 10 0)"), 5},
			[]geo.Geometry{geo.MustParseGeometry("LINESTRING(0 0, 10 10, 0 10, 10 0)")},
		},
		{
			"Polygon, width > height",
			args{geo.MustParseGeometry("POLYGON((-1 -1,-1 -0.5, -1 0, 1 0.5, 1 -1,-1 -1))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("POLYGON((-1 -1,-1 0,0 0.25,0 -1,-1 -1))"),
				geo.MustParseGeometry("POLYGON((0 0.25, 1 0.5, 1 -1, 0 -1, 0 0.25))"),
			},
		},
		{
			"invalid (bow-tie) Polygon, width > height",
			args{geo.MustParseGeometry("POLYGON((0 0, -2 -1, -2 1, 0 0, 2 1, 2 -1, 0 0))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("POLYGON((0 0, -2 -1, -2 1, 0 0))"),
				geo.MustParseGeometry("POLYGON((0 0, 2 1, 2 -1, 0 0))"),
			},
		},
		{
			"invalid (bow-tie) Polygon, height > width",
			args{geo.MustParseGeometry("POLYGON((0 0, -1 -2, -1 2, 0 0, 1 2, 1 -2, 0 0))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("POLYGON((0 0, 1 0, 1 -2, 0 0))"),
				geo.MustParseGeometry("POLYGON((0 0,-1 -2,-1 0,0 0))"),
				geo.MustParseGeometry("POLYGON((-1 0,-1 2,0 0,-1 0))"),
				geo.MustParseGeometry("POLYGON((0 0,1 2,1 0,0 0))"),
			},
		},
		{
			"Polygon, 12 decimal points precision",
			args{geo.MustParseGeometry("POLYGON((-0.1 -0.1,-0.1 -0.000000000005, -0.1 0, 0.1 0.000000000005, 0.1 -0.1,-0.1 -0.1))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("POLYGON((-0.1 -0.1,-0.1 0,0 0.0000000000025,0 -0.1,-0.1 -0.1))"),
				geo.MustParseGeometry("POLYGON((0 0.0000000000025, 0.1 0.000000000005, 0.1 -0.1, 0 -0.1, 0 0.0000000000025))"),
			},
		},
		{
			"Polygon, trapezoid+rectangle",
			args{geo.MustParseGeometry("POLYGON((-1 0, -1 1, 0 2, 3 2, 3 0, -1 0))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("POLYGON((-1 0,-1 1,0 2,0 0,-1 0))"),
				geo.MustParseGeometry("POLYGON((0 2,3 2,3 0,0 0,0 2))"),
			},
		},
		{
			"Polygon with a hole inside",
			args{geo.MustParseGeometry("POLYGON((-1 -1, -1 1, 0 2, 1 1, 1 -1, 0 -2, -1 -1),(-0.5 -0.5, -0.5 0.5, 0.5 0.5, 0.5 -0.5, 0 -0.5, -0.5 -0.5))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("POLYGON((1 -1,0 -2,-1 -1,1 -1))"),
				geo.MustParseGeometry("POLYGON((-1 -1,-1 -0.5,1 -0.5,1 -1,-1 -1))"),
				geo.MustParseGeometry("POLYGON((-1 -0.5,-1 0.5,-0.5 0.5,-0.5 -0.5,-1 -0.5))"),
				geo.MustParseGeometry("POLYGON((-1 0.5,-1 1,0.5 1,0.5 0.5,-1 0.5))"),
				geo.MustParseGeometry("POLYGON((0.5 1,1 1,1 -0.5,0.5 -0.5,0.5 1))"),
				geo.MustParseGeometry("POLYGON((-1 1,0 2,1 1,-1 1))"),
			},
		},
		{
			"LineString, width < height ",
			args{geo.MustParseGeometry("LINESTRING(0 0, 10 15, 0 0, 10 15, 10 0, 10 15)"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("LINESTRING(0 0,5 7.5)"),
				geo.MustParseGeometry("LINESTRING(10 7.5,10 0)"),
				geo.MustParseGeometry("LINESTRING(5 7.5,10 15)"),
				geo.MustParseGeometry("LINESTRING(10 15,10 7.5)"),
			},
		},
		{
			"LineString, width > height",
			args{geo.MustParseGeometry("LINESTRING(0 0, 15 10, 0 0, 15 10, 15 0, 15 10)"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("LINESTRING(0 0,7.5 5)"),
				geo.MustParseGeometry("LINESTRING(7.5 5,15 10)"),
				geo.MustParseGeometry("LINESTRING(15 10,15 0)"),
			},
		},
		{
			"LineString with specified SRID",
			args{geo.MustParseGeometry("SRID=4269;LINESTRING(0 0, 10 15, 0 0, 10 15, 10 0, 10 15)"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("SRID=4269;LINESTRING(0 0,5 7.5)"),
				geo.MustParseGeometry("SRID=4269;LINESTRING(10 7.5,10 0)"),
				geo.MustParseGeometry("SRID=4269;LINESTRING(5 7.5,10 15)"),
				geo.MustParseGeometry("SRID=4269;LINESTRING(10 15,10 7.5)"),
			},
		},
		{
			"MultiLineString - horizontal and vertical lines",
			args{geo.MustParseGeometry("MULTILINESTRING((5 0, 5 1, 5 3, 5 4, 5 5, 5 6),(0 5, 1 5, 2 5, 3 5, 4 5, 6 5))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("LINESTRING(5 0,5 3)"),
				geo.MustParseGeometry("LINESTRING(5 3,5 6)"),
				geo.MustParseGeometry("LINESTRING(0 5,3 5)"),
				geo.MustParseGeometry("LINESTRING(3 5,6 5)"),
			},
		},
		{
			"MultiPoint, max vertices 6",
			args{geo.MustParseGeometry("MULTIPOINT((0 1),(1 2),(2 3),(3 4),(4 3),(6 2),(7 1),(8 0),(9 -1),(10 -2),(11 -3))"), 6},
			[]geo.Geometry{
				geo.MustParseGeometry("MULTIPOINT(0 1,1 2,2 3,3 4,4 3)"),
				geo.MustParseGeometry("MULTIPOINT(6 2,7 1,8 0,9 -1,10 -2,11 -3)"),
			},
		},
		{
			"GeometryCollection, types with different dimensions",
			args{geo.MustParseGeometry("GEOMETRYCOLLECTION(LINESTRING(0 0, 10 10, 0 10, 10 0), POLYGON((0 0, -2 -1, -2 1, 0 0, 2 1, 2 -1, 0 0)))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("POLYGON((0 0,-2 -1,-2 1,0 0))"),
				geo.MustParseGeometry("POLYGON((0 0,2 1,2 -1,0 0))"),
			},
		},
		{
			"GeometryCollection, types with different dimensions, point included",
			args{geo.MustParseGeometry("GEOMETRYCOLLECTION(LINESTRING(0 0, 10 10, 0 10, 10 0), POINT(1 10))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("LINESTRING(0 0,10 10,0 10,10 0)"),
			},
		},
		{
			"GeometryCollection, same types, one invalid",
			args{geo.MustParseGeometry("GEOMETRYCOLLECTION(POLYGON((0 0, -2 -1, -2 1, 0 0, 2 1, 2 -1, 0 0)),POLYGON((-1 -1,-1 -0.5, -1 0, 1 0.5, 1 -1,-1 -1)))"), 5},
			[]geo.Geometry{
				geo.MustParseGeometry("POLYGON((0 0,-2 -1,-2 1,0 0))"),
				geo.MustParseGeometry("POLYGON((0 0,2 1,2 -1,0 0))"),
				geo.MustParseGeometry("POLYGON((-1 -1,-1 0,0 0.25,0 -1,-1 -1))"),
				geo.MustParseGeometry("POLYGON((0 0.25, 1 0.5, 1 -1, 0 -1, 0 0.25))"),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := Subdivide(tt.args.g, tt.args.maxVertices)
			require.NoError(t, err)
			require.Equal(t, tt.want, got)
		})
	}
	// Subdivide rejects budgets below the documented minimum of 5 vertices.
	t.Run("less max vertices than minimum acceptable", func(t *testing.T) {
		_, err := Subdivide(geo.MustParseGeometry("LINESTRING(0 0, -10 10, 0 10)"), 4)
		require.Error(t, err)
		require.Equal(t, "max_vertices number cannot be less than 5", err.Error())
	})
	// With an artificially low maxDepth the recursion stops early and an
	// over-budget part is emitted as-is instead of being split further.
	t.Run("would need to divide more than the maximum depth", func(t *testing.T) {
		g := geo.MustParseGeometry("POLYGON((-1 -1, -1 1, 0 2, 1 1, 1 -1, 0 -2, -1 -1),(-0.5 -0.5, -0.5 0.5, 0.5 0.5, 0.5 -0.5, 0 -0.5, -0.5 -0.5))")
		gt, err := g.AsGeomT()
		require.NoError(t, err)
		dim, err := dimensionFromGeomT(gt)
		require.NoError(t, err)
		const maxDepth = 2
		const startDepth = 0
		const maxVertices = 5
		geomTs, err := subdivideRecursive(gt, maxVertices, startDepth, dim, maxDepth)
		require.NoError(t, err)
		got := []geo.Geometry{}
		for _, cg := range geomTs {
			g, err := geo.MakeGeometryFromGeomT(cg)
			require.NoError(t, err)
			// Re-attach the source SRID, which the geom conversion drops.
			g, err = g.CloneWithSRID(geopb.SRID(gt.SRID()))
			require.NoError(t, err)
			got = append(got, g)
		}
		want := []geo.Geometry{
			geo.MustParseGeometry("POLYGON((1 -1, 0 -2, -1 -1, 1 -1))"),
			geo.MustParseGeometry("POLYGON((-1 -1, -1 -0.5, 1 -0.5, 1 -1, -1 -1))"),
			geo.MustParseGeometry("POLYGON((-1 -0.5, -1 1, 1 1, 1 -0.5, 0.5 -0.5, 0.5 0.5, -0.5 0.5, -0.5 -0.5, -1 -0.5))"),
			geo.MustParseGeometry("POLYGON((-1 1, 0 2, 1 1, -1 1))"),
		}
		require.Equal(t, want, got)
	})
}
// Package tokenizer provides encoding for tokens that can carry user data. // // Tokens are made up of base64url(iv,aes(pkcs7(ts,data)),hmac) // where the iv is random, and hmac signs iv,aes(...). package tokenizer
package lang import ( "reflect" "testing" ) func TestListToSlice(t *testing.T) { list := MakePair(MakeNumber(42), MakePair(MakeString("value"), Nil)) result := ListToSlice(list) expected := []Expr{MakeNumber(42), MakeString("value")} if len(result) != len(expected) { t.Errorf("Result has wrong length, expected '%v', got '%v'", len(expected), len(result)) } if !reflect.DeepEqual(result, expected) { t.Errorf("Wrong result, expected '%v', got '%v'", expected, result) } } func TestSliceToList(t *testing.T) { list := []Expr{MakeNumber(42), MakeString("value")} result := SliceToList(list) expected := MakePair(MakeNumber(42), MakePair(MakeString("value"), Nil)) if !reflect.DeepEqual(result, expected) { t.Errorf("Wrong result, expected '%v', got '%v'", expected, result) } }
/* * Copyright (c) 2019. * by Steve Brush, Iridium Developers */ // Iridium payments gateway JSON RPC API for golang package iridiumWalletdRPC // Version declaration module mame, version major, minor and patch func Version() (name string, major int, minor int, patch int) { return "iridiumdRPC", 0, 0, 1 }
package dmsg import ( "testing" "github.com/skycoin/skycoin/src/util/logging" "github.com/stretchr/testify/assert" "github.com/skycoin/dmsg/cipher" ) func TestNewTransport(t *testing.T) { log := logging.MustGetLogger("dmsg_test") tr := NewTransport(nil, log, cipher.PubKey{}, cipher.PubKey{}, 0, func(id uint16) {}) assert.NotNil(t, tr) } func TestTransport_close(t *testing.T) { log := logging.MustGetLogger("dmsg_test") tr := NewTransport(nil, log, cipher.PubKey{}, cipher.PubKey{}, 0, func(id uint16) {}) closed := tr.close() t.Run("Valid close() result (1st attempt)", func(t *testing.T) { assert.True(t, closed) }) t.Run("Channel closed (1st attempt)", func(t *testing.T) { _, ok := <-tr.done assert.False(t, ok) }) closed = tr.close() t.Run("Valid close() result (2nd attempt)", func(t *testing.T) { assert.False(t, closed) }) t.Run("Channel closed (2nd attempt)", func(t *testing.T) { _, ok := <-tr.done assert.False(t, ok) }) }
package uno

import (
	"fmt"
	"strconv"
)

// NoCards is the canonical empty hand.
var (
	NoCards = []int{}
)

/* ===========================================================
   | 000     |         | -------                             |
   | 001~013 | Red     | 1-9,skip,reverse,draw_two,0         |
   | 014~026 | Yellow  | 1-9,skip,reverse,draw_two,0         |
   | 027~039 | Green   | 1-9,skip,reverse,draw_two,0         |
   | 040~052 | Blue    | 1-9,skip,reverse,draw_two,0         |
   | 053     | Fake    | wild                                |
   | 054     | Fake    | wild_and_draw                       |
   |---------|---------|-------------------------------------|
   | 060     | Special | draw                                |
   | 061~064 | Special | wild:red,yellow,green,blue          |
   | 065~068 | Special | wild_and_draw:red,yellow,green,blue |
   | 069     | Special | challenge                           |
   | 070     | Special | draw_four                           |
   =========================================================== */

// Card is UNO Card
type Card struct {
	ID    int `json:"id"`    // card identifier per the layout table above
	Color int `json:"color"` // 0=Red, 1=Yellow, 2=Green, 3=Blue; other values are Black/Special
	Name  int `json:"name"`  // face value index; interpreted modulo 13 for display
}

// String renders a human-readable description of the card: special cards by
// name, otherwise "[id]: Color Face".
func (c *Card) String() string {
	switch c.ID {
	case IDSpecialDraw:
		return "Special - Draw"
	case IDSpeicalDrawFour:
		return "Special - Draw Four"
	case IDSpecialChallenge:
		return "Special - Challenge"
	}

	var color, name string
	switch c.Color {
	case 0:
		color = "Red"
	case 1:
		color = "Yellow"
	case 2:
		color = "Green"
	case 3:
		color = "Blue"
	default:
		color = "Black/Special"
	}
	// Face indices 10-12 map to action cards; everything else is a number.
	switch val := c.Name % 13; val {
	case 10:
		name = "Skip"
	case 11:
		name = "Reverse"
	case 12:
		name = "Draw Two"
	default:
		name = strconv.Itoa(val)
	}
	return fmt.Sprintf("[%d]: %s %s", c.ID, color, name)
}

// IsNormal : return if it's r/y/g/b numbers,skip,reverse
// [ draw two ] is special
// NOTE(review): the `c.Name == 13` clause looks unreachable if Name is
// always a value modulo 13 — confirm the intended Name range.
func (c *Card) IsNormal() bool {
	if c.Color < 4 && (c.Name < 12 || c.Name == 13) {
		return true
	}
	return false
}

// NextColor : return true color for next player
func (c *Card) NextColor() int {
	// wild only, because wild_and_Draw is different
	if c.Name == Wild {
		// Wild IDs start at 61 (see layout table), so subtracting 61 yields
		// the chosen color index.
		return c.ID - 61 // r,y,g,b
	}
	return c.Color
}

// Info = (id) => Card
// Builds a Card value for an ID by deriving its color and face name.
func Info(id int) Card {
	return Card{
		ID:    id,
		Color: getColor(id),
		Name:  getName(id),
	}
}

// cardIsNumber reports whether id is a colored number card (1-9 or 0),
// excluding skip/reverse/draw-two action cards.
func cardIsNumber(id int) bool {
	if id > IDCardNone && id < IDCardWild {
		val := id % 13
		if val < IDCardRedSkip || val == IDCardRedNumZero {
			return true
		}
	}
	return false
}

// cardIsWildDrawFour reports whether id is a wild-draw-four (the fake
// placeholder or any of its four colored variants).
func cardIsWildDrawFour(id int) bool {
	if id == IDCardWildAndDraw { // its not needed
		return true
	}
	return id >= IDWildDrawFourRed && id <= IDWildDrawFourBlue
}

// cardIsWild reports whether id is a wild card (the fake placeholder or any
// of its four colored variants).
func cardIsWild(id int) bool {
	if id == IDCardWild { // its not needed
		return true
	}
	return id >= IDWildRed && id <= IDWildBlue
}

// cardIsSkip reports whether id is one of the four colored skip cards.
func cardIsSkip(id int) bool {
	switch id {
	case IDCardRedSkip:
		return true
	case IDCardYellowSkip:
		return true
	case IDCardGreenSkip:
		return true
	case IDCardBlueSkip:
		return true
	default:
		return false
	}
}

// cardIsReverse reports whether id is one of the four colored reverse cards.
func cardIsReverse(id int) bool {
	switch id {
	case IDCardRedReverse:
		return true
	case IDCardYellowReverse:
		return true
	case IDCardGreenReverse:
		return true
	case IDCardBlueReverse:
		return true
	default:
		return false
	}
}

// cardIsDrawTwo reports whether id is one of the four colored draw-two cards.
func cardIsDrawTwo(id int) bool {
	switch id {
	case IDCardRedDrawTwo:
		return true
	case IDCardYellowDrawTwo:
		return true
	case IDCardGreenDrawTwo:
		return true
	case IDCardBlueDrawTwo:
		return true
	default:
		return false
	}
}

// cardIsFake reports whether id falls in the fake-card range (053-054).
func cardIsFake(id int) bool {
	return id >= IDFakeCardRed && id <= IDFakeCardBlue
}

// not in use
// pickCardFromList removes the first occurrence of id from list, reporting
// whether it was found.
// NOTE(review): append(list[:ix], list[ix+1:]...) mutates the caller's
// backing array — if this is ever put into use, clone the slice first.
func pickCardFromList(id int, list []int) (bool, []int) {
	for ix, _id := range list {
		if _id == id {
			return true, append(list[:ix], list[ix+1:]...)
		}
	}
	return false, list
}

// isNotBluff reports whether the player holding relatedCards legitimately
// played cardID: true when any held card matches the card's alternate color.
func isNotBluff(cardID int, relatedCards []int) bool {
	altColor := getAltColor(cardID)
	for _, id := range relatedCards {
		color := getColor(id)
		if color == altColor {
			return true
		}
	}
	return false
}

// getFakeCard maps a colored card to the fake-card ID of the same color.
func getFakeCard(cardID int) int {
	c := getColor(cardID)
	return IDFakeCardRed + c
}
package main

import "fmt"

// getQueensAttack returns how many squares a queen standing at (rQ, cQ) can
// attack on an n x n chessboard (rows/columns are 1-indexed), given the
// obstacle coordinates. The queen attacks along the 8 rook+bishop rays and
// is blocked by the first obstacle on each ray.
//
// Args:
//
//	n         board size (board is n x n)
//	k         number of obstacles; unused (len(obstacles) carries the same
//	          information) but kept for signature compatibility
//	rQ, cQ    queen's row and column
//	obstacles each element is a {row, col} pair
//
// Returns the count of attackable squares.
func getQueensAttack(n int, k int, rQ int, cQ int, obstacles [][]int) (output int) {
	_ = k // retained only for the original call signature

	// Constant-time obstacle lookup keyed by (row, col).
	blocked := make(map[[2]int]bool, len(obstacles))
	for _, o := range obstacles {
		blocked[[2]int{o[0], o[1]}] = true
	}

	// The eight queen directions: vertical, horizontal and the four diagonals.
	dirs := [8][2]int{
		{1, 0}, {-1, 0}, {0, 1}, {0, -1},
		{1, 1}, {1, -1}, {-1, -1}, {-1, 1},
	}

	// Walk each ray until the board edge or the first obstacle.
	for _, d := range dirs {
		r, c := rQ+d[0], cQ+d[1]
		for r >= 1 && r <= n && c >= 1 && c <= n && !blocked[[2]int{r, c}] {
			output++
			r += d[0]
			c += d[1]
		}
	}
	return output
}

func main() {
	n, k, rQ, cQ := 5, 3, 4, 3
	obstacles := [][]int{
		{5, 5},
		{4, 2},
		{2, 3},
	}
	fmt.Println(getQueensAttack(n, k, rQ, cQ, obstacles))
}
package main import ( "4d63.com/assets/exchangerates" "4d63.com/assets/portfolio" ) type Data struct { ExchangeRates exchangerates.ExchangeRates Portfolios []portfolio.Portfolio } func (d Data) Names() []string { names := []string{} for _, p := range d.Portfolios { names = append(names, p.Name) } return names } func (d Data) Subset(indexes []int) Data { indexesMap := map[int]bool{} for _, i := range indexes { indexesMap[i] = true } return Data{ Portfolios: func() []portfolio.Portfolio { portfolios := []portfolio.Portfolio{} for i, p := range d.Portfolios { if indexesMap[i] { portfolios = append(portfolios, p) } } return portfolios }(), } }
package main

import (
	"flag"
	"buffer"
	"time"

	"github.com/golang/glog"
)

// init configures glog before main runs: rotate log files at 100 MB,
// mirror log output to stderr, write files under ./log, and enable
// V(1) verbose logging.
func init() {
	glog.MaxSize = 1024 * 1024 * 100    // maximum size per log file: 100 MB
	flag.Set("alsologtostderr", "true") // also write logs to stderr
	flag.Set("log_dir", "./log")        // directory where log files are saved
	flag.Set("v", "1")                  // verbosity level for glog V() output
	flag.Parse()
}

// main exercises a buffer pool with one producer goroutine (pushing
// 51200 integers) and one consumer goroutine (draining until a
// 150-second timer fires), then blocks forever so both can run.
func main() {
	// NOTE(review): failure is detected via pool == nil rather than
	// err != nil — presumably NewPool returns a nil pool on error, but
	// confirm against the buffer package.
	pool, err := buffer.NewPool(10, 4096)
	if pool == nil {
		glog.Info(err)
		return
	}
	// Producer: pushes integers into the pool.
	for i := 0; i < 1; i++ {
		go func() {
			glog.Info("Put Data begin..............")
			for i := 0; i < 51200; i++ {
				pool.Put(i)
			}
			glog.Info("Put Data End:", pool)
		}()
	}
	// Consumer: busy-drains the pool; stops when the ticker fires.
	for i := 0; i < 1; i++ {
		go func() {
			glog.Info("Get Data begin..............")
			ticker := time.NewTicker(time.Second * 150)
			defer ticker.Stop()
			for {
				select {
				case <-ticker.C:
					glog.Info("Get Data End:", pool)
					return
				default:
					pool.Get()
				}
			}
		}()
	}
	// Block forever; the process is terminated externally.
	select {}
}
package requests import ( "encoding/json" "fmt" "io/ioutil" "net/http" "net/url" "strings" "time" "github.com/google/go-querystring/query" "github.com/atomicjolt/canvasapi" "github.com/atomicjolt/canvasapi/models" "github.com/atomicjolt/string_utils" ) // GetSISImportList Returns the list of SIS imports for an account // // Example: // curl https://<canvas>/api/v1/accounts/<account_id>/sis_imports \ // -H 'Authorization: Bearer <token>' // https://canvas.instructure.com/doc/api/sis_imports.html // // Path Parameters: // # Path.AccountID (Required) ID // // Query Parameters: // # Query.CreatedSince (Optional) If set, only shows imports created after the specified date (use ISO8601 format) // # Query.CreatedBefore (Optional) If set, only shows imports created before the specified date (use ISO8601 format) // # Query.WorkflowState (Optional) . Must be one of initializing, created, importing, cleanup_batch, imported, imported_with_messages, aborted, failed, failed_with_messages, restoring, partially_restored, restoredIf set, only returns imports that are in the given state. // type GetSISImportList struct { Path struct { AccountID string `json:"account_id" url:"account_id,omitempty"` // (Required) } `json:"path"` Query struct { CreatedSince time.Time `json:"created_since" url:"created_since,omitempty"` // (Optional) CreatedBefore time.Time `json:"created_before" url:"created_before,omitempty"` // (Optional) WorkflowState []string `json:"workflow_state" url:"workflow_state,omitempty"` // (Optional) . 
Must be one of initializing, created, importing, cleanup_batch, imported, imported_with_messages, aborted, failed, failed_with_messages, restoring, partially_restored, restored } `json:"query"` } func (t *GetSISImportList) GetMethod() string { return "GET" } func (t *GetSISImportList) GetURLPath() string { path := "accounts/{account_id}/sis_imports" path = strings.ReplaceAll(path, "{account_id}", fmt.Sprintf("%v", t.Path.AccountID)) return path } func (t *GetSISImportList) GetQuery() (string, error) { v, err := query.Values(t.Query) if err != nil { return "", err } return v.Encode(), nil } func (t *GetSISImportList) GetBody() (url.Values, error) { return nil, nil } func (t *GetSISImportList) GetJSON() ([]byte, error) { return nil, nil } func (t *GetSISImportList) HasErrors() error { errs := []string{} if t.Path.AccountID == "" { errs = append(errs, "'Path.AccountID' is required") } for _, v := range t.Query.WorkflowState { if v != "" && !string_utils.Include([]string{"initializing", "created", "importing", "cleanup_batch", "imported", "imported_with_messages", "aborted", "failed", "failed_with_messages", "restoring", "partially_restored", "restored"}, v) { errs = append(errs, "WorkflowState must be one of initializing, created, importing, cleanup_batch, imported, imported_with_messages, aborted, failed, failed_with_messages, restoring, partially_restored, restored") } } if len(errs) > 0 { return fmt.Errorf(strings.Join(errs, ", ")) } return nil } func (t *GetSISImportList) Do(c *canvasapi.Canvas, next *url.URL) ([]*models.SISImport, *canvasapi.PagedResource, error) { var err error var response *http.Response if next != nil { response, err = c.Send(next, t.GetMethod(), nil) } else { response, err = c.SendRequest(t) } if err != nil { return nil, nil, err } if err != nil { return nil, nil, err } body, err := ioutil.ReadAll(response.Body) response.Body.Close() if err != nil { return nil, nil, err } ret := []*models.SISImport{} err = json.Unmarshal(body, &ret) if err != 
nil { return nil, nil, err } pagedResource, err := canvasapi.ExtractPagedResource(response.Header) if err != nil { return nil, nil, err } return ret, pagedResource, nil }
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// readFileIntoMem loads all of shia-labeouf.txt and returns it split on
// single spaces.
// NOTE(review): despite the name/comment mentioning "lines", the split
// separator is " " (space), so this returns words, not lines — confirm
// whether "\n" was intended.
// A read error is only printed; the function then proceeds, so callers
// may receive a slice derived from empty content.
func readFileIntoMem() []string {
	var filename = "shia-labeouf.txt"
	content, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Println("Error Or Something") //Do something
	}
	lines := strings.Split(string(content), " ")
	return lines
}

// readToken reads the API token from key.txt and returns it verbatim
// (including any trailing newline). A read error is only printed, in
// which case the returned string is empty.
func readToken() string {
	var keyfile = "key.txt"
	content, err := ioutil.ReadFile(keyfile)
	if err != nil {
		fmt.Println("Error")
	}
	return string(content[:])
}
package handlers

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"

	"github.com/husobee/vestigo"
	"github.com/libgit2/git2go"
	"github.com/tmaesaka/cellar/config"
)

// Repository type holds information about a repository.
type Repository struct {
	Name string `json:"name"` // Unique name of the repository
}

// IndexRepositoryHandler generates a list of existing repositories.
// It scans <datadir>/repos and includes only entries that open as git
// repositories; other entries are skipped (and logged when verbose).
func IndexRepositoryHandler(cfg *config.ApiConfig) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		repos := make([]Repository, 0)
		files, err := ioutil.ReadDir(path.Join(cfg.DataDir, "repos"))
		if err != nil {
			BadRequest(w, ApiError, err.Error())
			return
		}

		for _, f := range files {
			// Shadows the repoPath helper for the rest of this iteration.
			repoPath := repoPath(cfg.DataDir, f.Name())

			// NoSearch keeps git from walking up parent directories.
			_, err := git.OpenRepositoryExtended(repoPath, git.RepositoryOpenNoSearch, "")
			if err != nil {
				if cfg.Verbose {
					log.Printf("%s is not a git repository", repoPath)
				}
				continue
			}

			repos = append(repos, Repository{Name: f.Name()})
		}

		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(repos)
	}
}

// ShowRepositoryHandler looks up the requested git repository.
// Responds 404 when the named directory does not open as a repository.
func ShowRepositoryHandler(cfg *config.ApiConfig) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		name := vestigo.Param(r, "name")
		rpath := repoPath(cfg.DataDir, name)

		_, err := git.OpenRepository(rpath)
		if err != nil {
			NotFound(w)
			return
		}

		repo := Repository{Name: name}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(repo)
	}
}

// CreateRepositoryHandler provisions a bare git repository under the datadir
// directory. Relevant validation is also executed.
func CreateRepositoryHandler(cfg *config.ApiConfig) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { name := r.FormValue("name") if len(name) == 0 { BadRequest(w, InvalidRequestError, "name parameter required") return } rpath := repoPath(cfg.DataDir, name) gitRepo, _ := git.OpenRepository(rpath) if gitRepo != nil { errStr := fmt.Sprintf("respoitory %s already exists", name) BadRequest(w, ApiError, errStr) return } bareRepo := true _, err := git.InitRepository(rpath, bareRepo) if err != nil { BadRequest(w, ApiError, "failed to init repository") return } repo := Repository{Name: name} w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(repo) } } func UpdateRepositoryHandler(cfg *config.ApiConfig) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("unimplemented")) } } // DestroyRepositoryHandler destroys the specified repository. func DestroyRepositoryHandler(cfg *config.ApiConfig) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { name := vestigo.Param(r, "name") rpath := repoPath(cfg.DataDir, name) if _, err := os.Stat(rpath); os.IsNotExist(err) { NotFound(w) return } if err := os.RemoveAll(rpath); err != nil { BadRequest(w, ApiError, "failed to destroy repository") return } w.Header().Set("Content-Type", "application/json") } }
package main //hackerRank-Golang-test import ( "fmt" "net/http" "os" "hackerRank-Golang-test/driver" ph "hackerRank-Golang-test/handler/http" "github.com/go-chi/chi" "github.com/go-chi/chi/middleware" ) func main() { dbName := os.Getenv("DB_NAME") dbPass := os.Getenv("DB_PASS") dbHost := os.Getenv("DB_HOST") dbPort := os.Getenv("DB_PORT") connection, err := driver.ConnectSQL(dbHost, dbPort, "root", dbPass, dbName) if err != nil { fmt.Println(err) os.Exit(-1) } r := chi.NewRouter() r.Use(middleware.Recoverer) r.Use(middleware.Logger) pHandler := ph.NewPostHandler(connection) r.Route("/", func(rt chi.Router) { rt.Mount("/", postRouter(pHandler)) }) fmt.Println("Server listen at :8005") http.ListenAndServe(":8005", r) } // A completely separate router for posts routes func postRouter(pHandler *ph.Post) http.Handler { r := chi.NewRouter() r.Get("/search/{uid:[0-9]+}/{aid:[0-9]+}", pHandler.Search) r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("")) }) return r }
package main

import (
	"crypto/des"
	"crypto/md5"
	"encoding/base32"
	"encoding/hex"
	"fmt"
)

// GetMD5Hash returns the hex-encoded MD5 digest of text.
func GetMD5Hash(text string) string {
	hasher := md5.New()
	hasher.Write([]byte(text))
	return hex.EncodeToString(hasher.Sum(nil))
}

// main demonstrates the MD5 and base32 helpers.
// Fixes over the original (which did not compile): the second
// `str := ...` redeclared str in the same scope, and Sprintf's result
// was discarded with a non-constant format string.
func main() {
	str := "fweihgorwj"
	fmt.Printf("%x\n", md5.Sum([]byte(str)))
	fmt.Println(GetMD5Hash(str))

	str = "this is a exampiile"
	fmt.Println(getbase32(str))
}

// getbase32 encodes str using the base32 "Extended Hex" alphabet
// (0-9, A-V) with standard '=' padding.
func getbase32(str string) string {
	return base32.HexEncoding.EncodeToString([]byte(str))
}

// aesEn is an unfinished encryption stub; it only references the DES
// block size (the original bare expression statement did not compile).
func aesEn() {
	_ = des.BlockSize
}
package middlewares import ( "log" "github.com/dgrijalva/jwt-go" "gopkg.in/matryer/respond.v1" "net/http" "context" "os" ) func JwtMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { tokenStr := r.Header.Get("Authorization") jwt_string := os.Getenv("JWT_SECRET") jwt_secret := []byte(jwt_string) token, err := jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) { return jwt_secret, nil }) if err != nil { respond.With(w, r, http.StatusUnauthorized, err) } else if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid { ctx := context.WithValue(r.Context(), "authUser",claims) r = r.WithContext(ctx) next.ServeHTTP(w, r) } else { log.Printf("Invalid JWT Token") respond.With(w, r, http.StatusUnauthorized, "Invalid") } }) }
package api import ( "context" "encoding/json" "fmt" "net/http" "strings" ) // CreateGithubActionRequest represents the accepted fields for creating // a Github action type CreateGithubActionRequest struct { ReleaseID uint `json:"release_id" form:"required"` GitRepo string `json:"git_repo" form:"required"` GitBranch string `json:"git_branch"` ImageRepoURI string `json:"image_repo_uri" form:"required"` DockerfilePath string `json:"dockerfile_path"` FolderPath string `json:"folder_path"` GitRepoID uint `json:"git_repo_id" form:"required"` RegistryID uint `json:"registry_id"` ShouldCreateWorkflow bool `json:"should_create_workflow"` } // CreateGithubAction creates a Github action with basic authentication func (c *Client) CreateGithubAction( ctx context.Context, projectID, clusterID uint, releaseName, releaseNamespace string, createGH *CreateGithubActionRequest, ) error { data, err := json.Marshal(createGH) if err != nil { return err } req, err := http.NewRequest( "POST", fmt.Sprintf( "%s/projects/%d/ci/actions/create?cluster_id=%d&name=%s&namespace=%s", c.BaseURL, projectID, clusterID, releaseName, releaseNamespace, ), strings.NewReader(string(data)), ) if err != nil { return err } req = req.WithContext(ctx) if httpErr, err := c.sendRequest(req, nil, true); httpErr != nil || err != nil { if httpErr != nil { return fmt.Errorf("code %d, errors %v", httpErr.Code, httpErr.Errors) } return err } return nil }
package main import ( "fmt" "github.com/garyburd/redigo/redis" ) func main() { c, err := redis.Dial("tcp", "127.0.0.1:6379") if err != nil { fmt.Println("Connect to redis error", err) return } defer c.Close() _, err = c.Do("SET", "mykey", "sl") if err != nil { fmt.Println("redis set failed:", err) } }
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ReleaseConfig is a custom resource describing one chart release:
// the configuration it was rendered with and its computed output.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ReleaseConfig struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ReleaseConfigSpec   `json:"spec"`
	Status ReleaseConfigStatus `json:"status"`
}

// ReleaseConfigSpec is the spec for a ReleaseConfig resource
type ReleaseConfigSpec struct {
	ConfigValues             map[string]interface{} `json:"configValues" description:"user config values added to the chart"`
	DependenciesConfigValues map[string]interface{} `json:"dependenciesConfigValues" description:"dependencies' config values added to the chart"`
	Dependencies             map[string]string      `json:"dependencies" description:"map of dependency chart name and release"`
	ChartName                string                 `json:"chartName" description:"chart name"`
	ChartVersion             string                 `json:"chartVersion" description:"chart version"`
	ChartAppVersion          string                 `json:"chartAppVersion" description:"jsonnet app version"`
	OutputConfig             map[string]interface{} `json:"outputConfig"`
	Repo                     string                 `json:"repo" description:"chart repo"`
	ChartImage               string                 `json:"chartImage" description:"chart image"`
	IsomateConfig            *IsomateConfig         `json:"isomateConfig" description:"isomate config"`
	ChartWalmVersion         string                 `json:"chartWalmVersion" description:"chart walm version: v1, v2"`
}

// IsomateConfig names the default isomate and lists all isomates.
type IsomateConfig struct {
	DefaultIsomateName string     `json:"defaultIsomateName" description:"default isomate name"`
	Isomates           []*Isomate `json:"isomates" description:"isomates"`
}

// Isomate is one named variant with its own config values and plugins.
type Isomate struct {
	Name         string                 `json:"name" description:"isomate name"`
	ConfigValues map[string]interface{} `json:"configValues" description:"isomate config values"`
	Plugins      []*ReleasePlugin       `json:"plugins" description:"isomate plugins"`
}

// ReleasePlugin identifies a plugin applied to a release.
type ReleasePlugin struct {
	Name    string `json:"name" description:"plugin name"`
	Args    string `json:"args" description:"plugin args"`
	Version string `json:"version" description:"plugin version"`
	Disable bool   `json:"disable" description:"disable plugin"`
}

// ReleaseConfigStatus is the status for a ReleaseConfig resource
type ReleaseConfigStatus struct{}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReleaseConfigList is a list of ReleaseConfig resources
type ReleaseConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`

	Items []ReleaseConfig `json:"items"`
}
package user

import (
	internal "github.com/ernesto2108/AP_CreatyHelp/internal/storage/psql"
	"github.com/ernesto2108/AP_CreatyHelp/pkg/user/domain"
)

// UsersStorageGateway describes the persistence operations required for
// users. All methods are unexported, so the interface can only be
// satisfied by types within this package.
type UsersStorageGateway interface {
	// create persists a new user built from the command.
	create(u *domain.CreateUserCmd) (*domain.User, error)
	// update applies the command to an existing user.
	// NOTE(review): unlike create/getId it returns no error — confirm
	// failures are surfaced some other way.
	update(u *domain.UpdateUserCmd) *domain.User
	// getId fetches a user by numeric id.
	getId(id int64) (*domain.User, error)
	// delete removes the user with the given id.
	delete(id int64) *domain.User
	// getAll returns every stored user.
	getAll() []*domain.User
}

// UsersStorage implements user persistence on top of the shared
// PostgreSQL client.
type UsersStorage struct {
	*internal.PostSqlClient
}
package test import ( "testing" "time" "github.com/muidea/magicOrm/orm" "github.com/muidea/magicOrm/provider" "github.com/muidea/magicOrm/provider/remote" ) func TestRemoteExecutor(t *testing.T) { orm.Initialize() defer orm.Uninitialize() config := orm.NewConfig("localhost:3306", "testdb", "root", "rootkit") remoteProvider := provider.NewRemoteProvider("default") o1, err := orm.NewOrm(remoteProvider, config) defer o1.Release() if err != nil { t.Errorf("new Orm failed, err:%s", err.Error()) return } now, _ := time.ParseInLocation("2006-01-02 15:04:05:0000", "2018-01-02 15:04:05:0000", time.Local) val := &Unit{ID: 10, I8: 1, I64: uint64(78962222222), Name: "Hello world", Value: 12.3456, TimeStamp: now, Flag: true} objDef, objErr := remote.GetObject(val) if objErr != nil { t.Errorf("GetObject failed, err:%s", objErr.Error()) return } objList := []interface{}{objDef} _, err = registerModel(remoteProvider, objList) if err != nil { t.Errorf("register mode failed, err:%s", err.Error()) return } err = o1.Drop(objDef) if err != nil { t.Errorf("drop ext failed, err:%s", err.Error()) return } err = o1.Create(objDef) if err != nil { t.Errorf("create obj failed, err:%s", err.Error()) return } objVal, objErr := getObjectValue(val) if objErr != nil { t.Errorf("GetObjectValue failed, err:%s", objErr.Error()) return } objModel, objErr := remoteProvider.GetEntityModel(objVal) if objErr != nil { t.Errorf("GetEntityModel failed, err:%s", objErr.Error()) return } objModel, objErr = o1.Insert(objModel) if objErr != nil { t.Errorf("insert obj failed, err:%s", objErr.Error()) return } err = provider.UpdateEntity(objModel.Interface(true).(*remote.ObjectValue), val) if err != nil { t.Errorf("UpdateEntity failed, err:%s", err.Error()) return } val.I8 = int8(124) val.Name = "abababa" val.Value = 100.000 objVal, objErr = getObjectValue(val) if objErr != nil { t.Errorf("GetObjectValue failed, err:%s", objErr.Error()) return } objModel, objErr = remoteProvider.GetEntityModel(objVal) if objErr != 
nil { t.Errorf("GetEntityModel failed, err:%s", objErr.Error()) return } objModel, objErr = o1.Update(objModel) if err != nil { t.Errorf("update obj failed, err:%s", err.Error()) return } val2 := &Unit{ID: val.ID, Name: "", Value: 0.0} obj2Val, objErr := getObjectValue(val2) if objErr != nil { t.Errorf("GetObjectValue failed, err:%s", objErr.Error()) return } obj2Model, obj2Err := remoteProvider.GetEntityModel(obj2Val) if obj2Err != nil { t.Errorf("GetEntityModel failed, err:%s", obj2Err.Error()) return } obj2Model, obj2Err = o1.Query(obj2Model) if obj2Err != nil { t.Errorf("query obj failed, err:%s", obj2Err.Error()) return } err = provider.UpdateEntity(obj2Model.Interface(true).(*remote.ObjectValue), val2) if err != nil { t.Errorf("UpdateEntity failed, err:%s", err.Error()) return } if val.Name != val2.Name || val.Value != val2.Value { t.Errorf("query obj failed, obj:%v, obj2:%v", val, val2) return } _, err = o1.Delete(obj2Model) if err != nil { t.Errorf("query obj failed, err:%s", err.Error()) } } func TestRemoteDepends(t *testing.T) { orm.Initialize() defer orm.Uninitialize() config := orm.NewConfig("localhost:3306", "testdb", "root", "rootkit") remoteProvider := provider.NewRemoteProvider("default") o1, err := orm.NewOrm(remoteProvider, config) defer o1.Release() if err != nil { t.Errorf("new Orm failed, err:%s", err.Error()) return } now, _ := time.ParseInLocation("2006-01-02 15:04:05:0000", "2018-01-02 15:04:05:0000", time.Local) val := &Unit{ID: 10, I64: uint64(78962222222), Name: "Hello world", Value: 12.3456, TimeStamp: now, Flag: true} extVal := &ExtUnit{Unit: val} objDef, objErr := remote.GetObject(val) if objErr != nil { t.Errorf("GetObject failed, err:%s", objErr.Error()) return } extObjDef, objErr := remote.GetObject(extVal) if objErr != nil { t.Errorf("GetObject failed, err:%s", objErr.Error()) return } extVal2 := &ExtUnitList{Unit: *val, UnitList: []Unit{}} ext2ObjDef, objErr := remote.GetObject(extVal2) if objErr != nil { t.Errorf("GetObject 
failed, err:%s", objErr.Error()) return } objList := []interface{}{objDef, extObjDef, ext2ObjDef} registerModel(remoteProvider, objList) err = o1.Drop(objDef) if err != nil { t.Errorf("drop unit failed, err:%s", err.Error()) return } err = o1.Create(objDef) if err != nil { t.Errorf("create unit failed, err:%s", err.Error()) return } err = o1.Drop(extObjDef) if err != nil { t.Errorf("drop ext failed, err:%s", err.Error()) return } err = o1.Create(extObjDef) if err != nil { t.Errorf("create ext failed, err:%s", err.Error()) return } extObjVal, extObjErr := getObjectValue(extVal) if extObjErr != nil { t.Errorf("GetObjectValue failed, err:%s", extObjErr.Error()) return } extObjModel, extObjErr := remoteProvider.GetEntityModel(extObjVal) if extObjErr != nil { t.Errorf("GetEntityModel failed, err:%s", extObjErr.Error()) return } extObjModel, extObjErr = o1.Insert(extObjModel) if extObjErr != nil { t.Errorf("insert ext failed, err:%s", extObjErr.Error()) return } extVal2.UnitList = append(extVal2.UnitList, *val) err = o1.Drop(ext2ObjDef) if err != nil { t.Errorf("drop ext2 failed, err:%s", err.Error()) return } err = o1.Create(ext2ObjDef) if err != nil { t.Errorf("create ext2 failed, err:%s", err.Error()) return } ext2ObjVal, ext2ObjErr := getObjectValue(extVal2) if ext2ObjErr != nil { t.Errorf("GetObjectValue failed, err:%s", ext2ObjErr.Error()) return } ext2ObjModel, ext2ObjErr := remoteProvider.GetEntityModel(ext2ObjVal) if ext2ObjErr != nil { t.Errorf("GetEntityModel failed, err:%s", ext2ObjErr.Error()) return } ext2ObjModel, ext2ObjErr = o1.Insert(ext2ObjModel) if ext2ObjErr != nil { t.Errorf("insert ext2 failed, err:%s", ext2ObjErr.Error()) return } _, err = o1.Delete(ext2ObjModel) if err != nil { t.Errorf("delete ext2 failed, err:%s", err.Error()) } }
package main import ( "context" "encoding/json" "errors" "fmt" "io/ioutil" "os" "regexp" "strings" "github.com/google/go-github/github" "golang.org/x/oauth2" ) var ( ghClient *github.Client ghCtx context.Context reHex = regexp.MustCompile("^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$") version = "master" commit = "none" date = "unknown" ) type Config struct { Labels []Label Repositories []string } type Label struct { Name string Color string Replace string } type Action int const ( Create Action = iota Update ) var actions = [...]string{ "create", "update", } func (a Action) String() string { return actions[a] } type Result struct { Action Action From Label To Label Error error } func (r Result) String() string { prefix := "[OK]" if r.Error != nil { prefix = "[FAIL]" } var ret string switch r.Action { case Update: ret = fmt.Sprintf("%s Updated label named '%s' with color '%s' to '%s' with color '%s'", prefix, r.From.Name, r.From.Color, r.To.Name, r.To.Color) case Create: ret = fmt.Sprintf("%s Created label named '%s' with color '%s'", prefix, r.To.Name, r.To.Color) } return ret } func (c *Config) check() error { if len(c.Labels) == 0 { return errors.New("Empty labels in config file") } if len(c.Repositories) == 0 { return errors.New("Empty target repositories in config file") } m := make(map[string]bool, 0) for i, label := range c.Labels { if label.Name == "" { return errors.New("label name can not be empty") } if label.Color == "" { return errors.New("label color can not be empty") } if strings.HasPrefix(label.Color, "#") { label.Color = strings.TrimPrefix(label.Color, "#") c.Labels[i].Color = label.Color } if !reHex.MatchString(label.Color) { return errors.New("label color must be in 6 character hex code") } if _, ok := m[label.Name]; ok { return fmt.Errorf("%s in `replaces` is used more than once", label.Name) } } for _, repo := range c.Repositories { parts := strings.Split(repo, "/") if len(parts) != 2 { return fmt.Errorf("invalid repo format %s, shoud be user/repo", 
repo) } if parts[0] == "" || parts[1] == "" { return fmt.Errorf("invalid repo format %s, shoud be user/repo", repo) } } return nil } func main() { if len(os.Args) < 2 { usage(errors.New("missing config file")) } if os.Getenv("GITHUB_TOKEN") == "" { usage(errors.New("empty GITHUB_TOKEN in env")) } c, err := ReadConfig(os.Args[1]) if err != nil { usage(err) } run(c) } func ReadConfig(path string) (c *Config, err error) { f, err := os.Open(path) if err != nil { return c, err } defer f.Close() fc, err := ioutil.ReadAll(f) if err != nil { return c, err } if err = json.Unmarshal(fc, &c); err != nil { return c, fmt.Errorf("json unmarshal error: %s", err) } err = c.check() return c, err } func run(c *Config) { ghCtx = context.Background() ts := oauth2.StaticTokenSource( &oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")}, ) tc := oauth2.NewClient(ghCtx, ts) ghClient = github.NewClient(tc) for _, repoPath := range c.Repositories { fmt.Printf("Update labels in repo %s...\n", repoPath) results, err := UpdateRepo(repoPath, c.Labels) if err != nil { fmt.Printf("Error: %s\n", err) } printResults(results) } } func printResults(results []Result) { for _, result := range results { fmt.Printf("* %s\n", result) } fmt.Println("") } func UpdateRepo(repoPath string, labels []Label) (results []Result, err error) { // First, get all labels, mapped to their colors, from the repoOwner. repoLabels, err := GetRepoLabels(repoPath) if err != nil { return results, err } // Foreach labels from config: // - If label name exists in current labels, perform update. Probably // the color changes. // - If label replace found in repoLabels, perform update. // - If no match create new label. 
var result Result for _, label := range labels { if color, ok := repoLabels[label.Name]; ok { result = Result{ Action: Update, From: Label{ Name: label.Name, Color: color, }, To: label, Error: UpdateLabel(repoPath, label.Name, label), } } else if color, ok := repoLabels[label.Replace]; ok { result = Result{ Action: Update, From: Label{ Name: label.Replace, Color: color, }, To: label, Error: UpdateLabel(repoPath, label.Replace, label), } } else { result = Result{ Action: Create, From: Label{}, To: label, Error: CreateLabel(repoPath, label), } } results = append(results, result) } return results, nil } func UpdateLabel(repoPath, labelName string, label Label) error { parts := strings.Split(repoPath, "/") owner, repo := parts[0], parts[1] ghLabel := &github.Label{ Name: &label.Name, Color: &label.Color, } if _, _, err := ghClient.Issues.EditLabel(ghCtx, owner, repo, labelName, ghLabel); err != nil { return err } return nil } func CreateLabel(repoPath string, label Label) error { parts := strings.Split(repoPath, "/") owner, repo := parts[0], parts[1] ghLabel := &github.Label{ Name: &label.Name, Color: &label.Color, } if _, _, err := ghClient.Issues.CreateLabel(ghCtx, owner, repo, ghLabel); err != nil { return err } return nil } func GetRepoLabels(repoPath string) (m map[string]string, err error) { parts := strings.Split(repoPath, "/") owner, repo := parts[0], parts[1] opt := &github.ListOptions{ PerPage: 100, } m = make(map[string]string) for { repoLabels, resp, err := ghClient.Issues.ListLabels(ghCtx, owner, repo, opt) if err != nil { return m, err } for _, label := range repoLabels { m[label.GetName()] = label.GetColor() } if resp.NextPage == 0 { break } opt.Page = resp.NextPage } return m, err } func getVersion() string { return fmt.Sprintf("%v, commit %v, built at %v", version, commit, date) } func usage(err error) { fmt.Printf("Error: %v\n", err) fmt.Printf(` Name: gembel - bulk update issue labels of GitHub repositories. 
Version: %s Usage: gembel <config-file> To specifiy GITHUB_TOKEN when running it: GITHUB_TOKEN=token gembel <config-file> `, getVersion()) os.Exit(1) }
// Package dtoshoptrades holds the request/response DTOs for the
// shop-trades listing endpoint.
package dtoshoptrades

import (
	"github.com/tahmooress/motor-shop/internal/entities/models"
	"github.com/tahmooress/motor-shop/internal/pkg/query"
	"github.com/tahmooress/motor-shop/internal/pkg/server"
)

// Request identifies the shop whose trades are requested; pagination
// and filtering come from the embedded server.Query.
type Request struct {
	ShopID models.ID `json:"shop_id"`
	server.Query
}

// Response carries the matched trades plus pagination metadata.
type Response struct {
	Data []models.ShopTrades `json:"data"`
	Meta query.Meta          `json:"meta"`
}
package closeflag import ( "errors" "sync" ) // CloseFlag is a simple object that has a close function that closes a channel and can be called many times type CloseFlag struct { mutex sync.Mutex closed bool closeChan chan (struct{}) // CloseFunc will be called the first time Close is called. It is allowed to call Close itself CloseFunc func() error } var ( // ErrorClosed is returned when the object is closed multiple times ErrorClosed = errors.New("CloseFlag was already closed. This is harmless.") ) // Chan returns a channel that will be closed upon closing the CloseFlag func (c *CloseFlag) Chan() <-chan (struct{}) { c.mutex.Lock() if c.closeChan == nil { c.closeChan = make(chan (struct{})) if c.closed { close(c.closeChan) } } c.mutex.Unlock() return c.closeChan } // IsClosed returns if the flag was already closed func (c *CloseFlag) IsClosed() bool { c.mutex.Lock() defer c.mutex.Unlock() return c.closed } // Close closes the CloseFlag. It can safely be called multiple times func (c *CloseFlag) Close() error { c.mutex.Lock() closed := c.closed c.closed = true if !closed && c.closeChan != nil { close(c.closeChan) } c.mutex.Unlock() if closed { return ErrorClosed } if c.CloseFunc != nil { return c.CloseFunc() } return nil }
// Copyright 2014 Marc-Antoine Ruel. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.

package main

import (
	"io/ioutil"
	"log"
	"os"
	"testing"

	"github.com/maruel/subcommands"
	"github.com/maruel/ut"
)

// t.Parallel() cannot be used here, see main.go for rationale.
// In addition, logging must be zapped out.

// TODO(maruel): Create an in-memory os.File, couldn't quickly find a ready
// made fake only.

// newTempFile returns an open temporary file, failing the test
// immediately on error. Callers are responsible for closing it.
func newTempFile(t testing.TB) *os.File {
	f, err := ioutil.TempFile("", "sample-simple")
	ut.AssertEqual(t, nil, err)
	return f
}

// mockStdout mocks os.Stdout manually. To have it mocked automatically, see
// sample-complex.
// It swaps os.Stdout for a temp file and returns a restore function that
// must be deferred. Not safe for parallel tests: it mutates a global.
func mockStdout(t testing.TB) func() {
	oldStdout := os.Stdout
	os.Stdout = newTempFile(t)
	return func() {
		os.Stdout.Close()
		os.Stdout = oldStdout
	}
}

// assertStdout rewinds the mocked stdout file and asserts its full
// contents equal expected.
func assertStdout(t testing.TB, expected string) {
	_, _ = os.Stdout.Seek(0, 0)
	actual, err := ioutil.ReadAll(os.Stdout)
	ut.AssertEqual(t, nil, err)
	ut.AssertEqual(t, expected, string(actual))
}

// TestGreet runs the "greet" subcommand and checks its stdout greeting.
func TestGreet(t *testing.T) {
	defer mockStdout(t)()
	ut.AssertEqual(t, 0, subcommands.Run(application, []string{"greet", "active tester"}))
	assertStdout(t, "Hi active tester!\n")
}

// TestSleep runs the "sleep" subcommand; its output goes to the test
// log, so stdout is expected to stay empty.
func TestSleep(t *testing.T) {
	defer mockStdout(t)()
	// If running with "go test -v", the following log entry will be printed:
	//   utiltest.go:132: 2010/01/02 03:04:05 Simulating sleeping for 1s.
	out := ut.NewWriter(t)
	defer out.Close()
	log.SetOutput(out)
	ut.AssertEqual(t, 0, subcommands.Run(application, []string{"sleep", "-duration", "1"}))
	assertStdout(t, "")
}
/* Copyright 2022 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cloudrun import "fmt" const ( typeService = "Service" typeJob = "Job" ) type ResourceType string // RunResourceName represents a Cloud Run Service type RunResourceName struct { Project string Region string Service string Job string } // String returns the path representation of a Cloud Run Service. func (n RunResourceName) String() string { // only one of Job or Service should be specified if n.Service != "" { return fmt.Sprintf("projects/%s/locations/%s/services/%s", n.Project, n.Region, n.Service) } return fmt.Sprintf("namespaces/%s/jobs/%s", n.Project, n.Job) } func (n RunResourceName) Name() string { if n.Service != "" { return n.Service } return n.Job } func (n RunResourceName) Type() ResourceType { if n.Service != "" { return typeService } return typeJob }
package read type repository interface { GetWidget(id uint) (Widget, error) GetAllWidgets() ([]Widget, error) } type Service interface { GetWidget(id uint) (Widget, error) GetAllWidgets() ([]Widget, error) } type service struct { r repository } func NewService(r repository) Service { return service{r: r} } func (s service) GetWidget(id uint) (Widget, error) { return s.r.GetWidget(id) } func (s service) GetAllWidgets() ([]Widget, error) { return s.r.GetAllWidgets() }
package main

import (
	"database/sql"
	"fmt"
	"os"
)

// execStmt runs a single DDL statement inside the migration transaction and
// reports (but deliberately does not abort on) failure, preserving the
// original best-effort behavior of this migration.
func execStmt(txn *sql.Tx, stmt string) {
	if _, err := txn.Exec(stmt); err != nil {
		fmt.Printf("Failed to migrate due to: %v", err)
	}
}

// Up is executed when this migration is applied
func Up_20170818120003(txn *sql.Tx) {
	databaseProvider := os.Getenv("DATABASE_PROVIDER")
	// SECURITY NOTE(review): this prints MYSQL_ROOT_PASSWORD to stdout;
	// consider removing the debug line so the secret does not end up in logs.
	fmt.Printf("ENV is: %s, %s", databaseProvider, os.Getenv("MYSQL_ROOT_PASSWORD"))

	// BYTEA is PostgreSQL's binary type; MySQL uses BLOB.
	binaryDataType := "BYTEA"
	if databaseProvider == "mysql" {
		binaryDataType = "BLOB"
	}

	createTokens := "CREATE TABLE IF NOT EXISTS tokens ("
	createTokens += "user_guid VARCHAR(36) NOT NULL, "
	createTokens += "cnsi_guid VARCHAR(36), "
	createTokens += "token_type VARCHAR(4) NOT NULL, "
	createTokens += "auth_token " + binaryDataType + " NOT NULL, "
	createTokens += "refresh_token " + binaryDataType + " NOT NULL, "
	createTokens += "token_expiry BIGINT NOT NULL, "
	createTokens += "last_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP)"
	if databaseProvider == "pgsql" {
		createTokens += " WITH (OIDS=FALSE);"
	} else {
		createTokens += ";"
	}
	execStmt(txn, createTokens)

	createCnsisTable := "CREATE TABLE IF NOT EXISTS cnsis ("
	createCnsisTable += "guid VARCHAR(36) NOT NULL UNIQUE,"
	createCnsisTable += "name VARCHAR(255) NOT NULL,"
	createCnsisTable += "cnsi_type VARCHAR(3) NOT NULL,"
	createCnsisTable += "api_endpoint VARCHAR(255) NOT NULL,"
	createCnsisTable += "auth_endpoint VARCHAR(255) NOT NULL,"
	createCnsisTable += "token_endpoint VARCHAR(255) NOT NULL,"
	createCnsisTable += "doppler_logging_endpoint VARCHAR(255) NOT NULL,"
	createCnsisTable += "skip_ssl_validation BOOLEAN NOT NULL DEFAULT FALSE,"
	createCnsisTable += "last_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,"
	createCnsisTable += "PRIMARY KEY (guid) );"
	execStmt(txn, createCnsisTable)

	// Indexes speeding up the token and endpoint lookups.
	for _, createIndex := range []string{
		"CREATE INDEX tokens_user_guid ON tokens (user_guid);",
		"CREATE INDEX tokens_cnsi_guid ON tokens (cnsi_guid);",
		"CREATE INDEX tokens_token_type ON tokens (token_type);",
		"CREATE INDEX cnsis_name ON cnsis (name);",
		"CREATE INDEX cnsis_cnsi_type ON cnsis (cnsi_type);",
	} {
		execStmt(txn, createIndex)
	}
}

// Down is executed when this migration is rolled back
func Down_20170818120003(txn *sql.Tx) {
	// Drop indexes before their tables, mirroring the Up order in reverse.
	for _, stmt := range []string{
		"DROP INDEX IF EXISTS tokens_token_type;",
		"DROP INDEX IF EXISTS tokens_cnsi_guid;",
		"DROP INDEX IF EXISTS tokens_user_guid;",
		"DROP TABLE IF EXISTS tokens;",
		"DROP INDEX IF EXISTS cnsis_cnsi_type;",
		"DROP INDEX IF EXISTS cnsis_name;",
		"DROP TABLE IF EXISTS cnsis;",
	} {
		execStmt(txn, stmt)
	}
}
package main

import "fmt"

// main demonstrates Go's switch statement: cases do not fall through by
// default, so no break statements are needed — forgetting one cannot cause
// later cases to run.
func main() {
	name := "Medhi"
	switch name {
	case "Daniel":
		fmt.Println("Sup Daniel")
	case "Medhi":
		fmt.Println("Sup Medhi")
	default:
		fmt.Println("No matches")
	}
}
package services

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"traceip/internal/models"
	"traceip/internal/restclient"

	"github.com/go-redis/redis/v8"
)

// CurrenciesService service to obtain information about currencies.
type CurrenciesService struct {
	RedisConn *redis.Client
}

// Sync allows us to keep currencies synchronized on redis database.
// It blocks forever: on success it refreshes every 45 minutes, on failure it
// retries after 15 seconds. The (interface{}, error) signature is kept for
// existing callers even though the loop never returns.
func (cs *CurrenciesService) Sync() (interface{}, error) {
	for {
		fmt.Println("Syncronizing currencies...")
		if _, err := cs.update(); err != nil {
			time.Sleep(15 * time.Second)
			continue
		}
		time.Sleep(45 * time.Minute)
	}
}

// Get allows us to obtain last currencies that we stored on redis database.
// It returns (nil, nil) when the "currencies" key does not exist.
func (cs *CurrenciesService) Get() (currencies *models.Currencies, err error) {
	val, err := cs.RedisConn.Get(context.Background(), "currencies").Result()
	if err == redis.Nil {
		return nil, nil
	}
	if err != nil {
		// Fix: a non-Nil redis error previously fell through to
		// json.Unmarshal on an empty payload; surface it instead.
		return nil, err
	}
	currencies = &models.Currencies{}
	err = json.Unmarshal([]byte(val), currencies)
	return currencies, err
}

// update fetches fresh rates and stores them under the "currencies" key.
func (cs *CurrenciesService) update() (currencies *models.Currencies, err error) {
	currencies = &models.Currencies{}
	if err := getCurrenciesFromBase(currencies); err != nil {
		return nil, err
	}
	b, err := json.Marshal(currencies)
	if err != nil {
		// Fix: the marshal error was previously discarded.
		return nil, err
	}
	_, err = cs.RedisConn.Set(context.Background(), "currencies", string(b), 0).Result()
	return currencies, err
}

// getCurrenciesFromBase loads the latest rates from fixer.io into currencies.
// SECURITY NOTE(review): the API access key is hard-coded in the URL; it
// should be moved to configuration/secret storage.
func getCurrenciesFromBase(currencies *models.Currencies) error {
	err := restclient.Get("http://data.fixer.io/api/latest?access_key=04d37504d1e4a10c38dc291f7485d9e8&format=1", currencies)
	if err != nil || currencies == nil {
		return errors.New("Error getting currencies")
	}
	return nil
}
package model import ( ) type CmsPrefrenceAreaProductRelation struct { AppId string `json:"appId" gorm:"type:bigint unsigned;"` // Id int `json:"id" gorm:"type:bigint;primary_key"` // PrefrenceAreaId string `json:"prefrenceAreaId" gorm:"type:bigint;"` // ProductId string `json:"productId" gorm:"type:bigint;"` // BaseModel } func (CmsPrefrenceAreaProductRelation) TableName() string { return "cms_prefrence_area_product_relation" }
// generated by running "go generate" on project root

package assets

// Helper for rod
// NOTE(review): this file is generated; edit the source JS and re-run
// "go generate" rather than modifying the strings below by hand.
var Helper = `(frameId) => { // eslint-disable-line no-unused-expressions
  const rod = {
    element (selector) {
      return (this.document || this).querySelector(selector)
    },

    elements (selector) {
      return (this.document || this).querySelectorAll(selector)
    },

    elementX (xpath) {
      return document.evaluate(
        xpath, (this.document || this), null, XPathResult.FIRST_ORDERED_NODE_TYPE
      ).singleNodeValue
    },

    elementsX (xpath) {
      const iter = document.evaluate(xpath, (this.document || this), null, XPathResult.ORDERED_NODE_ITERATOR_TYPE)
      const list = []
      let el
      while ((el = iter.iterateNext())) list.push(el)
      return list
    },

    elementMatches (selector, reg) {
      const r = new RegExp(reg)
      const filter = el => rod.text.call(el).match(r)
      const el = Array.from((this.document || this).querySelectorAll(selector)).find(filter)
      return el || null
    },

    parents (selector) {
      let p = this.parentElement
      const list = []
      while (p) {
        if (p.matches(selector)) {
          list.push(p)
        }
        p = p.parentElement
      }
      return list
    },

    async overlay (id, left, top, width, height, msg) {
      await rod.waitLoad()

      const div = document.createElement('div')
      const msgDiv = document.createElement('div')
      div.id = id
      div.style = ` + "`" + `position: fixed; z-index:2147483647; border: 2px dashed red;
        border-radius: 3px; box-shadow: #5f3232 0 0 3px; pointer-events: none;
        box-sizing: border-box;
        left: ${left}px;
        top: ${top}px;
        height: ${height}px;
        width: ${width}px;` + "`" + `

      if (width * height === 0) {
        div.style.border = 'none'
      }

      msgDiv.style = ` + "`" + `position: absolute; color: #cc26d6; font-size: 12px;
        background: #ffffffeb; box-shadow: #333 0 0 3px; padding: 2px 5px;
        border-radius: 3px; white-space: nowrap;
        top: ${height}px;` + "`" + `

      msgDiv.innerHTML = msg

      div.appendChild(msgDiv)
      document.body.appendChild(div)

      if (window.innerHeight < msgDiv.offsetHeight + top + height) {
        msgDiv.style.top = -msgDiv.offsetHeight - 2 + 'px'
      }

      if (window.innerWidth < msgDiv.offsetWidth + left) {
        msgDiv.style.left = window.innerWidth - msgDiv.offsetWidth - left + 'px'
      }
    },

    async elementOverlay (id, msg) {
      const interval = 100

      let pre = rod.box.call(this)
      await rod.overlay(id, pre.left, pre.top, pre.width, pre.height, msg)

      const update = () => {
        const overlay = document.getElementById(id)
        if (overlay === null) return

        const box = rod.box.call(this)
        if (pre.left === box.left && pre.top === box.top && pre.width === box.width && pre.height === box.height) {
          setTimeout(update, interval)
          return
        }

        overlay.style.left = box.left + 'px'
        overlay.style.top = box.top + 'px'
        overlay.style.width = box.width + 'px'
        overlay.style.height = box.height + 'px'
        pre = box

        setTimeout(update, interval)
      }

      setTimeout(update, interval)
    },

    removeOverlay (id) {
      const el = document.getElementById(id)
      el && el.remove()
    },

    waitIdle (timeout) {
      return new Promise((resolve) => {
        window.requestIdleCallback(resolve, { timeout })
      })
    },

    waitLoad () {
      return new Promise((resolve) => {
        if (document.readyState === 'complete') return resolve()
        window.addEventListener('load', resolve)
      })
    },

    async scrollIntoViewIfNeeded () {
      if (!this.isConnected) {
        throw new Error('Node is detached from document')
      }
      if (this.nodeType !== Node.ELEMENT_NODE) {
        throw new Error('Node is not of type HTMLElement')
      }

      const visibleRatio = await new Promise(resolve => {
        const observer = new IntersectionObserver(entries => {
          resolve(entries[0].intersectionRatio)
          observer.disconnect()
        })
        observer.observe(this)
      })
      if (visibleRatio !== 1.0) {
        this.scrollIntoView({
          block: 'center',
          inline: 'center',
          behavior: 'instant'
        })
      }
    },

    inputEvent () {
      this.dispatchEvent(new Event('input', { bubbles: true }))
      this.dispatchEvent(new Event('change', { bubbles: true }))
    },

    selectText (pattern) {
      const m = this.value.match(new RegExp(pattern))
      if (m) {
        this.setSelectionRange(m.index, m.index + m[0].length)
      }
    },

    selectAllText () {
      this.select()
    },

    select (selectors) {
      selectors.forEach(s => {
        Array.from(this.options).find(el => {
          try {
            if (el.innerText.includes(s) || el.matches(s)) {
              el.selected = true
              return true
            }
          } catch (e) {}
        })
      })
      this.dispatchEvent(new Event('input', { bubbles: true }))
      this.dispatchEvent(new Event('change', { bubbles: true }))
    },

    visible () {
      const box = this.getBoundingClientRect()
      const style = window.getComputedStyle(this)
      return style.display !== 'none' &&
        style.visibility !== 'hidden' &&
        !!(box.top || box.bottom || box.width || box.height)
    },

    invisible () {
      return !rod.visible.apply(this)
    },

    box () {
      const box = this.getBoundingClientRect().toJSON()
      if (this.tagName === 'IFRAME') {
        const style = window.getComputedStyle(this)
        box.left += parseInt(style.paddingLeft) + parseInt(style.borderLeftWidth)
        box.top += parseInt(style.paddingTop) + parseInt(style.borderTopWidth)
      }
      return box
    },

    text () {
      switch (this.tagName) {
        case 'INPUT':
        case 'TEXTAREA':
          return this.value
        default:
          return this.innerText
      }
    },

    resource () {
      return new Promise((resolve, reject) => {
        if (this.complete) {
          return resolve(this.currentSrc)
        }
        this.addEventListener('load', () => resolve(this.currentSrc))
        this.addEventListener('error', (e) => reject(e))
      })
    },

    stripHTML (html) {
      const div = document.createElement('div')
      div.innerHTML = html
      return div.innerText
    },

    addScriptTag (id, url, content) {
      if (document.getElementById(id)) return

      return new Promise((resolve, reject) => {
        var s = document.createElement('script')

        if (url) {
          s.src = url
          s.onload = resolve
        } else {
          s.type = 'text/javascript'
          s.text = content
          resolve()
        }

        s.id = id
        s.onerror = reject
        document.head.appendChild(s)
      })
    },

    addStyleTag (id, url, content) {
      if (document.getElementById(id)) return

      return new Promise((resolve, reject) => {
        var el

        if (url) {
          el = document.createElement('link')
          el.rel = 'stylesheet'
          el.href = url
        } else {
          el = document.createElement('style')
          el.type = 'text/css'
          el.appendChild(document.createTextNode(content))
          resolve()
        }

        el.id = id
        el.onload = resolve
        el.onerror = reject
        document.head.appendChild(el)
      })
    }
  }

  window['rod' + frameId] = rod
  if (!window.rod) window.rod = rod
  return window
}
`

// Monitor for rod
var Monitor = `<html>
<head>
  <title>Rod Monitor - Pages</title>
</head>
<body>
  <h3>Page List</h3>
  {{range .list}}
    <h4>
      <a href='/page/{{.TargetID}}?rate=1000' title="{{.URL}}">{{.Title}}</a>
    </h4>
  {{end}}
</body>
</html>`

// MonitorPage for rod
var MonitorPage = `<html>
<head><title>Rod Monitor - {{.id}}</title></head>
<body style="margin: 0">
</body>
<script>
  let img = document.createElement('img')

  function update() {
    let now = new Date()
    img.src = '/screenshot/{{.id}}?t=' + now.getTime()
  }

  img.style.maxWidth = innerWidth + "px"
  img.onload = () => setTimeout(update, {{.rate}})
  img.onerror = () => alert('error loading screenshots')
  document.body.appendChild(img)

  update()
</script>
</html>`
package main

import (
	"fmt"
	"git.ronaksoftware.com/blip/server/internal/tools"
	"github.com/spf13/cobra"
	"io/ioutil"
	"os"
)

/*
   Creation Time: 2019 - Oct - 16
   Created by:  (ehsan)
   Maintainers:
      1.  Ehsan N. Moosa (E2)
   Auditor: Ehsan N. Moosa (E2)
   Copyright Ronak Software Group 2018
*/

// init registers the Settings command tree on RootCmd and binds the flags
// consumed by the Set* subcommands.
func init() {
	RootCmd.AddCommand(SettingsCmd)
	SetAccessTokenCmd.Flags().String(FlagAccessToken, "", "")
	SetSessionIDCmd.Flags().String(FlagSessionID, "", "")
	SettingsCmd.AddCommand(SetAccessTokenCmd, SetSessionIDCmd, GetAccessTokenCmd, GetSessionIDCmd)
}

// SettingsCmd is the parent command grouping the access-token and session-id
// helpers below.
var SettingsCmd = &cobra.Command{
	Use: "Settings",
}

// SetAccessTokenCmd persists the access token flag value to the local
// ".blip-accessToken" file. Write errors are deliberately ignored
// (best-effort local cache).
var SetAccessTokenCmd = &cobra.Command{
	Use: "SetAccessToken",
	Run: func(cmd *cobra.Command, args []string) {
		_ = ioutil.WriteFile(".blip-accessToken", tools.StrToByte(cmd.Flag(FlagAccessToken).Value.String()), os.ModePerm)
	},
}

// GetAccessTokenCmd prints the cached access token; a missing file prints an
// empty line (read error deliberately ignored).
var GetAccessTokenCmd = &cobra.Command{
	Use: "GetAccessToken",
	Run: func(cmd *cobra.Command, args []string) {
		tokenBytes, _ := ioutil.ReadFile(".blip-accessToken")
		fmt.Println(tools.ByteToStr(tokenBytes))
	},
}

// SetSessionIDCmd persists the session id flag value to the local
// ".blip-session" file. Write errors are deliberately ignored.
var SetSessionIDCmd = &cobra.Command{
	Use: "SetSessionID",
	Run: func(cmd *cobra.Command, args []string) {
		_ = ioutil.WriteFile(".blip-session", tools.StrToByte(cmd.Flag(FlagSessionID).Value.String()), os.ModePerm)
	},
}

// GetSessionIDCmd prints the cached session id; a missing file prints an
// empty line (read error deliberately ignored).
var GetSessionIDCmd = &cobra.Command{
	Use: "GetSessionID",
	Run: func(cmd *cobra.Command, args []string) {
		id, _ := ioutil.ReadFile(".blip-session")
		fmt.Println(tools.ByteToStr(id))
	},
}
package frvradn

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"

	"github.com/prebid/openrtb/v19/openrtb2"
	"github.com/prebid/prebid-server/adapters"
	"github.com/prebid/prebid-server/config"
	"github.com/prebid/prebid-server/errortypes"
	"github.com/prebid/prebid-server/openrtb_ext"
)

// adapter is the FRVR AdNetwork bidder; uri is the configured bid endpoint.
type adapter struct {
	uri string
}

// Builder creates the FRVRAdn adapter from static bidder config.
// The endpoint is required.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	if config.Endpoint == "" {
		return nil, errors.New("missing endpoint adapter parameter")
	}

	bidder := &adapter{
		uri: config.Endpoint,
	}
	return bidder, nil
}

// MakeRequests splits the bid request into one outgoing request per valid
// impression, converting any non-USD bid floor to USD and replacing each
// imp.Ext with the frvradn-specific extension. Invalid impressions are
// skipped and reported via the returned errors.
func (a *adapter) MakeRequests(request *openrtb2.BidRequest, requestInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
	var requests []*adapters.RequestData
	var errs []error

	// Shallow copy: only the Imp slice is replaced per request below.
	requestCopy := *request
	for _, imp := range request.Imp {
		frvrAdnExt, err := getImpressionExt(&imp)
		if err != nil {
			errs = append(errs, err)
			continue
		}

		// Normalize the bid floor currency to USD.
		if imp.BidFloor > 0 && imp.BidFloorCur != "" && strings.ToUpper(imp.BidFloorCur) != "USD" {
			convertedValue, err := requestInfo.ConvertCurrency(imp.BidFloor, imp.BidFloorCur, "USD")
			if err != nil {
				errs = append(errs, err)
				continue
			}
			imp.BidFloorCur = "USD"
			imp.BidFloor = convertedValue
		}

		ext, err := json.Marshal(frvrAdnExt)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		imp.Ext = ext

		// One outgoing request per impression.
		requestCopy.Imp = []openrtb2.Imp{imp}
		requestJSON, err := json.Marshal(requestCopy)
		if err != nil {
			errs = append(errs, err)
			continue
		}

		requestData := &adapters.RequestData{
			Method: "POST",
			Uri:    a.uri,
			Body:   requestJSON,
		}
		requests = append(requests, requestData)
	}
	return requests, errs
}

// MakeBids parses the bidder response and maps each bid back to its media
// type. Bids whose media type cannot be determined are skipped with an error.
func (a *adapter) MakeBids(request *openrtb2.BidRequest, requestData *adapters.RequestData, responseData *adapters.ResponseData) (*adapters.BidderResponse, []error) {
	if adapters.IsResponseStatusCodeNoContent(responseData) {
		return nil, nil
	}

	if err := adapters.CheckResponseStatusCodeForErrors(responseData); err != nil {
		return nil, []error{err}
	}

	if len(responseData.Body) == 0 {
		return nil, nil
	}

	var response openrtb2.BidResponse
	if err := json.Unmarshal(responseData.Body, &response); err != nil {
		return nil, []error{err}
	}

	bidResponse := adapters.NewBidderResponseWithBidsCapacity(len(request.Imp))
	bidResponse.Currency = response.Cur

	var errs []error
	for _, seatBid := range response.SeatBid {
		for i := range seatBid.Bid {
			bidType, err := getBidMediaType(&seatBid.Bid[i])
			if err != nil {
				errs = append(errs, err)
				continue
			}

			b := &adapters.TypedBid{
				Bid:     &seatBid.Bid[i],
				BidType: bidType,
			}
			bidResponse.Bids = append(bidResponse.Bids, b)
		}
	}
	return bidResponse, errs
}

// getImpressionExt extracts and validates the frvradn bidder extension from
// an impression; publisher_id and ad_unit_id are both required.
func getImpressionExt(imp *openrtb2.Imp) (*openrtb_ext.ImpExtFRVRAdn, error) {
	var extImpBidder adapters.ExtImpBidder
	if err := json.Unmarshal(imp.Ext, &extImpBidder); err != nil {
		return nil, &errortypes.BadInput{
			Message: "missing ext",
		}
	}
	var frvrAdnExt openrtb_ext.ImpExtFRVRAdn
	if err := json.Unmarshal(extImpBidder.Bidder, &frvrAdnExt); err != nil {
		return nil, &errortypes.BadInput{
			Message: "missing ext.bidder",
		}
	}

	if len(frvrAdnExt.PublisherID) == 0 || len(frvrAdnExt.AdUnitID) == 0 {
		return nil, &errortypes.BadInput{
			Message: "publisher_id and ad_unit_id are required",
		}
	}
	return &frvrAdnExt, nil
}

// getBidMediaType reads the media type from bid.ext.prebid.type.
func getBidMediaType(bid *openrtb2.Bid) (openrtb_ext.BidType, error) {
	var extBid openrtb_ext.ExtBid
	err := json.Unmarshal(bid.Ext, &extBid)
	if err != nil {
		return "", fmt.Errorf("unable to deserialize imp %v bid.ext", bid.ImpID)
	}

	if extBid.Prebid == nil {
		return "", fmt.Errorf("imp %v with unknown media type", bid.ImpID)
	}

	return extBid.Prebid.Type, nil
}
package driver // type Migrator interface { // HasTable(table string) bool // }
package market

import "github.com/shopspring/decimal"

// GetAllSymbolsLast24hCandlesticksAskBidResponse is the API response wrapping
// the last-24h candlestick plus best ask/bid for every symbol.
type GetAllSymbolsLast24hCandlesticksAskBidResponse struct {
	Status string               `json:"status"` // API call status
	Ts     int64                `json:"ts"`     // response timestamp
	Data   []SymbolCandlestick  `json:"data"`   // one entry per symbol
}

// SymbolCandlestick is a single symbol's 24h candlestick with current best
// ask/bid levels.
type SymbolCandlestick struct {
	Amount  decimal.Decimal `json:"amount"`  // traded base-asset volume
	Open    decimal.Decimal `json:"open"`    // opening price
	Close   decimal.Decimal `json:"close"`   // latest/closing price
	High    decimal.Decimal `json:"high"`    // highest price
	Symbol  string          `json:"symbol"`  // trading pair symbol
	Count   int64           `json:"count"`   // number of trades
	Low     decimal.Decimal `json:"low"`     // lowest price
	Vol     decimal.Decimal `json:"vol"`     // traded quote-asset volume
	Bid     decimal.Decimal `json:"bid"`     // best bid price
	BidSize decimal.Decimal `json:"bidSize"` // size at best bid
	Ask     decimal.Decimal `json:"ask"`     // best ask price
	AskSize decimal.Decimal `json:"askSize"` // size at best ask
}
// Copyright (C) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stream import ( "fmt" ) // Format prints the Channel to w. func (c Channel) Format(w fmt.State, r rune) { switch c { case Channel_Red: fmt.Fprint(w, "R") case Channel_Green: fmt.Fprint(w, "G") case Channel_Blue: fmt.Fprint(w, "B") case Channel_Alpha: fmt.Fprint(w, "A") case Channel_Luminance: fmt.Fprint(w, "L") case Channel_Depth: fmt.Fprint(w, "D") case Channel_Stencil: fmt.Fprint(w, "S") case Channel_ChromaU: fmt.Fprint(w, "ChromaU") case Channel_ChromaV: fmt.Fprint(w, "ChromaV") case Channel_Gray: fmt.Fprint(w, "Gray") case Channel_U: fmt.Fprint(w, "U") case Channel_V: fmt.Fprint(w, "V") case Channel_W: fmt.Fprint(w, "W") case Channel_X: fmt.Fprint(w, "X") case Channel_Y: fmt.Fprint(w, "Y") case Channel_Z: fmt.Fprint(w, "Z") case Channel_SharedExponent: fmt.Fprint(w, "E") case Channel_Undefined: fmt.Fprint(w, "Ж") default: fmt.Fprint(w, "?") } } // ColorChannels is the list of channels considered colors. var ColorChannels = Channels{ Channel_Red, Channel_Green, Channel_Blue, Channel_Alpha, Channel_Luminance, Channel_Gray, Channel_ChromaU, Channel_ChromaV, } // DepthChannels is the list of channels considered depth. var DepthChannels = Channels{ Channel_Depth, } // StencilChannels is the list of channels considered stencil. var StencilChannels = Channels{ Channel_Stencil, } // VectorChannels is the list of channels considered vectors. 
var VectorChannels = Channels{ Channel_X, Channel_Y, Channel_Z, Channel_W, } // IsColor returns true if the channel is considered a color channel. // See ColorChannels for the list of channels considered color. func (c Channel) IsColor() bool { for _, t := range ColorChannels { if t == c { return true } } return false } // IsDepth returns true if the channel is considered a depth channel. // See DepthChannels for the list of channels considered depth. func (c Channel) IsDepth() bool { for _, t := range DepthChannels { if t == c { return true } } return false } // IsStencil returns true if the channel is considered a stencil channel. // See StencilChannels for the list of channels considered stencil. func (c Channel) IsStencil() bool { for _, t := range StencilChannels { if t == c { return true } } return false } // IsVector returns true if the channel is considered a vector channel. // See VectorChannels for the list of channels considered vector. func (c Channel) IsVector() bool { for _, t := range VectorChannels { if t == c { return true } } return false } // Channels is a list of channels. type Channels []Channel // Contains returns true if l contains c. func (l Channels) Contains(c Channel) bool { for _, t := range l { if t == c { return true } } return false } // ContainsColor returns true if l contains a color channel. // See ColorChannels for channels considered colors. func (l Channels) ContainsColor() bool { for _, t := range l { if t.IsColor() { return true } } return false } // ContainsDepth returns true if l contains a depth channel. // See DepthChannels for channels considered depth. func (l Channels) ContainsDepth() bool { for _, t := range l { if t.IsDepth() { return true } } return false } // ContainsStencil returns true if l contains a stencil channel. // See StencilChannels for channels considered stencil. 
func (l Channels) ContainsStencil() bool { for _, t := range l { if t.IsStencil() { return true } } return false } // ContainsVector returns true if l contains a vector channel. // See VectorChannels for channels considered vectors. func (l Channels) ContainsVector() bool { for _, t := range l { if t.IsVector() { return true } } return false }
package main

import (
	"fmt"
	"net/http"

	/*	"os"
		"os/signal"*/
	"strings"
	"time"
)

// startServer serves the current directory on :8080 through TestHandle.
func startServer() {
	http.Handle("/", TestHandle("."))
	s := &http.Server{
		Addr: ":8080",
	}
	// Fix: report the terminal server error instead of silently dropping it.
	if err := s.ListenAndServe(); err != nil {
		fmt.Println(err)
	}
}

func main() {
	fmt.Println("before listen")
	go startServer()
	fmt.Println("after listen")
	/*
		c := make(chan os.Signal, 1)
		signal.Notify(c, os.Interrupt, os.Kill)
		// Block until a signal is received.
		sig := <-c
		fmt.Printf("Trapped Signal; %v", sig)
	*/
	for i := 0; i < 10; i++ {
		time.Sleep(time.Second)
	}
	// Fix: block forever without burning a CPU core — the original empty
	// `for {}` busy-spins at 100%.
	select {}
}

// FileHandler wraps a file-serving handler and logs request details.
type FileHandler struct {
	http.Handler
	count int8 // NOTE(review): never read or written; candidate for removal.
}

// ServeHTTP delegates to the wrapped handler, then logs the URL components
// and the final path segment of the request.
func (f *FileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	f.Handler.ServeHTTP(w, r)
	fmt.Println(r.URL, r.Method)
	url := r.URL
	fmt.Println("scheme:", url.Scheme, " opaque:", url.Opaque, " user:", url.User, " host:", url.Host, " path:", url.Path)
	fmt.Println(" rawpath:", url.RawPath, " forcequery:", url.ForceQuery, " rawquery:", url.RawQuery, " fragment:", url.Fragment)
	fullName := r.URL.Path
	nameSlice := strings.Split(fullName, "/")
	fmt.Println(nameSlice[len(nameSlice)-1])
}

// TestHandle returns a FileHandler serving files from directory.
func TestHandle(directory string) http.Handler {
	fh := new(FileHandler)
	fh.Handler = http.FileServer(http.Dir(directory))
	return fh
}
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package storj_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/common/storj"
	"storj.io/common/testrand"
)

// TestNewKey exercises storj.NewKey with inputs shorter than, equal to, and
// longer than storj.KeySize, plus nil/empty input and determinism.
func TestNewKey(t *testing.T) {
	t.Run("nil humanReadableKey", func(t *testing.T) {
		t.Parallel()

		key, err := storj.NewKey(nil)
		require.NoError(t, err)
		require.True(t, key.IsZero(), "key isn't zero value")
	})

	t.Run("empty humanReadableKey", func(t *testing.T) {
		t.Parallel()

		key, err := storj.NewKey([]byte{})
		require.NoError(t, err)
		require.True(t, key.IsZero(), "key isn't zero value")
	})

	t.Run("humanReadableKey is of KeySize length", func(t *testing.T) {
		t.Parallel()

		humanReadableKey := testrand.Bytes(storj.KeySize)
		key, err := storj.NewKey(humanReadableKey)
		require.NoError(t, err)
		require.Equal(t, humanReadableKey, key[:])
	})

	t.Run("humanReadableKey is shorter than KeySize", func(t *testing.T) {
		t.Parallel()

		humanReadableKey := testrand.BytesInt(testrand.Intn(storj.KeySize))
		key, err := storj.NewKey(humanReadableKey)
		require.NoError(t, err)
		// Prefix must match; the remainder of the key is implementation-filled.
		require.Equal(t, humanReadableKey, key[:len(humanReadableKey)])
	})

	t.Run("humanReadableKey is larger than KeySize", func(t *testing.T) {
		t.Parallel()

		humanReadableKey := testrand.BytesInt(testrand.Intn(10) + storj.KeySize + 1)
		key, err := storj.NewKey(humanReadableKey)
		require.NoError(t, err)
		// Oversized input is truncated to the first KeySize bytes.
		assert.Equal(t, humanReadableKey[:storj.KeySize], key[:])
	})

	t.Run("same human readable key produce the same key", func(t *testing.T) {
		t.Parallel()

		humanReadableKey := testrand.BytesInt(testrand.Intn(10) + storj.KeySize + 1)
		key1, err := storj.NewKey(humanReadableKey)
		require.NoError(t, err)
		key2, err := storj.NewKey(humanReadableKey)
		require.NoError(t, err)

		assert.Equal(t, key1, key2, "keys are equal")
	})
}

// TestKey_IsZero verifies IsZero on nil, zero-valued and non-zero keys,
// including a nil receiver reached through an indirection.
func TestKey_IsZero(t *testing.T) {
	t.Run("nil", func(t *testing.T) {
		var key *storj.Key
		require.True(t, key.IsZero())

		// Call through a wrapper so the nil is passed as an argument rather
		// than being a compile-time constant receiver.
		wrapperFn := func(key *storj.Key) bool {
			return key.IsZero()
		}
		require.True(t, wrapperFn(nil))
	})

	t.Run("zero", func(t *testing.T) {
		key := &storj.Key{}
		require.True(t, key.IsZero())
	})

	t.Run("no nil/zero", func(t *testing.T) {
		key := &storj.Key{'k'}
		require.False(t, key.IsZero())
	})
}

// TestNonce_Scan tests (*Nonce).Scan().
func TestNonce_Scan(t *testing.T) {
	tmp := storj.Nonce{}
	// Non-[]byte and wrong-length inputs must be rejected.
	require.Error(t, tmp.Scan(32))
	require.Error(t, tmp.Scan(false))
	require.Error(t, tmp.Scan([]byte{}))
	// nil scans to the zero nonce.
	require.NoError(t, tmp.Scan(nil))
	require.True(t, tmp.IsZero())
	require.NoError(t, tmp.Scan(tmp.Bytes()))
	require.True(t, tmp.IsZero())
}

// TestEncryptedPrivateKey_Scan tests (*EncryptedPrivateKey).Scan().
func TestEncryptedPrivateKey_Scan(t *testing.T) {
	tmp := storj.EncryptedPrivateKey{}
	require.Error(t, tmp.Scan(32))
	require.Error(t, tmp.Scan(false))
	require.NoError(t, tmp.Scan([]byte{}))
	require.NoError(t, tmp.Scan([]byte{1, 2, 3, 4}))

	// Scan must copy the source slice, not alias it: mutating ref afterwards
	// must not change the scanned value.
	ref := []byte{1, 2, 3}
	require.NoError(t, tmp.Scan(ref))
	ref[0] = 0xFF
	require.Equal(t, storj.EncryptedPrivateKey{1, 2, 3}, tmp)
}
package users

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"

	"mainapp/app/middleware"
)

// DB_BASE_URL Database Address
const DB_BASE_URL string = "http://cooper-database-api:8080"

// Type is an HTTP content-type key
const Type string = "Content-Type"

// contentT is an HTTP content-type value
const contentT string = "application/json"

// getUser proxies the request to the database service, looking the user up
// by the "userId" request parameter and forwarding the JSON body back.
func getUser(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	fmt.Println(r)
	w.Header().Set(Type, contentT)

	keys, _, parserErr := middleware.GetParser(r, []string{"userId"})
	if parserErr != nil {
		// Parameter parsing failed: reject the request.
		// Error handling here could be centralized in a middleware.
		w.WriteHeader(http.StatusBadRequest)
		io.WriteString(w, "GET /api/user/getUser StatusBadRequest")
		return
	}

	userID := string(keys[0])
	dbURL, _ := url.Parse(DB_BASE_URL + "/users/" + userID)
	res, err := http.Get(dbURL.String())
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	w.WriteHeader(http.StatusOK)
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Println(body)
	io.WriteString(w, string(body))
	res.Body.Close()
}

// GetUser exports getUser()
var GetUser = getUser
/*
Copyright 2021 The KodeRover Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package types

import (
	"fmt"
	"strings"

	"github.com/koderover/zadig/lib/microservice/warpdrive/config"
	"github.com/koderover/zadig/lib/microservice/warpdrive/core/service/types/task"
	"github.com/koderover/zadig/lib/setting"
)

// Context is the full execution context handed to a warpdrive task run.
type Context struct {
	// Workspace is the working directory inside the container [required]
	Workspace string `yaml:"workspace"`

	// CleanWorkspace controls whether the workspace is wiped first [optional, default false]
	CleanWorkspace bool `yaml:"clean_workspace"`

	// Paths is the PATH used when executing scripts
	Paths string `yaml:"-"`

	// Proxy holds proxy configuration for reaching external services
	Proxy *Proxy `yaml:"proxy"`

	// Envs are user-injected environment variables, including install-script env vars [optional]
	Envs EnvVar `yaml:"envs"`

	// SecretEnvs are user-injected sensitive env vars whose values must not appear in stdout/stderr [optional]
	SecretEnvs EnvVar `yaml:"secret_envs"`

	// Installs are installer scripts to run before the build [optional]
	Installs []*Install `yaml:"installs"`

	// Repos are the repositories the user needs cloned
	Repos []*Repo `yaml:"repos"`

	// Scripts are the main build scripts
	Scripts []string `yaml:"scripts"`

	// PostScripts run after the main build scripts
	PostScripts []string `yaml:"post_scripts"`

	// SSHs are ssh connection parameters
	SSHs []*task.SSH `yaml:"sshs"`

	// GinkgoTest configures ginkgo test execution
	GinkgoTest *GinkgoTest `yaml:"ginkgo_test"`

	// Archive holds artifact archiving configuration [optional]
	Archive *Archive `yaml:"archive"`

	// DockerRegistry holds image registry configuration [optional]
	DockerRegistry *DockerRegistry `yaml:"docker_registry"`

	// DockerBuildCtx is the image build context
	DockerBuildCtx *task.DockerBuildCtx `yaml:"docker_build_ctx"`

	// FileArchiveCtx configures binary package builds
	FileArchiveCtx *task.FileArchiveCtx `yaml:"file_archive_ctx"`

	// Git holds Github/Gitlab configuration
	Git *Git `yaml:"git"`

	// Caches lists cache directories
	Caches []string `yaml:"caches"`

	// TestType is the kind of test being run
	TestType string `yaml:"test_type"`

	// ClassicBuild selects the classic build mode
	ClassicBuild bool `yaml:"classic_build"`

	// StorageUri is the artifact storage URI
	StorageUri string `yaml:"storage_uri"`

	// PipelineName is the owning pipeline's name
	PipelineName string `yaml:"pipeline_name"`

	// TaskID identifies the pipeline task
	TaskID int64 `yaml:"task_id"`

	// ServiceName is the service being built/tested
	ServiceName string `yaml:"service_name"`

	// ResetCache ignore workspace cache [runtime]
	ResetCache bool `yaml:"reset_cache"`

	// IgnoreCache ignore docker build cache [runtime]
	IgnoreCache bool `yaml:"ignore_cache"`

	// Object-storage connection settings for artifacts.
	StorageEndpoint string `yaml:"storage_endpoint"`
	StorageAK       string `yaml:"storage_ak"`
	StorageSK       string `yaml:"storage_sk"`
	StorageBucket   string `yaml:"storage_bucket"`
}

// Proxy holds proxy configuration used to reach external networks.
type Proxy struct {
	Type                   string `yaml:"type"`
	Address                string `yaml:"address"`
	Port                   int    `yaml:"port"`
	NeedPassword           bool   `yaml:"need_password"`
	Username               string `yaml:"username"`
	Password               string `yaml:"password"`
	EnableRepoProxy        bool   `yaml:"enable_repo_proxy"`
	EnableApplicationProxy bool   `yaml:"enable_application_proxy"`
}

// GetProxyUrl renders the proxy as a URL, embedding credentials when
// NeedPassword is set.
func (p *Proxy) GetProxyUrl() string {
	var uri string
	if p.NeedPassword {
		uri = fmt.Sprintf("%s://%s:%s@%s:%d",
			p.Type,
			p.Username,
			p.Password,
			p.Address,
			p.Port,
		)
		return uri
	}
	uri = fmt.Sprintf("%s://%s:%d",
		p.Type,
		p.Address,
		p.Port,
	)
	return uri
}

// EnvVar is a list of user-injected environment variable entries.
type EnvVar []string

// Environs returns the user-injected env entries in "key=value" form.
// "$HOME" inside a value is expanded to config.Home().
// NOTE(review): entries whose value itself contains '=' are silently dropped
// by the Split length check below — confirm whether that is intended.
func (ev EnvVar) Environs() []string {
	resp := []string{}
	for _, val := range ev {
		if val == "" {
			continue
		}

		if len(strings.Split(val, "=")) != 2 {
			continue
		}

		replaced := strings.Replace(val, "$HOME", config.Home(), -1)
		resp = append(resp, replaced)
	}
	return resp
}

// Install describes one installer to run before the build.
type Install struct {
	// Name of the tool to install
	Name string `yaml:"name"`
	// Version of the tool
	Version string `yaml:"version"`
	// Scripts performing the installation
	Scripts []string `yaml:"scripts"`
	// BinPath is the directory holding the resulting executables
	BinPath string `yaml:"bin_path"`
	// Optional: env vars for the install scripts
	Envs EnvVar `yaml:"envs"`
	// Download is the package location
	Download string `yaml:"download"`
}

// Repo describes one source repository to fetch.
type Repo struct {
	Source       string `yaml:"source"`
	Address      string `yaml:"address"`
	Owner        string `yaml:"owner"`
	Name         string `yaml:"name"`
	RemoteName   string `yaml:"remote_name"`
	Branch       string `yaml:"branch"`
	PR           int    `yaml:"pr"`
	Tag          string `yaml:"tag"`
	CheckoutPath string `yaml:"checkout_path"`
	SubModules   bool   `yaml:"submodules"`
	OauthToken   string `yaml:"oauthToken"`
	User         string `yaml:"-"`
	Password     string `yaml:"-"`
	CheckoutRef  string `yaml:"checkout_ref"`
}

// PRRef returns refs format
// It will check repo provider type, by default returns github refs format.
//
// e.g. github returns refs/pull/1/head
// e.g. gitlab returns merge-requests/1/head
func (r *Repo) PRRef() string {
	if strings.ToLower(r.Source) == setting.SourceFromGitlab {
		return fmt.Sprintf("merge-requests/%d/head", r.PR)
	} else if strings.ToLower(r.Source) == setting.SourceFromGerrit {
		return r.CheckoutRef
	}
	return fmt.Sprintf("refs/pull/%d/head", r.PR)
}

// BranchRef returns branch refs format
// e.g. refs/heads/master
func (r *Repo) BranchRef() string {
	return fmt.Sprintf("refs/heads/%s", r.Branch)
}

// TagRef returns the tag ref of current repo
// e.g. refs/tags/v1.0.0
func (r *Repo) TagRef() string {
	return fmt.Sprintf("refs/tags/%s", r.Tag)
}

// Ref returns the changes ref of current repo in the following order:
// 1. tag ref
// 2. branch ref
// 3. pr ref
func (r *Repo) Ref() string {
	if len(r.Tag) > 0 {
		return r.TagRef()
	} else if len(r.Branch) > 0 {
		return r.BranchRef()
	} else if r.PR > 0 {
		return r.PRRef()
	}
	return ""
}

// Archive describes where build artifacts are archived.
type Archive struct {
	Dir  string `yaml:"dir"`
	File string `yaml:"file"`
}

// GinkgoTest configures ginkgo test result and artifact collection.
type GinkgoTest struct {
	ResultPath    string   `yaml:"result_path"`
	ArtifactPaths []string `yaml:"artifact_paths"`
}

// DockerRegistry configures pushing images to a docker registry.
type DockerRegistry struct {
	Host      string `yaml:"host"`
	Namespace string `yaml:"namespace"`
	UserName  string `yaml:"username"`
	Password  string `yaml:"password"`
}

// Git holds per-provider git identity and SSH configuration.
type Git struct {
	UserName string `yaml:"username"`
	Email    string `yaml:"email"`

	GithubHost   string `yaml:"github_host"`
	GithubSSHKey string `yaml:"github_ssh_key"`

	GitlabHost string `yaml:"gitlab_host"`
	// encoded in base64
	GitlabSSHKey string `yaml:"gitlab_ssh_key"`

	GitKnownHost string `yaml:"git_known_host"`
}
// Package errorutils provides helper functions for dealing with errors. package errorutils // First returns the first non-nil error in a set of errors. func First(errors ...error) error { for _, err := range errors { if err != nil { return err } } return nil }
// Package sql implements the bp.Service interface on top of a SQL
// (PostgreSQL-flavored) database.
package sql

import (
	"database/sql"
	// "log"
	"sort"
	"strings"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"

	"github.com/BestPrice/backend/bp"
	"github.com/shopspring/decimal"
)

// Compile-time check that Service satisfies bp.Service.
var _ bp.Service = &Service{}

// Service is the SQL-backed implementation of bp.Service.
type Service struct {
	db *sql.DB
}

// makeCategoryTree assembles the flat category set into a tree rooted at
// parent (nil selects the top-level categories). Entries are deleted
// from cat as they are placed, so each category is used exactly once.
// NOTE(review): the recursion deletes from cat while the outer range is
// iterating; deleting during a Go map range is permitted, so this is
// safe, just subtle.
func makeCategoryTree(parent *bp.ID, cat map[*bp.Category]bool) []bp.Category {
	nodes := []bp.Category{}
	for c := range cat {
		if parent == nil {
			// Root level: keep only categories without a parent.
			if !c.IDParent.Null() {
				continue
			}
		} else {
			// Keep only direct children of parent.
			if c.IDParent.Null() || c.IDParent.String() != parent.String() {
				continue
			}
		}
		delete(cat, c)
		c.Subcategories = makeCategoryTree(&c.ID, cat)
		nodes = append(nodes, *c)
	}
	return nodes
}

// Categories loads every product row that acts as a category (reachable
// through parents with empty price_description) and returns the set as
// a tree.
func (s Service) Categories() ([]bp.Category, error) {
	query := `
WITH RECURSIVE nodes (id_product, product_name, id_parent_product) AS (
	SELECT p.id_product, p.product_name, p.id_parent_product
	FROM product p
	WHERE p.id_parent_product is NULL
	UNION ALL
	SELECT p.id_product, p.product_name, p.id_parent_product
	FROM product p, nodes n
	WHERE p.id_parent_product = n.id_product AND p.price_description = ''
)
SELECT n.id_product, n.product_name, n.id_parent_product FROM nodes n`

	rows, err := s.db.Query(query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	vals := make(map[*bp.Category]bool)
	for rows.Next() {
		var p bp.Category
		if err := rows.Scan(&p.ID, &p.Name, &p.IDParent); err != nil {
			return nil, err
		}
		vals[&p] = true
	}
	return makeCategoryTree(nil, vals), nil
}

// Chainstores returns every chain store row.
func (s Service) Chainstores() ([]bp.Chainstore, error) {
	rows, err := s.db.Query("SELECT * FROM chain_store")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	vals := make([]bp.Chainstore, 0, 32)
	for rows.Next() {
		var c bp.Chainstore
		if err := rows.Scan(&c.ID, &c.Name); err != nil {
			return nil, err
		}
		vals = append(vals, c)
	}
	return vals, nil
}

// normalizePhrase prepares a user search phrase for SQL pattern
// matching: pre-existing '|' characters are stripped, surrounding space
// is trimmed, then remaining spaces become '|' (regex alternation),
// letters are lower-cased, and diacritics are removed via NFD ->
// strip combining marks -> NFC.
func normalizePhrase(p string) (string, error) {
	var (
		add = func(r rune) rune {
			if r == ' ' {
				return '|'
			}
			return r
		}
	)

	p = strings.Replace(p, "|", "", -1)
	p = strings.TrimSpace(p)

	t := transform.Chain(
		runes.Map(add),
		runes.Map(unicode.ToLower),
		norm.NFD,
		runes.Remove(runes.In(unicode.Mn)),
		norm.NFC)
	no, _, err := transform.String(t, p)
	return no, err
}

// Products searches products under the given category (nil means the
// whole catalogue) whose accumulated name/brand word chain matches the
// normalized phrase, ranked by the number of matching words.
// NOTE(review): both the category id and the normalized phrase are
// concatenated directly into the SQL text; this should be parameterized
// to rule out SQL injection.
func (s Service) Products(category *bp.ID, phrase string) ([]bp.Product, error) {
	c := "IS NULL"
	if category != nil {
		c = "= '{" + category.String() + "}'"
	}

	p, err := normalizePhrase(phrase)
	if err != nil {
		return nil, err
	}

	query := `
WITH RECURSIVE nodes AS (
	-- GET all products with given category
	SELECT p.id_product uuid, p.id_brand, p.price_description pd, ''::text || p.product_name AS chain
	FROM product p
	WHERE p.id_parent_product ` + c + `
	UNION ALL
	SELECT p.id_product, p.id_brand, p.price_description, n.chain || ' ' || p.product_name
	FROM product p, nodes n
	WHERE p.id_parent_product = n.uuid
)
, join_brands AS (
	SELECT n.uuid, n.pd, n.chain || ' ' || b.brand_name AS chain
	FROM nodes n
	JOIN brand b ON b.id_brand = n.id_brand
),
nodes2 AS (
	-- REMOVE category products and split chain
	SELECT n.uuid, regexp_split_to_table(n.chain, E'\\s+') words
	FROM join_brands n
	WHERE NOT n.pd = ''
),
nodes3 AS (
	-- COUNT matches
	SELECT n.uuid uuid, count(n.uuid) rank
	FROM nodes2 n
	WHERE unaccent(lower(n.words)) SIMILAR TO '%(` + p + `)%'
	GROUP BY n.uuid
),
nodes4 AS (
	SELECT p.*, n.rank
	FROM product p, nodes3 n
	WHERE p.id_product = n.uuid
)
SELECT n.id_product, n.product_name, n.weight, n.volume, n.price_description,
	n.decimal_possibility, b.id_brand, b.brand_name, n.rank
FROM nodes4 n
JOIN brand b ON b.id_brand = n.id_brand
ORDER BY n.rank DESC
`
	rows, err := s.db.Query(query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	vals := make([]bp.Product, 0, 32)
	for rows.Next() {
		var p bp.Product
		if err := rows.Scan(&p.ID, &p.Name, &p.Weight, &p.Volume,
			&p.PriceDescription, &p.DecimalPossibility,
			&p.Brand.ID, &p.Brand.Name, &p.Rank); err != nil {
			return nil, err
		}
		vals = append(vals, p)
	}
	return vals, nil
}

// Stores returns every store joined with its chain store name.
func (s Service) Stores() ([]bp.Store, error) {
	query := `
SELECT s.id_store, cs.chain_store_name, s.store_name, s.city,
	s.street_and_nr, s.district, s.region, s.latitude, s.longitude
FROM store s
JOIN chain_store cs ON s.id_chain_store = cs.id_chain_store
`
	rows, err := s.db.Query(query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	vals := make([]bp.Store, 0, 32)
	for rows.Next() {
		// Shadows the receiver s; safe because the db handle is no
		// longer needed inside the loop.
		var s bp.Store
		if err := rows.Scan(&s.ID, &s.CSName, &s.Name, &s.City, &s.Street,
			&s.District, &s.Region, &s.Lat, &s.Lng); err != nil {
			return nil, err
		}
		vals = append(vals, s)
	}
	return vals, nil
}

// shopQuery builds the SQL that lists every price offer (per chain
// store) for the product subtree rooted at ID.
// NOTE(review): the id is concatenated directly into the SQL text;
// parameterize to rule out SQL injection.
func (s *Service) shopQuery(ID bp.ID) string {
	id := ID.String()
	query := `
WITH RECURSIVE t0 AS (
	SELECT p.id_product
	FROM product p
	WHERE p.id_parent_product = '` + id + `'
	UNION ALL
	SELECT p.id_product
	FROM product p, t0 n
	WHERE p.id_parent_product = n.id_product
)
, t1 AS (
	SELECT t.id_product FROM t0 t
	UNION
	SELECT '` + id + `'
)
, t2 AS (
	SELECT pp.*
	FROM t1 t, product_prices pp
	WHERE t.id_product = pp.id_product
)
, t3 AS (
	SELECT '` + id + `' as id_product, cs.chain_store_name, p.product_name,
		b.brand_name, p.price_description, t.unit_price, cs.id_chain_store
		--, p.weight, p.volume, p.decimal_possibility
	FROM t2 t
	JOIN product p ON p.id_product = t.id_product
	JOIN chain_store cs ON cs.id_chain_store = t.id_chain_store
	JOIN brand b ON b.id_brand = p.id_brand
)
SELECT * FROM t3
`
	return query
}

// Shop gathers every offer for the requested products and delegates the
// store/price optimization to calcShop.
// NOTE(review): defer rows.Close() inside the loop keeps every result
// set open until Shop returns; the IDs slice is filled but never read.
func (s Service) Shop(req *bp.ShopRequest) (bp.Shop, error) {
	var (
		IDs []string
		p   []bp.ShopProduct
	)
	for _, product := range req.Products {
		IDs = append(IDs, product.ID.String())

		rows, err := s.db.Query(s.shopQuery(product.ID))
		if err != nil {
			return bp.Shop{}, err
		}
		defer rows.Close()

		for rows.Next() {
			var r bp.ShopProduct
			err := rows.Scan(&r.ID, &r.ChainStore, &r.Product, &r.Brand,
				&r.PriceDesc, &r.Price, &r.IDChainStore)
			if err != nil {
				return bp.Shop{}, err
			}
			p = append(p, r)
		}
	}
	return calcShop(p, req)
}

// Stores is a named slice of shop stores (result container).
type Stores []bp.ShopStore

// shopProducts adapts a []bp.ShopProduct to sort.Interface (Len/Swap).
type shopProducts struct {
	p []bp.ShopProduct
}

func (b *shopProducts) Len() int { return len(b.p) }
func (b *shopProducts) Swap(i, j int) {
	b.p[i], b.p[j] = b.p[j], b.p[i]
}

// byPrice orders shop products by ascending price.
type byPrice struct {
	shopProducts
}

func (b *byPrice) Less(i, j int) bool {
	return b.p[i].Price.Cmp(b.p[j].Price) < 0
}

// calcShop multiplies each offer by the requested count, sorts offers by
// price, assigns products to stores via findProducts, filters stores by
// user preference, and totals the price. It returns an error result when
// not every requested product could be placed.
// NOTE(review): the m map is filled but never read afterwards.
func calcShop(p []bp.ShopProduct, req *bp.ShopRequest) (bp.Shop, error) {
	var (
		stores     = make(map[string]*bp.ShopStore)
		m          = make(map[string]bool)
		priceTotal decimal.Decimal
	)

	// add price to products
	for i := range p {
		pid := p[i].ID.String()
		m[pid] = false
		p[i].Count = req.ProductCount(p[i].ID)
		p[i].Price = p[i].Price.Mul(decimal.NewFromFloat(float64(p[i].Count)))
	}

	sort.Sort(&byPrice{shopProducts{p}})

	findProducts(p, req, stores, make(map[string]bool))

	var (
		Stores        Stores
		productsTotal int
	)
	for _, store := range stores {
		// remove not prefered chainstores
		if !req.UserPreference.Contains(store.ID) {
			continue
		}
		for _, product := range store.Products {
			priceTotal = priceTotal.Add(product.Price)
		}
		productsTotal += len(store.Products)
		Stores = append(Stores, *store)
	}

	if productsTotal != len(req.Products) {
		return bp.Shop{Error: "one or more products not available in store"}, nil
	}

	return bp.Shop{
		Stores:     Stores,
		PriceTotal: priceTotal,
	}, nil
}

// findProducts assigns products to stores by recursive backtracking over
// the (price-sorted) offer list: each untaken product is tentatively
// placed into its offer's store, the placement is undone when the store
// limit (req.UserPreference.MaxStores) would be exceeded or the deeper
// recursion cannot place every remaining product.
func findProducts(products []bp.ShopProduct, req *bp.ShopRequest, stores map[string]*bp.ShopStore, pt map[string]bool) {
	for i, p := range products {
		pid := p.ID.String()
		if _, ok := pt[pid]; ok {
			continue
		}
		pt[pid] = true
		if len(pt) > len(req.Products) {
			delete(pt, pid)
			return
		}

		pidcs := p.IDChainStore.String()
		store := stores[pidcs]
		if store == nil {
			store = &bp.ShopStore{
				ID:             p.IDChainStore,
				ChainStoreName: p.ChainStore,
			}
			stores[pidcs] = store
		}
		if len(stores) > req.UserPreference.MaxStores {
			delete(stores, pidcs)
			delete(pt, pid)
			continue
		}

		store.Products = append(store.Products, p)

		findProducts(products[i+1:], req, stores, pt)
		if len(pt) == len(req.Products) {
			return
		}

		// Undo this placement and try the next candidate offer.
		store.Products = store.Products[:len(store.Products)-1]
		if len(store.Products) == 0 {
			delete(stores, pidcs)
		}
		delete(pt, pid)
	}
}
// Package bfrequence counts character frequencies in a file using a
// buffered scanner and writes the five most frequent characters plus the
// line count to a result file.
package bfrequence

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"sort"
	"strconv"
)

// BuffFileInfo reads the named file line by line, computes the five most
// frequent characters and the number of lines, and writes the summary to
// a fresh "bfrequenceResN.txt" file.
func BuffFileInfo(filename string) {
	file, err := os.Open(filename)
	if err != nil {
		// Report the file name and the underlying error instead of the
		// previous uninformative message.
		log.Fatalf("could not open %s: %v", filename, err)
	}

	scanner := bufio.NewScanner(file)
	scanner.Split(bufio.ScanLines)

	fileData := ""
	lines := 0
	for scanner.Scan() {
		fileData += scanner.Text()
		lines++
	}
	file.Close()
	// The scanner can fail mid-file (e.g. a line longer than its buffer);
	// previously this error was silently ignored.
	if err := scanner.Err(); err != nil {
		log.Fatalf("could not read %s: %v", filename, err)
	}

	// buffRunes returns the formatted top-5 character counts.
	infoSlice := buffRunes(fileData)

	// BUG FIX: string(lines) converted the int to a rune (65 -> "A");
	// strconv.Itoa renders the decimal number.
	infoSlice = append(infoSlice, "Linjer: "+strconv.Itoa(lines))

	// buffFileWrite concatenates the slice and writes it to a file.
	buffFileWrite(infoSlice)

	fmt.Println("Successfully executed.")
}

// buffRunes counts every byte of text (keyed as a one-byte string) and
// returns the five most frequent characters, skipping the single most
// frequent one (normally " "), each formatted as "<char>: <count> | ".
// When several characters share a count, all of them are reported.
func buffRunes(text string) []string {
	// charMap: character -> number of occurrences. Pre-seed all 128
	// ASCII runes with zero so the extraction loop can look each one up.
	charMap := make(map[string]int)
	for j := 0; j < 128; j++ {
		charMap[string(rune(j))] = 0
	}

	// Tally each byte of the input.
	for i := 0; i < len(text); i++ {
		charMap[string(text[i])]++
	}

	// BUG FIX: make([]int, 128) followed by append produced 128 bogus
	// leading zeros; allocate length 0 with capacity 128 instead.
	amountList := make([]int, 0, 128)
	for i := 0; i < 128; i++ {
		amountList = append(amountList, charMap[string(rune(i))])
	}

	// Ascending sort: the largest count sits at the end of the slice.
	sort.Ints(amountList)

	// BUG FIX: make([]string, 5) followed by append left five empty
	// leading entries in the result; start empty with capacity 5.
	printSlice := make([]string, 0, 5)

	// i starts at 2 because the most common symbol is usually " ",
	// which we do not want to report.
	for i := 2; i <= 6; i++ {
		searchNum := amountList[len(amountList)-i]
		// Find every character whose count matches the extracted value.
		for j := 0; j < 128; j++ {
			key := string(rune(j))
			if charMap[key] == searchNum {
				printSlice = append(printSlice, key+": "+strconv.Itoa(searchNum)+" | ")
			}
		}
	}
	return printSlice
}

// buffFileWrite concatenates the slice entries and writes them to the
// first available result file name.
func buffFileWrite(wslice []string) {
	printString := ""
	for i := 0; i < len(wslice); i++ {
		printString += wslice[i]
	}
	// BUG FIX: the write error was silently discarded.
	if err := ioutil.WriteFile(buffFileNameSearch(), []byte(printString), 0644); err != nil {
		log.Fatalf("could not write result file: %v", err)
	}
}

// buffFileNameSearch probes bfrequenceRes0.txt, bfrequenceRes1.txt, ...
// and returns the first name that does not already exist.
func buffFileNameSearch() string {
	for i := 0; ; i++ {
		fileName := "bfrequenceRes" + strconv.Itoa(i) + ".txt"
		if _, err := os.Stat(fileName); os.IsNotExist(err) {
			return fileName
		}
	}
}
package module import ( "bytes" "reflect" "testing" ) func TestModuleHCL(t *testing.T) { hclInput := ` import { name = "base-module" } import { name = "some-other-module" } pacman "openssh" { state = "present" } pacman "tmux" { state = "present" } ` hclModule, err := Load("main", &Config{}, bytes.NewBufferString(hclInput)) if err != nil { t.Fatal(err) } wantName := "main" wantImports := []Import{ {Name: "base-module"}, {Name: "some-other-module"}, } wantNumResources := 2 if wantName != hclModule.Name { t.Errorf("want module name %q, got name %q", wantName, hclModule.Name) } if !reflect.DeepEqual(wantImports, hclModule.Imports) { t.Errorf("want %q imports, got %q imports", wantImports, hclModule.Imports) } if wantNumResources != len(hclModule.Resources) { t.Errorf("want %d resources, got %d resources", wantNumResources, len(hclModule.Resources)) } }
// A RPC Node type for RPC call package chord import ( "math/big" "net" ) type RPCNode struct { O *Node Listen net.Listener } /* method used for rpc call: FindSuccessor Notify GetData GetValue GetPredecessor SetSuccessor SetPredecessor */ func (o *RPCNode) FindSuccessor(pos *LookupType, res *Edge) error { return o.O.FindSuccessor(pos, res) } func (o *RPCNode) Notify(pred *Edge, res *int) error { return o.O.Notify(pred, res) } func (o *RPCNode) PutValue(kv KVPair, success *bool) error { return o.O.PutValue(kv, success) } func (o *RPCNode) GetValue(key string, value *string) error { return o.O.GetValue(key, value) } func (o *RPCNode) DeleteValue(key string, success *bool) error { return o.O.DeleteValue(key, success) } func (o *RPCNode) PutValueSuccessor(kv KVPair, success *bool) error { return o.O.PutValueSuccessor(kv, success) } func (o *RPCNode) DeleteValueSuccessor(key string, success *bool) error { return o.O.DeleteValueSuccessor(key, success) } func (o *RPCNode) PutValueDataPre(kv KVPair, success *bool) error { return o.O.PutValueDataPre(kv, success) } func (o *RPCNode) DeleteValueDataPre(key string, success *bool) error { return o.O.DeleteValueDataPre(key, success) } func (o *RPCNode) MoveKVPairs(newNode *big.Int, res *map[string]string) error { return o.O.MoveKVPairs(newNode, res) } func (o *RPCNode) MoveDataPre(args int, res *map[string]string) error { return o.O.MoveDataPre(args, res) } func (o *RPCNode) QuitMoveData(Data KVMap, res *int) error { return o.O.QuitMoveData(Data, res) } func (o *RPCNode) QuitMoveDataPre(DataPre KVMap, res *int) error { return o.O.QuitMoveDataPre(DataPre, res) } func (o *RPCNode) GetPredecessor(args int, res *Edge) error { return o.O.GetPredecessor(args, res) } func (o *RPCNode) GetSuccessorList(args int, res *[successorListLen + 1]Edge) error { return o.O.GetSuccessorList(args, res) } func (o *RPCNode) SetSuccessor(edge Edge, res *int) error { return o.O.SetSuccessor(edge, res) } func (o *RPCNode) SetPredecessor(edge Edge, res 
*int) error { return o.O.SetPredecessor(edge, res) }
package azure

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/pkg/errors"

	"yunion.io/x/jsonutils"

	api "yunion.io/x/onecloud/pkg/apis/compute"
	"yunion.io/x/onecloud/pkg/cloudprovider"
	"yunion.io/x/onecloud/pkg/multicloud"
)

// TODO: virtual machine scale sets are not supported.
// Note: because the configuration differs from onecloud backend server
// groups, backend pools that are not associated are not synchronized.
// Application LB: HTTP settings + backend pool = onecloud backend server group.
// Layer-4 LB: loadBalancingRules(backendPort) + backend pool = onecloud
// backend server group.
type SLoadbalancerBackendGroup struct {
	lb   *SLoadbalancer
	lbbs []cloudprovider.ICloudLoadbalancerBackend

	Pool         BackendAddressPool
	DefaultPort  int
	HttpSettings *BackendHTTPSettingsCollection
	BackendIps   []BackendIPConfiguration
}

// GetId returns a synthetic id of the form "<pool id>::<default port>".
func (self *SLoadbalancerBackendGroup) GetId() string {
	return self.Pool.ID + "::" + strconv.Itoa(self.DefaultPort)
}

// GetName combines the pool name with either the HTTP-settings name
// (application LB) or the default port (layer-4 LB).
func (self *SLoadbalancerBackendGroup) GetName() string {
	if self.HttpSettings != nil {
		return self.Pool.Name + "::" + self.HttpSettings.Name
	}
	return self.Pool.Name + "::" + strconv.Itoa(self.DefaultPort)
}

// GetGlobalId is the lower-cased form of GetId.
func (self *SLoadbalancerBackendGroup) GetGlobalId() string {
	return strings.ToLower(self.GetId())
}

// GetStatus maps the Azure provisioning state onto onecloud LB states;
// anything other than "Succeeded" is reported as unknown.
func (self *SLoadbalancerBackendGroup) GetStatus() string {
	switch self.Pool.Properties.ProvisioningState {
	case "Succeeded":
		return api.LB_STATUS_ENABLED
	default:
		return api.LB_STATUS_UNKNOWN
	}
}

// Refresh re-reads this backend group through the owning load balancer
// and invalidates the cached backend list.
func (self *SLoadbalancerBackendGroup) Refresh() error {
	lbbg, err := self.lb.GetILoadBalancerBackendGroupById(self.GetId())
	if err != nil {
		return errors.Wrap(err, "GetILoadBalancerBackendGroupById")
	}

	err = jsonutils.Update(self, lbbg)
	if err != nil {
		return errors.Wrap(err, "refresh.Update")
	}
	self.lbbs = nil
	return nil
}

// IsEmulated is always true: the group is synthesized from pool plus
// port / HTTP settings rather than being a standalone Azure resource.
func (self *SLoadbalancerBackendGroup) IsEmulated() bool {
	return true
}

func (self *SLoadbalancerBackendGroup) GetSysTags() map[string]string {
	return nil
}

func (self *SLoadbalancerBackendGroup) GetTags() (map[string]string, error) {
	return map[string]string{}, nil
}

// SetTags is not supported for this emulated resource.
func (self *SLoadbalancerBackendGroup) SetTags(tags map[string]string, replace bool) error {
	return errors.Wrap(cloudprovider.ErrNotImplemented, "SetTags")
}

// GetProjectId derives the project from the resource group encoded in
// the id.
func (self *SLoadbalancerBackendGroup) GetProjectId() string {
	return getResourceGroup(self.GetId())
}

func (self *SLoadbalancerBackendGroup) IsDefault() bool {
	return false
}

func (self *SLoadbalancerBackendGroup) GetType() string {
	return api.LB_BACKENDGROUP_TYPE_NORMAL
}

func (self *SLoadbalancerBackendGroup) GetLoadbalancerId() string {
	return self.lb.GetId()
}

// GetILoadbalancerBackends lists the group's backends (cached after the
// first call): one entry per NIC ip-configuration that is attached to a
// virtual machine, plus one entry per plain backend IP address.
func (self *SLoadbalancerBackendGroup) GetILoadbalancerBackends() ([]cloudprovider.ICloudLoadbalancerBackend, error) {
	if self.lbbs != nil {
		return self.lbbs, nil
	}
	var ret []cloudprovider.ICloudLoadbalancerBackend
	ips := self.Pool.Properties.BackendIPConfigurations
	for i := range ips {
		ip := ips[i]
		// The ip-configuration id embeds the owning NIC id as its prefix.
		nic, err := self.lb.region.GetNetworkInterface(strings.Split(ip.ID, "/ipConfigurations")[0])
		if err != nil {
			return nil, errors.Wrap(err, "GetNetworkInterface")
		}

		// Skip NICs that are not attached to a virtual machine.
		if len(nic.Properties.VirtualMachine.ID) == 0 {
			continue
		}

		name := nic.Properties.VirtualMachine.Name
		vid := nic.Properties.VirtualMachine.ID
		// Fall back to the last segment of the VM id when no name is set.
		if len(name) == 0 && len(vid) > 0 {
			segs := strings.Split(vid, "/virtualMachines/")
			name = segs[len(segs)-1]
		}

		bg := SLoadbalancerBackend{
			SResourceBase: multicloud.SResourceBase{},
			lbbg:          self,
			Name:          name,
			ID:            vid,
			Type:          api.LB_BACKEND_GUEST,
			BackendPort:   self.DefaultPort,
		}
		ret = append(ret, &bg)
	}

	ips2 := self.Pool.Properties.BackendAddresses
	for i := range ips2 {
		name := fmt.Sprintf("ip-%s", ips2[i].IPAddress)
		bg := SLoadbalancerBackend{
			SResourceBase: multicloud.SResourceBase{},
			lbbg:          self,
			Name:          name,
			ID:            fmt.Sprintf("%s-%s", self.GetId(), name),
			Type:          api.LB_BACKEND_IP,
			BackendIP:     ips2[i].IPAddress,
			BackendPort:   self.DefaultPort,
		}
		ret = append(ret, &bg)
	}
	self.lbbs = ret
	return ret, nil
}

// GetILoadbalancerBackendById linearly searches the backend list.
func (self *SLoadbalancerBackendGroup) GetILoadbalancerBackendById(backendId string) (cloudprovider.ICloudLoadbalancerBackend, error) {
	lbbs, err := self.GetILoadbalancerBackends()
	if err != nil {
		return nil, errors.Wrap(err, "GetILoadbalancerBackends")
	}
	for i := range lbbs {
		if lbbs[i].GetId() == backendId {
			return lbbs[i], nil
		}
	}
	return nil, errors.Wrap(cloudprovider.ErrNotFound, "GetILoadbalancerBackendById")
}

// Protocol / scheduler / health-check / sticky-session attributes are
// not modeled for this group and are reported as empty.
func (self *SLoadbalancerBackendGroup) GetProtocolType() string {
	return ""
}

func (self *SLoadbalancerBackendGroup) GetScheduler() string {
	return ""
}

func (self *SLoadbalancerBackendGroup) GetHealthCheck() (*cloudprovider.SLoadbalancerHealthCheck, error) {
	return nil, nil
}

func (self *SLoadbalancerBackendGroup) GetStickySession() (*cloudprovider.SLoadbalancerStickySession, error) {
	return nil, nil
}

// Mutating operations are not implemented for this emulated resource.
func (self *SLoadbalancerBackendGroup) AddBackendServer(serverId string, weight int, port int) (cloudprovider.ICloudLoadbalancerBackend, error) {
	return nil, errors.Wrap(cloudprovider.ErrNotImplemented, "AddBackendServer")
}

func (self *SLoadbalancerBackendGroup) RemoveBackendServer(serverId string, weight int, port int) error {
	return errors.Wrap(cloudprovider.ErrNotImplemented, "RemoveBackendServer")
}

func (self *SLoadbalancerBackendGroup) Delete(ctx context.Context) error {
	return errors.Wrap(cloudprovider.ErrNotImplemented, "Delete")
}

func (self *SLoadbalancerBackendGroup) Sync(ctx context.Context, group *cloudprovider.SLoadbalancerBackendGroup) error {
	return errors.Wrap(cloudprovider.ErrNotImplemented, "Sync")
}
// Copyright 2014 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package batcheval import ( "bytes" "context" "fmt" "math" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/abortspan" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/readsummary" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) func init() { RegisterReadWriteCommand(roachpb.EndTxn, declareKeysEndTxn, EndTxn) } // declareKeysWriteTransaction is the shared portion of // declareKeys{End,Heartbeat}Transaction. 
func declareKeysWriteTransaction( _ ImmutableRangeState, header roachpb.Header, req roachpb.Request, latchSpans *spanset.SpanSet, ) { if header.Txn != nil { header.Txn.AssertInitialized(context.TODO()) latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: keys.TransactionKey(req.Header().Key, header.Txn.ID), }) } } func declareKeysEndTxn( rs ImmutableRangeState, header roachpb.Header, req roachpb.Request, latchSpans, _ *spanset.SpanSet, ) { et := req.(*roachpb.EndTxnRequest) declareKeysWriteTransaction(rs, header, req, latchSpans) var minTxnTS hlc.Timestamp if header.Txn != nil { header.Txn.AssertInitialized(context.TODO()) minTxnTS = header.Txn.MinTimestamp abortSpanAccess := spanset.SpanReadOnly if !et.Commit { // Rollback EndTxn requests may write to the abort span, either if // their Poison flag is set, in which case they will add an abort // span entry, or if their Poison flag is not set and an abort span // entry already exists on this Range, in which case they will clear // that entry. abortSpanAccess = spanset.SpanReadWrite } latchSpans.AddNonMVCC(abortSpanAccess, roachpb.Span{ Key: keys.AbortSpanKey(rs.GetRangeID(), header.Txn.ID), }) } // If the request is intending to finalize the transaction record then it // needs to declare a few extra keys. if !et.IsParallelCommit() { // All requests that intend on resolving local locks need to depend on // the range descriptor because they need to determine which locks are // within the local range. latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{ Key: keys.RangeDescriptorKey(rs.GetStartKey()), }) // The spans may extend beyond this Range, but it's ok for the // purpose of acquiring latches. The parts in our Range will // be resolved eagerly. 
for _, span := range et.LockSpans { latchSpans.AddMVCC(spanset.SpanReadWrite, span, minTxnTS) } if et.InternalCommitTrigger != nil { if st := et.InternalCommitTrigger.SplitTrigger; st != nil { // Splits may read from the entire pre-split range (they read // from the LHS in all cases, and the RHS only when the existing // stats contain estimates). Splits declare non-MVCC read access // across the entire LHS to block all concurrent writes to the // LHS because their stat deltas will interfere with the // non-delta stats computed as a part of the split. Splits // declare non-MVCC write access across the entire RHS to block // all concurrent reads and writes to the RHS because they will // fail if applied after the split. (see // https://github.com/cockroachdb/cockroach/issues/14881) latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{ Key: st.LeftDesc.StartKey.AsRawKey(), EndKey: st.LeftDesc.EndKey.AsRawKey(), }) latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: st.RightDesc.StartKey.AsRawKey(), EndKey: st.RightDesc.EndKey.AsRawKey(), }) latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: keys.MakeRangeKeyPrefix(st.LeftDesc.StartKey), EndKey: keys.MakeRangeKeyPrefix(st.RightDesc.EndKey).PrefixEnd(), }) leftRangeIDPrefix := keys.MakeRangeIDReplicatedPrefix(rs.GetRangeID()) latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{ Key: leftRangeIDPrefix, EndKey: leftRangeIDPrefix.PrefixEnd(), }) rightRangeIDPrefix := keys.MakeRangeIDReplicatedPrefix(st.RightDesc.RangeID) latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: rightRangeIDPrefix, EndKey: rightRangeIDPrefix.PrefixEnd(), }) rightRangeIDUnreplicatedPrefix := keys.MakeRangeIDUnreplicatedPrefix(st.RightDesc.RangeID) latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: rightRangeIDUnreplicatedPrefix, EndKey: rightRangeIDUnreplicatedPrefix.PrefixEnd(), }) latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{ Key: 
keys.RangeLastReplicaGCTimestampKey(st.LeftDesc.RangeID), }) latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: keys.RangeLastReplicaGCTimestampKey(st.RightDesc.RangeID), }) latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{ Key: abortspan.MinKey(rs.GetRangeID()), EndKey: abortspan.MaxKey(rs.GetRangeID()), }) } if mt := et.InternalCommitTrigger.MergeTrigger; mt != nil { // Merges copy over the RHS abort span to the LHS, and compute // replicated range ID stats over the RHS in the merge trigger. latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: abortspan.MinKey(mt.LeftDesc.RangeID), EndKey: abortspan.MaxKey(mt.LeftDesc.RangeID).PrefixEnd(), }) latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{ Key: keys.MakeRangeIDReplicatedPrefix(mt.RightDesc.RangeID), EndKey: keys.MakeRangeIDReplicatedPrefix(mt.RightDesc.RangeID).PrefixEnd(), }) // Merges incorporate the prior read summary from the RHS into // the LHS, which ensures that the current and all future // leaseholders on the joint range respect reads served on the // RHS. latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: keys.RangePriorReadSummaryKey(mt.LeftDesc.RangeID), }) } } } } // EndTxn either commits or aborts (rolls back) an extant transaction according // to the args.Commit parameter. Rolling back an already rolled-back txn is ok. // TODO(nvanbenschoten): rename this file to cmd_end_txn.go once some of andrei's // recent PRs have landed. func EndTxn( ctx context.Context, readWriter storage.ReadWriter, cArgs CommandArgs, resp roachpb.Response, ) (result.Result, error) { args := cArgs.Args.(*roachpb.EndTxnRequest) h := cArgs.Header ms := cArgs.Stats reply := resp.(*roachpb.EndTxnResponse) if err := VerifyTransaction(h, args, roachpb.PENDING, roachpb.STAGING, roachpb.ABORTED); err != nil { return result.Result{}, err } if args.Require1PC { // If a 1PC txn was required and we're in EndTxn, we've failed to evaluate // the batch as a 1PC. 
We're returning early instead of preferring a // possible retriable error because we might want to leave locks behind in // case of retriable errors - which Require1PC does not want. return result.Result{}, roachpb.NewTransactionStatusError("could not commit in one phase as requested") } if args.Commit && args.Poison { return result.Result{}, errors.AssertionFailedf("cannot poison during a committing EndTxn request") } key := keys.TransactionKey(h.Txn.Key, h.Txn.ID) // Fetch existing transaction. var existingTxn roachpb.Transaction if ok, err := storage.MVCCGetProto( ctx, readWriter, key, hlc.Timestamp{}, &existingTxn, storage.MVCCGetOptions{}, ); err != nil { return result.Result{}, err } else if !ok { // No existing transaction record was found - create one by writing it // below in updateFinalizedTxn. reply.Txn = h.Txn.Clone() // Verify that it is safe to create the transaction record. We only need // to perform this verification for commits. Rollbacks can always write // an aborted txn record. if args.Commit { if err := CanCreateTxnRecord(ctx, cArgs.EvalCtx, reply.Txn); err != nil { return result.Result{}, err } } } else { // We're using existingTxn on the reply, although it can be stale // compared to the Transaction in the request (e.g. the Sequence, // and various timestamps). We must be careful to update it with the // supplied ba.Txn if we return it with an error which might be // retried, as for example to avoid client-side serializable restart. reply.Txn = &existingTxn // Verify that we can either commit it or abort it (according // to args.Commit), and also that the Timestamp and Epoch have // not suffered regression. switch reply.Txn.Status { case roachpb.COMMITTED: // This can happen if the coordinator had left the transaction in the // implicitly committed state, and is now coming to clean it up. Someone // else must have performed the STAGING->COMMITTED transition in the // meantime. 
The TransactionStatusError is going to be handled by the // txnCommitter interceptor. log.VEventf(ctx, 2, "transaction found to be already committed") return result.Result{}, roachpb.NewTransactionCommittedStatusError() case roachpb.ABORTED: if !args.Commit { // The transaction has already been aborted by other. // Do not return TransactionAbortedError since the client anyway // wanted to abort the transaction. desc := cArgs.EvalCtx.Desc() resolvedLocks, externalLocks, err := resolveLocalLocks(ctx, desc, readWriter, ms, args, reply.Txn, cArgs.EvalCtx) if err != nil { return result.Result{}, err } if err := updateFinalizedTxn( ctx, readWriter, ms, key, args, reply.Txn, externalLocks, ); err != nil { return result.Result{}, err } // Use alwaysReturn==true because the transaction is definitely // aborted, no matter what happens to this command. res := result.FromEndTxn(reply.Txn, true /* alwaysReturn */, args.Poison) res.Local.ResolvedLocks = resolvedLocks return res, nil } // If the transaction was previously aborted by a concurrent writer's // push, any intents written are still open. It's only now that we know // them, so we return them all for asynchronous resolution (we're // currently not able to write on error, but see #1989). // // Similarly to above, use alwaysReturn==true. The caller isn't trying // to abort, but the transaction is definitely aborted and its locks // can go. reply.Txn.LockSpans = args.LockSpans return result.FromEndTxn(reply.Txn, true /* alwaysReturn */, args.Poison), roachpb.NewTransactionAbortedError(roachpb.ABORT_REASON_ABORTED_RECORD_FOUND) case roachpb.PENDING, roachpb.STAGING: if h.Txn.Epoch < reply.Txn.Epoch { return result.Result{}, errors.AssertionFailedf( "programming error: epoch regression: %d", h.Txn.Epoch) } default: return result.Result{}, errors.AssertionFailedf("bad txn status: %s", reply.Txn) } // Update the existing txn with the supplied txn. 
reply.Txn.Update(h.Txn) } // Attempt to commit or abort the transaction per the args.Commit parameter. if args.Commit { if retry, reason, extraMsg := IsEndTxnTriggeringRetryError(reply.Txn, args); retry { return result.Result{}, roachpb.NewTransactionRetryError(reason, extraMsg) } // If the transaction needs to be staged as part of an implicit commit // before being explicitly committed, write the staged transaction // record and return without running commit triggers or resolving local // locks. if args.IsParallelCommit() { // It's not clear how to combine transaction recovery with commit // triggers, so for now we don't allow them to mix. This shouldn't // cause any issues and the txn coordinator knows not to mix them. if ct := args.InternalCommitTrigger; ct != nil { err := errors.Errorf("cannot stage transaction with a commit trigger: %+v", ct) return result.Result{}, err } reply.Txn.Status = roachpb.STAGING reply.StagingTimestamp = reply.Txn.WriteTimestamp if err := updateStagingTxn(ctx, readWriter, ms, key, args, reply.Txn); err != nil { return result.Result{}, err } return result.Result{}, nil } // Else, the transaction can be explicitly committed. reply.Txn.Status = roachpb.COMMITTED } else { reply.Txn.Status = roachpb.ABORTED } // Resolve locks on the local range synchronously so that their resolution // ends up in the same Raft entry. There should always be at least one because // we position the transaction record next to the first write of a transaction. // This avoids the need for the intentResolver to have to return to this range // to resolve locks for this transaction in the future. 
desc := cArgs.EvalCtx.Desc() resolvedLocks, externalLocks, err := resolveLocalLocks(ctx, desc, readWriter, ms, args, reply.Txn, cArgs.EvalCtx) if err != nil { return result.Result{}, err } if err := updateFinalizedTxn(ctx, readWriter, ms, key, args, reply.Txn, externalLocks); err != nil { return result.Result{}, err } // Note: there's no need to clear the AbortSpan state if we've successfully // finalized a transaction, as there's no way in which an abort cache entry // could have been written (the txn would already have been in // state=ABORTED). // // Summary of transaction replay protection after EndTxn: When a // transactional write gets replayed over its own resolved intents, the // write will succeed but only as an intent with a newer timestamp (with a // WriteTooOldError). However, the replayed intent cannot be resolved by a // subsequent replay of this EndTxn call because the txn timestamp will be // too old. Replays of requests which attempt to create a new txn record // (HeartbeatTxn or EndTxn) never succeed because EndTxn inserts in the // timestamp cache in Replica's updateTimestampCache method, forcing // the call to CanCreateTxnRecord to return false, resulting in a // transaction retry error. If the replay didn't attempt to create a txn // record, any push will immediately succeed as a missing txn record on push // where CanCreateTxnRecord returns false succeeds. In both cases, the txn // will be GC'd on the slow path. // // We specify alwaysReturn==false because if the commit fails below Raft, we // don't want the locks to be up for resolution. That should happen only if // the commit actually happens; otherwise, we risk losing writes. txnResult := result.FromEndTxn(reply.Txn, false /* alwaysReturn */, args.Poison) txnResult.Local.UpdatedTxns = []*roachpb.Transaction{reply.Txn} txnResult.Local.ResolvedLocks = resolvedLocks // Run the rest of the commit triggers if successfully committed. 
	if reply.Txn.Status == roachpb.COMMITTED {
		triggerResult, err := RunCommitTrigger(
			ctx, cArgs.EvalCtx, readWriter.(storage.Batch), ms, args, reply.Txn,
		)
		if err != nil {
			// A failed commit trigger cannot be retried by the client; it is
			// surfaced as replica corruption.
			return result.Result{}, roachpb.NewReplicaCorruptionError(err)
		}
		if err := txnResult.MergeAndDestroy(triggerResult); err != nil {
			return result.Result{}, err
		}
	} else if reply.Txn.Status == roachpb.ABORTED {
		// If this is the system config span and we're aborted, add a trigger to
		// potentially gossip now that we've removed an intent. This is important
		// to deal with cases where previously committed values were not gossipped
		// due to an outstanding intent.
		if cArgs.EvalCtx.ContainsKey(keys.SystemConfigSpan.Key) {
			txnResult.Local.MaybeGossipSystemConfigIfHaveFailure = true
		}
	}

	return txnResult, nil
}

// IsEndTxnExceedingDeadline returns true if the transaction exceeded its
// deadline. A nil deadline never expires.
func IsEndTxnExceedingDeadline(t hlc.Timestamp, args *roachpb.EndTxnRequest) bool {
	return args.Deadline != nil && args.Deadline.LessEq(t)
}

// IsEndTxnTriggeringRetryError returns true if the EndTxnRequest cannot be
// committed and needs to return a TransactionRetryError. It also returns the
// reason and possibly an extra message to be used for the error.
func IsEndTxnTriggeringRetryError(
	txn *roachpb.Transaction, args *roachpb.EndTxnRequest,
) (retry bool, reason roachpb.TransactionRetryReason, extraMsg string) {
	// If we saw any WriteTooOldErrors, we must restart to avoid lost
	// update anomalies.
	if txn.WriteTooOld {
		retry, reason = true, roachpb.RETRY_WRITE_TOO_OLD
	} else {
		readTimestamp := txn.ReadTimestamp
		isTxnPushed := txn.WriteTimestamp != readTimestamp
		// Return a transaction retry error if the commit timestamp isn't equal to
		// the txn timestamp.
		if isTxnPushed {
			retry, reason = true, roachpb.RETRY_SERIALIZABLE
		}
	}
	// A transaction must obey its deadline, if set.
	if !retry && IsEndTxnExceedingDeadline(txn.WriteTimestamp, args) {
		exceededBy := txn.WriteTimestamp.GoTime().Sub(args.Deadline.GoTime())
		extraMsg = fmt.Sprintf(
			"txn timestamp pushed too much; deadline exceeded by %s (%s > %s)",
			exceededBy, txn.WriteTimestamp, args.Deadline)
		retry, reason = true, roachpb.RETRY_COMMIT_DEADLINE_EXCEEDED
	}
	return retry, reason, extraMsg
}

// lockResolutionBatchSize caps how many locks resolveLocalLocks resolves
// synchronously in a single batch; the remainder are handed off for
// asynchronous resolution.
const lockResolutionBatchSize = 500

// iterManager provides a storage.IterAndBuf appropriate for working with a
// span of keys that are either all local or all global keys, identified by
// the start key of the span, that is passed to getIterAndBuf. This is to deal
// with the constraint that a single MVCCIterator using
// MVCCKeyAndIntentsIterKind can either iterate over local keys or global
// keys, but not both. We don't wish to create a new iterator for each span,
// so iterManager lazily creates a new one when needed.
type iterManager struct {
	reader              storage.Reader
	globalKeyUpperBound roachpb.Key
	iterAndBuf          storage.IterAndBuf

	// iter is the currently-open iterator, nil until the first call to
	// getIterAndBuf; isLocalIter records which side (local vs global
	// keyspace) it was created for.
	iter        storage.MVCCIterator
	isLocalIter bool
}

// getIterAndBuf returns an IterAndBuf whose iterator can position over key.
// The current iterator is reused when key falls on the same side (local vs
// global keyspace) as the previous call; otherwise it is closed and a new
// iterator with the appropriate upper bound is created.
func (im *iterManager) getIterAndBuf(key roachpb.Key) storage.IterAndBuf {
	isLocal := keys.IsLocal(key)
	if im.iter != nil {
		if im.isLocalIter == isLocal {
			return im.iterAndBuf
		}
		im.iterAndBuf.SwitchIter(nil /* iter */)
		im.iter.Close()
		im.iter = nil
	}
	if isLocal {
		im.iter = im.reader.NewMVCCIterator(
			storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{
				UpperBound: keys.LocalMax,
			})
		im.isLocalIter = true
		im.iterAndBuf.SwitchIter(im.iter)
	} else {
		im.iter = im.reader.NewMVCCIterator(
			storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{
				UpperBound: im.globalKeyUpperBound,
			})
		im.isLocalIter = false
		im.iterAndBuf.SwitchIter(im.iter)
	}
	return im.iterAndBuf
}

// Close releases the buffer and any iterator owned by the iterManager.
func (im *iterManager) Close() {
	im.iterAndBuf.Cleanup()
	im.iterAndBuf = storage.IterAndBuf{}
	im.iter = nil
}

// resolveLocalLocks synchronously resolves any locks that are local to this
// range in the same batch and returns those lock spans. The remainder are
// collected and returned so that they can be handed off to asynchronous
// processing. Note that there is a maximum lock resolution allowance of
// lockResolutionBatchSize meant to avoid creating a batch which is too large
// for Raft. Any local locks which exceed the allowance are treated as
// external and are resolved asynchronously with the external locks.
func resolveLocalLocks(
	ctx context.Context,
	desc *roachpb.RangeDescriptor,
	readWriter storage.ReadWriter,
	ms *enginepb.MVCCStats,
	args *roachpb.EndTxnRequest,
	txn *roachpb.Transaction,
	evalCtx EvalContext,
) (resolvedLocks []roachpb.LockUpdate, externalLocks []roachpb.Span, _ error) {
	if mergeTrigger := args.InternalCommitTrigger.GetMergeTrigger(); mergeTrigger != nil {
		// If this is a merge, then use the post-merge descriptor to determine
		// which locks are local (note that for a split, we want to use the
		// pre-split one instead because it's larger).
		desc = &mergeTrigger.LeftDesc
	}

	iterManager := &iterManager{
		reader:              readWriter,
		globalKeyUpperBound: desc.EndKey.AsRawKey(),
		iterAndBuf:          storage.GetBufUsingIter(nil),
	}
	defer iterManager.Close()

	var resolveAllowance int64 = lockResolutionBatchSize
	if args.InternalCommitTrigger != nil {
		// If this is a system transaction (such as a split or merge), don't enforce the resolve allowance.
		// These transactions rely on having their locks resolved synchronously.
		resolveAllowance = math.MaxInt64
	}
	for _, span := range args.LockSpans {
		if err := func() error {
			if resolveAllowance == 0 {
				externalLocks = append(externalLocks, span)
				return nil
			}
			update := roachpb.MakeLockUpdate(txn, span)
			if len(span.EndKey) == 0 {
				// For single-key lock updates, do a KeyAddress-aware check of
				// whether it's contained in our Range.
				if !kvserverbase.ContainsKey(desc, span.Key) {
					externalLocks = append(externalLocks, span)
					return nil
				}
				resolveMS := ms
				ok, err := storage.MVCCResolveWriteIntentUsingIter(
					ctx, readWriter, iterManager.getIterAndBuf(span.Key), resolveMS, update)
				if err != nil {
					return err
				}
				// Only charge the allowance when an intent was actually
				// resolved; the update is reported as resolved either way.
				if ok {
					resolveAllowance--
				}
				resolvedLocks = append(resolvedLocks, update)
				return nil
			}
			// For update ranges, cut into parts inside and outside our key
			// range. Resolve locally inside, delegate the rest. In particular,
			// an update range for range-local data is correctly considered local.
			inSpan, outSpans := kvserverbase.IntersectSpan(span, desc)
			externalLocks = append(externalLocks, outSpans...)
			if inSpan != nil {
				update.Span = *inSpan
				num, resumeSpan, err := storage.MVCCResolveWriteIntentRangeUsingIter(
					ctx, readWriter, iterManager.getIterAndBuf(update.Span.Key), ms, update, resolveAllowance)
				if err != nil {
					return err
				}
				if evalCtx.EvalKnobs().NumKeysEvaluatedForRangeIntentResolution != nil {
					atomic.AddInt64(evalCtx.EvalKnobs().NumKeysEvaluatedForRangeIntentResolution, num)
				}
				resolveAllowance -= num
				if resumeSpan != nil {
					// A resume span is only returned once the allowance has
					// been fully consumed; the unresolved tail is delegated
					// to asynchronous resolution.
					if resolveAllowance != 0 {
						log.Fatalf(ctx, "expected resolve allowance to be exactly 0 resolving %s; got %d", update.Span, resolveAllowance)
					}
					update.EndKey = resumeSpan.Key
					externalLocks = append(externalLocks, *resumeSpan)
				}
				resolvedLocks = append(resolvedLocks, update)
				return nil
			}
			return nil
		}(); err != nil {
			return nil, nil, errors.Wrapf(err, "resolving lock at %s on end transaction [%s]", span, txn.Status)
		}
	}

	removedAny := resolveAllowance != lockResolutionBatchSize
	if WriteAbortSpanOnResolve(txn.Status, args.Poison, removedAny) {
		if err := UpdateAbortSpan(ctx, evalCtx, readWriter, ms, txn.TxnMeta, args.Poison); err != nil {
			return nil, nil, err
		}
	}
	return resolvedLocks, externalLocks, nil
}

// updateStagingTxn persists the STAGING transaction record with updated status
// (and possibly timestamp). It persists the record with the EndTxn request's
// declared in-flight writes along with all of the transaction's (local and
// remote) locks.
func updateStagingTxn(
	ctx context.Context,
	readWriter storage.ReadWriter,
	ms *enginepb.MVCCStats,
	key []byte,
	args *roachpb.EndTxnRequest,
	txn *roachpb.Transaction,
) error {
	txn.LockSpans = args.LockSpans
	txn.InFlightWrites = args.InFlightWrites
	txnRecord := txn.AsRecord()
	return storage.MVCCPutProto(ctx, readWriter, ms, key, hlc.Timestamp{}, nil /* txn */, &txnRecord)
}

// updateFinalizedTxn persists the COMMITTED or ABORTED transaction record with
// updated status (and possibly timestamp). If we've already resolved all locks
// locally, we actually delete the record right away - no use in keeping it
// around.
func updateFinalizedTxn(
	ctx context.Context,
	readWriter storage.ReadWriter,
	ms *enginepb.MVCCStats,
	key []byte,
	args *roachpb.EndTxnRequest,
	txn *roachpb.Transaction,
	externalLocks []roachpb.Span,
) error {
	if txnAutoGC && len(externalLocks) == 0 {
		if log.V(2) {
			log.Infof(ctx, "auto-gc'ed %s (%d locks)", txn.Short(), len(args.LockSpans))
		}
		return storage.MVCCDelete(ctx, readWriter, ms, key, hlc.Timestamp{}, nil /* txn */)
	}
	// Only the still-unresolved (external) locks are recorded on the
	// finalized record; in-flight writes are cleared since the txn is no
	// longer STAGING.
	txn.LockSpans = externalLocks
	txn.InFlightWrites = nil
	txnRecord := txn.AsRecord()
	return storage.MVCCPutProto(ctx, readWriter, ms, key, hlc.Timestamp{}, nil /* txn */, &txnRecord)
}

// RunCommitTrigger runs the commit trigger from an end transaction request.
func RunCommitTrigger(
	ctx context.Context,
	rec EvalContext,
	batch storage.Batch,
	ms *enginepb.MVCCStats,
	args *roachpb.EndTxnRequest,
	txn *roachpb.Transaction,
) (result.Result, error) {
	ct := args.InternalCommitTrigger
	if ct == nil {
		// No trigger attached to this EndTxn; nothing to do.
		return result.Result{}, nil
	}

	if ct.GetSplitTrigger() != nil {
		newMS, trigger, err := splitTrigger(
			ctx, rec, batch, *ms, ct.SplitTrigger, txn.WriteTimestamp,
		)
		// splitTrigger computes replacement stats; overwrite rather than
		// accumulate into ms.
		*ms = newMS
		return trigger, err
	}
	if mt := ct.GetMergeTrigger(); mt != nil {
		return mergeTrigger(ctx, rec, batch, ms, mt, txn.WriteTimestamp)
	}
	if crt := ct.GetChangeReplicasTrigger(); crt != nil {
		// TODO(tbg): once we support atomic replication changes, check that
		// crt.Added() and crt.Removed() don't intersect (including mentioning
		// the same replica more than once individually) because it would be
		// silly (though possible) to have to attach semantics to that.
		return changeReplicasTrigger(ctx, rec, batch, crt), nil
	}
	if ct.GetModifiedSpanTrigger() != nil {
		var pd result.Result
		if ct.ModifiedSpanTrigger.SystemConfigSpan {
			// Check if we need to gossip the system config.
			// NOTE: System config gossiping can only execute correctly if
			// the transaction record is located on the range that contains
			// the system span. If a transaction is created which modifies
			// both system *and* non-system data, it should be ensured that
			// the transaction record itself is on the system span. This can
			// be done by making sure a system key is the first key touched
			// in the transaction.
			if rec.ContainsKey(keys.SystemConfigSpan.Key) {
				if err := pd.MergeAndDestroy(
					result.Result{
						Local: result.LocalResult{
							MaybeGossipSystemConfig: true,
						},
					},
				); err != nil {
					return result.Result{}, err
				}
			} else {
				log.Errorf(ctx, "System configuration span was modified, but the "+
					"modification trigger is executing on a non-system range. "+
					"Configuration changes will not be gossiped.")
			}
		}
		if nlSpan := ct.ModifiedSpanTrigger.NodeLivenessSpan; nlSpan != nil {
			if err := pd.MergeAndDestroy(
				result.Result{
					Local: result.LocalResult{
						MaybeGossipNodeLiveness: nlSpan,
					},
				},
			); err != nil {
				return result.Result{}, err
			}
		}
		return pd, nil
	}
	if sbt := ct.GetStickyBitTrigger(); sbt != nil {
		// Rewrite the range descriptor's sticky bit in the replicated state;
		// an empty StickyBit clears it.
		newDesc := *rec.Desc()
		if !sbt.StickyBit.IsEmpty() {
			newDesc.StickyBit = &sbt.StickyBit
		} else {
			newDesc.StickyBit = nil
		}
		var res result.Result
		res.Replicated.State = &kvserverpb.ReplicaState{
			Desc: &newDesc,
		}
		return res, nil
	}

	// An unrecognized trigger is a programming error; fail fast.
	log.Fatalf(ctx, "unknown commit trigger: %+v", ct)
	return result.Result{}, nil
}

// splitTrigger is called on a successful commit of a transaction
// containing an AdminSplit operation. It copies the AbortSpan for
// the new range and recomputes stats for both the existing, left hand
// side (LHS) range and the right hand side (RHS) range. For
// performance it only computes the stats for the original range (the
// left hand side) and infers the RHS stats by subtracting from the
// original stats. We compute the LHS stats because the split key
// computation ensures that we do not create large LHS
// ranges. However, this optimization is only possible if the stats
// are fully accurate. If they contain estimates, stats for both the
// LHS and RHS are computed.
//
// Splits are complicated. A split is initiated when a replica receives an
// AdminSplit request. Note that this request (and other "admin" requests)
// differs from normal requests in that it doesn't go through Raft but instead
// allows the lease holder Replica to act as the orchestrator for the
// distributed transaction that performs the split. As such, this request is
// only executed on the lease holder replica and the request is redirected to
// the lease holder if the recipient is a follower.
// // Splits do not require the lease for correctness (which is good, because we // only check that the lease is held at the beginning of the operation, and // have no way to ensure that it is continually held until the end). Followers // could perform splits too, and the only downside would be that if two splits // were attempted concurrently (or a split and a ChangeReplicas), one would // fail. The lease is used to designate one replica for this role and avoid // wasting time on splits that may fail. // // The processing of splits is divided into two phases. The first phase occurs // in Replica.AdminSplit. In that phase, the split-point is computed, and a // transaction is started which updates both the LHS and RHS range descriptors // and the meta range addressing information. (If we're splitting a meta2 range // we'll be updating the meta1 addressing, otherwise we'll be updating the // meta2 addressing). That transaction includes a special SplitTrigger flag on // the EndTxn request. Like all transactions, the requests within the // transaction are replicated via Raft, including the EndTxn request. // // The second phase of split processing occurs when each replica for the range // encounters the SplitTrigger. Processing of the SplitTrigger happens below, // in Replica.splitTrigger. The processing of the SplitTrigger occurs in two // stages. The first stage operates within the context of an engine.Batch and // updates all of the on-disk state for the old and new ranges atomically. The // second stage is invoked when the batch commits and updates the in-memory // state, creating the new replica in memory and populating its timestamp cache // and registering it with the store. // // There is lots of subtlety here. The easy scenario is that all of the // replicas process the SplitTrigger before processing any Raft message for RHS // (right hand side) of the newly split range. 
Something like: // // Node A Node B Node C // ---------------------------------------------------- // range 1 | | | // | | | // SplitTrigger | | // | SplitTrigger | // | | SplitTrigger // | | | // ---------------------------------------------------- // split finished on A, B and C | | // | | | // range 2 | | | // | ---- MsgVote --> | | // | ---------------------- MsgVote ---> | // // But that ideal ordering is not guaranteed. The split is "finished" when two // of the replicas have appended the end-txn request containing the // SplitTrigger to their Raft log. The following scenario is possible: // // Node A Node B Node C // ---------------------------------------------------- // range 1 | | | // | | | // SplitTrigger | | // | SplitTrigger | // | | | // ---------------------------------------------------- // split finished on A and B | | // | | | // range 2 | | | // | ---- MsgVote --> | | // | --------------------- MsgVote ---> ??? // | | | // | | SplitTrigger // // In this scenario, C will create range 2 upon reception of the MsgVote from // A, though locally that span of keys is still part of range 1. This is // possible because at the Raft level ranges are identified by integer IDs and // it isn't until C receives a snapshot of range 2 from the leader that it // discovers the span of keys it covers. In order to prevent C from fully // initializing range 2 in this instance, we prohibit applying a snapshot to a // range if the snapshot overlaps another range. See Store.canApplySnapshotLocked. // // But while a snapshot may not have been applied at C, an uninitialized // Replica was created. An uninitialized Replica is one which belongs to a Raft // group but for which the range descriptor has not been received. This Replica // will have participated in the Raft elections. When we're creating the new // Replica below we take control of this uninitialized Replica and stop it from // responding to Raft messages by marking it "destroyed". 
Note that we use the // Replica.mu.destroyed field for this, but we don't do everything that // Replica.Destroy does (so we should probably rename that field in light of // its new uses). In particular we don't touch any data on disk or leave a // tombstone. This is especially important because leaving a tombstone would // prevent the legitimate recreation of this replica. // // There is subtle synchronization here that is currently controlled by the // Store.processRaft goroutine. In particular, the serial execution of // Replica.handleRaftReady by Store.processRaft ensures that an uninitialized // RHS won't be concurrently executing in Replica.handleRaftReady because we're // currently running on that goroutine (i.e. Replica.splitTrigger is called on // the processRaft goroutine). // // TODO(peter): The above synchronization needs to be fixed. Using a single // goroutine for executing Replica.handleRaftReady is undesirable from a // performance perspective. Likely we will have to add a mutex to Replica to // protect handleRaftReady and to grab that mutex below when marking the // uninitialized Replica as "destroyed". Hopefully we'll also be able to remove // Store.processRaftMu. // // Note that in this more complex scenario, A (which performed the SplitTrigger // first) will create the associated Raft group for range 2 and start // campaigning immediately. It is possible for B to receive MsgVote requests // before it has applied the SplitTrigger as well. Both B and C will vote for A // (and preserve the records of that vote in their HardState). It is critically // important for Raft correctness that we do not lose the records of these // votes. After electing A the Raft leader for range 2, A will then attempt to // send a snapshot to B and C and we'll fall into the situation above where a // snapshot is received for a range before it has finished splitting from its // sibling and is thus rejected. 
// An interesting subtlety here: A will send a snapshot to B and C because when
// range 2 is initialized we were careful to synthesize its HardState with its
// Raft log index set to 10. If we had instead used log index 0, Raft would
// have believed the group to be empty, but the RHS has something. Using a
// non-zero initial log index causes Raft to believe that there is a discarded
// prefix to the log and will thus send a snapshot to followers.
//
// A final point of clarification: when we split a range we're splitting the
// data the range contains. But we're not forking or splitting the associated
// Raft group. Instead, we're creating a new Raft group to control the RHS of
// the split. That Raft group is starting from an empty Raft log (positioned at
// log entry 10) and a snapshot of the RHS of the split range.
//
// After the split trigger returns, the on-disk state of the right-hand side
// will be suitable for instantiating the right hand side Replica, and
// a suitable trigger is returned, along with the updated stats which represent
// the LHS delta caused by the split (i.e. all writes in the current batch
// which went to the left-hand side, minus the kv pairs which moved to the
// RHS).
//
// These stats are suitable for returning up the callstack like those for
// regular commands; the corresponding delta for the RHS is part of the
// returned trigger and is handled by the Store.
func splitTrigger(
	ctx context.Context,
	rec EvalContext,
	batch storage.Batch,
	bothDeltaMS enginepb.MVCCStats,
	split *roachpb.SplitTrigger,
	ts hlc.Timestamp,
) (enginepb.MVCCStats, result.Result, error) {
	// Sanity-check that the trigger's descriptors exactly cover this range.
	desc := rec.Desc()
	if !bytes.Equal(desc.StartKey, split.LeftDesc.StartKey) ||
		!bytes.Equal(desc.EndKey, split.RightDesc.EndKey) {
		return enginepb.MVCCStats{}, result.Result{}, errors.Errorf("range does not match splits: (%s-%s) + (%s-%s) != %s",
			split.LeftDesc.StartKey, split.LeftDesc.EndKey,
			split.RightDesc.StartKey, split.RightDesc.EndKey, desc)
	}

	// Compute the absolute stats for the (post-split) LHS. No more
	// modifications to it are allowed after this line.
	leftMS, err := rditer.ComputeStatsForRange(&split.LeftDesc, batch, ts.WallTime)
	if err != nil {
		return enginepb.MVCCStats{}, result.Result{}, errors.Wrap(err, "unable to compute stats for LHS range after split")
	}
	log.Event(ctx, "computed stats for left hand side range")

	h := splitStatsHelperInput{
		AbsPreSplitBothEstimated: rec.GetMVCCStats(),
		DeltaBatchEstimated:      bothDeltaMS,
		AbsPostSplitLeft:         leftMS,
		// AbsPostSplitRightFn computes the RHS stats lazily; the helper only
		// invokes it when the pre-split stats contain estimates (see the
		// splitTrigger doc comment).
		AbsPostSplitRightFn: func() (enginepb.MVCCStats, error) {
			rightMS, err := rditer.ComputeStatsForRange(
				&split.RightDesc, batch, ts.WallTime,
			)
			return rightMS, errors.Wrap(err, "unable to compute stats for RHS range after split")
		},
	}
	return splitTriggerHelper(ctx, rec, batch, h, split, ts)
}

// splitTriggerHelper continues the work begun by splitTrigger, but has a
// reduced scope that has all stats-related concerns bundled into a
// splitStatsHelper.
func splitTriggerHelper(
	ctx context.Context,
	rec EvalContext,
	batch storage.Batch,
	statsInput splitStatsHelperInput,
	split *roachpb.SplitTrigger,
	ts hlc.Timestamp,
) (enginepb.MVCCStats, result.Result, error) {
	// TODO(d4l3k): we should check which side of the split is smaller
	// and compute stats for it instead of having a constraint that the
	// left hand side is smaller.

	// NB: the replicated post-split left hand keyspace is frozen at this point.
	// Only the RHS can be mutated (and we do so to seed its state).

	// Copy the last replica GC timestamp. This value is unreplicated,
	// which is why the MVCC stats are set to nil on calls to
	// MVCCPutProto.
	replicaGCTS, err := rec.GetLastReplicaGCTimestamp(ctx)
	if err != nil {
		return enginepb.MVCCStats{}, result.Result{}, errors.Wrap(err, "unable to fetch last replica GC timestamp")
	}
	if err := storage.MVCCPutProto(ctx, batch, nil, keys.RangeLastReplicaGCTimestampKey(split.RightDesc.RangeID), hlc.Timestamp{}, nil, &replicaGCTS); err != nil {
		return enginepb.MVCCStats{}, result.Result{}, errors.Wrap(err, "unable to copy last replica GC timestamp")
	}

	h, err := makeSplitStatsHelper(statsInput)
	if err != nil {
		return enginepb.MVCCStats{}, result.Result{}, err
	}

	// Initialize the RHS range's AbortSpan by copying the LHS's.
	if err := rec.AbortSpan().CopyTo(
		ctx, batch, batch, h.AbsPostSplitRight(), ts, split.RightDesc.RangeID,
	); err != nil {
		return enginepb.MVCCStats{}, result.Result{}, err
	}
	if !rec.ClusterSettings().Version.IsActive(ctx, clusterversion.AbortSpanBytes) {
		// Since the stats here are used to seed the initial state for the RHS
		// replicas, we need to be careful about zero-ing out the abort span
		// bytes if the cluster version introducing it is not yet active. Not
		// doing so can result in inconsistencies in MVCCStats across replicas
		// in a mixed-version cluster.
		h.AbsPostSplitRight().AbortSpanBytes = 0
	}

	// Note: we don't copy the queue last processed times. This means
	// we'll process the RHS range in consistency and time series
	// maintenance queues again possibly sooner than if we copied. The
	// intent is to limit post-raft logic.

	// Now that we've computed the stats for the RHS so far, we persist them.
	// This looks a bit more complicated than it really is: updating the stats
	// also changes the stats, and we write not only the stats but a complete
	// initial state. Additionally, since bothDeltaMS is tracking writes to
	// both sides, we need to update it as well.
	{
		// Various pieces of code rely on a replica's lease never being uninitialized,
		// but it's more than that - it ensures that we properly initialize the
		// timestamp cache, which is only populated on the lease holder, from that
		// of the original Range. We found out about a regression here the hard way
		// in #7899. Prior to this block, the following could happen:
		// - a client reads key 'd', leaving an entry in the timestamp cache on the
		//   lease holder of [a,e) at the time, node one.
		// - the range [a,e) splits at key 'c'. [c,e) starts out without a lease.
		// - the replicas of [a,e) on nodes one and two both process the split
		//   trigger and thus copy their timestamp caches to the new right-hand side
		//   Replica. However, only node one's timestamp cache contains information
		//   about the read of key 'd' in the first place.
		// - node two becomes the lease holder for [c,e). Its timestamp cache does
		//   not know about the read at 'd' which happened at the beginning.
		// - node two can illegally propose a write to 'd' at a lower timestamp.
		//
		// TODO(tschottdorf): why would this use r.store.Engine() and not the
		// batch? We do the same thing for other usages of the state loader.
sl := MakeStateLoader(rec) leftLease, err := sl.LoadLease(ctx, rec.Engine()) if err != nil { return enginepb.MVCCStats{}, result.Result{}, errors.Wrap(err, "unable to load lease") } if leftLease.Empty() { log.Fatalf(ctx, "LHS of split has no lease") } replica, found := split.RightDesc.GetReplicaDescriptor(leftLease.Replica.StoreID) if !found { return enginepb.MVCCStats{}, result.Result{}, errors.Errorf( "pre-split lease holder %+v not found in post-split descriptor %+v", leftLease.Replica, split.RightDesc, ) } rightLease := leftLease rightLease.Replica = replica gcThreshold, err := sl.LoadGCThreshold(ctx, rec.Engine()) if err != nil { return enginepb.MVCCStats{}, result.Result{}, errors.Wrap(err, "unable to load GCThreshold") } if gcThreshold.IsEmpty() { log.VEventf(ctx, 1, "LHS's GCThreshold of split is not set") } // We're about to write the initial state for the replica. We migrated // the formerly replicated truncated state into unreplicated keyspace // in 19.1, but this range may still be using the replicated version // and we need to make a decision about what to use for the RHS that // is consistent across the followers: do for the RHS what the LHS // does: if the LHS has the legacy key, initialize the RHS with a // legacy key as well. // // See VersionUnreplicatedRaftTruncatedState. truncStateType := stateloader.TruncatedStateUnreplicated if found, err := storage.MVCCGetProto( ctx, batch, keys.RaftTruncatedStateLegacyKey(rec.GetRangeID()), hlc.Timestamp{}, nil, storage.MVCCGetOptions{}, ); err != nil { return enginepb.MVCCStats{}, result.Result{}, errors.Wrap(err, "unable to load legacy truncated state") } else if found { truncStateType = stateloader.TruncatedStateLegacyReplicated } replicaVersion, err := sl.LoadVersion(ctx, rec.Engine()) if err != nil { return enginepb.MVCCStats{}, result.Result{}, errors.Wrap(err, "unable to load GCThreshold") } // Writing the initial state is subtle since this also seeds the Raft // group. 
It becomes more subtle due to proposer-evaluated Raft. // // We are writing to the right hand side's Raft group state in this // batch so we need to synchronize with anything else that could be // touching that replica's Raft state. Specifically, we want to prohibit // an uninitialized Replica from receiving a message for the right hand // side range and performing raft processing. This is achieved by // serializing execution of uninitialized Replicas in Store.processRaft // and ensuring that no uninitialized Replica is being processed while // an initialized one (like the one currently being split) is being // processed. // // Since the right hand side of the split's Raft group may already // exist, we must be prepared to absorb an existing HardState. The Raft // group may already exist because other nodes could already have // processed the split and started talking to our node, prompting the // creation of a Raft group that can vote and bump its term, but not // much else: it can't receive snapshots because those intersect the // pre-split range; it can't apply log commands because it needs a // snapshot first. // // However, we can't absorb the right-hand side's HardState here because // we only *evaluate* the proposal here, but by the time it is // *applied*, the HardState could have changed. We do this downstream of // Raft, in splitPostApply, where we write the last index and the // HardState via a call to synthesizeRaftState. Here, we only call // writeInitialReplicaState which essentially writes a ReplicaState // only. 
	// Seed the RHS replica's state. rightLease, gcThreshold, truncStateType,
	// and replicaVersion were all derived from the LHS above so that every
	// follower evaluating this proposal writes an identical RHS state.
	*h.AbsPostSplitRight(), err = stateloader.WriteInitialReplicaState(
		ctx, batch, *h.AbsPostSplitRight(), split.RightDesc,
		rightLease, *gcThreshold, truncStateType, replicaVersion,
	)
	if err != nil {
		return enginepb.MVCCStats{}, result.Result{}, errors.Wrap(err, "unable to write initial Replica state")
	}
	}

	var pd result.Result
	pd.Replicated.Split = &kvserverpb.Split{
		SplitTrigger: *split,
		// NB: the RHSDelta is identical to the stats for the newly created right
		// hand side range (i.e. it goes from zero to its stats).
		RHSDelta: *h.AbsPostSplitRight(),
	}

	deltaPostSplitLeft := h.DeltaPostSplitLeft()
	return deltaPostSplitLeft, pd, nil
}

// mergeTrigger is called on a successful commit of an AdminMerge transaction.
// It calculates stats for the LHS by merging in RHS stats, and copies over the
// abort span entries from the RHS.
func mergeTrigger(
	ctx context.Context,
	rec EvalContext,
	batch storage.Batch,
	ms *enginepb.MVCCStats,
	merge *roachpb.MergeTrigger,
	ts hlc.Timestamp,
) (result.Result, error) {
	// Sanity-check that this replica's descriptor is consistent with the
	// merge: same start key, and an end key strictly below the merged range's.
	desc := rec.Desc()
	if !bytes.Equal(desc.StartKey, merge.LeftDesc.StartKey) {
		return result.Result{}, errors.AssertionFailedf("LHS range start keys do not match: %s != %s",
			desc.StartKey, merge.LeftDesc.StartKey)
	}
	if !desc.EndKey.Less(merge.LeftDesc.EndKey) {
		return result.Result{}, errors.AssertionFailedf("original LHS end key is not less than the post merge end key: %s >= %s",
			desc.EndKey, merge.LeftDesc.EndKey)
	}

	// Copy the RHS's abort span entries into the LHS's range-ID keyspace so
	// that transaction aborts remain visible after the merge.
	if err := abortspan.New(merge.RightDesc.RangeID).CopyTo(
		ctx, batch, batch, ms, ts, merge.LeftDesc.RangeID,
	); err != nil {
		return result.Result{}, err
	}

	// If we collected a read summary from the right-hand side when freezing it,
	// merge that summary into the left-hand side's prior read summary. In the
	// usual case, the RightReadSummary in the MergeTrigger will be used to
	// update the left-hand side's leaseholder's timestamp cache when applying
	// the merge trigger's Raft log entry. However, if the left-hand side's
	// leaseholder hears about the merge through a Raft snapshot, the merge
	// trigger will not be available, so it will need to use the range's prior
	// read summary to update its timestamp cache to ensure that it does not
	// serve any writes that invalidate previous reads served on the right-hand
	// side range. See TestStoreRangeMergeTimestampCache for an example of where
	// this behavior is necessary.
	//
	// This communication from the RHS to the LHS is handled differently from
	// how we copy over the abortspan. In this case, the read summary is passed
	// through the SubsumeResponse and into the MergeTrigger. In the abortspan's
	// case, we read from local RHS replica (which may not be the leaseholder)
	// directly in this method. The primary reason why these are different is
	// because the RHS's persistent read summary may not be up-to-date, as it is
	// not updated by the SubsumeRequest.
	readSumActive := rec.ClusterSettings().Version.IsActive(ctx, clusterversion.PriorReadSummaries)
	if merge.RightReadSummary != nil && readSumActive {
		mergedSum := merge.RightReadSummary.Clone()
		if priorSum, err := readsummary.Load(ctx, batch, rec.GetRangeID()); err != nil {
			return result.Result{}, err
		} else if priorSum != nil {
			mergedSum.Merge(*priorSum)
		}
		if err := readsummary.Set(ctx, batch, rec.GetRangeID(), ms, mergedSum); err != nil {
			return result.Result{}, err
		}
	}

	// The stats for the merged range are the sum of the LHS and RHS stats, less
	// the RHS's replicated range ID stats. The only replicated range ID keys we
	// copy from the RHS are the keys in the abort span, and we've already
	// accounted for those stats above.
	ms.Add(merge.RightMVCCStats)
	{
		ridPrefix := keys.MakeRangeIDReplicatedPrefix(merge.RightDesc.RangeID)
		// NB: Range-ID local keys have no versions and no intents.
		iter := batch.NewMVCCIterator(storage.MVCCKeyIterKind, storage.IterOptions{UpperBound: ridPrefix.PrefixEnd()})
		defer iter.Close()
		sysMS, err := iter.ComputeStats(ridPrefix, ridPrefix.PrefixEnd(), 0 /* nowNanos */)
		if err != nil {
			return result.Result{}, err
		}
		// Subtract the RHS's replicated range-ID stats since those keys are
		// not carried over into the merged range.
		ms.Subtract(sysMS)
	}

	var pd result.Result
	pd.Replicated.Merge = &kvserverpb.Merge{
		MergeTrigger: *merge,
	}
	return pd, nil
}

// changeReplicasTrigger records the side effects of a committed replica
// change: it requests follow-up work (split queue, first-range gossip) and
// publishes the new descriptor and the change itself in the replicated result.
func changeReplicasTrigger(
	_ context.Context, rec EvalContext, _ storage.Batch, change *roachpb.ChangeReplicasTrigger,
) result.Result {
	var pd result.Result
	// After a successful replica addition or removal check to see if the
	// range needs to be split. Splitting usually takes precedence over
	// replication via configuration of the split and replicate queues, but
	// if the split occurs concurrently with the replicas change the split
	// can fail and won't retry until the next scanner cycle. Re-queuing
	// the replica here removes that latency.
	pd.Local.MaybeAddToSplitQueue = true

	// Gossip the first range whenever the range descriptor changes. We also
	// gossip the first range whenever the lease holder changes, but that might
	// not have occurred if a replica was being added or the non-lease-holder
	// replica was being removed. Note that we attempt the gossiping even from
	// the removed replica in case it was the lease-holder and it is still
	// holding the lease.
	pd.Local.GossipFirstRange = rec.IsFirstRange()

	pd.Replicated.State = &kvserverpb.ReplicaState{
		Desc: change.Desc,
	}
	pd.Replicated.ChangeReplicas = &kvserverpb.ChangeReplicas{
		ChangeReplicasTrigger: *change,
	}

	return pd
}

// txnAutoGC controls whether Transaction entries are automatically gc'ed upon
// EndTxn if they only have local locks (which can be resolved synchronously
// with EndTxn). Certain tests become simpler with this being turned off.
var txnAutoGC = true

// TestingSetTxnAutoGC is used in tests to temporarily enable/disable
// txnAutoGC.
func TestingSetTxnAutoGC(to bool) func() { prev := txnAutoGC txnAutoGC = to return func() { txnAutoGC = prev } }
package main

import (
	"fmt"
	"gitgo/src/test/geometry"
	"image/color"
)

// main exercises the geometry package: free functions vs. methods vs. method
// expressions, a Path walk, map-backed Values helpers, and struct embedding.
func main() {
	a := geometry.Point{1, 2}
	b := geometry.Point{4, 6}
	fmt.Println(geometry.Distance(b, a))
	fmt.Println(b.Distance(a))

	// Method expression: Distance detached from any particular receiver.
	distFn := geometry.Point.Distance
	fmt.Println(distFn(a, b))

	// Perimeter of a triangle.
	triangle := geometry.Path{
		{1, 1},
		{5, 1},
		{5, 4},
		{1, 1},
	}
	fmt.Println(triangle)
	triangle.ChangeV()
	fmt.Println(triangle)
	fmt.Println(triangle.Distance())

	// map[string][]string helpers.
	vals := geometry.Values{
		"Aa": {"A1", "A2"},
		"Bb": {"B1", "B2", "B3"},
	}
	fmt.Println(vals)
	vals.Add("Aa", "A3")
	fmt.Println(vals)
	fmt.Println(vals.GetFirst("Aa"))
	vals.ChangeMpV("Bb", []string{"c1", "c2"})
	fmt.Println(vals)
	vals.ChangeSV("Aa", "c3")
	fmt.Println(vals)
	fmt.Println(vals.GetFirst("Aa"))

	// NewV repoints the callee's copy of the map reference at a fresh Values;
	// the caller's variable is unaffected, because only the copy of the
	// reference inside the method changes.
	vals.NewV()
	fmt.Println(vals)

	// Section 6.3: extending a type by embedding a struct.
	red := color.RGBA{255, 0, 0, 255}
	blue := color.RGBA{0, 0, 255, 255}
	cpA := geometry.ColoredPoint{&geometry.Point{1, 1}, red}
	cpB := geometry.ColoredPoint{&geometry.Point{5, 4}, blue}
	fmt.Println(cpB)
	fmt.Println(cpA)
	fmt.Println(cpA.Distance(*cpB.Point))
	fmt.Println(cpB)
	fmt.Println(cpA)
	cpA.ScaleBy(2)
	fmt.Println(cpB)
	fmt.Println(cpA)
	cpB.ScaleBy(2)
	fmt.Println(cpB)
	fmt.Println(cpA)
	fmt.Println(cpA.Distance(*cpB.Point))
	fmt.Println(cpB)
	fmt.Println(cpA)

	// Aliasing the embedded pointer: scaling through cpA is now visible
	// through cpB as well.
	cpB.Point = cpA.Point
	cpA.ScaleBy(2)
	fmt.Println(*cpB.Point)
	fmt.Println(*cpA.Point)
}
package matcher

import (
	jsonEnc "encoding/json"
	"errors"
	"sync"

	"github.com/antonmedv/expr"
	"github.com/antonmedv/expr/vm"
	"github.com/mylxsw/adanos-alert/internal/repository"
	"github.com/mylxsw/adanos-alert/pkg/helper"
	"github.com/mylxsw/adanos-alert/pkg/json"
	"github.com/mylxsw/adanos-alert/pkg/misc"
)

// InvalidReturnVal is returned when a rule expression evaluates to something
// other than a boolean.
var InvalidReturnVal = errors.New("invalid return value: must be a bool value")

// EventWrap decorates a repository.Event with helper functions that rule
// expressions can call during evaluation.
type EventWrap struct {
	repository.Event
	helper.Helpers

	fullJSONOnce sync.Once
	fullJSON     string
}

// NewEventWrap wraps the given event for rule evaluation.
func NewEventWrap(message repository.Event) *EventWrap {
	return &EventWrap{Event: message}
}

// FullJSON renders the whole event as a JSON document, computing it at most
// once per wrapper instance.
func (ew *EventWrap) FullJSON() string {
	ew.fullJSONOnce.Do(func() {
		encoded, _ := jsonEnc.Marshal(ew)
		ew.fullJSON = string(encoded)
	})
	return ew.fullJSON
}

// JsonGet parses the event content as JSON and returns the string value at
// key, falling back to defaultValue.
func (ew *EventWrap) JsonGet(key string, defaultValue string) string {
	return json.Gets(key, defaultValue, ew.Content)
}

// IsRecovery reports whether this is a recovery event.
func (ew *EventWrap) IsRecovery() bool {
	return ew.Type == repository.EventTypeRecovery
}

// IsRecoverable reports whether this is a recoverable event.
func (ew *EventWrap) IsRecoverable() bool {
	return ew.Type == repository.EventTypeRecoverable
}

// IsPlain reports whether this is a plain event (including the legacy empty
// type, which is treated as plain).
func (ew *EventWrap) IsPlain() bool {
	switch ew.Type {
	case repository.EventTypePlain, "":
		return true
	}
	return false
}

// EventMatcher evaluates a rule's match and ignore expressions against events.
type EventMatcher struct {
	matchProgram  *vm.Program
	ignoreProgram *vm.Program
	rule          repository.Rule
}

// NewEventMatcher compiles the rule's expressions into an EventMatcher.
// An empty match rule defaults to "true"; an empty ignore rule to "false".
// Expression language: https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md
func NewEventMatcher(rule repository.Rule) (*EventMatcher, error) {
	compile := func(code string) (*vm.Program, error) {
		return compileBool(code)
	}

	matchProg, err := compile(misc.IfElse(rule.Rule == "", "true", rule.Rule).(string))
	if err != nil {
		return nil, err
	}

	ignoreProg, err := compile(misc.IfElse(rule.IgnoreRule == "", "false", rule.IgnoreRule).(string))
	if err != nil {
		return nil, err
	}

	return &EventMatcher{matchProgram: matchProg, ignoreProgram: ignoreProg, rule: rule}, nil
}

// compileBool compiles an expression in the EventWrap environment, requiring
// a boolean result.
func compileBool(code string) (*vm.Program, error) {
	return expr.Compile(
		code,
		expr.Env(&EventWrap{}),
		expr.AsBool(),
	)
}

// Match evaluates the event against the rule. It returns whether the event
// matched, whether a matching event should be ignored, and any evaluation
// error.
func (m *EventMatcher) Match(evt repository.Event) (matched bool, ignored bool, err error) {
	wrapped := NewEventWrap(evt)

	res, err := expr.Run(m.matchProgram, wrapped)
	if err != nil {
		return false, false, err
	}

	hit, isBool := res.(bool)
	if !isBool {
		return false, false, InvalidReturnVal
	}
	if !hit {
		return false, false, nil
	}

	// Only consult the ignore expression for events that matched.
	ignoreRes, err := expr.Run(m.ignoreProgram, wrapped)
	if err != nil {
		return true, false, err
	}
	return true, ignoreRes.(bool), nil
}

// Rule returns the original rule object this matcher was built from.
func (m *EventMatcher) Rule() repository.Rule {
	return m.rule
}
package main

// main loops forever. The original body was `for ;;_ { }`, which is not valid
// Go (a blank identifier cannot appear as the post statement) and does not
// compile; the empty `for {}` below is the well-formed equivalent.
// NOTE(review): if this file was an intentional parser-error fixture, the
// original text should be restored — TODO confirm intent.
func main() {
	for {
	}
}
package main

import (
	"os"
)

// Name is the command name reported by the tool.
const Name = "gch"

// Version is the current release version.
const Version = "0.1.1"

// main wires the CLI to the process's standard streams and exits with the
// status code produced by the command run.
func main() {
	app := &CLI{
		outStream: os.Stdout,
		errStream: os.Stderr,
	}
	os.Exit(app.Run(os.Args))
}
package main

import (
	"bufio"
	"fmt"
	"os"
	"reflect"
)

// main is a manual acceptance check for the PO: it drives the console Ui
// through a scripted sequence of three stimuli ("A", "B", "C") and prints the
// resulting hit count.
func main() {
	ui := Ui{}
	cfg := Cfg{
		N:           3,
		Reizdauer:   2000,
		AnzahlReize: 10,
		Probant:     "Peter",
	}

	// onStart shows the first stimulus once the user confirms the configuration.
	onStart := func() {
		r := Reiz{
			Buchstabe: "A",
			Index:     1,
			Anzahl:    10,
		}
		ui.Reiz(r)
	}

	antwortCount := 0 // answers received so far
	trefferCount := 0 // "repeat" answers counted as hits

	// onAntwort tallies the answer and presents the next scripted stimulus,
	// or the final result once the sequence is exhausted.
	onAntwort := func(a Antwort) {
		if a == Wiederholung {
			trefferCount++
		}
		r := Reiz{}
		switch antwortCount {
		case 0:
			r = Reiz{
				Buchstabe: "B",
				Index:     2,
				Anzahl:    10,
			}
		case 1:
			r = Reiz{
				Buchstabe: "C",
				Index:     3,
				Anzahl:    10,
			}
		}
		// The zero Reiz marks the end of the scripted sequence.
		if reflect.DeepEqual(r, Reiz{}) {
			ui.Ergebnis(Ergebnis{trefferCount})
		} else {
			antwortCount++
			ui.Reiz(r)
		}
	}

	ui.OnStart = onStart
	ui.OnAntwort = onAntwort
	ui.Config(cfg)
}

// Cfg holds the test parameters shown to the user before the run starts.
type Cfg struct {
	N           int    // n-back depth
	Reizdauer   int    // stimulus duration in milliseconds
	AnzahlReize int    // total number of stimuli
	Probant     string // participant name
}

// Reiz is a single stimulus: the letter shown and its position in the run.
type Reiz struct {
	Buchstabe string
	Index     int
	Anzahl    int
}

// Ergebnis is the final result: the number of hits.
type Ergebnis struct {
	Treffer int
}

// Antwort encodes the participant's answer to a stimulus.
type Antwort int

const (
	// Wiederholung: the participant says the letter repeats ("w").
	Wiederholung Antwort = 1 << iota
	// Neu: the participant says the letter is new (" ").
	Neu
)

// StartCallback is invoked when the user confirms the configuration.
type StartCallback func()

// AntwortCallback is invoked with the participant's answer to a stimulus.
type AntwortCallback func(Antwort)

// Ui is a minimal console front end driven through the two callbacks.
type Ui struct {
	OnStart   StartCallback
	OnAntwort AntwortCallback
}

// Config prints the test parameters, waits for Enter on stdin, then fires
// OnStart.
func (u *Ui) Config(c Cfg) {
	out := fmt.Sprintf("Testparameter:\n N: %d\n Reizdauer: %d msec\n Anzahl Reize: %d\n Probant: %s\n", c.N, c.Reizdauer, c.AnzahlReize, c.Probant)
	fmt.Println(out)
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan() // waits for \n on stdin
	u.OnStart()
}

// Reiz displays a stimulus, reads the participant's answer from stdin
// ("w" = repeat, " " = new, anything else = no answer), then fires OnAntwort.
func (u *Ui) Reiz(r Reiz) {
	out := fmt.Sprintf("Reiz %d/%d: %s.....\r", r.Index, r.Anzahl, r.Buchstabe)
	// BUG FIX: was fmt.Printf(out) — a non-constant format string (go vet
	// error); any '%' in the data would have been misinterpreted.
	fmt.Print(out)
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()         // waits for \n on stdin
	txt := scanner.Text()  // everything up to the \n
	antwort := Antwort(0)
	if txt == "w" {
		antwort = Wiederholung
	} else if txt == " " {
		antwort = Neu
	}
	u.OnAntwort(antwort)
}

// Ergebnis prints the final hit count.
func (u *Ui) Ergebnis(e Ergebnis) {
	out := fmt.Sprintf("Ergebnis %d\n", e.Treffer)
	fmt.Println(out)
}
package main

import (
	"net"
	"net/http"
	"net/url"
	"testing"

	"github.com/DonnchaC/oniongateway/util"
)

// TestNewRedirect starts the redirecting HTTP server on an ephemeral local
// port and verifies that a plain-HTTP request is answered with a 301 redirect
// to the same host over HTTPS.
func TestNewRedirect(t *testing.T) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("Failed to create a listener: %s", err)
	}
	// BUG FIX: the defer used to precede the error check; on failure
	// net.Listen returns a nil listener and the deferred Close would panic.
	defer listener.Close()
	server, err := NewRedirect(listener.Addr().String(), ":443")
	if err != nil {
		t.Fatalf("Failed to create redirecting HTTP server: %s", err)
	}
	go server.Serve(listener)

	// Check what the server returns.
	hostAndPort := listener.Addr().String()
	host, _, err := net.SplitHostPort(hostAndPort)
	if err != nil {
		t.Fatalf("Failed to extract host from %s: %s", hostAndPort, err)
	}
	// IgnoreRedirect stops the client from following the redirect so we can
	// inspect the raw 301 response.
	client := http.Client{CheckRedirect: util.IgnoreRedirect}
	theURL := &url.URL{
		Scheme: "http",
		Host:   hostAndPort,
	}
	response, err := client.Get(theURL.String())
	if !util.IsRedirectError(err) {
		t.Fatalf("Expected to get redirect error, but got %s", err)
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer response.Body.Close()
	if response.StatusCode != http.StatusMovedPermanently {
		t.Fatalf(
			"Wrong HTTP status returned: %d instead of %d",
			response.StatusCode,
			http.StatusMovedPermanently,
		)
	}
	nextURL, err := response.Location()
	if err != nil {
		t.Fatalf("Failed to get location of redirect: %s", err)
	}
	if nextURL.Scheme != "https" {
		t.Fatalf("Expected scheme https, but got %s", nextURL.Scheme)
	}
	if nextURL.Host != host {
		t.Fatalf("Expected host %q, but got %q", host, nextURL.Host)
	}
}
package model // Author ... type Author struct { ID uint `db:"id"` Name string `db:"name"` Year string `db:"year"` }
package user

import (
	"database/sql"
	"errors"
	"time"

	"github.com/uw-thalesians/perceptia-servers/gateway/gateway/session"

	mssql "github.com/denisenkom/go-mssqldb"
	uuid "github.com/satori/go.uuid"
)

// MsSqlStore implements user storage backed by a Microsoft SQL Server
// database; all operations go through stored procedures (USP_*).
type MsSqlStore struct {
	database *sql.DB
}

// NewMsSqlStore constructs a new MsSqlStore.
// If *sql.DB is nil, function will return an error.
func NewMsSqlStore(db *sql.DB) (*MsSqlStore, error) {
	if db == nil {
		return nil, errors.New("NewMsSqlStore: db cannot be nil")
	}
	return &MsSqlStore{
		db,
	}, nil
}

// userInfo is the scan target for user rows returned by the stored procedures.
type userInfo struct {
	Uuid        mssql.UniqueIdentifier
	Username    string
	DisplayName string
}

// sessionInfo is the scan target for session rows.
// NOTE(review): not referenced by any implemented method in this file yet —
// presumably reserved for the session TODOs below.
type sessionInfo struct {
	Uuid      uuid.UUID
	SessionId session.SessionID
	Status    string
	Created   time.Time
}

// CREATE /////////////////////////////////////////////////////////////////////////////////////////////////////////

// CreateSession creates a new session entry with the provided session information.
//TODO: func (ms *MsSqlStore) CreateSession(sessionUuid uuid.UUID, sessionId session.SessionID) error

// CreateUserSession creates a new session entry and associates it with the given user.
//TODO: func (ms *MsSqlStore) CreateUserSession(userUuid uuid.UUID, sessionUuid uuid.UUID, sessionId session.SessionID) error

// CreateUserSessionAssociation associates an existing session entry with the given user.
//TODO: func (ms *MsSqlStore) CreateUserSessionAssociation(userUuid uuid.UUID, sessionUuid uuid.UUID) error // CreateUser will add the new user to the database func (ms *MsSqlStore) CreateUser(newUser *NewUser) (*User, error) { user := User{} userInfo := userInfo{} userUuid := uuid.NewV4() sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(userUuid.String()) if errSUID != nil { return &user, errSUID } stmt, errPS := ms.database.Prepare("USP_CreateUser") if errPS != nil { return &user, ErrPreparingQuery } defer stmt.Close() errQ := stmt.QueryRow( sql.Named("UserUuid", sqlUuid), sql.Named("Username", newUser.Username), sql.Named("FullName", newUser.FullName), sql.Named("DisplayName", newUser.DisplayName), sql.Named("EncodedHash", newUser.EncodedHash), ).Scan(&userInfo.Uuid, &userInfo.Username, &userInfo.DisplayName) if errQ != nil { if errQ == sql.ErrNoRows { return &user, ErrDidNotComplete } else if msErr, ok := errQ.(mssql.Error); ok { if msErr.Number == 50401 { return &user, ErrUserAlreadyExists } else if msErr.Number == 50402 { return &user, ErrUsernameUnavailable } } return &user, errQ } errUQ := user.Uuid.Scan(userInfo.Uuid.String()) if errUQ != nil { return &user, errUQ } user.DisplayName = userInfo.DisplayName user.Username = userInfo.Username return &user, nil } // CreateUserEmail adds the email to the given user's account //TODO: func (ms *MsSqlStore) CreateUserEmail(userUuid uuid.UUID, email string) error // READ ///////////////////////////////////////////////////////////////////////////////////////////////////////// // ReadProcedureVersion gets the procedure version implemented in the database. //TODO: func (ms *MsSqlStore) ReadProcedureVersion() *utility.SemVer // ReadUserActiveSessions gets the active sessions of the user by uuid. //TODO: func (ms *MsSqlStore) ReadUserActiveSessions(userUuid uuid.UUID) (TODO: Define Type, error) // ReadUserSessions gets all the sessions associated with the user. 
//TODO: func (ms *MsSqlStore) ReadUserSessions(userUuid uuid.UUID) (TODO: define type, error) // ReadUserDisplayName gets the display name for the given user. //TODO: func (ms *MsSqlStore) ReadUserDisplayName(userUuid uuid.UUID) (string, error) // ReadUserFullName gets the full name for the given user. //TODO: func (ms *MsSqlStore) ReadUserFullName(userUuid uuid.UUID) // ReadUserEmails gets the list of emails associated with the user. //TODO: func (ms *MsSqlStore) ReadUserEmails(userUuid uuid.UUID) (TODO: define type, error) // ReadUserEncodedHash gets the encoded hash of the users password. func (ms *MsSqlStore) ReadUserEncodedHash(username string) (string, error) { stmt, errPS := ms.database.Prepare("USP_ReadUserEncodedHash") if errPS != nil { return InvalidEncodedPasswordHash, ErrPreparingQuery } defer stmt.Close() encodedHash := "" errQ := stmt.QueryRow(sql.Named("Username", username)).Scan(&encodedHash) if errQ != nil { if msErr, ok := errQ.(mssql.Error); ok { if msErr.Number == 50101 { return InvalidEncodedPasswordHash, ErrUnexpected } else if msErr.Number == 50301 { return InvalidEncodedPasswordHash, ErrUserNotFound } } else { return InvalidEncodedPasswordHash, ErrUnexpected } } return encodedHash, nil } // ReadUserInfo gets the basic information about the user. 
func (ms *MsSqlStore) ReadUserInfo(userUuid uuid.UUID) (*User, error) { user := User{} userInfo := userInfo{} sqlUuid := mssql.UniqueIdentifier{} errUI := sqlUuid.Scan(userUuid.String()) if errUI != nil { return &user, ErrUnexpected } stmt, errPS := ms.database.Prepare("USP_ReadUserInfo") if errPS != nil { return &user, ErrPreparingQuery } defer stmt.Close() errQ := stmt.QueryRow(sql.Named("UserUuid", sqlUuid)).Scan( &userInfo.Uuid, &userInfo.Username, &userInfo.DisplayName) if errQ != nil { if msErr, ok := errQ.(mssql.Error); ok { if msErr.Number == 50101 { return &user, ErrUnexpected } else if msErr.Number == 50301 { return &user, ErrUserNotFound } else { return &user, ErrUnexpected } } } errUQ := user.Uuid.Scan(userInfo.Uuid.String()) if errUQ != nil { return &user, ErrUnexpected } user.DisplayName = userInfo.DisplayName user.Username = userInfo.Username return &user, nil } // ReadUserProfile gets the profile information for the user. //TODO: func (ms *MsSqlStore) ReadUserProfile(userUuid uuid.UUID) (TODO: define type, error) // ReadUserUsername gets the username for the given user. //TODO: func (ms *MsSqlStore) ReadUserUsername(userUuid uuid.UUID) (string, error) // ReadUserUsernamesByEmail gets the usernames associated with a given email. //TODO: func (ms *MsSqlStore) ReadUserUsernamesByEmail(email string) (TODO: define type, error) // ReadUserUuid gets the uuid for the user based on the given username. 
func (ms *MsSqlStore) ReadUserUuid(username string) (*uuid.UUID, error) { sqlUuid := mssql.UniqueIdentifier{} stmt, errPS := ms.database.Prepare("USP_ReadUserUuid") if errPS != nil { return nil, ErrPreparingQuery } defer stmt.Close() errQ := stmt.QueryRow(sql.Named("Username", username)).Scan( &sqlUuid) if errQ != nil { if msErr, ok := errQ.(mssql.Error); ok { if msErr.Number == 50101 { return nil, ErrUnexpected } else if msErr.Number == 50301 { return nil, ErrUserNotFound } else { return nil, ErrUnexpected } } } userUuid := uuid.NewV4() errUQ := userUuid.Scan(sqlUuid.String()) if errUQ != nil { return nil, ErrUnexpected } return &userUuid, nil } // UPDATE ///////////////////////////////////////////////////////////////////////////////////////////////////////// // UpdateSessionExpired sets the given session's status to "Expired". //TODO: func (ms *MsSqlStore) UpdateSessionExpired(sessionUuid uuid.UUID) error // UpdateUserDisplayName updates the display name of the user. //TODO: func (ms *MsSqlStore) UpdateUserDisplayName(displayName string) error // UpdateUserEncodedHash updates the encoded hash associated with the user. //TODO: func (ms *MsSqlStore) UpdateUserEncodedHash(userUuid uuid.UUID, encodedHash string) error // UpdateUserFullName updates the full name of the user. //TODO: func (ms *MsSqlStore) UpdateUserFullName(userUuid uuid.UUID, fullName string) error // UpdateUserProfileBio updates the bio associated with the user's profile. //TODO: func (ms *MsSqlStore) UpdateUserProfileBio(userUuid uuid.UUID, bio string) error // UpdateUserProfileGravatarUrl updates the gravatar url associated with the user's profile. //TODO: func (ms *MsSqlStore) UpdateUserProfileGravatarUrl(userUuid uuid.UUID, gravatarUrl string) error // UpdateUserProfileSharingBio updates the public sharing preference for the bio // associated with the user's profile. 
//TODO: func (ms *MsSqlStore) UpdateUserProfileSharingBio(userUuid uuid.UUID, share bool) error // UpdateUserProfileSharingDisplayName updates the public sharing preference for the display name // associated with the user's profile. //TODO: func (ms *MsSqlStore) UpdateUserProfileSharingDisplayName(userUuid uuid.UUID, share bool) error // UpdateUserProfileSharingGravatarUrl updates the public sharing preference for the gravatar url // associated with the user's profile. //TODO: func (ms *MsSqlStore) UpdateUserProfileSharingGravatarUrl(userUuid uuid.UUID, share bool) error // UpdateUserUsername updates the username for the given user. //TODO: func (ms *MsSqlStore) UpdateUserUsername(userUuid uuid.UUID, username string) error // DELETE ///////////////////////////////////////////////////////////////////////////////////////////////////////// // DeleteUser removes the user from the database. func (ms *MsSqlStore) DeleteUser(userUuid uuid.UUID) error { sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(userUuid.String()) if errSUID != nil { return ErrUnexpected } stmt, errPS := ms.database.Prepare("USP_DeleteUser") if errPS != nil { return ErrPreparingQuery } defer stmt.Close() rs, errQ := stmt.Exec(sql.Named("UserUuid", sqlUuid)) if errQ != nil { if mssqlerr, ok := errQ.(mssql.Error); ok { if mssqlerr.Number == 50101 { return ErrUnexpected } else if mssqlerr.Number == 50301 { return ErrUserNotFound } } else { return ErrUnexpected } } if ra, errRA := rs.RowsAffected(); errRA != nil && ra < 1 { return errors.New("unexpected error: no rows deleted") } return nil } // DeleteUserEmail removes the given email from the users account. 
//TODO: func (ms *MsSqlStore) DeleteUserEmail(userUuid uuid.UUID, email string) // DeleteSession removes the given session from the list //TODO: func (ms *MsSqlStore) DeleteSession(sessionUuid uuid.UUID) error /*func (ms *MsSqlStore) GetByUuid(uuid uuid.UUID) (*User, error) { user := User{} userInfo := userInfo{} sqlUuid := mssql.UniqueIdentifier{} errUI := sqlUuid.Scan(uuid.String()) if errUI != nil { return &user, errUI } stmt, errPS := ms.database.Prepare("USP_GetUserInfoByUuid") if errPS != nil { return &user, errPS } defer stmt.Close() errQ := stmt.QueryRow(sql.Named("Uuid", sqlUuid)).Scan( &userInfo.Uuid, &userInfo.Username, &userInfo.FullName, &userInfo.DisplayName) if errQ != nil { if errQ == sql.ErrNoRows { return &user, ErrUserNotFound } else { return &user, errQ } } errUQ := user.Uuid.Scan(userInfo.Uuid.String()) if errUQ != nil { return &user, errUQ } user.DisplayName = userInfo.DisplayName user.FullName = userInfo.FullName user.Username = userInfo.Username return &user, nil }*/ /*func (ms *MsSqlStore) GetActiveSessionsByUuid(uuid uuid.UUID) (*session.Sessions, error) { var sessions session.Sessions sqlUuid := mssql.UniqueIdentifier{} errUI := sqlUuid.Scan(uuid.String()) if errUI != nil { return &sessions, errUI } stmt, errPS := ms.database.Prepare("USP_GetUserSessionsByUuid") if errPS != nil { return &sessions, errPS } defer stmt.Close() // TODO: //errQ := stmt.QueryRow(sql.Named("Uuid", sqlUuid)).Scan( // &userInfo.Uuid, &userInfo.Username, &userInfo.FullName, &userInfo.DisplayName) //if errQ != nil { // if errQ == sql.ErrNoRows { // return &user, ErrUserNotFound // } else { // return &user, errQ // } //} //errUQ := user.Uuid.Scan(userInfo.Uuid.String()) //if errUQ != nil { // return &user, errUQ //} //user.DisplayName = userInfo.DisplayName //user.FullName = userInfo.FullName //user.Username = userInfo.Username return &sessions, nil }*/ /*func (ms *MsSqlStore) GetByUsername(username string) (*User, error) { user := User{} userInfo := userInfo{} stmt, 
errPS := ms.database.Prepare("USP_GetUserInfoByUsername") if errPS != nil { return &user, errPS } defer stmt.Close() errQ := stmt.QueryRow(sql.Named("Username", username)).Scan( &userInfo.Uuid, &userInfo.Username, &userInfo.FullName, &userInfo.DisplayName) if errQ != nil { if errQ == sql.ErrNoRows { return &user, ErrUserNotFound } else { return &user, errQ } } errUQ := user.Uuid.Scan(userInfo.Uuid.String()) if errUQ != nil { return &user, errUQ } user.DisplayName = userInfo.DisplayName user.FullName = userInfo.FullName user.Username = userInfo.Username return &user, nil }*/ /*func (ms *MsSqlStore) GetEncodedHashByUsername(username string) (string, error) { stmt, errPS := ms.database.Prepare("USP_GetUserEncodedHashByUsername") if errPS != nil { return InvalidEncodedPasswordHash, errPS } defer stmt.Close() encodedHash := "" errQ := stmt.QueryRow(sql.Named("Username", username)).Scan(&encodedHash) if errQ != nil { if errQ == sql.ErrNoRows { return InvalidEncodedPasswordHash, ErrUserNotFound } else { return InvalidEncodedPasswordHash, errQ } } return encodedHash, nil }*/ /*func (ms *MsSqlStore) Insert(newUser *NewUser) (*User, error) { user := User{} userInfo := userInfo{} userUuid := uuid.NewV4() sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(userUuid.String()) if errSUID != nil { return &user, errSUID } stmt, errPS := ms.database.Prepare("USP_CreateUser") if errPS != nil { return &user, errPS } defer stmt.Close() errQ := stmt.QueryRow( sql.Named("Uuid", sqlUuid), sql.Named("Username", newUser.Username), sql.Named("FullName", newUser.FullName), sql.Named("DisplayName", newUser.DisplayName), sql.Named("EncodedHash", newUser.EncodedHash), ).Scan(&userInfo.Uuid, &userInfo.Username, &userInfo.FullName, &userInfo.DisplayName) if errQ != nil { if errQ == sql.ErrNoRows { return &user, ErrUserNotFound } return &user, errQ } errUQ := user.Uuid.Scan(userInfo.Uuid.String()) if errUQ != nil { return &user, errUQ } user.DisplayName = userInfo.DisplayName user.FullName 
= userInfo.FullName user.Username = userInfo.Username return &user, nil } func (ms *MsSqlStore) InsertEmail(uuid uuid.UUID, email string) error { sqlUuid := mssql.UniqueIdentifier{} errUI := sqlUuid.Scan(uuid.String()) if errUI != nil { return errUI } stmt, errPS := ms.database.Prepare("USP_AddUserEmail") if errPS != nil { return errPS } defer stmt.Close() rs, errQ := stmt.Exec(sql.Named("Uuid", sqlUuid), sql.Named("Email", email)) if errQ != nil { if mssqlerr, ok := errQ.(mssql.Error); ok { if mssqlerr.Number == 50201 { return ErrUserNotFound } } else { return errQ } } if ra, errRA := rs.RowsAffected(); errRA != nil && ra < 1 { return errors.New("unexpected error: no rows updated") } return nil }*/ /*func (ms *MsSqlStore) UpdateFullName(uuid uuid.UUID, fullName string) (*User, error) { user := User{} sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(uuid.String()) if errSUID != nil { return &user, errSUID } stmt, errPS := ms.database.Prepare("USP_UpdateUserFullName") if errPS != nil { return &user, errPS } defer stmt.Close() rs, errQ := stmt.Exec( sql.Named("Uuid", sqlUuid), sql.Named("FullName", fullName), ) if errQ != nil { if mssqlerr, ok := errQ.(mssql.Error); ok { if mssqlerr.Number == 50201 { return &user, ErrUserNotFound } } return &user, errQ } if ra, errRA := rs.RowsAffected(); errRA != nil && ra < 1 { return &user, errors.New("unexpected error: no rows updated") } userRet, errGU := ms.GetByUuid(uuid) if errGU != nil { return &user, errGU } return userRet, nil }*/ /*func (ms *MsSqlStore) UpdateDisplayName(uuid uuid.UUID, displayName string) (*User, error) { user := User{} sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(uuid.String()) if errSUID != nil { return &user, errSUID } stmt, errPS := ms.database.Prepare("USP_UpdateUserDisplayName") if errPS != nil { return &user, errPS } defer stmt.Close() rs, errQ := stmt.Exec( sql.Named("Uuid", sqlUuid), sql.Named("DisplayName", displayName), ) if errQ != nil { if mssqlerr, ok := 
errQ.(mssql.Error); ok { if mssqlerr.Number == 50201 { return &user, ErrUserNotFound } } return &user, errQ } if ra, errRA := rs.RowsAffected(); errRA != nil && ra < 1 { return &user, errors.New("unexpected error: no rows updated") } userRet, errGU := ms.GetByUuid(uuid) if errGU != nil { return &user, errGU } return userRet, nil }*/ /*func (ms *MsSqlStore) UpdateEncodedHash(uuid uuid.UUID, encodedHash string) error { sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(uuid.String()) if errSUID != nil { return errSUID } stmt, errPS := ms.database.Prepare("USP_UpdateUserEncodedHash") if errPS != nil { return errPS } defer stmt.Close() rs, errQ := stmt.Exec( sql.Named("Uuid", sqlUuid), sql.Named("EncodedHash", encodedHash), ) if errQ != nil { if mssqlerr, ok := errQ.(mssql.Error); ok { if mssqlerr.Number == 50201 { return ErrUserNotFound } } return errQ } if ra, errRA := rs.RowsAffected(); errRA != nil && ra < 1 { return errors.New("unexpected error: no rows updated") } return nil }*/ /*func (ms *MsSqlStore) Delete(uuid uuid.UUID) error { sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(uuid.String()) if errSUID != nil { return errSUID } stmt, errPS := ms.database.Prepare("USP_DeleteUser") if errPS != nil { return errPS } defer stmt.Close() rs, errQ := stmt.Exec(sql.Named("Uuid", sqlUuid)) if errQ != nil { if mssqlerr, ok := errQ.(mssql.Error); ok { if mssqlerr.Number == 50201 { return ErrUserNotFound } } else { return errQ } } if ra, errRA := rs.RowsAffected(); errRA != nil && ra < 1 { return errors.New("unexpected error: no rows deleted") } return nil }*/ /*func (ms *MsSqlStore) DeleteEmail(uuid uuid.UUID, email string) error { sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(uuid.String()) if errSUID != nil { return errSUID } stmt, errPS := ms.database.Prepare("USP_DeleteUserEmail") if errPS != nil { return errPS } defer stmt.Close() rs, errQ := stmt.Exec(sql.Named("Uuid", sqlUuid), sql.Named("Email", email)) if errQ != nil { if 
mssqlerr, ok := errQ.(mssql.Error); ok { if mssqlerr.Number == 50201 { return ErrUserNotFound } else if mssqlerr.Number == 50202 { return errors.New("provided email is not associated with users account") } } else { return errQ } } if ra, errRA := rs.RowsAffected(); errRA != nil && ra < 1 { return errors.New("unexpected error: no rows deleted") } return nil }*/ /*func (ms *MsSqlStore) DeleteSession(uuid uuid.UUID, session uuid.UUID) error { sqlUuid := mssql.UniqueIdentifier{} errSUID := sqlUuid.Scan(uuid.String()) if errSUID != nil { return errSUID } sqlSessionUuid := mssql.UniqueIdentifier{} errSSID := sqlUuid.Scan(session.String()) if errSSID != nil { return errSSID } stmt, errPS := ms.database.Prepare("USP_DeleteUserSession") if errPS != nil { return errPS } defer stmt.Close() rs, errQ := stmt.Exec(sql.Named("Uuid", sqlUuid), sql.Named("SessionUuid", sqlSessionUuid)) if errQ != nil { if mssqlerr, ok := errQ.(mssql.Error); ok { if mssqlerr.Number == 50201 { return ErrUserNotFound } else if mssqlerr.Number == 50202 { return errors.New("provided session uuid is not associated with user's account") } } else { return errQ } } if ra, errRA := rs.RowsAffected(); errRA != nil && ra < 1 { return errors.New("unexpected error: no rows deleted") } return nil }*/
package v1alpha1

import (
	"github.com/kotalco/kotal/apis/shared"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NodeSpec defines the desired state of Node
type NodeSpec struct {
	// Network is the Filecoin network the node will join and sync
	Network FilecoinNetwork `json:"network"`
	// Resources is node compute and storage resources
	shared.Resources `json:"resources,omitempty"`
}

// FilecoinNetwork is Filecoin network
// +kubebuilder:validation:Enum=mainnet;nerpa;butterfly;calibration
type FilecoinNetwork string

const (
	// MainNetwork is the Filecoin main network
	MainNetwork FilecoinNetwork = "mainnet"
	// NerpaNetwork is the Filecoin nerpa test network
	NerpaNetwork FilecoinNetwork = "nerpa"
	// ButterflyNetwork is the Filecoin butterfly test network
	ButterflyNetwork FilecoinNetwork = "butterfly"
	// CalibrationNetwork is the Filecoin calibration test network
	CalibrationNetwork FilecoinNetwork = "calibration"
)

// NodeStatus defines the observed state of Node
type NodeStatus struct {
	// Client is the Filecoin client implementation reported for this node.
	Client string `json:"client"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// Node is the Schema for the nodes API
// +kubebuilder:printcolumn:name="Network",type=string,JSONPath=".spec.network"
// +kubebuilder:printcolumn:name="Client",type=string,JSONPath=".status.client"
type Node struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   NodeSpec   `json:"spec,omitempty"`
	Status NodeStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// NodeList contains a list of Node
type NodeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Node `json:"items"`
}

// init registers Node and NodeList with the scheme builder so the
// controller runtime can serve them.
func init() {
	SchemeBuilder.Register(&Node{}, &NodeList{})
}
package main

import (
	"bufio"
	"fmt"
	"os"
	"unicode"
)

// main reads one line from stdin and prints "pangram" when it contains
// every letter a-z at least once (case-insensitive), "not pangram"
// otherwise.
func main() {
	in := bufio.NewReader(os.Stdin)
	input, _ := in.ReadString('\n')

	// Seed the set with all 26 letters unseen. A loop replaces the
	// hand-written 26-entry map literal of the original.
	alphabet := make(map[string]bool, 26)
	for c := 'a'; c <= 'z'; c++ {
		alphabet[string(c)] = false
	}

	// Mark only letters that are part of the tracked alphabet; this keeps
	// punctuation, whitespace and non-ASCII runes from inserting stray
	// keys into the set.
	for _, r := range input {
		lower := string(unicode.ToLower(r))
		if _, ok := alphabet[lower]; ok {
			alphabet[lower] = true
		}
	}

	if pangram(alphabet) {
		fmt.Println("pangram")
	} else {
		fmt.Println("not pangram")
	}
}

// pangram reports whether every entry of the letter-seen map is true,
// i.e. every tracked letter was observed in the input.
func pangram(runes map[string]bool) bool {
	for _, b := range runes {
		if !b {
			return false
		}
	}
	return true
}
package experiment

import "fmt"

// experimentNotFoundError reports that no experiment with the given
// name is registered.
type experimentNotFoundError struct {
	experimentName string
}

// Error implements the error interface.
func (e *experimentNotFoundError) Error() string {
	return `experiment: Experiment "` + e.experimentName + `" not found`
}

// taskRunError wraps the failure of a task belonging to the named
// experiment.
type taskRunError struct {
	experimentName string
	err            error
}

// Error implements the error interface.
func (e *taskRunError) Error() string {
	return fmt.Sprintf(`task: Failed to run task "%s": %v`, e.experimentName, e.err)
}
package domain

import (
	"time"

	"github.com/jeansferreira/api-b2w-planetas/helpers"
	"gopkg.in/mgo.v2/bson"
)

// Planeta is the representation of a created planet as persisted in MongoDB.
type Planeta struct {
	ID        bson.ObjectId `bson:"_id" json:"id,omitempty"`
	Nome      string        `bson:"nome" json:"nome,omitempty"`
	Clima     string        `bson:"clima" json:"clima,omitempty"`
	Terreno   string        `bson:"terreno" json:"terreno,omitempty"`
	Count     int           `bson:"count" json:"count"`
	CreatedAt *time.Time    `bson:"created_at" json:"created_at,omitempty"`
	UpdatedAt *time.Time    `bson:"updated_at" json:"updated_at,omitempty"`
	DeletedAt *time.Time    `bson:"deleted_at" json:"deleted_at,omitempty"`
}

// CriarPlaneta is the representation of a potential (not yet persisted) planet.
type CriarPlaneta struct {
	Nome    string `bson:"nome" json:"nome"`
	Clima   string `bson:"clima" json:"clima"`
	Terreno string `bson:"terreno" json:"terreno"`
}

// ToBson builds the insert document for a new planet.
//
// Warning about timezone issues:
// https://stackoverflow.com/questions/44873825/how-to-get-timestamp-of-utc-time-with-golang
func (c *CriarPlaneta) ToBson() bson.M {
	// Capture a single timestamp so created_at and updated_at are
	// identical on insert (two time.Now() calls could differ by
	// nanoseconds).
	now := time.Now()
	return bson.M{
		"nome":       c.Nome,
		"clima":      c.Clima,
		"terreno":    c.Terreno,
		"created_at": now,
		"updated_at": now,
	}
}

// ToBson builds the update document for an existing planet, preserving
// the original created_at and refreshing updated_at.
func (c *Planeta) ToBson() bson.M {
	return bson.M{
		"nome":       c.Nome,
		"clima":      c.Clima,
		"terreno":    c.Terreno,
		"created_at": c.CreatedAt,
		"updated_at": time.Now(),
	}
}

// Me returns the selector identifying this planet candidate by name.
func (c *CriarPlaneta) Me() bson.M {
	return bson.M{
		"nome": c.Nome,
	}
}

// Me returns the selector identifying this planet by its ObjectId.
func (c *Planeta) Me() bson.M {
	return bson.M{
		"_id": c.ID,
	}
}

// validarCamposPlaneta checks the required planet fields and returns the
// first validation error found. Shared by both IsValid implementations
// to avoid the duplicated validation logic of the original.
func validarCamposPlaneta(nome, terreno, clima string) (bool, error) {
	if nome == "" {
		return false, helpers.NewError("Planets must have a name")
	}
	if terreno == "" {
		return false, helpers.NewError("Planets must have a terrain")
	}
	if clima == "" {
		return false, helpers.NewError("Planets must have a weather")
	}
	return true, nil
}

// IsValid reports whether the new planet has all required fields.
func (c *CriarPlaneta) IsValid() (bool, error) {
	return validarCamposPlaneta(c.Nome, c.Terreno, c.Clima)
}

// IsValid reports whether the existing planet has all required fields.
func (c *Planeta) IsValid() (bool, error) {
	return validarCamposPlaneta(c.Nome, c.Terreno, c.Clima)
}

// CollectionName returns the MongoDB collection used for planets.
// Same idea from gorm, when "tableName" implements the gorm.Tabler interface
func (*Planeta) CollectionName() string {
	return "planetas"
}
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"reflect"
	"testing"
)

// StubPlayerStore is a hand-rolled PlayerStore test double: scores are
// served from the stores map, RecordWin appends to winCalls so tests can
// assert on the calls made, and GetLeague returns the canned league.
type StubPlayerStore struct {
	stores   map[string]int
	winCalls []string
	league   []Player
}

// GetPlayerScore returns the stubbed score for name (zero if absent).
func (s *StubPlayerStore) GetPlayerScore(name string) int {
	return s.stores[name]
}

// RecordWin records the player name for later assertions.
func (s *StubPlayerStore) RecordWin(name string) {
	s.winCalls = append(s.winCalls, name)
}

// GetLeague returns the canned league table.
func (s *StubPlayerStore) GetLeague() []Player {
	return s.league
}

func TestGETPlayers(t *testing.T) {
	store := StubPlayerStore{
		map[string]int{
			"Pepper": 20,
			"Floyd":  10,
		},
		nil,
		nil,
	}
	server := NewPlayerServer(&store)

	t.Run("returns Pepper's score", func(t *testing.T) {
		req := newGetScoreRequest("Pepper")
		resp := httptest.NewRecorder()

		server.ServeHTTP(resp, req)

		assertStatus(t, resp.Code, http.StatusOK)
		assertResponseBody(t, resp.Body.String(), "20")
	})

	t.Run("returns Floyd's score", func(t *testing.T) {
		req := newGetScoreRequest("Floyd")
		resp := httptest.NewRecorder()

		server.ServeHTTP(resp, req)

		assertStatus(t, resp.Code, http.StatusOK)
		assertResponseBody(t, resp.Body.String(), "10")
	})

	t.Run("returns 404 for missing players", func(t *testing.T) {
		req := newGetScoreRequest("Jerry")
		resp := httptest.NewRecorder()

		server.ServeHTTP(resp, req)

		got := resp.Code
		want := http.StatusNotFound

		assertStatus(t, got, want)
	})
}

func TestStoreWins(t *testing.T) {
	store := StubPlayerStore{
		map[string]int{
			"Pepper": 20,
			"Floyd":  10,
		},
		nil,
		nil,
	}
	server := NewPlayerServer(&store)

	t.Run("it records wins on a POST", func(t *testing.T) {
		player := "Pepper"

		req := newPostWinRequest(player)
		resp := httptest.NewRecorder()

		server.ServeHTTP(resp, req)

		assertStatus(t, resp.Code, http.StatusAccepted)

		if len(store.winCalls) != 1 {
			t.Errorf("got %d calls to RecordWin; want %d", len(store.winCalls), 1)
		}
		if store.winCalls[0] != player {
			t.Errorf("did not call RecordWin with correct player got '%s', want '%s'", store.winCalls[0], player)
		}
	})
}

// TestRecordingWinsAndRetrievingThem is an integration test against the
// real in-memory store.
func TestRecordingWinsAndRetrievingThem(t *testing.T) {
	store := NewInMemoryPlayerStore()
	server := NewPlayerServer(store)
	player := "Pepper"

	server.ServeHTTP(httptest.NewRecorder(), newPostWinRequest(player))
	server.ServeHTTP(httptest.NewRecorder(), newPostWinRequest(player))
	server.ServeHTTP(httptest.NewRecorder(), newPostWinRequest(player))

	t.Run("get score", func(t *testing.T) {
		resp := httptest.NewRecorder()
		server.ServeHTTP(resp, newGetScoreRequest(player))
		assertStatus(t, resp.Code, http.StatusOK)
		assertResponseBody(t, resp.Body.String(), "3")
	})

	t.Run("get league", func(t *testing.T) {
		resp := httptest.NewRecorder()
		server.ServeHTTP(resp, newGetLeagueRequest())
		got := getLeagueFromResponse(t, resp.Body)
		want := []Player{
			{"Pepper", 3},
		}
		assertLeague(t, got, want)
	})
}

func TestLeague(t *testing.T) {
	store := StubPlayerStore{}
	server := NewPlayerServer(&store)

	t.Run("it returns 200 for /league", func(t *testing.T) {
		req, _ := http.NewRequest(http.MethodGet, "/league", nil)
		resp := httptest.NewRecorder()

		server.ServeHTTP(resp, req)

		var got []Player
		err := json.NewDecoder(resp.Body).Decode(&got)
		if err != nil {
			t.Fatalf("unable to parse response from server '%s' into slice of Player '%v'", resp.Body, err)
		}

		assertStatus(t, resp.Code, http.StatusOK)
	})

	t.Run("it returns a league table of JSON", func(t *testing.T) {
		wantedLeague := []Player{
			{"Scott", 80},
			{"Chris", 90},
			{"Tamara", 70},
		}

		store := StubPlayerStore{nil, nil, wantedLeague}
		server := NewPlayerServer(&store)

		req := newGetLeagueRequest()
		resp := httptest.NewRecorder()

		server.ServeHTTP(resp, req)

		got := getLeagueFromResponse(t, resp.Body)
		assertStatus(t, resp.Code, http.StatusOK)
		assertContentType(t, resp, jsonContentType)
		assertLeague(t, got, wantedLeague)
	})
}

// getLeagueFromResponse decodes a JSON league table from body, failing
// the test on malformed JSON.
func getLeagueFromResponse(t *testing.T, body io.Reader) (league []Player) {
	t.Helper()
	err := json.NewDecoder(body).Decode(&league)
	if err != nil {
		t.Fatalf("unable to parse response from server '%s' into slice of Player '%v'", body, err)
	}
	return
}

func newGetLeagueRequest() *http.Request {
	req, _ := http.NewRequest(http.MethodGet, "/league", nil)
	return req
}

func newPostWinRequest(name string) *http.Request {
	req, _ := http.NewRequest(
		http.MethodPost,
		fmt.Sprintf("/players/%s", name),
		nil,
	)
	return req
}

func newGetScoreRequest(name string) *http.Request {
	req, _ := http.NewRequest(
		http.MethodGet,
		fmt.Sprintf("/players/%s", name),
		nil,
	)
	return req
}

func assertStatus(t *testing.T, got, want int) {
	t.Helper()
	if got != want {
		t.Errorf("got status %d want %d", got, want)
	}
}

func assertResponseBody(t *testing.T, got, want string) {
	t.Helper()
	if got != want {
		t.Errorf("response body incorrect: got '%s' want '%s'", got, want)
	}
}

// assertLeague compares league tables. t.Helper() added for consistency
// with the other assert helpers so failures point at the caller.
func assertLeague(t *testing.T, got, want []Player) {
	t.Helper()
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %v want %v", got, want)
	}
}

const jsonContentType = "application/json"

// assertContentType checks the content-type header; it reports the
// actual header value rather than dumping the deprecated
// ResponseRecorder.HeaderMap field.
func assertContentType(t *testing.T, resp *httptest.ResponseRecorder, want string) {
	t.Helper()
	if got := resp.Header().Get("content-type"); got != want {
		t.Errorf("response didn't have content-type of %q, got %q", want, got)
	}
}
//MIT License // //Copyright (c) 2020 targyz // //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files (the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions: // //The above copyright notice and this permission notice shall be included in all //copies or substantial portions of the Software. // //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //SOFTWARE. 
package main

import (
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Command-line flags.
var addr = flag.String("listen-address", `:9189`, "The address to listen on for HTTP requests.")
var cpconfig = flag.String("cpconfig", `/opt/cprocsp/sbin/amd64/cpconfig`, "cpconfig bin, default /opt/cprocsp/sbin/amd64/cpconfig")
var certmgr = flag.String("certmgr", `/opt/cprocsp/bin/amd64/certmgr`, "certmgr bin, default /opt/cprocsp/bin/amd64/certmgr")
var period = flag.Int("period", 720, "How often to check in minutes, default 720 (12 hours)")
var cryptcp = flag.String("cryptcp", `/opt/cprocsp/bin/amd64/cryptcp`, "cpconfig bin, default /opt/cprocsp/bin/amd64/cryptcp")
var ensure = flag.Bool("ensure", false, "Run checks and print metric values")

// main registers the Prometheus metrics, starts one background loop per
// check (license, version, user certificates) and serves /metrics.
func main() {
	flag.Parse()
	mgr := CPMgr{
		Cpconfig: *cpconfig,
		Certmgr:  *certmgr,
		Cryptcp:  *cryptcp,
	}
	if *ensure {
		runChecksAndExit(mgr)
	}

	cryptoVersion := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "cryptopro_version",
			Help: "Current cryptopro version",
		})
	licenseActive := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "cryptopro_license_active",
			// Fixed copy-pasted help text that previously said "permanent".
			Help: "Shows if licence active or not",
		})
	licensePermanent := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "cryptopro_license_permanent",
			Help: "Shows if licence permanent or not",
		})
	licenseExpiresIn := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "cryptopro_license_expires_in",
			Help: "Days before license expiry",
		})
	userCertificateExpiresIn := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "cryptopro_user_certificate_expires_in",
			Help: "Days before user certificate expire",
		}, []string{"container"})
	errorsTotal := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "cryptopro_exporter_errors_total",
			Help: "Total errors during runtime. Check logs if value is greater than 0",
		})
	errorsLicense := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "cryptopro_exporter_errors_license",
			Help: "Errors count. Check logs if value is greater than 0",
		})
	errorsUserCertificates := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "cryptopro_exporter_errors_user_certificates",
			Help: "Total error count in runtime. Check logs if value is greater than 0",
		})
	errorsVersion := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "cryptopro_exporter_errors_version",
			Help: "Total error count in runtime. Check logs if value is greater than 0",
		})

	prometheus.MustRegister(cryptoVersion)
	prometheus.MustRegister(licenseActive)
	prometheus.MustRegister(licensePermanent)
	prometheus.MustRegister(licenseExpiresIn)
	prometheus.MustRegister(userCertificateExpiresIn)
	prometheus.MustRegister(errorsLicense)
	prometheus.MustRegister(errorsUserCertificates)
	prometheus.MustRegister(errorsTotal)
	prometheus.MustRegister(errorsVersion)

	// License check loop.
	go func() {
		for {
			// reset gauges
			licensePermanent.Set(0)
			licenseActive.Set(0)
			licenseExpiresIn.Set(0)
			permanent, active, days, err := mgr.getLicenseInfo()
			if err != nil {
				log.Printf("license check finished with error: %v", err)
				// Fix: license failures were incrementing errorsVersion
				// (copy-paste bug); count them against the license counter.
				errorsLicense.Inc()
				errorsTotal.Inc()
			} else {
				licensePermanent.Set(boolToFloat64(permanent))
				licenseActive.Set(boolToFloat64(active))
				licenseExpiresIn.Set(float64(days))
			}
			time.Sleep(time.Minute * time.Duration(*period))
		}
	}()

	// Version check loop.
	go func() {
		for {
			// reset gauges
			cryptoVersion.Set(0)
			version, err := mgr.getVersion()
			if err != nil {
				log.Printf("version check finished with error: %v", err)
				errorsVersion.Inc()
				errorsTotal.Inc()
			} else {
				cryptoVersion.Set(version)
			}
			time.Sleep(time.Minute * time.Duration(*period))
		}
	}()

	// User certificates check loop.
	go func() {
		for {
			// TODO: verify that Reset actually clears all label values of the vector
			userCertificateExpiresIn.Reset()
			certNames, expireIn, err := mgr.GetUserCertsInfo()
			if err != nil {
				log.Printf("user certificates check finished with error: %v", err)
				errorsTotal.Inc()
				errorsUserCertificates.Inc()
			}
			for i, n := range certNames {
				userCertificateExpiresIn.WithLabelValues(n).Set(expireIn[i])
			}
			time.Sleep(time.Minute * time.Duration(*period))
		}
	}()

	http.Handle("/metrics", promhttp.Handler())
	log.Printf("Starting web server at %s\n", *addr)
	err := http.ListenAndServe(*addr, nil)
	if err != nil {
		log.Printf("http.ListenAndServe: %v\n", err)
	}
}

// runChecksAndExit runs every check once, prints the results to stdout
// and exits: 0 on success, 1 on the first failure. Used by -ensure.
func runChecksAndExit(mgr CPMgr) {
	version, err := mgr.getVersion()
	if err != nil {
		// Fix: use interpreted strings so "\n" is a real newline — the
		// original raw-string literals printed a literal `\n` (user
		// certificates branch) or no newline at all.
		fmt.Printf("An error occurred while running version check: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Version: %v\n\n", version)
	permanent, active, days, err := mgr.getLicenseInfo()
	if err != nil {
		fmt.Printf("An error occurred while running license check: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Licence is active: %v\n", active)
	fmt.Printf("Licence is permanent: %v\n", permanent)
	fmt.Printf("Licence expires in %v days\n", days)
	certNames, expireIn, err := mgr.GetUserCertsInfo()
	if err != nil {
		fmt.Printf("An error occurred while running user certificates check: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("")
	for i, n := range certNames {
		fmt.Printf("Certificate %v expires in %v days\n", n, expireIn[i])
	}
	fmt.Println("")
	os.Exit(0)
}
package main

import (
	"fmt"
	"sort"
)

// person holds a first name, a last name and an (unexported) age.
type person struct {
	First string
	Last  string
	age   int
}

// ByFirst implements sort.Interface over []person, ordering by first
// name ascending.
type ByFirst []person

// Len implements sort.Interface.
func (p ByFirst) Len() int { return len(p) }

// Swap implements sort.Interface.
func (p ByFirst) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

// Less implements sort.Interface.
func (p ByFirst) Less(i, j int) bool { return p[i].First < p[j].First }

// main prints a slice of people before and after sorting it by first name.
func main() {
	people := []person{
		{"harlon", "hutch", 43},
		{"brook", "hutch", 43},
		{"trevor", "chip", 12},
		{"kendra", "jax", 13},
		{"tiff", "bake", 41},
	}

	for i := 0; i < 3; i++ {
		fmt.Println("")
	}
	fmt.Println("----------------------------")
	fmt.Println("out of alphabetical order...")
	fmt.Println("----------------------------")
	fmt.Println("")
	fmt.Println(people)
	fmt.Println("")
	fmt.Println("------------------------")
	fmt.Println("in alphabetical order...")
	fmt.Println("------------------------")
	fmt.Println("")
	fmt.Println("")

	sort.Sort(ByFirst(people))
	fmt.Println(people)
}
// description : A read-write TCP client that takes command line arguments (like the real nc) // author : Tom Geudens (https://github.com/tomgeudens/) // modified : 2016/07/24 // package main import ( "io" "log" "net" "os" ) func mustCopy(dst io.Writer, src io.Reader) { _, err := io.Copy(dst, src) if err != nil { log.Fatal(err) } } func main() { conn, err := net.Dial("tcp", os.Args[1]+":"+os.Args[2]) if err != nil { log.Fatal(err) } done := make(chan struct{}) go func() { io.Copy(os.Stdout, conn) log.Println("writing output done") done <- struct{}{} }() mustCopy(conn, os.Stdin) log.Println("reading input done") conn.Close() <-done }
package SmartAuth

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/Tnze/go-mc/yggdrasil"
	"github.com/google/uuid"
	"github.com/spf13/viper"
	"net/http"
	"strings"
	"sync"
	"time"
)

// Tokens is the accessToken/clientToken pair exchanged with the Mojang
// Yggdrasil auth server for validate/refresh requests.
type Tokens struct {
	AccessToken string `json:"accessToken"`
	ClientToken string `json:"clientToken"`
}

var (
	// syncLock serializes all access to accountsFile.
	syncLock sync.Mutex
	// accountsFile is the viper handle backing the local accounts.json cache.
	accountsFile = viper.New()
	// authProxy, when non-nil, replaces the default HTTP client for all
	// auth-server requests.
	authProxy *http.Client
)

// init points viper at ./accounts.json and loads (or creates) it.
func init() {
	syncLock.Lock()
	accountsFile.SetConfigName("accounts")
	accountsFile.SetConfigType("json")
	accountsFile.AddConfigPath(".")
	readAccountsFile()
	syncLock.Unlock()
}

// readAccountsFile loads accounts.json; if it does not exist yet it is
// created with a freshly generated client token and an empty accounts map.
// NOTE(review): viper keys are case-insensitive, so "client-Token" here and
// "client-token" elsewhere presumably resolve to the same entry — confirm.
func readAccountsFile() {
	if err := accountsFile.ReadInConfig(); err != nil {
		if _, ok := err.(viper.ConfigFileNotFoundError); ok {
			accountsFile.SetDefault("client-Token", uuid.New().String())
			accountsFile.SetDefault("accounts", map[string]string{})
			_ = accountsFile.SafeWriteConfig()
			_ = accountsFile.ReadInConfig()
		}
	}
}

// AuthWithEmail authenticates a Mojang account. It first tries the cached
// [id, uuid, accessToken] triple stored under the email's local part; if
// the cached token no longer validates it attempts a refresh, and only
// falls back to a full username/password login when both fail. Successful
// logins/refreshes are written back to accounts.json. A non-nil proxy
// replaces the HTTP client used for every auth request (global side effect).
func AuthWithEmail(email, password string, proxy *http.Client) (playerID, playerUUID, access string, authErr error) {
	if proxy != nil {
		authProxy = proxy
	}
	syncLock.Lock()
	defer syncLock.Unlock()
	readAccountsFile()
	clientToken := accountsFile.GetString("client-token")
	if clientToken == "" {
		// First run (or missing entry): mint a client token and persist it.
		clientToken = uuid.New().String()
		accountsFile.Set("client-token", clientToken)
		_ = accountsFile.WriteConfig()
	}
	if accounts := accountsFile.GetStringMapStringSlice("accounts"); accounts != nil {
		// Cached accounts are keyed by the local part of the email address.
		if account, ok := accounts[strings.Split(email, "@")[0]]; ok && len(account) == 3 {
			// account layout: [0]=player name, [1]=player uuid, [2]=access token.
			success, err := validateToken(clientToken, account[2])
			if err != nil {
				authErr = err
				return
			} else {
				if success {
					// Cached token still valid — reuse it as-is.
					playerID = account[0]
					playerUUID = account[1]
					access = account[2]
					authErr = nil
					return
				} else {
					// Token expired: try a refresh before a full re-login.
					if clientToken, accessToken := refreshToken(clientToken, account[2]); clientToken != "" && accessToken != "" {
						if success2, err2 := validateToken(clientToken, accessToken); err2 == nil && success2 {
							playerID = account[0]
							playerUUID = account[1]
							access = accessToken
							authErr = nil
							account[2] = accessToken
							accounts[strings.Split(email, "@")[0]] = account
							accountsFile.Set("accounts", accounts)
							_ = accountsFile.WriteConfig()
							return
						}
					}
				}
			}
		}
		// No usable cache entry: perform a full credential login and cache it.
		var err error
		playerID, playerUUID, access, err = loginNormal(email, password, clientToken)
		if err != nil {
			authErr = err
			return
		}
		authErr = nil
		account := make([]string, 3)
		account[0] = playerID
		account[1] = playerUUID
		account[2] = access
		accounts[strings.Split(email, "@")[0]] = account
		accountsFile.Set("accounts", accounts)
		_ = accountsFile.WriteConfig()
		return
	} else {
		authErr = errors.New("accounts.json valid error,please delete it and rerun the program")
		return
	}
}

// loginNormal performs a full /authenticate call with username+password.
// Note the named results: id receives the profile *name* and uuid the
// profile ID, mirroring what AuthWithEmail caches in slots 0 and 1.
func loginNormal(user, password, clientToken string) (id, uuid, access string, err error) {
	// Request/response shapes for the Yggdrasil /authenticate endpoint.
	type agent struct {
		Name    string `json:"name"`
		Version int    `json:"version"`
	}
	type proof struct {
		UserName string `json:"username"`
		Password string `json:"password"`
	}
	type Profile struct {
		ID   string `json:"id"`
		Name string `json:"name"`
	}
	type AuthResp struct {
		Tokens
		AvailableProfiles []Profile `json:"availableProfiles"` // only present if the agent field was received
		SelectedProfile   Profile   `json:"selectedProfile"`   // only present if the agent field was received
		User              struct {
			ID         string `json:"id"`
			Properties []struct {
				Name  string `json:"name"`
				Value string `json:"value"`
			}
		} `json:"user"`
		*yggdrasil.Error
	}
	type authPayload struct {
		Agent agent `json:"agent"`
		proof
		ClientToken string `json:"clientToken,omitempty"`
		RequestUser bool   `json:"requestUser"`
	}

	pl := authPayload{
		Agent: agent{
			Name:    "Minecraft",
			Version: 1,
		},
		proof: proof{
			UserName: user,
			Password: password,
		},
		ClientToken: clientToken,
		RequestUser: true,
	}
	var ar AuthResp
	err = postAndParseResponse("authenticate", pl, &ar)
	if err != nil {
		return "", "", "", err
	}
	// A non-nil embedded error means the server rejected the credentials.
	if ar.Error != nil {
		return "", "", "", *ar.Error
	}
	return ar.SelectedProfile.Name, ar.SelectedProfile.ID, ar.AccessToken, nil
}

// validateToken asks /validate whether accessToken is still usable with
// this clientToken: HTTP 204 means valid, 403 means invalid, anything
// else is reported as an error.
func validateToken(clientToken, accessToken string) (bool, error) {
	pl := Tokens{
		AccessToken: accessToken,
		ClientToken: clientToken,
	}
	resp, err := postToAuthServer("validate", pl)
	if resp != nil && err == nil {
		switch resp.StatusCode {
		case 204:
			_ = resp.Body.Close()
			return true, nil
		case 403:
			_ = resp.Body.Close()
			return false, nil
		default:
			_ = resp.Body.Close()
			return false, errors.New("error when validating tokens")
		}
	}
	return false, nil
}

// refreshToken asks /refresh for a new access token; it returns the new
// (clientToken, accessToken) pair, or two empty strings on any failure.
func refreshToken(clientToken, accessToken string) (string, string) {
	pl := Tokens{
		AccessToken: accessToken,
		ClientToken: clientToken,
	}
	resp, err := postToAuthServer("refresh", pl)
	if resp != nil && err == nil {
		switch resp.StatusCode {
		case 200:
			responseUnmarshal := Tokens{}
			_ = json.NewDecoder(resp.Body).Decode(&responseUnmarshal)
			_ = resp.Body.Close()
			return responseUnmarshal.ClientToken, responseUnmarshal.AccessToken
		default:
			_ = resp.Body.Close()
			return "", ""
		}
	}
	if resp != nil {
		_ = resp.Body.Close()
	}
	return "", ""
}

// postAndParseResponse POSTs payload to the given endpoint and decodes
// the JSON response body into resp.
func postAndParseResponse(endpoint string, payload interface{}, resp interface{}) error {
	rowResp, err := postToAuthServer(endpoint, payload)
	if err != nil {
		return fmt.Errorf("request fail: %v", err)
	}
	defer rowResp.Body.Close()
	err = json.NewDecoder(rowResp.Body).Decode(resp)
	if err != nil {
		return fmt.Errorf("parse resp fail: %v", err)
	}
	return nil
}

// postToAuthServer JSON-encodes payload and POSTs it to the Mojang auth
// server endpoint, using authProxy as the client when one was supplied.
// Callers are responsible for closing resp.Body.
func postToAuthServer(endPoint string, payload interface{}) (resp *http.Response, err error) {
	marshal, err := json.Marshal(payload)
	if err != nil {
		return &http.Response{}, err
	}
	client := &http.Client{
		Timeout: 10 * time.Second,
	}
	if authProxy != nil {
		client = authProxy
	}
	request, err := http.NewRequest(
		http.MethodPost,
		"https://authserver.mojang.com/"+endPoint,
		bytes.NewReader(marshal))
	if err != nil {
		return &http.Response{}, err
	}
	request.Header.Set("User-agent", "go-mc")
	request.Header.Set("Connection", "keep-alive")
	request.Header.Set("Content-Type", "application/json")
	resp, err = client.Do(request)
	return
}
// Package carbone provide an SDK to communicate with Carbone Render // Carbone is the most efficient report generator // It render from a JSON and template into PDF, DOCX, XLSX, PPTX, ODS and many more reports package carbone import ( "bytes" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "io" "io/ioutil" "mime/multipart" "net/http" "os" "strconv" "time" ) // APIResponseData object created during Carbone Render response. type APIResponseData struct { TemplateID string `json:"templateId,omitempty"` RenderID string `json:"renderId,omitempty"` TemplateFileExtension string `json:"inputFileExtension,omitempty"` } // APIResponse object created during Carbone Render response. type APIResponse struct { Success bool `json:"success"` Error string `json:"error,omitempty"` Data APIResponseData `json:"data"` } // CSDK (CarboneSDK) to use Carbone render API easily. type CSDK struct { apiVersion string apiAccessToken string apiURL string apiTimeOut time.Duration apiHTTPClient *http.Client } // NewCarboneSDK is a constructor and return a new instance of CSDK func NewCarboneSDK(args ...string) (*CSDK, error) { apiAccessToken := os.Getenv("CARBONE_TOKEN") if len(args) > 0 && args[0] != "" { apiAccessToken = args[0] } if apiAccessToken == "" { return nil, errors.New(`NewCarboneSDK error: "apiAccessToken" argument OR "CARBONE_TOKEN" env variable is missing`) } csdk := &CSDK{ apiVersion: "3", apiAccessToken: apiAccessToken, apiURL: "https://render.carbone.io", apiTimeOut: time.Second * 10, apiHTTPClient: &http.Client{Timeout: time.Second * 10}, } return csdk, nil } // AddTemplate upload your template to Carbone Render. The first parameter is the template file path, the second is an optional payload. 
func (csdk *CSDK) AddTemplate(templateFileName string, args ...string) (APIResponse, error) { payload := "" if len(args) > 0 && args[0] != "" { payload = args[0] } cResp := APIResponse{} if templateFileName == "" { return cResp, errors.New("Carbone SDK AddTemplate error: argument is missing: templateFileName") } // Create buffer buf := new(bytes.Buffer) // create a tmpfile and assemble your multipart from there w := multipart.NewWriter(buf) // Create the data object to send // { "payload":"", "template": readstream(file...) } label, err := w.CreateFormField("payload") if err != nil { return cResp, err } // Write payload content (empty for now) label.Write([]byte(payload)) // Create the FormData fw, err := w.CreateFormFile("template", templateFileName) if err != nil { return cResp, err } // Open Template fd, err := os.Open(templateFileName) if err != nil { return cResp, err } defer fd.Close() // Write file field from file to upload _, err = io.Copy(fw, fd) if err != nil { return cResp, err } // Important if you do not close the multipart writer you will not have a terminating boundry w.Close() // Create the request headerRequest := map[string]string{ "Content-Type": w.FormDataContentType(), } resp, err := csdk.doHTTPRequest("POST", csdk.apiURL+"/template", headerRequest, buf) if err != nil { return cResp, err } // Read the stream body, err := ioutil.ReadAll(resp.Body) if err != nil { return cResp, errors.New("Carbone SDK request error: failled to read the body: " + err.Error()) } // Close the connection https://stackoverflow.com/questions/33238518/what-could-happen-if-i-dont-close-response-body defer resp.Body.Close() // Parse JSON body and store into the APIResponse Struct err = json.Unmarshal(body, &cResp) if err != nil { return cResp, errors.New("Carbone SDK request error: failled to parse the JSON response from the body: " + err.Error()) } return cResp, nil } // GetTemplate returns the original template from the templateId (Unique identifier of the template) 
func (csdk *CSDK) GetTemplate(templateID string) ([]byte, error) { if templateID == "" { return []byte{}, errors.New("Carbone SDK GetTemplate error: argument is missing: templateID") } // Create the request resp, err := csdk.doHTTPRequest("GET", csdk.apiURL+"/template/"+templateID, nil, nil) if err != nil { return []byte{}, err } // Read the response data and return a []byte. The http package automatically decodes chunking when reading response body. body, err := ioutil.ReadAll(resp.Body) if err != nil { return []byte{}, errors.New("Carbone SDK GetTemplate request error: failled to read the body: " + err.Error()) } // Close the connection defer resp.Body.Close() if len(body) == 0 { return body, errors.New("Carbone SDK GetTemplate request error: The response body is empty") } return body, nil } // DeleteTemplate Delete an uploaded template from a templateID. func (csdk *CSDK) DeleteTemplate(templateID string) (APIResponse, error) { cResp := APIResponse{} if templateID == "" { return cResp, errors.New("Carbone SDK DeleteTemplate error: argument is missing: templateID") } // HTTP Request resp, err := csdk.doHTTPRequest("DELETE", csdk.apiURL+"/template/"+templateID, nil, nil) if err != nil { return cResp, err } // Read body body, err := ioutil.ReadAll(resp.Body) if err != nil { return cResp, errors.New("Carbone SDK DeleteTemplate request error: failled to read the body: " + err.Error()) } // Close the connection defer resp.Body.Close() // Parse JSON body and store into the APIResponse Struct err = json.Unmarshal(body, &cResp) if err != nil { return cResp, errors.New("Carbone SDK DeleteTemplate request error: failled to parse the JSON response from the body: " + err.Error()) } return cResp, nil } // RenderReport a report from a templateID and a json data func (csdk *CSDK) RenderReport(templateID string, jsonData string) (APIResponse, error) { cResp := APIResponse{} if templateID == "" { return cResp, errors.New("Carbone SDK RenderReport error: argument is missing: 
templateID") } if jsonData == "" { return cResp, errors.New("Carbone SDK RenderReport error: argument is missing: jsonData") } headerRequest := map[string]string{ "Content-Type": "application/json", } resp, err := csdk.doHTTPRequest("POST", csdk.apiURL+"/render/"+templateID, headerRequest, bytes.NewBuffer([]byte(jsonData))) if err != nil { return cResp, err } body, err := ioutil.ReadAll(resp.Body) if err != nil { return cResp, errors.New("Carbone SDK RenderReport request error: failled to read the body: " + err.Error()) } // Close the connection defer resp.Body.Close() err = json.Unmarshal(body, &cResp) if err != nil { return cResp, errors.New("Carbone SDK RenderReport request error: failled to parse the JSON response from the body: " + err.Error()) } return cResp, nil } // GetReport Request Carbone Render and return a generated report func (csdk *CSDK) GetReport(renderID string) ([]byte, error) { if renderID == "" { return []byte{}, errors.New("Carbone SDK GetReport error: argument is missing: renderID") } // http request resp, err := csdk.doHTTPRequest("GET", csdk.apiURL+"/render/"+renderID, nil, nil) if err != nil { return []byte{}, err } // Read the response data and return a []byte. The http package automatically decodes chunking when reading response body. body, err := ioutil.ReadAll(resp.Body) if err != nil { return []byte{}, errors.New("Carbone SDK GetReport request error: failled to read the body: " + err.Error()) } // Close the connection defer resp.Body.Close() if len(body) == 0 { return []byte{}, errors.New("Carbone SDK GetReport request error: The response body is empty: Render again and generate a new renderId") } return body, nil } // Render render a report from a templateID OR a template path. 
// pathOrTemplateID {string}: Accept a file path OR a template ID returned by AddTemplate // jsonData {string}: stringify json, all options here: https://carbone.io/api-reference.html#rendering-a-report // args {...string}: You can pass an optinal payload used during the template upload (AddTemplate) to create a different templateID. // It returns a []byte of the file. func (csdk *CSDK) Render(pathOrTemplateID string, jsonData string, args ...string) ([]byte, error) { var cresp APIResponse var er error payload := "" if len(args) > 0 && args[0] != "" { payload = args[0] } info, err := os.Stat(pathOrTemplateID) if os.IsNotExist(err) { // The first argument `pathOrTemplateID` is a templateID cresp, er = csdk.RenderReport(pathOrTemplateID, jsonData) if er != nil { return []byte{}, er } } else if info.IsDir() { return []byte{}, errors.New("Carbone SDK Render error: the path passed as argument is a directory") } else { // The first argument `pathOrTemplateID` is maybe a file templateID, e := csdk.GenerateTemplateID(pathOrTemplateID, payload) if e != nil { return []byte{}, errors.New("Carbone SDK Render error: failled to generate the templateID hash:" + e.Error()) } cresp, er = csdk.RenderReport(templateID, jsonData) if er != nil { return []byte{}, er } else if !cresp.Success { // if RenderReport return one of the following error, it means the template does not exist // - Error while rendering template Error: ENOENT:File not found // - Error while rendering template Error: 404 Not Found // Then call add template and render again cres, e := csdk.AddTemplate(pathOrTemplateID, payload) if e != nil { return []byte{}, errors.New("Carbone SDK Render error:" + e.Error()) } cresp, er = csdk.RenderReport(cres.Data.TemplateID, jsonData) if er != nil { return []byte{}, errors.New("Carbone SDK Render error:" + er.Error()) } } } if !cresp.Success { // If an error is returned, it means something went wrong. 
// if the error is "Error while rendering template Error: 404 Not Found" or "ENOENT:File not found" it means TemplateID does not exist return []byte{}, errors.New(cresp.Error) } if len(cresp.Data.RenderID) <= 0 { return []byte{}, errors.New("Carbone SDK Render error: renderID is empty") } // Return the report return csdk.GetReport(cresp.Data.RenderID) } // GenerateTemplateID Generate the templateID from a template // filepath {string}: file path // args {...string}: You can set a payload (args[0]) // returns the file TemplateId func (csdk *CSDK) GenerateTemplateID(filepath string, args ...string) (string, error) { payload := "" if len(args) > 0 && args[0] != "" { payload = args[0] } // Open the file f, err := os.Open(filepath) if err != nil { return "", err } defer f.Close() // New HASH h := sha256.New() // Write payload h.Write([]byte(payload)) // Write file buffer if _, err := io.Copy(h, f); err != nil { return "", err } // Return the sha256 has as hexadecimal return hex.EncodeToString(h.Sum(nil)), nil } // SetAccessToken set the Carbone Render access token func (csdk *CSDK) SetAccessToken(newToken string) { csdk.apiAccessToken = newToken } // SetAPIVersion set the Carbone Render version func (csdk *CSDK) SetAPIVersion(version int) { csdk.apiVersion = strconv.Itoa(version) } // GetAPIVersion get the Carbone Render version func (csdk *CSDK) GetAPIVersion() (int, error) { return strconv.Atoi(csdk.apiVersion) } // ------------------ private function func (csdk *CSDK) doHTTPRequest(method string, url string, headers map[string]string, body io.Reader) (*http.Response, error) { req, err := http.NewRequest(method, url, body) if err != nil { return nil, errors.New("Carbone SDK request: failled to create a new request: " + err.Error()) } for k, v := range headers { req.Header.Set(k, v) } // User Api Token req.Header.Set("Authorization", "Bearer "+csdk.apiAccessToken) req.Header.Set("carbone-version", csdk.apiVersion) // Send request resp, err := csdk.apiHTTPClient.Do(req) 
if err != nil { return nil, fmt.Errorf("Carbone SDK request error: %v", err.Error()) } if resp.StatusCode != http.StatusOK && resp.StatusCode != 404 { return resp, fmt.Errorf("Carbone SDK request error status code %d", resp.StatusCode) } return resp, nil }
package main

import "fmt"

// 001 Methods

// person is a plain value type carrying a first and last name.
type person struct {
	first string
	last  string
}

// secretAgent embeds person and adds a licence-to-kill flag.
type secretAgent struct {
	person
	ltk bool
}

// speak on secretAgent shadows the embedded person's speak method.
func (sa secretAgent) speak() {
	fmt.Println("I am", sa.first, sa.last, " -ajan konuştu")
}

// speak prints the person's name.
func (pr person) speak() {
	fmt.Println("I am", pr.first, pr.last, " -insan konuştu")
}

// 002 Interfaces

// human is satisfied by any type that has a speak method.
type human interface {
	speak()
}

// bar accepts any human; note that it does not call speaker.speak itself.
func bar(speaker human) {
	fmt.Println("Ben aradım...")
}

func main() {
	// 001 Methods
	sa1 := secretAgent{person{"Kamil", "KAPLAN"}, true}
	fmt.Println(sa1)
	sa1.speak()

	// 002 Interfaces
	p1 := person{"Dr.", "Yes"}
	fmt.Println(p1)
	bar(sa1)
	bar(p1)
	p1.speak()
}
package main

import "fmt"

func main() {
	meetShortDeclaration()
}

// meetShortDeclaration contrasts the two local declaration forms and
// prints both values.
func meetShortDeclaration() {
	// normal declaration
	var x = 42
	// short declaration
	y := 48
	// using package fmt to emit a message
	fmt.Println("y => ", y, ", x => ", x)
}
// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gcsrunner import ( "bytes" "fmt" "github.com/GoogleCloudPlatform/esp-v2/src/go/metadata" "github.com/GoogleCloudPlatform/esp-v2/src/go/options" "github.com/GoogleCloudPlatform/esp-v2/src/go/util" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/ptypes" scpb "github.com/GoogleCloudPlatform/esp-v2/src/go/proto/api/envoy/v7/http/service_control" bootstrappb "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3" listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" hcmpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" ) var ( // Transformers which can be stubbed in unit testing. 
doServiceControlTransform = addGCPAttributes ) func addGCPAttributes(cfg *scpb.FilterConfig, opts FetchConfigOptions) error { var overridePlatform string if oldAttrs := cfg.GetGcpAttributes(); oldAttrs != nil { overridePlatform = oldAttrs.GetPlatform() } co := options.DefaultCommonOptions() co.MetadataURL = opts.MetadataURL mf := metadata.NewMetadataFetcher(co) attrs, err := mf.FetchGCPAttributes() if err != nil { return err } if overridePlatform != "" { attrs.Platform = overridePlatform } cfg.GcpAttributes = attrs return nil } func transformConfigBytes(config []byte, opts FetchConfigOptions) ([]byte, error) { bootstrap := &bootstrappb.Bootstrap{} u := &jsonpb.Unmarshaler{ AnyResolver: util.Resolver, } if err := u.Unmarshal(bytes.NewBuffer(config), bootstrap); err != nil { return nil, err } if err := transformEnvoyConfig(bootstrap, opts); err != nil { return nil, err } m := &jsonpb.Marshaler{ OrigName: true, AnyResolver: util.Resolver, } buf := &bytes.Buffer{} if err := m.Marshal(buf, bootstrap); err != nil { return nil, err } return buf.Bytes(), nil } func transformEnvoyConfig(bootstrap *bootstrappb.Bootstrap, opts FetchConfigOptions) error { listeners := bootstrap.GetStaticResources().GetListeners() if len(listeners) == 0 { return fmt.Errorf("expected at least 1 listener, got: 0") } ingressListenerTransformed := false for _, l := range listeners { switch l.GetName() { case util.IngressListenerName: ingressListenerTransformed = true if err := transformIngressListener(l, opts); err != nil { return fmt.Errorf("failed to transform Ingress Listener: %v", err) } } if !ingressListenerTransformed { return fmt.Errorf("did not find an ingress listener: %v", listeners[0]) } } return nil } func transformIngressListener(l *listenerpb.Listener, opts FetchConfigOptions) error { for _, c := range l.GetFilterChains() { if filters := c.GetFilters(); filters != nil { for _, f := range filters { if f.GetName() == util.HTTPConnectionManager { if err := 
transformHTTPConnectionManager(f, opts); err != nil { return fmt.Errorf("failed to transform HttpConnectionManager: %v", err) } return nil } } } } return fmt.Errorf("failed to find HTTPConnectionManager on Ingress Listener") } func transformHTTPConnectionManager(f *listenerpb.Filter, opts FetchConfigOptions) error { hcmCfg := f.GetTypedConfig() httpConMgr := &hcmpb.HttpConnectionManager{} if err := ptypes.UnmarshalAny(hcmCfg, httpConMgr); err != nil { return err } transformed := false for _, hf := range httpConMgr.GetHttpFilters() { if hf.GetName() == util.ServiceControl { if err := transformServiceControlFilter(hf, opts); err != nil { return fmt.Errorf("failed to transform service control filter: %v", err) } transformed = true } } if !transformed { return fmt.Errorf("http connection manager did not find a service control filter: %v", f) } filterCfg, err := ptypes.MarshalAny(httpConMgr) if err != nil { return err } f.ConfigType = &listenerpb.Filter_TypedConfig{TypedConfig: filterCfg} return nil } func transformServiceControlFilter(f *hcmpb.HttpFilter, opts FetchConfigOptions) error { scCfg := f.GetTypedConfig() if scCfg == nil { return fmt.Errorf("failed to unmarshal service control filter as a typed config") } filterConfig := &scpb.FilterConfig{} if err := ptypes.UnmarshalAny(scCfg, filterConfig); err != nil { return err } if err := doServiceControlTransform(filterConfig, opts); err != nil { return fmt.Errorf("failed to add GCP attributes: %v", err) } scs, err := ptypes.MarshalAny(filterConfig) if err != nil { return err } f.ConfigType = &hcmpb.HttpFilter_TypedConfig{ TypedConfig: scs, } return nil }
package main

import (
	"bufio"
	"fmt"
	"image"
	"image/color"
	"image/png"
	"log"
	"math"
	"math/rand"
	"os"
	"sort"
	"strings"

	"github.com/gonum/stat"
	"github.com/llgcode/draw2d/draw2dimg"
)

// Coefficients for the downhill-simplex (Nelder-Mead style) optimizer.
const (
	terminateThreshold = 0.01
	maxIters           = 10
	expandCoeff        = 2
	contractCoeff      = 0.5
	shrinkCoeff        = 0.5
)

// Point is a point in Dims-dimensional space.
type Point struct {
	Dims  int
	Terms []float64
}

// NewPoint returns a zeroed point of the given dimensionality.
func NewPoint(dims int) *Point {
	return &Point{
		Dims:  dims,
		Terms: make([]float64, dims),
	}
}

// Simplex holds Dimension+1 points; Evaluations is kept sorted ascending,
// so Points[0] is the best point and the last point is the worst.
type Simplex struct {
	Points         []*Point
	Dimension      int
	Evaluations    []float64
	initialized    bool
	numInitialized int
}

// NewSimplex returns an empty simplex for the given dimensionality.
func NewSimplex(dim int) *Simplex {
	return &Simplex{
		Points:      make([]*Point, 0),
		Evaluations: make([]float64, 0),
		Dimension:   dim,
	}
}

// ComputeCentroid returns the arithmetic mean of the given points.
// NOTE(review): callers pass *all* simplex points; classic Nelder-Mead uses
// the centroid of all points except the worst — confirm before relying on
// textbook convergence behavior.
func ComputeCentroid(points ...*Point) *Point {
	sum := SumPoints(points...)
	return scalePoint(sum, 1/(float64)(len(points)))
}

// StdDev returns the standard deviation of the Simplex's evaluated values
func (s *Simplex) StdDev() float64 {
	return stat.StdDev(s.Evaluations, nil)
}

// Improve "improves" a simplex by replacing its worst
// value with the given value
func (s *Simplex) Improve(p *Point, value float64) {
	i := sort.Search(len(s.Evaluations), func(i int) bool {
		return s.Evaluations[i] > value
	})
	if i == len(s.Evaluations) {
		panic(`Improve: provided value is worse than all existing values`)
	}
	// Prevent another slice allocation
	// Do not copy the last element because it is the
	// "worst" and will be trimmed
	copy(s.Points[i+1:], s.Points[i:len(s.Points)-1])
	copy(s.Evaluations[i+1:], s.Evaluations[i:len(s.Evaluations)-1])
	s.Points[i] = p
	s.Evaluations[i] = value
}

// Cost returns the best (lowest) evaluated value in the simplex.
func (s *Simplex) Cost() float64 {
	return s.Evaluations[0]
}

// SetPoint inserts a point while the simplex is being filled, keeping
// Evaluations sorted ascending.
func (s *Simplex) SetPoint(p *Point, value float64) {
	i := sort.Search(len(s.Evaluations), func(i int) bool {
		return value < s.Evaluations[i]
	})
	if s.numInitialized < s.Dimension+1 {
		// make room for new value, then shift the tail right by one
		s.Evaluations = append(s.Evaluations, 0)
		s.Points = append(s.Points, &Point{})
		copy(s.Points[i+1:], s.Points[i:len(s.Points)])
		copy(s.Evaluations[i+1:], s.Evaluations[i:len(s.Evaluations)])
		s.numInitialized++
	}
	s.Points[i] = p
	s.Evaluations[i] = value
}

// SumPoints returns the element-wise sum of the given points.
func SumPoints(points ...*Point) *Point {
	if len(points) == 0 {
		panic(`SumPoints: no points to sum`)
	}
	acc := &Point{
		Dims:  points[0].Dims,
		Terms: make([]float64, points[0].Dims),
	}
	for d := 0; d < acc.Dims; d++ {
		for _, p := range points {
			acc.Terms[d] += p.Terms[d]
		}
	}
	return acc
}

// scalePoint returns a new point equal to p scaled by scalar.
func scalePoint(p *Point, scalar float64) *Point {
	ret := &Point{
		Dims:  p.Dims,
		Terms: make([]float64, p.Dims),
	}
	for d := 0; d < p.Dims; d++ {
		ret.Terms[d] = p.Terms[d] * scalar
	}
	return ret
}

// ReflectPoint reflects p through center: 2*center - p.
func ReflectPoint(center, p *Point) *Point {
	scaled := scalePoint(center, 2)
	negated := scalePoint(p, -1)
	return SumPoints(scaled, negated)
}

// ContractPoint moves p towards center by contractCoeff:
// center + contractCoeff*(p - center).
func ContractPoint(center, p *Point) *Point {
	negated := scalePoint(center, -1)
	sum := scalePoint(SumPoints(p, negated), contractCoeff)
	return SumPoints(center, sum)
}

// shouldTerminate reports whether the evaluations have converged.
func shouldTerminate(s *Simplex) bool {
	return s.StdDev() < terminateThreshold
}

// Optimize runs the downhill-simplex loop on eval (in 2-D), logging each
// intermediate simplex to simplex.txt, and returns the final simplex.
func Optimize(eval func(p *Point) float64) *Simplex {
	dims := 2
	points := initPoints(dims, dims+1)
	simplex := NewSimplex(dims)
	file, err := os.Create(`simplex.txt`)
	if err != nil {
		panic(err.Error())
	}
	defer file.Close()
	w := bufio.NewWriter(file)
	file.Sync()
	for _, p := range points {
		simplex.SetPoint(p, eval(p))
	}
	numIters := 0
	for {
		writeSimplex(simplex, w)
		fmt.Printf("Cost: %+v\n", simplex.Cost())
		numIters++
		if numIters > maxIters || shouldTerminate(simplex) {
			finalVals := `{`
			for _, p := range simplex.Points {
				finalVals += fmt.Sprintf(`%v`, p) + `, `
			}
			finalVals += `}`
			fmt.Printf("final values are %+v at %+v\n", simplex.Evaluations, finalVals)
			fmt.Printf("final cost is %+v\n", simplex.Cost())
			break
		}
		centroid := ComputeCentroid(simplex.Points...)
		reflected := ReflectPoint(centroid, simplex.Points[len(simplex.Points)-1])
		// if reflected is better than the second worst point,
		// but not better than the best, obtain new simplex which
		// includes the reflected point
		reflectedEval := eval(reflected)
		if reflectedEval < simplex.Evaluations[simplex.Dimension] && reflectedEval > simplex.Evaluations[0] {
			fmt.Printf("Reflect\n\n")
			simplex.Improve(reflected, reflectedEval)
			continue
		}
		if reflectedEval < simplex.Evaluations[0] {
			// reflected point is the best so far. Expand
			negatedCentroid := scalePoint(centroid, -1)
			expanded := SumPoints(centroid, scalePoint(SumPoints(reflected, negatedCentroid), expandCoeff))
			expandedEval := eval(expanded)
			if expandedEval < reflectedEval {
				simplex.Improve(expanded, expandedEval)
				fmt.Printf("Expand\n\n")
			} else {
				fmt.Printf("Reflect\n\n")
				// BUG FIX: keep the *reflected* point here. The original
				// called Improve(expanded, reflectedEval), pairing the
				// expanded point with the reflected point's evaluation.
				simplex.Improve(reflected, reflectedEval)
			}
			continue
		}
		contracted := ContractPoint(centroid, simplex.Points[len(simplex.Points)-1])
		contractedEval := eval(contracted)
		if contractedEval < simplex.Evaluations[len(simplex.Points)-1] {
			fmt.Printf("Contract\n\n")
			simplex.Improve(contracted, contractedEval)
			continue
		}
		// Shrink the Simplex towards the best point:
		//   x_i = best + shrinkCoeff*(x_i - best)
		// BUG FIX: the original iterated `for i := range Points[1:]` but
		// indexed Points[i], which shrank Points[0] (the best point) and
		// skipped the last; it also computed best + σ(best - x_i), i.e.
		// moved points *away* from themselves in the wrong direction.
		best := simplex.Points[0]
		for i := 1; i < len(simplex.Points); i++ {
			fmt.Printf("Shrink\n\n")
			negatedBest := scalePoint(best, -1)
			step := scalePoint(SumPoints(simplex.Points[i], negatedBest), shrinkCoeff)
			p := SumPoints(best, step)
			simplex.Points[i] = p
			simplex.Evaluations[i] = eval(p)
		}
		// NOTE(review): the simplex is not re-sorted after a shrink — confirm
		// whether that is intended before depending on ordering invariants.
	}
	w.Flush()
	file.Sync()
	return simplex
}

func main() {
	evalFunc := func(p *Point) float64 {
		// Earlier experiments, kept for reference:
		// return 10 - p.Terms[0] // ex1
		// return math.Abs(p.Terms[1] - p.Terms[0]) // ex2
		// return math.Pow(p.Terms[1]-3, 2) + math.Pow(p.Terms[0]-4, 2) // ex3
		v := math.Sqrt(math.Pow(p.Terms[0], 2)+math.Pow(p.Terms[1], 2)) + math.Nextafter(1.0, 2.0) - 1.0
		return math.Sin(v) / v
	}
	s := Optimize(evalFunc)
	drawSimplex(s)
}

// initPoints returns `count` random points in [0,10)^dim.
func initPoints(dim, count int) []*Point {
	points := make([]*Point, count)
	for i := 0; i < count; i++ {
		points[i] = NewPoint(dim)
		for d := 0; d < dim; d++ {
			r := rand.Float64() * 10
			points[i].Terms[d] = r
		}
	}
	return points
}

// SubtractMean constructs a new simplex whose
// points have been recentered around 0
func (s *Simplex) SubtractMean() *Simplex {
	averages := make([]float64, s.Dimension)
	for _, p := range s.Points {
		for d := 0; d < s.Dimension; d++ {
			averages[d] += p.Terms[d]
		}
	}
	for i := 0; i < len(averages); i++ {
		averages[i] = averages[i] / float64(len(s.Points))
	}
	s2 := NewSimplex(s.Dimension)
	s2.Points = make([]*Point, len(s.Points))
	for i, p := range s.Points {
		s2.Points[i] = NewPoint(s.Dimension)
		for d := 0; d < s.Dimension; d++ {
			s2.Points[i].Terms[d] = p.Terms[d] - averages[d]
		}
	}
	return s2
}

// TranslateToPositive translates all the coordinates of the given
// Simplex's points to nonnegative values
func (s *Simplex) TranslateToPositive() *Simplex {
	mins := make([]float64, s.Dimension)
	for d := 0; d < len(s.Points[0].Terms); d++ {
		mins[d] = s.Points[0].Terms[d]
	}
	for _, p := range s.Points[1:] {
		for d := 0; d < len(p.Terms); d++ {
			if p.Terms[d] < mins[d] {
				mins[d] = p.Terms[d]
			}
		}
	}
	s2 := NewSimplex(s.Dimension)
	newPoints := make([]*Point, len(s.Points))
	for i, p := range s.Points {
		newPoints[i] = NewPoint(s.Dimension)
		for d := 0; d < len(p.Terms); d++ {
			newPoints[i].Terms[d] = p.Terms[d] - mins[d]
		}
	}
	s2.Points = newPoints
	return s2
}

// writeSimplex appends the simplex to w.
// For xi = (xi1, xi2, ..., xin), zi = eval(xi)
// Print the Simplex in the format
// Simplex
// x11,x12,...,x1n,z1
// x21,x22,...,x2n,z2
// ..
// xn1,xn2,...,x(n+1)n, zn+1
// End
func writeSimplex(s *Simplex, w *bufio.Writer) {
	_, err := w.WriteString("Simplex\n")
	if err != nil {
		panic(err.Error())
	}
	for i, p := range s.Points {
		terms := make([]string, len(p.Terms))
		for j, d := range p.Terms {
			terms[j] = fmt.Sprintf("%f", d)
		}
		terms = append(terms, fmt.Sprintf("%f", s.Evaluations[i]))
		_, err = w.WriteString(strings.Join(terms, `,`) + "\n")
		if err != nil {
			panic(err.Error())
		}
	}
	_, err = w.WriteString("End\n")
	if err != nil {
		panic(err.Error())
	}
}

// drawSimplex renders a recentered copy of the simplex into an image context.
func drawSimplex(s *Simplex) {
	imgWidth := 850.0
	imgHeight := 850.0
	rect := image.Rect(0, 0, int(imgWidth), int(imgHeight))
	dest := image.NewRGBA(rect)
	gc := draw2dimg.NewGraphicContext(dest)

	// Set some properties
	gc.SetFillColor(color.RGBA{0x44, 0xff, 0x44, 0xff})
	// gc.SetStrokeColor(color.RGBA{0x44, 0x44, 0x44, 0xff})
	gc.SetStrokeColor(color.RGBA{0xff, 0x00, 0x00, 0xff})
	gc.SetLineWidth(5)

	s2 := s.SubtractMean()
	s2 = s2.TranslateToPositive()
	// SubtractMean/TranslateToPositive are pure translations, so sizing
	// from the original simplex is equivalent.
	sizeX, sizeY := simplexSize(s)
	pxMult := math.Min(float64(imgWidth/sizeX), float64(imgHeight/sizeY))
	start := translateCoords(s2.Points[0], pxMult)
	colors := []color.RGBA{{
		0x00, 0xff, 0x00, 0xff,
	}, {
		0x00, 0x00, 0xff, 0xff,
	}}
	gc.MoveTo(float64(start.Terms[0]), float64(start.Terms[1]))
	for i, p := range s2.Points[1:] {
		ip := translateCoords(p, pxMult)
		gc.LineTo(float64(ip.Terms[0]), float64(ip.Terms[1]))
		gc.FillStroke()
		gc.MoveTo(float64(ip.Terms[0]), float64(ip.Terms[1]))
		gc.SetStrokeColor(colors[i])
	}
	// Close the loop
	gc.LineTo(float64(start.Terms[0]), float64(start.Terms[1]))
	gc.FillStroke()
}

// writeImage encodes img as image.png.
// NOTE(review): currently unused by the drawing path.
func writeImage(img *image.Image) {
	f, err := os.Create("image.png")
	if err != nil {
		log.Fatal(err)
	}
	if err := png.Encode(f, *img); err != nil {
		f.Close()
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}

// simplexSize returns the height and width of a 2-D simplex
func simplexSize(s *Simplex) (float64, float64) {
	minX, maxX := s.Points[0].Terms[0], s.Points[0].Terms[0]
	minY, maxY := s.Points[0].Terms[1], s.Points[0].Terms[1]
	for _, p := range s.Points[1:] {
		if p.Terms[0] < minX {
			minX = p.Terms[0]
		}
		if p.Terms[0] > maxX {
			maxX = p.Terms[0]
		}
		if p.Terms[1] < minY {
			minY = p.Terms[1]
		}
		if p.Terms[1] > maxY {
			maxY = p.Terms[1]
		}
	}
	return maxX - minX, maxY - minY
}

// translateCoords scales p into pixel coordinates.
// NOTE(review): this mutates the input point's Terms in place before
// copying them — callers rely on s2 being a throwaway copy.
func translateCoords(p *Point, stepSize float64) *Point {
	p.Terms[0] *= stepSize
	p.Terms[1] *= stepSize
	imgPoint := NewPoint(2)
	imgPoint.Terms[0] = p.Terms[0]
	imgPoint.Terms[1] = p.Terms[1]
	return imgPoint
}
package main import ( "easyquery/examples/pkg/db" "easyquery/examples/pkg/user" ) func main() { // AutoMigrate db.InitDB() defer db.CloseDB() current := db.Postgres current.Migrator().DropTable(&user.User{}) current.AutoMigrate(&user.User{}) current.Migrator().DropTable(&user.Role{}) current.AutoMigrate(&user.Role{}) }
package router import ( "github.com/futurehomeno/fimpgo" log "github.com/sirupsen/logrus" "github.com/thingsplex/easee-ad/model" ) // SendChangerModeEvent sends fimp event func (fc *FromFimpRouter) SendChangerModeEvent(chargerID string, mode string, oldMsg *fimpgo.Message) error { msg := fimpgo.NewStringMessage("evt.mode.report", "chargepoint", mode, nil, nil, oldMsg.Payload) msg.Source = model.ServiceName addr := fimpgo.Address{ MsgType: fimpgo.MsgTypeEvt, ResourceType: fimpgo.ResourceTypeDevice, ResourceName: model.ServiceName, ResourceAddress: fc.configs.InstanceAddress, ServiceName: "chargepoint", ServiceAddress: chargerID, } err := fc.mqt.Publish(&addr, msg) if err != nil { log.Debug(err) return err } return err }
// Copyright 2019 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package hardware import ( "context" "time" "github.com/shirou/gopsutil/v3/mem" "chromiumos/tast/local/bundles/cros/hardware/memtester" "chromiumos/tast/testing" ) func init() { testing.AddTest(&testing.Test{ Func: MemtesterSlow, Desc: "Runs one iteration of memtester using 95% of free memory to find memory subsystem faults", Timeout: 12 * time.Hour, // 5-10 minutes per GiB of memory is common Contacts: []string{ "puthik@chromium.org", // Original Autotest author "cros-partner-avl@google.com", }, // This test is disabled because it can be very slow. }) } func MemtesterSlow(ctx context.Context, s *testing.State) { vmstat, err := mem.VirtualMemory() if err != nil { s.Fatal("Failed to get memory stats: ", err) } const mb = 1024 * 1024 useBytes := int64(float64(vmstat.Free) * 0.95) s.Logf("Testing %.1f MiB (system is using %.1f of %.1f MB)", float64(useBytes)/mb, float64(vmstat.Used)/mb, float64(vmstat.Total)/mb) // TODO(tast-owners): Switch this to perform 100 iterations and make it run during hardware // qualification (see the hardware_Memtester.memory_qual Autotest test). That can take // many hours to complete, so we should probably also parse the output from the memtester // process so we can log progress updates. if err := memtester.Run(ctx, useBytes, 1); err != nil { s.Fatal("memtester failed: ", err) } }
package main

import (
	"bufio"
	"crypto/sha1"
	"errors"
	"flag"
	"fmt"
	"html/template"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"
)

var initial_days = flag.Int(
	"initial_days", 14, "How many days to display initially")
var listen_address = flag.String(
	"listen_address", "",
	"Local address to listen on.  Typically \"\" (permissive) or \"localhost\" (restrictive)")
var log_filename = flag.String(
	"log_file", "tl.log", "Where to keep the log")
var port = flag.Int(
	"port", 29804, "Port to listen on")
var template_path = flag.String(
	"template_path", ".", "Where to find the HTML template file")

// Event is one timestamped log entry. Its duration extends until the
// following event (or now, for the last event).
type Event struct {
	Name             string
	Time             time.Time
	OriginalDuration time.Duration // Duration before day splitting
	Duration         time.Duration // Intra-day (display) duration
	TotalDuration    time.Duration // Sum of all similarly-named Events
}

// Day is the sequence of events that fall on one calendar day.
type Day struct {
	Events []Event
}

// Report is the data handed to the view template.
type Report struct {
	Days []Day
}

// read_data_file parses the log: each line is "Y M D h m s name".
func read_data_file(in io.Reader) (events []Event, err error) {
	lines := bufio.NewScanner(in)
	line_number := 0
	for lines.Scan() {
		line_number++
		fields := strings.SplitN(lines.Text(), " ", 7)
		var numerically [6]int
		for i := 0; i < 6; i++ {
			var err error
			numerically[i], err = strconv.Atoi(fields[i])
			if err != nil {
				return nil, errors.New(fmt.Sprint("Field ", i, " on line ", line_number, " is not numeric: ", err))
			}
		}
		events = append(events, Event{
			Name: fields[6],
			Time: time.Date(
				numerically[0],
				time.Month(numerically[1]),
				numerically[2],
				numerically[3],
				numerically[4],
				numerically[5],
				0, // Nanoseconds
				time.Local),
		})
	}
	if err := lines.Err(); err != nil {
		return nil, err
	}
	return
}

func calculate_durations(events []Event) {
	// The duration of an event is the difference between that event's
	// timestamp and the following event's timestamp. I.e., Event.Time
	// is the beginning of the event.
	if len(events) == 0 {
		// BUG FIX: the original indexed events[len(events)-1] and
		// panicked when the log file was empty.
		return
	}
	for i := range events[:len(events)-1] {
		d := events[i+1].Time.Sub(events[i].Time)
		events[i].OriginalDuration = d
		events[i].Duration = d
	}
	// The last event lasts until now.
	last := len(events) - 1
	d := time.Since(events[last].Time)
	events[last].OriginalDuration = d
	events[last].Duration = d
}

// calculate_total_durations fills in TotalDuration: the sum of durations of
// all events sharing the same name.
func calculate_total_durations(events []Event) {
	totals := make(map[string]time.Duration)
	for _, e := range events {
		totals[e.Name] += e.Duration
	}
	for i, e := range events {
		events[i].TotalDuration = totals[e.Name]
	}
}

// start_of_day returns midnight (local time) of t's day.
func start_of_day(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.Local)
}

// start_of_next_day returns midnight (local time) of the day after t.
func start_of_next_day(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day()+1, 0, 0, 0, 0, time.Local)
}

// split_by_day groups events into Days, splitting any event that crosses
// midnight into per-day pieces (Duration is trimmed; OriginalDuration is
// preserved on each piece).
func split_by_day(events []Event) (by_day []Day) {
	var current_day time.Time
	var this_day Day
	for _, e := range events {
		for {
			first_day_of_e := e
			day := start_of_day(e.Time)
			if start_of_day(e.Time.Add(e.Duration)) != day {
				// Event crosses midnight: cut it at the day boundary and
				// keep processing the remainder.
				split_at := start_of_next_day(e.Time)
				first_day_of_e.Duration = split_at.Sub(e.Time)
				e.Time = split_at
				e.Duration -= first_day_of_e.Duration
			}
			if current_day != day {
				if !current_day.IsZero() {
					by_day = append(by_day, this_day)
					this_day = Day{}
				}
				current_day = day
			}
			this_day.Events = append(this_day.Events, first_day_of_e)
			if start_of_day(first_day_of_e.Time) == start_of_day(e.Time) {
				break
			}
		}
	}
	// BUG FIX: only emit the final day when it has events; the original
	// appended an empty Day for empty input, which later panicked in
	// backfill_first_day.
	if len(this_day.Events) > 0 {
		by_day = append(by_day, this_day)
	}
	return
}

// TimeOfDay returns seconds since midnight for the event's start.
func (e *Event) TimeOfDay() int {
	return e.Time.Hour()*3600 + e.Time.Minute()*60 + e.Time.Second()
}

// Color returns a stable CSS color derived from the event name.
func (e *Event) Color() template.CSS {
	if e.Name == "" {
		return template.CSS("white")
	}
	hash := sha1.New()
	io.WriteString(hash, e.Name)
	// Integer arithmetic: the untyped constants convert to int, so this
	// truncates — hue is an int in [0, 359].
	hue := 360.0 * int(hash.Sum(nil)[0]) / 256.0
	return template.CSS("hsl(" + strconv.Itoa(hue) + ",90%,45%)")
}

// DescribeDuration renders t in the largest convenient unit.
func DescribeDuration(t time.Duration) string {
	if t.Hours() > 24 {
		return fmt.Sprintf("%.1f days", t.Hours()/24)
	}
	if t.Hours() > 1 {
		return fmt.Sprintf("%.1f hours", t.Hours())
	}
	if t.Minutes() > 1 {
		return fmt.Sprintf("%.1f min", t.Minutes())
	}
	return fmt.Sprintf("%.0f sec", t.Seconds())
}

// DurationDescription renders this event's span, and its share of the
// total for same-named events when they differ.
func (e *Event) DurationDescription() string {
	if e.OriginalDuration == e.TotalDuration {
		return DescribeDuration(e.OriginalDuration)
	}
	return (DescribeDuration(e.OriginalDuration) + " of " +
		DescribeDuration(e.TotalDuration))
}

// Height returns the event's display height as a percentage of a day.
func (e *Event) Height() float32 {
	return 100 * float32(e.Duration.Seconds()) / 86400
}

// BodyWidth scales the page body so *initial_days days fit on screen.
func (r Report) BodyWidth() float32 {
	days_on_screen := *initial_days
	if len(r.Days) < days_on_screen {
		days_on_screen = len(r.Days)
	}
	return 100 * float32(len(r.Days)) / float32(days_on_screen)
}

// DayWidth returns each day's width as a percentage of the body.
func (r Report) DayWidth() float32 {
	return 100.0 / float32(len(r.Days))
}

// generate_report wraps the day list in a Report for the template.
func generate_report(days []Day) (td Report) {
	td.Days = days
	return
}

func backfill_first_day(d *Day) {
	// Stuff an empty event at the beginning of the first day
	first_event_time := d.Events[0].Time
	start_of_first_day := start_of_day(first_event_time)
	time_until_first_event := first_event_time.Sub(start_of_first_day)
	first_day_events := append([]Event{Event{Duration: time_until_first_event}}, d.Events...)
	d.Events = first_day_events
}

// execute_template loads template_name from *template_path and renders it
// with data into out.
func execute_template(template_name string, data interface{}, out io.Writer) error {
	t := template.New("tl")
	t, err := t.ParseFiles(filepath.Join(*template_path, template_name))
	if err != nil {
		return err
	}
	err = t.ExecuteTemplate(out, template_name, data)
	if err != nil {
		return err
	}
	return nil
}

// view_handler renders the full timeline view from the log file.
func view_handler(w http.ResponseWriter, r *http.Request) {
	log_file, err := os.Open(*log_filename)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer log_file.Close()
	all_events, err := read_data_file(log_file)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	calculate_durations(all_events)
	calculate_total_durations(all_events)
	by_day := split_by_day(all_events)
	// BUG FIX: an empty log produces no days; skip the backfill instead of
	// panicking on by_day[0].
	if len(by_day) > 0 {
		backfill_first_day(&by_day[0])
	}
	report := generate_report(by_day)
	err = execute_template("view.template", report, w)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	return
}

// log_handler renders the entry form.
func log_handler(w http.ResponseWriter, r *http.Request) {
	err := execute_template("log.template", nil, w)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}

// write_to_log appends line to the log file.
func write_to_log(line []byte) error {
	log_file, err := os.OpenFile(*log_filename, os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		return errors.New(fmt.Sprint("Couldn't open log file: ", err))
	}
	defer log_file.Close() // Closed with error checking below
	written, err := log_file.Write(line)
	if err != nil {
		if written == 0 {
			return errors.New(fmt.Sprint("Couldn't write to log file: ", err))
		} else {
			return errors.New(fmt.Sprint("Only wrote ", written, " bytes to log file: ", err))
		}
	}
	err = log_file.Close()
	if err != nil {
		return errors.New(fmt.Sprint("Couldn't close log file: ", err))
	}
	return nil
}

// log_submit_handler accepts a POSTed "thing" and appends it, timestamped,
// to the log.
func log_submit_handler(w http.ResponseWriter, r *http.Request) {
	w.Header()["Allow"] = []string{"POST"}
	if r.Method != "POST" {
		http.Error(w, "Please use POST", http.StatusMethodNotAllowed)
		return
	}
	t := time.Now().Format("2006 01 02 15 04 05 ")
	thing := strings.Replace(r.FormValue("thing"), "\n", "", -1)
	err := write_to_log([]byte(t + thing + "\n"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	err = execute_template("log_submit.template", nil, w)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}

func main() {
	flag.Parse()
	http.HandleFunc("/view", view_handler)
	http.HandleFunc("/log", log_handler)
	http.HandleFunc("/log_submit", log_submit_handler)
	err := http.ListenAndServe(*listen_address+":"+strconv.Itoa(*port), nil)
	if err != nil {
		log.Fatal("http.ListenAndServe: ", err)
	}
}
package dao

import (
	"fmt"
	"log"
	"math"
	"math/rand"
	_ "os"
	"sync"

	model "service-monitor/models"
	mgo "gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

// StreamsDAO wraps access to the "streams" MongoDB collection.
type StreamsDAO struct {
	Server   string
	Database string
}

var db *mgo.Database

const (
	COLLECTION = "streams"
	SLIDE      = 1
	MAXINT     = 2147483647
	MININT     = -2147483647
)

// Establish a connection to database
func (m *StreamsDAO) Connect() {
	session, err := mgo.Dial(m.Server)
	if err != nil {
		log.Fatal(err)
	}
	db = session.DB(m.Database)
}

// Find list of streams
func (m *StreamsDAO) FindAll() ([]model.Stream, error) {
	var streams []model.Stream
	err := db.C(COLLECTION).Find(bson.M{}).All(&streams)
	return streams, err
}

// Insert a stream into database
func (m *StreamsDAO) Insert(stream model.Stream) error {
	err := db.C(COLLECTION).Insert(&stream)
	return err
}

// Insert1Min inserts a batch of streams into the database, stopping at the
// first failure.
func (m *StreamsDAO) Insert1Min(streams []model.Stream) error {
	var err error
	for i := range streams {
		// BUG FIX: the original wrote `for stream := range streams`, which
		// ranges over *indices*, so it inserted the int index instead of
		// the stream document.
		err = db.C(COLLECTION).Insert(&streams[i])
		if err != nil {
			return err
		}
	}
	return err
}

// Find list of output: aggregates min/max/average response time per
// SLIDE-minute window between input.StartTime and input.EndTime.
func (m *StreamsDAO) FindOutput(input model.Input) ([]model.Output, error) {
	var err error
	var streams model.Stream
	outputs := []model.Output{}
	slider := SLIDE
	// fmt.Println(input.StartTime, input.EndTime)
	for i := input.StartTime; i < input.EndTime; i = i + (slider * 60) {
		iter := db.C(COLLECTION).Find(bson.M{"hittime": bson.M{"$gte": i, "$lt": i + (slider * 60)}}).Iter()
		var minVal, maxVal, total, count int = MAXINT, MININT, 0, 0
		for iter.Next(&streams) {
			if maxVal < streams.ResponseTime {
				maxVal = streams.ResponseTime
			}
			if minVal > streams.ResponseTime {
				minVal = streams.ResponseTime
			}
			total += streams.ResponseTime
			count++
		}
		if err = iter.Close(); err != nil {
			break
		}
		if count > 0 {
			output := model.Output{Time: i, MinResponseTime: minVal, MaxResponseTime: maxVal, AverageResponseTime: total / count}
			outputs = append(outputs, output)
		}
	}
	return outputs, err
}

// FindOutput1Min is the concurrent variant: windows are rounded to whole
// minutes, the window size is chosen from the overall span, and one
// goroutine aggregates each window.
func (m *StreamsDAO) FindOutput1Min(input model.Input) ([]model.Output, error) {
	var err error
	var slider int
	outputs := []model.Output{}
	newStartTime := int(math.Round(float64(input.StartTime)/60) * 60)
	timeDiff := input.EndTime - input.StartTime
	newEndTime := int(math.Round(float64(input.EndTime)/60) * 60)
	newTimeDiff := newEndTime - newStartTime
	// NOTE(review): both terms use newStartTime/StartTime; the second term
	// probably intended newEndTime/EndTime — confirm before changing.
	totalError := (math.Abs(float64(newStartTime)-float64(input.StartTime)) + math.Abs(float64(newStartTime)-float64(input.StartTime))) / float64(timeDiff)
	totalError = math.Round(totalError*10000) / 10000
	if timeDiff < 6000 {
		slider = SLIDE
	} else {
		sliderTemp := rand.Intn(100-15) + 15
		slider = (newTimeDiff / sliderTemp) / 60
		// fmt.Println(sliderTemp)
	}
	// fmt.Println(input.StartTime, newStartTime, input.EndTime, newEndTime, totalError)
	fmt.Println("Each Window Min: ", slider)
	fmt.Println("Error: ", totalError)

	outputChan := make(chan model.Output)
	var mu sync.Mutex // guards err, which workers may set concurrently
	var wg sync.WaitGroup
	wg.Add(int(math.Ceil(float64(newEndTime-newStartTime) / float64(60*slider))))
	for i := newStartTime; i < newEndTime; i = i + (slider * 60) {
		// BUG FIX: pass the window start as a parameter. The original
		// closure captured the shared loop variable `i`, so goroutines
		// could all observe the same (final) value.
		go func(windowStart int) {
			defer wg.Done()
			// BUG FIX: per-goroutine decode target. The original shared a
			// single `streams` variable across all goroutines (data race).
			var stream model.Stream
			iter := db.C(COLLECTION).Find(bson.M{"hittime": bson.M{"$gte": windowStart, "$lt": windowStart + (slider * 60)}}).Iter()
			var minVal, maxVal, total, count int = MAXINT, MININT, 0, 0
			for iter.Next(&stream) {
				if maxVal < stream.ResponseTime {
					maxVal = stream.ResponseTime
				}
				if minVal > stream.ResponseTime {
					minVal = stream.ResponseTime
				}
				total += stream.ResponseTime
				count++
			}
			if cerr := iter.Close(); cerr != nil {
				mu.Lock()
				err = cerr
				mu.Unlock()
				return
			}
			var output model.Output
			if count > 0 {
				output = model.Output{Time: windowStart, MinResponseTime: minVal, MaxResponseTime: maxVal, AverageResponseTime: total / count}
			} else {
				output = model.Output{Time: windowStart, MinResponseTime: 0, MaxResponseTime: 0, AverageResponseTime: 0}
			}
			outputChan <- output
		}(i)
	}
	// BUG FIX: the original returned right after wg.Wait() while the
	// collector goroutine was still appending (and the channel was never
	// closed), so results could be lost. Close the channel once all
	// workers are done and wait for the collector to finish.
	done := make(chan struct{})
	go func() {
		for outputC := range outputChan {
			outputs = append(outputs, outputC)
		}
		close(done)
	}()
	wg.Wait()
	close(outputChan)
	<-done
	return outputs, err
}

// // Method for requesting handeling using goroutine and channels
// func ForEachSlide(i int, slider int, outputChan chan model.Output) {
// 	var streams model.Stream
// 	var err error
// 	iter := db.C(COLLECTION).Find(bson.M{"hittime": bson.M{"$gte": i, "$lt": i + (slider * 60)}}).Iter()
// 	var minVal, maxVal, total, count int = MAXINT, MININT, 0, 0
// 	for iter.Next(&streams) {
// 		if maxVal < streams.ResponseTime {
// 			maxVal = streams.ResponseTime
// 		}
// 		if minVal > streams.ResponseTime {
// 			minVal = streams.ResponseTime
// 		}
// 		total += streams.ResponseTime
// 		count++
// 	}
// 	if err = iter.Close(); err != nil {
// 		return
// 	}
// 	var output model.Output
// 	if count > 0 {
// 		output = model.Output{Time: i, MinResponseTime: minVal, MaxResponseTime: maxVal, AverageResponseTime: total / count}
// 	} else {
// 		output = model.Output{Time: i, MinResponseTime: 0, MaxResponseTime: 0, AverageResponseTime: 0}
// 	}
// 	fmt.Println("Inside ", output)
// 	outputChan <- output
// 	// close(outputChan)
// }

// func writefile() {
// 	f, err := os.Create("test.txt")
// 	if err != nil {
// 		fmt.Println(err)
// 		return
// 	}
// 	l, err := f.WriteString("Hello World")
// 	if err != nil {
// 		fmt.Println(err)
// 		f.Close()
// 		return
// 	}
// 	fmt.Println(l, "bytes written successfully")
// 	err = f.Close()
// 	if err != nil {
// 		fmt.Println(err)
// 		return
// 	}
// }
package helper

import "time"

// DefaultLocation is the default location (timezone) of the application:
// a fixed UTC+7 offset with no daylight-saving rules.
var DefaultLocation = time.FixedZone("UTC+7", 7 * 60 * 60)

// DatetimeFormat is the default date/time layout, expressed with Go's
// reference time. It matches the MySQL DATETIME textual format
// ("YYYY-MM-DD hh:mm:ss"); note this is NOT RFC3339, which uses a "T"
// separator and a timezone suffix.
var DatetimeFormat = "2006-01-02 15:04:05"