text
stringlengths
11
4.05M
package filters_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	. "github.com/bosh-prometheus/bosh_exporter/filters"
)

// Ginkgo spec for AZsFilter: Enabled(az) must report whether an availability
// zone passes the configured filter list.
var _ = Describe("AZsFilter", func() {
	var (
		filter    []string   // values handed to the constructor
		azsFilter *AZsFilter // subject under test, rebuilt before every spec
	)

	BeforeEach(func() {
		filter = []string{"fake-az-1", "fake-az-3"}
	})

	// JustBeforeEach runs after every (possibly overriding) BeforeEach, so each
	// Context below can customize `filter` before the subject is constructed.
	JustBeforeEach(func() {
		azsFilter = NewAZsFilter(filter)
	})

	Describe("Enabled", func() {
		Context("when az is enabled", func() {
			It("returns true", func() {
				Expect(azsFilter.Enabled("fake-az-1")).To(BeTrue())
			})
		})

		Context("when az is not enabled", func() {
			It("returns false", func() {
				Expect(azsFilter.Enabled("fake-az-2")).To(BeFalse())
			})
		})

		// An empty filter is expected to disable filtering entirely.
		Context("when there is no filter", func() {
			BeforeEach(func() {
				filter = []string{}
			})

			It("returns true", func() {
				Expect(azsFilter.Enabled("fake-az-2")).To(BeTrue())
			})
		})

		// Filter values are expected to be whitespace-trimmed by the constructor.
		Context("when a filter has leading and/or trailing whitespaces", func() {
			BeforeEach(func() {
				filter = []string{" fake-az-1 "}
			})

			It("returns true", func() {
				Expect(azsFilter.Enabled("fake-az-1")).To(BeTrue())
			})
		})
	})
})
package db // DB is the interface of database type DB interface { Connect(connStr string) InsertOne(doc interface{}) (interface{}, error) InsertMany(docs []interface{}) error Update(filter, data interface{}) (interface{}, error) Upsert(filter, data interface{}) (interface{}, error) UpdateMany(filter, data interface{}) (interface{}, error) Contains(doc interface{}) (bool, error) }
package server

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"github.com/golang/glog"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
)

// responseConfigJM mirrors the JSON envelope of a config response.
// NOTE(review): this type appears unused in this file — confirm whether it is
// referenced elsewhere in the package or is dead code.
type responseConfigJM struct {
	Success string      `json:"success"`
	Code    int         `json:"code"`
	Data    interface{} `json:"data"`
	Message string      `json:"message"`
}

// dataSlice is the config payload shape.
// NOTE(review): the code below instantiates `DataSlice` (capitalized), which
// must be declared elsewhere in the package; this lowercase twin looks unused.
type dataSlice struct {
	LoadTime int      `json:"load-time"`
	Redirect []string `json:"redirect"`
	Dnsproxy []string `json:"dns-proxy"`
	Version  int      `json:"version"`
}

// reqexportConfigJM is the expected POST request body.
type reqexportConfigJM struct {
	PlatformCode int
	Token        string
}

// ExportConfigJM (the revised ConfigJM handler) serves the proxy configuration
// for a platform: it validates the caller's token, loads the config version,
// reads all enabled webConfig rows, and converts their redirect/DNS entries
// into escaped regex rules returned as JSON.
func ExportConfigJM(w http.ResponseWriter, req *http.Request) {
	glog.Info("ExportConfigJM")
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("reqsaveSite recv body:", string(result))
		var m reqexportConfigJM
		e := json.Unmarshal([]byte(result), &m)
		if e != nil {
			// Malformed body: reply with the generic failure envelope.
			retValue := &ResponseData{}
			retValue.Success = false
			retValue.Code = 0
			retValue.Message = "Request Method error: POST"
			retValue.Data = ""
			bytes, _ := json.Marshal(retValue)
			w.Write([]byte(bytes))
			return
		}
		{
			// Check whether the token has expired.
			var user_uuid string
			var err_token error
			{
				user_uuid, err_token = CheckToken(m.Token, w, req)
				if err_token != nil {
					glog.Info(err_token)
					retValue := NewBaseJsonData()
					retValue.Code = 1000
					retValue.Message = err_token.Error()
					retValue.Data = err_token.Error()
					bytes, _ := json.Marshal(retValue)
					w.Write([]byte(bytes))
					return
				}
			}
			glog.Info("user_uuid: ", user_uuid)
		}
		var version_code string
		var loadTime string
		{
			// Load the current config version / load time for the platform.
			rows, err := Db.Query("SELECT version_code, loadTime FROM proxypro.config_version where platform = ?", m.PlatformCode)
			glog.Info(err)
			for rows.Next() {
				err := rows.Scan(&version_code, &loadTime)
				if err != nil {
					glog.Info(err)
				}
			}
			rows.Close()
		}
		// Check whether the platform row referenced by platformCode is
		// enabled or disabled.
		tmp_data := &DataSlice{}
		{
			tmp_data.Dnsproxy = make([]string, 0)
			tmp_data.Redirect = make([]string, 0)
			tmp_data.LoadTime, _ = strconv.Atoi(loadTime)
			tmp_data.Version, _ = strconv.Atoi(version_code)
			all_tmp_map_slice := make([](map[string]string), 0)
			{
				// Query every enabled webConfig row (redirectUrl / DNS / URL).
				//all_tmp_map_slice := make([](map[string]string), 0)
				{
					var Queryconfig *sql.Rows
					var err error
					var tmp_PlatformCode int
					// Platform 1 is mapped to 0 for the webConfig query.
					if m.PlatformCode == 1 {
						tmp_PlatformCode = 0
					}
					Queryconfig, err = Db.Query(`SELECT companyNameID, subcompany, platform, createTime, URL, redirectUrl, DNS FROM proxypro.webConfig where estatus=1 and platform =?;`, tmp_PlatformCode)
					if err != nil {
						glog.Info(err)
					}
					for Queryconfig.Next() {
						var companyNameID string
						var subcompany string
						var redirectUrl string
						var DNS string
						var platform string
						var createTime string
						var URL string
						err := Queryconfig.Scan(&companyNameID, &subcompany, &platform, &createTime, &URL, &redirectUrl, &DNS)
						if err != nil {
							glog.Info(err)
						}
						tmp_map := make(map[string]string, 0)
						tmp_map["companyNameID"] = companyNameID
						tmp_map["subcompanyName"] = subcompany
						tmp_map["redirectUrl"] = redirectUrl
						tmp_map["DNS"] = DNS
						tmp_map["platform"] = platform
						tmp_map["createTime"] = createTime
						tmp_map["URL"] = URL
						all_tmp_map_slice = append(all_tmp_map_slice, tmp_map)
					}
					Queryconfig.Close()
				}
			}
			glog.Info("all_tmp_map_slice length", len(all_tmp_map_slice))
			for _, value := range all_tmp_map_slice {
				var httpstr []string
				var httpsstr []string
				nottwostrss := map[string][]string{}
				twostrss := map[string][]string{}
				// If both redirectUrl and DNS are empty this row contributes
				// nothing; skip it.
				if len(value["redirectUrl"]) == 0 && len(value["DNS"]) == 0 {
					continue
				}
				line := value["redirectUrl"]
				domainname := strings.Split(line, "|")
				// Bucket each entry: explicit http(s) URLs, two-segment
				// domains, and everything else.
				for _, v1 := range domainname {
					if strings.HasPrefix(v1, "http:") || strings.HasPrefix(v1, "https:") {
						if strings.HasPrefix(v1, "http://") {
							httpstr = append(httpstr, v1[len("http://"):])
						} else if strings.HasPrefix(v1, "https://") {
							httpsstr = append(httpsstr, v1[len("https://"):])
						}
					} else {
						// Entries that do not start with http(s).
						split := strings.Split(v1, ".")
						// Two segments, e.g. "foo.com".
						if len(split) == 2 {
							twostrss[split[1]] = append(twostrss[split[1]], split[0])
						} else if len(split) == 3 && strings.HasPrefix(v1, "www.") {
							// Three segments starting with `www.`.
							twostrss[split[2]] = append(twostrss[split[2]], split[1])
						} else {
							// More than two segments (not www.x.y).
							if len(split) > 1 {
								//fmt.Println(`split[len(split)-1],line[:strings.LastIndex(line, ".")]::`, split[len(split)-1], v1[:strings.LastIndex(v1, ".")])
								nottwostrss[split[len(split)-1]] = append(nottwostrss[split[len(split)-1]], v1[:strings.LastIndex(v1, ".")])
							}
						}
					}
				}
				//---------------http begin----------------
				myhttp := ""
				index := 0
				for _, v := range httpstr {
					index++
					if len(httpstr) == 1 {
						myhttp += v
					} else if len(httpstr) > 1 {
						if len(httpstr) == index {
							myhttp += "(" + v + ")"
						} else {
							myhttp += "(" + v + ")" + "|"
						}
					}
				}
				myhttp1 := "http://" + "(" + myhttp + ")"
				myhttp1 = strings.Replace(myhttp1, ".", "\\.", -1)
				if len(myhttp) > 0 {
					tmp_data.Redirect = append(tmp_data.Redirect, myhttp1+`->`+value["URL"])
				}
				//---------------http end----------------
				//---------------https  begin----------------
				myHttps := ""
				index1 := 0
				for _, v := range httpsstr {
					index1++
					// NOTE(review): these conditions test len(myHttps) (the
					// accumulator, empty on the first pass) where the http
					// branch above tests len(httpstr); as written no https
					// redirect is ever accumulated. Likely meant
					// len(httpsstr) — confirm and fix.
					if len(myHttps) == 1 {
						myHttps += v
					} else if len(myHttps) > 1 {
						if len(myHttps) == index1 {
							myHttps += "(" + v + ")"
						} else {
							myHttps += "(" + v + ")" + "|"
						}
					}
				}
				myHttps1 := "https://" + "(" + myHttps + ")"
				myHttps1 = strings.Replace(myHttps1, ".", "\\.", -1)
				if len(myHttps) > 0 {
					tmp_data.Redirect = append(tmp_data.Redirect, myHttps1+`->`+value["URL"])
				}
				//---------------https  end----------------
				//---------------handle two-segment domains begin----------------
				twostrs := ""
				//>0
				if len(twostrss) > 0 {
					indextwostr := 0
					for k, v := range twostrss {
						indextwostr++
						// Join all prefixes for this suffix with "|".
						tmpst := ""
						for i, j := range v {
							if i == len(v)-1 {
								tmpst += j
							} else {
								tmpst += j + "|"
							}
						}
						if indextwostr == len(twostrss) {
							if len(v) == 1 {
								twostrs += "(" + tmpst + `.` + k + ")"
							} else {
								twostrs += "(" + `(` + tmpst + `).` + k + ")"
							}
						} else {
							if len(v) == 1 {
								twostrs += "(" + tmpst + `.` + k + ")" + "|"
							} else {
								twostrs += "(" + `(` + tmpst + `).` + k + ")" + "|"
							}
						}
					}
					if len(strings.Split(twostrs, "|")) > 1 {
						twostrs = `(www.)?(` + twostrs + ")"
					} else {
						twostrs = `(www.)?` + twostrs
					}
					twostrs = strings.Replace(twostrs, ".", "\\.", -1)
				}
				//---------------handle two-segment domains end----------------
				//---------------handle non-two-segment domains begin----------------
				nottwostrs := ""
				//>0
				if len(nottwostrss) > 0 {
					indexnottwostr := 0
					for k, v := range nottwostrss {
						indexnottwostr++
						tmpst := ""
						for i, j := range v {
							if i == len(v)-1 {
								tmpst += j
							} else {
								tmpst += j + "|"
							}
						}
						if indexnottwostr == len(nottwostrss) {
							//TODO follow this pattern
							if len(v) == 1 {
								nottwostrs += "(" + tmpst + `.` + k + ")"
							} else {
								nottwostrs += "(" + `(` + tmpst + `).` + k + ")"
							}
						} else {
							if len(v) == 1 {
								nottwostrs += "(" + tmpst + `.` + k + ")" + "|"
							} else {
								nottwostrs += "(" + `(` + tmpst + `).` + k + ")" + "|"
							}
						}
					}
					if len(strings.Split(nottwostrs, "|")) > 1 {
						nottwostrs = "(" + nottwostrs + ")"
					} else {
						nottwostrs = nottwostrs
					}
					nottwostrs = strings.Replace(nottwostrs, ".", "\\.", -1)
				}
				// Merge both groups into one alternation, if either is set.
				twoAndNotTwo := ""
				if len(twostrs) > 0 && len(nottwostrs) > 0 {
					twoAndNotTwo = `((` + twostrs + `)|(` + nottwostrs + `))`
				} else if len(twostrs) == 0 {
					twoAndNotTwo = nottwostrs
				} else if len(nottwostrs) == 0 {
					twoAndNotTwo = twostrs
				}
				if len(twoAndNotTwo) != 0 {
					twoAndNotTwo = twoAndNotTwo + `->` + value["URL"]
					tmp_data.Redirect = append(tmp_data.Redirect, twoAndNotTwo)
				}
				// Build the DNS-proxy entry from the URL's hostname.
				u, err := url.Parse(value["URL"])
				if err != nil {
					panic(err)
				}
				// Host carries a port.
				if strings.Contains(u.Host, ":") {
					split := strings.Split(u.Host, ":")
					if len(split) > 1 {
						if strings.HasPrefix(split[0], `www.`) && len(split) == 3 {
							split[0] = strings.Replace(split[0], `.`, `\.`, -1)
							split[0] = strings.Replace(split[0], `www.`, `(www\\.)?`, -1)
							tmp_Dnsproxy := split[0] + ":" + value["DNS"]
							tmp_data.Dnsproxy = append(tmp_data.Dnsproxy, tmp_Dnsproxy)
						} else {
							split[0] = strings.Replace(split[0], `.`, `\.`, -1)
							tmp_Dnsproxy := `(www\.)?` + split[0] + ":" + value["DNS"]
							tmp_data.Dnsproxy = append(tmp_data.Dnsproxy, tmp_Dnsproxy)
						}
					}
				} else {
					// Host without a port.
					if strings.HasPrefix(u.Host, `www.`) {
						u.Host = strings.Replace(u.Host, `www.`, `(www.)?`, -1)
						u.Host = strings.Replace(u.Host, `.`, `\.`, -1)
						tmp_data.Dnsproxy = append(tmp_data.Dnsproxy, u.Host+":"+value["DNS"])
					} else {
						u.Host = strings.Replace(u.Host, `.`, `\.`, -1)
						tmp_Dnsproxy := "(www\\.)?" + u.Host + ":" + value["DNS"]
						tmp_data.Dnsproxy = append(tmp_data.Dnsproxy, tmp_Dnsproxy)
					}
				}
			}
		}
		//tmp_Redirect := value["redirectUrl"]+ `/?->` + value["URL"]
		//tmp_data.Redirect = append(tmp_data.Redirect, tmp_Redirect)
		///
		//Aeskey := "9XPghLnqdLFTro5o"
		//data_json, _ := json.Marshal(tmp_data)
		//content := strings.Replace(string(data_json), "\\u003e", ">", -1)
		//glog.Info("data_json: ", string(content))
		//crypted := aescrypto.Encrypt(aescrypto.PKCS7Pad(data_json), key)
		//crypted, _ := aescrypto.AesEcbPkcs5Encrypt([]byte(content), []byte(Aeskey))
		//glog.Info("aes: ", string(crypted))
		//encodeString := base64.StdEncoding.EncodeToString(crypted)
		//glog.Info("aes ecb base64 encrypt: ", encodeString)
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Data = tmp_data
		retValue.Message = ""
		bytes2, _ := json.Marshal(retValue)
		w.Write(bytes2)
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}

// handleNotStartWithHttp buckets a domain that carries no http(s) scheme into
// the two-segment (twostrss) or longer (nottwostrss) map, keyed by the last
// segment. Mirrors the inline branch inside ExportConfigJM.
func handleNotStartWithHttp(v1 string, twostrss map[string][]string, nottwostrss map[string][]string) {
	split := strings.Split(v1, ".")
	//fmt.Println(reflect.TypeOf(split),split)
	// Two segments, e.g. "foo.com".
	if len(split) == 2 {
		twostrss[split[1]] = append(twostrss[split[1]], split[0])
	} else if len(split) == 3 && strings.HasPrefix(v1, "www.") {
		// Three segments starting with `www.`.
		twostrss[split[2]] = append(twostrss[split[2]], split[1])
	} else {
		// Anything else with more than one segment.
		if len(split) > 1 {
			fmt.Println(`split[len(split)-1],line[:strings.LastIndex(line, ".")]::`, split[len(split)-1], v1[:strings.LastIndex(v1, ".")])
			nottwostrss[split[len(split)-1]] = append(nottwostrss[split[len(split)-1]], v1[:strings.LastIndex(v1, ".")])
		}
	}
}
func handleStartWithHttp(v1 string, httpstr []string, httpsstr []string) ([]string, []string) { if strings.HasPrefix(v1, "http://") { fmt.Println("http://") fmt.Println(`v1[len("http://"):]:`, v1[len("http://"):]) httpstr = append(httpstr, v1[len("http://"):]) } else if strings.HasPrefix(v1, "https://") { fmt.Println("https://") fmt.Println(`v1[len("https://"):]`, v1[len("https://"):]) httpsstr = append(httpsstr, v1[len("https://"):]) } if len(v1) == 0 { fmt.Println("*****************************************") } return httpstr, httpsstr } func handleNotTwoStr(nottwostrss map[string][]string) string { nottwostrs := "" fmt.Println(len(nottwostrss)) //>0 if len(nottwostrss) > 0 { fmt.Println("nottwostrss:", nottwostrss) //twostrs := `(www\\.)?` indexnottwostr := 0 for k, v := range nottwostrss { indexnottwostr++ tmpst := "" for i, j := range v { if i == len(v)-1 { tmpst += j //fmt.Println(j) } else { tmpst += j + "|" //fmt.Println(j) } } fmt.Println("len(twostrss):", indexnottwostr, "--", len(nottwostrss)) if indexnottwostr == len(nottwostrss) { fmt.Println(tmpst, len(v)) fmt.Println("if tmpst:", tmpst) //TODO 按照这种 if len(v) == 1 { nottwostrs += "(" + tmpst + `.` + k + ")" } else { nottwostrs += "(" + `(` + tmpst + `).` + k + ")" } } else { fmt.Println("else tmpst:", tmpst, " len(v):", len(v)) if len(v) == 1 { nottwostrs += "(" + tmpst + `.` + k + ")" + "|" } else { nottwostrs += "(" + `(` + tmpst + `).` + k + ")" + "|" } } } if len(strings.Split(nottwostrs, "|")) > 1 { nottwostrs = "(" + nottwostrs + ")" if nottwostrs == `(aaa.xasdf.com)|(asd.44.abc)|(a.b.c.xyz))` { fmt.Println(`1111(aaa.xasdf.com)|(asd.44.abc)|(a.b.c.xyz))****`) } } else { nottwostrs = nottwostrs if nottwostrs == `(aaa.xasdf.com)|(asd.44.abc)|(a.b.c.xyz))` { fmt.Println(`222(aaa.xasdf.com)|(asd.44.abc)|(a.b.c.xyz))****`) } } //twostrs=twostrs+")" nottwostrs = strings.Replace(nottwostrs, ".", "\\.", -1) fmt.Println("nottwostrs:||::", nottwostrs) if nottwostrs == 
`(aaa\.xasdf\.com)|(asd\.44\.abc)|(a\.b\.c\.xyz))` { fmt.Println(`(aaa\.xasdf\.com)|(asd\.44\.abc)|(a\.b\.c\.xyz))----------`) } } return nottwostrs } func handleTwoStr(twostrss map[string][]string) string { fmt.Println(len(twostrss)) twostrs := "" //>0 if len(twostrss) > 0 { fmt.Println("twostrss:", twostrss) //twostrs := `(www\\.)?` indextwostr := 0 for k, v := range twostrss { indextwostr++ tmpst := "" for i, j := range v { if i == len(v)-1 { tmpst += j //fmt.Println(j) } else { tmpst += j + "|" //fmt.Println(j) } } fmt.Println("len(twostrss):", indextwostr, "--", len(twostrss)) if indextwostr == len(twostrss) { fmt.Println(tmpst, len(v)) fmt.Println("if tmpst:", tmpst) if len(v) == 1 { twostrs += "(" + tmpst + `.` + k + ")" } else { twostrs += "(" + `(` + tmpst + `).` + k + ")" } } else { fmt.Println("else tmpst:", tmpst, " len(v):", len(v)) if len(v) == 1 { twostrs += "(" + tmpst + `.` + k + ")" + "|" } else { twostrs += "(" + `(` + tmpst + `).` + k + ")" + "|" } } } if len(strings.Split(twostrs, "|")) > 1 { twostrs = `(www.)?(` + twostrs + ")" } else { twostrs = `(www.)?` + twostrs } //twostrs=twostrs+")" twostrs = strings.Replace(twostrs, ".", "\\.", -1) fmt.Println("twostrs:||::", twostrs) } return twostrs } func handleHttps(httpsstr []string, tmp_data *DataSlice, value map[string]string) { myHttps := "" index1 := 0 for _, v := range httpsstr { index1++ if len(myHttps) == 1 { myHttps += v } else if len(myHttps) > 1 { if len(myHttps) == index1 { myHttps += "(" + v + ")" } else { myHttps += "(" + v + ")" + "|" } } } myHttps1 := "https://" + "(" + myHttps + ")" myHttps1 = strings.Replace(myHttps1, ".", "\\.", -1) if len(myHttps) > 0 { tmp_data.Redirect = append(tmp_data.Redirect, myHttps1+`->`+value["URL"]) } } func handleHttp(httpstr []string, tmp_data *DataSlice, value map[string]string) { myhttp := "" index := 0 fmt.Println("httpstr:::", httpstr) for _, v := range httpstr { index++ if len(httpstr) == 1 { myhttp += v } else if len(httpstr) > 1 { if 
len(httpstr) == index { myhttp += "(" + v + ")" } else { myhttp += "(" + v + ")" + "|" } } } myhttp1 := "http://" + "(" + myhttp + ")" myhttp1 = strings.Replace(myhttp1, ".", "\\.", -1) if len(myhttp) > 0 { tmp_data.Redirect = append(tmp_data.Redirect, myhttp1+`->`+value["URL"]) } }
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

package armhelpers

import (
	"context"
	"fmt"

	"github.com/Azure/aks-engine/pkg/api"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

// validationResult pairs a required OS image with the error (possibly nil)
// encountered while probing it on the target cloud.
type validationResult struct {
	image     api.AzureOSImageConfig
	errorData error
}

// ValidateRequiredImages checks that the OS images required by both
// master and agent pools are available on the target cloud.
// It returns nil when every image resolves, an aggregate error (after logging
// each missing image) otherwise, and an error if client does not implement
// VMImageFetcher.
func ValidateRequiredImages(ctx context.Context, location string, p *api.Properties, client AKSEngineClient) error {
	if fetcher, ok := client.(VMImageFetcher); ok {
		missingImages := make(map[api.Distro]validationResult)
		for distro, i := range requiredImages(p) {
			log.Debugln(fmt.Sprintf("Validate OS image is available on the target cloud: %s, %s, %s, %s", i.ImagePublisher, i.ImageOffer, i.ImageSku, i.ImageVersion))
			if i.ImageVersion == "latest" {
				// "latest" cannot be fetched directly; list the SKU's versions
				// and require at least one to exist.
				list, err := fetcher.ListVirtualMachineImages(ctx, location, i.ImagePublisher, i.ImageOffer, i.ImageSku)
				if err != nil || len(*list.Value) == 0 {
					missingImages[distro] = validationResult{
						image:     i,
						errorData: err,
					}
				}
			} else {
				// A pinned version can be resolved with a direct GET.
				if _, err := fetcher.GetVirtualMachineImage(ctx, location, i.ImagePublisher, i.ImageOffer, i.ImageSku, i.ImageVersion); err != nil {
					missingImages[distro] = validationResult{
						image:     i,
						errorData: err,
					}
				}
			}
		}
		if len(missingImages) == 0 {
			return nil
		}
		return printErrorIfAny(missingImages)
	}
	return errors.New("parameter client is not a VMImageFetcher")
}

// requiredImages collects the distinct OS image configs needed by the master
// profile and every agent pool, keyed by distro.
func requiredImages(p *api.Properties) map[api.Distro]api.AzureOSImageConfig {
	images := make(map[api.Distro]api.AzureOSImageConfig)
	images[p.MasterProfile.Distro] = toImageConfig(p.MasterProfile.Distro)
	for _, app := range p.AgentPoolProfiles {
		if app.OSType == api.Windows {
			images[app.Distro] = toImageConfigWindows(p.WindowsProfile)
		} else {
			images[app.Distro] = toImageConfig(app.Distro)
		}
	}
	return images
}

// printErrorIfAny logs every missing image with its probe error, then returns
// a single summary error for the caller.
func printErrorIfAny(missingImages map[api.Distro]validationResult) error {
	for _, value := range missingImages {
		i := value.image
		log.Errorf("error: %+v", value.errorData)
		log.Errorf("Image Publisher: %s, Offer: %s, SKU: %s, Version: %s", i.ImagePublisher, i.ImageOffer, i.ImageSku, i.ImageVersion)
	}
	return errors.New("some VM images are missing on the target cloud")
}

// toImageConfig maps a Linux distro to its image config; unknown or empty
// distros fall back to Ubuntu 16.04.
func toImageConfig(distro api.Distro) api.AzureOSImageConfig {
	// Explicit empty-distro check; the switch default would return the same
	// value.
	if distro == "" {
		return api.Ubuntu1604OSImageConfig
	}
	switch distro {
	case api.Ubuntu:
		return api.Ubuntu1604OSImageConfig
	case api.Ubuntu1804:
		return api.Ubuntu1804OSImageConfig
	case api.Ubuntu1804Gen2:
		return api.Ubuntu1804Gen2OSImageConfig
	case api.Ubuntu2004:
		return api.Ubuntu2004OSImageConfig
	case api.Ubuntu2004Gen2:
		return api.Ubuntu2004Gen2OSImageConfig
	case api.Flatcar:
		return api.FlatcarImageConfig
	case api.AKSUbuntu1604:
		return api.AKSUbuntu1604OSImageConfig
	case api.AKSUbuntu1804:
		return api.AKSUbuntu1804OSImageConfig
	case api.AKSUbuntu2004:
		return api.AKSUbuntu2004OSImageConfig
	case api.ACC1604:
		return api.ACC1604OSImageConfig
	default:
		return api.Ubuntu1604OSImageConfig
	}
}

// toImageConfigWindows builds the Windows image config from the cluster's
// WindowsProfile, or falls back to the AKS Windows Server 2019 image when no
// profile is set.
func toImageConfigWindows(profile *api.WindowsProfile) api.AzureOSImageConfig {
	if profile != nil {
		return api.AzureOSImageConfig{
			ImageOffer:     profile.WindowsOffer,
			ImageSku:       profile.WindowsSku,
			ImagePublisher: profile.WindowsPublisher,
			ImageVersion:   profile.ImageVersion,
		}
	}
	return api.AKSWindowsServer2019ContainerDOSImageConfig
}
package slacker

import (
	"regexp"

	"github.com/shomali11/proper"
)

// regexMatch compiles regex and matches it against text. On a match it
// returns the named capture groups as Properties (keyed by subexpression
// name) and true; on a compile failure or no match it returns (nil, false).
// Empty captures are skipped, so optional groups that did not participate in
// the match are absent from the result.
func (s *Slacker) regexMatch(regex, text string) (*proper.Properties, bool) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return nil, false
	}

	values := re.FindStringSubmatch(text)
	if len(values) == 0 {
		return nil, false
	}

	valueIndex := 0
	keys := re.SubexpNames() // index 0 is the whole match; names start at 1
	parameters := make(map[string]string)
	for i := 1; i < len(keys) && valueIndex < len(values); i++ {
		if len(values[i]) == 0 {
			continue
		}
		parameters[keys[i]] = values[i]
		valueIndex++
	}

	// FindStringSubmatch already proved the pattern matches text, so return
	// true directly instead of re-scanning with re.MatchString (the original
	// second scan always returned true here).
	return proper.NewProperties(parameters), true
}
package day2

import (
	"fmt"
	"io/ioutil"
	"os"
	"regexp"
	"strconv"
	"strings"
)

// DayTwoOne solves Advent of Code day 2, task 1: each input line has the form
// "min-max char: password"; a password is valid when char occurs between min
// and max times (inclusive). Reads ./2/input.txt and prints the count of
// valid passwords. Exits the process on read or parse errors.
func DayTwoOne() {
	input, err := ioutil.ReadFile("./2/input.txt")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	pwdData := strings.Split(string(input), "\n")
	amountOfCorrectPwds := 0
	for i := 0; i < len(pwdData); i++ {
		keyAndPwd := strings.Split(pwdData[i], ":")
		// FIX: the original indexed keyAndPwd[1] unconditionally, which
		// panics on a blank line (e.g. the trailing newline of the file).
		if len(keyAndPwd) < 2 {
			continue
		}
		pwd := keyAndPwd[1]
		tempKey := strings.Fields(keyAndPwd[0])
		charToCheck := tempKey[1]
		amountRange := strings.Split(tempKey[0], "-")
		from, errorFrom := strconv.Atoi(amountRange[0])
		to, errorTo := strconv.Atoi(amountRange[1])
		if errorFrom != nil || errorTo != nil {
			fmt.Println("Some error :/")
			os.Exit(1)
		}
		// Strip everything that is NOT the required character; what is left
		// is one occurrence per original occurrence.
		re := regexp.MustCompile(fmt.Sprintf("[^%s]", charToCheck))
		parsedPwd := strings.Join(re.Split(pwd, -1), "")
		if len(parsedPwd) >= from && len(parsedPwd) <= to {
			amountOfCorrectPwds++
		}
	}
	fmt.Println(amountOfCorrectPwds)
}
package main

import (
	"encoding/binary"
	"flag"
	"fmt"
	"log"
	"os"
	"runtime/pprof"
	"time"

	"google.golang.org/grpc"
)

// SequentialPayload returns an n-byte buffer whose content is deterministic:
// each 8-byte little-endian word holds its own byte offset. n must be a
// multiple of 8 or the function panics.
func SequentialPayload(n int64) []byte {
	if n%8 != 0 {
		panic(fmt.Sprintf("n == %v must be a multiple of 8; has remainder %v", n, n%8))
	}
	k := uint64(n / 8)
	by := make([]byte, n)
	j := uint64(0)
	for i := uint64(0); i < k; i++ {
		j = i * 8
		binary.LittleEndian.PutUint64(by[j:j+8], j)
	}
	return by
}

// ProgramName labels the flag set and error messages.
const ProgramName = "client"

// main parses flags, optionally starts CPU profiling, dials the gRPC server
// (TLS, SSH, or insecure), sends two small files, then times the transfer of
// a large generated payload. ClientConfig, newClient, p and panicOn are
// defined elsewhere in this package.
func main() {
	myflags := flag.NewFlagSet(ProgramName, flag.ContinueOnError)
	cfg := &ClientConfig{}
	cfg.DefineFlags(myflags)
	// Default to no encryption; flags parsed below may override related knobs.
	cfg.SkipEncryption = true
	err := myflags.Parse(os.Args[1:])
	if err != nil {
		log.Fatalf("%s command line flag error: '%s'", ProgramName, err)
	}
	if cfg.CpuProfilePath != "" {
		f, err := os.Create(cfg.CpuProfilePath)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	err = cfg.ValidateConfig()
	if err != nil {
		log.Fatalf("%s command line flag error: '%s'", ProgramName, err)
	}
	var opts []grpc.DialOption
	if cfg.UseTLS {
		cfg.setupTLS(&opts)
	} else if cfg.SkipEncryption {
		// no encryption
		opts = append(opts, grpc.WithInsecure())
		p("client configured to skip encryption.")
	} else {
		cfg.setupSSH(&opts)
	}
	serverAddr := fmt.Sprintf("%v:%v", cfg.ServerHost, cfg.ServerPort)
	conn, err := grpc.Dial(serverAddr, opts...)
	if err != nil {
		log.Fatalf("fail to dial: %v", err)
	}
	defer conn.Close()

	// SendFile
	c := newClient(conn)
	isBcastSet := false
	myID := "test-client-0"
	data := []byte("hello peer, it is nice to meet you!!")
	err = c.runSendFile("file1", data, 3, isBcastSet, myID)
	panicOn(err)
	data2 := []byte("second set of data should be kept separate!")
	err = c.runSendFile("file2", data2, 3, isBcastSet, myID)
	panicOn(err)

	//n := 1 << 29 // test with 512MB file. Works with up to 1MB or 2MB chunks.
	n := cfg.PayloadSizeMegaBytes * 1 << 20
	p("generating test data of size %v bytes", n)
	data3 := SequentialPayload(int64(n))

	//chunkSz := 1 << 22 // 4MB // GRPC will fail with EOF.
	chunkSz := 1 << 20
	c2done := make(chan struct{})
	overlap := false // overlap two sends to different paths
	// Optional second concurrent transfer; c2done is closed either way so the
	// final receive below never blocks.
	go func() {
		if overlap {
			time.Sleep(10 * time.Millisecond)
			p("after 10msec of sleep, comencing bigfile3...")
			c2 := newClient(conn)
			t0 := time.Now()
			err = c2.runSendFile("bigfile3", data3, chunkSz, isBcastSet, myID)
			t1 := time.Now()
			panicOn(err)
			mb := float64(len(data3)) / float64(1<<20)
			elap := t1.Sub(t0)
			p("c2: elap time to send %v MB was %v => %.03f MB/sec", mb, elap, mb/(float64(elap)/1e9))
		}
		close(c2done)
	}()
	// Timed transfer on the primary client.
	t0 := time.Now()
	err = c.runSendFile("bigfile4", data3, chunkSz, isBcastSet, myID)
	t1 := time.Now()
	panicOn(err)
	mb := float64(len(data3)) / float64(1<<20)
	elap := t1.Sub(t0)
	p("c: elap time to send %v MB was %v => %.03f MB/sec", mb, elap, mb/(float64(elap)/1e9))
	<-c2done
}
package main

import (
	"math/rand"
)

// intBetween returns a pseudo-random int in the half-open interval [min, max)
// drawn from the supplied generator. Panics (via rand.Intn) when max <= min.
func intBetween(rand *rand.Rand, min, max int) int {
	return rand.Intn(max-min) + min
}

// letters is the alphabet used by stringWithLength.
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// stringWithLength returns a random string of exactly length characters
// drawn uniformly from letters, using the supplied generator.
func stringWithLength(rand *rand.Rand, length int) string {
	b := make([]rune, length)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return string(b)
}

// oneOfColor returns one of the given colors chosen uniformly at random using
// the package-level rand source. Simplified: the original copied cs into a
// fresh slice before indexing; indexing the variadic slice directly is
// behaviorally identical (same distribution, same single Intn call).
// Panics when called with no arguments, as before.
func oneOfColor(cs ...color) color {
	return cs[rand.Intn(len(cs))]
}
// findShortestSubArray returns the length of the shortest contiguous subarray
// of nums whose degree (maximum element frequency) equals the degree of nums
// itself (LeetCode 697). For any value, that subarray spans its first and
// last occurrence. Returns 0 for empty input (the original returned the
// magic sentinel 50000, the problem's input-size bound).
func findShortestSubArray(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	// stats[v] = [occurrence count, first index, last index]
	stats := make(map[int][]int, len(nums))
	for i, v := range nums {
		if s, ok := stats[v]; ok {
			s[0]++    // slice header copies share the backing array,
			s[2] = i  // so these writes update the map entry in place
		} else {
			stats[v] = []int{1, i, i}
		}
	}
	degree := 0
	best := len(nums) // any span is at most the whole array
	for _, s := range stats {
		length := s[2] - s[1] + 1
		if s[0] > degree || (s[0] == degree && length < best) {
			degree = s[0]
			best = length
		}
	}
	return best
}
package schema

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"golang.org/x/crypto/bcrypt"
	"gopkg.in/mgo.v2/bson"
)

// Errors returned by user lookup/validation operations.
var (
	ErrUserAlreadyExist = errors.New("User already exist")
	ErrUserNotExist     = errors.New("User does not exist")
	ErrUserNotValid     = errors.New("User is not valid")
)

// User represents the object of individual and member of organization.
type User struct {
	ID        bson.ObjectId `json:"id" bson:"_id,omitempty"`
	Namespace struct {
		ID        bson.ObjectId `bson:"id"`
		Path      string        `bson:"path"`
		OwnerType OwnerType     `bson:"owner_type"`
	} `bson:"namespace"`
	Username   string `json:"username" bson:"username"`
	GivenName  string `json:"given_name,omitempty" bson:"given_name,omitempty"`
	FamilyName string `json:"family_name,omitempty" bson:"family_name,omitempty"`
	// HashedPassword is the bcrypt hash set via SetPassword; never serialized
	// to JSON.
	HashedPassword string `json:"-" bson:"hashed_password"`
	// NOTE(review): primaryEmail is unexported, and mgo's bson codec ignores
	// unexported fields, so this tag has no effect and the primary email is
	// never persisted — confirm and consider exporting the field.
	primaryEmail string         `bson:"primary_email"`
	Emails       []EmailAddress `json:"emails" bson:"emails"`
	CreatedAt    time.Time      `json:"created_at" bson:"created_at"`
	UpdatedAt    time.Time      `json:"updated_at" bson:"updated_at"`
	Errors       Errors         `json:"-" bson:"-"`
}

// EmailAddress is the list of all email addresses of a user. Can contain the
// primary email address, but is not obligatory
type EmailAddress struct {
	Email       string    `json:"email" bson:"email"`
	IsConfirmed bool      `json:"confirmed" bson:"confirmed"`
	ConfirmedAt time.Time `json:"confirmed_at" bson:"confirmed_at"`
	DeletedAt   time.Time `json:"deleted_at" bson:"deleted_at"`
}

// SetPassword updates the password for the user
func (u *User) SetPassword(password string) error {
	hashBytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return err
	}
	u.HashedPassword = string(hashBytes)
	return nil
}

// ValidatePassword verifies if the provided password matches the one in the database
func (u *User) ValidatePassword(password string) (bool, error) {
	err := bcrypt.CompareHashAndPassword([]byte(u.HashedPassword), []byte(password))
	if err != nil {
		return false, err
	}
	return true, nil
}

// ListOfEmails returns the list of email address that the user have registered in the system
func (u *User) ListOfEmails() []string {
	var emails []string
	for _, item := range u.Emails {
		emails = append(emails, item.Email)
	}
	return emails
}

// SetPrimaryEmail sets the provided email address as the primary email for
// the account, registering it in Emails if not already present.
func (u *User) SetPrimaryEmail(email string) {
	u.primaryEmail = email
	u.AddEmailAddress(email)
}

// GetPrimaryEmail returns the user's primary email address
func (u *User) GetPrimaryEmail() string {
	return u.primaryEmail
}

// HasEmailAddress returns true if the user has the provided email address
// registered already. FIX: the original lowercased only the stored addresses,
// so a mixed-case argument never matched even though AddEmailAddress and
// RemoveEmailAddress both normalize their input; normalize here too.
func (u *User) HasEmailAddress(email string) bool {
	email = strings.ToLower(email)
	for _, item := range u.Emails {
		if strings.ToLower(item.Email) == email {
			return true
		}
	}
	return false
}

// AddEmailAddress adds a new email address to the user's account
// (case-insensitively deduplicated; stored lowercase, unconfirmed).
func (u *User) AddEmailAddress(email string) {
	email = strings.ToLower(email)
	for _, item := range u.Emails {
		if strings.ToLower(item.Email) == email {
			return // found so bail
		}
	}
	u.Emails = append(u.Emails, EmailAddress{Email: email, IsConfirmed: false})
}

// RemoveEmailAddress removes the email address from the user's account
// (case-insensitive; removes at most one entry).
func (u *User) RemoveEmailAddress(email string) {
	email = strings.ToLower(email)
	for i, item := range u.Emails {
		if strings.ToLower(item.Email) == email {
			u.Emails = append(u.Emails[:i], u.Emails[i+1:]...)
			break
		}
	}
}

// FullName returns the full user's name
func (u *User) FullName() string {
	if len(u.GivenName) > 0 && len(u.FamilyName) > 0 {
		return fmt.Sprintf("%s %s", u.GivenName, u.FamilyName)
	} else if len(u.GivenName) > 0 {
		return u.GivenName
	} else if len(u.FamilyName) > 0 {
		return u.FamilyName
	}
	return ""
}

// AbbreviatedName returns "GivenName F." where F is the first letter of the
// family name. FIX: the original indexed FamilyName[0] unconditionally, which
// panics when FamilyName is empty, and took a byte (not a rune), corrupting
// non-ASCII initials; guard the empty cases and use the first rune.
func (u *User) AbbreviatedName() string {
	if len(u.FamilyName) == 0 {
		return u.GivenName
	}
	initial := []rune(u.FamilyName)[0]
	if len(u.GivenName) == 0 {
		return fmt.Sprintf("%c.", initial)
	}
	return fmt.Sprintf("%s %c.", u.GivenName, initial)
}
package main

import (
	"fmt"
	"os"
)

// main loops over a simple text menu on stdin: option 1 runs the block-log
// enhancer, option 2 the alert-log enhancer, option 3 exits the process.
// Anything else prints an error and shows the menu again.
func main() {
	menu := []string{
		"Logenhancer - Please select an option",
		"1 - BlockLog",
		"2 - AlertLog",
		"3 - Close",
		"Your choice: ",
	}
	for {
		for _, line := range menu {
			fmt.Println(line)
		}
		choice := ""
		fmt.Scanf("%s", &choice)
		switch choice {
		case "1":
			runBlockLog()
		case "2":
			runAlertLog()
		case "3":
			os.Exit(0)
		default:
			fmt.Println("-----------------------")
			fmt.Println("Error - Invalid choice")
			fmt.Println("-----------------------")
		}
	}
}
package service

import (
	"CloudRestaurant/dao"
	"CloudRestaurant/model"
	"CloudRestaurant/param"
	"CloudRestaurant/tool"
	"encoding/json"
	"fmt"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/dysmsapi"
	"log"
	"math/rand"
	"strconv"
	"time"
)

// MemberService groups member-account operations (lookup, avatar upload,
// password and SMS login, verification-code sending) over MemberDao.
type MemberService struct {
}

// GetUserInfo returns the member with the given (stringified) numeric id, or
// nil when the id does not parse.
func (ms *MemberService) GetUserInfo(userId string) *model.Member {
	id, err := strconv.Atoi(userId)
	if err != nil {
		return nil
	}
	memberDao := dao.MemberDao{tool.DbEngine}
	return memberDao.QueryMemberById(id)
}

// UploadAvatar stores fileName as the member's avatar and returns it, or ""
// when the update affected no rows.
func (ms *MemberService) UploadAvatar(userId int64, fileName string) string {
	memberDao := dao.MemberDao{tool.DbEngine}
	result := memberDao.UpdateMemberAvatar(userId, fileName)
	if result == 0 {
		return ""
	}
	return fileName
}

// Login performs username+password login.
func (ms *MemberService) Login(name string, password string) *model.Member {
	// 1. Look up the user by name + password; if found, return directly.
	// NOTE(review): the lookup uses the raw password while the insert below
	// stores EncoderSha256(password) — verify Query hashes internally,
	// otherwise re-login of a newly created user can never match.
	md := dao.MemberDao{tool.DbEngine}
	member := md.Query(name, password)
	if member.Id != 0 {
		return member
	}
	// 2. Unknown user: register as a new member.
	user := model.Member{}
	user.UserName = name
	user.Password = tool.EncoderSha256(password)
	user.RegisterTime = time.Now().Unix()
	result := md.InsertMember(user)
	user.Id = result
	return &user
}

// SmsLogin performs phone-number + verification-code login.
func (ms *MemberService) SmsLogin(Loginparam param.SmsLoginParam) *model.Member {
	// 1-2. Validate that the phone/code pair matches a stored SMS code.
	md := dao.MemberDao{tool.DbEngine}
	sms := md.ValidateSmsCode(Loginparam.Phone, Loginparam.Code)
	if sms.Id == 0 {
		return nil
	}
	// 3. Look up an existing member record by phone number.
	member := md.QueryByPhone(Loginparam.Phone)
	if member.Id != 0 {
		return member
	}
	// 4. First login with this phone: create and persist a new member.
	user := model.Member{}
	user.UserName = Loginparam.Phone
	user.Mobile = Loginparam.Phone
	user.RegisterTime = time.Now().Unix()
	user.Id = md.InsertMember(user)
	return &user
}

// Sendcode generates a 6-digit verification code, sends it to phone via the
// Aliyun SMS API and, on success, persists the code for later validation.
// Returns true only when the SMS was accepted and the code was stored.
func (ms *MemberService) Sendcode(phone string) bool {
	// 1. Generate a zero-padded 6-digit code.
	code := fmt.Sprintf("%06v", rand.New(rand.NewSource(time.Now().UnixNano())).Int31n(1000000))
	config := tool.GetConfig().Sms
	// 2. Send it through the Aliyun SDK.
	// NOTE(review): log.Fatalln terminates the whole process, so the
	// `return false` lines after it are unreachable — confirm this is the
	// intended failure mode for an SMS error.
	client, err := dysmsapi.NewClientWithAccessKey(config.RegionId, config.AppKey, config.AppSecret)
	if err != nil {
		log.Fatalln(err.Error())
		return false
	}
	request := dysmsapi.CreateSendSmsRequest()
	request.Scheme = "https"
	request.SignName = config.SignName
	request.TemplateCode = config.TemplateCode
	request.PhoneNumbers = phone
	par, err := json.Marshal(map[string]interface{}{
		"code": code,
	})
	request.TemplateParam = string(par)
	response, err := client.SendSms(request)
	fmt.Println(response)
	if err != nil {
		log.Fatalln(err.Error())
		return false
	}
	// 3. Inspect the gateway's response status.
	if response.Code == "OK" {
		// The code was sent: store it so SmsLogin can validate it later.
		smsCode := model.SmsCode{Phone: phone, BizId: response.BizId, Code: code, CreateTime: time.Now().Unix()}
		memberDao := dao.MemberDao{tool.DbEngine}
		result := memberDao.InsertCode(smsCode)
		return result > 0
	}
	return false
}
package watcher

// Change types reported in Event.Type.
const CreateState = "CREATE"
const ModifyState = "MODIFY"
const RemoveState = "REMOVE"

// Event describes a single file-system change observed by the watcher.
type Event struct {
	Type   string    `json:"type"` // one of CreateState, ModifyState, RemoveState
	Name   string    `json:"name"` // name of the affected file
	Values *FileData `json:"data"` // file metadata; FileData is declared elsewhere in this package
}
package main

import (
	"io"
	"os"
	"os/exec"
	"syscall"

	"github.com/Microsoft/go-winio"
)

// sshAgentPipe is the well-known named pipe exposed by the Windows OpenSSH
// ssh-agent service.
const sshAgentPipe = "//./pipe/openssh-ssh-agent"

// openAgentSocket dials the Windows OpenSSH agent's named pipe and returns it
// as an io.ReadWriteCloser. On failure the underlying error is wrapped in an
// *os.PathError so callers can see which pipe was attempted.
func openAgentSocket() (io.ReadWriteCloser, error) {
	conn, err := winio.DialPipe(sshAgentPipe, nil)
	if err != nil {
		err = &os.PathError{Path: sshAgentPipe, Op: "open", Err: err}
	}
	return conn, err
}

// setupCommandForPlatform applies the Windows-specific process attribute
// HideWindow so the spawned command does not flash a console window.
func setupCommandForPlatform(cmd *exec.Cmd) {
	cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
}
package global

import "go.mongodb.org/mongo-driver/bson/primitive"

// NilUser is the zero-value User — presumably used as a "no such user"
// sentinel by callers; verify against the lookup code.
var NilUser User

// User is the MongoDB document representation of an account.
type User struct {
	ID       primitive.ObjectID `bson:"_id,omitempty"`      // Mongo document id
	Username string             `bson:"username,omitempty"` // login name
	Password string             `bson:"password,omitempty"` // stored password (hashing not visible here — confirm before use)
}
package workqueue

import (
	"context"

	"github.com/apex/log"
	"gopkg.in/tomb.v2"

	"git.scc.kit.edu/sdm/lsdf-checksum/internal/lifecycle"
	"git.scc.kit.edu/sdm/lsdf-checksum/meda"
	"git.scc.kit.edu/sdm/lsdf-checksum/workqueue"
	"git.scc.kit.edu/sdm/lsdf-checksum/workqueue/scheduler"
)

//go:generate confions config ProducerConfig

// ProducerConfig carries the tunables and injected dependencies of a Producer.
type ProducerConfig struct {
	MinWorkPackFileSize   uint64 // stop filling a work pack once its total file size reaches this
	MaxWorkPackFileNumber uint64 // hard cap on files per work pack
	FetchRowChunkSize     uint64 // chunk size passed to the DB row fetcher
	FetchRowBatchSize     uint64 // rows fetched per batch; also sizes filesChan and order acquisition

	FileSystemName string
	SnapshotName   string

	DB         *meda.DB                                    `yaml:"-"`
	Queue      *workqueue.QueueClient[*workqueue.WorkPack] `yaml:"-"`
	Logger     log.Interface                               `yaml:"-"`
	Controller scheduler.Controller                        `yaml:"-"`
}

// ProducerDefaultConfig holds the default tunables.
var ProducerDefaultConfig = &ProducerConfig{
	MinWorkPackFileSize:   5 * 1024 * 1024, // 5 MiB
	MaxWorkPackFileNumber: 1000,
	FetchRowChunkSize:     50000,
	FetchRowBatchSize:     1000,
}

// Producer streams to-be-read file rows from the database and packs them
// into WorkPacks, which it enqueues via a scheduler to satisfy production
// orders. Its goroutines run under a shared tomb for coordinated shutdown.
type Producer struct {
	Config *ProducerConfig

	tomb *tomb.Tomb
	ctx  context.Context

	// filesChan carries fetched rows from rowFetcher to fulfill;
	// closed by rowFetcher when it stops.
	filesChan chan meda.File
	// lastRand / lastID are reset in Start; not used in the code visible here.
	lastRand float64
	lastID   uint64

	scheduler   *scheduler.Scheduler[*workqueue.WorkPack]
	fieldLogger log.Interface
}

// NewProducer returns an unstarted Producer for the given config.
func NewProducer(config *ProducerConfig) *Producer {
	return &Producer{
		Config: config,
	}
}

// Start launches the producer's goroutines (row fetcher, order servicing
// loop, scheduler and its waiter) under a tomb derived from ctx.
func (p *Producer) Start(ctx context.Context) {
	p.tomb, p.ctx = tomb.WithContext(ctx)

	p.fieldLogger = p.Config.Logger.WithFields(log.Fields{
		"snapshot":   p.Config.SnapshotName,
		"filesystem": p.Config.FileSystemName,
		"component":  "workqueue.Producer",
	})

	p.lastRand = -1
	p.lastID = 0
	// Buffer one fetch batch so rowFetcher can run ahead of fulfill.
	p.filesChan = make(chan meda.File, p.Config.FetchRowBatchSize)
	p.scheduler = scheduler.New(p.Config.Queue, scheduler.Config{
		Logger:     p.Config.Logger,
		Controller: p.Config.Controller,
	})

	p.tomb.Go(func() error {
		p.tomb.Go(p.rowFetcher)
		p.tomb.Go(p.run)
		p.scheduler.Start(p.tomb.Context(nil))
		p.tomb.Go(p.schedulerWaiter)
		return nil
	})
}

// SignalStop asks all producer goroutines to shut down.
func (p *Producer) SignalStop() {
	p.tomb.Kill(lifecycle.ErrStopSignalled)
}

// Wait blocks until all goroutines have finished and returns the tomb's error.
func (p *Producer) Wait() error {
	return p.tomb.Wait()
}

// Dead returns a channel closed once all goroutines have finished.
func (p *Producer) Dead() <-chan struct{} {
	return p.tomb.Dead()
}

// Err returns the tomb's current error state.
func (p *Producer) Err() error {
	return p.tomb.Err()
}

// run services production orders from the scheduler until an error occurs
// or the file stream is exhausted, then signals the scheduler to stop.
func (p *Producer) run() error {
	var err error
	var exhausted bool
	var order scheduler.ProductionOrder[*workqueue.WorkPack]

	p.fieldLogger.Info("Starting listening for production orders")

	for {
		order, err = p.scheduler.AcquireOrder(p.ctx, uint(p.Config.FetchRowBatchSize))
		if err != nil {
			break
		}

		p.fieldLogger.WithField("n", order.Total()).Debug("Received production order")

		exhausted, err = p.fulfill(&order)
		if err != nil || exhausted {
			break
		}
	}

	if err != nil {
		p.fieldLogger.WithError(err).WithFields(log.Fields{
			"action":    "stopping",
			"exhausted": exhausted,
		}).Error("Encountered error while producing")
	} else {
		p.fieldLogger.WithFields(log.Fields{
			"action":    "stopping",
			"exhausted": exhausted,
		}).Info("Finished listening for production orders")
	}

	p.scheduler.SignalStop()

	return err
}

// schedulerWaiter keeps the tomb alive until the scheduler has shut down.
func (p *Producer) schedulerWaiter() error {
	p.scheduler.Wait()
	return nil
}

// rowFetcher streams batches of to-be-read file rows from the database into
// filesChan until the table is exhausted or the tomb starts dying.
// It owns filesChan and closes it on return.
func (p *Producer) rowFetcher() error {
	var err error
	var exhausted bool
	ctx := p.tomb.Context(nil)
	dying := p.tomb.Dying()
	files := make([]meda.File, 0, p.Config.FetchRowBatchSize)

	filesFetcher := p.Config.DB.FilesToBeReadFetcher(&meda.FilesToBeReadFetcherConfig{
		ChunkSize: p.Config.FetchRowChunkSize,
	})

	defer close(p.filesChan)

	for !exhausted {
		files, err = filesFetcher.AppendNext(files[:0], ctx, nil, p.Config.FetchRowBatchSize)
		if err != nil {
			p.fieldLogger.WithError(err).WithFields(log.Fields{
				"action": "stopping",
			}).Error("Encountered error while fetching next batch of File rows")
			return err
		}
		// A short batch means the table has been fully consumed.
		exhausted = len(files) < int(p.Config.FetchRowBatchSize)

		p.fieldLogger.WithFields(log.Fields{
			"exhausted": exhausted,
			"count":     len(files),
		}).Debug("Fetched files from database")

		for _, file := range files {
			select {
			// If dying is closed, one of the two cases will be randomly (!)
			// selected. Thus, the function may not return instantly.
			case p.filesChan <- file:
			case <-dying:
				return tomb.ErrDying
			}
		}
	}

	return nil
}

// fulfill assembles work packs from filesChan and enqueues them against
// order. Each pack is filled until MinWorkPackFileSize or
// MaxWorkPackFileNumber is reached. Returns exhausted=true once filesChan
// has been closed and drained.
func (p *Producer) fulfill(order *scheduler.ProductionOrder[*workqueue.WorkPack]) (bool, error) {
	var err error
	file := meda.File{}
	// workPack is reused across iterations; only its Files slice is reset.
	workPack := workqueue.WorkPack{
		FileSystemName: p.Config.FileSystemName,
		SnapshotName:   p.Config.SnapshotName,
		Files:          make([]workqueue.WorkPackFile, 0, 16),
	}
	var exhausted, ok bool

	for i := 0; i < order.Total() && !exhausted; i++ {
		// Initialise work pack
		workPack.Files = workPack.Files[:0]
		var totalFileSize, numberOfFiles uint64

		// Prepare work pack
		for totalFileSize < p.Config.MinWorkPackFileSize && numberOfFiles < p.Config.MaxWorkPackFileNumber {
			// If the Producer is dying, filesChan is closed by rowFetcher().
			// Thus, this loop will also shut down quickly.
			file, ok = <-p.filesChan
			exhausted = !ok
			if exhausted {
				break
			}

			workPack.Files = append(workPack.Files, workqueue.WorkPackFile{
				ID:   file.ID,
				Path: file.Path,
			})
			totalFileSize += file.FileSize
			numberOfFiles++
		}

		p.fieldLogger.WithFields(log.Fields{
			"files_count":      numberOfFiles,
			"files_total_size": totalFileSize,
		}).Debug("Enqueuing compute checksum job")

		_, err = order.Enqueue(&workPack)
		if err != nil {
			return exhausted, err
		}
	}

	return exhausted, nil
}
package proxy type application struct { } // HandleRequest handles current request func (a *application) HandleRequest(url, method string) (int, string) { if url == "/myProfile" && method == "GET" { return 200, "OK" } //... if url == "/create/product" && method == "POST" { return 201, "Product Created" } return 404, "Not Found" } // NewApplication ... func NewApplication() Server{ return &application{} }
package main

import (
	"context"
	"encoding/json"
	"errors"
	"io"
	"log"
	"net/http"
	"os"

	"github.com/olivere/elastic/v6"
	"github.com/sirupsen/logrus"
)

// Package-level Elasticsearch client, initialised in init().
var client *ElasticClient

func init() {
	var err error
	// NOTE(review): hard-coded cluster address and empty credentials —
	// consider moving to configuration.
	client, err = NewElasticClient(ElasticClientConfig{
		Addr:     "http://192.168.1.234:9200",
		User:     "",
		Password: "",
	})
	if err != nil {
		log.Panic(err)
	}
}

// ReqEsMatch is the ES "match" query clause.
type ReqEsMatch struct {
	Match map[string]string `json:"match"`
}

// ReqEsTerm is the ES "terms" query clause.
type ReqEsTerm struct {
	Terms map[string][]string `json:"terms"`
}

// ReqEsBool is the ES "bool" query wrapper with "must" sub-clauses.
type ReqEsBool struct {
	Must []interface{} `json:"must"`
}

// EsQueryFrame scrolls the "wareservice.frame" index for frames matching the
// given warehouse id and frame id list, returning all decoded hits.
// Scrolling continues until the client reports io.EOF; other errors panic.
func EsQueryFrame(warehouseID string, containerIdList []string) (frameList []EsResFrameSourceDetail, err error) {
	// Build the query: match on warehouse_id AND terms on frame_id.
	warehosueIDMatch := ReqEsMatch{}
	warehosueIDMatch.Match = map[string]string{"warehouse_id": warehouseID}
	termContainerId := ReqEsTerm{}
	termContainerId.Terms = map[string][]string{"frame_id": containerIdList}
	queryBool := ReqEsBool{}
	queryBool.Must = []interface{}{warehosueIDMatch, termContainerId}
	// Scroll through the index, 10000 hits per page.
	frameIndex := "wareservice.frame"
	scrollID := ""
	for {
		res, err := client.Scroll(frameIndex, ElasticScrollRequest{
			ScrollID: scrollID,
			ElasticSearchRequest: ElasticSearchRequest{
				Query: map[string]interface{}{
					"query": map[string]interface{}{"bool": queryBool},
					"size":  10000,
				},
			},
		})
		// io.EOF signals the scroll is exhausted.
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Panic(err)
		}
		scrollID = res.ScrollId
		esResHits := res.Hits.Hits
		for _, esResSource := range esResHits {
			var frame EsResFrameSourceDetail
			err := json.Unmarshal(*esResSource.Source, &frame)
			if err != nil {
				log.Panic(err)
			}
			frameList = append(frameList, frame)
		}
	}
	return frameList, nil
}

// EsResFrameSourceDetail is the decoded _source of a frame document.
type EsResFrameSourceDetail struct {
	FrameID      string `json:"frame_id"`
	Node         string `json:"node"`
	PositionType int    `json:"position_type"`
	CurrentZ     int    `json:"current_z"`
}

// main repeatedly runs the sample frame query and logs the results —
// appears to be a load/behavior test driver.
func main() {
	for i := 1; i < 10000; i++ {
		list, err := EsQueryFrame("232414789256686833", []string{"354956860203204610"})
		if err != nil {
			log.Panic(err)
		}
		for _, item := range list {
			log.Printf("%+v", item)
		}
	}
}

// ElasticClient wraps an olivere/elastic v6 client.
type ElasticClient struct {
	c *elastic.Client
}

// GetRawClient exposes the underlying elastic.Client.
func (c *ElasticClient) GetRawClient() *elastic.Client {
	return c.c
}

// ElasticScrollRequest parameterises a scroll call.
type ElasticScrollRequest struct {
	ElasticSearchRequest
	KeepaliveTime string // scroll context keepalive; defaults to "1m"
	ScrollID      string // empty for the first page, then the id from the previous response
}

// Scroll performs one page of a scroll search against index.
// The first call (empty ScrollID) sends the query body; subsequent calls
// continue the existing scroll context.
func (c *ElasticClient) Scroll(index string, req ElasticScrollRequest) (*elastic.SearchResult, error) {
	doCtx := context.Background()
	keepaliveTime := "1m"
	if req.KeepaliveTime != "" {
		keepaliveTime = req.KeepaliveTime
	}
	scrollService := c.c.Scroll().KeepAlive(keepaliveTime).Index(index)
	if req.ScrollID != "" {
		scrollService = scrollService.ScrollId(req.ScrollID)
	}
	searchResult, err := scrollService.Body(req.Query).Do(doCtx)
	if err != nil {
		return nil, err
	}
	return searchResult, nil
}

// ElasticSearchRequest is a plain (non-scroll) search body.
type ElasticSearchRequest struct {
	Query interface{}
}

// Search runs a single search request against index.
func (c *ElasticClient) Search(index string, req ElasticSearchRequest) (*elastic.SearchResult, error) {
	doCtx := context.Background()
	searchService := c.c.Search().Index(index)
	searchResult, err := searchService.Source(req.Query).Do(doCtx)
	if err != nil {
		return nil, err
	}
	return searchResult, nil
}

// todo use sql in es
func (c *ElasticClient) Sql(index string) (*elastic.SearchResult, error) {
	return nil, errors.New("not ready yet")
}

// ElasticClientConfig collects all connection options.
type ElasticClientConfig struct {
	HTTPS         bool
	Addr          string
	User          string
	Password      string
	Decoder       elastic.Decoder
	RetryStrategy elastic.Retrier
	HttpClient    *http.Client
	Other         []elastic.ClientOptionFunc // extra options, applied last (override)
	LogLevel      string                     // "debug".."panic"; anything else falls back to info
}

// NewElasticClient builds an ElasticClient from cfg: sniffing disabled,
// gzip enabled, optional basic auth/HTTPS/custom decoder/retrier/HTTP client,
// and logrus-backed error/trace/info logging.
func NewElasticClient(cfg ElasticClientConfig) (*ElasticClient, error) {
	var options []elastic.ClientOptionFunc
	options = append(options, elastic.SetURL(cfg.Addr))
	options = append(options, elastic.SetSniff(false))
	options = append(options, elastic.SetGzip(true))
	if cfg.User != "" || cfg.Password != "" {
		options = append(options, elastic.SetBasicAuth(cfg.User, cfg.Password))
	}
	if cfg.HTTPS {
		options = append(options, elastic.SetScheme("https"))
	}
	if cfg.HttpClient != nil {
		options = append(options, elastic.SetHttpClient(cfg.HttpClient))
	}
	if cfg.Decoder != nil {
		options = append(options, elastic.SetDecoder(cfg.Decoder))
	}
	if cfg.RetryStrategy != nil {
		options = append(options, elastic.SetRetrier(cfg.RetryStrategy))
	}
	loglevel := logrus.InfoLevel
	if level, exist := getLogLevel(cfg.LogLevel); exist {
		loglevel = level
	}
	elasticLogger := newElasticLogger(loglevel)
	options = append(options, elastic.SetErrorLog(&elasticErrorLogger{elasticLogger}))
	options = append(options, elastic.SetTraceLog(&elasticDebugLogger{elasticLogger}))
	options = append(options, elastic.SetInfoLog(&elasticInfoLogger{elasticLogger}))
	// override
	if len(cfg.Other) > 0 {
		options = append(options, cfg.Other...)
	}
	es, err := elastic.NewClient(options...)
	if err != nil {
		return nil, err
	}
	return &ElasticClient{
		c: es,
	}, nil
}

// elasticLogger adapts logrus for the elastic client's logging interfaces.
type elasticLogger struct {
	logger *logrus.Logger
}

// newElasticLogger builds a stdout JSON logrus logger at the given level.
func newElasticLogger(level logrus.Level) elasticLogger {
	logger := logrus.New()
	logger.Out = os.Stdout
	logger.SetLevel(level)
	logger.SetFormatter(&logrus.JSONFormatter{
		TimestampFormat: "2006-01-02 15:04:05",
	})
	return elasticLogger{
		logger: logger,
	}
}

// getLogger tags every entry with module=elastic.
func (l *elasticLogger) getLogger() *logrus.Entry {
	return l.logger.WithFields(logrus.Fields{"module": "elastic"})
}

// Per-severity wrappers satisfying elastic's Logger (Printf) interface.
type elasticDebugLogger struct {
	elasticLogger
}
type elasticInfoLogger struct {
	elasticLogger
}
type elasticErrorLogger struct {
	elasticLogger
}

func (l *elasticDebugLogger) Printf(format string, v ...interface{}) {
	l.getLogger().Debugf(format, v...)
}
func (l *elasticInfoLogger) Printf(format string, v ...interface{}) {
	l.getLogger().Infof(format, v...)
}
func (l *elasticErrorLogger) Printf(format string, v ...interface{}) {
	l.getLogger().Errorf(format, v...)
}

// getLogLevel maps a level name to a logrus.Level; the bool reports
// whether the name was recognised.
func getLogLevel(loglevel string) (logrus.Level, bool) {
	switch loglevel {
	case "debug":
		return logrus.DebugLevel, true
	case "info":
		return logrus.InfoLevel, true
	case "warn":
		return logrus.WarnLevel, true
	case "error":
		return logrus.ErrorLevel, true
	case "fatal":
		return logrus.FatalLevel, true
	case "panic":
		return logrus.PanicLevel, true
	default:
		return 0, false
	}
}
package sgf import ( "fmt" "math/rand" "os" "testing" "time" ) func init() { rand.Seed(time.Now().UTC().UnixNano()) } func TestIllegality(t *testing.T) { fmt.Printf("TestIllegality\n") root, err := Load("test_kifu/illegality.sgf") if err != nil { t.Errorf(err.Error()) return } node := root.GetEnd() original_end := node node, err = node.Play(Point(10,8)) if err == nil { t.Errorf("Recaptured a ko") } node, err = node.Play(Point(11,9)) if err == nil { t.Errorf("Played a suicide move") } node, err = node.Play(Point(11,10)) if err == nil { t.Errorf("Played on top of a stone") } node, err = node.Play(Point(19,19)) if err == nil { t.Errorf("Played an off-board move") } if node != original_end { t.Errorf("node was not original_end") } if len(node.children) != 0 { t.Errorf("node gained a child somehow") } } func TestCollection(t *testing.T) { fmt.Printf("TestCollection\n") collection, err := LoadCollection("test_kifu/collection.sgf") if err != nil { t.Errorf(err.Error()) return } if len(collection) != 3 { t.Errorf("Collection was not of expected size") } expectations := []int{44, 244, 3793} for i, root := range collection { if root.TreeSize() != expectations[i] { t.Errorf("A tree was not of expected size") } } } func TestCyclicAttachment(t *testing.T) { fmt.Printf("TestCyclicAttachment\n") root, err := Load("test_kifu/2016-03-10a.sgf") if err != nil { t.Errorf(err.Error()) return } // Choose a random node... all_nodes := root.TreeNodes() node := all_nodes[rand.Intn(len(all_nodes))] // Choose a random node in that node's subtree... descendents := node.SubtreeNodes() d := descendents[rand.Intn(len(descendents))] // Trying to attach the node to a descendent or itself should panic... 
defer func() { r := recover() if r == nil { t.Errorf("The cyclic attachment did not cause a panic") } }() node.SetParent(d) } func TestDyer(t *testing.T) { fmt.Printf("TestDyer\n") root, err := Load("test_kifu/2016-03-10a.sgf") if err != nil { t.Errorf(err.Error()) return } if root.Dyer() != "comhcledemrd" { t.Errorf("Dyer signature was not what was expected") } } func TestUnescaping(t *testing.T) { fmt.Printf("TestUnescaping\n") root, err := Load("test_kifu/escaped.sgf") if err != nil { t.Errorf(err.Error()) return } node := root.GetEnd() label, _ := node.GetValue("LB") if label != "pd:\\" { t.Errorf("Label not as expected") } comment, _ := node.GetValue("C") if comment != "This comment has a \\ character." { t.Errorf("Comment not as expected") } } func TestMainLineLoader(t *testing.T) { fmt.Printf("TestMainLineLoader\n") root, err := LoadMainLine("test_kifu/2016-03-10a.sgf") if err != nil { t.Errorf(err.Error()) return } if root.TreeSize() != 212 { t.Errorf("Wrong number of nodes in tree") } } func TestGibLoader(t *testing.T) { fmt.Printf("TestGibLoader\n") root, err := Load("test_kifu/3handicap.gib") if err != nil { t.Errorf(err.Error()) return } if root.TreeSize() != 253 { t.Errorf("Wrong number of nodes in tree") } ha, _ := root.GetValue("HA") if ha != "3" { t.Errorf("Wrong handicap") } stones := root.AllValues("AB") if len(stones) != 3 { t.Errorf("Wrong AB property") } } func TestNgfLoader(t *testing.T) { fmt.Printf("TestNgfLoader\n") root, err := Load("test_kifu/3handicap.ngf") if err != nil { t.Errorf(err.Error()) return } if root.TreeSize() != 284 { t.Errorf("Wrong number of nodes in tree") } ha, _ := root.GetValue("HA") if ha != "3" { t.Errorf("Wrong handicap") } stones := root.AllValues("AB") if len(stones) != 3 { t.Errorf("Wrong AB property") } } func TestHandicap(t *testing.T) { fmt.Printf("TestHandicap\n") root, err := Load("test_kifu/9handicap.sgf") if err != nil { t.Errorf(err.Error()) return } ha, _ := root.GetValue("HA") if ha != "9" { 
t.Errorf("Wrong handicap") } stones := root.AllValues("AB") if len(stones) != 9 { t.Errorf("Wrong AB property") } } func TestKeyValues(t *testing.T) { fmt.Printf("TestKeyValues\n") root, err := Load("test_kifu/2016-03-10a.sgf") if err != nil { t.Errorf(err.Error()) return } key_count, value_count := root.TreeKeyValueCount() if key_count != 9562 || value_count != 9562 { t.Errorf("Wrong number of keys or values in tree") } } func TestUnicode(t *testing.T) { fmt.Printf("TestUnicode\n") root, err := Load("test_kifu/unicode.sgf") if err != nil { t.Errorf(err.Error()) return } pb, _ := root.GetValue("PB") pw, _ := root.GetValue("PW") if pb != "播放機" || pw != "戰鬥機" { t.Errorf("Got unexpected string when reading unicode") } } func TestBoard(t *testing.T) { fmt.Printf("TestBoard\n") root, err := Load("test_kifu/2016-03-10a.sgf") if err != nil { t.Errorf(err.Error()) return } total_board_updates = 0 // Reset global root.Board() if total_board_updates != 1 { t.Errorf("total_board_updates not as expected") } // Real tests... 
board := root.GetEnd().Board() if total_board_updates != 212 { // t.Errorf("total_board_updates not as expected") } if board.CapturesBy[BLACK] != 3 || board.CapturesBy[WHITE] != 5 { t.Errorf("Captures not as expected") } stones := 0 for x := 0; x < board.Size; x++ { for y := 0; y < board.Size; y++ { if board.State[x][y] != EMPTY { stones++ } } } if stones != 203 { t.Errorf("Stones not as expected") } } func TestGroups(t *testing.T) { fmt.Printf("TestGroups\n") root, err := Load("test_kifu/group_info.sgf") if err != nil { t.Errorf(err.Error()) return } board := root.Board() if len(board.Stones("aa")) != 57 { t.Errorf("len(board.Stones()) not as expected") } if len(board.Liberties("aa")) != 37 { t.Errorf("len(board.Liberties()) not as expected") } if board.HasLiberties("pd") { t.Errorf("Empty point was considered as having liberties") } if board.HasLiberties("pp") { t.Errorf("Empty point was considered as having liberties") } if board.DestroyGroup("aa") != 57 { t.Errorf("DestroyGroup did not return the expected value") } // Try adding some stones to make an illegal position... root, err = Load("test_kifu/2016-03-10a.sgf") if err != nil { t.Errorf(err.Error()) return } board = root.GetEnd().Board() board.AddStone("jk", WHITE) board.AddStone("kk", WHITE) if board.HasLiberties("kk") == true || len(board.Liberties("kk")) != 0 { t.Errorf("Group with no liberties reported as having liberties") } // None of the group info methods should crash if given an invalid point... 
board.Stones("ZZ") board.HasLiberties("ZZ") board.Liberties("ZZ") board.Singleton("ZZ") } func TestCache(t *testing.T) { fmt.Printf("TestCache\n") root, err := Load("test_kifu/2016-03-10a.sgf") if err != nil { t.Errorf(err.Error()) return } nodes := root.SubtreeNodes() for _, node := range nodes { node.Board() } for _, node := range nodes { if node.__board_cache == nil { t.Errorf("Board cache was not made (1)") } } root.AddValue("AB", "aa") for _, node := range nodes { if node.__board_cache != nil { t.Errorf("Board cache was not purged (1)") } } for _, node := range nodes { node.Board() } for _, node := range nodes { if node.__board_cache == nil { t.Errorf("Board cache was not made (2)") } } root.MainChild().Detach() for _, node := range nodes { if node != root { if node.__board_cache != nil { t.Errorf("Board cache was not purged (2)") } } else { if node.__board_cache == nil { t.Errorf("Board cache of root was purged for no reason") } } } } func TestNodeCopy(t *testing.T) { fmt.Printf("TestNodeCopy\n") root := NewNode(nil) node := NewNode(root) NewNode(node) // Add a child. 
node.AddValue("AB", "dd") node.AddValue("AB", "pp") c := node.Copy() if len(c.AllKeys()) != 1 || c.KeyCount() != 1 { t.Errorf("Copy had wrong number of keys") } if len(c.AllValues("AB")) != 2 || c.ValueCount("AB") != 2 { t.Errorf("Copy had wrong number of values") } if c.Parent() != nil { t.Errorf("Copy had a parent") } if c.MainChild() != nil { t.Errorf("Copy had a child") } } func TestNodeUpdates(t *testing.T) { fmt.Printf("TestNodeUpdates\n") expect_keys := func(node *Node, n int) { if len(node.AllKeys()) != n || node.KeyCount() != n { t.Errorf("Wrong number of keys") } } expect_vals := func(node *Node, key string, n int) { if len(node.AllValues(key)) != n || node.ValueCount(key) != n { t.Errorf("Wrong number of values") } } node := NewNode(nil) expect_keys(node, 0) expect_vals(node, "AB", 0) node.AddValue("AB", "dd") expect_keys(node, 1) expect_vals(node, "AB", 1) node.AddValue("AW", "dd") expect_keys(node, 2) expect_vals(node, "AB", 1) expect_vals(node, "AW", 1) node.DeleteKey("AW") expect_keys(node, 1) expect_vals(node, "AB", 1) expect_vals(node, "AW", 0) node.AddValue("AB", "dd") // Duplicate value, shouldn't add. expect_keys(node, 1) expect_vals(node, "AB", 1) node.AddValue("AB", "pp") expect_keys(node, 1) expect_vals(node, "AB", 2) node.AddValue("AB", "dp") expect_keys(node, 1) expect_vals(node, "AB", 3) node.SetValue("AB", "jj") // SetValue should delete all others. expect_keys(node, 1) expect_vals(node, "AB", 1) node.DeleteValue("AB", "dd") // Deleting a non-existant value does nothing. expect_keys(node, 1) expect_vals(node, "AB", 1) node.DeleteValue("AB", "AB") // Check this doesn't delete the key. 
expect_keys(node, 1) expect_vals(node, "AB", 1) node.DeleteValue("AB", "jj") expect_keys(node, 0) expect_vals(node, "AB", 0) } func TestRootLoader(t *testing.T) { fmt.Printf("TestRootLoader\n") root, err := LoadRoot("test_kifu/instabranch.sgf") if err != nil { t.Errorf(err.Error()) return } if root.MainChild() != nil { t.Errorf("root had a child") } } func TestLine(t *testing.T) { fmt.Printf("TestLine\n") root, err := Load("test_kifu/2016-03-10a.sgf") if err != nil { t.Errorf(err.Error()) return } end := root.GetEnd() line := end.GetLine() if len(line) != 212 { t.Errorf("line was not the expected length") } } func TestBoardEdits(t *testing.T) { fmt.Printf("TestBoardEdits\n") board := NewBoard(19) expect_next_player := func(board *Board, colour Colour) { if board.Player != colour { t.Errorf("Wrong colour to play") } } board.Play("pp") expect_next_player(board, WHITE) board.Play("pp") // Fails expect_next_player(board, WHITE) board.ForceStone("pp", WHITE) // Succeeds expect_next_player(board, BLACK) board.ForceStone("pp", WHITE) // Succeeds expect_next_player(board, BLACK) board.ForceStone("pp", BLACK) // Succeeds expect_next_player(board, WHITE) board.Play("dd") expect_next_player(board, BLACK) board.Pass() expect_next_player(board, WHITE) } func TestLegalMovesEquivalence(t *testing.T) { fmt.Printf("TestLegalMovesEquivalence\n") const alpha = "abcdefghijklmnopqrst" // 20 chars, so sometimes generates offboard for i := 0; i < 10; i++ { board := NewBoard(19) node := NewTree(19) var node_err, board_err error for n := 0; n < 1000; n++ { x := rand.Intn(20) // See above y := rand.Intn(20) p := fmt.Sprintf("%c%c", alpha[x], alpha[y]) // Sometimes switch the colours up... 
if rand.Intn(8) == 0 { board_err = board.PlayColour(p, board.Player.Opposite()) node, node_err = node.PlayColour(p, node.Board().Player.Opposite()) } else { board_err = board.Play(p) node, node_err = node.Play(p) } if (board_err == nil && node_err != nil) || (board_err != nil && node_err == nil) { t.Errorf("Got differing errors") break } if board.Equals(node.Board()) != true { t.Errorf("Got differing boards") break } } } } func TestForcedMovesEquivalence(t *testing.T) { fmt.Printf("TestForcedMovesEquivalence\n") const alpha = "abcdefghijklmnopqrst" // 20 chars, so sometimes generates offboard for i := 0; i < 10; i++ { board := NewBoard(19) node := NewTree(19) for n := 0; n < 1000; n++ { x := rand.Intn(20) // See above y := rand.Intn(20) p := fmt.Sprintf("%c%c", alpha[x], alpha[y]) colour := BLACK key := "B" if rand.Intn(2) == 0 { colour = WHITE key = "W" } if rand.Intn(8) == 0 { // Sometimes do direct board // manipulation with no captures. board.Set(p, colour) board.Player = colour.Opposite() board.ClearKo() key = "A" + key node = NewNode(node) node.SetValue(key, p) // Key is AB or AW } else { // Sometimes do stone placement // with captures. board.ForceStone(p, colour) node = NewNode(node) node.SetValue(key, p) // Key is B or W } if board.Equals(node.Board()) != true { t.Errorf("Got differing boards at move %d", n) board.Dump() node.Board().Dump() node.GetRoot().write_tree(os.Stdout) fmt.Printf("\n") break } } // node.GetRoot().Save("meh.sgf") } }
package main

import "fmt"

// greeting builds a hello message for the given name.
func greeting(name string) string {
	return fmt.Sprintf("Hello %s", name)
}

// getsum returns the sum of the two integers.
func getsum(x int, y int) int {
	sum := x + y
	return sum
}

func main() {
	name := "weiqi"
	fmt.Println(greeting(name))
	fmt.Println(getsum(1, 5))
}
package remote import ( "fmt" aerror "opendev.org/airship/airshipctl/pkg/errors" ) type RemoteDirectError struct { aerror.AirshipError } func NewRemoteDirectErrorf(format string, v ...interface{}) error { e := &RemoteDirectError{} e.Message = fmt.Sprintf(format, v...) return e }
/*
Copyright 2015 Google Inc. All rights reserved.

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

package cups

/*
#cgo LDFLAGS: -lcups
#include <cups/cups.h>
#include <stdlib.h> // free
#include "cups.h"
*/
import "C"

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"os"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
	"unsafe"

	"github.com/google/cups-connector/cdd"
	"github.com/google/cups-connector/lib"

	"github.com/golang/glog"
)

const (
	// CUPS "URL" length are always less than 40. For example: /job/1234567
	urlMaxLength = 100

	// IPP printer attribute names queried from CUPS.
	attrDeviceURI           = "device-uri"
	attrMarkerLevels        = "marker-levels"
	attrMarkerNames         = "marker-names"
	attrMarkerTypes         = "marker-types"
	attrPrinterInfo         = "printer-info"
	attrPrinterMakeAndModel = "printer-make-and-model"
	attrPrinterName         = "printer-name"
	attrPrinterState        = "printer-state"
	attrPrinterStateReasons = "printer-state-reasons"
	attrPrinterUUID         = "printer-uuid"

	// IPP job attribute names queried from CUPS.
	attrJobState                = "job-state"
	attrJobMediaSheetsCompleted = "job-media-sheets-completed"
)

var (
	// requiredPrinterAttributes is the minimum attribute set the connector
	// always needs when listing printers (see checkPrinterAttributes).
	requiredPrinterAttributes []string = []string{
		attrDeviceURI,
		attrMarkerLevels,
		attrMarkerNames,
		attrMarkerTypes,
		attrPrinterInfo,
		attrPrinterMakeAndModel,
		attrPrinterName,
		attrPrinterState,
		attrPrinterStateReasons,
		attrPrinterUUID,
	}

	// jobAttributes is the attribute set requested by GetJobState.
	jobAttributes []string = []string{
		attrJobState,
		attrJobMediaSheetsCompleted,
	}
)

// Interface between Go and the CUPS API.
type CUPS struct {
	cc                *cupsCore                                            // low-level cgo connection manager
	pc                *ppdCache                                            // cache of PPD-derived descriptions
	infoToDisplayName bool                                                 // use printer-info as display name
	printerAttributes []string                                             // IPP attributes requested per printer
	systemTags        map[string]string                                    // host/system metadata attached to printers
	translatePPDToCDD func(string) (*cdd.PrinterDescriptionSection, error) // PPD -> CDD translator
}

// NewCUPS validates printerAttributes, opens a CUPS connection pool,
// builds the PPD cache and collects system tags, returning a ready CUPS.
func NewCUPS(infoToDisplayName bool, printerAttributes []string,
	maxConnections uint, connectTimeout time.Duration,
	translatePPDToCDD func(string) (*cdd.PrinterDescriptionSection, error)) (*CUPS, error) {
	if err := checkPrinterAttributes(printerAttributes); err != nil {
		return nil, err
	}

	cc, err := newCUPSCore(maxConnections, connectTimeout)
	if err != nil {
		return nil, err
	}
	pc := newPPDCache(cc, translatePPDToCDD)

	systemTags, err := getSystemTags()
	if err != nil {
		return nil, err
	}

	c := &CUPS{
		cc:                cc,
		pc:                pc,
		infoToDisplayName: infoToDisplayName,
		printerAttributes: printerAttributes,
		systemTags:        systemTags,
	}

	return c, nil
}

// Quit shuts down the PPD cache.
func (c *CUPS) Quit() {
	c.pc.quit()
}

// ConnQtyOpen gets the current quantity of open CUPS connections.
func (c *CUPS) ConnQtyOpen() uint {
	return c.cc.connQtyOpen()
}

// ConnQtyMax gets the maximum quantity of open CUPS connections.
func (c *CUPS) ConnQtyMax() uint {
	return c.cc.connQtyMax()
}

// GetPrinters gets all CUPS printers found on the CUPS server.
func (c *CUPS) GetPrinters() ([]lib.Printer, error) {
	// Build a C string array of the attributes to request; freed below.
	pa := C.newArrayOfStrings(C.int(len(c.printerAttributes)))
	defer C.freeStringArrayAndStrings(pa, C.int(len(c.printerAttributes)))
	for i, a := range c.printerAttributes {
		C.setStringArrayValue(pa, C.int(i), C.CString(a))
	}

	response, err := c.cc.getPrinters(pa, C.int(len(c.printerAttributes)))
	if err != nil {
		return nil, err
	}

	// cupsDoRequest() returns ipp_t pointer which needs explicit free.
	defer C.ippDelete(response)

	if C.ippGetStatusCode(response) == C.IPP_STATUS_ERROR_NOT_FOUND {
		// Normal error when there are no printers.
		return make([]lib.Printer, 0), nil
	}

	printers := c.responseToPrinters(response)
	for i := range printers {
		printers[i].GCPVersion = lib.GCPAPIVersion
		printers[i].ConnectorVersion = lib.ShortName
		printers[i].SetupURL = lib.ConnectorHomeURL
		printers[i].SupportURL = lib.ConnectorHomeURL
		printers[i].UpdateURL = lib.ConnectorHomeURL
	}
	printers = c.addDescriptionToPrinters(printers)

	return printers, nil
}

// responseToPrinters converts a C.ipp_t to a slice of lib.Printers.
func (c *CUPS) responseToPrinters(response *C.ipp_t) []lib.Printer {
	printers := make([]lib.Printer, 0, 1)

	for a := C.ippFirstAttribute(response); a != nil; a = C.ippNextAttribute(response) {
		// Skip non-printer attribute groups between printer records.
		if C.ippGetGroupTag(a) != C.IPP_TAG_PRINTER {
			continue
		}

		// Collect one printer's run of attributes.
		attributes := make([]*C.ipp_attribute_t, 0, C.int(len(c.printerAttributes)))
		for ; a != nil && C.ippGetGroupTag(a) == C.IPP_TAG_PRINTER; a = C.ippNextAttribute(response) {
			attributes = append(attributes, a)
		}
		tags := attributesToTags(attributes)
		p := tagsToPrinter(tags, c.systemTags, c.infoToDisplayName)

		printers = append(printers, p)
	}

	return printers
}

// addDescriptionToPrinters fetches description, PPD hash, manufacturer, model
// for all argument printers, concurrently.
//
// Returns a new printer slice, because it can shrink due to raw or
// mis-configured printers.
func (c *CUPS) addDescriptionToPrinters(printers []lib.Printer) []lib.Printer {
	var wg sync.WaitGroup
	// Buffered so each worker can deposit its printer without blocking.
	ch := make(chan *lib.Printer, len(printers))

	for i := range printers {
		if !lib.PrinterIsRaw(printers[i]) {
			wg.Add(1)
			go func(p *lib.Printer) {
				// Printers whose description fails to resolve are dropped
				// (logged, never sent to ch).
				if description, ppdHash, manufacturer, model, err := c.pc.getDescription(p.Name); err == nil {
					p.Description.Absorb(description)
					p.CapsHash = ppdHash
					p.Manufacturer = manufacturer
					p.Model = model
					ch <- p
				} else {
					glog.Error(err)
				}
				wg.Done()
			}(&printers[i])
		}
	}

	wg.Wait()
	close(ch)

	result := make([]lib.Printer, 0, len(ch))
	for printer := range ch {
		result = append(result, *printer)
	}
	return result
}

// getSystemTags collects host metadata (hostname, arch, uname fields,
// connector and CUPS client versions) used to tag every printer.
func getSystemTags() (map[string]string, error) {
	tags := make(map[string]string)

	tags["connector-version"] = lib.BuildDate
	hostname, err := os.Hostname()
	if err == nil {
		tags["system-hostname"] = hostname
	}
	tags["system-arch"] = runtime.GOARCH

	sysname, nodename, release, version, machine, err := uname()
	if err != nil {
		return nil, fmt.Errorf("CUPS failed to call uname while initializing: %s", err)
	}

	tags["system-uname-sysname"] = sysname
	tags["system-uname-nodename"] = nodename
	tags["system-uname-release"] = release
	tags["system-uname-version"] = version
	tags["system-uname-machine"] = machine

	tags["connector-cups-client-version"] = fmt.Sprintf("%d.%d.%d",
		C.CUPS_VERSION_MAJOR, C.CUPS_VERSION_MINOR, C.CUPS_VERSION_PATCH)

	return tags, nil
}

// RemoveCachedPPD removes a printer's PPD from the cache.
func (c *CUPS) RemoveCachedPPD(printername string) {
	c.pc.removePPD(printername)
}

// GetJobState gets the current state of the job indicated by jobID.
func (c *CUPS) GetJobState(jobID uint32) (cdd.PrintJobStateDiff, error) {
	// Build the C string array of job attributes to request; freed below.
	ja := C.newArrayOfStrings(C.int(len(jobAttributes)))
	defer C.freeStringArrayAndStrings(ja, C.int(len(jobAttributes)))
	for i, attribute := range jobAttributes {
		C.setStringArrayValue(ja, C.int(i), C.CString(attribute))
	}

	response, err := c.cc.getJobAttributes(C.int(jobID), ja)
	if err != nil {
		return cdd.PrintJobStateDiff{}, err
	}

	// cupsDoRequest() returned ipp_t pointer needs explicit free.
	defer C.ippDelete(response)

	s := C.ippFindAttribute(response, C.JOB_STATE, C.IPP_TAG_ENUM)
	state := int32(C.ippGetInteger(s, C.int(0)))

	p := C.ippFindAttribute(response, C.JOB_MEDIA_SHEETS_COMPLETED, C.IPP_TAG_INTEGER)
	var pages int32
	// The sheets-completed attribute may be absent; default to 0 pages.
	if p != nil {
		pages = int32(C.ippGetInteger(p, C.int(0)))
	}

	return convertJobState(state, pages), nil
}

// convertJobState converts CUPS job state to cdd.PrintJobStateDiff.
// The numeric cases are the IPP job-state enum values (3..9).
func convertJobState(cupsState, pages int32) cdd.PrintJobStateDiff {
	state := cdd.PrintJobStateDiff{PagesPrinted: pages}

	switch cupsState {
	case 3: // PENDING
		state.State = cdd.JobState{Type: "IN_PROGRESS"}
	case 4: // HELD
		state.State = cdd.JobState{Type: "IN_PROGRESS"}
	case 5: // PROCESSING
		state.State = cdd.JobState{Type: "IN_PROGRESS"}
	case 6: // STOPPED
		state.State = cdd.JobState{
			Type:              "STOPPED",
			DeviceActionCause: &cdd.DeviceActionCause{ErrorCode: "OTHER"},
		}
	case 7: // CANCELED
		state.State = cdd.JobState{
			Type:            "ABORTED",
			UserActionCause: &cdd.UserActionCause{ActionCode: "CANCELLED"}, // Spelled with two L's.
		}
	case 8: // ABORTED
		state.State = cdd.JobState{
			Type:              "ABORTED",
			DeviceActionCause: &cdd.DeviceActionCause{ErrorCode: "PRINT_FAILURE"},
		}
	case 9: // COMPLETED
		state.State = cdd.JobState{Type: "DONE"}
	}

	return state
}

// Print sends a new print job to the specified printer. The job ID
// is returned.
func (c *CUPS) Print(printername, filename, title, user string, ticket cdd.CloudJobTicket) (uint32, error) { pn := C.CString(printername) defer C.free(unsafe.Pointer(pn)) fn := C.CString(filename) defer C.free(unsafe.Pointer(fn)) t := C.CString(title) defer C.free(unsafe.Pointer(t)) options := ticketToOptions(ticket) numOptions := C.int(0) var o *C.cups_option_t = nil for key, value := range options { k, v := C.CString(key), C.CString(value) numOptions = C.cupsAddOption(k, v, numOptions, &o) C.free(unsafe.Pointer(k)) C.free(unsafe.Pointer(v)) } defer C.cupsFreeOptions(numOptions, o) u := C.CString(user) defer C.free(unsafe.Pointer(u)) jobID, err := c.cc.printFile(u, pn, fn, t, numOptions, o) if err != nil { return 0, err } return uint32(jobID), nil } func ticketToOptions(ticket cdd.CloudJobTicket) map[string]string { m := make(map[string]string) for _, vti := range ticket.Print.VendorTicketItem { m[vti.ID] = vti.Value } if ticket.Print.Color != nil { m["ColorModel"] = ticket.Print.Color.VendorID } if ticket.Print.Duplex != nil { switch ticket.Print.Duplex.Type { case "LONG_EDGE": m["Duplex"] = "DuplexNoTumble" case "SHORT_EDGE": m["Duplex"] = "DuplexTumble" case "NO_DUPLEX": m["Duplex"] = "None" } } if ticket.Print.PageOrientation != nil { switch ticket.Print.PageOrientation.Type { case "PORTRAIT": m["orientation-requested"] = "3" case "LANDSCAPE": m["orientation-requested"] = "4" } } if ticket.Print.Copies != nil { m["copies"] = strconv.FormatInt(int64(ticket.Print.Copies.Copies), 10) } if ticket.Print.Margins != nil { m["page-top"] = micronsToPoints(ticket.Print.Margins.TopMicrons) m["page-right"] = micronsToPoints(ticket.Print.Margins.RightMicrons) m["page-bottom"] = micronsToPoints(ticket.Print.Margins.BottomMicrons) m["page-left"] = micronsToPoints(ticket.Print.Margins.LeftMicrons) } if ticket.Print.DPI != nil { if ticket.Print.DPI.VendorID != "" { m["Resolution"] = ticket.Print.DPI.VendorID } else { m["Resolution"] = fmt.Sprintf("%dx%xdpi", 
ticket.Print.DPI.HorizontalDPI, ticket.Print.DPI.VerticalDPI) } } if ticket.Print.FitToPage != nil { switch ticket.Print.FitToPage.Type { case "FIT_TO_PAGE": m["fit-to-page"] = "true" case "NO_FITTING": m["fit-to-page"] = "false" } } if ticket.Print.MediaSize != nil { if ticket.Print.MediaSize.VendorID != "" { m["media"] = ticket.Print.MediaSize.VendorID } else { widthPoints := micronsToPoints(ticket.Print.MediaSize.WidthMicrons) heightPoints := micronsToPoints(ticket.Print.MediaSize.HeightMicrons) m["media"] = fmt.Sprintf("Custom.%sx%s", widthPoints, heightPoints) } } if ticket.Print.Collate != nil { if ticket.Print.Collate.Collate { m["Collate"] = "true" } else { m["Collate"] = "false" } } if ticket.Print.ReverseOrder != nil { if ticket.Print.ReverseOrder.ReverseOrder { m["outputorder"] = "reverse" } else { m["outputorder"] = "normal" } } return m } func micronsToPoints(microns int32) string { return strconv.Itoa(int(float32(microns)*72/25400 + 0.5)) } // convertIPPDateToTime converts an RFC 2579 date to a time.Time object. 
func convertIPPDateToTime(date *C.ipp_uchar_t) time.Time {
	// An RFC 2579 DateAndTime value is 11 bytes:
	// year(2), month, day, hour, minute, second, deci-second,
	// UTC direction ('+'/'-'), UTC hours offset, UTC minutes offset.
	r := bytes.NewReader(C.GoBytes(unsafe.Pointer(date), 11))

	var year uint16
	var month, day, hour, min, sec, dsec uint8
	// binary.Read errors are ignored: the reader holds exactly the 11 bytes
	// these reads consume, so they cannot fail short of a programming error.
	binary.Read(r, binary.BigEndian, &year)
	binary.Read(r, binary.BigEndian, &month)
	binary.Read(r, binary.BigEndian, &day)
	binary.Read(r, binary.BigEndian, &hour)
	binary.Read(r, binary.BigEndian, &min)
	binary.Read(r, binary.BigEndian, &sec)
	binary.Read(r, binary.BigEndian, &dsec)

	var utcDirection, utcHour, utcMin uint8
	binary.Read(r, binary.BigEndian, &utcDirection)
	binary.Read(r, binary.BigEndian, &utcHour)
	binary.Read(r, binary.BigEndian, &utcMin)

	var utcOffset time.Duration
	utcOffset += time.Duration(utcHour) * time.Hour
	utcOffset += time.Duration(utcMin) * time.Minute
	var loc *time.Location
	// Any direction byte other than '-' is treated as a positive offset.
	if utcDirection == '-' {
		loc = time.FixedZone("", -int(utcOffset.Seconds()))
	} else {
		loc = time.FixedZone("", int(utcOffset.Seconds()))
	}

	// dsec is deci-seconds: 1 unit = 100 ms.
	nsec := int(dsec) * 100 * int(time.Millisecond)

	return time.Date(int(year), time.Month(month), int(day), int(hour), int(min), int(sec), nsec, loc)
}

// attributesToTags converts a slice of C.ipp_attribute_t to a
// string:string "tag" map. Outside of this package, "printer attributes" are
// known as "tags".
func attributesToTags(attributes []*C.ipp_attribute_t) map[string][]string {
	tags := make(map[string][]string)

	for _, a := range attributes {
		key := C.GoString(C.ippGetName(a))
		count := int(C.ippGetCount(a))
		values := make([]string, count)

		// Stringify each value according to the attribute's IPP value tag.
		switch C.ippGetValueTag(a) {
		case C.IPP_TAG_NOVALUE, C.IPP_TAG_NOTSETTABLE:
			// No value means no value.

		case C.IPP_TAG_INTEGER, C.IPP_TAG_ENUM:
			for i := 0; i < count; i++ {
				values[i] = strconv.FormatInt(int64(C.ippGetInteger(a, C.int(i))), 10)
			}

		case C.IPP_TAG_BOOLEAN:
			for i := 0; i < count; i++ {
				if int(C.ippGetInteger(a, C.int(i))) == 0 {
					values[i] = "false"
				} else {
					values[i] = "true"
				}
			}

		case C.IPP_TAG_STRING, C.IPP_TAG_TEXT, C.IPP_TAG_NAME, C.IPP_TAG_KEYWORD, C.IPP_TAG_URI, C.IPP_TAG_CHARSET, C.IPP_TAG_LANGUAGE, C.IPP_TAG_MIMETYPE:
			for i := 0; i < count; i++ {
				values[i] = C.GoString(C.ippGetString(a, C.int(i), nil))
			}

		case C.IPP_TAG_DATE:
			// Dates are rendered as Unix epoch seconds.
			for i := 0; i < count; i++ {
				date := C.ippGetDate(a, C.int(i))
				t := convertIPPDateToTime(date)
				values[i] = strconv.FormatInt(t.Unix(), 10)
			}

		case C.IPP_TAG_RESOLUTION:
			// Rendered as "XxYppcm" or "XxYppi" depending on the unit.
			for i := 0; i < count; i++ {
				yres := C.int(-1)
				unit := C.int(-1)
				xres := C.ippGetResolutionWrapper(a, C.int(i), &yres, &unit)
				if unit == C.IPP_RES_PER_CM {
					values[i] = fmt.Sprintf("%dx%dpp%s", int(xres), int(yres), "cm")
				} else {
					values[i] = fmt.Sprintf("%dx%dpp%s", int(xres), int(yres), "i")
				}
			}

		case C.IPP_TAG_RANGE:
			for i := 0; i < count; i++ {
				uppervalue := C.int(-1)
				lowervalue := C.ippGetRange(a, C.int(i), &uppervalue)
				values[i] = fmt.Sprintf("%d~%d", int(lowervalue), int(uppervalue))
			}

		default:
			if count > 0 {
				values = []string{"unknown or unsupported type"}
			}
		}

		// A lone "none" value is treated as no values at all.
		if len(values) == 1 && values[0] == "none" {
			values = []string{}
		}

		// This block fixes some drivers' marker types, which list an extra
		// type containing a comma, which CUPS interprets as an extra type.
		// The extra type starts with a space, so it's easy to detect.
		if len(values) > 1 && len(values[len(values)-1]) > 1 && values[len(values)-1][0:1] == " " {
			// Re-join the last two values with the comma that was split on.
			newValues := make([]string, len(values)-1)
			for i := 0; i < len(values)-2; i++ {
				newValues[i] = values[i]
			}
			newValues[len(newValues)-1] = strings.Join(values[len(values)-2:], ",")
			values = newValues
		}

		tags[key] = values
	}

	return tags
}

// tagsToPrinter converts a map of tags to a Printer.
func tagsToPrinter(printerTags map[string][]string, systemTags map[string]string, infoToDisplayName bool) lib.Printer {
	// Flatten multi-valued printer tags and merge in system-wide tags
	// (system tags win on key collision).
	tags := make(map[string]string)
	for k, v := range printerTags {
		tags[k] = strings.Join(v, ",")
	}
	for k, v := range systemTags {
		tags[k] = v
	}

	var name string
	if n, ok := printerTags[attrPrinterName]; ok {
		name = n[0]
	}
	var uuid string
	if u, ok := printerTags[attrPrinterUUID]; ok {
		uuid = u[0]
	}

	state := cdd.PrinterStateSection{}

	if s, ok := printerTags[attrPrinterState]; ok {
		// IPP printer-state values; anything unexpected maps to idle.
		switch s[0] {
		case "3":
			state.State = cdd.CloudDeviceStateIdle
		case "4":
			state.State = cdd.CloudDeviceStateProcessing
		case "5":
			state.State = cdd.CloudDeviceStateStopped
		default:
			state.State = cdd.CloudDeviceStateIdle
		}
	}

	if reasons, ok := printerTags[attrPrinterStateReasons]; ok && len(reasons) > 0 {
		sort.Strings(reasons)
		state.VendorState = &cdd.VendorState{Item: make([]cdd.VendorStateItem, len(reasons))}
		for i, reason := range reasons {
			vendorState := cdd.VendorStateItem{DescriptionLocalized: cdd.NewLocalizedString(reason)}
			// Severity is inferred from the standard reason suffix;
			// "-report" and unsuffixed reasons both become Info.
			if strings.HasSuffix(reason, "-error") {
				vendorState.State = cdd.VendorStateError
			} else if strings.HasSuffix(reason, "-warning") {
				vendorState.State = cdd.VendorStateWarning
			} else if strings.HasSuffix(reason, "-report") {
				vendorState.State = cdd.VendorStateInfo
			} else {
				vendorState.State = cdd.VendorStateInfo
			}
			state.VendorState.Item[i] = vendorState
		}
	}

	markers, markerState := convertMarkers(printerTags[attrMarkerNames], printerTags[attrMarkerTypes], printerTags[attrMarkerLevels])
	state.MarkerState = markerState
	description := cdd.PrinterDescriptionSection{Marker: markers}

	p := lib.Printer{
		Name:        name,
		UUID:        uuid,
		State:       &state,
		Description: &description,
		Tags:        tags,
	}
	p.SetTagshash()

	if pi, ok := printerTags[attrPrinterInfo]; ok && infoToDisplayName {
		p.DefaultDisplayName = pi[0]
	}

	return p
}

// cupsMarkerNameToGCP maps lowercased, whitespace/hyphen-stripped CUPS
// marker-name prefixes to GCP marker color types.
var cupsMarkerNameToGCP map[string]cdd.MarkerColorType = map[string]cdd.MarkerColorType{
	"black":        cdd.MarkerColorBlack,
	"color":        cdd.MarkerColorColor,
	"cyan":         cdd.MarkerColorCyan,
	"magenta":      cdd.MarkerColorMagenta,
	"yellow":       cdd.MarkerColorYellow,
	"lightcyan":    cdd.MarkerColorLightCyan,
	"lightmagenta": cdd.MarkerColorLightMagenta,
	"gray":         cdd.MarkerColorGray,
	"lightgray":    cdd.MarkerColorLightGray,
	"pigmentblack": cdd.MarkerColorPigmentBlack,
	"matteblack":   cdd.MarkerColorMatteBlack,
	"photocyan":    cdd.MarkerColorPhotoCyan,
	"photomagenta": cdd.MarkerColorPhotoMagenta,
	"photoyellow":  cdd.MarkerColorPhotoYellow,
	"photogray":    cdd.MarkerColorPhotoGray,
	"red":          cdd.MarkerColorRed,
	"green":        cdd.MarkerColorGreen,
	"blue":         cdd.MarkerColorBlue,
}

// convertMarkers converts CUPS marker-(names|types|levels) to *[]cdd.Marker and *cdd.MarkerState.
//
// Normalizes marker type: toner(Cartridge|-cartridge) => toner,
// ink(Cartridge|-cartridge|Ribbon|-ribbon) => ink
//
// Returns (nil, nil) whenever the three slices are empty, mismatched in
// length, or contain an unparseable entry.
func convertMarkers(names, types, levels []string) (*[]cdd.Marker, *cdd.MarkerState) {
	if len(names) == 0 || len(types) == 0 || len(levels) == 0 {
		return nil, nil
	}
	if len(names) != len(types) || len(types) != len(levels) {
		glog.Warningf("Received badly-formatted markers from CUPS: %s, %s, %s",
			strings.Join(names, ";"), strings.Join(types, ";"), strings.Join(levels, ";"))
		return nil, nil
	}

	markers := make([]cdd.Marker, 0, len(names))
	states := cdd.MarkerState{make([]cdd.MarkerStateItem, 0, len(names))}
	for i := 0; i < len(names); i++ {
		if len(names[i]) == 0 {
			return nil, nil
		}
		var markerType cdd.MarkerType
		switch strings.ToLower(types[i]) {
		case "toner", "tonercartridge", "toner-cartridge":
			markerType = cdd.MarkerToner
		case "ink", "inkcartridge", "ink-cartridge", "ink-ribbon", "inkribbon":
			markerType = cdd.MarkerInk
		case "staples":
			markerType = cdd.MarkerStaples
		default:
			// Unrecognized marker types are skipped, not treated as errors.
			continue
		}

		// Normalize the marker name before matching against the color map.
		nameStripped := strings.Replace(strings.Replace(strings.ToLower(names[i]), " ", "", -1), "-", "", -1)
		colorType := cdd.MarkerColorCustom
		for k, v := range cupsMarkerNameToGCP {
			if strings.HasPrefix(nameStripped, k) {
				colorType = v
				break
			}
		}
		color := cdd.MarkerColor{Type: colorType}
		if colorType == cdd.MarkerColorCustom {
			// For unknown colors, derive a display name by trimming common
			// supply-type suffixes from the raw marker name.
			name := names[i]
			name = strings.TrimSuffix(name, " Cartridge")
			name = strings.TrimSuffix(name, " cartridge")
			name = strings.TrimSuffix(name, " Ribbon")
			name = strings.TrimSuffix(name, " ribbon")
			name = strings.TrimSuffix(name, " Toner")
			name = strings.TrimSuffix(name, " toner")
			name = strings.TrimSuffix(name, " Ink")
			name = strings.TrimSuffix(name, " ink")
			name = strings.Replace(name, "-", " ", -1)
			color.CustomDisplayNameLocalized = cdd.NewLocalizedString(name)
		}

		marker := cdd.Marker{
			VendorID: names[i],
			Type:     markerType,
			Color:    &color,
		}

		level, err := strconv.ParseInt(levels[i], 10, 32)
		if err != nil {
			glog.Warningf("Failed to parse CUPS marker state %s=%s: %s", names[i], levels[i], err)
			return nil, nil
		}
		if level > 100 {
			// Lop off extra (proprietary?) bits.
			level = level & 0x7f
		}
		if level < 0 || level > 100 {
			return nil, nil
		}
		// 10% or less remaining is reported as exhausted.
		var state cdd.MarkerStateType
		if level > 10 {
			state = cdd.MarkerStateOK
		} else {
			state = cdd.MarkerStateExhausted
		}
		level32 := int32(level)
		markerState := cdd.MarkerStateItem{
			VendorID:     names[i],
			State:        state,
			LevelPercent: &level32,
		}

		markers = append(markers, marker)
		states.Item = append(states.Item, markerState)
	}

	return &markers, &states
}

// contains reports whether needle is an element of haystack.
func contains(haystack []string, needle string) bool {
	for _, h := range haystack {
		if needle == h {
			return true
		}
	}
	return false
}

// findMissing returns the needles that are not present in haystack.
func findMissing(haystack, needles []string) []string {
	missing := make([]string, 0)
	for _, n := range needles {
		if !contains(haystack, n) {
			missing = append(missing, n)
		}
	}
	return missing
}

// checkPrinterAttributes verifies that the configured attribute list covers
// all required printer attributes (the wildcard "all" always qualifies).
func checkPrinterAttributes(printerAttributes []string) error {
	if !contains(printerAttributes, "all") {
		missing := findMissing(printerAttributes, requiredPrinterAttributes)
		if len(missing) > 0 {
			return fmt.Errorf("Printer attributes missing from config file: %s",
				strings.Join(missing, ","))
		}
	}
	return nil
}
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"strconv"
	"time"

	"github.com/fiatjaf/makeinvoice"
	"github.com/tidwall/sjson"
)

// makeMetadata builds the LNURL-pay metadata JSON array for the given user:
// [["text/identifier","<name>@<domain>"],["text/plain","Satoshis to <name>@<domain>."]].
// NOTE(review): `s` appears to be a package-level settings value defined
// elsewhere in this package — confirm.
func makeMetadata(params *Params) string {
	metadata, _ := sjson.Set("[]", "0.0", "text/identifier")
	metadata, _ = sjson.Set(metadata, "0.1", params.Name+"@"+s.Domain)

	metadata, _ = sjson.Set(metadata, "1.0", "text/plain")
	metadata, _ = sjson.Set(metadata, "1.1", "Satoshis to "+params.Name+"@"+s.Domain+".")

	// TODO support image, custom description

	return metadata
}

// makeInvoice generates a bolt11 invoice for msat millisatoshis, committing
// to the LNURL-pay metadata via its sha256 digest as the description_hash.
// The backend is selected by params.Kind.
func makeInvoice(params *Params, msat int) (bolt11 string, err error) {
	// description_hash
	h := sha256.Sum256([]byte(makeMetadata(params)))

	// prepare params
	var backend makeinvoice.BackendParams
	switch params.Kind {
	case "sparko":
		backend = makeinvoice.SparkoParams{
			Host: params.Host,
			Key:  params.Key,
		}
	case "lnd":
		backend = makeinvoice.LNDParams{
			Host:     params.Host,
			Macaroon: params.Key,
		}
	case "lnbits":
		backend = makeinvoice.LNBitsParams{
			Host: params.Host,
			Key:  params.Key,
		}
	case "lnpay":
		backend = makeinvoice.LNPayParams{
			PublicAccessKey:  params.Pak,
			WalletInvoiceKey: params.Waki,
		}
	}
	// NOTE(review): an unrecognized params.Kind leaves backend nil;
	// confirm makeinvoice tolerates that or validate Kind upstream.

	log.Debug().Int("msatoshi", msat).
		Interface("backend", backend).
		Str("description_hash", hex.EncodeToString(h[:])).
		Msg("generating invoice")

	// actually generate the invoice
	// Label is "<domain>/<unix time in hex>".
	return makeinvoice.MakeInvoice(makeinvoice.Params{
		Msatoshi:        int64(msat),
		DescriptionHash: h[:],
		Backend:         backend,

		Label: s.Domain + "/" + strconv.FormatInt(time.Now().Unix(), 16),
	})
}
package state

import (
	"bytes"
	"encoding/json"
	"time"

	"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/types"
)

// database keys
var (
	stateKey = []byte("stateKey")
)

// State holds the consensus-relevant state that survives between blocks.
type State struct {
	// LastBlockNum=0 at genesis (ie. block(H=0) does not exist)
	LastBlockNum     uint64
	LastBlockTotalTx int64
	LastBlockID      types.BlockID
	LastBlockTime    time.Time

	// LastValidators is used to validate block.LastCommit.
	// Validators are persisted to the database separately every time they change,
	// so we can query for historical validator sets.
	// Note that if s.LastBlockNum causes a valset change,
	// we set s.LastHeightValidatorsChanged = s.LastBlockNum + 1
	Validators                  *types.ValidatorSet
	LastValidators              *types.ValidatorSet
	LastHeightValidatorsChanged uint64

	// Consensus parameters used for validating blocks.
	// Changes returned by EndBlock and updated after Commit.
	ConsensusParams                  types.ConsensusParams
	LastHeightConsensusParamsChanged int64

	// Merkle root of the results from executing prev block
	LastResultsHash []byte

	// Hash of the previous block.
	PreviousHash []byte

	// VRF value of the previous block.
	PrevVRFValue []byte
	// VRF proof of the previous block.
	PrevVRFProof []byte

	// The latest AppHash we've received from calling abci.Commit()
	//AppHash []byte
}

// Copy makes a copy of the State for mutating.
// Note: slice fields are copied shallowly (the backing arrays are shared),
// consistent with how LastResultsHash was already handled.
func (s State) Copy() State {
	return State{
		LastBlockNum:     s.LastBlockNum,
		LastBlockTotalTx: s.LastBlockTotalTx,
		LastBlockID:      s.LastBlockID,
		LastBlockTime:    s.LastBlockTime,

		Validators:                  s.Validators.Copy(),
		LastValidators:              s.LastValidators.Copy(),
		LastHeightValidatorsChanged: s.LastHeightValidatorsChanged,

		//ConsensusParams: s.ConsensusParams,
		LastHeightConsensusParamsChanged: s.LastHeightConsensusParamsChanged,

		//AppHash: s.AppHash,
		LastResultsHash: s.LastResultsHash,

		// BUG FIX: these three fields were previously omitted, so every copy
		// silently dropped the previous block's hash and VRF data.
		PreviousHash: s.PreviousHash,
		PrevVRFValue: s.PrevVRFValue,
		PrevVRFProof: s.PrevVRFProof,
	}
}

// Equals returns true if the States are identical.
func (s State) Equals(s2 State) bool {
	sbz, s2bz := s.Bytes(), s2.Bytes()
	return bytes.Equal(sbz, s2bz)
}

// Bytes serializes the State as JSON.
func (s State) Bytes() []byte { bytes, err := json.Marshal(s) if err != nil { panic(err) } return bytes } // IsEmpty returns true if the State is equal to the empty State. func (s State) IsEmpty() bool { return s.Validators == nil // XXX can't compare to Empty } // GetValidators returns the last and current validator sets. func (s State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { return s.LastValidators, s.Validators } // Create a block from the latest state // MakeBlock builds a block with the given txs and commit from the current state. func (s State) MakeBlock(blkNum uint64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { // build base block block := types.MakeBlock(blkNum, txs, commit) //收集交易总数 block.TotalTxs = s.LastBlockTotalTx + block.NumTxs //前块信息 block.LastBlockID = s.LastBlockID block.ValidatorsHash = s.Validators.Hash() block.ConsensusHash = s.ConsensusParams.Hash() var changeHeight = s.LastHeightValidatorsChanged valInfo := &types.ValidatorInfo{ LastHeightChanged: s.LastHeightValidatorsChanged, } if changeHeight == blkNum { valInfo.Validators = s.Validators.Copy() } block.ValidatorInfo = valInfo return block, block.MakePartSet(1024 * 1024) } func (s State) MakeBlockVRF(material *types.BlkMaterial) (*types.Block, *types.PartSet) { // build base block block := types.MakeBlock(material.Height, material.Txs, material.Commit) // fill header with state data //block.ChainID = s.ChainID //block.NumTxs = int64(material.NumTxs) block.TotalTxs = s.LastBlockTotalTx + block.NumTxs block.LastBlockID = s.LastBlockID block.ValidatorsHash = s.Validators.Hash() //block.AppHash = s.AppHash block.ConsensusHash = s.ConsensusParams.Hash() //block.LastResultsHash = s.LastResultsHash block.BlkVRFValue = material.BlkVRFValue block.BlkVRFProof = material.BlkVRFProof var changeHeight = s.LastHeightValidatorsChanged valInfo := &types.ValidatorInfo{ LastHeightChanged: s.LastHeightValidatorsChanged, } if changeHeight == 
material.Height { valInfo.Validators = s.Validators.Copy() } valInfo.Validators.Proposer = material.Proposer block.ValidatorInfo = valInfo return block, block.MakePartSet(1024 * 1024) } func (s State) MakeBlockDPoS(material *types.BlkMaterial) (*types.Block, *types.PartSet) { // build base block block := types.MakeBlock(material.Height, material.Txs, material.Commit) // fill header with state data block.NumTxs = int64(material.NumTxs) block.TotalTxs = s.LastBlockTotalTx + block.NumTxs block.LastBlockID = s.LastBlockID block.ValidatorsHash = s.Validators.Hash() block.ConsensusHash = s.ConsensusParams.Hash() valInfo := &types.ValidatorInfo{ LastHeightChanged: s.LastHeightValidatorsChanged, } valInfo.Validators = s.Validators.Copy() valInfo.Validators.Proposer = material.Proposer block.ValidatorInfo = valInfo return block, block.MakePartSet(1024 * 1024) }
package status import "github.com/ant0ine/go-json-rest/rest" // Response is a response struct for /status endpoint type Response struct { Status string `json:"status"` } func handler(w rest.ResponseWriter, r *rest.Request) { status := &Response{"Ok"} w.WriteJson(status) }
// Copyright 2018 The eballscan Authors
// This file is part of the eballscan.
//
// The eballscan is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The eballscan is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the eballscan. If not, see <http://www.gnu.org/licenses/>.

package data

import (
	"time"
	//"encoding/json"
	//"fmt"

	"github.com/muesli/cache2go"
)

const (
	// MINOR_BLOCK_SPAN is a 10-second interval — presumably the expected
	// minor-block production/refresh period; confirm against the callers.
	MINOR_BLOCK_SPAN time.Duration = 10 * time.Second
)

var (
	// Minor_blocks is the cache2go table (named "Minor_blocks") holding
	// minor-block data.
	Minor_blocks = cache2go.Cache("Minor_blocks")
	//THashArray []string
)

// Minor_blockInfo describes a single minor block's header fields.
type Minor_blockInfo struct {
	TimeStamp         int    // block timestamp
	Hash              string // this block's hash
	PrevHash          string // hash of the preceding minor block
	TrxHashRoot       string // transaction hash root
	StateDeltaHash    string
	CMBlockHash       string
	ShardId           int
	ProposalPublicKey string
	CMEpochNo         int
	CountTxs          int // number of transactions in the block
}

// Minor_blockInfoH is a Minor_blockInfo together with its block height.
type Minor_blockInfoH struct {
	Minor_blockInfo
	Height int
}
//////////////////////////////////////////////////////////////////////////////// // // // Copyright 2021 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // // its subsidiaries. // // // // Licensed under the Apache License, Version 2.0 (the "License"); // // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // // // // http://www.apache.org/licenses/LICENSE-2.0 // // // // Unless required by applicable law or agreed to in writing, software // // distributed under the License is distributed on an "AS IS" BASIS, // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // // limitations under the License. // // // //////////////////////////////////////////////////////////////////////////////// package apis import ( "fmt" "strings" "github.com/Azure/sonic-mgmt-common/translib/db" ) // EntryDiff holds diff of two versions of a single db entry. // It contains both old & new db.Value objects and list changed field names. // Dummy "NULL" fields are ignored; array field names will have "@" suffix. 
type EntryDiff struct { OldValue db.Value // value before change; empty during entry create NewValue db.Value // changed db value; empty during entry delete EntryCreated bool // true if entry being created EntryDeleted bool // true if entry being deleted CreatedFields []string // fields added during entry update UpdatedFields []string // fields modified during entry update DeletedFields []string // fields deleted during entry update } func (d *EntryDiff) String() string { if d == nil { return "<nil>" } return fmt.Sprintf( "{EntryCreated=%t, EntryDeleted=%t, CreatedFields=%v, UpdatedFields=%v, DeletedFields=%v}", d.EntryCreated, d.EntryDeleted, d.CreatedFields, d.UpdatedFields, d.DeletedFields) } // IsEmpty returns true if this EntryDiff has no diff data -- either not initialized // or both old and new values are identical. func (d *EntryDiff) IsEmpty() bool { return d == nil || (!d.EntryCreated && !d.EntryDeleted && len(d.CreatedFields) == 0 && len(d.UpdatedFields) == 0 && len(d.DeletedFields) == 0) } // EntryCompare function compares two db.Value objects representing two versions // of a single db entry. Changes are returned as a DBEntryDiff pointer. func EntryCompare(old, new db.Value) *EntryDiff { diff := &EntryDiff{ OldValue: old, NewValue: new, } switch oldExists, newExists := old.IsPopulated(), new.IsPopulated(); { case !oldExists && newExists: diff.EntryCreated = true return diff case oldExists && !newExists: diff.EntryDeleted = true return diff case !oldExists && !newExists: return diff } // Both old & new versions exist.. 
compare fields for fldName := range old.Field { if fldName == "NULL" { continue } if _, fldOk := new.Field[fldName]; !fldOk { diff.DeletedFields = append( diff.DeletedFields, strings.TrimSuffix(fldName, "@")) } } for nf, nv := range new.Field { if nf == "NULL" { continue } if ov, exists := old.Field[nf]; !exists { diff.CreatedFields = append( diff.CreatedFields, strings.TrimSuffix(nf, "@")) } else if ov != nv { diff.UpdatedFields = append( diff.UpdatedFields, strings.TrimSuffix(nf, "@")) } } return diff } // EntryFields returns the list of field names in a DB entry. // Ignores the dummy NULL field and also removes @ suffix of array fields. func EntryFields(v db.Value) []string { var fields []string for f := range v.Field { if f != "NULL" { fields = append(fields, strings.TrimSuffix(f, "@")) } } return fields }
package main

import (
	"log"
	"sync/atomic"
	"time"

	"github.com/labstack/echo/v4"
)

// main runs a tiny long-polling server: /status blocks until the opponent
// calls /move (or a timeout/cancel occurs); /move records the move and wakes
// one waiting /status request.
func main() {
	e := echo.New()

	didTheMove := new(atomic.Bool)
	// Capacity 1 lets /move leave one pending wake-up signal even when no
	// /status request is currently waiting.
	channel := make(chan struct{}, 1)

	e.GET("/status", func(c echo.Context) error {
		startListeningTime := time.Now() // just for info
		select {
		case <-channel:
			log.Println("Yay! opponent did the move!")
		case <-time.After(time.Minute):
			log.Println("just a timeout for avoiding goroutine leak")
		case <-c.Request().Context().Done():
			log.Println("user canceled the connection")
		}
		if didTheMove.Load() {
			return c.String(200, "now your turn, you waited for "+time.Since(startListeningTime).String())
		}
		return c.String(200, "waiting for your opponent's move")
	})

	e.GET("/move", func(context echo.Context) error {
		didTheMove.Store(true)
		// BUG FIX: the send was unconditional; once the 1-slot buffer held an
		// undelivered signal, the next /move request blocked its handler
		// indefinitely. A non-blocking send suffices because one pending
		// signal is enough to wake the next /status listener.
		select {
		case channel <- struct{}{}:
		default:
		}
		return context.String(200, "done")
	})

	e.Logger.Fatal(e.Start(":1323"))
}
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package telemetry

import (
	"context"
	"fmt"
	"math"
	"time"

	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/parser/ast"
	"github.com/pingcap/tidb/parser/model"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/util/logutil"
	"github.com/pingcap/tidb/util/sqlexec"
	"go.uber.org/zap"
)

const (
	// selectDeletedRowsOneDaySQL selects the deleted rows for each table of last day
	selectDeletedRowsOneDaySQL = `SELECT parent_table_id, CAST(SUM(deleted_rows) AS SIGNED)
		FROM
			mysql.tidb_ttl_job_history
		WHERE
			status != 'running'
			AND create_time >= CURDATE() - INTERVAL 7 DAY
			AND finish_time >= CURDATE() - INTERVAL 1 DAY
			AND finish_time < CURDATE()
		GROUP BY parent_table_id;`
	// selectDelaySQL selects the deletion delay in minute for each table at the end of last day
	selectDelaySQL = `SELECT parent_table_id, TIMESTAMPDIFF(MINUTE, MIN(tm), CURDATE()) AS ttl_minutes
		FROM
			(
				SELECT
					table_id,
					parent_table_id,
					MAX(ttl_expire) AS tm
				FROM
					mysql.tidb_ttl_job_history
				WHERE
					create_time > CURDATE() - INTERVAL 7 DAY
					AND finish_time < CURDATE()
					AND status = 'finished'
					AND JSON_VALID(summary_text)
					AND summary_text ->> "$.scan_task_err" IS NULL
				GROUP BY
					table_id, parent_table_id
			) t
		GROUP BY parent_table_id;`
)

// ttlHistItem is one bucket of a TTL histogram.
type ttlHistItem struct {
	// LessThan is not null means it collects the count of items with condition [prevLessThan, LessThan)
	// Notice that its type is an int64 pointer to forbid serializing it when it is not set.
	LessThan *int64 `json:"less_than,omitempty"`
	// LessThanMax is true means the condition is [prevLessThan, MAX)
	LessThanMax bool `json:"less_than_max,omitempty"`
	// Count is the count of items that fit the condition
	Count int64 `json:"count"`
}

// ttlUsageCounter aggregates the TTL telemetry counters reported upstream.
type ttlUsageCounter struct {
	TTLJobEnabled           bool          `json:"ttl_job_enabled"`
	TTLTables               int64         `json:"ttl_table_count"`
	TTLJobEnabledTables     int64         `json:"ttl_job_enabled_tables"`
	TTLHistDate             string        `json:"ttl_hist_date"`
	TableHistWithDeleteRows []*ttlHistItem `json:"table_hist_with_delete_rows"`
	TableHistWithDelayTime  []*ttlHistItem `json:"table_hist_with_delay_time"`
}

// int64Pointer returns a pointer to a copy of val.
func int64Pointer(val int64) *int64 {
	v := val
	return &v
}

// UpdateTableHistWithDeleteRows adds one table with `rows` deleted rows to
// the first matching histogram bucket (buckets are ordered ascending).
func (c *ttlUsageCounter) UpdateTableHistWithDeleteRows(rows int64) {
	for _, item := range c.TableHistWithDeleteRows {
		if item.LessThanMax || rows < *item.LessThan {
			item.Count++
			return
		}
	}
}

// UpdateTableHistWithDelayTime adds tblCnt tables with the given delay in
// hours to the first matching histogram bucket.
func (c *ttlUsageCounter) UpdateTableHistWithDelayTime(tblCnt int, hours int64) {
	for _, item := range c.TableHistWithDelayTime {
		if item.LessThanMax || hours < *item.LessThan {
			item.Count += int64(tblCnt)
			return
		}
	}
}

// getTTLUsageInfo collects TTL telemetry: table counts, a histogram of rows
// deleted yesterday, and a histogram of per-table deletion delay.
func getTTLUsageInfo(ctx context.Context, sctx sessionctx.Context) (counter *ttlUsageCounter) {
	counter = &ttlUsageCounter{
		TTLJobEnabled: variable.EnableTTLJob.Load(),
		TTLHistDate:   time.Now().Add(-24 * time.Hour).Format(time.DateOnly),
		// Delete-rows buckets: <10K, <100K, <1M, <10M, rest.
		TableHistWithDeleteRows: []*ttlHistItem{
			{
				LessThan: int64Pointer(10 * 1000),
			},
			{
				LessThan: int64Pointer(100 * 1000),
			},
			{
				LessThan: int64Pointer(1000 * 1000),
			},
			{
				LessThan: int64Pointer(10000 * 1000),
			},
			{
				LessThanMax: true,
			},
		},
		// Delay buckets in hours: <1, <6, <24, <72, rest.
		TableHistWithDelayTime: []*ttlHistItem{
			{
				LessThan: int64Pointer(1),
			},
			{
				LessThan: int64Pointer(6),
			},
			{
				LessThan: int64Pointer(24),
			},
			{
				LessThan: int64Pointer(72),
			},
			{
				LessThanMax: true,
			},
		},
	}

	is, ok := sctx.GetDomainInfoSchema().(infoschema.InfoSchema)
	if !ok {
		// it should never happen
		logutil.BgLogger().Error(fmt.Sprintf("GetDomainInfoSchema returns a invalid type: %T", is))
		return
	}

	// Collect every public table with a TTL definition.
	ttlTables := make(map[int64]*model.TableInfo)
	for _, db := range is.AllSchemas() {
		for _, tbl := range is.SchemaTables(db.Name) {
			tblInfo := tbl.Meta()
			if tblInfo.State != model.StatePublic || tblInfo.TTLInfo == nil {
				continue
			}
			counter.TTLTables++
			if tblInfo.TTLInfo.Enable {
				counter.TTLJobEnabledTables++
			}
			ttlTables[tblInfo.ID] = tblInfo
		}
	}

	exec := sctx.(sqlexec.RestrictedSQLExecutor)
	rows, _, err := exec.ExecRestrictedSQL(ctx, nil, selectDeletedRowsOneDaySQL)
	if err != nil {
		// Query failures are logged but do not abort telemetry collection.
		logutil.BgLogger().Error("exec sql error", zap.String("SQL", selectDeletedRowsOneDaySQL), zap.Error(err))
	} else {
		for _, row := range rows {
			counter.UpdateTableHistWithDeleteRows(row.GetInt64(1))
		}
	}

	rows, _, err = exec.ExecRestrictedSQL(ctx, nil, selectDelaySQL)
	if err != nil {
		logutil.BgLogger().Error("exec sql error", zap.String("SQL", selectDelaySQL), zap.Error(err))
	} else {
		noHistoryTables := len(ttlTables)
		for _, row := range rows {
			tblID := row.GetInt64(0)
			tbl, ok := ttlTables[tblID]
			if !ok {
				// table not exist, maybe truncated or deleted
				continue
			}
			noHistoryTables--

			// Let the server evaluate the TTL interval expression to get the
			// delay in hours for this table.
			evalIntervalSQL := fmt.Sprintf(
				"SELECT TIMESTAMPDIFF(HOUR, CURDATE() - INTERVAL %d MINUTE, CURDATE() - INTERVAL %s %s)",
				row.GetInt64(1), tbl.TTLInfo.IntervalExprStr, ast.TimeUnitType(tbl.TTLInfo.IntervalTimeUnit).String(),
			)

			innerRows, _, err := exec.ExecRestrictedSQL(ctx, nil, evalIntervalSQL)
			if err != nil || len(innerRows) == 0 {
				logutil.BgLogger().Error("exec sql error or empty rows returned", zap.String("SQL", evalIntervalSQL), zap.Error(err))
				continue
			}

			hours := innerRows[0].GetInt64(0)
			counter.UpdateTableHistWithDelayTime(1, hours)
		}

		// When no history found for a table, use max delay
		counter.UpdateTableHistWithDelayTime(noHistoryTables, math.MaxInt64)
	}
	return
}
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expression

import (
	goatomic "sync/atomic"

	"github.com/pingcap/tidb/parser/ast"
	"github.com/pingcap/tidb/parser/charset"
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/collate"
	"github.com/pingcap/tidb/util/hack"
	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/atomic"
)

// ExprCollation is a struct that store the collation related information.
type ExprCollation struct {
	Coer      Coercibility
	Repe      Repertoire
	Charset   string
	Collation string
}

// collationInfo is the embeddable implementation of the CollationInfo
// interface defined below.
type collationInfo struct {
	// coer is read/written with sync/atomic primitives; coerInit records
	// whether it has been set. Presumably expressions can be accessed from
	// concurrent goroutines — confirm before relying on full thread-safety,
	// since the remaining fields are not synchronized.
	coer     Coercibility
	coerInit atomic.Bool

	repertoire Repertoire

	charset   string
	collation string
}

func (c *collationInfo) HasCoercibility() bool {
	return c.coerInit.Load()
}

func (c *collationInfo) Coercibility() Coercibility {
	// Atomic load pairs with the atomic store in SetCoercibility.
	return Coercibility(goatomic.LoadInt32((*int32)(&c.coer)))
}

// SetCoercibility implements CollationInfo SetCoercibility interface.
func (c *collationInfo) SetCoercibility(val Coercibility) {
	goatomic.StoreInt32((*int32)(&c.coer), int32(val))
	c.coerInit.Store(true)
}

// Repertoire returns the stored repertoire value.
func (c *collationInfo) Repertoire() Repertoire {
	return c.repertoire
}

// SetRepertoire stores the repertoire value. Unlike coer, this field is not
// accessed atomically.
func (c *collationInfo) SetRepertoire(r Repertoire) {
	c.repertoire = r
}

// SetCharsetAndCollation stores the charset and collation pair.
func (c *collationInfo) SetCharsetAndCollation(chs, coll string) {
	c.charset, c.collation = chs, coll
}

// CharsetAndCollation returns the stored charset and collation pair.
func (c *collationInfo) CharsetAndCollation() (string, string) {
	return c.charset, c.collation
}

// CollationInfo contains all interfaces about dealing with collation.
type CollationInfo interface {
	// HasCoercibility returns if the Coercibility value is initialized.
	HasCoercibility() bool

	// Coercibility returns the coercibility value which is used to check collations.
	Coercibility() Coercibility

	// SetCoercibility sets a specified coercibility for this expression.
	SetCoercibility(val Coercibility)

	// Repertoire returns the repertoire value which is used to check collations.
	Repertoire() Repertoire

	// SetRepertoire sets a specified repertoire for this expression.
	SetRepertoire(r Repertoire)

	// CharsetAndCollation gets charset and collation.
	CharsetAndCollation() (string, string)

	// SetCharsetAndCollation sets charset and collation.
	SetCharsetAndCollation(chs, coll string)
}

// Coercibility values are used to check whether the collation of one item can be coerced to
// the collation of other. See https://dev.mysql.com/doc/refman/8.0/en/charset-collation-coercibility.html
type Coercibility int32

const (
	// CoercibilityExplicit is derived from an explicit COLLATE clause.
	CoercibilityExplicit Coercibility = 0
	// CoercibilityNone is derived from the concatenation of two strings with different collations.
	CoercibilityNone Coercibility = 1
	// CoercibilityImplicit is derived from a column or a stored routine parameter or local variable or cast() function.
	CoercibilityImplicit Coercibility = 2
	// CoercibilitySysconst is derived from a “system constant” (the string returned by functions such as USER() or VERSION()).
	CoercibilitySysconst Coercibility = 3
	// CoercibilityCoercible is derived from a literal.
	CoercibilityCoercible Coercibility = 4
	// CoercibilityNumeric is derived from a numeric or temporal value.
	CoercibilityNumeric Coercibility = 5
	// CoercibilityIgnorable is derived from NULL or an expression that is derived from NULL.
	CoercibilityIgnorable Coercibility = 6
)

var (
	// CollationStrictnessGroup group collation by strictness
	CollationStrictnessGroup = map[string]int{
		"utf8_general_ci":        1,
		"utf8mb4_general_ci":     1,
		"utf8_unicode_ci":        2,
		"utf8mb4_unicode_ci":     2,
		charset.CollationASCII:   3,
		charset.CollationLatin1:  3,
		charset.CollationUTF8:    3,
		charset.CollationUTF8MB4: 3,
		charset.CollationBin:     4,
	}

	// CollationStrictness indicates the strictness of comparison of the collation. The unequal order in a weak collation also holds in a strict collation.
	// For example, if a != b in a weak collation(e.g. general_ci), then there must be a != b in a strict collation(e.g. _bin).
	// collation group id in value is stricter than collation group id in key
	CollationStrictness = map[int][]int{
		1: {3, 4},
		2: {3, 4},
		3: {4},
		4: {},
	}
)

// The Repertoire of a character set is the collection of characters in the set.
// See https://dev.mysql.com/doc/refman/8.0/en/charset-repertoire.html.
// Only String expression has Repertoire, for non-string expression, it does not matter what the value it is.
type Repertoire int

const (
	// ASCII is pure ASCII U+0000..U+007F.
	ASCII Repertoire = 0x01
	// EXTENDED is extended characters: U+0080..U+FFFF
	EXTENDED = ASCII << 1
	// UNICODE is ASCII | EXTENDED
	UNICODE = ASCII | EXTENDED
)

// deriveCoercibilityForScalarFunc intentionally panics: scalar functions must
// have their coercibility set during construction, so reaching this path is a
// programming error.
func deriveCoercibilityForScalarFunc(sf *ScalarFunction) Coercibility {
	panic("this function should never be called")
}

// deriveCoercibilityForConstant maps a constant to its coercibility:
// NULL -> Ignorable, non-string -> Numeric, string literal -> Coercible.
func deriveCoercibilityForConstant(c *Constant) Coercibility {
	if c.Value.IsNull() {
		return CoercibilityIgnorable
	} else if c.RetType.EvalType() != types.ETString {
		return CoercibilityNumeric
	}
	return CoercibilityCoercible
}

// deriveCoercibilityForColumn maps a column reference to its coercibility.
// NULL-typed columns are Ignorable; string and JSON columns are Implicit
// (the empty ETJson case deliberately falls through to the trailing return);
// everything else is Numeric.
func deriveCoercibilityForColumn(c *Column) Coercibility {
	// For specified type null, it should return CoercibilityIgnorable, which means it got the lowest priority in DeriveCollationFromExprs.
	if c.RetType.GetType() == mysql.TypeNull {
		return CoercibilityIgnorable
	}
	switch c.RetType.EvalType() {
	case types.ETJson:
	case types.ETString:
		return CoercibilityImplicit
	default:
		return CoercibilityNumeric
	}
	return CoercibilityImplicit
}

// deriveCollation derives the result collation of the builtin function
// funcName from its arguments, following MySQL's per-function rules: most
// string functions aggregate the collations of their string arguments, while
// comparison-like functions (GE/LIKE/IN/...) aggregate only to pick a compare
// collation and then report a Numeric/ASCII result. The final fallthrough
// handles every function without a special rule.
func deriveCollation(ctx sessionctx.Context, funcName string, args []Expression, retType types.EvalType, argTps ...types.EvalType) (ec *ExprCollation, err error) {
	switch funcName {
	case ast.Concat, ast.ConcatWS, ast.Lower, ast.Lcase, ast.Reverse, ast.Upper, ast.Ucase, ast.Quote, ast.Coalesce, ast.Greatest, ast.Least:
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args...)
	case ast.Left, ast.Right, ast.Repeat, ast.Trim, ast.LTrim, ast.RTrim, ast.Substr, ast.SubstringIndex, ast.Replace, ast.Substring, ast.Mid, ast.Translate:
		// Only the first argument carries the string whose collation matters.
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args[0])
	case ast.InsertFunc:
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args[0], args[3])
	case ast.Lpad, ast.Rpad:
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args[0], args[2])
	case ast.Elt, ast.ExportSet, ast.MakeSet:
		// args[0] is numeric; only the remaining string arguments participate.
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args[1:]...)
	case ast.FindInSet, ast.Regexp:
		return CheckAndDeriveCollationFromExprs(ctx, funcName, types.ETInt, args...)
	case ast.Field:
		if argTps[0] == types.ETString {
			return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args...)
		}
	case ast.RegexpReplace:
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args[0], args[1], args[2])
	case ast.Locate, ast.Instr, ast.Position, ast.RegexpLike, ast.RegexpSubstr, ast.RegexpInStr:
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args[0], args[1])
	case ast.GE, ast.LE, ast.GT, ast.LT, ast.EQ, ast.NE, ast.NullEQ, ast.Strcmp:
		// if compare type is string, we should determine which collation should be used.
		if argTps[0] == types.ETString {
			ec, err = CheckAndDeriveCollationFromExprs(ctx, funcName, types.ETInt, args...)
			if err != nil {
				return nil, err
			}
			// The comparison result itself is numeric, regardless of the
			// collation used to compare.
			ec.Coer = CoercibilityNumeric
			ec.Repe = ASCII
			return ec, nil
		}
	case ast.If:
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args[1], args[2])
	case ast.Ifnull:
		return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, args[0], args[1])
	case ast.Like, ast.Ilike:
		ec, err = CheckAndDeriveCollationFromExprs(ctx, funcName, types.ETInt, args[0], args[1])
		if err != nil {
			return nil, err
		}
		ec.Coer = CoercibilityNumeric
		ec.Repe = ASCII
		return ec, nil
	case ast.In:
		if args[0].GetType().EvalType() == types.ETString {
			return CheckAndDeriveCollationFromExprs(ctx, funcName, types.ETInt, args...)
		}
	case ast.DateFormat, ast.TimeFormat:
		charsetInfo, collation := ctx.GetSessionVars().GetCharsetInfo()
		return &ExprCollation{args[1].Coercibility(), args[1].Repertoire(), charsetInfo, collation}, nil
	case ast.Cast:
		// We assume all the cast are implicit.
		ec = &ExprCollation{args[0].Coercibility(), args[0].Repertoire(), args[0].GetType().GetCharset(), args[0].GetType().GetCollate()}
		// Non-string type cast to string type should use @@character_set_connection and @@collation_connection.
		// String type cast to string type should keep its original charset and collation. It should not happen.
		if retType == types.ETString && argTps[0] != types.ETString {
			ec.Charset, ec.Collation = ctx.GetSessionVars().GetCharsetInfo()
		}
		return ec, nil
	case ast.Case:
		// FIXME: case function aggregate collation is not correct.
		// We should only aggregate the `then expression`,
		// case ... when ... expression will be rewritten to:
		// args:  eq scalar func(args: value, condition1), result1,
		//        eq scalar func(args: value, condition2), result2,
		//        ...
		//        else clause
		// Or
		// args:  condition1, result1,
		//        condition2, result2,
		//        ...
		//        else clause
		// so, arguments with odd index are the `then expression`.
		if argTps[1] == types.ETString {
			fieldArgs := make([]Expression, 0)
			for i := 1; i < len(args); i += 2 {
				fieldArgs = append(fieldArgs, args[i])
			}
			// An odd argument count means a trailing else clause is present.
			if len(args)%2 == 1 {
				fieldArgs = append(fieldArgs, args[len(args)-1])
			}
			return CheckAndDeriveCollationFromExprs(ctx, funcName, retType, fieldArgs...)
		}
	case ast.Database, ast.User, ast.CurrentUser, ast.Version, ast.CurrentRole, ast.TiDBVersion, ast.CurrentResourceGroup:
		chs, coll := charset.GetDefaultCharsetAndCollate()
		return &ExprCollation{CoercibilitySysconst, UNICODE, chs, coll}, nil
	case ast.Format, ast.Space, ast.ToBase64, ast.UUID, ast.Hex, ast.MD5, ast.SHA, ast.SHA2, ast.SM3:
		// should return ASCII repertoire, MySQL's doc says it depends on character_set_connection, but it not true from its source code.
		ec = &ExprCollation{Coer: CoercibilityCoercible, Repe: ASCII}
		ec.Charset, ec.Collation = ctx.GetSessionVars().GetCharsetInfo()
		return ec, nil
	case ast.JSONPretty, ast.JSONQuote:
		// JSON function always return utf8mb4 and utf8mb4_bin.
		ec = &ExprCollation{Coer: CoercibilityCoercible, Repe: UNICODE, Charset: charset.CharsetUTF8MB4, Collation: charset.CollationUTF8MB4}
		return ec, nil
	}

	// Default rule for everything not handled above: binary for non-string
	// results, connection charset/collation for string results.
	ec = &ExprCollation{CoercibilityNumeric, ASCII, charset.CharsetBin, charset.CollationBin}
	if retType == types.ETString {
		ec.Charset, ec.Collation = ctx.GetSessionVars().GetCharsetInfo()
		ec.Coer = CoercibilityCoercible
		if ec.Charset != charset.CharsetASCII {
			ec.Repe = UNICODE
		}
	}
	return ec, nil
}

// CheckAndDeriveCollationFromExprs derives collation information from these expressions, return error if derives collation error.
func CheckAndDeriveCollationFromExprs(ctx sessionctx.Context, funcName string, evalType types.EvalType, args ...Expression) (et *ExprCollation, err error) {
	ec := inferCollation(args...)
	if ec == nil {
		return nil, illegalMixCollationErr(funcName, args)
	}

	// CoercibilityNone means aggregation could not decide a collation; a
	// non-string result has no COLLATE clause to resolve it, so it is an error.
	if evalType != types.ETString && ec.Coer == CoercibilityNone {
		return nil, illegalMixCollationErr(funcName, args)
	}

	// A purely numeric aggregation feeding a string result falls back to the
	// connection charset/collation.
	if evalType == types.ETString && ec.Coer == CoercibilityNumeric {
		ec.Charset, ec.Collation = ctx.GetSessionVars().GetCharsetInfo()
		ec.Coer = CoercibilityCoercible
		ec.Repe = ASCII
	}

	if !safeConvert(ctx, ec, args...) {
		return nil, illegalMixCollationErr(funcName, args)
	}

	return ec, nil
}

// safeConvert reports whether every argument can be converted to ec.Charset
// without data loss. Constants are actually evaluated and validated against
// the target encoding; non-constants rely on charset-compatibility rules.
func safeConvert(ctx sessionctx.Context, ec *ExprCollation, args ...Expression) bool {
	enc := charset.FindEncodingTakeUTF8AsNoop(ec.Charset)
	for _, arg := range args {
		if arg.GetType().GetCharset() == ec.Charset {
			continue
		}

		// If value has ASCII repertoire, or it is binary string, just skip it.
		if arg.Repertoire() == ASCII || types.IsBinaryStr(arg.GetType()) {
			continue
		}

		if c, ok := arg.(*Constant); ok {
			str, isNull, err := c.EvalString(ctx, chunk.Row{})
			if err != nil {
				return false
			}
			if isNull {
				continue
			}
			if !enc.IsValid(hack.Slice(str)) {
				return false
			}
		} else {
			if arg.GetType().GetCollate() != charset.CharsetBin && ec.Charset != charset.CharsetBin && !isUnicodeCollation(ec.Charset) {
				return false
			}
		}
	}
	return true
}

// inferCollation infers collation, charset, coercibility and check the legitimacy.
func inferCollation(exprs ...Expression) *ExprCollation {
	if len(exprs) == 0 {
		// TODO: see if any function with no arguments could run here.
		dstCharset, dstCollation := charset.GetDefaultCharsetAndCollate()
		return &ExprCollation{
			Coer:      CoercibilityIgnorable,
			Repe:      UNICODE,
			Charset:   dstCharset,
			Collation: dstCollation,
		}
	}

	// Seed the aggregation with the first expression.
	repertoire := exprs[0].Repertoire()
	coercibility := exprs[0].Coercibility()
	dstCharset, dstCollation := exprs[0].GetType().GetCharset(), exprs[0].GetType().GetCollate()
	if exprs[0].GetType().EvalType() == types.ETJson {
		dstCharset, dstCollation = charset.CharsetUTF8MB4, charset.CollationUTF8MB4
	}
	unknownCS := false

	// Aggregate arguments one by one, agg(a, b, c) := agg(agg(a, b), c).
	for _, arg := range exprs[1:] {
		argCharset, argCollation := arg.GetType().GetCharset(), arg.GetType().GetCollate()
		// The collation of JSON is always utf8mb4_bin in builtin-func which is same as MySQL
		// see details https://github.com/pingcap/tidb/issues/31320#issuecomment-1010599311
		if arg.GetType().EvalType() == types.ETJson {
			argCharset, argCollation = charset.CharsetUTF8MB4, charset.CollationUTF8MB4
		}
		// If one of the arguments is binary charset, we allow it can be used with other charsets.
		// If they have the same coercibility, let the binary charset one to be the winner because binary has more precedence.
		if dstCollation == charset.CollationBin || argCollation == charset.CollationBin {
			if coercibility > arg.Coercibility() || (coercibility == arg.Coercibility() && argCollation == charset.CollationBin) {
				coercibility, dstCharset, dstCollation = arg.Coercibility(), argCharset, argCollation
			}
			repertoire |= arg.Repertoire()
			continue
		}

		// If charset is different, only if conversion without data loss is allowed:
		//		1. ASCII repertoire is always convertible.
		//		2. Non-Unicode charset can convert to Unicode charset.
		//		3. utf8 can convert to utf8mb4.
		//		4. constant value is allowed because we can eval and convert it directly.
		// If we can not aggregate these two collations, we will get CoercibilityNone and wait for an explicit COLLATE clause, if
		// there is no explicit COLLATE clause, we will get an error.
		if dstCharset != argCharset {
			switch {
			case coercibility < arg.Coercibility():
				if arg.Repertoire() == ASCII || arg.Coercibility() >= CoercibilitySysconst || isUnicodeCollation(dstCharset) {
					repertoire |= arg.Repertoire()
					continue
				}
			case coercibility == arg.Coercibility():
				if (isUnicodeCollation(dstCharset) && !isUnicodeCollation(argCharset)) || (dstCharset == charset.CharsetUTF8MB4 && argCharset == charset.CharsetUTF8) {
					repertoire |= arg.Repertoire()
					continue
				} else if (isUnicodeCollation(argCharset) && !isUnicodeCollation(dstCharset)) || (argCharset == charset.CharsetUTF8MB4 && dstCharset == charset.CharsetUTF8) {
					coercibility, dstCharset, dstCollation = arg.Coercibility(), argCharset, argCollation
					repertoire |= arg.Repertoire()
					continue
				} else if repertoire == ASCII && arg.Repertoire() != ASCII {
					coercibility, dstCharset, dstCollation = arg.Coercibility(), argCharset, argCollation
					repertoire |= arg.Repertoire()
					continue
				} else if repertoire != ASCII && arg.Repertoire() == ASCII {
					repertoire |= arg.Repertoire()
					continue
				}
			case coercibility > arg.Coercibility():
				if repertoire == ASCII || coercibility >= CoercibilitySysconst || isUnicodeCollation(argCharset) {
					coercibility, dstCharset, dstCollation = arg.Coercibility(), argCharset, argCollation
					repertoire |= arg.Repertoire()
					continue
				}
			}

			// Cannot apply conversion.
			repertoire |= arg.Repertoire()
			coercibility, dstCharset, dstCollation = CoercibilityNone, charset.CharsetBin, charset.CollationBin
			unknownCS = true
		} else {
			// If charset is the same, use lower coercibility, if coercibility is the same and none of them are _bin,
			// derive to CoercibilityNone and _bin collation.
			switch {
			case coercibility == arg.Coercibility():
				if dstCollation == argCollation {
					// Same collation: nothing to change.
				} else if coercibility == CoercibilityExplicit {
					// Two different explicit COLLATE clauses can never be mixed.
					return nil
				} else if isBinCollation(dstCollation) {
					// Current _bin collation wins; keep it.
				} else if isBinCollation(argCollation) {
					coercibility, dstCharset, dstCollation = arg.Coercibility(), argCharset, argCollation
				} else {
					coercibility, dstCharset, dstCollation = CoercibilityNone, argCharset, getBinCollation(argCharset)
				}
			case coercibility > arg.Coercibility():
				coercibility, dstCharset, dstCollation = arg.Coercibility(), argCharset, argCollation
			}
			repertoire |= arg.Repertoire()
		}
	}

	if unknownCS && coercibility != CoercibilityExplicit {
		return nil
	}

	return &ExprCollation{
		Coer:      coercibility,
		Repe:      repertoire,
		Charset:   dstCharset,
		Collation: dstCollation,
	}
}

// isUnicodeCollation reports whether ch is a Unicode charset (utf8/utf8mb4).
func isUnicodeCollation(ch string) bool {
	return ch == charset.CharsetUTF8 || ch == charset.CharsetUTF8MB4
}

// isBinCollation reports whether collate is one of the per-charset _bin
// collations (excluding the "binary" pseudo-collation, handled separately).
func isBinCollation(collate string) bool {
	return collate == charset.CollationASCII || collate == charset.CollationLatin1 ||
		collate == charset.CollationUTF8 || collate == charset.CollationUTF8MB4 ||
		collate == charset.CollationGBKBin
}

// getBinCollation get binary collation by charset
func getBinCollation(cs string) string {
	switch cs {
	case charset.CharsetUTF8:
		return charset.CollationUTF8
	case charset.CharsetUTF8MB4:
		return charset.CollationUTF8MB4
	case charset.CharsetGBK:
		return charset.CollationGBKBin
	}

	logutil.BgLogger().Error("unexpected charset " + cs)
	// it must return something, never reachable
	return charset.CollationUTF8MB4
}

var (
	// coerString maps Coercibility values (0..6) to the names MySQL uses in
	// "illegal mix of collations" error messages; indexed by Coercibility.
	coerString = []string{"EXPLICIT", "NONE", "IMPLICIT", "SYSCONST", "COERCIBLE", "NUMERIC", "IGNORABLE"}
)

// illegalMixCollationErr builds the MySQL-compatible "illegal mix of
// collations" error, choosing the 2-arg/3-arg/generic variant by arity.
func illegalMixCollationErr(funcName string, args []Expression) error {
	funcName = GetDisplayName(funcName)

	switch len(args) {
	case 2:
		return collate.ErrIllegalMix2Collation.GenWithStackByArgs(args[0].GetType().GetCollate(), coerString[args[0].Coercibility()], args[1].GetType().GetCollate(), coerString[args[1].Coercibility()], funcName)
	case 3:
		return collate.ErrIllegalMix3Collation.GenWithStackByArgs(args[0].GetType().GetCollate(), coerString[args[0].Coercibility()], args[1].GetType().GetCollate(), coerString[args[1].Coercibility()], args[2].GetType().GetCollate(), coerString[args[2].Coercibility()], funcName)
	default:
		return collate.ErrIllegalMixCollation.GenWithStackByArgs(funcName)
	}
}
// NOTE(review): package name "utilites" looks like a typo for "utilities";
// renaming would break importers, so it is left as-is here.
package utilites

// Vehicle is implemented by any type that can report how many doors it has.
// NOTE(review): getDoors is unexported, so only types in this package can
// satisfy the interface; Go convention would name it Doors() — renaming would
// break existing implementers, so it is only flagged here.
type Vehicle interface {
	getDoors() int
}
package crashparser //Fieldset for python crash stdout file parsing. type LogFile struct { Filename string `json:"filename"` Exceptions []CrashResult `json:"exceptions"` } //Fieldset for python crash stdout parsing. type CrashResult struct { ExceptionLine int `json:"line"` ExceptionType string `json:"type"` ExceptionMessage string `json:"message"` } type Result struct { Err error Value interface{} }
package controller import ( "github.com/GoAdminGroup/go-admin/context" "github.com/GoAdminGroup/go-admin/modules/logger" "github.com/GoAdminGroup/go-admin/plugins/admin/modules/guard" "github.com/GoAdminGroup/go-admin/plugins/admin/modules/response" ) // Delete delete the row from database. func (h *Handler) Delete(ctx *context.Context) { param := guard.GetDeleteParam(ctx) //token := ctx.FormValue("_t") // //if !auth.TokenHelper.CheckToken(token) { // ctx.SetStatusCode(http.StatusBadRequest) // ctx.WriteString(`{"code":400, "msg":"delete fail"}`) // return //} if err := h.table(param.Prefix, ctx).DeleteData(param.Id); err != nil { logger.Error(err) response.Error(ctx, "delete fail") return } response.OkWithData(ctx, map[string]interface{}{ "token": h.authSrv().AddToken(), }) }
package k8s_client

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"strings"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestNewForConfig verifies that a client built from the local kube config
// can list deployments in the "monitoring" namespace.
func TestNewForConfig(t *testing.T) {
	c, err := KubeRestConfigGetter()
	if err != nil {
		t.Fatal(err)
	}
	// Previously the error from NewForConfig was discarded; a nil client
	// would have panicked on the next line.
	client, err := NewForConfig(c)
	if err != nil {
		t.Fatal(err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	monitoringClient := client.k8sCluster
	proms, err := monitoringClient.Deployment("monitoring").List(ctx, metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	for _, i := range proms.Items {
		fmt.Printf("deployment : %s \n", i.Name)
	}
}

// TestClientSet_Kubernetes verifies that the raw kubernetes clientset can be
// obtained from a freshly built client.
func TestClientSet_Kubernetes(t *testing.T) {
	c, err := KubeRestConfigGetter()
	if err != nil {
		t.Fatal(err)
	}
	client, err := NewForConfig(c)
	if err != nil {
		t.Fatal(err)
	}
	client.Kubernetes()
}

// TestKubeRestConfigGetter copies a dump script into a running pod and
// executes it, logging the script's stdout.
func TestKubeRestConfigGetter(t *testing.T) {
	namespace := "dev1-xiaomai-server"
	podName := "dev1-app-market-latest-5d9b6f84fb-g2pl6"
	container := "app-market"
	c, err := KubeRestConfigGetter()
	if err != nil {
		t.Fatal(err)
	}
	client, err := NewForConfig(c)
	if err != nil {
		t.Fatal(err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	clusterClient := client.Kubernetes()
	// Read the script up-front and fail loudly if it is missing (the error
	// was previously ignored, yielding a confusing empty copy).
	fileByte, err := ioutil.ReadFile("D:/Users/fonzie/go/src/xiaomai-sentry/script/dump.sh")
	if err != nil {
		t.Fatal(err)
	}
	// Strip CRs so the script runs under a Linux shell inside the container.
	file := strings.ReplaceAll(string(fileByte), "\r", "")
	_, err = clusterClient.Pods(namespace).CopyToPod(ctx, podName, container, bytes.NewReader([]byte(file)), "/usr/local/bin/dump.sh")
	if err != nil {
		t.Fatal(err)
	}
	t1 := time.Now().Format("2006-01-02_15-04-05")
	commands := []string{"/bin/bash", "-c"}
	commands = append(commands, fmt.Sprintf("/bin/bash /usr/local/bin/dump.sh %s %s", podName, t1))
	t.Log(commands)
	var stdout bytes.Buffer
	_, err = clusterClient.Pods(namespace).Exec(ctx, podName, container, commands, nil, &stdout)
	if err != nil {
		t.Fatal(err)
	}
	// t.Log, not t.Logf: the captured output is not a format string, and a
	// stray '%' in it would corrupt the log (go vet printf check).
	t.Log(stdout.String())
}
// Package overrides implements the BuildOverrides admission plugin, which
// force-applies configured settings (forcePull, image labels) to builds as
// their pods are created.
package overrides

import (
	"io"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/admission"
	kapi "k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

	buildadmission "github.com/openshift/origin/pkg/build/admission"
	overridesapi "github.com/openshift/origin/pkg/build/admission/overrides/api"
	"github.com/openshift/origin/pkg/build/admission/overrides/api/validation"
	buildapi "github.com/openshift/origin/pkg/build/api"
)

// init registers the plugin under the name "BuildOverrides" so the admission
// chain can construct it from its serialized config.
func init() {
	admission.RegisterPlugin("BuildOverrides", func(c clientset.Interface, config io.Reader) (admission.Interface, error) {
		overridesConfig, err := getConfig(config)
		if err != nil {
			return nil, err
		}

		glog.V(5).Infof("Initializing BuildOverrides plugin with config: %#v", overridesConfig)
		return NewBuildOverrides(overridesConfig), nil
	})
}

// getConfig reads and validates the plugin configuration from in, returning
// an aggregated error if validation fails.
func getConfig(in io.Reader) (*overridesapi.BuildOverridesConfig, error) {
	overridesConfig := &overridesapi.BuildOverridesConfig{}
	err := buildadmission.ReadPluginConfig(in, overridesConfig)
	if err != nil {
		return nil, err
	}
	errs := validation.ValidateBuildOverridesConfig(overridesConfig)
	if len(errs) > 0 {
		return nil, errs.ToAggregate()
	}
	return overridesConfig, nil
}

// buildOverrides is the admission.Interface implementation; it carries the
// validated config applied to every admitted build pod.
type buildOverrides struct {
	*admission.Handler
	overridesConfig *overridesapi.BuildOverridesConfig
}

// NewBuildOverrides returns an admission control for builds that overrides
// settings on builds
func NewBuildOverrides(overridesConfig *overridesapi.BuildOverridesConfig) admission.Interface {
	return &buildOverrides{
		// Only pod create/update events are of interest.
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		overridesConfig: overridesConfig,
	}
}

// Admit applies configured overrides to a build in a build pod. Non-build
// pods and a nil config pass through untouched.
func (a *buildOverrides) Admit(attributes admission.Attributes) error {
	if a.overridesConfig == nil {
		return nil
	}
	if !buildadmission.IsBuildPod(attributes) {
		return nil
	}
	return a.applyOverrides(attributes)
}

// applyOverrides extracts the build object from the pod attributes, applies
// the configured forcePull flag and image-label overrides, then writes the
// modified build back into the pod.
func (a *buildOverrides) applyOverrides(attributes admission.Attributes) error {
	build, version, err := buildadmission.GetBuild(attributes)
	if err != nil {
		return err
	}
	glog.V(4).Infof("Handling build %s/%s", build.Namespace, build.Name)

	if a.overridesConfig.ForcePull {
		if err := applyForcePullToBuild(build, attributes); err != nil {
			return err
		}
	}

	// Apply label overrides
	for _, lbl := range a.overridesConfig.ImageLabels {
		glog.V(5).Infof("Overriding image label %s=%s in build %s/%s", lbl.Name, lbl.Value, build.Namespace, build.Name)
		overrideLabel(lbl, &build.Spec.Output.ImageLabels)
	}

	return buildadmission.SetBuild(attributes, build, version)
}

// applyForcePullToBuild sets ForcePull on whichever build strategy is in use.
// For custom strategy builds, the pod's own pull policies must also be forced
// because the strategy image itself is pulled by the pod.
func applyForcePullToBuild(build *buildapi.Build, attributes admission.Attributes) error {
	if build.Spec.Strategy.DockerStrategy != nil {
		glog.V(5).Infof("Setting docker strategy ForcePull to true in build %s/%s", build.Namespace, build.Name)
		build.Spec.Strategy.DockerStrategy.ForcePull = true
	}
	if build.Spec.Strategy.SourceStrategy != nil {
		glog.V(5).Infof("Setting source strategy ForcePull to true in build %s/%s", build.Namespace, build.Name)
		build.Spec.Strategy.SourceStrategy.ForcePull = true
	}
	if build.Spec.Strategy.CustomStrategy != nil {
		err := applyForcePullToPod(attributes)
		if err != nil {
			return err
		}
		glog.V(5).Infof("Setting custom strategy ForcePull to true in build %s/%s", build.Namespace, build.Name)
		build.Spec.Strategy.CustomStrategy.ForcePull = true
	}
	return nil
}

// applyForcePullToPod sets ImagePullPolicy to PullAlways on every init
// container and container of the admitted pod.
func applyForcePullToPod(attributes admission.Attributes) error {
	pod, err := buildadmission.GetPod(attributes)
	if err != nil {
		return err
	}
	for i := range pod.Spec.InitContainers {
		glog.V(5).Infof("Setting ImagePullPolicy to PullAlways on init container %s of pod %s/%s", pod.Spec.InitContainers[i].Name, pod.Namespace, pod.Name)
		pod.Spec.InitContainers[i].ImagePullPolicy = kapi.PullAlways
	}
	for i := range pod.Spec.Containers {
		glog.V(5).Infof("Setting ImagePullPolicy to PullAlways on container %s of pod %s/%s", pod.Spec.Containers[i].Name, pod.Namespace, pod.Name)
		pod.Spec.Containers[i].ImagePullPolicy = kapi.PullAlways
	}
	return nil
}

// overrideLabel replaces every existing label whose Name matches the
// overriding label (the loop deliberately continues after a match, so
// duplicate names are all replaced), or appends it if no match was found.
func overrideLabel(overridingLabel buildapi.ImageLabel, buildLabels *[]buildapi.ImageLabel) {
	found := false
	for i, lbl := range *buildLabels {
		if lbl.Name == overridingLabel.Name {
			glog.V(5).Infof("Replacing label %s (original value %q) with new value %q", lbl.Name, lbl.Value, overridingLabel.Value)
			(*buildLabels)[i] = overridingLabel
			found = true
		}
	}
	if !found {
		*buildLabels = append(*buildLabels, overridingLabel)
	}
}
/*
@Time : 2019/4/16 17:47
@Author : yanKoo
@File : redisMap
@Software: GoLand
@Description: Redis-backed helpers for user state/stream bookkeeping.
*/
package server

import (
	pb "api/talk_cloud"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/gomodule/redigo/redis"
	"log"
	"server/common/src/cache"
	"strconv"
)

// GetUserState is intended to return the state for each user id key via a
// redis MGET, but the implementation is currently stubbed out: only the
// nil-connection guard is live and the function always returns (nil, nil).
// NOTE(review): callers receive no data and no error — confirm whether this
// is intentional before relying on it.
func GetUserState(uIdKey []interface{}, rd redis.Conn) (map[int32]string, error) {
	if rd == nil {
		return nil, fmt.Errorf("rd is null")
	}
	/* states, err := redis.Strings(rd.Do("MGET", uIdKey...))
	for _, v := range states {
		gInfo := &pb.GroupInfo{}
		err = json.Unmarshal([]byte(v), gInfo)
		if err != nil {
			log.Printf("json parse user data(%s) error: %s\n", string(v), err)
			g, err := group.SelectGroupByKey(v)
			if err != nil {
				return nil, err
			}
			gInfo.GroupName = g.GroupName
			gInfo.Gid = int32(g.Id)
		}
		gList = append(gList, gInfo)
	}*/
	return nil, nil
}

// AddUserStream saves the connection (stream) information for user uId.
func AddUserStream(uId int32, srv pb.TalkCloud_DataPublishServer) error {
	rd := cache.GetRedisClient()
	if rd == nil {
		return fmt.Errorf("rd is nil")
	}
	// Serialize first, then SET; the key's TTL doubles as the user's
	// online-presence window (5 seconds).
	// NOTE(review): json.Marshal of a gRPC server-stream interface most
	// likely produces no useful payload (streams carry no exported fields) —
	// confirm what this key is actually expected to hold.
	srvStream, err := json.Marshal(srv)
	if err != nil {
		log.Printf("json marshal error: %s\n", err)
		return err
	}
	_, err = rd.Do("SET", "state:"+strconv.Itoa(int(uId)), srvStream, "ex", 5)
	if err != nil {
		return errors.New("SetUserStream error: " + err.Error())
	}
	return nil
}
// Package config defines configuration structures for the application.
package config

// DemoConfig holds the session-related settings of the demo application.
type DemoConfig struct {
	// SessionEncKey is presumably the key used to encrypt/sign session data —
	// TODO confirm against the session middleware that consumes it.
	SessionEncKey string
	// SessionCookieName is the name of the HTTP cookie carrying the session.
	SessionCookieName string
}
package artifact

import (
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"path/filepath"

	"github.com/square/p2/pkg/auth"
	"github.com/square/p2/pkg/gzip"
	"github.com/square/p2/pkg/uri"
	"github.com/square/p2/pkg/util"
)

// Interface for downloading a single artifact.
type Downloader interface {
	// Downloads the artifact represented by the Downloader to the
	// specified path and transfers file ownership to the specified user
	Download(location *url.URL, verificationData auth.VerificationData, destination string, owner string) error
}

// Implements the Downloader interface. Simply fetches a .tar.gz file from a
// configured URL and extracts it to the location passed to DownloadTo
type downloader struct {
	fetcher  uri.Fetcher
	verifier auth.ArtifactVerifier
}

// NewLocationDownloader builds a Downloader from a URI fetcher and an
// artifact verifier.
func NewLocationDownloader(fetcher uri.Fetcher, verifier auth.ArtifactVerifier) Downloader {
	return &downloader{
		fetcher:  fetcher,
		verifier: verifier,
	}
}

// Download fetches the artifact at location into a temp file, verifies it
// against verificationData, then extracts the .tar.gz into dst with files
// owned by owner. On extraction failure the partially-written dst is removed.
func (l *downloader) Download(location *url.URL, verificationData auth.VerificationData, dst string, owner string) error {
	// Write to a temporary file for easy cleanup if the network transfer fails
	// TODO: the end of the artifact URL may not always be suitable as a directory
	// name
	artifactFile, err := ioutil.TempFile("", filepath.Base(location.Path))
	if err != nil {
		return err
	}
	// Deferred in registration order Remove then Close, so Close runs first
	// (LIFO) and the file is removed after being closed.
	defer os.Remove(artifactFile.Name())
	defer artifactFile.Close()

	remoteData, err := l.fetcher.Open(location)
	if err != nil {
		return err
	}
	defer remoteData.Close()
	_, err = io.Copy(artifactFile, remoteData)
	if err != nil {
		return util.Errorf("Could not copy artifact locally: %v", err)
	}

	// rewind once so we can ask the verifier
	// (io.SeekStart replaces the deprecated os.SEEK_SET)
	_, err = artifactFile.Seek(0, io.SeekStart)
	if err != nil {
		return util.Errorf("Could not reset artifact file position for verification: %v", err)
	}
	err = l.verifier.VerifyHoistArtifact(artifactFile, verificationData)
	if err != nil {
		return err
	}

	err = artifactFile.Chmod(0644)
	if err != nil {
		return err
	}

	err = gzip.ExtractTarGz(owner, artifactFile.Name(), dst)
	if err != nil {
		// Best-effort cleanup of the partial extraction; the extraction
		// error is what the caller needs to see.
		_ = os.RemoveAll(dst)
		return util.Errorf("error while extracting artifact: %s", err)
	}
	// err is provably nil here; return nil explicitly (was `return err`).
	return nil
}
package auth

import (
	"testing"
	"time"

	"github.com/kataras/golog"
)

// TestRedisSetFunctionality stores a token in redis and checks both the Set
// error and that the value can be read back.
//
// NOTE: the previous version called assertions.ShouldBeNil/ShouldNotBeEmpty
// from smartystreets/assertions and discarded their return values. Those
// helpers only RETURN a failure string — without a convey/so wrapper they
// never fail the test, so nothing was actually asserted. Replaced with plain
// testing checks.
func TestRedisSetFunctionality(t *testing.T) {
	logger := golog.New()
	manager := Init(logger)
	db := manager.Database

	err := db.Set("niconicocsc", "TestRedisSetFunctionality", "2eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2VtYWlsIjoibmljb25pY29jc2NAZ21haWwuY29tIiwicm9sZXMiOlsidXNlciJdLCJpYXQiOjE2MDk0Mzg4MTQsImV4cCI6MTYwOTQzOTcxNCwianRpIjoiYmNiZmQwNTQtMTY5Yi00M2YwLWFkZDgtOTZhYmNkNmVmNmFiIiwiaXNzIjoiR25lbWVzIn0.cuOkMSJuhBimg4vLAzkV-u2kYJ0FdygKIq2Ax7qAYqg",
		time.Duration(30)*time.Hour, true)
	if err != nil {
		t.Fatalf("Set returned error: %v", err)
	}

	value := db.Get("niconicocsc", "TestRedisSetFunctionality")
	if value == nil {
		t.Fatal("Get returned no value for key set by this test")
	}
}

// TestRedisGetFunctionality reads back the key written by the Set test and
// fails if nothing is stored under it.
func TestRedisGetFunctionality(t *testing.T) {
	logger := golog.New()
	manager := Init(logger)
	db := manager.Database

	value := db.Get("niconicocsc", "TestRedisSetFunctionality")
	if value == nil {
		t.Fatal("Get returned no value for key 'TestRedisSetFunctionality'")
	}
}
package awss3

import (
	"mime/multipart"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/gammazero/workerpool"
	"github.com/labstack/echo"
	"main/config"
	"main/handlers"
	"main/handlers/base_handlers"
)

// Handler groups the S3-backed HTTP handlers.
type Handler struct{}

// AwsRegions lists the AWS regions the application can work with.
var AwsRegions = []string{
	"eu-central-1", "eu-west-1", "eu-west-2", "eu-south-1", "eu-west-3",
	"eu-north-1", "me-south-1", "sa-east-1", "us-east-2", "us-east-1",
	"us-west-1", "us-west-2", "af-south-1", "ap-east-1", "ap-south-1",
	"ap-northeast-3", "ap-northeast-2", "ap-southeast-1", "ap-southeast-2",
	"ap-northeast-1", "ca-central-1", "cn-north-1", "cn-northwest-1",
}

// maxKeys caps the number of objects fetched per ListObjectsV2 call.
const maxKeys = 100

// workerNum bounds the concurrency of the upload/delete worker pools.
const workerNum = 15

// getSession builds an AWS session from the configured credentials and region.
func getSession() *session.Session {
	sess, _ := session.NewSession(&aws.Config{
		Credentials: credentials.NewStaticCredentials(config.Conf.AwsConfig.AwsId, config.Conf.AwsConfig.AwsSecretKey, ""),
		Region:      aws.String(config.Conf.AwsConfig.AwsRegion),
	})
	return sess
}

// regionScopedClient re-resolves the bucket's region after a request failure,
// stores it in the config, and returns a new client bound to that region.
// It returns nil when the region cannot be determined.
// Fix: the original ignored the GetBucketLocation error and dereferenced
// LocationConstraint unconditionally, which panics on error or for buckets
// in us-east-1 (where the field is nil per the S3 API).
func regionScopedClient(svc *s3.S3, bucket string) *s3.S3 {
	region, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil || region.LocationConstraint == nil {
		return nil
	}
	config.Conf.UpdateAwsRegion(*region.LocationConstraint)
	return s3.New(getSession())
}

// foldersFromPrefixes converts a listing's common prefixes into Folder links;
// "/" in the prefix is encoded as ":" for the URL path segment.
func foldersFromPrefixes(c echo.Context, bucket string, prefixes []*s3.CommonPrefix) []handlers.Folder {
	folders := make([]handlers.Folder, len(prefixes))
	for i, item := range prefixes {
		folders[i] = handlers.Folder{
			Name: *item.Prefix,
			Url:  c.Echo().URI(base_handlers.ListFolderObjects, bucket, strings.Replace(*item.Prefix, "/", ":", -1)),
		}
	}
	return folders
}

// objectFromKey builds the view model for one S3 key, including a 15-minute
// presigned download URL and a flag for whether the file type is displayable.
// (This logic was previously copy-pasted into three handlers.)
func objectFromKey(svc *s3.S3, bucket, key string) handlers.Object {
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	fileType := handlers.GetFileType(key)
	fileTypeIsValid := false
	for _, val := range handlers.ValidFileType {
		if fileType == val {
			fileTypeIsValid = true
			break
		}
	}
	urlStr, _ := req.Presign(15 * time.Minute)
	return handlers.Object{
		Name:    key,
		Url:     urlStr,
		Type:    fileType,
		IsValid: fileTypeIsValid,
	}
}

// ListBaseObjects renders the top-level objects and folders of a bucket.
// Errors render the (empty) result page rather than an error page.
func (h Handler) ListBaseObjects(c echo.Context) error {
	svc := s3.New(getSession())
	bucket := c.ParamValues()[0]
	var result = new(handlers.ListObjectsResult)
	result.Bucket = handlers.Bucket{
		Name: bucket,
		Url:  bucket,
	}
	// Get objects
	resp, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:    aws.String(bucket),
		MaxKeys:   aws.Int64(maxKeys),
		Delimiter: aws.String("/"),
	})
	if err != nil {
		// A request failure usually means the client is bound to the wrong
		// region; retry once against the bucket's real region.
		if _, ok := err.(awserr.RequestFailure); ok {
			svc = regionScopedClient(svc, bucket)
			if svc == nil {
				return c.Render(http.StatusOK, "album.html", result)
			}
			resp, err = svc.ListObjectsV2(&s3.ListObjectsV2Input{
				Bucket:    aws.String(bucket),
				MaxKeys:   aws.Int64(maxKeys),
				Delimiter: aws.String("/"),
			})
			if err != nil {
				return c.Render(http.StatusOK, "album.html", result)
			}
		} else {
			return c.Render(http.StatusOK, "album.html", result)
		}
	}
	// Adding folders, object count and objects
	result.Folders = foldersFromPrefixes(c, bucket, resp.CommonPrefixes)
	result.Count = len(resp.Contents)
	result.Objects = make([]handlers.Object, result.Count)
	for i, item := range resp.Contents {
		result.Objects[i] = objectFromKey(svc, bucket, *item.Key)
	}
	return c.Render(http.StatusOK, "album.html", result)
}

// getPreviousUrl returns the listing URL of the parent of folder f
// (f uses ":" as the path separator inside the URL).
func getPreviousUrl(f string, c echo.Context, b string) string {
	splitFolder := strings.Split(f, ":")
	folder := strings.Join(splitFolder[0:len(splitFolder)-2], ":") + ":"
	if folder == ":" {
		// Parent is the bucket root.
		return c.Echo().URI(base_handlers.ListBaseObjects, b)
	}
	return c.Echo().URI(base_handlers.ListFolderObjects, b, folder)
}

// ListFolderObjects renders the objects and sub-folders under one folder.
func (h Handler) ListFolderObjects(c echo.Context) error {
	svc := s3.New(getSession())
	bucket := c.ParamValues()[0]
	folderKey := strings.Replace(c.ParamValues()[1], ":", "/", -1)
	var result = new(handlers.ListObjectsResult)
	result.Bucket = handlers.Bucket{
		Name:   bucket,
		Prefix: folderKey,
		Url:    bucket,
	}
	result.PreviousFolderUrl = getPreviousUrl(c.ParamValues()[1], c, bucket)
	// Get objects
	resp, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:    aws.String(bucket),
		MaxKeys:   aws.Int64(maxKeys),
		Delimiter: aws.String("/"),
		Prefix:    aws.String(folderKey),
	})
	if err != nil {
		// Region control: retry once against the bucket's real region.
		if _, ok := err.(awserr.RequestFailure); ok {
			svc = regionScopedClient(svc, bucket)
			if svc == nil {
				return c.Render(http.StatusOK, "album.html", result)
			}
			// Fix: the original retry dropped the Prefix, listing the bucket
			// root instead of the requested folder.
			resp, err = svc.ListObjectsV2(&s3.ListObjectsV2Input{
				Bucket:    aws.String(bucket),
				MaxKeys:   aws.Int64(maxKeys),
				Delimiter: aws.String("/"),
				Prefix:    aws.String(folderKey),
			})
			if err != nil {
				return c.Render(http.StatusOK, "album.html", result)
			}
		} else {
			return c.Render(http.StatusOK, "album.html", result)
		}
	}
	// Adding folders
	result.Folders = foldersFromPrefixes(c, bucket, resp.CommonPrefixes)
	if resp.Contents == nil {
		return c.Render(http.StatusOK, "album.html", result)
	}
	// The first object in the listing is assumed to be the folder placeholder
	// object itself, so it is skipped.
	// NOTE(review): that placeholder exists for folders created via
	// CreateFolder (key ending in "/"), but may not for folders created by
	// other tools — confirm before relying on Contents[0] being the folder.
	result.Count = len(resp.Contents) - 1
	result.Objects = make([]handlers.Object, result.Count)
	for i, item := range resp.Contents[1:] {
		result.Objects[i] = objectFromKey(svc, bucket, *item.Key)
	}
	return c.Render(http.StatusOK, "album.html", result)
}

// CreateFolder creates an empty "folder" object (a key ending in "/")
// under the posted parent folder.
func (h Handler) CreateFolder(c echo.Context) error {
	svc := s3.New(getSession())
	newFolderName := c.FormValue("new_folder_name")
	folderName := c.FormValue("folder_name")
	bucket := c.ParamValues()[0]
	// Folder create
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(folderName + newFolderName + "/"),
	})
	if err != nil {
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			return c.JSON(http.StatusOK, handlers.JsonResponse{Error: true, Message: awsErr.Message()})
		}
	}
	return c.JSON(http.StatusOK, handlers.JsonResponse{Error: false, Message: "Success"})
}

// ListObjectsWithKey returns, as JSON, the page of objects after last_key —
// used for paging beyond the first maxKeys results.
func (h Handler) ListObjectsWithKey(c echo.Context) error {
	svc := s3.New(getSession())
	bucket := c.ParamValues()[0]
	folderKey := c.QueryParam("folder_key")
	lastKey := c.QueryParam("last_key")
	var result = new(handlers.ListObjectsResult)
	result.Bucket = handlers.Bucket{
		Name:   bucket,
		Prefix: folderKey,
		Url:    bucket,
	}
	// Get objects starting after the last key the client already has.
	resp, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:     aws.String(bucket),
		MaxKeys:    aws.Int64(maxKeys),
		Delimiter:  aws.String("/"),
		Prefix:     aws.String(folderKey),
		StartAfter: aws.String(lastKey),
	})
	if err != nil {
		return c.JSON(http.StatusOK, result)
	}
	// Adding folders (paged results keep an empty URL path segment).
	result.Folders = make([]handlers.Folder, len(resp.CommonPrefixes))
	for i, item := range resp.CommonPrefixes {
		result.Folders[i] = handlers.Folder{
			Name: *item.Prefix,
			Url:  c.Echo().URI(base_handlers.ListFolderObjects, bucket, ""),
		}
	}
	// Adding object count and objects
	result.Count = len(resp.Contents)
	result.Objects = make([]handlers.Object, result.Count)
	for i, item := range resp.Contents {
		result.Objects[i] = objectFromKey(svc, bucket, *item.Key)
	}
	return c.JSON(http.StatusOK, result)
}

// ListBuckets renders every bucket visible to the configured credentials.
func (h Handler) ListBuckets(c echo.Context) error {
	svc := s3.New(getSession())
	// List buckets
	resp, err := svc.ListBuckets(nil)
	if err != nil {
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			return c.JSON(http.StatusOK, handlers.JsonResponse{Error: true, Message: awsErr.Message()})
		}
		return c.JSON(http.StatusOK, handlers.JsonResponse{Error: true, Message: "Error"})
	}
	var buckets handlers.ListBucketsResult
	buckets.Count = len(resp.Buckets)
	for _, item := range resp.Buckets {
		buckets.Buckets = append(buckets.Buckets, handlers.Bucket{
			Name: *item.Name,
			Url:  c.Echo().URI(base_handlers.ListBaseObjects, *item.Name, ""),
		})
	}
	return c.Render(http.StatusOK, "buckets.html", buckets)
}

// CreateBucket creates a bucket and waits until it actually exists.
func (h Handler) CreateBucket(c echo.Context) error {
	svc := s3.New(getSession())
	bucketName := c.FormValue("bucket_name")
	// Create bucket
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	})
	if err != nil {
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			return c.JSON(http.StatusOK, handlers.JsonResponse{Error: true, Message: awsErr.Message()})
		}
	}
	// Wait until the bucket exists before reporting success.
	err = svc.WaitUntilBucketExists(&s3.HeadBucketInput{
		Bucket: aws.String(bucketName),
	})
	if err != nil {
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			return c.JSON(http.StatusOK, handlers.JsonResponse{Error: true, Message: awsErr.Message()})
		}
	}
	return c.JSON(http.StatusOK, handlers.JsonResponse{Error: false, Message: "Success"})
}

// UploadFileToBucket uploads every file in the multipart form concurrently
// (bounded by workerNum) and reports the file names that failed.
func (h Handler) UploadFileToBucket(c echo.Context) error {
	sess := getSession()
	form, err := c.MultipartForm()
	if err != nil {
		return err
	}
	bucket := c.ParamValues()[0]
	folder_key := form.Value["folder_key_input"]
	files := form.File["file_input"]
	response := handlers.DetailedJsonResponse{Error: false, Message: "Success"}
	errors := make(chan string, len(files))
	wp := workerpool.New(workerNum)
	for _, file := range files {
		// Capture the loop variable for the submitted closure.
		func(file *multipart.FileHeader) {
			wp.Submit(func() {
				src, err := file.Open()
				if err != nil {
					// NOTE(review): file.Open errors are never RequestFailure,
					// so open failures currently fall through; confirm whether
					// they should also be reported via the errors channel.
					if _, ok := err.(awserr.RequestFailure); ok {
						errors <- file.Filename
						return
					}
				}
				defer src.Close()
				// Copy file
				uploader := s3manager.NewUploader(sess)
				_, err = uploader.Upload(&s3manager.UploadInput{
					Bucket: aws.String(bucket),
					Key:    aws.String(folder_key[0] + file.Filename),
					Body:   src,
				})
				if err != nil {
					if _, ok := err.(awserr.RequestFailure); ok {
						errors <- file.Filename
						return
					}
				}
			})
		}(file)
	}
	wp.StopWait()
	close(errors)
	for e := range errors {
		response.Failed = append(response.Failed, e)
	}
	return c.JSON(http.StatusOK, response)
}

// DeleteBuckets deletes the posted buckets concurrently and reports the
// bucket names that failed.
func (h Handler) DeleteBuckets(c echo.Context) error {
	svc := s3.New(getSession())
	_ = c.FormValue("buckets[]") // force form parsing before reading the slice
	buckets := c.Request().Form["buckets[]"]
	errors := make(chan string, len(buckets))
	response := handlers.DetailedJsonResponse{Error: false, Message: "Success"}
	wp := workerpool.New(workerNum)
	for _, bucket := range buckets {
		// Capture the loop variable for the submitted closure.
		func(bucket string) {
			wp.Submit(func() {
				_, err := svc.DeleteBucket(&s3.DeleteBucketInput{
					Bucket: aws.String(bucket),
				})
				if err != nil {
					if _, ok := err.(awserr.RequestFailure); ok {
						errors <- bucket
						return
					}
				}
				// Wait until the bucket is really gone.
				err = svc.WaitUntilBucketNotExists(&s3.HeadBucketInput{
					Bucket: aws.String(bucket),
				})
				if err != nil {
					if _, ok := err.(awserr.RequestFailure); ok {
						errors <- bucket
						return
					}
				}
			})
		}(bucket)
	}
	wp.StopWait()
	close(errors)
	for e := range errors {
		response.Failed = append(response.Failed, e)
	}
	return c.JSON(http.StatusOK, response)
}

// getObjectsToDelete wraps raw keys in the ObjectIdentifier structs the
// DeleteObjects API expects.
func getObjectsToDelete(keys []string) []*s3.ObjectIdentifier {
	var objects []*s3.ObjectIdentifier
	for _, key := range keys {
		objects = append(objects, &s3.ObjectIdentifier{
			Key: aws.String(key),
		})
	}
	return objects
}

// DeleteObjects deletes the posted keys in one batch call, then waits until
// the last key is really gone.
func (h Handler) DeleteObjects(c echo.Context) error {
	_ = c.FormValue("keys[]") // force form parsing before reading the slice
	keys := c.Request().Form["keys[]"]
	// Fix: the original indexed keys[len(keys)-1] below, which panics when
	// no keys were posted.
	if len(keys) == 0 {
		return c.JSON(http.StatusOK, handlers.JsonResponse{Error: true, Message: "No keys"})
	}
	otd := getObjectsToDelete(keys)
	svc := s3.New(getSession())
	bucket := c.ParamValues()[0]
	// Delete Objects
	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &s3.Delete{
			Objects: otd,
		},
	})
	if err != nil {
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			return c.JSON(http.StatusOK, handlers.JsonResponse{Error: true, Message: awsErr.Message()})
		}
	}
	// Existence control: wait for the last key to disappear.
	err = svc.WaitUntilObjectNotExists(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(keys[len(keys)-1]),
	})
	if err != nil {
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			return c.JSON(http.StatusOK, handlers.JsonResponse{Error: true, Message: awsErr.Message()})
		}
	}
	return c.JSON(http.StatusOK, handlers.JsonResponse{Error: false, Message: "Objects deleted"})
}

// DeleteFolders deletes every object under each posted folder prefix
// concurrently and reports the prefixes that failed.
func (h Handler) DeleteFolders(c echo.Context) error {
	_ = c.FormValue("keys[]") // force form parsing before reading the slice
	keys := c.Request().Form["keys[]"]
	svc := s3.New(getSession())
	bucket := c.ParamValues()[0]
	response := handlers.DetailedJsonResponse{Error: false, Message: "Success"}
	errors := make(chan string, len(keys))
	wp := workerpool.New(workerNum)
	for _, key := range keys {
		// Capture the loop variables for the submitted closure.
		func(bucket string, key string, wp *workerpool.WorkerPool) {
			wp.Submit(func() {
				iter := s3manager.NewDeleteListIterator(svc, &s3.ListObjectsInput{
					Bucket: aws.String(bucket),
					Prefix: aws.String(key),
				})
				if err := s3manager.NewBatchDeleteWithClient(svc).Delete(aws.BackgroundContext(), iter); err != nil {
					errors <- key
					return
				}
			})
		}(bucket, key, wp)
	}
	wp.StopWait()
	close(errors)
	for e := range errors {
		response.Failed = append(response.Failed, e)
	}
	return c.JSON(http.StatusOK, response)
}
package pgsql

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/syahidfrd/go-boilerplate/domain"
)

// pgsqlAuthorRepository is the PostgreSQL-backed implementation of
// domain.AuthorRepository.
type pgsqlAuthorRepository struct {
	db *sql.DB
}

// NewPgsqlAuthorRepository creates an authorRepository object representing
// the domain.AuthorRepository interface.
func NewPgsqlAuthorRepository(db *sql.DB) domain.AuthorRepository {
	return &pgsqlAuthorRepository{
		db: db,
	}
}

// Create inserts a new author row; the database assigns the id.
func (r *pgsqlAuthorRepository) Create(ctx context.Context, author *domain.Author) (err error) {
	query := "INSERT INTO authors (name, created_at, updated_at) VALUES ($1, $2, $3)"
	_, err = r.db.ExecContext(ctx, query, author.Name, author.CreatedAt, author.UpdatedAt)
	return
}

// GetByID fetches a single author; err is sql.ErrNoRows when id is unknown.
func (r *pgsqlAuthorRepository) GetByID(ctx context.Context, id int64) (author domain.Author, err error) {
	query := "SELECT id, name, created_at, updated_at FROM authors WHERE id = $1"
	err = r.db.QueryRowContext(ctx, query, id).Scan(&author.ID, &author.Name, &author.CreatedAt, &author.UpdatedAt)
	return
}

// Fetch returns all authors.
func (r *pgsqlAuthorRepository) Fetch(ctx context.Context) (authors []domain.Author, err error) {
	query := "SELECT id, name, created_at, updated_at FROM authors"
	rows, err := r.db.QueryContext(ctx, query)
	if err != nil {
		return authors, err
	}
	defer rows.Close()

	for rows.Next() {
		var author domain.Author
		if err := rows.Scan(&author.ID, &author.Name, &author.CreatedAt, &author.UpdatedAt); err != nil {
			return authors, err
		}
		authors = append(authors, author)
	}
	// Fix: surface errors encountered during iteration (e.g. a dropped
	// connection mid-scan); these were previously silently ignored.
	if err := rows.Err(); err != nil {
		return authors, err
	}
	return authors, nil
}

// Update rewrites name/updated_at for one author and errors if the row count
// is not exactly one.
func (r *pgsqlAuthorRepository) Update(ctx context.Context, author *domain.Author) (err error) {
	query := "UPDATE authors SET name = $1, updated_at = $2 WHERE id = $3"
	res, err := r.db.ExecContext(ctx, query, author.Name, author.UpdatedAt, author.ID)
	if err != nil {
		return
	}
	affect, err := res.RowsAffected()
	if err != nil {
		return
	}
	if affect != 1 {
		err = fmt.Errorf("weird behavior, total affected: %d", affect)
	}
	return
}

// Delete removes one author and errors if the row count is not exactly one.
func (r *pgsqlAuthorRepository) Delete(ctx context.Context, id int64) (err error) {
	query := "DELETE FROM authors WHERE id = $1"
	res, err := r.db.ExecContext(ctx, query, id)
	if err != nil {
		return
	}
	affect, err := res.RowsAffected()
	if err != nil {
		return
	}
	if affect != 1 {
		err = fmt.Errorf("weird behavior, total affected: %d", affect)
	}
	return
}
package entity

import (
	"time"

	"github.com/Surafeljava/gorm"
)

// Admin is a back-office account identified by AdminId.
type Admin struct {
	ID       uint
	AdminId  string `gorm:"type:varchar(50);not null"`
	AdminPwd string `gorm:"type:varchar(50);not null"`
}

// UserType holds generic login credentials (gorm.Model supplies ID and
// timestamp columns).
type UserType struct {
	gorm.Model
	UsrId  string
	UsrPwd string
}

// Case is a court case record. CaseNum is the business key referenced by
// Relation, Decision and Witness.
type Case struct {
	ID            uint
	CaseNum       string `gorm:"type:varchar(50);not null"`
	CaseTitle     string `gorm:"type:varchar(50);not null"`
	CaseDesc      string `gorm:"type:varchar(50);not null"`
	CaseStatus    string `gorm:"type:varchar(50);not null"`
	CaseType      string `gorm:"type:varchar(50);not null"`
	CaseCreation  time.Time
	CaseCourtDate time.Time
	CaseJudge     string `gorm:"type:varchar(50);not null"`
}

// CaseInfo is a trimmed, JSON-serializable view of a Case.
type CaseInfo struct {
	CaseTitle  string    `json:"case_title" gorm:"type:varchar(255)"`
	CaseStatus string    `json:"case_status" gorm:"type:varchar(255)"`
	CourtDate  time.Time `json:"court_date" gorm:"type:varchar(255)"`
}

// Relation links a case (CaseNum) to its parties: PlId and AcId.
// NOTE(review): presumably plaintiff and accused ids — confirm with callers.
type Relation struct {
	ID      uint
	CaseNum string `gorm:"type:varchar(255);not null"`
	PlId    string `gorm:"type:varchar(255);not null"`
	AcId    string `gorm:"type:varchar(255);not null"`
	//JuId string `gorm:"type:varchar(255);not null"`
}

// Decision records the verdict issued for a case.
type Decision struct {
	ID           uint
	CaseNum      string `gorm:"type:varchar(255);not null"`
	DecisionDate time.Time
	Decision     string `gorm:"type:varchar(255);not null"`
	DecisionDesc string `gorm:"type:varchar(255);not null"`
}

// Witness stores a supporting document attached to a case.
type Witness struct {
	ID          uint
	CaseNum     string `gorm:"type:varchar(255);not null"`
	WitnessDoc  string `gorm:"type:varchar(255);not null"`
	WitnessType string `gorm:"type:varchar(255);not null"`
}

// Judge is a judge account and profile.
type Judge struct {
	ID           uint
	JudgeId      string `gorm:"type:varchar(50);not null"`
	JudgePwd     string `gorm:"type:varchar(50);not null"`
	JudgeName    string `gorm:"type:varchar(50);not null"`
	JudgeGender  string `gorm:"type:varchar(50);not null"`
	JudgeAddress string `gorm:"type:varchar(50);not null"`
	JudgePhone   string `gorm:"type:varchar(50);not null"`
	JudgeType    string `gorm:"type:varchar(50);not null"`
	JudgePhoto   string `gorm:"type:varchar(50);not null"`
}

// Notification is a message shown to users, categorized by NotLevel.
type Notification struct {
	ID             uint
	NotDescription string `gorm:"type:varchar(255);not null"`
	NotTitle       string `gorm:"type:varchar(255);not null"`
	NotLevel       string `gorm:"type:varchar(50);not null"`
	NotDate        time.Time
}

// Opponent is a party to a case; OppType distinguishes the party kind.
type Opponent struct {
	ID         uint
	OppId      string `gorm:"type:varchar(50);not null"`
	OppPwd     string `gorm:"type:varchar(50);not null"`
	OppType    string `gorm:"type:varchar(50);not null"`
	OppName    string `gorm:"type:varchar(50);not null"`
	OppGender  string `gorm:"type:varchar(50);not null"`
	OppBD      time.Time
	OppAddress string `gorm:"type:varchar(50);not null"`
	OppPhone   string `gorm:"type:varchar(50);not null"`
	OppPhoto   string `gorm:"type:varchar(50);not null"`
}

// SuccessMessage is a generic status/message pair for responses.
type SuccessMessage struct {
	Status  string
	Message string
}

//TODO: unfinished session work...
// Session stores server-side login session state.
type Session struct {
	ID         uint
	UUID       string `gorm:"type:varchar(255);not null"`
	Expires    int64  `gorm:"type:varchar(255);not null"`
	SigningKey []byte `gorm:"type:varchar(255);not null"`
}

// Messg bundles user credentials with an extra message.
// NOTE(review): "AddtionalMsg" looks like a typo for "AdditionalMsg", but the
// field name is part of the public interface and is kept as-is.
type Messg struct {
	UserID       string
	UserPwd      string
	UserName     string
	AddtionalMsg string
}

// Court describes a court house.
type Court struct {
	ID           uint
	CourtName    string `gorm:"type:varchar(255);not null"`
	CourtLevel   string `gorm:"type:varchar(255);not null"`
	CourtAddress string `gorm:"type:varchar(255);not null"`
	CourtPhone   string `gorm:"type:varchar(255);not null"`
}

// AppealForm aggregates the data shown on an appeal form.
// NOTE(review): "DacDesc" looks like a typo for "DecDesc"; kept as-is since
// the field name is part of the public interface.
type AppealForm struct {
	CaseNum          string
	CaseCreationDate time.Time
	CaseTitle        string
	CaseDesc         string
	OppName          string
	OppGender        string
	OppAddress       string
	OppPhone         string
	WitDocm          string
	WitTy            string
	Decision         string
	DecDate          time.Time
	DacDesc          string
}
package main

// The comment directly above `import "C"` is the cgo preamble: cgo compiles
// it as C code, so the #include must live there for C.math_add / C.math_sub
// to resolve. The original tucked the #include inside a grouped import next
// to "fmt", which is not the canonical (gofmt/cgo-documented) form; cgo
// expects a standalone import "C" with the preamble immediately above it.

// #include "math.h"
import "C"

import "fmt"

// main demonstrates calling two functions from the local C library.
func main() {
	fmt.Println("Welcome from Go")
	fmt.Println("Let's call some functions from our C library")
	fmt.Printf("C: math_add(2, 3)=%d\n", C.math_add(2, 3))
	fmt.Printf("C: math_sub(5, 2)=%d\n", C.math_sub(5, 2))
}
package reverseproxy

import (
	"bytes"
	"crypto/md5"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"strconv"
	"testing"
)

// staticJsonQuery is a stub query that ignores its input and always yields
// the same two-element array.
type staticJsonQuery struct{}

func (staticJsonQuery) GetSubNodes(jsonData []interface{}) (nodes []interface{}, err error) {
	testJsonStr := `[{"id": 1}, {"id": 2}]`
	var testJson []interface{}
	// Fix: propagate the unmarshal error instead of silently ignoring it.
	if err := json.Unmarshal([]byte(testJsonStr), &testJson); err != nil {
		return nil, err
	}
	return testJson, nil
}

// hash fingerprints a response (status, headers, ...) so tests can detect
// any modification. (The local variable previously shadowed the imported
// bytes package; renamed.)
func hash(obj *http.Response) [16]byte {
	data, _ := json.Marshal(obj)
	return md5.Sum(data)
}

// getFakeHttpResponse builds a minimal 200 response with a one-element body
// and a matching Content-Length header.
func getFakeHttpResponse() *http.Response {
	bodyBytes := []byte(`{"data": [{"id": 1}]}`)
	body := ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
	contentLength := len(bodyBytes)
	resp := &http.Response{
		Body:          body,
		ContentLength: int64(len(bodyBytes)),
		StatusCode:    200,
		Header:        make(http.Header),
	}
	resp.Header.Set("Content-Length", strconv.Itoa(contentLength))
	return resp
}

func Test_ModifyResponseFunc_StatusCode402_ReturnsUnmodifiedBody(t *testing.T) {
	responseModifier := NewJsonQueryResponseModifier(&staticJsonQuery{})
	fakeHttpResponse := getFakeHttpResponse()
	fakeHttpResponse.StatusCode = 402
	hash1 := hash(fakeHttpResponse)
	err := responseModifier.Get()(fakeHttpResponse)
	if err != nil {
		t.Errorf("There was an error calling ModifyResponseFunc.")
	}
	hash2 := hash(fakeHttpResponse)
	if hash1 != hash2 {
		t.Errorf("Http response should be the same after modification when status code is not 200.")
	}
}

func Test_ModifyResponseFunc_NilConverter_ReturnsUnmodifiedBody(t *testing.T) {
	responseModifier := NewJsonQueryResponseModifier(nil)
	fakeHttpResponse := getFakeHttpResponse()
	hash1 := hash(fakeHttpResponse)
	err := responseModifier.Get()(fakeHttpResponse)
	if err != nil {
		t.Errorf("There was an error calling ModifyResponseFunc.")
	}
	hash2 := hash(fakeHttpResponse)
	if hash1 != hash2 {
		t.Errorf("Http response should be the same when a nil converter is specified.")
	}
}

func Test_ModifyResponseFunc_ReturnsCorrectModifedResponse(t *testing.T) {
	expectedBody := `{"data":[{"id":1},{"id":2}]}`
	expectedContentLength := len(expectedBody)
	expectedStatusCode := 200
	expectedContentLengthHeaderValue := strconv.Itoa(expectedContentLength)
	responseModifier := NewJsonQueryResponseModifier(&staticJsonQuery{})
	fakeHttpResponse := getFakeHttpResponse()
	err := responseModifier.Get()(fakeHttpResponse)
	if err != nil {
		t.Errorf("There was an error calling ModifyResponseFunc.")
	}
	bodyBytes, _ := ioutil.ReadAll(fakeHttpResponse.Body)
	body := string(bodyBytes)
	contentLengthHeaderValue := fakeHttpResponse.Header.Get("Content-Length")
	if body != expectedBody {
		t.Errorf("Body of http response is incorrect, got '%s' expected '%s'", body, expectedBody)
	}
	if fakeHttpResponse.StatusCode != expectedStatusCode {
		t.Errorf("Status code of http response is incorrect, got '%d' expected '%d'", fakeHttpResponse.StatusCode, expectedStatusCode)
	}
	if fakeHttpResponse.ContentLength != int64(expectedContentLength) {
		t.Errorf("Content length of http response is incorrect, got '%d' expected '%d'", fakeHttpResponse.ContentLength, expectedContentLength)
	}
	if contentLengthHeaderValue != expectedContentLengthHeaderValue {
		t.Errorf("Content length in header of http response is incorrect, got '%s' expected '%s'", contentLengthHeaderValue, expectedContentLengthHeaderValue)
	}
}
package main

import (
	"time"

	"github.com/muesli/cache2go"
)

// knownUserIds caches the ids of visitors we have already seen, keyed under
// the "visitors" cache table.
var knownUserIds = cache2go.Cache("visitors")

// AddKnownUser remembers userId for 15 days; already-known users are left
// untouched (their TTL is not refreshed).
func AddKnownUser(userId string) {
	if !IsNewUser(userId) {
		return
	}
	knownUserIds.Add(userId, 15*24*time.Hour, userId)
}

// IsNewUser reports whether userId has not been seen (or has expired from
// the cache).
func IsNewUser(userId string) bool {
	item, _ := knownUserIds.Value(userId)
	return item == nil
}
package main

import (
	"fmt"
)

// decimalToBinary converts val to its binary digits and prints the number
// in conventional binary notation (most-significant bit first).
//
// The returned slice holds the bits least-significant first, matching the
// original behavior so existing callers are unaffected.
// NOTE(review): val == 0 yields an empty slice and prints nothing — confirm
// whether a single "0" digit is wanted instead.
//
// Fix: the original print loop called fmt.Print(bits) — the whole slice —
// once per bit, producing e.g. "[0 1 1][0 1 1][0 1 1]" for 6 instead of
// the binary digits.
func decimalToBinary(val uint64) []uint64 {
	bits := []uint64{}
	for val != 0 {
		bits = append(bits, val%2)
		val /= 2
	}
	for i := len(bits) - 1; i >= 0; i-- {
		fmt.Print(bits[i])
	}
	return bits
}
package suggestion_service

import (
	"ms/sun_old/base"
	"ms/sun/shared/helper"
	"ms/sun/servises/view_service"
	"ms/sun/shared/config"
	"ms/sun/shared/x"
	"time"
)

// TOP_TAGS_LIMIT is the number of most-used tags kept in the in-memory cache.
const TOP_TAGS_LIMIT = 30

// Tags_RepeatedlyJobs starts the background job that periodically refreshes
// the cached top tags and their sample posts. It returns immediately; the
// work runs in a goroutine that loops forever.
func Tags_RepeatedlyJobs() {
	//top tags
	go func() {
		// Keep a panic inside the job from killing the process.
		defer helper.JustRecover()
		for {
			if config.DEBUG_DELAY_RUN_STARTUPS { //just don't make the log files messy for this at each startups
				time.Sleep(time.Minute * 5)
			}
			reloadTopTags()
			reloadTopPostsForTopTags()
			time.Sleep(time.Minute * config.TAGS_RELOAD_TOP_INTERVAL_MINS)
		}
	}()
}

// TopTagsViews is the latest snapshot of the most-used tags.
// NOTE(review): it is swapped wholesale by reloadTopTags and presumably read
// from other goroutines without synchronization — confirm whether a mutex or
// atomic pointer is needed.
var TopTagsViews []*x.PB_TagView

// TopTagsWithPostsResult is the latest snapshot of top tags, each paired
// with up to 4 sample photo posts.
var TopTagsWithPostsResult = make([]*x.PB_TopTagWithSamplePosts, 0, 50)

// reloadTopTags rebuilds TopTagsViews from the TOP_TAGS_LIMIT most-used tags
// in the database. On a query error the previous snapshot is kept unchanged.
func reloadTopTags() {
	tags, err := x.NewTag_Selector().OrderBy_Count_Desc().Limit(TOP_TAGS_LIMIT).GetRows(base.DB)
	if err != nil {
		return
	}
	// Build into a local slice first so readers never observe a
	// half-filled list; the package variable is swapped in one assignment.
	var topTagsViews = make([]*x.PB_TagView, 0, 50)
	for _, m := range tags {
		pb := &x.PB_TagView{
			TagId:         int64(m.TagId),
			Name:          m.Name,
			Count:         int32(m.Count),
			TagStatusEnum: int32(m.TagStatusEnum),
			CreatedTime:   int32(m.CreatedTime),
		}
		topTagsViews = append(topTagsViews, pb)
	}
	TopTagsViews = topTagsViews
}

// reloadTopPostsForTopTags rebuilds TopTagsWithPostsResult: for each cached
// top tag it loads up to 4 of the newest photo posts as samples. Tags whose
// post query fails are skipped for this round.
func reloadTopPostsForTopTags() {
	tags := TopTagsViews
	var newTopTagsWithPosts = make([]*x.PB_TopTagWithSamplePosts, 0, 50)
	for _, t := range tags {
		postsIds, err := x.NewTagPost_Selector().Select_PostId().TagId_Eq(int(t.TagId)).
			PostTypeEnum_Eq(int(x.PostTypeEnum_POST_PHOTO)).
			Limit(4).
			OrderBy_Id_Desc().
			GetIntSlice(base.DB)
		if err != nil {
			continue
		}
		v := &x.PB_TopTagWithSamplePosts{
			TagView:      t,
			PostViewList: view_service.PostsViewsForPostIds(postsIds, 0),
		}
		newTopTagsWithPosts = append(newTopTagsWithPosts, v)
	}
	TopTagsWithPostsResult = newTopTagsWithPosts
}
package gherkin

import (
	"bytes"
	"fmt"
)

// step is a single executable line of a scenario, optionally carrying
// multiline table data.
type step struct {
	line      string
	orig      string
	keys      []string
	mldata    []map[string]string
	isPending bool
	errors    bytes.Buffer
	hasErrors bool
}

// String renders the step's display text.
func (s step) String() string {
	return s.line
}

// StepFromString builds a step whose display text stands alone (no separate
// original text).
func StepFromString(in string) step {
	return step{
		line:   in,
		keys:   []string{},
		mldata: []map[string]string{},
	}
}

// StepFromStringAndOrig builds a step whose display text differs from the
// original scenario text.
func StepFromStringAndOrig(in, orig string) step {
	return step{
		line:   in,
		orig:   orig,
		keys:   []string{},
		mldata: []map[string]string{},
	}
}

// addMlData appends one row of multiline table data.
func (s *step) addMlData(line map[string]string) {
	s.mldata = append(s.mldata, line)
}

// recoverPending converts a "Pending" panic into the isPending flag and
// re-raises any other panic value.
func (s *step) recoverPending() {
	rec := recover()
	switch {
	case rec == nil:
		// Normal return; nothing to do.
	case rec == "Pending":
		s.isPending = true
	default:
		panic(rec)
	}
}

// executeStepDef runs the first step definition that matches this step,
// reporting whether any matched. A non-match is recorded in the step's
// error buffer.
func (s *step) executeStepDef(steps []stepdef, ctx interface{}) bool {
	defer s.recoverPending()
	for _, def := range steps {
		if def.execute(s, &s.errors, ctx) {
			return true
		}
	}
	fmt.Fprintf(&s.errors, `Could not find step definition for "%s"`+"\n", s.orig)
	return false
}

// setMlKeys records the column names of the step's multiline table.
func (s *step) setMlKeys(keys []string) {
	s.keys = keys
}
package configuration

import (
	"io/ioutil"
	"log"

	"gopkg.in/yaml.v2"
)

/*
- Load yml file
- Read it into struct
- store it into map with host as key and servers as value array [{}, {}, {}]
*/

// Config maps a host name to the list of backend servers behind it.
type Config struct {
	Hosts map[string][]string `yaml:"Hosts"`
}

// read returns the raw contents of filename, exiting the process on failure.
func read(filename string) []byte {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatal(err)
	}
	return data
}

// Load parses the YAML configuration in filename. Consistent with read, a
// malformed file is fatal.
// Fix: the original discarded the yaml.Unmarshal error and silently returned
// a zero Config on malformed input.
func Load(filename string) Config {
	var config Config
	if err := yaml.Unmarshal(read(filename), &config); err != nil {
		log.Fatal(err)
	}
	return config
}
package pastebin

import (
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

// endpoint is pastebin.com's paste-creation API URL.
const endpoint = "https://pastebin.com/api/api_post.php"

// Client creates pastes on pastebin.com.
type Client interface {
	// Paste uploads body under title and returns the paste URL, or "" on
	// any failure.
	Paste(title, body, private, expire string) string
}

type clientImpl struct {
	apiKey string
}

// NewClient returns a Client authenticated with the given API developer key.
func NewClient(apiKey string) Client {
	return &clientImpl{apiKey: apiKey}
}

// Paste posts the form-encoded paste request and returns the URL pastebin
// responds with, or "" on transport errors, non-200 status, or an API error.
func (T *clientImpl) Paste(title, body, private, expire string) string {
	u := url.Values{}
	u.Set("api_option", "paste")
	u.Set("api_dev_key", T.apiKey)
	// Cap the body at 100k bytes before uploading.
	bodyTrimmed := body
	if len(body) > 100000 {
		bodyTrimmed = body[:100000]
	}
	u.Set("api_paste_code", bodyTrimmed)
	u.Set("api_paste_name", title)
	u.Set("api_paste_private", private)
	u.Set("api_paste_expire_date", expire)

	hc := http.Client{}
	req, err := http.NewRequest("POST", endpoint, strings.NewReader(u.Encode()))
	if err != nil {
		return ""
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	res, err := hc.Do(req)
	if err != nil {
		return ""
	}
	// Fix: the response body was never closed, leaking the connection on
	// every call.
	defer res.Body.Close()

	if res.StatusCode == http.StatusOK {
		resp, _ := ioutil.ReadAll(res.Body)
		// Pastebin reports request errors in the body ("Bad API ...").
		if !strings.Contains(string(resp), "Bad API") {
			return string(resp)
		}
	}
	return ""
}
package bingSpellCheck

import "fmt"

const (
	// ErrorResponseType is used as a value for SpellCheckResponse.Type and
	// indicates a request error occurred.
	ErrorResponseType = "ErrorResponse"
	// SpellCheckResponseType is used as a value for SpellCheckResponse.Type
	// and indicates a successful request.
	SpellCheckResponseType = "SpellCheck"
	// RepeatedTokenType indicates a repeated word in a spell/grammar check.
	RepeatedTokenType = "RepeatedToken"
	// UnknownTokenType indicates a spelling / grammatical error in the text.
	UnknownTokenType = "UnknownToken"
)

// SpellCheckResponse is the main payload returned by spell check API calls.
//
// Type is a type hint ("SpellCheck" or "ErrorResponse"); when Errors is
// non-empty Type is "ErrorResponse". FlaggedTokens holds the words flagged
// as misspelled or grammatically wrong; it is empty when no problems were
// found or the requested market is unsupported. Errors explains why a
// request failed.
type SpellCheckResponse struct {
	Type          string         `json:"_type"`
	FlaggedTokens []FlaggedToken `json:"flaggedTokens"`
	Errors        []Error        `json:"errors"`
}

// IsErrorResponse determines if the SpellCheckResponse indicates an error.
func (r *SpellCheckResponse) IsErrorResponse() bool {
	return r.Type == ErrorResponseType
}

// IsSpellCheckResponse determines if the SpellCheckResponse was successful.
func (r *SpellCheckResponse) IsSpellCheckResponse() bool {
	return r.Type == SpellCheckResponseType
}

// HasSuggestions determines if the SpellCheckResponse returned suggestions.
func (r *SpellCheckResponse) HasSuggestions() bool {
	return len(r.FlaggedTokens) > 0
}

// TokenSuggestion is a suggested replacement entry for a flagged token.
//
// Score expresses the service's confidence in the correction (fixed at 1.0
// when the request's mode parameter is 'spell'). Suggestion is the word to
// replace the flagged one with; it is empty for repeated-word flags
// (see FlaggedToken.Type).
type TokenSuggestion struct {
	Score      float64 `json:"score"`
	Suggestion string  `json:"suggestion"`
}

// FlaggedToken describes one word that is misspelled or grammatically wrong.
//
// Offset is the zero-based position of the word within the query text.
// Suggestions lists candidate corrections in decreasing order of preference.
// Token is the offending word itself. Type is the error category:
// 'RepeatedToken' for a word repeated in succession (e.g. "the warm warm
// weather"), 'UnknownToken' for every other spelling or grammar error.
type FlaggedToken struct {
	Offset      int               `json:"offset"`
	Suggestions []TokenSuggestion `json:"suggestions"`
	Token       string            `json:"token"`
	Type        string            `json:"type"`
}

// IsRepeatedToken determines if the flagged token represents a repeated word.
func (ft FlaggedToken) IsRepeatedToken() bool {
	return ft.Type == RepeatedTokenType
}

// IsUnknownToken determines if the flagged token represents a spelling or
// grammatical error.
func (ft FlaggedToken) IsUnknownToken() bool {
	return ft.Type == UnknownTokenType
}

// Error describes one failure reported by the API.
//
// Code is the error category (an HTTP status code); SubCode identifies the
// specific error. Message and MoreDetails describe the problem. Parameter
// and Value name the offending request parameter and its invalid value.
type Error struct {
	Code        string `json:"code"`
	Message     string `json:"message"`
	MoreDetails string `json:"moreDetails"`
	Parameter   string `json:"parameter"`
	SubCode     string `json:"subCode"`
	Value       string `json:"value"`
}

// Error formats the failure as "<code>: <message>. Parameter=<parameter>".
func (e Error) Error() string {
	return fmt.Sprintf("%s: %s. Parameter=%s", e.Code, e.Message, e.Parameter)
}
package main

import (
	"flag"
	"log"
	"net"
	"strconv"
	"time"
)

const (
	// defaultIP is used when neither an explicit IP nor an MDNS name is given.
	defaultIP = "192.168.4.1"
)

var (
	robotIP    string // IP address of robot
	robotPort  int    // Port on robot
	robotName  string // Hostname of robot
	serverPort int    // Port on which to listen

	// projectVersion/projectBuild default to "dev"; presumably overridden at
	// link time for release builds — confirm against the build scripts.
	projectVersion = "dev"
	projectBuild   = "dev"
)

// init registers the command-line flags.
func init() {
	flag.StringVar(&robotIP, "ip", "", "IP address of the robot")
	flag.IntVar(&robotPort, "port", 80, "Port of the robot")
	flag.StringVar(&robotName, "name", "", "MDNS Name of the robot")
	flag.IntVar(&serverPort, "server-port", 26153, "Port on which our server will listen")
}

// main resolves the robot's address and runs the bridge server, restarting
// the whole sequence (after a short pause) whenever the server shuts down
// or fails to start.
func main() {
	flag.Parse()
	log.Printf("Starting Auto-Bridge version %s, build %s\n", projectVersion, projectBuild)
	attempt := 0
	for {
		// Pause a little before every retry after the first attempt.
		if attempt > 0 {
			log.Println("Waiting a bit...")
			time.Sleep(time.Second * 5)
		}
		attempt++
		// Resolve the robot IP: explicit -ip wins, then MDNS lookup by
		// -name, then the default address.
		ip := robotIP
		if robotName == "" && ip == "" {
			ip = defaultIP
		} else if ip == "" {
			var err error
			ip, err = findRobotIP(robotName)
			if err != nil {
				log.Printf("Kan robot '%s' niet vinden\n", robotName)
				continue
			}
		}
		// Now start the server
		robotAddr := net.JoinHostPort(ip, strconv.Itoa(robotPort))
		// shutdownServer is signalled by the server when it wants a restart.
		shutdownServer := make(chan struct{})
		s, err := NewServer(robotAddr, serverPort, shutdownServer)
		if err != nil {
			log.Printf("Kan server niet starten: %#v\n", err)
			continue
		}
		log.Printf("Listening on port %d\n", serverPort)
		go func() {
			// NOTE(review): log.Fatalf here exits the whole process rather
			// than triggering the retry loop — confirm that is intended.
			if err := s.ListenAndServe(); err != nil {
				log.Fatalf("Kan de server niet starten: %#v\n", err)
			}
		}()
		// Block until the server asks to be shut down / restarted.
		<-shutdownServer
		log.Printf("Shutting down server...\n")
		if err := s.Shutdown(); err != nil {
			log.Printf("Failed to shutdown server: %#v\n", err)
		}
	}
}
package mysqldb import "strings" const ( // maxQuerySize 最大正整数 maxQuerySize = 2147483647 ) // sqlEscape 转移 SQL 语句 func sqlEscape(s string) string { s = strings.Replace(s, "%", "\\%", -1) s = strings.Replace(s, "_", "\\_", -1) s = strings.Replace(s, "\\", "\\\\", -1) return s }
package linkedlists

// deletedups removes duplicate values from the list in place using a set of
// seen values (O(n) time, O(n) extra space). The head is returned unchanged:
// the first occurrence of each value is always kept, so the original head
// node is never removed. ListNode is declared elsewhere in this package.
func deletedups(l *ListNode) *ListNode {
	vals := make(map[int]bool)
	var prev *ListNode
	cur := l
	for cur != nil {
		val := cur.value
		if _, ok := vals[val]; ok {
			// Duplicate: unlink cur. prev is non-nil here because the first
			// node's value cannot already be in the (initially empty) set.
			prev.next = cur.next
			cur = cur.next
		} else {
			vals[val] = true
			prev = cur
			cur = cur.next
		}
	}
	return l
}

// checkExists reports whether value v occurs anywhere in the list starting
// at l.
func checkExists(l *ListNode, v int) bool {
	cur := l
	for cur != nil {
		if cur.value == v {
			return true
		}
		cur = cur.next
	}
	return false
}

// nobufferdups removes duplicate values without auxiliary storage by
// scanning ahead for each node (O(n^2) time, O(1) space). Unlike deletedups
// it keeps the LAST occurrence of each value, so the head can change and the
// (possibly new) head is returned.
func nobufferdups(l *ListNode) *ListNode {
	var prev *ListNode
	cur := l
	head := l
	for cur != nil {
		if checkExists(cur.next, cur.value) {
			// The value exists so delete the node
			// Two case first is prev is null i.e. deleting head
			if prev == nil {
				cur = cur.next
				head = cur
			} else {
				prev.next = cur.next
				cur = cur.next
			}
			continue
		}
		prev = cur
		cur = cur.next
	}
	return head
}
// +build integration

package main

// Integration tests for the docker-flow-proxy reconfigure/remove HTTP API.
// They require a running proxy/consul/registrator stack and are driven by
// the DOCKER_IP and CONSUL_IP environment variables.
//
// To run locally on OS X
// $ docker-machine create -d virtualbox testing
// $ eval $(docker-machine env testing)
// $ export DOCKER_IP=$(docker-machine ip testing)
// $ export CONSUL_IP=$(docker-machine ip testing)
// $ docker rm -f docker-flow-proxy
// $ docker run --rm -v $PWD:/usr/src/myapp -w /usr/src/myapp -v $GOPATH:/go golang:1.6 go build -v -o docker-flow-proxy
// $ docker build -t vfarcic/docker-flow-proxy .
// $ go test --tags integration
// $ docker-machine rm -f testing

import (
	"fmt"
	"github.com/stretchr/testify/suite"
	"net/http"
	"os"
	"os/exec"
	"strings"
	"testing"
)

type IntegrationTestSuite struct {
	suite.Suite
}

func (s *IntegrationTestSuite) SetupTest() {
}

// Integration

// Test_Reconfigure_SingleInstance reconfigures the proxy for one service
// path and verifies it is routable.
func (s IntegrationTestSuite) Test_Reconfigure_SingleInstance() {
	s.reconfigure("", "/v1/test")
	s.verifyReconfigure(1)
}

// Test_Reconfigure_MultipleInstances scales the backend to 3 replicas before
// reconfiguring.
func (s IntegrationTestSuite) Test_Reconfigure_MultipleInstances() {
	if ok := s.runCmd(
		"docker-compose", "-p", "test-service", "-f", "docker-compose-test.yml",
		"scale", "app=3",
	); !ok {
		s.Fail("Failed to scale the service")
	}
	s.reconfigure("", "/v1/test")
	s.verifyReconfigure(1)
}

// Test_Reconfigure_PathReg exercises regex-based path matching (pathType=path_reg).
func (s IntegrationTestSuite) Test_Reconfigure_PathReg() {
	if ok := s.runCmd(
		"docker-compose", "-p", "test-service", "-f", "docker-compose-test.yml",
		"scale", "app=3",
	); !ok {
		s.Fail("Failed to scale the service")
	}
	s.reconfigure("path_reg", "/.*/test")
	s.verifyReconfigure(1)
}

// Test_Reconfigure_MultiplePaths registers two service paths at once.
func (s IntegrationTestSuite) Test_Reconfigure_MultiplePaths() {
	s.reconfigure("", "/v1/test", "/v2/test")
	s.verifyReconfigure(2)
}

// Test_Remove registers a service, removes it, and expects the path to stop
// returning 200.
func (s IntegrationTestSuite) Test_Remove() {
	s.reconfigure("", "/v1/test")
	s.verifyReconfigure(1)
	_, err := http.Get(fmt.Sprintf(
		"http://%s:8080/v1/docker-flow-proxy/remove?serviceName=test-service",
		os.Getenv("DOCKER_IP"),
	))
	s.NoError(err)
	url := fmt.Sprintf("http://%s/v1/test", os.Getenv("DOCKER_IP"))
	fmt.Println(url)
	// NOTE(review): resp.Body is never closed in these helpers, which leaks
	// connections; acceptable for a short-lived test binary.
	resp, err := http.Get(url)
	s.NoError(err)
	s.NotEqual(200, resp.StatusCode)
}

// Util

// verifyReconfigure asserts that /v<version>/test on the proxy answers 200.
func (s IntegrationTestSuite) verifyReconfigure(version int) {
	url := fmt.Sprintf("http://%s/v%d/test", os.Getenv("DOCKER_IP"), version)
	resp, err := http.Get(url)
	s.NoError(err)
	s.Equal(200, resp.StatusCode)
}

// reconfigure calls the proxy's reconfigure endpoint for test-service with
// the given path type and comma-joined service paths.
func (s IntegrationTestSuite) reconfigure(pathType string, paths ...string) {
	_, err := http.Get(fmt.Sprintf(
		"http://%s:8080/v1/docker-flow-proxy/reconfigure?serviceName=test-service&servicePath=%s&pathType=%s",
		os.Getenv("DOCKER_IP"),
		strings.Join(paths, ","),
		pathType,
	))
	s.NoError(err)
}

// runCmd runs an external command with stdout/stderr passed through and
// reports success as a bool (errors are printed, not returned).
func (s IntegrationTestSuite) runCmd(command string, args ...string) bool {
	cmd := exec.Command(command, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Printf("%s %s\n%s\n", command, strings.Join(args, " "), err.Error())
		return false
	}
	return true
}

// Suite

// TestIntegrationTestSuite boots the consul/proxy/registrator stack and the
// test service, then runs the suite. DOCKER_IP/CONSUL_IP default to localhost.
func TestIntegrationTestSuite(t *testing.T) {
	s := new(IntegrationTestSuite)
	if len(os.Getenv("DOCKER_IP")) == 0 {
		os.Setenv("DOCKER_IP", "localhost")
	}
	if len(os.Getenv("CONSUL_IP")) == 0 {
		os.Setenv("CONSUL_IP", "localhost")
	}
	ok := s.runCmd("docker-compose", "up", "-d", "consul", "proxy", "registrator")
	if !ok {
		s.FailNow("Could not run consul, proxy, and registrator")
	}
	ok = s.runCmd("docker-compose", "-p", "test-service", "-f", "docker-compose-test.yml", "up", "-d")
	if !ok {
		s.FailNow("Could not run the test service")
	}
	suite.Run(t, s)
}
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/scanner"
	"image"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
)

var (
	doWrite    = flag.Bool("w", false, "doWrite result to (source) file instead of stdout")
	doDiff     = flag.Bool("d", false, "display diffs instead of rewriting files")
	whiteNoise = regexp.MustCompile("[ \t]*\n") // trailing spaces/tabs before a newline
	exitCode   = 0
)

// report prints the error and arranges a non-zero process exit code.
func report(err error) {
	scanner.PrintError(os.Stderr, err)
	exitCode = 2
}

// parseFlags parses the command line and returns the positional path arguments.
func parseFlags() []string {
	flag.Parse()
	return flag.Args()
}

// usage prints flag help and exits with status 2.
func usage() {
	_, _ = fmt.Fprintf(os.Stderr, "usage: gci [flags] [path ...]\n")
	flag.PrintDefaults()
	os.Exit(2)
}

// diff shells out to the system `diff -u` on two temp files holding b1 and b2
// and returns the unified diff with temp file names replaced by filename.
func diff(b1, b2 []byte, filename string) (data []byte, err error) {
	f1, err := writeTempFile("", "gci", b1)
	if err != nil {
		return
	}
	defer os.Remove(f1)
	f2, err := writeTempFile("", "gci", b2)
	if err != nil {
		return
	}
	defer os.Remove(f2)
	cmd := "diff"
	data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput()
	if len(data) > 0 {
		// diff exits with a non-zero status when the files don't match.
		// Ignore that failure as long as we get output.
		return replaceTempFilename(data, filename)
	}
	return
}

// writeTempFile writes data to a fresh temp file and returns its name,
// removing the file again on any error.
func writeTempFile(dir, prefix string, data []byte) (string, error) {
	file, err := ioutil.TempFile(dir, prefix)
	if err != nil {
		return "", err
	}
	_, err = file.Write(data)
	if err1 := file.Close(); err == nil {
		err = err1
	}
	if err != nil {
		os.Remove(file.Name())
		return "", err
	}
	return file.Name(), nil
}

// replaceTempFilename replaces temporary filenames in diff with actual one.
//
// --- /tmp/gofmt316145376	2017-02-03 19:13:00.280468375 -0500
// +++ /tmp/gofmt617882815	2017-02-03 19:13:00.280468375 -0500
// ...
// ->
// --- path/to/file.go.orig	2017-02-03 19:13:00.280468375 -0500
// +++ path/to/file.go	2017-02-03 19:13:00.280468375 -0500
// ...
func replaceTempFilename(diff []byte, filename string) ([]byte, error) {
	bs := bytes.SplitN(diff, []byte{'\n'}, 3)
	if len(bs) < 3 {
		return nil, fmt.Errorf("got unexpected diff for %s", filename)
	}
	// Preserve timestamps (everything from the last tab onward).
	var t0, t1 []byte
	if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 {
		t0 = bs[0][i:]
	}
	if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 {
		t1 = bs[1][i:]
	}
	// Always print filepath with slash separator.
	f := filepath.ToSlash(filename)
	bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0))
	bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1))
	return bytes.Join(bs, []byte{'\n'}), nil
}

// processFile strips trailing whitespace from filename, then writes the
// result to the file (-w), prints a diff (-d), or writes it to out.
func processFile(filename string, out io.Writer) error {
	var err error
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()
	// Read the whole file FIRST, then sniff for image content on an
	// in-memory copy. The previous code called image.Decode(f) directly,
	// which buffers bytes from f, so the subsequent ReadAll(f) silently
	// dropped the beginning of every file.
	src, err := ioutil.ReadAll(f)
	if err != nil {
		return err
	}
	// NOTE(review): no image format packages (e.g. _ "image/png") are
	// imported here; Decode only succeeds if another file in this package
	// registers formats — confirm.
	if _, _, err := image.Decode(bytes.NewReader(src)); err == nil {
		fmt.Printf("skip image file %s\n", filename)
		return nil
	}
	ret := whiteNoise.ReplaceAll(src, []byte{'\n'})
	if !bytes.Equal(src, ret) {
		exitCode = 1
		if *doWrite {
			// On Windows, we need to re-set the permissions from the file. See golang/go#38225.
			var perms os.FileMode
			if fi, err := os.Stat(filename); err == nil {
				perms = fi.Mode() & os.ModePerm
			}
			err = ioutil.WriteFile(filename, ret, perms)
			if err != nil {
				return err
			}
		}
		if *doDiff {
			data, err := diff(src, ret, filename)
			if err != nil {
				return fmt.Errorf("failed to diff: %v", err)
			}
			fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename))
			if _, err := out.Write(data); err != nil {
				return fmt.Errorf("failed to write: %v", err)
			}
		}
	}
	if !*doWrite && !*doDiff {
		if _, err = out.Write(ret); err != nil {
			return fmt.Errorf("failed to write: %v", err)
		}
	}
	return err
}

// visitFile is the filepath.Walk callback; errors are reported, not fatal.
func visitFile(path string, f os.FileInfo, err error) error {
	if err == nil {
		err = processFile(path, os.Stdout)
	}
	if err != nil {
		report(err)
	}
	return nil
}

// walkDir processes every file under path recursively.
func walkDir(path string) {
	_ = filepath.Walk(path, visitFile)
}

func main() {
	flag.Usage = usage
	paths := parseFlags()
	for _, path := range paths {
		switch dir, err := os.Stat(path); {
		case err != nil:
			report(err)
		case dir.IsDir():
			walkDir(path)
		default:
			if err := processFile(path, os.Stdout); err != nil {
				report(err)
			}
		}
	}
	os.Exit(exitCode)
}
/*
@Time : 2019/4/15 17:53
@Author : yanKoo
@File : message_test
@Software: GoLand
@Description: Manual tests for the msg package. All functions use a
lowercase "test" prefix, so `go test` does NOT run them automatically —
presumably deliberate, since they hit a real database via db.DBHandler.
*/
package msg

import (
	pb "api/talk_cloud"
	"log"
	"server/common/src/db"
	"testing"
)

// testAddMsg inserts a single group message (disabled: lowercase prefix).
func testAddMsg(t *testing.T) {
	/*if err := AddMsg(&pb.ImMsgReqData{
		Id:333,
		ReceiverType:0,
		ReceiverId:334,
		ResourcePath:"HELLO WORLD",
		MsgType: 0,
	}, db.DBHandler); err != nil {
		t.Logf("Add Msg error: %v", err)
	}*/
	if err := AddMsg(&pb.ImMsgReqData{
		Id:           333,
		ReceiverType: 1, //group
		ReceiverId:   1,
		ResourcePath: "http://www.baidu.com/1.jpg",
		MsgType:      1,
	}, db.DBHandler); err != nil {
		t.Logf("Add Msg error: %v", err)
	}
}

// testAddMultiMsg inserts one message fanned out to several receivers
// (disabled: lowercase prefix).
func testAddMultiMsg(t *testing.T) {
	if err := AddMultiMsg(&pb.ImMsgReqData{
		Id:           333,
		ReceiverType: 2, //group
		ReceiverId:   1,
		ResourcePath: "http://www.baidu.com/1.jpg",
		MsgType:      1,
	}, []int32{335, 336, 337}, db.DBHandler); err != nil {
		t.Logf("Add Msg error: %v", err)
	}
}

// testGetMsg fetches offline messages for user 1503 (disabled: lowercase
// prefix). NOTE(review): logs instead of failing the test on error.
func testGetMsg(t *testing.T) {
	if res, err := GetMsg(1503, int32(1), db.DBHandler); err != nil {
		log.Println("Test Get offline msg error")
	} else {
		log.Printf("Offline msg: %v", res)
	}
}
package ibmcloud

import (
	"context"
	"fmt"
	"net/http"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/IBM/networking-go-sdk/dnsrecordsv1"
	"github.com/IBM/networking-go-sdk/dnssvcsv1"
	"github.com/IBM/networking-go-sdk/dnszonesv1"
	"github.com/IBM/networking-go-sdk/zonesv1"
	"github.com/IBM/platform-services-go-sdk/iampolicymanagementv1"
	"github.com/IBM/platform-services-go-sdk/resourcecontrollerv2"
	"github.com/IBM/platform-services-go-sdk/resourcemanagerv2"
	"github.com/IBM/vpc-go-sdk/vpcv1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/wait"

	icibmcloud "github.com/openshift/installer/pkg/asset/installconfig/ibmcloud"
	"github.com/openshift/installer/pkg/destroy/providers"
	"github.com/openshift/installer/pkg/types"
	"github.com/openshift/installer/pkg/version"
)

var (
	// defaultTimeout bounds each individual IBM Cloud API call made through
	// contextWithTimeout.
	defaultTimeout = 2 * time.Minute
)

// ClusterUninstaller holds the various options for the cluster we want to delete
type ClusterUninstaller struct {
	ClusterName         string
	Context             context.Context
	Logger              logrus.FieldLogger
	InfraID             string
	AccountID           string
	BaseDomain          string
	CISInstanceCRN      string
	DNSInstanceID       string
	Region              string
	ResourceGroupName   string
	UserProvidedSubnets []string
	UserProvidedVPC     string

	// SDK service clients, populated by loadSDKServices.
	managementSvc          *resourcemanagerv2.ResourceManagerV2
	controllerSvc          *resourcecontrollerv2.ResourceControllerV2
	vpcSvc                 *vpcv1.VpcV1
	iamPolicyManagementSvc *iampolicymanagementv1.IamPolicyManagementV1
	zonesSvc               *zonesv1.ZonesV1
	dnsZonesSvc            *dnszonesv1.DnsZonesV1
	dnsServicesSvc         *dnssvcsv1.DnsSvcsV1
	dnsRecordsSvc          *dnsrecordsv1.DnsRecordsV1

	maxRetryAttempt int

	// Cached lookups resolved during the destroy run.
	resourceGroupID string
	cosInstanceID   string
	zoneID          string

	errorTracker
	pendingItemTracker
}

// New returns an IBMCloud destroyer from ClusterMetadata.
func New(logger logrus.FieldLogger, metadata *types.ClusterMetadata) (providers.Destroyer, error) {
	return &ClusterUninstaller{
		ClusterName:         metadata.ClusterName,
		Context:             context.Background(),
		Logger:              logger,
		InfraID:             metadata.InfraID,
		AccountID:           metadata.ClusterPlatformMetadata.IBMCloud.AccountID,
		BaseDomain:          metadata.ClusterPlatformMetadata.IBMCloud.BaseDomain,
		CISInstanceCRN:      metadata.ClusterPlatformMetadata.IBMCloud.CISInstanceCRN,
		DNSInstanceID:       metadata.ClusterPlatformMetadata.IBMCloud.DNSInstanceID,
		Region:              metadata.ClusterPlatformMetadata.IBMCloud.Region,
		ResourceGroupName:   metadata.ClusterPlatformMetadata.IBMCloud.ResourceGroupName,
		UserProvidedSubnets: metadata.ClusterPlatformMetadata.IBMCloud.Subnets,
		UserProvidedVPC:     metadata.ClusterPlatformMetadata.IBMCloud.VPC,
		pendingItemTracker:  newPendingItemTracker(),
		maxRetryAttempt:     30,
	}, nil
}

// Retry runs funcToRetry up to maxRetryAttempt times with a fixed 10s gap.
// The callback returns (err, stopRetry); stopRetry==true ends the loop early
// and the last error (possibly nil) is returned.
func (o *ClusterUninstaller) Retry(funcToRetry func() (error, bool)) error {
	var err error
	var stopRetry bool
	retryGap := 10
	for i := 0; i < o.maxRetryAttempt; i++ {
		if i > 0 {
			time.Sleep(time.Duration(retryGap) * time.Second)
		}
		// Call function which required retry, retry is decided by function itself
		err, stopRetry = funcToRetry()
		if stopRetry {
			break
		}
		if (i + 1) < o.maxRetryAttempt {
			o.Logger.Infof("UNEXPECTED RESULT, Re-attempting execution .., attempt=%d, retry-gap=%d, max-retry-Attempts=%d, stopRetry=%t, error=%v",
				i+1, retryGap, o.maxRetryAttempt, stopRetry, err)
		}
	}
	return err
}

// Run is the entrypoint to start the uninstall process
func (o *ClusterUninstaller) Run() (*types.ClusterQuota, error) {
	err := o.loadSDKServices()
	if err != nil {
		return nil, err
	}

	err = o.destroyCluster()
	if err != nil {
		return nil, errors.Wrap(err, "failed to destroy cluster")
	}

	return nil, nil
}

// destroyCluster deletes cluster resources in ordered stages; the functions
// within one stage run concurrently, and the next stage starts only after
// every function in the current stage has finished.
func (o *ClusterUninstaller) destroyCluster() error {
	stagedFuncs := [][]struct {
		name    string
		execute func() error
	}{{
		{name: "Stop instances", execute: o.stopInstances},
	}, {
		// Instances must occur before LB cleanup
		{name: "Instances", execute: o.destroyInstances},
		{name: "Disks", execute: o.destroyDisks},
	}, {
		// LB's must occur before Subnet cleanup
		{name: "Load Balancers", execute: o.destroyLoadBalancers},
	}, {
		{name: "Subnets", execute: o.destroySubnets},
	}, {
		// Public Gateways must occur before FIP's cleanup
		// Security Groups must occur before VPC cleanup
		{name: "Images", execute: o.destroyImages},
		{name: "Public Gateways", execute: o.destroyPublicGateways},
		{name: "Security Groups", execute: o.destroySecurityGroups},
	}, {
		{name: "Floating IPs", execute: o.destroyFloatingIPs},
	}, {
		{name: "Dedicated Hosts", execute: o.destroyDedicatedHosts},
		{name: "VPCs", execute: o.destroyVPCs},
	}, {
		// IAM must occur before COS cleanup
		{name: "IAM Authorizations", execute: o.destroyIAMAuthorizations},
	}, {
		// COS must occur before RG cleanup
		{name: "Cloud Object Storage Instances", execute: o.destroyCOSInstances},
		{name: "Dedicated Host Groups", execute: o.destroyDedicatedHostGroups},
	}, {
		{name: "DNS Records", execute: o.destroyDNSRecords},
		{name: "Resource Groups", execute: o.destroyResourceGroups},
	}}

	for _, stage := range stagedFuncs {
		var wg sync.WaitGroup
		// NOTE(review): errCh is unbuffered; if destroyCluster returns on the
		// first error, a second failing goroutine in the same stage would
		// block forever on its send — confirm against upstream behavior.
		errCh := make(chan error)
		wgDone := make(chan bool)
		for _, f := range stage {
			wg.Add(1)
			go o.executeStageFunction(f, errCh, &wg)
		}
		go func() {
			wg.Wait()
			close(wgDone)
		}()
		select {
		case <-wgDone:
			// On to the next stage
			continue
		case err := <-errCh:
			return err
		}
	}

	return nil
}

// executeStageFunction runs one stage function, polling it every 10s forever
// until it succeeds (errors only mark another poll attempt). The returned
// error is always nil and is ignored by the caller; real failures are
// reported via errCh.
func (o *ClusterUninstaller) executeStageFunction(f struct {
	name    string
	execute func() error
}, errCh chan error, wg *sync.WaitGroup) error {
	defer wg.Done()
	err := wait.PollImmediateInfinite(
		time.Second*10,
		func() (bool, error) {
			ferr := f.execute()
			if ferr != nil {
				o.Logger.Debugf("%s: %v", f.name, ferr)
				return false, nil
			}
			return true, nil
		},
	)
	if err != nil {
		errCh <- err
	}
	return nil
}

// loadSDKServices builds every IBM Cloud SDK client the destroyer needs,
// authenticating with the IC_API_KEY environment variable, and resolves the
// DNS zone ID from either CIS (CISInstanceCRN) or DNS Services
// (DNSInstanceID).
func (o *ClusterUninstaller) loadSDKServices() error {
	apiKey := os.Getenv("IC_API_KEY")

	userAgentString := fmt.Sprintf("OpenShift/4.x Destroyer/%s", version.Raw)

	// ResourceManagerV2
	rmAuthenticator, err := icibmcloud.NewIamAuthenticator(apiKey)
	if err != nil {
		return err
	}
	o.managementSvc, err = resourcemanagerv2.NewResourceManagerV2(&resourcemanagerv2.ResourceManagerV2Options{
		Authenticator: rmAuthenticator,
	})
	if err != nil {
		return err
	}
	o.managementSvc.Service.SetUserAgent(userAgentString)

	// Attempt to retrieve the ResourceGroupID as soon as possible to validate ResourceGroupName
	_, err = o.ResourceGroupID()
	if err != nil {
		return err
	}

	// ResourceControllerV2
	rcAuthenticator, err := icibmcloud.NewIamAuthenticator(apiKey)
	if err != nil {
		return err
	}
	o.controllerSvc, err = resourcecontrollerv2.NewResourceControllerV2(&resourcecontrollerv2.ResourceControllerV2Options{
		Authenticator: rcAuthenticator,
	})
	if err != nil {
		return err
	}
	o.controllerSvc.Service.SetUserAgent(userAgentString)

	// IamPolicyManagementV1
	ipmAuthenticator, err := icibmcloud.NewIamAuthenticator(apiKey)
	if err != nil {
		return err
	}
	o.iamPolicyManagementSvc, err = iampolicymanagementv1.NewIamPolicyManagementV1(&iampolicymanagementv1.IamPolicyManagementV1Options{
		Authenticator: ipmAuthenticator,
	})
	if err != nil {
		return err
	}
	o.iamPolicyManagementSvc.Service.SetUserAgent(userAgentString)

	if len(o.CISInstanceCRN) > 0 {
		// ZonesV1
		zAuthenticator, err := icibmcloud.NewIamAuthenticator(apiKey)
		if err != nil {
			return err
		}
		o.zonesSvc, err = zonesv1.NewZonesV1(&zonesv1.ZonesV1Options{
			Authenticator: zAuthenticator,
			Crn:           core.StringPtr(o.CISInstanceCRN),
		})
		if err != nil {
			return err
		}
		o.zonesSvc.Service.SetUserAgent(userAgentString)

		// Get the Zone ID
		options := o.zonesSvc.NewListZonesOptions()
		resources, _, err := o.zonesSvc.ListZonesWithContext(o.Context, options)
		if err != nil {
			return err
		}
		zoneID := ""
		// Pick the zone whose name is a suffix of the base domain.
		for _, zone := range resources.Result {
			if strings.Contains(o.BaseDomain, *zone.Name) {
				zoneID = *zone.ID
			}
		}
		if zoneID == "" {
			return errors.Errorf("Could not determine CIS DNS zone ID from base domain %q", o.BaseDomain)
		}

		// DnsRecordsV1
		dnsAuthenticator, err := icibmcloud.NewIamAuthenticator(apiKey)
		if err != nil {
			return err
		}
		o.dnsRecordsSvc, err = dnsrecordsv1.NewDnsRecordsV1(&dnsrecordsv1.DnsRecordsV1Options{
			Authenticator:  dnsAuthenticator,
			Crn:            core.StringPtr(o.CISInstanceCRN),
			ZoneIdentifier: core.StringPtr(zoneID),
		})
		if err != nil {
			return err
		}
		o.dnsRecordsSvc.Service.SetUserAgent(userAgentString)
	} else if len(o.DNSInstanceID) > 0 {
		// DnsSvcsV1
		dnsAuthenticator, err := icibmcloud.NewIamAuthenticator(apiKey)
		if err != nil {
			return err
		}
		o.dnsServicesSvc, err = dnssvcsv1.NewDnsSvcsV1(&dnssvcsv1.DnsSvcsV1Options{
			Authenticator: dnsAuthenticator,
		})
		if err != nil {
			return err
		}
		o.dnsServicesSvc.Service.SetUserAgent(userAgentString)

		// Get the Zone ID
		dzOptions := o.dnsServicesSvc.NewListDnszonesOptions(o.DNSInstanceID)
		dzResult, _, err := o.dnsServicesSvc.ListDnszonesWithContext(o.Context, dzOptions)
		if err != nil {
			return err
		}
		zoneID := ""
		// Unlike the CIS path, this requires an exact name match.
		for _, zone := range dzResult.Dnszones {
			if o.BaseDomain == *zone.Name {
				zoneID = *zone.ID
				break
			}
		}
		if zoneID == "" {
			return errors.Errorf("Could not determine DNS Services DNS zone ID from base domain %q", o.BaseDomain)
		}
		o.Logger.Debugf("Found DNS Services DNS zone ID for base domain %q: %s", o.BaseDomain, zoneID)
		o.zoneID = zoneID
	}

	// VpcV1
	vpcAuthenticator, err := icibmcloud.NewIamAuthenticator(apiKey)
	if err != nil {
		return err
	}
	o.vpcSvc, err = vpcv1.NewVpcV1(&vpcv1.VpcV1Options{
		Authenticator: vpcAuthenticator,
	})
	if err != nil {
		return err
	}
	o.vpcSvc.Service.SetUserAgent(userAgentString)
	region, _, err := o.vpcSvc.GetRegion(o.vpcSvc.NewGetRegionOptions(o.Region))
	if err != nil {
		return err
	}
	err = o.vpcSvc.SetServiceURL(fmt.Sprintf("%s/v1", *region.Endpoint))
	if err != nil {
		return err
	}

	return nil
}

// contextWithTimeout derives a per-API-call context bounded by defaultTimeout.
func (o *ClusterUninstaller) contextWithTimeout() (context.Context, context.CancelFunc) {
	return context.WithTimeout(o.Context, defaultTimeout)
}

// ResourceGroupID returns the ID of the resource group using its name
func (o *ClusterUninstaller) ResourceGroupID() (string, error) {
	if o.resourceGroupID != "" {
		return o.resourceGroupID, nil
	}
	// If no ResourceGroupName is available, raise an error
	if o.ResourceGroupName == "" {
		return "", errors.Errorf("No ResourceGroupName provided")
	}
	ctx, cancel := o.contextWithTimeout()
	defer cancel()
	options := o.managementSvc.NewListResourceGroupsOptions()
	options.SetAccountID(o.AccountID)
	options.SetName(o.ResourceGroupName)
	resources, _, err := o.managementSvc.ListResourceGroupsWithContext(ctx, options)
	if err != nil {
		return "", err
	}
	if len(resources.Resources) == 0 {
		return "", errors.Errorf("ResourceGroup '%q' not found", o.ResourceGroupName)
	} else if len(resources.Resources) > 1 {
		return "", errors.Errorf("Too many resource groups matched name %q", o.ResourceGroupName)
	}
	o.SetResourceGroupID(*resources.Resources[0].ID)
	return o.resourceGroupID, nil
}

// SetResourceGroupID sets the resource group ID
func (o *ClusterUninstaller) SetResourceGroupID(id string) {
	o.resourceGroupID = id
}

// ibmError is a minimal (status, message) pair used by per-resource deleters.
type ibmError struct {
	Status  int
	Message string
}

// isNoOp reports whether err represents a 404, i.e. the resource is already
// gone and deletion is a no-op.
func isNoOp(err *ibmError) bool {
	if err == nil {
		return false
	}

	return err.Status == http.StatusNotFound
}

// aggregateError is a utility function that takes a slice of errors and an
// optional pending argument, and returns an error or nil
func aggregateError(errs []error, pending ...int) error {
	err := utilerrors.NewAggregate(errs)
	if err != nil {
		return err
	}
	if len(pending) > 0 && pending[0] > 0 {
		return errors.Errorf("%d items pending", pending[0])
	}
	return nil
}

// pendingItemTracker tracks a set of pending item names for a given type of resource
type pendingItemTracker struct {
	pendingItems map[string]cloudResources
}

func newPendingItemTracker() pendingItemTracker {
	return pendingItemTracker{
		pendingItems: map[string]cloudResources{},
	}
}

// GetAllPendingItems returns a slice of all of the pending items across all types.
func (t pendingItemTracker) GetAllPendingItems() []cloudResource { var items []cloudResource for _, is := range t.pendingItems { for _, i := range is { items = append(items, i) } } return items } // getPendingItems returns the list of resources to be deleted. func (t pendingItemTracker) getPendingItems(itemType string) []cloudResource { lastFound, exists := t.pendingItems[itemType] if !exists { lastFound = cloudResources{} } return lastFound.list() } // insertPendingItems adds to the list of resources to be deleted. func (t pendingItemTracker) insertPendingItems(itemType string, items []cloudResource) []cloudResource { lastFound, exists := t.pendingItems[itemType] if !exists { lastFound = cloudResources{} } lastFound = lastFound.insert(items...) t.pendingItems[itemType] = lastFound return lastFound.list() } // deletePendingItems removes from the list of resources to be deleted. func (t pendingItemTracker) deletePendingItems(itemType string, items []cloudResource) []cloudResource { lastFound, exists := t.pendingItems[itemType] if !exists { lastFound = cloudResources{} } lastFound = lastFound.delete(items...) t.pendingItems[itemType] = lastFound return lastFound.list() } func isErrorStatus(code int64) bool { return code != 0 && (code < 200 || code >= 300) } func (o *ClusterUninstaller) clusterLabelFilter() string { return fmt.Sprintf("kubernetes-io-cluster-%s:owned", o.InfraID) }
package main

import (
	"fmt"
	"strconv"
)

// extractNumbers scans s for maximal runs of ASCII digits and returns the
// runs in order of appearance together with their integer sum.
//
// This fixes a bug in the original inline loop: a number at the very end of
// the string was added to the total but never appended to the slice, and a
// leftover debug print of the partial number remained.
func extractNumbers(s string) ([]string, int) {
	var nums []string
	total := 0
	cur := ""
	// flush commits the digit run accumulated so far (if any).
	flush := func() {
		if cur == "" {
			return
		}
		nums = append(nums, cur)
		n, _ := strconv.Atoi(cur) // cur holds only ASCII digits, so Atoi cannot fail
		total += n
		cur = ""
	}
	for _, r := range s {
		if r >= '0' && r <= '9' {
			cur += string(r)
		} else {
			flush()
		}
	}
	flush() // commit a trailing number (previously lost from the slice)
	return nums, total
}

func main() {
	inputString := "there are some (12) digits 5566 in this 770 string 239"
	numStrArray, total := extractNumbers(inputString)
	fmt.Println(numStrArray)
	fmt.Println(total)
}
package gorpc

import (
	"bufio"
	"compress/flate"
	"encoding/gob"
	"fmt"
	"io"
	"net"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// Server handler function.
//
// clientAddr contains client address returned by net.TCPConn.RemoteAddr().
// Request and response types may be arbitrary.
// All the request types the client may send to the server must be registered
// with gorpc.RegisterType() before starting the server.
// There is no need in registering base Go types such as int, string, bool,
// float64, etc. or arrays, slices and maps containing base Go types.
type HandlerFunc func(clientAddr string, request interface{}) (response interface{})

// Rpc server.
//
// Default server settings are optimized for high load, so don't override
// them without valid reason.
type Server struct {
	// TCP address to listen to for incoming connections.
	Addr string

	// Handler function for incoming requests.
	//
	// Server calls this function for each incoming request.
	// The function must process the request and return the corresponding response.
	Handler HandlerFunc

	// The maximum number of pending responses in the queue.
	// Default is 32768.
	PendingResponses int

	// Size of send buffer per each TCP connection in bytes.
	// Default is 1M.
	SendBufferSize int

	// Size of recv buffer per each TCP connection in bytes.
	// Default is 1M.
	RecvBufferSize int

	// Connection statistics.
	//
	// The stats doesn't reset automatically. Feel free resetting it
	// any time you wish.
	Stats ConnStats

	// serverStopChan is closed by Stop() to signal every server goroutine
	// to shut down; non-nil while the server is running.
	serverStopChan chan struct{}
	// stopWg is waited on by Stop() until all server goroutines exit.
	stopWg sync.WaitGroup
}

// Starts rpc server.
//
// All the request types the client may send to the server must be registered
// with gorpc.RegisterType() before starting the server.
// There is no need in registering base Go types such as int, string, bool,
// float64, etc. or arrays, slices and maps containing base Go types.
func (s *Server) Start() error {
	if s.Handler == nil {
		panic("gorpc.Server: Server.Handler cannot be nil")
	}

	if s.serverStopChan != nil {
		panic("gorpc.Server: server is already running. Stop it before starting it again")
	}
	s.serverStopChan = make(chan struct{})

	// Apply documented defaults for any zero/negative settings.
	if s.PendingResponses <= 0 {
		s.PendingResponses = 32768
	}
	if s.SendBufferSize <= 0 {
		s.SendBufferSize = 1024 * 1024
	}
	if s.RecvBufferSize <= 0 {
		s.RecvBufferSize = 1024 * 1024
	}

	ln, err := net.Listen("tcp", s.Addr)
	if err != nil {
		err := fmt.Errorf("gorpc.Server: [%s]. Cannot listen to: [%s]", s.Addr, err)
		logError("%s", err)
		return err
	}

	s.stopWg.Add(1)
	go serverHandler(s, ln)
	return nil
}

// Stops rpc server. Stopped server can be started again.
func (s *Server) Stop() {
	close(s.serverStopChan)
	s.stopWg.Wait()
	// Reset so Start() can be called again on the same Server.
	s.serverStopChan = nil
}

// Starts rpc server and blocks until it is stopped.
func (s *Server) Serve() error {
	if err := s.Start(); err != nil {
		return err
	}
	s.stopWg.Wait()
	return nil
}

// serverHandler is the accept loop. Accept runs in a helper goroutine so the
// loop can react to serverStopChan while blocked in Accept.
func serverHandler(s *Server, ln net.Listener) {
	defer s.stopWg.Done()

	var conn net.Conn
	var err error

	for {
		acceptChan := make(chan struct{})
		go func() {
			if conn, err = ln.Accept(); err != nil {
				logError("gorpc.Server: [%s]. Cannot accept new connection: [%s]", s.Addr, err)
				// Back off so a persistent accept error doesn't spin.
				time.Sleep(time.Second)
			}
			close(acceptChan)
		}()

		select {
		case <-s.serverStopChan:
			ln.Close()
			return
		case <-acceptChan:
			atomic.AddUint64(&s.Stats.AcceptCalls, 1)
		}

		if err != nil {
			atomic.AddUint64(&s.Stats.AcceptErrors, 1)
			continue
		}

		// Keepalive failure is logged but not fatal for the connection.
		if err = setupKeepalive(conn); err != nil {
			logError("gorpc.Server: [%s]. Cannot setup keepalive: [%s]", s.Addr, err)
		}

		s.stopWg.Add(1)
		go serverHandleConnection(s, conn)
	}
}

// setupKeepalive enables TCP keepalive with a 30s period on the connection.
func setupKeepalive(conn net.Conn) error {
	tcpConn := conn.(*net.TCPConn)
	if err := tcpConn.SetKeepAlive(true); err != nil {
		return err
	}
	if err := tcpConn.SetKeepAlivePeriod(30 * time.Second); err != nil {
		return err
	}
	return nil
}

// serverHandleConnection performs the one-byte compression handshake (with a
// 10s deadline), then runs a reader and a writer goroutine for the
// connection and tears both down when either finishes or the server stops.
func serverHandleConnection(s *Server, conn net.Conn) {
	defer s.stopWg.Done()

	var enabledCompression bool
	var err error
	zChan := make(chan bool, 1)
	go func() {
		// The first byte sent by the client selects compression (non-zero = on).
		var buf [1]byte
		if _, err = conn.Read(buf[:]); err != nil {
			logError("gorpc.Server: [%s]. Error when reading handshake from client: [%s]", s.Addr, err)
		}
		zChan <- (buf[0] != 0)
	}()
	select {
	case enabledCompression = <-zChan:
		if err != nil {
			conn.Close()
			return
		}
	case <-s.serverStopChan:
		conn.Close()
		return
	case <-time.After(10 * time.Second):
		logError("gorpc.Server: [%s]. Cannot obtain handshake from client during 10s", s.Addr)
		conn.Close()
		return
	}

	responsesChan := make(chan *serverMessage, s.PendingResponses)
	stopChan := make(chan struct{})

	readerDone := make(chan struct{})
	clientAddr := conn.RemoteAddr().String()
	go serverReader(s, conn, clientAddr, responsesChan, stopChan, readerDone, enabledCompression)

	writerDone := make(chan struct{})
	go serverWriter(s, conn, clientAddr, responsesChan, stopChan, writerDone, enabledCompression)

	// Whichever side finishes first triggers shutdown of the other.
	select {
	case <-readerDone:
		close(stopChan)
		conn.Close()
		<-writerDone
	case <-writerDone:
		close(stopChan)
		conn.Close()
		<-readerDone
	case <-s.serverStopChan:
		close(stopChan)
		conn.Close()
		<-readerDone
		<-writerDone
	}
}

// serverMessage carries one in-flight request/response pair between the
// reader, the handler goroutine, and the writer.
type serverMessage struct {
	ID         uint64
	Request    interface{}
	Response   interface{}
	ClientAddr string
}

// serverReader decodes gob-encoded wireMessages from the connection and
// spawns one serveRequest goroutine per request.
func serverReader(s *Server, r io.Reader, clientAddr string, responsesChan chan<- *serverMessage, stopChan <-chan struct{}, done chan<- struct{}, enabledCompression bool) {
	defer func() { close(done) }()

	r = newReaderCounter(r, &s.Stats)
	br := bufio.NewReaderSize(r, s.RecvBufferSize)

	rr := br
	if enabledCompression {
		zr := flate.NewReader(br)
		defer zr.Close()
		rr = bufio.NewReaderSize(zr, s.RecvBufferSize)
	}
	d := gob.NewDecoder(rr)

	for {
		var m wireMessage
		if err := d.Decode(&m); err != nil {
			logError("gorpc.Server: [%s]->[%s]. Cannot decode request: [%s]", clientAddr, s.Addr, err)
			return
		}

		rpcM := &serverMessage{
			ID:         m.ID,
			Request:    m.Data,
			ClientAddr: clientAddr,
		}
		go serveRequest(s, responsesChan, stopChan, rpcM)
	}
}

// serveRequest invokes the user Handler for one request and queues the
// response; panics in the handler are logged with a stack trace and the
// (possibly empty) response is still queued via the deferred send.
func serveRequest(s *Server, responsesChan chan<- *serverMessage, stopChan <-chan struct{}, m *serverMessage) {
	defer func() {
		select {
		case <-stopChan:
		case responsesChan <- m:
		}

		if x := recover(); x != nil {
			logError("gorpc.Server: [%s]->[%s]. Panic occured: %v", m.ClientAddr, s.Addr, x)

			stackTrace := make([]byte, 1<<20)
			n := runtime.Stack(stackTrace, false)
			logError("gorpc.Server: [%s]->[%s]. Stack trace: %s", m.ClientAddr, s.Addr, stackTrace[:n])
		}
	}()
	m.Response = s.Handler(m.ClientAddr, m.Request)
}

// serverWriter encodes queued responses back to the client, flushing the
// buffered (and optionally compressed) writers only when the response queue
// drains, to batch small writes.
func serverWriter(s *Server, w io.Writer, clientAddr string, responsesChan <-chan *serverMessage, stopChan <-chan struct{}, done chan<- struct{}, enabledCompression bool) {
	defer func() { close(done) }()

	w = newWriterCounter(w, &s.Stats)
	bw := bufio.NewWriterSize(w, s.SendBufferSize)

	ww := bw
	var zw *flate.Writer
	if enabledCompression {
		zw, _ = flate.NewWriter(bw, flate.BestSpeed)
		defer zw.Close()
		ww = bufio.NewWriterSize(zw, s.SendBufferSize)
	}
	e := gob.NewEncoder(ww)

	for {
		var rpcM *serverMessage

		select {
		case <-stopChan:
			return
		default:
		}

		select {
		case rpcM = <-responsesChan:
		default:
			// Queue is empty: flush everything buffered so far, then block
			// for the next response (or stop signal).
			if enabledCompression {
				if err := ww.Flush(); err != nil {
					logError("gorpc.Server: [%s]->[%s]. Cannot flush data to compressed stream: [%s]", clientAddr, s.Addr, err)
					return
				}
				if err := zw.Flush(); err != nil {
					logError("gorpc.Server: [%s]->[%s]. Cannot flush compressed data to wire: [%s]", clientAddr, s.Addr, err)
					return
				}
			}
			if err := bw.Flush(); err != nil {
				logError("gorpc.Server: [%s]->[%s]. Cannot flush responses to wire: [%s]", clientAddr, s.Addr, err)
				return
			}
			select {
			case <-stopChan:
				return
			case rpcM = <-responsesChan:
			}
		}

		m := wireMessage{
			ID:   rpcM.ID,
			Data: rpcM.Response,
		}
		if err := e.Encode(&m); err != nil {
			logError("gorpc.Server: [%s]->[%s]. Cannot send response to wire: [%s]", clientAddr, s.Addr, err)
			return
		}
	}
}
package main

// BeforeFork and AfterFork are declared without Go bodies.
// NOTE(review): a bodyless func declaration means the implementation must be
// supplied outside Go (assembly, or wired up via //go:linkname elsewhere) —
// confirm an implementation actually exists, otherwise the build fails at
// link time.
func BeforeFork()

func AfterFork()

// main is intentionally empty.
func main() {
}
package query import ( "github.com/juju/errgo" // "github.com/mezis/klask/index" ) // A comparison filter (less than, greater than, or both) type query_filter_between_t struct { name string less_than interface{} greater_than interface{} } func (self *query_filter_between_t) parse(name string, parsed map[string]interface{}) error { self.name = name // parse for key, val := range parsed { switch key { case "$gt": self.greater_than = val case "$lt": self.less_than = val default: return errgo.Newf("unexpected key '%s' for range filter in '%v'", key, parsed) } } // we don't check values, field names, or operator/operand compatibility at this point; // it will be done lazily when applying the filter return nil } func (self *query_filter_between_t) Run(records string, ctx Context) (string, error) { return "", errgo.New("not implemented") }
package common

import (
	"fmt"
	"net/http"

	"github.com/GoAdminGroup/go-admin/modules/config"
	"github.com/GoAdminGroup/go-admin/modules/language"
	"github.com/gavv/httpexpect"
)

// operationLogTest exercises the admin operation-log listing page: it issues
// an authenticated GET (using the supplied session cookie) against /info/op
// and asserts a 200 response whose body contains the localized
// "operation log" heading.
func operationLogTest(e *httpexpect.Expect, sesID *http.Cookie) {
	fmt.Println()
	printlnWithColor("Operation Log", "blue")
	fmt.Println("============================")

	// show
	printlnWithColor("show", "green")
	e.GET(config.Url("/info/op")).
		WithCookie(sesID.Name, sesID.Value).
		Expect().
		Status(200).
		Body().Contains(language.Get("operation log"))
}
package main

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/base64"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strconv"
	"time"

	"github.com/mattn/go-colorable"
	"github.com/natefinch/lumberjack"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/hlog"
	"github.com/rs/zerolog/log"
	"github.com/zerosnake0/autoproxy"
	"golang.org/x/net/http/httpproxy"
	"gopkg.in/yaml.v2"
)

// config holds the YAML-loaded process configuration (see loadConfig).
var config struct {
	Port int `yaml:"port"`
	Auth struct {
		Force  bool   `yaml:"force"`
		Smart  string `yaml:"smart"`
		Direct string `yaml:"direct"`
	}
	TLS struct {
		Enabled bool   `yaml:"enabled"`
		Cert    string `yaml:"cert"`
		Key     string `yaml:"key"`
	} `yaml:"tls"`
	Log struct {
		Format string `yaml:"format"`
		Path   string `yaml:"path"`
	}
	Proxy struct {
		HTTP  string `yaml:"http"`
		HTTPS string `yaml:"https"`
	} `yaml:"proxy"`
	AutoProxy struct {
		Enabled      bool     `yaml:"enabled"`
		SortDuration string   `yaml:"sortDuration"`
		Files        []string `yaml:"files"`
	} `yaml:"autoproxy"`
	Connect struct {
		Enabled      bool     `yaml:"enabled"`
		SortDuration string   `yaml:"sortDuration"`
		Files        []string `yaml:"files"`
	} `yaml:"connect"`
}

// userKey is the context key under which the authenticated user kind
// ("smart" or "direct") is stored.
type userKey struct{}

var (
	// ap is the rule set used for plain HTTP requests (nil when disabled).
	ap *autoproxy.AutoProxy
	// Precomputed "Basic ..." header values for the two user kinds.
	authSmart  string
	authDirect string
	// proxyConfigFunc resolves the upstream proxy for a URL from config.Proxy.
	proxyConfigFunc func(*url.URL) (*url.URL, error)
	roundTripper    http.RoundTripper
	// connectProxyFunc resolves the upstream proxy for CONNECT requests.
	connectProxyFunc func(req *http.Request) (*url.URL, error)
)

// loadConfig reads and unmarshals the YAML config file into the package
// config variable.
func loadConfig(configFile string) error {
	b, err := ioutil.ReadFile(configFile)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(b, &config)
}

// loadAutoProxy builds an AutoProxy rule set from the given files, sorted
// periodically with the parsed duration.
func loadAutoProxy(sortDuration string, files []string) (*autoproxy.AutoProxy, error) {
	sd, err := time.ParseDuration(sortDuration)
	if err != nil {
		return nil, err
	}
	proxy := autoproxy.New(&autoproxy.Option{
		SortPeriod: sd,
	})
	for _, fileName := range files {
		b, err := ioutil.ReadFile(fileName)
		if err != nil {
			return nil, fmt.Errorf("unable to read autoproxy from file %q: %s", fileName, err)
		}
		err = proxy.Read(bytes.NewReader(b))
		if err != nil {
			return nil, fmt.Errorf("unable to read autoproxy from file %q: %s", fileName, err)
		}
	}
	return proxy, nil
}

// proxyFunc returns a per-request proxy selector: "smart" users only go
// through the upstream proxy when the URL matches the rule set; everyone
// else follows the static proxy config directly.
func proxyFunc(ap *autoproxy.AutoProxy) func(req *http.Request) (*url.URL, error) {
	return func(req *http.Request) (*url.URL, error) {
		if ap != nil {
			// The user kind is installed by the auth middleware.
			user := req.Context().Value(userKey{}).(string)
			if user == "smart" {
				match := ap.Match(req.URL)
				hlog.FromRequest(req).Info().Bool("match", match).Msg("proxy")
				if !match {
					// nil proxy URL means "connect directly".
					return nil, nil
				}
			}
		}
		return proxyConfigFunc(req.URL)
	}
}

// setupAutoProxy loads the HTTP rule set (if enabled) and builds the shared
// round tripper used by handleHTTP.
func setupAutoProxy() {
	if config.AutoProxy.Enabled {
		var err error
		ap, err = loadAutoProxy(config.AutoProxy.SortDuration, config.AutoProxy.Files)
		if err != nil {
			log.Fatal().Err(err).Msg("unable to load autoproxy")
		}
	}
	roundTripper = &http.Transport{
		Proxy: proxyFunc(ap),
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		ForceAttemptHTTP2:     true,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
}

// formatAuthString builds a Proxy-Authorization "Basic" credential value.
func formatAuthString(user, pwd string) string {
	auth := user + ":" + pwd
	return "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
}

// setupAuthString precomputes the expected header values for both users.
func setupAuthString() {
	authSmart = formatAuthString("smart", config.Auth.Smart)
	authDirect = formatAuthString("direct", config.Auth.Direct)
}

// setupProxy builds the upstream proxy selector from config.Proxy.
func setupProxy() {
	proxyConfig := httpproxy.Config{
		HTTPProxy:  config.Proxy.HTTP,
		HTTPSProxy: config.Proxy.HTTPS,
	}
	proxyConfigFunc = proxyConfig.ProxyFunc()
}

// setupConnectProxy loads the (optional) CONNECT rule set and builds the
// selector used by handleConnect.
func setupConnectProxy() {
	var (
		connectAP *autoproxy.AutoProxy
		err       error
	)
	if config.Connect.Enabled {
		connectAP, err = loadAutoProxy(config.Connect.SortDuration, config.Connect.Files)
		if err != nil {
			log.Fatal().Err(err).Msg("unable to load connect autoproxy")
		}
	}
	connectProxyFunc = proxyFunc(connectAP)
}

// init wires up logging, parses flags, loads the config file and runs all
// setup steps before main starts serving.
func init() {
	consoleWriter := zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
		w.TimeFormat = time.RFC3339
		w.Out = colorable.NewColorableStdout()
	})
	log.Logger = zerolog.New(consoleWriter).With().Timestamp().Logger()

	var configFile string
	flag.StringVar(&configFile, "config", "config.yaml", "config yaml file")
	flag.Parse()

	if err := loadConfig(configFile); err != nil {
		log.Fatal().Err(err).Str("file", configFile).Msg("unable to read config file")
	}

	if config.Log.Path != "" {
		// Rotate the log file via lumberjack; optionally wrap it in a
		// console writer when a non-JSON format is requested. Note the
		// closure reads `writer` before the outer reassignment completes,
		// so it wraps the lumberjack logger.
		var writer io.Writer = &lumberjack.Logger{
			Filename:   config.Log.Path,
			MaxSize:    1,
			MaxBackups: 3,
			Compress:   true,
		}
		if config.Log.Format != "json" {
			writer = zerolog.NewConsoleWriter(func(cw *zerolog.ConsoleWriter) {
				cw.TimeFormat = time.RFC3339
				cw.Out = colorable.NewNonColorable(writer)
			})
		}
		log.Logger = zerolog.New(writer).With().Timestamp().Logger()
	}

	setupAuthString()
	setupProxy()
	setupAutoProxy()
	setupConnectProxy()
}

// transfer pumps one direction of a tunneled connection and closes both
// ends when the copy finishes.
func transfer(destination io.WriteCloser, source io.ReadCloser) {
	defer destination.Close()
	defer source.Close()
	io.Copy(destination, source)
}

// handleConnect implements the CONNECT method. With an upstream proxy it
// forwards the raw CONNECT request and tunnels bytes (letting the upstream's
// status reach the client); otherwise it dials the target directly and
// replies 200 itself before hijacking.
func handleConnect(w http.ResponseWriter, r *http.Request) {
	proxyURL, err := connectProxyFunc(r)
	if err != nil {
		hlog.FromRequest(r).Error().Err(err).Msg("unable to get proxy url")
		http.Error(w, err.Error(), http.StatusServiceUnavailable)
		return
	}
	hlog.FromRequest(r).Info().Bool("match", proxyURL != nil).Msg("proxy")
	var host string
	if proxyURL == nil {
		host = r.URL.Host
	} else {
		host = proxyURL.Host
	}
	d := net.Dialer{}
	dstConn, err := d.DialContext(r.Context(), "tcp", host)
	if err != nil {
		hlog.FromRequest(r).Error().Err(err).Msg("unable to dial")
		http.Error(w, err.Error(), http.StatusServiceUnavailable)
		return
	}
	if proxyURL != nil {
		// Replay the CONNECT request verbatim to the upstream proxy.
		b, err := httputil.DumpRequest(r, true)
		if err != nil {
			hlog.FromRequest(r).Error().Err(err).Msg("unable to dump request")
			http.Error(w, err.Error(), http.StatusServiceUnavailable)
			return
		}
		if _, err := io.Copy(dstConn, bytes.NewReader(b)); err != nil {
			hlog.FromRequest(r).Error().Err(err).Msg("unable to send connection")
			http.Error(w, err.Error(), http.StatusServiceUnavailable)
			return
		}
	} else {
		w.WriteHeader(http.StatusOK)
	}
	hijacker, ok := w.(http.Hijacker)
	if !ok {
		log.Error().Msg("Hijacking not supported")
		http.Error(w, "Hijacking not supported", http.StatusInternalServerError)
		return
	}
	srcConn, _, err := hijacker.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusServiceUnavailable)
		return
	}
	// Bidirectional pump; each goroutine closes both ends on EOF.
	go transfer(dstConn, srcConn)
	go transfer(srcConn, dstConn)
}

// handleHTTP forwards a plain (non-CONNECT) request through the shared
// transport and relays status, headers and body back to the client.
func handleHTTP(w http.ResponseWriter, req *http.Request) {
	resp, err := roundTripper.RoundTrip(req)
	if err != nil {
		http.Error(w, err.Error(), http.StatusServiceUnavailable)
		return
	}
	defer resp.Body.Close()
	copyHeader(w.Header(), resp.Header)
	w.WriteHeader(resp.StatusCode)
	io.Copy(w, resp.Body)
}

// copyHeader appends every value of every key from src to dst.
func copyHeader(dst, src http.Header) {
	for k, vv := range src {
		for _, v := range vv {
			dst.Add(k, v)
		}
	}
}

// auth classifies the caller as "smart" or "direct" from the
// Proxy-Authorization header, optionally rejecting unknown credentials when
// Auth.Force is set, and stores the user kind in the request context.
func auth(f http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		user := "direct"
		auth := r.Header.Get("Proxy-Authorization")
		switch auth {
		case authSmart:
			user = "smart"
		case authDirect:
		default:
			if config.Auth.Force {
				hlog.FromRequest(r).Error().Msg("unauthorized")
				http.Error(w, "unauthorized", http.StatusUnauthorized)
				return
			}
		}
		r = r.WithContext(context.WithValue(r.Context(), userKey{}, user))
		f.ServeHTTP(w, r)
	}
}

// getHandler wraps the core handler with auth and zerolog request logging
// middlewares (innermost first).
func getHandler(f http.HandlerFunc) http.Handler {
	var h http.Handler = f
	h = auth(h)
	h = hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {
		hlog.FromRequest(r).Info().Int("status", status).Int("size", size).
			Dur("duration", duration).Msg("done")
	})(h)
	h = hlog.RequestHandler("req")(h)
	h = hlog.RemoteAddrHandler("ip")(h)
	h = hlog.UserAgentHandler("user_agent")(h)
	h = hlog.RequestIDHandler("req_id", "")(h)
	h = hlog.NewHandler(log.Logger)(h)
	return h
}

// main starts the proxy server, optionally with TLS, and blocks forever.
func main() {
	server := http.Server{
		Addr: ":" + strconv.Itoa(config.Port),
		Handler: getHandler(func(w http.ResponseWriter, r *http.Request) {
			if r.Method == http.MethodConnect {
				handleConnect(w, r)
			} else {
				handleHTTP(w, r)
			}
		}),
		// Disable HTTP/2.
		TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
	}
	log.Info().Msg("serving...")
	var err error
	if config.TLS.Enabled {
		err = server.ListenAndServeTLS(config.TLS.Cert, config.TLS.Key)
	} else {
		err = server.ListenAndServe()
	}
	log.Fatal().Err(err).Msg("error while serving")
}
package basecmds

import (
	_ "embed"

	"github.com/Nv7-Github/Nv7Haven/eod/types"
	"github.com/bwmarrin/discordgo"
)

//go:embed help/about.txt
var helpAbout string

//go:embed help/basics.txt
var helpBasics string

//go:embed help/advanced.txt
var helpAdvanced string

//go:embed help/setup.txt
var helpSetup string

// makeHelpComponents builds the help-topic select menu with the given topic
// shown as the current selection.
func makeHelpComponents(selected string) discordgo.ActionsRow {
	return discordgo.ActionsRow{
		Components: []discordgo.MessageComponent{
			discordgo.SelectMenu{
				CustomID: "help-select",
				Options: []discordgo.SelectMenuOption{
					{
						Label:       "About",
						Value:       "about",
						Description: "Get basic information about the bot!",
						Default:     selected == "about",
					},
					{
						Label:       "Basics",
						Value:       "basics",
						Description: "Learn the basics about using the bot!",
						Default:     selected == "basics",
					},
					{
						Label:       "Advanced",
						Value:       "advanced",
						Description: "Learn how to use the advanced features of the bot!",
						Default:     selected == "advanced",
					},
					{
						Label:       "Setup",
						Value:       "setup",
						Description: "Learn how to set up your own EoD server!",
						Default:     selected == "setup",
					},
				},
			},
		},
	}
}

// helpComponent handles select-menu interactions on a help message.
type helpComponent struct {
	b *BaseCmds
}

// Handler swaps the help message body to the embedded text for the chosen
// topic and re-renders the menu with that topic selected. Unknown values
// are ignored.
func (h *helpComponent) Handler(_ *discordgo.Session, i *discordgo.InteractionCreate) {
	var txt string
	val := i.MessageComponentData().Values[0]
	switch val {
	case "about":
		txt = helpAbout
	case "basics":
		txt = helpBasics
	case "advanced":
		txt = helpAdvanced
	case "setup":
		txt = helpSetup
	default:
		return
	}
	h.b.dg.InteractionRespond(i.Interaction, &discordgo.InteractionResponse{
		Type: discordgo.InteractionResponseUpdateMessage,
		Data: &discordgo.InteractionResponseData{
			Content:    txt,
			Components: []discordgo.MessageComponent{makeHelpComponents(val)},
		},
	})
}

// HelpCmd replies with the "about" help page and registers a component
// handler for the topic menu on the sent message.
func (b *BaseCmds) HelpCmd(m types.Msg, rsp types.Rsp) {
	rsp.Acknowledge()
	id := rsp.Message(helpAbout, makeHelpComponents("about"))

	// Guild data is guarded by b.lock; read, mutate, write back.
	b.lock.RLock()
	dat, exists := b.dat[m.GuildID]
	b.lock.RUnlock()
	if !exists {
		return
	}
	dat.AddComponentMsg(id, &helpComponent{
		b: b,
	})
	b.lock.Lock()
	b.dat[m.GuildID] = dat
	b.lock.Unlock()
}
package middlewares

import (
	"fmt"
	"net"
	"net/http"
	"strconv"

	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
	"github.com/sirupsen/logrus"

	"github.com/oshankkumar/GatewayOmega/utils"
)

// ZipkinTracing is an HTTP middleware that starts a server-side span for every
// authenticated request, joining a trace propagated in the incoming headers
// when one is present. No-auth paths pass through untouched.
func ZipkinTracing(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if utils.IsNoAuthPath(r) {
			next.ServeHTTP(w, r)
			return
		}

		// A missing/invalid propagated context is not fatal — we just log it
		// and start a fresh root span.
		wireCtx, err := opentracing.GlobalTracer().Extract(
			opentracing.TextMap,
			opentracing.HTTPHeadersCarrier(r.Header),
		)
		if err != nil {
			logrus.WithError(err).Errorf("encountered an error while extracting span from req")
		}

		span := opentracing.GlobalTracer().StartSpan(
			fmt.Sprintf("%s %s", r.Method, r.URL.Path),
			ext.RPCServerOption(wireCtx),
		)
		defer span.Finish()

		ctx := opentracing.ContextWithSpan(r.Context(), span)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

// RequestFunc decorates an outgoing HTTP request, returning the (possibly
// modified) request.
type RequestFunc func(req *http.Request) *http.Request

// ToHTTPRequest returns a RequestFunc that marks the span found in the request
// context as an RPC client span, records standard HTTP and peer tags, and
// injects the span context into the outgoing request headers.
func ToHTTPRequest(tracer opentracing.Tracer) RequestFunc {
	return func(req *http.Request) *http.Request {
		// Retrieve the Span from context.
		if span := opentracing.SpanFromContext(req.Context()); span != nil {
			// We are going to use this span in a client request, so mark as such.
			ext.SpanKindRPCClient.Set(span)

			// Add some standard OpenTracing tags, useful in an HTTP request.
			ext.HTTPMethod.Set(span, req.Method)
			span.SetTag(zipkincore.HTTP_HOST, req.URL.Host)
			span.SetTag(zipkincore.HTTP_PATH, req.URL.Path)
			ext.HTTPUrl.Set(
				span,
				fmt.Sprintf("%s://%s%s", req.URL.Scheme, req.URL.Host, req.URL.Path),
			)

			// Add information on the peer service we're about to contact.
			if host, portString, err := net.SplitHostPort(req.URL.Host); err == nil {
				ext.PeerHostname.Set(span, host)
				// BUG FIX: the peer-port tag must be set when Atoi SUCCEEDS;
				// the original set it only on conversion failure (err != nil),
				// which always tagged port 0 or nothing at all.
				if port, err := strconv.Atoi(portString); err == nil {
					ext.PeerPort.Set(span, uint16(port))
				}
			} else {
				ext.PeerHostname.Set(span, req.URL.Host)
			}

			// Inject the Span context into the outgoing HTTP Request.
			if err := tracer.Inject(
				span.Context(),
				opentracing.TextMap,
				opentracing.HTTPHeadersCarrier(req.Header),
			); err != nil {
				fmt.Printf("error encountered while trying to inject span: %+v\n", err)
			}
		}
		return req
	}
}
package template

import (
	"context"
	"fmt"
	"log"

	"github.com/argoproj/pkg/errors"
	"github.com/spf13/cobra"

	"github.com/argoproj/argo/cmd/argo/commands/client"
	workflowtemplatepkg "github.com/argoproj/argo/pkg/apiclient/workflowtemplate"
)

// NewDeleteCommand returns a new instance of an `argo delete` command
func NewDeleteCommand() *cobra.Command {
	var all bool
	command := &cobra.Command{
		Use:   "delete WORKFLOW_TEMPLATE",
		Short: "delete a workflow template",
		Run: func(cmd *cobra.Command, args []string) {
			apiServerDeleteWorkflowTemplates(all, args)
		},
	}
	command.Flags().BoolVar(&all, "all", false, "Delete all workflow templates")
	return command
}

// apiServerDeleteWorkflowTemplates deletes the named workflow templates in the
// current namespace, or every template in the namespace when allWFs is set.
func apiServerDeleteWorkflowTemplates(allWFs bool, wfTmplNames []string) {
	ctx, apiClient := client.NewAPIClient()
	serviceClient := apiClient.NewWorkflowTemplateServiceClient()
	namespace := client.Namespace()
	delWFTmplNames := wfTmplNames
	if allWFs {
		wftmplList, err := serviceClient.ListWorkflowTemplates(ctx, &workflowtemplatepkg.WorkflowTemplateListRequest{
			Namespace: namespace,
		})
		if err != nil {
			log.Fatal(err)
		}
		delWFTmplNames = make([]string, 0, len(wftmplList.Items))
		for _, wfTmpl := range wftmplList.Items {
			delWFTmplNames = append(delWFTmplNames, wfTmpl.Name)
		}
	}
	// Loop variable renamed: the original shadowed the wfTmplNames parameter,
	// reusing a []string name for a single template name.
	for _, name := range delWFTmplNames {
		apiServerDeleteWorkflowTemplate(serviceClient, ctx, namespace, name)
	}
}

// apiServerDeleteWorkflowTemplate deletes one workflow template and prints a
// confirmation. errors.CheckError is a no-op on nil and exits the process on
// a non-nil error, so the redundant nil-guard was dropped.
func apiServerDeleteWorkflowTemplate(client workflowtemplatepkg.WorkflowTemplateServiceClient, ctx context.Context, namespace, wftmplName string) {
	_, err := client.DeleteWorkflowTemplate(ctx, &workflowtemplatepkg.WorkflowTemplateDeleteRequest{
		Name:      wftmplName,
		Namespace: namespace,
	})
	errors.CheckError(err)
	fmt.Printf("WorkflowTemplate '%s' deleted\n", wftmplName)
}
package hash

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"errors"
	"io"

	"golang.org/x/crypto/argon2"
	"golang.org/x/crypto/blake2b"
	"golang.org/x/crypto/hkdf"
)

const (
	// MACSize represents the size of a 16 byte MAC.
	MACSize = 16
	// KeySize represents the size of a 32 byte key.
	KeySize = 32
)

var (
	errKeySize  = errors.New("invalid key size")
	errSaltSize = errors.New("invalid salt size")
)

// SimplexHash is the simplest usage of a hash function
// and is suitable if you only have one use case.
func SimplexHash(data []byte) []byte {
	out := blake2b.Sum256(data)
	return out[:]
}

// Hash hashes the data with contextInfo used as the key
// for blake2's keying mechanism. contextInfo is meant to
// be a human readable string that ensures each use case of
// the hash function will have different outputs.
func Hash(contextInfo string, data []byte) ([]byte, error) {
	if len(contextInfo) > blake2b.Size {
		return nil, errors.New("contextInfo size error")
	}
	h, err := blake2b.New256([]byte(contextInfo))
	if err != nil {
		return nil, err
	}
	if _, err = h.Write(data); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

// kdfPairing unambiguously encodes (salt, info) as
// len(salt) || len(info) || salt || info with 4-byte big-endian length
// prefixes, so distinct pairs can never collide after concatenation.
// The combined inputs must fit within KeySize bytes including the prefixes.
func kdfPairing(salt, info []byte) ([]byte, error) {
	if len(salt)+len(info) > KeySize-8 {
		return nil, errSaltSize
	}
	// Build into one pre-sized buffer. (The original allocated an empty
	// `out`, then discarded it by appending onto the saltLen slice, which
	// also risked aliasing saltLen's backing array.)
	out := make([]byte, 8, 8+len(salt)+len(info))
	binary.BigEndian.PutUint32(out[:4], uint32(len(salt)))
	binary.BigEndian.PutUint32(out[4:8], uint32(len(info)))
	out = append(out, salt...)
	out = append(out, info...)
	return out, nil
}

// Blake2bKDF is a key derivation function. It's suitable to be used with keys
// with uniform entropy and not for use with passwords/passphrases.
// salt and info are optional and must be less than 64 bytes in total.
// NOTE: I SUGGEST NOT USING THIS - instead use HKDF-SHA256.
func Blake2bKDF(key []byte, size uint32, salt, info []byte) ([]byte, error) { if len(key) != KeySize { return nil, errKeySize } ikm, err := kdfPairing(salt, info) if err != nil { return nil, err } xof, err := blake2b.NewXOF(size, ikm) if err != nil { return nil, err } _, err = xof.Write(key) if err != nil { return nil, err } output := make([]byte, size) _, err = xof.Read(output) if err != nil { return nil, err } return output, nil } // KDF returns a slice of derive keysNum number of derived keys of size keySize. // The given secret must be a uniform entropy secret (ie not a password) and // info is an optional non-secret which may be omitted. // // Note: in practice you probably don't want to use this particular function // but instead use the HKDF directly where you need it. func KDF(secret, salt, info []byte, keysNum, keySize int) ([][]byte, error) { hash := sha256.New if len(salt) != hash().Size() { return nil, errors.New("wrong salt size") } hkdf := hkdf.New(hash, secret, salt, info) var keys [][]byte for i := 0; i < keysNum; i++ { key := make([]byte, keySize) if _, err := io.ReadFull(hkdf, key); err != nil { panic(err) } keys = append(keys, key) } return keys, nil } // HashPassword returns a 32 byte cryptographic key given // a password and an entropic salt. func HashPassword(password, salt []byte) []byte { return argon2.IDKey(password, salt, 1, 64*1024, 4, 32) } // Blake2bMAC is a MAC that uses Blake2b's keyed hash mechanism // instead of an HMAC construction. The output is of size MACSize. func Blake2bMAC(key, data []byte) []byte { h, err := blake2b.New(MACSize, key) if err != nil { panic(err) } h.Write(data) return h.Sum(nil) } // ValidBlake2bMAC reports whether Blake2b messageMAC is a valid MAC tag for message. func ValidBlake2bMAC(message, messageMAC, key []byte) bool { return hmac.Equal(messageMAC, Blake2bMAC(key, message)) } // HMACSHA256 returns the HMAC-SHA256 authentication code for the given message and key. 
func HMACSHA256(message, key []byte) []byte {
	h := hmac.New(sha256.New, key)
	_, _ = h.Write(message) // hash.Hash.Write never returns an error
	return h.Sum(nil)
}

// ValidHMACSHA256 reports whether messageMAC is a valid HMAC-SHA256 tag for
// message under key, compared in constant time.
func ValidHMACSHA256(message, messageMAC, key []byte) bool {
	expected := HMACSHA256(message, key)
	return hmac.Equal(messageMAC, expected)
}
package main_test

import (
	"testing"
	"net/http"
	"os/exec"
	"os"
	"fmt"
	"gopkg.in/Shopify/sarama.v1"
	"bufio"
	"errors"
	"github.com/bsm/sarama-cluster"
	"math/rand"
	"time"
)

const sourceMsg = `World`
const expectedReply = `Hello World`
const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

func init() {
	rand.Seed(time.Now().UnixNano())
}

// randString returns a random n-character string of ASCII letters, used to
// generate unique topic and group names per test run.
func randString(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return string(b)
}

// TestIntegrationWithKafka runs the function-sidecar binary against a real
// Kafka broker (KAFKA_BROKER env var), serves a local HTTP "function" on
// :8080, produces a message on the input topic and asserts the sidecar's
// reply appears on the output topic.
//
// NOTE(review): t.Fatal is called from the HTTP handler and effectively from
// non-test goroutines here, which the testing package does not support —
// consider t.Error + return. Also http.ListenAndServe may not be listening
// yet when the sidecar first calls it; presumably the sidecar retries.
func TestIntegrationWithKafka(t *testing.T) {
	broker := os.Getenv("KAFKA_BROKER")
	if broker == "" {
		t.Fatal("Required environment variable KAFKA_BROKER was not provided")
	}

	// Launch the sidecar with unique topics/group via SPRING_APPLICATION_JSON.
	cmd := exec.Command("../function-sidecar")
	input := randString(10)
	output := randString(10)
	group := randString(10)
	configJson := fmt.Sprintf(`{ "spring.cloud.stream.kafka.binder.brokers":"%s", "spring.cloud.stream.bindings.input.destination": "%s", "spring.cloud.stream.bindings.output.destination": "%s", "spring.cloud.stream.bindings.input.group": "%s", "spring.profiles.active": "http" }`, broker, input, output, group)
	cmd.Env = []string{"SPRING_APPLICATION_JSON=" + configJson}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	startErr := cmd.Start()
	defer cmd.Process.Kill()
	if startErr != nil {
		t.Fatal(startErr)
	}

	// The local "function": echoes "Hello <first line of body>".
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		bodyScanner := bufio.NewScanner(r.Body)
		if !bodyScanner.Scan() {
			t.Fatal(errors.New("Scan of message body failed"))
		}
		w.Write([]byte("Hello " + bodyScanner.Text()))
	})
	go func() {
		http.ListenAndServe(":8080", nil)
	}()

	// Produce one message on the input topic. The two leading bytes appear
	// to be the sidecar's wire-format preamble — TODO confirm against the
	// sidecar's message codec (the reply strips 2 bytes symmetrically below).
	kafkaProducer, kafkaProducerErr := sarama.NewAsyncProducer([]string{broker}, nil)
	if kafkaProducerErr != nil {
		t.Fatal(kafkaProducerErr)
	}
	testMessage := &sarama.ProducerMessage{Topic: input, Value: sarama.StringEncoder(string([]byte{0xff, 0x00}) + sourceMsg)}
	kafkaProducer.Input() <- testMessage
	producerCloseErr := kafkaProducer.Close()
	if producerCloseErr != nil {
		t.Fatal(producerCloseErr)
	}

	// Consume the output topic from the oldest offset with a fresh group so
	// we see the sidecar's reply regardless of timing.
	consumerConfig := cluster.NewConfig()
	consumerConfig.Consumer.Offsets.Initial = sarama.OffsetOldest
	group2 := randString(10)
	consumer, err := cluster.NewConsumer([]string{broker}, group2, []string{output}, consumerConfig)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	select {
	case msg, ok := <-consumer.Messages():
		if ok {
			// Strip the 2-byte preamble before comparing.
			reply := string(msg.Value[2:])
			if reply != expectedReply {
				t.Fatal(fmt.Errorf("Received reply [%s] does not match expected reply [%s]", reply, expectedReply))
			}
		}
	case <-time.After(time.Second * 100):
		t.Fatal("Timed out waiting for reply")
	}
}
package controllers

import (
	"encoding/json"
	"net/http"

	"github.com/astaxie/beego/validation"

	"github.com/raykanavheti/LetsworkBackend/controllers/util"
	"github.com/raykanavheti/LetsworkBackend/models"
)

//ProfileController interface
type ProfileController struct{}

// CreateProfile creates a new Profile for a user.
// It decodes the JSON request body, validates it, persists it, and replies
// with the created profile (201) or a JSON {"message": ...} error (400).
func (catCntrl *ProfileController) CreateProfile(w http.ResponseWriter, r *http.Request) {
	responseWriter := util.GetResponseWriter(w, r)
	defer responseWriter.Close()

	// fail writes a 400 JSON error body (now with Content-Type, which the
	// original error paths omitted).
	fail := func(message string) {
		errj, _ := json.Marshal(map[string]string{"message": message})
		responseWriter.Header().Set("Content-Type", "application/json")
		responseWriter.WriteHeader(400)
		responseWriter.Write(errj)
	}

	profile := models.Profile{}
	if err := json.NewDecoder(r.Body).Decode(&profile); err != nil {
		fail(err.Error())
		return
	}

	valid := validation.Validation{}
	b, err := valid.Valid(profile)
	if !b {
		// BUG FIX: when validation merely fails, valid.Valid returns a nil
		// error and the details live in valid.Errors; the original called
		// err.Error() here and panicked with a nil pointer dereference.
		msg := "validation failed"
		if err != nil {
			msg = err.Error()
		} else if len(valid.Errors) > 0 {
			msg = valid.Errors[0].Key + " " + valid.Errors[0].Message
		}
		fail(msg)
		return
	}

	cat, err := models.CreateProfile(profile)
	if err != nil {
		fail(err.Error())
		return
	}
	uj, _ := json.Marshal(cat)
	responseWriter.Header().Set("Content-Type", "application/json")
	responseWriter.WriteHeader(201)
	responseWriter.Write(uj)
}
package word

import (
	"bytes"
	"strings"
)

// CamelCase convert `a_b_c` to `aBC`: each underscore is dropped and the
// letter that follows it is upper-cased. A trailing underscore is simply
// dropped.
func CamelCase(v string) string {
	buf := bytes.NewBuffer([]byte{})
	length := len(v)
	for i := 0; i < length; i++ {
		if v[i] != '_' {
			buf.WriteByte(v[i])
			continue
		}
		i++
		// BUG FIX: the original checked `i > length`, so a trailing
		// underscore indexed one past the end of the string and panicked.
		if i >= length {
			break
		}
		if v[i] >= 'a' && v[i] <= 'z' {
			buf.WriteByte(v[i] + 'A' - 'a')
		} else {
			buf.WriteByte(v[i])
		}
	}
	return buf.String()
}

// UnderlineCase convert `ABC` to `a_b_c`: every upper-case ASCII letter is
// lower-cased and, except at the start of the string, preceded by an
// underscore.
func UnderlineCase(v string) string {
	buf := bytes.NewBuffer([]byte{})
	length := len(v)
	for i := 0; i < length; i++ {
		if v[i] >= 'A' && v[i] <= 'Z' {
			// BUG FIX: the original left a leading capital untouched, so
			// UnderlineCase("ABC") returned "A_b_c" instead of the
			// documented "a_b_c".
			if i > 0 {
				buf.WriteByte('_')
			}
			buf.WriteByte(v[i] + 'a' - 'A')
		} else {
			buf.WriteByte(v[i])
		}
	}
	return buf.String()
}

// UpperFirst upper-cases the first character of v. Empty input is returned
// unchanged (the original indexed v[0] and panicked on "").
func UpperFirst(v string) string {
	if v == "" {
		return v
	}
	return strings.ToUpper(string(v[0])) + v[1:]
}

// LowerFirst lower-cases the first character of v. Empty input is returned
// unchanged (the original indexed v[0] and panicked on "").
func LowerFirst(v string) string {
	if v == "" {
		return v
	}
	return strings.ToLower(string(v[0])) + v[1:]
}
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alpha

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations"
)

// validate checks that all required fields and required parameters of the
// PrivateCloud resource are set, then validates each non-empty nested message.
func (r *PrivateCloud) validate() error {

	if err := dcl.RequiredParameter(r.Name, "Name"); err != nil {
		return err
	}
	if err := dcl.Required(r, "networkConfig"); err != nil {
		return err
	}
	if err := dcl.Required(r, "managementCluster"); err != nil {
		return err
	}
	if err := dcl.RequiredParameter(r.Project, "Project"); err != nil {
		return err
	}
	if err := dcl.RequiredParameter(r.Location, "Location"); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(r.NetworkConfig) {
		if err := r.NetworkConfig.validate(); err != nil {
			return err
		}
	}
	if !dcl.IsEmptyValueIndirect(r.ManagementCluster) {
		if err := r.ManagementCluster.validate(); err != nil {
			return err
		}
	}
	if !dcl.IsEmptyValueIndirect(r.Hcx) {
		if err := r.Hcx.validate(); err != nil {
			return err
		}
	}
	if !dcl.IsEmptyValueIndirect(r.Nsx) {
		if err := r.Nsx.validate(); err != nil {
			return err
		}
	}
	if !dcl.IsEmptyValueIndirect(r.Vcenter) {
		if err := r.Vcenter.validate(); err != nil {
			return err
		}
	}
	return nil
}

// validate checks required fields of the network config sub-message.
func (r *PrivateCloudNetworkConfig) validate() error {
	if err := dcl.Required(r, "managementCidr"); err != nil {
		return err
	}
	return nil
}

// validate checks required fields of the management cluster sub-message.
func (r *PrivateCloudManagementCluster) validate() error {
	if err := dcl.Required(r, "clusterId"); err != nil {
		return err
	}
	return nil
}

// validate has no required fields for the HCX sub-message.
func (r *PrivateCloudHcx) validate() error {
	return nil
}

// validate has no required fields for the NSX sub-message.
func (r *PrivateCloudNsx) validate() error {
	return nil
}

// validate has no required fields for the vCenter sub-message.
func (r *PrivateCloudVcenter) validate() error {
	return nil
}

// basePath returns the service endpoint all resource URLs are built on.
func (r *PrivateCloud) basePath() string {
	params := map[string]interface{}{}
	return dcl.Nprintf("https://vmwareengine.googleapis.com/v1/", params)
}

// getURL returns the URL used to GET this private cloud.
func (r *PrivateCloud) getURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/privateClouds/{{name}}", nr.basePath(), userBasePath, params), nil
}

// listURL returns the URL used to list private clouds in the parent location.
func (r *PrivateCloud) listURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/privateClouds", nr.basePath(), userBasePath, params), nil
}

// createURL returns the URL used to create this private cloud; the resource
// name is passed as the privateCloudId query parameter.
func (r *PrivateCloud) createURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/privateClouds?privateCloudId={{name}}", nr.basePath(), userBasePath, params), nil
}

// deleteURL returns the URL used to delete this private cloud.
func (r *PrivateCloud) deleteURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/privateClouds/{{name}}", nr.basePath(), userBasePath, params), nil
}

// SetPolicyURL returns the URL for setting the IAM policy on this resource.
func (r *PrivateCloud) SetPolicyURL(userBasePath string) string {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"project":  *nr.Project,
		"location": *nr.Location,
		"name":     *nr.Name,
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/privateClouds/{{name}}:setIamPolicy", nr.basePath(), userBasePath, fields)
}

// SetPolicyVerb returns the HTTP verb used for setting the IAM policy.
func (r *PrivateCloud) SetPolicyVerb() string {
	return "POST"
}

// getPolicyURL returns the URL for reading the IAM policy on this resource.
func (r *PrivateCloud) getPolicyURL(userBasePath string) string {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"project":  *nr.Project,
		"location": *nr.Location,
		"name":     *nr.Name,
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/privateClouds/{{name}}:getIamPolicy", nr.basePath(), userBasePath, fields)
}

// IAMPolicyVersion returns the IAM policy version used for this resource.
func (r *PrivateCloud) IAMPolicyVersion() int {
	return 3
}

// privateCloudApiOperation represents a mutable operation in the underlying REST
// API such as Create, Update, or Delete.
type privateCloudApiOperation interface {
	do(context.Context, *PrivateCloud, *Client) error
}

// newUpdatePrivateCloudUpdatePrivateCloudRequest creates a request for an
// PrivateCloud resource's UpdatePrivateCloud update type by filling in the update
// fields based on the intended state of the resource.
func newUpdatePrivateCloudUpdatePrivateCloudRequest(ctx context.Context, f *PrivateCloud, c *Client) (map[string]interface{}, error) {
	req := map[string]interface{}{}
	res := f
	_ = res

	if v, err := expandPrivateCloudNetworkConfig(c, f.NetworkConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding NetworkConfig into networkConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["networkConfig"] = v
	}
	if v, err := expandPrivateCloudManagementCluster(c, f.ManagementCluster, res); err != nil {
		return nil, fmt.Errorf("error expanding ManagementCluster into managementCluster: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["managementCluster"] = v
	}
	if v := f.Description; !dcl.IsEmptyValueIndirect(v) {
		req["description"] = v
	}
	if v, err := expandPrivateCloudHcx(c, f.Hcx, res); err != nil {
		return nil, fmt.Errorf("error expanding Hcx into hcx: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["hcx"] = v
	}
	if v, err := expandPrivateCloudNsx(c, f.Nsx, res); err != nil {
		return nil, fmt.Errorf("error expanding Nsx into nsx: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["nsx"] = v
	}
	if v, err := expandPrivateCloudVcenter(c, f.Vcenter, res); err != nil {
		return nil, fmt.Errorf("error expanding Vcenter into vcenter: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["vcenter"] = v
	}
	// The fully-qualified resource name is always included in the request.
	req["name"] = fmt.Sprintf("projects/%s/locations/%s/privateClouds/%s", *f.Project, *f.Location, *f.Name)

	return req, nil
}

// marshalUpdatePrivateCloudUpdatePrivateCloudRequest converts the update into
// the final JSON request body.
func marshalUpdatePrivateCloudUpdatePrivateCloudRequest(c *Client, m map[string]interface{}) ([]byte, error) {

	return json.Marshal(m)
}

type updatePrivateCloudUpdatePrivateCloudOperation struct {
	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
	// Usually it will be nil - this is to prevent us from accidentally depending on apply
	// options, which should usually be unnecessary.
	ApplyOptions []dcl.ApplyOption
	FieldDiffs   []*dcl.FieldDiff
}

// do creates a request and sends it to the appropriate URL. In most operations,
// do will transcribe a subset of the resource into a request object and send a
// PUT request to a single URL.
func (op *updatePrivateCloudUpdatePrivateCloudOperation) do(ctx context.Context, r *PrivateCloud, c *Client) error {
	_, err := c.GetPrivateCloud(ctx, r)
	if err != nil {
		return err
	}

	u, err := r.updateURL(c.Config.BasePath, "UpdatePrivateCloud")
	if err != nil {
		return err
	}
	// The PATCH is scoped to the fields that actually differ.
	mask := dcl.UpdateMask(op.FieldDiffs)
	u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask})
	if err != nil {
		return err
	}

	req, err := newUpdatePrivateCloudUpdatePrivateCloudRequest(ctx, r, c)
	if err != nil {
		return err
	}

	c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req)
	body, err := marshalUpdatePrivateCloudUpdatePrivateCloudRequest(c, req)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider)
	if err != nil {
		return err
	}

	// The update is a long-running operation; poll it to completion.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET")

	if err != nil {
		return err
	}

	return nil
}

// listPrivateCloudRaw fetches one page of the list response as raw JSON bytes.
func (c *Client) listPrivateCloudRaw(ctx context.Context, r *PrivateCloud, pageToken string, pageSize int32) ([]byte, error) {
	u, err := r.urlNormalized().listURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}

	m := make(map[string]string)
	if pageToken != "" {
		m["pageToken"] = pageToken
	}

	if pageSize != PrivateCloudMaxPage {
		m["pageSize"] = fmt.Sprintf("%v", pageSize)
	}

	u, err = dcl.AddQueryParams(u, m)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}

// listPrivateCloudOperation mirrors the JSON shape of a list response page.
type listPrivateCloudOperation struct {
	PrivateClouds []map[string]interface{} `json:"privateClouds"`
	Token         string                   `json:"nextPageToken"`
}

// listPrivateCloud fetches and decodes one page of private clouds, stamping
// each result with the parent project/location, and returns the next page
// token.
func (c *Client) listPrivateCloud(ctx context.Context, r *PrivateCloud, pageToken string, pageSize int32) ([]*PrivateCloud, string, error) {
	b, err := c.listPrivateCloudRaw(ctx, r, pageToken, pageSize)
	if err != nil {
		return nil, "", err
	}

	var m listPrivateCloudOperation
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, "", err
	}

	var l []*PrivateCloud
	for _, v := range m.PrivateClouds {
		res, err := unmarshalMapPrivateCloud(v, c, r)
		if err != nil {
			return nil, m.Token, err
		}
		res.Project = r.Project
		res.Location = r.Location
		l = append(l, res)
	}

	return l, m.Token, nil
}

// deleteAllPrivateCloud deletes every resource accepted by the filter f,
// collecting (rather than aborting on) individual deletion errors.
func (c *Client) deleteAllPrivateCloud(ctx context.Context, f func(*PrivateCloud) bool, resources []*PrivateCloud) error {
	var errors []string
	for _, res := range resources {
		if f(res) {
			// We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
			err := c.DeletePrivateCloud(ctx, res)
			if err != nil {
				errors = append(errors, err.Error())
			}
		}
	}
	if len(errors) > 0 {
		return fmt.Errorf("%v", strings.Join(errors, "\n"))
	} else {
		return nil
	}
}

type deletePrivateCloudOperation struct{}

// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
// createPrivateCloudOperation tracks an in-flight create and caches the final
// operation response for FirstResponse.
type createPrivateCloudOperation struct {
	response map[string]interface{}
}

// FirstResponse returns the cached operation response and whether a non-empty
// response has been recorded.
func (op *createPrivateCloudOperation) FirstResponse() (map[string]interface{}, bool) {
	return op.response, len(op.response) > 0
}

// do issues the create request (a POST of the marshaled resource), waits for
// the resulting long-running operation to finish, and confirms the resource
// now exists via a follow-up Get.
func (op *createPrivateCloudOperation) do(ctx context.Context, r *PrivateCloud, c *Client) error {
	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
	u, err := r.createURL(c.Config.BasePath)
	if err != nil {
		return err
	}

	req, err := r.marshal(c)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
	if err != nil {
		return err
	}
	// wait for object to be created.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err)
		return err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation")
	// Cache the operation's final response for FirstResponse callers.
	op.response, _ = o.FirstResponse()

	// Verify the resource is actually retrievable after creation.
	if _, err := c.GetPrivateCloud(ctx, r); err != nil {
		c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
		return err
	}

	return nil
}

// getPrivateCloudRaw GETs the resource and returns the raw response body.
func (c *Client) getPrivateCloudRaw(ctx context.Context, r *PrivateCloud) ([]byte, error) {
	u, err := r.getURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	b, err := ioutil.ReadAll(resp.Response.Body)
	if err != nil {
		return nil, err
	}

	return b, nil
}

// privateCloudDiffsForRawDesired fetches the current state of the resource
// (honoring any state hint supplied in opts), canonicalizes both the initial
// and the desired state, and returns them together with the field diffs
// between them. If the resource does not exist, initial is nil and desired is
// the canonicalized desired state.
func (c *Client) privateCloudDiffsForRawDesired(ctx context.Context, rawDesired *PrivateCloud, opts ...dcl.ApplyOption) (initial, desired *PrivateCloud, diffs []*dcl.FieldDiff, err error) {
	c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
	// First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
	var fetchState *PrivateCloud
	if sh := dcl.FetchStateHint(opts); sh != nil {
		if r, ok := sh.(*PrivateCloud); !ok {
			c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected PrivateCloud, got %T", sh)
		} else {
			fetchState = r
		}
	}
	if fetchState == nil {
		fetchState = rawDesired
	}

	// 1.2: Retrieval of raw initial state from API
	rawInitial, err := c.GetPrivateCloud(ctx, fetchState)
	if rawInitial == nil {
		if !dcl.IsNotFound(err) {
			c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a PrivateCloud resource already exists: %s", err)
			return nil, nil, nil, fmt.Errorf("failed to retrieve PrivateCloud resource: %v", err)
		}
		c.Config.Logger.InfoWithContext(ctx, "Found that PrivateCloud resource did not exist.")
		// Perform canonicalization to pick up defaults.
		desired, err = canonicalizePrivateCloudDesiredState(rawDesired, rawInitial)
		return nil, desired, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Found initial state for PrivateCloud: %v", rawInitial)
	c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for PrivateCloud: %v", rawDesired)

	// The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
	if err := extractPrivateCloudFields(rawInitial); err != nil {
		return nil, nil, nil, err
	}

	// 1.3: Canonicalize raw initial state into initial state.
	initial, err = canonicalizePrivateCloudInitialState(rawInitial, rawDesired)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for PrivateCloud: %v", initial)

	// 1.4: Canonicalize raw desired state into desired state.
	desired, err = canonicalizePrivateCloudDesiredState(rawDesired, rawInitial, opts...)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for PrivateCloud: %v", desired)

	// 2.1: Comparison of initial and desired state.
	diffs, err = diffPrivateCloud(c, desired, initial, opts...)

	return initial, desired, diffs, err
}

// canonicalizePrivateCloudInitialState currently returns the raw initial state
// unchanged.
func canonicalizePrivateCloudInitialState(rawInitial, rawDesired *PrivateCloud) (*PrivateCloud, error) {
	// TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
	return rawInitial, nil
}

/*
* Canonicalizers
*
* These are responsible for converting either a user-specified config or a
* GCP API response to a standard format that can be used for difference checking.
* */
func canonicalizePrivateCloudDesiredState(rawDesired, rawInitial *PrivateCloud, opts ...dcl.ApplyOption) (*PrivateCloud, error) {
	if rawInitial == nil {
		// Since the initial state is empty, the desired state is all we have.
		// We canonicalize the remaining nested objects with nil to pick up defaults.
		rawDesired.NetworkConfig = canonicalizePrivateCloudNetworkConfig(rawDesired.NetworkConfig, nil, opts...)
		rawDesired.ManagementCluster = canonicalizePrivateCloudManagementCluster(rawDesired.ManagementCluster, nil, opts...)
		rawDesired.Hcx = canonicalizePrivateCloudHcx(rawDesired.Hcx, nil, opts...)
		rawDesired.Nsx = canonicalizePrivateCloudNsx(rawDesired.Nsx, nil, opts...)
		rawDesired.Vcenter = canonicalizePrivateCloudVcenter(rawDesired.Vcenter, nil, opts...)

		return rawDesired, nil
	}
	canonicalDesired := &PrivateCloud{}
	// Prefer the initial value when the desired value resolves to the same
	// resource name (self-link comparison).
	if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) {
		canonicalDesired.Name = rawInitial.Name
	} else {
		canonicalDesired.Name = rawDesired.Name
	}
	canonicalDesired.NetworkConfig = canonicalizePrivateCloudNetworkConfig(rawDesired.NetworkConfig, rawInitial.NetworkConfig, opts...)
	canonicalDesired.ManagementCluster = canonicalizePrivateCloudManagementCluster(rawDesired.ManagementCluster, rawInitial.ManagementCluster, opts...)
if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { canonicalDesired.Description = rawInitial.Description } else { canonicalDesired.Description = rawDesired.Description } if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { canonicalDesired.Project = rawInitial.Project } else { canonicalDesired.Project = rawDesired.Project } if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { canonicalDesired.Location = rawInitial.Location } else { canonicalDesired.Location = rawDesired.Location } return canonicalDesired, nil } func canonicalizePrivateCloudNewState(c *Client, rawNew, rawDesired *PrivateCloud) (*PrivateCloud, error) { rawNew.Name = rawDesired.Name if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { rawNew.CreateTime = rawDesired.CreateTime } else { } if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { rawNew.UpdateTime = rawDesired.UpdateTime } else { } if dcl.IsEmptyValueIndirect(rawNew.DeleteTime) && dcl.IsEmptyValueIndirect(rawDesired.DeleteTime) { rawNew.DeleteTime = rawDesired.DeleteTime } else { } if dcl.IsEmptyValueIndirect(rawNew.ExpireTime) && dcl.IsEmptyValueIndirect(rawDesired.ExpireTime) { rawNew.ExpireTime = rawDesired.ExpireTime } else { } if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { rawNew.State = rawDesired.State } else { } if dcl.IsEmptyValueIndirect(rawNew.NetworkConfig) && dcl.IsEmptyValueIndirect(rawDesired.NetworkConfig) { rawNew.NetworkConfig = rawDesired.NetworkConfig } else { rawNew.NetworkConfig = canonicalizeNewPrivateCloudNetworkConfig(c, rawDesired.NetworkConfig, rawNew.NetworkConfig) } if dcl.IsEmptyValueIndirect(rawNew.ManagementCluster) && dcl.IsEmptyValueIndirect(rawDesired.ManagementCluster) { rawNew.ManagementCluster = rawDesired.ManagementCluster } else { rawNew.ManagementCluster = rawDesired.ManagementCluster } if 
dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { rawNew.Description = rawDesired.Description } else { if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { rawNew.Description = rawDesired.Description } } if dcl.IsEmptyValueIndirect(rawNew.Hcx) && dcl.IsEmptyValueIndirect(rawDesired.Hcx) { rawNew.Hcx = rawDesired.Hcx } else { rawNew.Hcx = canonicalizeNewPrivateCloudHcx(c, rawDesired.Hcx, rawNew.Hcx) } if dcl.IsEmptyValueIndirect(rawNew.Nsx) && dcl.IsEmptyValueIndirect(rawDesired.Nsx) { rawNew.Nsx = rawDesired.Nsx } else { rawNew.Nsx = canonicalizeNewPrivateCloudNsx(c, rawDesired.Nsx, rawNew.Nsx) } if dcl.IsEmptyValueIndirect(rawNew.Vcenter) && dcl.IsEmptyValueIndirect(rawDesired.Vcenter) { rawNew.Vcenter = rawDesired.Vcenter } else { rawNew.Vcenter = canonicalizeNewPrivateCloudVcenter(c, rawDesired.Vcenter, rawNew.Vcenter) } if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { rawNew.Uid = rawDesired.Uid } else { if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { rawNew.Uid = rawDesired.Uid } } rawNew.Project = rawDesired.Project rawNew.Location = rawDesired.Location return rawNew, nil } func canonicalizePrivateCloudNetworkConfig(des, initial *PrivateCloudNetworkConfig, opts ...dcl.ApplyOption) *PrivateCloudNetworkConfig { if des == nil { return initial } if des.empty { return des } if initial == nil { return des } cDes := &PrivateCloudNetworkConfig{} if dcl.StringCanonicalize(des.ManagementCidr, initial.ManagementCidr) || dcl.IsZeroValue(des.ManagementCidr) { cDes.ManagementCidr = initial.ManagementCidr } else { cDes.ManagementCidr = des.ManagementCidr } if dcl.IsZeroValue(des.VmwareEngineNetwork) || (dcl.IsEmptyValueIndirect(des.VmwareEngineNetwork) && dcl.IsEmptyValueIndirect(initial.VmwareEngineNetwork)) { // Desired and initial values are equivalent, so set canonical desired value to initial value. 
cDes.VmwareEngineNetwork = initial.VmwareEngineNetwork } else { cDes.VmwareEngineNetwork = des.VmwareEngineNetwork } return cDes } func canonicalizePrivateCloudNetworkConfigSlice(des, initial []PrivateCloudNetworkConfig, opts ...dcl.ApplyOption) []PrivateCloudNetworkConfig { if dcl.IsEmptyValueIndirect(des) { return initial } if len(des) != len(initial) { items := make([]PrivateCloudNetworkConfig, 0, len(des)) for _, d := range des { cd := canonicalizePrivateCloudNetworkConfig(&d, nil, opts...) if cd != nil { items = append(items, *cd) } } return items } items := make([]PrivateCloudNetworkConfig, 0, len(des)) for i, d := range des { cd := canonicalizePrivateCloudNetworkConfig(&d, &initial[i], opts...) if cd != nil { items = append(items, *cd) } } return items } func canonicalizeNewPrivateCloudNetworkConfig(c *Client, des, nw *PrivateCloudNetworkConfig) *PrivateCloudNetworkConfig { if des == nil { return nw } if nw == nil { if dcl.IsEmptyValueIndirect(des) { c.Config.Logger.Info("Found explicitly empty value for PrivateCloudNetworkConfig while comparing non-nil desired to nil actual. Returning desired object.") return des } return nil } if dcl.StringCanonicalize(des.ManagementCidr, nw.ManagementCidr) { nw.ManagementCidr = des.ManagementCidr } return nw } func canonicalizeNewPrivateCloudNetworkConfigSet(c *Client, des, nw []PrivateCloudNetworkConfig) []PrivateCloudNetworkConfig { if des == nil { return nw } // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. var items []PrivateCloudNetworkConfig for _, d := range des { matchedIndex := -1 for i, n := range nw { if diffs, _ := comparePrivateCloudNetworkConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { matchedIndex = i break } } if matchedIndex != -1 { items = append(items, *canonicalizeNewPrivateCloudNetworkConfig(c, &d, &nw[matchedIndex])) nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) } } // Also include elements in nw that are not matched in des. 
items = append(items, nw...) return items } func canonicalizeNewPrivateCloudNetworkConfigSlice(c *Client, des, nw []PrivateCloudNetworkConfig) []PrivateCloudNetworkConfig { if des == nil { return nw } // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. // Return the original array. if len(des) != len(nw) { return nw } var items []PrivateCloudNetworkConfig for i, d := range des { n := nw[i] items = append(items, *canonicalizeNewPrivateCloudNetworkConfig(c, &d, &n)) } return items } func canonicalizePrivateCloudManagementCluster(des, initial *PrivateCloudManagementCluster, opts ...dcl.ApplyOption) *PrivateCloudManagementCluster { if des == nil { return initial } if des.empty { return des } if initial == nil { return des } cDes := &PrivateCloudManagementCluster{} if dcl.StringCanonicalize(des.ClusterId, initial.ClusterId) || dcl.IsZeroValue(des.ClusterId) { cDes.ClusterId = initial.ClusterId } else { cDes.ClusterId = des.ClusterId } return cDes } func canonicalizePrivateCloudManagementClusterSlice(des, initial []PrivateCloudManagementCluster, opts ...dcl.ApplyOption) []PrivateCloudManagementCluster { if dcl.IsEmptyValueIndirect(des) { return initial } if len(des) != len(initial) { items := make([]PrivateCloudManagementCluster, 0, len(des)) for _, d := range des { cd := canonicalizePrivateCloudManagementCluster(&d, nil, opts...) if cd != nil { items = append(items, *cd) } } return items } items := make([]PrivateCloudManagementCluster, 0, len(des)) for i, d := range des { cd := canonicalizePrivateCloudManagementCluster(&d, &initial[i], opts...) if cd != nil { items = append(items, *cd) } } return items } func canonicalizeNewPrivateCloudManagementCluster(c *Client, des, nw *PrivateCloudManagementCluster) *PrivateCloudManagementCluster { if des == nil { return nw } if nw == nil { if dcl.IsEmptyValueIndirect(des) { c.Config.Logger.Info("Found explicitly empty value for PrivateCloudManagementCluster while comparing non-nil desired to nil actual. 
Returning desired object.") return des } return nil } if dcl.StringCanonicalize(des.ClusterId, nw.ClusterId) { nw.ClusterId = des.ClusterId } return nw } func canonicalizeNewPrivateCloudManagementClusterSet(c *Client, des, nw []PrivateCloudManagementCluster) []PrivateCloudManagementCluster { if des == nil { return nw } // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. var items []PrivateCloudManagementCluster for _, d := range des { matchedIndex := -1 for i, n := range nw { if diffs, _ := comparePrivateCloudManagementClusterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { matchedIndex = i break } } if matchedIndex != -1 { items = append(items, *canonicalizeNewPrivateCloudManagementCluster(c, &d, &nw[matchedIndex])) nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) } } // Also include elements in nw that are not matched in des. items = append(items, nw...) return items } func canonicalizeNewPrivateCloudManagementClusterSlice(c *Client, des, nw []PrivateCloudManagementCluster) []PrivateCloudManagementCluster { if des == nil { return nw } // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. // Return the original array. 
if len(des) != len(nw) { return nw } var items []PrivateCloudManagementCluster for i, d := range des { n := nw[i] items = append(items, *canonicalizeNewPrivateCloudManagementCluster(c, &d, &n)) } return items } func canonicalizePrivateCloudHcx(des, initial *PrivateCloudHcx, opts ...dcl.ApplyOption) *PrivateCloudHcx { if des == nil { return initial } if des.empty { return des } if initial == nil { return des } cDes := &PrivateCloudHcx{} if dcl.StringCanonicalize(des.InternalIP, initial.InternalIP) || dcl.IsZeroValue(des.InternalIP) { cDes.InternalIP = initial.InternalIP } else { cDes.InternalIP = des.InternalIP } if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { cDes.Version = initial.Version } else { cDes.Version = des.Version } if dcl.StringCanonicalize(des.Fqdn, initial.Fqdn) || dcl.IsZeroValue(des.Fqdn) { cDes.Fqdn = initial.Fqdn } else { cDes.Fqdn = des.Fqdn } return cDes } func canonicalizePrivateCloudHcxSlice(des, initial []PrivateCloudHcx, opts ...dcl.ApplyOption) []PrivateCloudHcx { if dcl.IsEmptyValueIndirect(des) { return initial } if len(des) != len(initial) { items := make([]PrivateCloudHcx, 0, len(des)) for _, d := range des { cd := canonicalizePrivateCloudHcx(&d, nil, opts...) if cd != nil { items = append(items, *cd) } } return items } items := make([]PrivateCloudHcx, 0, len(des)) for i, d := range des { cd := canonicalizePrivateCloudHcx(&d, &initial[i], opts...) if cd != nil { items = append(items, *cd) } } return items } func canonicalizeNewPrivateCloudHcx(c *Client, des, nw *PrivateCloudHcx) *PrivateCloudHcx { if des == nil { return nw } if nw == nil { if dcl.IsEmptyValueIndirect(des) { c.Config.Logger.Info("Found explicitly empty value for PrivateCloudHcx while comparing non-nil desired to nil actual. 
Returning desired object.") return des } return nil } if dcl.StringCanonicalize(des.InternalIP, nw.InternalIP) { nw.InternalIP = des.InternalIP } if dcl.StringCanonicalize(des.Version, nw.Version) { nw.Version = des.Version } if dcl.StringCanonicalize(des.Fqdn, nw.Fqdn) { nw.Fqdn = des.Fqdn } return nw } func canonicalizeNewPrivateCloudHcxSet(c *Client, des, nw []PrivateCloudHcx) []PrivateCloudHcx { if des == nil { return nw } // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. var items []PrivateCloudHcx for _, d := range des { matchedIndex := -1 for i, n := range nw { if diffs, _ := comparePrivateCloudHcxNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { matchedIndex = i break } } if matchedIndex != -1 { items = append(items, *canonicalizeNewPrivateCloudHcx(c, &d, &nw[matchedIndex])) nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) } } // Also include elements in nw that are not matched in des. items = append(items, nw...) return items } func canonicalizeNewPrivateCloudHcxSlice(c *Client, des, nw []PrivateCloudHcx) []PrivateCloudHcx { if des == nil { return nw } // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. // Return the original array. 
if len(des) != len(nw) { return nw } var items []PrivateCloudHcx for i, d := range des { n := nw[i] items = append(items, *canonicalizeNewPrivateCloudHcx(c, &d, &n)) } return items } func canonicalizePrivateCloudNsx(des, initial *PrivateCloudNsx, opts ...dcl.ApplyOption) *PrivateCloudNsx { if des == nil { return initial } if des.empty { return des } if initial == nil { return des } cDes := &PrivateCloudNsx{} if dcl.StringCanonicalize(des.InternalIP, initial.InternalIP) || dcl.IsZeroValue(des.InternalIP) { cDes.InternalIP = initial.InternalIP } else { cDes.InternalIP = des.InternalIP } if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { cDes.Version = initial.Version } else { cDes.Version = des.Version } if dcl.StringCanonicalize(des.Fqdn, initial.Fqdn) || dcl.IsZeroValue(des.Fqdn) { cDes.Fqdn = initial.Fqdn } else { cDes.Fqdn = des.Fqdn } return cDes } func canonicalizePrivateCloudNsxSlice(des, initial []PrivateCloudNsx, opts ...dcl.ApplyOption) []PrivateCloudNsx { if dcl.IsEmptyValueIndirect(des) { return initial } if len(des) != len(initial) { items := make([]PrivateCloudNsx, 0, len(des)) for _, d := range des { cd := canonicalizePrivateCloudNsx(&d, nil, opts...) if cd != nil { items = append(items, *cd) } } return items } items := make([]PrivateCloudNsx, 0, len(des)) for i, d := range des { cd := canonicalizePrivateCloudNsx(&d, &initial[i], opts...) if cd != nil { items = append(items, *cd) } } return items } func canonicalizeNewPrivateCloudNsx(c *Client, des, nw *PrivateCloudNsx) *PrivateCloudNsx { if des == nil { return nw } if nw == nil { if dcl.IsEmptyValueIndirect(des) { c.Config.Logger.Info("Found explicitly empty value for PrivateCloudNsx while comparing non-nil desired to nil actual. 
Returning desired object.") return des } return nil } if dcl.StringCanonicalize(des.InternalIP, nw.InternalIP) { nw.InternalIP = des.InternalIP } if dcl.StringCanonicalize(des.Version, nw.Version) { nw.Version = des.Version } if dcl.StringCanonicalize(des.Fqdn, nw.Fqdn) { nw.Fqdn = des.Fqdn } return nw } func canonicalizeNewPrivateCloudNsxSet(c *Client, des, nw []PrivateCloudNsx) []PrivateCloudNsx { if des == nil { return nw } // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. var items []PrivateCloudNsx for _, d := range des { matchedIndex := -1 for i, n := range nw { if diffs, _ := comparePrivateCloudNsxNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { matchedIndex = i break } } if matchedIndex != -1 { items = append(items, *canonicalizeNewPrivateCloudNsx(c, &d, &nw[matchedIndex])) nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) } } // Also include elements in nw that are not matched in des. items = append(items, nw...) return items } func canonicalizeNewPrivateCloudNsxSlice(c *Client, des, nw []PrivateCloudNsx) []PrivateCloudNsx { if des == nil { return nw } // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. // Return the original array. 
if len(des) != len(nw) { return nw } var items []PrivateCloudNsx for i, d := range des { n := nw[i] items = append(items, *canonicalizeNewPrivateCloudNsx(c, &d, &n)) } return items } func canonicalizePrivateCloudVcenter(des, initial *PrivateCloudVcenter, opts ...dcl.ApplyOption) *PrivateCloudVcenter { if des == nil { return initial } if des.empty { return des } if initial == nil { return des } cDes := &PrivateCloudVcenter{} if dcl.StringCanonicalize(des.InternalIP, initial.InternalIP) || dcl.IsZeroValue(des.InternalIP) { cDes.InternalIP = initial.InternalIP } else { cDes.InternalIP = des.InternalIP } if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { cDes.Version = initial.Version } else { cDes.Version = des.Version } if dcl.StringCanonicalize(des.Fqdn, initial.Fqdn) || dcl.IsZeroValue(des.Fqdn) { cDes.Fqdn = initial.Fqdn } else { cDes.Fqdn = des.Fqdn } return cDes } func canonicalizePrivateCloudVcenterSlice(des, initial []PrivateCloudVcenter, opts ...dcl.ApplyOption) []PrivateCloudVcenter { if dcl.IsEmptyValueIndirect(des) { return initial } if len(des) != len(initial) { items := make([]PrivateCloudVcenter, 0, len(des)) for _, d := range des { cd := canonicalizePrivateCloudVcenter(&d, nil, opts...) if cd != nil { items = append(items, *cd) } } return items } items := make([]PrivateCloudVcenter, 0, len(des)) for i, d := range des { cd := canonicalizePrivateCloudVcenter(&d, &initial[i], opts...) if cd != nil { items = append(items, *cd) } } return items } func canonicalizeNewPrivateCloudVcenter(c *Client, des, nw *PrivateCloudVcenter) *PrivateCloudVcenter { if des == nil { return nw } if nw == nil { if dcl.IsEmptyValueIndirect(des) { c.Config.Logger.Info("Found explicitly empty value for PrivateCloudVcenter while comparing non-nil desired to nil actual. 
Returning desired object.") return des } return nil } if dcl.StringCanonicalize(des.InternalIP, nw.InternalIP) { nw.InternalIP = des.InternalIP } if dcl.StringCanonicalize(des.Version, nw.Version) { nw.Version = des.Version } if dcl.StringCanonicalize(des.Fqdn, nw.Fqdn) { nw.Fqdn = des.Fqdn } return nw } func canonicalizeNewPrivateCloudVcenterSet(c *Client, des, nw []PrivateCloudVcenter) []PrivateCloudVcenter { if des == nil { return nw } // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. var items []PrivateCloudVcenter for _, d := range des { matchedIndex := -1 for i, n := range nw { if diffs, _ := comparePrivateCloudVcenterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { matchedIndex = i break } } if matchedIndex != -1 { items = append(items, *canonicalizeNewPrivateCloudVcenter(c, &d, &nw[matchedIndex])) nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) } } // Also include elements in nw that are not matched in des. items = append(items, nw...) return items } func canonicalizeNewPrivateCloudVcenterSlice(c *Client, des, nw []PrivateCloudVcenter) []PrivateCloudVcenter { if des == nil { return nw } // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. // Return the original array. if len(des) != len(nw) { return nw } var items []PrivateCloudVcenter for i, d := range des { n := nw[i] items = append(items, *canonicalizeNewPrivateCloudVcenter(c, &d, &n)) } return items } // The differ returns a list of diffs, along with a list of operations that should be taken // to remedy them. Right now, it does not attempt to consolidate operations - if several // fields can be fixed with a patch update, it will perform the patch several times. // Diffs on some fields will be ignored if the `desired` state has an empty (nil) // value. This empty value indicates that the user does not care about the state for // the field. Empty fields on the actual object will cause diffs. 
// diffPrivateCloud compares the canonicalized desired and actual states field
// by field and returns the accumulated FieldDiffs. Output-only fields are
// diffed with OutputOnly set; mutable fields trigger the update operation,
// while immutable ones require recreate.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
func diffPrivateCloud(c *Client, desired, actual *PrivateCloud, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}

	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)

	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DeleteTime, actual.DeleteTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeleteTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ExpireTime, actual.ExpireTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExpireTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.NetworkConfig, actual.NetworkConfig, dcl.DiffInfo{ObjectFunction: comparePrivateCloudNetworkConfigNewStyle, EmptyObject: EmptyPrivateCloudNetworkConfig, OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("NetworkConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ManagementCluster, actual.ManagementCluster, dcl.DiffInfo{ObjectFunction: comparePrivateCloudManagementClusterNewStyle, EmptyObject: EmptyPrivateCloudManagementCluster, OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("ManagementCluster")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Hcx, actual.Hcx, dcl.DiffInfo{OutputOnly: true, ObjectFunction: comparePrivateCloudHcxNewStyle, EmptyObject: EmptyPrivateCloudHcx, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Hcx")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Nsx, actual.Nsx, dcl.DiffInfo{OutputOnly: true, ObjectFunction: comparePrivateCloudNsxNewStyle, EmptyObject: EmptyPrivateCloudNsx, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Nsx")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Vcenter, actual.Vcenter, dcl.DiffInfo{OutputOnly: true, ObjectFunction: comparePrivateCloudVcenterNewStyle, EmptyObject: EmptyPrivateCloudVcenter, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Vcenter")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}

// comparePrivateCloudNetworkConfigNewStyle diffs two NetworkConfig values.
// Either argument may be a pointer or a value; anything else is an error.
func comparePrivateCloudNetworkConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*PrivateCloudNetworkConfig)
	if !ok {
		desiredNotPointer, ok := d.(PrivateCloudNetworkConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a PrivateCloudNetworkConfig or *PrivateCloudNetworkConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*PrivateCloudNetworkConfig)
	if !ok {
		actualNotPointer, ok := a.(PrivateCloudNetworkConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a PrivateCloudNetworkConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.ManagementCidr, actual.ManagementCidr, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("ManagementCidr")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.VmwareEngineNetwork, actual.VmwareEngineNetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("VmwareEngineNetwork")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.VmwareEngineNetworkCanonical, actual.VmwareEngineNetworkCanonical, dcl.DiffInfo{OutputOnly: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VmwareEngineNetworkCanonical")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
}
	if ds, err := dcl.Diff(desired.ManagementIPAddressLayoutVersion, actual.ManagementIPAddressLayoutVersion, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagementIpAddressLayoutVersion")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// comparePrivateCloudManagementClusterNewStyle diffs two ManagementCluster
// values. Either argument may be a pointer or a value; anything else is an
// error.
func comparePrivateCloudManagementClusterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*PrivateCloudManagementCluster)
	if !ok {
		desiredNotPointer, ok := d.(PrivateCloudManagementCluster)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a PrivateCloudManagementCluster or *PrivateCloudManagementCluster", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*PrivateCloudManagementCluster)
	if !ok {
		actualNotPointer, ok := a.(PrivateCloudManagementCluster)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a PrivateCloudManagementCluster", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.ClusterId, actual.ClusterId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("ClusterId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// comparePrivateCloudHcxNewStyle diffs two Hcx values. Either argument may be
// a pointer or a value; anything else is an error.
func comparePrivateCloudHcxNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*PrivateCloudHcx)
	if !ok {
		desiredNotPointer, ok := d.(PrivateCloudHcx)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a PrivateCloudHcx or *PrivateCloudHcx", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*PrivateCloudHcx)
	if !ok {
		actualNotPointer, ok := a.(PrivateCloudHcx)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a PrivateCloudHcx", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.InternalIP, actual.InternalIP, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("InternalIp")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Fqdn, actual.Fqdn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("Fqdn")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// comparePrivateCloudNsxNewStyle diffs two Nsx values. Either argument may be
// a pointer or a value; anything else is an error.
func comparePrivateCloudNsxNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*PrivateCloudNsx)
	if !ok {
		desiredNotPointer, ok := d.(PrivateCloudNsx)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a PrivateCloudNsx or *PrivateCloudNsx", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*PrivateCloudNsx)
	if !ok {
		actualNotPointer, ok := a.(PrivateCloudNsx)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a PrivateCloudNsx", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.InternalIP, actual.InternalIP, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("InternalIp")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Fqdn, actual.Fqdn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("Fqdn")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
} return diffs, nil } func comparePrivateCloudVcenterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { var diffs []*dcl.FieldDiff desired, ok := d.(*PrivateCloudVcenter) if !ok { desiredNotPointer, ok := d.(PrivateCloudVcenter) if !ok { return nil, fmt.Errorf("obj %v is not a PrivateCloudVcenter or *PrivateCloudVcenter", d) } desired = &desiredNotPointer } actual, ok := a.(*PrivateCloudVcenter) if !ok { actualNotPointer, ok := a.(PrivateCloudVcenter) if !ok { return nil, fmt.Errorf("obj %v is not a PrivateCloudVcenter", a) } actual = &actualNotPointer } if ds, err := dcl.Diff(desired.InternalIP, actual.InternalIP, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("InternalIp")); len(ds) != 0 || err != nil { if err != nil { return nil, err } diffs = append(diffs, ds...) } if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { if err != nil { return nil, err } diffs = append(diffs, ds...) } if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { if err != nil { return nil, err } diffs = append(diffs, ds...) } if ds, err := dcl.Diff(desired.Fqdn, actual.Fqdn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updatePrivateCloudUpdatePrivateCloudOperation")}, fn.AddNest("Fqdn")); len(ds) != 0 || err != nil { if err != nil { return nil, err } diffs = append(diffs, ds...) } return diffs, nil } // urlNormalized returns a copy of the resource struct with values normalized // for URL substitutions. For instance, it converts long-form self-links to // short-form so they can be substituted in. 
func (r *PrivateCloud) urlNormalized() *PrivateCloud {
	normalized := dcl.Copy(*r).(PrivateCloud)
	// Only string identity/descriptive fields are normalized; nested objects
	// and timestamps retain the values produced by dcl.Copy.
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.Description = dcl.SelfLinkToName(r.Description)
	normalized.Uid = dcl.SelfLinkToName(r.Uid)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	normalized.Location = dcl.SelfLinkToName(r.Location)
	return &normalized
}

// updateURL builds the request URL for the named update operation by
// substituting the normalized identity fields of r into the URL template.
func (r *PrivateCloud) updateURL(userBasePath, updateName string) (string, error) {
	nr := r.urlNormalized()
	if updateName == "UpdatePrivateCloud" {
		fields := map[string]interface{}{
			"project":  dcl.ValueOrEmptyString(nr.Project),
			"location": dcl.ValueOrEmptyString(nr.Location),
			"name":     dcl.ValueOrEmptyString(nr.Name),
		}
		return dcl.URL("projects/{{project}}/locations/{{location}}/privateClouds/{{name}}", nr.basePath(), userBasePath, fields), nil
	}

	return "", fmt.Errorf("unknown update name: %s", updateName)
}

// marshal encodes the PrivateCloud resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *PrivateCloud) marshal(c *Client) ([]byte, error) {
	m, err := expandPrivateCloud(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling PrivateCloud: %w", err)
	}

	return json.Marshal(m)
}

// unmarshalPrivateCloud decodes JSON responses into the PrivateCloud resource schema.
func unmarshalPrivateCloud(b []byte, c *Client, res *PrivateCloud) (*PrivateCloud, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapPrivateCloud(m, c, res)
}

// unmarshalMapPrivateCloud flattens an already-decoded JSON object into the
// PrivateCloud resource schema, erroring on an empty object.
func unmarshalMapPrivateCloud(m map[string]interface{}, c *Client, res *PrivateCloud) (*PrivateCloud, error) {
	flattened := flattenPrivateCloud(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}

// expandPrivateCloud expands PrivateCloud into a JSON request object.
func expandPrivateCloud(c *Client, f *PrivateCloud) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	// Identity fields (name/project/location) are sent via URL substitution,
	// so the generator emits dcl.EmptyValue() placeholders for them here.
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["name"] = v
	}
	if v, err := expandPrivateCloudNetworkConfig(c, f.NetworkConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding NetworkConfig into networkConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["networkConfig"] = v
	}
	if v, err := expandPrivateCloudManagementCluster(c, f.ManagementCluster, res); err != nil {
		return nil, fmt.Errorf("error expanding ManagementCluster into managementCluster: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["managementCluster"] = v
	}
	if v := f.Description; dcl.ValueShouldBeSent(v) {
		m["description"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["project"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Location into location: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["location"] = v
	}

	return m, nil
}

// flattenPrivateCloud flattens PrivateCloud from a JSON request object into the
// PrivateCloud type.
func flattenPrivateCloud(c *Client, i interface{}, res *PrivateCloud) *PrivateCloud {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if len(m) == 0 {
		return nil
	}

	resultRes := &PrivateCloud{}
	resultRes.Name = dcl.FlattenString(m["name"])
	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
	resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
	resultRes.DeleteTime = dcl.FlattenString(m["deleteTime"])
	resultRes.ExpireTime = dcl.FlattenString(m["expireTime"])
	resultRes.State = flattenPrivateCloudStateEnum(m["state"])
	resultRes.NetworkConfig = flattenPrivateCloudNetworkConfig(c, m["networkConfig"], res)
	resultRes.ManagementCluster = flattenPrivateCloudManagementCluster(c, m["managementCluster"], res)
	resultRes.Description = dcl.FlattenString(m["description"])
	resultRes.Hcx = flattenPrivateCloudHcx(c, m["hcx"], res)
	resultRes.Nsx = flattenPrivateCloudNsx(c, m["nsx"], res)
	resultRes.Vcenter = flattenPrivateCloudVcenter(c, m["vcenter"], res)
	resultRes.Uid = dcl.FlattenString(m["uid"])
	resultRes.Project = dcl.FlattenString(m["project"])
	resultRes.Location = dcl.FlattenString(m["location"])

	return resultRes
}

// expandPrivateCloudNetworkConfigMap expands the contents of PrivateCloudNetworkConfig into a JSON
// request object.
func expandPrivateCloudNetworkConfigMap(c *Client, f map[string]PrivateCloudNetworkConfig, res *PrivateCloud) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandPrivateCloudNetworkConfig(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries that expand to nil (empty objects) are omitted from the map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandPrivateCloudNetworkConfigSlice expands the contents of PrivateCloudNetworkConfig into a JSON
// request object.
func expandPrivateCloudNetworkConfigSlice(c *Client, f []PrivateCloudNetworkConfig, res *PrivateCloud) ([]map[string]interface{}, error) { if f == nil { return nil, nil } items := []map[string]interface{}{} for _, item := range f { i, err := expandPrivateCloudNetworkConfig(c, &item, res) if err != nil { return nil, err } items = append(items, i) } return items, nil } // flattenPrivateCloudNetworkConfigMap flattens the contents of PrivateCloudNetworkConfig from a JSON // response object. func flattenPrivateCloudNetworkConfigMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudNetworkConfig { a, ok := i.(map[string]interface{}) if !ok { return map[string]PrivateCloudNetworkConfig{} } if len(a) == 0 { return map[string]PrivateCloudNetworkConfig{} } items := make(map[string]PrivateCloudNetworkConfig) for k, item := range a { items[k] = *flattenPrivateCloudNetworkConfig(c, item.(map[string]interface{}), res) } return items } // flattenPrivateCloudNetworkConfigSlice flattens the contents of PrivateCloudNetworkConfig from a JSON // response object. func flattenPrivateCloudNetworkConfigSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudNetworkConfig { a, ok := i.([]interface{}) if !ok { return []PrivateCloudNetworkConfig{} } if len(a) == 0 { return []PrivateCloudNetworkConfig{} } items := make([]PrivateCloudNetworkConfig, 0, len(a)) for _, item := range a { items = append(items, *flattenPrivateCloudNetworkConfig(c, item.(map[string]interface{}), res)) } return items } // expandPrivateCloudNetworkConfig expands an instance of PrivateCloudNetworkConfig into a JSON // request object. 
func expandPrivateCloudNetworkConfig(c *Client, f *PrivateCloudNetworkConfig, res *PrivateCloud) (map[string]interface{}, error) {
	// An empty (or nil) struct expands to nil so the field is omitted from requests.
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.ManagementCidr; !dcl.IsEmptyValueIndirect(v) {
		m["managementCidr"] = v
	}
	if v := f.VmwareEngineNetwork; !dcl.IsEmptyValueIndirect(v) {
		m["vmwareEngineNetwork"] = v
	}

	return m, nil
}

// flattenPrivateCloudNetworkConfig flattens an instance of PrivateCloudNetworkConfig from a JSON
// response object.
func flattenPrivateCloudNetworkConfig(c *Client, i interface{}, res *PrivateCloud) *PrivateCloudNetworkConfig {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &PrivateCloudNetworkConfig{}

	// Empty JSON objects flatten to the shared Empty sentinel rather than a
	// freshly-populated struct.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyPrivateCloudNetworkConfig
	}
	r.ManagementCidr = dcl.FlattenString(m["managementCidr"])
	r.VmwareEngineNetwork = dcl.FlattenString(m["vmwareEngineNetwork"])
	r.VmwareEngineNetworkCanonical = dcl.FlattenString(m["vmwareEngineNetworkCanonical"])
	r.ManagementIPAddressLayoutVersion = dcl.FlattenInteger(m["managementIpAddressLayoutVersion"])

	return r
}

// expandPrivateCloudManagementClusterMap expands the contents of PrivateCloudManagementCluster into a JSON
// request object.
func expandPrivateCloudManagementClusterMap(c *Client, f map[string]PrivateCloudManagementCluster, res *PrivateCloud) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandPrivateCloudManagementCluster(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries that expand to nil (empty objects) are omitted from the map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandPrivateCloudManagementClusterSlice expands the contents of PrivateCloudManagementCluster into a JSON
// request object.
func expandPrivateCloudManagementClusterSlice(c *Client, f []PrivateCloudManagementCluster, res *PrivateCloud) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandPrivateCloudManagementCluster(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenPrivateCloudManagementClusterMap flattens the contents of PrivateCloudManagementCluster from a JSON
// response object.
func flattenPrivateCloudManagementClusterMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudManagementCluster {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]PrivateCloudManagementCluster{}
	}

	if len(a) == 0 {
		return map[string]PrivateCloudManagementCluster{}
	}

	items := make(map[string]PrivateCloudManagementCluster)
	for k, item := range a {
		items[k] = *flattenPrivateCloudManagementCluster(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenPrivateCloudManagementClusterSlice flattens the contents of PrivateCloudManagementCluster from a JSON
// response object.
func flattenPrivateCloudManagementClusterSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudManagementCluster {
	a, ok := i.([]interface{})
	if !ok {
		return []PrivateCloudManagementCluster{}
	}

	if len(a) == 0 {
		return []PrivateCloudManagementCluster{}
	}

	items := make([]PrivateCloudManagementCluster, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenPrivateCloudManagementCluster(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandPrivateCloudManagementCluster expands an instance of PrivateCloudManagementCluster into a JSON
// request object.
func expandPrivateCloudManagementCluster(c *Client, f *PrivateCloudManagementCluster, res *PrivateCloud) (map[string]interface{}, error) {
	// An empty (or nil) struct expands to nil so the field is omitted from requests.
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.ClusterId; !dcl.IsEmptyValueIndirect(v) {
		m["clusterId"] = v
	}

	return m, nil
}

// flattenPrivateCloudManagementCluster flattens an instance of PrivateCloudManagementCluster from a JSON
// response object.
func flattenPrivateCloudManagementCluster(c *Client, i interface{}, res *PrivateCloud) *PrivateCloudManagementCluster {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &PrivateCloudManagementCluster{}

	// Empty JSON objects flatten to the shared Empty sentinel.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyPrivateCloudManagementCluster
	}
	r.ClusterId = dcl.FlattenString(m["clusterId"])

	return r
}

// expandPrivateCloudHcxMap expands the contents of PrivateCloudHcx into a JSON
// request object.
func expandPrivateCloudHcxMap(c *Client, f map[string]PrivateCloudHcx, res *PrivateCloud) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandPrivateCloudHcx(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries that expand to nil (empty objects) are omitted from the map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandPrivateCloudHcxSlice expands the contents of PrivateCloudHcx into a JSON
// request object.
func expandPrivateCloudHcxSlice(c *Client, f []PrivateCloudHcx, res *PrivateCloud) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandPrivateCloudHcx(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenPrivateCloudHcxMap flattens the contents of PrivateCloudHcx from a JSON
// response object.
func flattenPrivateCloudHcxMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudHcx {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Non-map input flattens to an empty (non-nil) map.
		return map[string]PrivateCloudHcx{}
	}

	if len(a) == 0 {
		return map[string]PrivateCloudHcx{}
	}

	items := make(map[string]PrivateCloudHcx)
	for k, item := range a {
		items[k] = *flattenPrivateCloudHcx(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenPrivateCloudHcxSlice flattens the contents of PrivateCloudHcx from a JSON
// response object.
func flattenPrivateCloudHcxSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudHcx {
	a, ok := i.([]interface{})
	if !ok {
		// Non-slice input flattens to an empty (non-nil) slice.
		return []PrivateCloudHcx{}
	}

	if len(a) == 0 {
		return []PrivateCloudHcx{}
	}

	items := make([]PrivateCloudHcx, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenPrivateCloudHcx(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandPrivateCloudHcx expands an instance of PrivateCloudHcx into a JSON
// request object.
func expandPrivateCloudHcx(c *Client, f *PrivateCloudHcx, res *PrivateCloud) (map[string]interface{}, error) {
	// An empty (or nil) struct expands to nil so the field is omitted from requests.
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.InternalIP; !dcl.IsEmptyValueIndirect(v) {
		m["internalIp"] = v
	}
	if v := f.Version; !dcl.IsEmptyValueIndirect(v) {
		m["version"] = v
	}
	if v := f.Fqdn; !dcl.IsEmptyValueIndirect(v) {
		m["fqdn"] = v
	}

	return m, nil
}

// flattenPrivateCloudHcx flattens an instance of PrivateCloudHcx from a JSON
// response object.
func flattenPrivateCloudHcx(c *Client, i interface{}, res *PrivateCloud) *PrivateCloudHcx {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &PrivateCloudHcx{}

	// Empty JSON objects flatten to the shared Empty sentinel.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyPrivateCloudHcx
	}
	r.InternalIP = dcl.FlattenString(m["internalIp"])
	r.Version = dcl.FlattenString(m["version"])
	r.State = flattenPrivateCloudHcxStateEnum(m["state"])
	r.Fqdn = dcl.FlattenString(m["fqdn"])

	return r
}

// expandPrivateCloudNsxMap expands the contents of PrivateCloudNsx into a JSON
// request object.
func expandPrivateCloudNsxMap(c *Client, f map[string]PrivateCloudNsx, res *PrivateCloud) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandPrivateCloudNsx(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries that expand to nil (empty objects) are omitted from the map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandPrivateCloudNsxSlice expands the contents of PrivateCloudNsx into a JSON
// request object.
func expandPrivateCloudNsxSlice(c *Client, f []PrivateCloudNsx, res *PrivateCloud) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandPrivateCloudNsx(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenPrivateCloudNsxMap flattens the contents of PrivateCloudNsx from a JSON
// response object.
func flattenPrivateCloudNsxMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudNsx {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]PrivateCloudNsx{}
	}

	if len(a) == 0 {
		return map[string]PrivateCloudNsx{}
	}

	items := make(map[string]PrivateCloudNsx)
	for k, item := range a {
		items[k] = *flattenPrivateCloudNsx(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenPrivateCloudNsxSlice flattens the contents of PrivateCloudNsx from a JSON
// response object.
func flattenPrivateCloudNsxSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudNsx {
	a, ok := i.([]interface{})
	if !ok {
		// Non-slice input flattens to an empty (non-nil) slice.
		return []PrivateCloudNsx{}
	}

	if len(a) == 0 {
		return []PrivateCloudNsx{}
	}

	items := make([]PrivateCloudNsx, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenPrivateCloudNsx(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandPrivateCloudNsx expands an instance of PrivateCloudNsx into a JSON
// request object.
func expandPrivateCloudNsx(c *Client, f *PrivateCloudNsx, res *PrivateCloud) (map[string]interface{}, error) {
	// An empty (or nil) struct expands to nil so the field is omitted from requests.
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.InternalIP; !dcl.IsEmptyValueIndirect(v) {
		m["internalIp"] = v
	}
	if v := f.Version; !dcl.IsEmptyValueIndirect(v) {
		m["version"] = v
	}
	if v := f.Fqdn; !dcl.IsEmptyValueIndirect(v) {
		m["fqdn"] = v
	}

	return m, nil
}

// flattenPrivateCloudNsx flattens an instance of PrivateCloudNsx from a JSON
// response object.
func flattenPrivateCloudNsx(c *Client, i interface{}, res *PrivateCloud) *PrivateCloudNsx {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &PrivateCloudNsx{}

	// Empty JSON objects flatten to the shared Empty sentinel.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyPrivateCloudNsx
	}
	r.InternalIP = dcl.FlattenString(m["internalIp"])
	r.Version = dcl.FlattenString(m["version"])
	r.State = flattenPrivateCloudNsxStateEnum(m["state"])
	r.Fqdn = dcl.FlattenString(m["fqdn"])

	return r
}

// expandPrivateCloudVcenterMap expands the contents of PrivateCloudVcenter into a JSON
// request object.
func expandPrivateCloudVcenterMap(c *Client, f map[string]PrivateCloudVcenter, res *PrivateCloud) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandPrivateCloudVcenter(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries that expand to nil (empty objects) are omitted from the map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandPrivateCloudVcenterSlice expands the contents of PrivateCloudVcenter into a JSON
// request object.
func expandPrivateCloudVcenterSlice(c *Client, f []PrivateCloudVcenter, res *PrivateCloud) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandPrivateCloudVcenter(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenPrivateCloudVcenterMap flattens the contents of PrivateCloudVcenter from a JSON
// response object.
func flattenPrivateCloudVcenterMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudVcenter {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]PrivateCloudVcenter{}
	}

	if len(a) == 0 {
		return map[string]PrivateCloudVcenter{}
	}

	items := make(map[string]PrivateCloudVcenter)
	for k, item := range a {
		items[k] = *flattenPrivateCloudVcenter(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenPrivateCloudVcenterSlice flattens the contents of PrivateCloudVcenter from a JSON
// response object.
func flattenPrivateCloudVcenterSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudVcenter {
	a, ok := i.([]interface{})
	if !ok {
		return []PrivateCloudVcenter{}
	}

	if len(a) == 0 {
		return []PrivateCloudVcenter{}
	}

	items := make([]PrivateCloudVcenter, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenPrivateCloudVcenter(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandPrivateCloudVcenter expands an instance of PrivateCloudVcenter into a JSON
// request object.
func expandPrivateCloudVcenter(c *Client, f *PrivateCloudVcenter, res *PrivateCloud) (map[string]interface{}, error) {
	// An empty (or nil) struct expands to nil so the field is omitted from requests.
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.InternalIP; !dcl.IsEmptyValueIndirect(v) {
		m["internalIp"] = v
	}
	if v := f.Version; !dcl.IsEmptyValueIndirect(v) {
		m["version"] = v
	}
	if v := f.Fqdn; !dcl.IsEmptyValueIndirect(v) {
		m["fqdn"] = v
	}

	return m, nil
}

// flattenPrivateCloudVcenter flattens an instance of PrivateCloudVcenter from a JSON
// response object.
func flattenPrivateCloudVcenter(c *Client, i interface{}, res *PrivateCloud) *PrivateCloudVcenter {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &PrivateCloudVcenter{}

	// Empty JSON objects flatten to the shared Empty sentinel.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyPrivateCloudVcenter
	}
	r.InternalIP = dcl.FlattenString(m["internalIp"])
	r.Version = dcl.FlattenString(m["version"])
	r.State = flattenPrivateCloudVcenterStateEnum(m["state"])
	r.Fqdn = dcl.FlattenString(m["fqdn"])

	return r
}

// flattenPrivateCloudStateEnumMap flattens the contents of PrivateCloudStateEnum from a JSON
// response object.
func flattenPrivateCloudStateEnumMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudStateEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]PrivateCloudStateEnum{}
	}

	if len(a) == 0 {
		return map[string]PrivateCloudStateEnum{}
	}

	items := make(map[string]PrivateCloudStateEnum)
	for k, item := range a {
		items[k] = *flattenPrivateCloudStateEnum(item.(interface{}))
	}

	return items
}

// flattenPrivateCloudStateEnumSlice flattens the contents of PrivateCloudStateEnum from a JSON
// response object.
func flattenPrivateCloudStateEnumSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudStateEnum { a, ok := i.([]interface{}) if !ok { return []PrivateCloudStateEnum{} } if len(a) == 0 { return []PrivateCloudStateEnum{} } items := make([]PrivateCloudStateEnum, 0, len(a)) for _, item := range a { items = append(items, *flattenPrivateCloudStateEnum(item.(interface{}))) } return items } // flattenPrivateCloudStateEnum asserts that an interface is a string, and returns a // pointer to a *PrivateCloudStateEnum with the same value as that string. func flattenPrivateCloudStateEnum(i interface{}) *PrivateCloudStateEnum { s, ok := i.(string) if !ok { return nil } return PrivateCloudStateEnumRef(s) } // flattenPrivateCloudHcxStateEnumMap flattens the contents of PrivateCloudHcxStateEnum from a JSON // response object. func flattenPrivateCloudHcxStateEnumMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudHcxStateEnum { a, ok := i.(map[string]interface{}) if !ok { return map[string]PrivateCloudHcxStateEnum{} } if len(a) == 0 { return map[string]PrivateCloudHcxStateEnum{} } items := make(map[string]PrivateCloudHcxStateEnum) for k, item := range a { items[k] = *flattenPrivateCloudHcxStateEnum(item.(interface{})) } return items } // flattenPrivateCloudHcxStateEnumSlice flattens the contents of PrivateCloudHcxStateEnum from a JSON // response object. func flattenPrivateCloudHcxStateEnumSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudHcxStateEnum { a, ok := i.([]interface{}) if !ok { return []PrivateCloudHcxStateEnum{} } if len(a) == 0 { return []PrivateCloudHcxStateEnum{} } items := make([]PrivateCloudHcxStateEnum, 0, len(a)) for _, item := range a { items = append(items, *flattenPrivateCloudHcxStateEnum(item.(interface{}))) } return items } // flattenPrivateCloudHcxStateEnum asserts that an interface is a string, and returns a // pointer to a *PrivateCloudHcxStateEnum with the same value as that string. 
func flattenPrivateCloudHcxStateEnum(i interface{}) *PrivateCloudHcxStateEnum {
	s, ok := i.(string)
	if !ok {
		// Non-string values (including nil) flatten to nil.
		return nil
	}

	return PrivateCloudHcxStateEnumRef(s)
}

// flattenPrivateCloudNsxStateEnumMap flattens the contents of PrivateCloudNsxStateEnum from a JSON
// response object.
func flattenPrivateCloudNsxStateEnumMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudNsxStateEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]PrivateCloudNsxStateEnum{}
	}

	if len(a) == 0 {
		return map[string]PrivateCloudNsxStateEnum{}
	}

	items := make(map[string]PrivateCloudNsxStateEnum)
	for k, item := range a {
		items[k] = *flattenPrivateCloudNsxStateEnum(item.(interface{}))
	}

	return items
}

// flattenPrivateCloudNsxStateEnumSlice flattens the contents of PrivateCloudNsxStateEnum from a JSON
// response object.
func flattenPrivateCloudNsxStateEnumSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudNsxStateEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []PrivateCloudNsxStateEnum{}
	}

	if len(a) == 0 {
		return []PrivateCloudNsxStateEnum{}
	}

	items := make([]PrivateCloudNsxStateEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenPrivateCloudNsxStateEnum(item.(interface{})))
	}

	return items
}

// flattenPrivateCloudNsxStateEnum asserts that an interface is a string, and returns a
// pointer to a *PrivateCloudNsxStateEnum with the same value as that string.
func flattenPrivateCloudNsxStateEnum(i interface{}) *PrivateCloudNsxStateEnum {
	s, ok := i.(string)
	if !ok {
		// Non-string values (including nil) flatten to nil.
		return nil
	}

	return PrivateCloudNsxStateEnumRef(s)
}

// flattenPrivateCloudVcenterStateEnumMap flattens the contents of PrivateCloudVcenterStateEnum from a JSON
// response object.
func flattenPrivateCloudVcenterStateEnumMap(c *Client, i interface{}, res *PrivateCloud) map[string]PrivateCloudVcenterStateEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]PrivateCloudVcenterStateEnum{}
	}

	if len(a) == 0 {
		return map[string]PrivateCloudVcenterStateEnum{}
	}

	items := make(map[string]PrivateCloudVcenterStateEnum)
	for k, item := range a {
		items[k] = *flattenPrivateCloudVcenterStateEnum(item.(interface{}))
	}

	return items
}

// flattenPrivateCloudVcenterStateEnumSlice flattens the contents of PrivateCloudVcenterStateEnum from a JSON
// response object.
func flattenPrivateCloudVcenterStateEnumSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudVcenterStateEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []PrivateCloudVcenterStateEnum{}
	}

	if len(a) == 0 {
		return []PrivateCloudVcenterStateEnum{}
	}

	items := make([]PrivateCloudVcenterStateEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenPrivateCloudVcenterStateEnum(item.(interface{})))
	}

	return items
}

// flattenPrivateCloudVcenterStateEnum asserts that an interface is a string, and returns a
// pointer to a *PrivateCloudVcenterStateEnum with the same value as that string.
func flattenPrivateCloudVcenterStateEnum(i interface{}) *PrivateCloudVcenterStateEnum {
	s, ok := i.(string)
	if !ok {
		// Non-string values (including nil) flatten to nil.
		return nil
	}

	return PrivateCloudVcenterStateEnumRef(s)
}

// This function returns a matcher that checks whether a serialized resource matches this resource
// in its parameters (as defined by the fields in a Get, which definitionally define resource
// identity). This is useful in extracting the element from a List call.
func (r *PrivateCloud) matcher(c *Client) func([]byte) bool {
	return func(b []byte) bool {
		cr, err := unmarshalPrivateCloud(b, c, r)
		if err != nil {
			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
			return false
		}
		nr := r.urlNormalized()
		ncr := cr.urlNormalized()
		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)

		// Identity fields compare equal when both are nil, unequal when only
		// one is nil, and by dereferenced value otherwise.
		if nr.Project == nil && ncr.Project == nil {
			c.Config.Logger.Info("Both Project fields null - considering equal.")
		} else if nr.Project == nil || ncr.Project == nil {
			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
			return false
		} else if *nr.Project != *ncr.Project {
			return false
		}
		if nr.Location == nil && ncr.Location == nil {
			c.Config.Logger.Info("Both Location fields null - considering equal.")
		} else if nr.Location == nil || ncr.Location == nil {
			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
			return false
		} else if *nr.Location != *ncr.Location {
			return false
		}
		if nr.Name == nil && ncr.Name == nil {
			c.Config.Logger.Info("Both Name fields null - considering equal.")
		} else if nr.Name == nil || ncr.Name == nil {
			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
			return false
		} else if *nr.Name != *ncr.Name {
			return false
		}
		return true
	}
}

// privateCloudDiff describes one required change to bring the actual resource
// in line with the desired state: either a full recreate or a single update
// operation.
type privateCloudDiff struct {
	// The diff should include one or the other of RequiresRecreate or UpdateOp.
	RequiresRecreate bool
	UpdateOp         privateCloudApiOperation

	FieldName string // used for error logging
}

// convertFieldDiffsToPrivateCloudDiffs groups field-level diffs by the API
// operation that resolves them and converts each group into a privateCloudDiff.
func convertFieldDiffsToPrivateCloudDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]privateCloudDiff, error) {
	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
	// Map each operation name to the field diffs associated with it.
	for _, fd := range fds {
		for _, ro := range fd.ResultingOperation {
			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
				fieldDiffs = append(fieldDiffs, fd)
				opNamesToFieldDiffs[ro] = fieldDiffs
			} else {
				config.Logger.Infof("%s required due to diff: %v", ro, fd)
				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
			}
		}
	}
	var diffs []privateCloudDiff
	// For each operation name, create a privateCloudDiff which contains the operation.
	for opName, fieldDiffs := range opNamesToFieldDiffs {
		// Use the first field diff's field name for logging required recreate error.
		diff := privateCloudDiff{FieldName: fieldDiffs[0].FieldName}
		if opName == "Recreate" {
			diff.RequiresRecreate = true
		} else {
			apiOp, err := convertOpNameToPrivateCloudApiOperation(opName, fieldDiffs, opts...)
			if err != nil {
				return diffs, err
			}
			diff.UpdateOp = apiOp
		}
		diffs = append(diffs, diff)
	}
	return diffs, nil
}

// convertOpNameToPrivateCloudApiOperation maps an operation name produced by
// the diff engine to its concrete API operation; unknown names are an error.
func convertOpNameToPrivateCloudApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (privateCloudApiOperation, error) {
	switch opName {
	case "updatePrivateCloudUpdatePrivateCloudOperation":
		return &updatePrivateCloudUpdatePrivateCloudOperation{FieldDiffs: fieldDiffs}, nil

	default:
		return nil, fmt.Errorf("no such operation with name: %v", opName)
	}
}

// extractPrivateCloudFields runs the per-subfield extract hooks against a
// non-nil copy of each nested object and writes the result back only when it
// is non-empty.
func extractPrivateCloudFields(r *PrivateCloud) error {
	vNetworkConfig := r.NetworkConfig
	if vNetworkConfig == nil {
		// note: explicitly not the empty object.
		vNetworkConfig = &PrivateCloudNetworkConfig{}
	}
	if err := extractPrivateCloudNetworkConfigFields(r, vNetworkConfig); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vNetworkConfig) {
		r.NetworkConfig = vNetworkConfig
	}
	vManagementCluster := r.ManagementCluster
	if vManagementCluster == nil {
		// note: explicitly not the empty object.
		vManagementCluster = &PrivateCloudManagementCluster{}
	}
	if err := extractPrivateCloudManagementClusterFields(r, vManagementCluster); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vManagementCluster) {
		r.ManagementCluster = vManagementCluster
	}
	vHcx := r.Hcx
	if vHcx == nil {
		// note: explicitly not the empty object.
		vHcx = &PrivateCloudHcx{}
	}
	if err := extractPrivateCloudHcxFields(r, vHcx); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vHcx) {
		r.Hcx = vHcx
	}
	vNsx := r.Nsx
	if vNsx == nil {
		// note: explicitly not the empty object.
		vNsx = &PrivateCloudNsx{}
	}
	if err := extractPrivateCloudNsxFields(r, vNsx); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vNsx) {
		r.Nsx = vNsx
	}
	vVcenter := r.Vcenter
	if vVcenter == nil {
		// note: explicitly not the empty object.
		vVcenter = &PrivateCloudVcenter{}
	}
	if err := extractPrivateCloudVcenterFields(r, vVcenter); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vVcenter) {
		r.Vcenter = vVcenter
	}
	return nil
}

// The following per-subfield extract hooks are intentionally empty; the
// generator emits them so resource-specific logic can be injected.
func extractPrivateCloudNetworkConfigFields(r *PrivateCloud, o *PrivateCloudNetworkConfig) error {
	return nil
}
func extractPrivateCloudManagementClusterFields(r *PrivateCloud, o *PrivateCloudManagementCluster) error {
	return nil
}
func extractPrivateCloudHcxFields(r *PrivateCloud, o *PrivateCloudHcx) error {
	return nil
}
func extractPrivateCloudNsxFields(r *PrivateCloud, o *PrivateCloudNsx) error {
	return nil
}
func extractPrivateCloudVcenterFields(r *PrivateCloud, o *PrivateCloudVcenter) error {
	return nil
}

// postReadExtractPrivateCloudFields mirrors extractPrivateCloudFields but runs
// the post-read variants of the hooks after the resource has been fetched.
func postReadExtractPrivateCloudFields(r *PrivateCloud) error {
	vNetworkConfig := r.NetworkConfig
	if vNetworkConfig == nil {
		// note: explicitly not the empty object.
		vNetworkConfig = &PrivateCloudNetworkConfig{}
	}
	if err := postReadExtractPrivateCloudNetworkConfigFields(r, vNetworkConfig); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vNetworkConfig) {
		r.NetworkConfig = vNetworkConfig
	}
	vManagementCluster := r.ManagementCluster
	if vManagementCluster == nil {
		// note: explicitly not the empty object.
		vManagementCluster = &PrivateCloudManagementCluster{}
	}
	if err := postReadExtractPrivateCloudManagementClusterFields(r, vManagementCluster); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vManagementCluster) {
		r.ManagementCluster = vManagementCluster
	}
	vHcx := r.Hcx
	if vHcx == nil {
		// note: explicitly not the empty object.
		vHcx = &PrivateCloudHcx{}
	}
	if err := postReadExtractPrivateCloudHcxFields(r, vHcx); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vHcx) {
		r.Hcx = vHcx
	}
	vNsx := r.Nsx
	if vNsx == nil {
		// note: explicitly not the empty object.
		vNsx = &PrivateCloudNsx{}
	}
	if err := postReadExtractPrivateCloudNsxFields(r, vNsx); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vNsx) {
		r.Nsx = vNsx
	}
	vVcenter := r.Vcenter
	if vVcenter == nil {
		// note: explicitly not the empty object.
		vVcenter = &PrivateCloudVcenter{}
	}
	if err := postReadExtractPrivateCloudVcenterFields(r, vVcenter); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vVcenter) {
		r.Vcenter = vVcenter
	}
	return nil
}

// Empty post-read hooks, emitted by the generator for future injection.
func postReadExtractPrivateCloudNetworkConfigFields(r *PrivateCloud, o *PrivateCloudNetworkConfig) error {
	return nil
}
func postReadExtractPrivateCloudManagementClusterFields(r *PrivateCloud, o *PrivateCloudManagementCluster) error {
	return nil
}
func postReadExtractPrivateCloudHcxFields(r *PrivateCloud, o *PrivateCloudHcx) error {
	return nil
}
func postReadExtractPrivateCloudNsxFields(r *PrivateCloud, o *PrivateCloudNsx) error {
	return nil
}
func postReadExtractPrivateCloudVcenterFields(r *PrivateCloud, o *PrivateCloudVcenter) error {
	return nil
}
package pathfileops

import (
	"fmt"
	"testing"
)

// TestFileMgr_ChangePermissionMode_01 round-trips a test file's permission
// mode: set to -rw-rw-rw-, change to -r--r--r--, verify the read-back text
// code, then restore the original mode.
func TestFileMgr_ChangePermissionMode_01(t *testing.T) {

	filePath := "../../filesfortest/modefilesfortest/modeFileTest_01.txt"

	fMgr, err := FileMgr{}.NewFromPathFileNameExtStr(filePath)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(filePath). "+
			"filePathName='%v' Error='%v'", filePath, err.Error())
		return
	}

	basePermission, err := FilePermissionConfig{}.New("-rw-rw-rw-")

	if err != nil {
		t.Errorf("Error returned from FilePermissionConfig{}.New(\"-rw-rw-rw-\"). "+
			"Error='%v'", err.Error())
		return
	}

	// Error deliberately ignored: the text code is only used in messages below.
	basePermissionText, _ := basePermission.GetPermissionTextCode()

	err = fMgr.ChangePermissionMode(basePermission)

	if err != nil {
		t.Errorf("Error returned from fMgr.ChangePermissionMode(basePermission). "+
			"basePermission='%v' Error='%v'", basePermissionText, err.Error())
		return
	}

	requestedNewPerm, err := FilePermissionConfig{}.New("-r--r--r--")

	if err != nil {
		t.Errorf("Error returned from FilePermissionConfig{}.New(\"-r--r--r--\"). "+
			"Error='%v'", err.Error())
		return
	}

	requestedNewPermText, err := requestedNewPerm.GetPermissionTextCode()

	if err != nil {
		t.Errorf("Error returned from requestedNewPerm.GetPermissionTextCode(). "+
			"Error='%v' ", err.Error())
		return
	}

	err = fMgr.ChangePermissionMode(requestedNewPerm)

	if err != nil {
		t.Errorf("Error returned from fMgr.ChangePermissionMode(requestedNewPerm). "+
			"Error='%v'", err.Error())
		return
	}

	actualNewPermCodeText, err := fMgr.GetFilePermissionTextCodes()

	if err != nil {
		t.Errorf("Error returned from #1 fMgr.GetFilePermissionTextCodes(). "+
			"Error='%v'", err.Error())
		return
	}

	if requestedNewPermText != actualNewPermCodeText {
		t.Errorf("Error expected permission='%v'. Instead, permission='%v' ",
			requestedNewPermText, actualNewPermCodeText)
	}

	// Clean-up: restore the base permission so later tests see a known mode.
	err = fMgr.ChangePermissionMode(basePermission)

	if err != nil {
		t.Errorf("Error returned from fMgr.ChangePermissionMode(basePermission). "+
			"basePermission='%v' Error='%v'", basePermissionText, err.Error())
		return
	}

}

// TestFileMgr_ChangePermissionMode_02 expects an error when changing the mode
// of a file that does not exist.
func TestFileMgr_ChangePermissionMode_02(t *testing.T) {

	filePath := "../../filesfortest/modefilesfortest/iDoNotExist.txt"

	fMgr, err := FileMgr{}.NewFromPathFileNameExtStr(filePath)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(filePath). "+
			"filePathName='%v' Error='%v'", filePath, err.Error())
		return
	}

	basePermission, err := FilePermissionConfig{}.New("-rw-rw-rw-")

	if err != nil {
		t.Errorf("Error returned from FilePermissionConfig{}.New(\"-rw-rw-rw-\"). "+
			"Error='%v'", err.Error())
	}

	err = fMgr.ChangePermissionMode(basePermission)

	if err == nil {
		t.Errorf(" Expected error return from fMgr.ChangePermissionMode(basePermission) " +
			"because file does NOT exist. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_ChangePermissionMode_03 expects an error when the permission
// configuration itself is invalid (isInitialized forced to false).
func TestFileMgr_ChangePermissionMode_03(t *testing.T) {

	filePath := "../../filesfortest/modefilesfortest/modeFileTest_01.txt"

	fMgr, err := FileMgr{}.NewFromPathFileNameExtStr(filePath)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(filePath). "+
			"filePathName='%v' Error='%v'", filePath, err.Error())
		return
	}

	basePermission, err := FilePermissionConfig{}.New("-rw-rw-rw-")

	if err != nil {
		t.Errorf("Error returned from FilePermissionConfig{}.New(\"-rw-rw-rw-\"). "+
			"Error='%v'", err.Error())
		return
	}

	// Invalidate the permission config to force the error path.
	basePermission.isInitialized = false

	err = fMgr.ChangePermissionMode(basePermission)

	if err == nil {
		t.Errorf(" Expected error return from fMgr.ChangePermissionMode(basePermission) " +
			"because file does NOT exist. However, NO ERROR WAS RETURNED!")
	}

	return
}

// TestFileMgr_ChangePermissionMode_04 expects an error when the FileMgr
// itself is invalid (isInitialized forced to false).
func TestFileMgr_ChangePermissionMode_04(t *testing.T) {

	filePath := "../../filesfortest/modefilesfortest/iDoNotExist.txt"

	fMgr, err := FileMgr{}.NewFromPathFileNameExtStr(filePath)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(filePath). "+
			"filePathName='%v' Error='%v'", filePath, err.Error())
		return
	}

	basePermission, err := FilePermissionConfig{}.New("-rw-rw-rw-")

	if err != nil {
		t.Errorf("Error returned from FilePermissionConfig{}.New(\"-rw-rw-rw-\"). "+
			"Error='%v'", err.Error())
		return
	}

	// Invalidate the file manager to force the error path.
	fMgr.isInitialized = false

	err = fMgr.ChangePermissionMode(basePermission)

	if err == nil {
		t.Errorf(" Expected error return from fMgr.ChangePermissionMode(basePermission) " +
			"because file does NOT exist. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CloseThisFile_01 expects an error from CloseThisFile() on an
// invalidated FileMgr.
func TestFileMgr_CloseThisFile_01(t *testing.T) {

	fh := FileHelper{}

	testFile := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go")

	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(testFile)

	if err != nil {
		t.Error("Received Error on GetPathFileNameElements Error:", err)
	}

	fileMgr.isInitialized = false

	err = fileMgr.CloseThisFile()

	if err == nil {
		t.Error("Expected error return from fileMgr.CloseThisFile() because " +
			"fileMgr is Invalid. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CloseThisFile_02 verifies CloseThisFile() succeeds when the
// file pointer was never opened.
func TestFileMgr_CloseThisFile_02(t *testing.T) {

	fh := FileHelper{}

	testFile := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go")

	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(testFile)

	if err != nil {
		t.Error("Received Error on GetPathFileNameElements Error:", err)
	}

	err = fileMgr.CloseThisFile()

	if err != nil {
		t.Error("Error: File Pointer is 'nil' and NO ERROR should have been returned!")
	}

}

// TestFileMgr_CopyIn_01 verifies CopyIn copies every name/path component and
// that the copy compares Equal to the source.
func TestFileMgr_CopyIn_01(t *testing.T) {

	fh := FileHelper{}

	commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go")

	expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common")

	fileName := "xt_dirmgr_01_test"

	fileNameExt := "xt_dirmgr_01_test.go"

	extName := ".go"

	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(commonDir)

	if err != nil {
		t.Error("Received Error on GetPathFileNameElements Error:", err)
	}

	fMgr2 := FileMgr{}

	fMgr2.CopyIn(&fileMgr)

	if fMgr2.fileName != fileName {
t.Error(fmt.Sprintf("Expected CopyToThis to return fileName == '%v', instead got: ", fileName), fMgr2.fileName)
	}

	if fMgr2.fileExt != extName {
		t.Error(fmt.Sprintf("Expected CopyToThis to return fileExt == '%v', instead got: ", extName), fMgr2.fileExt)
	}

	if fMgr2.fileNameExt != fileNameExt {
		t.Error(fmt.Sprintf("Expected CopyToThis to return fileNameExt == '%v', instead got: ", fileNameExt), fMgr2.fileNameExt)
	}

	if fMgr2.dMgr.path != expectedDir {
		t.Error(fmt.Sprintf("Expected CopyToThis to return path == '%v', instead got: ", expectedDir), fMgr2.dMgr.path)
	}

	result := fMgr2.Equal(&fileMgr)

	if result != true {
		t.Error("Expected Equal to return 'true' for fMgr2==fileMgr, instead got: ", result)
	}

}

// TestFileMgr_CopyOut_01 verifies CopyOut returns a copy whose name/path
// components match the source and which compares Equal to it.
func TestFileMgr_CopyOut_01(t *testing.T) {

	fh := FileHelper{}

	commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go")

	expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common")

	fileName := "xt_dirmgr_01_test"

	fileNameExt := "xt_dirmgr_01_test.go"

	extName := ".go"

	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(commonDir)

	if err != nil {
		t.Error("Received Error on GetPathFileNameElements Error:", err)
	}

	fMgr2 := fileMgr.CopyOut()

	if fMgr2.fileName != fileName {
		t.Error(fmt.Sprintf("Expected CopyToThis to return fileName == '%v', instead got: ", fileName), fMgr2.fileName)
	}

	if fMgr2.fileExt != extName {
		t.Error(fmt.Sprintf("Expected CopyToThis to return fileExt == '%v', instead got: ", extName), fMgr2.fileExt)
	}

	if fMgr2.fileNameExt != fileNameExt {
		t.Error(fmt.Sprintf("Expected CopyToThis to return fileNameExt == '%v', instead got: ", fileNameExt), fMgr2.fileNameExt)
	}

	if fMgr2.dMgr.path != expectedDir {
		t.Error(fmt.Sprintf("Expected CopyToThis to return path == '%v', instead got: ", expectedDir), fMgr2.dMgr.path)
	}

	result := fMgr2.Equal(&fileMgr)

	if result != true {
		t.Error("Expected Equal to return 'true' for fMgr2==fileMgr, instead got: ", result)
	}

}

// TestFileMgr_CopyFileMgrByIo_01 copies an existing source file to a
// destination by io and verifies the destination exists; the copy is deleted
// on clean-up.
func TestFileMgr_CopyFileMgrByIo_01(t *testing.T) {
	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(adjustedPath). "+
			"adjustedPath='%v' Error='%v'", adjustedPath, err.Error())
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt). "+
			"dMgr.absolutePath='%v' expectedFileNameExt='%v' Error='%v'",
			dMgr.absolutePath, adjustedPath, err.Error())
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(rawDestPath). "+
			"rawDestPath='%v' Error='%v'", rawDestPath, err.Error())
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt). "+
			"destDMgr.absolutePath='%v' expectedFileNameExt='%v' Error='%v'",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
		return
	}

	err = srcFMgr.CopyFileMgrByIo(&destFMgr)

	if err != nil {
		t.Errorf("Error returned from srcFMgr.CopyFileMgrByIo(&destFMgr). "+
			"srcFMgr.absolutePathFileName='%v' destFMgr.absolutePathFileName='%v' Error='%v'",
			srcFMgr.absolutePathFileName, destFMgr.absolutePathFileName, err.Error())
		// Best-effort clean-up; the error is intentionally discarded.
		_ = fh.DeleteDirFile(destFMgr.absolutePathFileName)
		return
	}

	if !fh.DoesFileExist(destFMgr.absolutePathFileName) {
		t.Errorf("Expected fh.DoesFileExist(destFMgr.absolutePathFileName)=true.\n" +
			"Instead it was 'false' destFMgr.absolutePathFileName='%v'",
			destFMgr.absolutePathFileName)
	}

	if !destFMgr.doesAbsolutePathFileNameExist {
		t.Error("Expected destFMgr.doesAbsolutePathFileNameExist='true'.\n" +
			"ERROR: destFMgr.doesAbsolutePathFileNameExist='false'")
	}

	err = fh.DeleteDirFile(destFMgr.absolutePathFileName)

	if err != nil {
		t.Errorf("Error returned from fh.DeleteDirFile(destFMgr.absolutePathFileName) "+
			"destFMgr.absolutePathFileName='%v' Error='%v'", destFMgr.absolutePathFileName, err.Error())
	}

	if fh.DoesFileExist(destFMgr.absolutePathFileName) {
		t.Errorf("Expected fh.DoesFileExist(destFMgr.absolutePathFileName)=false. "+
			"Instead it was 'true' destFMgr.absolutePathFileName='%v'",
			destFMgr.absolutePathFileName)
	}

}

// TestFileMgr_CopyFileMgrByIo_02 expects an error when the source FileMgr is
// invalidated before the copy.
func TestFileMgr_CopyFileMgrByIo_02(t *testing.T) {
	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(adjustedPath). adjustedPath='%v' Error='%v'",
			adjustedPath, err.Error())
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt("+
			"dMgr, expectedFileNameExt). dMgr.absolutePath='%v'\n"+
			"expectedFileNameExt='%v'\nError='%v'\n",
			dMgr.absolutePath, adjustedPath, err.Error())
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt("+
			"destDMgr, expectedFileNameExt).\n"+
			"destDMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
		return
	}

	// Invalidate the source to force the error path.
	srcFMgr.isInitialized = false

	err = srcFMgr.CopyFileMgrByIo(&destFMgr)

	if err == nil {
		t.Error("Expected error return from srcFMgr.CopyFileMgrByIo(&destFMgr) because " +
			"srcFMgr.isInitialized == false. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CopyFileMgrByIo_03 expects an error when the destination
// FileMgr is invalidated before the copy.
func TestFileMgr_CopyFileMgrByIo_03(t *testing.T) {
	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr"+
			"(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n", adjustedPath, err.Error())
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt("+
			"dMgr, expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.absolutePath, adjustedPath, err.Error())
		return
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr("+
			"rawDestPath).\nrawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr,
expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, "+
			"expectedFileNameExt).\n"+
			"destDMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
		return
	}

	// Invalidate the destination to force the error path.
	destFMgr.isInitialized = false

	err = srcFMgr.CopyFileMgrByIo(&destFMgr)

	if err == nil {
		t.Error("Expected error return from srcFMgr.CopyFileMgrByIo(&destFMgr) because " +
			"destFMgr.isInitialized = false. However, NO ERROR WAS RETURNED!")
	}

	// Re-validate so clean-up can delete any file the copy may have created.
	destFMgr.isInitialized = true

	err = destFMgr.DeleteThisFile()

	if err != nil {
		t.Errorf("Test Clean-Up Error returned by destFMgr.DeleteThisFile().\n"+
			"destFMgr='%v'\nError='%v'\n", destFMgr.GetAbsolutePath(), err.Error())
	}

}

// TestFileMgr_CopyFileMgrByIo_04 expects an error when a nil destination is
// passed to CopyFileMgrByIo.
func TestFileMgr_CopyFileMgrByIo_04(t *testing.T) {
	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr"+
			"(adjustedPath).\nadjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, "+
			"expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.absolutePath, adjustedPath, err.Error())
		return
	}

	err = srcFMgr.CopyFileMgrByIo(nil)

	if err == nil {
		t.Error("Expected error return from CopyFileMgrByIo(nil) because " +
			"nil was passed to method. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CopyFileMgrByIo_05 expects an error when the source file does
// not exist on disk.
func TestFileMgr_CopyFileMgrByIo_05(t *testing.T) {
	expectedFileNameExt := "iDoNotExist.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr"+
			"(adjustedPath).\nadjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt"+
			"(dMgr, expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.absolutePath, adjustedPath, err.Error())
		return
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr"+
			"(rawDestPath).\nrawDestPath='%v'\nError='%v'\n",
			rawDestPath, err.Error())
		return
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt"+
			"(destDMgr, expectedFileNameExt).\n"+
			"destDMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
		return
	}

	err = srcFMgr.CopyFileMgrByIo(&destFMgr)

	if err == nil {
		t.Error("Expected error return from CopyFileMgrByIo(&destFMgr) because " +
			"source file does NOT exist. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CopyFileMgrByIo_06 expects an error when source and destination
// are the same file (destination created via CopyOut).
func TestFileMgr_CopyFileMgrByIo_06(t *testing.T) {
	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}."+
			"NewFromPathFileNameExtStr(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, "+
			"expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.absolutePath, adjustedPath, err.Error())
		return
	}

	destFMgr := srcFMgr.CopyOut()

	err = srcFMgr.CopyFileMgrByIo(&destFMgr)

	if err == nil {
		t.Error("Expected error return from CopyFileMgrByIo(&destFMgr) because " +
			"source file is equivalent to destination file. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CopyFileMgrByIoByLink_01 copies an existing file via
// io-then-link and verifies the destination exists; the copy is deleted on
// clean-up.
func TestFileMgr_CopyFileMgrByIoByLink_01(t *testing.T) {

	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(adjustedPath). adjustedPath='%v' Error='%v'",
			adjustedPath, err.Error())
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt). dMgr.absolutePath='%v' expectedFileNameExt='%v' Error='%v'",
			dMgr.absolutePath, adjustedPath, err.Error())
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(rawDestPath). rawDestPath='%v' Error='%v'",
			rawDestPath, err.Error())
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt). destDMgr.absolutePath='%v' expectedFileNameExt='%v' Error='%v'",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
	}

	err = srcFMgr.CopyFileMgrByIoByLink(&destFMgr)

	if err != nil {
		t.Errorf("Error returned from srcFMgr.CopyFileMgrByIoByLink(&destFMgr). srcFMgr.absolutePathFileName='%v' destFMgr.absolutePathFileName='%v' Error='%v'",
			srcFMgr.absolutePathFileName, destFMgr.absolutePathFileName, err.Error())
	}

	if !fh.DoesFileExist(destFMgr.absolutePathFileName) {
		t.Errorf("Expected fh.DoesFileExist(destFMgr.absolutePathFileName)=true. Instead it was 'false' destFMgr.absolutePathFileName='%v'",
			destFMgr.absolutePathFileName)
	}

	if !destFMgr.doesAbsolutePathFileNameExist {
		t.Error("Expected destFMgr.doesAbsolutePathFileNameExist='true'. ERROR destFMgr.doesAbsolutePathFileNameExist='false'")
	}

	err = fh.DeleteDirFile(destFMgr.absolutePathFileName)

	if err != nil {
		t.Errorf("Error returned from fh.DeleteDirFile(destFMgr.absolutePathFileName) "+
			"destFMgr.absolutePathFileName='%v' Error='%v'", destFMgr.absolutePathFileName, err.Error())
	}

	if fh.DoesFileExist(destFMgr.absolutePathFileName) {
		t.Errorf("Expected fh.DoesFileExist(destFMgr.absolutePathFileName)=false. "+
			"Instead it was 'true' destFMgr.absolutePathFileName='%v'",
			destFMgr.absolutePathFileName)
	}

}

// TestFileMgr_CopyFileMgrByIoByLink_02 repeats the successful io/link copy
// scenario end-to-end, including clean-up of the destination.
func TestFileMgr_CopyFileMgrByIoByLink_02(t *testing.T) {

	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(adjustedPath). adjustedPath='%v' Error='%v'",
			adjustedPath, err.Error())
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt). dMgr.absolutePath='%v' expectedFileNameExt='%v' Error='%v'",
			dMgr.absolutePath, adjustedPath, err.Error())
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(rawDestPath). rawDestPath='%v' Error='%v'",
			rawDestPath, err.Error())
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt). destDMgr.absolutePath='%v' expectedFileNameExt='%v' Error='%v'",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
	}

	err = srcFMgr.CopyFileMgrByIoByLink(&destFMgr)

	if err != nil {
		t.Errorf("Error returned from srcFMgr.CopyFileMgrByIoByLink(&destFMgr). srcFMgr.absolutePathFileName='%v' destFMgr.absolutePathFileName='%v' Error='%v'",
			srcFMgr.absolutePathFileName, destFMgr.absolutePathFileName, err.Error())
	}

	if !fh.DoesFileExist(destFMgr.absolutePathFileName) {
		t.Errorf("Expected fh.DoesFileExist(destFMgr.absolutePathFileName)=true. Instead it was 'false' destFMgr.absolutePathFileName='%v'",
			destFMgr.absolutePathFileName)
	}

	if !destFMgr.doesAbsolutePathFileNameExist {
		t.Error("Expected destFMgr.doesAbsolutePathFileNameExist='true'. ERROR destFMgr.doesAbsolutePathFileNameExist='false'")
	}

	err = fh.DeleteDirFile(destFMgr.absolutePathFileName)

	if err != nil {
		t.Errorf("Error returned from fh.DeleteDirFile(destFMgr.absolutePathFileName) destFMgr.absolutePathFileName='%v' Error='%v'",
			destFMgr.absolutePathFileName, err.Error())
	}

	if fh.DoesFileExist(destFMgr.absolutePathFileName) {
		t.Errorf("Expected fh.DoesFileExist(destFMgr.absolutePathFileName)=false. Instead it was 'true' destFMgr.absolutePathFileName='%v'",
			destFMgr.absolutePathFileName)
	}

}

// TestFileMgr_CopyFileMgrByIoByLink_03 expects an error when the source
// FileMgr is invalidated before the copy.
func TestFileMgr_CopyFileMgrByIoByLink_03(t *testing.T) {

	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(adjustedPath). adjustedPath='%v' Error='%v'",
			adjustedPath, err.Error())
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt). dMgr.absolutePath='%v' expectedFileNameExt='%v' Error='%v'",
			dMgr.absolutePath, adjustedPath, err.Error())
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(rawDestPath). rawDestPath='%v' Error='%v'",
			rawDestPath, err.Error())
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt). destDMgr.absolutePath='%v' expectedFileNameExt='%v' Error='%v'",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
	}

	// Invalidate the source to force the error path.
	srcFMgr.isInitialized = false

	err = srcFMgr.CopyFileMgrByIoByLink(&destFMgr)

	if err == nil {
		t.Error("Expected error return from srcFMgr.CopyFileMgrByIoByLink(&destFMgr) because " +
			"srcFMgr.isInitialized == false. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CopyFileMgrByIoByLink_04 expects an error when the destination
// FileMgr is invalidated before the copy; any stray copy is deleted after.
func TestFileMgr_CopyFileMgrByIoByLink_04(t *testing.T) {

	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(adjustedPath).\n"+
			"adjustedPath='%v' Error='%v'", adjustedPath, err.Error())
		return
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, "+
			"expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'",
			dMgr.absolutePath, adjustedPath, err.Error())
		return
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, "+
			"expectedFileNameExt).\n"+
			"destDMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
		return
	}

	// Invalidate the destination to force the error path.
	destFMgr.isInitialized = false

	err = srcFMgr.CopyFileMgrByIoByLink(&destFMgr)

	if err == nil {
		t.Error("Expected error return from srcFMgr.CopyFileMgrByIoByLink(&destFMgr) because " +
			"destFMgr.isInitialized = false. However, NO ERROR WAS RETURNED!")
	}

	// Re-validate so clean-up can delete any file the copy may have created.
	destFMgr.isInitialized = true

	err = destFMgr.DeleteThisFile()

	if err != nil {
		t.Errorf("Test Clean-Up Error returned by destFMgr.DeleteThisFile().\n"+
			"destFMgr='%v'\nError='%v'\n", destFMgr.GetAbsolutePath(), err.Error())
	}

}

// TestFileMgr_CopyFileMgrByIoByLink_05 expects an error when a nil
// destination is passed to CopyFileMgrByIoByLink.
func TestFileMgr_CopyFileMgrByIoByLink_05(t *testing.T) {

	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n", adjustedPath, err.Error())
		return
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt(dMgr, "+
			"expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.absolutePath, adjustedPath, err.Error())
		return
	}

	err = srcFMgr.CopyFileMgrByIoByLink(nil)

	if err == nil {
		t.Error("Expected error return from srcFMgr.CopyFileMgrByIoByLink(&destFMgr) because " +
			"destFMgr is 'nil'. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CopyFileMgrByIoByLink_06 expects an error when the source file
// does not exist on disk.
func TestFileMgr_CopyFileMgrByIoByLink_06(t *testing.T) {

	expectedFileNameExt := "iDoNotExist.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n", adjustedPath, err.Error())
		return
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt"+
			"(dMgr, expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.absolutePath, adjustedPath, err.Error())
		return
	}

	rawDestPath := "../../checkfiles/checkfiles02"

	destDMgr, err := DirMgr{}.New(rawDestPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(rawDestPath).\n"+
			"rawDestPath='%v'\nError='%v'\n", rawDestPath, err.Error())
		return
	}

	destFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(destDMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt("+
			"destDMgr, expectedFileNameExt).\n"+
			"destDMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			destDMgr.absolutePath, expectedFileNameExt, err.Error())
		return
	}

	err = srcFMgr.CopyFileMgrByIoByLink(&destFMgr)

	if err == nil {
		t.Error("Expected error return from srcFMgr.CopyFileMgrByIoByLink(&destFMgr) because " +
			"srcFMgr does NOT exist. However, NO ERROR WAS RETURNED!")
	}

}

// TestFileMgr_CopyFileMgrByIoByLink_07 expects an error when source and
// destination are equivalent (destination created via CopyOut).
func TestFileMgr_CopyFileMgrByIoByLink_07(t *testing.T) {

	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n", adjustedPath, err.Error())
		return
	}

	srcFMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt"+
			"(dMgr, expectedFileNameExt).\n"+
			"dMgr.absolutePath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			dMgr.absolutePath, adjustedPath, err.Error())
		return
	}

	destFMgr := srcFMgr.CopyOut()

	err = srcFMgr.CopyFileMgrByIoByLink(&destFMgr)

	if err == nil {
		t.Error("Expected error return from srcFMgr.CopyFileMgrByIoByLink(&destFMgr) because " +
			"srcFMgr is equivalent to destFMgr. However, NO ERROR WAS RETURNED!")
	}

}

func TestFileMgr_CopyFileMgrByIoWithBuffer_01(t *testing.T) {

	setupFile := "../../filesfortest/levelfilesfortest/level_0_3_test.txt"

	fh := FileHelper{}

	setupFile = fh.AdjustPathSlash(setupFile)

	sourceFile := "../../createFilesTest/level_0_3_test.txt"

	sourceFile = fh.AdjustPathSlash(sourceFile)

	err := fh.CopyFileByIo(setupFile, sourceFile)

	if err != nil {
		t.Errorf("Test Setup Error returned by fh.CopyFileByIo(setupFile, sourceFile)\n"+
			"setupFile='%v'\nsourceFile='%v'\nError='%v'\n",
			setupFile, sourceFile, err.Error())
		return
	}

	destFile := "../../createFilesTest/TestFileMgr_CopyFileMgrByIoWithBuffer_01.txt"

	destFile = fh.AdjustPathSlash(destFile)

	srcFMgr, err := FileMgr{}.New(sourceFile)

	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(sourceFile).\n"+
			"sourceFile='%v'\nError='%v'\n", sourceFile, err.Error())
		return
	}

	destFMgr, err := FileMgr{}.New(destFile)

	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(destFile).\n"+
			"destFile='%v'\nError='%v'\n", destFile,
err.Error()) return } err = srcFMgr.CopyFileMgrByIoWithBuffer(&destFMgr, 15000) if err != nil { t.Errorf("Error returned by srcFMgr.CopyFileMgrByIoWithBuffer(&destFMgr, 15000)\n"+ "srcFMgr='%v'\nError='%v'\n", srcFMgr.absolutePathFileName, err.Error()) _ = fh.DeleteDirFile(sourceFile) _ = fh.DeleteDirFile(destFile) return } if !fh.DoesFileExist(destFile) { t.Errorf("Error: After Copy Operation, the destination file DOES NOT EXIST!\n"+ "destFile='%v'\n", destFile) } _ = fh.DeleteDirFile(sourceFile) _ = fh.DeleteDirFile(destFile) return } func TestFileMgr_CopyFileMgrByIoWithBuffer_02(t *testing.T) { fh := FileHelper{} sourceDir := "../../iDoNotExist" sourceDir = fh.AdjustPathSlash(sourceDir) sourceFile := "../../iDoNotExist/iDoNotExist.txt" sourceFile = fh.AdjustPathSlash(sourceFile) destFile := "../../createFilesTest/TestFileMgr_CopyFileMgrByIoWithBuffer_02.txt" destFile = fh.AdjustPathSlash(destFile) srcFMgr, err := FileMgr{}.New(sourceFile) if err != nil { t.Errorf("Error returned by FileMgr{}.New(sourceFile).\n"+ "sourceFile='%v'\nError='%v'\n", sourceFile, err.Error()) return } destFMgr, err := FileMgr{}.New(destFile) if err != nil { t.Errorf("Error returned by FileMgr{}.New(destFile).\n"+ "destFile='%v'\nError='%v'\n", destFile, err.Error()) return } err = srcFMgr.CopyFileMgrByIoWithBuffer(&destFMgr, 15000) if err == nil { t.Errorf("Expected an error returned by srcFMgr.CopyFileMgrByIoWithBuffer(&destFMgr, 15000)\n"+ "because srcFMgr DOES NOT EXIST!\n"+ "However, NO ERROR WAS RETURNED!\nsrcFMgr='%v'\n", srcFMgr.absolutePathFileName) } _ = fh.DeleteDirPathAll(sourceDir) _ = fh.DeleteDirFile(destFile) return }
/* (Intermediate): Adjacency Matrix In graph theory, an adjacency matrix is a data structure that can represent the edges between nodes for a [graph](http://en.wikipedia.org/wiki/Graph_(mathematics)) in an N x N matrix. The basic idea is that an edge exists between the elements of a row and column if the entry at that point is set to a valid value. This data structure can also represent either a directed graph or an undirected graph. Your goal is to write a program that takes in a list of edge-node relationships, and print a directed adjacency matrix for it. Our convention will follow that rows point to columns. Follow the examples for clarification of this convention. Here's a great online directed graph editor written in Javascript to help you visualize the challenge. Feel free to post your own helpful links! Formal Inputs & Outputs Input Description On standard console input, you will be first given a line with two space-delimited integers N and M. N is the number of nodes / vertices in the graph, while M is the number of following lines of edge-node data. A line of edge-node data is a space-delimited set of integers, with the special "->" symbol indicating an edge. This symbol shows the edge-relationship between the set of left-sided integers and the right-sided integers. This symbol will only have one element to its left, or one element to its right. These lines of data will also never have duplicate information; you do not have to handle re-definitions of the same edges. An example of data that maps the node 1 to the nodes 2 and 3 is as follows: 1 -> 2 3 Another example where multiple nodes points to the same node: 3 8 -> 2 You can expect input to sometimes create cycles and self-references in the graph. The following is valid: 2 -> 2 3 3 -> 2 Note that there is no order in the given integers; thus "1 -> 2 3" is the same as "1 -> 3 2". Output Description Print the N x N adjacency matrix as a series of 0's (no-edge) and 1's (edge). 
Sample Inputs & Outputs Sample Input 5 5 0 -> 1 1 -> 2 2 -> 4 3 -> 4 0 -> 3 Sample Output 01010 00100 00001 00001 00000 */ package main import ( "fmt" "reflect" "strconv" "strings" ) func main() { test(5, []string{ "0 -> 1", "1 -> 2", "2 -> 4", "3 -> 4", "0 -> 3", }, [][]byte{ {0, 1, 0, 1, 0}, {0, 0, 1, 0, 0}, {0, 0, 0, 0, 1}, {0, 0, 0, 0, 1}, {0, 0, 0, 0, 0}, }) } func assert(x bool) { if !x { panic("assertion failed") } } func test(n int, m []string, r [][]byte) { p, err := adjacency(n, m) dump(p) assert(err == nil) assert(reflect.DeepEqual(p, r)) } func adjacency(n int, m []string) ([][]byte, error) { g := alloc(n) for l, s := range m { t := strings.Split(s, "->") if len(t) != 2 { return nil, fmt.Errorf("invalid input at line %d", l+1) } a, err := readints(t[0]) b, xerr := readints(t[1]) if err != nil || xerr != nil { return nil, fmt.Errorf("invalid nodes at line %d", l+1) } for _, i := range a { for _, j := range b { if i < 0 || j < 0 || i >= n || j >= n { return nil, fmt.Errorf("out of range nodes (%d, %d) at line %d", i, j, l+1) } g[i][j] = 1 } } } return g, nil } func alloc(n int) [][]byte { p := make([][]byte, n) t := make([]byte, n*n) for i := range p { p[i] = t[i*n : (i+1)*n] } return p } func readints(s string) ([]int, error) { p := []int{} t := strings.Split(strings.TrimSpace(s), " ") for i := range t { n, err := strconv.Atoi(t[i]) if err != nil { return nil, err } p = append(p, n) } return p, nil } func dump(m [][]byte) { for i := range m { for j := range m[i] { fmt.Printf("%d", m[i][j]) } fmt.Println() } }
package main

import (
	"redisDB"
)

// processRedisResTask_goroutine drains the server's result-task channel in an
// endless loop and forwards login results to the originating client session.
// NOTE(review): the loop has no shutdown signal visible here — confirm the
// goroutine's lifetime is managed by the caller.
func processRedisResTask_goroutine(svr *echoServer) {
	for {
		resTask := <-svr._resTaskChan

		if resTask.ID == redisDB.TaskID_ResLogin {
			// For login results, resTask.UID carries the session index.
			_sendLoginResponse(svr, int(resTask.UID), resTask.Result)
		}
	}
}

// _sendLoginResponse encodes a LoginResPacket carrying 'result' and sends it
// to the client identified by sessionIndex.
func _sendLoginResponse(svr *echoServer, sessionIndex int, result int16) {
	res := LoginResPacket{
		Result: result,
	}

	// 32-byte scratch buffer handed to the packet encoder.
	outBuf := make([]byte, 32)
	// NOTE(review): the encoding error is silently discarded.
	resPkt, _ := res.EncodingPacket(outBuf)
	svr._serverNet.ISendToClient(sessionIndex, resPkt)
}
package kademlia

import (
	"log"
	"testing"
	"time"
)

// otherNode starts a helper Kademlia node listening on 127.0.0.1:myPort and,
// when contact is non-nil, bootstraps it against that contact.
// NOTE(review): the 100µs sleep gives Listen time to start — timing based,
// potentially flaky.
func otherNode(myId string, myPort int, contact *Contact) {
	node := NewKademlia(myId, "127.0.0.1", myPort)
	go node.Listen("127.0.0.1", myPort)
	time.Sleep(100 * time.Microsecond)
	if contact != nil {
		/*node.RoutingTable.AddContact(*contact)
		node.Network.SendPingMessage(contact)*/
		node.Bootstrap(*contact)
	}
}

// TestKademliaBootstrap spins up a seed node on port 8000 plus helper nodes
// that bootstrap against it, then bootstraps a fresh node on port 8020 into
// the resulting network. It asserts nothing; it exercises the code paths and
// relies on sleeps for ordering.
func TestKademliaBootstrap(t *testing.T) {
	contacts := map[int]Contact{
		//8001: NewContact(NewKademliaID("0000000000000000000000000000000000000001"), "127.0.0.1:8001"),
		8002: NewContact(NewKademliaID("0000000000000000000000000000000000000002"), "127.0.0.1:8002"),
		/*8003: NewContact(NewKademliaID("0000000000000000000000000000000000000003"), "127.0.0.1:8003"),
		8004: NewContact(NewKademliaID("0000000000000000000000000000000000000004"), "127.0.0.1:8004"),
		8005: NewContact(NewKademliaID("0000000000000000000000000000000000000005"), "127.0.0.1:8005"),
		8006: NewContact(NewKademliaID("0000000000000000000000000000000000000006"), "127.0.0.1:8006"),
		8007: NewContact(NewKademliaID("0000000000000000000000000000000000000007"), "127.0.0.1:8007"),
		8008: NewContact(NewKademliaID("0000000000000000000000000000000000000008"), "127.0.0.1:8008"),
		8009: NewContact(NewKademliaID("0000000000000000000000000000000000000009"), "127.0.0.1:8009"),
		8010: NewContact(NewKademliaID("000000000000000000000000000000000000000A"), "127.0.0.1:8010"),
		8011: NewContact(NewKademliaID("000000000000000000000000000000000000000B"), "127.0.0.1:8011"),
		8012: NewContact(NewKademliaID("000000000000000000000000000000000000000C"), "127.0.0.1:8012"),
		8013: NewContact(NewKademliaID("000000000000000000000000000000000000000D"), "127.0.0.1:8013"),
		8014: NewContact(NewKademliaID("000000000000000000000000000000000000000E"), "127.0.0.1:8014"),
		8015: NewContact(NewKademliaID("000000000000000000000000000000000000000F"), "127.0.0.1:8015"),
		8016: NewContact(NewKademliaID("0000000000000000000000000000000000000010"), "127.0.0.1:8016"),
		8017: NewContact(NewKademliaID("0000000000000000000000000000000000000011"), "127.0.0.1:8017"),
		8018: NewContact(NewKademliaID("0000000000000000000000000000000000000012"), "127.0.0.1:8018"),*/
	}
	// Seed node the others bootstrap against.
	go otherNode("0000000000000000000000000000000000000010", 8000, nil)
	contact := NewContact(NewKademliaID("0000000000000000000000000000000000000010"), "127.0.0.1:8000")
	for k, v := range contacts {
		go otherNode(v.ID.String(), k, &contact)
	}
	time.Sleep(100 * time.Microsecond)
	node := NewKademlia("0000000000000000000000000000000000000014", "127.0.0.1", 8020)
	go node.Listen("127.0.0.1", 8020)
	// Let the helper network settle before bootstrapping the node under test.
	time.Sleep(5 * time.Second)
	node.Bootstrap(NewContact(NewKademliaID("0000000000000000000000000000000000000010"), "127.0.0.1:8000"))
}

// TestKademliaPin hashes a payload, then sends a pin followed by an unpin for
// the resulting key to a helper node. Like the bootstrap test it asserts
// nothing and is driven by sleeps.
func TestKademliaPin(t *testing.T) {
	contacts := map[int]Contact{
		8001: NewContact(NewKademliaID("0000000000000000000000000000000000000001"), "127.0.0.1:8001"),
		/*8002: NewContact(NewKademliaID("0000000000000000000000000000000000000002"), "127.0.0.1:8002"),
		8003: NewContact(NewKademliaID("0000000000000000000000000000000000000003"), "127.0.0.1:8003"),
		8004: NewContact(NewKademliaID("0000000000000000000000000000000000000004"), "127.0.0.1:8004"),
		8005: NewContact(NewKademliaID("0000000000000000000000000000000000000005"), "127.0.0.1:8005"),
		8006: NewContact(NewKademliaID("0000000000000000000000000000000000000006"), "127.0.0.1:8006"),
		8007: NewContact(NewKademliaID("0000000000000000000000000000000000000007"), "127.0.0.1:8007"),
		8008: NewContact(NewKademliaID("0000000000000000000000000000000000000008"), "127.0.0.1:8008"),
		8009: NewContact(NewKademliaID("0000000000000000000000000000000000000009"), "127.0.0.1:8009"),
		8010: NewContact(NewKademliaID("000000000000000000000000000000000000000A"), "127.0.0.1:8010"),
		8011: NewContact(NewKademliaID("000000000000000000000000000000000000000B"), "127.0.0.1:8011"),
		8012: NewContact(NewKademliaID("000000000000000000000000000000000000000C"), "127.0.0.1:8012"),
		8013: NewContact(NewKademliaID("000000000000000000000000000000000000000D"), "127.0.0.1:8013"),
		8014: NewContact(NewKademliaID("000000000000000000000000000000000000000E"), "127.0.0.1:8014"),
		8015: NewContact(NewKademliaID("000000000000000000000000000000000000000F"), "127.0.0.1:8015"),
		8016: NewContact(NewKademliaID("0000000000000000000000000000000000000010"), "127.0.0.1:8016"),
		8017: NewContact(NewKademliaID("0000000000000000000000000000000000000011"), "127.0.0.1:8017"),
		8018: NewContact(NewKademliaID("0000000000000000000000000000000000000012"), "127.0.0.1:8018"),*/
	}
	node := NewKademlia("0000000000000000000000000000000000000014", "127.0.0.1", 8020)
	for k, v := range contacts {
		go otherNode(v.ID.String(), k, nil)
		node.RoutingTable.AddContact(v)
	}
	time.Sleep(2 * time.Second)
	go node.Listen("127.0.0.1", 8020)
	time.Sleep(100 * time.Microsecond)
	data := []byte("TEST")
	key := NewKademliaID(HashBytes(data))
	log.Printf("Key: %s\n", key.String())
	contact := contacts[8001]
	node.Network.SendPinMessage(&contact, key, data)
	node.Network.SendUnpinMessage(&contact, key)
	// NOTE(review): a 60s sleep makes this test very slow — consider a
	// synchronization primitive instead.
	time.Sleep(60 * time.Second)
}
package zenrpc_mw

import (
	"context"
	"encoding/json"
	"strconv"
	"time"

	"github.com/go-kit/kit/metrics"
	"github.com/semrush/zenrpc"
)

// qualifiedMethod prefixes method with the request namespace from ctx, if
// any, yielding the label value shared by all middlewares in this package.
func qualifiedMethod(ctx context.Context, method string) string {
	if namespace := zenrpc.NamespaceFromContext(ctx); namespace != "" {
		return namespace + "." + method
	}
	return method
}

// errorCode renders the response error code as a metric label value; it is
// the empty string when the response carries no error.
func errorCode(r zenrpc.Response) string {
	if r.Error != nil {
		return strconv.Itoa(r.Error.Code)
	}
	return ""
}

// RequestCounter returns middleware that increments counter once per RPC
// invocation, labelled by fully-qualified method name and error code.
func RequestCounter(counter metrics.Counter) zenrpc.MiddlewareFunc {
	return func(invoke zenrpc.InvokeFunc) zenrpc.InvokeFunc {
		return func(ctx context.Context, method string, params json.RawMessage) zenrpc.Response {
			r := invoke(ctx, method, params)
			// Shared helpers keep label construction identical to RequestDuration.
			counter.With("method", qualifiedMethod(ctx, method), "code", errorCode(r)).Add(1)
			return r
		}
	}
}

// RequestDuration returns middleware that observes the wall-clock duration
// (seconds) of every RPC invocation, labelled by fully-qualified method name
// and error code.
func RequestDuration(histogram metrics.Histogram) zenrpc.MiddlewareFunc {
	return func(invoke zenrpc.InvokeFunc) zenrpc.InvokeFunc {
		return func(ctx context.Context, method string, params json.RawMessage) zenrpc.Response {
			begin := time.Now()
			r := invoke(ctx, method, params)
			histogram.With("method", qualifiedMethod(ctx, method), "code", errorCode(r)).Observe(time.Since(begin).Seconds())
			return r
		}
	}
}
package scan

import (
	"fmt"
	"testing"
	"h12.io/gspec"
)

// Short aliases keeping the table in TestExpr compact.
var (
	c          = Char
	b          = Between
	merge      = Merge
	s          = Str
	con        = Con
	or         = Or
	zeroOrOne  = ZeroOrOne
	zeroOrMore = ZeroOrMore
	oneOrMore  = OneOrMore
	repeat     = Repeat
)

// TestExpr checks that each pattern constructor stringifies to the expected
// character-class / literal notation, including metacharacter escaping,
// single and double negation, and exclusion.
func TestExpr(t *testing.T) {
	expect := gspec.Expect(t.FailNow)
	for _, tc := range []struct {
		sg fmt.Stringer
		s  string
	}{
		{c(`a`), `[a]`},
		{c(`abc`), `[a-c]`},
		{c(`[`), `[\[]`},
		{c(`]`), `[\]]`},
		{c(`-`), `[\-]`},
		{b('a', 'c'), `[a-c]`},
		{s(`xy`), `xy`},
		{c(`b`).Negate(), `[\x00-ac-\U0010ffff]`},
		{c(`b`).Negate().Negate(), `[b]`},
		{c(`bf`).Negate(), `[\x00-ac-eg-\U0010ffff]`},
		{c(`bf`).Negate().Negate(), `[bf]`},
		{c("\x00").Negate(), `[\x01-\U0010ffff]`},
		{c("\x00").Negate().Negate(), `[\x00]`},
		{c("\U0010ffff").Negate(), `[\x00-\U0010fffe]`},
		{c("\U0010ffff").Negate().Negate(), `[\U0010ffff]`},
		{c(`abcde`).Exclude(c(`bd`)), `[ace]`},
	} {
		expect(tc.sg.String()).Equal(tc.s)
	}
}
package sqlbuilder_test

// This is in a different package so we can also make sure everything works from outside of the sqlbuilder package

import (
	"fmt"
	. "github.com/jraede/go-sqlbuilder"
	. "github.com/smartystreets/goconvey/convey"
	"testing"
)

// param is one table-driven test case: a query under test plus the SQL text
// and bind variables it is expected to produce.
type param struct {
	description string
	query       *Query
	expected    string
	vars        []interface{}
}

// Basic SELECT query to get the first and last name for people younger than 10 or older than 90, ordering by last name, limiting to 20 results
func ExampleSelect_basic() {
	sql, vars := Select("first_name", "last_name").From("people").Where(
		Equal{"gender", "female"},
		Or(
			GreaterThan{"age", 90},
			LessThan{"age", 10},
		),
	).OrderBy("last_name", ASC).Limit(20).GetFullSQL()

	fmt.Println(sql, ",", vars)
	// Output: SELECT first_name, last_name FROM people WHERE (gender = $1 AND (age > $2 OR age < $3)) ORDER BY last_name ASC LIMIT 20 , [female 90 10]
}

// Complex SELECT query with multiple joins. This gets the total number of
// rushing plays that a player was involved in that either went for more than
// 10 yards or went for a score, if the player has more than 5 such plays.
// Orders by total matching plays, and show results #51-60
func ExampleSelect_complex() {
	sql, vars := Select("COUNT(plays) AS playcount", "players.name").From("players").InnerJoin("play_player",
		OnColumn("players.id", "play_player.player_id"),
	).InnerJoin("plays",
		OnColumn("plays.id", "play_player.play_id"),
		OnExpression(Equal{"plays.type", "running"}),
	).Where(
		Or(
			GreaterThan{"plays.yards", 10},
			Equal{"plays.scoring", true},
		),
	).GroupBy("players.id").Having(GreaterThan{"COUNT(plays)", 5}).OrderBy("playcount", DESC).Limit(10).Offset(50).GetFullSQL()

	fmt.Println(sql, ",", vars)
	// Output: SELECT COUNT(plays) AS playcount, players.name FROM players INNER JOIN play_player ON players.id = play_player.player_id INNER JOIN plays ON plays.id = play_player.play_id AND plays.type = $1 WHERE (plays.yards > $2 OR plays.scoring = $3) GROUP BY players.id HAVING COUNT(plays) > $4 ORDER BY playcount DESC LIMIT 10 OFFSET 50 , [running 10 true 5]
}

// TestSelect runs every table case through GetFullSQL and compares both the
// generated SQL text and the bind-variable list.
func TestSelect(t *testing.T) {
	params := []param{
		{"simple", Select("*").From("foos"), "SELECT * FROM foos", nil},
		{"simple where", Select("a", "b", "c").From("foos").Where(Equal{"a", 10}), "SELECT a, b, c FROM foos WHERE a = $1", []interface{}{10}},
		{"complex where", Select("*").From("stats").Where(LessThan{"rushing_attempts", 10}, Or(GreaterThan{"rushing_yards", 100}, GreaterThan{"rushing_tds", 0})), "SELECT * FROM stats WHERE (rushing_attempts < $1 AND (rushing_yards > $2 OR rushing_tds > $3))", []interface{}{10, 100, 0}},
		{"placeholder", Select("a", "b", "c").Placeholder(func(dex int) string {
			return "?"
		}).From("foos").Where(Equal{"a", 10}), "SELECT a, b, c FROM foos WHERE a = ?", []interface{}{10}},
		{"ordering", Select("a").From("foos").OrderBy("a.timestamp", DESC), "SELECT a FROM foos ORDER BY a.timestamp DESC", nil},
		{"multiple ordering", Select("a").From("foos").OrderBy("a.category", DESC).OrderBy("a.timestamp", ASC), "SELECT a FROM foos ORDER BY a.category DESC, a.timestamp ASC", nil},
		{"group by", Select("SUM(a.price)").From("foos").GroupBy("a.category"), "SELECT SUM(a.price) FROM foos GROUP BY a.category", nil},
		{"single join", Select("*").From("foos").InnerJoin("bars", OnColumn("bars.foo_id", "foos.id")), "SELECT * FROM foos INNER JOIN bars ON bars.foo_id = foos.id", nil},
		{"complex single join", Select("*").From("foos").InnerJoin("categories", OnColumn("foos.category_id", "categories.id"), OnExpression(Equal{"categories.type", "main"})), "SELECT * FROM foos INNER JOIN categories ON foos.category_id = categories.id AND categories.type = $1", []interface{}{"main"}},
		{"multiple joins", Select("*").From("games").InnerJoin("drives", OnColumn("drives.game_id", "games.id")).InnerJoin("plays", OnColumn("plays.drive_id", "drives.id")), "SELECT * FROM games INNER JOIN drives ON drives.game_id = games.id INNER JOIN plays ON plays.drive_id = drives.id", nil},
		{"everything", Select("COUNT(plays)", "players.name").From("players").InnerJoin("play_player", OnColumn("players.id", "play_player.player_id")).InnerJoin("plays", OnColumn("plays.id", "play_player.play_id")).Where(Or(GreaterThan{"plays.yards", 10}, Equal{"plays.scoring", true})).GroupBy("players.id").Having(GreaterThan{"COUNT(plays)", 5}).OrderBy("players.name", ASC).Limit(10).Offset(50), "SELECT COUNT(plays), players.name FROM players INNER JOIN play_player ON players.id = play_player.player_id INNER JOIN plays ON plays.id = play_player.play_id WHERE (plays.yards > $1 OR plays.scoring = $2) GROUP BY players.id HAVING COUNT(plays) > $3 ORDER BY players.name ASC LIMIT 10 OFFSET 50", []interface{}{10, true, 5}},
		{"unions", Select("foo").From(Alias(Union(Select("foo").From("table1"), Select("foo").From("table2")), "u")), "SELECT foo FROM ((SELECT foo FROM table1) UNION (SELECT foo FROM table2)) u", nil},
		{"case", Select("foo").From("bar").Where(
			Case(
				When(Equal{"foo", "bar"}).Then(Equal{"boop", "baz"}),
				When(Equal{"foo", "baz"}).Then(Equal{"boop", "foop"}),
				Else(Equal{"boop", "loop"}),
			),
		), "SELECT foo FROM bar WHERE (CASE WHEN foo = $1 THEN boop = $2 WHEN foo = $3 THEN boop = $4 ELSE boop = $5 END)", []interface{}{"bar", "baz", "baz", "foop", "loop"}},
	}

	Convey("Select queries", t, func() {
		for _, p := range params {
			Convey(p.description, func() {
				sql, vars := p.query.GetFullSQL()
				So(sql, ShouldEqual, p.expected)
				So(len(vars), ShouldEqual, len(p.vars))
				for i, v := range vars {
					So(p.vars[i], ShouldEqual, v)
				}
			})
		}
	})
}
package main

import (
	"flag"
	"fmt"
	"time"
)

// main sleeps for the duration given by the --period flag, e.g.
// `go run sleep.go --period 1m` sleeps one minute.
func main() {
	// flag.Duration parses values like "1m" or "250ms" directly into a
	// time.Duration, so both the amount and its unit come from the flag.
	var period = flag.Duration("period", 1*time.Second, "sleep period")
	flag.Parse()

	// BUG FIX: %v on a time.Duration already renders the unit (e.g. "1m0s"),
	// so the old text printed "Sleeping for 1m0s seconds..." — the hard-coded
	// " seconds" suffix is dropped.
	fmt.Printf("Sleeping for %v...\n", *period)
	time.Sleep(*period)
}
package entities // Character represents an in game character. // It has a role and some additional informations. // type Character struct { Role Role // Private fields alive bool sheriff bool } // NewCharacter creates a new character. // This character has theses properties: // // - The character has a given role; // - The character is considered alive; // - The character is NOT sheriff. func NewCharacter(role Role) *Character { c := Character{ Role: role, alive: true, sheriff: false, } return &c } // IsAlive returns true if the current character // has not been killed. func (c Character) IsAlive() bool { return c.alive } // ElectAsSheriff sets the current character as // the sheriff. func (c *Character) ElectAsSheriff() { c.sheriff = true } // IsSheriff returns true if the current character // was elected as sheriff. func (c Character) IsSheriff() bool { return c.sheriff } // IsHuman returns true if the current character // is NOT a werewolf. func (c Character) IsHuman() bool { return !c.IsWerewolf() } // IsWerewolf returns true if the current character // is a werewolf. func (c Character) IsWerewolf() bool { return c.Role&RoleWerewolf != 0 } // Kill sets 'alive' to false, meaning the current // character has been killed. func (c *Character) Kill() { c.alive = false }
package utils

import (
	"database/sql/driver"
	"fmt"
	mathRand "math/rand"
	"os"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/exasol/exasol-driver-go/pkg/errors"
)

// Detects "FROM LOCAL CSV" import statements (case-insensitive).
var localImportRegex = regexp.MustCompile(`(?i)(FROM LOCAL CSV )`)

// Captures the path of each FILE clause into the "File" group.
var fileQueryRegex = regexp.MustCompile(`(?i)(FILE\s+(["|'])?(?P<File>[a-zA-Z0-9:<> \\\/._]+)(["|']? ?))`)

// Captures the value of a ROW SEPARATOR clause into the "RowSeparator" group.
var rowSeparatorQueryRegex = regexp.MustCompile(`(?i)(ROW\s+SEPARATOR\s+=\s+(["|'])?(?P<RowSeparator>[a-zA-Z]+)(["|']?))`)

// NamedValuesToValues converts driver.NamedValue arguments to plain
// driver.Value arguments. Named (as opposed to positional) parameters are
// not supported and produce ErrNamedValuesNotSupported.
func NamedValuesToValues(namedValues []driver.NamedValue) ([]driver.Value, error) {
	values := make([]driver.Value, len(namedValues))
	for index, namedValue := range namedValues {
		if namedValue.Name != "" {
			return nil, errors.ErrNamedValuesNotSupported
		}
		values[index] = namedValue.Value
	}
	return values, nil
}

// BoolToInt maps true to 1 and false to 0.
func BoolToInt(b bool) int {
	if b {
		return 1
	}
	return 0
}

// BoolToPtr returns a pointer to a copy of b.
func BoolToPtr(b bool) *bool {
	return &b
}

// IsImportQuery reports whether query is a LOCAL CSV import statement.
func IsImportQuery(query string) bool {
	return localImportRegex.MatchString(query)
}

// GetRowSeparator extracts the ROW SEPARATOR value from an import query and
// returns the corresponding line-ending bytes. LF ("\n") is the default and
// also the fallback for unrecognized values.
func GetRowSeparator(query string) string {
	r := rowSeparatorQueryRegex.FindStringSubmatch(query)
	separator := "LF"
	for i, name := range rowSeparatorQueryRegex.SubexpNames() {
		// NOTE(review): only the all-upper/all-lower spellings are matched
		// below; mixed case like "Cr" falls through to the LF default.
		if name == "RowSeparator" && len(r) >= i {
			separator = r[i]
		}
	}

	switch separator {
	case "CR", "cr":
		return "\r"
	case "CRLF", "crlf":
		return "\r\n"
	default:
		return "\n"
	}
}

// GetFilePaths returns every path referenced by a FILE clause in the query.
// A query without any FILE clause is an invalid import query.
func GetFilePaths(query string) ([]string, error) {
	r := fileQueryRegex.FindAllStringSubmatch(query, -1)
	var files []string
	for _, matches := range r {
		for i, name := range fileQueryRegex.SubexpNames() {
			if name == "File" {
				files = append(files, matches[i])
			}
		}
	}
	if len(files) == 0 {
		return nil, errors.ErrInvalidImportQuery
	}
	return files, nil
}

// OpenFile opens path for reading, wrapping failures as NewFileNotFound.
func OpenFile(path string) (*os.File, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, errors.NewFileNotFound(path)
	}
	return file, nil
}

// UpdateImportQuery rewrites a LOCAL CSV import so the server fetches the
// data from the driver's internal proxy instead: the first FILE clause
// becomes FILE 'data.csv', any further FILE clauses are dropped, and
// "LOCAL CSV" is replaced by "CSV AT 'http://host:port'".
func UpdateImportQuery(query string, host string, port int) string {
	r := fileQueryRegex.FindAllStringSubmatch(query, -1)
	for i, matches := range r {
		if i == 0 {
			query = strings.Replace(query, matches[0], "FILE 'data.csv' ", 1)
		} else {
			query = strings.Replace(query, matches[0], "", 1)
		}
	}
	proxyURL := fmt.Sprintf("http://%s:%d", host, port)
	updatedImport := fmt.Sprintf("CSV AT '%s'", proxyURL)
	var importQueryRegex = regexp.MustCompile(`(?i)(LOCAL CSV)`)
	return string(importQueryRegex.ReplaceAll([]byte(query), []byte(updatedImport)))
}

// ResolveHosts expands a comma-separated host list, including range syntax
// such as "exasol1..3" which expands to exasol1, exasol2, exasol3.
func ResolveHosts(h string) ([]string, error) {
	var hosts []string
	hostRangeRegex := regexp.MustCompile(`^((.+?)(\d+))\.\.(\d+)$`)

	for _, host := range strings.Split(h, ",") {
		if hostRangeRegex.MatchString(host) {
			parsedHosts, err := ParseRange(hostRangeRegex, host)
			if err != nil {
				return nil, err
			}
			hosts = append(hosts, parsedHosts...)
		} else {
			hosts = append(hosts, host)
		}
	}
	return hosts, nil
}

// ParseRange expands one "prefixN..M" host entry into prefixN..prefixM.
// M must be >= N, otherwise NewInvalidHostRangeLimits is returned.
func ParseRange(hostRangeRegex *regexp.Regexp, host string) ([]string, error) {
	matches := hostRangeRegex.FindStringSubmatch(host)
	prefix := matches[2]

	start, err := strconv.Atoi(matches[3])
	if err != nil {
		return nil, err
	}

	stop, err := strconv.Atoi(matches[4])
	if err != nil {
		return nil, err
	}

	if stop < start {
		return nil, errors.NewInvalidHostRangeLimits(host)
	}

	var hosts []string
	for i := start; i <= stop; i++ {
		hosts = append(hosts, fmt.Sprintf("%s%d", prefix, i))
	}
	return hosts, nil
}

// ShuffleHosts randomizes host order in place (simple load distribution;
// math/rand is fine here, hence the gosec suppression).
func ShuffleHosts(hosts []string) {
	r := mathRand.New(mathRand.NewSource(time.Now().UnixNano())) //nolint:gosec
	r.Shuffle(len(hosts), func(i, j int) {
		hosts[i], hosts[j] = hosts[j], hosts[i]
	})
}
// GO BINARY SEARCH TREE package main import "fmt" type Node struct { data int lChild *Node rChild *Node } type BinaryTree struct { root *Node } // Initialize Tree with content func newBinaryTree(items ...int) BinaryTree { n := BinaryTree{nil } for _, num := range items { n.add(num) } return n } // 1. Search - return bool func (n *BinaryTree) search(v int) bool { current := n.root for current != nil { //root is int if v == current.data { return true } else if v < current.data { current = current.lChild } else { current = current.rChild } } return false } // 2. Insert given int value func (n *BinaryTree) add(inputData int) { newNode := Node{inputData, nil, nil } //empty tree if n.root == nil { n.root = &newNode } else { //not empty insertHelper(n.root, &newNode) } } func insertHelper(current, newOne *Node) { if newOne.data <= current.data { //new data smaller than current node data if current.lChild != nil { //haven't reached location; keep going insertHelper(current.lChild, newOne) } else { //location found current.lChild = newOne return } } else { //new data larger than current node data if current.rChild != nil { //haven't reached location; keep going insertHelper(current.rChild, newOne) } else { //location found current.rChild = newOne return } } } // 3. Compute height of tree func findHeightR(node *Node) int { // RECURSIVE if node == nil { return 0 } heightLeft := findHeightR(node.lChild) heightRight := findHeightR(node.rChild) if heightLeft < heightRight { return 1 + heightRight } return 1 + heightLeft } func findHeight(node *Node) int { if node == nil { return 0 } queue1 := []*Node{node} height := 0 nodeCount := 1 for nodeCount > 0 { height++ newNodeCount := 0 current := queue1[0] nodeCount-- queue1 = queue1[1:] if current.lChild != nil { queue1 = append(queue1, current.lChild) newNodeCount++ } if current.rChild != nil { queue1 = append(queue1, current.rChild) newNodeCount++ } nodeCount = newNodeCount } return height } // 4. 
Print values - pre-order; Root-Left-Right (me before my children) func preOrderR(node *Node) { // RECURSIVE if node == nil { return } else { fmt.Println(node.data) preOrder(node.lChild) preOrder(node.rChild) } } func preOrder(node *Node) { // ITERATIVE if node == nil { return } stack1 := []*Node{} stack1 = append(stack1, node) for len(stack1) > 0 { current := stack1[len(stack1)-1] fmt.Println(current.data) stack1 = stack1[:len(stack1)-1] if current.rChild != nil { stack1 = append(stack1, current.rChild) } if current.lChild != nil { stack1 = append(stack1, current.lChild) } } } // 5. Print values - in-order; Left-Root-Right, ascending func inOrderR(node *Node) { // RECURSIVE if node == nil { return } else { inOrder(node.lChild) fmt.Println(node.data) inOrder(node.rChild) } } func inOrder(node *Node) { // ITERATIVE if node == nil { return } stack1 := []*Node{} current := node for current != nil || len(stack1) > 0 { if current != nil { stack1 = append(stack1, current) current = current.lChild } else if len(stack1) > 0 { current = stack1[len(stack1)-1] stack1 = stack1[:len(stack1)-1] fmt.Println(current.data) current = current.rChild } } } // 6. Print values - post-order; Left-Right-Root (my children before me) func postOrderR(node *Node) { // RECURSIVE if node == nil { return } else { postOrder(node.lChild) postOrder(node.rChild) fmt.Println(node.data) } } func postOrder(node *Node) { // ITERATIVE if node == nil { return } stack1 := []*Node{} stack2 := []*Node{} stack1 = append(stack1, node) for len(stack1) > 0 { temp := stack1[len(stack1)-1] stack1 = stack1[:len(stack1)-1] stack2 = append(stack2, temp) if temp.lChild != nil { stack1 = append(stack1, temp.lChild) } if temp.rChild != nil { stack1 = append(stack1, temp.rChild) } } for len(stack2) > 0 { fmt.Println(stack2[len(stack2)-1].data) stack2 = stack2[:len(stack2)-1] } } // 7. 
Print values - level-order func levelOrder(node *Node) { if node == nil { return } queue1 := []*Node{node} for len(queue1) > 0 { current := queue1[0] fmt.Println(current.data) queue1 = queue1[1:] if current.lChild != nil { queue1 = append(queue1, current.lChild) } if current.rChild != nil { queue1 = append(queue1, current.rChild) } } } // 8. Delete a given value from tree //func (n *BinaryTree) delete(v int) { //} func main() { //Initialize tree with items fmt.Println("-----start: 8,3,10,1,6,14,4,7,13-----") var myTree = newBinaryTree(8,3,10,1,6,14,4,7,13) fmt.Println(myTree.root) fmt.Println("-------search 2-------") fmt.Println(myTree.search(2)) fmt.Println("-------search 10-------") fmt.Println(myTree.search(10)) fmt.Println("-------height-------") fmt.Println(findHeight(myTree.root)) fmt.Println("-------RecursiveHeight-------") fmt.Println(findHeightR(myTree.root)) fmt.Println("-------preOrder-------") preOrder(myTree.root) fmt.Println("-------inOrder-------") inOrder(myTree.root) fmt.Println("-------postOrder-------") postOrder(myTree.root) fmt.Println("-------levelOrder-------") levelOrder(myTree.root) }
package leetcode func findSpecialInteger(arr []int) int { l := len(arr) quarter := l / 4 for i := 0; i < l; i++ { if arr[i] == arr[i+quarter] { return arr[i] } } return -1 }
package admin

import (
	"github.com/astaxie/beego"
	"go_blog/utils"
)

// AdminController is the base beego controller for the admin pages.
type AdminController struct {
	beego.Controller
}

// AdminDirector drives an AdminModel builder through the standard render
// sequence (auth check, layout, data) for one admin section.
type AdminDirector struct {
	controller   *AdminController
	modelBuilder AdminModel
	current      string // name of the admin section being rendered
}

// getModel runs the builder pipeline: require login, render the layout for
// the current section, then render the section data.
func (self *AdminDirector) getModel() {
	self.modelBuilder.GetUserOrRedirectLogin(self.controller)
	self.modelBuilder.RenderLayout(self.controller, self.current)
	self.modelBuilder.RenderData(self.controller)
}

// GetBlogDirector builds a director for the "blog" section.
func GetBlogDirector(c *AdminController, builder AdminModel) AdminDirector {
	return AdminDirector{c, builder, "blog"}
}

// GetCategoryDirector builds a director for the "category" section.
func GetCategoryDirector(c *AdminController, builder AdminModel) AdminDirector {
	return AdminDirector{c, builder, "category"}
}

// AdminModel is the builder interface each admin section implements.
type AdminModel interface {
	GetUserOrRedirectLogin(c *AdminController)
	RenderLayout(c *AdminController, current string)
	RenderData(c *AdminController)
}

// Admin supplies the default builder behavior shared by sections.
type Admin struct {
}

// GetUserOrRedirectLogin redirects to /login when no user session exists.
func (self *Admin) GetUserOrRedirectLogin(c *AdminController) {
	if _, err := GetUser(c); err != nil {
		c.Redirect("/login", 302)
	}
}

// RenderLayout fills the side menu with all table names, marks 'current' as
// the active entry, and selects the shared admin layout template.
func (self *Admin) RenderLayout(c *AdminController, current string) {
	c.Data["current"] = current
	tables := []map[string]string{}
	tableNames := utils.GetAllTableNames()
	for _, table := range tableNames {
		if table == current {
			tables = append(tables, map[string]string{"name": current, "active": "true"})
		} else {
			tables = append(tables, map[string]string{"name": table, "active": "false"})
		}
	}
	c.Data["tables"] = tables
	c.Layout = "admin.html"
}

// DeleteRecordAndReturnJson deletes the record whose id comes from the
// request and serves a {ret, message} JSON response: ret is 0 on success and
// 1 on any failure (not logged in, missing id, or a delete error, in which
// case errMsg becomes the message).
func DeleteRecordAndReturnJson(c *AdminController, DeleteFunction func(int) (error), errMsg string) {
	var recordId int
	var ret = 1
	var message = ""
	_, err := GetUserBySession(c)
	if err != nil {
		message = utils.USER_NOT_LOGIN
	} else {
		if err := c.Ctx.Input.Bind(&recordId, "id"); err != nil {
			message = utils.ID_NO_FOUND
		} else {
			if err = DeleteFunction(recordId); err != nil {
				message = errMsg
			} else {
				ret = 0
				// Runtime string (Chinese): "delete succeeded".
				message = "删除成功"
			}
		}
	}
	c.Data["json"] = map[string]interface{}{"ret": ret, "message": message}
	c.ServeJSON()
}
// 使用 `os.Exit` 来立即进行带给定状态的退出。 package main import "fmt" import "os" func main() { // 当使用 `os.Exit` 时 `defer` 将_不会_ 执行,所以这里的 `fmt.Println` // 将永远不会被调用。 defer fmt.Println("!") // 退出并且退出状态为 3。 os.Exit(3) } // 注意,不像例如 C 语言,Go 不使用在 `main` 中返回一个整 // 数来指明退出状态。如果你想以非零状态退出,那么你就要 // 使用 `os.Exit`。
package main

import (
	"html/template"
	"log"
	"net/http"
)

// Foo is the root view model handed to the templates.
type Foo struct {
	Name      string
	StaticURL string
	Things    []*Thing
}

// Thing is a single item rendered inside the page.
type Thing struct {
	Bleep string
	Bloop int
}

// loadTemplates parses every template file matching web/*.
func loadTemplates() (*template.Template, error) {
	return template.New("root").ParseGlob("web/*")
}

// Server renders the parsed templates with a fixed data set.
type Server struct {
	Data      *Foo
	templates *template.Template
}

// NewServer parses the templates and wraps them together with data.
func NewServer(data *Foo) (*Server, error) {
	tpls, err := loadTemplates()
	if err != nil {
		return nil, err
	}
	srv := &Server{
		Data:      data,
		templates: tpls,
	}
	return srv, nil
}

// ServeHTTP renders the "epicPage" template, reporting template errors
// as HTTP 500.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if err := s.templates.ExecuteTemplate(w, "epicPage", s.Data); err != nil {
		http.Error(w, err.Error(), 500)
	}
}

func main() {
	data := &Foo{
		Name:      "foo bar baz",
		StaticURL: "http://localhost:8000",
		Things: []*Thing{
			{Bleep: "gosho", Bloop: 5},
			{Bleep: "tosho", Bloop: 42},
		},
	}

	srv, err := NewServer(data)
	if err != nil {
		panic(err)
	}
	log.Fatal(http.ListenAndServe(":8080", srv))
}
/* * winnow: weighted point selection * * input: * matrix: an integer matrix, whose values are used as masses * mask: a boolean matrix showing which points are eligible for * consideration * nrows, ncols: the number of rows and columns * nelts: the number of points to select * * output: * points: a vector of (x, y) points * */ package main import ( "flag" "fmt" "sort" ) var is_bench = flag.Bool("is_bench", false, "") var matrix [][]byte var mask [][]byte type Point struct { value byte i, j int } type Points []Point var points []Point func (p Points) Len() int { return len(p) } func (p Points) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Points) Less(i, j int) bool { if p[i].value < p[j].value { return true } if p[i].value > p[j].value { return false } if p[i].i < p[j].i { return true } if p[i].i > p[j].i { return false } return p[i].j < p[j].j } func winnow(nrows, ncols, nelts int) { var n = 0 for i := 0; i < nrows; i++ { for j := 0; j < ncols; j++ { if *is_bench { if ((i * j) % (ncols + 1)) == 1 { mask[i][j] = 1 } } if mask[i][j] == 1 { n++ } } } values := make(Points, n) var count = 0 for i := 0; i < nrows; i++ { for j := 0; j < ncols; j++ { if mask[i][j] == 1 { values[count] = Point{matrix[i][j], i, j} count++ } } } sort.Sort(values) var total = len(values) var chunk int = total / nelts for i := 0; i < nelts; i++ { var index = i * chunk points[i] = values[index] } } func read_integer() int { var value int for true { var read, _ = fmt.Scanf("%d", &value) if read == 1 { break } } return value } func read_matrix(nrows, ncols int) { for i := 0; i < nrows; i++ { for j := 0; j < ncols; j++ { matrix[i][j] = byte(read_integer()) } } } func read_mask(nrows, ncols int) { for i := 0; i < nrows; i++ { for j := 0; j < ncols; j++ { mask[i][j] = byte(read_integer()) } } } func main() { var nrows, ncols, nelts int flag.Parse() nrows = read_integer() ncols = read_integer() matrix = make ([][]byte, nrows) for i := range matrix { matrix [i] = make ([]byte, ncols) } mask = 
make ([][]byte, nrows) for i := range mask { mask [i] = make ([]byte, ncols) } if !*is_bench { read_matrix(nrows, ncols) read_mask(nrows, ncols) } nelts = read_integer() points = make ([]Point, nelts) winnow(nrows, ncols, nelts) if !*is_bench { fmt.Printf("%d\n", nelts) for i := 0; i < nelts; i++ { fmt.Printf("%d %d\n", points[i].i, points[i].j) } fmt.Printf("\n") } }
package main

import "fmt"

// Function definition format:
/*
   func name(params) (results) {
       body
   }
*/

/*
func intSum(x, y int) int {
	return x + y
}

func main() {
	sum1 := intSum(1, 2)
	fmt.Println(sum1)
}
*/

// intSum2 sums a variadic list of ints; inside the function the
// arguments arrive as a []int slice.
func intSum2(nums ...int) int {
	fmt.Println(nums)
	total := 0
	for _, n := range nums {
		total += n
	}
	return total
}

func main() {
	ret1 := intSum2()
	ret2 := intSum2(10)
	ret3 := intSum2(10, 20)
	ret4 := intSum2(10, 20, 30)
	fmt.Println(ret1, ret2, ret3, ret4) // 0 10 30 60
}
// +build windows package systray import ( "runtime" "sync/atomic" "testing" "time" "unsafe" "golang.org/x/sys/windows" ) func TestBaseWindowsTray(t *testing.T) { systrayReady = func(){} systrayExit = func(){} runtime.LockOSThread() if err := wt.initInstance(); err != nil { t.Fatalf("initInstance failed: %s", err) } if err := wt.createMenu(); err != nil { t.Fatalf("createMenu failed: %s", err) } defer func() { pDestroyWindow.Call(uintptr(wt.window)) wt.wcex.unregister() }() if err := wt.setIcon("example/icon/iconwin.ico"); err != nil { t.Errorf("SetIcon failed: %s", err) } if err := wt.setTooltip("Cyrillic tooltip тест:)"); err != nil { t.Errorf("SetIcon failed: %s", err) } var id int32 = 0 err := wt.addOrUpdateMenuItem(&MenuItem{title: "Test title", id: atomic.AddInt32(&id, 1)}) if err != nil { t.Errorf("mergeMenuItem failed: %s", err) } err = wt.addOrUpdateMenuItem(&MenuItem{title: "Simple disabled", id: atomic.AddInt32(&id, 1), disabled: true}) if err != nil { t.Errorf("mergeMenuItem failed: %s", err) } err = wt.addSeparatorMenuItem(atomic.AddInt32(&id, 1)) if err != nil { t.Errorf("addSeparatorMenuItem failed: %s", err) } err = wt.addOrUpdateMenuItem(&MenuItem{title: "Simple checked enabled", id: atomic.AddInt32(&id, 1), checkable: true}) if err != nil { t.Errorf("mergeMenuItem failed: %s", err) } err = wt.addOrUpdateMenuItem(&MenuItem{title: "Simple checked disabled", id: atomic.AddInt32(&id, 1), checkable: true, checked: true, disabled: true}) if err != nil { t.Errorf("mergeMenuItem failed: %s", err) } err = wt.hideMenuItem(1) if err != nil { t.Errorf("hideMenuItem failed: %s", err) } err = wt.hideMenuItem(100) if err == nil { t.Error("hideMenuItem failed: must return error on invalid item id") } time.AfterFunc(3*time.Second, quit) m := struct { WindowHandle windows.Handle Message uint32 Wparam uintptr Lparam uintptr Time uint32 Pt point }{} for { ret, _, err := pGetMessage.Call(uintptr(unsafe.Pointer(&m)), 0, 0, 0) res := int32(ret) if res == -1 { 
t.Errorf("win32 GetMessage failed: %v", err) return } else if res == 0 { break } pTranslateMessage.Call(uintptr(unsafe.Pointer(&m))) pDispatchMessage.Call(uintptr(unsafe.Pointer(&m))) } }
package data

import (
	"errors"
	"log"

	pb "github.com/bgokden/veri/veriservice"
)

// Insert inserts data to internal kv store. When replication on insert is
// enforced by the config, the datum is also forwarded to random peer
// sources until the configured replica count is reached.
//
// Returns an error when the element target is exceeded, when the local
// insert fails, or when fewer replicas than required could be written.
func (dt *Data) Insert(datum *pb.Datum, config *pb.InsertConfig) error {
	if dt.Config != nil && !dt.Config.NoTarget && dt.N >= dt.Config.TargetN {
		return errors.New("Number of elements is over the target")
	}
	if !dt.Initialized {
		dt.InitData()
	}
	if err := dt.InsertBDMap(datum, config); err != nil {
		return err
	}
	dt.Dirty = true
	if config == nil {
		config = &pb.InsertConfig{
			TTL:   0,
			Count: 0,
		}
	}
	counter := uint32(1)
	// Guard dt.Config against nil before dereferencing it: the target
	// check above already tolerates a nil Config, but this branch used to
	// dereference it unconditionally.
	if dt.Config != nil && dt.Config.EnforceReplicationOnInsert && config.Count == 0 {
		config.Count++
		// log.Printf("Sending Insert with config.Count: %v ttl: %v\n", config.Count, config.TTL)
		dt.RunOnRandomSources(5, func(source DataSource) error {
			err := source.Insert(datum, config)
			if err != nil && CheckIfUnkownError(err) {
				// This error occurs frequently and it is normal
				log.Printf("Sending Insert error %v\n", err.Error())
			}
			if err == nil {
				counter++
			}
			if counter >= dt.Config.ReplicationOnInsert {
				return errors.New("Replication number reached")
			}
			return nil
		})
		if counter < dt.Config.ReplicationOnInsert {
			return errors.New("Replicas is less then Replication Config")
		}
	}
	return nil
}
package main

import (
	"fmt"
)

// maxProfit returns the maximum total profit obtainable from the price
// series when any number of buy/sell transactions is allowed: it sums
// every positive price movement between consecutive days.
func maxProfit(prices []int) int {
	profit := 0
	for day := 1; day < len(prices); day++ {
		if gain := prices[day] - prices[day-1]; gain > 0 {
			profit += gain
		}
	}
	return profit
}

func main() {
	prices := []int{7, 1, 5, 3, 6, 4}
	fmt.Println(maxProfit(prices))
}
package nsqsubscriber import "github.com/bitly/go-nsq" type NSQMessage struct { *nsq.Message } func (m *NSQMessage) Body() []byte { return m.Message.Body } func (m *NSQMessage) Timestamp() int64 { return m.Message.Timestamp }
package repository

import (
	"github.com/Tanibox/tania-core/src/assets/domain"
	"github.com/Tanibox/tania-core/src/assets/storage"
	"github.com/gofrs/uuid"
)

// RepositoryResult is a struct to wrap repository result
// so its easy to use it in channel
type RepositoryResult struct {
	Result interface{}
	Error  error
}

// FarmEventRepository appends farm events on top of latestVersion and
// reports completion asynchronously on the returned channel.
type FarmEventRepository interface {
	Save(uid uuid.UUID, latestVersion int, events []interface{}) <-chan error
}

// FarmReadRepository stores the farm read-model projection.
type FarmReadRepository interface {
	Save(farmRead *storage.FarmRead) <-chan error
}

// NewFarmFromHistory rebuilds a Farm aggregate by replaying its event
// history in order, bumping Version once per applied event.
func NewFarmFromHistory(events []storage.FarmEvent) *domain.Farm {
	state := &domain.Farm{}
	for _, v := range events {
		state.Transition(v.Event)
		state.Version++
	}
	return state
}

// AreaEventRepository appends area events on top of latestVersion.
type AreaEventRepository interface {
	Save(uid uuid.UUID, latestVersion int, events []interface{}) <-chan error
}

// AreaReadRepository stores the area read-model projection.
type AreaReadRepository interface {
	Save(areaRead *storage.AreaRead) <-chan error
}

// NewAreaFromHistory rebuilds an Area aggregate by replaying its event
// history in order, bumping Version once per applied event.
func NewAreaFromHistory(events []storage.AreaEvent) *domain.Area {
	state := &domain.Area{}
	for _, v := range events {
		state.Transition(v.Event)
		state.Version++
	}
	return state
}

// ReservoirEventRepository appends reservoir events on top of latestVersion.
type ReservoirEventRepository interface {
	Save(uid uuid.UUID, latestVersion int, events []interface{}) <-chan error
}

// ReservoirReadRepository stores the reservoir read-model projection.
type ReservoirReadRepository interface {
	Save(reservoirRead *storage.ReservoirRead) <-chan error
}

// NewReservoirFromHistory rebuilds a Reservoir aggregate by replaying its
// event history in order, bumping Version once per applied event.
func NewReservoirFromHistory(events []storage.ReservoirEvent) *domain.Reservoir {
	state := &domain.Reservoir{}
	for _, v := range events {
		state.Transition(v.Event)
		state.Version++
	}
	return state
}

// MaterialEventRepository appends material events on top of latestVersion.
type MaterialEventRepository interface {
	Save(uid uuid.UUID, latestVersion int, events []interface{}) <-chan error
}

// NewMaterialFromHistory rebuilds a Material aggregate by replaying its
// event history in order, bumping Version once per applied event.
func NewMaterialFromHistory(events []storage.MaterialEvent) *domain.Material {
	state := &domain.Material{}
	for _, v := range events {
		state.Transition(v.Event)
		state.Version++
	}
	return state
}

// MaterialEventTypeWrapper pairs an event payload with its type code.
type MaterialEventTypeWrapper struct {
	Type string
	Data interface{}
}

// Code returns the wrapped event's type code.
func (w MaterialEventTypeWrapper) Code() string {
	return w.Type
}

// MaterialReadRepository stores the material read-model projection.
type MaterialReadRepository interface {
	Save(materialRead *storage.MaterialRead) <-chan error
}
package main

import (
	"fmt"
	"go_code/project1/61factorymodel/model"
)

// Demonstrates the factory pattern: exported structs can be instantiated
// directly from another package, while unexported ones must go through a
// factory function.
func main() {
	// Exported struct: instantiate it directly.
	stu := model.Student{
		Name: "tom",
		Age:  12,
	}
	fmt.Println("stu:", stu)

	// An unexported struct cannot be referenced from outside its package:
	//   stu1 := model.student{Name: "tom", Age: 12}
	// fails with "cannot refer to unexported name model.student", and so
	// does declaring `var stu1 model.student`. The factory function is the
	// only way to obtain an instance.
	stu1 := model.Newstudent("jimmy", 18)
	fmt.Println("stu1", stu1)
	fmt.Println("stu1.age", stu1.GetAge())
}

// Factory pattern: for structs with a lowercase (unexported) name,
// instantiate through a factory function; for lowercase fields, expose an
// exported method that returns the value.
// Copyright The Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gcpkms import ( "context" "fmt" "os" "testing" kms "cloud.google.com/go/kms/apiv1" ) var ( testDecrypterKey string testSignerKey string ) func testClient(tb testing.TB) (*kms.KeyManagementClient, context.Context) { tb.Helper() ctx := context.Background() client, err := kms.NewKeyManagementClient(ctx) if err != nil { tb.Fatal(err) } return client, ctx } func TestMain(m *testing.M) { setFromEnv(&testDecrypterKey, "TEST_DECRYPTER_KEY") setFromEnv(&testSignerKey, "TEST_SIGNER_KEY") os.Exit(m.Run()) } func setFromEnv(s *string, k string) { v := os.Getenv(k) if v == "" { fmt.Fprintf(os.Stderr, "missing %s\n", k) os.Exit(1) } *s = v }
package compose

import (
	"fmt"
	"time"

	"github.com/kudrykv/latex-yearly-planner/app/components/calendar"
	"github.com/kudrykv/latex-yearly-planner/app/components/page"
	"github.com/kudrykv/latex-yearly-planner/app/config"
)

// DailyWMonth builds one page module per day of the configured year from a
// single template. Each module carries the day, its month calendar, and
// the configured daily hour range.
//
// It returns an error unless exactly one template is supplied.
func DailyWMonth(cfg config.Config, tpls []string) (page.Modules, error) {
	if len(tpls) != 1 {
		// Fixed typo in the error message ("exppected" -> "expected").
		return nil, fmt.Errorf("expected one tpl, got %d %v", len(tpls), tpls)
	}

	// Capacity 366 covers leap years.
	modules := make(page.Modules, 0, 366)

	soy := time.Date(cfg.Year, time.January, 1, 0, 0, 0, 0, time.Local)
	eoy := soy.AddDate(1, 0, 0)

	for today := soy; today.Before(eoy); today = today.AddDate(0, 0, 1) {
		modules = append(modules, page.Module{
			Cfg: cfg,
			Tpl: tpls[0],
			Body: map[string]interface{}{
				"Today": calendar.DayTime{Time: today},
				"Month": calendar.NewYearMonth(cfg.Year, today.Month()).Calendar(cfg.WeekStart),
				"Hours": Hours(cfg.Layout.Numbers.DailyBottomHour, cfg.Layout.Numbers.DailyTopHour),
			},
		})
	}

	return modules, nil
}
package models_test

import (
	"github.com/APTrust/exchange/models"
	"github.com/APTrust/exchange/util/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"testing"
)

// TestNewStorageSummary verifies constructor validation: a valid call
// populates the summary fields, a nil generic file or empty tar path is
// rejected with a specific error, and an empty untarred path is allowed.
func TestNewStorageSummary(t *testing.T) {
	objIdentifier := "ncsu.edu/bag1"
	tarPath := "/tmp/ncsu.edu/bag1.tar"
	untarredPath := "/tmp/ncsu.edu/bag1"
	gf := testutil.MakeGenericFile(0, 0, objIdentifier)

	// Happy path: paths are echoed into the summary and StoreResult is set.
	summary, err := models.NewStorageSummary(gf, tarPath, untarredPath)
	assert.Nil(t, err)
	assert.NotNil(t, summary)
	assert.NotNil(t, summary.StoreResult)
	assert.Equal(t, tarPath, summary.TarFilePath)
	assert.Equal(t, untarredPath, summary.UntarredPath)

	// A nil generic file must be rejected.
	summary, err = models.NewStorageSummary(nil, tarPath, untarredPath)
	require.NotNil(t, err)
	assert.Equal(t, "Param gf cannot be nil", err.Error())

	// An empty tar path must be rejected.
	summary, err = models.NewStorageSummary(gf, "", untarredPath)
	require.NotNil(t, err)
	assert.Equal(t, "Param tarPath cannot be empty", err.Error())

	// OK for untarredPath to be empty
	summary, err = models.NewStorageSummary(gf, tarPath, "")
	assert.Nil(t, err)
	assert.NotNil(t, summary)
}
package tkapi

import (
	"bytes"
	"encoding/json"
	"errors"

	"github.com/mrxiaojie/taobaoke"
)

// ItemInfo wraps the taobao.tbk.item.info.get API call.
type ItemInfo struct {
	ReqParam ItemInfoParam
}

// ItemInfoParam holds the request parameters for the item info API.
type ItemInfoParam struct {
	NumIids  string // comma-separated item ids, max 40, e.g. "123,456"
	Platform int    // link type: 1 = PC, 2 = wireless; default 1
	Ip       string // client ip; affects shipping-fee accuracy
}

// Init resets the request parameters to their documented defaults.
func (t *ItemInfo) Init() {
	t.ReqParam.NumIids = ""
	t.ReqParam.Platform = 1
	t.ReqParam.Ip = ""
}

// GetParam returns the request parameters as the map the top client expects.
func (t *ItemInfo) GetParam() map[string]interface{} {
	return map[string]interface{}{
		"num_iids": t.ReqParam.NumIids,
		"platform": t.ReqParam.Platform,
		"ip":       t.ReqParam.Ip,
	}
}

// ApiName returns the remote API method name.
func (t *ItemInfo) ApiName() (s string) {
	return "taobao.tbk.item.info.get"
}

// Run executes the request with the given credentials. When resIsMap is
// true the JSON response is decoded into a map; otherwise the raw bytes
// are returned. On validation failure it returns (nil, error).
func (t *ItemInfo) Run(appKey string, appSecret string, resIsMap bool) (interface{}, error) {
	if t.ReqParam.NumIids == "" {
		// Fixed: previously returned an unrelated empty map[string]string
		// alongside the error; return nil so the error result is idiomatic
		// and consistent with the success types.
		return nil, errors.New("商品ID串未设置")
	}
	// Renamed local so it no longer shadows the taobaoke.TopClient type name.
	client := taobaoke.TopClient{}
	client.SetConf(appKey, appSecret)
	respByte, err := client.Exec(t)
	if resIsMap {
		mapData := make(map[string]interface{})
		// Decode error intentionally ignored as before; err from Exec is
		// propagated to the caller.
		json.NewDecoder(bytes.NewBuffer(respByte)).Decode(&mapData)
		return mapData, err
	}
	return respByte, err
}
/* The MIT License (MIT) Copyright (c) 2019 Microsoft Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package v1alpha1 // SecretScopeSecret represents a secret in a secret scope type SecretScopeSecret struct { Key string `json:"key,omitempty"` StringValue string `json:"string_value,omitempty"` ByteValue string `json:"byte_value,omitempty"` ValueFrom *SecretScopeValueFrom `json:"value_from,omitempty"` } // SecretScopeACL represents ACLs for a secret scope type SecretScopeACL struct { Principal string `json:"principal,omitempty"` Permission string `json:"permission,omitempty"` } // SecretScopeValueFrom references a secret scope type SecretScopeValueFrom struct { SecretKeyRef SecretScopeKeyRef `json:"secret_key_ref,omitempty"` } // SecretScopeKeyRef refers to a secret scope Key type SecretScopeKeyRef struct { Name string `json:"name,omitempty"` Key string `json:"key,omitempty"` }