text
stringlengths
11
4.05M
package main import ( "fmt" "github.com/gofiber/fiber/v2" ) func main() { app := fiber.New() app.Get("/", func(c *fiber.Ctx) error { return c.JSON(&fiber.Map{ "message": "Hello from Docker 🐳!!1", }) }) err := app.Listen(":8080") if err != nil { fmt.Println(err) } }
package algorithm func RemoveNthFromEnd(head *ListNode, n int) *ListNode { if head == nil { return head } slow := head fast := head i := 0 for i=0; i<n; i++ { if fast == nil { break } fast = fast.Next } if i < n { return head } if fast == nil { return head.Next } for fast.Next != nil { fast = fast.Next slow = slow.Next } slow.Next = slow.Next.Next return head }
package main import ( "excho-job/routes" "github.com/gin-contrib/cors" "github.com/gin-gonic/gin" ) func main() { r := gin.Default() // CORS disable r.Use(cors.Default()) routes.JobSeekerRoute(r) routes.HireRoute(r) routes.JobsRoute(r) routes.JobSeekerDetailsRoute(r) routes.ResumeRoute(r) routes.JobProfileRoute(r) routes.JobSeekerProfileRoute(r) routes.ApplyJobsRoute(r) r.Run() }
package main

import (
	"fmt"
	"log"
	"os"
	"text/template"
	"time"
)

// tpl holds the template set parsed once at startup.
var tpl *template.Template

// fm exposes formatDatetime to templates under the name "fDateTime".
var fm = template.FuncMap{
	"fDateTime": formatDatetime,
}

// formatDatetime renders t using the ANSIC reference layout
// ("Mon Jan _2 15:04:05 2006").
func formatDatetime(t time.Time) string {
	return t.Format(time.ANSIC)
}

func init() {
	// Parse tpl.gohtml with the custom FuncMap; panic on failure since the
	// program cannot run without its template.
	tpl = template.Must(template.New("").Funcs(fm).ParseFiles("tpl.gohtml"))
}

// main executes tpl.gohtml against the current time, writing to stdout.
func main() {
	if err := tpl.ExecuteTemplate(os.Stdout, "tpl.gohtml", time.Now()); err != nil {
		log.Fatalln(err)
	}
	fmt.Println("done")
}
package dbc

import "fmt"

// Logger is the common logging interface used to report contract-related
// panics (design-by-contract violations).
type Logger interface {
	Debug(msg string)
}

// SimpleInvariantValidator is implemented by objects that can check their own
// invariant.
type SimpleInvariantValidator interface {
	Invariant() bool
}

// InvariantValidator combines invariant checking with fmt.Stringer so that a
// violating object can also describe itself.
type InvariantValidator interface {
	SimpleInvariantValidator
	fmt.Stringer
}
package model

// SendVerificationMailRequest carries the data needed to send an account
// verification e-mail to a user.
type SendVerificationMailRequest struct {
	UserId uint   // ID of the user the mail is addressed to
	Email  string // destination e-mail address
	Token  string // verification token to embed in the mail
}
package crd

import (
	"context"
	"io"
	"os"
	"path/filepath"

	fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	"github.com/rancher/wrangler/pkg/crd"
	"github.com/rancher/wrangler/pkg/schemas/openapi"
	"github.com/rancher/wrangler/pkg/yaml"
	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
)

// Create registers every fleet CRD from list() with the cluster reachable via
// cfg and waits until they are all established.
func Create(ctx context.Context, cfg *rest.Config) error {
	factory, err := crd.NewFactoryFromClient(cfg)
	if err != nil {
		return err
	}
	return factory.BatchCreateCRDs(ctx, list()...).BatchWait()
}

// WriteFile renders all fleet CRDs as YAML into filename, creating parent
// directories as needed.
func WriteFile(filename string) error {
	if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil {
		return err
	}
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer f.Close()
	return print(f)
}

// print writes the YAML export of all CRD objects to out.
// Note: shadows the built-in print; unexported, so only this package is affected.
func print(out io.Writer) error {
	obj, err := objects()
	if err != nil {
		return err
	}
	data, err := yaml.Export(obj...)
	if err != nil {
		return err
	}
	_, err = out.Write(data)
	return err
}

// objects converts every CRD definition from list() into a runtime.Object
// (a CustomResourceDefinition), stopping at the first conversion error.
func objects() (result []runtime.Object, err error) {
	for _, crdDef := range list() {
		// Local `crd` shadows the imported crd package inside this loop body.
		crd, err := crdDef.ToCustomResourceDefinition()
		if err != nil {
			return nil, err
		}
		result = append(result, crd)
	}
	return
}

// list returns the full set of fleet CRDs, each customized with extra schema
// validations and kubectl printer columns. The WithSchemaFromStruct(nil)
// followed by WithSchema(schema) pattern replaces the struct-derived schema
// with the hand-tweaked one.
func list() []crd.CRD {
	return []crd.CRD{
		newCRD(&fleet.Bundle{}, func(c crd.CRD) crd.CRD {
			schema := mustSchema(fleet.Bundle{})
			// Constrain the helm release name to what helm itself accepts.
			schema.Properties["spec"].Properties["helm"].Properties["releaseName"] = releaseNameValidation()
			c.GVK.Kind = "Bundle"
			return c.
				WithSchemaFromStruct(nil).
				WithSchema(schema).
				WithColumn("BundleDeployments-Ready", ".status.display.readyClusters").
				WithColumn("Status", ".status.conditions[?(@.type==\"Ready\")].message")
		}),
		newCRD(&fleet.BundleDeployment{}, func(c crd.CRD) crd.CRD {
			schema := mustSchema(fleet.BundleDeployment{})
			schema.Properties["spec"].Properties["options"].Properties["helm"].Properties["releaseName"] = releaseNameValidation()
			c.GVK.Kind = "BundleDeployment"
			return c.
				WithSchemaFromStruct(nil).
				WithSchema(schema).
				WithColumn("Deployed", ".status.display.deployed").
				WithColumn("Monitored", ".status.display.monitored").
				WithColumn("Status", ".status.conditions[?(@.type==\"Ready\")].message")
		}),
		newCRD(&fleet.BundleNamespaceMapping{}, func(c crd.CRD) crd.CRD {
			return c
		}),
		newCRD(&fleet.ClusterGroup{}, func(c crd.CRD) crd.CRD {
			return c.
				WithCategories("fleet").
				WithColumn("Clusters-Ready", ".status.display.readyClusters").
				WithColumn("Bundles-Ready", ".status.display.readyBundles").
				WithColumn("Status", ".status.conditions[?(@.type==\"Ready\")].message")
		}),
		newCRD(&fleet.Cluster{}, func(c crd.CRD) crd.CRD {
			schema := mustSchema(fleet.Cluster{})
			schema.Properties["metadata"] = metadataNameValidation()
			schema.Properties["spec"].Properties["agentTolerations"].Items.Schema.Properties["tolerationSeconds"] = tolerationSecondsValidation()
			// Restrict the node/pod affinity operator fields to the values
			// Kubernetes itself accepts, for every nesting level.
			nodeAffinity := nodeAffinity(schema)
			nodeAffinity.Properties["requiredDuringSchedulingIgnoredDuringExecution"].Properties["nodeSelectorTerms"].Items.Schema.Properties["matchExpressions"].Items.Schema.Properties["operator"] = nodeSelectorOperatorValidation()
			nodeAffinity.Properties["requiredDuringSchedulingIgnoredDuringExecution"].Properties["nodeSelectorTerms"].Items.Schema.Properties["matchFields"].Items.Schema.Properties["operator"] = nodeSelectorOperatorValidation()
			nodeAffinity.Properties["preferredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["preference"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = nodeSelectorOperatorValidation()
			nodeAffinity.Properties["preferredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["preference"].Properties["matchFields"].Items.Schema.Properties["operator"] = nodeSelectorOperatorValidation()
			podAffinity := podAffinity(schema)
			podAffinity.Properties["requiredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["labelSelector"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = labelSelectorOperatorValidation()
			podAffinity.Properties["requiredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["namespaceSelector"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = labelSelectorOperatorValidation()
			podAffinity.Properties["preferredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["podAffinityTerm"].Properties["labelSelector"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = labelSelectorOperatorValidation()
			podAffinity.Properties["preferredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["podAffinityTerm"].Properties["namespaceSelector"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = labelSelectorOperatorValidation()
			podAntiAffinity := podAntiAffinity(schema)
			podAntiAffinity.Properties["requiredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["labelSelector"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = labelSelectorOperatorValidation()
			podAntiAffinity.Properties["requiredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["namespaceSelector"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = labelSelectorOperatorValidation()
			podAntiAffinity.Properties["preferredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["podAffinityTerm"].Properties["labelSelector"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = labelSelectorOperatorValidation()
			podAntiAffinity.Properties["preferredDuringSchedulingIgnoredDuringExecution"].Items.Schema.Properties["podAffinityTerm"].Properties["namespaceSelector"].Properties["matchExpressions"].Items.Schema.Properties["operator"] = labelSelectorOperatorValidation()
			c.GVK.Kind = "Cluster"
			return c.
				WithSchemaFromStruct(nil).
				WithSchema(schema).
				WithColumn("Bundles-Ready", ".status.display.readyBundles").
				WithColumn("Nodes-Ready", ".status.display.readyNodes").
				WithColumn("Sample-Node", ".status.display.sampleNode").
				WithColumn("Last-Seen", ".status.agent.lastSeen").
				WithColumn("Status", ".status.conditions[?(@.type==\"Ready\")].message")
		}),
		newCRD(&fleet.ClusterRegistrationToken{}, func(c crd.CRD) crd.CRD {
			schema := mustSchema(fleet.ClusterRegistrationToken{})
			schema.Properties["metadata"] = metadataNameValidation()
			c.GVK.Kind = "ClusterRegistrationToken"
			return c.
				WithSchemaFromStruct(nil).
				WithSchema(schema).
				WithColumn("Secret-Name", ".status.secretName")
		}),
		newCRD(&fleet.GitRepo{}, func(c crd.CRD) crd.CRD {
			return c.
				WithCategories("fleet").
				WithColumn("Repo", ".spec.repo").
				WithColumn("Commit", ".status.commit").
				WithColumn("BundleDeployments-Ready", ".status.display.readyBundleDeployments").
				WithColumn("Status", ".status.conditions[?(@.type==\"Ready\")].message")
		}),
		newCRD(&fleet.ClusterRegistration{}, func(c crd.CRD) crd.CRD {
			return c.
				WithColumn("Cluster-Name", ".status.clusterName").
				WithColumn("Labels", ".spec.clusterLabels")
		}),
		newCRD(&fleet.GitRepoRestriction{}, func(c crd.CRD) crd.CRD {
			return c.
				WithColumn("Default-ServiceAccount", ".defaultServiceAccount").
				WithColumn("Allowed-ServiceAccounts", ".allowedServiceAccounts")
		}),
		newCRD(&fleet.Content{}, func(c crd.CRD) crd.CRD {
			// Content is cluster-scoped and has no status subresource.
			c.NonNamespace = true
			c.Status = false
			return c
		}),
		newCRD(&fleet.ImageScan{}, func(c crd.CRD) crd.CRD {
			return c.WithCategories("fleet").
				WithColumn("Repository", ".spec.image").
				WithColumn("Latest", ".status.latestTag")
		}),
	}
}

// newCRD builds a fleet.cattle.io/v1alpha1 CRD for obj and applies the
// optional customize callback.
func newCRD(obj interface{}, customize func(crd.CRD) crd.CRD) crd.CRD {
	// Local `crd` shadows the imported crd package for the rest of this function.
	crd := crd.CRD{
		GVK: schema.GroupVersionKind{
			Group:   "fleet.cattle.io",
			Version: "v1alpha1",
		},
		Status:       true,
		SchemaObject: obj,
	}
	if customize != nil {
		crd = customize(crd)
	}
	return crd
}

// metadataNameValidation returns a schema that validates the metadata.name field
// metadata:
//
//	properties:
//	  name:
//	    type: string
//	    pattern: "^[-a-z0-9]+$"
//	    maxLength: 63
//	type: object
func metadataNameValidation() apiextv1.JSONSchemaProps {
	prop := apiextv1.JSONSchemaProps{
		Type:      "string",
		Pattern:   "^[-a-z0-9]+$",
		MaxLength: &[]int64{63}[0],
	}
	return apiextv1.JSONSchemaProps{
		Type:       "object",
		Properties: map[string]apiextv1.JSONSchemaProps{"name": prop},
	}
}

// releaseNameValidation for helm release names according to helm itself
func releaseNameValidation() apiextv1.JSONSchemaProps {
	return apiextv1.JSONSchemaProps{
		Type:      "string",
		Pattern:   `^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`,
		MaxLength: &[]int64{fleet.MaxHelmReleaseNameLen}[0],
		Nullable:  true,
	}
}

// tolerationSecondsValidation limits the maximum of TolerationSeconds to one day
func tolerationSecondsValidation() apiextv1.JSONSchemaProps {
	return apiextv1.JSONSchemaProps{
		Type:     "integer",
		Maximum:  &[]float64{86400}[0],
		Nullable: true,
	}
}

// nodeSelectorOperatorValidation validates the Operator is one of: In, NotIn, Exists, DoesNotExist, Gt, Lt
func nodeSelectorOperatorValidation() apiextv1.JSONSchemaProps {
	return apiextv1.JSONSchemaProps{
		Type:     "string",
		Enum:     []apiextv1.JSON{{Raw: []byte(`"In"`)}, {Raw: []byte(`"NotIn"`)}, {Raw: []byte(`"Exists"`)}, {Raw: []byte(`"DoesNotExist"`)}, {Raw: []byte(`"Gt"`)}, {Raw: []byte(`"Lt"`)}},
		Nullable: true,
	}
}

// labelSelectorOperatorValidation validates the Operator is one of: In, NotIn, Exists, DoesNotExist
func labelSelectorOperatorValidation() apiextv1.JSONSchemaProps {
	return apiextv1.JSONSchemaProps{
		Type:     "string",
		Enum:     []apiextv1.JSON{{Raw: []byte(`"In"`)}, {Raw: []byte(`"NotIn"`)}, {Raw: []byte(`"Exists"`)}, {Raw: []byte(`"DoesNotExist"`)}},
		Nullable: true,
	}
}

// mustSchema derives the OpenAPI schema for obj, panicking on failure
// (only used at startup on known types).
func mustSchema(obj interface{}) *apiextv1.JSONSchemaProps {
	result, err := openapi.ToOpenAPIFromStruct(obj)
	if err != nil {
		panic(err)
	}
	return result
}

// agentAffinity returns the spec.agentAffinity subtree of a Cluster schema.
func agentAffinity(schema *apiextv1.JSONSchemaProps) apiextv1.JSONSchemaProps {
	return schema.Properties["spec"].Properties["agentAffinity"]
}

// nodeAffinity returns the spec.agentAffinity.nodeAffinity subtree.
func nodeAffinity(schema *apiextv1.JSONSchemaProps) apiextv1.JSONSchemaProps {
	return agentAffinity(schema).Properties["nodeAffinity"]
}

// podAffinity returns the spec.agentAffinity.podAffinity subtree.
func podAffinity(schema *apiextv1.JSONSchemaProps) apiextv1.JSONSchemaProps {
	return agentAffinity(schema).Properties["podAffinity"]
}

// podAntiAffinity returns the spec.agentAffinity.podAntiAffinity subtree.
func podAntiAffinity(schema *apiextv1.JSONSchemaProps) apiextv1.JSONSchemaProps {
	return agentAffinity(schema).Properties["podAntiAffinity"]
}
// Copyright (C)2018 by Lei Peng <pyp126@gmail.com> // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package web import "net/http" type WebApp interface { Version() VersionInfo Dispatch(w http.ResponseWriter, r *http.Request) } type VersionInfo struct { Name string Version string Description string } type BaseWebApp struct { AppVersion *VersionInfo } func NewBaseApp() *BaseWebApp { app := new(BaseWebApp) app.AppVersion = new(VersionInfo) return app } func (this BaseWebApp) Version() VersionInfo { return *this.AppVersion } func (this BaseWebApp) Dispatch(w http.ResponseWriter, r *http.Request) { http.NotFound(w, r) } var WebRoutes map[string]WebApp func init() { WebRoutes = make(map[string]WebApp) }
package thulac

import (
	"sync"
	"syscall"
	"unsafe"
)

// Handles to libthulac.dll and the exported procedures resolved from it.
var (
	dll              *syscall.DLL
	thulacInit       *syscall.Proc
	thulacDestory    *syscall.Proc
	thulacGetCtx     *syscall.Proc
	thulacFreeCtx    *syscall.Proc
	thulacCut        *syscall.Proc
	thulacFreeResult *syscall.Proc
)

var (
	// ctxCh is a pool of native THULAC context handles, one per worker.
	ctxCh chan uintptr
	// doneWG tracks in-flight calls so Destory can wait for them to finish.
	doneWG sync.WaitGroup
)

// Init loads libthulac.dll, initializes the native THULAC library with
// `workers` contexts (minimum 1), and fills the context pool.
func Init(workers int) {
	dll = syscall.MustLoadDLL("libthulac.dll")
	/*
		__declspec(dllexport) void thulac_init(const char *path, int maxWorkers);
		__declspec(dllexport) void thulac_destory();
		__declspec(dllexport) void *thulac_get_ctx();
		__declspec(dllexport) void thulac_free_ctx(void *ctx);
		// nullptr: failed
		// !null: result
		__declspec(dllexport) const char *thulac_cut(void *ctx, const char *seg, int len, int32_t *outBufferSize);
		__declspec(dllexport) void thulac_free_result(const char *res);
	*/
	thulacInit = dll.MustFindProc("thulac_init")
	thulacDestory = dll.MustFindProc("thulac_destory")
	thulacGetCtx = dll.MustFindProc("thulac_get_ctx")
	thulacFreeCtx = dll.MustFindProc("thulac_free_ctx")
	thulacCut = dll.MustFindProc("thulac_cut")
	thulacFreeResult = dll.MustFindProc("thulac_free_result")
	// Empty model path — presumably the DLL falls back to a default; TODO confirm.
	path := ""
	if workers < 1 {
		workers = 1
	}
	// NOTE(review): syscall.StringBytePtr is deprecated (panics on interior
	// NUL); safe here only because path is the constant "".
	thulacInit.Call(uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), uintptr(workers))
	ctxCh = make(chan uintptr, workers)
	// NOTE(review): if thulac_get_ctx keeps returning null this loop never
	// terminates — consider a retry limit.
	for len(ctxCh) < workers {
		ctx, _, _ := thulacGetCtx.Call()
		if ctx != 0 {
			ctxCh <- ctx
		}
	}
}

// Destory waits for outstanding work, frees every pooled context, shuts the
// native library down, and unloads the DLL. (Spelling kept: it is the
// package's exported API name.)
func Destory() {
	doneWG.Wait()
	for len(ctxCh) > 0 {
		ctx := <-ctxCh
		thulacFreeCtx.Call(ctx)
	}
	thulacDestory.Call()
	dll.Release()
}
package yoinker

import (
	"sort"

	"github.com/lethal-bacon0/WebnovelYoinker/pkg/yoinker/book"
)

//IYoinkerManager Provides Functionality to yoink Webnovels and Webtoons
type IYoinkerManager interface {
	StartYoink(metadata book.Metadata, exportPath string) string
	GetAvailableVolumes(url string, website string) []book.Volume
}

//WebnovelYoinker scrapes webnovels and webtoons and exports them as epub or pdf
type webnovelYoinker struct {
	scraper  IScrapingStrategy
	exporter IExportStrategy
}

// StartYoink scrapes every chapter listed in metadata concurrently, sorts the
// results back into chapter order, and exports them to path, returning the
// exporter's result. (The previous sync.WaitGroup was Add/Done'd but never
// waited on — dead code — and has been removed.)
func (w *webnovelYoinker) StartYoink(metadata book.Metadata, path string) string {
	jobs := make(chan book.Chapter, 100)
	results := make(chan book.Chapter, 100)

	// Fixed-size worker pool; workers exit when jobs is closed.
	for i := 0; i < 16; i++ {
		go w.chapterScraperWorker(jobs, results)
	}

	// Produce jobs from a separate goroutine so feeding more chapters than
	// the channel capacity cannot deadlock against result collection.
	go func() {
		for i, chapterURL := range metadata.ChapterURLs {
			jobs <- book.Chapter{ChapterNumber: i + 1, URL: chapterURL}
		}
		close(jobs)
	}()

	// Collect exactly one result per requested chapter (arbitrary order).
	scrapedChapters := make([]book.Chapter, 0, len(metadata.ChapterURLs))
	for range metadata.ChapterURLs {
		scrapedChapters = append(scrapedChapters, <-results)
	}
	close(results)
	return w.exporter.Export(metadata, path, w.sortChapters(scrapedChapters))
}

// chapterScraperWorker scrapes each queued chapter and forwards the result.
func (w *webnovelYoinker) chapterScraperWorker(jobs <-chan book.Chapter, results chan<- book.Chapter) {
	for job := range jobs {
		results <- w.scraper.ScrapeChapter(job.URL, job.ChapterNumber)
	}
}

//GetAvailableVolumes get all available volumes of provided url
func (w *webnovelYoinker) GetAvailableVolumes(url string, website string) []book.Volume {
	return w.scraper.GetAvailableChapters(url)
}

// sortChapters orders chapters ascending by ChapterNumber. Uses the standard
// library's stable sort instead of the former hand-rolled O(n²) bubble sort.
func (w *webnovelYoinker) sortChapters(chapters []book.Chapter) []book.Chapter {
	sort.SliceStable(chapters, func(i, j int) bool {
		return chapters[i].ChapterNumber < chapters[j].ChapterNumber
	})
	return chapters
}
package main

/*
1.3.14 Write a class ResizingArrayQueueOfStrings that implements the queue
abstraction with a fixed-size array, then extend the implementation to use
array resizing to remove the size restriction.
*/

// main is an intentionally empty placeholder for the exercise above.
func main() {

}
/*
 * @Description: Go learning playground — package basics plus a sync.Map /
 *               WaitGroup concurrency demo.
 * @Author: ccj
 * @Date: 2020-05-02 22:42:42
 * @LastEditTime: 2020-12-28 22:18:12
 * @LastEditors:
 */
package main

import (
	"fmt"
	"go_learn"
	// "time"
	"sync"
)

// Global sync primitives shared by main and the addNumber goroutines.
var syncMap sync.Map
var waitGroup sync.WaitGroup

func main() {
	fmt.Println("Hello world!")
	basic.Learn1()
	basic.Learn2()
	person := &basic.Person{
		"ccj", "1995-04", 25,
	}
	person.PrintPerson()
	basic.Learn3()
	// time.Sleep(time.Second*20)
	// go basic.Learn4()
	// Demo of the sync package: Mutex, RWMutex, Map, WaitGroup.
	goSize := 5
	// Set the WaitGroup counter before spawning the goroutines.
	waitGroup.Add(goSize)
	for i := 0; i < goSize; i++ {
		go addNumber(i * 10)
	}
	// Block the main goroutine until every worker has called Done.
	waitGroup.Wait()
	var size int
	syncMap.Range(func(key, value interface{}) bool {
		size++
		fmt.Println("key:vaule is ", key, ":", value, " ")
		return true
	})
	fmt.Printf("syncMap size is %d\n", size)
	value, ok := syncMap.Load(0)
	if ok {
		fmt.Println("key 0 has value is", value, " ")
	}
}

// addNumber stores three consecutive keys (start, start+1, start+2) into the
// shared syncMap, then signals completion on the shared WaitGroup.
func addNumber(start int) {
	for i := start; i < start+3; i++ {
		syncMap.Store(i, i)
	}
	// Signal that this call has finished.
	waitGroup.Done()
}
package worker import ( "fmt" "io/ioutil" "os" "strings" "time" gocontext "context" "github.com/bitly/go-simplejson" "github.com/sirupsen/logrus" "github.com/travis-ci/worker/backend" "github.com/travis-ci/worker/context" "github.com/travis-ci/worker/metrics" ) type fileJob struct { createdFile string receivedFile string startedFile string finishedFile string logFile string bytes []byte payload *JobPayload rawPayload *simplejson.Json startAttributes *backend.StartAttributes finishState FinishState requeued bool } func (j *fileJob) Payload() *JobPayload { return j.payload } func (j *fileJob) RawPayload() *simplejson.Json { return j.rawPayload } func (j *fileJob) StartAttributes() *backend.StartAttributes { return j.startAttributes } func (j *fileJob) FinishState() FinishState { return j.finishState } func (j *fileJob) Requeued() bool { return j.requeued } func (j *fileJob) Received(_ gocontext.Context) error { return os.Rename(j.createdFile, j.receivedFile) } func (j *fileJob) Started(_ gocontext.Context) error { return os.Rename(j.receivedFile, j.startedFile) } func (j *fileJob) Error(ctx gocontext.Context, errMessage string) error { log, err := j.LogWriter(ctx, time.Minute) if err != nil { return err } _, err = log.WriteAndClose([]byte(errMessage)) if err != nil { return err } return j.Finish(ctx, FinishStateErrored) } func (j *fileJob) Requeue(ctx gocontext.Context) error { context.LoggerFromContext(ctx).WithField("self", "file_job").Info("requeueing job") metrics.Mark("worker.job.requeue") j.requeued = true var err error for _, fname := range []string{ j.receivedFile, j.startedFile, j.finishedFile, } { err = os.Rename(fname, j.createdFile) if err == nil { return nil } } return err } func (j *fileJob) Finish(ctx gocontext.Context, state FinishState) error { context.LoggerFromContext(ctx).WithFields(logrus.Fields{ "state": state, "self": "file_job", }).Info("finishing job") metrics.Mark(fmt.Sprintf("travis.worker.job.finish.%s", state)) err := 
os.Rename(j.startedFile, j.finishedFile) if err != nil { return err } return ioutil.WriteFile(strings.Replace(j.finishedFile, ".json", ".state", -1), []byte(state), os.FileMode(0644)) } func (j *fileJob) LogWriter(ctx gocontext.Context, defaultLogTimeout time.Duration) (LogWriter, error) { logTimeout := time.Duration(j.payload.Timeouts.LogSilence) * time.Second if logTimeout == 0 { logTimeout = defaultLogTimeout } return newFileLogWriter(ctx, j.logFile, logTimeout) } func (j *fileJob) SetupContext(ctx gocontext.Context) gocontext.Context { return ctx } func (j *fileJob) Name() string { return "file" }
// The MIT License (MIT) // Copyright (c) 2014 Jade E Services Pvt. Ltd. // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
package model import ( "fmt" "github.com/pelletier/go-toml" "os" "reflect" ) var ini *toml.TomlTree func init() { var err error dir, _ := os.Getwd() // order in which to search for config file files := []string{ dir + "/dev.ini", dir + "/config.ini", dir + "/config/dev.ini", dir + "/config/config.ini", } for _, f := range files { ini, err = toml.LoadFile(f) if err == nil { fmt.Println("Loaded Configuration:", f) return } } fmt.Println("No configuration file found") } func Get(lookup string, def interface{}) interface{} { if ini == nil { return def } val := ini.Get(lookup) if val == nil { return def } else { return val } } func GetMap(lookupParent string, def map[string]interface{}) map[string]interface{} { if ini == nil { return def } tmp := Get(lookupParent, def) t := tmp.(*toml.TomlTree) mp := make(map[string]interface{}) for _, key := range t.Keys() { mp[key] = t.Get(key) } return mp } func GetString(lookup string, def string) string { value := Get(lookup, def) switch value.(type) { case string: return value.(string) } return def } func GetInt(lookup string, def int) int { value := Get(lookup, def) switch value.(type) { case int, int8, int16, int32, int64: return int(reflect.ValueOf(value).Int()) // don't expect long ints in ini configuraton // so converting int64 to int should be ok. } return def } func GetBool(lookup string, def bool) bool { value := Get(lookup, def) switch value.(type) { case bool: return value.(bool) } return def }
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package env provides the basic building block in a virtualnet.
package env

import (
	"context"
	"fmt"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"golang.org/x/sys/unix"

	"chromiumos/tast/common/testexec"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/network/virtualnet/subnet"
	"chromiumos/tast/testing"
)

// rootSymlinks lists {link, target} pairs to create inside the chroot.
var rootSymlinks = [][]string{{"var/run", "/run"}, {"var/lock", "/run/lock"}}

// bindRootDirs contains the paths which will be bind mounted when running a
// process.
var bindRootDirs = []string{"bin", "dev", "dev/pts", "etc/group", "etc/passwd", "lib", "lib32", "lib64", "proc", "sbin", "sys", "usr", "usr/local", "usr/local/sbin"}

// bindRootWritableDirs is the subset of bindRootDirs that should be mounted
// writable.
var bindRootWritableDirs = []string{"dev/pts"}

// createdRootDirs contains the paths which will be created inside the chroot.
var createdRootDirs = []string{"etc", "etc/ssl", "tmp", "var", "var/log", "run", "run/lock"}

// Env wraps the chroot variables.
type Env struct {
	// name is the unique identifier used in netns/veth/log-file names.
	name string
	// NetNSName is the name of netns associated with this object.
	NetNSName string
	// VethOutName is the name of the interface outside the associated netns.
	VethOutName string
	// VethInName is the name of the interface inside the associated netns.
	VethInName string

	chrootDir    string
	netJailArgs  []string
	netnsCreated bool
	servers      map[string]server
}

// A server represents a process (or processes for the same functionality)
// running in and managed by a Env. Struct that implements this interface can be
// registered with Env by StartServer(), and then when Env is shutting down,
// stop() and writeLogs() will be called to cleanup and collect logs.
type server interface {
	// Start starts the server.
	Start(ctx context.Context, e *Env) error
	// Stop stops the server.
	Stop(ctx context.Context) error
	// WriteLogs writes the logs with this server into |f|.
	WriteLogs(ctx context.Context, f *os.File) error
}

// New creates a new Env object. |name| will be used as part of the names of
// netns, ifnames of veths, and the log file, and thus it should be unique among
// different Env objects.
func New(name string) *Env {
	return &Env{
		name:        name,
		NetNSName:   "netns-" + name,
		VethOutName: "etho_" + name,
		VethInName:  "ethi_" + name,
		servers:     map[string]server{},
	}
}

// NewHidden creates a new Env object. Different from New(), the veth interface
// created in this method will not be visible to shill (its name started with
// "veth" and thus shill will ignore it).
func NewHidden(name string) *Env {
	return &Env{
		name:        name,
		NetNSName:   "netns-" + name,
		VethOutName: "vetho_" + name,
		VethInName:  "vethi_" + name,
		servers:     map[string]server{},
	}
}

// SetUp starts the required environment, which includes a chroot, a netns, and
// a pair of veths with one peer inside the netns and the other peer outside the
// netns. It is caller's responsibility to call Cleanup() on the returned object
// if this call succeeded.
func (e *Env) SetUp(ctx context.Context) error {
	// On any failure below, undo the partial setup before returning.
	success := false
	defer func() {
		if success {
			return
		}
		if err := e.Cleanup(ctx); err != nil {
			testing.ContextLogf(ctx, "Failed to cleanup env %s: %v", e.name, err)
		}
	}()

	const maxIfNameLen = 15 // IFNAMSIZ=16
	if len(e.VethInName) > maxIfNameLen || len(e.VethOutName) > maxIfNameLen {
		return errors.Errorf(
			"ifname is too long: len(%s)=%d, len(%s)=%d, 15 at maximum",
			e.VethInName, len(e.VethInName), e.VethOutName, len(e.VethOutName))
	}

	if err := e.makeChroot(ctx); err != nil {
		return errors.Wrap(err, "failed to make the chroot")
	}

	if err := e.makeNetNS(ctx); err != nil {
		return errors.Wrap(err, "failed to create and connect to netns")
	}

	success = true
	return nil
}

// Cleanup removes all the modifications that this object does on the DUT. The
// last error will be returned if any operation failed.
func (e *Env) Cleanup(ctx context.Context) error {
	// Errors are logged and collected but do not abort the cleanup; every
	// step below runs regardless, and only the last error is returned.
	var lastErr error
	updateLastErrAndLog := func(err error) {
		lastErr = err
		testing.ContextLog(ctx, "Cleanup failed: ", lastErr)
	}

	// Collect logs and clean up servers.
	f, err := e.createLogFile(ctx)
	if err != nil {
		updateLastErrAndLog(errors.Wrapf(err, "failed to open file for logging in %s", e.name))
	}
	for serverName, server := range e.servers {
		if err := server.Stop(ctx); err != nil {
			updateLastErrAndLog(errors.Wrapf(err, "failed to stop server %s in %s", serverName, e.name))
		}
		if f == nil {
			continue
		}
		if _, err := f.WriteString("\n\n>>>>> " + serverName + "\n"); err != nil {
			updateLastErrAndLog(errors.Wrapf(err, "failed to write header lines in log file for server %s in %s", serverName, e.name))
		}
		if err := server.WriteLogs(ctx, f); err != nil {
			updateLastErrAndLog(errors.Wrapf(err, "failed to write logs for server %s in %s", serverName, e.name))
		}
	}

	// Remove veth interface and the netns.
	if e.netnsCreated {
		if err := testexec.CommandContext(ctx, "ip", "netns", "del", e.NetNSName).Run(); err != nil {
			updateLastErrAndLog(errors.Wrapf(err, "failed to delete the netns %s", e.NetNSName))
		}
	}

	// Remove the chroot filesystem.
	if _, err := testexec.CommandContext(ctx, "rm", "-rf", "--one-file-system", e.chrootDir).Output(); err != nil {
		updateLastErrAndLog(errors.Wrap(err, "failed removing chroot filesystem"))
	}

	// Wait until veth pair is removed. It should happen once we remove the netns,
	// but it may take up to 2 seconds (on a local DUT) to finish.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if _, err := net.InterfaceByName(e.VethOutName); err == nil {
			return errors.Errorf("veth %s still exists", e.VethOutName)
		}
		return nil
	}, &testing.PollOptions{Timeout: 5 * time.Second}); err != nil {
		updateLastErrAndLog(errors.Wrapf(err, "failed to wait for veth %s disappeared", e.VethOutName))
	}

	return lastErr
}

// StartServer starts a server inside this Env. This Env object will take care
// of the lifetime of the server.
func (e *Env) StartServer(ctx context.Context, name string, server server) error {
	if e.servers[name] != nil {
		return errors.Errorf("server with name %s already exists in %s", name, e.name)
	}
	// Registered before Start so a failed server is still stopped/logged by
	// Cleanup.
	e.servers[name] = server
	if err := server.Start(ctx, e); err != nil {
		return errors.Wrapf(err, "failed to start server %s", name)
	}
	return nil
}

// IfaceAddrs represents the IP addresses configured on an interface.
type IfaceAddrs struct {
	// IPv4Addr is the IPv4 address on the interface. There is only one IPv4
	// address on an interface.
	IPv4Addr net.IP
	// IPv6Addrs is the list of IPv6 addresses (excluding link-local address) on
	// the interface.
	IPv6Addrs []net.IP
}

// All returns all addresses (excluding the IPv6 link-local address) on this
// interface.
func (addrs *IfaceAddrs) All() []net.IP {
	var ret []net.IP
	if addrs.IPv4Addr != nil {
		ret = append(ret, addrs.IPv4Addr)
	}
	return append(ret, addrs.IPv6Addrs...)
}

// GetVethInAddrs returns the current IP addresses configured on the veth
// interface inside this Env. Note that this function reads the addresses from
// the kernel directly, which also includes syscalls to change the netns, and
// thus it's better that the caller caches the results if possible. Note that if
// an address is configured dynamically (e.g., the IPv6 SLAAC address), it may
// not be ready immediately after the Env is ready (or the corresponding server
// starts).
func (e *Env) GetVethInAddrs(ctx context.Context) (retAddrs *IfaceAddrs, retErr error) {
	// Enter the Env's netns so net.InterfaceByName sees the inside veth peer.
	cleanup, err := e.EnterNetNS(ctx)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to enter the associated netns %s", e.NetNSName)
	}
	defer func() {
		// Always try to switch back; surface the cleanup error only when the
		// function would otherwise succeed.
		if tempErr := cleanup(); tempErr != nil {
			testing.ContextLogf(ctx, "Failed to go back to the original netns from netns %s: %v", e.NetNSName, tempErr)
			if retErr == nil {
				retErr = tempErr
			}
		}
	}()

	iface, err := net.InterfaceByName(e.VethInName)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get interface object for the in interface")
	}
	addrs, err := iface.Addrs()
	if err != nil {
		return nil, errors.Wrap(err, "failed to list addrs on the in interface")
	}

	// Each object in |addrs| implements the net.Addr interface, which is not very
	// easy to use. The following code converts it to a CIDR string and then a
	// net.IP object.
	var ret IfaceAddrs
	for _, addr := range addrs {
		ip, _, err := net.ParseCIDR(addr.String())
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse CIDR string %s", addr)
		}
		if ipv4Addr := ip.To4(); ipv4Addr != nil {
			if ret.IPv4Addr != nil {
				return nil, errors.Errorf("there are two IPv4 addrs %s and %s on the in interface", ret.IPv4Addr, ipv4Addr)
			}
			ret.IPv4Addr = ipv4Addr
			continue
		}
		if ipv6Addr := ip.To16(); ipv6Addr != nil {
			// Link-local addresses are intentionally excluded.
			if !ipv6Addr.IsLinkLocalUnicast() {
				ret.IPv6Addrs = append(ret.IPv6Addrs, ipv6Addr)
			}
			continue
		}
		return nil, errors.Wrapf(err, "%s is neither a v4 addr nor a v6 addr", ip)
	}
	return &ret, nil
}

// WaitForVethInAddrs polls the IP addresses on the inside interface and returns
// them until 1) there is IPv4 address if |ipv4| is true and 2) there is IPv6
// address (which is not a link-local address) if |ipv6| is true.
func (e *Env) WaitForVethInAddrs(ctx context.Context, ipv4, ipv6 bool) (*IfaceAddrs, error) {
	var addrs *IfaceAddrs
	if err := testing.Poll(ctx, func(c context.Context) error {
		var err error
		addrs, err = e.GetVethInAddrs(ctx)
		if err != nil {
			return errors.Wrapf(err, "failed to get addrs of %s from env %s", e.VethInName, e.NetNSName)
		}
		if ipv4 && addrs.IPv4Addr == nil {
			return errors.Errorf("expect IPv4 addr in %v but was not found", addrs)
		}
		// Note that the link-local address not included in |addrs|.
		if ipv6 && len(addrs.IPv6Addrs) == 0 {
			return errors.Errorf("expect IPv6 addr in %v but was not found", addrs)
		}
		return nil
	}, &testing.PollOptions{Timeout: 5 * time.Second}); err != nil {
		return nil, errors.Wrapf(err, "failed to wait for addrs in env %s", e.NetNSName)
	}
	return addrs, nil
}

// EnterNetNS executes the current OS thread in the netns associated with this
// Env. It returns the cleanup function, which switches the thread execution
// back to the original netns. Note that this function calls
// runtime.LockOSThread() to bind the calling goroutine to the current thread,
// since the netns only takes effect on a thread. The cleanup function MUST be
// called on the same goroutine with this function.
func (e *Env) EnterNetNS(ctx context.Context) (func() error, error) {
	// A helper function wraps the code to open the netns file with proper error
	// and log messages.
	openNSByPath := func(path string) (*os.File, func(), error) {
		f, err := os.Open(path)
		if err != nil {
			return nil, nil, errors.Wrapf(err, "failed to open ns with path %s", path)
		}
		closeAndLogOnFailure := func() {
			if err := f.Close(); err != nil {
				testing.ContextLogf(ctx, "Failed to close file for ns %s: %v", path, err)
			}
		}
		return f, closeAndLogOnFailure, err
	}
	// |success| gates every deferred cleanup below: on any early-error return
	// the defers undo the partial setup; once fully set up, cleanup is handed
	// over to the returned closure instead.
	success := false
	runtime.LockOSThread()
	defer func() {
		if !success {
			runtime.UnlockOSThread()
		}
	}()
	// Open the current ns which will be used later in the cleanup closure.
	pid := unix.Getpid()
	tid := unix.Gettid()
	// The per-task path is used (not /proc/self/ns/net) because netns is a
	// property of the thread, and this goroutine is now pinned to |tid|.
	currentNSFile, currentNSClose, err := openNSByPath(fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid))
	if err != nil {
		return nil, err
	}
	defer func() {
		if !success {
			currentNSClose()
		}
	}()
	// Open and enter the target ns.
	targetNSFile, targertNSClose, err := openNSByPath("/run/netns/" + e.NetNSName)
	if err != nil {
		return nil, err
	}
	// The target ns fd is only needed for the Setns call below, so it is
	// closed on every path.
	defer targertNSClose()
	if err := unix.Setns(int(targetNSFile.Fd()), unix.CLONE_NEWNET); err != nil {
		return nil, errors.Wrapf(err, "failed to enter netns %s", e.NetNSName)
	}
	success = true
	return func() error {
		// File should always be closed.
		defer currentNSClose()
		tidNow := unix.Gettid()
		if tid != tidNow {
			return errors.Errorf("cleanup func does not run on the same thread as the one that enters the netns %s", e.NetNSName)
		}
		// Thread should be unlocked as long as we are on the same thread.
		defer runtime.UnlockOSThread()
		if err := unix.Setns(int(currentNSFile.Fd()), unix.CLONE_NEWNET); err != nil {
			return errors.Wrap(err, "failed to go back to the original netns")
		}
		return nil
	}, nil
}

// createLogFile opens (appending, creating if needed) the per-Env log file
// under the test's output directory.
func (e *Env) createLogFile(ctx context.Context) (*os.File, error) {
	dir, ok := testing.ContextOutDir(ctx)
	if !ok {
		return nil, errors.New("failed to get ContextOutDir")
	}
	return os.OpenFile(filepath.Join(dir, e.name+"_logs.txt"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
}

// makeChroot makes a chroot filesystem.
func (e *Env) makeChroot(ctx context.Context) error {
	temp, err := testexec.CommandContext(ctx, "mktemp", "-d", "/usr/local/tmp/chroot.XXXXXXXXX").Output()
	if err != nil {
		return errors.Wrap(err, "failed to make temp directory: /usr/local/tmp/chroot.XXXXXXXXX")
	}
	// mktemp's output ends with a newline; strip it before using as a path.
	e.chrootDir = strings.TrimSuffix(string(temp), "\n")
	if err := testexec.CommandContext(ctx, "chmod", "go+rX", e.chrootDir).Run(); err != nil {
		return errors.Wrapf(err, "failed to change mode to go+rX for the temp directory: %s", e.chrootDir)
	}
	// Make the root directories for the chroot.
	for _, rootdir := range createdRootDirs {
		if err := os.Mkdir(e.ChrootPath(rootdir), os.ModePerm); err != nil {
			return errors.Wrapf(err, "failed to make the directory %s", rootdir)
		}
	}
	var srcPath, dstPath string
	// Make the bind root directories for the chroot.
	for _, rootdir := range bindRootDirs {
		srcPath = filepath.Join("/", rootdir)
		dstPath = e.ChrootPath(rootdir)
		if _, err := os.Stat(srcPath); os.IsNotExist(err) {
			continue
		}
		if isLink(srcPath) {
			// Symlinked roots are recreated as symlinks inside the chroot
			// rather than bind-mounted.
			linkPath, err := os.Readlink(srcPath)
			if err != nil {
				return errors.Wrapf(err, "failed to readlink: %v", srcPath)
			}
			if err := os.Symlink(linkPath, dstPath); err != nil {
				return errors.Wrapf(err, "failed to Symlink %s to %s", linkPath, dstPath)
			}
		} else {
			// Bind-mount spec in minijail0 -b form "<src>,<dest>[,1]"; the
			// trailing ",1" marks the mount writable.
			mountArg := srcPath + "," + srcPath
			for _, dir := range bindRootWritableDirs {
				if dir == rootdir {
					mountArg = mountArg + ",1"
				}
			}
			e.netJailArgs = append(e.netJailArgs, "-b", mountArg)
		}
	}
	for _, path := range rootSymlinks {
		srcPath = path[0]
		targetPath := path[1]
		linkPath := e.ChrootPath(srcPath)
		if err := os.Symlink(targetPath, linkPath); err != nil {
			return errors.Wrapf(err, "failed to Symlink %s to %s", targetPath, linkPath)
		}
	}
	return nil
}

// makeNetNS prepares the veth pair and netns.
func (e *Env) makeNetNS(ctx context.Context) error {
	// Try to remove the leftover netns from the last test run if there is any.
	// This command will fail if the netns does not exist, which is also expected.
	if err := testexec.CommandContext(ctx, "ip", "netns", "del", e.NetNSName).Run(); err != nil {
		// Only a non-exit error (e.g. failure to run `ip` at all) is fatal.
		if _, ok := err.(*exec.ExitError); !ok {
			return errors.Wrapf(err, "failed to delete leftover namespace %s", e.NetNSName)
		}
	}
	// Create new namespace.
	if err := testexec.CommandContext(ctx, "ip", "netns", "add", e.NetNSName).Run(); err != nil {
		return errors.Wrapf(err, "failed to add the namespace %s", e.NetNSName)
	}
	e.netnsCreated = true
	// Enable IP forwarding.
	if err := e.RunWithoutChroot(ctx, "sysctl", "-w", "net.ipv4.conf.all.forwarding=1"); err != nil {
		return errors.Wrapf(err, "failed to enable ipv4 forwarding in %s", e.NetNSName)
	}
	if err := e.RunWithoutChroot(ctx, "sysctl", "-w", "net.ipv6.conf.all.forwarding=1"); err != nil {
		return errors.Wrapf(err, "failed to enable ipv6 forwarding in %s", e.NetNSName)
	}
	// Veth pair will be removed together with netns, so no explicit cleanup is
	// needed here.
	if err := testexec.CommandContext(ctx,
		"ip", "link", "add", e.VethOutName, "type", "veth",
		"peer", e.VethInName, "netns", e.NetNSName,
	).Run(); err != nil {
		return errors.Wrap(err, "failed to setup veth")
	}
	if err := e.RunWithoutChroot(ctx, "ip", "link", "set", e.VethInName, "up"); err != nil {
		return errors.Wrapf(err, "failed to enable interface %s", e.VethInName)
	}
	return nil
}

// ChrootPath returns the path within the chroot for |path|.
func (e *Env) ChrootPath(path string) string {
	return filepath.Join(e.chrootDir, strings.TrimLeft(path, "/"))
}

// RunWithoutChroot executes the command inside the netns but outside the
// chroot. Combined output will be wrapped in the error on failure. This is
// helpful when running command like `ip` and `sysctl`.
func (e *Env) RunWithoutChroot(ctx context.Context, args ...string) error {
	netnsArgs := []string{"netns", "exec", e.NetNSName}
	args = append(netnsArgs, args...)
	if o, err := testexec.CommandContext(ctx, "ip", args...).CombinedOutput(); err != nil {
		return errors.Wrapf(err, "failed to run cmd in netns %s with output %s", e.NetNSName, string(o))
	}
	return nil
}

// CreateCommand creates a Cmd object which has the netns and chroot params
// configured. The caller should control the lifetime of this object.
func (e *Env) CreateCommand(ctx context.Context, args ...string) *testexec.Cmd {
	// Final command shape:
	//   ip netns exec <ns> /sbin/minijail0 -C <chrootDir> <bind args> <args...>
	minijailArgs := []string{"/sbin/minijail0", "-C", e.chrootDir}
	ipArgs := []string{"netns", "exec", e.NetNSName}
	ipArgs = append(ipArgs, minijailArgs...)
	ipArgs = append(ipArgs, e.netJailArgs...)
	ipArgs = append(ipArgs, args...)
	return testexec.CommandContext(ctx, "ip", ipArgs...)
}

// ReadAndWriteLogIfExists reads the file contents from |path|, and writes them
// into |f|. It will not be treated as an error that the file does not exist.
func (e *Env) ReadAndWriteLogIfExists(path string, f *os.File) error {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return nil
	} else if err != nil {
		return errors.Wrapf(err, "failed to check existence of file %s", path)
	}
	b, err := os.ReadFile(path)
	if err != nil {
		return errors.Wrapf(err, "failed to read %s", path)
	}
	if _, err := f.Write(b); err != nil {
		return errors.Wrapf(err, "failed to write contents of %s", path)
	}
	return nil
}

// ConnectToRouter connects this Env to |router| by moving the out interface
// into the netns of |router|, using |ipv4Subnet| and |ipv6Subnet|, configuring
// static IP addresses on both in and out interface, and installing routes for
// the subnet in both of the two netns. An additional default route will be
// added from this Env to |router|.
func (e *Env) ConnectToRouter(ctx context.Context, router *Env, ipv4Subnet, ipv6Subnet *net.IPNet) error {
	// Move the out interface into |router| and bring it up.
	if err := testexec.CommandContext(ctx, "ip", "link", "set", e.VethOutName, "netns", router.NetNSName).Run(); err != nil {
		return errors.Wrapf(err, "failed to move the out interface of %s into %s", e.NetNSName, router.NetNSName)
	}
	if err := router.RunWithoutChroot(ctx, "ip", "link", "set", e.VethOutName, "up"); err != nil {
		return errors.Wrapf(err, "failed to enable interface %s", e.VethOutName)
	}
	// Install IPv4 addresses and routes. By convention the router takes host
	// .1 and this Env takes host .2 inside the subnet.
	ipv4Addr := ipv4Subnet.IP.To4()
	if ipv4Addr == nil {
		return errors.Errorf("invalid IPv4 subnet for connecting Envs: %v", ipv4Subnet)
	}
	selfIPv4Addr := net.IPv4(ipv4Addr[0], ipv4Addr[1], ipv4Addr[2], 2)
	routerIPv4Addr := net.IPv4(ipv4Addr[0], ipv4Addr[1], ipv4Addr[2], 1)
	if err := e.ConfigureInterface(ctx, e.VethInName, selfIPv4Addr, ipv4Subnet); err != nil {
		return errors.Wrapf(err, "failed to configure IPv4 on %s", e.VethInName)
	}
	if err := router.ConfigureInterface(ctx, e.VethOutName, routerIPv4Addr, ipv4Subnet); err != nil {
		return errors.Wrapf(err, "failed to configure IPv4 on %s", e.VethOutName)
	}
	if err := e.RunWithoutChroot(ctx, "ip", "route", "add", "default", "via", routerIPv4Addr.String()); err != nil {
		return errors.Wrap(err, "failed to add IPv4 default route")
	}
	// Install IPv6 addresses and routes. Same ::1/::2 convention as above;
	// the byte slices are copied so the subnet's IP is not mutated.
	ipv6Addr := ipv6Subnet.IP.To16()
	if ipv6Addr == nil {
		return errors.Errorf("invalid IPv6 subnet for connecting Envs: %v", ipv6Subnet)
	}
	var selfIPv6Addr, routerIPv6Addr net.IP
	selfIPv6Addr = append([]byte{}, ipv6Addr...)
	selfIPv6Addr[15] = 2
	routerIPv6Addr = append([]byte{}, ipv6Addr...)
	routerIPv6Addr[15] = 1
	if err := e.ConfigureInterface(ctx, e.VethInName, selfIPv6Addr, ipv6Subnet); err != nil {
		return errors.Wrapf(err, "failed to configure IPv6 on %s", e.VethInName)
	}
	if err := router.ConfigureInterface(ctx, e.VethOutName, routerIPv6Addr, ipv6Subnet); err != nil {
		return errors.Wrapf(err, "failed to configure IPv6 on %s", e.VethOutName)
	}
	if err := e.RunWithoutChroot(ctx, "ip", "route", "add", "default", "via", routerIPv6Addr.String()); err != nil {
		return errors.Wrap(err, "failed to add IPv6 default route")
	}
	return nil
}

// ConnectToRouterWithPool connects this Env to router. This function works same
// as ConnectToRouter(), except for using pool as parameter instead of v4 and v6
// subnets.
func (e *Env) ConnectToRouterWithPool(ctx context.Context, router *Env, pool *subnet.Pool) error { ipv4Subnet, err := pool.AllocNextIPv4Subnet() if err != nil { return errors.Wrap(err, "failed to allocate v4 subnet") } ipv6Subnet, err := pool.AllocNextIPv6Subnet() if err != nil { return errors.Wrap(err, "failed to allocate v6 subnet") } return e.ConnectToRouter(ctx, router, ipv4Subnet, ipv6Subnet) } // ConfigureInterface configures |addr| on |ifname|, and adds a route to point // |subnet| to this interface. func (e *Env) ConfigureInterface(ctx context.Context, ifname string, addr net.IP, subnet *net.IPNet) error { if err := e.RunWithoutChroot(ctx, "ip", "addr", "add", addr.String(), "dev", ifname); err != nil { return errors.Wrapf(err, "failed to install address %s on %s", addr.String(), ifname) } if err := e.RunWithoutChroot(ctx, "ip", "route", "add", subnet.String(), "dev", ifname); err != nil { return errors.Wrapf(err, "failed to install route %s on %s", subnet.String(), ifname) } testing.ContextLogf(ctx, "Installed %s with subnet %s on interface %s in netns %s", addr.String(), subnet.String(), ifname, e.NetNSName) return nil } // isLink returns whether path is a symbolic link. func isLink(path string) bool { if !assureExists(path) { return false } fileInfoStat, err := os.Lstat(path) if err != nil { return false } if fileInfoStat.Mode()&os.ModeSymlink != os.ModeSymlink { return false } return true } // assureExists asserts that |path| exists. func assureExists(path string) bool { if _, err := os.Stat(path); os.IsNotExist(err) { return false } return true }
package common

// Queue defines a minimal FIFO queue abstraction over untyped elements.
type Queue interface {
	// Enqueue appends val to the back of the queue.
	Enqueue(val interface{})
	// Dequeue removes and returns the element at the front of the queue;
	// it returns an error when the queue is empty.
	Dequeue() (interface{}, error)
	// IsEmpty reports whether the queue holds no elements.
	IsEmpty() bool
	// HasNext reports whether at least one element remains to be dequeued.
	HasNext() bool
}
// Copyright 2018 NetApp, Inc. All Rights Reserved. package core import ( "reflect" "testing" "github.com/netapp/trident/config" "github.com/netapp/trident/storage" sc "github.com/netapp/trident/storage_class" ) func findVolumeInMap( t *testing.T, backendMap map[string]*mockBackend, name string, ) *storage.Volume { var ret *storage.Volume matches := 0 for _, backend := range backendMap { if volume, ok := backend.volumes[name]; ok { ret = volume matches++ } } if matches > 1 { // Note that there's nothing in the code that prevents multiple // volumes of the same name on different backends, so it's likely that // a test is doing something wrong in this case. t.Error("Found multiple volume matches; returning last encountered.") } return ret } func TestAddMockBackend(t *testing.T) { m := NewMockOrchestrator() m.addMockBackend("test-nfs", config.File) m.addMockBackend("test-iscsi", config.Block) if _, ok := m.mockBackends["test-nfs"]; !ok { t.Error("NFS backend not added to mock backends.") } if _, ok := m.backends["test-nfs"]; !ok { t.Error("NFS backend not added to real backends.") } if _, ok := m.mockBackends["test-iscsi"]; !ok { t.Error("NFS backend not added to mock backends.") } if _, ok := m.backends["test-iscsi"]; !ok { t.Error("ISCSI backend not added to real backends.") } } func addAndRetrieveVolume( t *testing.T, vc *storage.VolumeConfig, m *MockOrchestrator, ) { _, err := m.AddStorageClass(&sc.Config{Name: vc.StorageClass}) if err != nil { t.Fatalf("Unable to add storage class %s (%s): %v", vc.Name, vc.Protocol, err) } vol, err := m.AddVolume(vc) if err != nil { t.Fatalf("Unable to add volume %s (%s): %s", vc.Name, vc.Protocol, err) } if vol.Config != vc { t.Fatalf("Wrong config returned for volume %s (%s)", vc.Name, vc.Protocol) } found := findVolumeInMap(t, m.mockBackends, vc.Name) if found == nil { t.Errorf("Volume %s (%s) not found.", vc.Name, string(vc.Protocol)) } if !reflect.DeepEqual(found.ConstructExternal(), vol) { t.Error("Found incorrect volume in 
map.") } foundVolume, _ := m.GetVolume(vc.Name) if foundVolume == nil { t.Errorf("Failed to find volume %s (%s)", vc.Name, vc.Protocol) } else if !reflect.DeepEqual(foundVolume, vol) { // Note that both accessor methods return external copies, so we // can't rely on pointer equality to validate success. t.Errorf("Retrieved incorrect volume for %s (%s)", vc.Name, vc.Protocol) } } func TestAddVolume(t *testing.T) { m := NewMockOrchestrator() m.addMockBackend("test-nfs", config.File) m.addMockBackend("test-iscsi", config.Block) for _, v := range []*storage.VolumeConfig{ { Name: "test-nfs-vol", Size: "10MB", Protocol: config.File, StorageClass: "silver", }, { Name: "test-iscsi-vol", Size: "10MB", Protocol: config.Block, StorageClass: "silver", }, } { addAndRetrieveVolume(t, v, m) } }
// Copyright 2015 Bowery, Inc. package slack import ( "encoding/json" "net/http" "net/http/httptest" "testing" ) var ( testClient *Client testChannel = "#testing" testBadChannel = "#foobar" testText = "trying this out" testUsername = "drizzy drake" ) func init() { testClient = NewClient("some-token") } func TestSendMessageSuccessful(t *testing.T) { t.Parallel() server := httptest.NewServer(http.HandlerFunc(sendMessageHandlerOK)) defer server.Close() slackAddr = server.URL err := testClient.SendMessage(testChannel, testText, testUsername) if err != nil { t.Error(err) } } func sendMessageHandlerOK(rw http.ResponseWriter, req *http.Request) { res := &slackPostMessageRes{Ok: true} body, _ := json.Marshal(res) rw.Write(body) } func TestSendMessageMissingArgument(t *testing.T) { t.Parallel() err := testClient.SendMessage("", testText, testUsername) if err == nil { t.Error("should have failed, channel missing") } } func TestSendMessageBadResponse(t *testing.T) { t.Parallel() server := httptest.NewServer(http.HandlerFunc(sendMessageHandlerBad)) defer server.Close() slackAddr = server.URL err := testClient.SendMessage(testBadChannel, testText, testUsername) if err == nil { t.Error("should have failed, invalid channel") } if err.Error() != "channel_not_found" { t.Error("received unexpected error") } } func sendMessageHandlerBad(rw http.ResponseWriter, req *http.Request) { res := &slackPostMessageRes{Ok: false, Error: "channel_not_found"} body, _ := json.Marshal(res) rw.Write(body) }
package webhooks

const (
	// Possible event keys a webhook can subscribe to.
	BlockApplied       = Event("block.applied")
	BlockForged        = Event("block.forged")
	BlockReverted      = Event("block.reverted")
	DelegateRegistered = Event("delegate.registered")
	DelegateResigned   = Event("delegate.resigned")
	ForgerFailed       = Event("forger.failed")
	ForgerMissing      = Event("forger.missing")
	ForgerStarted      = Event("forger.started")
	PeerAdded          = Event("peer.added")
	PeerRemoved        = Event("peer.removed")
	RoundCreated       = Event("round.created")
	TransactionApplied = Event("transaction.applied")
	TransactionExpired = Event("transaction.expired")
	TransactionForged  = Event("transaction.forged")
	TransactionReverted = Event("transaction.reverted")
	WalletSaved         = Event("wallet.saved")
	WalletCreatedCold   = Event("wallet.created.cold")

	// Possible condition operators for webhook triggers.
	Between    = Cond("between")
	Contains   = Cond("contains")
	Equal      = Cond("eq")
	Falsy      = Cond("falsy")
	Greater    = Cond("gt")
	GreaterEq  = Cond("gte")
	Lesser     = Cond("lt")
	LesserEq   = Cond("lte")
	NotEqual   = Cond("ne")
	NotBetween = Cond("not-between")
	Regexp     = Cond("regexp")
	Truthy     = Cond("truthy")
)

// Events are the possible events as described in https://docs.ark.io/api/webhooks/#create-a-webhook
type Event string

// Cond are the possible conditions for a webhook to trigger as described in https://docs.ark.io/api/webhooks/#create-a-webhook
type Cond string

// Condition is the total condition for a webhook to trigger.
type Condition struct {
	Key       string `json:"key"`
	Condition string `json:"condition"`
	Value     string `json:"value"`
}

// NewCondition makes a new Condition from a key, a condition operator, and a
// comparison value.
func NewCondition(key string, condition Cond, value string) Condition {
	return Condition{Key: key, Condition: string(condition), Value: value}
}
package main

import "fmt"

// The same-typed fields could also be grouped on one line:
// type User struct {
// 	ID int
// 	Email, FirstName, LastName string
// }

// User represents a user account.
type User struct {
	ID        int
	Email     string
	FirstName string
	LastName  string
}

// Group bundles a set of users under a role.
type Group struct {
	role           string
	users          []User
	newestUser     User
	spaceAvailable bool
}

func main() {
	// A struct type is used like any other type; literals fill it in much
	// like a slice or array literal.
	u1 := User{
		ID:        1,
		Email:     "hola@luispa.im",
		FirstName: "Luispa",
		LastName:  "Garcia",
	}
	u2 := User{
		ID:        2,
		Email:     "andrius@gmail.com",
		FirstName: "Andrea",
		LastName:  "Martinez",
	}

	// Fields are read with dot notation, like object properties in JS.
	fmt.Println(u1.ID)
	fmt.Println(u1.Email)
	fmt.Println(u1.FirstName)
	fmt.Println(u1.LastName)
	fmt.Println(u1)

	grp := Group{
		role:           "admin",
		users:          []User{u1},
		newestUser:     u2,
		spaceAvailable: true,
	}

	fmt.Println(describeUser(u1))
	fmt.Println(describeGroup(&grp))
	fmt.Println(grp.spaceAvailable)
}

// describeUser renders a one-line summary of u.
func describeUser(u User) string {
	return fmt.Sprintf("Name: %s %s, Email: %s", u.FirstName, u.LastName, u.Email)
}

// describeGroup renders a summary of g. As a side effect it closes the group
// to new members (clears spaceAvailable) when it has two users or fewer,
// which is why it takes a pointer receiver argument.
func describeGroup(g *Group) string {
	if len(g.users) <= 2 {
		g.spaceAvailable = false
	}
	return fmt.Sprintf(
		"The user group has %d users. The newest user is %s %s. Accepting new users: %t",
		len(g.users), g.newestUser.FirstName, g.newestUser.LastName, g.spaceAvailable,
	)
}
package main

import (
	"fmt"
)

// main prints the integers 0 through 4 using a while-style for loop driven by
// a boolean flag.
func main() {
	i := 0
	isLessThanFive := true

	for isLessThanFive {
		fmt.Println(i)
		i++
		if i >= 5 {
			// BUG FIX: this previously assigned `true`, so the loop condition
			// never became false and the loop ran forever. It now clears the
			// flag after the increment, so the loop prints 0..4 — matching the
			// break-based variant shown below.
			isLessThanFive = false
		}
	}

	// you can also do the following below
	// for {
	// 	if i >= 5 {
	// 		break
	// 	}
	// 	fmt.Println(i)
	// 	i++
	// }
}
package leetcode

// duplicateZeros rewrites arr in place so that each zero is written twice and
// the remaining elements are shifted to the right; anything pushed past the
// original length is discarded. The length of arr never changes.
func duplicateZeros(arr []int) {
	// Build the duplicated sequence in a scratch slice, stopping once it is
	// at least as long as arr (a trailing zero may overflow by one slot).
	out := make([]int, 0, len(arr)+1)
	for _, v := range arr {
		if len(out) >= len(arr) {
			break
		}
		out = append(out, v)
		if v == 0 {
			out = append(out, 0)
		}
	}
	// copy truncates to len(arr), dropping any overflowed element.
	copy(arr, out)
}
package router

import (
	"github.com/gogf/gf/frame/g"
	"github.com/gogf/gf/net/ghttp"

	"onvif-gf-demos/app/api"
)

// Route registrations can be kept in a single file,
// or split by module into separate files,
// but they all live under the router directory.
func init() {
	s := g.Server()
	// Group-style route registration.
	s.Group("/", func(group *ghttp.RouterGroup) {
		group.Group("/", func(group *ghttp.RouterGroup) {
			// Device discovery endpoint.
			group.GET("/api/discovery", api.ONVIF.Discovery)
			// Generic ONVIF method dispatch: service and method come from the path.
			group.POST("/api/:service/:method", api.ONVIF.PostMethod)
		})
	})
}
/**
The Caring Hamster - service for working with SMS messages

Author: kolabse

Running: hamster <typeEnv>

Arguments:
	typeEnv - type of runtime environment. Possible values - dev, prod

./hamster dev
*/
package main

import (
	"encoding/json"
	"io"
	"log"
	"net/http"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/google/uuid"

	"golang.org/x/time/rate"

	"github.com/fatih/color"
	"github.com/fiorix/go-smpp/smpp"
	"github.com/fiorix/go-smpp/smpp/pdu"
	"github.com/fiorix/go-smpp/smpp/pdu/pdufield"
	"github.com/fiorix/go-smpp/smpp/pdu/pdutext"
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/postgres"
)

// Process-wide state shared between the HTTP handlers and the SMPP handler.
var sResult []byte
var startTime time.Time
var connectionStatus string = "starting..."

// Status of SMS
const (
	StatusNew         = 0
	StatusSendWork    = 1
	StatusSendSuccess = 2
	StatusSendWait    = 3
	StatusSendError   = 4
	StatusSendTimeout = 5
	StatusDelivered   = 6
)

// Source of SMS
const (
	Sender = "<sender_name>"
)

// Status of SMS part (SMPP delivery-receipt "stat" values).
const (
	Delivered     = "DELIVRD"
	Expired       = "EXPIRED"
	Deleted       = "DELETED"
	Undeliverable = "UNDELIV"
	Accepted      = "ACCEPTD"
	Unknown       = "UNKNOWN"
	Rejected      = "REJECTD"
)

// Text of final
const (
	RSFINAL    = "<r:FINAL>"
	RSPROGRESS = "<r:PROGRESS>"
)

// Code for status of SMS part
const (
	DeliveredCode     = 1
	ExpiredCode       = 2
	DeletedCode       = 3
	UndeliverableCode = 4
	AcceptedCode      = 5
	UnknownCode       = 6
	RejectedCode      = 7
)

const typeEnvDev = "dev"
const typeEnvProd = "prod"
const noArgsMsg = "Required argument TypeEnv is missing. Posssible values - dev, prod"

//IncomingMessage struct — body of a POST /messageSend request.
type IncomingMessage struct {
	Mobile string
	Text   string
	Source string
}

//SuccessResponse struct — reply for a successfully queued message.
type SuccessResponse struct {
	SmsID    string
	Message  string
	SendTime string
}

//StatusResponse struct — reply for GET /status.
type StatusResponse struct {
	Status string
	Uptime string
}

//ErrorResponse struct
type ErrorResponse struct {
	Error string
}

// SmsParts db record — one SMPP message part belonging to a logical SMS.
type SmsParts struct {
	ID          int       //`db:"id"`
	ClaimsSmsID string    //`db:"claims_sms_id"`
	MessageID   string    //`db:"message_id"`
	Status      int       //`db:"status"`
	SubmitDate  time.Time //`db:"submit_date"`
	DoneDate    time.Time //`db:"done_date"`
	Err         string    //`db:"err"`
	Text        string    //`db:"text"`
	CreatedAt   time.Time //`db:"created_at"`
	UpdatedAt   time.Time //`db:"updated_at"`
}

// DeliveryMessage object — parsed SMPP delivery receipt.
type DeliveryMessage struct {
	ID         string `json:"id"`
	Sub        string `json:"sub"`
	Dlvrd      string `json:"dlvrd"`
	SubmitDate string `json:"submit_date"`
	DoneDate   string `json:"done_date"`
	Stat       string `json:"stat"`
	Err        string `json:"err"`
	Text       string `json:"text"`
}

func init() {
	// Recorded once so /status can report uptime.
	startTime = time.Now()
}

func main() {
	smppAddr := ""
	smppPass := ""
	typeEnv := ""
	pgServer := ""
	pgPort := ""
	pgUser := ""
	pgUserPass := ""
	apiPort := ""
	var intervalBeforeReconnect time.Duration = 0
	printWelcome()
	if len(os.Args) > 1 {
		typeEnv = os.Args[1]
	} else {
		red := color.New(color.FgRed)
		red.Println(noArgsMsg)
		panic("Please set required parameters")
	}
	// Environment selection: anything other than "prod" runs with dev settings.
	if typeEnv == typeEnvProd {
		smppAddr = "<prod_smpp_server_name>"
		smppPass = "<prod_smpp_pass>"
		pgServer = "<prod_db_server>"
		pgPort = "<prod_db_port>"
		pgUser = "<prod_db_user>"
		pgUserPass = "<prod_db_user_pass>"
		apiPort = "<prod_api_port>"
		intervalBeforeReconnect = 30
	} else {
		log.Printf("DEV MODE")
		smppAddr = "<dev_smpp_server_name>"
		smppPass = "<dev_smpp_pass>"
		pgServer = "<dev_db_server>"
		pgPort = "<dev_db_port>"
		pgUser = "<dev_db_user>"
		pgUserPass = "<dev_db_user_pass>"
		apiPort = "<dev_api_port>"
		intervalBeforeReconnect = 2
	}
	time.Sleep(intervalBeforeReconnect * time.Second)
	dbURL := "postgres://" + pgUser + ":" + pgUserPass + "@" + pgServer + ":" + pgPort + "/<schema_name>?sslmode=disable"
	db, err := gorm.Open("postgres", dbURL)
	if err != nil {
		panic("Failed to connect database" + err.Error())
	}
	defer db.Close()
	db.SingularTable(true)
	// NOTE(review): these two are captured by the SMPP handler closure below,
	// which runs on the SMPP library's goroutine while HTTP handlers run on
	// others — shared mutable state here is a potential data race; confirm
	// the library's threading guarantees.
	var smsPartFound []SmsParts
	var smsPartMessageID string
	// func for work with incoming message
	f := func(p pdu.Body) {
		switch p.Header().ID {
		case pdu.DeliverSMID: // when we get message with delivery status
			f := p.Fields()
			rs := convertMessageToArray(f["short_message"].String())
			smsPartMessageID = rs.ID
			log.Printf("Try to find part with mID:%s", smsPartMessageID)
			db.Where(&SmsParts{MessageID: smsPartMessageID}).Where("message_id = ?", smsPartMessageID).Limit(1).Find(&smsPartFound)
			for _, smsPart := range smsPartFound {
				log.Printf("Part with id:%s", smsPart.ClaimsSmsID)
				smsSubmitDate := convertSMPPTimestampToPpostgrsqlDatetime(rs.SubmitDate)
				smsDoneDate := convertSMPPTimestampToPpostgrsqlDatetime(rs.DoneDate)
				smsErr := rs.Err
				smsText := rs.Text
				smsStatus := getPartStatus(rs.Stat)
				db.Model(&smsPart).Updates(map[string]interface{}{
					"submit_date": smsSubmitDate,
					"done_date":   smsDoneDate,
					"err":         smsErr,
					"text":        smsText,
					"status":      smsStatus,
				})
			}
		}
	}
	lm := rate.NewLimiter(rate.Limit(10), 1) // Max rate of 10/s.
	tx := &smpp.Transceiver{
		Addr:        smppAddr + ":11111",
		User:        "website",
		Passwd:      smppPass,
		Handler:     f,  // Handle incoming SM or delivery receipts.
		RateLimiter: lm, // Optional rate limiter.
	}
	// Create persistent connection.
	conn := tx.Bind()
	go func() {
		// Mirror connection state changes into the /status endpoint.
		for c := range conn {
			log.Println("SMPP connection status:", c.Status())
			connectionStatus = c.Status().String()
		}
	}()
	http.HandleFunc("/messageSend", func(w http.ResponseWriter, r *http.Request) {
		//get fields from form
		decoder := json.NewDecoder(r.Body)
		var message IncomingMessage
		err := decoder.Decode(&message)
		if err != nil {
			// NOTE(review): this writes a 400 but does not return, so the
			// handler keeps going with a zero-valued message — likely a
			// missing `return` here.
			w.WriteHeader(http.StatusBadRequest)
			w.Write([]byte((err.Error())))
		}
		smsMobile := message.Mobile
		smsText := message.Text
		smsSource := message.Source
		smsID := getUUID()
		log.Printf("Sending sms from %s, with text \"%s\", to number %s", smsSource, smsText, smsMobile)
		if isNumeric(smsMobile) {
			if len(smsMobile) == 11 {
				if smsText != "" {
					if smsSource != "" {
						parts, err := tx.SubmitLongMsg(&smpp.ShortMessage{
							Src:      smsSource,
							Dst:      smsMobile,
							Text:     pdutext.UCS2(smsText),
							Validity: 10 * time.Minute,
							Register: pdufield.FinalDeliveryReceipt,
						})
						// On connection failures, wait a minute and rebind.
						if err == smpp.ErrNotConnected {
							time.Sleep(60000 * time.Millisecond)
							conn = tx.Bind()
						}
						if err != nil {
							log.Printf("Unable to connect, with error: %s", err.Error())
							time.Sleep(60000 * time.Millisecond)
							conn = tx.Bind()
						}
						// Record every submitted part so delivery receipts can
						// be matched back by message id.
						for index, sm := range parts {
							msgid := sm.RespID()
							if msgid == "" {
								log.Fatalf("pdu does not contain msgid: %#v", sm.Resp())
							} else {
								log.Printf("Sended message index %d, msgid: %q, for sms with id:%s", index, msgid, smsID)
								ct := time.Now()
								currentTime := formatToSQLTimestamp(ct)
								// NOTE(review): non-constant format string in
								// log.Printf; `log.Print(currentTime)` would
								// avoid accidental %-verb expansion.
								log.Printf(currentTime)
								var smsPart = SmsParts{MessageID: msgid, ClaimsSmsID: smsID, CreatedAt: ct}
								db.Create(&smsPart)
							}
						}
						sResult, _ = json.Marshal(&SuccessResponse{SmsID: smsID, Message: "Message was send", SendTime: formatToSQLTimestamp(time.Now())})
						w.Header().Set("Content-Type", "application/json")
						io.WriteString(w, string(sResult[:]))
					} else {
						w.WriteHeader(http.StatusBadRequest)
						w.Header().Set("Content-Type", "application/json")
						w.Write([]byte("Message field \"source\" is empty"))
					}
				} else {
					w.WriteHeader(http.StatusBadRequest)
					w.Header().Set("Content-Type", "application/json")
					w.Write([]byte("Message field \"text\" is empty"))
				}
			} else {
				w.WriteHeader(http.StatusBadRequest)
				w.Header().Set("Content-Type", "application/json")
				w.Write([]byte("Message field \"mobile\" is wrong length"))
			}
		} else {
			w.WriteHeader(http.StatusBadRequest)
			w.Header().Set("Content-Type", "application/json")
			w.Write([]byte("Message field \"mobile\" is empty or not number"))
		}
	})
	http.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
		serviceStatus, _ := json.Marshal(&StatusResponse{Status: connectionStatus, Uptime: shortDur(uptime())})
		w.Header().Set("Content-Type", "application/json")
		io.WriteString(w, string(serviceStatus[:]))
	})
	log.Fatal(http.ListenAndServe(":"+apiPort, nil))
}

// isNumeric reports whether s parses as a float64.
// NOTE(review): this accepts "1.5" or "1e10" as "numeric" phone numbers;
// a digits-only check may be intended.
func isNumeric(s string) bool {
	_, err := strconv.ParseFloat(s, 64)
	return err == nil
}

// parse incoming message text with delivery info: rewrites the SMPP
// "id:... sub:... stat:..." receipt into JSON and unmarshals it.
func convertMessageToArray(rs string) DeliveryMessage {
	rs = "[" + rs + "]"
	rs = strings.Replace(rs, "[id:", "{\"id\": \"", -1)
	rs = strings.Replace(rs, " sub:", "\",\"sub\": \"", -1)
	rs = strings.Replace(rs, " dlvrd:", "\",\"dlvrd\": \"", -1)
	rs = strings.Replace(rs, " submit date:", "\",\"submit_date\": \"", -1)
	rs = strings.Replace(rs, " done date:", "\",\"done_date\": \"", -1)
	rs = strings.Replace(rs, " stat:", "\",\"stat\": \"", -1)
	rs = strings.Replace(rs, " err:", "\",\"err\": \"", -1)
	rs = strings.Replace(rs, " text:", "\",\"text\": \"", -1)
	rs = strings.Replace(rs, "]", "\"}", -1)
	log.Println(rs)
	var message DeliveryMessage
	err := json.Unmarshal([]byte(rs), &message)
	if err != nil {
		log.Fatal(err)
	}
	return message
}

// convertSMPPTimestampToPpostgrsqlDatetime parses the SMPP receipt timestamp
// format YYMMDDhhmmss into a time.Time; the process exits on parse failure.
func convertSMPPTimestampToPpostgrsqlDatetime(ts string) time.Time {
	currentTime, err := time.Parse("060102150405", ts)
	failOnError(err, "Time parse faild")
	return currentTime
}

// getPartStatus maps an SMPP receipt "stat" string to its numeric status
// code; unknown strings map to 0.
func getPartStatus(stat string) int {
	result := 0
	switch stat {
	case Delivered:
		result = DeliveredCode
	case Expired:
		result = ExpiredCode
	case Deleted:
		result = DeletedCode
	case Undeliverable:
		result = UndeliverableCode
	case Accepted:
		result = AcceptedCode
	case Unknown:
		result = UnknownCode
	case Rejected:
		result = RejectedCode
	}
	return result
}

// formatToSQLTimestamp renders t in "YYYY-MM-DD hh:mm:ss" form.
func formatToSQLTimestamp(time time.Time) string {
	return time.Format("2006-01-02 15:04:05")
}

// getUUID returns a fresh time-based UUID string (errors are ignored).
func getUUID() string {
	newUUID, _ := uuid.NewUUID()
	return newUUID.String()
}

// printWelcome prints the startup banner.
func printWelcome() {
	log.Printf("          _  _")
	log.Printf("         (`-`;-\"```\"-;`-`)")
	log.Printf("          \\.'         './")
	log.Printf("          /             \\")
	log.Printf("          ;   0     0   ;")
	log.Printf("         /| =         = |\\")
	log.Printf("        ; \\   '._Y_.'   / ;")
	log.Printf("       ;   `-._ \\|/ _.-'   ;")
	log.Printf("      ;        `\"\"\"`        ;")
	log.Printf("      ;    `\"\"-.   .-\"\"`    ;")
	log.Printf("      /;  '--._ \\ / _.--  ;\\")
	log.Printf("     :  `.   `/|| ||\\`   .'  :")
	log.Printf("      '.  '-._       _.-'  .'  jgs")
	log.Printf("      (((-'`  `\"\"\"\"\"`  `'-)))")
	log.Printf("____________________________")
	log.Printf("                            ")
	log.Printf("Hello! Mr. Caring Hamster with you!")
	log.Printf("Wait for smsgate connection...")
}

// failOnError exits the process with msg when err is non-nil.
func failOnError(err error, msg string) {
	if err != nil {
		log.Fatalf("%s: %s", msg, err)
	}
}

// uptime returns how long the process has been running.
func uptime() time.Duration {
	return time.Since(startTime)
}

// shortDur renders d rounded to seconds, trimming trailing zero components
// ("1m0s" -> "1m", "1h0m" -> "1h").
func shortDur(d time.Duration) string {
	d = d.Round(time.Second)
	s := d.String()
	if strings.HasSuffix(s, "m0s") {
		s = s[:len(s)-2]
	}
	if strings.HasSuffix(s, "h0m") {
		s = s[:len(s)-2]
	}
	return s
}
package presto

// result implements database/sql/driver's Result with precomputed values.
type result struct {
	lastID   int64
	affected int64
}

// LastInsertId returns the stored last-insert id; it never fails.
func (r *result) LastInsertId() (int64, error) {
	return r.lastID, nil
}

// RowsAffected returns the stored affected-row count; it never fails.
func (r *result) RowsAffected() (int64, error) {
	return r.affected, nil
}
package reload

import (
	"os"
	"syscall"
)

// options holds the configurable parts of the reload service.
type options struct {
	logger    Logger
	sigHandle sigHandle
}

// Option configures an options value.
type Option func(*options)

var defaultOptions = &options{
	logger:    &defaultLogger{},
	sigHandle: make(sigHandle),
}

// evaluateOptions applies opts on top of a copy of the defaults.
// Note the copy is shallow: the sigHandle map is shared with defaultOptions.
func evaluateOptions(opts []Option) *options {
	optCopy := &options{}
	*optCopy = *defaultOptions
	for _, o := range opts {
		o(optCopy)
	}
	return optCopy
}

// WithLogger sets the logger.
func WithLogger(l Logger) Option {
	return func(o *options) {
		o.logger = l
	}
}

// WithHandleFunc registers handler h for signal s.
func WithHandleFunc(s os.Signal, h HandleFunc) Option {
	return func(o *options) {
		o.sigHandle[s] = h
	}
}

// WithDefaultHandle installs the default signal handling:
// SIGUSR1 triggers a reload; SIGINT/SIGTERM/SIGTSTP shut the service down.
func WithDefaultHandle() Option {
	return func(o *options) {
		o.sigHandle[syscall.SIGUSR1] = func(s Service) {
			if err := s.Reload(); err != nil {
				s.Logger().Error(err)
			}
		}

		// Signals that cause the service to exit.
		o.sigHandle[syscall.SIGINT] = func(s Service) {
			s.Shutdown()
		}
		o.sigHandle[syscall.SIGTERM] = func(s Service) {
			s.Shutdown()
		}
		o.sigHandle[syscall.SIGTSTP] = func(s Service) {
			s.Shutdown()
		}
	}
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Tests for MCPDiscovery: proxy service-instance lookup, per-port instance
// lookup, service listing, and cache add/update/delete event handling.
package coredatamodel_test

import (
	"fmt"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/onsi/gomega"

	networking "istio.io/api/networking/v1alpha3"
	"istio.io/istio/pilot/pkg/config/coredatamodel"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pkg/config/host"
	"istio.io/istio/pkg/config/labels"
	"istio.io/istio/pkg/config/protocol"
	"istio.io/istio/pkg/config/schemas"
)

var (
	// Shared fixtures, re-initialised per test via initDiscovery/testSetup.
	d          *coredatamodel.MCPDiscovery
	controller coredatamodel.CoreDataModel
	fx         *FakeXdsUpdater
	namespace  = "random-namespace"
	name       = "test-synthetic-se"
	svcPort    = []*model.Port{
		{
			Name:     "http-port",
			Port:     80,
			Protocol: protocol.Instance("http"),
		},
		{
			Name:     "http-alt-port",
			Port:     8080,
			Protocol: protocol.Instance("http"),
		},
	}
	// Fixed timestamp so ConfigMeta comparisons are deterministic.
	fakeCreateTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
)

// Since service instance is representation of a service with a corresponding backend port
// the following ServiceEntry plus a notReadyEndpoint 4.4.4.4:5555 should yield into 4
// service instances once flattened. That is one endpoints IP + endpoint Port, per service Port, per service host.
// given proxy address
// e.g:
// &networking.ServiceEntry{
//	Hosts: []string{"svc.example2.com"},
//	Ports: []*networking.Port{
//		{Number: 80, Name: "http-port", Protocol: "http"},
//		{Number: 8080, Name: "http-alt-port", Protocol: "http"},
//	},
//	Endpoints: []*networking.ServiceEntry_Endpoint{
//		{
//			Address: "2.2.2.2",
//			Ports:   map[string]uint32{"http-port": 7080, "http-alt-port": 18080},
//		},
//		{
//			Address: "3.3.3.3",
//			Ports:   map[string]uint32{"http-port": 1080},
//		},
//		{
//			Address: "4.4.4.4",
//			Ports:   map[string]uint32{"http-port": 1080},
//			Labels:  map[string]string{"foo": "bar"},
//		},
//	},
// }
// AND
// notReadyEndpoint 4.4.4.4:5555
// should result in the following service instances:
//
// NetworkEndpoint(endpoint{Address:4.4.4.4, Port:5555, servicePort: &{http-port 80 http}} service:{Hostname: svc.example2.com})
// NetworkEndpoint(endpoint{Address:4.4.4.4, Port:5555, servicePort: &{http-alt-port 8080 http}} service:{Hostname: svc.example2.com})
// NetworkEndpoint(endpoint{Address:4.4.4.4, Port:1080, servicePort: &{http-port 80 http}} service:{Hostname: svc.example2.com})
// NetworkEndpoint(endpoint{Address:4.4.4.4, Port:8080, servicePort: &{http-alt-port 8080 http}} service:{Hostname: svc.example2.com})
func TestGetProxyServiceInstances(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	// add first config
	testSetup(g)
	steps := map[string]struct {
		namespace       string
		address         string
		numSvcInstances int
		ports           []int
		servicePort     []*model.Port
		hostname        []host.Name
	}{
		"single namespace": {
			namespace:       namespace,
			address:         "4.4.4.4",
			numSvcInstances: 4,
			ports:           []int{1080, 5555, 8080},
			servicePort:     svcPort,
			hostname:        []host.Name{host.Name("svc.example2.com")},
		},
	}
	for description, step := range steps {
		t.Run(fmt.Sprintf("verify service instances from %s", description), func(_ *testing.T) {
			proxy := buildProxy(step.address, step.namespace)
			svcInstances, err := d.GetProxyServiceInstances(proxy)
			g.Expect(err).ToNot(gomega.HaveOccurred())
			g.Expect(len(svcInstances)).To(gomega.Equal(step.numSvcInstances))
			// Every instance must carry the proxy's address and one of the
			// expected host/port combinations.
			for _, svcInstance := range svcInstances {
				g.Expect(step.address).To(gomega.Equal(svcInstance.Endpoint.Address))
				g.Expect(step.hostname).To(gomega.ContainElement(svcInstance.Service.Hostname))
				g.Expect(step.ports).To(gomega.ContainElement(svcInstance.Endpoint.Port))
			}
		})
	}
}

// TestInstancesByPort verifies that looking up instances by service port
// flattens ready and not-ready endpoints into the expected set, and that
// each instance carries the right host, port and service-port metadata.
func TestInstancesByPort(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	testSetup(g)
	svc := &model.Service{
		Attributes: model.ServiceAttributes{
			Namespace: "random-namespace",
		},
		Hostname: host.Name("svc.example2.com"),
	}
	svcInstances, err := d.InstancesByPort(svc, 80, labels.Collection{})
	g.Expect(len(svcInstances)).To(gomega.Equal(6))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	// Expected endpoints keyed by address; 1.1.1.1 and 6.6.6.6 come from the
	// notReadyEndpoints annotation set in testSetup.
	steps := map[string]struct {
		address     string
		ports       []int
		servicePort *model.Port
		hostname    host.Name
	}{
		"2.2.2.2": {
			address:     "2.2.2.2",
			ports:       []int{7080, 18080},
			servicePort: svcPort[0],
			hostname:    host.Name("svc.example2.com"),
		},
		"3.3.3.3": {
			address:     "3.3.3.3",
			ports:       []int{1080},
			servicePort: svcPort[0],
			hostname:    host.Name("svc.example2.com"),
		},
		"4.4.4.4": {
			address:     "4.4.4.4",
			ports:       []int{1080, 5555},
			servicePort: svcPort[0],
			hostname:    host.Name("svc.example2.com"),
		},
		"6.6.6.6": {
			address:     "6.6.6.6",
			ports:       []int{7777},
			servicePort: svcPort[0],
			hostname:    host.Name("svc.example2.com"),
		},
		"1.1.1.1": {
			address:     "1.1.1.1",
			ports:       []int{2222},
			servicePort: svcPort[0],
			hostname:    host.Name("svc.example2.com"),
		},
	}
	for _, svcInstance := range svcInstances {
		step := steps[svcInstance.Endpoint.Address]
		t.Run(fmt.Sprintf("verify service instances %s", step.address), func(_ *testing.T) {
			g.Expect(step.address).To(gomega.Equal(svcInstance.Endpoint.Address))
			g.Expect(step.hostname).To(gomega.Equal(svcInstance.Service.Hostname))
			g.Expect(step.ports).To(gomega.ContainElement(svcInstance.Endpoint.Port))
			g.Expect(step.servicePort.Name).To(gomega.Equal(svcInstance.Endpoint.ServicePort.Name))
			g.Expect(step.servicePort.Port).To(gomega.Equal(svcInstance.Endpoint.ServicePort.Port))
			g.Expect(protocol.Parse(string(step.servicePort.Protocol))).To(gomega.Equal(protocol.Parse(string(svcInstance.Endpoint.ServicePort.Protocol))))
		})
	}
}

// TestGetProxyServiceInstancesReadsFromCache confirms that proxy instance
// lookups are served from the cache: after a delete event drains it, the
// same lookup returns nothing.
func TestGetProxyServiceInstancesReadsFromCache(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	testSetup(g)
	proxy := buildProxy("4.4.4.4", namespace)
	svcInstances, err := d.GetProxyServiceInstances(proxy)
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(4))
	// drain the cache
	conf := model.Config{
		ConfigMeta: model.ConfigMeta{
			Type:              schemas.ServiceEntry.Type,
			Group:             schemas.ServiceEntry.Group,
			Version:           schemas.ServiceEntry.Version,
			Name:              name,
			Namespace:         namespace,
			CreationTimestamp: fakeCreateTime,
		},
		Spec: syntheticServiceEntry0,
	}
	d.HandleCacheEvents(conf, model.EventDelete)
	svcInstances, err = d.GetProxyServiceInstances(proxy)
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(0))
}

// TestHandleCacheEvents drives the cache through add/update/delete events
// across two namespaces and checks GetProxyServiceInstances reflects each
// transition, including that deletes do not clobber unrelated configs in
// the same namespace.
func TestHandleCacheEvents(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	initDiscovery()
	// add the first config
	conf1Ns := "default"
	conf := buildConfig(syntheticServiceEntry0, name, conf1Ns)
	d.HandleCacheEvents(conf, model.EventAdd)
	svcInstances, err := d.GetProxyServiceInstances(buildProxy("4.4.4.4", conf1Ns))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(2))
	for _, s := range svcInstances {
		g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo": "bar"}))
	}
	// update the first config
	updatedSyntheticServiceEntry0 := &networking.ServiceEntry{
		Hosts: []string{"svc.example2.com"},
		Ports: []*networking.Port{
			{Number: 80, Name: "http-port", Protocol: "http"},
			{Number: 8080, Name: "http-alt-port", Protocol: "http"},
		},
		Location:   networking.ServiceEntry_MESH_EXTERNAL,
		Resolution: networking.ServiceEntry_DNS,
		Endpoints: []*networking.ServiceEntry_Endpoint{
			{
				Address: "3.3.3.3",
				Ports:   map[string]uint32{"http-port": 1080},
			},
			{
				Address: "4.4.4.4",
				Ports:   map[string]uint32{"http-port": 1080},
				Labels:  map[string]string{"foo": "bar2"},
			},
		},
	}
	conf.Spec = updatedSyntheticServiceEntry0
	d.HandleCacheEvents(conf, model.EventUpdate)
	svcInstances, err = d.GetProxyServiceInstances(buildProxy("4.4.4.4", conf1Ns))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(2))
	for _, s := range svcInstances {
		// label changed by the update event
		g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo": "bar2"}))
	}
	// add another config
	syntheticServiceEntry1.Endpoints = []*networking.ServiceEntry_Endpoint{
		{
			Address: "3.3.3.3",
			Ports:   map[string]uint32{"http-port": 1080},
		},
		{
			Address: "5.5.5.5",
			Ports:   map[string]uint32{"http-port": 1081},
			Labels:  map[string]string{"foo1": "bar1"},
		},
	}
	conf2Ns := "test-namespace"
	conf2 := buildConfig(syntheticServiceEntry1, "test-name", conf2Ns)
	d.HandleCacheEvents(conf2, model.EventAdd)
	svcInstances, err = d.GetProxyServiceInstances(buildProxy("5.5.5.5", conf2Ns))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(2))
	for _, s := range svcInstances {
		g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo1": "bar1"}))
	}
	// add another config in the same namespace as the second one
	conf3Ns := "test-namespace"
	conf3 := buildConfig(syntheticServiceEntry2, "test-name", conf3Ns)
	d.HandleCacheEvents(conf3, model.EventAdd)
	proxy := buildProxy("2.2.2.2", conf3Ns)
	svcInstances, err = d.GetProxyServiceInstances(proxy)
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(2))
	for _, s := range svcInstances {
		g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo3": "bar3"}))
	}
	// delete the first config
	d.HandleCacheEvents(conf, model.EventDelete)
	svcInstances, err = d.GetProxyServiceInstances(buildProxy("4.4.4.4", conf1Ns))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(0))
	// delete the second config
	d.HandleCacheEvents(conf2, model.EventDelete)
	proxy = buildProxy("5.5.5.5", conf2Ns)
	svcInstances, err = d.GetProxyServiceInstances(proxy)
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(0))
	// check to see if other config in the same namespace
	// as second config is not deleted
	svcInstances, err = d.GetProxyServiceInstances(buildProxy("2.2.2.2", conf3Ns))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(2))
	for _, s := range svcInstances {
		g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo3": "bar3"}))
	}
	// delete the last config
	d.HandleCacheEvents(conf3, model.EventDelete)
	svcInstances, err = d.GetProxyServiceInstances(buildProxy("2.2.2.2", conf3Ns))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(0))
}

// TestInstancesByPortReadsFromCache mirrors TestHandleCacheEvents but
// observes the cache through InstancesByPort instead of proxy lookups.
func TestInstancesByPortReadsFromCache(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	initDiscovery()
	// add the first config
	hostname := host.Name("svc.example2.com")
	conf1Ns := "default"
	conf := buildConfig(syntheticServiceEntry0, name, conf1Ns)
	d.HandleCacheEvents(conf, model.EventAdd)
	svc := &model.Service{
		Attributes: model.ServiceAttributes{
			Namespace: conf1Ns,
		},
		Hostname: hostname,
	}
	svcInstances, err := d.InstancesByPort(svc, 80, labels.Collection{})
	g.Expect(len(svcInstances)).To(gomega.Equal(3))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	for _, s := range svcInstances {
		g.Expect(s.Service.Hostname).To(gomega.Equal(hostname))
		g.Expect(s.Endpoint.ServicePort.Name).To(gomega.Equal("http-port"))
		g.Expect(s.Endpoint.ServicePort.Port).To(gomega.Equal(80))
		g.Expect(s.Endpoint.ServicePort.Protocol).To(gomega.Equal(protocol.Instance("HTTP")))
	}
	// update the first config
	updatedSyntheticServiceEntry0 := &networking.ServiceEntry{
		Hosts: []string{"svc.example2.com"},
		Ports: []*networking.Port{
			{Number: 80, Name: "http-port", Protocol: "http"},
			{Number: 8080, Name: "http-alt-port", Protocol: "http"},
		},
		Location:   networking.ServiceEntry_MESH_EXTERNAL,
		Resolution: networking.ServiceEntry_DNS,
		Endpoints: []*networking.ServiceEntry_Endpoint{
			{
				Address: "3.3.3.3",
				Ports:   map[string]uint32{"http-port": 1080},
			},
			{
				Address: "4.4.4.4",
				Ports:   map[string]uint32{"http-port": 1080},
				Labels:  map[string]string{"foo": "bar2"},
			},
		},
	}
	conf.Spec = updatedSyntheticServiceEntry0
	d.HandleCacheEvents(conf, model.EventUpdate)
	svcInstances, err = d.InstancesByPort(svc, 80, labels.Collection{})
	g.Expect(len(svcInstances)).To(gomega.Equal(2))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	for _, s := range svcInstances {
		g.Expect(s.Service.Hostname).To(gomega.Equal(hostname))
		g.Expect(s.Endpoint.ServicePort.Name).To(gomega.Equal("http-port"))
		g.Expect(s.Endpoint.ServicePort.Port).To(gomega.Equal(80))
		g.Expect(s.Endpoint.ServicePort.Protocol).To(gomega.Equal(protocol.Instance("HTTP")))
		if s.Endpoint.Address == "4.4.4.4" {
			g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo": "bar2"}))
		}
	}
	// add another config
	syntheticServiceEntry1.Endpoints = []*networking.ServiceEntry_Endpoint{
		{
			Address: "3.3.3.3",
			Ports:   map[string]uint32{"http-port": 1080},
		},
		{
			Address: "5.5.5.5",
			Ports:   map[string]uint32{"http-port": 1081},
			Labels:  map[string]string{"foo1": "bar1"},
		},
	}
	hostname2 := host.Name("example2.com")
	conf2Ns := "test-namespace"
	conf2 := buildConfig(syntheticServiceEntry1, "test-name", conf2Ns)
	d.HandleCacheEvents(conf2, model.EventAdd)
	svc2 := &model.Service{
		Attributes: model.ServiceAttributes{
			Namespace: conf2Ns,
		},
		Hostname: hostname2,
	}
	svcInstances, err = d.InstancesByPort(svc2, 80, labels.Collection{})
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(2))
	for _, s := range svcInstances {
		g.Expect(s.Service.Hostname).To(gomega.Equal(hostname2))
		g.Expect(s.Endpoint.ServicePort.Name).To(gomega.Equal("http-port"))
		g.Expect(s.Endpoint.ServicePort.Port).To(gomega.Equal(80))
		g.Expect(s.Endpoint.ServicePort.Protocol).To(gomega.Equal(protocol.Instance("HTTP")))
		if s.Endpoint.Address == "5.5.5.5" {
			g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo1": "bar1"}))
		}
	}
	// add another config in the same namespace as the second one
	conf3Ns := "test-namespace"
	hostname3 := host.Name("example3.com")
	conf3 := buildConfig(syntheticServiceEntry2, "test-name2", conf3Ns)
	d.HandleCacheEvents(conf3, model.EventAdd)
	svc3 := &model.Service{
		Attributes: model.ServiceAttributes{
			Namespace: conf3Ns,
		},
		Hostname: hostname3,
	}
	svcInstances, err = d.InstancesByPort(svc3, 80, labels.Collection{})
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(1))
	for _, s := range svcInstances {
		g.Expect(s.Service.Hostname).To(gomega.Equal(hostname3))
		g.Expect(s.Endpoint.ServicePort.Name).To(gomega.Equal("http-port2"))
		g.Expect(s.Endpoint.ServicePort.Port).To(gomega.Equal(80))
		g.Expect(s.Endpoint.ServicePort.Protocol).To(gomega.Equal(protocol.Instance("HTTP")))
		g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo3": "bar3"}))
	}
	// delete the first config
	d.HandleCacheEvents(conf, model.EventDelete)
	svcInstances, err = d.InstancesByPort(svc, 80, labels.Collection{})
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(0))
	// delete the second config
	d.HandleCacheEvents(conf2, model.EventDelete)
	svcInstances, err = d.InstancesByPort(svc2, 80, labels.Collection{})
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(0))
	// check to see if other config in the same namespace
	// as second config is not deleted
	svcInstances, err = d.InstancesByPort(svc3, 80, labels.Collection{})
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(1))
	for _, s := range svcInstances {
		g.Expect(s.Service.Hostname).To(gomega.Equal(hostname3))
		g.Expect(s.Endpoint.ServicePort.Name).To(gomega.Equal("http-port2"))
		g.Expect(s.Endpoint.ServicePort.Port).To(gomega.Equal(80))
		g.Expect(s.Endpoint.ServicePort.Protocol).To(gomega.Equal(protocol.Instance("HTTP")))
		g.Expect(s.Labels).To(gomega.Equal(labels.Instance{"foo3": "bar3"}))
	}
	// delete the last config
	// NOTE(review): this final check switches from InstancesByPort to
	// GetProxyServiceInstances — presumably intentional, but worth confirming.
	d.HandleCacheEvents(conf3, model.EventDelete)
	svcInstances, err = d.GetProxyServiceInstances(buildProxy("2.2.2.2", conf3Ns))
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(svcInstances)).To(gomega.Equal(0))
}

// TestServicesReadsFromCache applies three synthetic service entries in
// three namespaces through the controller and checks Services() lists all
// of them with matching hosts and ports.
func TestServicesReadsFromCache(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	initDiscovery()
	message := convertToResource(g, schemas.SyntheticServiceEntry.MessageName, syntheticServiceEntry0)
	message1 := convertToResource(g, schemas.SyntheticServiceEntry.MessageName, syntheticServiceEntry1)
	message2 := convertToResource(g, schemas.SyntheticServiceEntry.MessageName, syntheticServiceEntry2)
	change := convertToChange(
		[]proto.Message{message, message1, message2},
		[]string{
			fmt.Sprintf("%s/%s", "ns1", name),
			fmt.Sprintf("%s/%s", "ns2", name),
			fmt.Sprintf("%s/%s", "ns3", name),
		},
		setCollection(schemas.SyntheticServiceEntry.Collection),
		setTypeURL(schemas.SyntheticServiceEntry.MessageName))
	err := controller.Apply(change)
	g.Expect(err).ToNot(gomega.HaveOccurred())
	services, err := d.Services()
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(len(services)).To(gomega.Equal(3))
	for _, s := range services {
		switch s.Attributes.Namespace {
		case "ns1":
			g.Expect(string(s.Hostname)).To(gomega.Equal(syntheticServiceEntry0.Hosts[0]))
			for i, p := range syntheticServiceEntry0.Ports {
				g.Expect(s.Ports[i].Name).To(gomega.Equal(p.Name))
				g.Expect(s.Ports[i].Port).To(gomega.Equal(int(p.Number)))
				g.Expect(s.Ports[i].Protocol).To(gomega.Equal(protocol.Parse(p.Protocol)))
			}
		case "ns2":
			g.Expect(string(s.Hostname)).To(gomega.Equal(syntheticServiceEntry1.Hosts[0]))
			for i, p := range syntheticServiceEntry1.Ports {
				g.Expect(s.Ports[i].Name).To(gomega.Equal(p.Name))
				g.Expect(s.Ports[i].Port).To(gomega.Equal(int(p.Number)))
				g.Expect(s.Ports[i].Protocol).To(gomega.Equal(protocol.Parse(p.Protocol)))
			}
		case "ns3":
			g.Expect(string(s.Hostname)).To(gomega.Equal(syntheticServiceEntry2.Hosts[0]))
			for i, p := range syntheticServiceEntry2.Ports {
				g.Expect(s.Ports[i].Name).To(gomega.Equal(p.Name))
				g.Expect(s.Ports[i].Port).To(gomega.Equal(int(p.Number)))
				g.Expect(s.Ports[i].Protocol).To(gomega.Equal(protocol.Parse(p.Protocol)))
			}
		}
	}
}

// buildProxy constructs a minimal proxy with the given IP and namespace.
func buildProxy(proxyIP, ns string) *model.Proxy {
	return &model.Proxy{
		IPAddresses:     []string{proxyIP},
		ConfigNamespace: ns,
	}
}

// buildConfig wraps a ServiceEntry in a model.Config with fixed metadata
// (domain, version, labels, annotations) for cache-event tests.
// nolint: interfacer
func buildConfig(se *networking.ServiceEntry, name, ns string) model.Config {
	return model.Config{
		ConfigMeta: model.ConfigMeta{
			Type:              schemas.ServiceEntry.Type,
			Group:             schemas.ServiceEntry.Group,
			Version:           schemas.ServiceEntry.Version,
			Name:              name,
			Namespace:         ns,
			Domain:            "example2.com",
			ResourceVersion:   "1",
			CreationTimestamp: fakeCreateTime,
			Labels:            map[string]string{"lk1": "lv1"},
			Annotations:       map[string]string{"ak1": "av1"},
		},
		Spec: se,
	}
}

// initDiscovery rebuilds the shared controller, fake XDS updater, and
// MCPDiscovery fixtures used by every test.
func initDiscovery() {
	fx = NewFakeXDS()
	fx.EDSErr <- nil
	testControllerOptions.XDSUpdater = fx
	controller = coredatamodel.NewSyntheticServiceEntryController(testControllerOptions)
	options := &coredatamodel.DiscoveryOptions{
		ClusterID:    "test",
		DomainSuffix: "cluster.local",
	}
	d = coredatamodel.NewMCPDiscovery(controller, options)
}

// testSetup initialises discovery and applies syntheticServiceEntry0 with a
// notReadyEndpoints annotation, then verifies the controller stored it and
// the fake XDS updater saw a ConfigUpdate event.
func testSetup(g *gomega.GomegaWithT) {
	initDiscovery()
	message := convertToResource(g, schemas.SyntheticServiceEntry.MessageName, syntheticServiceEntry0)
	change := convertToChange(
		[]proto.Message{message},
		[]string{fmt.Sprintf("%s/%s", namespace, name)},
		setAnnotations(map[string]string{
			"networking.alpha.istio.io/notReadyEndpoints": "1.1.1.1:2222,4.4.4.4:5555,6.6.6.6:7777",
		}),
		setCollection(schemas.SyntheticServiceEntry.Collection),
		setTypeURL(schemas.SyntheticServiceEntry.MessageName))
	err := controller.Apply(change)
	g.Expect(err).ToNot(gomega.HaveOccurred())
	entries, err := controller.List(schemas.SyntheticServiceEntry.Type, "")
	g.Expect(err).ToNot(gomega.HaveOccurred())
	g.Expect(entries).To(gomega.HaveLen(1))
	g.Expect(entries[0].Name).To(gomega.Equal("test-synthetic-se"))
	update := <-fx.Events
	g.Expect(update).To(gomega.Equal("ConfigUpdate"))
}
// Package shared holds the process-wide flags and concurrent state shared
// across the puller's components.
package shared

import (
	"flag"

	"github.com/hashicorp/memberlist"
	cmap "github.com/streamrail/concurrent-map"
)

// Command-line flags; parsed by the caller via flag.Parse.
var (
	Dir       = flag.String("dir", "/etc/puller", "The dir to load service configs from")
	D         = flag.Bool("d", false, "Run as a daemon")
	Join      = flag.String("join", "", "Join a cluster")
	Port      = flag.Int("port", 7946, "Port is a port used for internal communication. Port + 1 is the port number of the http server")
	Node      = flag.String("node", "", "Name of the node. If left empty, defaults to os value")
	Interval  = flag.Int64("interval", 30, "Time to sleep between runs of processing")
	PullEvery = flag.Int64("pull-every", 1, "Pull on every Xth processing runs. Specify more than 1 if you are using Puller in a push based way and only use periodic pulls as a fallback")
	ApiKey    = flag.String("apikey", "", "If provided all api endpoints will requires this value sent as an 'authorization' header in the http request")
)

// Concurrent maps keyed by service name; value types are documented per
// entry since cmap stores interface{} values.
var (
	Services         = cmap.New() // map[string]types.Service
	ChangedServices  = cmap.New() // map[string]bool - service definition has changed.
	OutdatedServices = cmap.New() // map[string]bool - service was launched with an image that's older than the current one locally
	BadServiceFiles  = cmap.New() // map[string]bool - bad service files
)

// List is the memberlist cluster handle; set during startup.
var (
	List *memberlist.Memberlist
)
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package ui

import (
	"context"
	"net/http"
	"net/http/httptest"
	"time"

	"chromiumos/tast/errors"
	uiperf "chromiumos/tast/local/bundles/cros/ui/perf"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/chrome/ash"
	"chromiumos/tast/local/perfutil"
	"chromiumos/tast/local/power"
	"chromiumos/tast/testing"
	"chromiumos/tast/testing/hwdep"
)

// init registers the TabLoadingAnimationPerf test with the tast framework,
// including its data dependency on the local test page.
func init() {
	testing.AddTest(&testing.Test{
		Func:         TabLoadingAnimationPerf,
		LacrosStatus: testing.LacrosVariantUnknown,
		Desc:         "Measures the animation smoothness of tab loading animation",
		Contacts:     []string{"yichenz@chromium.org", "chromeos-wmp@google.com"},
		Attr:         []string{"group:crosbolt", "crosbolt_perbuild"},
		SoftwareDeps: []string{"chrome"},
		HardwareDeps: hwdep.D(hwdep.InternalDisplay()),
		Fixture:      "chromeLoggedIn",
		Timeout:      4 * time.Minute,
		Data: []string{
			"tab_loading_test.html",
		},
	})
}

// TabLoadingAnimationPerf opens a locally served test page in a new tab
// (in clamshell mode) and records the
// Chrome.Tabs.AnimationSmoothness.TabLoading histogram via perfutil.
func TabLoadingAnimationPerf(ctx context.Context, s *testing.State) {
	// Ensure display on to record ui performance correctly.
	if err := power.TurnOnDisplay(ctx); err != nil {
		s.Fatal("Failed to turn on display: ", err)
	}
	cr := s.FixtValue().(*chrome.Chrome)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to test API: ", err)
	}
	// Force clamshell mode; cleanup restores the original tablet-mode state.
	cleanup, err := ash.EnsureTabletModeEnabled(ctx, tconn, false)
	if err != nil {
		s.Fatal("Failed to ensure in clamshell mode: ", err)
	}
	defer cleanup(ctx)
	// Serve the bundled test page over a local HTTP server.
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	if err := perfutil.RunMultipleAndSave(ctx, s.OutDir(), cr.Browser(), uiperf.Run(s, perfutil.RunAndWaitAll(tconn, func(ctx context.Context) error {
		conn, err := cr.NewConn(ctx, server.URL+"/tab_loading_test.html")
		if err != nil {
			return errors.Wrap(err, "failed to open a testing page")
		}
		defer conn.Close()
		defer conn.CloseTarget(ctx)
		return nil
	}, "Chrome.Tabs.AnimationSmoothness.TabLoading")), perfutil.StoreSmoothness); err != nil {
		s.Fatal("Failed to run or save: ", err)
	}
}
package poly

import (
	"fmt"
	"image"
	"testing"
)

// TestNewPolygon checks that New accepts well-formed point lists, rejects
// malformed or too-short input, and renders the expected string form.
func TestNewPolygon(t *testing.T) {
	cases := []struct {
		input, want string
		wantErr     bool
	}{
		{"1,2 3,4 5,6", "(1,2)-(3,4)-(5,6)", false},
		{"1,2 3,4 56", "", true},
		{"1,2 3,4 a,b", "", true},
		{"1,2 3,4", "", true},
		{"1,2", "", true},
		{"", "", true},
	}
	for _, tt := range cases {
		t.Run(tt.input, func(t *testing.T) {
			p, err := New(tt.input)
			switch {
			case tt.wantErr && err == nil:
				t.Fatalf("expected an error")
			case !tt.wantErr && err != nil:
				t.Fatalf("got error: %v", err)
			}
			got := fmt.Sprintf("%s", p)
			if got != tt.want {
				t.Fatalf("expected %s; got %s", tt.want, got)
			}
		})
	}
}

// TestPolygonBoundingRectangle checks the bounding rectangle is the same
// regardless of vertex order.
func TestPolygonBoundingRectangle(t *testing.T) {
	cases := []struct {
		input, want string
	}{
		{"1,2 3,4 5,6", "(1,2)-(5,6)"},
		{"1,2 5,6 3,4", "(1,2)-(5,6)"},
		{"5,6 3,4 1,2", "(1,2)-(5,6)"},
		{"5,6 1,2 3,4", "(1,2)-(5,6)"},
	}
	for _, tt := range cases {
		t.Run(tt.input, func(t *testing.T) {
			p, err := New(tt.input)
			if err != nil {
				t.Fatalf("got error: %v", err)
			}
			got := fmt.Sprintf("%v", p.BoundingRectangle())
			if got != tt.want {
				t.Fatalf("expected %v; got %v", tt.want, got)
			}
		})
	}
}

// TestPolygonInside checks point-in-polygon classification for interior,
// edge, and vertex points of a small triangle.
func TestPolygonInside(t *testing.T) {
	cases := []struct {
		input string
		point image.Point
		want  bool
	}{
		{"1,3 2,1 3,3", image.Pt(2, 2), true},
		{"1,3 2,1 3,3", image.Pt(1, 2), false},
		{"1,3 2,1 3,3", image.Pt(1, 1), false},
	}
	for _, tt := range cases {
		t.Run(tt.input, func(t *testing.T) {
			p, err := New(tt.input)
			if err != nil {
				t.Fatalf("got error: %v", err)
			}
			if got := p.Inside(tt.point); got != tt.want {
				t.Fatalf("expected %t; got %t", tt.want, got)
			}
		})
	}
}
package schemes

import (
	"path"

	regv1 "github.com/tmax-cloud/registry-operator/api/v1"
	"github.com/tmax-cloud/registry-operator/internal/common/config"
	"github.com/tmax-cloud/registry-operator/internal/utils"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// In-container paths where the signer's TLS material and the root CA are
// mounted from secrets.
const (
	signerTLSCrtPath = "/certs/signer/tls.crt"
	signerTLSKeyPath = "/certs/signer/tls.key"
	signerRootCAPath = "/certs/rootca/ca.crt"
)

// NotarySignerPod builds the notary-signer Pod manifest for the given
// Notary resource: it runs DB migrations then the signer, wires the MySQL
// backend to the notary DB service, mounts signer TLS and root CA secrets,
// and exposes the HTTP (4444) and gRPC (7899) ports.
//
// Resource limits/requests come from notary.Spec.Signer.Resources; any
// zero value falls back to the operator's configured defaults.
// (Fixes misspelled "litmit*" local variables from the previous revision.)
func NotarySignerPod(notary *regv1.Notary) *corev1.Pod {
	labels := make(map[string]string)
	resName := SubresourceName(notary, SubTypeNotarySignerPod)
	labels["app"] = "notary-signer"
	labels["apps"] = resName
	labels[resName] = "lb"
	// 511 decimal == 0777 octal: secret files are mounted world-accessible.
	mode := int32(511)
	signerImage := config.Config.GetString(config.ConfigNotarySignerImage)

	limitCPU := *notary.Spec.Signer.Resources.Limits.Cpu()
	limitMemory := *notary.Spec.Signer.Resources.Limits.Memory()
	requestCPU := *notary.Spec.Signer.Resources.Requests.Cpu()
	requestMemory := *notary.Spec.Signer.Resources.Requests.Memory()
	// Fall back to operator-level defaults for any unset resource value.
	if limitCPU.IsZero() {
		limitCPU = resource.MustParse(config.Config.GetString(config.ConfigNotarySignerCPU))
	}
	if limitMemory.IsZero() {
		limitMemory = resource.MustParse(config.Config.GetString(config.ConfigNotarySignerMemory))
	}
	if requestCPU.IsZero() {
		requestCPU = resource.MustParse(config.Config.GetString(config.ConfigNotarySignerCPU))
	}
	if requestMemory.IsZero() {
		requestMemory = resource.MustParse(config.Config.GetString(config.ConfigNotarySignerMemory))
	}

	pod := &corev1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name:      resName,
			Namespace: notary.Namespace,
			Labels:    labels,
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:            "notary-signer",
					Image:           signerImage,
					ImagePullPolicy: corev1.PullAlways,
					// Run schema migrations before starting the signer.
					Command: []string{"/usr/bin/env", "sh"},
					Args:    []string{"-c", "/var/lib/notary/migrations/migrate.sh && notary-signer -config=/var/lib/notary/fixtures/custom/signer-config.json"},
					Resources: corev1.ResourceRequirements{
						Limits: corev1.ResourceList{
							corev1.ResourceCPU:    limitCPU,
							corev1.ResourceMemory: limitMemory,
						},
						Requests: corev1.ResourceList{
							corev1.ResourceCPU:    requestCPU,
							corev1.ResourceMemory: requestMemory,
						},
					},
					Env: []corev1.EnvVar{
						{
							Name:  "NOTARY_SIGNER_LOGGING_LEVEL",
							Value: "debug",
						},
						{
							Name:  "NOTARY_SIGNER_STORAGE_BACKEND",
							Value: "mysql",
						},
						{
							Name:  "NOTARY_SIGNER_STORAGE_DB_URL",
							Value: "signer@tcp(" + utils.BuildServiceHostname(SubresourceName(notary, SubTypeNotaryDBService), notary.Namespace) + ":3306)/notarysigner?parseTime=True",
						},
						{
							Name:  "NOTARY_SIGNER_SERVER_HTTP_ADDR",
							Value: ":4444",
						},
						{
							Name:  "NOTARY_SIGNER_SERVER_GRPC_ADDR",
							Value: ":7899",
						},
						{
							Name:  "NOTARY_SIGNER_SERVER_TLS_CERT_FILE",
							Value: signerTLSCrtPath,
						},
						{
							Name:  "NOTARY_SIGNER_SERVER_TLS_KEY_FILE",
							Value: signerTLSKeyPath,
						},
						{
							Name:  "NOTARY_SIGNER_SERVER_CLIENT_CA_FILE",
							Value: signerRootCAPath,
						},
						{
							Name:  "MIGRATIONS_PATH",
							Value: "/var/lib/notary/migrations/signer/mysql",
						},
						{
							Name:  "DB_URL",
							Value: "mysql://signer@tcp(" + utils.BuildServiceHostname(SubresourceName(notary, SubTypeNotaryDBService), notary.Namespace) + ":3306)/notarysigner",
						},
					},
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "signer-tls",
							MountPath: path.Dir(signerTLSCrtPath),
						},
						{
							Name:      "root-ca",
							MountPath: path.Dir(signerRootCAPath),
						},
					},
					Ports: []corev1.ContainerPort{
						{
							ContainerPort: 4444,
						},
						{
							ContainerPort: 7899,
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: "signer-tls",
					VolumeSource: corev1.VolumeSource{
						Secret: &corev1.SecretVolumeSource{
							DefaultMode: &mode,
							SecretName:  SubresourceName(notary, SubTypeNotarySignerSecret),
						},
					},
				},
				{
					Name: "root-ca",
					VolumeSource: corev1.VolumeSource{
						Secret: &corev1.SecretVolumeSource{
							DefaultMode: &mode,
							SecretName:  notary.Spec.RootCASecret,
						},
					},
				},
			},
		},
	}

	// Attach the configured image pull secret, if any (lookup hoisted so the
	// config is consulted once).
	if pullSecret := config.Config.GetString(config.ConfigNotarySignerImagePullSecret); pullSecret != "" {
		pod.Spec.ImagePullSecrets = append(pod.Spec.ImagePullSecrets, corev1.LocalObjectReference{Name: pullSecret})
	}
	return pod
}
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

// Integration tests for the upgrade-all-service-instances errand binary.
// Each test stands up fake broker / CF / UAA HTTP servers (ghttp), writes
// an errand config file pointing at them, runs the real binary as a child
// process (gexec) and asserts on its exit code and log output.
package upgrade_all_service_instances_test

import (
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"net/http"
	"regexp"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gbytes"
	"github.com/onsi/gomega/gexec"
	"github.com/onsi/gomega/ghttp"
	"github.com/pivotal-cf/on-demand-service-broker/config"
	. "github.com/pivotal-cf/on-demand-service-broker/integration_tests/helpers"
	"github.com/pivotal-cf/on-demand-service-broker/loggerfactory"
	"gopkg.in/yaml.v2"
)

// Basic-auth credentials every fake broker endpoint requires.
const (
	brokerUsername = "broker username"
	brokerPassword = "broker password"
)

var _ = Describe("running the tool to upgrade all service instances", func() {
	// startUpgradeAllInstanceBinary serialises the errand config to a temp
	// YAML file and launches the upgrade-all binary pointed at it.
	startUpgradeAllInstanceBinary := func(errandConfig config.InstanceIteratorConfig) *gexec.Session {
		b, err := yaml.Marshal(errandConfig)
		Expect(err).ToNot(HaveOccurred())
		configPath := writeConfigFile(string(b))
		return StartBinaryWithParams(binaryPath, []string{"-configPath", configPath})
	}

	Describe("upgrading only via BOSH", func() {
		var broker *ghttp.Server

		When("the broker is not configured with TLS", func() {
			var (
				serviceInstances        string
				instanceID              string
				lastOperationHandler    *FakeHandler
				serviceInstancesHandler *FakeHandler
				upgradeHandler          *FakeHandler
				errandConfig            config.InstanceIteratorConfig
			)

			BeforeEach(func() {
				broker = ghttp.NewServer()
				errandConfig = errandConfigurationBOSH(broker.URL())
				serviceInstancesHandler, instanceID, serviceInstances = handleServiceInstanceList(broker)
				upgradeHandler = handleBOSHServiceInstanceUpgrade(broker)
				lastOperationHandler = handleBOSHLastOperation(broker)
			})

			AfterEach(func() {
				broker.Close()
			})

			It("exits successfully and upgrades the instance", func() {
				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
				Expect(runningTool).To(SatisfyAll(
					Not(gbytes.Say("Upgrading all instances via CF")),
					gbytes.Say("Upgrading all instances via BOSH"),
					gbytes.Say("Sleep interval until next attempt: 2s"),
					gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
					gbytes.Say("Number of successful operations: 1"),
				))
				// The upgrade request must carry the plan and space from the
				// service-instances listing.
				Expect(upgradeHandler.GetRequestForCall(0).Body).To(MatchJSON(`{ "plan_id": "service-plan-id", "context": {"space_guid": "the-space-guid"} }`))
			})

			It("exits successfully when all instances are already up-to-date", func() {
				runningTool := startUpgradeAllInstanceBinary(errandConfig)
				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
				Expect(runningTool).To(gbytes.Say("Number of successful operations: 1"))

				By("running upgrade all again")
				// 204 from the upgrade endpoint means "nothing to do".
				upgradeHandler.RespondsWith(http.StatusNoContent, "")
				runningTool = startUpgradeAllInstanceBinary(errandConfig)
				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
				Expect(runningTool).To(SatisfyAll(
					gbytes.Say(`Result: instance already up to date - operation skipped`),
					gbytes.Say("Sleep interval until next attempt: 2s"),
					gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
					gbytes.Say("Number of skipped operations: 1"),
				))
			})

			It("uses the canary_selection_params when querying canary instances", func() {
				instanceID := "my-instance-id"
				canaryInstanceID := "canary-instance-id"
				canariesList := fmt.Sprintf(`[{"plan_id": "service-plan-id", "service_instance_id": "%s"}]`, canaryInstanceID)
				serviceInstances := fmt.Sprintf(`[{"plan_id": "service-plan-id", "service_instance_id": "%s"}, {"plan_id": "service-plan-id", "service_instance_id": "%s"}]`, instanceID, canaryInstanceID)
				// Unfiltered listing returns both instances; the filtered
				// (canary) query returns only the canary.
				serviceInstancesHandler.WithQueryParams().RespondsWith(http.StatusOK, serviceInstances)
				serviceInstancesHandler.WithQueryParams("foo=bar").RespondsWith(http.StatusOK, canariesList)
				lastOperationHandler.RespondsWith(http.StatusOK, `{"state":"succeeded"}`)

				errandConfig.CanarySelectionParams = map[string]string{"foo": "bar"}
				errandConfig.Canaries = 1

				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
				Expect(runningTool).To(SatisfyAll(
					gbytes.Say(`\[upgrade\-all\] STARTING CANARIES: 1 canaries`),
					gbytes.Say(`\[canary-instance-id] Starting to process service instance`),
					gbytes.Say(`\[upgrade\-all\] FINISHED CANARIES`),
					gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
					gbytes.Say("Number of successful operations: 2"),
				))
			})

			It("uses the canary_selection_params but returns an error if no instances found but instances exist", func() {
				canariesList := `[]`
				serviceInstancesHandler.WithQueryParams("cf_org=my-org", "cf_space=my-space").RespondsWith(http.StatusOK, canariesList)

				errandConfig.CanarySelectionParams = map[string]string{"cf_org": "my-org", "cf_space": "my-space"}
				errandConfig.Canaries = 1

				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(1))
				Expect(runningTool).To(gbytes.Say("Failed to find a match to the canary selection criteria"))
			})

			It("returns an error if service-instances api responds with a non-200", func() {
				serviceInstancesHandler.RespondsWith(http.StatusInternalServerError, `{"description": "a forced error"}`)

				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(1))
				Expect(runningTool).To(gbytes.Say("error listing service instances"))
				Expect(runningTool).To(gbytes.Say("500"))
			})

			It("exits with a failure and shows a summary message when the upgrade fails", func() {
				// Second last_operation poll reports the upgrade failed.
				lastOperationHandler.RespondsOnCall(1, http.StatusOK, `{"state":"failed"}`)

				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(1))
				Expect(runningTool).To(gbytes.Say("Status: FAILED"))
				Expect(runningTool).To(gbytes.Say(fmt.Sprintf(`Number of service instances that failed to process: 1 \[%s\]`, instanceID)))
			})

			When("the attempt limit is reached", func() {
				It("exits with an error reporting the instances that were not upgraded", func() {
					// 409 means "instance busy" — the errand retries up to
					// AttemptLimit (2, per errandConfigurationBOSH) times.
					upgradeHandler.RespondsWith(http.StatusConflict, "")

					runningTool := startUpgradeAllInstanceBinary(errandConfig)

					Eventually(runningTool, 5*time.Second).Should(gexec.Exit(1))
					Expect(runningTool).To(SatisfyAll(
						gbytes.Say(`\[upgrade\-all\] Processing all instances. Attempt 1/2`),
						gbytes.Say(`\[upgrade\-all\] Processing all remaining instances. Attempt 2/2`),
						gbytes.Say("Number of busy instances which could not be processed: 1"),
						gbytes.Say(fmt.Sprintf("The following instances could not be processed: %s", instanceID)),
					))
				})
			})

			When("a service instance plan is updated after upgrade-all starts but before instance upgrade", func() {
				It("uses the new plan for the upgrade", func() {
					spaceGuid := "some-space-guid"
					serviceInstancesInitialResponse := fmt.Sprintf(`[{"plan_id": "service-plan-id", "service_instance_id": "%s", "space_guid": "%s"}]`, instanceID, spaceGuid)
					serviceInstancesResponseAfterPlanUpdate := fmt.Sprintf(`[{"plan_id": "service-plan-id-2", "service_instance_id": "%s", "space_guid": "%s"}]`, instanceID, spaceGuid)
					serviceInstancesHandler.RespondsOnCall(0, http.StatusOK, serviceInstancesInitialResponse)
					serviceInstancesHandler.RespondsOnCall(1, http.StatusOK, serviceInstancesResponseAfterPlanUpdate)

					runningTool := startUpgradeAllInstanceBinary(errandConfig)

					Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
					Expect(runningTool).To(SatisfyAll(
						gbytes.Say("Sleep interval until next attempt: 2s"),
						gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
						gbytes.Say("Number of successful operations: 1"),
					))
					// The refreshed plan id must be the one sent upstream.
					Expect(upgradeHandler.GetRequestForCall(0).Body).To(MatchJSON(fmt.Sprintf(`{ "plan_id": "service-plan-id-2", "context": {"space_guid": %q} }`, spaceGuid)))
				})
			})

			When("a service instance is deleted after upgrade-all starts but before the instance upgrade", func() {
				It("Fetches the latest service instances info and reports a deleted service", func() {
					serviceInstancesHandler.RespondsOnCall(0, http.StatusOK, serviceInstances)
					serviceInstancesHandler.RespondsOnCall(1, http.StatusOK, "[]")

					runningTool := startUpgradeAllInstanceBinary(errandConfig)

					Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
					Expect(runningTool).To(SatisfyAll(
						gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
						gbytes.Say("Number of successful operations: 0"),
						gbytes.Say("Number of deleted instances before operation could happen: 1"),
					))
				})
			})

			When("a service instance refresh fails prior to instance upgrade", func() {
				It("logs failure and carries on with previous data", func() {
					serviceInstancesHandler.RespondsOnCall(0, http.StatusOK, serviceInstances)
					serviceInstancesHandler.RespondsOnCall(1, http.StatusInternalServerError, "oops")

					runningTool := startUpgradeAllInstanceBinary(errandConfig)

					Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
					Expect(runningTool).To(SatisfyAll(
						gbytes.Say("Failed to get refreshed list of instances. Continuing with previously fetched info"),
						gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
						gbytes.Say("Number of successful operations: 1"),
					))
				})
			})
		})

		When("the broker is configured with TLS", func() {
			var (
				pemCert      string
				errandConfig config.InstanceIteratorConfig
			)

			BeforeEach(func() {
				broker = ghttp.NewTLSServer()
				// Route TLS handshake noise through the test logger.
				broker.HTTPTestServer.Config.ErrorLog = loggerfactory.New(GinkgoWriter, "server", loggerfactory.Flags).New()
				// PEM-encode the server's self-signed certificate so tests can
				// feed it back as a trusted CA.
				rawPem := broker.HTTPTestServer.Certificate().Raw
				pemCert = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rawPem}))

				errandConfig = errandConfigurationBOSH(broker.URL())

				handleServiceInstanceList(broker)
				handleBOSHServiceInstanceUpgrade(broker)
				handleBOSHLastOperation(broker)
			})

			AfterEach(func() {
				broker.Close()
			})

			It("upgrades all instances", func() {
				errandConfig.BrokerAPI.TLS.CACert = pemCert

				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
				Expect(runningTool).To(gbytes.Say("Number of successful operations: 1"))
			})

			It("skips ssl cert verification when disabled", func() {
				errandConfig.BrokerAPI.TLS.DisableSSLCertVerification = true

				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
				Expect(runningTool).To(gbytes.Say("Number of successful operations: 1"))
			})
		})
	})

	Describe("upgrading via CF and BOSH", func() {
		var (
			broker       *ghttp.Server
			cfApi        *ghttp.Server
			uaaApi       *ghttp.Server
			errandConfig config.InstanceIteratorConfig
		)

		BeforeEach(func() {
			broker = ghttp.NewServer()
			cfApi = ghttp.NewServer()
			uaaApi = ghttp.NewServer()
			errandConfig = errandConfigurationCF(broker.URL(), cfApi.URL(), uaaApi.URL())

			handleUAA(uaaApi)
			handleServiceInstanceList(broker)
			handleBOSHServiceInstanceUpgrade(broker)
			handleBOSHLastOperation(broker)
			handleCFInfo(cfApi)
			handleCFServicePlans(cfApi)
		})

		AfterEach(func() {
			broker.Close()
			cfApi.Close()
			uaaApi.Close()
		})

		When("an upgrade is available via CF", func() {
			BeforeEach(func() {
				// PUT accepts the update (202); subsequent GET polls report it
				// has succeeded.
				cfApi.RouteToHandler(http.MethodPut, regexp.MustCompile(`/v2/service_instances/.*`),
					ghttp.CombineHandlers(
						ghttp.RespondWith(http.StatusAccepted, `{"entity": {"last_operation": { "type": "update", "state": "in progress" }}}`),
					),
				)
				cfApi.RouteToHandler(http.MethodGet, regexp.MustCompile(`/v2/service_instances/.*`),
					ghttp.CombineHandlers(
						ghttp.RespondWith(http.StatusOK, `{"entity": {"last_operation": { "type": "update", "state": "succeeded" }}}`),
					),
				)
			})

			It("upgrades via CF then BOSH", func() {
				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
				// Two full passes are expected: CF first, then BOSH.
				Expect(runningTool).To(SatisfyAll(
					gbytes.Say("Upgrading all instances via CF"),
					gbytes.Say("Sleep interval until next attempt: 2s"),
					gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
					gbytes.Say("Number of successful operations: 1"),
					gbytes.Say("Number of skipped operations: 0"),
					gbytes.Say("Upgrading all instances via BOSH"),
					gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
					gbytes.Say("Number of successful operations: 1"),
					gbytes.Say("Number of skipped operations: 0"),
				))
			})

			When("the CF upgrade fails", func() {
				It("doesn't do BOSH upgrades", func() {
					cfApi.RouteToHandler(http.MethodGet, regexp.MustCompile(`/v2/service_instances/.*`),
						ghttp.CombineHandlers(
							ghttp.RespondWith(http.StatusOK, `{"entity": {"last_operation": { "type": "update", "state": "failed" }}}`),
						),
					)

					runningTool := startUpgradeAllInstanceBinary(errandConfig)

					Eventually(runningTool, 5*time.Second).Should(gexec.Exit(1))
					Expect(runningTool).To(SatisfyAll(
						gbytes.Say("Upgrading all instances via CF"),
						gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: FAILED`),
						gbytes.Say("Number of service instances that failed to process: 1"),
						Not(gbytes.Say("Upgrading all instances via BOSH")),
					))
				})
			})
		})

		When("no upgrades are available via CF", func() {
			BeforeEach(func() {
				// 201 with a succeeded last_operation signals "nothing to do"
				// on the CF leg.
				cfApi.RouteToHandler(http.MethodPut, regexp.MustCompile(`/v2/service_instances/.*`),
					ghttp.CombineHandlers(
						ghttp.RespondWith(http.StatusCreated, `{"entity": {"last_operation": {"type": "update", "state": "succeeded"}}}`),
					),
				)
			})

			It("says that the CF upgrade was skipped, and does the upgrade via BOSH", func() {
				runningTool := startUpgradeAllInstanceBinary(errandConfig)

				Eventually(runningTool, 5*time.Second).Should(gexec.Exit(0))
				Expect(runningTool).To(SatisfyAll(
					gbytes.Say("Upgrading all instances via CF"),
					gbytes.Say("instance already up to date - operation skipped"),
					gbytes.Say("Sleep interval until next attempt: 2s"),
					gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
					gbytes.Say("Number of successful operations: 0"),
					gbytes.Say("Number of skipped operations: 1"),
					gbytes.Say("Upgrading all instances via BOSH"),
					gbytes.Say(`\[upgrade\-all\] FINISHED PROCESSING Status: SUCCESS`),
					gbytes.Say("Number of successful operations: 1"),
					gbytes.Say("Number of skipped operations: 0"),
				))
			})
		})
	})
})

// writeConfigFile writes configContent to a fresh temp file and returns its
// path.
// NOTE(review): ioutil.TempFile is deprecated since Go 1.16 (os.CreateTemp),
// and the temp files are never removed — consider DeferCleanup/os.Remove.
func writeConfigFile(configContent string) string {
	file, err := ioutil.TempFile("", "config")
	Expect(err).NotTo(HaveOccurred())
	defer file.Close()
	_, err = file.Write([]byte(configContent))
	Expect(err).NotTo(HaveOccurred())
	return file.Name()
}

// handleServiceInstanceList wires the broker's /mgmt/service_instances
// endpoint to return a single instance, and returns the handler plus the
// instance id and the canned JSON listing for reuse in assertions.
func handleServiceInstanceList(broker *ghttp.Server) (*FakeHandler, string, string) {
	serviceInstancesHandler := new(FakeHandler)
	broker.RouteToHandler(http.MethodGet, "/mgmt/service_instances",
		ghttp.CombineHandlers(
			ghttp.VerifyBasicAuth(brokerUsername, brokerPassword),
			serviceInstancesHandler.Handle,
		))
	instanceID := "service-instance-id"
	response := `[{"plan_id": "service-plan-id", "service_instance_id": "%s", "space_guid": "the-space-guid"}]`
	serviceInstances := fmt.Sprintf(response, instanceID)
	serviceInstancesHandler.RespondsWith(http.StatusOK, serviceInstances)
	return serviceInstancesHandler, instanceID, serviceInstances
}

// handleBOSHServiceInstanceUpgrade wires the broker's PATCH upgrade endpoint
// to accept upgrades (202) and hand back BOSH operation data.
func handleBOSHServiceInstanceUpgrade(broker *ghttp.Server) *FakeHandler {
	upgradeHandler := new(FakeHandler)
	broker.RouteToHandler(http.MethodPatch, regexp.MustCompile(`/mgmt/service_instances/.*`),
		ghttp.CombineHandlers(
			ghttp.VerifyBasicAuth(brokerUsername, brokerPassword),
			ghttp.VerifyRequest(http.MethodPatch, ContainSubstring("/mgmt/service_instances/"), "operation_type=upgrade"),
			upgradeHandler.Handle,
		))
	operationData := `{"BoshTaskID":1,"OperationType":"upgrade","PostDeployErrand":{},"PreDeleteErrand":{}}`
	upgradeHandler.RespondsWith(http.StatusAccepted, operationData)
	return upgradeHandler
}

// handleBOSHLastOperation wires the OSB last_operation endpoint to report
// "in progress" on the first poll and "succeeded" on the second.
func handleBOSHLastOperation(broker *ghttp.Server) *FakeHandler {
	lastOperationHandler := new(FakeHandler)
	broker.RouteToHandler(http.MethodGet, regexp.MustCompile(`/v2/service_instances/.*/last_operation`),
		ghttp.CombineHandlers(
			ghttp.VerifyBasicAuth(brokerUsername, brokerPassword),
			lastOperationHandler.Handle,
		))
	lastOperationHandler.RespondsOnCall(0, http.StatusOK, `{"state":"in progress"}`)
	lastOperationHandler.RespondsOnCall(1, http.StatusOK, `{"state":"succeeded"}`)
	return lastOperationHandler
}

// handleUAA wires the fake UAA token endpoint to hand out a static token.
func handleUAA(uaaAPI *ghttp.Server) {
	uaaAuthenticationHandler := new(FakeHandler)
	uaaAPI.RouteToHandler(http.MethodPost, regexp.MustCompile(`/oauth/token`),
		ghttp.CombineHandlers(
			uaaAuthenticationHandler.Handle,
		))
	authenticationResponse := `{ "access_token": "some-random-token", "expires_in": 3600}`
	uaaAuthenticationHandler.RespondsWith(http.StatusOK, authenticationResponse)
}

// handleCFInfo wires the CF /v2/info endpoint with static API versions.
func handleCFInfo(cfAPI *ghttp.Server) {
	cfInfoHandler := new(FakeHandler)
	cfAPI.RouteToHandler(http.MethodGet, "/v2/info", ghttp.CombineHandlers(
		cfInfoHandler.Handle))
	cfInfoResponse := `{"api_version": "2.139.0","osbapi_version": "2.15"}`
	cfInfoHandler.RespondsWith(http.StatusOK, cfInfoResponse)
}

// handleCFServicePlans wires the CF service-plans endpoint with a plan that
// carries maintenance_info (required when MaintenanceInfoPresent is set).
func handleCFServicePlans(cfAPI *ghttp.Server) {
	servicePlanHandler := new(FakeHandler)
	cfAPI.RouteToHandler(http.MethodGet, regexp.MustCompile(`/v2/service_plans`), ghttp.CombineHandlers(
		servicePlanHandler.Handle,
	))
	servicePlanResponse := `{ "resources":[{ "entity": { "maintenance_info": { "version": "0.31.0" }}}]}`
	servicePlanHandler.RespondsWith(http.StatusOK, servicePlanResponse)
}

// errandConfigurationBOSH builds the minimal errand config for a BOSH-only
// upgrade run: 2 attempts, 2s between attempts, one instance in flight.
func errandConfigurationBOSH(brokerURL string) config.InstanceIteratorConfig {
	return config.InstanceIteratorConfig{
		PollingInterval: 1,
		AttemptLimit:    2,
		AttemptInterval: 2,
		MaxInFlight:     1,
		BrokerAPI: config.BrokerAPI{
			URL: brokerURL,
			Authentication: config.Authentication{
				Basic: config.UserCredentials{
					Username: brokerUsername,
					Password: brokerPassword,
				},
			},
		},
	}
}

// errandConfigurationCF extends the BOSH config with CF/UAA endpoints so the
// errand performs the CF upgrade leg before the BOSH leg.
func errandConfigurationCF(brokerURL, cfURL, uaaURL string) config.InstanceIteratorConfig {
	errandConfig := errandConfigurationBOSH(brokerURL)
	errandConfig.CF = config.CF{
		URL: cfURL,
		UAA: config.UAAConfig{
			URL: uaaURL,
			Authentication: config.UAACredentials{
				UserCredentials: config.UserCredentials{
					Username: "cf-username",
					Password: "cf-password",
				},
			},
		},
		DisableSSLCertVerification: true,
	}
	errandConfig.MaintenanceInfoPresent = true
	return errandConfig
}
package day12 import ( "bufio" "math" "os" "strconv" ) type instruction struct { direction string ammount int } type location struct { x int y int } //ParseInput : parse input of day12 func ParseInput(fileName string) []instruction { file, err := os.Open(fileName) if err != nil { panic(err) } scanner := bufio.NewScanner(file) instructions := []instruction{} for scanner.Scan() { line := scanner.Text() direction := line[:1] ammount, _ := strconv.Atoi(line[1:]) instructions = append(instructions, instruction{direction: direction, ammount: ammount}) } return instructions } //CalculateDistance : day12 part 1 func CalculateDistance(fileName string) int { instructions := ParseInput(fileName) directionsLeft := []string{"east", "north", "west", "south"} directionsRight := []string{"north", "east", "south", "west"} direction := "east" ferryLocation := location{0, 0} for _, instruction := range instructions { //go over all instructions switch instruction.direction { case "N": // go up ferryLocation.y += instruction.ammount case "S": ferryLocation.y -= instruction.ammount case "E": ferryLocation.x += instruction.ammount case "W": ferryLocation.x -= instruction.ammount case "L": test := instruction.ammount / 90 currentIndex := SliceIndex(len(directionsLeft), func(i int) bool { return directionsLeft[i] == direction }) direction = directionsLeft[(test+currentIndex)%4] case "R": test := instruction.ammount / 90 currentIndex := SliceIndex(len(directionsRight), func(i int) bool { return directionsRight[i] == direction }) direction = directionsRight[(test+currentIndex)%4] case "F": switch direction { case "east": ferryLocation.x += instruction.ammount case "north": ferryLocation.y += instruction.ammount case "west": ferryLocation.x -= instruction.ammount case "south": ferryLocation.y -= instruction.ammount } } } return Abs(Abs(ferryLocation.x) + Abs(ferryLocation.y)) } //CalculateDistanceWaypoint : day12 part 2 func CalculateDistanceWaypoint(fileName string) int { instructions := 
ParseInput(fileName) ferryLocation := location{0, 0} waypointLocation := location{10, 1} for _, instruction := range instructions { //go over all instructions switch instruction.direction { case "N": // go up waypointLocation.y += instruction.ammount case "S": waypointLocation.y -= instruction.ammount case "E": waypointLocation.x += instruction.ammount case "W": waypointLocation.x -= instruction.ammount case "L": rad := float64(instruction.ammount) * (math.Pi / 180) wlx := waypointLocation.x wly := waypointLocation.y waypointLocation.x = int(float64(wlx)*math.Cos(rad)) - int(float64(wly)*math.Sin(rad)) waypointLocation.y = int(float64(wlx)*math.Sin(rad)) + int(float64(wly)*math.Cos(rad)) case "R": rad := (float64(instruction.ammount*-1) * (math.Pi / 180)) wlx := waypointLocation.x wly := waypointLocation.y waypointLocation.x = int(float64(wlx)*math.Cos(rad)) - int(float64(wly)*math.Sin(rad)) waypointLocation.y = int(float64(wlx)*math.Sin(rad)) + int(float64(wly)*math.Cos(rad)) case "F": ferryLocation.x += instruction.ammount * waypointLocation.x ferryLocation.y += instruction.ammount * waypointLocation.y } } return Abs(Abs(ferryLocation.x) + Abs(ferryLocation.y)) } // Abs returns the absolute value of x. func Abs(x int) int { if x < 0 { return -x } return x } // SliceIndex : https://stackoverflow.com/questions/8307478/how-to-find-out-element-position-in-slice func SliceIndex(limit int, predicate func(i int) bool) int { for i := 0; i < limit; i++ { if predicate(i) { return i } } return -1 }
/*
 * Created by lintao on 2023/8/1 5:13 PM
 * Copyright © 2020-2023 LINTAO. All rights reserved.
 *
 */

// codegen is a stringer-style tool: it scans a package for integer constants
// of a given type and either generates error-code registration calls or a
// markdown table documenting them.
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/ast"
	"go/constant"
	"go/format"
	"go/token"
	"go/types"
	"html/template"
	"log"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"golang.org/x/tools/go/packages"
)

// errCodeDocPrefix is the markdown header emitted before the error-code
// table. The {{.}} placeholders are filled with a backtick at execute time
// (backticks cannot appear literally inside a Go raw string).
var errCodeDocPrefix = `# 错误码

!!系统错误码列表,由 {{.}}codegen -type=int -doc{{.}} 命令生成,不要对此文件做任何更改。

## 功能说明

如果返回结果中存在 {{.}}code{{.}} 字段,则表示调用 API 接口失败。例如:

{{.}}{{.}}{{.}}json
{
  "code": 100101,
  "message": "Database error"
}
{{.}}{{.}}{{.}}

上述返回中 {{.}}code{{.}} 表示错误码,{{.}}message{{.}} 表示该错误的具体信息。每个错误同时也对应一个 HTTP 状态码,比如上述错误码对应了 HTTP 状态码 500(Internal Server Error)。

## 错误码列表

系统支持的错误码列表如下:

| Identifier | Code | HTTP Code | Description |
| ---------- | ---- | --------- | ----------- |
`

// Command-line flags.
var (
	typeNames  = flag.String("type", "", "comma-separated list of type names; must be set")
	output     = flag.String("output", "", "output file name; default srcdir/<type>_string.go")
	trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names")
	buildTags  = flag.String("tags", "", "comma-separated list of build tags to apply")
	doc        = flag.Bool("doc", false, "if true only generate error code documentation in markdown format")
)

// Usage is a replacement usage function for the flags package.
func Usage() {
	fmt.Fprintf(os.Stderr, "Usage of codegen:\n")
	fmt.Fprintf(os.Stderr, "\tcodegen [flags] -type T [directory]\n")
	fmt.Fprintf(os.Stderr, "\tcodegen [flags] -type T files... # Must be a single package\n")
	fmt.Fprintf(os.Stderr, "Flags:\n")
	flag.PrintDefaults()
}

// main parses flags, loads the target package, runs either code or doc
// generation for every requested type, and writes the result to a file.
func main() {
	log.SetFlags(0)
	log.SetPrefix("codegen: ")
	flag.Usage = Usage
	flag.Parse()
	if len(*typeNames) == 0 {
		flag.Usage()
		os.Exit(2)
	}

	types := strings.Split(*typeNames, ",")
	var tags []string
	if len(*buildTags) > 0 {
		tags = strings.Split(*buildTags, ",")
	}

	// We accept either one directory or a list of files. Which do we have?
	args := flag.Args()
	if len(args) == 0 {
		// Default: process whole package in current directory.
		args = []string{"."}
	}

	// Parse the package once.
	var dir string
	g := Generator{
		trimPrefix: *trimprefix,
	}
	// TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc).
	if len(args) == 1 && isDirectory(args[0]) {
		dir = args[0]
	} else {
		if len(tags) != 0 {
			log.Fatal("-tags option applies only to directories, not when files are specified")
		}
		dir = filepath.Dir(args[0])
	}

	g.parsePackage(args, tags)

	if !*doc {
		// Print the header and package clause.
		g.Printf("// Copyright 2020 Lingfei Kong <colin404@foxmail.com>. All rights reserved.\n")
		g.Printf("// Use of this source code is governed by a MIT style\n")
		g.Printf("// license that can be found in the LICENSE file.\n")
		g.Printf("\n")
		g.Printf("// Code generated by \"codegen %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " "))
		g.Printf("\n")
		g.Printf("package %s", g.pkg.name)
		g.Printf("\n")
	}

	// Run generate for each type.
	var src []byte
	for _, typeName := range types {
		if *doc {
			g.generateDocs(typeName)
			src = g.buf.Bytes()
		} else {
			g.generate(typeName)
			// Format the output.
			src = g.format()
		}
	}

	// Write to file.
	outputName := *output
	if outputName == "" {
		// Derive "<package-dir>_generated.go" (or "<file>_generated.go" when a
		// single file was given), with dashes mapped to underscores.
		absDir, _ := filepath.Abs(dir)
		baseName := fmt.Sprintf("%s_generated.go", strings.ReplaceAll(filepath.Base(absDir), "-", "_"))
		if len(flag.Args()) == 1 {
			baseName = fmt.Sprintf(
				"%s_generated.go",
				strings.ReplaceAll(filepath.Base(strings.TrimSuffix(flag.Args()[0], ".go")), "-", "_"),
			)
		}
		outputName = filepath.Join(dir, strings.ToLower(baseName))
	}
	err := os.WriteFile(outputName, src, 0o600)
	if err != nil {
		log.Fatalf("writing output: %s", err)
	}
}

// isDirectory reports whether the named file is a directory.
func isDirectory(name string) bool {
	info, err := os.Stat(name)
	if err != nil {
		log.Fatal(err)
	}
	return info.IsDir()
}

// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
	buf bytes.Buffer // Accumulated output.
	pkg *Package     // Package we are scanning.

	trimPrefix string
}

// Printf like fmt.Printf, but add the string to g.buf.
func (g *Generator) Printf(format string, args ...interface{}) {
	fmt.Fprintf(&g.buf, format, args...)
}

// File holds a single parsed file and associated data.
type File struct {
	pkg  *Package  // Package to which this file belongs.
	file *ast.File // Parsed AST.
	// These fields are reset for each type being generated.
	typeName string  // Name of the constant type.
	values   []Value // Accumulator for constant values of that type.

	trimPrefix string
}

// Package defines options for package.
type Package struct {
	name  string
	defs  map[*ast.Ident]types.Object
	files []*File
}

// parsePackage analyzes the single package constructed from the patterns and tags.
// parsePackage exits if there is an error.
func (g *Generator) parsePackage(patterns []string, tags []string) {
	cfg := &packages.Config{
		// nolint: staticcheck
		Mode: packages.NeedName | packages.NeedDeps | packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedTypes,
		// TODO: Need to think about constants in test files. Maybe write type_string_test.go
		// in a separate pass? For later.
		Tests:      false,
		BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))},
	}
	pkgs, err := packages.Load(cfg, patterns...)
	if err != nil {
		log.Fatal(err)
	}
	if len(pkgs) != 1 {
		log.Fatalf("error: %d packages found", len(pkgs))
	}
	g.addPackage(pkgs[0])
}

// addPackage adds a type checked Package and its syntax files to the generator.
func (g *Generator) addPackage(pkg *packages.Package) {
	g.pkg = &Package{
		name:  pkg.Name,
		defs:  pkg.TypesInfo.Defs,
		files: make([]*File, len(pkg.Syntax)),
	}

	for i, file := range pkg.Syntax {
		g.pkg.files[i] = &File{
			file:       file,
			pkg:        g.pkg,
			trimPrefix: g.trimPrefix,
		}
	}
}

// generate produces the register calls for the named type.
func (g *Generator) generate(typeName string) {
	values := make([]Value, 0, 100)
	for _, file := range g.pkg.files {
		// Set the state for this run of the walker.
		file.typeName = typeName
		file.values = nil
		if file.file != nil {
			ast.Inspect(file.file, file.genDecl)
			values = append(values, file.values...)
		}
	}

	if len(values) == 0 {
		log.Fatalf("no values defined for type %s", typeName)
	}

	// Generate code that will fail if the constants change value.
	g.Printf("\t// init register error codes defines in this source code to `github.com/marmotedu/errors`\n")
	g.Printf("func init() {\n")
	for _, v := range values {
		code, description := v.ParseComment()
		g.Printf("\tregister(%s, %s, \"%s\")\n", v.originalName, code, description)
	}
	g.Printf("}\n")
}

// generateDocs produces error code markdown document for the named type.
func (g *Generator) generateDocs(typeName string) {
	values := make([]Value, 0, 100)
	for _, file := range g.pkg.files {
		// Set the state for this run of the walker.
		file.typeName = typeName
		file.values = nil
		if file.file != nil {
			ast.Inspect(file.file, file.genDecl)
			values = append(values, file.values...)
		}
	}

	if len(values) == 0 {
		log.Fatalf("no values defined for type %s", typeName)
	}

	// Render the doc prefix, substituting "`" for each {{.}} placeholder.
	// NOTE(review): the template parse and execute errors are silently
	// discarded here, and html/template (not text/template) is imported, which
	// applies contextual escaping — confirm this is intended for markdown output.
	tmpl, _ := template.New("doc").Parse(errCodeDocPrefix)

	var buf bytes.Buffer
	_ = tmpl.Execute(&buf, "`")

	// Generate code that will fail if the constants change value.
	// NOTE(review): buf.String() is used as a Printf FORMAT string; a literal
	// '%' in the prefix would be misinterpreted. g.buf.WriteString would be safer.
	g.Printf(buf.String())
	for _, v := range values {
		code, description := v.ParseComment()
		// g.Printf("\tregister(%s, %s, \"%s\")\n", v.originalName, code, description)
		g.Printf("| %s | %d | %s | %s |\n", v.originalName, v.value, code, description)
	}
	g.Printf("\n")
}

// format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
	src, err := format.Source(g.buf.Bytes())
	if err != nil {
		// Should never happen, but can arise when developing this code.
		// The user can compile the output to see the error.
		log.Printf("warning: internal error: invalid Go generated: %s", err)
		log.Printf("warning: compile the package to analyze the error")
		return g.buf.Bytes()
	}
	return src
}

// Value represents a declared constant.
type Value struct {
	comment      string
	originalName string // The name of the constant.
	name         string // The name with trimmed prefix.
	// The value is stored as a bit pattern alone. The boolean tells us
	// whether to interpret it as an int64 or a uint64; the only place
	// this matters is when sorting.
	// Much of the time the str field is all we need; it is printed
	// by Value.String.
	value  uint64 // Will be converted to int64 when needed.
	signed bool   // Whether the constant is a signed type.
	str    string // The string representation given by the "go/constant" package.
}

func (v *Value) String() string {
	return v.str
}

// ParseComment parse comment to http code and error code description.
// Comments are expected to match the regex below, i.e. roughly
// "<Name> - <3-digit HTTP code>: <description>." — anything else falls back
// to 500 / "Internal server error".
func (v *Value) ParseComment() (string, string) {
	reg := regexp.MustCompile(`\w\s*-\s*(\d{3})\s*:\s*(\s*.*)\s*\.\n*`)
	if !reg.MatchString(v.comment) {
		log.Printf("constant '%s' have wrong comment format, register with 500 as default", v.originalName)

		return "500", "Internal server error"
	}

	groups := reg.FindStringSubmatch(v.comment)
	if len(groups) != 3 {
		return "500", "Internal server error"
	}

	return groups[1], groups[2]
}

// nolint: gocognit
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
	decl, ok := node.(*ast.GenDecl)
	if !ok || decl.Tok != token.CONST {
		// We only care about const declarations.
		return true
	}
	// The name of the type of the constants we are declaring.
	// Can change if this is a multi-element declaration.
	typ := ""
	// Loop over the elements of the declaration. Each element is a ValueSpec:
	// a list of names possibly followed by a type, possibly followed by values.
	// If the type and value are both missing, we carry down the type (and value,
	// but the "go/types" package takes care of that).
	for _, spec := range decl.Specs {
		vspec, _ := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST.
		if vspec.Type == nil && len(vspec.Values) > 0 {
			// "X = 1". With no type but a value. If the constant is untyped,
			// skip this vspec and reset the remembered type.
			typ = ""

			// If this is a simple type conversion, remember the type.
			// We don't mind if this is actually a call; a qualified call won't
			// be matched (that will be SelectorExpr, not Ident), and only unusual
			// situations will result in a function call that appears to be
			// a type conversion.
			ce, ok := vspec.Values[0].(*ast.CallExpr)
			if !ok {
				continue
			}
			id, ok := ce.Fun.(*ast.Ident)
			if !ok {
				continue
			}
			typ = id.Name
		}
		if vspec.Type != nil {
			// "X T". We have a type. Remember it.
			ident, ok := vspec.Type.(*ast.Ident)
			if !ok {
				continue
			}
			typ = ident.Name
		}
		if typ != f.typeName {
			// This is not the type we're looking for.
			continue
		}
		// We now have a list of names (from one line of source code) all being
		// declared with the desired type.
		// Grab their names and actual values and store them in f.values.
		for _, name := range vspec.Names {
			if name.Name == "_" {
				continue
			}
			// This dance lets the type checker find the values for us. It's a
			// bit tricky: look up the object declared by the name, find its
			// types.Const, and extract its value.
			obj, ok := f.pkg.defs[name]
			if !ok {
				log.Fatalf("no value for constant %s", name)
			}
			info := obj.Type().Underlying().(*types.Basic).Info()
			if info&types.IsInteger == 0 {
				log.Fatalf("can't handle non-integer constant type %s", typ)
			}
			value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST.
			if value.Kind() != constant.Int {
				log.Fatalf("can't happen: constant is not an integer %s", name)
			}
			i64, isInt := constant.Int64Val(value)
			u64, isUint := constant.Uint64Val(value)
			if !isInt && !isUint {
				log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String())
			}
			if !isInt {
				u64 = uint64(i64)
			}
			v := Value{
				originalName: name.Name,
				value:        u64,
				signed:       info&types.IsUnsigned == 0,
				str:          value.String(),
			}
			// Prefer the doc comment above the constant; fall back to a
			// single trailing line comment.
			if vspec.Doc != nil && vspec.Doc.Text() != "" {
				v.comment = vspec.Doc.Text()
			} else if c := vspec.Comment; c != nil && len(c.List) == 1 {
				v.comment = c.Text()
			}

			v.name = strings.TrimPrefix(v.originalName, f.trimPrefix)
			f.values = append(f.values, v)
		}
	}

	return false
}
package main import ( "encoding/json" "os" "strings" "time" "github.com/aws/aws-lambda-go/lambda" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/external" "github.com/aws/aws-sdk-go-v2/service/dynamodb" "github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute" "github.com/pkg/errors" ) // Event JSON type event struct { QueryStringParameters struct { Value int `json:"value"` } `json:"queryStringParameters"` Headers struct { XForwardedFor string `json:"X-Forwarded-For"` } } // Counter model type Counter struct { UserIP string `json:"user_ip" dynamodbav:"UserIP"` Timestamp time.Time `json:"timestamp" dynamodbav:"Timestamp"` Value int `json:"value" dynamodbav:"Value"` } // Response JSON type response struct { StatusCode int `json:"statusCode"` Body []Counter `json:"body"` } func main() { lambda.Start(handle) } func handle(evt json.RawMessage) (interface{}, error) { // Unmarshal the JSON var e event if err := json.Unmarshal(evt, &e); err != nil { return newErrorResponse(errors.New("couldn't unmarshal JSON")), nil } // Extract parameters ctr := extract(e) // Load AWS stuff cfg, err := external.LoadDefaultAWSConfig() if err != nil { return newErrorResponse(errors.Wrap(err, "could not load AWS config")), nil } db := dynamodb.New(cfg) table := os.Getenv("DYNAMODB_COUNTER") // Put it in DB if err := put(db, table, ctr); err != nil { return newErrorResponse(err), nil } // Read all the counters ctrs, err := read(db, table) if err != nil { return newErrorResponse(err), nil } // Construct response return newSuccessResponse(ctrs), nil } func extract(e event) Counter { ip := strings.Split(e.Headers.XForwardedFor, ",")[0] t := time.Now() val := e.QueryStringParameters.Value return Counter{ip, t, val} } func put(db *dynamodb.DynamoDB, table string, c Counter) error { item, err := dynamodbattribute.MarshalMap(c) if err != nil { panic(err) } in := &dynamodb.PutItemInput{ Item: item, TableName: aws.String(table), } req := db.PutItemRequest(in) _, err = 
req.Send() if err != nil { return errors.Wrap(err, "couldn't put item") } return nil } func read(db *dynamodb.DynamoDB, table string) ([]Counter, error) { in := &dynamodb.ScanInput{ TableName: aws.String(table), } req := db.ScanRequest(in) out, err := req.Send() if err != nil { return nil, errors.Wrap(err, "couldn't scan items") } ctrs := []Counter{} if err := dynamodbattribute.UnmarshalListOfMaps(out.Items, &ctrs); err != nil { panic(err) } return ctrs, nil } func newErrorResponse(err error) map[string]interface{} { body := map[string]interface{}{ "error": err.Error(), } json, err := json.Marshal(body) if err != nil { panic(err) } return map[string]interface{}{ "statusCode": 500, "body": string(json), } } func newSuccessResponse(ctrs []Counter) map[string]interface{} { body := map[string]interface{}{ "message": "Success!", "data": ctrs, } json, err := json.Marshal(body) if err != nil { panic(err) } return map[string]interface{}{ "statusCode": 200, "body": string(json), } }
/*
 * Copyright (C) 2018 The ontology Authors
 * This file is part of The ontology library.
 *
 * The ontology is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The ontology is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with The ontology. If not, see <http://www.gnu.org/licenses/>.
 */

// Package message defines the JSON message envelopes exchanged by the agent:
// the connection handshake, credential issuance, proof presentation, basic
// messaging, and the related query/delete/forward requests.
package message

import "time"

// Invitation opens the connection protocol: it advertises the inviter's DID
// and the routing hops ("router") a peer should use to reach it.
type Invitation struct {
	Type   string   `json:"@type,omitempty"`
	Id     string   `json:"@id,omitempty"`
	Label  string   `json:"label,omitempty"`
	Did    string   `json:"did,omitempty"`
	Router []string `json:"router"`
}

// GetConnection implements RequestInf; an invitation carries no connection yet.
func (self *Invitation) GetConnection() *Connection {
	return nil
}

// RequestInf is satisfied by every message type in this package so generic
// handlers can extract the (possibly nil) connection information.
type RequestInf interface {
	GetConnection() *Connection
}

// ConnectionRequest answers an Invitation, proposing a Connection and
// echoing the invitation's id.
type ConnectionRequest struct {
	Type         string     `json:"@type,omitempty"`
	Id           string     `json:"@id,omitempty"`
	Label        string     `json:"label,omitempty"`
	Connection   Connection `json:"connection,omitempty"`
	InvitationId string     `json:"invitation_id"`
}

// GetConnection returns the embedded connection.
func (self *ConnectionRequest) GetConnection() *Connection {
	return &self.Connection
}

// Connection pairs the two parties' DIDs with their respective routing hops.
type Connection struct {
	MyDid       string   `json:"my_did,omitempty"`
	MyRouter    []string `json:"my_router"`
	TheirDid    string   `json:"their_did"`
	TheirRouter []string `json:"their_router"`
}

// Thread is the "~thread" decorator: it ties a message to the thread (and
// optional parent thread) it belongs to, with per-sender ordering data.
type Thread struct {
	ID             string         `json:"thid,omitempty"`
	PID            string         `json:"pthid,omitempty"`
	SenderOrder    int            `json:"sender_order,omitempty"`
	ReceivedOrders map[string]int `json:"received_orders,omitempty"`
}

// ConnectionResponse is the inviter's reply to a ConnectionRequest.
type ConnectionResponse struct {
	Type       string     `json:"@type,omitempty"`
	Id         string     `json:"@id,omitempty"`
	Thread     Thread     `json:"~thread,omitempty"`
	Connection Connection `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *ConnectionResponse) GetConnection() *Connection {
	return &self.Connection
}

// ConnectionACK acknowledges a completed connection handshake.
type ConnectionACK struct {
	Type       string     `json:"@type,omitempty"`
	Id         string     `json:"@id,omitempty"`
	Thread     Thread     `json:"~thread,omitempty"`
	Status     string     `json:"status,omitempty"`
	Connection Connection `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *ConnectionACK) GetConnection() *Connection {
	return &self.Connection
}

// DisconnectRequest asks the peer to tear down an established connection.
type DisconnectRequest struct {
	Type       string     `json:"@type,omitempty"`
	Id         string     `json:"@id,omitempty"`
	Connection Connection `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *DisconnectRequest) GetConnection() *Connection {
	return &self.Connection
}

// CredentialACK acknowledges receipt of an issued credential.
type CredentialACK struct {
	Type       string     `json:"@type,omitempty"`
	Id         string     `json:"@id,omitempty"`
	Thread     Thread     `json:"~thread,omitempty"`
	Status     string     `json:"status,omitempty"`
	Connection Connection `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *CredentialACK) GetConnection() *Connection {
	return &self.Connection
}

// PresentationACK acknowledges receipt of a proof presentation.
type PresentationACK struct {
	Type       string     `json:"@type,omitempty"`
	Id         string     `json:"@id,omitempty"`
	Thread     Thread     `json:"~thread,omitempty"`
	Status     string     `json:"status,omitempty"`
	Connection Connection `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *PresentationACK) GetConnection() *Connection {
	return &self.Connection
}

// ProposalCredential starts the issue-credential flow: the holder proposes
// the credential attributes it would like issued.
type ProposalCredential struct {
	Type               string            `json:"@type,omitempty"`
	Id                 string            `json:"@id,omitempty"`
	Comment            string            `json:"comment,omitempty"`
	CredentialProposal CredentialPreview `json:"credential_proposal,omitempty"`
	Connection         Connection        `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *ProposalCredential) GetConnection() *Connection {
	return &self.Connection
}

// OfferCredential is the issuer's offer, previewing the attributes and
// attaching the raw offer payload(s).
type OfferCredential struct {
	Type              string            `json:"@type,omitempty"`
	Id                string            `json:"@id,omitempty"`
	Comment           string            `json:"comment,omitempty"`
	CredentialPreview CredentialPreview `json:"credential_preview,omitempty"`
	OffersAttach      []Attachment      `json:"offers_attach,omitempty"`
	Connection        Connection        `json:"connection,omitempty"`
	Thread            Thread            `json:"~thread,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *OfferCredential) GetConnection() *Connection {
	return &self.Connection
}

// CredentialPreview lists the attributes a credential would contain.
type CredentialPreview struct {
	Type       string       `json:"@type,omitempty"`
	Attributre []Attributre `json:"attributre,omitempty"`
}

// Attributre is a single credential attribute (name/value pair).
// NOTE(review): the name looks like a typo for "Attribute", but renaming the
// exported type (or its json tag) would break existing callers and payloads.
type Attributre struct {
	Name      string `json:"name,omitempty"`
	MimeType  string `json:"mime_type,omitempty"`
	Value     string `json:"value,omitempty"`
	CredDefId string `json:"cred_def_id,omitempty"`
	// NOTE(review): referent is unexported, so encoding/json ignores it and
	// the tag below has no effect — confirm whether it should be exported.
	referent string `json:"referent,omitempty"`
}

// Attachment is the generic "~attach" payload wrapper used throughout the
// credential and presentation messages.
type Attachment struct {
	Id          string    `json:"@id,omitempty"`
	Description string    `json:"description,omitempty"`
	FileName    string    `json:"filename,omitempty"`
	MimeType    string    `json:"mime_type,omitempty"`
	LastModTime time.Time `json:"lastmod_time,omitempty"`
	ByteCount   int64     `json:"byte_count,omitempty"`
	Data        Data      `json:"data,omitempty"`
}

// Data carries attachment content in one of several encodings: a hash plus
// links, inline base64, or inline JSON.
type Data struct {
	Sha256 string      `json:"sha256,omitempty"`
	Links  []string    `json:"links,omitempty"`
	Base64 string      `json:"base64,omitempty"`
	JSON   interface{} `json:"json,omitempty"`
}

// Format declares the format of the attachment identified by AttachID.
type Format struct {
	AttachID string `json:"attach_id,omitempty"`
	Format   string `json:"format,omitempty"`
}

// RequestCredential is the holder's formal request for the offered credential.
type RequestCredential struct {
	Type           string       `json:"@type"`
	Id             string       `json:"@id"`
	Comment        string       `json:"comment"`
	Formats        []Format     `json:"formats,omitempty"`
	RequestsAttach []Attachment `json:"requests_attach"`
	Connection     Connection   `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *RequestCredential) GetConnection() *Connection {
	return &self.Connection
}

// IssueCredential delivers the issued credential(s) as attachments.
type IssueCredential struct {
	Type              string       `json:"@type,omitempty"`
	Id                string       `json:"@id"`
	Comment           string       `json:"comment,omitempty"`
	Formats           []Format     `json:"formats,omitempty"`
	CredentialsAttach []Attachment `json:"credentials~attach,omitempty"`
	Connection        Connection   `json:"connection,omitempty"`
	Thread            Thread       `json:"~thread,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *IssueCredential) GetConnection() *Connection {
	return &self.Connection
}

// ProposePresentation starts the present-proof flow from the prover's side.
// NOTE(review): unlike its siblings it has no Connection field or
// GetConnection method, so it does not satisfy RequestInf — confirm intent.
type ProposePresentation struct {
	Type          string       `json:"@type,omitempty"`
	Id            string       `json:"@id,omitempty"`
	Comment       string       `json:"comment,omitempty"`
	Formats       []Format     `json:"formats,omitempty"`
	ProposeAttach []Attachment `json:"propose~attach,omitempty"`
}

// RequestPresentation is the verifier's request for a proof presentation.
type RequestPresentation struct {
	Type                      string       `json:"@type,omitempty"`
	Id                        string       `json:"@id,omitempty"`
	Comment                   string       `json:"comment,omitempty"`
	Formats                   []Format     `json:"formats,omitempty"`
	RequestPresentationAttach []Attachment `json:"request_presentation_attach,omitempty"`
	Connection                Connection   `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *RequestPresentation) GetConnection() *Connection {
	return &self.Connection
}

// Presentation delivers the proof presentation as attachments.
type Presentation struct {
	Type               string       `json:"@type,omitempty"`
	Id                 string       `json:"@id,omitempty"`
	Comment            string       `json:"comment,omitempty"`
	Formats            []Format     `json:"formats,omitempty"`
	PresentationAttach []Attachment `json:"presentations~attach,omitempty"`
	Connection         Connection   `json:"connection,omitempty"`
	Thread             Thread       `json:"~thread,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *Presentation) GetConnection() *Connection {
	return &self.Connection
}

// BasicMessage is a plain user-to-user text message with localization info.
type BasicMessage struct {
	Type       string     `json:"@type"`
	Id         string     `json:"@id"`
	SendTime   time.Time  `json:"send_time"`
	Content    string     `json:"content"`
	I10n       I10n       `json:"~I10n"`
	Connection Connection `json:"connection,omitempty"`
}

// GetConnection returns the embedded connection.
func (self *BasicMessage) GetConnection() *Connection {
	return &self.Connection
}

// I10n carries the locale of a BasicMessage.
type I10n struct {
	Locale string `json:"locale"`
}

// DeleteCredentialRequest asks to delete the credential Id owned by DId.
type DeleteCredentialRequest struct {
	DId string `json:"did"`
	Id  string `json:"id"`
}

// GetConnection implements RequestInf; this request is connection-less.
func (self *DeleteCredentialRequest) GetConnection() *Connection {
	return nil
}

// DeletePresentationRequest asks to delete the presentation Id owned by DId.
type DeletePresentationRequest struct {
	DId string `json:"did"`
	Id  string `json:"id"`
}

// GetConnection implements RequestInf; this request is connection-less.
func (self *DeletePresentationRequest) GetConnection() *Connection {
	return nil
}

// QueryCredentialRequest looks up the credential Id owned by DId.
type QueryCredentialRequest struct {
	DId string `json:"did"`
	Id  string `json:"id"`
}

// GetConnection implements RequestInf; this request is connection-less.
func (self *QueryCredentialRequest) GetConnection() *Connection {
	return nil
}

// QueryCredentialResponse returns the matching credential attachments.
type QueryCredentialResponse struct {
	Formats           []Format     `json:"formats,omitempty"`
	CredentialsAttach []Attachment `json:"credentials~attach,omitempty"`
}

// GetConnection implements RequestInf; this response is connection-less.
func (self *QueryCredentialResponse) GetConnection() *Connection {
	return nil
}

// QueryPresentationRequest looks up the presentation Id owned by DId.
type QueryPresentationRequest struct {
	DId string `json:"did"`
	Id  string `json:"id"`
}

// GetConnection implements RequestInf; this request is connection-less.
func (self *QueryPresentationRequest) GetConnection() *Connection {
	return nil
}

// QueryPresentationResponse returns the matching presentation attachments.
type QueryPresentationResponse struct {
	Formats            []Format     `json:"formats,omitempty"`
	PresentationAttach []Attachment `json:"presentations~attach,omitempty"`
}

// GetConnection implements RequestInf; this response is connection-less.
func (self *QueryPresentationResponse) GetConnection() *Connection {
	return nil
}

// QueryBasicMessageRequest fetches stored basic messages for a DID,
// optionally only the latest, optionally deleting them after reading.
type QueryBasicMessageRequest struct {
	DID             string `json:"did"`
	Latest          bool   `json:"latest"`
	RemoveAfterRead bool   `json:"remove_after_read"`
}

// GetConnection implements RequestInf; this request is connection-less.
func (self *QueryBasicMessageRequest) GetConnection() *Connection {
	return nil
}

// ForwardMessageRequest wraps an opaque payload to be routed onward over
// the embedded connection.
type ForwardMessageRequest struct {
	MsgType    int        `json:"msg_type"`
	Data       []byte     `json:"data"`
	Connection Connection `json:"connection"`
}

// GetConnection returns the embedded connection.
func (self *ForwardMessageRequest) GetConnection() *Connection {
	return &self.Connection
}

// QueryConnectionsRequest lists the connections established for a DID.
type QueryConnectionsRequest struct {
	DID string `json:"did"`
}

// GetConnection implements RequestInf; this request is connection-less.
func (q *QueryConnectionsRequest) GetConnection() *Connection {
	return nil
}
package common

import "sync"

// Service is a small "active object": values passed to Add flow through a
// buffered channel and are appended to an internal slice by a single
// background goroutine, serializing all writes.
type Service struct {
	channel chan interface{} // values waiting to be appended to data
	mu      sync.Mutex       // guards data against concurrent append/read
	data    []interface{}    // accumulated values, in arrival order
}

// NewService creates a Service whose channel buffers up to size values and
// starts the background scheduler. done is invoked once the scheduler has
// drained the channel after Close, so callers can wait on it before relying
// on the final contents of Slice.
func NewService(size int, done func()) *Service {
	s := &Service{
		channel: make(chan interface{}, size),
		data:    make([]interface{}, 0),
	}
	go func() {
		s.schedule()
		done()
	}()
	return s
}

// schedule drains the channel into data until the channel is closed.
// The mutex fixes a data race: previously Slice could read data while this
// goroutine was appending to it.
func (s *Service) schedule() {
	for v := range s.channel {
		s.mu.Lock()
		s.data = append(s.data, v)
		s.mu.Unlock()
	}
}

// Add queues one value for appending; blocks once the buffer is full.
func (s *Service) Add(v interface{}) {
	s.channel <- v
}

// Close closes the channel once the caller is done adding values; the
// scheduler then drains the remainder and invokes the done callback.
func (s *Service) Close() {
	close(s.channel)
}

// Slice returns a snapshot copy of the accumulated values. Returning a copy
// under the lock keeps callers race-free even if the scheduler is still
// appending.
func (s *Service) Slice() []interface{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	out := make([]interface{}, len(s.data))
	copy(out, s.data)
	return out
}
package migrates import ( "festival/app/common/db" "festival/app/model/module" ) // 点亮线路表 // power by 7be.cn func init() { db.DbList = append(db.DbList, module.ModUserRoute{}, ) }
package blocker import ( "log" "testing" ) func TestBlocker(t *testing.T) { var list RipIPList err := list.LoadFromFile("allowedlist") if err != nil { t.Errorf("Error: %v", err) } log.Printf("length of entries: %v", len(list)) list.Dump() DefaultAllowing.Dump() log.Print("***split***") }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "testing" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" ) func TestDefaultOid(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) testCases := []struct { id descpb.ID oid *tree.DOid }{ { 1, tree.NewDOid(tree.DInt(1)), }, { 2, tree.NewDOid(tree.DInt(2)), }, } for _, tc := range testCases { oid := tableOid(tc.id) if tc.oid.DInt != oid.DInt { t.Fatalf("expected oid %d(%32b), got %d(%32b)", tc.oid.DInt, tc.oid.DInt, oid.DInt, oid.DInt) } } }
// Copyright 2016 The Lucas Alves Author. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "encoding/json" "github.com/luk4z7/pagarme-go/auth" "github.com/luk4z7/pagarme-go/lib/recipient" "net/url" "os" ) var payableRecord recipient.Recipient func main() { get, err, errorsApi := payableRecord.Get(url.Values{"id": {"25786"}}, auth.Headers{}) if err != nil { response, _ := json.MarshalIndent(errorsApi, "", " ") os.Stdout.Write(response) } else { responseGet, _ := json.MarshalIndent(get, "", " ") os.Stdout.Write(responseGet) } getall, err, errorsApi := payableRecord.GetAll(url.Values{}, auth.Headers{ "page": "1", "count": "10", }) if err != nil { response, _ := json.MarshalIndent(errorsApi, "", " ") os.Stdout.Write(response) } else { responseGetAll, _ := json.MarshalIndent(getall, "", " ") os.Stdout.Write(responseGetAll) } }
package function import ( "encoding/json" "errors" "github.com/hecatoncheir/Storage" "log" "os" ) type Storage interface { CreateJSON([]byte) (string, error) } type Functions interface { ReadPriceByID(string, string) storage.Price } type Executor struct { Store Storage Functions Functions } var ExecutorLogger = log.New(os.Stdout, "Executor: ", log.Lshortfile) var ( // ErrPriceCanNotBeCreated means that the price can't be added to database ErrPriceCanNotBeCreated = errors.New("price can't be created") ) //// CreatePrice make price and save it to storage func (executor *Executor) CreatePrice(price storage.Price, language string) (storage.Price, error) { price.IsActive = true encodedProduct, err := json.Marshal(price) if err != nil { ExecutorLogger.Printf(ErrPriceCanNotBeCreated.Error()) return price, ErrPriceCanNotBeCreated } uidOfCreatedPrice, err := executor.Store.CreateJSON(encodedProduct) if err != nil { return price, ErrPriceCanNotBeCreated } createdPrice := executor.Functions.ReadPriceByID(uidOfCreatedPrice, language) return createdPrice, nil }
package module

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/dnaeon/gru/resource"
	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
)

// Module type represents a collection of resources and module imports
type Module struct {
	// Name of the module
	Name string

	// Resources loaded from the module
	Resources []resource.Resource

	// Module imports
	Imports []Import

	// Configuration settings for the module
	Config *Config

	// Unknown keys found in the module
	UnknownKeys []string
}

// Config type contains configuration options for the module
type Config struct {
	// Module path
	Path string

	// Configuration settings for the resources
	ResourceConfig *resource.Config
}

// Import type represents an import declaration
type Import struct {
	// Name of the module that is imported
	Name string `hcl:"name"`

	// Path to the module file
	Path string `hcl:"path"`
}

// validKeys returns a map of valid keys which can be used in modules.
// Every registered resource type plus "import" is valid; anything else
// is reported via Module.UnknownKeys.
func validKeys() map[string]struct{} {
	// All registered resources are considered valid keys
	keys := make(map[string]struct{})
	for name := range resource.Registry {
		keys[name] = struct{}{}
	}

	// Other keys considered as valid, such as "import"
	keys["import"] = struct{}{}

	return keys
}

// Load loads a module from the given HCL or JSON input.
// It parses the input, collects the import declarations, instantiates every
// known resource type found, and records any unrecognized top-level keys in
// the returned module's UnknownKeys. The partially-populated module is
// returned alongside the error on failure.
func Load(name string, config *Config, r io.Reader) (*Module, error) {
	m := &Module{
		Name:        name,
		Resources:   make([]resource.Resource, 0),
		Imports:     make([]Import, 0),
		UnknownKeys: make([]string, 0),
		Config:      config,
	}

	input, err := ioutil.ReadAll(r)
	if err != nil {
		return m, err
	}

	// Parse configuration (hcl.Parse accepts both HCL and JSON syntax)
	obj, err := hcl.Parse(string(input))
	if err != nil {
		return m, err
	}

	// Top-level node should be an object list
	root, ok := obj.Node.(*ast.ObjectList)
	if !ok {
		return m, fmt.Errorf("Missing root node in %s", name)
	}

	// Load the module imports
	err = m.hclLoadImport(root)
	if err != nil {
		return m, err
	}

	// Load all known resource types from the given input
	for r := range resource.Registry {
		if err := m.hclLoadResources(r, root); err != nil {
			return m, err
		}
	}

	// Check for unknown keys in the provided input
	//
	// For now the only valid keys are the resource types,
	// which can be found in resource.Registry.
	valid := validKeys()
	for _, item := range root.Items {
		key := item.Keys[0].Token.Value().(string)
		if _, ok := valid[key]; !ok {
			m.UnknownKeys = append(m.UnknownKeys, key)
		}
	}

	return m, nil
}

// hclLoadResources loads all declarations with the
// given resource type from the provided HCL input
func (m *Module) hclLoadResources(resourceType string, root *ast.ObjectList) error {
	hclResources := root.Filter(resourceType)
	for _, item := range hclResources.Items {
		position := item.Val.Pos().String()

		// The item is expected to have exactly one key, which
		// represents the resource name (e.g. `package "nginx" { ... }`).
		if len(item.Keys) != 1 {
			e := fmt.Errorf("Invalid resource declaration found in %s:%s", m.Name, position)
			return e
		}

		// Get the resource from registry and create the actual resource
		title := item.Keys[0].Token.Value().(string)
		resourceItem, ok := resource.Registry[resourceType]
		if !ok {
			e := fmt.Errorf("Unknown resource type '%s' found in %s:%s", resourceType, m.Name, position)
			return e
		}

		// Create the actual resource by calling it's provider
		r, err := resourceItem.Provider(title, item, m.Config.ResourceConfig)
		if err != nil {
			return err
		}

		m.Resources = append(m.Resources, r)
	}

	return nil
}

// hclLoadImport loads all import declarations from the given HCL input
func (m *Module) hclLoadImport(root *ast.ObjectList) error {
	hclImport := root.Filter("import")
	for _, item := range hclImport.Items {
		position := item.Val.Pos().String()

		// After Filter("import") no extra keys may remain: an import block is
		// written as `import { name = ... path = ... }` with its fields inside
		// the object body.
		if len(item.Keys) != 0 {
			e := fmt.Errorf("Invalid module import found in %s:%s", m.Name, position)
			return e
		}

		var i Import
		err := hcl.DecodeObject(&i, item)
		if err != nil {
			return err
		}

		m.Imports = append(m.Imports, i)
	}

	return nil
}
package query import ( "log" "github.com/PuerkitoBio/goquery" ) // URI query an url to get dom tree func URI(uri string) *goquery.Document { dom, err := goquery.NewDocument(uri) if err != nil { log.Println(err) } return dom } // Dom query dom to find matched nodes func Dom(dom interface{}, pattern string) []*goquery.Selection { var s []*goquery.Selection d, ok := dom.(interface { Find(p string) *goquery.Selection }) if ok { d.Find(pattern).Each(func(i int, selection *goquery.Selection) { s = append(s, selection) }) } return s }
package main import ( "flag" "log" "net/http" "os" "path/filepath" "sync" "text/template" "mryer1.chat/trace" ) // templ represents a single template type templateHandler struct { once sync.Once filename string templ *template.Template } // This implements net/http Handler interface thus it makes templateHandler // as a Handler object func (t *templateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // The sync.Once type guarantees that the function we pass as an argument // will only be executed once, regardless of how many goroutines are calling ServeHTTP. t.once.Do(func() { t.templ = template.Must(template.ParseFiles(filepath.Join("templates", t.filename))) // "templates" is the directory }) // This tells the template to render itself using data that can be extracted from http.Request, // which happens to include the host address that we need. // func (t *Template) Execute(wr io.Writer, data interface{}) (err error) t.templ.Execute(w, r) } // We are running the room in a separate Go routine (notice the go keyword again) so // that the chatting operations occur in the background, allowing our main thread to // run the web server. func main() { // > go build -o chat // > ./chat -addr=":3000" // The call to flag.String returns a type of *string, which is to say it returns // the address of a string variable where the value of the flag is stored. var addr = flag.String("addr", ":8080", "The addr of the application.") flag.Parse() // parse the flags r := NewRoom() // send the output to the os.Stdout standard output pipe (print output to terminal) r.tracer = trace.New(os.Stdout) // 'templateHandler' structure is a valid http.Handler because it implements // ServeHTTP(ResponseWriter, *Request) method. See type Handler interface. 
http.Handle("/", &templateHandler{filename: "chat.html"}) // & address of operator http.Handle("/room", r) // get the room going go r.run() // start the web server log.Println("Starting web server on", *addr) if err := http.ListenAndServe(*addr, nil); err != nil { log.Fatal("ListenAndServe:", err) } }
package metrics import ( "github.com/anabiozz/yotunheim/backend/common/datastore" ) type accumulator struct { metrics chan datastore.InfluxMetrics mapmetrics chan datastore.Response getter MetricGetter } // MetricGetter ... type MetricGetter interface { GetMetric(influxMetrics datastore.InfluxMetrics) datastore.InfluxMetrics GetMetrics(influxMetrics datastore.Response) datastore.Response } // NewAccumulator ... func NewAccumulator( runningInput MetricGetter, metrics chan datastore.InfluxMetrics, mapmetrics chan datastore.Response) *accumulator { acc := accumulator{ getter: runningInput, metrics: metrics, mapmetrics: mapmetrics, } return &acc } func (ac *accumulator) AddMetric(influxMetrics datastore.InfluxMetrics) { ac.metrics <- ac.getter.GetMetric(influxMetrics) } func (ac *accumulator) AddMetrics(influxMetrics datastore.Response) { ac.mapmetrics <- ac.getter.GetMetrics(influxMetrics) } func (ac *accumulator) AddTable(influxMetrics datastore.InfluxMetrics) { ac.metrics <- ac.getter.GetMetric(influxMetrics) }
package main import ( "crypto/tls" "encoding/binary" "fmt" "io" "net" "os" "snirouter/snirouter" ) func readInt16BE(data []byte, pos int) int { return int(binary.BigEndian.Uint16(data[pos : pos+2])) } /** * Gets the SNI header. Returns the host if set, or an empty string, and a 'clean' connection to start TLS on */ func getSNI(underConn net.Conn) (*snirouter.Conn) { var ( data = make([]byte, 1024) sniHeader = "" ) // var conn(net.Conn) /* Read the SNI shizz This is all thanks to https://github.com/axiak/filternet - lib/sniparse.js */ readLen, _ := underConn.Read(data) // Check if it's a TLS connection if data[0] != 22 { // Not TLS handshake. Replay conn, pass through. return &snirouter.Conn{underConn, data[0:readLen], ""} } // Session ID length currentPos := 43 // skip Session IDs currentPos += 1 + int(data[currentPos]) // skip cipher suites currentPos += 2 + readInt16BE(data, currentPos) // skip compression methods currentPos += 1 + int(data[currentPos]) // skip extensions length currentPos += 2 for currentPos < len(data) { if readInt16BE(data, currentPos) == 0 { sniLength := readInt16BE(data, currentPos+2) currentPos += 4 if data[currentPos] != 0 { // RFC says this is a reserved host type, not DNS. } currentPos += 5 sniHeader = string(data[currentPos:(currentPos + sniLength - 5)]) break } else { // TODO - there's still some weirdness here - need to figure structure better. // For now, just break out if the first header isn't us break // currentPos += 4 + readInt16BE(data, currentPos+2) } } return &snirouter.Conn{underConn, data[0:readLen], sniHeader} } func handleConn(underConn net.Conn) { var ( certfile = "" keyfile = "" ) fmt.Printf("=== New Connection received from: %s \n", underConn.RemoteAddr()) // get the SNI host and replace the conn conn := getSNI(underConn) if conn.ServerName != "" { fmt.Printf("=== Incoming connection for %s\n", conn.ServerName) } else { fmt.Println("=== No SNI header specified") } // TODO - this is where the magic cert lookup goes. 
if conn.ServerName == "test.com" { certfile = "certs/test.com.crt" keyfile = "certs/test.com.key" } else { certfile = "certs/unknown.com.crt" keyfile = "certs/unknown.com.key" } cert, _ := tls.LoadX509KeyPair(certfile, keyfile) config := tls.Config{ Certificates: []tls.Certificate{cert}, } tlsconn := tls.Server(conn, &config) fmt.Println("=== Created TLS Server") // Open upstream connection upconn, err := net.Dial("tcp", "localhost:9997") if err != nil { panic(fmt.Errorf("Error opening upstream conn: %v", err)) } n, err := io.Copy(upconn, tlsconn) if err != nil { fmt.Printf("Error: Reading data : %s \n", err) } fmt.Printf("=== Closing Connections after %d inbound bytes\n", n) upconn.Close() conn.Close() } func main() { var ( host = "127.0.0.1" port = "9998" remote = host + ":" + port ) fmt.Println("Initiating server on port 9998... (Ctrl-C to stop)") lis, err := net.Listen("tcp", remote) defer lis.Close() if err != nil { fmt.Printf("Error creating listener: %s\n", err) os.Exit(1) } for { underConn, err := lis.Accept() if err != nil { fmt.Printf("Error: Accepting data: %s\n", err) os.Exit(2) } go handleConn(underConn) } }
package com import ( "JsGo/JsBench/JsProduct" "JsGo/JsHttp" "JsGo/JsLogger" "JsGo/JsStore/JsRedis" "JunSie/constant" "JunSie/util" "fmt" ) type Goods struct { ProID string //产品idJ ProName string //产品名称J Tags []string //标签 ProFormat JsProduct.ProductFormat //产品规格 Nums int //数量J BusinessID string //商家id BusinessName string //商家名称 ExData map[string]string //扩充的数据 CreatTime string //添加的时间 } func InitShoppingCart() { JsHttp.WhiteHttps("/getshoppingcart", GetShoppingCart) //获取购物车所有商品 JsHttp.WhiteHttps("/add2shoppingcart", AddToShoppingCart) //添加一个商品到购物车 JsHttp.WhiteHttps("/removefromshoppingcart", RemoveFromShoppingCart) //从购物车删除一个商品 JsHttp.WhiteHttps("/removemorefromshoppingcart", RemoveMoreFromShoppingCart) //从购物车删除多个商品(从购物车支付后删除购物车对应的产品) JsHttp.WhiteHttps("/modifygoodnumwithshoppingcart", ModifyGoodNumWithShoppingCart) //修改购物车内的商品数量 JsHttp.WhiteHttps("/clearusershoppingcart", ClearUserShoppingCart) //清空购物车 } type ShoppingCart struct { UID string //用户id` Data []Goods //收藏的商品(购物车) CreatTime string //创建时间 } //获取所有的购物车 func GetShoppingCart(s *JsHttp.Session) { type Para struct { UID string //用户id } st := &Para{} if e := s.GetPara(st); e != nil { JsLogger.Error(e.Error()) s.Forward("1", e.Error(), nil) return } if st.UID == "" { info := "GetShoppingCart failed,UID = nil " JsLogger.Error(info) s.Forward("2", info, nil) return } data := ShoppingCart{} if err := JsRedis.Redis_hget(constant.SHOPPINGCART, st.UID, &data); err != nil { JsLogger.Error(err.Error()) s.Forward("3,", err.Error(), nil) return } s.Forward("0", "GetShoppingCart success", data) } //增加一个产品 func AddToShoppingCart(s *JsHttp.Session) { type Para struct { UID string //用户信息 Product Goods //商品 } st := &Para{} //判断数据完整性 if e := s.GetPara(st); e != nil { JsLogger.Error(e.Error()) s.Forward("1", e.Error(), nil) return } //逻辑正确性 if st.Product.ProID == "" || (st.Product.ProFormat.Format != "" && st.Product.ProFormat.Price <= 0) || st.Product.Nums == 0 || st.UID == "" { info := fmt.Sprintf("AddToShoppingCart 
failed,ProID=%s,Nums=%d,UID=%s,Format=%s,Price=%d\n", st.Product.ProID, st.Product.Nums, st.UID, st.Product.ProFormat.Format, st.Product.ProFormat.Price)
		JsLogger.Error(info)
		s.Forward("2", info, nil)
		return
	}
	// Look up the user's existing cart; a read failure means this is the
	// first time the user adds something, so initialise a fresh cart.
	data := ShoppingCart{}
	if err := JsRedis.Redis_hget(constant.SHOPPINGCART, st.UID, &data); err != nil {
		data.UID = st.UID
		data.CreatTime = util.CurTime()
	}
	// Check whether the same product (same ID and same format/spec) is
	// already in the cart.
	index := -1
	for i, v := range data.Data {
		if v.ProID == st.Product.ProID && v.ProFormat.Format == st.Product.ProFormat.Format {
			index = i
			break
		}
	}
	if index == -1 {
		st.Product.CreatTime = util.CurTime()
		data.Data = append(data.Data, st.Product) // new product: append it
	} else { // same product: just bump the quantity
		data.Data[index].Nums += st.Product.Nums
	}
	data.UID = st.UID
	if err := JsRedis.Redis_hset(constant.SHOPPINGCART, st.UID, &data); err != nil {
		info := "Redis_hset error,:" + err.Error()
		JsLogger.Error(info)
		s.Forward("5", info, nil)
		return
	}
	s.Forward("0", "success", data)
}

// RemoveFromShoppingCart removes one product (identified by product ID
// plus format/spec) from the user's cart and persists the new cart.
func RemoveFromShoppingCart(s *JsHttp.Session) {
	type Para struct {
		UID    string // user id
		ProID  string // product id
		Format string // product format/spec
	}
	st := &Para{}
	if e := s.GetPara(st); e != nil {
		JsLogger.Error(e.Error())
		s.Forward("1", e.Error(), nil)
		return
	}
	if st.UID == "" {
		info := "RemoveFromShoppingCart : UID is empty\n"
		JsLogger.Error(info)
		s.Forward("2", info, nil)
		return
	}
	if st.ProID == "" {
		info := "RemoveFromShoppingCart : ProID is empty\n"
		JsLogger.Error(info)
		s.Forward("2", info, nil)
		return
	}
	// A failed read is treated as an empty cart (consistent with add).
	data := ShoppingCart{}
	JsRedis.Redis_hget(constant.SHOPPINGCART, st.UID, &data)
	index := -1
	for i, v := range data.Data {
		if v.ProID == st.ProID && v.ProFormat.Format == st.Format {
			index = i
			break
		}
	}
	if index != -1 {
		data.Data = append(data.Data[:index], data.Data[index+1:]...)
	}
	if err := JsRedis.Redis_hset(constant.SHOPPINGCART, st.UID, &data); err != nil {
		info := "set error, try again" + err.Error()
		JsLogger.Error(info)
		s.Forward("5", info, nil)
		return
	}
	s.Forward("0", "success", nil)
}

// ModifyGoodNumWithShoppingCart sets the quantity of one cart entry
// (identified by product ID plus format/spec) to the requested number.
func ModifyGoodNumWithShoppingCart(s *JsHttp.Session) {
	type Para struct {
		UID    string // user id
		ProID  string // product id
		Num    int    // new quantity, must be >= 1
		Format string // product format/spec
	}
	st := &Para{}
	if e := s.GetPara(st); e != nil {
		JsLogger.Error(e.Error())
		s.Forward("1", e.Error(), nil)
		return
	}
	// Validate the request.
	if st.UID == "" || st.ProID == "" || st.Num < 1 {
		info := fmt.Sprintf("ModifyGoodNumWithShoppingCart failed:UID=%s,ProID=%s,Num=%d\n", st.UID, st.ProID, st.Num)
		JsLogger.Error(info)
		s.Forward("2", info, nil)
		return
	}
	data := ShoppingCart{}
	if e := JsRedis.Redis_hget(constant.SHOPPINGCART, st.UID, &data); e != nil {
		JsLogger.Error(e.Error())
		s.Forward("3", e.Error(), nil)
		// Bug fix: without this return, the empty `data` below would be
		// written back and wipe the user's stored cart on a read failure.
		return
	}
	index := -1
	for i, v := range data.Data {
		if v.ProID == st.ProID && v.ProFormat.Format == st.Format {
			index = i
			break
		}
	}
	if index != -1 {
		data.Data[index].Nums = st.Num
	}
	if err := JsRedis.Redis_hset(constant.SHOPPINGCART, st.UID, &data); err != nil {
		info := err.Error()
		JsLogger.Error(info)
		s.Forward("5", info, nil)
		return
	}
	s.Forward("0", "success", data)
}

// ClearUserShoppingCart empties the user's cart by overwriting the stored
// entry with a zero-value cart.
func ClearUserShoppingCart(s *JsHttp.Session) {
	type Para struct {
		UID string // user id
	}
	st := &Para{}
	if e := s.GetPara(st); e != nil {
		JsLogger.Error(e.Error())
		s.Forward("1", e.Error(), nil)
		return
	}
	if st.UID == "" {
		JsLogger.Error("ClearUserShoppingCart failed,UID is empty\n")
		s.Forward("2", "ClearUserShoppingCart failed,UID is empty\n", nil)
		return
	}
	data := ShoppingCart{}
	if e := JsRedis.Redis_hset(constant.SHOPPINGCART, st.UID, &data); e != nil {
		JsLogger.Error(e.Error())
		s.Forward("4", e.Error(), nil)
		return
	}
	s.Forward("0", "ClearUserShoppingCart success", data)
}

// RemoveMoreFromShoppingCart removes several products at once (used to
// drop the purchased items from the cart after checkout).
func RemoveMoreFromShoppingCart(s *JsHttp.Session) {
	type id_form struct {
		ProID  string // product id
		Format string // product format/spec
	}
	type Para struct {
		UID    string    // user id
		IDform []id_form // product IDs and formats to remove
	}
	st := &Para{}
	if e := s.GetPara(st); e != nil {
		JsLogger.Error(e.Error())
		s.Forward("1", e.Error(), nil)
		return
	}
	if st.UID == "" {
		JsLogger.Error("RemoveMoreFromShoppingCart failed,UID is empty\n")
		s.Forward("2", "RemoveMoreFromShoppingCart failed,UID is empty\n", nil)
		return
	}
	for _, v := range st.IDform {
		if v.ProID == "" || v.Format == "" {
			info := "RemoveFromShoppingCart : ProID or Format = nil."
			JsLogger.Error(info)
			s.Forward("2", info, nil)
			return
		}
	}
	data := ShoppingCart{}
	JsRedis.Redis_hget(constant.SHOPPINGCART, st.UID, &data)
	// Keep every cart entry that matches none of the requested
	// (ProID, Format) pairs.
	// Bug fix: the previous version allocated len(data.Data) zero-value
	// entries up front (make([]Goods, l)) and appended once per
	// NON-matching pair, which both padded and duplicated the result.
	newdata := make([]Goods, 0, len(data.Data))
	for _, v := range data.Data {
		matched := false
		for _, w := range st.IDform {
			if v.ProID == w.ProID && v.ProFormat.Format == w.Format {
				matched = true
				break
			}
		}
		if !matched {
			newdata = append(newdata, v)
		}
	}
	data.Data = newdata
	if err := JsRedis.Redis_hset(constant.SHOPPINGCART, st.UID, &data); err != nil {
		info := "set error, try again" + err.Error()
		JsLogger.Error(info)
		s.Forward("5", info, nil)
		return
	}
	s.Forward("0", "success", nil)
}

// Remaining TODOs (translated from the original notes):
// - select items
// - per-item subtotal quantity
// - per-item amount
// - compute totals
// - total price
// - total product count
package system

import (
	"io/ioutil"

	"github.com/layer5io/meshery/mesheryctl/internal/cli/root/config"
	"github.com/layer5io/meshery/mesheryctl/pkg/utils"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	log "github.com/sirupsen/logrus"
)

// loginCmd implements `mesheryctl system login`: it verifies that a
// Meshery Server is reachable, runs the browser-based authentication
// flow against the Local or a Remote Provider, and stores the received
// token on disk for the current context.
var loginCmd = &cobra.Command{
	Use:   "login",
	Short: "Authenticate to a Meshery Server",
	Long: `
Authenticate to the Local or a Remote Provider of a Meshery Server

The authentication mode is web-based browser flow`,
	Args: cobra.MinimumNArgs(0),
	PreRunE: func(cmd *cobra.Command, args []string) error {
		// Check prerequisites (silently: PrintLogs is false).
		hcOptions := &HealthCheckOptions{
			IsPreRunE:  true,
			PrintLogs:  false,
			Subcommand: cmd.Use,
		}
		hc, err := NewHealthChecker(hcOptions)
		if err != nil {
			return errors.Wrapf(err, "failed to initialize healthchecker")
		}
		// Execute healthchecks; on failure suppress cobra's usage dump so
		// only the health-check error is shown.
		err = hc.RunPreflightHealthChecks()
		if err != nil {
			cmd.SilenceUsage = true
		}
		return err
	},
	RunE: func(cmd *cobra.Command, args []string) error {
		mctlCfg, err := config.GetMesheryCtl(viper.GetViper())
		if err != nil {
			return errors.Wrap(err, "error processing config")
		}

		currCtx, err := mctlCfg.GetCurrentContext()
		if err != nil {
			return err
		}

		// A server must be up before we can authenticate against it.
		isRunning, err := utils.IsMesheryRunning(currCtx.GetPlatform())
		if err != nil {
			log.Error("failed to check Meshery Server status: ", err)
			return nil
		}

		if !isRunning {
			log.Error(`Meshery Server is not running. Run "mesheryctl system start" to start Meshery.`)
			return nil
		}

		// Runs the interactive (web-based) login flow and returns the raw
		// token bytes to persist.
		tokenData, err := utils.InitiateLogin(mctlCfg)
		if err != nil {
			log.Println("authentication failed:", err)
			return nil
		}

		log.Println("successfully authenticated")

		token, err := mctlCfg.GetTokenForContext(mctlCfg.GetCurrentContextName())
		if err != nil {
			// Attempt to create the token entry if it doesn't already exist.
			// NOTE(review): token here is whatever GetTokenForContext returned
			// alongside the error (presumably a zero value); only Location is
			// populated before registering it — confirm that is intended.
			token.Location = utils.AuthConfigFile

			// Write new entry in the config.
			if err := config.AddTokenToConfig(token, utils.DefaultConfigPath); err != nil {
				// NOTE(review): this message describes a lookup failure but
				// fires when AddTokenToConfig fails — confirm the wording.
				log.Error("failed to find token path for the current context")
				return nil
			}
		}

		// NOTE(review): ioutil.WriteFile is deprecated since Go 1.16 in
		// favor of os.WriteFile (identical behavior).
		if err := ioutil.WriteFile(token.GetLocation(), tokenData, 0666); err != nil {
			log.Error("failed to write the token to the filesystem: ", err)
		}
		return nil
	},
}
// Copyright 2021 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package platform import ( "context" "regexp" "strconv" "strings" "time" "github.com/golang/protobuf/ptypes/empty" "chromiumos/tast/common/servo" "chromiumos/tast/dut" "chromiumos/tast/errors" "chromiumos/tast/remote/powercontrol" "chromiumos/tast/rpc" "chromiumos/tast/services/cros/platform" "chromiumos/tast/services/cros/security" "chromiumos/tast/testing" "chromiumos/tast/testing/hwdep" ) type bootupTimes struct { bootType string } const ( reboot string = "reboot" lidCloseOpen string = "lidCloseOpen" powerButton string = "powerButton" bootFromS5 string = "bootFromS5" refreshPower string = "refreshPower" ) func init() { testing.AddTest(&testing.Test{ Func: BootupTimes, LacrosStatus: testing.LacrosVariantUnneeded, Desc: "Boot performance test after reboot, powerbutton and lid close open", Contacts: []string{"pathan.jilani@intel.com", "intel-chrome-system-automation-team@intel.com"}, // Disabled due to 98%-99% failure rate and preventing other tests from running. TODO(b/242478571): fix and re-enable. //Attr: []string{"group:mainline", "informational"}, SoftwareDeps: []string{"chrome"}, ServiceDeps: []string{"tast.cros.arc.PerfBootService", "tast.cros.platform.BootPerfService", "tast.cros.security.BootLockboxService", "tast.cros.security.BootLockboxService"}, Vars: []string{"servo", "platform.BootupTimes.bootTime", "platform.BootupTimes.cbmemTimeout", "platform.mode", // Optional. Expecting "tablet". By default platform.mode will be "clamshell". 
}, Params: []testing.Param{{ Name: "reboot", Val: bootupTimes{bootType: reboot}, Timeout: 5 * time.Minute, }, { Name: "lid_close_open", Val: bootupTimes{bootType: lidCloseOpen}, Timeout: 5 * time.Minute, }, { Name: "power_button", Val: bootupTimes{bootType: powerButton}, Timeout: 5 * time.Minute, }, { Name: "from_s5", Val: bootupTimes{bootType: bootFromS5}, Timeout: 5 * time.Minute, ExtraHardwareDeps: hwdep.D(hwdep.ChromeEC()), }, { Name: "refresh_power", Val: bootupTimes{bootType: refreshPower}, Timeout: 5 * time.Minute, ExtraHardwareDeps: hwdep.D(hwdep.ChromeEC()), }}, }) } func BootupTimes(ctx context.Context, s *testing.State) { var ( bootTime = 8.4 // default bootup time in seconds cbmemTimeout = 1.35 // default cbmem timeout in seconds ) dut := s.DUT() btType := s.Param().(bootupTimes) bootupTime, ok := s.Var("platform.BootupTimes.bootTime") if !ok { s.Log("Default Boot Time for validation: ", bootTime) } else { btime, err := strconv.ParseFloat(bootupTime, 8) if err != nil { s.Fatal("Failed to convert boot time: ", err) } bootTime = btime s.Log("Boot Time for validation: ", bootTime) } cbmemtime, ok := s.Var("platform.BootupTimes.cbmemTimeout") if !ok { s.Log("Default Cbmem Timeout for validation: ", cbmemTimeout) } else { cbmtime, err := strconv.ParseFloat(cbmemtime, 8) if err != nil { s.Fatal("Failed to convert cbmemtime: ", err) } cbmemTimeout = cbmtime s.Log("Cbmem Timeout for validation: ", cbmemTimeout) } pxy, err := servo.NewProxy(ctx, s.RequiredVar("servo"), dut.KeyFile(), dut.KeyDir()) if err != nil { s.Fatal("Failed to connect to servo: ", err) } defer pxy.Close(ctx) // Get the initial tablet_mode_angle settings to restore at the end of test. 
re := regexp.MustCompile(`tablet_mode_angle=(\d+) hys=(\d+)`) out, err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle").Output() if err != nil { s.Fatal("Failed to retrieve tablet_mode_angle settings: ", err) } m := re.FindSubmatch(out) if len(m) != 3 { s.Fatalf("Failed to get initial tablet_mode_angle settings: got submatches %+v", m) } initLidAngle := m[1] initHys := m[2] defaultMode := "clamshell" if mode, ok := s.Var("platform.mode"); ok { defaultMode = mode } if defaultMode == "tablet" { // Set tabletModeAngle to 0 to force the DUT into tablet mode. testing.ContextLog(ctx, "Put DUT into tablet mode") if err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle", "0", "0").Run(); err != nil { s.Fatal("Failed to set DUT into tablet mode: ", err) } } cl, err := rpc.Dial(ctx, dut, s.RPCHint()) if err != nil { s.Fatal("Failed to connect to the RPC service on the DUT: ", err) } defer cl.Close(ctx) // Connect to the gRPC server on the DUT. // Perform a Chrome login. // Chrome login excluding for lidCloseOpen. if btType.bootType != "lidCloseOpen" { client := security.NewBootLockboxServiceClient(cl.Conn) if _, err := client.NewChromeLogin(ctx, &empty.Empty{}); err != nil { s.Fatal("Failed to start Chrome") } } // Enable bootchart before running the boot perf test. bootPerfService := platform.NewBootPerfServiceClient(cl.Conn) s.Log("Enabling boot chart") _, err = bootPerfService.EnableBootchart(ctx, &empty.Empty{}) if err != nil { // If we failed in enabling bootchart, log the failure and proceed without bootchart. s.Log("Warning: failed to enable bootchart: ", err) } // Stop tlsdated, that makes sure nobody will touch the RTC anymore, and also creates a sync-rtc bootstat file. if err := dut.Conn().CommandContext(ctx, "stop", "tlsdated").Run(); err != nil { s.Fatal("Failed to stop tlsdated") } // Undo the effect of enabling bootchart. This cleanup can also be performed (becomes a no-op) if bootchart is not enabled. 
defer func() { // Restore the side effect made in this test by disabling bootchart for subsequent system boots. s.Log("Disable bootchart") chl, err := rpc.Dial(ctx, dut, s.RPCHint()) if err != nil { s.Fatal("Failed to connect to the RPC service on the DUT: ", err) } defer chl.Close(ctx) bootPerfService := platform.NewBootPerfServiceClient(chl.Conn) _, err = bootPerfService.DisableBootchart(ctx, &empty.Empty{}) if err != nil { s.Log("Error in disabling bootchart: ", err) } if err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle", string(initLidAngle), string(initHys)).Run(); err != nil { s.Fatal("Failed to restore tablet_mode_angle to the original settings: ", err) } }() // Cleanup. defer func(ctx context.Context) { s.Log("Performing clean up") if err := powerNormalPress(ctx, dut, pxy); err != nil { s.Error("Failed to press power button: ", err) } }(ctx) if btType.bootType == "reboot" { s.Log("Rebooting DUT") if err := dut.Reboot(ctx); err != nil { s.Fatal("Failed to reboot DUT: ", err) } } else if btType.bootType == "lidCloseOpen" { s.Log("Closing lid") if err := pxy.Servo().SetString(ctx, "lid_open", "no"); err != nil { s.Fatal("Unable to close lid : ", err) } if err := testing.Poll(ctx, func(ctx context.Context) error { pwrState, err := pxy.Servo().GetECSystemPowerState(ctx) if err != nil { return errors.Wrap(err, "failed to get power state S5 error") } if pwrState != "S5" { return errors.Errorf("System is not in S5, got: %s", pwrState) } return nil }, &testing.PollOptions{Timeout: 20 * time.Second}); err != nil { s.Fatal("Failed to enter S5 state : ", err) } if err := pxy.Servo().SetString(ctx, "lid_open", "yes"); err != nil { s.Fatal("Failed to open lid: ", err) } if err := dut.WaitConnect(ctx); err != nil { if err := powerNormalPress(ctx, dut, pxy); err != nil { s.Fatal("Failed to press power button: ", err) } } } else if btType.bootType == "powerButton" { if err := dut.Conn().CommandContext(ctx, "sh", "-c", "rm -rf 
/var/log/metrics/*").Run(); err != nil { s.Fatal("Failed to remove /var/log/metrics/* files: ", err) } if err := pxy.Servo().SetString(ctx, "power_key", "long_press"); err != nil { s.Fatal("Unable to power state off: ", err) } if err := dut.WaitUnreachable(ctx); err != nil { s.Fatal("Failed to shutdown: ", err) } if err := powerNormalPress(ctx, dut, pxy); err != nil { s.Fatal("Failed to press power button: ", err) } } else if btType.bootType == bootFromS5 { if err := dut.Conn().CommandContext(ctx, "sh", "-c", "rm -rf /var/log/metrics/*").Run(); err != nil { s.Fatal("Failed to remove /var/log/metrics/* files: ", err) } // Use the ec command here instead of power_key, because servo sleeps before the command returns if err := pxy.Servo().RunECCommand(ctx, "powerbtn 8500"); err != nil { s.Fatal("Unable to power off: ", err) } if err := waitForS0State(ctx, pxy); err != nil { s.Fatal("Failed to wait for S0 state: ", err) } waitCtx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() if err := dut.WaitConnect(waitCtx); err != nil { s.Fatal("Failed to wait connect DUT: ", err) } } else if btType.bootType == refreshPower { waitCtx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() s.Log("Pressing power btn to shutdown DUT") if err := pxy.Servo().KeypressWithDuration(ctx, servo.PowerKey, servo.DurLongPress); err != nil { s.Fatal("Failed to power off DUT: ", err) } if err := dut.WaitUnreachable(ctx); err != nil { if err := powerNormalPress(ctx, dut, pxy); err != nil { s.Fatal("Failed to press power button: ", err) } } // expected time sleep 5 seconds to ensure dut switch to s5. 
if err := testing.Sleep(ctx, 5*time.Second); err != nil { s.Fatal("Failed to sleep: ", err) } s.Log("Pressing refresh + power key to boot up DUT") if err := pxy.Servo().KeypressWithDuration(ctx, servo.Refresh, servo.DurLongPress); err != nil { s.Fatal("Failed to press refresh key: ", err) } if err := pxy.Servo().KeypressWithDuration(ctx, servo.PowerKey, servo.DurPress); err != nil { s.Fatal("Failed to power normal press: ", err) } if err := dut.WaitConnect(waitCtx); err != nil { s.Fatal("Failed to wait connect DUT: ", err) } } // Validating prev sleep state for power modes. if btType.bootType == "reboot" { if err := powercontrol.ValidatePrevSleepState(ctx, dut, 0); err != nil { s.Fatal("Failed to get previous sleep state: ", err) } } else { if err := powercontrol.ValidatePrevSleepState(ctx, dut, 5); err != nil { s.Fatal("Failed to get previous sleep state: ", err) } } if err := getBootPerf(ctx, dut, s.RPCHint(), bootTime); err != nil { s.Fatal("Failed to get boot perf values: ", err) } if err := verifyCBMem(ctx, dut, cbmemTimeout); err != nil { s.Fatal("Failed to verify cbmem timeout: ", err) } } // verifyCBMem verifies cbmem timeout. func verifyCBMem(ctx context.Context, dut *dut.DUT, cbmemTimeout float64) error { cbmemOutput, err := dut.Conn().CommandContext(ctx, "sh", "-c", "cbmem -t").Output() if err != nil { return errors.Wrap(err, "failed to execute cbmem command") } cbmemPattern := regexp.MustCompile(`Total Time: (.*)`) match := cbmemPattern.FindStringSubmatch(string(cbmemOutput)) cbmemTotalTime := "" if len(match) > 1 { cbmemTotalTime = strings.Replace(match[1], ",", "", -1) } cbmemTime, _ := strconv.ParseFloat(cbmemTotalTime, 8) cbmemTime = cbmemTime / 1000000 if cbmemTime > cbmemTimeout { return errors.Wrapf(err, "failed to validate cbmem time, actual cbmem time is more than expected cbmem time, want %v; got %v", cbmemTimeout, cbmemTime) } return nil } // getBootPerf validates seconds power on to login from platform bootperf values. 
func getBootPerf(ctx context.Context, dut *dut.DUT, rpcHint *testing.RPCHint, btime float64) error { cl, err := rpc.Dial(ctx, dut, rpcHint) if err != nil { return errors.Wrap(err, "failed to connect to the RPC service on the DUT") } defer cl.Close(ctx) bootPerfService := platform.NewBootPerfServiceClient(cl.Conn) metrics, err := bootPerfService.GetBootPerfMetrics(ctx, &empty.Empty{}) if err != nil { return errors.Wrap(err, "failed to get boot perf metrics") } if metrics.Metrics["seconds_power_on_to_login"] > btime { return errors.Wrapf(err, "failed seconds_power_on_to_login is greater than expected, want %v; got %v", btime, metrics.Metrics["seconds_power_on_to_login"]) } return nil } // powerNormalPress wakes up DUT by normal pressing power button. func powerNormalPress(ctx context.Context, dut *dut.DUT, pxy *servo.Proxy) error { testing.ContextLog(ctx, "Waking up DUT") if !dut.Connected(ctx) { testing.ContextLog(ctx, "Power Normal Pressing") waitCtx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() if err := pxy.Servo().KeypressWithDuration(ctx, servo.PowerKey, servo.DurPress); err != nil { return errors.Wrap(err, "failed to power normal press") } if err := dut.WaitConnect(waitCtx); err != nil { return errors.Wrap(err, "failed to wait connect DUT") } } else { testing.ContextLog(ctx, "DUT is UP") } return nil } // waitForS0State waits for S0 power state func waitForS0State(ctx context.Context, pxy *servo.Proxy) error { var leftoverLines string readyForPowerOn := regexp.MustCompile(`power state 1 = S5`) tooLateToPowerOn := regexp.MustCompile(`power state 0 = G3`) powerOnFinished := regexp.MustCompile(`power state 3 = S0`) powerButtonPressFinished := regexp.MustCompile(`PB task 0 = idle`) didPowerOn := false hitS5 := false donePowerOff := false testing.ContextLog(ctx, "Capturing EC log") if err := pxy.Servo().SetOnOff(ctx, servo.ECUARTCapture, servo.On); err != nil { return errors.Wrap(err, "failed to capture EC UART") } defer func() error { if err := 
pxy.Servo().SetOnOff(ctx, servo.ECUARTCapture, servo.Off); err != nil { return errors.Wrap(err, "failed to disable capture EC UART") } return nil }() if err := testing.Poll(ctx, func(ctx context.Context) error { lines, err := pxy.Servo().GetQuotedString(ctx, servo.ECUARTStream) if err != nil { return errors.Wrap(err, "failed to read UART") } if lines == "" { return errors.New("Not in S0 yet") } // It is possible to read partial lines, so save the part after newline for later lines = leftoverLines + lines if crlfIdx := strings.LastIndex(lines, "\r\n"); crlfIdx < 0 { leftoverLines = lines lines = "" } else { leftoverLines = lines[crlfIdx+2:] lines = lines[:crlfIdx+2] } for _, l := range strings.Split(lines, "\r\n") { testing.ContextLogf(ctx, "%q", l) if readyForPowerOn.MatchString(l) && !didPowerOn { testing.ContextLogf(ctx, "Found S5: %q", l) hitS5 = true } if powerButtonPressFinished.MatchString(l) && !didPowerOn { testing.ContextLogf(ctx, "Found power button release: %q", l) donePowerOff = true } // If the long press above is done, and we've seen S5, then do a short press to power on. if hitS5 && donePowerOff && !didPowerOn { testing.ContextLog(ctx, "Pressing power button") if err := pxy.Servo().SetString(ctx, servo.ECUARTCmd, "powerbtn 200"); err != nil { return testing.PollBreak(err) } didPowerOn = true } if tooLateToPowerOn.MatchString(l) && !didPowerOn { testing.ContextLogf(ctx, "Found G3: %q", l) return errors.New("power state reached G3, power button pressed too late") } if powerOnFinished.MatchString(l) && didPowerOn { testing.ContextLogf(ctx, "Found S0: %q", l) return nil } } return nil }, &testing.PollOptions{Interval: 200 * time.Millisecond, Timeout: time.Minute}); err != nil { return errors.Wrap(err, "EC output parsing failed") } return nil }
package kv

import (
	"encoding/json"
	"io"
	"net/url"

	"github.com/cerana/cerana/acomm"
	"github.com/cerana/cerana/pkg/errors"
	"github.com/cerana/cerana/pkg/kv"
	"github.com/cerana/cerana/pkg/logrusx"
)

// watches tracks the stop channel of every active watch, keyed by cookie.
var watches = newChanMap()

// WatchArgs specify the arguments to the "kv-watch" endpoint.
type WatchArgs struct {
	Prefix string `json:"prefix"`
	Index  uint64 `json:"index"`
}

// Event specifies structure describing events that took place on watched prefixes.
type Event struct {
	kv.Event
	Error error
}

// makeEventReader bridges the watch channels into an io.ReadCloser: each
// kv event (or watch error) is JSON-encoded and written to a pipe whose
// read end is returned. The goroutine exits when either channel closes or
// when encoding/writing fails.
func makeEventReader(events chan kv.Event, errs chan error) io.ReadCloser {
	r, w := io.Pipe()
	go func() {
		var err error
		defer logrusx.LogReturnedErr(r.Close, nil, "")
		defer logrusx.LogReturnedErr(w.Close, nil, "")
		// Reports (via the outer err) why the reader loop stopped.
		defer logrusx.LogReturnedErr(func() error { return err }, nil, "event reader failed")

		var event Event
		for {
			var ev kv.Event
			var ok bool
			select {
			case ev, ok = <-events:
				if !ok {
					return
				}
				event = Event{Event: ev}
			case err, ok = <-errs:
				if !ok {
					return
				}
				event = Event{Error: err}
			}

			// Bug fix: use plain assignment, not :=, so marshal/write
			// failures land in the outer err that the deferred logger
			// above reports. The := form shadowed err in the loop scope
			// and silently dropped those errors.
			var data []byte
			data, err = json.Marshal(event)
			if err != nil {
				err = errors.Wrapv(err, map[string]interface{}{"event": event})
				return
			}
			var n int
			n, err = w.Write(data)
			if err != nil {
				err = errors.Wrapv(err, map[string]interface{}{"data": string(data)})
				return
			}
			if n != len(data) {
				err = errors.Newv("bytes written not equal to data length", map[string]interface{}{"written": n, "expectedWritten": len(data)})
				return
			}
		}
	}()
	return r
}

// watch handles the "kv-watch" endpoint: it starts a kv watch on the
// requested prefix and streams the resulting events over a unix stream
// socket. The returned cookie lets callers cancel the watch via stop.
func (k *KV) watch(req *acomm.Request) (interface{}, *url.URL, error) {
	args := WatchArgs{}
	if err := req.UnmarshalArgs(&args); err != nil {
		return nil, nil, err
	}
	if args.Prefix == "" {
		return nil, nil, errors.Newv("missing arg: prefix", map[string]interface{}{"args": args})
	}
	if k.kvDown() {
		return nil, nil, errors.Wrap(errorKVDown)
	}

	stop := make(chan struct{})
	events, errs, err := k.kv.Watch(args.Prefix, args.Index, stop)
	if err != nil {
		return nil, nil, err
	}
	reader := makeEventReader(events, errs)
	addr, err := k.tracker.NewStreamUnix(k.config.StreamDir("kv-watch"), reader)
	if err != nil {
		return nil, nil, err
	}
	cookie, err := watches.Add(stop)
	if err != nil {
		// Could not register the watch; stop it so nothing leaks.
		close(stop)
		return nil, nil, err
	}
	return Cookie{Cookie: uint64(cookie)}, addr, nil
}

// stop handles the endpoint that cancels a previously started watch,
// identified by the cookie returned from watch.
func (k *KV) stop(req *acomm.Request) (interface{}, *url.URL, error) {
	args := Cookie{}
	if err := req.UnmarshalArgs(&args); err != nil {
		return nil, nil, err
	}
	if args.Cookie == 0 {
		return nil, nil, errors.Newv("missing arg: cookie", map[string]interface{}{"args": args})
	}

	ch, err := watches.Get(args.Cookie)
	if err != nil {
		return nil, nil, err
	}
	// Closing the stop channel ends the underlying kv watch.
	close(ch)
	return nil, nil, nil
}
package segment

import (
	"context"
	"errors"
	"sync"
	"sync/atomic"
	"time"

	"github.com/derry6/gleafd/pkg/log"
)

var (
	// ErrClosed is returned once the service has been closed.
	ErrClosed = errors.New("service closed")
	// ErrBizTagNotFound is returned when no generator exists for a biztag.
	ErrBizTagNotFound = errors.New("biztag not found")
)

// waitItem is one pending segment-refresh request for a biztag.
type waitItem struct {
	biztag string
	result chan *Segment
	step   int32
}

// Service hands out IDs from per-biztag segment generators backed by a
// repository, refreshing segments on demand and the biztag set every minute.
type Service struct {
	repo   Repository            // segment/biztag storage
	gs     map[string]*generator // all live generators, keyed by biztag
	gsMu   sync.RWMutex
	waits  chan waitItem // biztags waiting for a segment refresh
	logger log.Logger
	closed int32 // closed flag, CAS-guarded
	closeC chan struct{}
	wg     sync.WaitGroup
}

// findGenerator returns the generator for biztag, or ErrBizTagNotFound.
func (s *Service) findGenerator(biztag string) (*generator, error) {
	s.gsMu.RLock()
	defer s.gsMu.RUnlock()
	g, ok := s.gs[biztag]
	if !ok {
		return nil, ErrBizTagNotFound
	}
	return g, nil
}

// getGenerators snapshots all current generators.
func (s *Service) getGenerators() (gs []*generator) {
	s.gsMu.RLock()
	defer s.gsMu.RUnlock()
	for _, g := range s.gs {
		gs = append(gs, g)
	}
	return gs
}

// getBizTagsUnsafe lists the cached biztags; the caller must hold gsMu.
func (s *Service) getBizTagsUnsafe() (tags []string) {
	for k := range s.gs {
		tags = append(tags, k)
	}
	return tags
}

// handleBizTagsUpdated diffs the repository's biztag list against the
// cached one and reports which tags were added and which were removed.
func (s *Service) handleBizTagsUpdated(newTags []string) (added []string, removed []string) {
	s.gsMu.RLock()
	defer s.gsMu.RUnlock()
	oldTags := s.getBizTagsUnsafe()
	// Tags present in the repository but not yet cached need a generator.
	for _, newTag := range newTags {
		found := false
		for _, oldTag := range oldTags {
			if oldTag == newTag {
				found = true
				break
			}
		}
		if !found {
			added = append(added, newTag)
		}
	}
	// Cached tags no longer in the repository must be torn down.
	for _, oldTag := range oldTags {
		found := false
		for _, newTag := range newTags {
			if newTag == oldTag {
				found = true
				break
			}
		}
		if !found {
			removed = append(removed, oldTag)
		}
	}
	return added, removed
}

// updateBizTagsFromRepo reloads the biztag list from the repository
// (called at startup and once per minute) and reconciles the generators.
func (s *Service) updateBizTagsFromRepo() error {
	tags, err := s.repo.ListBizTags(context.Background())
	if err != nil {
		return err
	}
	added, removed := s.handleBizTagsUpdated(tags)
	if len(added) > 0 {
		s.logger.Infow("Segment biztags added", "tags", added)
	}
	if len(removed) > 0 {
		s.logger.Infow("Segment biztags removed", "tags", removed)
	}
	s.gsMu.Lock()
	// Tear down generators for removed biztags.
	for _, biztag := range removed {
		g, ok := s.gs[biztag]
		if ok {
			delete(s.gs, biztag)
			g.stop()
		}
	}
	// Spawn generators for newly discovered biztags. (The previous
	// version also filled a uscMap that was never read; dropped.)
	for _, biztag := range added {
		usc := make(chan *Segment, 1) // generator reads refreshed segments from here
		g := newGenerator(s, biztag, usc)
		s.wg.Add(1)
		go func() {
			defer s.wg.Done()
			g.run()
		}()
		s.gs[biztag] = g
	}
	s.gsMu.Unlock()
	return nil
}

// notifyUpdate queues a segment-refresh request for biztag; the refreshed
// segment is delivered on result. No-op once the service is closing.
func (s *Service) notifyUpdate(biztag string, step int32, result chan *Segment) {
	// waits/closeC share the Service's lifetime.
	select {
	case <-s.closeC:
		return
	case s.waits <- waitItem{biztag, result, step}:
	}
}

// update fetches the next segment for one queued request and delivers it.
func (s *Service) update(ws waitItem) error {
	var (
		seg *Segment
		err error
	)
	ctx := context.Background()
	if ws.step <= 0 {
		// Non-positive step: let the repository use its default step.
		seg, err = s.repo.UpdateMaxID(ctx, ws.biztag)
		if err != nil {
			return err
		}
	} else {
		seg, err = s.repo.UpdateMaxIDWithStep(ctx, ws.biztag, ws.step)
		if err != nil {
			return err
		}
		// move to UpdateMaxIDWithStep ?
		seg.Step = ws.step
	}
	select {
	case <-s.closeC:
		return ErrClosed
	case ws.result <- seg:
		return nil
	}
}

// run services refresh requests and refreshes the biztag list every
// minute until the service is closed.
func (s *Service) run() error {
	timer := time.NewTicker(time.Minute)
	// Bug fix: the ticker was never stopped, leaking it after run returned.
	defer timer.Stop()
	for {
		select {
		case <-s.closeC:
			return ErrClosed
		case item, ok := <-s.waits:
			if !ok {
				return ErrClosed
			}
			if err := s.update(item); err != nil {
				// Bug fix: this error was previously swallowed silently.
				s.logger.Infow("Segment update failed", "biztag", item.biztag, "err", err)
			}
		case <-timer.C:
			if err := s.updateBizTagsFromRepo(); err != nil {
				// Bug fix: refresh errors were previously ignored.
				s.logger.Infow("Segment biztags refresh failed", "err", err)
			}
		}
	}
}

// init performs the initial biztag load.
func (s *Service) init() error {
	return s.updateBizTagsFromRepo()
}

// Get returns count IDs for biztag, drawing them one by one from the
// biztag's generator.
func (s *Service) Get(ctx context.Context, biztag string, count int) (ids []int64, err error) {
	g, err := s.findGenerator(biztag)
	if err != nil {
		return nil, err
	}
	for i := 0; i < count; i++ {
		id, err := g.get(ctx)
		if err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, nil
}

// Close stops all generators and the background loop exactly once.
func (s *Service) Close() error {
	if atomic.CompareAndSwapInt32(&s.closed, 0, 1) {
		// stopping all generators
		gs := s.getGenerators()
		for _, g := range gs {
			g.stop()
		}
		close(s.closeC)
		// NOTE(review): closing waits while a generator is mid-select in
		// notifyUpdate could in principle race (send on closed channel);
		// the closeC case narrows but does not eliminate that window.
		close(s.waits)
		s.wg.Wait()
	}
	return nil
}

// NewService builds a Service, loads the initial biztags (fatal on
// failure) and starts the background loop.
func NewService(repo Repository, logger log.Logger) *Service {
	s := &Service{
		repo:   repo,
		gs:     make(map[string]*generator),
		waits:  make(chan waitItem, 100),
		closeC: make(chan struct{}),
		logger: logger,
	}
	if err := s.init(); err != nil {
		logger.Fatalw("New segment service", "err", err)
	}
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		if err := s.run(); err != nil {
			return
		}
	}()
	return s
}
package list

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"sort"
	"sync"
	"time"
)

// Timer is one scheduled job: when the wall clock reaches Time (unix
// seconds), an HTTP GET is issued against the Callback URL.
type Timer struct {
	Time     int64  `json:"time"`
	Callback string `json:"callback"`
}

// List is a mutex-guarded, time-ordered collection of timers that is
// scanned once per second and persisted to a JSON save file.
type List struct {
	savefile string
	list     []*Timer
	locker   sync.Mutex
	exitChan chan bool
}

// Len, Less and Swap implement sort.Interface (ordered by fire time).
func (l *List) Len() int {
	return len(l.list)
}

func (l *List) Less(i, j int) bool {
	return l.list[i].Time < l.list[j].Time
}

func (l *List) Swap(i, j int) {
	l.list[i], l.list[j] = l.list[j], l.list[i]
}

// NewList restores timers previously saved to savefile (best effort:
// missing or unreadable files are ignored) and starts the scan loop.
func NewList(savefile string) *List {
	l := &List{
		exitChan: make(chan bool),
		savefile: savefile,
	}
	file, err := os.Open(savefile)
	if err == nil {
		dec := json.NewDecoder(file)
		var timers []Timer
		err = dec.Decode(&timers)
		if err == nil {
			// Bug fix: take the address of the slice element, not of the
			// loop variable. With `for _, t := range` + `&t`, every entry
			// aliased one variable (pre-Go 1.22 loop semantics), so all
			// restored timers ended up identical to the last one decoded.
			for i := range timers {
				l.list = append(l.list, &timers[i])
			}
		}
		file.Close()
	}
	go l.Run()
	return l
}

// Run drives the per-second scan until Exit is called.
func (l *List) Run() {
	for {
		select {
		case now := <-time.After(time.Second):
			l.onTimer(now.Unix())
		case <-l.exitChan:
			// Bug fix: `break` here only left the select statement, so the
			// loop kept running forever; return actually ends the goroutine.
			return
		}
	}
}

// Save persists the current timers to the save file (best effort).
func (l *List) Save() {
	l.locker.Lock()
	defer l.locker.Unlock()
	fmt.Printf("Save, len: %d\n", len(l.list))
	if file, err := os.Create(l.savefile); err == nil {
		var timers []Timer
		for _, t := range l.list {
			if t != nil {
				timers = append(timers, *t)
			}
		}
		if len(timers) > 0 {
			enc := json.NewEncoder(file)
			enc.Encode(timers)
			file.Sync()
		}
		file.Close()
	}
}

// AddTimer inserts a timer and keeps the list sorted by fire time.
func (l *List) AddTimer(timer *Timer) {
	log.Printf("add %+v\n", timer)
	l.locker.Lock()
	defer l.locker.Unlock()
	l.list = append(l.list, timer)
	sort.Sort(l)
}

// Exit saves the timers and stops the background scan loop.
func (l *List) Exit() {
	l.Save()
	l.exitChan <- true
}

// onTimer fires (asynchronously) every timer due at `now` and drops the
// fired entries from the front of the time-sorted list.
func (l *List) onTimer(now int64) {
	l.locker.Lock()
	defer l.locker.Unlock()
	var index int
	var item *Timer
	for index, item = range l.list {
		if item.Time <= now {
			go callback(item)
		} else {
			if index == 0 {
				return // nothing due yet
			}
			l.list = l.list[index:] // keep the not-yet-due tail
			return
		}
	}
	l.list = nil // every timer fired
}

// callback performs the fire-and-forget HTTP GET for one fired timer.
func callback(item *Timer) {
	log.Printf("callback: %+v\n", item)
	resp, err := http.Get(item.Callback)
	if err != nil {
		return
	}
	resp.Body.Close()
}
package WebUtility

import (
	"crypto/tls"
	"io/ioutil"
	"net/http"
	"strings"
	"time"
)

// ReadWebPage fetches url over HTTP(S) and returns the raw body as a
// string. TLS certificate verification is intentionally disabled and a
// 15-second overall timeout is applied.
// Reference: https://dlintw.github.io/gobyexample/public/http-client.html
func ReadWebPage(url string) (string, error) {
	timeout := time.Duration(15 * time.Second)
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr, Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return "", err
	}
	// Bug fix: the response body was never closed, leaking the connection
	// on every call.
	defer resp.Body.Close()
	body, err2 := ioutil.ReadAll(resp.Body)
	if err2 != nil {
		// Bug fix: this previously returned the (nil) outer err instead of
		// err2, reporting success with an empty body on read failures.
		return "", err2
	}
	// NOTE(review): the body is returned as-is; pages in other encodings
	// (e.g. Big5, per the removed conversion experiments) are not
	// converted to UTF-8.
	return string(body), nil
}

// CutString extracts the substring between the findCnt-th occurrence of
// tagHead and the following tagTail.
//
// If revTag is false, the text strictly between the tags is returned with
// the index of tagTail; if revTag is true, the tags themselves are
// included and the index just past tagTail is returned. Indices are
// relative to the remaining source after earlier occurrences were
// skipped. On failure it returns "" and a negative code: -1 tagHead not
// found, -2 tagTail not found, -3 loop exhausted.
func CutString(strSource string, tagHead string, tagTail string, findCnt int, revTag bool) (string, int) {
	for i := 1; i <= findCnt; i++ {
		idxHead := strings.Index(strSource, tagHead)
		if idxHead < 0 {
			return "", -1
		}
		if i < findCnt {
			// Skip past this occurrence and keep searching.
			strSource = strSource[(idxHead + 1):]
			continue
		}
		idxTail := strings.Index(strSource[idxHead:], tagTail)
		if idxTail < 0 {
			return "", -2
		}
		idxTail += idxHead
		if revTag {
			return strSource[idxHead:(idxTail + len(tagTail))], (idxTail + len(tagTail))
		}
		return strSource[(idxHead + len(tagHead)):idxTail], idxTail
	}
	return "", -3
}
package boshdeployment import ( "context" "fmt" "strings" "time" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" qjv1a1 "code.cloudfoundry.org/quarks-job/pkg/kube/apis/quarksjob/v1alpha1" "code.cloudfoundry.org/quarks-operator/pkg/bosh/converter" bdm "code.cloudfoundry.org/quarks-operator/pkg/bosh/manifest" bdv1 "code.cloudfoundry.org/quarks-operator/pkg/kube/apis/boshdeployment/v1alpha1" qsv1a1 "code.cloudfoundry.org/quarks-operator/pkg/kube/apis/quarkssecret/v1alpha1" "code.cloudfoundry.org/quarks-operator/pkg/kube/util/boshdns" "code.cloudfoundry.org/quarks-operator/pkg/kube/util/mutate" "code.cloudfoundry.org/quarks-utils/pkg/config" log "code.cloudfoundry.org/quarks-utils/pkg/ctxlog" "code.cloudfoundry.org/quarks-utils/pkg/meltdown" "code.cloudfoundry.org/quarks-utils/pkg/pointers" ) // JobFactory creates Jobs for a given manifest type JobFactory interface { VariableInterpolationJob(namespace string, deploymentName string, manifest bdm.Manifest) (*qjv1a1.QuarksJob, error) InstanceGroupManifestJob(namespace string, deploymentName string, manifest bdm.Manifest, linkInfos converter.LinkInfos, initialRollout bool) (*qjv1a1.QuarksJob, error) } // VariablesConverter converts BOSH variables into QuarksSecrets type VariablesConverter interface { Variables(namespace string, manifestName string, variables []bdm.Variable) ([]qsv1a1.QuarksSecret, error) } // WithOps interpolates BOSH manifests and operations files to create the WithOps manifest type WithOps interface { Manifest(ctx context.Context, bdpl *bdv1.BOSHDeployment, namespace string) (*bdm.Manifest, []string, error) } // Check that ReconcileBOSHDeployment implements the 
reconcile.Reconciler interface var _ reconcile.Reconciler = &ReconcileBOSHDeployment{} type setReferenceFunc func(owner, object metav1.Object, scheme *runtime.Scheme) error // NewDeploymentReconciler returns a new reconcile.Reconciler func NewDeploymentReconciler(ctx context.Context, config *config.Config, mgr manager.Manager, withops WithOps, jobFactory JobFactory, converter VariablesConverter, srf setReferenceFunc) reconcile.Reconciler { return &ReconcileBOSHDeployment{ ctx: ctx, config: config, client: mgr.GetClient(), scheme: mgr.GetScheme(), withops: withops, setReference: srf, jobFactory: jobFactory, converter: converter, } } // ReconcileBOSHDeployment reconciles a BOSHDeployment object type ReconcileBOSHDeployment struct { ctx context.Context config *config.Config client client.Client scheme *runtime.Scheme withops WithOps setReference setReferenceFunc jobFactory JobFactory converter VariablesConverter } // Reconcile starts the deployment process for a BOSHDeployment and deploys QuarksJobs to generate required properties for instance groups and rendered BPM func (r *ReconcileBOSHDeployment) Reconcile(request reconcile.Request) (reconcile.Result, error) { // Fetch the BOSHDeployment instance bdpl := &bdv1.BOSHDeployment{} // Set the ctx to be Background, as the top-level context for incoming requests. ctx, cancel := context.WithTimeout(r.ctx, r.config.CtxTimeOut) defer cancel() log.Infof(ctx, "Reconciling BOSHDeployment '%s'", request.NamespacedName) err := r.client.Get(ctx, request.NamespacedName, bdpl) if err != nil { if apierrors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 
// Return and don't requeue log.Debug(ctx, "Skip reconcile: BOSHDeployment not found") return reconcile.Result{}, nil } return reconcile.Result{}, log.WithEvent(bdpl, "GetBOSHDeploymentError").Errorf(ctx, "failed to get BOSHDeployment '%s': %v", request.NamespacedName, err) } if meltdown.NewWindow(r.config.MeltdownDuration, bdpl.Status.LastReconcile).Contains(time.Now()) { log.WithEvent(bdpl, "Meltdown").Debugf(ctx, "Resource '%s' is in meltdown, requeue reconcile after %s", request.NamespacedName, r.config.MeltdownRequeueAfter) return reconcile.Result{RequeueAfter: r.config.MeltdownRequeueAfter}, nil } manifest, err := r.resolveManifest(ctx, bdpl) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "WithOpsManifestError").Errorf(ctx, "failed to get with-ops manifest for BOSHDeployment '%s': %v", request.NamespacedName, err) } // Get link infos containing provider name and its secret name linkInfos, err := r.listLinkInfos(bdpl, manifest) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "InstanceGroupManifestError").Errorf(ctx, "failed to list quarks-link secrets for BOSHDeployment '%s': %v", request.NamespacedName, err) } // Apply the "with-ops" manifest secret log.Debug(ctx, "Creating with-ops manifest secret") manifestSecret, err := r.createManifestWithOps(ctx, bdpl, *manifest) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "WithOpsManifestError").Errorf(ctx, "failed to create with-ops manifest secret for BOSHDeployment '%s': %v", request.NamespacedName, err) } // Create all QuarksSecret variables log.Debug(ctx, "Converting BOSH manifest variables to QuarksSecret resources") secrets, err := r.converter.Variables(request.Namespace, bdpl.Name, manifest.Variables) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "BadManifestError").Error(ctx, errors.Wrap(err, "failed to generate quarks secrets from manifest")) } // Create/update all explicit BOSH Variables if len(secrets) > 0 { err = r.createQuarksSecrets(ctx, 
manifestSecret, secrets) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "VariableGenerationError").Errorf(ctx, "failed to create quarks secrets for BOSH manifest '%s': %v", request.NamespacedName, err) } } // Apply the "Variable Interpolation" QuarksJob, which creates the desired manifest secret qJob, err := r.jobFactory.VariableInterpolationJob(request.Namespace, bdpl.Name, *manifest) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "DesiredManifestError").Errorf(ctx, "failed to build the desired manifest qJob: %v", err) } log.Debug(ctx, "Creating desired manifest QuarksJob") err = r.createQuarksJob(ctx, bdpl, qJob) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "DesiredManifestError").Errorf(ctx, "failed to create desired manifest qJob for BOSHDeployment '%s': %v", request.NamespacedName, err) } // Apply the "Instance group manifest" QuarksJob, which creates instance group manifests (ig-resolved) secrets and BPM config secrets // once the "Variable Interpolation" job created the desired manifest. 
qJob, err = r.jobFactory.InstanceGroupManifestJob(request.Namespace, bdpl.Name, *manifest, linkInfos, bdpl.ObjectMeta.Generation == 1) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "InstanceGroupManifestError").Errorf(ctx, "failed to build instance group manifest qJob: %v", err) } log.Debug(ctx, "Creating instance group manifest QuarksJob") err = r.createQuarksJob(ctx, bdpl, qJob) if err != nil { return reconcile.Result{}, log.WithEvent(bdpl, "InstanceGroupManifestError").Errorf(ctx, "failed to create instance group manifest qJob for BOSHDeployment '%s': %v", request.NamespacedName, err) } // Update status of bdpl with the timestamp of the last reconcile now := metav1.Now() bdpl.Status.LastReconcile = &now err = r.client.Status().Update(ctx, bdpl) if err != nil { log.WithEvent(bdpl, "UpdateError").Errorf(ctx, "failed to update reconcile timestamp on bdpl '%s' (%v): %s", request.NamespacedName, bdpl.ResourceVersion, err) return reconcile.Result{Requeue: false}, nil } return reconcile.Result{}, nil } // resolveManifest resolves manifest with ops manifest func (r *ReconcileBOSHDeployment) resolveManifest(ctx context.Context, bdpl *bdv1.BOSHDeployment) (*bdm.Manifest, error) { log.Debug(ctx, "Resolving manifest") manifest, _, err := r.withops.Manifest(ctx, bdpl, bdpl.GetNamespace()) if err != nil { return nil, log.WithEvent(bdpl, "WithOpsManifestError").Errorf(ctx, "Error resolving the manifest '%s': %s", bdpl.GetNamespacedName(), err) } return manifest, nil } // createManifestWithOps creates a secret containing the deployment manifest with ops files applied func (r *ReconcileBOSHDeployment) createManifestWithOps(ctx context.Context, bdpl *bdv1.BOSHDeployment, manifest bdm.Manifest) (*corev1.Secret, error) { log.Debug(ctx, "Creating manifest secret with ops") // Create manifest with ops, which will be used as a base for variable interpolation in desired manifest job input. 
manifestBytes, err := manifest.Marshal() if err != nil { return nil, log.WithEvent(bdpl, "ManifestWithOpsMarshalError").Errorf(ctx, "Error marshaling the manifest '%s': %s", bdpl.GetNamespacedName(), err) } manifestSecretName := bdv1.DeploymentSecretTypeManifestWithOps.String() // Create a secret object for the manifest manifestSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: manifestSecretName, Namespace: bdpl.GetNamespace(), Labels: map[string]string{ bdv1.LabelDeploymentName: bdpl.Name, bdv1.LabelDeploymentSecretType: bdv1.DeploymentSecretTypeManifestWithOps.String(), }, }, StringData: map[string]string{ "manifest.yaml": string(manifestBytes), }, } // Set ownership reference if err := r.setReference(bdpl, manifestSecret, r.scheme); err != nil { return nil, log.WithEvent(bdpl, "ManifestWithOpsRefError").Errorf(ctx, "failed to set ownerReference for Secret '%s/%s': %v", bdpl.Namespace, manifestSecretName, err) } // Apply the secret op, err := controllerutil.CreateOrUpdate(ctx, r.client, manifestSecret, mutate.SecretMutateFn(manifestSecret)) if err != nil { return nil, log.WithEvent(bdpl, "ManifestWithOpsApplyError").Errorf(ctx, "failed to apply Secret '%s/%s': %v", bdpl.Namespace, manifestSecretName, err) } log.Debugf(ctx, "ResourceReference secret '%s/%s' has been %s", bdpl.Namespace, manifestSecret.Name, op) return manifestSecret, nil } // createQuarksJob creates a QuarksJob and sets its ownership func (r *ReconcileBOSHDeployment) createQuarksJob(ctx context.Context, bdpl *bdv1.BOSHDeployment, qJob *qjv1a1.QuarksJob) error { if err := r.setReference(bdpl, qJob, r.scheme); err != nil { return errors.Errorf("failed to set ownerReference for QuarksJob '%s/%s': %v", bdpl.Namespace, qJob.GetName(), err) } op, err := controllerutil.CreateOrUpdate(ctx, r.client, qJob, mutate.QuarksJobMutateFn(qJob)) if err != nil { return errors.Wrapf(err, "creating or updating QuarksJob '%s/%s'", bdpl.Namespace, qJob.Name) } log.Debugf(ctx, "QuarksJob '%s/%s' has been 
%s", bdpl.Namespace, qJob.Name, op) return err } // listLinkInfos returns a LinkInfos containing link providers if needed // and updates `quarks_links` properties func (r *ReconcileBOSHDeployment) listLinkInfos(bdpl *bdv1.BOSHDeployment, manifest *bdm.Manifest) (converter.LinkInfos, error) { linkInfos := converter.LinkInfos{} // find all missing providers in the manifest, so we can look for secrets missingProviders := manifest.ListMissingProviders() // quarksLinks store for missing provider names with types read from secrets quarksLinks := map[string]bdm.QuarksLink{} if len(missingProviders) != 0 { // list secrets and services from target deployment secrets := &corev1.SecretList{} err := r.client.List(r.ctx, secrets, client.InNamespace(bdpl.Namespace), ) if err != nil { return linkInfos, errors.Wrapf(err, "listing secrets for link in deployment '%s':", bdpl.GetNamespacedName()) } services := &corev1.ServiceList{} err = r.client.List(r.ctx, services, client.InNamespace(bdpl.Namespace), ) if err != nil { return linkInfos, errors.Wrapf(err, "listing services for link in deployment '%s':", bdpl.GetNamespacedName()) } for _, s := range secrets.Items { if deploymentName, ok := s.GetAnnotations()[bdv1.LabelDeploymentName]; ok && deploymentName == bdpl.Name { linkProvider, err := newLinkProvider(s.GetAnnotations()) if err != nil { return linkInfos, errors.Wrapf(err, "failed to parse link JSON for '%s'", bdpl.GetNamespacedName()) } if dup, ok := missingProviders[linkProvider.Name]; ok { if dup { return linkInfos, errors.New(fmt.Sprintf("duplicated secrets of provider: %s", linkProvider.Name)) } linkInfos = append(linkInfos, converter.LinkInfo{ SecretName: s.Name, ProviderName: linkProvider.Name, ProviderType: linkProvider.ProviderType, }) if linkProvider.ProviderType != "" { quarksLinks[s.Name] = bdm.QuarksLink{ Type: linkProvider.ProviderType, } } missingProviders[linkProvider.Name] = true } } } serviceRecords, err := r.getServiceRecords(bdpl.Namespace, bdpl.Name, 
services.Items) if err != nil { return linkInfos, errors.Wrapf(err, "failed to get link services for '%s'", bdpl.GetNamespacedName()) } for qName := range quarksLinks { if svcRecord, ok := serviceRecords[qName]; ok { pods, err := r.listPodsFromSelector(bdpl.Namespace, svcRecord.selector) if err != nil { return linkInfos, errors.Wrapf(err, "Failed to get link pods for '%s'", bdpl.GetNamespacedName()) } var jobsInstances []bdm.JobInstance for i, p := range pods { if len(p.Status.PodIP) == 0 { return linkInfos, fmt.Errorf("empty ip of kube native component: '%s'", p.Name) } jobsInstances = append(jobsInstances, bdm.JobInstance{ Name: qName, ID: string(p.GetUID()), Index: i, Address: p.Status.PodIP, Bootstrap: i == 0, }) } quarksLinks[qName] = bdm.QuarksLink{ Type: quarksLinks[qName].Type, Address: svcRecord.dnsRecord, Instances: jobsInstances, } } } } missingPs := make([]string, 0, len(missingProviders)) for key, found := range missingProviders { if !found { missingPs = append(missingPs, key) } } if len(missingPs) != 0 { return linkInfos, errors.New(fmt.Sprintf("missing link secrets for providers: %s", strings.Join(missingPs, ", "))) } if len(quarksLinks) != 0 { if manifest.Properties == nil { manifest.Properties = map[string]interface{}{} } manifest.Properties["quarks_links"] = quarksLinks } return linkInfos, nil } // getServiceRecords gets service records from Kube Services func (r *ReconcileBOSHDeployment) getServiceRecords(namespace string, name string, svcs []corev1.Service) (map[string]serviceRecord, error) { svcRecords := map[string]serviceRecord{} for _, svc := range svcs { if deploymentName, ok := svc.GetAnnotations()[bdv1.LabelDeploymentName]; ok && deploymentName == name { providerName, ok := svc.GetAnnotations()[bdv1.AnnotationLinkProviderService] if ok { if _, ok := svcRecords[providerName]; ok { return svcRecords, errors.New(fmt.Sprintf("duplicated services of provider: %s", providerName)) } svcRecords[providerName] = serviceRecord{ selector: 
svc.Spec.Selector, dnsRecord: fmt.Sprintf("%s.%s.svc.%s", svc.Name, namespace, boshdns.GetClusterDomain()), } } } } return svcRecords, nil } // listPodsFromSelector lists pods from the selector func (r *ReconcileBOSHDeployment) listPodsFromSelector(namespace string, selector map[string]string) ([]corev1.Pod, error) { podList := &corev1.PodList{} err := r.client.List(r.ctx, podList, client.InNamespace(namespace), client.MatchingLabels(selector), ) if err != nil { return podList.Items, errors.Wrapf(err, "listing pods from selector '%+v':", selector) } if len(podList.Items) == 0 { return podList.Items, fmt.Errorf("got an empty list of pods") } return podList.Items, nil } // createQuarksSecrets create variables quarksSecrets func (r *ReconcileBOSHDeployment) createQuarksSecrets(ctx context.Context, manifestSecret *corev1.Secret, variables []qsv1a1.QuarksSecret) error { // TODO: vladi: don't generate the variables that are "user-defined" for _, variable := range variables { log.Debugf(ctx, "CreateOrUpdate QuarksSecrets for explicit variable '%s'", variable.GetNamespacedName()) // Set the "manifest with ops" secret as the owner for the QuarksSecrets // The "manifest with ops" secret is owned by the actual BOSHDeployment, so everything // should be garbage collected properly. if err := r.setReference(manifestSecret, &variable, r.scheme); err != nil { err = log.WithEvent(manifestSecret, "OwnershipError").Errorf(ctx, "failed to set ownership for '%s': %v", variable.GetNamespacedName(), err) return err } op, err := controllerutil.CreateOrUpdate(ctx, r.client, &variable, mutate.QuarksSecretMutateFn(&variable)) if err != nil { return errors.Wrapf(err, "creating or updating QuarksSecret '%s'", variable.GetNamespacedName()) } // Update does not update status. 
We only trigger quarks secret // reconciler again if variable was updated by previous CreateOrUpdate if op == controllerutil.OperationResultUpdated { variable.Status.Generated = pointers.Bool(false) if err := r.client.Status().Update(ctx, &variable); err != nil { log.WithEvent(&variable, "UpdateError").Errorf(ctx, "failed to update generated status on quarks secret '%s' (%v): %s", variable.GetNamespacedName(), variable.ResourceVersion, err) return err } } log.Debugf(ctx, "QuarksSecret '%s' has been %s", variable.GetNamespacedName(), op) } return nil } type serviceRecord struct { selector map[string]string dnsRecord string }
package main

import "fmt"

// 32. Longest Valid Parentheses
//
// Given a string containing just the characters '(' and ')', find the length
// of the longest substring of well-formed (valid) parentheses.
//
// Example 1:
//   Input:  "(()"
//   Output: 2   // the longest valid substring is "()"
//
// Example 2:
//   Input:  ")()())"
//   Output: 4   // the longest valid substring is "()()"
//
// Approach: stack-based marking. Every character that belongs to a matched
// pair is flagged, then the answer is the longest contiguous run of flags.

// longestValidParentheses returns the length of the longest well-formed
// parentheses substring of s. Runs in O(n) time with O(n) extra space.
func longestValidParentheses(s string) int {
	n := len(s)
	// matched[i] == 1 when s[i] is part of some matched "()" pair.
	matched := make([]int, n)
	// stack holds indices of '(' characters not yet matched.
	stack := make([]int, 0, n)
	for i, c := range s {
		if c == '(' {
			stack = append(stack, i)
			continue
		}
		// c == ')': pair it with the most recent open '(' if one exists;
		// otherwise the ')' stays unmatched and breaks any run.
		if top := len(stack) - 1; top >= 0 {
			matched[i] = 1
			matched[stack[top]] = 1
			stack = stack[:top]
		}
	}

	// Longest contiguous run of matched positions.
	result, run := 0, 0
	for i := 0; i < n; i++ {
		if matched[i] == 1 {
			run++
			result = max(result, run)
		} else {
			run = 0
		}
	}
	return result
}

// max returns the larger of x and y.
func max(x, y int) int {
	if x > y {
		return x
	}
	return y
}

func main() {
	// Use fmt.Println instead of the built-in println: the builtin writes to
	// stderr and its output format is not guaranteed by the language spec.
	fmt.Println(longestValidParentheses(")()())"))
}
/* Copyright 2021-2023 ICS-FORTH. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package utils import ( "github.com/carv-ics-forth/frisbee/api/v1alpha1" "github.com/carv-ics-forth/frisbee/pkg/distributions" ) func SetTimeline(cascade *v1alpha1.Cascade) { if cascade.Spec.Schedule == nil || cascade.Spec.Schedule.Timeline == nil { return } probabilitySlice := distributions.GenerateProbabilitySliceFromSpec(int64(cascade.Spec.MaxInstances), cascade.Spec.Schedule.Timeline.DistributionSpec) cascade.Status.ExpectedTimeline = probabilitySlice.ApplyToTimeline( cascade.GetCreationTimestamp(), *cascade.Spec.Schedule.Timeline.TotalDuration, ) }
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package stack

import (
	"fmt"
	"time"

	"gvisor.dev/gvisor/pkg/buffer"
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/waiter"
)

// NetworkEndpointID is the identifier of a network layer protocol endpoint.
// Currently the local address is sufficient because all supported protocols
// (i.e., IPv4 and IPv6) have different sizes for their addresses.
type NetworkEndpointID struct {
	LocalAddress tcpip.Address
}

// TransportEndpointID is the identifier of a transport layer protocol endpoint.
//
// +stateify savable
type TransportEndpointID struct {
	// LocalPort is the local port associated with the endpoint.
	LocalPort uint16

	// LocalAddress is the local [network layer] address associated with
	// the endpoint.
	LocalAddress tcpip.Address

	// RemotePort is the remote port associated with the endpoint.
	RemotePort uint16

	// RemoteAddress is the remote [network layer] address associated with
	// the endpoint.
	RemoteAddress tcpip.Address
}

// NetworkPacketInfo holds information about a network layer packet.
//
// +stateify savable
type NetworkPacketInfo struct {
	// LocalAddressBroadcast is true if the packet's local address is a broadcast
	// address.
	LocalAddressBroadcast bool

	// IsForwardedPacket is true if the packet is being forwarded.
	IsForwardedPacket bool
}

// TransportErrorKind enumerates error types that are handled by the transport
// layer.
type TransportErrorKind int

const (
	// PacketTooBigTransportError indicates that a packet did not reach its
	// destination because a link on the path to the destination had an MTU that
	// was too small to carry the packet.
	PacketTooBigTransportError TransportErrorKind = iota

	// DestinationHostUnreachableTransportError indicates that the destination
	// host was unreachable.
	DestinationHostUnreachableTransportError

	// DestinationPortUnreachableTransportError indicates that a packet reached
	// the destination host, but the transport protocol was not active on the
	// destination port.
	DestinationPortUnreachableTransportError

	// DestinationNetworkUnreachableTransportError indicates that the destination
	// network was unreachable.
	DestinationNetworkUnreachableTransportError

	// DestinationProtoUnreachableTransportError indicates that the destination
	// protocol was unreachable.
	DestinationProtoUnreachableTransportError

	// SourceRouteFailedTransportError indicates that the source route failed.
	SourceRouteFailedTransportError

	// SourceHostIsolatedTransportError indicates that the source machine is not
	// on the network.
	SourceHostIsolatedTransportError

	// DestinationHostDownTransportError indicates that the destination host is
	// down.
	DestinationHostDownTransportError
)

// TransportError is a marker interface for errors that may be handled by the
// transport layer.
type TransportError interface {
	tcpip.SockErrorCause

	// Kind returns the type of the transport error.
	Kind() TransportErrorKind
}

// TransportEndpoint is the interface that needs to be implemented by transport
// protocol (e.g., tcp, udp) endpoints that can handle packets.
type TransportEndpoint interface {
	// UniqueID returns a unique ID for this transport endpoint.
	UniqueID() uint64

	// HandlePacket is called by the stack when new packets arrive to this
	// transport endpoint. It sets the packet buffer's transport header.
	//
	// HandlePacket may modify the packet.
	HandlePacket(TransportEndpointID, PacketBufferPtr)

	// HandleError is called when the transport endpoint receives an error.
	//
	// HandleError may modify the packet buffer.
	HandleError(TransportError, PacketBufferPtr)

	// Abort initiates an expedited endpoint teardown. It puts the endpoint
	// in a closed state and frees all resources associated with it. This
	// cleanup may happen asynchronously. Wait can be used to block on this
	// asynchronous cleanup.
	Abort()

	// Wait waits for any worker goroutines owned by the endpoint to stop.
	//
	// An endpoint can be requested to stop its worker goroutines by calling
	// its Close method.
	//
	// Wait will not block if the endpoint hasn't started any goroutines
	// yet, even if it might later.
	Wait()
}

// RawTransportEndpoint is the interface that needs to be implemented by raw
// transport protocol endpoints. RawTransportEndpoints receive the entire
// packet - including the network and transport headers - as delivered to
// netstack.
type RawTransportEndpoint interface {
	// HandlePacket is called by the stack when new packets arrive to
	// this transport endpoint. The packet contains all data from the link
	// layer up.
	//
	// HandlePacket may modify the packet.
	HandlePacket(PacketBufferPtr)
}

// PacketEndpoint is the interface that needs to be implemented by packet
// transport protocol endpoints. These endpoints receive link layer headers in
// addition to whatever they contain (usually network and transport layer
// headers and a payload).
type PacketEndpoint interface {
	// HandlePacket is called by the stack when new packets arrive that
	// match the endpoint.
	//
	// Implementers should treat packet as immutable and should copy it
	// before modification.
	//
	// linkHeader may have a length of 0, in which case the PacketEndpoint
	// should construct its own ethernet header for applications.
	//
	// HandlePacket may modify pkt.
	HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt PacketBufferPtr)
}

// UnknownDestinationPacketDisposition enumerates the possible return values from
// HandleUnknownDestinationPacket().
type UnknownDestinationPacketDisposition int

const (
	// UnknownDestinationPacketMalformed denotes that the packet was malformed
	// and no further processing should be attempted other than updating
	// statistics.
	UnknownDestinationPacketMalformed UnknownDestinationPacketDisposition = iota

	// UnknownDestinationPacketUnhandled tells the caller that the packet was
	// well formed but that the issue was not handled and the stack should take
	// the default action.
	UnknownDestinationPacketUnhandled

	// UnknownDestinationPacketHandled tells the caller that it should do
	// no further processing.
	UnknownDestinationPacketHandled
)

// TransportProtocol is the interface that needs to be implemented by transport
// protocols (e.g., tcp, udp) that want to be part of the networking stack.
type TransportProtocol interface {
	// Number returns the transport protocol number.
	Number() tcpip.TransportProtocolNumber

	// NewEndpoint creates a new endpoint of the transport protocol.
	NewEndpoint(netProto tcpip.NetworkProtocolNumber, waitQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error)

	// NewRawEndpoint creates a new raw endpoint of the transport protocol.
	NewRawEndpoint(netProto tcpip.NetworkProtocolNumber, waitQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error)

	// MinimumPacketSize returns the minimum valid packet size of this
	// transport protocol. The stack automatically drops any packets smaller
	// than this targeted at this protocol.
	MinimumPacketSize() int

	// ParsePorts returns the source and destination ports stored in a
	// packet of this protocol.
	ParsePorts(b []byte) (src, dst uint16, err tcpip.Error)

	// HandleUnknownDestinationPacket handles packets targeted at this
	// protocol that don't match any existing endpoint. For example,
	// it is targeted at a port that has no listeners.
	//
	// HandleUnknownDestinationPacket may modify the packet if it handles
	// the issue.
	HandleUnknownDestinationPacket(TransportEndpointID, PacketBufferPtr) UnknownDestinationPacketDisposition

	// SetOption allows enabling/disabling protocol specific features.
	// SetOption returns an error if the option is not supported or the
	// provided option value is invalid.
	SetOption(option tcpip.SettableTransportProtocolOption) tcpip.Error

	// Option allows retrieving protocol specific option values.
	// Option returns an error if the option is not supported or the
	// provided option value is invalid.
	Option(option tcpip.GettableTransportProtocolOption) tcpip.Error

	// Close requests that any worker goroutines owned by the protocol
	// stop.
	Close()

	// Wait waits for any worker goroutines owned by the protocol to stop.
	Wait()

	// Pause requests that any protocol level background workers pause.
	Pause()

	// Resume resumes any protocol level background workers that were
	// previously paused by Pause.
	Resume()

	// Parse sets pkt.TransportHeader and trims pkt.Data appropriately. It does
	// neither and returns false if pkt.Data is too small, i.e. pkt.Data.Size() <
	// MinimumPacketSize()
	Parse(pkt PacketBufferPtr) (ok bool)
}

// TransportPacketDisposition is the result from attempting to deliver a packet
// to the transport layer.
type TransportPacketDisposition int

const (
	// TransportPacketHandled indicates that a transport packet was handled by the
	// transport layer and callers need not take any further action.
	TransportPacketHandled TransportPacketDisposition = iota

	// TransportPacketProtocolUnreachable indicates that the transport
	// protocol requested in the packet is not supported.
	TransportPacketProtocolUnreachable

	// TransportPacketDestinationPortUnreachable indicates that there weren't any
	// listeners interested in the packet and the transport protocol has no means
	// to notify the sender.
	TransportPacketDestinationPortUnreachable
)

// TransportDispatcher contains the methods used by the network stack to deliver
// packets to the appropriate transport endpoint after it has been handled by
// the network layer.
type TransportDispatcher interface {
	// DeliverTransportPacket delivers packets to the appropriate
	// transport protocol endpoint.
	//
	// pkt.NetworkHeader must be set before calling DeliverTransportPacket.
	//
	// DeliverTransportPacket may modify the packet.
	DeliverTransportPacket(tcpip.TransportProtocolNumber, PacketBufferPtr) TransportPacketDisposition

	// DeliverTransportError delivers an error to the appropriate transport
	// endpoint.
	//
	// DeliverTransportError may modify the packet buffer.
	DeliverTransportError(local, remote tcpip.Address, _ tcpip.NetworkProtocolNumber, _ tcpip.TransportProtocolNumber, _ TransportError, _ PacketBufferPtr)

	// DeliverRawPacket delivers a packet to any subscribed raw sockets.
	//
	// DeliverRawPacket does NOT take ownership of the packet buffer.
	DeliverRawPacket(tcpip.TransportProtocolNumber, PacketBufferPtr)
}

// PacketLooping specifies where an outbound packet should be sent.
type PacketLooping byte

const (
	// PacketOut indicates that the packet should be passed to the link
	// endpoint.
	PacketOut PacketLooping = 1 << iota

	// PacketLoop indicates that the packet should be handled locally.
	PacketLoop
)

// NetworkHeaderParams are the header parameters given as input by the
// transport endpoint to the network.
type NetworkHeaderParams struct {
	// Protocol refers to the transport protocol number.
	Protocol tcpip.TransportProtocolNumber

	// TTL refers to Time To Live field of the IP-header.
	TTL uint8

	// TOS refers to TypeOfService or TrafficClass field of the IP-header.
	TOS uint8
}

// GroupAddressableEndpoint is an endpoint that supports group addressing.
//
// An endpoint is considered to support group addressing when one or more
// endpoints may associate themselves with the same identifier (group address).
type GroupAddressableEndpoint interface {
	// JoinGroup joins the specified group.
	JoinGroup(group tcpip.Address) tcpip.Error

	// LeaveGroup attempts to leave the specified group.
	LeaveGroup(group tcpip.Address) tcpip.Error

	// IsInGroup returns true if the endpoint is a member of the specified group.
	IsInGroup(group tcpip.Address) bool
}

// PrimaryEndpointBehavior is an enumeration of an AddressEndpoint's primary
// behavior.
type PrimaryEndpointBehavior int

const (
	// CanBePrimaryEndpoint indicates the endpoint can be used as a primary
	// endpoint for new connections with no local address.
	CanBePrimaryEndpoint PrimaryEndpointBehavior = iota

	// FirstPrimaryEndpoint indicates the endpoint should be the first
	// primary endpoint considered. If there are multiple endpoints with
	// this behavior, they are ordered by recency.
	FirstPrimaryEndpoint

	// NeverPrimaryEndpoint indicates the endpoint should never be a
	// primary endpoint.
	NeverPrimaryEndpoint
)

// String implements fmt.Stringer.
func (peb PrimaryEndpointBehavior) String() string {
	switch peb {
	case CanBePrimaryEndpoint:
		return "CanBePrimaryEndpoint"
	case FirstPrimaryEndpoint:
		return "FirstPrimaryEndpoint"
	case NeverPrimaryEndpoint:
		return "NeverPrimaryEndpoint"
	default:
		panic(fmt.Sprintf("unknown primary endpoint behavior: %d", peb))
	}
}

// AddressConfigType is the method used to add an address.
type AddressConfigType int

const (
	// AddressConfigStatic is a statically configured address endpoint that was
	// added by some user-specified action (adding an explicit address, joining a
	// multicast group).
	AddressConfigStatic AddressConfigType = iota

	// AddressConfigSlaac is an address endpoint added by SLAAC, as per RFC 4862
	// section 5.5.3.
	AddressConfigSlaac
)

// AddressLifetimes encodes an address' preferred and valid lifetimes, as well
// as if the address is deprecated.
type AddressLifetimes struct {
	// Deprecated is whether the address is deprecated.
	Deprecated bool

	// PreferredUntil is the time at which the address will be deprecated.
	//
	// Note that for certain addresses, deprecating the address at the
	// PreferredUntil time is not handled as a scheduled job by the stack, but
	// is information provided by the owner as an indication of when it will
	// deprecate the address.
	//
	// PreferredUntil should be ignored if Deprecated is true. If Deprecated
	// is false, and PreferredUntil is the zero value, no information about
	// the preferred lifetime can be inferred.
	PreferredUntil tcpip.MonotonicTime

	// ValidUntil is the time at which the address will be invalidated.
	//
	// Note that for certain addresses, invalidating the address at the
	// ValidUntil time is not handled as a scheduled job by the stack, but
	// is information provided by the owner as an indication of when it will
	// invalidate the address.
	//
	// If ValidUntil is the zero value, no information about the valid lifetime
	// can be inferred.
	ValidUntil tcpip.MonotonicTime
}

// AddressProperties contains additional properties that can be configured when
// adding an address.
type AddressProperties struct {
	PEB        PrimaryEndpointBehavior
	ConfigType AddressConfigType
	// Lifetimes encodes the address' lifetimes.
	//
	// Lifetimes.PreferredUntil and Lifetimes.ValidUntil are informational, i.e.
	// the stack will neither deprecate nor invalidate the address upon reaching
	// these timestamps.
	//
	// If Lifetimes.Deprecated is true, the address will be added as deprecated.
	Lifetimes AddressLifetimes
	// Temporary is as defined in RFC 4941, but applies not only to addresses
	// added via SLAAC, e.g. DHCPv6 can also add temporary addresses. Temporary
	// addresses are short-lived and are not to be valid (or preferred)
	// forever; hence the term temporary.
	Temporary bool
	Disp      AddressDispatcher
}

// AddressAssignmentState is an address' assignment state.
type AddressAssignmentState int

const (
	_ AddressAssignmentState = iota

	// AddressDisabled indicates the NIC the address is assigned to is disabled.
	AddressDisabled

	// AddressTentative indicates an address is yet to pass DAD (IPv4 addresses
	// are never tentative).
	AddressTentative

	// AddressAssigned indicates an address is assigned.
	AddressAssigned
)

// String implements fmt.Stringer.
func (state AddressAssignmentState) String() string {
	switch state {
	case AddressDisabled:
		return "Disabled"
	case AddressTentative:
		return "Tentative"
	case AddressAssigned:
		return "Assigned"
	default:
		panic(fmt.Sprintf("unknown address assignment state: %d", state))
	}
}

// AddressRemovalReason is the reason an address was removed.
type AddressRemovalReason int

const (
	_ AddressRemovalReason = iota

	// AddressRemovalManualAction indicates the address was removed explicitly
	// using the stack API.
	AddressRemovalManualAction

	// AddressRemovalInterfaceRemoved indicates the address was removed because
	// the NIC it is assigned to was removed.
	AddressRemovalInterfaceRemoved

	// AddressRemovalDADFailed indicates the address was removed because DAD
	// failed.
	AddressRemovalDADFailed

	// AddressRemovalInvalidated indicates the address was removed because it
	// was invalidated.
	AddressRemovalInvalidated
)

// String implements fmt.Stringer.
func (reason AddressRemovalReason) String() string {
	switch reason {
	case AddressRemovalManualAction:
		return "ManualAction"
	case AddressRemovalInterfaceRemoved:
		return "InterfaceRemoved"
	case AddressRemovalDADFailed:
		return "DADFailed"
	case AddressRemovalInvalidated:
		return "Invalidated"
	default:
		panic(fmt.Sprintf("unknown address removal reason: %d", reason))
	}
}

// AddressDispatcher is the interface integrators can implement to receive
// address-related events.
type AddressDispatcher interface {
	// OnChanged is called with an address' properties when they change.
	//
	// OnChanged is called once when the address is added with the initial state,
	// and every time a property changes.
	//
	// The PreferredUntil and ValidUntil fields in AddressLifetimes must be
	// considered informational, i.e. one must not consider an address to be
	// deprecated/invalid even if the monotonic clock timestamp is past these
	// deadlines. The Deprecated field indicates whether an address is
	// preferred or not; and OnRemoved will be called when an address is
	// removed due to invalidation.
	OnChanged(AddressLifetimes, AddressAssignmentState)

	// OnRemoved is called when an address is removed with the removal reason.
	OnRemoved(AddressRemovalReason)
}

// AssignableAddressEndpoint is a reference counted address endpoint that may be
// assigned to a NetworkEndpoint.
type AssignableAddressEndpoint interface {
	// AddressWithPrefix returns the endpoint's address.
	AddressWithPrefix() tcpip.AddressWithPrefix

	// Subnet returns the subnet of the endpoint's address.
	Subnet() tcpip.Subnet

	// IsAssigned returns whether or not the endpoint is considered bound
	// to its NetworkEndpoint.
	IsAssigned(allowExpired bool) bool

	// IncRef increments this endpoint's reference count.
	//
	// Returns true if it was successfully incremented. If it returns false, then
	// the endpoint is considered expired and should no longer be used.
	IncRef() bool

	// DecRef decrements this endpoint's reference count.
	DecRef()
}

// AddressEndpoint is an endpoint representing an address assigned to an
// AddressableEndpoint.
type AddressEndpoint interface {
	AssignableAddressEndpoint

	// GetKind returns the address kind for this endpoint.
	GetKind() AddressKind

	// SetKind sets the address kind for this endpoint.
	SetKind(AddressKind)

	// ConfigType returns the method used to add the address.
	ConfigType() AddressConfigType

	// Deprecated returns whether or not this endpoint is deprecated.
	Deprecated() bool

	// SetDeprecated sets this endpoint's deprecated status.
	SetDeprecated(bool)

	// Lifetimes returns this endpoint's lifetimes.
	Lifetimes() AddressLifetimes

	// SetLifetimes sets this endpoint's lifetimes.
	//
	// Note that setting preferred-until and valid-until times do not result in
	// deprecation/invalidation jobs to be scheduled by the stack.
	SetLifetimes(AddressLifetimes)

	// Temporary returns whether or not this endpoint is temporary.
	Temporary() bool

	// RegisterDispatcher registers an address dispatcher.
	//
	// OnChanged will be called immediately on the provided address dispatcher
	// with this endpoint's current state.
	RegisterDispatcher(AddressDispatcher)
}

// AddressKind is the kind of an address.
//
// See the values of AddressKind for more details.
type AddressKind int

const (
	// PermanentTentative is a permanent address endpoint that is not yet
	// considered to be fully bound to an interface in the traditional
	// sense. That is, the address is associated with a NIC, but packets
	// destined to the address MUST NOT be accepted and MUST be silently
	// dropped, and the address MUST NOT be used as a source address for
	// outgoing packets. For IPv6, addresses are of this kind until NDP's
	// Duplicate Address Detection (DAD) resolves. If DAD fails, the address
	// is removed.
	PermanentTentative AddressKind = iota

	// Permanent is a permanent endpoint (vs. a temporary one) assigned to the
	// NIC. Its reference count is biased by 1 to avoid removal when no route
	// holds a reference to it. It is removed by explicitly removing the address
	// from the NIC.
	Permanent

	// PermanentExpired is a permanent endpoint that had its address removed from
	// the NIC, and it is waiting to be removed once no references to it are held.
	//
	// If the address is re-added before the endpoint is removed, its type
	// changes back to Permanent.
	PermanentExpired

	// Temporary is an endpoint, created on a one-off basis to temporarily
	// consider the NIC bound to an address that it is not explicitly bound to
	// (such as a permanent address). Its reference count must not be biased by 1
	// so that the address is removed immediately when references to it are no
	// longer held.
	//
	// A temporary endpoint may be promoted to permanent if the address is added
	// permanently.
	Temporary
)

// IsPermanent returns true if the AddressKind represents a permanent address.
func (k AddressKind) IsPermanent() bool {
	switch k {
	case Permanent, PermanentTentative:
		return true
	case Temporary, PermanentExpired:
		return false
	default:
		panic(fmt.Sprintf("unrecognized address kind = %d", k))
	}
}

// AddressableEndpoint is an endpoint that supports addressing.
//
// An endpoint is considered to support addressing when the endpoint may
// associate itself with an identifier (address).
type AddressableEndpoint interface {
	// AddAndAcquirePermanentAddress adds the passed permanent address.
	//
	// Returns *tcpip.ErrDuplicateAddress if the address exists.
	//
	// Acquires and returns the AddressEndpoint for the added address.
	AddAndAcquirePermanentAddress(addr tcpip.AddressWithPrefix, properties AddressProperties) (AddressEndpoint, tcpip.Error)

	// RemovePermanentAddress removes the passed address if it is a permanent
	// address.
	//
	// Returns *tcpip.ErrBadLocalAddress if the endpoint does not have the passed
	// permanent address.
	RemovePermanentAddress(addr tcpip.Address) tcpip.Error

	// SetLifetimes sets an address' lifetimes (strictly informational) and
	// whether it should be deprecated or preferred.
	//
	// Returns *tcpip.ErrBadLocalAddress if the endpoint does not have the passed
	// address.
	SetLifetimes(addr tcpip.Address, lifetimes AddressLifetimes) tcpip.Error

	// MainAddress returns the endpoint's primary permanent address.
	MainAddress() tcpip.AddressWithPrefix

	// AcquireAssignedAddress returns an address endpoint for the passed address
	// that is considered bound to the endpoint, optionally creating a temporary
	// endpoint if requested and no existing address exists.
	//
	// The returned endpoint's reference count is incremented.
	//
	// Returns nil if the specified address is not local to this endpoint.
AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB PrimaryEndpointBehavior) AddressEndpoint // AcquireOutgoingPrimaryAddress returns a primary address that may be used as // a source address when sending packets to the passed remote address. // // If allowExpired is true, expired addresses may be returned. // // The returned endpoint's reference count is incremented. // // Returns nil if a primary address is not available. AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint // PrimaryAddresses returns the primary addresses. PrimaryAddresses() []tcpip.AddressWithPrefix // PermanentAddresses returns all the permanent addresses. PermanentAddresses() []tcpip.AddressWithPrefix } // NDPEndpoint is a network endpoint that supports NDP. type NDPEndpoint interface { NetworkEndpoint // InvalidateDefaultRouter invalidates a default router discovered through // NDP. InvalidateDefaultRouter(tcpip.Address) } // NetworkInterface is a network interface. type NetworkInterface interface { NetworkLinkEndpoint // ID returns the interface's ID. ID() tcpip.NICID // IsLoopback returns true if the interface is a loopback interface. IsLoopback() bool // Name returns the name of the interface. // // May return an empty string if the interface is not configured with a name. Name() string // Enabled returns true if the interface is enabled. Enabled() bool // Promiscuous returns true if the interface is in promiscuous mode. // // When in promiscuous mode, the interface should accept all packets. Promiscuous() bool // Spoofing returns true if the interface is in spoofing mode. // // When in spoofing mode, the interface should consider all addresses as // assigned to it. Spoofing() bool // PrimaryAddress returns the primary address associated with the interface. // // PrimaryAddress will return the first non-deprecated address if such an // address exists. 
If no non-deprecated addresses exist, the first deprecated // address will be returned. If no deprecated addresses exist, the zero value // will be returned. PrimaryAddress(tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) // CheckLocalAddress returns true if the address exists on the interface. CheckLocalAddress(tcpip.NetworkProtocolNumber, tcpip.Address) bool // WritePacketToRemote writes the packet to the given remote link address. WritePacketToRemote(tcpip.LinkAddress, PacketBufferPtr) tcpip.Error // WritePacket writes a packet through the given route. // // WritePacket may modify the packet buffer. The packet buffer's // network and transport header must be set. WritePacket(*Route, PacketBufferPtr) tcpip.Error // HandleNeighborProbe processes an incoming neighbor probe (e.g. ARP // request or NDP Neighbor Solicitation). // // HandleNeighborProbe assumes that the probe is valid for the network // interface the probe was received on. HandleNeighborProbe(tcpip.NetworkProtocolNumber, tcpip.Address, tcpip.LinkAddress) tcpip.Error // HandleNeighborConfirmation processes an incoming neighbor confirmation // (e.g. ARP reply or NDP Neighbor Advertisement). HandleNeighborConfirmation(tcpip.NetworkProtocolNumber, tcpip.Address, tcpip.LinkAddress, ReachabilityConfirmationFlags) tcpip.Error } // LinkResolvableNetworkEndpoint handles link resolution events. type LinkResolvableNetworkEndpoint interface { // HandleLinkResolutionFailure is called when link resolution prevents the // argument from having been sent. HandleLinkResolutionFailure(PacketBufferPtr) } // NetworkEndpoint is the interface that needs to be implemented by endpoints // of network layer protocols (e.g., ipv4, ipv6). type NetworkEndpoint interface { // Enable enables the endpoint. // // Must only be called when the stack is in a state that allows the endpoint // to send and receive packets. // // Returns *tcpip.ErrNotPermitted if the endpoint cannot be enabled. 
Enable() tcpip.Error // Enabled returns true if the endpoint is enabled. Enabled() bool // Disable disables the endpoint. Disable() // DefaultTTL is the default time-to-live value (or hop limit, in ipv6) // for this endpoint. DefaultTTL() uint8 // MTU is the maximum transmission unit for this endpoint. This is // generally calculated as the MTU of the underlying data link endpoint // minus the network endpoint max header length. MTU() uint32 // MaxHeaderLength returns the maximum size the network (and lower // level layers combined) headers can have. Higher levels use this // information to reserve space in the front of the packets they're // building. MaxHeaderLength() uint16 // WritePacket writes a packet to the given destination address and // protocol. It may modify pkt. pkt.TransportHeader must have // already been set. WritePacket(r *Route, params NetworkHeaderParams, pkt PacketBufferPtr) tcpip.Error // WriteHeaderIncludedPacket writes a packet that includes a network // header to the given destination address. It may modify pkt. WriteHeaderIncludedPacket(r *Route, pkt PacketBufferPtr) tcpip.Error // HandlePacket is called by the link layer when new packets arrive to // this network endpoint. It sets pkt.NetworkHeader. // // HandlePacket may modify pkt. HandlePacket(pkt PacketBufferPtr) // Close is called when the endpoint is removed from a stack. Close() // NetworkProtocolNumber returns the tcpip.NetworkProtocolNumber for // this endpoint. NetworkProtocolNumber() tcpip.NetworkProtocolNumber // Stats returns a reference to the network endpoint stats. Stats() NetworkEndpointStats } // NetworkEndpointStats is the interface implemented by each network endpoint // stats struct. type NetworkEndpointStats interface { // IsNetworkEndpointStats is an empty method to implement the // NetworkEndpointStats marker interface. IsNetworkEndpointStats() } // IPNetworkEndpointStats is a NetworkEndpointStats that tracks IP-related // statistics. 
type IPNetworkEndpointStats interface { NetworkEndpointStats // IPStats returns the IP statistics of a network endpoint. IPStats() *tcpip.IPStats } // ForwardingNetworkEndpoint is a network endpoint that may forward packets. type ForwardingNetworkEndpoint interface { NetworkEndpoint // Forwarding returns the forwarding configuration. Forwarding() bool // SetForwarding sets the forwarding configuration. // // Returns the previous forwarding configuration. SetForwarding(bool) bool } // MulticastForwardingNetworkEndpoint is a network endpoint that may forward // multicast packets. type MulticastForwardingNetworkEndpoint interface { ForwardingNetworkEndpoint // MulticastForwarding returns true if multicast forwarding is enabled. // Otherwise, returns false. MulticastForwarding() bool // SetMulticastForwarding sets the multicast forwarding configuration. // // Returns the previous forwarding configuration. SetMulticastForwarding(bool) bool } // NetworkProtocol is the interface that needs to be implemented by network // protocols (e.g., ipv4, ipv6) that want to be part of the networking stack. type NetworkProtocol interface { // Number returns the network protocol number. Number() tcpip.NetworkProtocolNumber // MinimumPacketSize returns the minimum valid packet size of this // network protocol. The stack automatically drops any packets smaller // than this targeted at this protocol. MinimumPacketSize() int // ParseAddresses returns the source and destination addresses stored in a // packet of this protocol. ParseAddresses(b []byte) (src, dst tcpip.Address) // NewEndpoint creates a new endpoint of this protocol. NewEndpoint(nic NetworkInterface, dispatcher TransportDispatcher) NetworkEndpoint // SetOption allows enabling/disabling protocol specific features. // SetOption returns an error if the option is not supported or the // provided option value is invalid. 
SetOption(option tcpip.SettableNetworkProtocolOption) tcpip.Error // Option allows retrieving protocol specific option values. // Option returns an error if the option is not supported or the // provided option value is invalid. Option(option tcpip.GettableNetworkProtocolOption) tcpip.Error // Close requests that any worker goroutines owned by the protocol // stop. Close() // Wait waits for any worker goroutines owned by the protocol to stop. Wait() // Parse sets pkt.NetworkHeader and trims pkt.Data appropriately. It // returns: // - The encapsulated protocol, if present. // - Whether there is an encapsulated transport protocol payload (e.g. ARP // does not encapsulate anything). // - Whether pkt.Data was large enough to parse and set pkt.NetworkHeader. Parse(pkt PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) } // UnicastSourceAndMulticastDestination is a tuple that represents a unicast // source address and a multicast destination address. type UnicastSourceAndMulticastDestination struct { // Source represents a unicast source address. Source tcpip.Address // Destination represents a multicast destination address. Destination tcpip.Address } // MulticastRouteOutgoingInterface represents an outgoing interface in a // multicast route. type MulticastRouteOutgoingInterface struct { // ID corresponds to the outgoing NIC. ID tcpip.NICID // MinTTL represents the minumum TTL/HopLimit a multicast packet must have to // be sent through the outgoing interface. // // Note: a value of 0 allows all packets to be forwarded. MinTTL uint8 } // MulticastRoute is a multicast route. type MulticastRoute struct { // ExpectedInputInterface is the interface on which packets using this route // are expected to ingress. ExpectedInputInterface tcpip.NICID // OutgoingInterfaces is the set of interfaces that a multicast packet should // be forwarded out of. // // This field should not be empty. 
OutgoingInterfaces []MulticastRouteOutgoingInterface } // MulticastForwardingNetworkProtocol is the interface that needs to be // implemented by the network protocols that support multicast forwarding. type MulticastForwardingNetworkProtocol interface { NetworkProtocol // AddMulticastRoute adds a route to the multicast routing table such that // packets matching the addresses will be forwarded using the provided route. // // Returns an error if the addresses or route is invalid. AddMulticastRoute(UnicastSourceAndMulticastDestination, MulticastRoute) tcpip.Error // RemoveMulticastRoute removes the route matching the provided addresses // from the multicast routing table. // // Returns an error if the addresses are invalid or a matching route is not // found. RemoveMulticastRoute(UnicastSourceAndMulticastDestination) tcpip.Error // MulticastRouteLastUsedTime returns a monotonic timestamp that // represents the last time that the route matching the provided addresses // was used or updated. // // Returns an error if the addresses are invalid or a matching route was not // found. MulticastRouteLastUsedTime(UnicastSourceAndMulticastDestination) (tcpip.MonotonicTime, tcpip.Error) // EnableMulticastForwarding enables multicast forwarding for the protocol. // // Returns an error if the provided multicast forwarding event dispatcher is // nil. Otherwise, returns true if the multicast forwarding was already // enabled. EnableMulticastForwarding(MulticastForwardingEventDispatcher) (bool, tcpip.Error) // DisableMulticastForwarding disables multicast forwarding for the protocol. DisableMulticastForwarding() } // MulticastPacketContext is the context in which a multicast packet triggered // a multicast forwarding event. type MulticastPacketContext struct { // SourceAndDestination contains the unicast source address and the multicast // destination address found in the relevant multicast packet. 
SourceAndDestination UnicastSourceAndMulticastDestination // InputInterface is the interface on which the relevant multicast packet // arrived. InputInterface tcpip.NICID } // MulticastForwardingEventDispatcher is the interface that integrators should // implement to handle multicast routing events. type MulticastForwardingEventDispatcher interface { // OnMissingRoute is called when an incoming multicast packet does not match // any installed route. // // The packet that triggered this event may be queued so that it can be // transmitted once a route is installed. Even then, it may still be dropped // as per the routing table's GC/eviction policy. OnMissingRoute(MulticastPacketContext) // OnUnexpectedInputInterface is called when a multicast packet arrives at an // interface that does not match the installed route's expected input // interface. // // This may be an indication of a routing loop. The packet that triggered // this event is dropped without being forwarded. OnUnexpectedInputInterface(context MulticastPacketContext, expectedInputInterface tcpip.NICID) } // NetworkDispatcher contains the methods used by the network stack to deliver // inbound/outbound packets to the appropriate network/packet(if any) endpoints. type NetworkDispatcher interface { // DeliverNetworkPacket finds the appropriate network protocol endpoint // and hands the packet over for further processing. // // // If the link-layer has a header, the packet's link header must be populated. // // DeliverNetworkPacket may modify pkt. DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) // DeliverLinkPacket delivers a packet to any interested packet endpoints. // // This method should be called with both incoming and outgoing packets. // // If the link-layer has a header, the packet's link header must be populated. 
DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) } // LinkEndpointCapabilities is the type associated with the capabilities // supported by a link-layer endpoint. It is a set of bitfields. type LinkEndpointCapabilities uint // The following are the supported link endpoint capabilities. const ( CapabilityNone LinkEndpointCapabilities = 0 // CapabilityTXChecksumOffload indicates that the link endpoint supports // checksum computation for outgoing packets and the stack can skip // computing checksums when sending packets. CapabilityTXChecksumOffload LinkEndpointCapabilities = 1 << iota // CapabilityRXChecksumOffload indicates that the link endpoint supports // checksum verification on received packets and that it's safe for the // stack to skip checksum verification. CapabilityRXChecksumOffload CapabilityResolutionRequired CapabilitySaveRestore CapabilityDisconnectOk CapabilityLoopback ) // LinkWriter is an interface that supports sending packets via a data-link // layer endpoint. It is used with QueueingDiscipline to batch writes from // upper layer endpoints. type LinkWriter interface { // WritePackets writes packets. Must not be called with an empty list of // packet buffers. // // Each packet must have the link-layer header set, if the link requires // one. // // WritePackets may modify the packet buffers, and takes ownership of the PacketBufferList. // it is not safe to use the PacketBufferList after a call to WritePackets. WritePackets(PacketBufferList) (int, tcpip.Error) } // NetworkLinkEndpoint is a data-link layer that supports sending network // layer packets. type NetworkLinkEndpoint interface { // MTU is the maximum transmission unit for this endpoint. This is // usually dictated by the backing physical network; when such a // physical network doesn't exist, the limit is generally 64k, which // includes the maximum size of an IP packet. 
MTU() uint32 // MaxHeaderLength returns the maximum size the data link (and // lower level layers combined) headers can have. Higher levels use this // information to reserve space in the front of the packets they're // building. MaxHeaderLength() uint16 // LinkAddress returns the link address (typically a MAC) of the // endpoint. LinkAddress() tcpip.LinkAddress // Capabilities returns the set of capabilities supported by the // endpoint. Capabilities() LinkEndpointCapabilities // Attach attaches the data link layer endpoint to the network-layer // dispatcher of the stack. // // Attach is called with a nil dispatcher when the endpoint's NIC is being // removed. Attach(dispatcher NetworkDispatcher) // IsAttached returns whether a NetworkDispatcher is attached to the // endpoint. IsAttached() bool // Wait waits for any worker goroutines owned by the endpoint to stop. // // For now, requesting that an endpoint's worker goroutine(s) stop is // implementation specific. // // Wait will not block if the endpoint hasn't started any goroutines // yet, even if it might later. Wait() // ARPHardwareType returns the ARPHRD_TYPE of the link endpoint. // // See: // https://github.com/torvalds/linux/blob/aa0c9086b40c17a7ad94425b3b70dd1fdd7497bf/include/uapi/linux/if_arp.h#L30 ARPHardwareType() header.ARPHardwareType // AddHeader adds a link layer header to the packet if required. AddHeader(PacketBufferPtr) // ParseHeader parses the link layer header to the packet. ParseHeader(PacketBufferPtr) bool } // QueueingDiscipline provides a queueing strategy for outgoing packets (e.g // FIFO, LIFO, Random Early Drop etc). type QueueingDiscipline interface { // WritePacket writes a packet. // // WritePacket may modify the packet buffer. The packet buffer's // network and transport header must be set. 
// // To participate in transparent bridging, a LinkEndpoint implementation // should call eth.Encode with header.EthernetFields.SrcAddr set to // pkg.EgressRoute.LocalLinkAddress if it is provided. WritePacket(PacketBufferPtr) tcpip.Error Close() } // LinkEndpoint is the interface implemented by data link layer protocols (e.g., // ethernet, loopback, raw) and used by network layer protocols to send packets // out through the implementer's data link endpoint. When a link header exists, // it sets each PacketBuffer's LinkHeader field before passing it up the // stack. type LinkEndpoint interface { NetworkLinkEndpoint LinkWriter } // InjectableLinkEndpoint is a LinkEndpoint where inbound packets are // delivered via the Inject method. type InjectableLinkEndpoint interface { LinkEndpoint // InjectInbound injects an inbound packet. InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) // InjectOutbound writes a fully formed outbound packet directly to the // link. // // dest is used by endpoints with multiple raw destinations. InjectOutbound(dest tcpip.Address, packet *buffer.View) tcpip.Error } // DADResult is a marker interface for the result of a duplicate address // detection process. type DADResult interface { isDADResult() } var _ DADResult = (*DADSucceeded)(nil) // DADSucceeded indicates DAD completed without finding any duplicate addresses. type DADSucceeded struct{} func (*DADSucceeded) isDADResult() {} var _ DADResult = (*DADError)(nil) // DADError indicates DAD hit an error. type DADError struct { Err tcpip.Error } func (*DADError) isDADResult() {} var _ DADResult = (*DADAborted)(nil) // DADAborted indicates DAD was aborted. type DADAborted struct{} func (*DADAborted) isDADResult() {} var _ DADResult = (*DADDupAddrDetected)(nil) // DADDupAddrDetected indicates DAD detected a duplicate address. type DADDupAddrDetected struct { // HolderLinkAddress is the link address of the node that holds the duplicate // address. 
HolderLinkAddress tcpip.LinkAddress } func (*DADDupAddrDetected) isDADResult() {} // DADCompletionHandler is a handler for DAD completion. type DADCompletionHandler func(DADResult) // DADCheckAddressDisposition enumerates the possible return values from // DAD.CheckDuplicateAddress. type DADCheckAddressDisposition int const ( _ DADCheckAddressDisposition = iota // DADDisabled indicates that DAD is disabled. DADDisabled // DADStarting indicates that DAD is starting for an address. DADStarting // DADAlreadyRunning indicates that DAD was already started for an address. DADAlreadyRunning ) const ( // defaultDupAddrDetectTransmits is the default number of NDP Neighbor // Solicitation messages to send when doing Duplicate Address Detection // for a tentative address. // // Default = 1 (from RFC 4862 section 5.1) defaultDupAddrDetectTransmits = 1 ) // DADConfigurations holds configurations for duplicate address detection. type DADConfigurations struct { // The number of Neighbor Solicitation messages to send when doing // Duplicate Address Detection for a tentative address. // // Note, a value of zero effectively disables DAD. DupAddrDetectTransmits uint8 // The amount of time to wait between sending Neighbor Solicitation // messages. // // Must be greater than or equal to 1ms. RetransmitTimer time.Duration } // DefaultDADConfigurations returns the default DAD configurations. func DefaultDADConfigurations() DADConfigurations { return DADConfigurations{ DupAddrDetectTransmits: defaultDupAddrDetectTransmits, RetransmitTimer: defaultRetransmitTimer, } } // Validate modifies the configuration with valid values. If invalid values are // present in the configurations, the corresponding default values are used // instead. func (c *DADConfigurations) Validate() { if c.RetransmitTimer < minimumRetransmitTimer { c.RetransmitTimer = defaultRetransmitTimer } } // DuplicateAddressDetector handles checking if an address is already assigned // to some neighboring node on the link. 
type DuplicateAddressDetector interface {
	// CheckDuplicateAddress checks if an address is assigned to a neighbor.
	//
	// If DAD is already being performed for the address, the handler will be
	// called with the result of the original DAD request.
	CheckDuplicateAddress(tcpip.Address, DADCompletionHandler) DADCheckAddressDisposition

	// SetDADConfigurations sets the configurations for DAD.
	SetDADConfigurations(c DADConfigurations)

	// DuplicateAddressProtocol returns the network protocol the receiver can
	// perform duplicate address detection for.
	DuplicateAddressProtocol() tcpip.NetworkProtocolNumber
}

// LinkAddressResolver handles link address resolution for a network protocol.
type LinkAddressResolver interface {
	// LinkAddressRequest sends a request for the link address of the target
	// address. The request is broadcast on the local network if a remote link
	// address is not provided.
	LinkAddressRequest(targetAddr, localAddr tcpip.Address, remoteLinkAddr tcpip.LinkAddress) tcpip.Error

	// ResolveStaticAddress attempts to resolve address without sending
	// requests. It either resolves the name immediately or returns the
	// empty LinkAddress.
	//
	// It can be used to resolve broadcast addresses for example.
	ResolveStaticAddress(addr tcpip.Address) (tcpip.LinkAddress, bool)

	// LinkAddressProtocol returns the network protocol of the
	// addresses this resolver can resolve.
	LinkAddressProtocol() tcpip.NetworkProtocolNumber
}

// RawFactory produces endpoints for writing various types of raw packets.
type RawFactory interface {
	// NewUnassociatedEndpoint produces endpoints for writing packets not
	// associated with a particular transport protocol. Such endpoints can
	// be used to write arbitrary packets that include the network header.
	NewUnassociatedEndpoint(stack *Stack, netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error)

	// NewPacketEndpoint produces endpoints for reading and writing packets
	// that include network and (when cooked is false) link layer headers.
	NewPacketEndpoint(stack *Stack, cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error)
}

// GSOType is the type of GSO segments.
//
// +stateify savable
type GSOType int

// Types of gso segments.
const (
	GSONone GSOType = iota

	// Hardware GSO types:
	GSOTCPv4
	GSOTCPv6

	// GSOGvisor is used for gVisor GSO segments which have to be sent by
	// endpoint.WritePackets.
	GSOGvisor
)

// GSO contains generic segmentation offload properties.
//
// +stateify savable
type GSO struct {
	// Type is one of GSONone, GSOTCPv4, etc.
	Type GSOType
	// NeedsCsum is set if the checksum offload is enabled.
	NeedsCsum bool
	// CsumOffset is the offset after which to place the checksum.
	CsumOffset uint16

	// MSS is the maximum segment size.
	MSS uint16
	// L3HdrLen is the L3 (IP) header length.
	L3HdrLen uint16

	// MaxSize is the maximum GSO packet size.
	MaxSize uint32
}

// SupportedGSO is the type of segmentation offloading supported.
type SupportedGSO int

const (
	// GSONotSupported indicates that segmentation offloading is not supported.
	GSONotSupported SupportedGSO = iota

	// HostGSOSupported indicates that segmentation offloading may be performed
	// by the host. This is typically true when netstack is attached to a host
	// AF_PACKET socket, and not true when attached to a unix socket or other
	// non-networking data layer.
	HostGSOSupported

	// GvisorGSOSupported indicates that segmentation offloading may be performed
	// in gVisor.
	GvisorGSOSupported
)

// GSOEndpoint provides access to GSO properties.
type GSOEndpoint interface {
	// GSOMaxSize returns the maximum GSO packet size.
	GSOMaxSize() uint32

	// SupportedGSO returns the supported segmentation offloading.
	SupportedGSO() SupportedGSO
}

// GvisorGSOMaxSize is a maximum allowed size of a software GSO segment.
// This isn't a hard limit, because it is never set into packet headers.
const GvisorGSOMaxSize = 1 << 16
package http2

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/xgfone/go-tools/log2"
)

// Render is a HTTP render interface.
type Render interface {
	// Render only writes the body data into the response, which should not
	// write the status code and has no need to set the Content-Type header.
	Render(http.ResponseWriter) error
}

// Context is a wrapper of http.Request and http.ResponseWriter.
//
// Notice: the Context struct refers to github.com/henrylee2cn/faygo and
// github.com/gin-gonic/gin.
type Context struct {
	Request *http.Request
	Writer  http.ResponseWriter

	// query caches the parsed URL query values so they are parsed only once.
	query url.Values
}

// ContextHandler converts a context handler to http.Handler.
//
// For example,
//
//	func handler(c Context) error {
//		// ...
//	}
//	http.Handle("/", ContextHandler(handler))
func ContextHandler(f func(Context) error) http.Handler {
	return ContextHandlerFunc(f)
}

// ContextHandlerFunc converts a context handler to http.Handler.
//
// For example,
//
//	func handler(c Context) error {
//		// ...
//	}
//	http.HandleFunc("/", ContextHandlerFunc(handler))
func ContextHandlerFunc(f func(Context) error) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := f(NewContext(w, r)); err != nil {
			log2.ErrorF("Failed to handle %q: %s", r.RequestURI, err)
		}
	})
}

// NewContext returns a new Context.
func NewContext(w http.ResponseWriter, r *http.Request) Context {
	return Context{
		Request: r,
		Writer:  w,
		query:   r.URL.Query(),
	}
}

// IsWebsocket returns true if the request is websocket.
func (c Context) IsWebsocket() bool {
	// A websocket handshake carries "Connection: Upgrade" (possibly in a
	// comma-separated list) and "Upgrade: websocket".
	return strings.Contains(strings.ToLower(c.GetHeader("Connection")), "upgrade") &&
		strings.ToLower(c.GetHeader("Upgrade")) == "websocket"
}

// ClientIP returns the client ip.
func (c Context) ClientIP() string {
	return ClientIP(c.Request)
}

// Host returns the host:port of this request from the client.
func (c Context) Host() string {
	return c.Request.Host
}

// Method returns the request method.
func (c Context) Method() string {
	return c.Request.Method
}

// Domain returns the domain (the host without the port) of the client.
//
// Bracketed IPv6 host literals such as "[::1]:8080" are handled: the
// bracketed address is returned whole. (Naively splitting on ":" would
// return "[".)
func (c Context) Domain() string {
	host := c.Request.Host
	if strings.HasPrefix(host, "[") {
		if i := strings.IndexByte(host, ']'); i != -1 {
			return host[:i+1]
		}
	}
	return strings.Split(host, ":")[0]
}

// Path returns the path of the request URL.
func (c Context) Path() string {
	return c.Request.URL.Path
}

// Proxy returns all the proxys.
func (c Context) Proxy() []string {
	if ip := c.GetHeader(XForwardedFor); ip != "" {
		return strings.Split(ip, ",")
	}
	return []string{}
}

// IsMethod returns true if the request method is the given method.
func (c Context) IsMethod(method string) bool {
	return c.Method() == method
}

// IsAjax returns true if the request is an AJAX request.
func (c Context) IsAjax() bool {
	return c.GetHeader(XRequestedWith) == "XMLHttpRequest"
}

// UserAgent returns the request header "UserAgent".
func (c Context) UserAgent() string {
	return c.GetHeader(UserAgent)
}

// ContentType returns the Content-Type header of the request.
func (c Context) ContentType() string {
	return GetContentType(c.Request)
}

// ContentLength returns the length of the body.
func (c Context) ContentLength() int64 {
	return c.Request.ContentLength
}

// GetRawBody returns the raw body data.
func (c Context) GetRawBody() ([]byte, error) {
	return GetBody(c.Request)
}

// GetBody returns the body as string.
func (c Context) GetBody() (string, error) {
	b, err := c.GetRawBody()
	return string(b), err
}

//////////////////////////////////////////////////////////////////////////////
// Get the request Cookie and Set the response Cookie

// Cookie returns the named cookie provided in the request.
//
// It will return http.ErrNoCookie if there is not the named cookie.
func (c Context) Cookie(name string) (string, error) {
	cookie, err := c.Request.Cookie(name)
	if err != nil {
		return "", err
	}
	return url.QueryUnescape(cookie.Value)
}

// SetCookie adds a Set-Cookie header into the response header.
//
// If the cookie is invalid, it will be dropped silently.
func (c Context) SetCookie(name, value, path, domain string, maxAge int, secure, httpOnly bool) { if path == "" { path = "/" } http.SetCookie(c.Writer, &http.Cookie{ Name: name, Value: url.QueryEscape(value), MaxAge: maxAge, Path: path, Domain: domain, Secure: secure, HttpOnly: httpOnly, }) } ////////////////////////////////////////////////////////////////////////////// // URL Query // GetQuerys returns all query values for the given key. // // It will return nil if not the key. func (c Context) GetQuerys(key string) []string { return c.query[key] } // GetQuery returns the first query value for the given key. // // It will return "" if not the key. func (c Context) GetQuery(key string) string { if vs := c.GetQuerys(key); len(vs) > 0 { return vs[0] } return "" } // GetQueryWithDefault is equal to GetQuery, but returns the default if not // the key. func (c Context) GetQueryWithDefault(key, _default string) string { if v := c.GetQuery(key); v != "" { return v } return _default } ////////////////////////////////////////////////////////////////////////////// // Get the request header and Set the response header. // GetHeader returns the request header by the key. func (c Context) GetHeader(key string) string { return c.Request.Header.Get(key) } // SetHeader will set the response header if value is not empty, // Or delete the response header by the key. // // Notice: if key is "", ignore it. func (c Context) SetHeader(key, value string) { if key == "" { return } if value == "" { c.Writer.Header().Del(key) } else { c.Writer.Header().Set(key, value) } } ///////////////////////////////////////////////////////////////////////////// // Render the response // Status writes the response header with the status code. // // The returned value is nil forever. func (c Context) Status(code int) error { c.Writer.WriteHeader(code) return nil } // Redirect redirects the request to location. // // code must be betwwen 300 and 308, that's [300, 308], or return an error. 
func (c Context) Redirect(code int, location string) error { if code < 300 || code > 308 { return fmt.Errorf("Cannot redirect with status code %d", code) } if location == "" { location = "/" } http.Redirect(c.Writer, c.Request, location, code) return nil } // Error renders the error information to the response body. // // if having no second argument, the status code is 500. func (c Context) Error(err error, code ...int) error { if len(code) > 0 { return c.String(code[0], "%s", err) } return c.String(500, "%s", err) } // File Sends the file to the client. func (c Context) File(filepath string) { http.ServeFile(c.Writer, c.Request, filepath) } // Data writes some data into the repsonse body, with a status code. func (c Context) Data(code int, contentType string, data []byte) error { return Bytes(c.Writer, code, contentType, data) } // Render renders the content into the response body, with a status code. func (c Context) Render(code int, contentType string, r Render) error { c.Status(code) SetContentType(c.Writer, contentType) return r.Render(c.Writer) } // String renders the format string into the response body, with a status code. func (c Context) String(code int, format string, args ...interface{}) error { return String(c.Writer, code, format, args...) } // XML renders the XML into the response body, with a status code. func (c Context) XML(code int, v interface{}) error { return XML(c.Writer, code, v) } // JSON renders the JSON into the response body, with a status code. func (c Context) JSON(code int, v interface{}) error { return JSON(c.Writer, code, v) } /////////////////////////////////////////////////////////////////////////////// // Status2 writes the response header with the status code. // // The returned value is nil forever. // // The code is 200 by default. It is equal to c.Status(200). 
func (c Context) Status2(code ...int) error { if len(code) > 0 { return c.Status(code[0]) } return c.Status(200) } // String2 renders the string s into the response body. // // The code is 200 by default. It is equal to c.String(200, "%s", s). func (c Context) String2(s string, code ...int) error { if len(code) > 0 { return c.String(code[0], "%s", s) } return c.String(200, "%s", s) } // Redirect2 redirects the request to location. // // code must be betwwen 300 and 308, that's [300, 308], or return an error. // // The code is 301 by default. It is equal to c.Redirect(301, location). func (c Context) Redirect2(location string, code ...int) error { if len(code) > 0 { return c.Redirect(code[0], location) } return c.Redirect(301, location) } // Data2 writes some data into the repsonse body, with a status code. // // The code is 200 by default, which is equal to c.Data(200, contentType, data). func (c Context) Data2(contentType string, data []byte, code ...int) error { if len(code) > 0 { return c.Data(code[0], contentType, data) } return c.Data(200, contentType, data) } // Render2 renders the content into the response body, with a status code. // // The code is 200 by default, which is equal to c.Render(200, contentType, r). func (c Context) Render2(contentType string, r Render, code ...int) error { if len(code) > 0 { return c.Render(code[0], contentType, r) } return c.Render(200, contentType, r) } // XML2 renders the XML into the response body, with a status code. // // The code is 200 by default, which is equal to c.XML(200, v). func (c Context) XML2(v interface{}, code ...int) error { if len(code) > 0 { return c.XML(code[0], v) } return c.XML(200, v) } // JSON2 renders the JSON into the response body, with a status code. // // The code is 200 by default, which is equal to c.JSON(200, v). func (c Context) JSON2(v interface{}, code ...int) error { if len(code) > 0 { return c.JSON(code[0], v) } return c.JSON(200, v) }
// Copyright (C) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package text import ( "fmt" "strings" "github.com/google/gapid/core/math/sint" ) // LineNumber prefixes returns s prefixed with a line number for each line, // starting from 1. Useful for adding line numbers to source. func LineNumber(s string) string { lines := strings.Split(s, "\n") width := sint.Log10(len(lines)) + 1 fa := fmt.Sprintf("%%%dd:", width) fb := fmt.Sprintf("%%%dd: %%s", width) for i, l := range lines { if len(l) == 0 { lines[i] = fmt.Sprintf(fa, i+1) } else { lines[i] = fmt.Sprintf(fb, i+1, l) } } return strings.Join(lines, "\n") }
package crawler

import (
	"fmt"
	"io"
	"log"
	"net/url"
	"os"
	"path"
	"strings"

	"github.com/ahmdrz/goinsta"
	"github.com/iveronanomi/goinstagrab"
)

// LatestMedia downloads the latest feed and story media for every user name
// listed in goinstagrab.Config.ScanTargets, stopping each scan once
// goinstagrab.Config.DeepScan pages/items have been processed.
func (s *service) LatestMedia() {
	names := goinstagrab.Config.ScanTargets
	for _, uName := range names {
		user, err := s.api.Profiles.ByName(uName)
		if err != nil {
			s.l.Print(err)
			return
		}

		// NOTE(review): j counts feed *pages* here but story *items* below,
		// and both log messages say "stories saved" — confirm this is
		// intentional before changing the log text.
		j := 0
		feeds := user.Feed()
		for feeds.Next() {
			j++
			for _, img := range feeds.Items {
				_, _, err := s.download(&img, fmt.Sprintf("./data/%s/feed/", user.Username), "", user.Username)
				if err != nil {
					s.l.Printf("%d:%s stories saved: %d", user.ID, user.Username, j)
					s.l.Printf("getting feed error: %v", err)
					break
				}
			}
			s.l.Printf("%d:%s stories saved: %d", user.ID, user.Username, j)
			if goinstagrab.Config.DeepScan <= j {
				break
			}
		}

		j = 0
		stories := user.Stories()
		for stories.Next() {
			for _, story := range stories.Items {
				j++
				_, _, err := s.download(&story, fmt.Sprintf("./data/%s/story/", user.Username), "", user.Username)
				if err != nil {
					s.l.Printf("%d:%s stories saved: %d", user.ID, user.Username, j)
					s.l.Printf("getting story error: %v", err)
					break
				}
			}
			s.l.Printf("%d:%s stories saved: %d", user.ID, user.Username, j)
			if goinstagrab.Config.DeepScan <= j {
				break
			}
		}
	}
}

// download saves the best-quality video (preferred) or image of item under
// folder, inside "videos"/"images" sub-directories. A non-empty name
// overrides the file name derived from the media URL. It returns the saved
// image and/or video paths.
func (s *service) download(item *goinsta.Item, folder, name, username string) (imgs, vds string, err error) {
	log.SetPrefix("Download ")
	var u *url.URL
	var nname string
	imgFolder := path.Join(folder, "images")
	vidFolder := path.Join(folder, "videos")
	inst := item.Media.Instagram()
	// Best-effort directory creation; a failure surfaces later in os.Create.
	os.MkdirAll(folder, 0777)
	os.MkdirAll(imgFolder, 0777)
	os.MkdirAll(vidFolder, 0777)

	vds = goinsta.GetBest(item.Videos)
	if vds != "" {
		if name == "" {
			u, err = url.Parse(vds)
			if err != nil {
				return
			}
			nname = path.Join(vidFolder, path.Base(u.Path))
		} else {
			// Fixed: previously joined the still-empty nname instead of name,
			// yielding just the folder path when a name was supplied.
			nname = path.Join(vidFolder, name)
		}
		imgName := name
		if goinstagrab.Dump.IsScanned(username, "videos", imgName) {
			return "", vds, nil
		}
		nname = getname(nname)
		vds, err = get(inst, vds, nname)
		if err == nil {
			// Fixed: media is marked as scanned on SUCCESS. It was previously
			// marked on failure, which skipped failed downloads forever and
			// re-downloaded successful ones on every run.
			goinstagrab.Dump.MarkMediaScanned(username, "videos", imgName)
		}
		return "", vds, err
	}

	imgs = goinsta.GetBest(item.Images.Versions)
	if imgs != "" {
		if name == "" {
			u, err = url.Parse(imgs)
			if err != nil {
				return
			}
			nname = path.Join(imgFolder, path.Base(u.Path))
		} else {
			// Fixed: previously joined the still-empty nname instead of name.
			nname = path.Join(imgFolder, name)
		}
		imgName := name
		if goinstagrab.Dump.IsScanned(username, "images", imgName) {
			return imgs, vds, nil
		}
		nname = getname(nname)
		imgs, err = get(inst, imgs, nname)
		if err == nil {
			// Fixed: category was "videos" (copy-paste error) and marking
			// happened on failure instead of success.
			goinstagrab.Dump.MarkMediaScanned(username, "images", imgName)
		}
		return imgs, "", err
	}
	return imgs, vds, fmt.Errorf("cannot find any image or video")
}

// get downloads url with the authenticated Instagram client and writes the
// payload to dst, returning dst.
func get(inst *goinsta.Instagram, url, dst string) (string, error) {
	file, err := os.Create(dst)
	if err != nil {
		return "", err
	}
	defer file.Close()
	log.Print(url)
	resp, err := inst.C.Get(url)
	if err != nil {
		return "", err
	}
	// Fixed: the response body was never closed, leaking connections.
	defer resp.Body.Close()
	_, err = io.Copy(file, resp.Body)
	return dst, err
}

// getname returns name unchanged if no file exists at that path; otherwise
// it appends an increasing ".<n>" before the extension until the path is
// free ("a.jpg" -> "a.1.jpg" -> "a.2.jpg" ...).
func getname(name string) string {
	nname := name
	i := 1
	for {
		ext := path.Ext(name)
		_, err := os.Stat(name)
		if err != nil {
			// Path does not exist (or is not statable): use it.
			break
		}
		if ext != "" {
			nname = strings.Replace(nname, ext, "", -1)
		}
		name = fmt.Sprintf("%s.%d%s", nname, i, ext)
		i++
	}
	return name
}
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package inputs

import (
	"context"
	"time"

	"chromiumos/tast/ctxutil"
	"chromiumos/tast/local/bundles/cros/inputs/fixture"
	"chromiumos/tast/local/bundles/cros/inputs/pre"
	"chromiumos/tast/local/bundles/cros/inputs/testserver"
	"chromiumos/tast/local/bundles/cros/inputs/util"
	"chromiumos/tast/local/chrome/ime"
	"chromiumos/tast/local/chrome/uiauto"
	"chromiumos/tast/local/chrome/uiauto/faillog"
	"chromiumos/tast/local/chrome/uiauto/nodewith"
	"chromiumos/tast/local/chrome/uiauto/role"
	"chromiumos/tast/local/chrome/useractions"
	"chromiumos/tast/local/input"
	"chromiumos/tast/testing"
	"chromiumos/tast/testing/hwdep"
)

func init() {
	testing.AddTest(&testing.Test{
		Func:         PhysicalKeyboardLongpressDiacritics,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checks diacritics on long-press with physical keyboard typing",
		Contacts:     []string{"essential-inputs-gardener-oncall@google.com", "essential-inputs-team@google.com"},
		Attr:         []string{"group:mainline", "group:input-tools"},
		SoftwareDeps: []string{"chrome", "chrome_internal"},
		Timeout:      2 * time.Minute,
		SearchFlags:  util.IMESearchFlags([]ime.InputMethod{ime.EnglishUS}),
		Params: []testing.Param{
			{
				Fixture:           fixture.ClamshellNonVKWithDiacriticsOnPKLongpress,
				ExtraHardwareDeps: hwdep.D(pre.InputsStableModels),
				ExtraAttr:         []string{"group:input-tools-upstream"},
			},
			{
				Name:              "lacros",
				Fixture:           fixture.LacrosClamshellNonVKWithDiacriticsOnPKLongpress,
				ExtraSoftwareDeps: []string{"lacros_stable"},
				ExtraHardwareDeps: hwdep.D(pre.InputsStableModels),
				ExtraAttr:         []string{"informational"},
			},
		},
	})
}

// PhysicalKeyboardLongpressDiacritics verifies that long-pressing a key on a
// physical keyboard opens the diacritics suggestion window, and that each of
// the supported interactions (mouse click, arrow+enter, number key, escape)
// closes the window and leaves the expected text in the input field.
func PhysicalKeyboardLongpressDiacritics(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(fixture.FixtData).Chrome
	tconn := s.FixtValue().(fixture.FixtData).TestAPIConn
	uc := s.FixtValue().(fixture.FixtData).UserContext

	// Reserve time for cleanup actions that must run even on failure.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()

	// PK longpress diacritics only works in English(US).
	inputMethod := ime.EnglishUS
	if err := inputMethod.Activate(tconn)(ctx); err != nil {
		s.Fatal("Failed to set IME: ", err)
	}
	uc.SetAttribute(useractions.AttributeInputMethod, inputMethod.Name)

	its, err := testserver.LaunchBrowser(ctx, s.FixtValue().(fixture.FixtData).BrowserType, cr, tconn)
	if err != nil {
		s.Fatal("Failed to launch inputs test server: ", err)
	}
	defer its.CloseAll(cleanupCtx)
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree")

	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get keyboard: ", err)
	}
	defer kb.Close()

	const inputField = testserver.TextInputField
	const (
		// Long-pressing "a" should offer "à" in the suggestion window.
		longpressKeyChar = "a"
		diacritic        = "à"
	)

	candidateWindowFinder := nodewith.HasClass("SuggestionWindowView").Role(role.Window)
	suggestionCharFinder := nodewith.Name(diacritic).Ancestor(candidateWindowFinder).First()
	ui := uiauto.New(tconn)

	testCases := []struct {
		name     string
		scenario string
		// The actions occur while the suggestion window is open and should
		// result in the window being closed.
		actions      uiauto.Action
		expectedText string
	}{
		{
			name:         "left_click",
			scenario:     "PK longpress and left click to insert diacritics",
			actions:      ui.LeftClick(suggestionCharFinder),
			expectedText: diacritic,
		},
		{
			name:     "right_arrow_enter",
			scenario: "PK longpress and arrow key then enter to insert diacritics",
			actions: uiauto.Combine("right arrow then enter",
				kb.AccelAction("Right"),
				kb.AccelAction("Enter"),
			),
			expectedText: diacritic,
		},
		{
			name:         "number_key",
			scenario:     "PK longpress and number key to insert diacritics",
			actions:      kb.AccelAction("1"),
			expectedText: diacritic,
		},
		{
			// Dismissing leaves only the originally long-pressed character.
			name:         "esc_to_dismiss",
			scenario:     "PK longpress and esc to dismiss",
			actions:      kb.AccelAction("Esc"),
			expectedText: longpressKeyChar,
		},
	}

	for _, testcase := range testCases {
		util.RunSubTest(ctx, s, cr, testcase.name, uiauto.UserAction(testcase.scenario,
			uiauto.Combine(testcase.scenario,
				its.Clear(inputField),
				its.ClickFieldAndWaitForActive(inputField),
				// Simulate a held down key until window appears.
				kb.AccelPressAction(longpressKeyChar),
				ui.WaitUntilExists(candidateWindowFinder),
				kb.AccelReleaseAction(longpressKeyChar),
				testcase.actions,
				ui.WaitUntilGone(candidateWindowFinder),
				its.ValidateResult(inputField, testcase.expectedText),
			),
			uc,
			&useractions.UserActionCfg{
				Attributes: map[string]string{
					useractions.AttributeTestScenario: testcase.scenario,
					useractions.AttributeFeature:      useractions.FeatureLongpressDiacritics,
				},
			},
		))
	}
}
package servicegraph

import (
	"encoding/json"
	"math"
	"strings"

	"alauda.io/diablo/src/backend/integration/prometheus"
	"github.com/prometheus/common/model"
)

const (
	// Node types used in the Type field of the metrics structs.
	NODE_EDGE_TYPE     = "edge"
	NODE_WORKLOAD_TYPE = "workload"
	NODE_SERVICE_TYPE  = "service"

	// Response-time quantiles exposed by the service graph.
	Quantile50 = 0.50
	Quantile90 = 0.90
	Quantile99 = 0.99
)

// ServiceMetrics aggregates inbound traffic metrics for one service node
// over [StartTime, EndTime] at the given Step.
type ServiceMetrics struct {
	StartTime           int                            `json:"start_time"`
	EndTime             int                            `json:"end_time"`
	Step                int                            `json:"step"`
	Type                string                         `json:"type"`
	Namespace           string                         `json:"namespace"`
	Service             string                         `json:"service,omitempty"`
	Workloads           []string                       `json:"workloads"`
	RequestCount        RequestCount                   `json:"request_count"`
	RequestRate         []*TimeStampMetrics            `json:"request_rate,omitempty"`
	ErrorRate           []*TimeStampMetrics            `json:"error_rate,omitempty"`
	RequestResponseTime map[string][]*TimeStampMetrics `json:"request_response_time,omitempty"`
}

// WorkloadMetrics aggregates inbound and outbound traffic metrics for one
// workload node.
type WorkloadMetrics struct {
	StartTime           int                            `json:"start_time"`
	EndTime             int                            `json:"end_time"`
	Step                int                            `json:"step"`
	Type                string                         `json:"type"`
	Namespace           string                         `json:"namespace"`
	Services            []string                       `json:"services,omitempty"`
	Workload            string                         `json:"workload"`
	RequestCountIn      RequestCount                   `json:"request_count_in"`
	RequestCountOut     RequestCount                   `json:"request_count_out"`
	RequestRateIn       []*TimeStampMetrics            `json:"request_rate_in,omitempty"`
	RequestRateOut      []*TimeStampMetrics            `json:"request_rate_out,omitempty"`
	ErrorRateIn         []*TimeStampMetrics            `json:"error_rate_in,omitempty"`
	ErrorRateOut        []*TimeStampMetrics            `json:"error_rate_out,omitempty"`
	RequestResponseTime map[string][]*TimeStampMetrics `json:"request_response_time,omitempty"`
}

// EdgeMetrics aggregates traffic metrics for one source->target edge.
type EdgeMetrics struct {
	QueryOptions        *EdgeMetricsQueryOptions       `json:"query_options"`
	RequestCount        RequestCount                   `json:"request_count"`
	RequestRate         []*TimeStampMetrics            `json:"request_rate,omitempty"`
	ErrorRate           []*TimeStampMetrics            `json:"error_rate,omitempty"`
	RequestResponseTime map[string][]*TimeStampMetrics `json:"request_response_time,omitempty"`
}

// EdgeMetricsQueryOptions identifies the edge endpoints, the time range and
// the Prometheus endpoint (P8sURL, never serialized) to query.
type EdgeMetricsQueryOptions struct {
	SourceNamespace string `json:"source_namespace"`
	TargetNamespace string `json:"target_namespace"`
	SourceWorkload  string `json:"source_workload"`
	SourceService   string `json:"source_service"`
	TargetWorkload  string `json:"target_workload"`
	TargetService   string `json:"target_service"`
	StartTime       int    `json:"start_time"`
	EndTime         int    `json:"end_time"`
	Step            int    `json:"step"`
	MetricsType     string `json:"metrics_type"`
	P8sURL          string `json:"-"`
}

// RequestCount groups request totals by HTTP response-code class.
type RequestCount struct {
	Http_2xx int32 `json:"http_2xx"`
	Http_3xx int32 `json:"http_3xx"`
	Http_4xx int32 `json:"http_4xx"`
	Http_5xx int32 `json:"http_5xx"`
}

// TimeStampMetrics is a (timestamp, value) sample pair. The embedded int64
// is the unix timestamp and the embedded float64 is the sample value; the
// fields are accessed via the type names (ts.int64 / ts.float64).
type TimeStampMetrics struct {
	int64
	float64
}

// MarshalJSON implements json.Marshaler, encoding the pair as a two-element
// JSON array: [timestamp, value].
func (ts TimeStampMetrics) MarshalJSON() ([]byte, error) {
	return json.Marshal([]interface{}{ts.int64, ts.float64})
}

// GetServiceMetrics queries Prometheus at p8sURL and returns the inbound
// request count/rate, error rate and response-time series for the given
// service. On a query error the partially populated ServiceMetrics is
// returned together with the error.
func GetServiceMetrics(namespace, service, workload string, startTime, endTime, step int, p8sURL string) (*ServiceMetrics, error) {
	promClient, err := prometheus.NewClient(p8sURL)
	if err != nil {
		return nil, err
	}
	sm := &ServiceMetrics{StartTime: startTime, EndTime: endTime, Step: step, Service: service, Namespace: namespace, Type: NODE_SERVICE_TYPE}
	sm.Workloads = promClient.GetServiceWorkloads(service, namespace, startTime, endTime)
	rci, err := promClient.GetServiceRequestCountIn(service, workload, namespace, startTime, endTime)
	if err != nil {
		return sm, err
	}
	// Service nodes only have inbound traffic, so there is no "out" counter.
	sm.RequestCount = generateRequestCount(rci)
	rri, err := promClient.GetServiceRequestRateIn(service, workload, namespace, startTime, endTime, step)
	if err != nil {
		return sm, err
	}
	eri, err := promClient.GetServiceErrorRateIn(service, workload, namespace, startTime, endTime, step)
	if err != nil {
		return sm, err
	}
	populateServiceMetricsRequestRate(sm, rri, eri)
	histo := promClient.GetServiceRequestResponseTime(service, namespace, startTime, endTime, step)
	sm.RequestResponseTime = generateRequestResponseTime(histo, "")
	return sm, nil
}

// GetWorkloadMetrics queries Prometheus at p8sURL and returns the inbound
// and outbound request counts/rates, error rates and response-time series
// for the given workload. On a query error the partially populated
// WorkloadMetrics is returned together with the error.
func GetWorkloadMetrics(namespace, workload string, startTime, endTime, step int, p8sURL string) (*WorkloadMetrics, error) {
	promClient, err := prometheus.NewClient(p8sURL)
	if err != nil {
		return nil, err
	}
	wm := &WorkloadMetrics{StartTime: startTime, EndTime: endTime, Step: step, Namespace: namespace, Workload: workload, Type: NODE_WORKLOAD_TYPE}
	wm.Services = promClient.GetWorkloadServices(workload, namespace, startTime, endTime)
	rci, err := promClient.GetWorkloadRequestCountIn(workload, namespace, startTime, endTime)
	if err != nil {
		return wm, err
	}
	rco, err := promClient.GetWorkloadRequestCountOut(workload, namespace, startTime, endTime)
	if err != nil {
		return wm, err
	}
	populateWorkloadMetricsRequestCount(wm, rci, rco)
	rri, err := promClient.GetWorkloadRequestRateIn(workload, namespace, startTime, endTime, step)
	if err != nil {
		return wm, err
	}
	rro, err := promClient.GetWorkloadRequestRateOut(workload, namespace, startTime, endTime, step)
	if err != nil {
		return wm, err
	}
	eri, err := promClient.GetWorkloadErrorRateIn(workload, namespace, startTime, endTime, step)
	if err != nil {
		return wm, err
	}
	ero, err := promClient.GetWorkloadErrorRateOut(workload, namespace, startTime, endTime, step)
	if err != nil {
		return wm, err
	}
	populateWorkloadMetricsRequestRate(wm, rri, rro, eri, ero)
	histo := promClient.GetWorkloadRequestResponseTime(workload, namespace, startTime, endTime, step)
	wm.RequestResponseTime = generateRequestResponseTime(histo, "")
	return wm, nil
}

// GetEdgeMetrics queries Prometheus (endpoint taken from options.P8sURL) and
// returns the request count/rate, error rate and response-time series for
// the edge described by options. On a query error the partially populated
// EdgeMetrics is returned together with the error.
func GetEdgeMetrics(options *EdgeMetricsQueryOptions) (*EdgeMetrics, error) {
	promClient, err := prometheus.NewClient(options.P8sURL)
	if err != nil {
		return nil, err
	}
	queryLabels, queryErrorLabels := promClient.BuildEdgeQueryLabels(options.SourceWorkload, options.SourceNamespace, options.SourceService,
		options.TargetWorkload, options.TargetNamespace, options.TargetService)
	em := &EdgeMetrics{QueryOptions: options}
	rc, err := promClient.GetEdgeRequestCount(queryLabels, options.StartTime, options.EndTime)
	if err != nil {
		return em, err
	}
	em.RequestCount = generateRequestCount(rc)
	rr, err := promClient.GetEdgeRequestRate(queryLabels, options.StartTime, options.EndTime, options.Step)
	if err != nil {
		return em, err
	}
	// Only the first matching series is used for rate metrics.
	if len(rr) != 0 {
		em.RequestRate = generateRequestRate(rr[0].Values)
	}
	er, err := promClient.GetEdgeErrorRate(queryErrorLabels, options.StartTime, options.EndTime, options.Step)
	if err != nil {
		return em, err
	}
	if len(er) != 0 {
		em.ErrorRate = generateRequestRate(er[0].Values)
	}
	histo := promClient.GetEdgeRequestResponseTime(options.SourceWorkload, options.SourceNamespace, options.SourceService,
		options.TargetWorkload, options.TargetNamespace, options.TargetService, options.StartTime, options.EndTime, options.Step)
	em.RequestResponseTime = generateRequestResponseTime(histo, options.SourceWorkload)
	return em, nil
}

// generateRequestCount buckets a Prometheus vector of request totals by the
// first digit of the "response_code" label; samples without that label are
// skipped.
func generateRequestCount(requestCount model.Vector) RequestCount {
	r := RequestCount{}
	for _, s := range requestCount {
		m := s.Metric
		lCode, ok := m["response_code"]
		if !ok || len(lCode) == 0 {
			continue
		}
		code := string(lCode)
		switch {
		case strings.HasPrefix(code, "2"):
			r.Http_2xx += int32(s.Value)
		case strings.HasPrefix(code, "3"):
			r.Http_3xx += int32(s.Value)
		case strings.HasPrefix(code, "4"):
			r.Http_4xx += int32(s.Value)
		case strings.HasPrefix(code, "5"):
			r.Http_5xx += int32(s.Value)
		}
	}
	return r
}

// generateRequestRate converts Prometheus sample pairs into
// (unix-seconds, value) TimeStampMetrics pairs.
func generateRequestRate(requestRate []model.SamplePair) []*TimeStampMetrics {
	ts := make([]*TimeStampMetrics, 0, len(requestRate))
	for _, s := range requestRate {
		ts = append(ts, &TimeStampMetrics{s.Timestamp.Unix(), float64(s.Value)})
	}
	return ts
}

// populateWorkloadMetricsRequestCount fills the in/out request counters of wm.
func populateWorkloadMetricsRequestCount(wm *WorkloadMetrics, requestCountIn, requestCountOut model.Vector) {
	wm.RequestCountIn = generateRequestCount(requestCountIn)
	wm.RequestCountOut = generateRequestCount(requestCountOut)
}

// populateWorkloadMetricsRequestRate fills the in/out request and error rate
// series of wm from the first matching series of each matrix, if any.
func populateWorkloadMetricsRequestRate(wm *WorkloadMetrics, requestRateIn, requestRateOut, errorRateIn, errorRateOut model.Matrix) {
	if len(requestRateIn) != 0 {
		samples := requestRateIn[0].Values
		wm.RequestRateIn = generateRequestRate(samples)
	}
	if len(requestRateOut) != 0 {
		samples := requestRateOut[0].Values
		wm.RequestRateOut = generateRequestRate(samples)
	}
	if len(errorRateIn) != 0 {
		samples := errorRateIn[0].Values
		wm.ErrorRateIn = generateRequestRate(samples)
	}
	if len(errorRateOut) != 0 {
		samples := errorRateOut[0].Values
		wm.ErrorRateOut = generateRequestRate(samples)
	}
}

// populateServiceMetricsRequestRate fills the inbound request and error rate
// series of sm from the first matching series of each matrix, if any.
func populateServiceMetricsRequestRate(sm *ServiceMetrics, requestRateIn, errorRateIn model.Matrix) {
	if len(requestRateIn) != 0 {
		samples := requestRateIn[0].Values
		sm.RequestRate = generateRequestRate(samples)
	}
	if len(errorRateIn) != 0 {
		samples := errorRateIn[0].Values
		sm.ErrorRate = generateRequestRate(samples)
	}
}

// generateRequestResponseTime converts per-quantile histogram matrices into
// TimeStampMetrics series. When sourceWorkload is non-empty, only series
// whose "source_workload" label matches it are kept.
//
// NOTE(review): if several series match the same key, the last one wins —
// confirm whether the queries can return more than one matching series.
func generateRequestResponseTime(histo map[string]model.Matrix, sourceWorkload string) map[string][]*TimeStampMetrics {
	result := make(map[string][]*TimeStampMetrics)
	for k, m := range histo {
		for _, sample := range m {
			lb := sample.Metric
			if val, ok := lb["source_workload"]; ok && sourceWorkload != "" {
				if string(val) == sourceWorkload {
					result[k] = convertToTimeStampMetrics(sample.Values)
				}
			} else {
				result[k] = convertToTimeStampMetrics(sample.Values)
			}
		}
	}
	return result
}

// convertToTimeStampMetrics converts sample pairs into TimeStampMetrics,
// mapping NaN values to 0 so they serialize as valid JSON numbers.
func convertToTimeStampMetrics(requestRate []model.SamplePair) []*TimeStampMetrics {
	ts := make([]*TimeStampMetrics, 0, len(requestRate))
	for _, s := range requestRate {
		val := s.Value
		if math.IsNaN(float64(val)) {
			ts = append(ts, &TimeStampMetrics{s.Timestamp.Unix(), 0})
		} else {
			ts = append(ts, &TimeStampMetrics{s.Timestamp.Unix(), float64(s.Value)})
		}
	}
	return ts
}
// Copyright 2019 Liquidata, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package diff

import (
	"context"
	"strings"
	"testing"

	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"

	"github.com/liquidata-inc/dolt/go/libraries/doltcore/dtestutils"
	"github.com/liquidata-inc/dolt/go/libraries/doltcore/env"
	"github.com/liquidata-inc/dolt/go/libraries/doltcore/row"
	"github.com/liquidata-inc/dolt/go/libraries/doltcore/schema"
	"github.com/liquidata-inc/dolt/go/libraries/doltcore/schema/alterschema"
	"github.com/liquidata-inc/dolt/go/libraries/doltcore/sql"
)

// Identity used when writing the empty test repository.
const name = "Jeffery Williams"
const email = "meet.me@the.london"

// StringBuilderCloser adapts strings.Builder to the io.WriteCloser expected
// by PrintSqlTableDiffs; Close is a no-op.
type StringBuilderCloser struct {
	strings.Builder
}

func (*StringBuilderCloser) Close() error {
	return nil
}

// setupSchema creates a fresh in-memory dolt environment with the shared
// test schema and an empty repo, returning the context, schema and env.
func setupSchema() (context.Context, schema.Schema, *env.DoltEnv) {
	ctx := context.Background()
	sch := dtestutils.TypedSchema
	dEnv := dtestutils.CreateTestEnv()
	_ = dEnv.DoltDB.WriteEmptyRepo(ctx, name, email)
	return ctx, sch, dEnv
}

// strPointer returns a pointer to s (convenience for nullable columns).
func strPointer(s string) *string {
	return &s
}

// TestSqlTableDiffAdd verifies that adding a table is detected and printed
// as a CREATE TABLE statement.
// Note: setup errors are deliberately ignored with `_` throughout these
// tests; failures surface via the assertions.
func TestSqlTableDiffAdd(t *testing.T) {
	ctx, sch, dEnv := setupSchema()
	oldRoot, _ := dEnv.WorkingRoot(ctx)

	dtestutils.CreateTestTable(t, dEnv, "addTable", sch, []row.Row{}...)
	newRoot, _ := dEnv.WorkingRoot(ctx)

	a, _, rm, _ := newRoot.TableDiff(ctx, oldRoot)
	adds, removed, renamed, _ := findRenames(ctx, newRoot, oldRoot, a, rm)
	assert.Equal(t, []string{"addTable"}, adds)
	assert.Equal(t, []string{}, removed)
	assert.Equal(t, map[string]string{}, renamed)

	var stringWr StringBuilderCloser
	_ = PrintSqlTableDiffs(ctx, newRoot, oldRoot, &stringWr)
	expectedOutput := sql.SchemaAsCreateStmt("addTable", sch) + "\n"
	assert.Equal(t, expectedOutput, stringWr.String())
}

// TestSqlTableDiffAddThenInsert verifies that adding a table and a row in the
// same diff prints a CREATE TABLE followed by an INSERT.
func TestSqlTableDiffAddThenInsert(t *testing.T) {
	id := uuid.MustParse("00000000-0000-0000-0000-000000000000")
	ctx, sch, dEnv := setupSchema()
	oldRoot, _ := dEnv.WorkingRoot(ctx)

	dtestutils.CreateTestTable(t, dEnv, "addTable", sch, []row.Row{}...)
	r := dtestutils.NewTypedRow(id, "Big Billy", 77, false, strPointer("Doctor"))
	newRoot, _ := dEnv.WorkingRoot(ctx)
	newRoot, _ = dtestutils.AddRowToRoot(dEnv, ctx, newRoot, "addTable", r)

	a, _, rm, _ := newRoot.TableDiff(ctx, oldRoot)
	added, removed, renamed, _ := findRenames(ctx, newRoot, oldRoot, a, rm)
	assert.Equal(t, []string{"addTable"}, added)
	assert.Equal(t, []string{}, removed)
	assert.Equal(t, map[string]string{}, renamed)

	var stringWr StringBuilderCloser
	_ = PrintSqlTableDiffs(ctx, newRoot, oldRoot, &stringWr)
	expectedOutput := sql.SchemaAsCreateStmt("addTable", sch) + "\n"
	expectedOutput = expectedOutput +
		"INSERT INTO `addTable` (`id`,`name`,`age`,`is_married`,`title`) " +
		"VALUES (\"00000000-0000-0000-0000-000000000000\",\"Big Billy\",77,FALSE,\"Doctor\");\n"
	assert.Equal(t, expectedOutput, stringWr.String())
}

// TestSqlTableDiffsDrop verifies that removing a table prints a DROP TABLE.
func TestSqlTableDiffsDrop(t *testing.T) {
	ctx, sch, dEnv := setupSchema()
	dtestutils.CreateTestTable(t, dEnv, "dropTable", sch, []row.Row{}...)
	oldRoot, _ := dEnv.WorkingRoot(ctx)
	newRoot, _ := oldRoot.RemoveTables(ctx, []string{"dropTable"}...)

	a, _, rm, _ := newRoot.TableDiff(ctx, oldRoot)
	added, drops, renamed, _ := findRenames(ctx, newRoot, oldRoot, a, rm)
	assert.Equal(t, []string{"dropTable"}, drops)
	assert.Equal(t, []string{}, added)
	assert.Equal(t, map[string]string{}, renamed)

	var stringWr StringBuilderCloser
	_ = PrintSqlTableDiffs(ctx, newRoot, oldRoot, &stringWr)
	expectedOutput := "DROP TABLE `dropTable`;\n"
	assert.Equal(t, expectedOutput, stringWr.String())
}

// TestSqlTableDiffRename verifies that renaming an UNCHANGED table is
// detected as a rename and printed as RENAME TABLE.
func TestSqlTableDiffRename(t *testing.T) {
	ctx, sch, dEnv := setupSchema()
	dtestutils.CreateTestTable(t, dEnv, "renameTable", sch, []row.Row{}...)
	oldRoot, _ := dEnv.WorkingRoot(ctx)
	newRoot, _ := alterschema.RenameTable(ctx, oldRoot, "renameTable", "newTableName")

	a, _, rm, _ := newRoot.TableDiff(ctx, oldRoot)
	added, removed, renames, _ := findRenames(ctx, newRoot, oldRoot, a, rm)
	assert.Equal(t, map[string]string{"renameTable": "newTableName"}, renames)
	assert.Equal(t, []string{}, removed)
	assert.Equal(t, []string{}, added)

	expectedOutput := "RENAME TABLE `renameTable` TO `newTableName`;\n"
	var stringWr StringBuilderCloser
	_ = PrintSqlTableDiffs(ctx, newRoot, oldRoot, &stringWr)
	assert.Equal(t, expectedOutput, stringWr.String())
}

// TestSqlTableDiffRenameChangedTable verifies that renaming a table whose
// data ALSO changed is reported as a drop plus an add (not a rename), and
// printed as DROP TABLE + CREATE TABLE + INSERT.
func TestSqlTableDiffRenameChangedTable(t *testing.T) {
	id := uuid.MustParse("00000000-0000-0000-0000-000000000000")
	ctx, sch, dEnv := setupSchema()
	dtestutils.CreateTestTable(t, dEnv, "renameTable", sch, []row.Row{}...)
	oldRoot, _ := dEnv.WorkingRoot(ctx)
	newRoot, _ := alterschema.RenameTable(ctx, oldRoot, "renameTable", "newTableName")
	r := dtestutils.NewTypedRow(id, "Big Billy", 77, false, strPointer("Doctor"))
	newRoot, _ = dtestutils.AddRowToRoot(dEnv, ctx, newRoot, "newTableName", r)

	a, _, rm, _ := newRoot.TableDiff(ctx, oldRoot)
	added, removed, renamed, _ := findRenames(ctx, newRoot, oldRoot, a, rm)
	assert.Equal(t, []string{"renameTable"}, removed)
	assert.Equal(t, []string{"newTableName"}, added)
	assert.Equal(t, map[string]string{}, renamed)

	var stringWr StringBuilderCloser
	_ = PrintSqlTableDiffs(ctx, newRoot, oldRoot, &stringWr)
	expectedOutput := "DROP TABLE `renameTable`;\n"
	expectedOutput = expectedOutput +
		sql.SchemaAsCreateStmt("newTableName", sch) + "\n" +
		"INSERT INTO `newTableName` (`id`,`name`,`age`,`is_married`,`title`) " +
		"VALUES (\"00000000-0000-0000-0000-000000000000\",\"Big Billy\",77,FALSE,\"Doctor\");\n"
	assert.Equal(t, expectedOutput, stringWr.String())
}
package models

// ItemModel represents a single stock item as serialized by the API.
type ItemModel struct {
	ID int64 `json:"id"`
	// Fixed JSON key: the tag was `json:"string,omitempty"`, an apparent
	// copy-paste typo that serialized the SKU under the key "string".
	// NOTE(review): confirm no external client still relies on the old key.
	SKU      string `json:"sku,omitempty"`
	ItemName string `json:"item_name,omitempty"`
	Amount   int    `json:"amount,omitempty"`
}
package main

// Search models the envelope of a Genius API search response.
type Search struct {
	// Meta carries the status code reported by the API.
	Meta struct {
		Status int `json:"status"`
	} `json:"meta"`
	// Response holds the list of search hits.
	Response struct {
		Hits []Hit `json:"hits"`
	} `json:"response"`
}

// Hit is a single search result entry.
type Hit struct {
	Type   string `json:"type"`
	Result Result `json:"result"`
}

// Result describes the item found by a search hit.
type Result struct {
	PrimaryArtist     Artist `json:"primary_artist"`
	TitleWithFeatured string `json:"title_with_featured"`
	Url               string `json:"url"`
}

// Artist identifies a performing artist by name.
type Artist struct {
	Name string `json:"name"`
}
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)

// blackCard repeatedly removes one card from the hand until a single card
// remains, which is returned. Each round removes the card at position
// m % len(hand) (1-based); a result of 0 means the last card is removed.
func blackCard(cards []string, m int) string {
	for len(cards) > 1 {
		idx := m%len(cards) - 1
		switch idx {
		case -1:
			// m landed exactly on the hand size: drop the last card.
			cards = cards[:len(cards)-1]
		default:
			cards = append(cards[:idx], cards[idx+1:]...)
		}
	}
	return cards[0]
}

// main reads the input file named by the first CLI argument; each line has
// the form "<cards> | <m>" and produces one surviving card on stdout.
func main() {
	file, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	var m int
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		parts := strings.Split(scanner.Text(), " | ")
		fmt.Sscan(parts[1], &m)
		fmt.Println(blackCard(strings.Fields(parts[0]), m))
	}
}
package store import ( "context" "errors" "fmt" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ssm" "github.com/aws/aws-sdk-go/service/ssm/ssmiface" ) var ( ErrNoParameters = errors.New("No Parameters found") ) type ParamStore struct { client ssmiface.SSMAPI path string RequestRetries int RequestTimeout time.Duration } func New() *ParamStore { sess := session.Must(session.NewSessionWithOptions(session.Options{ Config: aws.Config{MaxRetries: aws.Int(2)}, })) svc := ssm.New(sess) ps := &ParamStore{ client: svc, RequestTimeout: time.Second, } return ps } func (ps *ParamStore) Retries(retries int) { ps.RequestRetries = retries } func (ps *ParamStore) Timeout(timeout time.Duration) { ps.RequestTimeout = timeout } func (ps *ParamStore) Param(key string, opts ...ParameterOptionFn) (*Parameter, error) { svc := ps.client ctx, cancel := context.WithTimeout(context.Background(), ps.RequestTimeout) defer cancel() out, err := svc.GetParameterWithContext(ctx, &ssm.GetParameterInput{ Name: aws.String(key), WithDecryption: aws.Bool(true), }) if err != nil { return nil, err } param := NewParameter( *out.Parameter.Name, *out.Parameter.Value, opts..., ) param.RefreshFn = func(p *Parameter) (interface{}, error) { refreshCtx, cancel := context.WithTimeout(context.Background(), ps.RequestTimeout) defer cancel() out, err := svc.GetParameterWithContext(refreshCtx, &ssm.GetParameterInput{ Name: aws.String(key), WithDecryption: aws.Bool(true), }) if err != nil { return nil, err } p.UpdateValue(out.Parameter.Value) return p.Value, nil } return param, nil } func (ps *ParamStore) ParamsByPath(p string, filters []*Filter, opts ...ParameterOptionFn) ([]*Parameter, error) { svc := ps.client ctx, cancel := context.WithTimeout(context.Background(), ps.RequestTimeout) defer cancel() paramFilters := []*ssm.ParameterStringFilter{} for _, f := range filters { paramFilters = append(paramFilters, f.AWSFilter()) } out, err := 
svc.GetParametersByPathWithContext(ctx, &ssm.GetParametersByPathInput{ Path: aws.String(p), WithDecryption: aws.Bool(true), }) if err != nil { return nil, err } if out.Parameters == nil || len(out.Parameters) == 0 { return nil, ErrNoParameters } params := []*Parameter{} for _, awsParam := range out.Parameters { param := NewParameter(*awsParam.Name, *awsParam.Value, nil) param.RefreshFn = func(p *Parameter) (interface{}, error) { refreshCtx, cancel := context.WithTimeout(context.Background(), ps.RequestTimeout) defer cancel() out, err := svc.GetParameterWithContext(refreshCtx, &ssm.GetParameterInput{ Name: aws.String(p.Name), WithDecryption: aws.Bool(true), }) if err != nil { return nil, err } p.UpdateValue(out.Parameter.Value) return p.Value, nil } params = append(params, param) } return params, nil } func Path(p ...string) string { return fmt.Sprintf("/%s", strings.Join(p, "/")) }
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//

package model

import (
	"encoding/json"
	"fmt"
	"io"
	"net/url"

	"github.com/pkg/errors"
)

const (
	// forceInstallationRestartEnvVar is the env var injected into a group's
	// environment to force pod restarts (see PatchGroupRequest.Apply below).
	forceInstallationRestartEnvVar = "CLOUD_PROVISIONER_ENFORCED_RESTART"

	// ShowInstallationCountQueryParameter the query parameter name for GET /groups in order to enable
	// or disable the installation count on the output.
	ShowInstallationCountQueryParameter = "show_installation_count"
)

// CreateGroupRequest specifies the parameters for a new group.
type CreateGroupRequest struct {
	Name            string
	Description     string
	Version         string
	Image           string
	MaxRolling      int64
	APISecurityLock bool
	MattermostEnv   EnvVarMap
	Annotations     []string
}

// Validate validates the values of a group create request: a name is
// required, MaxRolling must be non-negative, and the env var map must be
// valid per EnvVarMap.Validate.
func (request *CreateGroupRequest) Validate() error {
	if len(request.Name) == 0 {
		return errors.New("must specify name")
	}
	if request.MaxRolling < 0 {
		return errors.New("max rolling must be 0 or greater")
	}
	err := request.MattermostEnv.Validate()
	if err != nil {
		return errors.Wrapf(err, "bad environment variable map in create group request")
	}

	return nil
}

// NewCreateGroupRequestFromReader will create a CreateGroupRequest from an io.Reader with JSON data.
// An empty body (io.EOF) is tolerated; the decoded request is validated
// before being returned.
func NewCreateGroupRequestFromReader(reader io.Reader) (*CreateGroupRequest, error) {
	var createGroupRequest CreateGroupRequest
	err := json.NewDecoder(reader).Decode(&createGroupRequest)
	if err != nil && err != io.EOF {
		return nil, errors.Wrap(err, "failed to decode create group request")
	}

	err = createGroupRequest.Validate()
	if err != nil {
		return nil, errors.Wrap(err, "invalid group create request")
	}

	return &createGroupRequest, nil
}

// PatchGroupRequest specifies the parameters for an updated group.
type PatchGroupRequest struct { ID string MaxRolling *int64 Name *string Description *string Version *string Image *string MattermostEnv EnvVarMap ForceSequenceUpdate bool ForceInstallationsRestart bool } // Apply applies the patch to the given group. func (p *PatchGroupRequest) Apply(group *Group) bool { var applied bool if p.Name != nil && *p.Name != group.Name { applied = true group.Name = *p.Name } if p.Description != nil && *p.Description != group.Description { applied = true group.Description = *p.Description } if p.Version != nil && *p.Version != group.Version { applied = true group.Version = *p.Version } if p.Image != nil && *p.Image != group.Image { applied = true group.Image = *p.Image } if p.MaxRolling != nil && *p.MaxRolling != group.MaxRolling { applied = true group.MaxRolling = *p.MaxRolling } if p.MattermostEnv != nil { if group.MattermostEnv.ClearOrPatch(&p.MattermostEnv) { applied = true } } // This special value allows us to bump the group sequence number even when // the patch contains no group modifications. if p.ForceSequenceUpdate { applied = true } // Force restart of pods even if nothing have changed. This is done by // setting non-meaningful environment variable. // We keep it separate from ForceSequenceUpdate in case we want to run force // update that does not require restarting pods. if p.ForceInstallationsRestart { group.MattermostEnv[forceInstallationRestartEnvVar] = EnvVar{Value: fmt.Sprintf("force-restart-at-sequence-%d", group.Sequence)} applied = true } return applied } // Validate validates the values of a group patch request func (p *PatchGroupRequest) Validate() error { if p.Name != nil && len(*p.Name) == 0 { return errors.New("provided name update value was blank") } if p.MaxRolling != nil && *p.MaxRolling < 0 { return errors.New("max rolling must be 0 or greater") } // EnvVarMap validation is skipped as all configurations of this now imply // a specific patch action should be taken. 
return nil } // NewPatchGroupRequestFromReader will create a PatchGroupRequest from an io.Reader with JSON data. func NewPatchGroupRequestFromReader(reader io.Reader) (*PatchGroupRequest, error) { var patchGroupRequest PatchGroupRequest err := json.NewDecoder(reader).Decode(&patchGroupRequest) if err != nil && err != io.EOF { return nil, errors.Wrap(err, "failed to decode patch group request") } err = patchGroupRequest.Validate() if err != nil { return nil, errors.Wrap(err, "invalid patch group request") } return &patchGroupRequest, nil } // GetGroupsRequest describes the parameters to request a list of groups. type GetGroupsRequest struct { Paging WithInstallationCount bool } // ApplyToURL modifies the given url to include query string parameters for the request. func (request *GetGroupsRequest) ApplyToURL(u *url.URL) { q := u.Query() if request.WithInstallationCount { q.Add(ShowInstallationCountQueryParameter, "true") } request.Paging.AddToQuery(q) u.RawQuery = q.Encode() } // LeaveGroupRequest describes the parameters to leave a group. type LeaveGroupRequest struct { RetainConfig bool } // ApplyToURL modifies the given url to include query string parameters for the request. func (request *LeaveGroupRequest) ApplyToURL(u *url.URL) { q := u.Query() if !request.RetainConfig { q.Add("retain_config", "false") } u.RawQuery = q.Encode() }
package db import ( "github.com/astaxie/beego/orm" "intra-hub/models" ) func AddLog(user *models.User, action, table string, targetID int) error { l := &models.Log{ Action: action, Table: table, TargetID: targetID, User: user, } _, err := orm.NewOrm().Insert(l) return err }
package main

import (
// "fmt"
)

// ApiKeyDescriptor bundles the identifying fields of an API key pair and
// the host/project it belongs to (presumably mirroring a remote API key
// payload — confirm with the code that populates it).
type ApiKeyDescriptor struct {
	UUID         string
	Name         string
	Description  string
	AccessKey    string
	SecretKey    string
	Host         string
	GenAccessKey string
	GenSecretKey string
	ProjectId    string
}

// LocalAuthConfigDescriptor holds a locally stored authentication
// configuration: endpoint, key pair, user credentials and access mode.
type LocalAuthConfigDescriptor struct {
	UUID       string
	Host       string
	AccessKey  string
	SecretKey  string
	ProjectId  string
	Realname   string
	Username   string
	Password   string
	Enabled    bool
	Accessmode string
}
// Copyright 2020 The SwiftShader Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package llvm provides functions and types for locating and using the llvm // toolchains. package llvm import ( "bytes" "fmt" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "regexp" "runtime" "sort" "strconv" "swiftshader.googlesource.com/SwiftShader/tests/regres/util" ) const maxLLVMVersion = 10 // Version holds the build version information of an LLVM toolchain. type Version struct { Major, Minor, Point int } func (v Version) String() string { return fmt.Sprintf("%v.%v.%v", v.Major, v.Minor, v.Point) } // GreaterEqual returns true if v >= rhs. func (v Version) GreaterEqual(rhs Version) bool { if v.Major > rhs.Major { return true } if v.Major < rhs.Major { return false } if v.Minor > rhs.Minor { return true } if v.Minor < rhs.Minor { return false } return v.Point >= rhs.Point } // Download downloads and verifies the LLVM toolchain for the current OS. func (v Version) Download() ([]byte, error) { return v.DownloadForOS(runtime.GOOS) } // DownloadForOS downloads and verifies the LLVM toolchain for the given OS. 
func (v Version) DownloadForOS(osName string) ([]byte, error) {
	// Resolve the artifact URL plus the local signature/key files used to
	// verify the download.
	url, sig, key, err := v.DownloadInfoForOS(osName)
	if err != nil {
		return nil, err
	}
	// NOTE(review): http.Get has no timeout, so a stalled server blocks
	// here indefinitely.
	resp, err := http.Get(url)
	if err != nil {
		return nil, fmt.Errorf("Could not download LLVM from %v: %v", url, err)
	}
	defer resp.Body.Close()
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("Could not download LLVM from %v: %v", url, err)
	}
	sigfile, err := os.Open(sig)
	if err != nil {
		return nil, fmt.Errorf("Couldn't open file '%s': %v", sig, err)
	}
	defer sigfile.Close()
	keyfile, err := os.Open(key)
	if err != nil {
		return nil, fmt.Errorf("Couldn't open file '%s': %v", key, err)
	}
	defer keyfile.Close()
	// Verify the PGP signature of the downloaded archive before handing
	// its bytes back to the caller.
	if err := util.CheckPGP(bytes.NewReader(content), sigfile, keyfile); err != nil {
		return nil, err
	}
	return content, nil
}

// DownloadInfoForOS returns the download url, signature and key for the given
// LLVM version for the given OS. Only LLVM 10.0.0 on linux/darwin/windows
// is known; anything else yields an error.
func (v Version) DownloadInfoForOS(os string) (url, sig, key string, err error) {
	switch v {
	case Version{10, 0, 0}:
		key = relfile("10.0.0.pub.key")
		switch os {
		case "linux":
			url = "https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz"
			sig = relfile("10.0.0-ubuntu.sig")
			return
		case "darwin":
			url = "https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-apple-darwin.tar.xz"
			sig = relfile("10.0.0-darwin.sig")
			return
		case "windows":
			url = "https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/LLVM-10.0.0-win64.exe"
			sig = relfile("10.0.0-win64.sig")
			return
		default:
			return "", "", "", fmt.Errorf("Unsupported OS: %v", os)
		}
	default:
		return "", "", "", fmt.Errorf("Unknown download for LLVM %v", v)
	}
}

// relfile returns the path of a file that sits in the same directory as
// this source file, located via runtime.Caller.
func relfile(path string) string {
	_, thisFile, _, _ := runtime.Caller(1)
	thisDir := filepath.Dir(thisFile)
	return filepath.Join(thisDir, path)
}

// Toolchain holds the paths and version information about an LLVM toolchain.
type Toolchain struct {
	Version Version
	BinDir  string // directory containing the clang/llvm binaries
}

// Toolchains is a list of Toolchain
type Toolchains []Toolchain

// Find looks for a toolchain with the specific version.
func (l Toolchains) Find(v Version) *Toolchain {
	for _, t := range l {
		if t.Version == v {
			// Returning immediately, so taking the address of the loop
			// variable is safe here.
			return &t
		}
	}
	return nil
}

// FindAtLeast returns a toolchain whose version is at least v, or nil if
// none qualifies. When several qualify, the comparison keeps the *lowest*
// qualifying version.
// NOTE(review): the original comment claimed the highest found version is
// returned, which contradicts the comparison below — confirm intent.
func (l Toolchains) FindAtLeast(v Version) *Toolchain {
	out := (*Toolchain)(nil)
	for _, t := range l {
		if t.Version.GreaterEqual(v) && (out == nil || out.Version.GreaterEqual(t.Version)) {
			t := t // take a stable copy before storing its address
			out = &t
		}
	}
	return out
}

// Search looks for llvm toolchains in paths.
// If paths is empty, then PATH is searched.
func Search(paths ...string) Toolchains {
	// Deduplicate discovered toolchains by version.
	toolchains := map[Version]Toolchain{}

	// search probes either the explicit paths (directly and under bin/) or
	// the $PATH lookup of the named binary.
	search := func(name string) {
		if len(paths) > 0 {
			for _, path := range paths {
				if util.IsFile(path) {
					path = filepath.Dir(path) // a file was given; use its directory
				}
				if t := toolchain(path); t != nil {
					toolchains[t.Version] = *t
					continue
				}
				if t := toolchain(filepath.Join(path, "bin")); t != nil {
					toolchains[t.Version] = *t
					continue
				}
			}
		} else {
			path, err := exec.LookPath(name)
			if err == nil {
				if t := toolchain(filepath.Dir(path)); t != nil {
					toolchains[t.Version] = *t
				}
			}
		}
	}

	search("clang")
	// Probe versioned binary names as well.
	// NOTE(review): `i < maxLLVMVersion` stops at clang-9 even though
	// maxLLVMVersion is 10 — possible off-by-one; confirm.
	for i := 8; i < maxLLVMVersion; i++ {
		search(fmt.Sprintf("clang-%d", i))
	}

	out := make([]Toolchain, 0, len(toolchains))
	for _, t := range toolchains {
		out = append(out, t)
	}
	// Highest version first.
	sort.Slice(out, func(i, j int) bool { return out[i].Version.GreaterEqual(out[j].Version) })
	return out
}

// Clang returns the path to the clang executable.
func (t Toolchain) Clang() string {
	return filepath.Join(t.BinDir, "clang"+exeExt())
}

// ClangXX returns the path to the clang++ executable.
func (t Toolchain) ClangXX() string {
	return filepath.Join(t.BinDir, "clang++"+exeExt())
}

// Cov returns the path to the llvm-cov executable.
func (t Toolchain) Cov() string {
	name := "llvm-cov" + exeExt()
	return filepath.Join(t.BinDir, name)
}

// Profdata returns the path to the llvm-profdata executable.
func (t Toolchain) Profdata() string {
	name := "llvm-profdata" + exeExt()
	return filepath.Join(t.BinDir, name)
}

// toolchain probes dir for an LLVM toolchain, returning nil when the
// directory does not hold a usable one.
func toolchain(dir string) *Toolchain {
	t := Toolchain{BinDir: dir}
	if !t.resolve() {
		return nil
	}
	return &t
}

// resolve fills in t.Version from the binaries in t.BinDir and reports
// whether the directory holds a usable toolchain.
func (t *Toolchain) resolve() bool {
	// llvm-profdata doesn't have --version flag, so only its presence is
	// checked here.
	if !util.IsFile(t.Profdata()) {
		return false
	}
	var ok bool
	t.Version, ok = parseVersion(t.Cov())
	return ok
}

// exeExt returns the executable filename suffix for the host OS.
func exeExt() string {
	if runtime.GOOS == "windows" {
		return ".exe"
	}
	return ""
}

// versionRE extracts "major.minor.point" from clang/LLVM --version output.
var versionRE = regexp.MustCompile(`(?:clang|LLVM) version ([0-9]+)\.([0-9]+)\.([0-9]+)`)

// parseVersion runs `tool --version` and parses the reported version
// number, reporting false when the tool fails or the output is unparsable.
func parseVersion(tool string) (Version, bool) {
	raw, err := exec.Command(tool, "--version").Output()
	if err != nil {
		return Version{}, false
	}
	groups := versionRE.FindStringSubmatch(string(raw))
	if len(groups) < 4 {
		return Version{}, false
	}
	var v Version
	var errMajor, errMinor, errPoint error
	v.Major, errMajor = strconv.Atoi(groups[1])
	v.Minor, errMinor = strconv.Atoi(groups[2])
	v.Point, errPoint = strconv.Atoi(groups[3])
	if errMajor != nil || errMinor != nil || errPoint != nil {
		return Version{}, false
	}
	return v, true
}
package models

import (
	"time"

	_ "github.com/jinzhu/gorm/dialects/mysql"
)

// Shortener maps one shortened key to its original long URL, together
// with expiry bookkeeping and the source/store it was created for.
type Shortener struct {
	CreatedAt  time.Time  `gorm:"column:created_at; type:datetime"`
	ExpireDay  int        `gorm:"column:expire_day; type:int(11); default:'365'" `
	ExpiredAt  *time.Time `gorm:"column:expired_at; type:datetime"`
	ID         int        `gorm:"column:id; type:int(11) AUTO_INCREMENT"`
	LongURL    string     `gorm:"column:long_url; type:varchar(255)"`
	ShortenKey string     `gorm:"column:shorten_key; type:varchar(255)"`
	SourceID   int        `gorm:"column:source_id; type:int(11)"`
	SourceType string     `gorm:"column:source_type; type:varchar(191)"`
	StoreID    int        `gorm:"column:store_id; type:int(11)"`
	UpdatedAt  time.Time  `gorm:"column:updated_at; type:datetime"`
}

// TableName overrides GORM's default table name for this model.
func (s *Shortener) TableName() string {
	return "shorteners"
}
package main import ( "strings" "testing" ) func TestCreateConfigString(t *testing.T) { expected := true testString := createConfigString("cookies", "cake", "candies") actual := strings.Contains(testString, "cookies") if actual != expected { t.Fail() } actual = strings.Contains(testString, "cake") if actual != expected { t.Fail() } actual = strings.Contains(testString, "candies") if actual != expected { t.Fail() } } func TestDoesFileExist(t *testing.T) { expected := false actual := doesFileExist("./cookies") if actual != expected { t.Fail() } expected = true actual = doesFileExist("./main.go") if actual != expected { t.Fail() } } func TestGetCredPathString(t *testing.T) { expected := "./.gogit/creds.json" actual := GetCredPathString(".") if actual != expected { t.Fail() } }
package main

import (
	"encoding/json"
	"fmt"
)

// Project demonstrates JSON struct tags; Docs uses omitempty so it is
// dropped from the output when empty.
type Project struct {
	Name string `json:"name"`
	Url  string `json:"url"`
	Docs string `json:"docs,omitempty"`
}

func main() {
	p1 := Project{
		Name: "CleverGo高性能框架",
		Url:  "https://github.com/headwindfly/clevergo",
	}
	data, err := json.Marshal(p1)
	if err != nil {
		panic(err)
	}
	// p1 leaves Docs unset, so the printed JSON omits the docs field.
	fmt.Printf("%s\n", data)
	p2 := Project{
		Name: "CleverGo高性能框架",
		Url:  "https://github.com/headwindfly/clevergo",
		Docs: "https://github.com/headwindfly/clevergo/tree/master/docs",
	}
	data2, err := json.Marshal(p2)
	if err != nil {
		panic(err)
	}
	// p2 sets every field, so all of them are printed.
	fmt.Printf("%s\n", data2)
}
package main

import (
	"os"

	"github.com/typical-go/typical-go/pkg/typgo"
	"github.com/typical-go/typical-go/pkg/typrls"
)

// descriptor declares the build-tool tasks for the typical-go project itself.
var descriptor = typgo.Descriptor{
	ProjectName:    "typical-go",
	ProjectVersion: "0.11.7",

	Tasks: []typgo.Tasker{
		// compile
		&typgo.GoBuild{MainPackage: "."},
		// run
		&typgo.RunBinary{
			Before: typgo.TaskNames{"build"},
		},
		// test
		&typgo.GoTest{
			Includes: []string{"internal/**", "pkg/**"},
		},
		// test-examples
		&typgo.Task{
			Name:    "test-examples",
			Aliases: []string{"e"},
			Usage:   "Test all example",
			Action: &typgo.Command{
				Name:   "go",
				Args:   []string{"test", "./examples/..."},
				Stdout: os.Stdout,
				Stderr: os.Stderr,
			},
		},
		// test-setup
		&typgo.Task{
			Name:  "test-setup",
			Usage: "test setup command",
			Action: typgo.NewAction(func(c *typgo.Context) error {
				// Start from a clean slate, then scaffold a fresh example
				// project with the freshly built binary.
				os.RemoveAll("examples/my-project")
				err := c.ExecuteCommand(&typgo.Command{
					Name:   "../bin/typical-go",
					Args:   []string{"setup", "-new", "-go-mod", "-project-pkg=github.com/typical-go/typical-go/examples/my-project"},
					Dir:    "examples",
					Stdout: os.Stdout,
					Stderr: os.Stderr,
				})
				if err != nil {
					return err
				}
				// Remove the generated module files before running the
				// scaffolded project via its wrapper script.
				os.RemoveAll("examples/my-project/go.mod")
				os.RemoveAll("examples/my-project/go.sum")
				return c.ExecuteCommand(&typgo.Command{
					Name:   "./typicalw",
					Args:   []string{"run"},
					Dir:    "examples/my-project",
					Stdout: os.Stdout,
					Stderr: os.Stderr,
				})
			}),
		},
		// test-all (the original comment here said "test-setup", which
		// mislabelled this task)
		&typgo.Task{
			Name:   "test-all",
			Usage:  "test project, test examples and test setup command",
			Action: typgo.TaskNames{"test", "build", "test-examples", "test-setup"},
		},
		// release
		&typrls.ReleaseProject{
			Before:    typgo.TaskNames{"test-all"},
			Publisher: &typrls.Github{Owner: "typical-go", Repo: "typical-go"},
		},
	},
}

func main() {
	typgo.Start(&descriptor)
}
package clipper

import (
	"net"
	"log"
	"io"
	"encoding/binary"
	"sync"
	"time"
	"encoding/json"
	"strings"
	"fmt"
)

// _startTime anchors the relative timestamps stored in clipperInfo.time.
var _startTime time.Time

// clipperInfo records the latest clipboard publication of one peer.
type clipperInfo struct {
	path string  // data reported by the peer (from reqSetClipperInfo.Data)
	time float64 // seconds since _startTime when the info was received
	port uint32  // port the peer reported alongside the info
}

// master accepts peer connections and tracks each peer's latest
// clipboard info, handing out sequential ports on request.
type master struct {
	connections  []net.Conn
	lastCopyConn net.Conn
	mutex        sync.Mutex // guards connections and info
	info         map[net.Conn]clipperInfo

	currentPort uint32
	portMutex   sync.Mutex // guards currentPort
}

func init() {
	_startTime = time.Now()
}

// NewMaster returns a master whose port assignment starts at 8686.
func NewMaster() *master {
	return &master{
		info:        make(map[net.Conn]clipperInfo),
		currentPort: 8686,
	}
}

// StartUp listens on :8686 and serves each accepted connection on its
// own goroutine; it blocks for the life of the listener.
// NOTE(review): log.Fatalln on an Accept error terminates the whole
// process — confirm this is intended.
func (m *master) StartUp() {
	l, err := net.Listen("tcp", ":8686")
	if err != nil {
		log.Fatalln(err)
	}
	defer l.Close()
	for {
		c, err := l.Accept()
		if err != nil {
			log.Fatalln(err)
		}
		go m.handleConnection(c)
	}
}

// handleConnection reads framed messages from c until a read/decode
// error occurs. Wire format: 4-byte little-endian length prefix followed
// by a JSON body whose MsgID selects the handler.
func (m *master) handleConnection(c net.Conn) {
	msgLenBuf := make([]byte, 4)
	for {
		_, err := io.ReadFull(c, msgLenBuf)
		if err != nil {
			log.Println(err)
			break
		}
		bytes := make([]byte, binary.LittleEndian.Uint32(msgLenBuf))
		_, err = io.ReadFull(c, bytes)
		if err != nil {
			log.Println(err)
			break
		}
		req := commonReq{}
		err = json.Unmarshal(bytes, &req)
		if err != nil {
			log.Println(err)
			break
		}
		log.Println("master: msgID=", req.MsgID, " remoteAddr=", c.RemoteAddr())
		switch msgType(req.MsgID) {
		case MSG_REGISTER:
			m.handleMsgRegister(c, bytes)
		case MSG_SET_CLIPPER_INFO:
			m.handleMsgSetClipperInfo(c, bytes)
		case MSG_GET_CLIPPER_INFO:
			m.handleMsgGetClipperInfo(c, bytes)
		case MSG_REQUEST_ASSIGN_PORT:
			m.handleMsgRequestAssignPort(c, bytes)
		default:
			// NOTE(review): an unknown message ID kills the entire
			// process — consider logging and dropping the connection.
			log.Fatalln("master: error msg type...", req.MsgID)
		}
	}
}

// handleMsgRegister adds c to the connection list at most once.
func (m *master) handleMsgRegister(c net.Conn, bytes []byte) {
	m.mutex.Lock()
	exist := false
	for _, v := range m.connections {
		if v == c {
			exist = true
			break
		}
	}
	if !exist {
		m.connections = append(m.connections, c)
	}
	m.mutex.Unlock()
}

// handleMsgSetClipperInfo records the peer's latest clipboard info under
// the lock, stamped with a relative time used to find the most recent
// publisher.
func (m *master) handleMsgSetClipperInfo(c net.Conn, bytes []byte) {
	req := reqSetClipperInfo{}
	json.Unmarshal(bytes, &req)
	m.mutex.Lock()
	m.info[c] = clipperInfo{
		path: req.Data,
		time: time.Since(_startTime).Seconds(),
		port: req.Port,
	}
	m.mutex.Unlock()
}
func (m *master) handleMsgGetClipperInfo(conn net.Conn, bytes []byte) { var tempTime float64 addr := "" var info clipperInfo for c, v := range m.info { if v.time > tempTime { tempTime = v.time addr = c.RemoteAddr().String() info = v } } split := strings.Split(addr, ":") fixedAddr := fmt.Sprintf("%s:%d", split[0], info.port) respBytes, _ := json.Marshal(&respGetClipperInfo{ Path: info.path, Addr: fixedAddr, }) conn.Write(respBytes) } func (m *master) handleMsgRequestAssignPort(conn net.Conn, bytes[]byte) { m.portMutex.Lock() m.currentPort++ respBytes, _ := json.Marshal(&respAssignPort{ Port: m.currentPort, }) m.portMutex.Unlock() conn.Write(respBytes) }
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package wireguard_test

import (
	"github.com/projectcalico/calico/felix/logutils"
	. "github.com/projectcalico/calico/felix/wireguard"

	"errors"
	"fmt"
	"net"
	"syscall"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	log "github.com/sirupsen/logrus"
	"github.com/vishvananda/netlink"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"

	"github.com/projectcalico/calico/felix/ifacemonitor"
	"github.com/projectcalico/calico/felix/ip"
	mocknetlink "github.com/projectcalico/calico/felix/netlinkshim/mocknetlink"
	"github.com/projectcalico/calico/felix/timeshim/mocktime"
)

// Shared fixtures for the wireguard tests: interface/host identifiers,
// wireguard configuration constants, and the peer IPs/CIDRs used across
// the specs below.
var (
	zeroKey            = wgtypes.Key{}
	ifaceName          = "wireguard-if"
	hostname           = "my-host"
	peer1              = "peer1"
	peer2              = "peer2"
	peer3              = "peer3"
	peer4              = "peer4"
	FelixRouteProtocol = netlink.RouteProtocol(syscall.RTPROT_BOOT)

	tableIndex    = 99
	rulePriority  = 98
	firewallMark  = 10
	listeningPort = 1000
	mtu           = 2000

	ipv4_int1 = ip.FromString("192.168.0.0")
	ipv4_int2 = ip.FromString("192.168.10.0")

	ipv4_host    = ip.FromString("1.2.3.0")
	ipv4_peer1   = ip.FromString("1.2.3.5")
	ipv4_peer2   = ip.FromString("1.2.3.6")
	ipv4_peer2_2 = ip.FromString("1.2.3.7")
	ipv4_peer3   = ip.FromString("10.10.20.20")
	ipv4_peer4   = ip.FromString("10.10.20.30")

	cidr_local = ip.MustParseCIDROrIP("192.180.0.0/30")
	cidr_1     = ip.MustParseCIDROrIP("192.168.1.0/24")
	cidr_2     = ip.MustParseCIDROrIP("192.168.2.0/24")
	cidr_3     = ip.MustParseCIDROrIP("192.168.3.0/24")
	cidr_4     = ip.MustParseCIDROrIP("192.168.4.0/26")
	cidr_5     = ip.MustParseCIDROrIP("192.168.5.0/26")
	cidr_6     = ip.MustParseCIDROrIP("192.168.6.0/32") // Single IP
	ipnet_1    = cidr_1.ToIPNet()
	ipnet_2    = cidr_2.ToIPNet()
	ipnet_3    = cidr_3.ToIPNet()
	ipnet_4    = cidr_4.ToIPNet()
	//ipnet_6 = cidr_6.ToIPNet()
	// Route keys combine the routing table index with the CIDR.
	routekey_cidr_local = fmt.Sprintf("%d-%s", tableIndex, cidr_local)
	//routekey_1 = fmt.Sprintf("%d-%s", tableIndex, cidr_1)
	//routekey_2 = fmt.Sprintf("%d-%s", tableIndex, cidr_2)
	//routekey_3 = fmt.Sprintf("%d-%s", tableIndex, cidr_3)
	routekey_4 = fmt.Sprintf("%d-%s", tableIndex, cidr_4)
	routekey_6 = fmt.Sprintf("%d-%s", tableIndex, cidr_6)
)

// mustGeneratePrivateKey returns a fresh wireguard private key, failing
// the current spec on error.
func mustGeneratePrivateKey() wgtypes.Key {
	key, err := wgtypes.GeneratePrivateKey()
	Expect(err).ToNot(HaveOccurred())
	return key
}

// applyWithErrors drives Wireguard.Apply while tolerating a bounded
// number of failures.
type applyWithErrors struct {
	numExpected int
	errors      []error
	wg          *Wireguard
}

func newApplyWithErrors(wg *Wireguard, numExpected int) *applyWithErrors {
	return &applyWithErrors{wg: wg, numExpected: numExpected}
}

// Apply retries wg.Apply until it succeeds or more than numExpected
// errors have been collected; in the failure case the last error is
// returned.
func (a *applyWithErrors) Apply() error {
	for {
		err := a.wg.Apply()
		if err == nil {
			log.Debug("Successfully applied")
			return nil
		}
		log.WithError(err).Debug("Failed to apply")
		a.errors = append(a.errors, err)
		a.numExpected--
		if a.numExpected < 0 {
			log.Error("Hit failure limit")
			return err
		}
	}
}

// LastError returns the most recently collected error, or nil when none
// have occurred.
func (a *applyWithErrors) LastError() error {
	if len(a.errors) == 0 {
		return nil
	}
	return a.errors[len(a.errors)-1]
}

// mockCallbacks records invocations of the status and writeProcSys
// callbacks handed to the Wireguard under test, with optional canned
// errors to simulate failures.
type mockCallbacks struct {
	numStatusCallbacks int
	statusErr          error
	statusKey          wgtypes.Key

	numProcSysCallbacks int
	procSysPath         string
	procSysValue        string
	procSysErr          error
}

// status counts invocations and, unless a canned error is configured,
// captures the reported public key.
func (m *mockCallbacks) status(publicKey wgtypes.Key) error {
	log.Debugf("Status update with public key: %s", publicKey)
	m.numStatusCallbacks++
	if m.statusErr != nil {
		return m.statusErr
	}
	m.statusKey = publicKey
	log.Debugf("Num callbacks: %d", m.numStatusCallbacks)
	return nil
}

// writeProcSys counts invocations and, unless a canned error is
// configured, captures the last path/value pair written.
func (m *mockCallbacks) writeProcSys(path, value string) error {
	m.numProcSysCallbacks++
	if m.procSysErr != nil {
		return m.procSysErr
	}
	m.procSysPath = path
m.procSysValue = value return nil } var _ = Describe("Enable wireguard", func() { var wgDataplane, rtDataplane, rrDataplane *mocknetlink.MockNetlinkDataplane var t *mocktime.MockTime var s *mockCallbacks var wg *Wireguard var rule *netlink.Rule BeforeEach(func() { wgDataplane = mocknetlink.New() rtDataplane = mocknetlink.New() rrDataplane = mocknetlink.New() t = mocktime.New() s = &mockCallbacks{} // Setting an auto-increment greater than the route cleanup delay effectively // disables the grace period for these tests. t.SetAutoIncrement(11 * time.Second) wg = NewWithShims( hostname, &Config{ Enabled: true, ListeningPort: listeningPort, FirewallMark: firewallMark, RoutingRulePriority: rulePriority, RoutingTableIndex: tableIndex, InterfaceName: ifaceName, MTU: mtu, EncryptHostTraffic: true, }, rtDataplane.NewMockNetlink, rrDataplane.NewMockNetlink, wgDataplane.NewMockNetlink, wgDataplane.NewMockWireguard, 10*time.Second, t, FelixRouteProtocol, s.status, s.writeProcSys, logutils.NewSummarizer("test loop"), ) rule = netlink.NewRule() rule.Family = netlink.FAMILY_V4 rule.Priority = rulePriority rule.Table = tableIndex rule.Invert = true rule.Mark = firewallMark rule.Mask = firewallMark }) It("should be constructable", func() { Expect(wg).ToNot(BeNil()) }) Describe("create the wireguard link", func() { BeforeEach(func() { err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should configure the link but wait for link to be active", func() { Expect(wgDataplane.NumLinkAddCalls).To(Equal(1)) Expect(wgDataplane.AddedLinks).To(HaveKey(ifaceName)) Expect(wgDataplane.NameToLink[ifaceName].LinkType).To(Equal("wireguard")) Expect(wgDataplane.NameToLink[ifaceName].LinkAttrs.MTU).To(Equal(2000)) Expect(wgDataplane.NumLinkAddCalls).To(Equal(1)) Expect(wgDataplane.WireguardOpen).To(BeFalse()) }) It("another apply will no-op until link is active", func() { // Apply, but still not iface update wgDataplane.ResetDeltas() err := wg.Apply() Expect(err).NotTo(HaveOccurred()) 
Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.WireguardOpen).To(BeFalse()) }) It("no op after a link down callback", func() { // Iface update indicating down. wgDataplane.ResetDeltas() wg.OnIfaceStateChanged(ifaceName, ifacemonitor.StateDown) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.WireguardOpen).To(BeFalse()) }) It("no op for an interface callback for non-wg interface (same prefix)", func() { // Iface update indicating up. wgDataplane.ResetDeltas() wgDataplane.AddIface(1919, ifaceName+".foobar", true, true) wg.OnIfaceStateChanged(ifaceName+".foobar", ifacemonitor.StateUp) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.WireguardOpen).To(BeFalse()) }) It("should handle status update raising an error", func() { wgDataplane.SetIface(ifaceName, true, true) wg.OnIfaceStateChanged(ifaceName, ifacemonitor.StateUp) s.statusErr = errors.New("foobarbaz") err := wg.Apply() Expect(err).To(HaveOccurred()) Expect(err).To(Equal(s.statusErr)) }) Describe("set the link up", func() { BeforeEach(func() { wgDataplane.SetIface(ifaceName, true, true) wg.OnIfaceStateChanged(ifaceName, ifacemonitor.StateUp) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should create wireguard client and create private key", func() { Expect(wgDataplane.NumLinkAddCalls).To(Equal(1)) Expect(wgDataplane.WireguardOpen).To(BeTrue()) link := wgDataplane.NameToLink[ifaceName] Expect(link.WireguardFirewallMark).To(Equal(10)) Expect(link.WireguardListenPort).To(Equal(listeningPort)) Expect(link.WireguardPrivateKey).NotTo(Equal(zeroKey)) Expect(link.WireguardPrivateKey.PublicKey()).To(Equal(link.WireguardPublicKey)) Expect(s.numStatusCallbacks).To(Equal(1)) Expect(s.statusKey).To(Equal(link.WireguardPublicKey)) }) It("should add the routing rule when wireguard device is configured", func() { wgDataplane.ResetDeltas() err := 
wg.Apply() Expect(err).ToNot(HaveOccurred()) Expect(rrDataplane.AddedRules).To(HaveLen(1)) Expect(rrDataplane.DeletedRules).To(HaveLen(0)) Expect(rrDataplane.AddedRules).To(ConsistOf(*rule)) }) It("should delete invalid rules jumping to the wireguard table", func() { incorrectRule := netlink.NewRule() incorrectRule.Family = 2 incorrectRule.Priority = rulePriority + 10 incorrectRule.Table = tableIndex incorrectRule.Mark = firewallMark + 10 incorrectRule.Invert = false err := rrDataplane.RuleAdd(incorrectRule) Expect(err).ToNot(HaveOccurred()) rrDataplane.ResetDeltas() wg.QueueResync() err = wg.Apply() Expect(err).ToNot(HaveOccurred()) Expect(rrDataplane.AddedRules).To(HaveLen(0)) Expect(rrDataplane.DeletedRules).To(ConsistOf(*incorrectRule)) }) It("after endpoint update with incorrect key should program the interface address and resend same key as status", func() { link := wgDataplane.NameToLink[ifaceName] Expect(link.WireguardPrivateKey).NotTo(Equal(zeroKey)) Expect(s.numStatusCallbacks).To(Equal(1)) key := link.WireguardPrivateKey Expect(s.statusKey).To(Equal(key.PublicKey())) ipv4 := ip.FromString("1.2.3.4") wg.EndpointWireguardUpdate(hostname, zeroKey, ipv4) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) link = wgDataplane.NameToLink[ifaceName] Expect(link.Addrs).To(HaveLen(1)) Expect(link.Addrs[0].IP).To(Equal(ipv4.AsNetIP())) Expect(wgDataplane.WireguardOpen).To(BeTrue()) Expect(link.WireguardFirewallMark).To(Equal(10)) Expect(link.WireguardListenPort).To(Equal(listeningPort)) Expect(link.WireguardPrivateKey).To(Equal(key)) Expect(link.WireguardPrivateKey.PublicKey()).To(Equal(link.WireguardPublicKey)) Expect(s.numStatusCallbacks).To(Equal(2)) Expect(s.statusKey).To(Equal(key.PublicKey())) }) It("after endpoint update with correct key should program the interface address and not send another status update", func() { link := wgDataplane.NameToLink[ifaceName] Expect(link.WireguardPrivateKey).NotTo(Equal(zeroKey)) Expect(s.numStatusCallbacks).To(Equal(1)) 
key := link.WireguardPrivateKey ipv4 := ip.FromString("1.2.3.4") wg.EndpointWireguardUpdate(hostname, key.PublicKey(), ipv4) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) link = wgDataplane.NameToLink[ifaceName] Expect(link.Addrs).To(HaveLen(1)) Expect(link.Addrs[0].IP).To(Equal(ipv4.AsNetIP())) Expect(wgDataplane.WireguardOpen).To(BeTrue()) Expect(link.WireguardFirewallMark).To(Equal(10)) Expect(link.WireguardListenPort).To(Equal(listeningPort)) Expect(link.WireguardPrivateKey).To(Equal(key)) Expect(link.WireguardPrivateKey.PublicKey()).To(Equal(link.WireguardPublicKey)) Expect(s.numStatusCallbacks).To(Equal(1)) }) It("will use node IP on EndpointUpdate when interface is not specified on previous EndpointWireguardUpdate", func() { link := wgDataplane.NameToLink[ifaceName] Expect(link.WireguardPrivateKey).NotTo(Equal(zeroKey)) Expect(s.numStatusCallbacks).To(Equal(1)) key := link.WireguardPrivateKey ipv4 := ip.FromString("1.2.3.4") wg.EndpointWireguardUpdate(hostname, key.PublicKey(), nil) wg.EndpointUpdate(hostname, ipv4) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) link = wgDataplane.NameToLink[ifaceName] Expect(link.Addrs).To(HaveLen(1)) Expect(link.Addrs[0].IP).To(Equal(ipv4.AsNetIP())) Expect(wgDataplane.WireguardOpen).To(BeTrue()) Expect(link.WireguardFirewallMark).To(Equal(10)) Expect(link.WireguardListenPort).To(Equal(listeningPort)) Expect(link.WireguardPrivateKey).To(Equal(key)) Expect(link.WireguardPrivateKey.PublicKey()).To(Equal(link.WireguardPublicKey)) Expect(s.numStatusCallbacks).To(Equal(1)) }) It("will use node IP from previous EndpointUpdate when interface is not specified on EndpointWireguardUpdate", func() { link := wgDataplane.NameToLink[ifaceName] Expect(link.WireguardPrivateKey).NotTo(Equal(zeroKey)) Expect(s.numStatusCallbacks).To(Equal(1)) key := link.WireguardPrivateKey // Basically the same test as before but calls are reveresed. 
ipv4 := ip.FromString("1.2.3.4") wg.EndpointUpdate(hostname, ipv4) wg.EndpointWireguardUpdate(hostname, key.PublicKey(), nil) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) link = wgDataplane.NameToLink[ifaceName] Expect(link.Addrs).To(HaveLen(1)) Expect(link.Addrs[0].IP).To(Equal(ipv4.AsNetIP())) Expect(wgDataplane.WireguardOpen).To(BeTrue()) Expect(link.WireguardFirewallMark).To(Equal(10)) Expect(link.WireguardListenPort).To(Equal(listeningPort)) Expect(link.WireguardPrivateKey).To(Equal(key)) Expect(link.WireguardPrivateKey.PublicKey()).To(Equal(link.WireguardPublicKey)) Expect(s.numStatusCallbacks).To(Equal(1)) }) Describe("add local routes with overlap", func() { var lc1, lc2, lc3 ip.CIDR BeforeEach(func() { lc1 = ip.MustParseCIDROrIP("12.12.10.10/32") lc2 = ip.MustParseCIDROrIP("12.12.10.0/24") lc3 = ip.MustParseCIDROrIP("12.12.11.0/32") wg.RouteUpdate(hostname, lc1) wg.RouteUpdate(hostname, lc2) wg.RouteUpdate(hostname, lc3) err := wg.Apply() Expect(err).ToNot(HaveOccurred()) }) It("should create the rule when routing config is updated", func() { Expect(rrDataplane.DeletedRules).To(HaveLen(0)) Expect(rrDataplane.AddedRules).To(ConsistOf(*rule)) }) It("should not re-add a deleted rule until resync", func() { err := rrDataplane.RuleDel(rule) Expect(err).ToNot(HaveOccurred()) rrDataplane.ResetDeltas() err = wg.Apply() Expect(err).ToNot(HaveOccurred()) Expect(rrDataplane.AddedRules).To(HaveLen(0)) Expect(rrDataplane.DeletedRules).To(HaveLen(0)) wg.QueueResync() err = wg.Apply() Expect(err).ToNot(HaveOccurred()) Expect(rrDataplane.DeletedRules).To(HaveLen(0)) Expect(rrDataplane.AddedRules).To(ConsistOf(*rule)) }) It("should not fix a modified rule until resync", func() { badrule := netlink.NewRule() badrule.Family = netlink.FAMILY_V4 badrule.Priority = rulePriority + 1 badrule.Table = tableIndex badrule.Mark = 0 badrule.Mask = firewallMark err := rrDataplane.RuleDel(rule) Expect(err).ToNot(HaveOccurred()) err = rrDataplane.RuleAdd(badrule) 
Expect(err).ToNot(HaveOccurred()) rrDataplane.ResetDeltas() err = wg.Apply() Expect(err).ToNot(HaveOccurred()) Expect(rrDataplane.AddedRules).To(HaveLen(0)) Expect(rrDataplane.DeletedRules).To(HaveLen(0)) wg.QueueResync() err = wg.Apply() Expect(err).ToNot(HaveOccurred()) Expect(rrDataplane.DeletedRules).To(ConsistOf(*badrule)) Expect(rrDataplane.AddedRules).To(ConsistOf(*rule)) }) }) Describe("create two wireguard nodes with different public keys", func() { var key_peer1, key_peer2 wgtypes.Key var link *mocknetlink.MockLink BeforeEach(func() { Expect(s.numStatusCallbacks).To(Equal(1)) wg.EndpointWireguardUpdate(hostname, s.statusKey, nil) key_peer1 = mustGeneratePrivateKey().PublicKey() wg.EndpointWireguardUpdate(peer1, key_peer1, nil) wg.EndpointUpdate(peer1, ipv4_peer1) key_peer2 = mustGeneratePrivateKey().PublicKey() wg.EndpointWireguardUpdate(peer2, key_peer2, nil) wg.EndpointUpdate(peer2, ipv4_peer2) wg.RouteUpdate(hostname, cidr_local) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) link = wgDataplane.NameToLink[ifaceName] Expect(link).ToNot(BeNil()) Expect(wgDataplane.WireguardOpen).To(BeTrue()) Expect(rrDataplane.NetlinkOpen).To(BeTrue()) Expect(rrDataplane.NumRuleDelCalls).To(Equal(0)) Expect(rrDataplane.NumRuleAddCalls).To(Equal(1)) }) It("should have both nodes configured", func() { Expect(link.WireguardPeers).To(HaveLen(2)) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) Expect(link.WireguardPeers[key_peer1]).To(Equal(wgtypes.Peer{ PublicKey: key_peer1, Endpoint: &net.UDPAddr{ IP: ipv4_peer1.AsNetIP(), Port: 1000, }, })) Expect(link.WireguardPeers[key_peer2]).To(Equal(wgtypes.Peer{ PublicKey: key_peer2, Endpoint: &net.UDPAddr{ IP: ipv4_peer2.AsNetIP(), Port: 1000, }, })) }) It("should have no updates for local EndpointUpdate and EndpointRemove msgs", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.EndpointUpdate(hostname, nil) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) 
wg.EndpointRemove(hostname) err = wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) It("should have no updates for backing out an endpoint update", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.EndpointUpdate(peer1, ipv4_peer2) wg.EndpointUpdate(peer1, ipv4_peer1) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) It("should have no updates for backing out a peer key update", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.EndpointWireguardUpdate(peer1, key_peer2, nil) wg.EndpointWireguardUpdate(peer1, key_peer1, nil) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) It("should have no updates if adding and deleting peer config before applying", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.EndpointUpdate(peer3, ipv4_peer3) wg.EndpointWireguardUpdate(peer3, key_peer1, nil) wg.EndpointRemove(peer3) wg.EndpointWireguardRemove(peer3) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) It("should trigger another status message if deleting the local Wireguard config", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() Expect(s.numStatusCallbacks).To(Equal(1)) wg.EndpointWireguardRemove(hostname) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(s.numStatusCallbacks).To(Equal(2)) }) It("should contain a throw route for the local CIDR", func() { Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_cidr_local)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(BeEmpty()) }) 
Describe("add local workload as a single IP", func() { BeforeEach(func() { // Update the routetable dataplane so it knows about the interface. rtDataplane.NameToLink[ifaceName] = link wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteUpdate(hostname, cidr_6) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should have a throw route to the local IP", func() { Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_6)) }) It("should handle the IP being deleted and then moved to another node", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteRemove(cidr_6) wg.RouteUpdate(peer1, cidr_6) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_6)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_6)) }) It("should handle the IP being moved to another node without first deleting", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteUpdate(peer1, cidr_6) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_6)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_6)) }) It("should handle the IP being moved to another node with a deletion in between", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteUpdate(peer1, cidr_6) wg.RouteRemove(cidr_6) wg.RouteUpdate(peer1, cidr_6) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_6)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_6)) }) }) Describe("public key updated to 
conflict on two nodes", func() { var wgPeers map[wgtypes.Key]wgtypes.Peer BeforeEach(func() { link = wgDataplane.NameToLink[ifaceName] // Take a copy of the current peer configuration for one of the tests. wgPeers = make(map[wgtypes.Key]wgtypes.Peer) for k, p := range link.WireguardPeers { wgPeers[k] = p } wg.EndpointWireguardUpdate(peer2, key_peer1, nil) rtDataplane.ResetDeltas() err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should remove both nodes", func() { Expect(link.WireguardPeers).To(HaveLen(0)) }) It("should handle a resync if the peer is added back in out-of-band", func() { link.WireguardPeers = wgPeers link.WireguardListenPort = listeningPort + 1 link.WireguardFirewallMark = firewallMark + 1 link.LinkAttrs.MTU = mtu + 1 wg.QueueResync() err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(link.WireguardListenPort).To(Equal(listeningPort)) Expect(link.WireguardFirewallMark).To(Equal(firewallMark)) Expect(link.WireguardPeers).To(HaveLen(0)) }) It("should add both nodes when conflicting public keys updated to no longer conflict", func() { wg.EndpointWireguardUpdate(peer2, key_peer2, nil) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) Expect(link.WireguardPeers[key_peer1]).To(Equal(wgtypes.Peer{ PublicKey: key_peer1, Endpoint: &net.UDPAddr{ IP: ipv4_peer1.AsNetIP(), Port: 1000, }, })) Expect(link.WireguardPeers[key_peer2]).To(Equal(wgtypes.Peer{ PublicKey: key_peer2, Endpoint: &net.UDPAddr{ IP: ipv4_peer2.AsNetIP(), Port: 1000, }, })) }) It("should contain no more route updates", func() { Expect(rtDataplane.AddedRouteKeys).To(BeEmpty()) Expect(rtDataplane.DeletedRouteKeys).To(BeEmpty()) }) }) Describe("create a non-wireguard peer", func() { BeforeEach(func() { wg.EndpointUpdate(peer3, ipv4_peer3) rtDataplane.ResetDeltas() err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should not create wireguard configuration for the 
peer", func() { Expect(link.WireguardPeers).To(HaveLen(2)) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) }) It("should contain no more route updates", func() { Expect(rtDataplane.AddedRouteKeys).To(BeEmpty()) Expect(rtDataplane.DeletedRouteKeys).To(BeEmpty()) }) Describe("create destinations on each peer", func() { var routekey_1, routekey_2, routekey_3 string BeforeEach(func() { // Update the mock routing table dataplane so that it knows about the wireguard interface. rtDataplane.NameToLink[ifaceName] = link routekey_1 = fmt.Sprintf("%d-%s", tableIndex, cidr_1) routekey_2 = fmt.Sprintf("%d-%s", tableIndex, cidr_2) routekey_3 = fmt.Sprintf("%d-%s", tableIndex, cidr_3) wg.RouteUpdate(hostname, cidr_local) wg.RouteUpdate(peer1, cidr_1) wg.RouteUpdate(peer1, cidr_2) wg.RouteUpdate(peer2, cidr_3) wg.RouteUpdate(peer3, cidr_4) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should have wireguard routes for peer1 and peer2", func() { Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) Expect(link.WireguardPeers[key_peer1]).To(Equal(wgtypes.Peer{ PublicKey: key_peer1, Endpoint: &net.UDPAddr{ IP: ipv4_peer1.AsNetIP(), Port: 1000, }, AllowedIPs: []net.IPNet{ipnet_1, ipnet_2}, })) Expect(link.WireguardPeers[key_peer2]).To(Equal(wgtypes.Peer{ PublicKey: key_peer2, Endpoint: &net.UDPAddr{ IP: ipv4_peer2.AsNetIP(), Port: 1000, }, AllowedIPs: []net.IPNet{ipnet_3}, })) }) It("should route to wireguard for peer1 and peer2 routes, but not peer3 routes", func() { Expect(rtDataplane.AddedRouteKeys).To(HaveLen(4)) Expect(rtDataplane.DeletedRouteKeys).To(BeEmpty()) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_1)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_2)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_3)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_4)) Expect(rtDataplane.RouteKeyToRoute[routekey_1]).To(Equal(netlink.Route{ 
LinkIndex: link.LinkAttrs.Index, Dst: &ipnet_1, Type: syscall.RTN_UNICAST, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_LINK, Table: tableIndex, })) Expect(rtDataplane.RouteKeyToRoute[routekey_2]).To(Equal(netlink.Route{ LinkIndex: link.LinkAttrs.Index, Dst: &ipnet_2, Type: syscall.RTN_UNICAST, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_LINK, Table: tableIndex, })) Expect(rtDataplane.RouteKeyToRoute[routekey_3]).To(Equal(netlink.Route{ LinkIndex: link.LinkAttrs.Index, Dst: &ipnet_3, Type: syscall.RTN_UNICAST, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_LINK, Table: tableIndex, })) Expect(rtDataplane.RouteKeyToRoute[routekey_4]).To(Equal(netlink.Route{ Dst: &ipnet_4, Type: syscall.RTN_THROW, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_UNIVERSE, Table: tableIndex, })) }) It("should remove a route from the peer", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteRemove(cidr_1) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_1)) Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue()) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers[key_peer1]).To(Equal(wgtypes.Peer{ PublicKey: key_peer1, Endpoint: &net.UDPAddr{ IP: ipv4_peer1.AsNetIP(), Port: 1000, }, AllowedIPs: []net.IPNet{ipnet_2}, })) }) It("should have no updates if swapping routes and swapping back before an apply", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteUpdate(peer1, cidr_3) wg.RouteUpdate(peer2, cidr_1) wg.RouteUpdate(peer1, cidr_1) wg.RouteUpdate(peer2, cidr_3) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) It("should have no updates if adding and deleting a CIDR to a peer", func() 
{ wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteUpdate(peer1, cidr_5) wg.RouteRemove(cidr_5) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) It("should have no updates if deleting an unknown CIDR", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteRemove(cidr_5) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) It("should handle deletion of nodes 2 and 3", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.EndpointRemove(peer3) wg.EndpointWireguardRemove(peer3) wg.RouteRemove(cidr_4) wg.RouteRemove(cidr_3) wg.EndpointWireguardRemove(peer2) wg.EndpointRemove(peer2) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(2)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_4)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3)) Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue()) Expect(link.WireguardPeers).To(HaveLen(1)) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) }) It("should handle deletion of a wireguard peer over multiple applies: endpoint, wireguard, route", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() // Remove the endpoint. Wireguard config should be removed at this point. The route should // be converted to a throw route. 
By("Removing the node") wg.EndpointRemove(peer2) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wg.DebugNodes()).To(HaveLen(4)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_3)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3)) Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue()) Expect(link.WireguardPeers).To(HaveLen(1)) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(rtDataplane.RouteKeyToRoute[routekey_3]).To(Equal(netlink.Route{ Dst: &ipnet_3, Type: syscall.RTN_THROW, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_UNIVERSE, Table: tableIndex, })) // Remove the wireguard config for this peer. Should have no further impact. By("Removing the wireguard configuration") wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.EndpointWireguardRemove(peer2) err = wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wg.DebugNodes()).To(HaveLen(4)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) Expect(link.WireguardPeers).To(HaveLen(1)) // Remove the route. // This is the last bit of configuration for the peer and so the node should be removed // from the cache. By("Removing the route") wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteRemove(cidr_3) err = wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wg.DebugNodes()).To(HaveLen(3)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3)) Expect(link.WireguardPeers).To(HaveLen(1)) }) It("should handle deletion of a wireguard peer over multiple applies: route, endpoint, wireguard", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() // Remove the route. 
By("Removing the route")
wg.RouteRemove(cidr_3)
err := wg.Apply()
Expect(err).NotTo(HaveOccurred())
Expect(wg.DebugNodes()).To(HaveLen(4))
Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0))
Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1))
Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3))
Expect(rtDataplane.RouteKeyToRoute).ToNot(HaveKey(routekey_3))
Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue())
Expect(link.WireguardPeers).To(HaveLen(2))

// Remove the endpoint. Wireguard config should be removed at this point. The route should
// be converted to a throw route.
By("Removing the node")
wgDataplane.ResetDeltas()
rtDataplane.ResetDeltas()
wg.EndpointRemove(peer2)
err = wg.Apply()
Expect(err).NotTo(HaveOccurred())
Expect(wg.DebugNodes()).To(HaveLen(4))
Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0))
Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0))
Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue())
Expect(link.WireguardPeers).To(HaveLen(1))
Expect(link.WireguardPeers).To(HaveKey(key_peer1))

// Remove the wireguard config for this peer.
// This is the last bit of configuration for the peer and so the node should be removed
// from the cache.
By("Removing the wireguard configuration")
wgDataplane.ResetDeltas()
rtDataplane.ResetDeltas()
wg.EndpointWireguardRemove(peer2)
err = wg.Apply()
Expect(err).NotTo(HaveOccurred())
Expect(wg.DebugNodes()).To(HaveLen(3))
Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0))
Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0))
Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse())
Expect(link.WireguardPeers).To(HaveLen(1))
})

// NOTE(review): this spec previously duplicated the title of the preceding one
// ("route, endpoint, wireguard") even though its body deletes in the order
// wireguard-config, route, endpoint. Renamed to match the actual order and to
// keep Ginkgo spec names unique.
It("should handle deletion of a wireguard peer over multiple applies: wireguard, route, endpoint", func() {
wgDataplane.ResetDeltas()
rtDataplane.ResetDeltas()

// Remove the wireguard config for this peer. Wireguard config should be removed at this
// point. The route should be converted to a throw route.
By("Removing the wireguard configuration")
wg.EndpointWireguardRemove(peer2)
err := wg.Apply()
Expect(err).NotTo(HaveOccurred())
Expect(wg.DebugNodes()).To(HaveLen(4))
Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1))
Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_3))
Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1))
Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3))
Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue())
Expect(rtDataplane.RouteKeyToRoute[routekey_3]).To(Equal(netlink.Route{
	Dst:      &ipnet_3,
	Type:     syscall.RTN_THROW,
	Protocol: FelixRouteProtocol,
	Scope:    netlink.SCOPE_UNIVERSE,
	Table:    tableIndex,
}))
Expect(link.WireguardPeers).To(HaveLen(1))
Expect(link.WireguardPeers).To(HaveKey(key_peer1))

// Remove the route.
By("Removing the route")
wgDataplane.ResetDeltas()
rtDataplane.ResetDeltas()
wg.RouteRemove(cidr_3)
err = wg.Apply()
Expect(err).NotTo(HaveOccurred())
Expect(wg.DebugNodes()).To(HaveLen(4))
Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0))
Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1))
Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3))
Expect(rtDataplane.RouteKeyToRoute).ToNot(HaveKey(routekey_3))
Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse())
Expect(link.WireguardPeers).To(HaveLen(1))

// Remove the endpoint.
// This is the last bit of configuration for the peer and so the node should be removed
// from the cache.
By("Removing the node")
wgDataplane.ResetDeltas()
rtDataplane.ResetDeltas()
wg.EndpointRemove(peer2)
err = wg.Apply()
Expect(err).NotTo(HaveOccurred())
Expect(wg.DebugNodes()).To(HaveLen(3))
Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0))
Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0))
Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse())
Expect(link.WireguardPeers).To(HaveLen(1))
})

It("should handle deletion and re-adding an endpoint over multiple applies", func() {
wgDataplane.ResetDeltas()
rtDataplane.ResetDeltas()

// Remove the endpoint. Wireguard config should be removed at this point. The route should
// be converted to a throw route.
By("Removing the node")
wg.EndpointRemove(peer2)
err := wg.Apply()
Expect(err).NotTo(HaveOccurred())
Expect(wg.DebugNodes()).To(HaveLen(4))
Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1))
Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_3))
Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1))
Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3))
Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue())
Expect(link.WireguardPeers).To(HaveLen(1))
Expect(link.WireguardPeers).To(HaveKey(key_peer1))
Expect(rtDataplane.RouteKeyToRoute[routekey_3]).To(Equal(netlink.Route{
	Dst:      &ipnet_3,
	Type:     syscall.RTN_THROW,
	Protocol: FelixRouteProtocol,
	Scope:    netlink.SCOPE_UNIVERSE,
	Table:    tableIndex,
}))

// Re-add the endpoint. Wireguard config will be added back in.
By("Re-adding the node")
wgDataplane.ResetDeltas()
rtDataplane.ResetDeltas()
wg.EndpointUpdate(peer2, ipv4_peer2)
err = wg.Apply()
Expect(err).NotTo(HaveOccurred())
Expect(wg.DebugNodes()).To(HaveLen(4))
Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1))
Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_3))
Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1))
Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3))
Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue())
Expect(link.WireguardPeers).To(HaveLen(2))
Expect(link.WireguardPeers).To(HaveKey(key_peer1))
Expect(link.WireguardPeers).To(HaveKey(key_peer2))
Expect(rtDataplane.RouteKeyToRoute[routekey_3]).To(Equal(netlink.Route{
	LinkIndex: link.LinkAttrs.Index,
	Dst:       &ipnet_3,
	Type:      syscall.RTN_UNICAST,
	Protocol:  FelixRouteProtocol,
	Scope:     netlink.SCOPE_LINK,
	Table:     tableIndex,
}))
})

It("should handle deletion and re-adding an endpoint in a single apply", func() {
wgDataplane.ResetDeltas()
rtDataplane.ResetDeltas()

// Remove the endpoint. Wireguard config should be removed at this point. The route should
// be converted to a throw route.
By("Removing the node") wg.EndpointRemove(peer2) wg.EndpointUpdate(peer2, ipv4_peer2) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wg.DebugNodes()).To(HaveLen(4)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) Expect(link.WireguardPeers).To(HaveLen(2)) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) }) It("should handle deletion and re-adding an endpoint with a different IP in a single apply", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() // Remove the endpoint. Wireguard config should be removed at this point. The route should // be converted to a throw route. By("Removing the node") wg.EndpointRemove(peer2) wg.EndpointUpdate(peer2, ipv4_peer2_2) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wg.DebugNodes()).To(HaveLen(4)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeTrue()) Expect(link.WireguardPeers).To(HaveLen(2)) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) Expect(link.WireguardPeers[key_peer2].Endpoint.IP).To(Equal(ipv4_peer2_2.AsNetIP())) }) It("should handle immediate and subsequent reuse after a node deletion", func() { wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.EndpointRemove(peer2) wg.EndpointWireguardRemove(peer2) wg.RouteRemove(cidr_3) wg.RouteUpdate(hostname, cidr_3) By("Applying deletion and IP moving to local host") err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_3)) Expect(rtDataplane.RouteKeyToRoute[routekey_3]).To(Equal(netlink.Route{ Dst: &ipnet_3, Type: syscall.RTN_THROW, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_UNIVERSE, Table: tableIndex, 
})) By("Deleting local route") wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteRemove(cidr_3) err = wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.RouteKeyToRoute).NotTo(HaveKey(routekey_3)) By("Applying the same route to be remote") wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() wg.RouteUpdate(peer1, cidr_3) err = wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.RouteKeyToRoute[routekey_3]).To(Equal(netlink.Route{ LinkIndex: link.LinkAttrs.Index, Dst: &ipnet_3, Type: syscall.RTN_UNICAST, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_LINK, Table: tableIndex, })) }) Describe("move a route from peer1 to peer2 and a route from peer2 to peer3", func() { BeforeEach(func() { wg.RouteRemove(cidr_2) wg.RouteUpdate(peer2, cidr_2) wg.RouteUpdate(peer3, cidr_3) rtDataplane.ResetDeltas() err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should have wireguard routes for peer1 and peer2", func() { Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) Expect(link.WireguardPeers[key_peer1]).To(Equal(wgtypes.Peer{ PublicKey: key_peer1, Endpoint: &net.UDPAddr{ IP: ipv4_peer1.AsNetIP(), Port: 1000, }, AllowedIPs: []net.IPNet{ipnet_1}, })) Expect(link.WireguardPeers[key_peer2]).To(Equal(wgtypes.Peer{ PublicKey: key_peer2, Endpoint: &net.UDPAddr{ IP: ipv4_peer2.AsNetIP(), Port: 1000, }, AllowedIPs: []net.IPNet{ipnet_2}, })) }) It("should reprogram the route to the non-wireguard peer only", func() { Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_3)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_3)) 
Expect(rtDataplane.RouteKeyToRoute[routekey_3]).To(Equal(netlink.Route{ Dst: &ipnet_3, Type: syscall.RTN_THROW, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_UNIVERSE, Table: tableIndex, })) }) }) Describe("enable wireguard on peer 3", func() { var key_peer3 wgtypes.Key BeforeEach(func() { key_peer3 = mustGeneratePrivateKey() wg.EndpointWireguardUpdate(peer3, key_peer3, nil) rtDataplane.ResetDeltas() err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should have wireguard routes for all nodes", func() { Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) Expect(link.WireguardPeers).To(HaveKey(key_peer3)) Expect(link.WireguardPeers[key_peer1]).To(Equal(wgtypes.Peer{ PublicKey: key_peer1, Endpoint: &net.UDPAddr{ IP: ipv4_peer1.AsNetIP(), Port: 1000, }, AllowedIPs: []net.IPNet{ipnet_1, ipnet_2}, })) Expect(link.WireguardPeers[key_peer2]).To(Equal(wgtypes.Peer{ PublicKey: key_peer2, Endpoint: &net.UDPAddr{ IP: ipv4_peer2.AsNetIP(), Port: 1000, }, AllowedIPs: []net.IPNet{ipnet_3}, })) Expect(link.WireguardPeers[key_peer3]).To(Equal(wgtypes.Peer{ PublicKey: key_peer3, Endpoint: &net.UDPAddr{ IP: ipv4_peer3.AsNetIP(), Port: 1000, }, AllowedIPs: []net.IPNet{ipnet_4}, })) }) It("should reprogram the route to peer3 only", func() { routekey_4 := fmt.Sprintf("%d-%s", tableIndex, cidr_4) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_4)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_4)) Expect(rtDataplane.RouteKeyToRoute[routekey_4]).To(Equal(netlink.Route{ LinkIndex: link.LinkAttrs.Index, Dst: &ipnet_4, Type: syscall.RTN_UNICAST, Protocol: FelixRouteProtocol, Scope: netlink.SCOPE_LINK, Table: tableIndex, })) }) }) }) }) }) }) }) It("should create wireguard client if link activates immediately", func() { wgDataplane.ImmediateLinkUp = true err := wg.Apply() Expect(err).NotTo(HaveOccurred()) 
Expect(wgDataplane.NumLinkAddCalls).To(Equal(1)) Expect(wgDataplane.WireguardOpen).To(BeTrue()) }) It("should create wireguard client and not attempt to create the link if link is already up", func() { wgDataplane.AddIface(10, ifaceName, true, true) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.WireguardOpen).To(BeTrue()) }) It("should update listen port and firewall mark but maintain correct key", func() { key, err := wgtypes.GeneratePrivateKey() Expect(err).NotTo(HaveOccurred()) wgDataplane.AddIface(10, ifaceName, true, true) link := wgDataplane.NameToLink[ifaceName] Expect(link).ToNot(BeNil()) link.WireguardPrivateKey = key link.WireguardPublicKey = key.PublicKey() link.WireguardListenPort = 1010 link.WireguardFirewallMark = 11 ipv4 := ip.FromString("1.2.3.4") wg.EndpointWireguardUpdate(hostname, key, ipv4) err = wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.WireguardOpen).To(BeTrue()) link = wgDataplane.NameToLink[ifaceName] Expect(link).ToNot(BeNil()) Expect(link.Addrs).To(HaveLen(1)) Expect(link.Addrs[0].IP).To(Equal(ipv4.AsNetIP())) Expect(wgDataplane.WireguardOpen).To(BeTrue()) Expect(link.WireguardFirewallMark).To(Equal(10)) Expect(link.WireguardListenPort).To(Equal(1000)) Expect(link.WireguardPrivateKey).To(Equal(key)) Expect(link.WireguardPrivateKey.PublicKey()).To(Equal(link.WireguardPublicKey)) Expect(s.numStatusCallbacks).To(Equal(1)) }) Describe("wireguard initially not supported", func() { BeforeEach(func() { // Set the fail flags. 
wgDataplane.FailuresToSimulate = mocknetlink.FailNextLinkAddNotSupported

// Set the wireguard interface ip address
wg.EndpointWireguardUpdate(hostname, zeroKey, ipv4_peer1)

// No error should occur
err := wg.Apply()
Expect(err).NotTo(HaveOccurred())
})

It("should not create the wireguard interface", func() {
link := wgDataplane.NameToLink[ifaceName]
Expect(link).To(BeNil())
})

It("should not create the wireguard interface after another apply", func() {
err := wg.Apply()
Expect(err).NotTo(HaveOccurred())
link := wgDataplane.NameToLink[ifaceName]
Expect(link).To(BeNil())
})

It("should create the wireguard interface after a resync", func() {
wg.QueueResync()
err := wg.Apply()
Expect(err).NotTo(HaveOccurred())
link := wgDataplane.NameToLink[ifaceName]
Expect(link).ToNot(BeNil())
})
})

for _, testFailFlags := range []mocknetlink.FailFlags{
	mocknetlink.FailNextNewNetlink,
	mocknetlink.FailNextLinkAdd,
	mocknetlink.FailNextLinkByName,
	mocknetlink.FailNextAddrList,
	mocknetlink.FailNextAddrAdd,
	mocknetlink.FailNextAddrDel,
	mocknetlink.FailNextLinkSetUp,
	mocknetlink.FailNextLinkSetMTU,
	mocknetlink.FailNextRuleList,
	mocknetlink.FailNextRuleAdd,
	mocknetlink.FailNextNewWireguard,
	mocknetlink.FailNextWireguardConfigureDevice,
	mocknetlink.FailNextWireguardDeviceByName,
} {
failFlags := testFailFlags
desc := fmt.Sprintf("multiple nodes with routes and failed link management (%v)", failFlags)
Describe(desc, func() {
var key_peer1, key_peer2 wgtypes.Key
var routekey_1, routekey_2, routekey_3 string
var link *mocknetlink.MockLink
BeforeEach(func() {
// Set the fail flags and reset errors.
Expect(wgDataplane.FailuresToSimulate).To(Equal(mocknetlink.FailNone))
Expect(rrDataplane.FailuresToSimulate).To(Equal(mocknetlink.FailNone))
Expect(rtDataplane.FailuresToSimulate).To(Equal(mocknetlink.FailNone))
if failFlags&(mocknetlink.FailNextRuleList|mocknetlink.FailNextRuleAdd) != 0 {
	rrDataplane.FailuresToSimulate = failFlags
} else {
	wgDataplane.FailuresToSimulate = failFlags
}
wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() rrDataplane.ResetDeltas() // Expect exactly one error from the series of applies. apply := newApplyWithErrors(wg, 1) // Set the wireguard interface ip address wg.EndpointWireguardUpdate(hostname, zeroKey, ipv4_int1) err := apply.Apply() Expect(err).NotTo(HaveOccurred()) // We expect the link to exist. link = wgDataplane.NameToLink[ifaceName] Expect(link).ToNot(BeNil()) routekey_1 = fmt.Sprintf("%d-%s", tableIndex, cidr_1) routekey_2 = fmt.Sprintf("%d-%s", tableIndex, cidr_2) routekey_3 = fmt.Sprintf("%d-%s", tableIndex, cidr_3) // Set the interface to be up wgDataplane.SetIface(ifaceName, true, true) rtDataplane.AddIface(link.LinkAttrs.Index, ifaceName, true, true) wg.OnIfaceStateChanged(ifaceName, ifacemonitor.StateUp) err = apply.Apply() Expect(err).NotTo(HaveOccurred()) // Change the wireguard interface ip address wg.EndpointWireguardUpdate(hostname, zeroKey, ipv4_int2) // Add a single wireguard peer with a single route key_peer1 = mustGeneratePrivateKey() wg.EndpointWireguardUpdate(peer1, key_peer1, nil) wg.EndpointUpdate(peer1, ipv4_peer1) wg.RouteUpdate(peer1, cidr_1) wg.RouteUpdate(peer1, cidr_2) // Add a single local workload CIDR to ensure we add a route rule. wg.RouteUpdate(hostname, cidr_local) // Apply - a single error should have been observed across all of the Applies. 
err = apply.Apply() Expect(wgDataplane.FailuresToSimulate).To(Equal(mocknetlink.FailNone)) Expect(rtDataplane.FailuresToSimulate).To(Equal(mocknetlink.FailNone)) Expect(err).NotTo(HaveOccurred()) Expect(apply.LastError()).To(HaveOccurred()) }) It("should correctly program the dataplane after a single failure", func() { Expect(link.LinkType).To(Equal("wireguard")) Expect(link.LinkAttrs.MTU).To(Equal(2000)) Expect(link.Addrs).To(HaveLen(1)) Expect(link.Addrs[0].IP).To(Equal(ipv4_int2.AsNetIP())) Expect(link.WireguardPeers).To(HaveLen(1)) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers[key_peer1].AllowedIPs).To(ConsistOf(cidr_1.ToIPNet(), cidr_2.ToIPNet())) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(3)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(0)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_1)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_2)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_cidr_local)) // All of these failures will trigger an attempt to get a either a new netlink or wireguard client. if failFlags&(mocknetlink.FailNextNewWireguard|mocknetlink.FailNextWireguardConfigureDevice|mocknetlink.FailNextWireguardDeviceByName) != 0 { Expect(wgDataplane.NumNewWireguardCalls).To(Equal(2)) } else if failFlags&(mocknetlink.FailNextRuleList|mocknetlink.FailNextRuleAdd) != 0 { Expect(rrDataplane.NumNewNetlinkCalls).To(Equal(2)) } else { Expect(wgDataplane.NumNewNetlinkCalls).To(Equal(2)) } }) for _, nextTestFailFlags := range []mocknetlink.FailFlags{ mocknetlink.FailNextWireguardConfigureDevice, mocknetlink.FailNextRouteAdd, mocknetlink.FailNextRouteDel, } { failFlags := nextTestFailFlags desc := fmt.Sprintf("additional adds/deletes with another failure (%v)", failFlags) Describe(desc, func() { BeforeEach(func() { // Set the fail flags and reset errors. 
if failFlags&mocknetlink.FailNextWireguardConfigureDevice != 0 { wgDataplane.FailuresToSimulate = failFlags } else { rtDataplane.FailuresToSimulate = failFlags rtDataplane.PersistFailures = true } wgDataplane.ResetDeltas() rtDataplane.ResetDeltas() // Delete peer1 wg.EndpointWireguardRemove(peer1) wg.EndpointRemove(peer1) wg.RouteRemove(cidr_1) wg.RouteRemove(cidr_2) // Add peer2 with one of the same CIDRs as the previous peer1, and one different CIDR key_peer2 = mustGeneratePrivateKey() wg.EndpointWireguardUpdate(peer2, key_peer2, nil) wg.EndpointUpdate(peer2, ipv4_peer2) wg.RouteUpdate(peer2, cidr_1) wg.RouteUpdate(peer2, cidr_3) // Apply. err := wg.Apply() Expect(err).To(HaveOccurred()) rtDataplane.PersistFailures = false err = wg.Apply() Expect(err).ToNot(HaveOccurred()) }) It("should correctly program the dataplane after a second failure", func() { Expect(link.WireguardPeers).To(HaveLen(1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) Expect(link.WireguardPeers[key_peer2].AllowedIPs).To(Equal([]net.IPNet{cidr_1.ToIPNet(), cidr_3.ToIPNet()})) Expect(rtDataplane.AddedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.DeletedRouteKeys).To(HaveLen(1)) Expect(rtDataplane.AddedRouteKeys).To(HaveKey(routekey_3)) Expect(rtDataplane.DeletedRouteKeys).To(HaveKey(routekey_2)) if failFlags&mocknetlink.FailNextWireguardConfigureDevice != 0 { Expect(wgDataplane.NumNewWireguardCalls).ToNot(Equal(0)) Expect(rtDataplane.NumNewNetlinkCalls).To(Equal(0)) } else { Expect(rtDataplane.NumNewNetlinkCalls).ToNot(Equal(0)) Expect(wgDataplane.NumNewWireguardCalls).To(Equal(0)) } }) }) } }) } for _, testFailFlags := range []mocknetlink.FailFlags{ mocknetlink.FailNextLinkAddNotSupported, mocknetlink.FailNextNewWireguardNotSupported, } { failFlags := testFailFlags desc := fmt.Sprintf("multiple nodes with wireguard not supported (%v)", failFlags) Describe(desc, func() { It("should update on resync", func() { // Set the fail flags and set link to automatically come up. 
wgDataplane.FailuresToSimulate = failFlags wgDataplane.ImmediateLinkUp = true // Set the wireguard interface ip address. No error should occur because "not supported" is perfectly // valid. wg.EndpointWireguardUpdate(hostname, zeroKey, ipv4_peer1) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) // Expect a zero key status update. Expect(s.statusKey).To(Equal(zeroKey)) Expect(s.numStatusCallbacks).To(Equal(1)) // Always expect to attempt to create the netlink client Expect(wgDataplane.NumNewNetlinkCalls).To(Equal(1)) if failFlags&mocknetlink.FailNextLinkAddNotSupported == 0 { // If we are not emulating netlink link-not-supported failure then we should also attempt to create // the wireguard client. Expect(wgDataplane.NumNewWireguardCalls).To(Equal(1)) } // Should not attempt any further updates wgDataplane.ResetDeltas() err = wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.NumNewNetlinkCalls).To(Equal(0)) Expect(wgDataplane.NumNewWireguardCalls).To(Equal(0)) // Queue a resync and re-apply. wg.QueueResync() err = wg.Apply() Expect(err).NotTo(HaveOccurred()) // Expect an updated public key and the previously failed client to have been re-requested. Expect(s.statusKey).NotTo(Equal(zeroKey)) Expect(s.numStatusCallbacks).To(Equal(2)) if failFlags&mocknetlink.FailNextNewWireguardNotSupported != 0 { // And if emulating the wireguard failure, we expect a call to that too. Expect(wgDataplane.NumNewWireguardCalls).To(Equal(1)) } // The previous netlink client is still ok - just wireguard wasn't supported, we should not attempt to // recreate the netlink client. 
Expect(wgDataplane.NumNewNetlinkCalls).To(Equal(0)) }) }) } for _, port := range []int{listeningPort, listeningPort + 1} { configuredPort := port desc := fmt.Sprintf("wireguard dataplane needs updating (port=%d)", configuredPort) Describe(desc, func() { It("should handle a resync", func() { key_peer1 := mustGeneratePrivateKey().PublicKey() key_peer2 := mustGeneratePrivateKey().PublicKey() key_peer3 := mustGeneratePrivateKey().PublicKey() key_peer4 := mustGeneratePrivateKey().PublicKey() wg.EndpointUpdate(hostname, ipv4_host) wg.EndpointUpdate(peer1, ipv4_peer1) wg.EndpointUpdate(peer2, ipv4_peer2) wg.EndpointUpdate(peer3, ipv4_peer3) wg.EndpointUpdate(peer4, ipv4_peer4) wg.EndpointWireguardUpdate(peer1, key_peer1, nil) wg.EndpointWireguardUpdate(peer2, key_peer2, nil) wg.EndpointWireguardUpdate(peer3, key_peer3, nil) wg.EndpointWireguardUpdate(peer4, key_peer3, nil) // Peer 3 and 4 declaring same public key wg.RouteUpdate(peer1, cidr_1) wg.RouteUpdate(peer2, cidr_2) wg.RouteUpdate(peer3, cidr_3) wg.RouteUpdate(peer4, cidr_4) wgDataplane.AddIface(1, ifaceName, true, true) link := wgDataplane.NameToLink[ifaceName] Expect(link).NotTo(BeNil()) link.WireguardPeers = map[wgtypes.Key]wgtypes.Peer{ key_peer1: { PublicKey: key_peer1, Endpoint: &net.UDPAddr{ IP: ipv4_peer1.AsNetIP(), Port: configuredPort, }, AllowedIPs: []net.IPNet{}, // Need to add an entry (no deletes) }, key_peer2: { PublicKey: key_peer2, Endpoint: nil, AllowedIPs: []net.IPNet{ cidr_2.ToIPNet(), cidr_3.ToIPNet(), // Need to delete an entry. }, }, key_peer3: { PublicKey: key_peer3, Endpoint: &net.UDPAddr{}, AllowedIPs: []net.IPNet{}, }, key_peer4: { PublicKey: key_peer4, Endpoint: &net.UDPAddr{}, AllowedIPs: []net.IPNet{ cidr_4.ToIPNet(), }, }, } // Apply the update. err := wg.Apply() Expect(err).NotTo(HaveOccurred()) // Expect peer1 and peer2 to be updated and peer3 and peer4 to be deleted. 
link = wgDataplane.NameToLink[ifaceName] Expect(link).NotTo(BeNil()) Expect(link.WireguardPeers).To(HaveLen(2)) Expect(link.WireguardPeers).To(HaveKey(key_peer1)) Expect(link.WireguardPeers).To(HaveKey(key_peer2)) Expect(link.WireguardPeers[key_peer1]).To(Equal(wgtypes.Peer{ PublicKey: key_peer1, Endpoint: &net.UDPAddr{ IP: ipv4_peer1.AsNetIP(), Port: listeningPort, }, AllowedIPs: []net.IPNet{cidr_1.ToIPNet()}, })) Expect(link.WireguardPeers[key_peer2]).To(Equal(wgtypes.Peer{ PublicKey: key_peer2, Endpoint: &net.UDPAddr{ IP: ipv4_peer2.AsNetIP(), Port: listeningPort, }, AllowedIPs: []net.IPNet{cidr_2.ToIPNet()}, })) // If the listening port was incorrect then we expect that to be included in the updated, // otherwise we do not. Expect(wgDataplane.LastWireguardUpdates).To(HaveKey(key_peer1)) if configuredPort == listeningPort { Expect(wgDataplane.LastWireguardUpdates[key_peer1].Endpoint).To(BeNil()) } else { Expect(wgDataplane.LastWireguardUpdates[key_peer1].Endpoint).NotTo(BeNil()) } // Expect peer2 update to include the endpoint addr (since this was missing) Expect(wgDataplane.LastWireguardUpdates).To(HaveKey(key_peer2)) Expect(wgDataplane.LastWireguardUpdates[key_peer2].Endpoint).NotTo(BeNil()) // Expect peer1 to be an update and peer2 to be a full replace of CIDRs. Expect(wgDataplane.LastWireguardUpdates[key_peer1].ReplaceAllowedIPs).To(BeFalse()) Expect(wgDataplane.LastWireguardUpdates[key_peer2].ReplaceAllowedIPs).To(BeTrue()) }) }) } }) var _ = Describe("Wireguard (disabled)", func() { var wgDataplane, rtDataplane, rrDataplane *mocknetlink.MockNetlinkDataplane var t *mocktime.MockTime var s mockCallbacks var wg *Wireguard BeforeEach(func() { wgDataplane = mocknetlink.New() rtDataplane = mocknetlink.New() rrDataplane = mocknetlink.New() t = mocktime.New() // Setting an auto-increment greater than the route cleanup delay effectively // disables the grace period for these tests. 
t.SetAutoIncrement(11 * time.Second) wg = NewWithShims( hostname, &Config{ Enabled: false, ListeningPort: 1000, FirewallMark: 1, RoutingRulePriority: rulePriority, RoutingTableIndex: tableIndex, InterfaceName: ifaceName, MTU: 1042, }, rtDataplane.NewMockNetlink, rrDataplane.NewMockNetlink, wgDataplane.NewMockNetlink, wgDataplane.NewMockWireguard, 10*time.Second, t, FelixRouteProtocol, s.status, s.writeProcSys, logutils.NewSummarizer("test loop"), ) }) It("should be constructable", func() { Expect(wg).ToNot(BeNil()) }) It("should not attempt to create the link", func() { err := wg.Apply() Expect(err).NotTo(HaveOccurred()) err = wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.NumLinkDeleteCalls).To(Equal(0)) }) It("should handle deletion of the wireguard link", func() { wgDataplane.AddIface(1, ifaceName, true, true) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.NumLinkDeleteCalls).To(Equal(1)) Expect(wgDataplane.DeletedLinks).To(HaveKey(ifaceName)) }) Describe("With some endpoint updates", func() { BeforeEach(func() { wg.EndpointUpdate(peer1, ipv4_peer1) wg.EndpointWireguardUpdate(peer1, mustGeneratePrivateKey().PublicKey(), nil) wg.RouteUpdate(peer1, cidr_1) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("should ignore the updates", func() { Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.NumLinkDeleteCalls).To(Equal(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) It("should ignore endpoint deletes", func() { wg.RouteRemove(cidr_1) wg.EndpointRemove(peer1) wg.EndpointWireguardRemove(peer1) err := wg.Apply() Expect(err).NotTo(HaveOccurred()) Expect(wgDataplane.NumLinkAddCalls).To(Equal(0)) Expect(wgDataplane.NumLinkDeleteCalls).To(Equal(0)) Expect(wgDataplane.WireguardConfigUpdated).To(BeFalse()) }) }) for _, testFailFlags := range []mocknetlink.FailFlags{ 
mocknetlink.FailNextNewNetlink, mocknetlink.FailNextLinkDel, mocknetlink.FailNextLinkByName, mocknetlink.FailNextRuleList, mocknetlink.FailNextRuleDel, mocknetlink.FailNextRouteList, } { failFlags := testFailFlags desc := fmt.Sprintf("failed netlink management (%v), sync with incorrect rule", failFlags) Describe(desc, func() { BeforeEach(func() { // Create an interface to delete. wgDataplane.AddIface(1, ifaceName, true, true) rtDataplane.AddIface(1, ifaceName, true, true) // Create a rule to route to the wireguard table. rrDataplane.Rules = []netlink.Rule{ { Family: 2, Priority: 0, Table: 255, }, { Family: 2, Table: tableIndex, Mark: firewallMark, Invert: true, }, { Family: 2, Priority: 32766, Table: 254, }, { Family: 2, Priority: 32767, Table: 253, }, } // Set the fail flags and reset errors. Routetable and Routerule modules have retry mechanisms built in // so need to persist failures in those cases. if failFlags&mocknetlink.FailNextRouteList != 0 { rtDataplane.FailuresToSimulate = failFlags rtDataplane.PersistFailures = true } else if failFlags&(mocknetlink.FailNextRuleList|mocknetlink.FailNextRuleDel) != 0 { rrDataplane.FailuresToSimulate = failFlags } else { wgDataplane.FailuresToSimulate = failFlags } // Apply the settings - this should remove wireguard config. err := wg.Apply() Expect(err).To(HaveOccurred()) // The error should now resolve itself. rtDataplane.PersistFailures = false err = wg.Apply() Expect(err).NotTo(HaveOccurred()) }) It("deletes the link", func() { link := wgDataplane.NameToLink[ifaceName] Expect(link).To(BeNil()) // These errors will trigger netlink reconnection. The routetable retries multiple times, so just assert // there is >0 reconnections. 
if failFlags&mocknetlink.FailNextRouteList != 0 { Expect(rtDataplane.NumNewNetlinkCalls).To(BeNumerically(">", 1)) } else { Expect(wgDataplane.NumNewNetlinkCalls).To(BeNumerically(">", 1)) } }) It("should delete the route rule", func() { Expect(rrDataplane.NumRuleDelCalls).ToNot(Equal(0)) Expect(rrDataplane.NumRuleAddCalls).To(Equal(0)) Expect(rrDataplane.Rules).To(Equal([]netlink.Rule{ { Family: 2, Priority: 0, Table: 255, }, { Family: 2, Priority: 32766, Table: 254, }, { Family: 2, Priority: 32767, Table: 253, }, })) }) }) } }) var _ = Describe("Wireguard (with no table index)", func() { var wgDataplane, rtDataplane, rrDataplane *mocknetlink.MockNetlinkDataplane var t *mocktime.MockTime var s mockCallbacks var wgFn func(bool) BeforeEach(func() { wgDataplane = mocknetlink.New() rtDataplane = mocknetlink.New() rrDataplane = mocknetlink.New() t = mocktime.New() t.SetAutoIncrement(11 * time.Second) wgFn = func(enabled bool) { NewWithShims( hostname, &Config{ Enabled: enabled, ListeningPort: 1000, FirewallMark: 1, RoutingRulePriority: rulePriority, RoutingTableIndex: 0, InterfaceName: ifaceName, MTU: 1042, }, rtDataplane.NewMockNetlink, rrDataplane.NewMockNetlink, wgDataplane.NewMockNetlink, wgDataplane.NewMockWireguard, 10*time.Second, t, FelixRouteProtocol, s.status, s.writeProcSys, logutils.NewSummarizer("test loop"), ) } }) It("should panic if wireguard is enabled", func() { Expect(func() { wgFn(true) }).To(Panic()) }) It("should not panic if wireguard is disabled", func() { Expect(func() { wgFn(false) }).NotTo(Panic()) }) })
/*
Method Expressions:
	MethodExpr = ReceiverType "." MethodName
	ReceiverType = TypeName | "(" "*" TypeName ")" | "(" ReceiverType ")"
*/
package main

import (
	"fmt"
)

// WW shows that methods can be freely attached to a named basic type.
type WW int

// Print writes flag to stdout. The pointer receiver itself is unused; it
// exists so the method can be invoked both through a value (ww.Print) and
// as a method expression ((*WW).Print) below.
func (w *WW) Print(flag string) {
	fmt.Println(flag)
}

func main() {
	var ww WW
	ww.Print("weiweiu")                 // ordinary method call (receiver bound implicitly)
	ww.Print("Isoftstone Inc.")         // ordinary method call (receiver bound implicitly)
	(*WW).Print(&ww, "weiweiu")         // method expression: receiver passed explicitly as first argument
	(*WW).Print(&ww, "Isoftstone Inc.") // method expression: receiver passed explicitly as first argument
}
package main

import (
	"fmt"
	"regexp"
)

// ExtraConfig is a free-form configuration map keyed by section name.
type ExtraConfig map[string]interface{}

// test is a sample "customer" configuration payload. It is not referenced by
// main below — presumably kept as example data; verify before removing.
var test ExtraConfig = map[string]interface{}{"customer": map[interface{}]interface{}{
	"body-tem": `{"code":601,"data":{"value":""},"message": "对不起,尊敬的旅客,您的访问存在风险,请您稍后重试。如有疑问请拨打0871-96598。感谢您的理解。IP:{{ .ClientIP }},时间:{{ .Time }},访问ID:{{ .TraceID }}"}`,
	"code":     601,
	"data":     "no",
}}

func main() {
	// Regular expression matching the common textual forms of an IPv6
	// address: full 8-group form, "::"-compressed forms, link-local with
	// zone index (fe80:...%iface), and IPv4-mapped/embedded endings.
	re := `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
	reg := regexp.MustCompile(re)
	str := "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
	str2 := "AD80::ABAA:0000:00C2:0002"
	// FindString prints the first (leftmost) match, or "" when none.
	fmt.Println(reg.FindString(str))
	fmt.Println(reg.FindString(str2))
}
package taskmanapi

import (
	"net/http"
	"strings"

	"appengine"
)

//var BaseResourceHandlers = make(map[string]resourceHandler)

// ResourceHandler is implemented by anything that can serve a REST-style
// resource path for a user and register nested sub-resources under itself.
type ResourceHandler interface {
	Handle(http.ResponseWriter, *http.Request, appengine.Context, *User, string, ...interface{})
	AddResource(string, ResourceHandler)
}

// resourceHandler dispatches the first path segment of a request to the
// child handler registered under that segment name.
type resourceHandler struct {
	handlers map[string]ResourceHandler
}

// Handle pops the leading segment off path, looks up the child handler
// registered for it, and delegates the remaining path to that handler.
// Unregistered segments yield a 404.
func (rh *resourceHandler) Handle(w http.ResponseWriter, r *http.Request, c appengine.Context, user *User, path string, opt ...interface{}) {
	resString, nextPath := getResource(path)
	res, ok := rh.handlers[resString]
	if !ok {
		http.NotFound(w, r)
		return
	}
	// Forward the variadic options to the child handler; previously they
	// were accepted here but silently dropped on delegation.
	res.Handle(w, r, c, user, nextPath, opt...)
}

// AddResource registers h as the handler for the path segment name.
func (rh *resourceHandler) AddResource(name string, h ResourceHandler) {
	rh.handlers[name] = h
}

// NewResourceHandler returns an empty resourceHandler ready to have
// sub-resources registered via AddResource.
func NewResourceHandler() resourceHandler {
	handlers := make(map[string]ResourceHandler)
	return resourceHandler{handlers}
}

// getResource splits path into its first segment and the remainder:
// "/a/b/c" -> ("a", "/b/c"), "/a" -> ("a", ""), "/" -> ("/", ""), "" -> ("", "").
func getResource(path string) (string, string) {
	if path == "" {
		return "", ""
	}
	if path == "/" {
		return "/", ""
	}
	// Index over path[1:] skips the leading slash; +1 rebases the offset so
	// that 0 means "no further slash" (single trailing segment).
	resEnd := strings.Index(path[1:], "/") + 1
	var res, nextPath string
	if resEnd == 0 {
		res = path[1:]
		nextPath = ""
	} else {
		res = path[1:resEnd]
		nextPath = path[resEnd:]
	}
	return res, nextPath
}
// Copyright © 2018 NAME HERE <EMAIL ADDRESS> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "context" "fmt" "os" "strconv" "time" "github.com/olekukonko/tablewriter" "github.com/pkg/errors" "github.com/spf13/cobra" yaml "gopkg.in/yaml.v2" ) // articleGetCmd represents the article command var articleGetCmd = &cobra.Command{ Use: "article [ArticleID]", Short: "Get article", Long: `TBD`, Args: cobra.MinimumNArgs(1), RunE: runArticleGetCmd, } // articleListCmd represents the article command var articleListCmd = &cobra.Command{ Use: "article", Short: "List articles", Long: `TBD`, RunE: runArticleListCmd, } func init() { articleListCmd.Flags().IntVarP(&o.CategoryID, "category-id", "", 0, "Filter by Category ID") articleListCmd.Flags().IntVarP(&o.SectionID, "section-id", "", 0, "Filter by Section ID") getCmd.AddCommand(articleGetCmd) listCmd.AddCommand(articleListCmd) } func runArticleGetCmd(cmd *cobra.Command, args []string) error { client, err := newDefaultClient() if err != nil { return errors.Wrap(err, "newClient failed:") } articleID, err := strconv.Atoi(args[0]) if err != nil { return errors.Wrapf(err, "failed to parse ArticleID: %s", args[0]) } req := ArticleGetRequest{ ID: articleID, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() res, err := client.ArticleGet(ctx, req) if err != nil { return errors.Wrapf(err, "StackShow was failed: req = %+v, res = %+v", req, res) } article := res.Article if article.ID 
== 0 { fmt.Printf("Loading article failed. Please confirm the article id.\n") return nil } // Structure // a_<article id> // a_<article_id>_<locale>_<title>.html // meta_<article_id>_<locale>.yaml dirPath := fmt.Sprintf("a_%d", article.ID) if err := os.Mkdir(dirPath, 0777); err != nil { panic(err) } bodyPath := fmt.Sprintf("%s/a_%d_%s_%s.html", dirPath, article.ID, article.Locale, article.Title) metaPath := fmt.Sprintf("%s/meta_%d_%s.html", dirPath, article.ID, article.Locale) file, err := os.Create(bodyPath) if err != nil { return err } defer file.Close() file.Write(([]byte)(article.Body)) fmt.Printf("Exported article to %s\n", bodyPath) metafile, err := os.Create(metaPath) if err != nil { return err } defer metafile.Close() yml, err := yaml.Marshal(article) if err != nil { return err } metafile.Write(([]byte)(string(yml))) fmt.Printf("Exported metadata to %s\n", metaPath) return nil } func runArticleListCmd(cmd *cobra.Command, args []string) error { client, err := newDefaultClient() if err != nil { return errors.Wrap(err, "newClient failed:") } req := ArticleListRequest{ Page: o.Page, PerPage: o.Limit, SortBy: o.SortBy, SortOrder: o.SortOrder, CategoryID: o.CategoryID, SectionID: o.SectionID, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() res, err := client.ArticleList(ctx, req) if err != nil { return errors.Wrapf(err, "StackShow was failed: req = %+v, res = %+v", req, res) } articles := res.Articles table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{ "category id", "category", "section id", "section", "article id", "title", "url", "edited at", }) for _, v := range articles { category := "" categoryID := 0 section := "" for _, s := range res.Sections { if v.SectionID == s.ID { section = s.Name categoryID = s.CategoryID } } for _, c := range res.Categories { if categoryID == c.ID { category = c.Name } } table.Append([]string{ fmt.Sprintf("%d", categoryID), category, fmt.Sprintf("%d", v.SectionID), section, 
fmt.Sprintf("%d", v.ID), v.Title, v.HTMLURL, v.EditedAt, }) } table.Render() return nil } // ArticleGet is TBD func (client *Client) ArticleGet(ctx context.Context, apiRequest ArticleGetRequest) (*ArticleGetResponse, error) { subPath := fmt.Sprintf("/%s/articles/%d.json", client.Locale, apiRequest.ID) httpRequest, err := client.newRequest(ctx, "GET", subPath, nil) if err != nil { return nil, err } fmt.Printf("Info: Request to %s\n", httpRequest.URL) httpResponse, err := client.HTTPClient.Do(httpRequest) if err != nil { return nil, err } // TODO: Check status code here… var apiResponse ArticleGetResponse if err := decodeBody(httpResponse, &apiResponse); err != nil { return nil, err } return &apiResponse, nil } // ArticleList is TBD func (client *Client) ArticleList(ctx context.Context, apiRequest ArticleListRequest) (*ArticleListResponse, error) { subPath := fmt.Sprintf("/%s/articles.json", client.Locale) if apiRequest.CategoryID != 0 { subPath = fmt.Sprintf("/%s/categories/%d/articles.json", client.Locale, apiRequest.CategoryID) } if apiRequest.SectionID != 0 { subPath = fmt.Sprintf("/%s/sections/%d/articles.json", client.Locale, apiRequest.SectionID) } httpRequest, err := client.newRequest(ctx, "GET", subPath, nil) if err != nil { return nil, err } q := httpRequest.URL.Query() q.Add("page", fmt.Sprintf("%d", apiRequest.Page)) q.Add("per_page", fmt.Sprintf("%d", apiRequest.PerPage)) q.Add("sort_by", apiRequest.SortBy) q.Add("sort_order", apiRequest.SortOrder) q.Add("include", "categories,sections") httpRequest.URL.RawQuery = q.Encode() fmt.Printf("Info: Request to %s\n", httpRequest.URL) httpResponse, err := client.HTTPClient.Do(httpRequest) if err != nil { return nil, err } // TODO: Check status code here… var apiResponse ArticleListResponse if err := decodeBody(httpResponse, &apiResponse); err != nil { return nil, err } return &apiResponse, nil }
package cluster

import (
	"fmt"
	"os"

	"github.com/jedib0t/go-pretty/v6/table"
	"github.com/michaelhenkel/gokvm/image"
	"github.com/michaelhenkel/gokvm/instance"
	"github.com/michaelhenkel/gokvm/network"
	log "github.com/sirupsen/logrus"
)

// Cluster describes a named KVM cluster: its network, base image, the number
// of controller and worker instances, and the instances currently attached
// to it.
type Cluster struct {
	Name       string
	Network    network.Network
	Image      image.Image
	Suffix     string
	Worker     int
	Controller int
	PublicKey  string
	Resources  instance.Resources
	Instances  []*instance.Instance
}

// List groups all known instances by their cluster name and returns one
// Cluster per group.
func List() ([]*Cluster, error) {
	instances, err := instance.List("")
	if err != nil {
		return nil, err
	}
	var clusterMap = make(map[string][]*instance.Instance)
	for _, inst := range instances {
		clusterMap[inst.ClusterName] = append(clusterMap[inst.ClusterName], inst)
	}
	var clusterList []*Cluster
	for k, v := range clusterMap {
		cl := &Cluster{
			Name:      k,
			Instances: v,
		}
		clusterList = append(clusterList, cl)
	}
	return clusterList, nil
}

// Render prints a table of clusters, their instances and each instance's IP
// addresses to stdout, merging repeated cluster names.
func Render(clusters []*Cluster) {
	rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
	t := table.NewWriter()
	t.SetOutputMirror(os.Stdout)
	t.AppendHeader(table.Row{"Cluster", "Instances", "IP"})
	for _, cluster := range clusters {
		for _, inst := range cluster.Instances {
			for _, addr := range inst.IPAddresses {
				t.AppendRow(table.Row{cluster.Name, inst.Name, addr}, rowConfigAutoMerge)
			}
		}
	}
	t.SetColumnConfigs([]table.ColumnConfig{
		{Number: 1, AutoMerge: true},
	})
	t.SetStyle(table.StyleLight)
	t.Render()
}

// Delete removes every instance belonging to the cluster.
func (c *Cluster) Delete() error {
	instances, err := instance.List(c.Name)
	if err != nil {
		return err
	}
	for _, inst := range instances {
		if err := inst.Delete(); err != nil {
			return err
		}
	}
	return nil
}

// Create provisions the cluster: it ensures the base image and network
// exist (creating defaults when absent), then creates the configured number
// of controller and worker instances. It is a no-op if instances for this
// cluster already exist.
func (c *Cluster) Create() error {
	instances, err := instance.List(c.Name)
	if err != nil {
		return err
	}
	if len(instances) > 0 {
		log.Info("Cluster already exists")
		return nil
	}
	if err := c.ensureImage(); err != nil {
		return err
	}
	if err := c.ensureNetwork(); err != nil {
		return err
	}
	// Controller instances are prefixed "c", workers "w".
	if err := c.createInstances("c", c.Controller); err != nil {
		return err
	}
	return c.createInstances("w", c.Worker)
}

// ensureImage resolves c.Image: reuse the existing image when present,
// otherwise create a default image under the requested name.
func (c *Cluster) ensureImage() error {
	imageExists, err := image.Get(c.Image.Name, c.Image.Pool)
	if err != nil {
		return err
	}
	if imageExists == nil {
		defaultImage := image.DefaultImage()
		defaultImage.Name = c.Image.Name
		if err := defaultImage.Create(); err != nil {
			return err
		}
		c.Image = defaultImage
		return nil
	}
	c.Image = *imageExists
	return nil
}

// ensureNetwork resolves c.Network: reuse the existing network when present,
// otherwise create a default network under the requested name.
func (c *Cluster) ensureNetwork() error {
	networkExists, err := network.Get(c.Network.Name)
	if err != nil {
		return err
	}
	if networkExists == nil {
		defaultNetwork := network.DefaultNetwork()
		defaultNetwork.Name = c.Network.Name
		if err := defaultNetwork.Create(); err != nil {
			return err
		}
		c.Network = defaultNetwork
		return nil
	}
	c.Network = *networkExists
	return nil
}

// createInstances creates count instances named
// "<prefix>-instance-<i>.<cluster>.<suffix>" sharing the cluster's network,
// image, key and resources.
func (c *Cluster) createInstances(prefix string, count int) error {
	for i := 0; i < count; i++ {
		inst := instance.Instance{
			Name:        fmt.Sprintf("%s-instance-%d.%s.%s", prefix, i, c.Name, c.Suffix),
			PubKey:      c.PublicKey,
			Network:     c.Network,
			Image:       c.Image,
			ClusterName: c.Name,
			Suffix:      c.Suffix,
			Resources:   c.Resources,
		}
		if err := inst.Create(); err != nil {
			return err
		}
	}
	return nil
}
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package qcloud import ( "fmt" "time" "yunion.io/x/jsonutils" "yunion.io/x/pkg/errors" api "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/cloudprovider" "yunion.io/x/onecloud/pkg/multicloud" ) type SVpc struct { multicloud.SVpc multicloud.QcloudTags region *SRegion iwires []cloudprovider.ICloudWire secgroups []cloudprovider.ICloudSecurityGroup CidrBlock string CreatedTime time.Time DhcpOptionsId string DnsServerSet []string DomainName string EnableMulticast bool IsDefault bool VpcId string VpcName string } func (self *SVpc) GetId() string { return self.VpcId } func (self *SVpc) GetName() string { if len(self.VpcName) > 0 { return self.VpcName } return self.VpcId } func (self *SVpc) GetGlobalId() string { return self.VpcId } func (self *SVpc) IsEmulated() bool { return false } func (self *SVpc) GetIsDefault() bool { return self.IsDefault } func (self *SVpc) GetCidrBlock() string { return self.CidrBlock } func (self *SVpc) GetStatus() string { return api.VPC_STATUS_AVAILABLE } func (self *SVpc) Delete() error { return self.region.DeleteVpc(self.VpcId) } func (self *SVpc) GetISecurityGroups() ([]cloudprovider.ICloudSecurityGroup, error) { secgroups := make([]SSecurityGroup, 0) for { parts, total, err := self.region.GetSecurityGroups([]string{}, self.VpcId, "", len(secgroups), 50) if err != nil { return nil, err } secgroups = append(secgroups, parts...) 
if len(secgroups) >= total { break } } isecgroups := make([]cloudprovider.ICloudSecurityGroup, len(secgroups)) for i := 0; i < len(secgroups); i++ { secgroups[i].region = self.region isecgroups[i] = &secgroups[i] } return isecgroups, nil } func (self *SVpc) GetIRouteTables() ([]cloudprovider.ICloudRouteTable, error) { rts := []cloudprovider.ICloudRouteTable{} routetables, err := self.region.GetAllRouteTables(self.GetId(), []string{}) if err != nil { return nil, errors.Wrapf(err, "self.region.GetAllRouteTables(%s, []string{})", self.GetId()) } for i := range routetables { routetables[i].vpc = self rts = append(rts, &routetables[i]) } return rts, nil } func (self *SVpc) GetIRouteTableById(routeTableId string) (cloudprovider.ICloudRouteTable, error) { routetables, err := self.region.GetAllRouteTables(self.GetId(), []string{routeTableId}) if err != nil { return nil, errors.Wrapf(err, "self.region.GetAllRouteTables(%s, []string{})", self.GetId()) } if len(routetables) == 0 { return nil, cloudprovider.ErrNotFound } if len(routetables) > 1 { return nil, cloudprovider.ErrDuplicateId } routetables[0].vpc = self return &routetables[0], nil } func (self *SVpc) getWireByZoneId(zoneId string) *SWire { for i := 0; i <= len(self.iwires); i++ { wire := self.iwires[i].(*SWire) if wire.zone.Zone == zoneId { return wire } } return nil } func (self *SVpc) GetINatGateways() ([]cloudprovider.ICloudNatGateway, error) { nats := []SNatGateway{} for { part, total, err := self.region.GetNatGateways(self.VpcId, len(nats), 50) if err != nil { return nil, err } nats = append(nats, part...) 
if len(nats) >= total { break } } inats := []cloudprovider.ICloudNatGateway{} for i := 0; i < len(nats); i++ { nats[i].vpc = self inats = append(inats, &nats[i]) } return inats, nil } func (self *SVpc) fetchNetworks() error { networks, total, err := self.region.GetNetworks(nil, self.VpcId, 0, 50) if err != nil { return err } if total > len(networks) { networks, _, err = self.region.GetNetworks(nil, self.VpcId, 0, total) if err != nil { return err } } for i := 0; i < len(networks); i += 1 { wire := self.getWireByZoneId(networks[i].Zone) networks[i].wire = wire wire.addNetwork(&networks[i]) } return nil } func (self *SVpc) GetIWireById(wireId string) (cloudprovider.ICloudWire, error) { if self.iwires == nil { err := self.fetchNetworks() if err != nil { return nil, err } } for i := 0; i < len(self.iwires); i += 1 { if self.iwires[i].GetGlobalId() == wireId { return self.iwires[i], nil } } return nil, cloudprovider.ErrNotFound } func (self *SVpc) GetIWires() ([]cloudprovider.ICloudWire, error) { if self.iwires == nil { err := self.fetchNetworks() if err != nil { return nil, err } } return self.iwires, nil } func (self *SVpc) GetRegion() cloudprovider.ICloudRegion { return self.region } func (self *SVpc) Refresh() error { new, err := self.region.getVpc(self.VpcId) if err != nil { return err } return jsonutils.Update(self, new) } func (self *SVpc) addWire(wire *SWire) { if self.iwires == nil { self.iwires = make([]cloudprovider.ICloudWire, 0) } self.iwires = append(self.iwires, wire) } func (self *SVpc) GetSVpcPeeringConnections() ([]SVpcPC, error) { svpcPCs, err := self.region.GetAllVpcPeeringConnections(self.GetId()) if err != nil { return nil, errors.Wrapf(err, "self.region.GetAllVpcPeeringConnections(%s)", self.GetId()) } for i := range svpcPCs { svpcPCs[i].vpc = self } return svpcPCs, nil } func (self *SVpc) GetAccepterSVpcPeeringConnections() ([]SVpcPC, error) { result := []SVpcPC{} svpcPCs, err := self.region.GetAllVpcPeeringConnections("") if err != nil { return 
nil, errors.Wrapf(err, "self.region.GetAllVpcPeeringConnections(%s)", self.GetId()) } for i := range svpcPCs { if svpcPCs[i].PeerVpcID == self.GetId() { svpcPCs[i].vpc = self result = append(result, svpcPCs[i]) } } return result, nil } func (self *SVpc) GetSVpcPeeringConnectionById(id string) (*SVpcPC, error) { svpcPC, err := self.region.GetVpcPeeringConnectionbyId(id) if err != nil { return nil, errors.Wrapf(err, "self.region.GetVpcPeeringConnectionbyId(%s)", id) } svpcPC.vpc = self return svpcPC, nil } func (self *SVpc) CreateSVpcPeeringConnection(opts *cloudprovider.VpcPeeringConnectionCreateOptions) (*SVpcPC, error) { vpcPCId, err := self.region.CreateVpcPeeringConnection(self.GetId(), opts) if err != nil { return nil, errors.Wrapf(err, "self.region.CreateVpcPeeringConnection(%s, %s)", self.GetId(), jsonutils.Marshal(opts).String()) } SvpcPC, err := self.GetSVpcPeeringConnectionById(vpcPCId) if err != nil { return nil, errors.Wrapf(err, "self.GetSVpcPeeringConnectionById(%s)", vpcPCId) } return SvpcPC, nil } func (self *SVpc) CreateCrossRegionSVpcPeeringConnection(opts *cloudprovider.VpcPeeringConnectionCreateOptions) (*SVpcPC, error) { taskId, err := self.region.CreateVpcPeeringConnectionEx(self.GetId(), opts) if err != nil { return nil, errors.Wrapf(err, "self.region.CreateVpcPeeringConnectionEx(%s, %s)", self.GetId(), jsonutils.Marshal(opts).String()) } err = cloudprovider.Wait(time.Second*5, time.Minute*6, func() (bool, error) { status, err := self.region.DescribeVpcTaskResult(fmt.Sprintf("%d", taskId)) if err != nil { return false, errors.Wrap(err, "self.vpc.region.DescribeVpcTaskResult") } //任务的当前状态。0:成功,1:失败,2:进行中。 if status == 1 { return false, errors.Wrapf(fmt.Errorf("taskfailed,taskId=%d", taskId), "client.DescribeVpcTaskResult(taskId)") } if status == 0 { return true, nil } return false, nil }) if err != nil { return nil, errors.Wrapf(err, "cloudprovider.Wait %d", taskId) } svpcPCs, err := self.GetSVpcPeeringConnections() if err != nil { return nil, 
errors.Wrap(err, "self.GetSVpcPeeringConnections()") } for i := range svpcPCs { if svpcPCs[i].GetPeerVpcId() == opts.PeerVpcId { return &svpcPCs[i], nil } } return nil, errors.Wrap(cloudprovider.ErrNotFound, "vpcPeeringConnection not found after createEx") } func (self *SVpc) AcceptSVpcPeeringConnection(id string) error { err := self.region.AcceptVpcPeeringConnection(id) if err != nil { return errors.Wrapf(err, "self.region.AcceptVpcPeeringConnection(%s)", id) } return nil } func (self *SVpc) AcceptCrossRegionSVpcPeeringConnection(id string) error { taskId, err := self.region.AcceptVpcPeeringConnectionEx(id) if err != nil { return errors.Wrapf(err, "self.region.AcceptVpcPeeringConnectionEx(%s)", id) } err = cloudprovider.Wait(time.Second*5, time.Minute*6, func() (bool, error) { status, err := self.region.DescribeVpcTaskResult(fmt.Sprintf("%d", taskId)) if err != nil { return false, errors.Wrap(err, "self.vpc.region.DescribeVpcTaskResult") } //任务的当前状态。0:成功,1:失败,2:进行中。 if status == 1 { return false, errors.Wrap(fmt.Errorf("taskfailed,taskId=%d", taskId), "client.DescribeVpcTaskResult(taskId)") } if status == 0 { return true, nil } return false, nil }) if err != nil { return errors.Wrapf(err, " cloudprovider.Wait %d", taskId) } svpcPC, err := self.GetSVpcPeeringConnectionById(id) if err != nil { return errors.Wrapf(err, "self.GetSVpcPeeringConnectionById(%s)", id) } if svpcPC.GetStatus() != api.VPC_PEERING_CONNECTION_STATUS_ACTIVE { return errors.Wrap(cloudprovider.ErrInvalidStatus, "invalid status after AcceptCrossRegionSVpcPeeringConnection") } return nil } func (self *SVpc) GetICloudVpcPeeringConnections() ([]cloudprovider.ICloudVpcPeeringConnection, error) { result := []cloudprovider.ICloudVpcPeeringConnection{} SVpcPCs, err := self.GetSVpcPeeringConnections() if err != nil { return nil, errors.Wrap(err, "self.GetSVpcPeeringConnections()") } for i := range SVpcPCs { result = append(result, &SVpcPCs[i]) } return result, nil } func (self *SVpc) 
GetICloudAccepterVpcPeeringConnections() ([]cloudprovider.ICloudVpcPeeringConnection, error) { result := []cloudprovider.ICloudVpcPeeringConnection{} SVpcPCs, err := self.GetAccepterSVpcPeeringConnections() if err != nil { return nil, errors.Wrap(err, "self.GetSVpcPeeringConnections()") } for i := range SVpcPCs { result = append(result, &SVpcPCs[i]) } return result, nil } func (self *SVpc) GetICloudVpcPeeringConnectionById(id string) (cloudprovider.ICloudVpcPeeringConnection, error) { svpcPC, err := self.GetSVpcPeeringConnectionById(id) if err != nil { return nil, errors.Wrapf(err, "self.GetSVpcPeeringConnectionById(%s)", id) } return svpcPC, nil } func (self *SVpc) CreateICloudVpcPeeringConnection(opts *cloudprovider.VpcPeeringConnectionCreateOptions) (cloudprovider.ICloudVpcPeeringConnection, error) { if self.GetRegion().GetId() == opts.PeerRegionId { return self.CreateSVpcPeeringConnection(opts) } else { return self.CreateCrossRegionSVpcPeeringConnection(opts) } } func (self *SVpc) AcceptICloudVpcPeeringConnection(id string) error { svpcPC, err := self.GetSVpcPeeringConnectionById(id) if err != nil { return errors.Wrapf(err, "self.GetSVpcPeeringConnectionById(%s)", id) } if svpcPC.GetStatus() == api.VPC_PEERING_CONNECTION_STATUS_ACTIVE { return nil } if !svpcPC.IsCrossRegion() { return self.AcceptSVpcPeeringConnection(id) } else { return self.AcceptCrossRegionSVpcPeeringConnection(id) } } func (self *SVpc) GetAuthorityOwnerId() string { return self.region.client.ownerName } func (self *SVpc) ProposeJoinICloudInterVpcNetwork(opts *cloudprovider.SVpcJointInterVpcNetworkOption) error { instance := SCcnAttachInstanceInput{ InstanceType: "VPC", InstanceId: self.GetId(), InstanceRegion: self.region.GetId(), } err := self.region.AttachCcnInstances(opts.InterVpcNetworkId, opts.NetworkAuthorityOwnerId, []SCcnAttachInstanceInput{instance}) if err != nil { return errors.Wrapf(err, "self.region.AttachCcnInstance(%s,%s,%s)", jsonutils.Marshal(opts).String(), self.GetId(), 
self.region.GetId()) } return nil }
package flux // Action represents an action to be dispatched. type Action struct { Name string Payload interface{} } // Dispatch dispatches actions to the registered stores. Each actions // contained in a are dispatched sequentially within the call. eg a[1] will be // dispached once a[0] dispatch is complete. Each Dispatch call is perform // within a new goroutine, which mean N different dispatch calls can be // executed concurrently. func Dispatch(a ...Action) { go dispatchActions(a) } func dispatchActions(actions []Action) { for _, a := range actions { if err := dispatch(a); err != nil { return } } } func dispatch(a Action) error { storesMutex.Lock() storescpy := make([]Storer, len(stores)) copy(storescpy, stores) storesMutex.Unlock() for _, s := range storescpy { if err := s.OnDispatch(a); err != nil { return err } } return nil }
package storage_test

import (
	"context"
	"io/ioutil"
	"os"

	"github.com/containers/image/copy"
	"github.com/containers/image/types"
	"github.com/containers/libpod/pkg/rootless"
	cs "github.com/containers/storage"
	"github.com/cri-o/cri-o/pkg/storage"
	"github.com/golang/mock/gomock"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/opencontainers/go-digest"
)

// The actual test suite
// NOTE(review): `t`, `storeMock` and `testManifest` are provided by the
// suite setup elsewhere in this package — not visible in this file.
var _ = t.Describe("Image", func() {
	// Test constants
	const (
		testRegistry  = "docker.io"
		testImageName = "image"
		testSHA256    = "2a03a6059f21e150ae84b0973863609494aad70f0a80eaeb64bddd8d92465812"
	)

	// The system under test
	var sut storage.ImageServer

	// Prepare the system under test
	BeforeEach(func() {
		var err error
		sut, err = storage.GetImageService(
			context.Background(), nil, storeMock, "",
			[]string{}, []string{testRegistry},
		)
		Expect(err).To(BeNil())
		Expect(sut).NotTo(BeNil())
	})

	// mockParseStoreReference sets up the store-config expectations hit
	// when the image service parses a store reference.
	mockParseStoreReference := func() {
		gomock.InOrder(
			storeMock.EXPECT().GraphOptions().Return([]string{}),
			storeMock.EXPECT().GraphDriverName().Return(""),
			storeMock.EXPECT().GraphRoot().Return(""),
			storeMock.EXPECT().RunRoot().Return(""),
		)
	}

	// mockGetRef covers the image lookup followed by reference parsing.
	mockGetRef := func() {
		gomock.InOrder(
			storeMock.EXPECT().Image(gomock.Any()).
				Return(&cs.Image{ID: testImageName}, nil),
		)
		mockParseStoreReference()
	}

	// mockListImage covers one full image-metadata retrieval
	// (lookup, big-data listing/size/content, and a second lookup).
	mockListImage := func() {
		gomock.InOrder(
			storeMock.EXPECT().Image(gomock.Any()).
				Return(&cs.Image{ID: testImageName}, nil),
			storeMock.EXPECT().ListImageBigData(gomock.Any()).
				Return([]string{""}, nil),
			storeMock.EXPECT().ImageBigDataSize(gomock.Any(), gomock.Any()).
				Return(int64(0), nil),
			storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
				Return(testManifest, nil),
			storeMock.EXPECT().Image(gomock.Any()).
				Return(&cs.Image{ID: testImageName}, nil),
		)
	}

	t.Describe("GetImageService", func() {
		It("should succeed to retrieve an image service", func() {
			// Given
			// When
			// duplicate registry entries are expected to be deduplicated
			imageService, err := storage.GetImageService(
				context.Background(), nil, storeMock, "",
				[]string{"reg1", "reg1", "reg2"},
				[]string{"reg3", "reg3", "reg4"},
			)

			// Then
			Expect(err).To(BeNil())
			Expect(imageService).NotTo(BeNil())
		})

		It("should succeed with custom registries.conf", func() {
			// Given
			// When
			imageService, err := storage.GetImageService(
				context.Background(),
				&types.SystemContext{
					SystemRegistriesConfPath: "../../test/registries.conf"},
				storeMock, "", []string{}, []string{},
			)

			// Then
			Expect(err).To(BeNil())
			Expect(imageService).NotTo(BeNil())
		})

		It("should fail to retrieve an image service without storage", func() {
			// Given
			storeOptions, err := cs.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
			Expect(err).To(BeNil())
			storeOptions.GraphRoot = ""

			// When
			_, err = cs.GetStore(storeOptions)

			// Then
			Expect(err).NotTo(BeNil())
		})

		It("should fail if unqualified search registries errors", func() {
			// Given
			// When
			imageService, err := storage.GetImageService(
				context.Background(),
				&types.SystemContext{SystemRegistriesConfPath: "/invalid"},
				storeMock, "", []string{}, []string{},
			)

			// Then
			Expect(err).NotTo(BeNil())
			Expect(imageService).To(BeNil())
		})
	})

	t.Describe("GetStore", func() {
		It("should succeed to retrieve the store", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Delete(gomock.Any()).Return(nil),
			)

			// When
			store := sut.GetStore()

			// Then
			Expect(store).NotTo(BeNil())
			Expect(store.Delete("")).To(BeNil())
		})
	})

	t.Describe("ResolveNames", func() {
		It("should succeed to resolve", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: "id"}, nil),
			)

			// When
			names, err := sut.ResolveNames(testImageName)

			// Then
			// short names resolve against the configured registry
			Expect(err).To(BeNil())
			Expect(len(names)).To(Equal(1))
			Expect(names[0]).To(Equal(testRegistry + "/library/" + testImageName))
		})

		It("should succeed to resolve with full qualified image name", func() {
			// Given
			const imageName = "docker.io/library/busybox:latest"
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: "id"}, nil),
			)

			// When
			names, err := sut.ResolveNames(imageName)

			// Then
			Expect(err).To(BeNil())
			Expect(len(names)).To(Equal(1))
			Expect(names[0]).To(Equal(imageName))
		})

		It("should succeed to resolve with a local copy", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
			)

			// When
			names, err := sut.ResolveNames(testImageName)

			// Then
			Expect(err).To(BeNil())
			Expect(len(names)).To(Equal(1))
			Expect(names[0]).To(Equal(testImageName))
		})

		It("should fail to resolve with invalid image id", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
			)

			// When
			names, err := sut.ResolveNames(testSHA256)

			// Then
			Expect(err).NotTo(BeNil())
			Expect(err).To(Equal(storage.ErrCannotParseImageID))
			Expect(names).To(BeNil())
		})

		It("should fail to resolve with invalid registry name", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
			)

			// When
			names, err := sut.ResolveNames("camelCaseName")

			// Then
			Expect(err).NotTo(BeNil())
			Expect(names).To(BeNil())
		})

		It("should fail to resolve without configured registries", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: "id"}, nil),
			)
			// Create an empty file for the registries config path
			file, err := ioutil.TempFile(".", "registries")
			Expect(err).To(BeNil())
			defer os.Remove(file.Name())
			sut, err := storage.GetImageService(context.Background(),
				&types.SystemContext{SystemRegistriesConfPath: file.Name()},
				storeMock, "", []string{}, []string{})
			Expect(err).To(BeNil())
			Expect(sut).NotTo(BeNil())

			// When
			names, err := sut.ResolveNames(testImageName)

			// Then
			Expect(err).NotTo(BeNil())
			Expect(err).To(Equal(storage.ErrNoRegistriesConfigured))
			Expect(names).To(BeNil())
		})
	})

	t.Describe("RemoveImage", func() {
		It("should succeed to remove an image on first store ref", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().DeleteImage(gomock.Any(), gomock.Any()).
					Return(nil, nil),
			)

			// When
			err := sut.RemoveImage(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).To(BeNil())
		})

		It("should succeed to remove an image on second store ref", func() {
			// Given
			// first lookup fails, forcing the fallback reference path
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).Return(nil, t.TestError),
			)
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().DeleteImage(gomock.Any(), gomock.Any()).
					Return(nil, nil),
			)

			// When
			err := sut.RemoveImage(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).To(BeNil())
		})

		It("should fail to remove an image with invalid name", func() {
			// Given
			// When
			err := sut.RemoveImage(&types.SystemContext{}, "")

			// Then
			Expect(err).NotTo(BeNil())
		})
	})

	t.Describe("UntagImage", func() {
		It("should succeed to untag an image", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().DeleteImage(gomock.Any(), gomock.Any()).
					Return(nil, nil),
			)

			// When
			err := sut.UntagImage(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).To(BeNil())
		})

		It("should fail to untag an image with invalid name", func() {
			// Given
			// When
			err := sut.UntagImage(&types.SystemContext{}, "")

			// Then
			Expect(err).NotTo(BeNil())
		})

		It("should fail to untag an image with invalid name", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).Return(nil, t.TestError),
				storeMock.EXPECT().Image(gomock.Any()).Return(nil, t.TestError),
			)

			// When
			err := sut.UntagImage(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).NotTo(BeNil())
		})

		It("should fail to untag an image with failed reference preparation", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: "otherImage"}, nil),
			)

			// When
			err := sut.UntagImage(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).NotTo(BeNil())
		})

		It("should fail to untag an image with docker reference", func() {
			// Given
			const imageName = "docker://localhost/busybox:latest"
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
			)

			// When
			err := sut.UntagImage(&types.SystemContext{}, imageName)

			// Then
			Expect(err).NotTo(BeNil())
		})

		It("should fail to untag an image with digest docker reference", func() {
			// Given
			const imageName = "docker://localhost/busybox@sha256:" + testSHA256
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
			)

			// When
			err := sut.UntagImage(&types.SystemContext{}, imageName)

			// Then
			Expect(err).NotTo(BeNil())
		})

		It("should fail to untag an image with multiple names", func() {
			// Given
			const imageName = "docker://localhost/busybox:latest"
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{
						ID:    testImageName,
						Names: []string{"a", "b", "c"},
					}, nil),
				storeMock.EXPECT().SetNames(gomock.Any(), gomock.Any()).
					Return(t.TestError),
			)

			// When
			err := sut.UntagImage(&types.SystemContext{}, imageName)

			// Then
			Expect(err).NotTo(BeNil())
		})
	})

	t.Describe("ImageStatus", func() {
		It("should succeed to get the image status with digest", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName,
						Names: []string{"a@sha256:" + testSHA256,
							"b@sha256:" + testSHA256, "c"},
					}, nil),
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
					Return(testManifest, nil),
				storeMock.EXPECT().ListImageBigData(gomock.Any()).
					Return([]string{""}, nil),
				storeMock.EXPECT().ImageBigDataSize(gomock.Any(), gomock.Any()).
					Return(int64(0), nil),
			)
			mockListImage()
			gomock.InOrder(
				storeMock.EXPECT().ImageBigDataDigest(gomock.Any(), gomock.Any()).
					Return(digest.Digest("a:"+testSHA256), nil),
			)

			// When
			res, err := sut.ImageStatus(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).To(BeNil())
			Expect(res).NotTo(BeNil())
		})

		It("should fail to get on wrong reference", func() {
			// Given
			// When
			res, err := sut.ImageStatus(&types.SystemContext{}, "")

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail to get on wrong store image", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).Return(nil, t.TestError),
				storeMock.EXPECT().Image(gomock.Any()).Return(nil, t.TestError),
			)

			// When
			res, err := sut.ImageStatus(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail to get on wrong image search", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
					Return(nil, t.TestError),
			)

			// When
			res, err := sut.ImageStatus(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail to get on wrong image config digest", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
					Return(testManifest, nil),
				storeMock.EXPECT().ListImageBigData(gomock.Any()).
					Return([]string{""}, nil),
				storeMock.EXPECT().ImageBigDataSize(gomock.Any(), gomock.Any()).
					Return(int64(0), nil),
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
				storeMock.EXPECT().ListImageBigData(gomock.Any()).
					Return([]string{""}, nil),
				storeMock.EXPECT().ImageBigDataSize(gomock.Any(), gomock.Any()).
					Return(int64(0), nil),
				storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
					Return(nil, t.TestError),
			)

			// When
			res, err := sut.ImageStatus(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})
	})

	t.Describe("ListImages", func() {
		It("should succeed to list images without filter", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Images().Return([]cs.Image{}, nil),
			)

			// When
			res, err := sut.ListImages(&types.SystemContext{}, "")

			// Then
			Expect(err).To(BeNil())
			Expect(len(res)).To(Equal(0))
		})

		It("should succeed to list multiple images without filter", func() {
			// Given
			// mockLoop covers the per-image metadata retrieval in the
			// listing loop
			mockLoop := func() {
				gomock.InOrder(
					storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
						Return(testManifest, nil),
					storeMock.EXPECT().ListImageBigData(gomock.Any()).
						Return([]string{""}, nil),
					storeMock.EXPECT().ImageBigDataSize(gomock.Any(), gomock.Any()).
						Return(int64(0), nil),
					storeMock.EXPECT().Image(gomock.Any()).
						Return(&cs.Image{ID: testImageName}, nil),
					storeMock.EXPECT().ImageBigDataDigest(gomock.Any(), gomock.Any()).
						Return(digest.Digest(""), nil),
				)
			}
			gomock.InOrder(
				storeMock.EXPECT().Images().Return(
					[]cs.Image{
						{ID: testSHA256, Names: []string{"a", "b", "c@sha256:" + testSHA256}},
						{ID: testSHA256}}, nil),
			)
			mockParseStoreReference()
			mockListImage()
			mockLoop()
			mockParseStoreReference()
			mockListImage()
			mockLoop()

			// When
			res, err := sut.ListImages(&types.SystemContext{}, "")

			// Then
			Expect(err).To(BeNil())
			Expect(len(res)).To(Equal(2))
		})

		It("should succeed to list images with filter", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
			)
			mockListImage()
			gomock.InOrder(
				storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
					Return(testManifest, nil),
				storeMock.EXPECT().ListImageBigData(gomock.Any()).
					Return([]string{""}, nil),
				storeMock.EXPECT().ImageBigDataSize(gomock.Any(), gomock.Any()).
					Return(int64(0), nil),
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName,
						Names:  []string{"a", "b", "c"},
						Digest: "digest"}, nil),
			)

			// When
			res, err := sut.ListImages(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).To(BeNil())
			Expect(len(res)).To(Equal(1))
			Expect(res[0].ID).To(Equal(testImageName))
		})

		It("should succeed to list images on wrong image retrieval", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).Return(nil, t.TestError),
				storeMock.EXPECT().Image(gomock.Any()).Return(nil, t.TestError),
			)

			// When
			res, err := sut.ListImages(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).To(BeNil())
			Expect(len(res)).To(Equal(0))
		})

		It("should fail to list images with filter on wrong reference", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).Return(nil, t.TestError),
			)

			// When
			res, err := sut.ListImages(&types.SystemContext{}, "wrong://image")

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail to list images with filter on wrong append cache", func() {
			// Given
			mockGetRef()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(&cs.Image{ID: testImageName}, nil),
			)
			mockListImage()
			gomock.InOrder(
				storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
					Return(nil, t.TestError),
			)

			// When
			res, err := sut.ListImages(&types.SystemContext{}, testImageName)

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail to list images witout filter on wrong store", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Images().Return(nil, t.TestError),
			)

			// When
			res, err := sut.ListImages(&types.SystemContext{}, "")

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail to list multiple images without filter on invalid ref", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Images().Return(
					[]cs.Image{{ID: ""}}, nil),
			)

			// When
			res, err := sut.ListImages(&types.SystemContext{}, "")

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail to list multiple images without filter on append", func() {
			// Given
			gomock.InOrder(
				storeMock.EXPECT().Images().Return(
					[]cs.Image{{ID: testSHA256}}, nil),
			)
			mockParseStoreReference()
			gomock.InOrder(
				storeMock.EXPECT().Image(gomock.Any()).
					Return(nil, t.TestError),
			)

			// When
			res, err := sut.ListImages(&types.SystemContext{}, "")

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})
	})

	t.Describe("PrepareImage", func() {
		It("should succeed with testimage", func() {
			// Given
			const imageName = "tarball:../../test/testdata/image.tar"

			// When
			res, err := sut.PrepareImage(imageName, &copy.Options{})

			// Then
			Expect(err).To(BeNil())
			Expect(res).NotTo(BeNil())
		})

		It("should fail on invalid image name", func() {
			// Given
			// When
			res, err := sut.PrepareImage("", &copy.Options{})

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})
	})

	t.Describe("PullImage", func() {
		It("should fail on invalid image name", func() {
			// Given
			// When
			res, err := sut.PullImage(&types.SystemContext{}, "", &copy.Options{})

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail on invalid policy path", func() {
			// Given
			// When
			res, err := sut.PullImage(&types.SystemContext{
				SignaturePolicyPath: "/not-existing",
			}, "", &copy.Options{})

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail on copy image", func() {
			// Given
			const imageName = "docker://localhost/busybox:latest"
			mockParseStoreReference()

			// When
			res, err := sut.PullImage(&types.SystemContext{
				SignaturePolicyPath: "../../test/policy.json",
			}, imageName, &copy.Options{})

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})

		It("should fail on canonical copy image", func() {
			// Given
			const imageName = "docker://localhost/busybox@sha256:" + testSHA256
			mockParseStoreReference()

			// When
			res, err := sut.PullImage(&types.SystemContext{
				SignaturePolicyPath: "../../test/policy.json",
			}, imageName, &copy.Options{})

			// Then
			Expect(err).NotTo(BeNil())
			Expect(res).To(BeNil())
		})
	})
})
package quark type PartialHandler struct { Prefixes []string ExcludedPrefixes []string Preds []func(*Context) bool Handler Handler } func Partial(h interface{}) *PartialHandler { return &PartialHandler{Handler: handlerOf(h)} } func (h *PartialHandler) For(prefixes ...string) *PartialHandler { if len(prefixes) > 0 { h.Prefixes = append(h.Prefixes, prefixes...) } return h } func (h *PartialHandler) NotFor(prefixes ...string) *PartialHandler { if len(prefixes) > 0 { h.ExcludedPrefixes = append(h.ExcludedPrefixes, prefixes...) } return h } func (h *PartialHandler) AddPred(pred func(*Context)bool) *PartialHandler { if pred != nil { h.Preds = append(h.Preds, pred) } return h } func (h *PartialHandler) HandleRequest(hc *Context) { for _, prefix := range h.ExcludedPrefixes { if _, ok := hc.Request.HasPrefix(prefix); ok { hc.Next() return } } for _, prefix := range h.Prefixes { if _, ok := hc.Request.HasPrefix(prefix); !ok { hc.Next() return } } for _, pred := range h.Preds { if !pred(hc) { hc.Next() return } } h.Handler.HandleRequest(hc) }
// +build debug package west const debug = true
package main

import (
	"io"
	"log"
	"net"
	"net/http"
	"net/http/httputil"
)

// Proxy is an HTTP CONNECT tunnel proxy: it hijacks the client connection
// and splices raw TCP traffic to the requested host.
type Proxy struct{}

// NewProxy returns a new Proxy.
func NewProxy() *Proxy {
	return &Proxy{}
}

// ServeHTTP is the main handler for all requests. Non-CONNECT requests are
// rejected with 405; CONNECT requests are hijacked and tunneled.
func (p *Proxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	dump, _ := httputil.DumpRequest(req, false)
	log.Printf("Received request from %s\n%s", req.RemoteAddr, string(dump))

	if req.Method != "CONNECT" {
		rw.WriteHeader(http.StatusMethodNotAllowed)
		rw.Write([]byte("This is a http tunnel proxy, only CONNECT method is allowed."))
		return
	}

	// Step 1: take over the raw client connection.
	host := req.URL.Host
	hij, ok := rw.(http.Hijacker)
	if !ok {
		rw.WriteHeader(http.StatusInternalServerError)
		rw.Write([]byte("HTTP Server does not support hijacking"))
		return
	}

	client, _, err := hij.Hijack()
	if err != nil {
		return
	}

	// Step 2: dial the requested upstream host.
	server, err := net.Dial("tcp", host)
	if err != nil {
		// BUGFIX: rw is unusable after a successful Hijack, so the old
		// rw.WriteHeader call never reached the client. Answer on the raw
		// connection instead, and close it so the client does not hang.
		client.Write([]byte("HTTP/1.0 502 Bad Gateway\r\n\r\nDial failed"))
		client.Close()
		return
	}
	log.Printf("Dial %s", host)
	client.Write([]byte("HTTP/1.0 200 Connection Established\r\n\r\n"))

	// Step 3: splice both directions; close both ends when either side
	// finishes so neither connection leaks.
	go func() {
		io.Copy(server, client)
		server.Close()
	}()
	io.Copy(client, server)
	client.Close()
}

func main() {
	proxy := NewProxy()
	log.Printf("Listen on 0.0.0.0:8080")
	log.Fatal(http.ListenAndServe("0.0.0.0:8080", proxy))
}

/*
nc 127.0.0.1 8080
CONNECT icanhazip.com:80 HTTP/1.1
Host: icanhazip.com:80

GET http://icanhazip.com HTTP/1.1
Host: icanhazip.com
*/
package matcher

import (
	"bufio"
	"io"
)

// SequenceMatcherOptions are the options for creating a new SequenceMatcher
type SequenceMatcherOptions struct {
	precedingCharCount  int    // how many context bytes to report before each match
	succeedingCharCount int    // how many context bytes to report after each match
	sequence            []byte // the exact byte sequence to search for
	eos                 rune   // end-of-stream sentinel; reading it stops the scan
}

// MatchResult represents the result of one found match
type MatchResult struct {
	preceding  string // context before the match (may be shorter near stream start)
	match      string // the matched sequence itself
	succeeding string // context after the match (may be shorter near stream end)
}

// SequenceMatcher is used for finding sequences in a stream with left and right contexts.
//
// NOTE(review): methods use value receivers; mutation still works only
// because all mutable state (the three queues) sits behind pointers.
type SequenceMatcher struct {
	reader  io.Reader
	options SequenceMatcherOptions
	ch      chan MatchResult // results are delivered to the consumer on this channel
	// SequenceMatcher holds a queue for each of Succeeding, Current and Preceding elements.
	preQueue      *queue
	matchingQueue *queue
	sucQueue      *queue
}

// NewSequenceMatcher returns a new SequenceMatcher for finding sequences in a stream
func NewSequenceMatcher(reader io.Reader, options SequenceMatcherOptions) SequenceMatcher {
	return SequenceMatcher{
		reader:        reader,
		options:       options,
		ch:            make(chan MatchResult, 1),
		preQueue:      newQueue(options.precedingCharCount),
		matchingQueue: newQueue(len(options.sequence)),
		sucQueue:      newQueue(options.succeedingCharCount),
	}
}

// sendMatch sends current Match to the channel
func (m SequenceMatcher) sendMatch() {
	m.ch <- MatchResult{
		preceding:  string(m.preQueue.getElements()),
		match:      string(m.matchingQueue.getElements()),
		succeeding: string(m.sucQueue.getElements()),
	}
}

// addByte adds byte element to SucceedingQueue.
// Overflowed element from the SucceedingQueue will be added to MatchingQueue.
// Overflowed element of MatchingQueue will also be added to PrecedingQueue.
// Overflowed element of PrecedingQueue will be thrown away
func (m SequenceMatcher) addByte(b byte) {
	// Bytes cascade through the three fixed-size windows: suc -> matching -> pre.
	b, overflowed := m.sucQueue.add(b)
	if !overflowed {
		return
	}
	b, overflowed = m.matchingQueue.add(b)
	if !overflowed {
		return
	}
	_, _ = m.preQueue.add(b)
}

// pop pops one element from SucceedingQueue adds it to MatchingQueue.
// Overflowed element of MatchingQueue will be added to PrecedingQueue.
// Overflowed element of PrecedingQueue will be thrown away
func (m SequenceMatcher) pop() {
	// Used after EOF/EOS to flush remaining succeeding bytes through the
	// matching window so trailing matches are still detected.
	// NOTE(review): when matchingQueue does not overflow, the value passed on
	// to preQueue is whatever add() returned in the no-overflow case — verify
	// queue.add's contract for that situation.
	b := m.sucQueue.pop()
	b, _ = m.matchingQueue.add(b)
	m.preQueue.add(b)
}

// isMatch checks if current elements in the MatchingQueue is a valid match for wanted sequence
func (m SequenceMatcher) isMatch() bool {
	sequence := m.matchingQueue.getElements()
	// The window must be completely filled before it can match.
	if len(sequence) != len(m.options.sequence) {
		return false
	}
	for i := 0; i < len(sequence); i++ {
		if sequence[i] != m.options.sequence[i] {
			return false
		}
	}
	return true
}

// Run starts the matching process
func (m SequenceMatcher) Run() <-chan MatchResult {
	// Matching happens on a separate goroutine; consumers range over the
	// returned channel, which is closed when the stream is exhausted.
	go m.readStreamAndMatchSequences()
	return m.ch
}

// readStreamAndMatchSequences reads from stream rune-by-rune until encountering EOS or EOF.
func (m SequenceMatcher) readStreamAndMatchSequences() {
	defer close(m.ch)
	reader := bufio.NewReader(m.reader)
	// Add new elements to queue one-by-one
	// Check if current sequence is a Match
	for {
		char, size, err := reader.ReadRune()
		if err != nil && err != io.EOF {
			panic(err)
		}
		if err == io.EOF || char == m.options.eos {
			break
		}
		// if read rune is not 1-byte long, it means there are non-ascii characters in the stream other than EOS
		if size != 1 {
			panic("encountered non ascii character which is not EOS either. Program only supports ascii charset for stream except EOS.")
		}
		m.addByte(byte(char))
		if m.isMatch() {
			m.sendMatch()
		}
	}
	// turn the wheel for number of elements in the SucceedingQueue too
	// and check possible matches
	l := len(m.sucQueue.getElements())
	for i := 0; i < l; i++ {
		m.pop()
		if m.isMatch() {
			m.sendMatch()
		}
	}
}
package Workflows

// NOTE(review): Go package names are conventionally lowercase ("workflows");
// renaming would break importers, so it is only flagged here.

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/pkg/errors"
)

// FSM models a (possibly nondeterministic) finite state machine.
type FSM interface {
	// Inputs feeds each input in order and reports whether the machine
	// ends in a final state.
	Inputs(inputs ...Input) (bool, error)
	// Input feeds a single input and returns the resulting state.
	Input(input Input) (State, error)
	// IsInFinalState reports whether the current state is a final state.
	IsInFinalState() bool
}

// NewFSM builds an FSM from the public Config, validating states, final
// states, transitions and the start state.
func NewFSM(fsmConfig *Config) (FSM, error) {
	// NOTE(review): seeding the global PRNG in a constructor is a
	// package-level side effect, and rand.Seed is deprecated as of Go 1.20;
	// a per-FSM rand.Rand would be cleaner.
	rand.Seed(time.Now().Unix())

	config, err := parseConfig(fsmConfig)
	if err != nil {
		return nil, err
	}

	return newFSM(config)
}

// newFSM wires a validated internal config into a concrete fsm.
// Order matters: states must exist before final states, transitions and the
// start state are validated against them.
func newFSM(config *config) (FSM, error) {
	fsm := &fsm{
		alphabet: config.alphabet,
	}

	fsm.addStates(config.states)

	if err := fsm.addFinalStates(config.finalStates); err != nil {
		return nil, err
	}

	if err := fsm.addTransitions(config.transitions); err != nil {
		return nil, err
	}

	if err := fsm.setStartState(config.startState); err != nil {
		return nil, err
	}

	return fsm, nil
}

// fsm is the concrete FSM implementation.
type fsm struct {
	alphabet     Alphabet
	currentState State
	states       map[State]struct{}
	finalStates  map[State]struct{}
	// transitions maps start state -> input -> possible end states; multiple
	// end states for one (state, input) pair makes the machine nondeterministic.
	transitions map[State]map[Input][]State
}

// Inputs feeds each input in sequence, wrapping any error with the index of
// the offending input, and reports whether the machine ended in a final state.
func (fsm *fsm) Inputs(inputs ...Input) (bool, error) {
	for i, input := range inputs {
		if _, err := fsm.Input(input); err != nil {
			return false, errors.Wrap(err, fmt.Sprintf("failure at input %d", i))
		}
	}

	return fsm.IsInFinalState(), nil
}

// Input validates the input, picks one of the possible successor states at
// random (for nondeterministic transitions), fires the exit/entry events and
// returns the new current state.
func (fsm *fsm) Input(input Input) (State, error) {
	if !fsm.alphabet.Valid(input) {
		return nil, errors.New("invalid input: not within alphabet")
	}

	// Looking up in a nil inner map safely yields the zero value, so a state
	// with no outgoing transitions is also rejected here.
	nextStates, valid := fsm.transitions[fsm.currentState][input]
	if !valid {
		return nil, errors.New("invalid input: invalid transition for current state")
	}

	nextStateIndex := rand.Intn(len(nextStates))
	nextState := nextStates[nextStateIndex]

	fsm.currentState.RunExitEvent()
	fsm.setNewState(nextState)

	return fsm.currentState, nil
}

// setStartState validates the start state and makes it current, firing its
// entry event via setNewState.
func (fsm *fsm) setStartState(newState State) error {
	if _, ok := fsm.states[newState]; !ok {
		return errors.New("'startState' must be a subset of 'states'")
	}

	fsm.setNewState(newState)

	return nil
}

// setNewState installs newState as current and runs its entry event.
func (fsm *fsm) setNewState(newState State) {
	fsm.currentState = newState
	fsm.currentState.RunEntryEvent()

	fmt.Println("> Current State:", fsm.currentState)
}

// IsInFinalState reports whether the current state is one of the final states.
func (fsm *fsm) IsInFinalState() bool {
	_, finalState := fsm.finalStates[fsm.currentState]
	return finalState
}

// addStates records the state set; no validation is needed here.
func (fsm *fsm) addStates(states []State) {
	fsm.states = make(map[State]struct{}, len(states))
	for _, state := range states {
		fsm.states[state] = struct{}{}
	}
}

// addFinalStates records the final states, requiring each to be a known state.
func (fsm *fsm) addFinalStates(finalStates []State) error {
	fsm.finalStates = make(map[State]struct{}, len(finalStates))
	for _, finalState := range finalStates {
		if _, ok := fsm.states[finalState]; !ok {
			return errors.New("'finalStates' must be a subset of 'states'")
		}

		fsm.finalStates[finalState] = struct{}{}
	}

	return nil
}

// addTransitions validates and installs each transition in turn.
func (fsm *fsm) addTransitions(transitions []Transition) error {
	fsm.transitions = map[State]map[Input][]State{}
	for _, transition := range transitions {
		err := fsm.addTransition(transition)
		if err != nil {
			return err
		}
	}

	return nil
}

// addTransition validates one transition and appends its end state to the
// possible successors for (start state, input).
func (fsm *fsm) addTransition(transition Transition) error {
	if err := fsm.validateTransition(transition); err != nil {
		return errors.Wrap(err, "invalid transition")
	}

	if _, ok := fsm.transitions[transition.StartState()]; !ok {
		fsm.transitions[transition.StartState()] = map[Input][]State{}
	}

	endStates := fsm.transitions[transition.StartState()][transition.Input()]
	endStates = append(endStates, transition.EndState())
	fsm.transitions[transition.StartState()][transition.Input()] = endStates

	return nil
}

// validateTransition checks that a transition's states belong to the state
// set and its input belongs to the alphabet.
func (fsm *fsm) validateTransition(transition Transition) error {
	if _, valid := fsm.states[transition.StartState()]; !valid {
		return errors.New("startState not within states")
	}

	if !fsm.alphabet.Valid(transition.Input()) {
		return errors.New("input not within alphabet")
	}

	if _, valid := fsm.states[transition.EndState()]; !valid {
		return errors.New("endState not within states")
	}

	return nil
}
package heap

import "jean/classfile"

// InterfaceMethodRef is a runtime constant-pool reference to an interface
// method, resolved lazily from its class-file form.
type InterfaceMethodRef struct {
	MemberRef
	method *Method // cached resolution result; nil until first resolved
}

// newInterfaceMethodRef builds a runtime interface-method reference from the
// class-file CONSTANT_InterfaceMethodref_info entry.
func newInterfaceMethodRef(rtCp *ConstantPool, info *classfile.ConstantInterfaceMethodrefInfo) *InterfaceMethodRef {
	ref := &InterfaceMethodRef{}
	ref.rtCp = rtCp
	ref.copyMemberRefInfo(&info.ConstantMemberrefInfo)
	return ref
}

// ResolvedInterfaceMethod returns the referenced method, resolving it on
// first use and caching the result for subsequent calls.
func (imr *InterfaceMethodRef) ResolvedInterfaceMethod() *Method {
	if imr.method == nil {
		imr.resolveInterfaceMethodRef()
	}
	return imr.method
}

// resolveInterfaceMethodRef performs interface-method resolution, panicking
// with the JVM-mandated error names when the target class is not an
// interface, the method is missing, or it is not accessible from the class
// whose constant pool holds this reference.
func (imr *InterfaceMethodRef) resolveInterfaceMethodRef() {
	d := imr.rtCp.class // the class performing the resolution
	c := imr.ResolvedClass()
	if !c.IsInterface() {
		panic("java.lang.IncompatibleClassChangeError")
	}

	method := lookupInterfaceMethod(c, imr.name, imr.descriptor)
	if method == nil {
		panic("java.lang.NoSuchMethodError")
	}
	if !method.isAccessibleTo(d) {
		panic("java.lang.IllegalAccessError")
	}

	imr.method = method
}

// lookupInterfaceMethod searches the interface's own methods first, then
// falls back to its superinterfaces.
func lookupInterfaceMethod(iface *Class, name, descriptor string) *Method {
	for _, method := range iface.methods {
		if method.name == name && method.descriptor == descriptor {
			return method
		}
	}

	return lookupMethodInInterface(iface.interfaces, name, descriptor)
}
package operator

import (
	"context"
	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/jinghzhu/KubernetesPodOperator/pkg/types"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/workqueue"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Operator is a watch on Pods.
type Operator struct {
	kubeClient *kubernetes.Clientset
	indexer    cache.Indexer    // A cache of Pods.
	watcher    cache.Controller // Watch changes to all Pods.
	podsQueue  workqueue.RateLimitingInterface
	context    context.Context
}

// New returns an instance of Pod Operator watching the given namespace.
//
// NOTE(review): masterURL is currently unused — BuildConfigFromFlags is
// called with an empty master URL; confirm whether it should be passed
// through.
func New(masterURL, kubeconfigPath, namespace string) (*Operator, error) {
	fmt.Println("Ready to new an Operator object for kubeconfig " + kubeconfigPath + " and namespace " + namespace)

	clientConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		// BUG FIX: error message previously read "and ane namespace".
		fmt.Printf(
			"Fail to get RESTClient config by kubeconfig %s and namespace %s: %+v\n",
			kubeconfigPath,
			namespace,
			err,
		)

		return nil, err
	}

	client, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		fmt.Printf("Fail to create a new Kubernetes Clientset: %+v\n", err)

		return nil, err
	}

	// Tag the operator's context with a unique ID so log lines and child
	// contexts can be correlated with this operator instance.
	id, _ := uuid.NewRandom()
	ctx := context.WithValue(types.ContextRoot, "operator-id", id)
	op := &Operator{
		kubeClient: client,
		// Exponential backoff from 100ms up to 5s for re-queued pod keys.
		podsQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(100*time.Millisecond, 5*time.Second), "pods"),
		context:   ctx,
	}

	// fieldSelector := labels.Set{"keys": string(nodeName)}.AsSelector()
	ListOption := metav1.ListOptions{}
	op.indexer, op.watcher = cache.NewIndexerInformer(
		// cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", namespace, fieldSelector),
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options = ListOption
				return op.kubeClient.CoreV1().Pods(namespace).List(ctx, options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options = ListOption
				return op.kubeClient.CoreV1().Pods(namespace).Watch(ctx, options)
			},
		},
		&corev1.Pod{},
		0, // resync period 0: rely purely on watch events.
		cache.ResourceEventHandlerFuncs{
			AddFunc:    op.onAdd,
			UpdateFunc: op.onUpdate,
			DeleteFunc: op.onDelete,
		},
		cache.Indexers{},
	)

	return op, nil
}

// GetContext returns the context of the Operator instance.
func (op *Operator) GetContext() context.Context {
	return op.context
}
package core

import (
	"sync"
	"testing"

	"github.com/google/uuid"
	"github.com/jrapoport/gothic/core/audit"
	"github.com/jrapoport/gothic/core/context"
	"github.com/jrapoport/gothic/models/auditlog"
	"github.com/jrapoport/gothic/models/types"
	"github.com/jrapoport/gothic/models/types/key"
	"github.com/jrapoport/gothic/store"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// testLog pairs the inputs used to create an audit log entry with the ID it
// was assigned, so tests can fetch the entry back and compare.
type testLog struct {
	act    auditlog.Action
	uid    uuid.UUID
	fields types.Map
	logID  uint
}

var (
	testUID  = uuid.New()          // shared user ID reused by a subset of fixtures
	testBook = uuid.New().String() // distinctive field value for filter tests
)

// Fixture state shared across the parallel tests; populated exactly once.
var _testLogs []testLog
var _logAPI *API
var once sync.Once

// setupLogs lazily creates the shared API instance and a battery of audit
// log entries (one per action, plus variants with a shared user ID and with
// "dr_suess"/"book" fields), returning both for the calling test.
func setupLogs(t *testing.T) (*API, []testLog) {
	once.Do(func() {
		a := apiWithTempDB(t)
		var tests = []testLog{
			{auditlog.Startup, uuid.New(), nil, 0},
			{auditlog.Shutdown, uuid.New(), nil, 0},
			{auditlog.Signup, uuid.New(), nil, 0},
			{auditlog.ConfirmSent, uuid.New(), nil, 0},
			{auditlog.Confirmed, uuid.New(), nil, 0},
			{auditlog.Granted, uuid.New(), nil, 0},
			{auditlog.Revoked, uuid.New(), nil, 0},
			{auditlog.RevokedAll, uuid.New(), nil, 0},
			{auditlog.Login, uuid.New(), nil, 0},
			{auditlog.Logout, uuid.New(), nil, 0},
			{auditlog.Password, uuid.New(), nil, 0},
			{auditlog.Email, uuid.New(), nil, 0},
			{auditlog.Updated, uuid.New(), nil, 0},
		}
		// Duplicate every entry with the shared testUID. Appending while
		// ranging is safe: range evaluates the slice header once, so only
		// the original elements are visited.
		for _, test := range tests {
			tst := test
			tst.uid = testUID
			tests = append(tests, tst)
		}
		// For each "book" value, duplicate the (already grown) set with
		// dr_suess/book fields attached. Each outer pass snapshots the
		// slice at loop entry, so the fixture set grows multiplicatively.
		for _, bk := range []interface{}{
			"thing2", testBook, uuid.New().String(),
		} {
			for _, test := range tests {
				test.fields = types.Map{
					"dr_suess": "thing1",
					"book":     bk,
				}
				tests = append(tests, test)
			}
		}
		ctx := context.Background()
		// Persist every fixture in one transaction and record its log ID.
		err := a.conn.Transaction(func(tx *store.Connection) error {
			for i, test := range tests {
				le, err := audit.CreateLogEntry(ctx, tx, test.act, test.uid, test.fields)
				require.NoError(t, err)
				tests[i].logID = le.ID
			}
			return nil
		})
		require.NoError(t, err)
		_testLogs = tests
		_logAPI = a
	})
	require.NotNil(t, _logAPI)
	require.NotEmpty(t, _testLogs)
	return _logAPI, _testLogs
}

// TestAPI_GetAuditLog verifies that a bogus ID errors and that every created
// entry round-trips with its user ID, action and fields intact.
func TestAPI_GetAuditLog(t *testing.T) {
	t.Parallel()
	a, tests := setupLogs(t)
	_, err := a.GetAuditLog(nil, 9999)
	assert.Error(t, err)
	for _, test := range tests {
		le, err := a.GetAuditLog(nil, test.logID)
		assert.NoError(t, err)
		assert.EqualValues(t, test.uid, le.UserID)
		assert.EqualValues(t, test.act, le.Action)
		for k, v := range test.fields {
			assert.EqualValues(t, v, le.Fields[k])
		}
	}
}

// TestAPI_SearchAuditLogs runs several filter combinations and checks each
// returned log against the corresponding predicate.
func TestAPI_SearchAuditLogs(t *testing.T) {
	t.Parallel()
	a, _ := setupLogs(t)
	tests := []struct {
		filters store.Filters
		comp    func(log *auditlog.AuditLog)
	}{
		{
			store.Filters{
				key.UserID: testUID.String(),
			},
			func(log *auditlog.AuditLog) {
				assert.Equal(t, testUID, log.UserID)
			},
		},
		{
			store.Filters{
				key.Action: auditlog.Startup.String(),
			},
			func(log *auditlog.AuditLog) {
				assert.Equal(t, auditlog.Startup, log.Action)
			},
		},
		{
			store.Filters{
				"dr_suess": "thing1",
			},
			func(log *auditlog.AuditLog) {
				assert.Equal(t, "thing1", log.Fields["dr_suess"])
			},
		},
		{
			store.Filters{
				key.Type:   auditlog.Account.String(),
				"dr_suess": "thing1",
			},
			func(log *auditlog.AuditLog) {
				assert.Equal(t, auditlog.Account, log.Type)
				assert.Equal(t, "thing1", log.Fields["dr_suess"])
			},
		},
		{
			store.Filters{
				"dr_suess": "thing1",
				"book":     testBook,
			},
			func(log *auditlog.AuditLog) {
				assert.Equal(t, "thing1", log.Fields["dr_suess"])
				assert.Equal(t, testBook, log.Fields["book"])
			},
		},
	}
	for _, test := range tests {
		logs, err := a.SearchAuditLogs(nil, test.filters, nil)
		assert.NoError(t, err)
		assert.Greater(t, len(logs), 0)
		for _, log := range logs {
			test.comp(log)
		}
	}
}

// TestAPI_SearchAuditLogs_Sort searches the same filters in descending and
// then ascending order and compares the returned ID sequences.
func TestAPI_SearchAuditLogs_Sort(t *testing.T) {
	t.Parallel()
	filters := store.Filters{
		"dr_suess": []string{"thing1"},
		"book":     []string{testBook},
	}
	a, _ := setupLogs(t)
	ctx := testContext(a)
	ctx.SetSort(store.Descending)
	logs, err := a.SearchAuditLogs(ctx, filters, nil)
	assert.NoError(t, err)
	assert.Greater(t, len(logs), 0)
	// reverse the indexes
	// NOTE(review): although this loop iterates backwards, it assigns at the
	// same index i, so testIdx is NOT actually reversed relative to logs; the
	// final Equal then compares descending vs ascending results element-for-
	// element — confirm this is the intended assertion.
	testIdx := make([]uint, len(logs))
	for i := len(logs) - 1; i >= 0; i-- {
		log := logs[i]
		assert.Equal(t, "thing1", log.Fields["dr_suess"])
		assert.Equal(t, testBook, log.Fields["book"])
		testIdx[i] = log.ID
	}
	// reverse the sort (and the indexes)
	ctx.SetSort(store.Ascending)
	logs, err = a.SearchAuditLogs(ctx, filters, nil)
	assert.NoError(t, err)
	assert.Greater(t, len(logs), 0)
	require.Len(t, logs, len(testIdx))
	descIdx := make([]uint, len(logs))
	for i, log := range logs {
		assert.Equal(t, "thing1", log.Fields["dr_suess"])
		assert.Equal(t, testBook, log.Fields["book"])
		descIdx[i] = log.ID
	}
	assert.Equal(t, testIdx, descIdx)
}
package supportbundle

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	cursor "github.com/ahmetalpbalkan/go-cursor"
	"github.com/fatih/color"
	"github.com/pkg/errors"
	analyzer "github.com/replicatedhq/troubleshoot/pkg/analyze"
	troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
	"github.com/replicatedhq/troubleshoot/pkg/collect"
	"k8s.io/client-go/rest"
)

// SupportBundleCreateOpts carries the knobs for a support bundle collection run.
type SupportBundleCreateOpts struct {
	CollectorProgressCallback func(chan interface{}, string) // invoked to report per-collector progress
	CollectWithoutPermissions bool                           // continue even when RBAC denies some collectors
	HttpClient                *http.Client                   // client used for remote spec/redactor fetches
	KubernetesRestConfig      *rest.Config                   // required: cluster connection for collectors
	Namespace                 string                         // namespace scope for collection
	ProgressChan              chan interface{}               // required: receives progress/error events
	SinceTime                 *time.Time                     // limit log collection to entries after this time
	FromCLI                   bool                           // changes archive location and error reporting (see below)
}

// SupportBundleResponse summarizes the outcome of a collection run.
type SupportBundleResponse struct {
	AnalyzerResults []*analyzer.AnalyzeResult // results of running the spec's analyzers, if any
	ArchivePath     string                    // path of the generated .tar.gz archive
	FileUploaded    bool                      // true when an AfterCollection upload succeeded
}

// CollectSupportBundleFromSpec collects support bundle from start to finish, including running
// collectors, analyzers and after collection steps. Input arguments are specifications.
// if FromCLI option is set to true, the output is the name of the archive on disk in the cwd.
// if FromCLI option is set to false, the support bundle is archived in the OS temp folder (os.TempDir()).
func CollectSupportBundleFromSpec(spec *troubleshootv1beta2.SupportBundleSpec, additionalRedactors *troubleshootv1beta2.Redactor, opts SupportBundleCreateOpts) (*SupportBundleResponse, error) {
	resultsResponse := SupportBundleResponse{}

	// Both the rest config and the progress channel are hard requirements;
	// fail fast before creating any temp state.
	if opts.KubernetesRestConfig == nil {
		return nil, errors.New("did not receive kube rest config")
	}

	if opts.ProgressChan == nil {
		return nil, errors.New("did not receive collector progress chan")
	}

	// Scratch directory for the unarchived bundle; removed when done.
	// NOTE(review): ioutil.TempDir is deprecated since Go 1.16 in favor of
	// os.MkdirTemp — switching would leave io/ioutil unused, so it is only
	// flagged here.
	tmpDir, err := ioutil.TempDir("", "supportbundle")
	if err != nil {
		return nil, errors.Wrap(err, "create temp dir")
	}
	defer os.RemoveAll(tmpDir)

	// Archive lands in cwd for CLI runs, in os.TempDir() otherwise.
	basename := fmt.Sprintf("support-bundle-%s", time.Now().Format("2006-01-02T15_04_05"))
	if !opts.FromCLI {
		basename = filepath.Join(os.TempDir(), basename)
	}

	filename, err := findFileName(basename, "tar.gz")
	if err != nil {
		return nil, errors.Wrap(err, "find file name")
	}
	resultsResponse.ArchivePath = filename

	bundlePath := filepath.Join(tmpDir, strings.TrimSuffix(filename, ".tar.gz"))
	// NOTE(review): 0777 is world-writable; confirm whether a tighter mode
	// would suffice for the scratch bundle dir.
	if err := os.MkdirAll(bundlePath, 0777); err != nil {
		return nil, errors.Wrap(err, "create bundle dir")
	}

	// Run collectors
	files, err := runCollectors(spec.Collectors, additionalRedactors, bundlePath, opts)
	if err != nil {
		return nil, errors.Wrap(err, "failed to run collectors")
	}

	version, err := getVersionFile()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get version file")
	}

	err = files.SaveResult(bundlePath, VersionFilename, version)
	if err != nil {
		return nil, errors.Wrap(err, "failed to write version")
	}

	// Run Analyzers. For CLI runs an analysis failure is reported to the
	// terminal but does not abort the bundle; programmatic callers get the
	// error back.
	analyzeResults, err := AnalyzeSupportBundle(spec, bundlePath)
	if err != nil {
		if opts.FromCLI {
			c := color.New(color.FgHiRed)
			c.Printf("%s\r * %v\n", cursor.ClearEntireLine(), err)
			// don't die
		} else {
			return nil, errors.Wrap(err, "failed to run analysis")
		}
	}
	resultsResponse.AnalyzerResults = analyzeResults

	analysis, err := getAnalysisFile(analyzeResults)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get analysis file")
	}

	err = files.SaveResult(bundlePath, AnalysisFilename, analysis)
	if err != nil {
		return nil, errors.Wrap(err, "failed to write analysis")
	}

	if err := collect.TarSupportBundleDir(bundlePath, files, filename); err != nil {
		return nil, errors.Wrap(err, "create bundle file")
	}

	// After-collection steps (upload/callback); same CLI-vs-programmatic
	// error policy as analysis above.
	fileUploaded, err := ProcessSupportBundleAfterCollection(spec, filename)
	if err != nil {
		if opts.FromCLI {
			c := color.New(color.FgHiRed)
			c.Printf("%s\r * %v\n", cursor.ClearEntireLine(), err)
			// don't die
		} else {
			return nil, errors.Wrap(err, "failed to process bundle after collection")
		}
	}
	resultsResponse.FileUploaded = fileUploaded

	return &resultsResponse, nil
}

// CollectSupportBundleFromURI collects support bundle from start to finish, including running
// collectors, analyzers and after collection steps. Input arguments are the URIs of the support bundle and redactor specs.
// The support bundle is archived in the OS temp folder (os.TempDir()).
func CollectSupportBundleFromURI(specURI string, redactorURIs []string, opts SupportBundleCreateOpts) (*SupportBundleResponse, error) {
	supportbundle, err := GetSupportBundleFromURI(specURI)
	if err != nil {
		return nil, errors.Wrap(err, "could not bundle from URI")
	}

	// Merge all redactors fetched from the given URIs into one spec before
	// delegating to CollectSupportBundleFromSpec.
	additionalRedactors := &troubleshootv1beta2.Redactor{}
	for _, redactor := range redactorURIs {
		redactorObj, err := GetRedactorFromURI(redactor)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get redactor spec %s", redactor)
		}

		if redactorObj != nil {
			additionalRedactors.Spec.Redactors = append(additionalRedactors.Spec.Redactors, redactorObj.Spec.Redactors...)
		}
	}

	return CollectSupportBundleFromSpec(&supportbundle.Spec, additionalRedactors, opts)
}

// ProcessSupportBundleAfterCollection performs the after collection actions, like Callbacks and sending the archive to a remote server.
func ProcessSupportBundleAfterCollection(spec *troubleshootv1beta2.SupportBundleSpec, archivePath string) (bool, error) { fileUploaded := false if len(spec.AfterCollection) > 0 { for _, ac := range spec.AfterCollection { if ac.UploadResultsTo != nil { if err := uploadSupportBundle(ac.UploadResultsTo, archivePath); err != nil { return false, errors.Wrap(err, "failed to upload support bundle") } else { fileUploaded = true } } else if ac.Callback != nil { if err := callbackSupportBundleAPI(ac.Callback, archivePath); err != nil { return false, errors.Wrap(err, "failed to notify API that support bundle has been uploaded") } } } } return fileUploaded, nil } // AnalyzeSupportBundle performs analysis on a support bundle using the support bundle spec and an already unpacked support // bundle on disk func AnalyzeSupportBundle(spec *troubleshootv1beta2.SupportBundleSpec, tmpDir string) ([]*analyzer.AnalyzeResult, error) { if len(spec.Analyzers) == 0 { return nil, nil } analyzeResults, err := analyzer.AnalyzeLocal(tmpDir, spec.Analyzers) if err != nil { return nil, errors.Wrap(err, "failed to analyze support bundle") } return analyzeResults, nil }
package hsc // HscDerivationPath is the standard BIP44 derivation path for hsc const HscDerivationPath string = "m/44'/532'/0'/0/0"
// Written in 2014 by Petar Maymounkov.
//
// It helps future understanding of past knowledge to save
// this notice, so peers of other times and backgrounds can
// see history clearly.

package faculty

import (
	// "fmt"
	"sync"

	"github.com/gocircuit/escher/think"
)

// Eye is an implementation of Leslie Valiant's “Mind's Eye”, described in
// http://www.probablyapproximatelycorrect.com/
type Eye struct {
	retina map[string]*think.Synapse // one synapse per named valve
	nerve  EyeNerve
}

// ShortCognize is the cognition interface provided by the Mind's Eye (short-term memory) mechanism.
// The short-term memory is what allows people to process a linguistic sentence with all its structure.
type ShortCognize func(Impression)

// EyeNerve bundles the user-supplied cognition callback with the shared
// short-term memory and re-cognizer bindings.
type EyeNerve struct {
	cognize   ShortCognize
	connected chan struct{} // closed once every valve has been bound in Focus
	recognize think.MapReCognizer
	memory
}

// memory is the mutex-guarded short-term state: the age counter and the
// current Impression.
type memory struct {
	sync.Mutex
	Age int
	Imp Impression
}

// NewEye creates a new short-term memory mechanism, called an eye.
// One synapse is created per valve name; valve names must be unique.
func NewEye(valve ...string) (think.Reflex, *Eye) {
	reflex := make(think.Reflex)
	m := &Eye{
		retina: make(map[string]*think.Synapse),
		nerve: EyeNerve{
			connected: make(chan struct{}),
			memory: memory{
				Imp: MakeImpression(),
			},
		},
	}
	// NOTE(review): Imp is assigned MakeImpression() twice — once in the
	// struct literal above and again here; the second assignment appears
	// redundant, confirm before removing.
	m.nerve.memory.Imp = MakeImpression()
	for _, v := range valve {
		if _, ok := reflex[v]; ok {
			panic("two valves, same name")
		}
		reflex[v], m.retina[v] = think.NewSynapse()
		m.nerve.memory.Imp.Show(0, v, nil)
	}
	return reflex, m
}

// Focus binds this short memory reflex to the response function cognize.
func (m *Eye) Focus(cognize ShortCognize) *EyeNerve {
	m.nerve.memory.Lock()         // Locking prevents individual competing Focus invocations
	defer m.nerve.memory.Unlock() // from initiating cognition before all valves/synapses have been attached.
	m.nerve.cognize = cognize
	ch := make(chan struct{})
	for v_, _ := range m.nerve.memory.Imp.Image {
		// Shadow the loop variable so each goroutine captures its own valve
		// name (required for Go versions before 1.22 loop-var semantics).
		v := v_
		go func() {
			m.nerve.recognize.Bind(
				v,
				m.retina[v].Focus(
					func(w interface{}) {
						m.nerve.cognizeWith(v, w)
					},
				),
			)
			ch <- struct{}{}
		}()
	}
	// Wait for every valve's Bind to complete before declaring the nerve
	// connected.
	for _, _ = range m.nerve.memory.Imp.Image {
		<-ch
	}
	close(m.nerve.connected)
	return &m.nerve
}
package transformer

import (
	"bytes"
	"fmt"

	"github.com/kubesimple/transformer/context"
	v1 "github.com/kubesimple/transformer/transform/v1"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/viper"
)

// Transform reads the kubesimple configuration from the configured file
// search path and applies the versioned transformation for the session.
func Transform(s context.Session) error {
	return transform(nil, s)
}

// transform loads configuration either from disk (b == nil) or from the
// provided in-memory bytes, then dispatches to the parser matching the
// config's "version" field.
func transform(b []byte, s context.Session) error {
	v := viper.New()
	setDefaults(v)

	switch {
	case b == nil:
		if err := v.ReadInConfig(); err != nil {
			log.Errorf("failed to read kubesimple configuration file: %s", err)
			return errors.Wrap(err, "failed to read kubesimple configuration file")
		}
	default:
		if err := v.ReadConfig(bytes.NewBuffer(b)); err != nil {
			log.Errorf("failed to read kubesimple configuration file: %s", err)
			return errors.Wrap(err, "failed to read kubesimple configuration file")
		}
	}

	version := v.GetString("version")
	switch version {
	case "1":
		return v1.Parse(v, s)
	default:
		// FIX: errors.New(fmt.Sprintf(...)) replaced with the equivalent,
		// idiomatic fmt.Errorf (flagged by go vet as S1028/ST1005-style).
		return fmt.Errorf("config: unknown version %s", version)
	}
}

// setDefaults configures the viper instance's search path, config file and
// default values.
func setDefaults(v *viper.Viper) {
	v.AddConfigPath(".")
	// NOTE(review): SetConfigFile expects a concrete file path and bypasses
	// AddConfigPath; if the intent is to search the path for a file named
	// "kubesimple", SetConfigName is the right call — confirm which file
	// name actually exists before changing.
	v.SetConfigFile("kubesimple")

	defaults := map[string]string{
		"version": "1",
	}

	for key, value := range defaults {
		v.SetDefault(key, value)
	}
}
package main

import (
	L "./lib"
	"fmt"
	"os"
)

// main counts how many bag colors can (directly or transitively) contain a
// "shiny gold" bag, reading the bag rules from the file named on the
// command line, and prints the count.
func main() {
	// BUG FIX: indexing os.Args[1] without checking length panics with an
	// index-out-of-range when no argument is supplied.
	if len(os.Args) < 2 {
		fmt.Fprintf(os.Stderr, "usage: %s <input-file>\n", os.Args[0])
		os.Exit(1)
	}
	filename := os.Args[1]
	bags := L.ParseBagsData(filename)

	res := 0
	for _, bag := range bags {
		if bag.CanContain("shiny gold") {
			res += 1
		}
	}

	fmt.Printf("%d\n", res)
}
/* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "fmt" "os" "github.com/containerd/containerd/cmd/ctr/app" "github.com/containerd/containerd/pkg/seed" //nolint:staticcheck // Global math/rand seed is deprecated, but still used by external dependencies "github.com/containerd/stargz-snapshotter/cmd/ctr-remote/commands" "github.com/urfave/cli" ) func init() { // From https://github.com/containerd/containerd/blob/f7f2be732159a411eae46b78bfdb479b133a823b/cmd/ctr/main.go //nolint:staticcheck // Global math/rand seed is deprecated, but still used by external dependencies seed.WithTimeAndRand() } func main() { customCommands := []cli.Command{ commands.RpullCommand, commands.OptimizeCommand, commands.ConvertCommand, commands.GetTOCDigestCommand, commands.IPFSPushCommand, } app := app.New() for i := range app.Commands { if app.Commands[i].Name == "images" { sc := map[string]cli.Command{} for _, subcmd := range customCommands { sc[subcmd.Name] = subcmd } // First, replace duplicated subcommands for j := range app.Commands[i].Subcommands { for name, subcmd := range sc { if name == app.Commands[i].Subcommands[j].Name { app.Commands[i].Subcommands[j] = subcmd delete(sc, name) } } } // Next, append all new sub commands for _, subcmd := range sc { app.Commands[i].Subcommands = append(app.Commands[i].Subcommands, subcmd) } break } } app.Commands = append(app.Commands, commands.FanotifyCommand) if err := app.Run(os.Args); err != nil { fmt.Fprintf(os.Stderr, 
"ctr-remote: %v\n", err) os.Exit(1) } }