text
stringlengths
11
4.05M
// Copyright 2018, Irfan Sharif. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package log implements leveled execution logs for go. The library provides // hooks such that the following top-level usage is made possible: // // $ <binary-name> -help // Usage of <binary-name>: // -log-dir string // Write log files in this directory. // -log-to-stderr // Log to standard error. // -log-level (info|debug|warn|error) // Log level for logs emitted (global, can be overrode using -log-filter). // -log-filter value // Comma-separated list of pattern:level settings for file-filtered logging. // -log-backtrace-at value // Comma-separated list of filename:N settings, when any logging statement at // the specified locations are executed, a stack trace will be emitted. // // $ <binary-name> -log-level info \ // -log-dir /path/to/dir \ // -log-to-stderr \ // -log-filter f.go:warn,g/h/*.go:debug \ // -log-backtrace-at y.go:42 // // These hooks can be invoked at runtime, what this means is that if needed, a // running service could opt-in to provide open endpoints to accept logger // reconfigurations (via RPCs or otherwise). // // Basic example: // // import "github.com/irfansharif/log" // // ... // // logger := log.New() // logger.Info("hello, world") // // The logger can be be configured to be safe for concurrent use, output to // rotating logs, log with specific formatted headers, etc. using variadic // options during initialization. 
An example of the above: // // writer := os.Stderr // writer = log.SynchronizedWriter(writer) // writer = log.MultiWriter(writer, // log.LogRotationWriter("/logs", 50 << 20 /* 50 MiB */)) // // logf := log.Lmode | log.Ldate | log.Ltime | log.Llongfile // // logger.New(log.Writer(writer), log.Flags(logf)) package log
/** * @Author: fanpengfei * @Description: * @File: main_test1 * @Version: 1.0.0 * @Date: 2020/5/26 13:07 */ package main import ( "fmt" "github.com/ebayboy/math-engine/engine" ) func main() { s := "0/0 + 2" // call top level function r, err := engine.ParseAndExec(s) if err != nil { fmt.Println(err) } fmt.Printf("%s = %v\n", s, r) }
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.

package expressions

import (
	"base/docs"
	"datavalues"
)

// LT builds the binary expression "left < right".
func LT(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: "<",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("Less than."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			cmp, err := left.Compare(right)
			if err != nil {
				return nil, err
			}
			return datavalues.MakeBool(cmp == datavalues.LessThan), nil
		},
	}
}

// LTE builds the binary expression "left <= right".
func LTE(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: "<=",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("Less than or equal to."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			cmp, err := left.Compare(right)
			if err != nil {
				return nil, err
			}
			// NOTE(review): cmp < GreaterThan reads as "LessThan or Equal";
			// this assumes the comparison constants are ordered
			// LessThan < Equal < GreaterThan (GTE below mirrors the trick).
			return datavalues.MakeBool(cmp < datavalues.GreaterThan), nil
		},
	}
}

// EQ builds the binary expression "left = right".
func EQ(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: "=",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("Equal."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			cmp, err := left.Compare(right)
			if err != nil {
				return nil, err
			}
			return datavalues.MakeBool(cmp == datavalues.Equal), nil
		},
	}
}

// NEQ builds the binary expression "left != right".
func NEQ(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: "!=",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("Not equal."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			cmp, err := left.Compare(right)
			if err != nil {
				return nil, err
			}
			return datavalues.MakeBool(cmp != datavalues.Equal), nil
		},
	}
}

// GT builds the binary expression "left > right".
func GT(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: ">",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("Greater than."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			cmp, err := left.Compare(right)
			if err != nil {
				return nil, err
			}
			return datavalues.MakeBool(cmp == datavalues.GreaterThan), nil
		},
	}
}

// GTE builds the binary expression "left >= right".
func GTE(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: ">=",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("Greater than or equal to."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			cmp, err := left.Compare(right)
			if err != nil {
				return nil, err
			}
			// NOTE(review): cmp > LessThan reads as "GreaterThan or Equal";
			// same constant-ordering assumption as LTE above.
			return datavalues.MakeBool(cmp > datavalues.LessThan), nil
		},
	}
}

// AND builds the binary expression "left AND right".
func AND(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: "AND",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("Logic AND."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			l := datavalues.AsBool(left)
			r := datavalues.AsBool(right)
			return datavalues.ToValue(l && r), nil
		},
	}
}

// OR builds the binary expression "left OR right".
func OR(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: "OR",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("Logic OR."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			l := datavalues.AsBool(left)
			r := datavalues.AsBool(right)
			return datavalues.ToValue(l || r), nil
		},
	}
}

// LIKE builds the binary expression "left LIKE right".
func LIKE(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: "LIKE",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("LIKE."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			// The right operand is coerced to a string — presumably the LIKE
			// pattern; confirm argument order against datavalues.Like.
			r := datavalues.AsString(right)
			return datavalues.ToValue(datavalues.Like(r, left)), nil
		},
	}
}

// NOT_LIKE builds the binary expression "left NOT LIKE right"; it is the
// negation of LIKE above.
func NOT_LIKE(left interface{}, right interface{}) IExpression {
	exprs := expressionsFor(left, right)
	return &BinaryExpression{
		name: "NOT LIKE",
		argumentNames: [][]string{
			{"left", "right"},
		},
		description: docs.Text("NOT LIKE."),
		validate:    All(),
		left:        exprs[0],
		right:       exprs[1],
		updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
			r := datavalues.AsString(right)
			return datavalues.ToValue(!datavalues.Like(r, left)), nil
		},
	}
}
package openrtb_ext // ExtImpMobileFuse defines the contract for bidrequest.imp[i].ext.prebid.bidder.mobilefuse type ExtImpMobileFuse struct { PlacementId int `json:"placement_id"` PublisherId int `json:"pub_id"` TagidSrc string `json:"tagid_src"` }
package main import ( "encoding/json" "fmt" "io/ioutil" "os" "github.com/develersrl/lunches/pkg/tuttobene" ) const ( usage = ` ____ ______________ _____ _____ | | | | | | ||__]|___|\ ||___ | |__| | | |__||__]|___| \||___ A tool for parsing TuttoBene's menus Usage: tuttobene <xlsx file> <output format> Format can be: - json - tina ` ) var TinaFormatTitles = map[tuttobene.MenuRowType]string{ tuttobene.Primo: "Primi Piatti", tuttobene.Secondo: "Secondi Piatti", tuttobene.Contorno: "Contorni", tuttobene.Vegetariano: "Piatti Vegetariano", tuttobene.Frutta: "Frutta", tuttobene.Panino: "Panini Espressi", } func main() { if len(os.Args) < 3 { fmt.Print(usage) os.Exit(1) } bs, err := ioutil.ReadFile(os.Args[1]) if err != nil { fmt.Fprintf(os.Stderr, "Could not open file: %v\n", err) os.Exit(1) } menu, err := tuttobene.ParseMenuBytes(bs) if err != nil { fmt.Fprintf(os.Stderr, "Could not parse file: %v\n", err) os.Exit(1) } if menu == nil { fmt.Fprintf(os.Stderr, "Unexpected nil menu: %v\n", err) os.Exit(1) } switch os.Args[2] { case "json": out, err := json.MarshalIndent(menu, "", " ") if err != nil { fmt.Fprintf(os.Stderr, "Could not marshal menu: %v\n", err) os.Exit(1) } fmt.Println(string(out)) case "tina": var currentSection tuttobene.MenuRowType for _, m := range menu.Rows { if currentSection != m.Type { currentSection = m.Type fmt.Println("\n" + TinaFormatTitles[currentSection]) } if m.IsDailyProposal { fmt.Print("Proposta del giorno: ") } fmt.Println(m.Content) } fmt.Println("") default: fmt.Fprintf(os.Stderr, "Invalid format (json|tina): %v\n", os.Args[2]) os.Exit(1) } }
package helmdeployer import ( "io" "github.com/rancher/fleet/internal/manifest" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" "github.com/sirupsen/logrus" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chartutil" kubefake "helm.sh/helm/v3/pkg/kube/fake" "helm.sh/helm/v3/pkg/storage" "helm.sh/helm/v3/pkg/storage/driver" "k8s.io/apimachinery/pkg/runtime" ) // Template runs helm template and returns the resources as a list of objects, without applying them. func Template(bundleID string, manifest *manifest.Manifest, options fleet.BundleDeploymentOptions) ([]runtime.Object, error) { h := &Helm{ globalCfg: action.Configuration{}, useGlobalCfg: true, template: true, } mem := driver.NewMemory() mem.SetNamespace("default") h.globalCfg.Capabilities = chartutil.DefaultCapabilities h.globalCfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard} h.globalCfg.Log = logrus.Infof h.globalCfg.Releases = storage.Init(mem) resources, err := h.Deploy(bundleID, manifest, options) if err != nil { return nil, err } return resources.Objects, nil }
package internal

import (
	"context"
	"fmt"
)

// Deliverable is the interface to deliver messages.
type Deliverable interface {
	// Commit the given message on the state machine.
	Commit(message Message) Response
}

// Deliver is able to deliver messages from the protocol.
// The messages will be committed on the peer state machine
// and a notification will be generated.
type Deliver struct {
	// Parent context of the delivery.
	// The parent who instantiate the delivery is the peer that
	// relies inside a partition, so for each peer will exists a
	// deliver instance.
	// When the peer is shutdown, also will be shutdown the deliver.
	ctx context.Context

	// Conflict relationship to order the messages.
	conflict ConflictRelationship

	// The peer state machine.
	sm StateMachine

	// Deliver logger.
	log Logger
}

// NewDeliver creates a new instance of the Deliverable interface,
// restoring the state machine from the given storage first; an error
// from the restore aborts construction.
func NewDeliver(ctx context.Context, log Logger, conflict ConflictRelationship, storage Storage) (Deliverable, error) {
	sm := NewStateMachine(storage)
	if err := sm.Restore(); err != nil {
		return nil, err
	}
	d := &Deliver{
		ctx:      ctx,
		conflict: conflict,
		sm:       sm,
		log:      log,
	}
	return d, nil
}

// Commit the message on the peer state machine.
// After the commit a notification is sent through the commit channel.
// The Response mirrors the message identifier and carries either the
// committed entry's data/extensions or the failure that occurred.
func (d Deliver) Commit(m Message) Response {
	res := Response{
		Success:    false,
		Identifier: m.Identifier,
		Data:       nil,
		Extra:      nil,
		Failure:    nil,
	}
	d.log.Debugf("commit request %#v", m)
	entry := &Entry{
		Operation:      m.Content.Operation,
		Identifier:     m.Identifier,
		Key:            m.Content.Key,
		FinalTimestamp: m.Timestamp,
		Data:           m.Content.Content,
		Extensions:     m.Content.Extensions,
	}
	commit, err := d.sm.Commit(entry)
	if err != nil {
		d.log.Errorf("failed to commit %#v. %v", m, err)
		res.Success = false
		res.Failure = err
	} else {
		// A successful commit is expected to hand back an *Entry; anything
		// else is reported as an unknown-response failure.
		switch c := commit.(type) {
		case *Entry:
			res.Success = true
			res.Data = c.Data
			res.Extra = c.Extensions
		default:
			res.Success = false
			res.Failure = fmt.Errorf("commit unknown response. %#v", c)
		}
	}
	return res
}
package main

import "fmt"

// error re-declares the built-in error contract for demonstration purposes.
// Fix: the original `type error interface(){ Error() ; }` was a syntax error
// (parentheses are not allowed after the interface keyword), and the Error
// method lacked the string result the built-in contract defines.
// NOTE(review): shadowing the predeclared identifier `error` is legal Go but
// confusing; consider renaming if this is more than a teaching snippet.
type error interface {
	Error() string
}

// main1 prints a fixed placeholder string.
func main1() {
	fmt.Println("teasdadsadasd")
}
package history

import (
	"context"
	"fmt"
	"gorm.io/gorm"
	"time"
)

const (
	// Actions recorded for a history entry.
	ActionCreate Action = "create"
	ActionUpdate Action = "update"

	// Context/settings keys under which the acting user and source are
	// stored; both are namespaced by the plugin name.
	userOptionKey   userOptionCtxKey   = pluginName + ":user"
	sourceOptionKey sourceOptionCtxKey = pluginName + ":source"
)

// Compile-time checks that Entry implements every history capability.
var (
	_ History              = (*Entry)(nil)
	_ TimestampableHistory = (*Entry)(nil)
	_ BlameableHistory     = (*Entry)(nil)
	_ SourceableHistory    = (*Entry)(nil)
)

type (
	Version string
	Action  string

	// Distinct key types prevent context-key collisions.
	userOptionCtxKey   string
	sourceOptionCtxKey string

	// Recordable is implemented by models that produce a history record.
	Recordable interface {
		CreateHistory() History
	}

	// TimestampableHistory receives the creation time of the record.
	TimestampableHistory interface {
		SetHistoryCreatedAt(createdAt time.Time)
	}

	// BlameableHistory receives the identity of the acting user.
	BlameableHistory interface {
		SetHistoryUserID(id string)
		SetHistoryUserEmail(email string)
	}

	// SourceableHistory receives the origin (e.g. which system/process)
	// of the change.
	SourceableHistory interface {
		SetHistorySourceID(ID string)
		SetHistorySourceType(typ string)
	}

	// History is the minimal contract every history record implements.
	History interface {
		SetHistoryVersion(version Version)
		SetHistoryObjectID(id interface{})
		SetHistoryAction(action Action)
	}

	// Entry is the default gorm-backed history record.
	Entry struct {
		Version    Version   `gorm:"type:char(26)"`
		ObjectID   string    `gorm:"index"`
		Action     Action    `gorm:"type:varchar(24)"`
		UserID     string    `gorm:"type:varchar(255)"`
		UserEmail  string    `gorm:"type:varchar(255)"`
		SourceID   string    `gorm:"type:varchar(255)"`
		SourceType string    `gorm:"type:varchar(255)"`
		CreatedAt  time.Time `gorm:"type:datetime"`
	}

	// User identifies the acting user for blameable histories.
	User struct {
		ID    string
		Email string
	}

	// Source identifies the origin of a change for sourceable histories.
	Source struct {
		ID   string
		Type string
	}
)

// SetUser attaches the acting user to the session, both as a gorm setting
// and on the statement context so either lookup path works.
func SetUser(db *gorm.DB, user User) *gorm.DB {
	ctx := context.WithValue(db.Statement.Context, userOptionKey, user)
	return db.WithContext(ctx).Set(string(userOptionKey), user)
}

// GetUser returns the user previously attached with SetUser, checking the
// gorm settings first and falling back to the statement context.
func GetUser(db *gorm.DB) (User, bool) {
	value, ok := db.Get(string(userOptionKey))
	if !ok {
		// Deliberate shadowing: re-resolve from the context instead.
		value := db.Statement.Context.Value(userOptionKey)
		user, ok := value.(User)
		return user, ok
	}
	user, ok := value.(User)
	return user, ok
}

// SetSource attaches the change source to the session, mirroring SetUser.
func SetSource(db *gorm.DB, source Source) *gorm.DB {
	ctx := context.WithValue(db.Statement.Context, sourceOptionKey, source)
	return db.WithContext(ctx).Set(string(sourceOptionKey), source)
}

// GetSource returns the source previously attached with SetSource, checking
// the gorm settings first and falling back to the statement context.
func GetSource(db *gorm.DB) (Source, bool) {
	value, ok := db.Get(string(sourceOptionKey))
	if !ok {
		// Deliberate shadowing: re-resolve from the context instead.
		value := db.Statement.Context.Value(sourceOptionKey)
		source, ok := value.(Source)
		return source, ok
	}
	source, ok := value.(Source)
	return source, ok
}

// SetHistoryVersion records the history version identifier.
func (e *Entry) SetHistoryVersion(version Version) {
	e.Version = version
}

// SetHistoryObjectID stores the tracked object's id, stringified.
func (e *Entry) SetHistoryObjectID(id interface{}) {
	e.ObjectID = fmt.Sprintf("%v", id)
}

// SetHistoryAction records which action produced this entry.
func (e *Entry) SetHistoryAction(action Action) {
	e.Action = action
}

// SetHistoryUserID records the acting user's id.
func (e *Entry) SetHistoryUserID(id string) {
	e.UserID = id
}

// SetHistoryUserEmail records the acting user's email.
func (e *Entry) SetHistoryUserEmail(email string) {
	e.UserEmail = email
}

// SetHistoryCreatedAt records when the entry was created.
func (e *Entry) SetHistoryCreatedAt(createdAt time.Time) {
	e.CreatedAt = createdAt
}

// SetHistorySourceID records the change source's id.
func (e *Entry) SetHistorySourceID(id string) {
	e.SourceID = id
}

// SetHistorySourceType records the change source's type.
func (e *Entry) SetHistorySourceType(typ string) {
	e.SourceType = typ
}
package mapping

import (
	// NOTE(review): blank strconv import appears unneeded here — confirm
	// nothing relies on its side effects before removing.
	_ "strconv"

	"github.com/omniscale/imposm3/element"
	"github.com/omniscale/imposm3/geom"
)

// init registers the custom "mapping_key_value" field type so mapping
// configurations can reference it by name.
func init() {
	RegisterFieldTypes(
		FieldType{
			Name:     "mapping_key_value",
			GoType:   "string",
			Func:     getField_KeyValueName,
			MakeFunc: nil,
		},
	)
}

// getField_KeyValueName renders the matched tag as "<key>_<value>".
// val, elem and geom are unused but required by the field Func signature.
func getField_KeyValueName(val string, elem *element.OSMElem, geom *geom.Geometry, match Match) interface{} {
	return match.Key + "_" + match.Value
}
// This file is part of CycloneDX GoMod
//
// Licensed under the Apache License, Version 2.0 (the “License”);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an “AS IS” BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) OWASP Foundation. All Rights Reserved.

package gomod

import (
	"bytes"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestModule_Coordinates verifies that Coordinates renders "path@version"
// and falls back to just the path when the version is empty.
func TestModule_Coordinates(t *testing.T) {
	module := Module{
		Path:    "path",
		Version: "version",
	}
	assert.Equal(t, "path@version", module.Coordinates())

	module.Version = ""
	assert.Equal(t, "path", module.Coordinates())
}

// TestModule_Hash downloads a pinned module and checks that Hash reproduces
// the directory hash recorded in the Go checksum database. Requires network
// access and the go toolchain.
func TestModule_Hash(t *testing.T) {
	// Download a specific version of a module
	cmd := exec.Command("go", "get", "github.com/google/uuid@v1.2.0")
	cmd.Dir = t.TempDir() // Just has to be outside of this module's directory to prevent modification of go.mod
	require.NoError(t, cmd.Run())

	// Locate the module on the file system
	modCacheDir, err := exec.Command("go", "env", "GOMODCACHE").Output()
	require.NoError(t, err)
	modDir := filepath.Join(string(bytes.TrimSpace(modCacheDir)), "github.com", "google", "uuid@v1.2.0")

	// Construct module instance
	module := Module{
		Path:    "github.com/google/uuid",
		Version: "v1.2.0",
		Dir:     modDir,
	}

	// Calculate a directory hash for the downloaded module
	hash, err := module.Hash()
	require.NoError(t, err)

	// The returned hash must match the one from sumdb
	// See https://sum.golang.org/lookup/github.com/google/uuid@v1.2.0
	require.Equal(t, "h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=", hash)
}
// TestModule_PackageURL verifies the purl rendering for a pinned module.
func TestModule_PackageURL(t *testing.T) {
	module := Module{
		Path:    "github.com/CycloneDX/cyclonedx-go",
		Version: "v0.1.0",
	}
	assert.Equal(t, "pkg:golang/github.com/CycloneDX/cyclonedx-go@v0.1.0", module.PackageURL())
}

// TestIsModule checks module detection: this repository's root is a module,
// an empty temp directory is not.
func TestIsModule(t *testing.T) {
	t.Run("Positive", func(t *testing.T) {
		require.True(t, IsModule("../../"))
	})
	t.Run("Negative", func(t *testing.T) {
		require.False(t, IsModule(t.TempDir()))
	})
}

// TestParseModules feeds parseModules the concatenated-JSON-objects output
// shape of `go list -m -json all` and checks both entries are decoded.
func TestParseModules(t *testing.T) {
	modulesJSON := `{
	"Path": "github.com/CycloneDX/cyclonedx-go",
	"Main": true,
	"Dir": "/path/to/cyclonedx-go",
	"GoMod": "/path/to/cyclonedx-go/go.mod",
	"GoVersion": "1.14"
}
{
	"Path": "github.com/davecgh/go-spew",
	"Version": "v1.1.1",
	"Time": "2018-02-21T23:26:28Z",
	"Indirect": true,
	"Dir": "/path/to/go/pkg/mod/github.com/davecgh/go-spew@v1.1.1",
	"GoMod": "/path/to/go/pkg/mod/cache/download/github.com/davecgh/go-spew/@v/v1.1.1.mod"
}`

	modules, err := parseModules(strings.NewReader(modulesJSON))
	require.NoError(t, err)

	assert.Len(t, modules, 2)

	assert.Equal(t, "github.com/CycloneDX/cyclonedx-go", modules[0].Path)
	assert.Empty(t, modules[0].Version)
	assert.True(t, modules[0].Main)
	assert.False(t, modules[0].Vendored)

	assert.Equal(t, "github.com/davecgh/go-spew", modules[1].Path)
	assert.Equal(t, "v1.1.1", modules[1].Version)
	assert.False(t, modules[1].Main)
	assert.False(t, modules[1].Vendored)
}

// TestSortModules checks the ordering contract: the main module sorts first,
// then modules order by path/version.
func TestSortModules(t *testing.T) {
	modules := []Module{
		{
			Path:    "path",
			Version: "v1.3.2",
		},
		{
			Path:    "path",
			Version: "v1.2.3",
		},
		{
			Path:    "path/v2",
			Version: "v2.0.0",
			Main:    true,
		},
	}

	sortModules(modules)

	require.Equal(t, "v2.0.0", modules[0].Version) // main
	require.Equal(t, "v1.2.3", modules[1].Version)
	require.Equal(t, "v1.3.2", modules[2].Version)
}
package test

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"
	"testing"

	"github.com/jinzhu/gorm"
	"github.com/stretchr/testify/assert"

	"github.com/Blackoutta/blog-service/global"
	v1 "github.com/Blackoutta/blog-service/internal/api/v1"
	"github.com/Blackoutta/blog-service/internal/service"
	"github.com/Blackoutta/blog-service/pkg/logger"
	"github.com/gin-gonic/gin"
)

// TestGet exercises the tag GET handler against a mock DAO: a valid id, an
// invalid id (parameter error), and a missing record.
func TestGet(t *testing.T) {
	type testcase struct {
		data string
		code int
		want string
	}
	type testsuite []testcase
	var ts testsuite = testsuite{
		{
			data: "1",
			code: 200,
			want: "success",
		},
		{
			data: "0",
			code: 400,
			want: "入参错误",
		},
		{
			data: "10000",
			code: 404,
			want: gorm.ErrRecordNotFound.Error(),
		},
	}

	global.Logger = logger.NewLogger(os.Stdout, "", log.LstdFlags)

	for _, tc := range ts {
		// Mock the http response writer and *gin.Context.
		w := httptest.NewRecorder()
		c, _ := gin.CreateTestContext(w)

		// Mock the request and the service (backed by a mock DAO).
		tag := v1.NewTagAPI()
		tag.AddService(service.TagService{Ctx: c})
		tag.Service.AddDao(&mockTagDao{})
		req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/api/v1/tags/{id}", nil)
		if err != nil {
			t.Fatal(err)
		}
		c.Request = req
		c.Params = append(c.Params, gin.Param{
			Key:   "id",
			Value: tc.data,
		})

		// Invoke the handler under test.
		tag.Get(c)

		// Assert on the HTTP response.
		assert.Equal(t, tc.code, w.Result().StatusCode)
		assert.Contains(t, w.Body.String(), tc.want)
	}
}

// TestCreateTag exercises the tag POST handler against a mock DAO with two
// accepted states and one rejected state value.
func TestCreateTag(t *testing.T) {
	type testcase struct {
		data string
		code int
	}
	type testsuite []testcase
	var ts testsuite = testsuite{
		{
			data: "1",
			code: 200,
		},
		{
			data: "2",
			code: 200,
		},
		{
			data: "3",
			code: 400,
		},
	}

	global.Logger = logger.NewLogger(os.Stdout, "", log.LstdFlags)

	for _, tc := range ts {
		// Mock the http response writer and *gin.Context.
		w := httptest.NewRecorder()
		c, _ := gin.CreateTestContext(w)

		// Mock the request and the service (backed by a mock DAO).
		tag := v1.NewTagAPI()
		tag.AddService(service.TagService{Ctx: c})
		tag.Service.AddDao(&mockTagDao{})
		body := fmt.Sprintf(`{ "created_by": "some_user_name", "name": "some_tag_name", "state": %v }`, tc.data)
		rc := ioutil.NopCloser(strings.NewReader(body))
		req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:8080/api/v1/tags", nil)
		if err != nil {
			t.Fatal(err)
		}
		req.Header.Set("Content-Type", "application/json")
		c.Request = req
		c.Request.Body = rc

		// Invoke the handler under test.
		tag.Create(c)

		// Assert on the HTTP response.
		assert.Equal(t, tc.code, w.Result().StatusCode)
	}
}
package leetcode_go

import (
	"sort"
)

// subsetsWithDup returns every distinct subset of nums, which may contain
// duplicate values. Sorting first lets equal values sit adjacently so
// duplicate subsets can be skipped during the walk.
func subsetsWithDup(nums []int) [][]int {
	sort.Ints(nums)
	res := [][]int{}
	helperP90(nums, []int{}, 0, &res)
	return res
}

// helperP90 records curSet as one subset, then extends it with each distinct
// value at or after pos, recursing on the remainder.
func helperP90(nums []int, curSet []int, pos int, res *[][]int) {
	*res = append(*res, curSet)
	idx := pos
	for idx < len(nums) {
		// Extend a private copy so subsets already stored in res are
		// never aliased by deeper recursion.
		extended := append(append([]int{}, curSet...), nums[idx])
		helperP90(nums, extended, idx+1, res)
		// Hop over the run of equal values: each duplicate may only
		// start a branch once per position.
		next := idx + 1
		for next < len(nums) && nums[next] == nums[idx] {
			next++
		}
		idx = next
	}
}
package main

import "fmt"

// main walks through the basic function shapes: no params, one param,
// a single return, and multiple returns.
func main() {
	foo()
	bar("James")
	greeting := woo("Moneypenny")
	fmt.Println(greeting)
	msg, ok := mouse("Ian", "Fleming")
	fmt.Println(msg)
	fmt.Println(ok)
}

// foo takes no parameters and returns nothing.
func foo() {
	fmt.Println("hello from foo")
}

// bar takes one parameter. Everything in Go is pass by value: when a
// function is called, the parameters receive copies of the arguments.
func bar(s string) {
	fmt.Println("hello,", s)
}

// woo demonstrates a single return value.
func woo(s string) string {
	return fmt.Sprint("hello from woo, ", s)
}

// mouse demonstrates multiple return values.
// General form: func (r receiver) identifier(parameters) (return(s)) {...}
func mouse(st string, ln string) (string, bool) {
	return fmt.Sprint(st, " ", ln, `says "Hello"`), true
}
package controller

import (
	"github.com/Brickchain/go-document.v2"
	keys "github.com/Brickchain/go-keys.v1"
	jose "gopkg.in/square/go-jose.v1"
)

// Binding describes the methods for managing a specific binding with its related configuration.
type Binding interface {
	// ID returns the ID of the binding.
	ID() string

	// Secret for the binding.
	Secret() string

	// GenerateKey will generate a new keypair for this binding.
	GenerateKey(keys.StoredKeyService, []byte) error

	// PublicKey of the binding.
	PublicKey() *jose.JsonWebKey

	// PrivateKey of the binding. Requires a StoredKeyService and a Key Encryption Key (KEK).
	PrivateKey(keys.StoredKeyService, []byte) (*jose.JsonWebKey, error)

	// Descriptor returns the ControllerDescriptor for this binding.
	Descriptor() document.ControllerDescriptor

	// SetDescriptor sets the ControllerDescriptor for this binding.
	SetDescriptor(document.ControllerDescriptor) error

	// Bind is used when the realm binds to this binding.
	Bind(*document.ControllerBinding) error

	// Unbind removes the realm binding.
	Unbind() error

	// Certificate of the binding.
	Certificate() string

	// Mandates returns the mandates we got from the realm.
	Mandates() []string

	// AdminRoles returns the list of roles that can administer this binding.
	AdminRoles() []string

	// Realm returns the realm-descriptor that was used for this binding.
	Realm() *document.RealmDescriptor

	// ControllerBinding returns the controller-binding document.
	ControllerBinding() *document.ControllerBinding

	// Status is used to tell the realm if this binding requires some extra setup steps.
	Status() string

	// SetStatus updates the Status value.
	SetStatus(string) error

	// BindEndpoint returns the endpoint where the realm should post the controller-binding.
	BindEndpoint() string

	// SetBindEndpoint updates the BindEndpoint value.
	SetBindEndpoint(string) error
}
// +build linux

package systeminfo

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/inputs/system"
)

// SysInfoStats is a telegraf input plugin that reads machine metadata
// from /etc/.systeminfo.
type SysInfoStats struct {
	ps system.PS
}

// Description returns a one-line summary for telegraf.
func (_ *SysInfoStats) Description() string {
	return "Read metrics about /etc/.systeminfo"
}

// SampleConfig returns the sample configuration (none needed).
func (_ *SysInfoStats) SampleConfig() string {
	return ""
}

// Gather parses /etc/.systeminfo line by line ("key=value" or "key:value"),
// maps the known Chinese/English keys onto metric field names, and emits a
// single "systeminfo" gauge measurement.
func (_ *SysInfoStats) Gather(acc telegraf.Accumulator) error {
	// Read the raw contents of the system info file.
	bytes, err := ioutil.ReadFile("/etc/.systeminfo")
	if err != nil {
		return fmt.Errorf("error getting system info: %s", err)
	}
	lines := strings.Split(string(bytes), "\n")
	fields := make(map[string]interface{})
	for _, line := range lines {
		// Prefer "=" as the separator, fall back to ":"; skip lines
		// with neither.
		props := strings.Split(line, "=")
		if len(props) < 2 {
			props = strings.Split(line, ":")
			if len(props) < 2 {
				continue
			}
		}
		key := strings.TrimSpace(props[0])
		value := strings.TrimSpace(props[1])
		// Several case arms list near-identical literals that differ
		// only in Unicode punctuation variants (full/half-width
		// parentheses, 泄漏/泄露) — do not "deduplicate" them.
		switch key {
		case "产品名称", "ProductName":
			fields["pro_name"] = value
		case "产品型号", "ProductModel":
			fields["pro_number"] = value
		case "标识码(产品唯一标识)", "ID":
			fields["pro_code"] = value
		case "电磁泄漏发射防护类型", "电磁泄露发射防护类型", "ShelterModel":
			fields["launch_type"] = value
		case "生产者(制造商)", "Producter":
			fields["manufacturer"] = value
		case "操作系统名称", "Name":
			fields["os_name"] = value
		case "系统版本", "Release":
			fields["sys_version"] = value
		case "内核版本", "kernel":
			fields["kernel"] = value
		case "系统位数", "Bit":
			fields["sys_number"] = value
		case "I/O保密管理模块":
			fields["io_sec_model"] = value
		case "安全卡版本", "Version":
			fields["safe_number"] = value
		case "固件版本(BIOS)", "固件版本(BIOS)", "固件版本(BIOS)", "固件版本(BIOS)", "BiosVersion":
			fields["bios"] = value
		case "处理器信息", "CPU":
			fields["cpu_info"] = value
		case "内存", "Memory":
			fields["memory"] = value
		case "硬盘序列号", "HDSerial":
			fields["disk_number"] = value
		case "硬盘容量", "HDCapacity":
			fields["disk_capacity"] = value
		case "主板版本号":
			fields["mainboard_version"] = value
		case "系统安装时间":
			fields["sys_begin_time"] = value
		case "系统更新时间", "UpdateTime":
			fields["sys_update_time"] = value
		case "三合一内核版本", "KernelVersion":
			fields["three_kernel"] = value
		case "三合一软件版本", "SoftWareVersion":
			fields["three_version"] = value
		case "硬盘2序列号", "HDSerial_1": //HOME 硬盘序列号
			fields["home_disk_number"] = value
		case "硬盘2容量", "HDCapacity_1": //HOME 硬盘容量
			fields["home_disk_capacity"] = value
		}
	}
	acc.AddGauge("systeminfo", fields, nil)
	return nil
}

// init registers the plugin with telegraf under the name "systeminfo".
func init() {
	ps := system.NewSystemPS()
	inputs.Add("systeminfo", func() telegraf.Input {
		return &SysInfoStats{ps: ps}
	})
}
package handlers

import (
	"context"
	"github.com/PabloGilvan/transaction/cmd/helpers"
	"github.com/PabloGilvan/transaction/internal/services/account"
	"github.com/PabloGilvan/transaction/internal/services/transaction"
	"github.com/gin-gonic/gin"
	"net/http"
)

// TransactionController exposes the HTTP endpoints for creating transactions,
// delegating business rules to the transaction and account services.
type TransactionController struct {
	TransactionService transaction.TransactionService
	AccountService     account.AccountService
}

// NewTransactionController wires a controller with its two service dependencies.
func NewTransactionController(service transaction.TransactionService, accountService account.AccountService) TransactionController {
	return TransactionController{
		TransactionService: service,
		AccountService:     accountService,
	}
}

// Router registers the controller's routes under the given group.
func (crtl TransactionController) Router(router *gin.RouterGroup) {
	transactions := router.Group("/transactions")
	{
		transactions.POST("/", crtl.SaveTransaction)
	}
}

// SaveTransaction @Title saveTransaction
// @Tags Transaction
// @Summary Persist a transaction
// @Description Persist a transaction validating the operation type and account
// @Param content body transaction.TransactionPersist true "Object for persisting the transaction"
// @Success 202 {object} transaction.TransactionResponse
// @Failure 400 "operation not found"
// @Failure 400 "account inactive"
// @Failure 404 "account not found"
// @Accept json
// @Router /transactions [post]
func (crtl TransactionController) SaveTransaction(c *gin.Context) {
	var transactionPersist transaction.TransactionPersist
	if err := c.BindJSON(&transactionPersist); err != nil {
		c.IndentedJSON(http.StatusBadRequest, helpers.ErrorMessage{ErrorMessage: helpers.ErrInvalidRequest.Error()})
		return
	}

	// Validate the target account exists (and is usable) before persisting.
	_, err := crtl.AccountService.LoadAccount(context.Background(), transactionPersist.AccountId)
	if errMessage, statusCode := helpers.ProcessIfBusinessError(err); errMessage != nil {
		c.IndentedJSON(statusCode, errMessage)
		return
	}

	transactionIdentifier, err := crtl.TransactionService.SaveTransaction(context.Background(), transactionPersist)
	if errMessage, statusCode := helpers.ProcessIfBusinessError(err); errMessage != nil {
		c.IndentedJSON(statusCode, errMessage)
		return
	}

	// Handler responds 202 Accepted with the new transaction identifier.
	c.IndentedJSON(http.StatusAccepted, transactionIdentifier)
}
package main import ( "fmt" "net/http" "time" storage "github.com/alokyadav/buildings/storage" service "github.com/alokyadav/buildings/service" "github.com/gorilla/mux" "github.com/google/jsonapi" ) const ( headerAccept = "Accept" headerContentType = "Content-Type" ) func main() { jsonapi.Instrumentation = func(r *jsonapi.Runtime, eventType jsonapi.Event, callGUID string, dur time.Duration) { metricPrefix := r.Value("instrument").(string) if eventType == jsonapi.UnmarshalStart { fmt.Printf("%s: id, %s, started at %v\n", metricPrefix+".jsonapi_unmarshal_time", callGUID, time.Now()) } if eventType == jsonapi.UnmarshalStop { fmt.Printf("%s: id, %s, stopped at, %v , and took %v to unmarshal payload\n", metricPrefix+".jsonapi_unmarshal_time", callGUID, time.Now(), dur) } if eventType == jsonapi.MarshalStart { fmt.Printf("%s: id, %s, started at %v\n", metricPrefix+".jsonapi_marshal_time", callGUID, time.Now()) } if eventType == jsonapi.MarshalStop { fmt.Printf("%s: id, %s, stopped at, %v , and took %v to marshal payload\n", metricPrefix+".jsonapi_marshal_time", callGUID, time.Now(), dur) } } storage := storage.NewBuildingStorage() service := service.NewBuildingService(storage) r := mux.NewRouter() r.HandleFunc("/buildings", service.ListBuildings).Methods("GET") r.HandleFunc("/buildings", service.AddBuilding).Methods("POST") r.HandleFunc("/buildings/{id}", service.GetBuilding).Methods("GET") r.HandleFunc("/buildings/{id}", service.RemoveBuilding).Methods("DELETE") http.ListenAndServe(":8000", r) }
package xin import ( "bufio" "io" "math" "math/rand" "os" "strings" "time" ) type formEvaler func(*Frame, []Value, *astNode) (Value, InterpreterError) type NativeFormValue struct { name string evaler formEvaler } func (v NativeFormValue) String() string { return "(<native form> " + v.name + ")" } func (v NativeFormValue) Repr() string { return v.String() } func (v NativeFormValue) Equal(o Value) bool { if ov, ok := o.(NativeFormValue); ok { return v.name == ov.name } return false } func loadAllDefaultValues(vm *Vm) { fr := vm.Frame stdoutStream := NewStream() stdoutStream.callbacks.sink = func(v Value, node *astNode) InterpreterError { os.Stdout.Write([]byte(v.String())) return nil } fr.Put("os::stdout", stdoutStream) stdinStream := NewStream() stdinStream.callbacks.source = func() (Value, InterpreterError) { reader := bufio.NewReader(os.Stdin) input, err := reader.ReadString('\n') if err == io.EOF { return StringValue(""), nil } else if err != nil { return nil, RuntimeError{ reason: "Cannot read from stdin", } } return StringValue(input[:len(input)-1]), nil } fr.Put("os::stdin", stdinStream) } func loadAllNativeForms(vm *Vm) { // seed PRNG for math::rand rand.Seed(time.Now().UTC().UnixNano()) vm.evalers = map[string]formEvaler{ "+": addForm, "-": subtractForm, "*": multiplyForm, "/": divideForm, "%": modForm, "^": powForm, ">": greaterForm, "<": lessForm, "=": equalForm, "!": notForm, "&": andForm, "|": orForm, "xor": xorForm, "int": intForm, "frac": fracForm, "str": stringForm, "type": typeForm, "str::get": strGetForm, "str::set!": strSetForm, "str::add!": strAddForm, "str::size": strSizeForm, "str::slice": strSliceForm, "str::enc": strEncForm, "str::dec": strDecForm, "vec": vecForm, "vec::get": vecGetForm, "vec::set!": vecSetForm, "vec::add!": vecAddForm, "vec::size": vecSizeForm, "vec::slice": vecSliceForm, "map": mapForm, "map::get": mapGetForm, "map::set!": mapSetForm, "map::has?": mapHasForm, "map::del!": mapDelForm, "map::size": mapSizeForm, "map::keys": 
mapKeysForm, "stream": streamForm, "stream::set-sink!": streamSetSink, "stream::set-source!": streamSetSource, "stream::set-close!": streamSetClose, "->": streamSourceForm, "<-": streamSinkForm, "stream::close!": streamCloseForm, "math::sin": mathSinForm, "math::cos": mathCosForm, "math::tan": mathTanForm, "math::asin": mathAsinForm, "math::acos": mathAcosForm, "math::atan": mathAtanForm, "math::ln": mathLnForm, "math::rand": mathRandForm, "crypto::rand": cryptoRandForm, "os::wait": osWaitForm, "os::stat": osStatForm, "os::open": osOpenForm, "os::delete": osDeleteForm, "os::dial": osDialForm, "os::listen": osListenForm, "os::log": osLogForm, "os::args": osArgsForm, "os::time": osTimeForm, "debug::dump": debugDumpForm, } fr := vm.Frame for name, evaler := range vm.evalers { fr.Put(name, NativeFormValue{ name: name, evaler: evaler, }) } } var Noop = NativeFormValue{ name: "noop", evaler: noopForm, } func noopForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { return zeroValue, nil } func addForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] if firstInt, fok := first.(IntValue); fok { if _, sok := second.(FracValue); sok { first = FracValue(float64(firstInt)) } } else if _, fok := first.(FracValue); fok { if secondInt, sok := second.(IntValue); sok { second = FracValue(float64(secondInt)) } } switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { return cleanFirst + cleanSecond, nil } case FracValue: if cleanSecond, ok := second.(FracValue); ok { return cleanFirst + cleanSecond, nil } case StringValue: if cleanSecond, ok := second.(StringValue); ok { // In this context, strings are immutable. i.e. concatenating // strings should produce a completely new string whose modifications // won't be observable by the original strings. 
base := make([]byte, 0, len(cleanFirst)+len(cleanSecond)) return StringValue(append(append(base, cleanFirst...), cleanSecond...)), nil } case VecValue: if cleanSecond, ok := second.(VecValue); ok { base := make([]Value, 0, len(cleanFirst.underlying.items)+len(cleanSecond.underlying.items)) return NewVecValue(append(append(base, cleanFirst.underlying.items...), cleanSecond.underlying.items...)), nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func subtractForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] if firstInt, fok := first.(IntValue); fok { if _, sok := second.(FracValue); sok { first = FracValue(float64(firstInt)) } } else if _, fok := first.(FracValue); fok { if secondInt, sok := second.(IntValue); sok { second = FracValue(float64(secondInt)) } } switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { return cleanFirst - cleanSecond, nil } case FracValue: if cleanSecond, ok := second.(FracValue); ok { return cleanFirst - cleanSecond, nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func multiplyForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] if firstInt, fok := first.(IntValue); fok { if _, sok := second.(FracValue); sok { first = FracValue(float64(firstInt)) } } else if _, fok := first.(FracValue); fok { if secondInt, sok := second.(IntValue); sok { second = FracValue(float64(secondInt)) } } switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { return cleanFirst * cleanSecond, nil } case FracValue: if cleanSecond, ok := second.(FracValue); ok { return cleanFirst * cleanSecond, nil } case StringValue: 
if cleanSecond, ok := second.(IntValue); ok { max := int(cleanSecond) result, iter := "", string(cleanFirst) for i := 0; i < max; i++ { result += iter } return StringValue(result), nil } case VecValue: if cleanSecond, ok := second.(IntValue); ok { max := int(cleanSecond) result := make([]Value, 0, max*len(cleanFirst.underlying.items)) copy(result, cleanFirst.underlying.items) for i := 0; i < max; i++ { result = append(result, cleanFirst.underlying.items...) } return NewVecValue(result), nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func divideForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] if firstInt, fok := first.(IntValue); fok { if _, sok := second.(FracValue); sok { first = FracValue(float64(firstInt)) } } else if _, fok := first.(FracValue); fok { if secondInt, sok := second.(IntValue); sok { second = FracValue(float64(secondInt)) } } switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { if cleanSecond == zeroValue { return zeroValue, nil } return cleanFirst / cleanSecond, nil } case FracValue: if cleanSecond, ok := second.(FracValue); ok { if cleanSecond == FracValue(0) { return zeroValue, nil } return cleanFirst / cleanSecond, nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func modForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] if firstInt, fok := first.(IntValue); fok { if _, sok := second.(FracValue); sok { first = FracValue(float64(firstInt)) } } else if _, fok := first.(FracValue); fok { if secondInt, sok := second.(IntValue); sok { second = FracValue(float64(secondInt)) } } switch cleanFirst := first.(type) { case IntValue: if 
cleanSecond, ok := second.(IntValue); ok { if cleanSecond == zeroValue { return zeroValue, nil } return cleanFirst % cleanSecond, nil } case FracValue: if cleanSecond, ok := second.(FracValue); ok { if cleanSecond == FracValue(0) { return zeroValue, nil } modulus := math.Mod( float64(cleanFirst), float64(cleanSecond), ) return FracValue(modulus), nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func powForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] if firstInt, fok := first.(IntValue); fok { if _, sok := second.(FracValue); sok { first = FracValue(float64(firstInt)) } } else if _, fok := first.(FracValue); fok { if secondInt, sok := second.(IntValue); sok { second = FracValue(float64(secondInt)) } } switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { return IntValue(math.Pow(float64(cleanFirst), float64(cleanSecond))), nil } case FracValue: if cleanSecond, ok := second.(FracValue); ok { power := math.Pow( float64(cleanFirst), float64(cleanSecond), ) return FracValue(power), nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func notForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 1 { return nil, IncorrectNumberOfArgsError{ node: node, required: 1, given: len(args), } } first := args[0] if firstInt, ok := first.(IntValue); ok { if firstInt.Equal(zeroValue) { return trueValue, nil } else { return falseValue, nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func andForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := 
second.(IntValue); ok { return cleanFirst & cleanSecond, nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func orForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { return cleanFirst | cleanSecond, nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func xorForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { return cleanFirst ^ cleanSecond, nil } } return nil, MismatchedArgumentsError{ node: node, args: args, } } func greaterForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] if firstInt, fok := first.(IntValue); fok { if _, sok := second.(FracValue); sok { first = FracValue(float64(firstInt)) } } else if _, fok := first.(FracValue); fok { if secondInt, sok := second.(IntValue); sok { second = FracValue(float64(secondInt)) } } switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { if cleanFirst > cleanSecond { return trueValue, nil } else { return falseValue, nil } } case FracValue: if cleanSecond, ok := second.(FracValue); ok { if cleanFirst > cleanSecond { return trueValue, nil } else { return falseValue, nil } } case StringValue: if cleanSecond, ok := second.(StringValue); ok { cmp := strings.Compare(string(cleanFirst), string(cleanSecond)) if cmp == 1 { return trueValue, nil } else { return falseValue, nil } } 
} return nil, MismatchedArgumentsError{ node: node, args: args, } } func lessForm(fr *Frame, args []Value, node *astNode) (Value, InterpreterError) { if len(args) < 2 { return nil, IncorrectNumberOfArgsError{ node: node, required: 2, given: len(args), } } first, second := args[0], args[1] if firstInt, fok := first.(IntValue); fok { if _, sok := second.(FracValue); sok { first = FracValue(float64(firstInt)) } } else if _, fok := first.(FracValue); fok { if secondInt, sok := second.(IntValue); sok { second = FracValue(float64(secondInt)) } } switch cleanFirst := first.(type) { case IntValue: if cleanSecond, ok := second.(IntValue); ok { if cleanFirst < cleanSecond { return trueValue, nil } else { return falseValue, nil } } case FracValue: if cleanSecond, ok := second.(FracValue); ok { if cleanFirst < cleanSecond { return trueValue, nil } else { return falseValue, nil } } case StringValue: if cleanSecond, ok := second.(StringValue); ok { cmp := strings.Compare(string(cleanFirst), string(cleanSecond)) if cmp == -1 { return trueValue, nil } else { return falseValue, nil } } } return nil, MismatchedArgumentsError{ node: node, args: args, } }
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package platform

import (
	"context"

	"chromiumos/tast/common/storage"
	"chromiumos/tast/testing"
)

// init registers StorageWearoutDetect with the Tast framework so it is
// discoverable by the test runner.
func init() {
	testing.AddTest(&testing.Test{
		Func: StorageWearoutDetect,
		Desc: "Fails if storage device information indicates impending failure",
		Contacts: []string{
			"puthik@chromium.org",       // Autotest author
			"brooke.mylander@intel.com", // Migrated Autotest to Tast
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"storage_wearout_detect"},
	})
}

// StorageWearoutDetect queries the DUT's storage health information and
// reports a test error when the device reports a Failing status.
// NOTE(review): storage.Get presumably reads SMART-style wearout/health data
// for the boot device — confirm against chromiumos/tast/common/storage.
func StorageWearoutDetect(ctx context.Context, s *testing.State) {
	info, err := storage.Get(ctx)
	if err != nil {
		s.Fatal("Failed to get storage info: ", err)
	}
	// s.Error (not Fatal) so any later cleanup still runs; the test is
	// still marked failed.
	if info.Status == storage.Failing {
		s.Error("Storage device is failing, consider removing from DUT farm")
	}
}
package gopigtranslator

import (
	"strings"
	"unicode"
)

// isVowel reports whether r is an English vowel, case-insensitively.
func isVowel(r rune) bool {
	switch unicode.ToLower(r) {
	case 'a', 'e', 'i', 'o', 'u':
		return true
	}
	return false
}

// translateWord converts a single word to Pig Latin: the leading consonant
// cluster is moved to the end followed by "ay"; words starting with a vowel
// keep their letters and get "yay" appended. An empty word maps to "".
func translateWord(word string) string {
	// BUG FIX: word[0] previously panicked on an empty word.
	if word == "" {
		return ""
	}
	runes := []rune(word)

	// BUG FIX: the starting-vowel check was byte-based and lowercase-only,
	// so "Apple" incorrectly got "ay" instead of "yay".
	postfix := "ay"
	if isVowel(runes[0]) {
		postfix = "yay"
	}

	// BUG FIX: these were make([]rune, len(word)), which pre-filled the
	// slices with NUL runes that then prefixed every appended character.
	prefix := make([]rune, 0, len(runes))
	suffix := make([]rune, 0, len(runes))

	var prefixEnded bool
	for _, symbol := range runes {
		if !prefixEnded && !isVowel(symbol) {
			prefix = append(prefix, symbol)
		} else {
			prefixEnded = true
			suffix = append(suffix, symbol)
		}
	}
	return string(suffix) + string(prefix) + postfix
}

// Translate translates your english text to Pig Latin, word by word,
// preserving single-space separation.
func Translate(englishText string) string {
	words := strings.Split(englishText, " ")
	result := make([]string, 0, len(words))
	for _, word := range words {
		result = append(result, translateWord(word))
	}
	return strings.Join(result, " ")
}
// Package messages wraps an i18n library.
// It provides what is needed to retrieve translated messages.
package messages
// Package splunk defines model types shared by the Splunk REST client.
package splunk

// Generator describes the Splunk build/version information included in
// REST API responses.
type Generator struct {
	Build   string `json:"build"`
	Version string `json:"version"`
}

// Paging carries the standard Splunk REST pagination metadata returned
// alongside collection responses.
type Paging struct {
	Total   int64 `json:"total"`   // total number of entries available
	PerPage int64 `json:"perPage"` // page size used for this response
	Offset  int64 `json:"offset"`  // index of the first returned entry
}

// UpdateSearchConcurrencySettingsScheduleReq is the request payload for
// updating scheduler search-concurrency settings.
type UpdateSearchConcurrencySettingsScheduleReq struct {
	// MaxSearchesPer is the maximum number of searches the scheduler can run,
	// as a percentage of the maximum number of concurrent searches. Default: 50.
	MaxSearchesPer int
	// AutoSummaryPer is the maximum number of concurrent searches to be
	// allocated for auto summarization, as a percentage of the concurrent
	// searches that the scheduler can run. Default: 50.
	AutoSummaryPer int
}
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"io/fs"
	"log"
	"os"
	"os/exec"
	"os/user"
	"path/filepath"
	"strconv"
	"strings"
	"time"
)

// Config holds the ./configure settings, persisted to config.json.
type Config struct {
	User, Group   string
	Prefix        string
	ExecPrefix    string
	Bindir        string
	Libexecdir    string
	Localstatedir string

	WithGoCapnp    string
	WithWasmExecJs string
	TinyGo         bool

	// Actual arguments passed to configure:
	Args []string
}

// getGid resolves a group name to its numeric gid, exiting on failure.
func getGid(name string) int {
	g, err := user.LookupGroup(name)
	chkfatal(err)
	id, err := strconv.Atoi(g.Gid)
	chkfatal(err)
	return id
}

// ParseFlags parses configure-style flags into c and fills in the GNU-style
// directory defaults (exec-prefix from prefix, bindir from exec-prefix, ...).
func (c *Config) ParseFlags(args []string, name string, errorHandling flag.ErrorHandling) {
	c.Args = args[1:]
	fs := flag.NewFlagSet(name, errorHandling)
	fs.StringVar(&c.User, "user", "sandstorm", "the user to run as")
	fs.StringVar(&c.Group, "group", "sandstorm", "the group to run as")
	fs.StringVar(&c.Prefix, "prefix", "/usr/local", "install prefix")
	fs.StringVar(&c.ExecPrefix, "exec-prefix", "", "executable prefix (default ${PREFIX})")
	fs.StringVar(&c.Bindir, "bindir", "", "path for executables (default ${EXEC_PREFIX}/bin)")
	fs.StringVar(&c.Libexecdir, "libexecdir", "", `path for helper commands (default "${PREFIX}/libexec")`)
	fs.StringVar(&c.Localstatedir, "localstatedir", "", `path to store run-time data (default "${PREFIX}/var/lib")`)
	fs.StringVar(&c.WithGoCapnp, "with-go-capnp", "", "path to go-capnp source")
	fs.StringVar(&c.WithWasmExecJs, "with-wasm_exec.js", "", "path to wasm_exec.js")
	fs.BoolVar(&c.TinyGo, "use-tinygo", true, "Use tinygo for webassembly build")

	// currently unused, but permitted, for compatibility with gnu coding
	// guidelines/autoconf.
	fs.String("sbindir", "", "unused")
	fs.String("sysconfdir", "", "unused")
	fs.String("sharedstatedir", "", "unused")
	fs.String("runstatedir", "", "unused")
	fs.String("libdir", "", "unused")
	fs.String("includedir", "", "unused")
	fs.String("oldincludedir", "", "unused")
	fs.String("datarootdir", "", "unused")
	fs.String("datadir", "", "unused")
	fs.String("infodir", "", "unused")
	fs.String("mandir", "", "unused")
	fs.String("docdir", "", "unused")
	fs.String("htmldir", "", "unused")
	fs.String("dvidir", "", "unused")
	fs.String("pdfdir", "", "unused")
	fs.String("psdir", "", "unused")

	fs.Parse(args[1:])

	if c.ExecPrefix == "" {
		c.ExecPrefix = c.Prefix
	}
	if c.Bindir == "" {
		c.Bindir = c.ExecPrefix + "/bin"
	}
	if c.Libexecdir == "" {
		c.Libexecdir = c.Prefix + "/libexec"
	}
	if c.Localstatedir == "" {
		c.Localstatedir = c.Prefix + "/var/lib"
	}
}

// GoSrc renders the generated Go config package source.
func (c Config) GoSrc() string {
	return fmt.Sprintf(`package config

const (
	Prefix        = %q
	Libexecdir    = %q
	Localstatedir = %q
)
`,
		c.Prefix,
		c.Libexecdir,
		c.Localstatedir,
	)
}

// CSrc renders the generated C config header source.
func (c Config) CSrc() string {
	return fmt.Sprintf(`
#pragma once

#define PREFIX %q
#define LIBEXECDIR %q
#define LOCALSTATEDIR %q
`,
		c.Prefix,
		c.Libexecdir,
		c.Localstatedir,
	)
}

// chkfatal aborts the build on any error.
func chkfatal(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

// withMyOuts attaches this process's stdout/stderr to cmd.
func withMyOuts(cmd *exec.Cmd) *exec.Cmd {
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd
}

// runInDir runs bin with args in dir, streaming its output.
func runInDir(dir, bin string, args ...string) error {
	cmd := exec.Command(bin, args...)
	cmd.Dir = dir
	return withMyOuts(cmd).Run()
}

// installExe copies ./_build/<exe> into DESTDIR-prefixed dir, chowns it to
// root:<cfg.Group>, and optionally applies file capabilities via setcap.
func installExe(cfg Config, exe, dir, caps string) {
	destDir := os.Getenv("DESTDIR")
	src, err := os.Open("./_build/" + exe)
	chkfatal(err)
	defer src.Close()
	dstPathDir := destDir + dir + "/"
	chkfatal(os.MkdirAll(dstPathDir, 0755))
	dstPath := dstPathDir + exe
	dst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_RDWR, 0750)
	chkfatal(err)
	defer dst.Close()
	_, err = io.Copy(dst, src)
	chkfatal(err)
	chkfatal(os.Chown(dstPath, 0, getGid(cfg.Group)))
	if caps != "" {
		chkfatal(withMyOuts(exec.Command("setcap", caps, dstPath)).Run())
	}
}

// buildC builds the C helper executables via make.
func buildC() error {
	log.Println("Building C executable")
	return runInDir("c", "make")
}

// buildConfig regenerates the Go/C config sources when config.json changed.
func buildConfig(r *BuildRecord) {
	if r.IsModified("./config.json") {
		cfg := readConfig()
		files := []struct {
			path    string
			content string
		}{
			{
				path:    "./internal/config/config.go",
				content: cfg.GoSrc(),
			},
			{
				path:    "./c/config.h",
				content: cfg.CSrc(),
			},
		}
		for _, f := range files {
			chkfatal(os.WriteFile(f.path, []byte(f.content), 0600))
			r.RecordFile(f.path)
		}
	}
}

// buildCapnp compiles every *.capnp schema and regenerates the Go bindings
// for those whose compiler output changed since the last build.
func buildCapnp(r *BuildRecord) {
	log.Println("Compiling capnp schema")
	c := readConfig()
	dirs := []string{
		"capnp",
		"internal/capnp",
	}
	for _, d := range dirs {
		files, err := filepath.Glob(d + "/*.capnp")
		chkfatal(err)
		for _, file := range files {
			dir := file[:len(file)-len(".capnp")]
			chkfatal(os.MkdirAll(dir, 0755))
			cmd := exec.Command("capnp", "compile",
				"-o-",
				"--src-prefix="+d+"/",
				"-I", c.WithGoCapnp+"/std",
				"-I", "capnp",
				file,
			)
			cmd.Stderr = os.Stderr
			cgr, err := cmd.Output()
			chkfatal(err)

			cgrPath := file + ".cgr"
			// BUG FIX: the old logic hashed the output only when its size
			// changed, so size-preserving schema edits never regenerated
			// the Go code. Hashing is cheap; do it unconditionally.
			hash := sha256.Sum256(cgr)
			oldSig, ok := r.Files[cgrPath]
			if !ok || !bytes.Equal(hash[:], oldSig.Hash) {
				log.Printf("Generating go code for %q", file)
				chkfatal(os.WriteFile(cgrPath, cgr, 0644))
				gen := exec.Command("capnpc-go")
				gen.Dir = dir
				in, err := os.Open(cgrPath)
				chkfatal(err)
				gen.Stdin = in
				runErr := withMyOuts(gen).Run()
				// FIX: the .cgr handle was previously never closed.
				in.Close()
				chkfatal(runErr)
				chkfatal(r.RecordFile(cgrPath))
			}
		}
	}
}

// findWasmExecJs locates wasm_exec.js: an explicit -with-wasm_exec.js wins,
// then well-known tinygo install locations, then $GOROOT/misc/wasm.
func findWasmExecJs(cfg Config) (string, error) {
	if cfg.WithWasmExecJs != "" {
		return cfg.WithWasmExecJs, nil
	}
	if cfg.TinyGo {
		// Try to find wasm_exec.js based on the location of tinygo;
		// e.g. if tinygo is at /usr/bin/tinygo, we look under
		// /usr/lib/tinygo and other similar directories.
		tinygoExe, err := exec.LookPath("tinygo")
		if err != nil {
			return "", fmt.Errorf("can't find tinygo executable: %w", err)
		}
		prefix := filepath.Dir(filepath.Dir(tinygoExe))
		candidates := []string{"/lib", "/lib32", "/lib64", "/share"}
		suffix := "/tinygo/targets/wasm_exec.js"
		for _, c := range candidates {
			path := prefix + c + suffix
			if _, err := os.Stat(path); err == nil {
				return path, nil
			}
		}
		return "", fmt.Errorf("failed to find wasm_exec.js")
	}
	// Regular go toolchain
	cmd := exec.Command("go", "env", "GOROOT")
	cmd.Env = append(cmd.Env, os.Environ()...)
	goroot, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("could not determine GOROOT: %w", err)
	}
	path := strings.TrimSpace(string(goroot)) + "/misc/wasm/wasm_exec.js"
	if _, err := os.Stat(path); err != nil {
		return "", fmt.Errorf("could not stat %q: %w", path, err)
	}
	return path, nil
}

// buildWebui compiles the webui to webassembly (tinygo or the standard
// toolchain) and installs the .wasm plus matching wasm_exec.js into the
// server's embed directory when the output changed.
func buildWebui(r *BuildRecord, cfg Config) error {
	const (
		tmpPath   = "_build/webui.wasm"
		finalPath = "internal/server/embed/webui.wasm"
		srcDir    = "./cmd/webui"
	)
	wasmExecSrc, err := findWasmExecJs(cfg)
	if err != nil {
		return err
	}

	// Build the webassembly binary:
	log.Println("Building wasm binary")
	if cfg.TinyGo {
		err := runInDir(".", "tinygo", "build",
			"-target", "wasm",
			"-panic", "trap",
			"-no-debug",
			"-o="+tmpPath,
			srcDir)
		if err != nil {
			return err
		}
	} else {
		// Use the standard go toolchain.
		cmd := exec.Command("go", "build", "-o", tmpPath, srcDir)
		cmd.Env = append(cmd.Env, os.Environ()...)
		cmd.Env = append(cmd.Env, "GOOS=js", "GOARCH=wasm")
		if err := withMyOuts(cmd).Run(); err != nil {
			return err
		}
	}
	if !r.IsModified(tmpPath) {
		return nil
	}
	chkfatal(r.RecordFile(tmpPath))
	// Purely informational size report; failure is harmless.
	_ = runInDir(".", "du", "-hs", tmpPath)
	chkfatal(copyFile(finalPath, tmpPath))
	return copyFile("internal/server/embed/wasm_exec.js", wasmExecSrc)
}

// copyFile copies src to dest, creating/truncating dest.
func copyFile(dest, src string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	_, err = io.Copy(out, in)
	// FIX: surface the Close error (short writes) instead of dropping it.
	cerr := out.Close()
	if err != nil {
		return err
	}
	return cerr
}

// buildGo builds the capnp bindings, the webui, and every Go executable.
func buildGo(r *BuildRecord) error {
	buildCapnp(r)
	if err := buildWebui(r, readConfig()); err != nil {
		return err
	}
	exes := []struct {
		name   string
		static bool
	}{
		{"sandstorm-import-tool", false},
		{"tempest", false},
		{"tempest-make-user", false},
		{"tempest-grain-agent", true},
		{"test-app", true},
	}
	for _, exe := range exes {
		if err := compileGoExe(exe.name, exe.static); err != nil {
			return err
		}
	}
	return nil
}

// compileGoExe builds ./cmd/<name> into _build/<name>; static disables cgo.
func compileGoExe(name string, static bool) error {
	log.Printf("Compiling go executable %q (static = %v)", name, static)
	cmd := exec.Command("go", "build", "-v", "-o", "_build/"+name, "./cmd/"+name)
	cmd.Env = append(cmd.Env, os.Environ()...)
	if static {
		cmd.Env = append(cmd.Env, "CGO_ENABLED=0")
	} else {
		cmd.Env = append(cmd.Env, "CGO_ENABLED=1")
	}
	return withMyOuts(cmd).Run()
}

// buildTestSpk packs the test app into an spk with the checked-in keyring.
func buildTestSpk() error {
	return runInDir("cmd/test-app",
		"spk", "pack",
		"--keyring", "./sandstorm-keyring",
		"../../_build/test-app.spk",
	)
}

// Run configure if its outputs aren't already present.
func maybeConfigure() {
	_, errJson := os.Stat("./config.json")
	if errJson == nil {
		// Config is already present; we're done.
		return
	}
	log.Println("'configure' has not been run; running with default options.")
	run("configure")
}

// run dispatches a top-level command (build, configure, install, ...).
func run(args ...string) {
	switch args[0] {
	case "build":
		maybeConfigure()
		chkfatal(os.MkdirAll("_build", 0755))
		r := GetBuildRecord()
		buildConfig(r)
		chkfatal(buildC())
		chkfatal(buildGo(r))
		r.Save()
	case "test-app":
		run("build")
		chkfatal(buildTestSpk())
	case "export-import":
		maybeConfigure()
		run("build")
		exportImport(readConfig())
	case "configure":
		cfg := &Config{}
		cfg.ParseFlags(args, "configure", flag.ExitOnError)
		jsonData, err := json.MarshalIndent(cfg, "", "  ")
		chkfatal(err)
		chkfatal(os.WriteFile("./config.json", jsonData, 0600))
	case "install":
		run("build")
		c := readConfig()
		installExe(c, "tempest", c.Bindir, "cap_net_bind_service+ep")
		installExe(c, "tempest-sandbox-launcher", c.Libexecdir+"/tempest",
			"cap_sys_admin,cap_net_admin,cap_mknod+ep")
		installExe(c, "tempest-grain-agent", c.Libexecdir+"/tempest", "")
		chkfatal(os.MkdirAll(c.Localstatedir+"/sandstorm/mnt", 0755))
	case "dev":
		run("install")
		c := readConfig()
		cmd := withMyOuts(
			exec.Command("sudo",
				"--preserve-env",
				"-u", c.User,
				"-g", c.Group,
				c.Bindir+"/tempest",
			),
		)
		cmd.Env = os.Environ()
		chkfatal(cmd.Run())
	default:
		fmt.Fprintln(os.Stderr, "Unknown command:", args[0])
		os.Exit(1)
	}
}

// readConfig loads config.json, aborting if configure has not produced it.
func readConfig() Config {
	var c Config
	data, err := os.ReadFile("config.json")
	chkfatal(err)
	chkfatal(json.Unmarshal(data, &c))
	return c
}

func main() {
	if len(os.Args) < 2 {
		run("build")
	} else {
		run(os.Args[1:]...)
	}
}

const buildRecordPath = "_build/build_record.json"

// GetBuildRecord loads the persisted build record, falling back to an empty
// record (forcing rebuilds) when it is missing or unreadable.
func GetBuildRecord() *BuildRecord {
	empty := &BuildRecord{
		Files: make(map[string]FileSig),
	}
	data, err := os.ReadFile(buildRecordPath)
	if err != nil {
		return empty
	}
	var ret BuildRecord
	if err := json.Unmarshal(data, &ret); err != nil {
		return empty
	}
	return &ret
}

// BuildRecord tracks per-file signatures across builds for change detection.
type BuildRecord struct {
	Files map[string]FileSig
}

// Save persists the record for the next build.
func (r *BuildRecord) Save() {
	data, err := json.Marshal(r)
	chkfatal(err)
	chkfatal(os.WriteFile(buildRecordPath, data, 0644))
}

// IsModified reports whether path changed since it was last recorded,
// checking the cheap stat stamp first and hashing only on a stamp mismatch.
func (r *BuildRecord) IsModified(path string) bool {
	stamp, err := StampFile(path)
	if err != nil {
		return true
	}
	sig, ok := r.Files[path]
	if !ok {
		return true
	}
	if stamp == sig.Stamp {
		return false
	}
	hash, err := HashFile(path)
	if err != nil {
		return true
	}
	return !bytes.Equal(hash, sig.Hash)
}

// RecordFile stores the current stamp and hash of path in the record.
func (r *BuildRecord) RecordFile(path string) error {
	stamp, err := StampFile(path)
	if err != nil {
		return err
	}
	hash, err := HashFile(path)
	if err != nil {
		return err
	}
	r.Files[path] = FileSig{
		Stamp: stamp,
		Hash:  hash,
	}
	return nil
}

// HashFile returns the sha256 digest of the file's contents.
func HashFile(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

// StampFile returns the cheap stat-based signature of path.
func StampFile(path string) (FileStamp, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return FileStamp{}, err
	}
	return FileStamp{
		Size:    fi.Size(),
		Mode:    fi.Mode(),
		ModTime: fi.ModTime(),
	}, nil
}

// FileStamp is the stat-derived part of a file signature.
type FileStamp struct {
	Size    int64
	Mode    fs.FileMode
	ModTime time.Time
}

// FileSig pairs a cheap stamp with a content hash.
type FileSig struct {
	Stamp FileStamp
	Hash  []byte
}

// exportImport round-trips the sandstorm database through the import tool,
// recreating and re-chowning the sqlite file.
func exportImport(cfg Config) {
	dbPath := cfg.Localstatedir + "/sandstorm/sandstorm.sqlite3"
	chkfatal(runInDir(".", "_build/sandstorm-import-tool", "export"))
	chkfatal(os.Remove(dbPath))
	chkfatal(runInDir(".", "_build/sandstorm-import-tool", "import"))
	chkfatal(runInDir(".", "chown", cfg.User+":"+cfg.Group, dbPath))
}
package utils import "testing" func TestAdd(t *testing.T) { expected := 10 actual := Add(1,2,3,4) if actual != expected { t.Errorf("no iguales se espera %d pero se obtiene %d", expected, actual) } }
package rooms import ( "log" "strings" "github.com/Iteam1337/go-udp-wejay/room" "github.com/Iteam1337/go-udp-wejay/users" ) type Rooms struct { rooms map[string]*room.Room } func (r *Rooms) Get(id string) (room *room.Room) { if res, ok := r.rooms[id]; ok { room = res } return } func (r *Rooms) Restore() { for _, user := range users.GetAll() { id := user.Room userID := user.ID if id == "" { continue } if existingRoom, ok := r.rooms[id]; ok { existingRoom.Add(userID) } else { newRoom := room.New(id, userID) r.rooms[id] = &newRoom } } } func (r *Rooms) Add(userID string, id string) (out *room.Room, ok bool) { user, _ := users.GetUser(userID) ex := rooms.Get(user.Room) if ex != nil { if id, empty := ex.Evict(userID); empty { delete(r.rooms, id) } } if existingRoom, ok := r.rooms[id]; ok { if existingRoom.Size() < 30 { existingRoom.Add(userID) out = existingRoom } } else { newRoom := room.New(id, userID) r.rooms[id] = &newRoom out = &newRoom } if out.Size() > 0 { ok = true } return } func (r *Rooms) Exists(id string) bool { if _, ok := r.rooms[id]; ok { return true } return false } func (r *Rooms) Delete(id string) { delete(r.rooms, id) } func (r *Rooms) Evict(userID string) (ok bool) { user, err := users.GetUser(userID) if err != nil { log.Printf(`(%s) Evict: %s`, userID, err) return } ok = true userRoom := Get(user.Room) if userRoom != nil { id, empty := userRoom.Evict(userID) if empty { Delete(id) } } return } func (r *Rooms) InRoom(id string, userID string) (isInRoom bool) { if res, ok := r.rooms[id]; ok { isInRoom = res.Includes(userID) } return } type QueryResult struct { Name string Size int } func (r *Rooms) Available(filter string) (result []QueryResult) { for key, res := range r.rooms { if filter == "" || strings.Contains(key, filter) { result = append(result, QueryResult{ Name: key, Size: res.Size(), }) } } return } var ( rooms = Rooms{rooms: make(map[string]*room.Room)} Get = rooms.Get Add = rooms.Add Exists = rooms.Exists Delete = rooms.Delete Evict = 
rooms.Evict Available = rooms.Available Restore = rooms.Restore )
package server import ( "IMServer/internal/server/conf" "IMServer/internal/server/model" "fmt" "io" "net" "reflect" "time" "go.uber.org/zap" "gogit.oa.com/March/gopkg/metric" "gogit.oa.com/March/gopkg/util" "gogit.oa.com/March/gopkg/protocol/bypack" ) type TCPPackage struct { conn net.Conn reader *bypack.Reader } type TCPServer struct { lis net.Listener pc chan TCPPackage done chan struct{} } func (s *TCPServer) ListenAndServer() { conf.L.Info("tcp listen and server") go s.Proc() go model.Heartbeat(s.done) for { conn, err := s.lis.Accept() if err != nil { conf.L.Warn(err.Error()) return } model.AddConn(conn) go func() { s.handleConn(conn) model.DelConn(conn) }() } } func (s *TCPServer) handleConn(conn net.Conn) { conf.L.Info("handle conn", zap.String("conn", conn.RemoteAddr().String())) defer conn.Close() if err := conn.SetDeadline(time.Now().Add(60 * time.Second)); err != nil { conf.L.Warn(err.Error()) return } for { var hb = make([]byte, bypack.HeaderSize) n, err := io.ReadFull(conn, hb) if err != nil { return } header, err := bypack.NewHeader(hb[:n]) if err != nil { conf.L.Warn(err.Error()) return } buff := make([]byte, header.GetSize()) _, err = io.ReadFull(conn, buff) if err != nil { conf.L.Error(err.Error()) return } reader := bypack.NewReader(header.GetCmd(), buff) reader.RawBuffer = append(hb, buff...) 
select { case s.pc <- TCPPackage{ conn: conn, reader: reader, }: if err = conn.SetDeadline(time.Now().Add(60 * time.Second)); err != nil { conf.L.Warn(err.Error()) return } case <-time.After(30 * time.Second): conf.L.Sugar().Errorf("conn %s channel full!!!", conn.RemoteAddr().String()) } } } func (s *TCPServer) Proc() { for { select { case p := <-s.pc: go s.Transport(p) case <-s.done: conf.L.Info("tcp server done") return } } } func (s *TCPServer) Stop() { conf.L.Info("tcp server stop") _ = s.lis.Close() close(s.done) } func (s *TCPServer) Transport(p TCPPackage) { defer func() { if err := recover(); err != nil { conf.L.Error(util.CatchPanic(err).Error()) } }() model.AddConn(p.conn) worker := NewWorkerWithConn(p.conn, p.reader) method := fmt.Sprintf("TCP0x%x", p.reader.GetCmd()) v := reflect.ValueOf(worker).MethodByName(method) if v.String() == "<invalid Value>" { conf.L.Sugar().Warnf("worker not found method %s", method) return } if p.reader.GetCmd() != 0x2 { conf.L.Debug(method, zap.String("conn", p.conn.RemoteAddr().String())) } reporter := metric.NewReporter(method) res := v.Call(nil) if len(res) > 0 { code, ok := res[0].Interface().(metric.Code) if !ok { return } reporter.HandledWithCode(code) } } func NewTCPServer(addr string) *TCPServer { lis, err := net.Listen("tcp", addr) util.MustNil(err) return &TCPServer{ lis: lis, pc: make(chan TCPPackage, 1024), done: make(chan struct{}, 1), } }
package main

import (
	// Standard library packages
	"fmt"
	"log"
	"net"
	"net/http"
	"strconv"

	// Third party packages
	"github.com/julienschmidt/httprouter"
	"github.com/skratchdot/open-golang/open"
)

// getIP reports the client's IP, port and X-Forwarded-For header back to
// the client as HTML.
// Adapted from https://blog.golang.org/context/userip/userip.go
func getIP(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {
	fmt.Fprintf(w, "<h1>static file server</h1><p><a href='./static'>folder</p></a>")
	ip, port, err := net.SplitHostPort(req.RemoteAddr)
	if err != nil {
		fmt.Fprintf(w, "userip: %q is not IP:port", req.RemoteAddr)
		// Fix: bail out here. The original fell through with an empty ip,
		// so ParseIP below also failed and the message was printed twice.
		return
	}
	userIP := net.ParseIP(ip)
	if userIP == nil {
		fmt.Fprintf(w, "userip: %q is not IP:port", req.RemoteAddr)
		return
	}
	// X-Forwarded-For is only set when the request arrived through a
	// non-anonymous proxy and takes precedence over RemoteAddr.
	// Header.Get is case-insensitive.
	forward := req.Header.Get("X-Forwarded-For")
	fmt.Fprintf(w, "<p>IP: %s</p>", ip)
	fmt.Fprintf(w, "<p>Port: %s</p>", port)
	fmt.Fprintf(w, "<p>Forwarded for: %s</p>", forward)
}

// main wires up the router, opens the user's browser at /ip, and serves
// forever on localhost:10002.
func main() {
	myport := strconv.Itoa(10002)

	// Instantiate a new router
	r := httprouter.New()
	r.GET("/ip", getIP)

	// Add a simple liveness handler on /test
	r.GET("/test", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
		fmt.Fprint(w, "Welcome!\n")
	})

	l, err := net.Listen("tcp", "localhost:"+myport)
	if err != nil {
		log.Fatal(err)
	}

	// The browser can connect now because the listening socket is open.
	if err := open.Start("http://localhost:" + myport + "/ip"); err != nil {
		log.Println(err)
	}

	// Start the blocking server loop.
	log.Fatal(http.Serve(l, r))
}
package models

// Copyright 2016-2017 MediaMath
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//       http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"github.com/MediaMath/go-t1/time"
)

// User represents a user object: a T1 user account with its permission
// flags, contact details and audit timestamps. Field tags map 1:1 onto the
// T1 API JSON payload; fields tagged ",readonly" are never sent back.
// NOTE(review): the import path ends in /time but the types are
// t1time.T1Time — presumably that package declares `package t1time`;
// confirm against the go-t1 source.
type User struct {
	// Permission / capability flags.
	AccessInternalFees        bool          `json:"access_internal_fees"`
	Active                    bool          `json:"active"`
	CreatedOn                 t1time.T1Time `json:"created_on"`
	CreatorID                 int           `json:"creator_id"`
	EditCampaigns             bool          `json:"edit_campaigns"`
	EditDataDefinition        bool          `json:"edit_data_definition"`
	EditMarginsAndPerformance bool          `json:"edit_margins_and_performance"`
	// Identity and contact details.
	Email     string `json:"email,omitempty,readonly"`
	Fax       string `json:"fax"`
	FirstName string `json:"first_name"`
	ID        int    `json:"id,omitempty,readonly"`
	// Feature flags and login metadata.
	LabsEnableRMX bool          `json:"labs_enable_rmx"`
	LastLoginOn   t1time.T1Time `json:"last_login_on"`
	LastName      string        `json:"last_name"`
	LinkLDAP      bool          `json:"link_ldap"`
	Mobile        string        `json:"mobile"`
	Name          string        `json:"name"`
	// Credential / reset state. Password is omitted from output when empty.
	Password           string        `json:"password,omitempty"`
	PasswordResetSent  t1time.T1Time `json:"password_reset_sent"`
	PasswordResetToken string        `json:"password_reset_token"`
	Phone              string        `json:"phone"`
	Role               string        `json:"role"`
	Scope              string        `json:"scope"`
	SSOAuthSent        t1time.T1Time `json:"sso_auth_sent"`
	SSOAuthToken       string        `json:"sso_auth_token"`
	Title              string        `json:"title"`
	Type               string        `json:"type"`
	UpdatedOn          t1time.T1Time `json:"updated_on"`
	Username           string        `json:"username"`
	Version            int           `json:"version"`
	// View permissions.
	ViewDataDefinition bool   `json:"view_data_definition"`
	ViewDMPReports     bool   `json:"view_dmp_reports"`
	ViewOrganizations  bool   `json:"view_organizations"`
	ViewSegments       bool   `json:"view_segments"`
	EntityType         string `json:"entity_type"`
}
package main

import (
	"fmt"

	"github.com/tallongsun/testmod/hi"
)

// main prints the greeting produced by the hi module for the name "x".
func main() {
	greeting := hi.Hi("x")
	fmt.Println(greeting)
}
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// random returns a pseudo-random integer in the half-open range [min, max).
func random(min, max int) int {
	return min + rand.Intn(max-min)
}

// main seeds the global generator with the current time and prints ten
// draws from [1, 6).
func main() {
	rand.Seed(time.Now().UnixNano())
	for play := 0; play < 10; play++ {
		fmt.Printf("Play %d - Random number : %d\n", play, random(1, 6))
	}
}
package main

import (
	"IMServer/internal/server"
	"IMServer/internal/server/conf"
	"flag"
	"fmt"
	"math/rand"
	"os"
	"os/signal"
	"syscall"
	"time"

	"go.uber.org/zap"
)

// Build metadata, injected at link time.
var (
	BuildTime = "unset"
	Commit    = "unset"
	Release   = "unset"
)

// Command-line options.
var (
	ConfFile string
	Version  string
)

func init() {
	rand.Seed(time.Now().Unix())
	flag.StringVar(&ConfFile, "conf", "app.toml", "conf file")
	// Kept for the command-line startup arguments.
	flag.StringVar(&Version, "v", "0.0.0.1", "version")
	conf.Version = fmt.Sprintf("release:%s commit:%s build:%s", Release, Commit, BuildTime)
}

// main loads configuration, runs the server, and blocks until a shutdown
// signal has been handled.
func main() {
	flag.Parse()
	fmt.Println("im server go", conf.Version)
	conf.Init(ConfFile)

	done := make(chan struct{}, 1)
	signs := make(chan os.Signal, 1)
	signal.Notify(signs, syscall.SIGINT, syscall.SIGTERM)
	go awaitShutdown(signs, done)

	go server.Run()
	<-done
	conf.L.Info("exiting...")
}

// awaitShutdown blocks until SIGINT or SIGTERM arrives, stops the server,
// and closes done so main can exit.
func awaitShutdown(signs chan os.Signal, done chan struct{}) {
	sign := <-signs
	conf.L.Info("catch os signal", zap.String("value", sign.String()))
	server.Close()
	close(done)
}
package id_035 func twoSum(nums []int, target int) []int { var a []int = make([]int, 2) for i := 0; i < len(nums) - 1; i++ { for j := i + 1; j < len(nums); j++ { if nums[i] + nums[j] == target { a[0] = i a[1] = j } } } return a }
package main

// basic mutex - locks for both reading and writing
// RWmutex - more flexible: allows concurrent readers, but locks for writing

import (
	"fmt"
	"runtime"
	"sync"
)

// main increments a shared counter from 100 goroutines. The mutex makes the
// read-modify-write sequence atomic, so the final value is always 100.
func main() {
	fmt.Println("CPUs:", runtime.NumCPU())
	fmt.Println("GO routines:", runtime.NumGoroutine())

	const gs = 100
	counter := 0

	var wg sync.WaitGroup
	wg.Add(gs)
	var mu sync.Mutex

	for g := 0; g < gs; g++ {
		go func() {
			defer wg.Done()
			mu.Lock()
			defer mu.Unlock()
			v := counter
			// Yield in the middle of the update; the mutex still keeps the
			// sequence atomic, demonstrating the race is gone.
			runtime.Gosched()
			v++
			counter = v
		}()
		fmt.Println("GO routines:", runtime.NumGoroutine())
	}

	wg.Wait()
	fmt.Println("GO routines:", runtime.NumGoroutine())
	fmt.Println("Counter value:", counter)
} // END main

//Hands-on exercise #4
//Fix the race condition you created in the previous exercise by using a mutex
//it makes sense to remove runtime.Gosched()
//code: https://github.com/GoesToEleven/go-programming
//video: 151
//SOLUTION:
//package main
//import (
//"fmt"
//"sync"
//)
//func main() {
//	var wg sync.WaitGroup
//	incrementer := 0
//	gs := 100
//	wg.Add(gs)
//	var m sync.Mutex
//	for i := 0; i < gs; i++ {
//		go func() {
//			m.Lock()
//			v := incrementer
//			v++
//			incrementer = v
//			fmt.Println(incrementer)
//			m.Unlock()
//			wg.Done()
//		}()
//	}
//	wg.Wait()
//	fmt.Println("end value:", incrementer)
//}
package helpers

import (
	"bytes"
	"fmt"
	"strings"
	"unicode"
)

// isAlphabetical reports whether c is an ASCII letter.
func isAlphabetical(c rune) bool {
	return isLower(c) || isUpper(c)
}

// isUpper reports whether c is an ASCII uppercase letter.
func isUpper(c rune) bool {
	return 'A' <= c && c <= 'Z'
}

// isLower reports whether c is an ASCII lowercase letter.
func isLower(c rune) bool {
	return 'a' <= c && c <= 'z'
}

// isNumeric reports whether c is an ASCII digit.
func isNumeric(c rune) bool {
	return '0' <= c && c <= '9'
}

// LowerSnakeCase converts s to lower_snake_case.
func LowerSnakeCase(s string) string {
	return strings.ToLower(SnakeCase(s))
}

// UpperSnakeCase converts s to UPPER_SNAKE_CASE.
func UpperSnakeCase(s string) string {
	return strings.ToUpper(SnakeCase(s))
}

// SnakeCase converts s to snake_case, preserving the case of each letter.
func SnakeCase(s string) string {
	return Chop(s, '_')
}

// Chop inserts d at lower->upper case boundaries and replaces every
// non-alphanumeric rune with d.
// NOTE(review): the look-back rune(s[i-1]) reads a single byte, which is
// only correct for ASCII input — the classifiers above are ASCII-only too,
// so this appears intentional; confirm callers never pass multi-byte text.
func Chop(s string, d rune) string {
	var b bytes.Buffer
	for i, c := range s {
		switch {
		case isUpper(c):
			if i > 0 {
				p := rune(s[i-1])
				if isLower(p) {
					b.WriteRune(d)
				}
			}
			b.WriteRune(c)
		case !(isAlphabetical(c) || isNumeric(c)):
			b.WriteRune(d)
		default:
			b.WriteRune(c)
		}
	}
	return b.String()
}

// convert transforms a single rune (e.g. unicode.ToUpper).
type convert func(rune) rune

// LowerCamelCase converts s to lowerCamelCase.
func LowerCamelCase(s string) string {
	return CamelCase(s, unicode.ToLower)
}

// UpperCamelCase converts s to UpperCamelCase.
func UpperCamelCase(s string) string {
	return CamelCase(s, unicode.ToUpper)
}

// CamelCase converts s to camel case: non-alphanumeric runs are dropped and
// the following letter is uppercased; fn sets the case of the first letter.
// Iterates bytes, so like Chop it is ASCII-oriented.
func CamelCase(s string, fn convert) string {
	var b bytes.Buffer
	first := true
	apply := false
	for i := 0; i < len(s); i++ {
		c := rune(s[i])
		switch {
		case !(isAlphabetical(c) || isNumeric(c)):
			apply = true
		case first:
			first = false
			apply = false
			b.WriteRune(fn(c))
		case apply:
			apply = false
			b.WriteRune(unicode.ToUpper(c))
		default:
			apply = false
			b.WriteRune(c)
		}
	}
	return b.String()
}

// SpaceToUpperCamelCase is deprecated
func SpaceToUpperCamelCase(s string) string {
	fmt.Printf("Warning: helpers.SpaceToUpperCamelCase is deprecated\n")
	if s == "" {
		return ""
	}
	buf := bytes.Buffer{}
	for _, p := range rspace.Split(s, -1) {
		buf.WriteString(strings.ToUpper(p[:1]))
		buf.WriteString(p[1:])
	}
	return buf.String()
}

// SnakeToUpperCamelCase is deprecated
func SnakeToUpperCamelCase(s string) string {
	fmt.Printf("Warning: helpers.SnakeToUpperCamelCase is deprecated\n")
	if s == "" {
		return ""
	}
	buf := bytes.Buffer{}
	for _, p := range rsnake.Split(s, -1) {
		buf.WriteString(strings.ToUpper(p[:1]))
		buf.WriteString(p[1:])
	}
	return buf.String()
}

// SnakeToLowerCamelCase is deprecated
func SnakeToLowerCamelCase(s string) string {
	fmt.Printf("Warning: helpers.SnakeToLowerCamelCase is deprecated\n")
	if s == "" {
		return ""
	}
	buf := bytes.Buffer{}
	for i, p := range rsnake.Split(s, -1) {
		if i == 0 {
			buf.WriteString(p)
			continue
		}
		buf.WriteString(strings.ToUpper(p[:1]))
		buf.WriteString(p[1:])
	}
	return buf.String()
}

// ToUpperFirst is deprecated
func ToUpperFirst(s string) string {
	fmt.Printf("Warning: helpers.ToUpperFirst is deprecated\n")
	// Fix: previously panicked on "" (s[:1] out of range); guard added for
	// consistency with the other deprecated helpers above.
	if s == "" {
		return ""
	}
	buf := bytes.Buffer{}
	buf.WriteString(strings.ToUpper(s[:1]))
	buf.WriteString(s[1:])
	return buf.String()
}

// ToLowerFirst is deprecated
func ToLowerFirst(s string) string {
	fmt.Printf("Warning: helpers.ToLowerFirst is deprecated\n")
	// Fix: previously panicked on "" (s[:1] out of range); guard added for
	// consistency with the other deprecated helpers above.
	if s == "" {
		return ""
	}
	buf := bytes.Buffer{}
	buf.WriteString(strings.ToLower(s[:1]))
	buf.WriteString(s[1:])
	return buf.String()
}
package controllers import ( "github.com/astaxie/beego" "intra-hub/db" "intra-hub/models" ) type CalendarController struct { BaseController } func (c *CalendarController) Add() { c.RequireManager() c.EnableRender = false calendar := &models.Calendar{} if err := c.ParseForm(calendar); err != nil { beego.Error(err) c.SetErrorAndRedirect(err) return } beego.Warn(c.Input()) beego.Warn(calendar) if err := db.AddCalendar(calendar); err != nil { beego.Error(err) c.SetErrorAndRedirect(err) return } c.Redirect("/admin#calendars", 301) } func (c *CalendarController) Delete() { c.RequireManager() c.EnableRender = false id, _ := c.GetInt(":id", 0) if err := db.DeleteCalendar(id); err != nil { beego.Error(err) c.SetErrorAndRedirect(err) return } c.Redirect("/admin#calendars", 302) }
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/pkg/errors"
	"github.com/tidwall/gjson"

	"github.com/dailymotion/code-review-for-interviews/model"
)

// httpClient is shared by all currency lookups. A timeout is set because
// the default http.Client has none, so a stalled connection to
// currencyconverterapi.com would otherwise hang the caller forever.
var httpClient = &http.Client{Timeout: 10 * time.Second}

// loadCurrencies fetches the USD price of every code in currencyCodes,
// preserving order. It fails fast on the first error.
func loadCurrencies(currencyCodes []string) ([]model.Currency, error) {
	currencies := make([]model.Currency, len(currencyCodes))
	for i, currencyCode := range currencyCodes {
		currency, err := loadCurrency(currencyCode)
		if err != nil {
			return nil, err
		}
		currencies[i] = *currency
	}
	return currencies, nil
}

// loadCurrency queries currencyconverterapi.com for the <code>_USD rate and
// returns it as a model.Currency stamped with the current time.
func loadCurrency(currencyCode string) (*model.Currency, error) {
	query := fmt.Sprintf("%s_USD", currencyCode)
	url := fmt.Sprintf("http://free.currencyconverterapi.com/api/v3/convert?q=%s&compact=ultra", query)
	resp, err := httpClient.Get(url)
	if err != nil {
		return nil, errors.Wrapf(err, "could not query currencyconverterapi.com for currency %s", currencyCode)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, errors.Errorf("invalid http response status code for currency %s: %d (%s)", currencyCode, resp.StatusCode, resp.Status)
	}
	json, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, errors.Wrapf(err, "could not parse currencyconverterapi.com response for currency %s", currencyCode)
	}
	currency := model.Currency{
		Name: currencyCode,
		// "ultra" compact responses are {"<query>": <rate>}.
		PriceInDollars: gjson.ParseBytes(json).Get(query).Float(),
		Date:           time.Now(),
	}
	return &currency, nil
}
package model

// Param holds the request parameters for exam lookups, bound from the
// query/form by the web framework (the form/binding tags are presumably
// gin-style — confirm against the handlers). Both fields are required by
// the binding and requests missing either will be rejected.
type Param struct {
	SchoolId int64 `form:"schoolId" binding:"required"` // school identifier
	ExamId   int64 `form:"examId" binding:"required"`   // exam identifier
}
package controllers

import (
	"fmt"
	"net/http"
)

// ByeController writes the plain farewell message "Bye!" to the response.
// The request is not inspected.
func ByeController(w http.ResponseWriter, req *http.Request) {
	const farewell = "Bye!"
	fmt.Fprint(w, farewell)
}
// Copyright 2019 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gofer import ( "fmt" "math" "strings" "sync" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/atomicbitops" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/errors/linuxerr" "gvisor.dev/gvisor/pkg/fspath" "gvisor.dev/gvisor/pkg/sentry/fsimpl/host" "gvisor.dev/gvisor/pkg/sentry/fsmetric" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/kernel/pipe" "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" "gvisor.dev/gvisor/pkg/sentry/vfs" ) // Sync implements vfs.FilesystemImpl.Sync. func (fs *filesystem) Sync(ctx context.Context) error { // Snapshot current syncable dentries and special file FDs. fs.syncMu.Lock() ds := make([]*dentry, 0, fs.syncableDentries.Len()) for elem := fs.syncableDentries.Front(); elem != nil; elem = elem.Next() { ds = append(ds, elem.d) } sffds := make([]*specialFileFD, 0, fs.specialFileFDs.Len()) for sffd := fs.specialFileFDs.Front(); sffd != nil; sffd = sffd.Next() { sffds = append(sffds, sffd) } fs.syncMu.Unlock() // Return the first error we encounter, but sync everything we can // regardless. var retErr error // Note that lisafs is capable of batching FSync RPCs. However, we can not // batch all the FDIDs to be synced from ds and sffds. Because the error // handling varies based on file type. 
FSync errors are only considered for // regular file FDIDs that were opened for writing. We could do individual // RPCs for such FDIDs and batch the rest, but it increases code complexity // substantially. We could implement it in the future if need be. // Sync syncable dentries. for _, d := range ds { if err := d.syncCachedFile(ctx, true /* forFilesystemSync */); err != nil { ctx.Infof("gofer.filesystem.Sync: dentry.syncCachedFile failed: %v", err) if retErr == nil { retErr = err } } } // Sync special files, which may be writable but do not use dentry shared // handles (so they won't be synced by the above). for _, sffd := range sffds { if err := sffd.sync(ctx, true /* forFilesystemSync */); err != nil { ctx.Infof("gofer.filesystem.Sync: specialFileFD.sync failed: %v", err) if retErr == nil { retErr = err } } } return retErr } // MaxFilenameLen is the maximum length of a filename. This is dictated by 9P's // encoding of strings, which uses 2 bytes for the length prefix. const MaxFilenameLen = (1 << 16) - 1 // dentrySlicePool is a pool of *[]*dentry used to store dentries for which // dentry.checkCachingLocked() must be called. The pool holds pointers to // slices because Go lacks generics, so sync.Pool operates on any, so // every call to (what should be) sync.Pool<[]*dentry>.Put() allocates a copy // of the slice header on the heap. var dentrySlicePool = sync.Pool{ New: func() any { ds := make([]*dentry, 0, 4) // arbitrary non-zero initial capacity return &ds }, } func appendDentry(ds *[]*dentry, d *dentry) *[]*dentry { if ds == nil { ds = dentrySlicePool.Get().(*[]*dentry) } *ds = append(*ds, d) return ds } // Precondition: !parent.isSynthetic() && !child.isSynthetic(). func appendNewChildDentry(ds **[]*dentry, parent *dentry, child *dentry) { // The new child was added to parent and took a ref on the parent (hence // parent can be removed from cache). A new child has 0 refs for now. So // checkCachingLocked() should be called on both. 
Call it first on the parent // as it may create space in the cache for child to be inserted - hence // avoiding a cache eviction. *ds = appendDentry(*ds, parent) *ds = appendDentry(*ds, child) } // Preconditions: ds != nil. func putDentrySlice(ds *[]*dentry) { // Allow dentries to be GC'd. for i := range *ds { (*ds)[i] = nil } *ds = (*ds)[:0] dentrySlicePool.Put(ds) } // renameMuRUnlockAndCheckCaching calls fs.renameMu.RUnlock(), then calls // dentry.checkCachingLocked on all dentries in *dsp with fs.renameMu locked // for writing. // // dsp is a pointer-to-pointer since defer evaluates its arguments immediately, // but dentry slices are allocated lazily, and it's much easier to say "defer // fs.renameMuRUnlockAndCheckCaching(&ds)" than "defer func() { // fs.renameMuRUnlockAndCheckCaching(ds) }()" to work around this. // +checklocksreleaseread:fs.renameMu func (fs *filesystem) renameMuRUnlockAndCheckCaching(ctx context.Context, dsp **[]*dentry) { fs.renameMu.RUnlock() if *dsp == nil { return } ds := **dsp for _, d := range ds { d.checkCachingLocked(ctx, false /* renameMuWriteLocked */) } putDentrySlice(*dsp) } // +checklocksrelease:fs.renameMu func (fs *filesystem) renameMuUnlockAndCheckCaching(ctx context.Context, ds **[]*dentry) { if *ds == nil { fs.renameMu.Unlock() return } for _, d := range **ds { d.checkCachingLocked(ctx, true /* renameMuWriteLocked */) } fs.renameMu.Unlock() putDentrySlice(*ds) } // stepLocked resolves rp.Component() to an existing file, starting from the // given directory. // // Dentries which may become cached as a result of the traversal are appended // to *ds. // // Preconditions: // - fs.renameMu must be locked. // - d.opMu must be locked for reading. // - !rp.Done(). // - If !d.cachedMetadataAuthoritative(), then d and all children that are // part of rp must have been revalidated. 
// // +checklocksread:d.opMu func (fs *filesystem) stepLocked(ctx context.Context, rp resolvingPath, d *dentry, mayFollowSymlinks bool, ds **[]*dentry) (*dentry, bool, error) { if !d.isDir() { return nil, false, linuxerr.ENOTDIR } if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil { return nil, false, err } name := rp.Component() if name == "." { rp.Advance() return d, false, nil } if name == ".." { if isRoot, err := rp.CheckRoot(ctx, &d.vfsd); err != nil { return nil, false, err } else if isRoot || d.parent == nil { rp.Advance() return d, false, nil } if err := rp.CheckMount(ctx, &d.parent.vfsd); err != nil { return nil, false, err } rp.Advance() return d.parent, false, nil } child, err := fs.getChildAndWalkPathLocked(ctx, d, rp, ds) if err != nil { return nil, false, err } if err := rp.CheckMount(ctx, &child.vfsd); err != nil { return nil, false, err } if child.isSymlink() && mayFollowSymlinks && rp.ShouldFollowSymlink() { target, err := child.readlink(ctx, rp.Mount()) if err != nil { return nil, false, err } followedSymlink, err := rp.HandleSymlink(target) return d, followedSymlink, err } rp.Advance() return child, false, nil } // getChildLocked returns a dentry representing the child of parent with the // given name. Returns ENOENT if the child doesn't exist. // // Preconditions: // - fs.renameMu must be locked. // - parent.opMu must be locked. // - parent.isDir(). // - name is not "." or "..". // - parent and the dentry at name have been revalidated. // // +checklocks:parent.opMu func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name string, ds **[]*dentry) (*dentry, error) { if child, err := parent.getCachedChildLocked(name); child != nil || err != nil { return child, err } // We don't need to check for race here because parent.opMu is held for // writing. 
return fs.getRemoteChildLocked(ctx, parent, name, false /* checkForRace */, ds) } // getRemoteChildLocked is similar to getChildLocked, with the additional // precondition that the child identified by name does not exist in cache. // // If checkForRace argument is true, then this method will check to see if the // call has raced with another getRemoteChild call, and will handle the race if // so. // // Preconditions: // - If checkForRace is false, then parent.opMu must be held for writing. // - Otherwise, parent.opMu must be held for reading. // // Postcondition: The returned dentry is already cached appropriately. // // +checklocksread:parent.opMu func (fs *filesystem) getRemoteChildLocked(ctx context.Context, parent *dentry, name string, checkForRace bool, ds **[]*dentry) (*dentry, error) { child, err := parent.getRemoteChild(ctx, name) // Cache the result appropriately in the dentry tree. if err != nil { if linuxerr.Equals(linuxerr.ENOENT, err) { parent.childrenMu.Lock() defer parent.childrenMu.Unlock() parent.cacheNegativeLookupLocked(name) } return nil, err } parent.childrenMu.Lock() defer parent.childrenMu.Unlock() if checkForRace { // See if we raced with anoter getRemoteChild call that added // to the cache. if cachedChild, ok := parent.children[name]; ok && cachedChild != nil { // We raced. Destroy our child and return the cached // one. This child has no handles, no data, and has not // been cached, so destruction is quick and painless. child.destroyDisconnected(ctx) // All good. Return the cached child. return cachedChild, nil } // No race, continue with the child we got. } parent.cacheNewChildLocked(child, name) appendNewChildDentry(ds, parent, child) return child, nil } // getChildAndWalkPathLocked is the same as getChildLocked, except that it // may prefetch the entire path represented by rp. 
// // +checklocksread:parent.opMu func (fs *filesystem) getChildAndWalkPathLocked(ctx context.Context, parent *dentry, rp resolvingPath, ds **[]*dentry) (*dentry, error) { if child, err := parent.getCachedChildLocked(rp.Component()); child != nil || err != nil { return child, err } // dentry.getRemoteChildAndWalkPathLocked already handles dentry caching. return parent.getRemoteChildAndWalkPathLocked(ctx, rp, ds) } // getCachedChildLocked returns a child dentry if it was cached earlier. If no // cached child dentry exists, (nil, nil) is returned. // // Preconditions: // - fs.renameMu must be locked. // - d.opMu must be locked for reading. // - d.isDir(). // - name is not "." or "..". // - d and the dentry at name have been revalidated. // // +checklocksread:d.opMu func (d *dentry) getCachedChildLocked(name string) (*dentry, error) { if len(name) > MaxFilenameLen { return nil, linuxerr.ENAMETOOLONG } d.childrenMu.Lock() defer d.childrenMu.Unlock() if child, ok := d.children[name]; ok || d.isSynthetic() { if child == nil { return nil, linuxerr.ENOENT } return child, nil } if d.childrenSet != nil { // Is the child even there? Don't make RPC if not. if _, ok := d.childrenSet[name]; !ok { return nil, linuxerr.ENOENT } } return nil, nil } // walkParentDirLocked resolves all but the last path component of rp to an // existing directory, starting from the given directory (which is usually // rp.Start().Impl().(*dentry)). It does not check that the returned directory // is searchable by the provider of rp. // // Preconditions: // - fs.renameMu must be locked. // - !rp.Done(). // - If !d.cachedMetadataAuthoritative(), then d's cached metadata must be up // to date. 
func (fs *filesystem) walkParentDirLocked(ctx context.Context, vfsRP *vfs.ResolvingPath, d *dentry, ds **[]*dentry) (*dentry, error) { rp := resolvingPathParent(vfsRP) if err := fs.revalidatePath(ctx, rp, d, ds); err != nil { return nil, err } for !rp.done() { d.opMu.RLock() next, followedSymlink, err := fs.stepLocked(ctx, rp, d, true /* mayFollowSymlinks */, ds) d.opMu.RUnlock() if err != nil { return nil, err } d = next if followedSymlink { if err := fs.revalidatePath(ctx, rp, d, ds); err != nil { return nil, err } } } if !d.isDir() { return nil, linuxerr.ENOTDIR } return d, nil } // resolveLocked resolves rp to an existing file. // // Preconditions: fs.renameMu must be locked. func (fs *filesystem) resolveLocked(ctx context.Context, vfsRP *vfs.ResolvingPath, ds **[]*dentry) (*dentry, error) { rp := resolvingPathFull(vfsRP) d := rp.Start().Impl().(*dentry) if err := fs.revalidatePath(ctx, rp, d, ds); err != nil { return nil, err } for !rp.done() { d.opMu.RLock() next, followedSymlink, err := fs.stepLocked(ctx, rp, d, true /* mayFollowSymlinks */, ds) d.opMu.RUnlock() if err != nil { return nil, err } d = next if followedSymlink { if err := fs.revalidatePath(ctx, rp, d, ds); err != nil { return nil, err } } } if rp.MustBeDir() && !d.isDir() { return nil, linuxerr.ENOTDIR } return d, nil } // doCreateAt checks that creating a file at rp is permitted, then invokes // createInRemoteDir (if the parent directory is a real remote directory) or // createInSyntheticDir (if the parent directory is synthetic) to do so. // // Preconditions: // - !rp.Done(). // - For the final path component in rp, !rp.ShouldFollowSymlink(). 
func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir bool, createInRemoteDir func(parent *dentry, name string, ds **[]*dentry) (*dentry, error), createInSyntheticDir func(parent *dentry, name string) (*dentry, error)) error { var ds *[]*dentry fs.renameMu.RLock() defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds) start := rp.Start().Impl().(*dentry) parent, err := fs.walkParentDirLocked(ctx, rp, start, &ds) if err != nil { return err } // Order of checks is important. First check if parent directory can be // executed, then check for existence, and lastly check if mount is writable. if err := parent.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil { return err } name := rp.Component() if name == "." || name == ".." { return linuxerr.EEXIST } if parent.isDeleted() { return linuxerr.ENOENT } if err := fs.revalidateOne(ctx, rp.VirtualFilesystem(), parent, name, &ds); err != nil { return err } parent.opMu.Lock() defer parent.opMu.Unlock() if len(name) > MaxFilenameLen { return linuxerr.ENAMETOOLONG } // Check for existence only if caching information is available. Otherwise, // don't check for existence just yet. We will check for existence if the // checks for writability fail below. Existence check is done by the creation // RPCs themselves. parent.childrenMu.Lock() if child, ok := parent.children[name]; ok && child != nil { parent.childrenMu.Unlock() return linuxerr.EEXIST } if parent.childrenSet != nil { if _, ok := parent.childrenSet[name]; ok { parent.childrenMu.Unlock() return linuxerr.EEXIST } } parent.childrenMu.Unlock() checkExistence := func() error { if child, err := fs.getChildLocked(ctx, parent, name, &ds); err != nil && !linuxerr.Equals(linuxerr.ENOENT, err) { return err } else if child != nil { return linuxerr.EEXIST } return nil } mnt := rp.Mount() if err := mnt.CheckBeginWrite(); err != nil { // Existence check takes precedence. 
if existenceErr := checkExistence(); existenceErr != nil { return existenceErr } return err } defer mnt.EndWrite() if err := parent.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil { // Existence check takes precedence. if existenceErr := checkExistence(); existenceErr != nil { return existenceErr } return err } if !dir && rp.MustBeDir() { return linuxerr.ENOENT } if parent.isSynthetic() { if createInSyntheticDir == nil { return linuxerr.EPERM } child, err := createInSyntheticDir(parent, name) if err != nil { return err } parent.childrenMu.Lock() parent.cacheNewChildLocked(child, name) parent.syntheticChildren++ parent.clearDirentsLocked() parent.childrenMu.Unlock() parent.touchCMtime() ev := linux.IN_CREATE if dir { ev |= linux.IN_ISDIR } parent.watches.Notify(ctx, name, uint32(ev), 0, vfs.InodeEvent, false /* unlinked */) return nil } // No cached dentry exists; however, in InteropModeShared there might still be // an existing file at name. Just attempt the file creation RPC anyways. If a // file does exist, the RPC will fail with EEXIST like we would have. child, err := createInRemoteDir(parent, name, &ds) if err != nil { return err } parent.childrenMu.Lock() parent.cacheNewChildLocked(child, name) if child.isSynthetic() { parent.syntheticChildren++ ds = appendDentry(ds, parent) } else { appendNewChildDentry(&ds, parent, child) } if fs.opts.interop != InteropModeShared { if child, ok := parent.children[name]; ok && child == nil { // Delete the now-stale negative dentry. delete(parent.children, name) parent.negativeChildren-- } parent.clearDirentsLocked() parent.touchCMtime() } parent.childrenMu.Unlock() ev := linux.IN_CREATE if dir { ev |= linux.IN_ISDIR } parent.watches.Notify(ctx, name, uint32(ev), 0, vfs.InodeEvent, false /* unlinked */) return nil } // Preconditions: !rp.Done(). 
func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir bool) error { var ds *[]*dentry fs.renameMu.RLock() defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds) start := rp.Start().Impl().(*dentry) parent, err := fs.walkParentDirLocked(ctx, rp, start, &ds) if err != nil { return err } if err := parent.checkPermissions(rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil { return err } if err := rp.Mount().CheckBeginWrite(); err != nil { return err } defer rp.Mount().EndWrite() name := rp.Component() if dir { if name == "." { return linuxerr.EINVAL } if name == ".." { return linuxerr.ENOTEMPTY } } else { if name == "." || name == ".." { return linuxerr.EISDIR } } vfsObj := rp.VirtualFilesystem() if err := fs.revalidateOne(ctx, vfsObj, parent, rp.Component(), &ds); err != nil { return err } mntns := vfs.MountNamespaceFromContext(ctx) defer mntns.DecRef(ctx) parent.opMu.Lock() defer parent.opMu.Unlock() parent.childrenMu.Lock() if parent.childrenSet != nil { if _, ok := parent.childrenSet[name]; !ok { parent.childrenMu.Unlock() return linuxerr.ENOENT } } parent.childrenMu.Unlock() // Load child if sticky bit is set because we need to determine whether // deletion is allowed. var child *dentry if parent.mode.Load()&linux.ModeSticky == 0 { var ok bool parent.childrenMu.Lock() child, ok = parent.children[name] parent.childrenMu.Unlock() if ok && child == nil { // Hit a negative cached entry, child doesn't exist. return linuxerr.ENOENT } } else { child, _, err = fs.stepLocked(ctx, resolvingPathFull(rp), parent, false /* mayFollowSymlinks */, &ds) if err != nil { return err } if err := parent.mayDelete(rp.Credentials(), child); err != nil { return err } } // If a child dentry exists, prepare to delete it. This should fail if it is // a mount point. We detect mount points by speculatively calling // PrepareDeleteDentry, which fails if child is a mount point. // // Also note that if child is nil, then it can't be a mount point. 
if child != nil { // Hold child.childrenMu so we can check child.children and // child.syntheticChildren. We don't access these fields until a bit later, // but locking child.childrenMu after calling vfs.PrepareDeleteDentry() would // create an inconsistent lock ordering between dentry.childrenMu and // vfs.Dentry.mu (in the VFS lock order, it would make dentry.childrenMu both "a // FilesystemImpl lock" and "a lock acquired by a FilesystemImpl between // PrepareDeleteDentry and CommitDeleteDentry). To avoid this, lock // child.childrenMu before calling PrepareDeleteDentry. child.childrenMu.Lock() defer child.childrenMu.Unlock() if err := vfsObj.PrepareDeleteDentry(mntns, &child.vfsd); err != nil { return err } } flags := uint32(0) // If a dentry exists, use it for best-effort checks on its deletability. if dir { if child != nil { // child must be an empty directory. if child.syntheticChildren != 0 { // +checklocksforce: child.childrenMu is held if child != nil. // This is definitely not an empty directory, irrespective of // fs.opts.interop. vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: PrepareDeleteDentry called if child != nil. return linuxerr.ENOTEMPTY } // If InteropModeShared is in effect and the first call to // PrepareDeleteDentry above succeeded, then child wasn't // revalidated (so we can't expect its file type to be correct) and // individually revalidating its children (to confirm that they // still exist) would be a waste of time. if child.cachedMetadataAuthoritative() { if !child.isDir() { vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above. return linuxerr.ENOTDIR } for _, grandchild := range child.children { // +checklocksforce: child.childrenMu is held if child != nil. if grandchild != nil { vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above. return linuxerr.ENOTEMPTY } } } } flags = linux.AT_REMOVEDIR } else { // child must be a non-directory file. 
if child != nil && child.isDir() { vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above. return linuxerr.EISDIR } if rp.MustBeDir() { if child != nil { vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above. } return linuxerr.ENOTDIR } } if parent.isSynthetic() { if child == nil { return linuxerr.ENOENT } } else if child == nil || !child.isSynthetic() { if err := parent.unlink(ctx, name, flags); err != nil { if child != nil { vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above. } return err } } // Generate inotify events for rmdir or unlink. if dir { parent.watches.Notify(ctx, name, linux.IN_DELETE|linux.IN_ISDIR, 0, vfs.InodeEvent, true /* unlinked */) } else { var cw *vfs.Watches if child != nil { cw = &child.watches } vfs.InotifyRemoveChild(ctx, cw, &parent.watches, name) } parent.childrenMu.Lock() defer parent.childrenMu.Unlock() if child != nil { vfsObj.CommitDeleteDentry(ctx, &child.vfsd) // +checklocksforce: see above. child.setDeleted() if child.isSynthetic() { parent.syntheticChildren-- child.decRefNoCaching() } ds = appendDentry(ds, child) } parent.cacheNegativeLookupLocked(name) if parent.cachedMetadataAuthoritative() { parent.clearDirentsLocked() parent.touchCMtime() if dir { parent.decLinks() } } return nil } // AccessAt implements vfs.Filesystem.Impl.AccessAt. func (fs *filesystem) AccessAt(ctx context.Context, rp *vfs.ResolvingPath, creds *auth.Credentials, ats vfs.AccessTypes) error { var ds *[]*dentry fs.renameMu.RLock() defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds) d, err := fs.resolveLocked(ctx, rp, &ds) if err != nil { return err } if err := d.checkPermissions(creds, ats); err != nil { return err } if ats.MayWrite() && rp.Mount().ReadOnly() { return linuxerr.EROFS } return nil } // GetDentryAt implements vfs.FilesystemImpl.GetDentryAt. 
func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetDentryOptions) (*vfs.Dentry, error) {
	// ds accumulates dentries whose cache eligibility must be re-checked once
	// renameMu is released; the deferred call below performs that check.
	var ds *[]*dentry
	fs.renameMu.RLock()
	defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
	d, err := fs.resolveLocked(ctx, rp, &ds)
	if err != nil {
		return nil, err
	}
	if opts.CheckSearchable {
		// Caller asked for a searchable directory: the resolved dentry must be
		// a directory and the credentials must have exec (search) permission.
		if !d.isDir() {
			return nil, linuxerr.ENOTDIR
		}
		if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
			return nil, err
		}
	}
	d.IncRef()
	// Call d.checkCachingLocked() so it can be removed from the cache if needed.
	ds = appendDentry(ds, d)
	return &d.vfsd, nil
}

// GetParentDentryAt implements vfs.FilesystemImpl.GetParentDentryAt.
func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPath) (*vfs.Dentry, error) {
	var ds *[]*dentry
	fs.renameMu.RLock()
	defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
	start := rp.Start().Impl().(*dentry)
	d, err := fs.walkParentDirLocked(ctx, rp, start, &ds)
	if err != nil {
		return nil, err
	}
	d.IncRef()
	// Call d.checkCachingLocked() so it can be removed from the cache if needed.
	ds = appendDentry(ds, d)
	return &d.vfsd, nil
}

// LinkAt implements vfs.FilesystemImpl.LinkAt.
func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
	err := fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, name string, ds **[]*dentry) (*dentry, error) {
		// Hard links may not cross mounts.
		if rp.Mount() != vd.Mount() {
			return nil, linuxerr.EXDEV
		}
		d := vd.Dentry().Impl().(*dentry)
		// Hard links to directories are forbidden.
		if d.isDir() {
			return nil, linuxerr.EPERM
		}
		gid := auth.KGID(d.gid.Load())
		uid := auth.KUID(d.uid.Load())
		mode := linux.FileMode(d.mode.Load())
		if err := vfs.MayLink(rp.Credentials(), mode, uid, gid); err != nil {
			return nil, err
		}
		// The link target must still exist (nlink > 0) and must not already
		// be at the maximum link count.
		if d.nlink.Load() == 0 {
			return nil, linuxerr.ENOENT
		}
		if d.nlink.Load() == math.MaxUint32 {
			return nil, linuxerr.EMLINK
		}
		return parent.link(ctx, d, name)
	}, nil)

	if err == nil {
		// Success!
		// Reflect the new hard link in the target's cached link count.
		vd.Dentry().Impl().(*dentry).incLinks()
	}
	return err
}

// MkdirAt implements vfs.FilesystemImpl.MkdirAt.
func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {
	creds := rp.Credentials()
	return fs.doCreateAt(ctx, rp, true /* dir */, func(parent *dentry, name string, ds **[]*dentry) (*dentry, error) {
		// If the parent is a setgid directory, use the parent's GID
		// rather than the caller's and enable setgid.
		kgid := creds.EffectiveKGID
		mode := opts.Mode
		if parent.mode.Load()&linux.S_ISGID != 0 {
			kgid = auth.KGID(parent.gid.Load())
			mode |= linux.S_ISGID
		}
		child, err := parent.mkdir(ctx, name, mode, creds.EffectiveKUID, kgid)
		if err == nil {
			if fs.opts.interop != InteropModeShared {
				parent.incLinks()
			}
			return child, nil
		}
		// Remote mkdir failed. Unless the caller asked for a synthetic
		// mountpoint (and the failure wasn't EEXIST), propagate the error.
		if !opts.ForSyntheticMountpoint || linuxerr.Equals(linuxerr.EEXIST, err) {
			return nil, err
		}
		ctx.Infof("Failed to create remote directory %q: %v; falling back to synthetic directory", name, err)
		child = fs.newSyntheticDentry(&createSyntheticOpts{
			name: name,
			mode: linux.S_IFDIR | opts.Mode,
			kuid: creds.EffectiveKUID,
			kgid: creds.EffectiveKGID,
		})
		if fs.opts.interop != InteropModeShared {
			parent.incLinks()
		}
		return child, nil
	}, func(parent *dentry, name string) (*dentry, error) {
		// This callback handles creation inside synthetic directories, where
		// only synthetic mountpoints may be created.
		if !opts.ForSyntheticMountpoint {
			// Can't create non-synthetic files in synthetic directories.
			return nil, linuxerr.EPERM
		}
		child := fs.newSyntheticDentry(&createSyntheticOpts{
			name: name,
			mode: linux.S_IFDIR | opts.Mode,
			kuid: creds.EffectiveKUID,
			kgid: creds.EffectiveKGID,
		})
		parent.incLinks()
		return child, nil
	})
}

// MknodAt implements vfs.FilesystemImpl.MknodAt.
func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error { return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, name string, ds **[]*dentry) (*dentry, error) { creds := rp.Credentials() if child, err := parent.mknod(ctx, name, creds, &opts); err == nil { return child, nil } else if !linuxerr.Equals(linuxerr.EPERM, err) { return nil, err } // EPERM means that gofer does not allow creating a socket or pipe. Fallback // to creating a synthetic one, i.e. one that is kept entirely in memory. // Check that we're not overriding an existing file with a synthetic one. _, _, err := fs.stepLocked(ctx, resolvingPathFull(rp), parent, false /* mayFollowSymlinks */, ds) // +checklocksforce: parent.opMu taken by doCreateAt. switch { case err == nil: // Step succeeded, another file exists. return nil, linuxerr.EEXIST case !linuxerr.Equals(linuxerr.ENOENT, err): // Schrödinger. File/Cat may or may not exist. return nil, err } switch opts.Mode.FileType() { case linux.S_IFSOCK: return fs.newSyntheticDentry(&createSyntheticOpts{ name: name, mode: opts.Mode, kuid: creds.EffectiveKUID, kgid: creds.EffectiveKGID, endpoint: opts.Endpoint, }), nil case linux.S_IFIFO: return fs.newSyntheticDentry(&createSyntheticOpts{ name: name, mode: opts.Mode, kuid: creds.EffectiveKUID, kgid: creds.EffectiveKGID, pipe: pipe.NewVFSPipe(true /* isNamed */, pipe.DefaultPipeSize), }), nil } // Retain error from gofer if synthetic file cannot be created internally. return nil, linuxerr.EPERM }, nil) } // OpenAt implements vfs.FilesystemImpl.OpenAt. func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) { // Reject O_TMPFILE, which is not supported; supporting it correctly in the // presence of other remote filesystem users requires remote filesystem // support, and it isn't clear that there's any way to implement this in // 9P. 
if opts.Flags&linux.O_TMPFILE != 0 { return nil, linuxerr.EOPNOTSUPP } mayCreate := opts.Flags&linux.O_CREAT != 0 mustCreate := opts.Flags&(linux.O_CREAT|linux.O_EXCL) == (linux.O_CREAT | linux.O_EXCL) var ds *[]*dentry fs.renameMu.RLock() unlocked := false unlock := func() { if !unlocked { fs.renameMuRUnlockAndCheckCaching(ctx, &ds) unlocked = true } } defer unlock() start := rp.Start().Impl().(*dentry) if rp.Done() { // Reject attempts to open mount root directory with O_CREAT. if mayCreate && rp.MustBeDir() { return nil, linuxerr.EISDIR } if mustCreate { return nil, linuxerr.EEXIST } if !start.cachedMetadataAuthoritative() { // Refresh dentry's attributes before opening. if err := start.updateMetadata(ctx); err != nil { return nil, err } } start.IncRef() defer start.DecRef(ctx) unlock() // start is intentionally not added to ds (which would remove it from the // cache) because doing so regresses performance in practice. return start.open(ctx, rp, &opts) } afterTrailingSymlink: parent, err := fs.walkParentDirLocked(ctx, rp, start, &ds) if err != nil { return nil, err } // Check for search permission in the parent directory. if err := parent.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil { return nil, err } // Reject attempts to open directories with O_CREAT. if mayCreate && rp.MustBeDir() { return nil, linuxerr.EISDIR } if err := fs.revalidateOne(ctx, rp.VirtualFilesystem(), parent, rp.Component(), &ds); err != nil { return nil, err } // Determine whether or not we need to create a file. // NOTE(b/263297063): Don't hold opMu for writing here, to avoid // serializing OpenAt calls in the same directory in the common case // that the file exists. parent.opMu.RLock() child, followedSymlink, err := fs.stepLocked(ctx, resolvingPathFull(rp), parent, true /* mayFollowSymlinks */, &ds) parent.opMu.RUnlock() if followedSymlink { if mustCreate { // EEXIST must be returned if an existing symlink is opened with O_EXCL. 
return nil, linuxerr.EEXIST } if err != nil { // If followedSymlink && err != nil, then this symlink resolution error // must be handled by the VFS layer. return nil, err } start = parent goto afterTrailingSymlink } if linuxerr.Equals(linuxerr.ENOENT, err) && mayCreate { if parent.isSynthetic() { return nil, linuxerr.EPERM } // Take opMu for writing, but note that the file may have been // created by another goroutine since we checked for existence // a few lines ago. We must handle that case. parent.opMu.Lock() fd, createErr := parent.createAndOpenChildLocked(ctx, rp, &opts, &ds) if !linuxerr.Equals(linuxerr.EEXIST, createErr) { // Either the creation was a success, or we got an // unexpected error. Either way we can return here. parent.opMu.Unlock() return fd, createErr } // We raced, and now the file exists. if mustCreate { parent.opMu.Unlock() return nil, linuxerr.EEXIST } // Step to the file again. Since we still hold opMu for // writing, there can't be a race here. child, _, err = fs.stepLocked(ctx, resolvingPathFull(rp), parent, false /* mayFollowSymlinks */, &ds) parent.opMu.Unlock() } if err != nil { return nil, err } if mustCreate { return nil, linuxerr.EEXIST } if rp.MustBeDir() && !child.isDir() { return nil, linuxerr.ENOTDIR } child.IncRef() defer child.DecRef(ctx) unlock() // child is intentionally not added to ds (which would remove it from the // cache) because doing so regresses performance in practice. return child.open(ctx, rp, &opts) } // Preconditions: The caller must hold no locks (since opening pipes may block // indefinitely). func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions) (*vfs.FileDescription, error) { ats := vfs.AccessTypesForOpenFlags(opts) if err := d.checkPermissions(rp.Credentials(), ats); err != nil { return nil, err } if !d.isSynthetic() { // renameMu is locked here because it is required by d.openHandle(), which // is called by d.ensureSharedHandle() and d.openSpecialFile() below. 
It is // also required by d.connect() which is called by // d.openSocketByConnecting(). Note that opening non-synthetic pipes may // block, renameMu is unlocked separately in d.openSpecialFile() for pipes. d.fs.renameMu.RLock() defer d.fs.renameMu.RUnlock() } trunc := opts.Flags&linux.O_TRUNC != 0 && d.fileType() == linux.S_IFREG if trunc { // Lock metadataMu *while* we open a regular file with O_TRUNC because // open(2) will change the file size on server. d.metadataMu.Lock() defer d.metadataMu.Unlock() } var vfd *vfs.FileDescription var err error mnt := rp.Mount() switch d.fileType() { case linux.S_IFREG: if !d.fs.opts.regularFilesUseSpecialFileFD { if err := d.ensureSharedHandle(ctx, ats.MayRead(), ats.MayWrite(), trunc); err != nil { return nil, err } fd, err := newRegularFileFD(mnt, d, opts.Flags) if err != nil { return nil, err } vfd = &fd.vfsfd } case linux.S_IFDIR: // Can't open directories with O_CREAT. if opts.Flags&linux.O_CREAT != 0 { return nil, linuxerr.EISDIR } // Can't open directories writably. if ats&vfs.MayWrite != 0 { return nil, linuxerr.EISDIR } if opts.Flags&linux.O_DIRECT != 0 { return nil, linuxerr.EINVAL } if !d.isSynthetic() { if err := d.ensureSharedHandle(ctx, ats&vfs.MayRead != 0, false /* write */, false /* trunc */); err != nil { return nil, err } } fd := &directoryFD{} fd.LockFD.Init(&d.locks) if err := fd.vfsfd.Init(fd, opts.Flags, mnt, &d.vfsd, &vfs.FileDescriptionOptions{}); err != nil { return nil, err } if d.readFD.Load() >= 0 { fsmetric.GoferOpensHost.Increment() } else { fsmetric.GoferOpens9P.Increment() } return &fd.vfsfd, nil case linux.S_IFLNK: // Can't open symlinks without O_PATH, which is handled at the VFS layer. 
return nil, linuxerr.ELOOP case linux.S_IFSOCK: if d.isSynthetic() { return nil, linuxerr.ENXIO } if d.fs.iopts.OpenSocketsByConnecting { return d.openSocketByConnecting(ctx, opts) } case linux.S_IFIFO: if d.isSynthetic() { return d.pipe.Open(ctx, mnt, &d.vfsd, opts.Flags, &d.locks) } if d.fs.opts.disableFifoOpen { return nil, linuxerr.EPERM } } if vfd == nil { if vfd, err = d.openSpecialFile(ctx, mnt, opts); err != nil { return nil, err } } if trunc { // If no errors occured so far then update file size in memory. This // step is required even if !d.cachedMetadataAuthoritative() because // d.mappings has to be updated. // d.metadataMu has already been acquired if trunc == true. d.updateSizeLocked(0) if d.cachedMetadataAuthoritative() { d.touchCMtimeLocked() } } return vfd, err } // Precondition: fs.renameMu is locked. func (d *dentry) openSocketByConnecting(ctx context.Context, opts *vfs.OpenOptions) (*vfs.FileDescription, error) { if opts.Flags&linux.O_DIRECT != 0 { return nil, linuxerr.EINVAL } // Note that special value of linux.SockType = 0 is interpreted by lisafs // as "do not care about the socket type". Analogous to p9.AnonymousSocket. sockFD, err := d.connect(ctx, 0 /* sockType */) if err != nil { return nil, err } fd, err := host.NewFD(ctx, kernel.KernelFromContext(ctx).HostMount(), sockFD, &host.NewFDOptions{ HaveFlags: true, Flags: opts.Flags, }) if err != nil { unix.Close(sockFD) return nil, err } return fd, nil } // Preconditions: // - !d.isSynthetic(). // - fs.renameMu is locked. It may be released temporarily while pipe blocks. // - If d is a pipe, no other locks (other than fs.renameMu) should be held. 
func (d *dentry) openSpecialFile(ctx context.Context, mnt *vfs.Mount, opts *vfs.OpenOptions) (*vfs.FileDescription, error) { ats := vfs.AccessTypesForOpenFlags(opts) if opts.Flags&linux.O_DIRECT != 0 && !d.isRegularFile() { return nil, linuxerr.EINVAL } // We assume that the server silently inserts O_NONBLOCK in the open flags // for all named pipes (because all existing gofers do this). // // NOTE(b/133875563): This makes named pipe opens racy, because the // mechanisms for translating nonblocking to blocking opens can only detect // the instantaneous presence of a peer holding the other end of the pipe // open, not whether the pipe was *previously* opened by a peer that has // since closed its end. isBlockingOpenOfNamedPipe := d.fileType() == linux.S_IFIFO && opts.Flags&linux.O_NONBLOCK == 0 retry: h, err := d.openHandle(ctx, ats.MayRead(), ats.MayWrite(), opts.Flags&linux.O_TRUNC != 0) if err != nil { if isBlockingOpenOfNamedPipe && ats == vfs.MayWrite && linuxerr.Equals(linuxerr.ENXIO, err) { // An attempt to open a named pipe with O_WRONLY|O_NONBLOCK fails // with ENXIO if opening the same named pipe with O_WRONLY would // block because there are no readers of the pipe. Release renameMu // while blocking. d.fs.renameMu.RUnlock() err := sleepBetweenNamedPipeOpenChecks(ctx) d.fs.renameMu.RLock() if err != nil { return nil, err } goto retry } return nil, err } if isBlockingOpenOfNamedPipe && ats == vfs.MayRead && h.fd >= 0 { // Release renameMu while blocking. d.fs.renameMu.RUnlock() err := blockUntilNonblockingPipeHasWriter(ctx, h.fd) d.fs.renameMu.RLock() if err != nil { h.close(ctx) return nil, err } } fd, err := newSpecialFileFD(h, mnt, d, opts.Flags) if err != nil { h.close(ctx) return nil, err } return &fd.vfsfd, nil } // Preconditions: // - d.fs.renameMu must be locked. // - d.opMu must be locked for writing. // - !d.isSynthetic(). 
// // +checklocks:d.opMu func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions, ds **[]*dentry) (*vfs.FileDescription, error) { if err := d.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil { return nil, err } if d.isDeleted() { return nil, linuxerr.ENOENT } mnt := rp.Mount() if err := mnt.CheckBeginWrite(); err != nil { return nil, err } defer mnt.EndWrite() creds := rp.Credentials() name := rp.Component() // If the parent is a setgid directory, use the parent's GID rather // than the caller's. kgid := creds.EffectiveKGID if d.mode.Load()&linux.S_ISGID != 0 { kgid = auth.KGID(d.gid.Load()) } child, h, err := d.openCreate(ctx, name, opts.Flags&linux.O_ACCMODE, opts.Mode, creds.EffectiveKUID, kgid) if err != nil { return nil, err } // Incorporate the fid that was opened by lcreate. useRegularFileFD := child.fileType() == linux.S_IFREG && !d.fs.opts.regularFilesUseSpecialFileFD if useRegularFileFD { var readable, writable bool child.handleMu.Lock() if vfs.MayReadFileWithOpenFlags(opts.Flags) { readable = true if h.fd != -1 { child.readFD = atomicbitops.FromInt32(h.fd) child.mmapFD = atomicbitops.FromInt32(h.fd) } } if vfs.MayWriteFileWithOpenFlags(opts.Flags) { writable = true child.writeFD = atomicbitops.FromInt32(h.fd) } child.updateHandles(ctx, h, readable, writable) child.handleMu.Unlock() } // Insert the dentry into the tree. d.childrenMu.Lock() // We have d.opMu for writing, so there can not be a cached child with // this name. We could not have raced. d.cacheNewChildLocked(child, name) appendNewChildDentry(ds, d, child) if d.cachedMetadataAuthoritative() { d.touchCMtime() d.clearDirentsLocked() } d.childrenMu.Unlock() // Finally, construct a file description representing the created file. 
var childVFSFD *vfs.FileDescription if useRegularFileFD { fd, err := newRegularFileFD(mnt, child, opts.Flags) if err != nil { return nil, err } childVFSFD = &fd.vfsfd } else { fd, err := newSpecialFileFD(h, mnt, child, opts.Flags) if err != nil { h.close(ctx) return nil, err } childVFSFD = &fd.vfsfd } d.watches.Notify(ctx, name, linux.IN_CREATE, 0, vfs.PathEvent, false /* unlinked */) return childVFSFD, nil } // ReadlinkAt implements vfs.FilesystemImpl.ReadlinkAt. func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (string, error) { var ds *[]*dentry fs.renameMu.RLock() defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds) d, err := fs.resolveLocked(ctx, rp, &ds) if err != nil { return "", err } if !d.isSymlink() { return "", linuxerr.EINVAL } return d.readlink(ctx, rp.Mount()) } // RenameAt implements vfs.FilesystemImpl.RenameAt. func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldParentVD vfs.VirtualDentry, oldName string, opts vfs.RenameOptions) error { // Resolve newParent first to verify that it's on this Mount. var ds *[]*dentry fs.renameMu.Lock() defer fs.renameMuUnlockAndCheckCaching(ctx, &ds) newParent, err := fs.walkParentDirLocked(ctx, rp, rp.Start().Impl().(*dentry), &ds) if err != nil { return err } if opts.Flags&^linux.RENAME_NOREPLACE != 0 { return linuxerr.EINVAL } if fs.opts.interop == InteropModeShared && opts.Flags&linux.RENAME_NOREPLACE != 0 { // Requires 9P support to synchronize with other remote filesystem // users. return linuxerr.EINVAL } newName := rp.Component() if newName == "." || newName == ".." 
{ if opts.Flags&linux.RENAME_NOREPLACE != 0 { return linuxerr.EEXIST } return linuxerr.EBUSY } if len(newName) > MaxFilenameLen { return linuxerr.ENAMETOOLONG } mnt := rp.Mount() if mnt != oldParentVD.Mount() { return linuxerr.EXDEV } if err := mnt.CheckBeginWrite(); err != nil { return err } defer mnt.EndWrite() oldParent := oldParentVD.Dentry().Impl().(*dentry) if !oldParent.cachedMetadataAuthoritative() { if err := oldParent.updateMetadata(ctx); err != nil { return err } } creds := rp.Credentials() if err := oldParent.checkPermissions(creds, vfs.MayWrite|vfs.MayExec); err != nil { return err } vfsObj := rp.VirtualFilesystem() if err := fs.revalidateOne(ctx, vfsObj, newParent, newName, &ds); err != nil { return err } if err := fs.revalidateOne(ctx, vfsObj, oldParent, oldName, &ds); err != nil { return err } // We need a dentry representing the renamed file since, if it's a // directory, we need to check for write permission on it. oldParent.opMu.Lock() defer oldParent.opMu.Unlock() renamed, err := fs.getChildLocked(ctx, oldParent, oldName, &ds) if err != nil { return err } if err := oldParent.mayDelete(creds, renamed); err != nil { return err } if renamed.isDir() { if renamed == newParent || genericIsAncestorDentry(renamed, newParent) { return linuxerr.EINVAL } if oldParent != newParent { if err := renamed.checkPermissions(creds, vfs.MayWrite); err != nil { return err } } } else { if opts.MustBeDir || rp.MustBeDir() { return linuxerr.ENOTDIR } } if oldParent != newParent { if err := newParent.checkPermissions(creds, vfs.MayWrite|vfs.MayExec); err != nil { return err } newParent.opMu.Lock() defer newParent.opMu.Unlock() } if newParent.isDeleted() { return linuxerr.ENOENT } replaced, err := fs.getChildLocked(ctx, newParent, newName, &ds) // +checklocksforce: newParent.opMu taken if newParent != oldParent. 
if err != nil && !linuxerr.Equals(linuxerr.ENOENT, err) { return err } var replacedVFSD *vfs.Dentry if replaced != nil { if opts.Flags&linux.RENAME_NOREPLACE != 0 { return linuxerr.EEXIST } replacedVFSD = &replaced.vfsd if replaced.isDir() { if !renamed.isDir() { return linuxerr.EISDIR } if genericIsAncestorDentry(replaced, renamed) { return linuxerr.ENOTEMPTY } } else { if rp.MustBeDir() || renamed.isDir() { return linuxerr.ENOTDIR } } } if oldParent == newParent && oldName == newName { return nil } mntns := vfs.MountNamespaceFromContext(ctx) defer mntns.DecRef(ctx) if err := vfsObj.PrepareRenameDentry(mntns, &renamed.vfsd, replacedVFSD); err != nil { return err } // Update the remote filesystem. if !renamed.isSynthetic() { if err := oldParent.rename(ctx, oldName, newParent, newName); err != nil { vfsObj.AbortRenameDentry(&renamed.vfsd, replacedVFSD) return err } } else if replaced != nil && !replaced.isSynthetic() { // We are replacing an existing real file with a synthetic one, so we // need to unlink the former. flags := uint32(0) if replaced.isDir() { flags = linux.AT_REMOVEDIR } if err := newParent.unlink(ctx, newName, flags); err != nil { vfsObj.AbortRenameDentry(&renamed.vfsd, replacedVFSD) return err } } // Update the dentry tree. newParent.childrenMu.Lock() defer newParent.childrenMu.Unlock() if oldParent != newParent { oldParent.childrenMu.Lock() defer oldParent.childrenMu.Unlock() } vfsObj.CommitRenameReplaceDentry(ctx, &renamed.vfsd, replacedVFSD) if replaced != nil { replaced.setDeleted() if replaced.isSynthetic() { newParent.syntheticChildren-- replaced.decRefNoCaching() } ds = appendDentry(ds, replaced) // Remove the replaced entry from its parent's cache. delete(newParent.children, newName) } oldParent.cacheNegativeLookupLocked(oldName) // +checklocksforce: oldParent.childrenMu is held if oldParent != newParent. 
	if renamed.isSynthetic() {
		// Keep each parent's synthetic-child count accurate after the move.
		oldParent.syntheticChildren--
		newParent.syntheticChildren++
	}
	// We have d.opMu for writing, so no need to check for existence of a
	// child with the given name. We could not have raced.
	newParent.cacheNewChildLocked(renamed, newName)
	oldParent.decRefNoCaching()
	if oldParent != newParent {
		ds = appendDentry(ds, newParent)
		ds = appendDentry(ds, oldParent)
	}

	// Update metadata.
	if renamed.cachedMetadataAuthoritative() {
		renamed.touchCtime()
	}
	if oldParent.cachedMetadataAuthoritative() {
		oldParent.clearDirentsLocked()
		oldParent.touchCMtime()
		if renamed.isDir() {
			// Moving a directory away removes its ".." link to oldParent.
			oldParent.decLinks()
		}
	}
	if newParent.cachedMetadataAuthoritative() {
		newParent.clearDirentsLocked()
		newParent.touchCMtime()
		if renamed.isDir() && (replaced == nil || !replaced.isDir()) {
			// Increase the link count if we did not replace another directory.
			newParent.incLinks()
		}
	}
	vfs.InotifyRename(ctx, &renamed.watches, &oldParent.watches, &newParent.watches, oldName, newName, renamed.isDir())
	return nil
}

// RmdirAt implements vfs.FilesystemImpl.RmdirAt.
func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error {
	return fs.unlinkAt(ctx, rp, true /* dir */)
}

// SetStatAt implements vfs.FilesystemImpl.SetStatAt.
func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetStatOptions) error {
	var ds *[]*dentry
	fs.renameMu.RLock()
	d, err := fs.resolveLocked(ctx, rp, &ds)
	if err != nil {
		fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
		return err
	}
	err = d.setStat(ctx, rp.Credentials(), &opts, rp.Mount())
	// renameMu is released explicitly (not deferred) so that the inotify
	// event below is generated without renameMu held.
	fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
	if err != nil {
		return err
	}
	if ev := vfs.InotifyEventFromStatMask(opts.Stat.Mask); ev != 0 {
		d.InotifyWithParent(ctx, ev, 0, vfs.InodeEvent)
	}
	return nil
}

// StatAt implements vfs.FilesystemImpl.StatAt.
func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.StatOptions) (linux.Statx, error) {
	var ds *[]*dentry
	fs.renameMu.RLock()
	defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
	d, err := fs.resolveLocked(ctx, rp, &ds)
	if err != nil {
		return linux.Statx{}, err
	}
	// Since walking updates metadata for all traversed dentries under
	// InteropModeShared, including the returned one, we can return cached
	// metadata here regardless of fs.opts.interop.
	var stat linux.Statx
	d.statTo(&stat)
	return stat, nil
}

// StatFSAt implements vfs.FilesystemImpl.StatFSAt.
func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linux.Statfs, error) {
	var ds *[]*dentry
	fs.renameMu.RLock()
	defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
	d, err := fs.resolveLocked(ctx, rp, &ds)
	if err != nil {
		return linux.Statfs{}, err
	}
	// If d is synthetic, invoke statfs on the first ancestor of d that isn't.
	// NOTE(review): this assumes the walk terminates at a non-synthetic
	// ancestor (i.e. the root is never synthetic) — confirm.
	for d.isSynthetic() {
		d = d.parent
	}
	statfs, err := d.statfs(ctx)
	if err != nil {
		return linux.Statfs{}, err
	}
	// Clamp the reported maximum filename length to what this filesystem
	// actually enforces.
	if statfs.NameLength == 0 || statfs.NameLength > MaxFilenameLen {
		statfs.NameLength = MaxFilenameLen
	}
	// This is primarily for distinguishing a gofer file system in
	// tests. Testing is important, so instead of defining
	// something completely random, use a standard value.
	statfs.Type = linux.V9FS_MAGIC
	return statfs, nil
}

// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.
func (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {
	return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, name string, ds **[]*dentry) (*dentry, error) {
		child, err := parent.symlink(ctx, name, target, rp.Credentials())
		if err != nil {
			return nil, err
		}
		if parent.fs.opts.interop != InteropModeShared {
			// Cache the symlink target on creation. In practice, this helps avoid a
			// lot of ReadLink RPCs. Note that when InteropModeShared is in effect,
			// we are forced to make Readlink RPCs. Because in this mode, we use host
			// timestamps, not timestamps based on our internal clock. And readlink
			// updates the atime on the host.
			child.haveTarget = true
			child.target = target
		}
		return child, nil
	}, nil)
}

// UnlinkAt implements vfs.FilesystemImpl.UnlinkAt.
func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error {
	return fs.unlinkAt(ctx, rp, false /* dir */)
}

// BoundEndpointAt implements vfs.FilesystemImpl.BoundEndpointAt.
func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.BoundEndpointOptions) (transport.BoundEndpoint, error) {
	var ds *[]*dentry
	fs.renameMu.RLock()
	defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
	d, err := fs.resolveLocked(ctx, rp, &ds)
	if err != nil {
		return nil, err
	}
	// Connecting requires write access to the socket file.
	if err := d.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {
		return nil, err
	}
	if !d.isSocket() {
		return nil, linuxerr.ECONNREFUSED
	}
	// Synthetic sockets carry their endpoint directly on the dentry.
	if d.endpoint != nil {
		return d.endpoint, nil
	}
	if !d.isSynthetic() {
		// For non-synthetic sockets, wrap the dentry in an endpoint that is
		// resolved through the remote filesystem.
		d.IncRef()
		ds = appendDentry(ds, d)
		return &endpoint{
			dentry: d,
			path:   opts.Addr,
		}, nil
	}
	return nil, linuxerr.ECONNREFUSED
}

// ListXattrAt implements vfs.FilesystemImpl.ListXattrAt.
func (fs *filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, size uint64) ([]string, error) {
	var ds *[]*dentry
	fs.renameMu.RLock()
	defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
	d, err := fs.resolveLocked(ctx, rp, &ds)
	if err != nil {
		return nil, err
	}
	return d.listXattr(ctx, size)
}

// GetXattrAt implements vfs.FilesystemImpl.GetXattrAt.
func (fs *filesystem) GetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetXattrOptions) (string, error) {
	var ds *[]*dentry
	fs.renameMu.RLock()
	defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
	d, err := fs.resolveLocked(ctx, rp, &ds)
	if err != nil {
		return "", err
	}
	return d.getXattr(ctx, rp.Credentials(), &opts)
}

// SetXattrAt implements vfs.FilesystemImpl.SetXattrAt.
func (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetXattrOptions) error { var ds *[]*dentry fs.renameMu.RLock() d, err := fs.resolveLocked(ctx, rp, &ds) if err != nil { fs.renameMuRUnlockAndCheckCaching(ctx, &ds) return err } err = d.setXattr(ctx, rp.Credentials(), &opts) fs.renameMuRUnlockAndCheckCaching(ctx, &ds) if err != nil { return err } d.InotifyWithParent(ctx, linux.IN_ATTRIB, 0, vfs.InodeEvent) return nil } // RemoveXattrAt implements vfs.FilesystemImpl.RemoveXattrAt. func (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath, name string) error { var ds *[]*dentry fs.renameMu.RLock() d, err := fs.resolveLocked(ctx, rp, &ds) if err != nil { fs.renameMuRUnlockAndCheckCaching(ctx, &ds) return err } err = d.removeXattr(ctx, rp.Credentials(), name) fs.renameMuRUnlockAndCheckCaching(ctx, &ds) if err != nil { return err } d.InotifyWithParent(ctx, linux.IN_ATTRIB, 0, vfs.InodeEvent) return nil } // PrependPath implements vfs.FilesystemImpl.PrependPath. func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDentry, b *fspath.Builder) error { fs.renameMu.RLock() defer fs.renameMu.RUnlock() return genericPrependPath(vfsroot, vd.Mount(), vd.Dentry().Impl().(*dentry), b) } type mopt struct { key string value any } func (m mopt) String() string { if m.value == nil { return fmt.Sprintf("%s", m.key) } return fmt.Sprintf("%s=%v", m.key, m.value) } // MountOptions implements vfs.FilesystemImpl.MountOptions. func (fs *filesystem) MountOptions() string { optsKV := []mopt{ {moptTransport, transportModeFD}, // Only valid value, currently. {moptReadFD, fs.opts.fd}, // Currently, read and write FD are the same. {moptWriteFD, fs.opts.fd}, // Currently, read and write FD are the same. 
{moptAname, fs.opts.aname}, {moptDfltUID, fs.opts.dfltuid}, {moptDfltGID, fs.opts.dfltgid}, } switch fs.opts.interop { case InteropModeExclusive: optsKV = append(optsKV, mopt{moptCache, cacheFSCache}) case InteropModeWritethrough: optsKV = append(optsKV, mopt{moptCache, cacheFSCacheWritethrough}) case InteropModeShared: optsKV = append(optsKV, mopt{moptCache, cacheRemoteRevalidating}) } if fs.opts.regularFilesUseSpecialFileFD { optsKV = append(optsKV, mopt{moptDisableFileHandleSharing, nil}) } if fs.opts.disableFifoOpen { optsKV = append(optsKV, mopt{moptDisableFifoOpen, nil}) } if fs.opts.forcePageCache { optsKV = append(optsKV, mopt{moptForcePageCache, nil}) } if fs.opts.limitHostFDTranslation { optsKV = append(optsKV, mopt{moptLimitHostFDTranslation, nil}) } if fs.opts.overlayfsStaleRead { optsKV = append(optsKV, mopt{moptOverlayfsStaleRead, nil}) } if fs.opts.directfs.enabled { optsKV = append(optsKV, mopt{moptDirectfs, nil}) } opts := make([]string, 0, len(optsKV)) for _, opt := range optsKV { opts = append(opts, opt.String()) } return strings.Join(opts, ",") }
package cloudformation // AWSGlueClassifier_JsonClassifier AWS CloudFormation Resource (AWS::Glue::Classifier.JsonClassifier) // See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-glue-classifier-jsonclassifier.html type AWSGlueClassifier_JsonClassifier struct { // JsonPath AWS CloudFormation Property // Required: true // See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-glue-classifier-jsonclassifier.html#cfn-glue-classifier-jsonclassifier-jsonpath JsonPath string `json:"JsonPath,omitempty"` // Name AWS CloudFormation Property // Required: false // See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-glue-classifier-jsonclassifier.html#cfn-glue-classifier-jsonclassifier-name Name string `json:"Name,omitempty"` } // AWSCloudFormationType returns the AWS CloudFormation resource type func (r *AWSGlueClassifier_JsonClassifier) AWSCloudFormationType() string { return "AWS::Glue::Classifier.JsonClassifier" }
package main

import (
	"network"
	"quorum"
	"testing"
)

// TestNetworkedQuorum creates a TCP server and two quorum states attached to
// it. The original version reported failures with println("fail"), which
// never fails the test; t.Fatalf both reports the error and stops the test,
// since the later steps are meaningless once an earlier one has failed.
func TestNetworkedQuorum(t *testing.T) {
	// create a tcp server and 2 states

	// ms == messageSender
	ms, err := network.NewTCPServer(9980)
	if err != nil {
		t.Fatalf("NewTCPServer: %v", err)
	}

	// mh == messageHandler
	_, err = quorum.CreateState(ms)
	if err != nil {
		t.Fatalf("CreateState (first): %v", err)
	}
	_, err = quorum.CreateState(ms)
	if err != nil {
		t.Fatalf("CreateState (second): %v", err)
	}

	// more code here
}
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)

// details splits s on commas and, over all fields (including the first),
// takes the minimum of strings.Index(field, "Y")-strings.LastIndex(field, "X"),
// starting from the length of the first field as the initial bound.
// It returns that minimum minus one.
func details(s string) int {
	fields := strings.Split(s, ",")
	best := len(fields[0])
	for _, field := range fields {
		if gap := strings.Index(field, "Y") - strings.LastIndex(field, "X"); gap < best {
			best = gap
		}
	}
	return best - 1
}

// main reads the file named by the first command-line argument and prints
// details() for each line.
func main() {
	f, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	in := bufio.NewScanner(f)
	for in.Scan() {
		fmt.Println(details(in.Text()))
	}
}
package main

import (
	"html/template"
	"os"
	"fmt"
)

// Person is the root value rendered by templ below.
type Person struct {
	Name   string
	Age    int
	Emails []string
	Jobs   []*Job // pointers referencing Job structs
}

// Job is one employment record rendered inside the Jobs range.
type Job struct {
	Employer string
	Role     string
}

// templ prints the scalar fields, then ranges over Emails, then (guarded by
// a with-block so nothing is printed for an empty slice) ranges over Jobs.
const templ = `The name is {{.Name}}.
The age is {{.Age}}.
{{range .Emails}}
An email is {{.}}
{{end}}
{{with .Jobs}}
{{range .}}
An employer is {{.Employer}}
and the role is {{.Role}}
{{end}}
{{end}}
`

func main() {
	job1 := Job{Employer:"jimmy", Role:"founder"}
	job2 := Job{Employer:"tom", Role:"gooder"}

	person := Person{
		Name: "jan",
		Age: 50,
		Emails: []string{"xxx@11.com", "sss@dd.com"},
		Jobs: []*Job{&job1, &job2},
	}

	// Parse the template, then execute it against person, writing to stdout.
	t := template.New("person template")
	t, err := t.Parse(templ)
	checkError(1, err)

	err = t.Execute(os.Stdout, person)
	checkError(2, err)
}

// checkError prints err and exits with the given code when err is non-nil.
func checkError(code int, err error) {
	if err != nil {
		fmt.Println(err)
		os.Exit(code)
	}
}
/* * Lean tool - hypothesis testing application * * https://github.com/MikaelLazarev/willie/ * Copyright (c) 2020. Mikhail Lazarev * */ package marketing import ( "context" "github.com/MikaelLazarev/willie/server/core" ) type service struct { store core.MarketingStoreI cache map[string][]byte isCached bool } func New(store core.MarketingStoreI) core.MarketingServiceI { cs := NewWithoutReload(store) cs.Reload(context.TODO()) return cs } func NewWithoutReload(store core.MarketingStoreI) core.MarketingServiceI { cs := &service{ cache: make(map[string][]byte), store: store, } return cs }
package spotify_client

import (
	"context"

	c "github.com/davidiola/ucc_twitter_bot/constants"
	u "github.com/davidiola/ucc_twitter_bot/utils"
	"github.com/zmb3/spotify"
	"golang.org/x/oauth2/clientcredentials"

	"log"
)

// SpotifyCl wraps an authenticated spotify.Client.
type SpotifyCl struct {
	c *spotify.Client
}

// NewSpotifyCl authenticates via the client-credentials flow using the
// SPOTIFY_ID/SPOTIFY_SEC environment variables and returns a ready client.
// It terminates the process if a token cannot be obtained.
func NewSpotifyCl() *SpotifyCl {
	config := &clientcredentials.Config{
		ClientID:     u.GetEnv(c.SPOTIFY_ID_ENV),
		ClientSecret: u.GetEnv(c.SPOTIFY_SEC_ENV),
		TokenURL:     spotify.TokenURL,
	}
	token, err := config.Token(context.Background())
	if err != nil {
		log.Fatalf("Unable to retrieve authorization token: %v", err)
	}
	client := spotify.Authenticator{}.NewClient(token)
	return &SpotifyCl{c: &client}
}

// RetrieveEpisodesForID pages through all episodes of the show with the
// given ID (US market, 50 per page) and returns them in order.
func (sc *SpotifyCl) RetrieveEpisodesForID(id string) []spotify.EpisodePage {
	country := "US"
	limit := 50
	opts := spotify.Options{
		Country: &country,
		Limit:   &limit,
	}

	episodeList := make([]spotify.EpisodePage, 0)
	uccEps, err := sc.c.GetShowEpisodesOpt(&opts, id)
	for {
		if err != nil {
			log.Fatalf("Unable to retrieve episode list: %v", err)
		}
		episodeList = append(episodeList, uccEps.Episodes...)
		if len(uccEps.Next) == 0 {
			break
		}
		// BUG fix: the NextPage error used to be discarded, so a failed page
		// fetch silently re-appended the previous page or looped forever.
		err = sc.c.NextPage(uccEps)
	}
	return episodeList
}

// RetrieveLinkFromEpisode returns the episode's external Spotify URL, or ""
// when none is present.
func RetrieveLinkFromEpisode(ep spotify.EpisodePage) string {
	if val, ok := ep.ExternalURLs[c.SPOTIFY_LINK_KEY]; ok {
		return val
	}
	return ""
}
package openrtb_ext

// ExtImpBetween defines the contract for bidrequest.imp[i].ext for the
// Between adapter: the adapter host to send the request to and the
// publisher's ID on that platform.
type ExtImpBetween struct {
	Host        string `json:"host"`
	PublisherID string `json:"publisher_id"`
}
package mocks

import (
	"errors"

	"github.com/joaodias/hugito-app/usecases"
	"golang.org/x/oauth2"
)

// RepositoryInteractor is a test double. The Is*Called flags record which
// methods were invoked; the Is*Error flags force canned failures; and
// IsValidRepository is the canned answer for Validate.
type RepositoryInteractor struct {
	IsNewError        bool
	IsValidateError   bool
	IsNewCalled       bool
	IsValidateCalled  bool
	IsValidRepository bool
}

// New records the call, then returns either the configured error or an empty
// Repository.
func (m *RepositoryInteractor) New(name, projectBranch, publicBranch, accessToken string) (*usecases.Repository, error) {
	m.IsNewCalled = true
	if !m.IsNewError {
		return &usecases.Repository{}, nil
	}
	return nil, errors.New("Some error")
}

// Validate records the call, then returns either the configured error or the
// canned validity flag.
func (m *RepositoryInteractor) Validate(name string, projectBranch string, accessToken string, oauthConfiguration *oauth2.Config) (bool, error) {
	m.IsValidateCalled = true
	if !m.IsValidateError {
		return m.IsValidRepository, nil
	}
	return false, errors.New("Some error")
}
package charts import ( "fmt" "time" "github.com/imdario/mergo" "github.com/linkerd/linkerd2/pkg/k8s" "k8s.io/helm/pkg/chartutil" "sigs.k8s.io/yaml" ) const ( helmDefaultChartDir = "linkerd2" helmDefaultHAValuesFile = "values-ha.yaml" ) type ( // Values contains the top-level elements in the Helm charts Values struct { Stage string Namespace string ClusterDomain string ControllerImage string ControllerImageVersion string WebImage string PrometheusImage string GrafanaImage string ImagePullPolicy string UUID string CliVersion string ControllerReplicas uint ControllerLogLevel string PrometheusLogLevel string ControllerComponentLabel string ControllerNamespaceLabel string CreatedByAnnotation string ProxyContainerName string ProxyInjectAnnotation string ProxyInjectDisabled string LinkerdNamespaceLabel string ControllerUID int64 EnableH2Upgrade bool EnablePodAntiAffinity bool HighAvailability bool NoInitContainer bool WebhookFailurePolicy string OmitWebhookSideEffects bool RestrictDashboardPrivileges bool DisableHeartBeat bool HeartbeatSchedule string InstallNamespace bool Configs ConfigJSONs Identity *Identity ProxyInjector *ProxyInjector ProfileValidator *ProfileValidator Tap *Tap Proxy *Proxy ProxyInit *ProxyInit NodeSelector map[string]string DestinationResources, GrafanaResources, HeartbeatResources, IdentityResources, PrometheusResources, ProxyInjectorResources, PublicAPIResources, SPValidatorResources, TapResources, WebResources *Resources } // ConfigJSONs is the JSON encoding of the Linkerd configuration ConfigJSONs struct{ Global, Proxy, Install string } // Proxy contains the fields to set the proxy sidecar container Proxy struct { Capabilities *Capabilities Component string DisableIdentity bool DisableTap bool EnableExternalProfiles bool Image *Image LogLevel string SAMountPath *SAMountPath Ports *Ports Resources *Resources Trace *Trace UID int64 } // ProxyInit contains the fields to set the proxy-init container ProxyInit struct { Capabilities *Capabilities 
IgnoreInboundPorts string IgnoreOutboundPorts string Image *Image SAMountPath *SAMountPath Resources *Resources } // DebugContainer contains the fields to set the debugging sidecar DebugContainer struct { Image *Image } // Image contains the details to define a container image Image struct { Name string PullPolicy string Version string } // Ports contains all the port-related setups Ports struct { Admin int32 Control int32 Inbound int32 Outbound int32 } // Constraints wraps the Limit and Request settings for computational resources Constraints struct { Limit string Request string } // Capabilities contains the SecurityContext capabilities to add/drop into the injected // containers Capabilities struct { Add []string Drop []string } // SAMountPath contains the details for ServiceAccount volume mount SAMountPath struct { Name string MountPath string ReadOnly bool } // Resources represents the computational resources setup for a given container Resources struct { CPU Constraints Memory Constraints } // Identity contains the fields to set the identity variables in the proxy // sidecar container Identity struct { TrustAnchorsPEM string TrustDomain string Issuer *Issuer } // Issuer has the Helm variables of the identity issuer Issuer struct { ClockSkewAllowance string IssuanceLifetime string CrtExpiryAnnotation string CrtExpiry time.Time TLS *TLS } // ProxyInjector has all the proxy injector's Helm variables ProxyInjector struct { *TLS } // ProfileValidator has all the profile validator's Helm variables ProfileValidator struct { *TLS } // Tap has all the Tap's Helm variables Tap struct { *TLS } // TLS has a pair of PEM-encoded key and certificate variables used in the // Helm templates TLS struct { KeyPEM, CrtPEM string } // Trace has all the tracing-related Helm variables Trace struct { CollectorSvcAddr string CollectorSvcAccount string } ) // NewValues returns a new instance of the Values type. 
func NewValues(ha bool) (*Values, error) {
	chartDir := fmt.Sprintf("%s/", helmDefaultChartDir)
	v, err := readDefaults(chartDir, ha)
	if err != nil {
		return nil, err
	}

	// Fields that are not sourced from values.yaml are filled in here.
	v.CliVersion = k8s.CreatedByAnnotationValue()
	v.ProfileValidator = &ProfileValidator{TLS: &TLS{}}
	v.ProxyInjector = &ProxyInjector{TLS: &TLS{}}
	v.ProxyContainerName = k8s.ProxyContainerName
	v.Tap = &Tap{TLS: &TLS{}}

	return v, nil
}

// readDefaults read all the default variables from the values.yaml file.
// chartDir is the root directory of the Helm chart where values.yaml is.
// When ha is true, values-ha.yaml is also loaded and merged on top, so HA
// settings take precedence (see merge below).
func readDefaults(chartDir string, ha bool) (*Values, error) {
	valuesFiles := []*chartutil.BufferedFile{
		{Name: chartutil.ValuesfileName},
	}
	if ha {
		valuesFiles = append(valuesFiles, &chartutil.BufferedFile{
			Name: helmDefaultHAValuesFile,
		})
	}

	if err := filesReader(chartDir, valuesFiles); err != nil {
		return nil, err
	}

	values := Values{}
	for _, valuesFile := range valuesFiles {
		var v Values
		if err := yaml.Unmarshal(valuesFile.Data, &v); err != nil {
			return nil, err
		}
		var err error
		// Later files win over what has been accumulated so far.
		values, err = values.merge(v)
		if err != nil {
			return nil, err
		}
	}

	return &values, nil
}

// merge merges the non-empty properties of src into v.
// A new Values instance is returned. Neither src nor v are mutated after
// calling merge.
func (v Values) merge(src Values) (Values, error) {
	// By default, mergo.Merge doesn't overwrite any existing non-empty values
	// in its first argument. So in HA mode, we are merging values.yaml into
	// values-ha.yaml, instead of the other way round (like Helm). This ensures
	// that all the HA values take precedence.
	if err := mergo.Merge(&src, v); err != nil {
		return Values{}, err
	}

	return src, nil
}
package presenters

import (
	"encoding/json"
	"io"

	"github.com/pivotal-cf/om/api"
	"github.com/pivotal-cf/om/models"
)

// JSONPresenter renders each collection as a single JSON object written to
// the configured writer, keyed by the collection's name.
type JSONPresenter struct {
	stdout io.Writer
}

// NewJSONPresenter returns a JSONPresenter that writes to stdout.
func NewJSONPresenter(stdout io.Writer) JSONPresenter {
	return JSONPresenter{
		stdout: stdout,
	}
}

// PresentAvailableProducts emits {"available_products": [...]}.
func (j JSONPresenter) PresentAvailableProducts(products []models.Product) {
	j.encodeJSON(&map[string][]models.Product{
		"available_products": products,
	})
}

// PresentCertificateAuthorities emits {"certificate_authorities": [...]}.
func (j JSONPresenter) PresentCertificateAuthorities(certificateAuthorities []api.CA) {
	j.encodeJSON(&map[string][]api.CA{
		"certificate_authorities": certificateAuthorities,
	})
}

// PresentCredentialReferences emits {"credential_references": [...]}.
func (j JSONPresenter) PresentCredentialReferences(credentialReferences []string) {
	j.encodeJSON(&map[string][]string{
		"credential_references": credentialReferences,
	})
}

// PresentCredentials emits {"credential": {...}}.
func (j JSONPresenter) PresentCredentials(credentials map[string]string) {
	j.encodeJSON(&map[string]map[string]string{
		"credential": credentials,
	})
}

// PresentDeployedProducts emits {"deployed_products": [...]}.
func (j JSONPresenter) PresentDeployedProducts(deployedProducts []api.DiagnosticProduct) {
	j.encodeJSON(&map[string][]api.DiagnosticProduct{
		"deployed_products": deployedProducts,
	})
}

// PresentErrands emits {"errands": [...]}.
func (j JSONPresenter) PresentErrands(errands []models.Errand) {
	j.encodeJSON(&map[string][]models.Errand{
		"errands": errands,
	})
}

// PresentCertificateAuthority emits {"certificate_authority": {...}}.
func (j JSONPresenter) PresentCertificateAuthority(certificateAuthority api.CA) {
	j.encodeJSON(&map[string]api.CA{
		"certificate_authority": certificateAuthority,
	})
}

// PresentInstallations emits {"installations": [...]}.
func (j JSONPresenter) PresentInstallations(installations []models.Installation) {
	j.encodeJSON(&map[string][]models.Installation{
		"installations": installations,
	})
}

// PresentPendingChanges emits {"pending_changes": [...]}.
func (j JSONPresenter) PresentPendingChanges(pendingChanges []api.ProductChange) {
	j.encodeJSON(&map[string][]api.ProductChange{
		"pending_changes": pendingChanges,
	})
}

// PresentStagedProducts emits {"staged_products": [...]}.
func (j JSONPresenter) PresentStagedProducts(stagedProducts []api.DiagnosticProduct) {
	j.encodeJSON(&map[string][]api.DiagnosticProduct{
		"staged_products": stagedProducts,
	})
}

// encodeJSON serializes v to the presenter's writer.
// NOTE(review): the Encode error is discarded, so a serialization or write
// failure is silent — confirm that is acceptable for this CLI's output path.
func (j JSONPresenter) encodeJSON(v interface{}) {
	encoder := json.NewEncoder(j.stdout)
	encoder.Encode(&v)
}
package main import ( "github.com/TutorialEdge/api.tutorialedge.net/database" "github.com/TutorialEdge/api.tutorialedge.net/users" "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-lambda-go/lambda" ) func handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) { db, err := database.GetDBConn() if err != nil { panic(err.Error()) } defer db.Close() switch request.HTTPMethod { case "GET": response, _ := users.GetUser(request, db) return response, nil case "POST": response, _ := users.NewUser(request, db) return response, nil } return events.APIGatewayProxyResponse{ Body: "{\"status\": \"success\"}", StatusCode: 200, }, nil } func main() { lambda.Start(handler) }
package helper

import "errors"

// Compact returns a copy of list with "falsy" elements removed: nil, the
// untyped zero 0, false, and the empty string. It returns an error when list
// itself is nil.
//
// Note: interface comparison only matches values identical to the untyped
// constants, so e.g. int32(0) or 0.0 are kept — unchanged from the original
// contract for numeric types.
func Compact(list []interface{}) ([]interface{}, error) {
	if list == nil {
		return nil, errors.New("missing list")
	}
	results := []interface{}{}
	for _, v := range list {
		// BUG fix: nil elements previously passed the filter because
		// nil != 0, nil != false, and nil != "" are all true.
		if v == nil {
			continue
		}
		if v != 0 && v != false && v != "" {
			results = append(results, v)
		}
	}
	return results, nil
}
// ===================================== // // author: gavingqf // // == Please don'g change me by hand == // //====================================== // /*you have defined the following interface: type IConfig interface { // load interface Load(path string) bool // clear interface Clear() } */ package base import ( "shared/utility/glog" "strings" ) type CfgStoreGoods struct { Id int32 RewardsIdAndCnt []string UnlockCondition []string Probability int32 Times int32 Currencies []int32 RealPrice []int32 } type CfgStoreGoodsConfig struct { data map[int32]*CfgStoreGoods } func NewCfgStoreGoodsConfig() *CfgStoreGoodsConfig { return &CfgStoreGoodsConfig{ data: make(map[int32]*CfgStoreGoods), } } func (c *CfgStoreGoodsConfig) Load(filePath string) bool { parse := NewParser() if err := parse.Load(filePath, true); err != nil { glog.Info("Load", filePath, "err: ", err) return false } // iterator all lines' content for i := 2; i < parse.GetAllCount(); i++ { data := new(CfgStoreGoods) /* parse Id field */ vId, _ := parse.GetFieldByName(uint32(i), "id") var IdRet bool data.Id, IdRet = String2Int32(vId) if !IdRet { glog.Error("Parse CfgStoreGoods.Id field error,value:", vId) return false } /* parse RewardsIdAndCnt field */ vecRewardsIdAndCnt, _ := parse.GetFieldByName(uint32(i), "rewardsIdAndCnt") arrayRewardsIdAndCnt := strings.Split(vecRewardsIdAndCnt, ",") for j := 0; j < len(arrayRewardsIdAndCnt); j++ { v := arrayRewardsIdAndCnt[j] data.RewardsIdAndCnt = append(data.RewardsIdAndCnt, v) } /* parse UnlockCondition field */ vecUnlockCondition, _ := parse.GetFieldByName(uint32(i), "unlockCondition") arrayUnlockCondition := strings.Split(vecUnlockCondition, ",") for j := 0; j < len(arrayUnlockCondition); j++ { v := arrayUnlockCondition[j] data.UnlockCondition = append(data.UnlockCondition, v) } /* parse Probability field */ vProbability, _ := parse.GetFieldByName(uint32(i), "probability") var ProbabilityRet bool data.Probability, ProbabilityRet = String2Int32(vProbability) if 
!ProbabilityRet { glog.Error("Parse CfgStoreGoods.Probability field error,value:", vProbability) return false } /* parse Times field */ vTimes, _ := parse.GetFieldByName(uint32(i), "times") var TimesRet bool data.Times, TimesRet = String2Int32(vTimes) if !TimesRet { glog.Error("Parse CfgStoreGoods.Times field error,value:", vTimes) return false } /* parse Currencies field */ vecCurrencies, _ := parse.GetFieldByName(uint32(i), "currencies") if vecCurrencies != "" { arrayCurrencies := strings.Split(vecCurrencies, ",") for j := 0; j < len(arrayCurrencies); j++ { v, ret := String2Int32(arrayCurrencies[j]) if !ret { glog.Error("Parse CfgStoreGoods.Currencies field error, value:", arrayCurrencies[j]) return false } data.Currencies = append(data.Currencies, v) } } /* parse RealPrice field */ vecRealPrice, _ := parse.GetFieldByName(uint32(i), "realPrice") if vecRealPrice != "" { arrayRealPrice := strings.Split(vecRealPrice, ",") for j := 0; j < len(arrayRealPrice); j++ { v, ret := String2Int32(arrayRealPrice[j]) if !ret { glog.Error("Parse CfgStoreGoods.RealPrice field error, value:", arrayRealPrice[j]) return false } data.RealPrice = append(data.RealPrice, v) } } if _, ok := c.data[data.Id]; ok { glog.Errorf("Find %d repeated", data.Id) return false } c.data[data.Id] = data } return true } func (c *CfgStoreGoodsConfig) Clear() { } func (c *CfgStoreGoodsConfig) Find(id int32) (*CfgStoreGoods, bool) { v, ok := c.data[id] return v, ok } func (c *CfgStoreGoodsConfig) GetAllData() map[int32]*CfgStoreGoods { return c.data } func (c *CfgStoreGoodsConfig) Traverse() { for _, v := range c.data { glog.Info(v.Id, ",", v.RewardsIdAndCnt, ",", v.UnlockCondition, ",", v.Probability, ",", v.Times, ",", v.Currencies, ",", v.RealPrice) } }
package main import ( "bytes" "fmt" "html/template" "io" "io/ioutil" "log" "net/http" "os" "path/filepath" "strconv" "strings" "time" "github.com/karrick/godirwalk" "gopkg.in/urfave/cli.v1" ) var ( dir http.Dir er error programName string tmplt *template.Template wlcm, upld, cntnt template.HTML args = make([]string, 15, 15) currentPath = "" port = ":8080" ssize = false modt = false full = false optQuiet = false ) //Page struct to describe page template type Page struct { Title string Welcome template.HTML UploadForm template.HTML ContentFolder template.HTML } //Article struct for article inside page type Welcome struct { Title, Text string } type Upload struct { } // Fily struct to hold single file type Fily struct { filepath, filename string size int64 moddate time.Time isdir bool } func init() { // get template to know html files needed tmplt = template.Must(template.ParseFiles("./templates/index.html", "./templates/welcome.html", "./templates/form_upload.html")) } func main() { // app params cPath, err := os.Getwd() currentPath = cPath dir = http.Dir(currentPath) if err != nil { er = err log.Println(err) } app := cli.NewApp() app.Name = "sfl" app.Usage = "Serve any folder to local network" app.Flags = []cli.Flag{ cli.StringFlag{ Name: "directory, d", Usage: "Path to folder to serve to", EnvVar: "", Hidden: false, Value: "./", Destination: new(string), }, cli.StringFlag{ Name: "port, p", Usage: "Port to open", EnvVar: "", Hidden: false, Value: ":8080", Destination: new(string), }, cli.BoolFlag{ Name: "size, s", Usage: "Show the Sizes of files", EnvVar: "", Hidden: false, Destination: new(bool), }, cli.BoolFlag{ Name: "modified, m", Usage: "Show the last Modification time", EnvVar: "", Hidden: false, Destination: new(bool), }, cli.BoolFlag{ Name: "full, f", Usage: "Print Full path to files", EnvVar: "", Hidden: false, Destination: new(bool), }, cli.BoolFlag{ Name: "quiet, q", Usage: "ElideQ printing of non-critical error messages.", EnvVar: "", Hidden: false, 
Destination: new(bool), }, cli.BoolFlag{ Name: "upload, u", Usage: "Enable Upload to folder.", EnvVar: "", Hidden: false, Destination: new(bool), }, } app.Action = func(c *cli.Context) error { currentPath = c.GlobalString("directory") dir = http.Dir(currentPath) port = c.GlobalString("port") ssize = c.GlobalBool("size") full = c.GlobalBool("full") optQuiet = c.GlobalBool("quiet") optQuiet = c.GlobalBool("upload") //modt = c.GlobalString("modt") fmt.Printf("Path: %s, Port %s\n\n", dir, port) return nil } app.Run(os.Args) //diveIntoFolder(currentPath) diveDirTree(currentPath) //scanFolder(currentPath) if err != nil { log.Println(err) } intro := &Welcome{ Title: `Welcome.`, Text: `This is local server, you can down/up-load files here.`, } form := &Upload{} var b bytes.Buffer // tmplt.ExecuteTemplate(&b, "welcome.html", intro) // parse var with template for article into buffer wlcm = template.HTML(b.String()) // fill var with string from buffer b.Reset() // clear buffer tmplt.ExecuteTemplate(&b, "form_upload.html", form) // parse into buffer upld = template.HTML(b.String()) // fill var ... 
b.Reset() http.HandleFunc("/", displayPage) http.ListenAndServe(port, nil) } func diveDirTree(path string) { fmt.Println("\nfilepath.Walk------------------") args = nil er = filepath.Walk(path, func(path string, info os.FileInfo, errr error) error { if er != nil { return er } if info.IsDir() == true { args = append(args, "+") } if full != false { args = append(args, path) } else { args = append(args, info.Name()) } if ssize != false && info.IsDir() == false { args = append(args, " "+strconv.FormatInt(info.Size()/1000, 10)+"Kb") } // if modt != false { // args = append(args, info.ModTime().Local().String()) // } args = append(args, "\n") return nil }) printList(args) args = nil } func diveIntoFolder(path string) { files, err := ioutil.ReadDir(path) if err != nil { log.Fatal(err) } for _, f := range files { if full != false { // how to print path args = append(args, path+"/"+f.Name()) } else { args = append(args, f.Name()) } if ssize != false && f.IsDir() != true { // when to print sizes fl := float64(f.Size()) / 1000000 sz := fmt.Sprintf("%.3f", fl) args = append(args, " "+sz+"Mb ") } if modt != false { args = append(args, " "+f.ModTime().Local().String()+" ") } if f.IsDir() != false { args = append([]string{"\n|><| "}, args...) 
args = append(args, "\n") diveIntoFolder(path + "/" + f.Name()) args = append(args, "\n") } else { args = append(args, "\n") } printList(args) args = nil } } func scanFolder(path string) { err := godirwalk.Walk(path, &godirwalk.Options{ Callback: func(osPathname string, de *godirwalk.Dirent) error { fmt.Printf("%s %s\n", de.ModeType(), osPathname) return nil }, ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction { fmt.Fprintf(os.Stderr, "ERROR: %s\n", err) return godirwalk.SkipNode // TODO: hold error }, Unsorted: true, // set true for faster yet non-deterministic enumeration (see godoc) }) if err != nil { fmt.Fprintf(os.Stderr, "%s\n", err) os.Exit(1) } } func getFInfo(f os.FileInfo) { if f.IsDir() == true { args = append(args, "\n|=| ") diveIntoFolder(currentPath + "/" + f.Name()) } } func printList(args []string) { s := strings.Join(args, "") fmt.Printf(s) } func stderr(f string, args ...interface{}) { fmt.Fprintf(os.Stderr, programName+": "+fmt.Sprintf(f, args...)+"\n") } func fatal(f string, args ...interface{}) { stderr(f, args...) os.Exit(1) } func warning(f string, args ...interface{}) { if !optQuiet { stderr(f, args...) 
} } func displayPage(w http.ResponseWriter, r *http.Request) { var b bytes.Buffer maxValueBytes := int64(10 << 20) // 10mb for holding not-files information if r.Method == "GET" { tit := "Serving: " + currentPath p := &Page{ // make var to hold whole page content Title: tit, // assign page itle Welcome: wlcm, // post var to parameter UploadForm: upld, ContentFolder: wlcm, } tmplt.ExecuteTemplate(w, "index.html", p) } else { mr, err := r.MultipartReader() values := make(map[string][]string) if err != nil { panic("Failed to read multipart message: ") } for { part, err := mr.NextPart() if err == io.EOF { break } name := part.FormName() if name == "" { continue } filename := part.FileName() if filename == "" { // not a file n, err := io.CopyN(&b, part, maxValueBytes) if err != nil && err != io.EOF { fmt.Fprint(w, "Error processing form") return } maxValueBytes -= n if maxValueBytes == 0 { fmt.Fprint(w, "multipart message too large") return } values[name] = append(values[name], b.String()) continue } now := time.Now().Format("(Jan _2 15-04-05)-") newFilePath := currentPath + "/" + now + filename dst, err := os.Create(newFilePath) defer dst.Close() if err != nil { return } for { buffer := make([]byte, 100000) cBytes, err := part.Read(buffer) dst.Write(buffer[0:cBytes]) if err == io.EOF { break } } } fmt.Printf("Upload %s done", values) fmt.Fprint(w, "Upload complete") } }
package recommend import ( "fmt" "log" "time" "regexp" "net/url" "sort" "github.com/syou6162/go-active-learning-web/lib/submodular" "github.com/syou6162/go-active-learning/lib/classifier" "github.com/syou6162/go-active-learning/lib/hatena_bookmark" "github.com/syou6162/go-active-learning/lib/model" "github.com/syou6162/go-active-learning/lib/service" "github.com/syou6162/go-active-learning/lib/util" "github.com/urfave/cli" ) var listName2Rule = map[string]*regexp.Regexp{ "general": regexp.MustCompile(`.+`), "article": regexp.MustCompile(`.+`), // あとでog:typeで絞り込む "github": regexp.MustCompile(`https://github.com/[^/]+/[^/]+$`), // リポジトリのトップのみ "slide": regexp.MustCompile(`https://(www.slideshare.net|speakerdeck.com|docs.google.com/presentation/d)/.+$`), "arxiv": regexp.MustCompile(`https://(arxiv.org/abs/.+$|openreview.net/forum\?id=.+$)`), "video": regexp.MustCompile(`https?://(www.youtube.com/watch\?v=.+$|videolectures.net/.+$|vimeo.com/.+$)`), "event": regexp.MustCompile(`https://(.*?\.?connpass.com/event/.+/$|techplay.jp/event/.+)$`), } var listName2Hosts = map[string][]string{ "general": {"http"}, "article": {"http"}, "github": {"https://github.com"}, "slide": {"https://www.slideshare.net", "https://speakerdeck.com"}, "arxiv": {"https://arxiv.org", "https://openreview.net"}, "video": {"https://www.youtube.com", "http://videolectures.net", "https://vimeo.com"}, "event": {"https://"}, } func UniqByHost(examples model.Examples) model.Examples { result := model.Examples{} examplesByHost := map[string]model.Examples{} for _, e := range examples { if u, err := url.Parse(e.FinalUrl); err == nil { examplesByHost[u.Host] = append(examplesByHost[u.Host], e) } } for _, arry := range examplesByHost { sort.Sort(sort.Reverse(arry)) result = append(result, arry[0]) } return result } func doRecommend(c *cli.Context) error { subsetSelection := c.Bool("subset-selection") uniqueByHost := c.Bool("unique-by-host") sizeConstraint := c.Int("size-constraint") alpha := c.Float64("alpha") 
r := c.Float64("r") lambda := c.Float64("lambda") scoreThreshold := c.Float64("score-threshold") durationDay := c.Int64("duration-day") listName := c.String("listname") rule, ok := listName2Rule[listName] if ok == false { return cli.NewExitError("No matched rule", 1) } app, err := service.NewDefaultApp() if err != nil { return err } defer app.Close() targetExamples := model.Examples{} hosts, ok := listName2Hosts[listName] if ok == false { return cli.NewExitError("No matched rule", 1) } for _, h := range hosts { tmp, err := app.SearchRecentExamplesByHost(h, time.Now().Add(-time.Duration(24*durationDay)*time.Hour), 10000) if err != nil { return err } targetExamples = append(targetExamples, tmp...) } targetExamples = util.RemoveNegativeExamples(targetExamples) log.Println("Started to attach metadata to positive or unlabeled...") if err = app.AttachMetadata(targetExamples, 0, 0); err != nil { return err } okExamples := util.FilterStatusCodeOkExamples(targetExamples) if err = app.AttachMetadataIncludingFeatureVector(okExamples, 0, 0); err != nil { return err } notOkExamples := util.FilterStatusCodeNotOkExamples(targetExamples) app.Fetch(notOkExamples) for _, e := range util.FilterStatusCodeOkExamples(notOkExamples) { app.UpdateOrCreateExample(e) app.UpdateFeatureVector(e) } targetExamples = util.FilterStatusCodeOkExamples(targetExamples) targetExamples = util.UniqueByFinalUrl(targetExamples) targetExamples = util.UniqueByTitle(targetExamples) log.Println(fmt.Sprintf("target size: %d", len(targetExamples))) m, err := app.FindLatestMIRAModel(classifier.EXAMPLE) if err != nil { return err } log.Println("Started to predict scores...") result := model.Examples{} for _, e := range targetExamples { if !rule.MatchString(e.FinalUrl) { continue } if listName == "general" && e.IsTwitterUrl() { continue } if listName == "article" && !e.IsArticle() { continue } e.Score = m.PredictScore(e.Fv) if e.Score > scoreThreshold { result = append(result, e) } } 
log.Println(fmt.Sprintf("Original result size: %d", len(result))) if uniqueByHost { result = UniqByHost(result) log.Println(fmt.Sprintf("Filtered by host: %d", len(result))) } log.Println("Started to filter by submodular...") if subsetSelection { result = submodular.SelectSubExamplesBySubModular(result, sizeConstraint, alpha, r, lambda) } log.Println("Started to write result...") err = app.UpdateRecommendation(listName, result) if err != nil { return err } for _, e := range result { if bookmark, err := hatena_bookmark.GetHatenaBookmark(e.FinalUrl); err == nil { if e.HatenaBookmark.Count > bookmark.Count { continue } e.HatenaBookmark = bookmark if err = app.UpdateHatenaBookmark(e); err != nil { log.Println(fmt.Sprintf("Error to update bookmark info %s %s", e.Url, err.Error())) } } time.Sleep(time.Second) fmt.Println(fmt.Sprintf("%0.03f\t%s", e.Score, e.Url)) } return nil } var CommandRecommend = cli.Command{ Name: "recommend", Usage: "Get recommendation list and store them", Description: ` Get recommendation list and store them. `, Action: doRecommend, Flags: []cli.Flag{ cli.BoolFlag{Name: "subset-selection", Usage: "Use subset selection algorithm (maximizing submodular function) to filter entries"}, cli.BoolFlag{Name: "unique-by-host", Usage: "Filter entries to be unique by host"}, cli.Int64Flag{Name: "size-constraint", Value: 10, Usage: "Budget constraint. Max number of entries to be contained"}, cli.Float64Flag{Name: "alpha", Value: 1.0}, cli.Float64Flag{Name: "r", Value: 1.0, Usage: "Scaling factor for number of words"}, cli.Float64Flag{Name: "lambda", Value: 1.0, Usage: "Diversity parameter"}, cli.Float64Flag{Name: "score-threshold", Value: 0.0}, cli.StringFlag{Name: "listname", Usage: "List name for cache"}, cli.Int64Flag{Name: "duration-day", Usage: "Time span for fetching prediction target", Value: 2}, }, }
package main

import "fmt"

// main allocates a slice with length 0 and capacity 6, then prints a
// reslice up to its capacity. Slicing beyond len but within cap is legal,
// so this prints six zero values: [0 0 0 0 0 0].
func main() {
	var numeros = make([]int, 0, 6)
	fmt.Println(numeros[:cap(numeros)])
}
package main import ( "fmt" "time" ) /* 结构体是一种聚合的数据类型,是由零个或者多个任意类型的值聚合而成的实体。 每一项值都称为结构体的成员, 结构体变量的成员可以通过 点操作符 访问,如 a.ID, 结构体是变量,他的成员也是变量。 操作结构体的成员变量: 1. 直接操作 2. 对成员取地址,然后通过指针访问 通常一行对应一个结构体的成员,名字在前,类型在后,如果相邻的成员类型相同可以合并到一行, 成员的顺序有意义,成员也依赖于大写开头即可以导出的规则。 命名为 S 的结构体不能有 S 类型的成员,但是 S 类型结构体可以包含 *S 指针类型的成员。 如果结构体的全部成员是可以比较的,该结构体也可以使用 == 或者 != 比较。 将会比较所有成员。 */ type Employee struct { ID int Name string Address string Dob time.Time Position string Salary int ManagerID int } type Point struct { X, Y int } var ik Employee func main() { op() structInitial() compare() } func op() { ik = Employee{ ID: 1, Name:"Ikaros", Address: "Beijing", Dob: time.Now(), } fmt.Println(ik.Address) ik.Salary -=5000 fmt.Printf("Demoted salary, now is %d\n", ik.Salary) position := &ik.Position *position = "Senior" + *position fmt.Printf("New position, now is %s\n", ik.Position) var employeeOfTheMonth = &ik //employeeOfTheMonth.Position += " (proactive team player)" // 与 下方直接使用指针的方式一样 (*employeeOfTheMonth).Position += " (proactive team player)" fmt.Printf("New position for ik, now is %s\n", ik.Position) fmt.Printf("New position for em, now is %s\n", employeeOfTheMonth.Position) fmt.Println(EmployeeByID(1).Position) // update by func id := ik.ID EmployeeByID(id).Salary = 0 fmt.Printf("Fired, now salary is %d\n", ik.Salary) // 通过 结构体和 Map 来实现 集合 的数据结构 但应避免此用法,节约空间有限,而且语法复杂。 /* seen := make(map[string]struct{}) if _, ok := seen[s]; !ok{ seen[s] = struct{}{} } */ } func structInitial() { // 根据字面值语法指定 // 顺序指定字面值初始化 p:= Point{1,2} // 指定成员名称的初始化,推荐 p1 := Point{X:3, Y:4} fmt.Println(p) fmt.Println(p1) } func EmployeeByID(id int) *Employee { /* ... 
*/ if id == ik.ID{ return &ik } return nil } func compare() { p:= Point{1, 2} q:= Point{2, 1} fmt.Println(p.X == q.X && p.Y == q.Y) fmt.Println(p == q) type address struct { hostname string port int } // 可比较的结构体可以用作 map 的 key hits := make(map[address]int) hits[address{"golang.org", 443}] ++ fmt.Println(hits) } func nestedStruct() { type Circle1 struct { X, Y, Radius int } type Wheel1 struct { X, Y, Radius, Spokes int } var w1 Wheel1 w1.X = 8 w1.Y = 8 w1.Radius = 5 w1.Spokes = 20 // 抽离公共属性 type Point struct { X, Y int } type Circle2 struct { Center Point Radius int } type Wheel2 struct { Circle Circle2 Spokes int } var w2 Wheel2 w2.Circle.Center.X = 8 w2.Circle.Center.Y = 8 w2.Circle.Radius = 5 w2.Spokes = 20 // 匿名成员 type Circle3 struct { Point Radius int } type Wheel3 struct { Circle3 Spokes int } var w3 Wheel3 w3.X = 8 w3.Y = 8 w3.Radius = 5 w3.Spokes = 20 // 结构体字面值没有简短的表示法,但是并没有 // w4 = Wheel3{8,8,5,20} Error }
package udp

import "fmt"

// UnknownDirective reports that a key does not have a directive of the
// requested name.
// NOTE(review): the unexported field is misspelled "unknonw_dir" and the
// exported field uses underscore naming (Key_name); renaming could break
// other files in this package, so both are only flagged here.
type UnknownDirective struct {
	Key_name    string
	unknonw_dir string
}

// Error implements the error interface.
func (ukd UnknownDirective) Error() string {
	return fmt.Sprintf("error: %s does not have directive named %s\n", ukd.Key_name, ukd.unknonw_dir)
}

// DuplicatedUdp reports an attempt to create a udp whose name already exists.
type DuplicatedUdp struct {
	Key_name string
	udp_name string
}

// Error implements the error interface.
func (dudp DuplicatedUdp) Error() string {
	return fmt.Sprintf("the udp named %s already exists.\n", dudp.udp_name)
}

// CannotFindUdp reports a lookup for a udp name that does not exist.
type CannotFindUdp struct {
	Key_name string
	udp_name string
}

// Error implements the error interface.
func (cnfudp CannotFindUdp) Error() string {
	return fmt.Sprintf("the udp named %s not exists.\n", cnfudp.udp_name)
}
package main

import (
	"context"
	"encoding/json"
	"errors"
	"flag"
	"log"
	"time"

	"github.com/trains629/ms/base"
)

var (
	_timeout     = flag.Int64("ttl", int64(2*time.Second), "timeout")
	_Endpoints   = base.StringArray{}
	_serviceName = flag.String("service", "", "service name")
	_port        = flag.Int64("port", 0, "service port")
	_host        = flag.String("host", "", "host")
	_config      = flag.String("config", "", "service config")
	_prefix      = flag.String("prefix", base.ServicePrefix, "prefix")
)

// registerService registers serviceName in etcd using host/port/prefix from
// the command-line flags. When -config holds valid JSON it becomes the
// service's Info payload; invalid JSON is ignored and an empty map is used
// (best-effort behaviour preserved from the original).
func registerService(serviceName string) error {
	if serviceName == "" {
		return errors.New("service name is empty")
	}
	cli, err := base.NewEtcdClient([]string(_Endpoints), time.Duration(*_timeout))
	if err != nil {
		return err
	}
	defer cli.Close()
	config := base.NewServiceConfig(serviceName)
	config.Prefix = *_prefix
	config.Host = *_host
	config.Port = *_port
	config.Info = map[string]interface{}{}
	if *_config != "" {
		var info interface{}
		if err := json.Unmarshal([]byte(*_config), &info); err == nil {
			config.Info = info
		}
	}
	if err = base.RegisterService(context.Background(), cli, config, 0); err != nil {
		return err
	}
	return nil
}

func main() {
	flag.Var(&_Endpoints, "endpoints", "endpoint")
	flag.Parse()
	b := base.CheckFunc(context.Background(), 10, func() bool {
		cli, err := base.NewEtcdClient([]string(_Endpoints), time.Duration(*_timeout))
		if err != nil {
			return false
		}
		// BUG fix: the probe client used to be leaked on every successful
		// connectivity check; close it as soon as the dial succeeds.
		cli.Close()
		return true
	})
	if !b {
		log.Fatalln("error Service")
	}
	sName := *_serviceName
	// Without a service name this process only verifies the etcd service.
	if sName == "" {
		return
	}
	if err := registerService(sName); err != nil {
		log.Fatalln(err)
	}
}
package worker

import (
	"fmt"
	"io"
	"strings"
)

const (
	// Travis CI control markers delimiting a collapsible section of build
	// output; %s is the fold name.
	travisFoldStart = "travis_fold:start:%s\r\033[0K"
	travisFoldEnd   = "travis_fold:end:%s\r\033[0K"
)

// writeFold writes b to w wrapped in a complete Travis fold named name,
// ensuring the folded content ends with a newline before the end marker.
// It returns the result of the single underlying w.Write call.
func writeFold(w io.Writer, name string, b []byte) (int, error) {
	folded := []byte(fmt.Sprintf(travisFoldStart, name))
	folded = append(folded, b...)
	// folded is never empty here: the start marker guarantees content
	// even when b is empty.
	if folded[len(folded)-1] != '\n' {
		folded = append(folded, '\n')
	}
	folded = append(folded, fmt.Sprintf(travisFoldEnd, name)...)
	return w.Write(folded)
}

// writeFoldStart writes the fold start marker for name followed by b.
func writeFoldStart(w io.Writer, name string, b []byte) (int, error) {
	folded := []byte(fmt.Sprintf(travisFoldStart, name))
	folded = append(folded, b...)
	return w.Write(folded)
}

// writeFoldEnd writes b followed by the fold end marker for name.
func writeFoldEnd(w io.Writer, name string, b []byte) (int, error) {
	// Build into a fresh buffer. The original did `folded := b` and
	// appended in place, which can clobber the caller's backing array
	// whenever b has spare capacity.
	folded := make([]byte, 0, len(b)+len(travisFoldEnd)+len(name))
	folded = append(folded, b...)
	folded = append(folded, fmt.Sprintf(travisFoldEnd, name)...)
	return w.Write(folded)
}

// stringSplitSpace splits s on single spaces and trims surrounding
// whitespace from each part. Consecutive spaces yield empty elements —
// preserved deliberately (strings.Fields would drop them and change
// callers' observed behavior).
func stringSplitSpace(s string) []string {
	parts := []string{}
	for _, part := range strings.Split(s, " ") {
		parts = append(parts, strings.TrimSpace(part))
	}
	return parts
}
// Package server: a process-wide singleton gRPC client (plain or TLS) for
// talking to a gcelery bridge server.
package server

import (
	"log"
	"sync"

	// "github.com/et-zone/gcelery/control"
	pb "github.com/et-zone/gcelery/protos/base"
	"github.com/et-zone/gcelery/task"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// client is the process-wide singleton; see InitClient / InitSTLClient.
// NOTE(review): reads/writes of this global are not synchronized — confirm
// init is only ever called from a single goroutine at startup.
var client *CeleryClient
var err error

// TimeOut is the default request timeout, in seconds.
const TimeOut = 60

// CeleryClient wraps a shared gRPC connection pool.
type CeleryClient struct {
	conn    *grpc.ClientConn
	timeout int
	isnew   bool
	pool    *CliPool
	// Cursor
}

// CliPool holds the shared connection plus pool configuration.
type CliPool struct {
	sync.Mutex
	conn        *grpc.ClientConn
	Maxconn     int    // number of connections
	Address     string // target server address
	isSTL       bool   // whether TLS ("stl") is enabled
	certFile    string // path to the cert file
	cred        *credentials.TransportCredentials
	timeout     int // timeout, in seconds
	MaxIdleConn int // maximum number of connections
	MinOpenConn int // minimum number of live connections
	LocalConn   int // current number of connections
}

// newConn dials address without TLS and aborts the process on failure.
// NOTE(review): grpc.Dial without WithBlock does not actually connect here,
// so dial errors are argument errors only; also grpc.WithInsecure is
// deprecated in newer grpc-go — confirm against the pinned version.
func newConn(address string) *grpc.ClientConn {
	//, grpc.WithTimeout(time.Duration(10)*time.Second) client
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		log.Fatal(err.Error())
	}
	return conn
}

// newTlsConn dials address using TLS credentials loaded from certFile,
// aborting the process on failure. Returns the connection and credentials.
func newTlsConn(address string, certFile string) (*grpc.ClientConn, *credentials.TransportCredentials) {
	creds, err := credentials.NewClientTLSFromFile(certFile, "")
	if err != nil {
		log.Fatalf("could not process the credentials: %v", err)
	}
	conn, err := grpc.Dial(address, grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatal(err.Error())
	}
	return conn, &creds
}

// InitClient initializes (once) and returns the singleton plaintext client.
// Subsequent calls return the existing client regardless of addr.
func InitClient(addr string) *CeleryClient {
	if client != nil {
		return client
	}
	client = &CeleryClient{
		// Cursor: nil,
	}
	cliPool := &CliPool{
		conn:    newConn(addr),
		Mutex:   sync.Mutex{},
		Address: addr,
		isSTL:   false,
		timeout: TimeOut,
	}
	client.pool = cliPool
	client.timeout = cliPool.timeout
	return client
}

// InitSTLClient initializes (once) and returns the singleton TLS client.
func InitSTLClient(addr string, certFile string) *CeleryClient {
	if client != nil {
		return client
	}
	client = &CeleryClient{ /*Cursor: nil*/ }
	conn, cred := newTlsConn(addr, certFile)
	cliPool := &CliPool{
		conn:     conn,
		cred:     cred,
		Mutex:    sync.Mutex{},
		Address:  addr,
		isSTL:    true,
		certFile: certFile,
		timeout:  TimeOut,
	}
	client.pool = cliPool
	client.timeout = cliPool.timeout
	return client
}

// Close tears down the pooled connection, logging (not returning) any error.
func (cli *CeleryClient) Close() {
	var err error // shadows the package-level err on purpose
	cli.conn = nil
	// cli.Cursor = nil
	if cli.pool != nil {
		err = cli.pool.conn.Close()
	}
	if err != nil {
		log.Println(err.Error())
	}
}

// cursor builds a request cursor over the pooled connection.
// NOTE(review): this reads the package-level `client` rather than the
// receiver `cli` — equivalent only because the client is a singleton;
// confirm that is intended.
func (cli *CeleryClient) cursor() Cursor {
	return &cursor{pb.NewBridgeClient(client.pool.conn), client.pool.timeout}
}

// Do executes a task request through a fresh cursor.
func (cli *CeleryClient) Do(req *task.Request) task.Response {
	return cli.cursor().Do(req)
}
package util

import "testing"

// TestGetCurrentDate verifies that GetCurrentDate yields a non-empty string.
func TestGetCurrentDate(t *testing.T) {
	if d := GetCurrentDate(); d == "" {
		t.Errorf("couldn't get the current date err: %v\n", d)
	}
}
package main

import (
	"github.com/rs/zerolog"

	"github.com/cloudflare/cloudflared/config"
	"github.com/cloudflare/cloudflared/overwatch"
)

// AppService is the main service that runs when no command lines flags are passed to cloudflared
// it manages all the running services such as tunnels, forwarders, DNS resolver, etc
type AppService struct {
	configManager    config.Manager
	serviceManager   overwatch.Manager
	shutdownC        chan struct{}     // unbuffered: Shutdown blocks until actionLoop receives
	configUpdateChan chan config.Root  // unbuffered: ConfigDidUpdate blocks until actionLoop receives
	log              *zerolog.Logger
}

// NewAppService creates a new AppService with needed supporting services
func NewAppService(configManager config.Manager, serviceManager overwatch.Manager,
	shutdownC chan struct{}, log *zerolog.Logger) *AppService {
	return &AppService{
		configManager:    configManager,
		serviceManager:   serviceManager,
		shutdownC:        shutdownC,
		configUpdateChan: make(chan config.Root),
		log:              log,
	}
}

// Run starts the run loop to handle config updates and run forwarders, tunnels, etc
func (s *AppService) Run() error {
	go s.actionLoop()
	return s.configManager.Start(s)
}

// Shutdown kills all the running services
func (s *AppService) Shutdown() error {
	s.configManager.Shutdown()
	// Blocking send: returns only after actionLoop has observed the signal
	// and begun shutting down the managed services.
	s.shutdownC <- struct{}{}
	return nil
}

// ConfigDidUpdate is a delegate notification from the config manager
// it is triggered when the config file has been updated and now the service needs
// to update its services accordingly
func (s *AppService) ConfigDidUpdate(c config.Root) {
	s.configUpdateChan <- c
}

// actionLoop handles the actions from running processes
func (s *AppService) actionLoop() {
	for {
		select {
		case c := <-s.configUpdateChan:
			s.handleConfigUpdate(c)
		case <-s.shutdownC:
			// Stop every managed service, then exit the loop.
			for _, service := range s.serviceManager.Services() {
				service.Shutdown()
			}
			return
		}
	}
}

// handleConfigUpdate reconciles the set of running services against the new
// config: (re-)adds every configured forwarder and resolver, then removes any
// previously-running service that is no longer named in the config.
// NOTE(review): services are re-Added on every update — assumes the overwatch
// manager deduplicates by Name(); confirm against its implementation.
func (s *AppService) handleConfigUpdate(c config.Root) {
	// handle the client forward listeners
	activeServices := map[string]struct{}{}
	for _, f := range c.Forwarders {
		service := NewForwardService(f, s.log)
		s.serviceManager.Add(service)
		activeServices[service.Name()] = struct{}{}
	}
	// handle resolver changes
	if c.Resolver.Enabled {
		service := NewResolverService(c.Resolver, s.log)
		s.serviceManager.Add(service)
		activeServices[service.Name()] = struct{}{}
	}
	// TODO: TUN-1451 - tunnels

	// remove any services that are no longer active
	for _, service := range s.serviceManager.Services() {
		if _, ok := activeServices[service.Name()]; !ok {
			s.serviceManager.Remove(service.Name())
		}
	}
}
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/go-graphite/carbonapi/expr"
	pb "github.com/go-graphite/carbonzipper/carbonzipperpb3"
	realZipper "github.com/go-graphite/carbonzipper/zipper"
	"go.uber.org/zap"
)

// errNoMetrics is returned by Render when the zipper reply contains no data.
var errNoMetrics = errors.New("no metrics")

// zipper adapts a realZipper.Zipper to the CarbonZipper interface, reporting
// per-request stats through statsSender.
type zipper struct {
	z *realZipper.Zipper

	logger              *zap.Logger
	statsSender         func(*realZipper.Stats)
	ignoreClientTimeout bool
}

// The CarbonZipper interface exposes access to realZipper
// Exposes the functionality to find, get info or render metrics.
type CarbonZipper interface {
	Find(ctx context.Context, metric string) (pb.GlobResponse, error)
	Info(ctx context.Context, metric string) (map[string]pb.InfoResponse, error)
	Render(ctx context.Context, metric string, from, until int32) ([]*expr.MetricData, error)
}

// newZipper builds a zipper around a new realZipper configured from config.
func newZipper(sender func(*realZipper.Stats), config *realZipper.Config, ignoreClientTimeout bool, logger *zap.Logger) *zipper {
	z := &zipper{
		z:                   realZipper.NewZipper(sender, config, logger),
		logger:              logger,
		statsSender:         sender,
		ignoreClientTimeout: ignoreClientTimeout,
	}
	return z
}

// requestCtx returns the context to use for an outgoing zipper request.
// When configured to ignore client timeouts, the caller's context is replaced
// with a background context so client cancellation does not propagate.
// (Extracted: all three methods previously duplicated this logic inline.)
func (z zipper) requestCtx(ctx context.Context) context.Context {
	if z.ignoreClientTimeout {
		return context.Background()
	}
	return ctx
}

// Find resolves metric globs and reports stats on success.
func (z zipper) Find(ctx context.Context, metric string) (pb.GlobResponse, error) {
	var pbresp pb.GlobResponse

	res, stats, err := z.z.Find(z.requestCtx(ctx), z.logger, metric)
	if err != nil {
		return pbresp, err
	}
	pbresp.Name = metric
	pbresp.Matches = res

	z.statsSender(stats)

	// Fix: the original returned `pbresp, err` where err is provably nil.
	return pbresp, nil
}

// Info fetches per-backend metric metadata and reports stats on success.
func (z zipper) Info(ctx context.Context, metric string) (map[string]pb.InfoResponse, error) {
	resp, stats, err := z.z.Info(z.requestCtx(ctx), z.logger, metric)
	if err != nil {
		// Fix: was mislabeled "http.Get: %+v" — no HTTP GET happens here.
		return nil, fmt.Errorf("zipper info request: %+v", err)
	}

	z.statsSender(stats)

	return resp, nil
}

// Render fetches datapoints for metric over [from, until] and converts them
// to expr.MetricData. Returns errNoMetrics when the reply is empty.
func (z zipper) Render(ctx context.Context, metric string, from, until int32) ([]*expr.MetricData, error) {
	var result []*expr.MetricData

	pbresp, stats, err := z.z.Render(z.requestCtx(ctx), z.logger, metric, from, until)
	if err != nil {
		return result, err
	}

	z.statsSender(stats)

	if m := pbresp.Metrics; len(m) == 0 {
		return result, errNoMetrics
	}

	for i := range pbresp.Metrics {
		result = append(result, &expr.MetricData{FetchResponse: pbresp.Metrics[i]})
	}

	return result, nil
}
package handler

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"

	"github.com/solntsevatv/url_translater/internal/url_translater"
)

// longToShort handles POST requests carrying a long URL in the JSON body and
// responds with the stored short URL.
// (Locals renamed to Go-idiomatic mixedCaps; behavior unchanged.)
func (h *Handler) longToShort(c *gin.Context) {
	var input url_translater.LongURL
	if err := c.BindJSON(&input); err != nil {
		newErrorResponse(c, http.StatusBadRequest, "invalid input body")
		return
	}

	shortURL, err := h.services.UrlTranslation.CreateShortURL(input)
	if err != nil {
		newErrorResponse(c, http.StatusInternalServerError, err.Error())
		return
	}

	c.JSON(http.StatusOK, map[string]interface{}{
		"url": shortURL,
	})
	logrus.Info("short_url=", shortURL, " was added in db")
}

// ShortToLong handles requests carrying a short URL in the JSON body and
// responds with the original long URL.
func (h *Handler) ShortToLong(c *gin.Context) {
	// NOTE(review): the non-zero preset (Id: 1) survives BindJSON when the
	// request body omits those fields — confirm this default is intentional.
	input := url_translater.ShortURL{Id: 1, LinkUrl: ""}
	if err := c.BindJSON(&input); err != nil {
		newErrorResponse(c, http.StatusBadRequest, "invalid input body")
		return
	}

	longURL, err := h.services.UrlTranslation.GetLongURL(input)
	if err != nil {
		newErrorResponse(c, http.StatusInternalServerError, err.Error())
		return
	}

	c.JSON(http.StatusOK, map[string]interface{}{
		"url": longURL,
	})
	logrus.Info("long_url=", longURL, " was gotten from db")
}
package graph

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.

import (
	"BackEnd/graph/generated"
	"BackEnd/middleware"
	"BackEnd/models"
	"context"
	"errors"
	"time"
)

// Membership resolves the Membership field of a MembershipDetail by its
// MembershipID foreign key.
func (r *membershipDetailResolver) Membership(ctx context.Context, obj *models.MembershipDetail) (*models.Membership, error) {
	var membership models.Membership
	err := r.DB.Model(&membership).Where("id=?", obj.MembershipID).Select()
	if err != nil {
		return nil, errors.New("failed get membership from detail")
	}
	return &membership, nil
}

// CreateMembershipDetail creates a membership record for the authenticated
// user, or — if one already exists — extends its date by one month.
// NOTE(review): the renewal path extends the date but does not touch Bill or
// MembershipID from the input — confirm that is the intended billing model.
func (r *mutationResolver) CreateMembershipDetail(ctx context.Context, input *models.NewMembershipDetail) (*models.MembershipDetail, error) {
	currentUser, err := middleware.GetCurrentUserFromCTX(ctx)
	if err != nil {
		return nil, errors.New("unauthenticated")
	}

	var membershipDetail models.MembershipDetail
	// A Select error here is treated as "no existing detail" → insert path.
	err = r.DB.Model(&membershipDetail).Where("user_id = ?", currentUser.ID).Select()
	if err != nil {
		newMembershipDetail := models.MembershipDetail{
			Bill:         input.Bill,
			MembershipID: *input.MembershipID,
			Date:         time.Now().Format("2006-01-02 15:04:05"),
			UserID:       currentUser.ID,
		}
		_, err = r.DB.Model(&newMembershipDetail).Insert()
		if err != nil {
			return nil, errors.New("insert membership detail failed")
		}
		return &newMembershipDetail, nil
	}

	// Renewal: push the stored date forward by exactly one month.
	addDate, parseErr := time.Parse("2006-01-02 15:04:05", membershipDetail.Date)
	if parseErr != nil {
		return nil, errors.New(addDate.String())
	}
	addDate = addDate.AddDate(0, 1, 0)
	membershipDetail.Date = addDate.Format("2006-01-02 15:04:05")
	_, updateErr := r.DB.Model(&membershipDetail).Where("user_id=?", currentUser.ID).Update()
	if updateErr != nil {
		return nil, errors.New("update membership error")
	}
	return &membershipDetail, nil
}

// UpdateMembershipDetail adds input.Bill onto the detail owned by userID.
// NOTE(review): this uses column "userId=?" while CreateMembershipDetail
// uses "user_id = ?" — one of the two is almost certainly wrong for the
// actual schema; verify against the table definition.
func (r *mutationResolver) UpdateMembershipDetail(ctx context.Context, userID string, input *models.NewMembershipDetail) (*models.MembershipDetail, error) {
	var membershipDetail models.MembershipDetail
	err := r.DB.Model(&membershipDetail).Where("userId=?", userID).First()
	if err != nil {
		return nil, errors.New("membership detail not found")
	}
	membershipDetail.Bill += input.Bill
	_, updateErr := r.DB.Model(&membershipDetail).Where("userId=?", userID).Update()
	if updateErr != nil {
		return nil, errors.New("update membership detail failed")
	}
	return &membershipDetail, nil
}

// DeleteMembershipDetail removes the detail owned by userID.
// NOTE(review): same "userId" vs "user_id" column inconsistency as above.
func (r *mutationResolver) DeleteMembershipDetail(ctx context.Context, userID string) (bool, error) {
	var membershipDetail models.MembershipDetail
	err := r.DB.Model(&membershipDetail).Where("userId=?", userID).First()
	if err != nil {
		return false, errors.New("membership detail not found")
	}
	_, deleteErr := r.DB.Model(&membershipDetail).Where("userId=?", userID).Delete()
	if deleteErr != nil {
		return false, errors.New("delete membership detail failed")
	}
	return true, nil
}

// CreateMembership inserts a new membership tier of the given type.
func (r *mutationResolver) CreateMembership(ctx context.Context, input *models.NewMembership) (*models.Membership, error) {
	membership := models.Membership{
		Type: input.Type,
	}
	_, err := r.DB.Model(&membership).Insert()
	if err != nil {
		return nil, errors.New("insert membership failed")
	}
	return &membership, nil
}

// UpdateMembership changes the type of the membership identified by id.
func (r *mutationResolver) UpdateMembership(ctx context.Context, id string, input *models.NewMembership) (*models.Membership, error) {
	var membership models.Membership
	err := r.DB.Model(&membership).Where("id=?", id).First()
	if err != nil {
		return nil, errors.New("membership not found")
	}
	membership.Type = input.Type
	_, updateErr := r.DB.Model(&membership).Where("id=?", id).Update()
	if updateErr != nil {
		return nil, errors.New("update membership failed")
	}
	return &membership, nil
}

// DeleteMembership removes the membership identified by id.
func (r *mutationResolver) DeleteMembership(ctx context.Context, id string) (bool, error) {
	var membership models.Membership
	err := r.DB.Model(&membership).Where("id=?", id).First()
	if err != nil {
		return false, errors.New("membership not found")
	}
	_, deleteErr := r.DB.Model(&membership).Where("id=?", id).Delete()
	if deleteErr != nil {
		return false, errors.New("delete membership failed")
	}
	return true, nil
}

// MembershipDetails returns all membership details ordered by bill.
func (r *queryResolver) MembershipDetails(ctx context.Context) ([]*models.MembershipDetail, error) {
	var membershipDetails []*models.MembershipDetail
	err := r.DB.Model(&membershipDetails).Order("bill").Select()
	if err != nil {
		return nil, errors.New("membership detail query failed")
	}
	return membershipDetails, nil
}

// Memberships returns all membership tiers ordered by id.
func (r *queryResolver) Memberships(ctx context.Context) ([]*models.Membership, error) {
	var memberships []*models.Membership
	err := r.DB.Model(&memberships).Order("id").Select()
	if err != nil {
		return nil, errors.New("memberships query failed")
	}
	return memberships, nil
}

// MembershipDetail returns generated.MembershipDetailResolver implementation.
func (r *Resolver) MembershipDetail() generated.MembershipDetailResolver {
	return &membershipDetailResolver{r}
}

type membershipDetailResolver struct{ *Resolver }
/*******************************************************************************
 * Copyright 2017 Samsung Electronics All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *******************************************************************************/

// Package api provides web server for Service Deployment Agent Manager.
// and also provides functionality of request processing and response making.
package api

import (
	"api/agent"
	"api/common"
	"api/group"
	"commons/logger"
	"commons/errors"
	URL "commons/url"
	"net/http"
	"strconv"
	"strings"
)

// RunSDAMWebServer starts web server service with given address and port number.
// This call blocks for the lifetime of the server.
// NOTE(review): the error returned by http.ListenAndServe (e.g. port already
// in use) is silently discarded — consider logging it before returning.
func RunSDAMWebServer(addr string, port int) {
	http.ListenAndServe(addr+":"+strconv.Itoa(port), &_SDAMApis)
}

// _SDAMApis is the single handler instance registered with the HTTP server.
var _SDAMApis _SDAMApisHandler

type _SDAMApisHandler struct{}

// ServeHTTP implements a http serve interface.
// Check if the url contains a given string and call a proper function.
//
// agents: agent.SdamAgentHandle.Handle will be called.
// groups: group.SdamGroupHandle.Handle will be called.
// others: NotFoundURL error will be used to send an error message.
//
// Note: in a tagless switch the default case runs only when no other case
// matches, regardless of its position, so listing it first is legal here.
func (_SDAMApis *_SDAMApisHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	logger.Logging(logger.DEBUG, "receive msg", req.Method, req.URL.Path)
	defer logger.Logging(logger.DEBUG, "OUT")

	switch url := req.URL.Path; {
	default:
		logger.Logging(logger.DEBUG, "Unknown URL")
		common.WriteError(w, errors.NotFoundURL{})

	case !strings.Contains(url, URL.Base()):
		logger.Logging(logger.DEBUG, "Unknown URL")
		common.WriteError(w, errors.NotFoundURL{})

	case strings.Contains(url, URL.Agents()):
		logger.Logging(logger.DEBUG, "Request Agents APIs")
		agent.SdamAgentHandle.Handle(w, req)

	case strings.Contains(url, URL.Groups()):
		logger.Logging(logger.DEBUG, "Request Groups APIs")
		group.SdamGroupHandle.Handle(w, req)
	}
}
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package p9test

import (
	"bytes"
	"fmt"
	"io"
	"math/rand"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"golang.org/x/sys/unix"
	"gvisor.dev/gvisor/pkg/fd"
	"gvisor.dev/gvisor/pkg/p9"
	"gvisor.dev/gvisor/pkg/sync"
)

// TestPanic verifies that a panic in the attach handler is converted to an
// EREMOTEIO error on the client side rather than crashing the server.
func TestPanic(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	// Create a new root.
	d := h.NewDirectory(nil)(nil)
	defer d.Close() // Needed manually.
	h.Attacher.EXPECT().Attach().Return(d, nil).Do(func() {
		// Panic here, and ensure that we get back EFAULT.
		panic("handler")
	})

	// Attach to the client.
	if _, err := c.Attach("/"); err != unix.EREMOTEIO {
		t.Fatalf("got attach err %v, want EREMOTEIO", err)
	}
}

// TestAttachNoLeak verifies that a file left open at client teardown is
// closed automatically (the mock harness asserts exactly-once closes).
func TestAttachNoLeak(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	// Create a new root.
	d := h.NewDirectory(nil)(nil)
	h.Attacher.EXPECT().Attach().Return(d, nil).Times(1)

	// Attach to the client.
	f, err := c.Attach("/")
	if err != nil {
		t.Fatalf("got attach err %v, want nil", err)
	}

	// Don't close the file. This should be closed automatically when the
	// client disconnects. The mock asserts that everything is closed
	// exactly once. This statement just removes the unused variable error.
	_ = f
}

// TestBadAttach verifies that an attach error from the backend is propagated
// unchanged to the client.
func TestBadAttach(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	// Return an error on attach.
	h.Attacher.EXPECT().Attach().Return(nil, unix.EINVAL).Times(1)

	// Attach to the client.
	if _, err := c.Attach("/"); err != unix.EINVAL {
		t.Fatalf("got attach err %v, want unix.EINVAL", err)
	}
}

// TestWalkAttach verifies attaching at a non-root path walks through the
// intermediate directories and yields the expected regular file.
func TestWalkAttach(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	// Create a new root.
	d := h.NewDirectory(map[string]Generator{
		"a": h.NewDirectory(map[string]Generator{
			"b": h.NewFile(),
		}),
	})(nil)
	h.Attacher.EXPECT().Attach().Return(d, nil).Times(1)

	// Attach to the client as a non-root, and ensure that the walk above
	// occurs as expected. We should get back b, and all references should
	// be dropped when the file is closed.
	f, err := c.Attach("/a/b")
	if err != nil {
		t.Fatalf("got attach err %v, want nil", err)
	}
	defer f.Close()

	// Check that's a regular file.
	if _, _, attr, err := f.GetAttr(p9.AttrMaskAll()); err != nil {
		t.Errorf("got err %v, want nil", err)
	} else if !attr.Mode.IsRegular() {
		t.Errorf("got mode %v, want regular file", err)
	}
}

// newTypeMap returns a new type map dictionary.
func newTypeMap(h *Harness) map[string]Generator {
	return map[string]Generator{
		"directory":        h.NewDirectory(map[string]Generator{}),
		"file":             h.NewFile(),
		"symlink":          h.NewSymlink(),
		"block-device":     h.NewBlockDevice(),
		"character-device": h.NewCharacterDevice(),
		"named-pipe":       h.NewNamedPipe(),
		"socket":           h.NewSocket(),
	}
}

// newRoot returns a new root filesystem.
//
// This is set up in a deterministic way for testing most operations.
//
// The represented file system looks like:
// - file
// - symlink
// - directory
// ...
// + one
//   - file
//   - symlink
//   - directory
//     ...
//   - two
//     - file
//     - symlink
//     - directory
//       ...
// + three
//   - file
//   - symlink
//   - directory
//     ...
func newRoot(h *Harness, c *p9.Client) (*Mock, p9.File) {
	root := newTypeMap(h)
	one := newTypeMap(h)
	two := newTypeMap(h)
	three := newTypeMap(h)
	one["two"] = h.NewDirectory(two)      // Will be nested in one.
	root["one"] = h.NewDirectory(one)     // Top level.
	root["three"] = h.NewDirectory(three) // Alternate top-level.

	// Create a new root.
	rootBackend := h.NewDirectory(root)(nil)
	h.Attacher.EXPECT().Attach().Return(rootBackend, nil)

	// Attach to the client.
	r, err := c.Attach("/")
	if err != nil {
		h.t.Fatalf("got attach err %v, want nil", err)
	}

	return rootBackend, r
}

// allInvalidNames returns every malformed-path variation of `from` that the
// protocol must reject: compound paths containing '/', '.' and '..'.
func allInvalidNames(from string) []string {
	return []string{
		from + "/other",
		from + "/..",
		from + "/.",
		from + "/",
		"other/" + from,
		"/" + from,
		"./" + from,
		"../" + from,
		".",
		"..",
		"/",
		"",
	}
}

// TestWalkInvalid verifies that all malformed walk paths fail with EINVAL
// (or ENOENT for a legal walk to a nonexistent entry), and that a valid walk
// returns a file whose attributes match the backend's.
func TestWalkInvalid(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	_, root := newRoot(h, c)
	defer root.Close()

	// Run relevant tests.
	for name := range newTypeMap(h) {
		// These are all the various ways that one might attempt to
		// construct compound paths. They should all be rejected, as
		// any compound that contains a / is not allowed, as well as
		// the singular paths of '.' and '..'.
		if _, _, err := root.Walk([]string{".", name}); err != unix.EINVAL {
			t.Errorf("Walk through . %s wanted EINVAL, got %v", name, err)
		}
		// NOTE(review): this message says "." but the walk tests "..";
		// looks like a copy-paste in the original.
		if _, _, err := root.Walk([]string{"..", name}); err != unix.EINVAL {
			t.Errorf("Walk through . %s wanted EINVAL, got %v", name, err)
		}
		if _, _, err := root.Walk([]string{name, "."}); err != unix.EINVAL {
			t.Errorf("Walk through %s . wanted EINVAL, got %v", name, err)
		}
		if _, _, err := root.Walk([]string{name, ".."}); err != unix.EINVAL {
			t.Errorf("Walk through %s .. wanted EINVAL, got %v", name, err)
		}
		for _, invalidName := range allInvalidNames(name) {
			if _, _, err := root.Walk([]string{invalidName}); err != unix.EINVAL {
				t.Errorf("Walk through %s wanted EINVAL, got %v", invalidName, err)
			}
		}
		wantErr := unix.EINVAL
		if name == "directory" {
			// We can attempt a walk through a directory. However,
			// we should never see a file named "other", so we
			// expect this to return ENOENT.
			wantErr = unix.ENOENT
		}
		if _, _, err := root.Walk([]string{name, "other"}); err != wantErr {
			t.Errorf("Walk through %s/other wanted %v, got %v", name, wantErr, err)
		}

		// Do a successful walk.
		_, f, err := root.Walk([]string{name})
		if err != nil {
			t.Errorf("Walk to %s wanted nil, got %v", name, err)
		}
		defer f.Close()
		local := h.Pop(f)

		// Check that the file matches.
		_, localMask, localAttr, localErr := local.GetAttr(p9.AttrMaskAll())
		if _, mask, attr, err := f.GetAttr(p9.AttrMaskAll()); mask != localMask || attr != localAttr || err != localErr {
			t.Errorf("GetAttr got (%v, %v, %v), wanted (%v, %v, %v)",
				mask, attr, err, localMask, localAttr, localErr)
		}

		// Ensure we can't walk backwards.
		if _, _, err := f.Walk([]string{"."}); err != unix.EINVAL {
			t.Errorf("Walk through %s/. wanted EINVAL, got %v", name, err)
		}
		if _, _, err := f.Walk([]string{".."}); err != unix.EINVAL {
			t.Errorf("Walk through %s/.. wanted EINVAL, got %v", name, err)
		}
	}
}

// fileGenerator is a function to generate files via walk or create.
//
// Examples are:
//   - walkHelper
//   - walkAndOpenHelper
//   - createHelper
type fileGenerator func(*Harness, string, p9.File) (*Mock, *Mock, p9.File)

// walkHelper walks to the given file.
//
// The backends of the parent and walked file are returned, as well as the
// walked client file.
func walkHelper(h *Harness, name string, dir p9.File) (parentBackend *Mock, walkedBackend *Mock, walked p9.File) {
	_, parent, err := dir.Walk(nil)
	if err != nil {
		h.t.Fatalf("Walk(nil) got err %v, want nil", err)
	}
	defer parent.Close()
	parentBackend = h.Pop(parent)

	_, walked, err = parent.Walk([]string{name})
	if err != nil {
		h.t.Fatalf("Walk(%s) got err %v, want nil", name, err)
	}
	walkedBackend = h.Pop(walked)

	return parentBackend, walkedBackend, walked
}

// walkAndOpenHelper additionally opens the walked file, if possible.
func walkAndOpenHelper(h *Harness, name string, dir p9.File) (*Mock, *Mock, p9.File) {
	parentBackend, walkedBackend, walked := walkHelper(h, name, dir)
	if p9.CanOpen(walkedBackend.Attr.Mode) {
		// Open for all file types that we can. We stick to a read-only
		// open here because directories may not be opened otherwise.
		walkedBackend.EXPECT().Open(p9.ReadOnly).Times(1)
		if _, _, _, err := walked.Open(p9.ReadOnly); err != nil {
			h.t.Errorf("got open err %v, want nil", err)
		}
	} else {
		// ... or assert an error for others.
		if _, _, _, err := walked.Open(p9.ReadOnly); err != unix.EINVAL {
			h.t.Errorf("got open err %v, want EINVAL", err)
		}
	}
	return parentBackend, walkedBackend, walked
}

// createHelper creates the given file and returns the parent directory,
// created file and client file, which must be closed when done.
func createHelper(h *Harness, name string, dir p9.File) (*Mock, *Mock, p9.File) {
	// Clone the directory first, since Create replaces the existing file.
	// We change the type after calling create.
	_, dirThenFile, err := dir.Walk(nil)
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}

	// Create a new server-side file. On the server-side, a new file is
	// returned from a create call. The client will reuse the same file,
	// but we still expect the normal chain of closes. This complicates
	// things a bit because the "parent" will always chain to the cloned
	// dir above.
	dirBackend := h.Pop(dirThenFile)   // New backend directory.
	newFile := h.NewFile()(dirBackend) // New file with backend parent.
	dirBackend.EXPECT().Create(name, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, newFile, newFile.QID, uint32(0), nil)

	// Create via the client.
	_, dirThenFile, _, _, err = dirThenFile.Create(name, p9.ReadOnly, 0, 0, 0)
	if err != nil {
		h.t.Fatalf("got create err %v, want nil", err)
	}

	// Ensure subsequent walks succeed.
	dirBackend.AddChild(name, h.NewFile())
	return dirBackend, newFile, dirThenFile
}

// deprecatedRemover allows us to access the deprecated Remove operation within
// the p9.File client object.
type deprecatedRemover interface {
	Remove() error
}

// checkDeleted asserts that relevant methods fail for an unlinked file.
//
// This function will close the file at the end.
func checkDeleted(h *Harness, file p9.File) {
	defer file.Close() // See doc.

	if _, _, _, err := file.Open(p9.ReadOnly); err != unix.EINVAL {
		h.t.Errorf("open while deleted, got %v, want EINVAL", err)
	}
	if _, _, _, _, err := file.Create("created", p9.ReadOnly, 0, 0, 0); err != unix.EINVAL {
		h.t.Errorf("create while deleted, got %v, want EINVAL", err)
	}
	if _, err := file.Symlink("old", "new", 0, 0); err != unix.EINVAL {
		h.t.Errorf("symlink while deleted, got %v, want EINVAL", err)
	}
	// N.B. This link is technically invalid, but if a call to link is
	// actually made in the backend then the mock will panic.
	if err := file.Link(file, "new"); err != unix.EINVAL {
		h.t.Errorf("link while deleted, got %v, want EINVAL", err)
	}
	if err := file.RenameAt("src", file, "dst"); err != unix.EINVAL {
		h.t.Errorf("renameAt while deleted, got %v, want EINVAL", err)
	}
	if err := file.UnlinkAt("file", 0); err != unix.EINVAL {
		h.t.Errorf("unlinkAt while deleted, got %v, want EINVAL", err)
	}
	if err := file.Rename(file, "dst"); err != unix.EINVAL {
		h.t.Errorf("rename while deleted, got %v, want EINVAL", err)
	}
	if _, err := file.Readlink(); err != unix.EINVAL {
		h.t.Errorf("readlink while deleted, got %v, want EINVAL", err)
	}
	if _, err := file.Mkdir("dir", p9.ModeDirectory, 0, 0); err != unix.EINVAL {
		h.t.Errorf("mkdir while deleted, got %v, want EINVAL", err)
	}
	if _, err := file.Mknod("dir", p9.ModeDirectory, 0, 0, 0, 0); err != unix.EINVAL {
		h.t.Errorf("mknod while deleted, got %v, want EINVAL", err)
	}
	if _, err := file.Readdir(0, 1); err != unix.EINVAL {
		h.t.Errorf("readdir while deleted, got %v, want EINVAL", err)
	}
	if _, err := file.Connect(p9.SocketType(0)); err != unix.EINVAL {
		h.t.Errorf("connect while deleted, got %v, want EINVAL", err)
	}

	// The remove method is technically deprecated, but we want to ensure
	// that it still checks for deleted appropriately. We must first clone
	// the file because remove is equivalent to close.
	_, newFile, err := file.Walk(nil)
	if err == unix.EBUSY {
		// We can't walk from here because this reference is open
		// already. Okay, we will also have unopened cases through
		// TestUnlink, just skip the remove operation for now.
		return
	} else if err != nil {
		h.t.Fatalf("clone failed, got %v, want nil", err)
	}
	if err := newFile.(deprecatedRemover).Remove(); err != unix.EINVAL {
		h.t.Errorf("remove while deleted, got %v, want EINVAL", err)
	}
}

// deleter is a function to remove a file.
type deleter func(parent p9.File, name string) error

// unlinkAt is a deleter.
func unlinkAt(parent p9.File, name string) error {
	// Call unlink. Note that a filesystem may normally impose additional
	// constaints on unlinkat success, such as ensuring that a directory is
	// empty, requiring AT_REMOVEDIR in flags to remove a directory, etc.
	// None of that is required internally (entire trees can be marked
	// deleted when this operation succeeds), so the mock will succeed.
	return parent.UnlinkAt(name, 0)
}

// remove is a deleter.
func remove(parent p9.File, name string) error {
	// See notes above re: remove.
	_, newFile, err := parent.Walk([]string{name})
	if err != nil {
		// Should not be expected.
		return err
	}

	// Do the actual remove.
	if err := newFile.(deprecatedRemover).Remove(); err != nil {
		return err
	}

	// Ensure that the remove closed the file.
	if err := newFile.(deprecatedRemover).Remove(); err != unix.EBADF {
		return unix.EBADF // Propagate this code.
	}

	return nil
}

// unlinkHelper unlinks the noted path, and ensures that all relevant
// operations on that path, acquired from multiple paths, start failing.
func unlinkHelper(h *Harness, root p9.File, targetNames []string, targetGen fileGenerator, deleteFn deleter) {
	// name is the file to be unlinked.
	name := targetNames[len(targetNames)-1]

	// Walk to the directory containing the target.
	_, parent, err := root.Walk(targetNames[:len(targetNames)-1])
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}
	defer parent.Close()
	parentBackend := h.Pop(parent)

	// Walk to or generate the target file.
	_, _, target := targetGen(h, name, parent)
	defer checkDeleted(h, target)

	// Walk to a second reference.
	_, second, err := parent.Walk([]string{name})
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}
	defer checkDeleted(h, second)

	// Walk to a third reference, from the start.
	_, third, err := root.Walk(targetNames)
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}
	defer checkDeleted(h, third)

	// This will be translated in the backend to an unlinkat.
	parentBackend.EXPECT().UnlinkAt(name, uint32(0)).Return(nil)

	// Actually perform the deletion.
	if err := deleteFn(parent, name); err != nil {
		h.t.Fatalf("got delete err %v, want nil", err)
	}
}

// unlinkTest runs unlinkHelper twice for the given target path: once through
// the unlinkAt deleter and once through the deprecated remove deleter.
func unlinkTest(t *testing.T, targetNames []string, targetGen fileGenerator) {
	t.Run(fmt.Sprintf("unlinkAt(%s)", strings.Join(targetNames, "/")), func(t *testing.T) {
		h, c := NewHarness(t)
		defer h.Finish()

		_, root := newRoot(h, c)
		defer root.Close()

		unlinkHelper(h, root, targetNames, targetGen, unlinkAt)
	})
	t.Run(fmt.Sprintf("remove(%s)", strings.Join(targetNames, "/")), func(t *testing.T) {
		h, c := NewHarness(t)
		defer h.Finish()

		_, root := newRoot(h, c)
		defer root.Close()

		unlinkHelper(h, root, targetNames, targetGen, remove)
	})
}

// TestUnlink exercises unlink across every file type, at several nesting
// depths, for walked, walked-and-opened, and freshly-created targets.
func TestUnlink(t *testing.T) {
	// Unlink all files.
	for name := range newTypeMap(nil) {
		unlinkTest(t, []string{name}, walkHelper)
		unlinkTest(t, []string{name}, walkAndOpenHelper)
		unlinkTest(t, []string{"one", name}, walkHelper)
		unlinkTest(t, []string{"one", name}, walkAndOpenHelper)
		unlinkTest(t, []string{"one", "two", name}, walkHelper)
		unlinkTest(t, []string{"one", "two", name}, walkAndOpenHelper)
	}

	// Unlink a directory.
	unlinkTest(t, []string{"one"}, walkHelper)
	unlinkTest(t, []string{"one"}, walkAndOpenHelper)
	unlinkTest(t, []string{"one", "two"}, walkHelper)
	unlinkTest(t, []string{"one", "two"}, walkAndOpenHelper)

	// Unlink created files.
	unlinkTest(t, []string{"created"}, createHelper)
	unlinkTest(t, []string{"one", "created"}, createHelper)
	unlinkTest(t, []string{"one", "two", "created"}, createHelper)
}

// TestUnlinkAtInvalid verifies that UnlinkAt rejects all malformed names.
func TestUnlinkAtInvalid(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	_, root := newRoot(h, c)
	defer root.Close()

	for name := range newTypeMap(nil) {
		for _, invalidName := range allInvalidNames(name) {
			if err := root.UnlinkAt(invalidName, 0); err != unix.EINVAL {
				t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
			}
		}
	}
}

// expectRenamed asserts an ordered sequence of rename calls, based on all the
// elements in elements being the source, and the first element therein
// changing to dstName, parented at dstParent.
func expectRenamed(file *Mock, elements []string, dstParent *Mock, dstName string) *gomock.Call {
	if len(elements) > 0 {
		// Recurse to the parent, if necessary.
		call := expectRenamed(file.parent, elements[:len(elements)-1], dstParent, dstName)

		// Recursive case: this element is unchanged, but should have
		// its hook called after the parent.
		return file.EXPECT().Renamed(file.parent, elements[len(elements)-1]).Do(func(p p9.File, _ string) {
			file.parent = p.(*Mock)
		}).After(call)
	}

	// Base case: this is the changed element.
	return file.EXPECT().Renamed(dstParent, dstName).Do(func(p p9.File, name string) {
		file.parent = p.(*Mock)
	})
}

// renamer is a rename function.
type renamer func(h *Harness, srcParent, dstParent p9.File, origName, newName string, selfRename bool) error

// renameAt is a renamer.
func renameAt(_ *Harness, srcParent, dstParent p9.File, srcName, dstName string, selfRename bool) error {
	return srcParent.RenameAt(srcName, dstParent, dstName)
}

// rename is a renamer.
// rename is a renamer; it drives the (deprecated) Rename API via a freshly
// walked reference.
func rename(h *Harness, srcParent, dstParent p9.File, srcName, dstName string, selfRename bool) error {
	_, f, err := srcParent.Walk([]string{srcName})
	if err != nil {
		return err
	}
	defer f.Close()
	if !selfRename {
		backend := h.Pop(f)
		backend.EXPECT().Renamed(gomock.Any(), dstName).Do(func(p p9.File, name string) {
			backend.parent = p.(*Mock) // Required for close ordering.
		})
	}
	return f.Rename(dstParent, dstName)
}

// renameHelper executes a rename, and asserts that all relevant elements
// receive expected notifications. If overwriting a file, this includes
// ensuring that the target has been appropriately marked as unlinked.
func renameHelper(h *Harness, root p9.File, srcNames []string, dstNames []string, target fileGenerator, renameFn renamer) {
	// Walk to the directory containing the target.
	srcQID, targetParent, err := root.Walk(srcNames[:len(srcNames)-1])
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}
	defer targetParent.Close()
	targetParentBackend := h.Pop(targetParent)

	// Walk to or generate the target file.
	_, targetBackend, src := target(h, srcNames[len(srcNames)-1], targetParent)
	defer src.Close()

	// Walk to a second reference.
	_, second, err := targetParent.Walk([]string{srcNames[len(srcNames)-1]})
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}
	defer second.Close()
	secondBackend := h.Pop(second)

	// Walk to a third reference, from the start.
	_, third, err := root.Walk(srcNames)
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}
	defer third.Close()
	thirdBackend := h.Pop(third)

	// Find the common suffix to identify the rename parent.
	var (
		renameDestPath []string
		renameSrcPath  []string
		selfRename     bool
	)
	for i := 1; i <= len(srcNames) && i <= len(dstNames); i++ {
		if srcNames[len(srcNames)-i] != dstNames[len(dstNames)-i] {
			// Take the full prefix of dstNames up until this
			// point, including the first mismatched name. The
			// first mismatch must be the renamed entry.
			renameDestPath = dstNames[:len(dstNames)-i+1]
			renameSrcPath = srcNames[:len(srcNames)-i+1]

			// Does the renameDestPath fully contain the
			// renameSrcPath here? If yes, then this is a mismatch.
			// We can't rename the src to some subpath of itself.
			if len(renameDestPath) > len(renameSrcPath) &&
				reflect.DeepEqual(renameDestPath[:len(renameSrcPath)], renameSrcPath) {
				renameDestPath = nil
				renameSrcPath = nil
				continue
			}
			break
		}
	}
	if len(renameSrcPath) == 0 || len(renameDestPath) == 0 {
		// This must be a rename to self, or a tricky look-alike. This
		// happens iff we fail to find a suitable divergence in the two
		// paths. It's a true self move if the path length is the same.
		renameDestPath = dstNames
		renameSrcPath = srcNames
		selfRename = len(srcNames) == len(dstNames)
	}

	// Walk to the source parent.
	_, srcParent, err := root.Walk(renameSrcPath[:len(renameSrcPath)-1])
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}
	defer srcParent.Close()
	srcParentBackend := h.Pop(srcParent)

	// Walk to the destination parent.
	_, dstParent, err := root.Walk(renameDestPath[:len(renameDestPath)-1])
	if err != nil {
		h.t.Fatalf("got walk err %v, want nil", err)
	}
	defer dstParent.Close()
	dstParentBackend := h.Pop(dstParent)

	// expectedErr is the result of the rename operation.
	var expectedErr error

	// Walk to the target file, if one exists.
	dstQID, dst, err := root.Walk(renameDestPath)
	if err == nil {
		if !selfRename && srcQID[0].Type == dstQID[0].Type {
			// If there is a destination file, and it is of the
			// same type as the source file, then we expect the
			// rename to succeed. We expect the destination file to
			// be deleted, so we run a deletion test on it in this
			// case.
			defer checkDeleted(h, dst)
		} else {
			// If the type is different than the destination, then
			// we expect the rename to fail. We expect that this
			// is returned.
			//
			// If the file is being renamed to itself, this is
			// technically allowed and a no-op, but all the
			// triggers will fire.
			if !selfRename {
				expectedErr = unix.EINVAL
			}
			dst.Close()
		}
	}
	dstName := renameDestPath[len(renameDestPath)-1] // Renamed element.
	srcName := renameSrcPath[len(renameSrcPath)-1]   // Renamed element.
	if expectedErr == nil && !selfRename {
		// Expect all to be renamed appropriately. Note that if this is
		// a final file being renamed, then we expect the file to be
		// called with the new parent. If not, then we expect the
		// rename hook to be called, but the parent will remain
		// unchanged.
		elements := srcNames[len(renameSrcPath):]
		expectRenamed(targetBackend, elements, dstParentBackend, dstName)
		expectRenamed(secondBackend, elements, dstParentBackend, dstName)
		expectRenamed(thirdBackend, elements, dstParentBackend, dstName)

		// The target parent has also been opened, and may be moved
		// directly or indirectly.
		if len(elements) > 1 {
			expectRenamed(targetParentBackend, elements[:len(elements)-1], dstParentBackend, dstName)
		}
	}

	// Expect the rename if it's not the same file. Note that like unlink,
	// renames are always translated to the at variant in the backend.
	if !selfRename {
		srcParentBackend.EXPECT().RenameAt(srcName, dstParentBackend, dstName).Return(expectedErr)
	}

	// Perform the actual rename; everything has been lined up.
	if err := renameFn(h, srcParent, dstParent, srcName, dstName, selfRename); err != expectedErr {
		h.t.Fatalf("got rename err %v, want %v", err, expectedErr)
	}
}

// renameTest runs renameHelper on the given source/destination paths via
// both the RenameAt and the (deprecated) Rename code paths.
func renameTest(t *testing.T, srcNames []string, dstNames []string, target fileGenerator) {
	t.Run(fmt.Sprintf("renameAt(%s->%s)", strings.Join(srcNames, "/"), strings.Join(dstNames, "/")), func(t *testing.T) {
		h, c := NewHarness(t)
		defer h.Finish()

		_, root := newRoot(h, c)
		defer root.Close()

		renameHelper(h, root, srcNames, dstNames, target, renameAt)
	})
	t.Run(fmt.Sprintf("rename(%s->%s)", strings.Join(srcNames, "/"), strings.Join(dstNames, "/")), func(t *testing.T) {
		h, c := NewHarness(t)
		defer h.Finish()

		_, root := newRoot(h, c)
		defer root.Close()

		renameHelper(h, root, srcNames, dstNames, target, rename)
	})
}

// TestRename exercises renames within and across directories, of parents,
// and over existing files (including over self).
func TestRename(t *testing.T) {
	// In-directory rename, simple case.
	for name := range newTypeMap(nil) {
		// Within the root.
		renameTest(t, []string{name}, []string{"renamed"}, walkHelper)
		renameTest(t, []string{name}, []string{"renamed"}, walkAndOpenHelper)

		// Within a subdirectory.
		renameTest(t, []string{"one", name}, []string{"one", "renamed"}, walkHelper)
		renameTest(t, []string{"one", name}, []string{"one", "renamed"}, walkAndOpenHelper)
	}

	// ... with created files.
	renameTest(t, []string{"created"}, []string{"renamed"}, createHelper)
	renameTest(t, []string{"one", "created"}, []string{"one", "renamed"}, createHelper)

	// Across directories.
	for name := range newTypeMap(nil) {
		// Down one level.
		renameTest(t, []string{"one", name}, []string{"one", "two", "renamed"}, walkHelper)
		renameTest(t, []string{"one", name}, []string{"one", "two", "renamed"}, walkAndOpenHelper)

		// Up one level.
		renameTest(t, []string{"one", "two", name}, []string{"one", "renamed"}, walkHelper)
		renameTest(t, []string{"one", "two", name}, []string{"one", "renamed"}, walkAndOpenHelper)

		// Across at the same level.
		renameTest(t, []string{"one", name}, []string{"three", "renamed"}, walkHelper)
		renameTest(t, []string{"one", name}, []string{"three", "renamed"}, walkAndOpenHelper)
	}

	// ... with created files.
	renameTest(t, []string{"one", "created"}, []string{"one", "two", "renamed"}, createHelper)
	renameTest(t, []string{"one", "two", "created"}, []string{"one", "renamed"}, createHelper)
	renameTest(t, []string{"one", "created"}, []string{"three", "renamed"}, createHelper)

	// Renaming parents.
	for name := range newTypeMap(nil) {
		// Rename a parent.
		renameTest(t, []string{"one", name}, []string{"renamed", name}, walkHelper)
		renameTest(t, []string{"one", name}, []string{"renamed", name}, walkAndOpenHelper)

		// Rename a super parent.
		renameTest(t, []string{"one", "two", name}, []string{"renamed", name}, walkHelper)
		renameTest(t, []string{"one", "two", name}, []string{"renamed", name}, walkAndOpenHelper)
	}

	// ... with created files.
	renameTest(t, []string{"one", "created"}, []string{"renamed", "created"}, createHelper)
	renameTest(t, []string{"one", "two", "created"}, []string{"renamed", "created"}, createHelper)

	// Over existing files, including itself.
	for name := range newTypeMap(nil) {
		for other := range newTypeMap(nil) {
			// Overwrite the noted file (may be itself).
			renameTest(t, []string{"one", name}, []string{"one", other}, walkHelper)
			renameTest(t, []string{"one", name}, []string{"one", other}, walkAndOpenHelper)

			// Overwrite other files in another directory.
			renameTest(t, []string{"one", name}, []string{"one", "two", other}, walkHelper)
			renameTest(t, []string{"one", name}, []string{"one", "two", other}, walkAndOpenHelper)
		}

		// Overwrite by moving the parent.
		renameTest(t, []string{"three", name}, []string{"one", name}, walkHelper)
		renameTest(t, []string{"three", name}, []string{"one", name}, walkAndOpenHelper)

		// Create over the types.
		renameTest(t, []string{"one", "created"}, []string{"one", name}, createHelper)
		renameTest(t, []string{"one", "created"}, []string{"one", "two", name}, createHelper)
		renameTest(t, []string{"three", "created"}, []string{"one", name}, createHelper)
	}
}

// TestRenameInvalid checks that Rename rejects invalid names with EINVAL.
func TestRenameInvalid(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	_, root := newRoot(h, c)
	defer root.Close()

	for name := range newTypeMap(nil) {
		for _, invalidName := range allInvalidNames(name) {
			if err := root.Rename(root, invalidName); err != unix.EINVAL {
				t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
			}
		}
	}
}

// TestRenameAtInvalid checks that RenameAt rejects invalid names, in either
// the source or the destination position, with EINVAL.
func TestRenameAtInvalid(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	_, root := newRoot(h, c)
	defer root.Close()

	for name := range newTypeMap(nil) {
		for _, invalidName := range allInvalidNames(name) {
			if err := root.RenameAt(invalidName, root, "okay"); err != unix.EINVAL {
				t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
			}
			if err := root.RenameAt("okay", root, invalidName); err != unix.EINVAL {
				t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
			}
		}
	}
}

// TestRenameSecondOrder tests that indirect rename targets continue to receive
// Renamed calls after a rename of its renamed parent. i.e.,
//
// 1. Create /one/file
// 2. Create /directory
// 3. Rename /one -> /directory/one
// 4. Rename /directory -> /three/foo
// 5. file from (1) should still receive Renamed.
//
// This is a regression test for b/135219260.
func TestRenameSecondOrder(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	rootBackend, root := newRoot(h, c)
	defer root.Close()

	// Walk to /one.
	_, oneBackend, oneFile := walkHelper(h, "one", root)
	defer oneFile.Close()

	// Walk to and generate /one/file.
	//
	// walkHelper re-walks to oneFile, so we need the second backend,
	// which will also receive Renamed calls.
	oneSecondBackend, fileBackend, fileFile := walkHelper(h, "file", oneFile)
	defer fileFile.Close()

	// Walk to and generate /directory.
_, directoryBackend, directoryFile := walkHelper(h, "directory", root) defer directoryFile.Close() // Rename /one to /directory/one. rootBackend.EXPECT().RenameAt("one", directoryBackend, "one").Return(nil) expectRenamed(oneBackend, []string{}, directoryBackend, "one") expectRenamed(oneSecondBackend, []string{}, directoryBackend, "one") expectRenamed(fileBackend, []string{}, oneBackend, "file") if err := renameAt(h, root, directoryFile, "one", "one", false); err != nil { h.t.Fatalf("got rename err %v, want nil", err) } // Walk to /three. _, threeBackend, threeFile := walkHelper(h, "three", root) defer threeFile.Close() // Rename /directory to /three/foo. rootBackend.EXPECT().RenameAt("directory", threeBackend, "foo").Return(nil) expectRenamed(directoryBackend, []string{}, threeBackend, "foo") expectRenamed(oneBackend, []string{}, directoryBackend, "one") expectRenamed(oneSecondBackend, []string{}, directoryBackend, "one") expectRenamed(fileBackend, []string{}, oneBackend, "file") if err := renameAt(h, root, threeFile, "directory", "foo", false); err != nil { h.t.Fatalf("got rename err %v, want nil", err) } } func TestReadlink(t *testing.T) { for name := range newTypeMap(nil) { t.Run(name, func(t *testing.T) { h, c := NewHarness(t) defer h.Finish() _, root := newRoot(h, c) defer root.Close() // Walk to the file normally. _, f, err := root.Walk([]string{name}) if err != nil { t.Fatalf("walk failed: got %v, wanted nil", err) } defer f.Close() backend := h.Pop(f) const symlinkTarget = "symlink-target" if backend.Attr.Mode.IsSymlink() { // This should only go through on symlinks. backend.EXPECT().Readlink().Return(symlinkTarget, nil) } // Attempt a Readlink operation. target, err := f.Readlink() if err != nil && err != unix.EINVAL { t.Errorf("readlink got %v, wanted EINVAL", err) } else if err == nil && target != symlinkTarget { t.Errorf("readlink got %v, wanted %v", target, symlinkTarget) } }) } } // fdTest is a wrapper around operations that may send file descriptors. 
This // asserts that the file descriptors are working as intended. func fdTest(t *testing.T, sendFn func(*fd.FD) *fd.FD) { // Create a pipe that we can read from. r, w, err := os.Pipe() if err != nil { t.Fatalf("unable to create pipe: %v", err) } defer r.Close() defer w.Close() // Attempt to send the write end. wFD, err := fd.NewFromFile(w) if err != nil { t.Fatalf("unable to convert file: %v", err) } defer wFD.Close() // This is a copy. // Send wFD and receive newFD. newFD := sendFn(wFD) defer newFD.Close() // Attempt to write. const message = "hello" if _, err := newFD.Write([]byte(message)); err != nil { t.Fatalf("write got %v, wanted nil", err) } // Should see the message on our end. buffer := []byte(message) if _, err := io.ReadFull(r, buffer); err != nil { t.Fatalf("read got %v, wanted nil", err) } if string(buffer) != message { t.Errorf("got message %v, wanted %v", string(buffer), message) } } func TestConnect(t *testing.T) { for name := range newTypeMap(nil) { t.Run(name, func(t *testing.T) { h, c := NewHarness(t) defer h.Finish() _, root := newRoot(h, c) defer root.Close() // Walk to the file normally. _, backend, f := walkHelper(h, name, root) defer f.Close() // Catch all the non-socket cases. if !backend.Attr.Mode.IsSocket() { // This has been set up to fail if Connect is called. if _, err := f.Connect(p9.SocketType(0)); err != unix.EINVAL { t.Errorf("connect got %v, wanted EINVAL", err) } return } // Ensure the fd exchange works. fdTest(t, func(send *fd.FD) *fd.FD { backend.EXPECT().Connect(p9.SocketType(0)).Return(send, nil) recv, err := backend.Connect(p9.SocketType(0)) if err != nil { t.Fatalf("connect got %v, wanted nil", err) } return recv }) }) } } func TestReaddir(t *testing.T) { for name := range newTypeMap(nil) { t.Run(name, func(t *testing.T) { h, c := NewHarness(t) defer h.Finish() _, root := newRoot(h, c) defer root.Close() // Walk to the file normally. 
			_, backend, f := walkHelper(h, name, root)
			defer f.Close()

			// Catch all the non-directory cases.
			if !backend.Attr.Mode.IsDir() {
				// This has also been set up to fail if Readdir is called.
				if _, err := f.Readdir(0, 1); err != unix.EINVAL {
					t.Errorf("readdir got %v, wanted EINVAL", err)
				}
				return
			}

			// Readdir on an unopened directory must fail with EINVAL.
			if _, err := f.Readdir(0, 1); err != unix.EINVAL {
				t.Errorf("readdir got %v, wanted EINVAL", err)
			}
			if _, _, _, err := f.Open(p9.ReadWrite); err != unix.EISDIR {
				t.Errorf("readdir got %v, wanted EISDIR", err)
			}
			if _, _, _, err := f.Open(p9.WriteOnly); err != unix.EISDIR {
				t.Errorf("readdir got %v, wanted EISDIR", err)
			}
			backend.EXPECT().Open(p9.ReadOnly).Times(1)
			if _, _, _, err := f.Open(p9.ReadOnly); err != nil {
				t.Errorf("readdir got %v, wanted nil", err)
			}
			backend.EXPECT().Readdir(uint64(0), uint32(1)).Times(1)
			if _, err := f.Readdir(0, 1); err != nil {
				t.Errorf("readdir got %v, wanted nil", err)
			}
		})
	}
}

// TestOpen checks Open semantics per file type and flag combination: what is
// openable, directory restrictions, truncation flags, and double-open.
func TestOpen(t *testing.T) {
	type openTest struct {
		name  string
		flags p9.OpenFlags
		err   error
		match func(p9.FileMode) bool
	}

	cases := []openTest{
		{
			name:  "not-openable-read-only",
			flags: p9.ReadOnly,
			err:   unix.EINVAL,
			match: func(mode p9.FileMode) bool { return !p9.CanOpen(mode) },
		},
		{
			name:  "not-openable-write-only",
			flags: p9.WriteOnly,
			err:   unix.EINVAL,
			match: func(mode p9.FileMode) bool { return !p9.CanOpen(mode) },
		},
		{
			name:  "not-openable-read-write",
			flags: p9.ReadWrite,
			err:   unix.EINVAL,
			match: func(mode p9.FileMode) bool { return !p9.CanOpen(mode) },
		},
		{
			name:  "directory-read-only",
			flags: p9.ReadOnly,
			err:   nil,
			match: func(mode p9.FileMode) bool { return mode.IsDir() },
		},
		{
			name:  "directory-read-write",
			flags: p9.ReadWrite,
			err:   unix.EISDIR,
			match: func(mode p9.FileMode) bool { return mode.IsDir() },
		},
		{
			name:  "directory-write-only",
			flags: p9.WriteOnly,
			err:   unix.EISDIR,
			match: func(mode p9.FileMode) bool { return mode.IsDir() },
		},
		{
			name:  "read-only",
			flags: p9.ReadOnly,
			err:   nil,
			match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) },
		},
		{
			name:  "write-only",
			flags: p9.WriteOnly,
			err:   nil,
			match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
		},
		{
			name:  "read-write",
			flags: p9.ReadWrite,
			err:   nil,
			match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
		},
		{
			name:  "directory-read-only-truncate",
			flags: p9.ReadOnly | p9.OpenTruncate,
			err:   unix.EISDIR,
			match: func(mode p9.FileMode) bool { return mode.IsDir() },
		},
		{
			name:  "read-only-truncate",
			flags: p9.ReadOnly | p9.OpenTruncate,
			err:   nil,
			match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
		},
		{
			name:  "write-only-truncate",
			flags: p9.WriteOnly | p9.OpenTruncate,
			err:   nil,
			match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
		},
		{
			name:  "read-write-truncate",
			flags: p9.ReadWrite | p9.OpenTruncate,
			err:   nil,
			match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
		},
	}

	// Open(flags OpenFlags) (*fd.FD, QID, uint32, error)
	// - only works on Regular, NamedPipe, BlockDevice, CharacterDevice
	// - returning a file works as expected
	for name := range newTypeMap(nil) {
		for _, tc := range cases {
			t.Run(fmt.Sprintf("%s-%s", tc.name, name), func(t *testing.T) {
				h, c := NewHarness(t)
				defer h.Finish()

				_, root := newRoot(h, c)
				defer root.Close()

				// Walk to the file normally.
				_, backend, f := walkHelper(h, name, root)
				defer f.Close()

				// Does this match the case?
				if !tc.match(backend.Attr.Mode) {
					t.SkipNow()
				}

				// Ensure open-required operations fail.
				if _, err := f.ReadAt([]byte("hello"), 0); err != unix.EINVAL {
					t.Errorf("readAt got %v, wanted EINVAL", err)
				}
				if _, err := f.WriteAt(make([]byte, 6), 0); err != unix.EINVAL {
					t.Errorf("writeAt got %v, wanted EINVAL", err)
				}
				if err := f.FSync(); err != unix.EINVAL {
					t.Errorf("fsync got %v, wanted EINVAL", err)
				}
				if _, err := f.Readdir(0, 1); err != unix.EINVAL {
					t.Errorf("readdir got %v, wanted EINVAL", err)
				}

				// Attempt the given open.
				if tc.err != nil {
					// We expect an error, just test and return.
					if _, _, _, err := f.Open(tc.flags); err != tc.err {
						t.Fatalf("open with flags %v got %v, want %v", tc.flags, err, tc.err)
					}
					return
				}

				// Run an FD test, since we expect success.
				fdTest(t, func(send *fd.FD) *fd.FD {
					backend.EXPECT().Open(tc.flags).Return(send, p9.QID{}, uint32(0), nil).Times(1)
					recv, _, _, err := f.Open(tc.flags)
					if err != tc.err {
						t.Fatalf("open with flags %v got %v, want %v", tc.flags, err, tc.err)
					}
					return recv
				})

				// If the open was successful, attempt another one.
				if _, _, _, err := f.Open(tc.flags); err != unix.EINVAL {
					t.Errorf("second open with flags %v got %v, want EINVAL", tc.flags, err)
				}

				// Ensure that all illegal operations fail.
				if _, _, err := f.Walk(nil); err != unix.EINVAL && err != unix.EBUSY {
					t.Errorf("walk got %v, wanted EINVAL or EBUSY", err)
				}
				if _, _, _, _, err := f.WalkGetAttr(nil); err != unix.EINVAL && err != unix.EBUSY {
					t.Errorf("walkgetattr got %v, wanted EINVAL or EBUSY", err)
				}
			})
		}
	}
}

// TestClose checks that every close variant invalidates the file: all
// subsequent operations must fail with EBADF.
func TestClose(t *testing.T) {
	type closeTest struct {
		name    string
		closeFn func(backend *Mock, f p9.File) error
	}

	cases := []closeTest{
		{
			name: "close",
			closeFn: func(_ *Mock, f p9.File) error {
				return f.Close()
			},
		},
		{
			name: "remove",
			closeFn: func(backend *Mock, f p9.File) error {
				// Allow the rename call in the parent, automatically translated.
				backend.parent.EXPECT().UnlinkAt(gomock.Any(), gomock.Any()).Times(1)
				return f.(deprecatedRemover).Remove()
			},
		},
		{
			name: "setAttrClose",
			closeFn: func(backend *Mock, f p9.File) error {
				valid := p9.SetAttrMask{ATime: true}
				attr := p9.SetAttr{ATimeSeconds: 1, ATimeNanoSeconds: 2}
				backend.EXPECT().SetAttr(valid, attr).Times(1)
				return f.SetAttrClose(valid, attr)
			},
		},
	}

	for name := range newTypeMap(nil) {
		for _, tc := range cases {
			t.Run(fmt.Sprintf("%s(%s)", tc.name, name), func(t *testing.T) {
				h, c := NewHarness(t)
				defer h.Finish()

				_, root := newRoot(h, c)
				defer root.Close()

				// Walk to the file normally.
_, backend, f := walkHelper(h, name, root) // Close via the prescribed method. if err := tc.closeFn(backend, f); err != nil { t.Fatalf("closeFn failed: %v", err) } // Everything should fail with EBADF. if _, _, err := f.Walk(nil); err != unix.EBADF { t.Errorf("walk got %v, wanted EBADF", err) } if _, err := f.StatFS(); err != unix.EBADF { t.Errorf("statfs got %v, wanted EBADF", err) } if _, _, _, err := f.GetAttr(p9.AttrMaskAll()); err != unix.EBADF { t.Errorf("getattr got %v, wanted EBADF", err) } if err := f.SetAttr(p9.SetAttrMask{}, p9.SetAttr{}); err != unix.EBADF { t.Errorf("setattrk got %v, wanted EBADF", err) } if err := f.Rename(root, "new-name"); err != unix.EBADF { t.Errorf("rename got %v, wanted EBADF", err) } if err := f.Close(); err != unix.EBADF { t.Errorf("close got %v, wanted EBADF", err) } if _, _, _, err := f.Open(p9.ReadOnly); err != unix.EBADF { t.Errorf("open got %v, wanted EBADF", err) } if _, err := f.ReadAt([]byte("hello"), 0); err != unix.EBADF { t.Errorf("readAt got %v, wanted EBADF", err) } if _, err := f.WriteAt(make([]byte, 6), 0); err != unix.EBADF { t.Errorf("writeAt got %v, wanted EBADF", err) } if err := f.FSync(); err != unix.EBADF { t.Errorf("fsync got %v, wanted EBADF", err) } if _, _, _, _, err := f.Create("new-file", p9.ReadWrite, 0, 0, 0); err != unix.EBADF { t.Errorf("create got %v, wanted EBADF", err) } if _, err := f.Mkdir("new-directory", 0, 0, 0); err != unix.EBADF { t.Errorf("mkdir got %v, wanted EBADF", err) } if _, err := f.Symlink("old-name", "new-name", 0, 0); err != unix.EBADF { t.Errorf("symlink got %v, wanted EBADF", err) } if err := f.Link(root, "new-name"); err != unix.EBADF { t.Errorf("link got %v, wanted EBADF", err) } if _, err := f.Mknod("new-block-device", 0, 0, 0, 0, 0); err != unix.EBADF { t.Errorf("mknod got %v, wanted EBADF", err) } if err := f.RenameAt("old-name", root, "new-name"); err != unix.EBADF { t.Errorf("renameAt got %v, wanted EBADF", err) } if err := f.UnlinkAt("name", 0); err != unix.EBADF { 
t.Errorf("unlinkAt got %v, wanted EBADF", err) } if _, err := f.Readdir(0, 1); err != unix.EBADF { t.Errorf("readdir got %v, wanted EBADF", err) } if _, err := f.Readlink(); err != unix.EBADF { t.Errorf("readlink got %v, wanted EBADF", err) } if err := f.Flush(); err != unix.EBADF { t.Errorf("flush got %v, wanted EBADF", err) } if _, _, _, _, err := f.WalkGetAttr(nil); err != unix.EBADF { t.Errorf("walkgetattr got %v, wanted EBADF", err) } if _, err := f.Connect(p9.SocketType(0)); err != unix.EBADF { t.Errorf("connect got %v, wanted EBADF", err) } }) } } } // onlyWorksOnOpenThings is a helper test method for operations that should // only work on files that have been explicitly opened. func onlyWorksOnOpenThings(h *Harness, t *testing.T, name string, root p9.File, mode p9.OpenFlags, expectedErr error, fn func(backend *Mock, f p9.File, shouldSucceed bool) error) { // Walk to the file normally. _, backend, f := walkHelper(h, name, root) defer f.Close() // Does it work before opening? if err := fn(backend, f, false); err != unix.EINVAL { t.Errorf("operation got %v, wanted EINVAL", err) } // Is this openable? if !p9.CanOpen(backend.Attr.Mode) { return // Nothing to do. } // If this is a directory, we can't handle writing. if backend.Attr.Mode.IsDir() && (mode == p9.ReadWrite || mode == p9.WriteOnly) { return // Skip. } // Open the file. backend.EXPECT().Open(mode) if _, _, _, err := f.Open(mode); err != nil { t.Fatalf("open got %v, wanted nil", err) } // Attempt the operation. 
	if err := fn(backend, f, expectedErr == nil); err != expectedErr {
		t.Fatalf("operation got %v, wanted %v", err, expectedErr)
	}
}

// TestRead checks ReadAt behavior per open mode: allowed for read modes,
// EPERM for write-only, and data round-trips through the backend.
func TestRead(t *testing.T) {
	type readTest struct {
		name string
		mode p9.OpenFlags
		err  error
	}

	cases := []readTest{
		{
			name: "read-only",
			mode: p9.ReadOnly,
			err:  nil,
		},
		{
			name: "read-write",
			mode: p9.ReadWrite,
			err:  nil,
		},
		{
			name: "write-only",
			mode: p9.WriteOnly,
			err:  unix.EPERM,
		},
	}

	for name := range newTypeMap(nil) {
		for _, tc := range cases {
			t.Run(fmt.Sprintf("%s-%s", tc.name, name), func(t *testing.T) {
				h, c := NewHarness(t)
				defer h.Finish()

				_, root := newRoot(h, c)
				defer root.Close()

				const message = "hello"

				onlyWorksOnOpenThings(h, t, name, root, tc.mode, tc.err, func(backend *Mock, f p9.File, shouldSucceed bool) error {
					if !shouldSucceed {
						_, err := f.ReadAt([]byte(message), 0)
						return err
					}

					// Prepare for the call to readAt in the backend.
					backend.EXPECT().ReadAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) {
						copy(p, message)
					}).Return(len(message), nil)

					// Make the client call.
					p := make([]byte, 2*len(message)) // Double size.
					n, err := f.ReadAt(p, 0)

					// Sanity check result.
					if err != nil {
						return err
					}
					if n != len(message) {
						t.Fatalf("message length incorrect, got %d, want %d", n, len(message))
					}
					if !bytes.Equal(p[:n], []byte(message)) {
						t.Fatalf("message incorrect, got %v, want %v", p, []byte(message))
					}
					return nil // Success.
				})
			})
		}
	}
}

// TestWrite checks WriteAt behavior per open mode: allowed for write modes,
// EPERM for read-only, and data round-trips through the backend.
func TestWrite(t *testing.T) {
	type writeTest struct {
		name string
		mode p9.OpenFlags
		err  error
	}

	cases := []writeTest{
		{
			name: "read-only",
			mode: p9.ReadOnly,
			err:  unix.EPERM,
		},
		{
			name: "read-write",
			mode: p9.ReadWrite,
			err:  nil,
		},
		{
			name: "write-only",
			mode: p9.WriteOnly,
			err:  nil,
		},
	}

	for name := range newTypeMap(nil) {
		for _, tc := range cases {
			t.Run(fmt.Sprintf("%s-%s", tc.name, name), func(t *testing.T) {
				h, c := NewHarness(t)
				defer h.Finish()

				_, root := newRoot(h, c)
				defer root.Close()

				const message = "hello"

				onlyWorksOnOpenThings(h, t, name, root, tc.mode, tc.err, func(backend *Mock, f p9.File, shouldSucceed bool) error {
					if !shouldSucceed {
						_, err := f.WriteAt([]byte(message), 0)
						return err
					}

					// Prepare for the call to writeAt in the backend.
					var output []byte // Saved by Do below.
					backend.EXPECT().WriteAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) {
						output = p
					}).Return(len(message), nil)

					// Make the client call.
					n, err := f.WriteAt([]byte(message), 0)

					// Sanity check result.
					if err != nil {
						return err
					}
					if n != len(message) {
						t.Fatalf("message length incorrect, got %d, want %d", n, len(message))
					}
					if !bytes.Equal(output, []byte(message)) {
						t.Fatalf("message incorrect, got %v, want %v", output, []byte(message))
					}
					return nil // Success.
				})
			})
		}
	}
}

// TestFSync checks FSync is forwarded to the backend for every open mode.
func TestFSync(t *testing.T) {
	for name := range newTypeMap(nil) {
		for _, mode := range []p9.OpenFlags{p9.ReadOnly, p9.WriteOnly, p9.ReadWrite} {
			t.Run(fmt.Sprintf("%s-%s", mode, name), func(t *testing.T) {
				h, c := NewHarness(t)
				defer h.Finish()

				_, root := newRoot(h, c)
				defer root.Close()

				onlyWorksOnOpenThings(h, t, name, root, mode, nil, func(backend *Mock, f p9.File, shouldSucceed bool) error {
					if shouldSucceed {
						backend.EXPECT().FSync().Times(1)
					}
					return f.FSync()
				})
			})
		}
	}
}

// TestFlush checks that Flush is forwarded to the backend for all types.
func TestFlush(t *testing.T) {
	for name := range newTypeMap(nil) {
		t.Run(name, func(t *testing.T) {
			h, c := NewHarness(t)
			defer h.Finish()

			_, root := newRoot(h, c)
			defer root.Close()

			_, backend, f := walkHelper(h, name, root)
			defer f.Close()

			backend.EXPECT().Flush()
			f.Flush()
		})
	}
}

// onlyWorksOnDirectories is a helper test method for operations that should
// only work on unopened directories, such as create, mkdir and symlink.
func onlyWorksOnDirectories(h *Harness, t *testing.T, name string, root p9.File, fn func(backend *Mock, f p9.File, shouldSucceed bool) error) {
	// Walk to the file normally.
	_, backend, f := walkHelper(h, name, root)
	defer f.Close()

	// Only directories support mknod.
	if !backend.Attr.Mode.IsDir() {
		if err := fn(backend, f, false); err != unix.EINVAL {
			t.Errorf("operation got %v, wanted EINVAL", err)
		}
		return // Nothing else to do.
	}

	// Should succeed.
	if err := fn(backend, f, true); err != nil {
		t.Fatalf("operation got %v, wanted nil", err)
	}

	// Open the directory.
	backend.EXPECT().Open(p9.ReadOnly).Times(1)
	if _, _, _, err := f.Open(p9.ReadOnly); err != nil {
		t.Fatalf("open got %v, wanted nil", err)
	}

	// Should not work again.
	if err := fn(backend, f, false); err != unix.EINVAL {
		t.Fatalf("operation got %v, wanted EINVAL", err)
	}
}

// TestCreate checks Create works only on unopened directories and that the
// created file's descriptor is transferred correctly.
func TestCreate(t *testing.T) {
	for name := range newTypeMap(nil) {
		t.Run(name, func(t *testing.T) {
			h, c := NewHarness(t)
			defer h.Finish()

			_, root := newRoot(h, c)
			defer root.Close()

			onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
				if !shouldSucceed {
					_, _, _, _, err := f.Create("new-file", p9.ReadWrite, 0, 1, 2)
					return err
				}

				// If the create is going to succeed, then we
				// need to create a new backend file, and we
				// clone to ensure that we don't close the
				// original.
				_, newF, err := f.Walk(nil)
				if err != nil {
					t.Fatalf("clone got %v, wanted nil", err)
				}
				defer newF.Close()
				newBackend := h.Pop(newF)

				// Run a regular FD test to validate that path.
				fdTest(t, func(send *fd.FD) *fd.FD {
					// Return the send FD on success.
					newFile := h.NewFile()(backend) // New file with the parent backend.
					newBackend.EXPECT().Create("new-file", p9.ReadWrite, p9.FileMode(0), p9.UID(1), p9.GID(2)).Return(send, newFile, p9.QID{}, uint32(0), nil)

					// Receive the fd back.
					recv, _, _, _, err := newF.Create("new-file", p9.ReadWrite, 0, 1, 2)
					if err != nil {
						t.Fatalf("create got %v, wanted nil", err)
					}
					return recv
				})

				// The above will fail via normal test flow, so
				// we can assume that it passed.
				return nil
			})
		})
	}
}

// TestCreateInvalid checks that Create rejects invalid names with EINVAL.
func TestCreateInvalid(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	_, root := newRoot(h, c)
	defer root.Close()

	for name := range newTypeMap(nil) {
		for _, invalidName := range allInvalidNames(name) {
			if _, _, _, _, err := root.Create(invalidName, p9.ReadWrite, 0, 0, 0); err != unix.EINVAL {
				t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
			}
		}
	}
}

// TestMkdir checks Mkdir works only on unopened directories.
func TestMkdir(t *testing.T) {
	for name := range newTypeMap(nil) {
		t.Run(name, func(t *testing.T) {
			h, c := NewHarness(t)
			defer h.Finish()

			_, root := newRoot(h, c)
			defer root.Close()

			onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
				if shouldSucceed {
					backend.EXPECT().Mkdir("new-directory", p9.FileMode(0), p9.UID(1), p9.GID(2))
				}
				_, err := f.Mkdir("new-directory", 0, 1, 2)
				return err
			})
		})
	}
}

// TestMkdirInvalid checks that Mkdir rejects invalid names with EINVAL.
func TestMkdirInvalid(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	_, root := newRoot(h, c)
	defer root.Close()

	for name := range newTypeMap(nil) {
		for _, invalidName := range allInvalidNames(name) {
			if _, err := root.Mkdir(invalidName, 0, 0, 0); err != unix.EINVAL {
				t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
			}
		}
	}
}

// TestSymlink checks Symlink works only on unopened directories.
func TestSymlink(t *testing.T) {
	for name := range newTypeMap(nil) {
		t.Run(name, func(t *testing.T) {
			h, c := NewHarness(t)
			defer h.Finish()

			_, root := newRoot(h, c)
			defer root.Close()

			onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
				if shouldSucceed {
					backend.EXPECT().Symlink("old-name", "new-name", p9.UID(1), p9.GID(2))
				}
				_, err := f.Symlink("old-name", "new-name", 1, 2)
				return err
			})
		})
	}
}

// TestSyminkInvalid checks that Symlink rejects invalid new names with
// EINVAL.
//
// NOTE(review): "Symink" is a typo for "Symlink"; kept as-is here to avoid
// renaming an exported test function in a documentation pass.
func TestSyminkInvalid(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	_, root := newRoot(h, c)
	defer root.Close()

	for name := range newTypeMap(nil) {
		for _, invalidName := range allInvalidNames(name) {
			// We need only test for invalid names in the new name,
			// the target can be an arbitrary string and we don't
			// need to sanity check it.
			if _, err := root.Symlink("old-name", invalidName, 0, 0); err != unix.EINVAL {
				t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
			}
		}
	}
}

// TestLink checks Link works only on unopened directories.
func TestLink(t *testing.T) {
	for name := range newTypeMap(nil) {
		t.Run(name, func(t *testing.T) {
			h, c := NewHarness(t)
			defer h.Finish()

			_, root := newRoot(h, c)
			defer root.Close()

			onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
				if shouldSucceed {
					backend.EXPECT().Link(gomock.Any(), "new-link")
				}
				return f.Link(f, "new-link")
			})
		})
	}
}

// TestLinkInvalid checks that Link rejects invalid names with EINVAL.
func TestLinkInvalid(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	_, root := newRoot(h, c)
	defer root.Close()

	for name := range newTypeMap(nil) {
		for _, invalidName := range allInvalidNames(name) {
			if err := root.Link(root, invalidName); err != unix.EINVAL {
				t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
			}
		}
	}
}

// TestMknod checks Mknod works only on unopened directories.
func TestMknod(t *testing.T) {
	for name := range newTypeMap(nil) {
		t.Run(name, func(t *testing.T) {
			h, c := NewHarness(t)
			defer h.Finish()

			_, root := newRoot(h, c)
			defer root.Close()

			onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
				if shouldSucceed {
					backend.EXPECT().Mknod("new-block-device", p9.FileMode(0), uint32(1), uint32(2), p9.UID(3), p9.GID(4)).Times(1)
				}
				_, err := f.Mknod("new-block-device", 0, 1, 2, 3, 4)
				return err
			})
		})
	}
}

// concurrentFn is a specification of a concurrent operation. This is used to
// drive the concurrency tests below.
type concurrentFn struct {
	name  string
	match func(p9.FileMode) bool
	op    func(h *Harness, backend *Mock, f p9.File, callback func())
}

// concurrentTest runs fn1 and fn2 against two paths (same or sibling
// directories) and asserts whether they may proceed concurrently.
func concurrentTest(t *testing.T, name string, fn1, fn2 concurrentFn, sameDir, expectedOkay bool) {
	var (
		names1 []string
		names2 []string
	)
	if sameDir {
		// Use the same file one directory up.
		names1, names2 = []string{"one", name}, []string{"one", name}
	} else {
		// For different directories, just use siblings.
names1, names2 = []string{"one", name}, []string{"three", name} } t.Run(fmt.Sprintf("%s(%v)+%s(%v)", fn1.name, names1, fn2.name, names2), func(t *testing.T) { h, c := NewHarness(t) defer h.Finish() _, root := newRoot(h, c) defer root.Close() // Walk to both files as given. _, f1, err := root.Walk(names1) if err != nil { t.Fatalf("error walking, got %v, want nil", err) } defer f1.Close() b1 := h.Pop(f1) _, f2, err := root.Walk(names2) if err != nil { t.Fatalf("error walking, got %v, want nil", err) } defer f2.Close() b2 := h.Pop(f2) // Are these a good match for the current test case? if !fn1.match(b1.Attr.Mode) { t.SkipNow() } if !fn2.match(b2.Attr.Mode) { t.SkipNow() } // Construct our "concurrency creator". in1 := make(chan struct{}, 1) in2 := make(chan struct{}, 1) var top sync.WaitGroup var fns sync.WaitGroup defer top.Wait() top.Add(2) // Accounting for below. defer fns.Done() fns.Add(1) // See line above; released before top.Wait. go func() { defer top.Done() fn1.op(h, b1, f1, func() { in1 <- struct{}{} fns.Wait() }) }() go func() { defer top.Done() fn2.op(h, b2, f2, func() { in2 <- struct{}{} fns.Wait() }) }() // Compute a reasonable timeout. If we expect the operation to hang, // give it 10 milliseconds before we assert that it's fine. After all, // there will be a lot of these tests. If we don't expect it to hang, // give it a full minute, since the machine could be slow. timeout := 10 * time.Millisecond if expectedOkay { timeout = 1 * time.Minute } // Read the first channel. var second chan struct{} select { case <-in1: second = in2 case <-in2: second = in1 } // Catch concurrency. select { case <-second: // We finished successful. Is this good? Depends on the // expected result. if !expectedOkay { t.Errorf("%q and %q proceeded concurrently!", fn1.name, fn2.name) } case <-time.After(timeout): // Great, things did not proceed concurrently. Is that what we // expected? 
if expectedOkay { t.Errorf("%q and %q hung concurrently!", fn1.name, fn2.name) } } }) } func randomFileName() string { return fmt.Sprintf("%x", rand.Int63()) } func TestConcurrency(t *testing.T) { readExclusive := []concurrentFn{ { // N.B. We can't explicitly check WalkGetAttr behavior, // but we rely on the fact that the internal code paths // are the same. name: "walk", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { // See the documentation of WalkCallback. // Because walk is actually implemented by the // mock, we need a special place for this // callback. // // Note that a clone actually locks the parent // node. So we walk from this node to test // concurrent operations appropriately. backend.WalkCallback = func() error { callback() return nil } f.Walk([]string{randomFileName()}) // Won't exist. }, }, { name: "fsync", match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Open(gomock.Any()) backend.EXPECT().FSync().Do(func() { callback() }) f.Open(p9.ReadOnly) // Required. f.FSync() }, }, { name: "readdir", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Open(gomock.Any()) backend.EXPECT().Readdir(gomock.Any(), gomock.Any()).Do(func(uint64, uint32) { callback() }) f.Open(p9.ReadOnly) // Required. 
f.Readdir(0, 1) }, }, { name: "readlink", match: func(mode p9.FileMode) bool { return mode.IsSymlink() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Readlink().Do(func() { callback() }) f.Readlink() }, }, { name: "connect", match: func(mode p9.FileMode) bool { return mode.IsSocket() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Connect(gomock.Any()).Do(func(p9.SocketType) { callback() }) f.Connect(0) }, }, { name: "open", match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Open(gomock.Any()).Do(func(p9.OpenFlags) { callback() }) f.Open(p9.ReadOnly) }, }, { name: "flush", match: func(mode p9.FileMode) bool { return true }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Flush().Do(func() { callback() }) f.Flush() }, }, } writeExclusive := []concurrentFn{ { // N.B. We can't really check getattr. But this is an // extremely low-risk function, it seems likely that // this check is paranoid anyways. 
name: "setattr", match: func(mode p9.FileMode) bool { return true }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().SetAttr(gomock.Any(), gomock.Any()).Do(func(p9.SetAttrMask, p9.SetAttr) { callback() }) f.SetAttr(p9.SetAttrMask{}, p9.SetAttr{}) }, }, { name: "unlinkAt", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().UnlinkAt(gomock.Any(), gomock.Any()).Do(func(string, uint32) { callback() }) f.UnlinkAt(randomFileName(), 0) }, }, { name: "mknod", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Mknod(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, p9.FileMode, uint32, uint32, p9.UID, p9.GID) { callback() }) f.Mknod(randomFileName(), 0, 0, 0, 0, 0) }, }, { name: "link", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Link(gomock.Any(), gomock.Any()).Do(func(p9.File, string) { callback() }) f.Link(f, randomFileName()) }, }, { name: "symlink", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Symlink(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, string, p9.UID, p9.GID) { callback() }) f.Symlink(randomFileName(), randomFileName(), 0, 0) }, }, { name: "mkdir", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().Mkdir(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, p9.FileMode, p9.UID, p9.GID) { callback() }) f.Mkdir(randomFileName(), 0, 0, 0) }, }, { name: "create", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, 
callback func()) { // Return an error for the creation operation, as this is the simplest. backend.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, p9.QID{}, uint32(0), unix.EINVAL).Do(func(string, p9.OpenFlags, p9.FileMode, p9.UID, p9.GID) { callback() }) f.Create(randomFileName(), p9.ReadOnly, 0, 0, 0) }, }, } globalExclusive := []concurrentFn{ { name: "remove", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { // Remove operates on a locked parent. So we // add a child, walk to it and call remove. // Note that because this operation can operate // concurrently with itself, we need to // generate a random file name. randomFile := randomFileName() backend.AddChild(randomFile, h.NewFile()) defer backend.RemoveChild(randomFile) _, file, err := f.Walk([]string{randomFile}) if err != nil { h.t.Fatalf("walk got %v, want nil", err) } // Remove is automatically translated to the parent. backend.EXPECT().UnlinkAt(gomock.Any(), gomock.Any()).Do(func(string, uint32) { callback() }) // Remove is also a close. file.(deprecatedRemover).Remove() }, }, { name: "rename", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { // Similarly to remove, because we need to // operate on a child, we allow a walk. randomFile := randomFileName() backend.AddChild(randomFile, h.NewFile()) defer backend.RemoveChild(randomFile) _, file, err := f.Walk([]string{randomFile}) if err != nil { h.t.Fatalf("walk got %v, want nil", err) } defer file.Close() fileBackend := h.Pop(file) // Rename is automatically translated to the parent. backend.EXPECT().RenameAt(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, p9.File, string) { callback() }) // Attempt the rename. 
fileBackend.EXPECT().Renamed(gomock.Any(), gomock.Any()) file.Rename(f, randomFileName()) }, }, { name: "renameAt", match: func(mode p9.FileMode) bool { return mode.IsDir() }, op: func(h *Harness, backend *Mock, f p9.File, callback func()) { backend.EXPECT().RenameAt(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, p9.File, string) { callback() }) // Attempt the rename. There are no active fids // with this name, so we don't need to expect // Renamed hooks on anything. f.RenameAt(randomFileName(), f, randomFileName()) }, }, } for _, fn1 := range readExclusive { for _, fn2 := range readExclusive { for name := range newTypeMap(nil) { // Everything should be able to proceed in parallel. concurrentTest(t, name, fn1, fn2, true, true) concurrentTest(t, name, fn1, fn2, false, true) } } } for _, fn1 := range append(readExclusive, writeExclusive...) { for _, fn2 := range writeExclusive { for name := range newTypeMap(nil) { // Only cross-directory functions should proceed in parallel. concurrentTest(t, name, fn1, fn2, true, false) concurrentTest(t, name, fn1, fn2, false, true) } } } for _, fn1 := range append(append(readExclusive, writeExclusive...), globalExclusive...) { for _, fn2 := range globalExclusive { for name := range newTypeMap(nil) { // Nothing should be able to run in parallel. concurrentTest(t, name, fn1, fn2, true, false) concurrentTest(t, name, fn1, fn2, false, false) } } } } func TestReadWriteConcurrent(t *testing.T) { h, c := NewHarness(t) defer h.Finish() _, root := newRoot(h, c) defer root.Close() const ( instances = 10 iterations = 10000 dataSize = 1024 ) var ( dataSets [instances][dataSize]byte backends [instances]*Mock files [instances]p9.File ) // Walk to the file normally. for i := 0; i < instances; i++ { _, backends[i], files[i] = walkHelper(h, "file", root) defer files[i].Close() } // Open the files. 
for i := 0; i < instances; i++ { backends[i].EXPECT().Open(p9.ReadWrite) if _, _, _, err := files[i].Open(p9.ReadWrite); err != nil { t.Fatalf("open got %v, wanted nil", err) } } // Initialize random data for each instance. for i := 0; i < instances; i++ { if _, err := rand.Read(dataSets[i][:]); err != nil { t.Fatalf("error initializing dataSet#%d, got %v", i, err) } } // Define our random read/write mechanism. randRead := func(h *Harness, backend *Mock, f p9.File, data, test []byte) { // Prepare the backend. backend.EXPECT().ReadAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) { if n := copy(p, data); n != len(data) { // Note that we have to assert the result here, as the Return statement // below cannot be dynamic: it will be bound before this call is made. h.t.Errorf("wanted length %d, got %d", len(data), n) } }).Return(len(data), nil) // Execute the read. if n, err := f.ReadAt(test, 0); n != len(test) || err != nil { t.Errorf("failed read: wanted (%d, nil), got (%d, %v)", len(test), n, err) return // No sense doing check below. } if !bytes.Equal(test, data) { t.Errorf("data integrity failed during read") // Not as expected. } } randWrite := func(h *Harness, backend *Mock, f p9.File, data []byte) { // Prepare the backend. backend.EXPECT().WriteAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) { if !bytes.Equal(p, data) { h.t.Errorf("data integrity failed during write") // Not as expected. } }).Return(len(data), nil) // Execute the write. if n, err := f.WriteAt(data, 0); n != len(data) || err != nil { t.Errorf("failed read: wanted (%d, nil), got (%d, %v)", len(data), n, err) } } randReadWrite := func(n int, h *Harness, backend *Mock, f p9.File, data []byte) { test := make([]byte, len(data)) for i := 0; i < n; i++ { if rand.Intn(2) == 0 { randRead(h, backend, f, data, test) } else { randWrite(h, backend, f, data) } } } // Start reading and writing. 
var wg sync.WaitGroup for i := 0; i < instances; i++ { wg.Add(1) go func(i int) { defer wg.Done() randReadWrite(iterations, h, backends[i], files[i], dataSets[i][:]) }(i) } wg.Wait() }
package main

import (
	"encoding/json"
	"log"
	"os"

	pb "github.com/inigofu/temac-user-service/proto/auth"
	microclient "github.com/micro/go-micro/client"
	"github.com/micro/go-micro/cmd"
	"github.com/micro/go-micro/metadata"
	"golang.org/x/net/context"
)

// loadJSON opens path, decodes its JSON content into v, and closes the file
// before returning. It returns the open or decode error, if any.
func loadJSON(path string, v interface{}) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewDecoder(f).Decode(v)
}

// main seeds the temac auth service: it creates a user from user.json,
// authenticates as that user, and then — using the returned token — creates
// the menu, role, and forms from their respective JSON files, finally
// attaching the new role to the user.
func main() {
	cmd.Init()

	// Client for the auth service.
	client := pb.NewAuthService("temac.auth", microclient.DefaultClient)

	var user pb.User
	if err := loadJSON("user.json", &user); err != nil {
		log.Fatalf("could not load user.json: %v", err)
	}
	log.Print("user ", user)

	// Everything below depends on ruser.User, so a failed create is fatal
	// (the original logged and then dereferenced a nil response).
	ruser, err := client.Create(context.TODO(), &user)
	if err != nil {
		log.Fatalf("Could not create: %v", err)
	}
	log.Printf("Created: %s", ruser.User.Idcode)

	// Authenticate to obtain the token used by all subsequent calls.
	rauth, err := client.Auth(context.TODO(), &user)
	if err != nil {
		log.Fatalf("Could not auth: %v", err)
	}
	log.Println("auth with token", rauth.Token.Token)

	ctx := metadata.NewContext(context.TODO(), map[string]string{
		"Authorization": rauth.Token.Token,
	})

	var menu pb.Menu
	if err := loadJSON("menu.json", &menu); err != nil {
		log.Fatalf("could not load menu.json: %v", err)
	}
	log.Println("menu", menu)
	rmenu, err := client.CreateMenu(ctx, &menu)
	if err != nil {
		log.Printf("Could not create menu: %v", err)
	} else {
		log.Printf("Created menu: %s", rmenu)
	}

	var role pb.Role
	if err := loadJSON("role.json", &role); err != nil {
		log.Fatalf("could not load role.json: %v", err)
	}
	log.Println("role", role)
	// The role's Idcode is dereferenced below, so failure is fatal here too.
	rrole, err := client.CreateRole(ctx, &role)
	if err != nil {
		log.Fatalf("Could not create role: %v", err)
	}
	log.Printf("Created role: %s", rrole)

	// Attach the freshly created role to the created user and update it.
	user = *ruser.User
	user.Roles = []*pb.Role{{Idcode: rrole.Role.Idcode}}
	log.Printf("Updating user: %s", user)
	ruser, err = client.UpdateUser(ctx, &user)
	if err != nil {
		log.Printf("Could not update: %v", err)
	} else {
		log.Printf("Created user: %s", ruser)
	}

	var form []pb.Form
	if err := loadJSON("form.json", &form); err != nil {
		log.Fatalf("could not load form.json: %v", err)
	}
	log.Println("form", form)
	for i := range form {
		// Index instead of ranging by value: avoids passing the address of
		// a loop-copy of each element.
		rform, err := client.CreateForm(ctx, &form[i])
		if err != nil {
			log.Printf("Could not create form: %v", err)
		} else {
			log.Printf("Created form: %s", rform)
		}
	}

	log.Printf("Procedure finished")
}
// Package linklist implements a singly linked list with a sentinel head node.
package linklist

import (
	"fmt"

	"DA/2_linklist/linknode"
)

// LList is a singly linked list built around a sentinel head node: Head
// points at the sentinel, Tail at the last node, and Length counts data
// nodes only. PrtHead controls whether Output includes the sentinel in the
// number of printed nodes; when Duplicate is false, Append drops a value
// equal to the current tail's data.
type LList struct {
	Head      *linknode.LNode
	Tail      *linknode.LNode
	Length    int
	PrtHead   bool
	Duplicate bool
}

// Output prints the node values on a single line, followed by a newline.
// When PrtHead is true, one extra node (the sentinel) is printed.
func (ll *LList) Output() {
	if ll.Head == nil {
		return
	}
	prtLen := ll.Length
	if ll.PrtHead {
		prtLen = ll.Length + 1
	}
	iter := ll.Head
	for i := 0; i < prtLen; i++ {
		if iter != nil {
			fmt.Printf("%v ", iter.Data)
			iter = iter.Next
		}
	}
	fmt.Println()
}

// Insert inserts data at position pos.
// TODO: not yet implemented.
func (ll *LList) Insert(pos int, data linknode.Item) {
}

// Delete removes and returns the item at position pos.
// TODO: not yet implemented; currently always returns nil.
func (ll *LList) Delete(pos int) linknode.Item {
	return nil
}

// Update replaces the item at position pos with data.
// TODO: not yet implemented.
func (ll *LList) Update(pos int, data linknode.Item) {
}

// Search returns the position of data in the list, or -1 if absent.
// TODO: not yet implemented; currently always returns -1.
func (ll *LList) Search(data linknode.Item) int {
	index := -1
	return index
}

// Append adds data after the current tail. When Duplicate is false and data
// equals the tail's data, the value is silently dropped.
func (ll *LList) Append(data linknode.Item) {
	if !ll.Duplicate && ll.Tail.Data == data {
		return
	}
	ll.Length++
	node := &linknode.LNode{Data: data}
	ll.Tail.Next = node
	ll.Tail = node
}

// IgnoreHead hides the sentinel head: the tail is linked back to the first
// data node (forming a ring) and Head is moved onto that node.
func (ll *LList) IgnoreHead() {
	ll.PrtHead = false
	ll.Tail.Next = ll.Head.Next
	ll.Head = ll.Tail.Next
}

// RecoverHead reinstates a fresh sentinel node into the ring created by
// IgnoreHead and points Head back at it.
func (ll *LList) RecoverHead() {
	ll.PrtHead = true
	node := &linknode.LNode{}
	node.Next = ll.Tail.Next
	ll.Tail.Next = node
	ll.Head = ll.Tail.Next
}

// Init returns an empty list whose Head and Tail share one sentinel node.
func Init() *LList {
	node := &linknode.LNode{}
	return &LList{
		Head:      node,
		Tail:      node,
		Length:    0,
		PrtHead:   true,
		Duplicate: true,
	}
}

// Test exercises Append, Output, IgnoreHead and RecoverHead.
func Test() {
	ll := Init()
	for i := 0; i < 3; i++ {
		ll.Append(i)
	}
	ll.Output()
	ll.IgnoreHead()
	ll.Output()
	ll.RecoverHead()
	ll.Output()
	fmt.Println()
}
package leetcode /** * Definition for a binary tree node. * type TreeNode struct { * Val int * Left *TreeNode * Right *TreeNode * } */ func postorderTraversal(root *TreeNode) []int { result := []int{} postOrder(root, &result) return result } func postOrder(root *TreeNode, result *[]int) { if root == nil { return } if root != nil { postOrder(root.Left, result) postOrder(root.Right, result) *result = append(*result, root.Val) } } func postorderTraversal(root *TreeNode) []int { if root == nil { return nil } result := []int{} stack := []*TreeNode{} var visited *TreeNode for root != nil || len(stack) != 0 { for root != nil { stack = append(stack, root) root = root.Left } node := stack[len(stack)-1] if node.Right == nil || node.Right == visited { stack = stack[:len(stack)-1] result = append(result, node.Val) visited = node } else { root = node.Right } } return result }
// Tomato static website generator // Copyright Quentin Ribac, 2018 // Free software license can be found in the LICENSE file. package main import ( "fmt" ) // Siteinfo contains the site-wide meta. There should be only one of them. // Title and Subtitle will be printed in the header, // Description will be printed in the menu, // Copyright will be printed in the footer. // Authors must contain all possible authors for the website. type Siteinfo struct { Locales map[string]struct { Path string `json: "path"` Title string `json: "title"` Subtitle string `json: "subtitle"` Description string `json: "description"` Copyright string `json: "copyright"` } `json: "locales"` Authors []Author `json: "authors"` } // MainAuthorHelper prints a html link to the first author of the siteinfo. func (siteinfo Siteinfo) MainAuthorHelper() string { return siteinfo.Authors[0].Helper() } // Title helper prints html for the site title. func (siteinfo Siteinfo) TitleHelper(page *Page, locale string) string { return siteinfo.Locales[locale].Title } // SubtitleHelper prints html for the site subtitle. func (siteinfo Siteinfo) SubtitleHelper(page *Page, locale string) string { return string(Html([]byte(siteinfo.Locales[locale].Subtitle), page, siteinfo.Locales[locale].Path)) } // DescriptionHelper prints html for the site description. func (siteinfo Siteinfo) DescriptionHelper(page *Page, locale string) string { return string(Html([]byte(siteinfo.Locales[locale].Description), page, siteinfo.Locales[locale].Path)) } // CopyrightHelper prints html for the copyright information. func (siteinfo Siteinfo) CopyrightHelper(page *Page, locale string) string { return string(Html([]byte(siteinfo.Locales[locale].Copyright), page, siteinfo.Locales[locale].Path)) } // FindAuthor returns an existing author by its name or nil and an error if there is no author with this name. 
func (si *Siteinfo) FindAuthor(name string) (*Author, error) { for i := range si.Authors { if si.Authors[i].Name == name { return &si.Authors[i], nil } } return nil, fmt.Errorf("unable to find author %q", name) }
// Command example loads an OpenCorpora dictionary from disk and prints the
// default name of every lemma it contains.
package main

import (
	"fmt"
	"log"

	"github.com/bambocher/opencorpora"
)

func main() {
	dictionary, err := opencorpora.NewDictionary("../../dict.opcorpora.xml")
	if err != nil {
		log.Panicln(err)
	}
	for i := range dictionary.Lemmata {
		fmt.Println(dictionary.Lemmata[i].Default.Name)
	}
}
package middleware import ( "github.com/gin-gonic/gin" "github.com/jinzhu/gorm" "github.com/streadway/amqp" ) func SetDBtoContext(db *gorm.DB) gin.HandlerFunc { return func(c *gin.Context) { c.Set("DB", db) c.Next() } } func SetRabbitMQContext(conn *amqp.Connection) gin.HandlerFunc { return func(c *gin.Context) { c.Set("AMQP", conn) c.Next() } }
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package kvserver

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftentry"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
	"github.com/cockroachdb/errors"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

// errSideloadedFileNotFound is the sentinel returned by SideloadStorage
// implementations when the requested payload file does not exist.
var errSideloadedFileNotFound = errors.New("sideloaded file not found")

// SideloadStorage is the interface used for Raft SSTable sideloading.
// Implementations do not need to be thread safe.
type SideloadStorage interface {
	// Dir returns the directory in which the sideloaded files are stored.
	// May or may not exist.
	Dir() string
	// Put writes the given contents to the file specified by the given index
	// and term. Overwrites the file if it already exists.
	Put(_ context.Context, index, term uint64, contents []byte) error
	// Get loads the file at the given index and term. Returns
	// errSideloadedFileNotFound when no such file is present.
	Get(_ context.Context, index, term uint64) ([]byte, error)
	// Purge removes the file at the given index and term. It may also
	// remove any leftover files at the same index and earlier terms, but
	// is not required to do so. When no file at the given index and term
	// exists, returns errSideloadedFileNotFound.
	//
	// Returns the total size of the purged payloads.
	Purge(_ context.Context, index, term uint64) (int64, error)
	// Clear removes files that may have been written by this SideloadStorage.
	Clear(context.Context) error
	// TruncateTo removes all files belonging to an index strictly smaller
	// than the given one. Returns the number of bytes freed, the number of
	// bytes in files that remain, or an error.
	TruncateTo(_ context.Context, index uint64) (freed, retained int64, _ error)
	// Filename returns an absolute path to the file that Get() would return
	// the contents of. Does not check whether the file actually exists.
	Filename(_ context.Context, index, term uint64) (string, error)
}

// maybeSideloadEntriesRaftMuLocked should be called with a slice of "fat"
// entries before appending them to the Raft log. For those entries which are
// sideloadable, this is where the actual sideloading happens: in come fat
// proposals, out go thin proposals. Note that this method is to be called
// before modifications are persisted to the log. The other way around is
// incorrect since an ill-timed crash gives you thin proposals and no files.
//
// The passed-in slice is not mutated.
func (r *Replica) maybeSideloadEntriesRaftMuLocked(
	ctx context.Context, entriesToAppend []raftpb.Entry,
) (_ []raftpb.Entry, sideloadedEntriesSize int64, _ error) {
	// Thin wrapper delegating to the testable implementation with this
	// replica's sideloaded storage.
	return maybeSideloadEntriesImpl(ctx, entriesToAppend, r.raftMu.sideloaded)
}

// maybeSideloadEntriesImpl iterates through the provided slice of entries. If
// no sideloadable entries are found, it returns the same slice. Otherwise, it
// returns a new slice in which all applicable entries have been sideloaded to
// the specified SideloadStorage.
func maybeSideloadEntriesImpl(
	ctx context.Context, entriesToAppend []raftpb.Entry, sideloaded SideloadStorage,
) (_ []raftpb.Entry, sideloadedEntriesSize int64, _ error) {
	// cow ("copy on write") is set once the input slice has been cloned, so
	// the copy happens at most once and only if something is sideloaded.
	cow := false
	for i := range entriesToAppend {
		if sniffSideloadedRaftCommand(entriesToAppend[i].Data) {
			log.Event(ctx, "sideloading command in append")
			if !cow {
				// Avoid mutating the passed-in entries directly. The caller
				// wants them to remain "fat".
				log.Eventf(ctx, "copying entries slice of length %d", len(entriesToAppend))
				cow = true
				entriesToAppend = append([]raftpb.Entry(nil), entriesToAppend...)
			}

			ent := &entriesToAppend[i]
			cmdID, data := DecodeRaftCommand(ent.Data) // cheap

			// Unmarshal the command into an object that we can mutate.
			var strippedCmd kvserverpb.RaftCommand
			if err := protoutil.Unmarshal(data, &strippedCmd); err != nil {
				return nil, 0, err
			}

			if strippedCmd.ReplicatedEvalResult.AddSSTable == nil {
				// Still no AddSSTable; someone must've proposed a v2 command
				// but not because it contains an inlined SSTable. Strange, but
				// let's be future proof.
				log.Warning(ctx, "encountered sideloaded Raft command without inlined payload")
				continue
			}

			// Actually strip the command: detach the payload so the entry
			// written to the log is "thin".
			dataToSideload := strippedCmd.ReplicatedEvalResult.AddSSTable.Data
			strippedCmd.ReplicatedEvalResult.AddSSTable.Data = nil

			// Marshal the command and attach to the Raft entry.
			{
				data := make([]byte, raftCommandPrefixLen+strippedCmd.Size())
				encodeRaftCommandPrefix(data[:raftCommandPrefixLen], raftVersionSideloaded, cmdID)
				_, err := protoutil.MarshalTo(&strippedCmd, data[raftCommandPrefixLen:])
				if err != nil {
					return nil, 0, errors.Wrap(err, "while marshaling stripped sideloaded command")
				}
				ent.Data = data
			}
			// Persist the detached payload before the (thin) log entry can be
			// appended; see the ordering note on the caller.
			log.Eventf(ctx, "writing payload at index=%d term=%d", ent.Index, ent.Term)
			if err := sideloaded.Put(ctx, ent.Index, ent.Term, dataToSideload); err != nil {
				return nil, 0, err
			}
			sideloadedEntriesSize += int64(len(dataToSideload))
		}
	}
	return entriesToAppend, sideloadedEntriesSize, nil
}

// sniffSideloadedRaftCommand reports whether data is encoded with the
// sideloaded raft command version byte.
func sniffSideloadedRaftCommand(data []byte) (sideloaded bool) {
	return len(data) > 0 && data[0] == byte(raftVersionSideloaded)
}

// maybeInlineSideloadedRaftCommand takes an entry and inspects it. If its
// command encoding version indicates a sideloaded entry, it uses the entryCache
// or SideloadStorage to inline the payload, returning a new entry (which must
// be treated as immutable by the caller) or nil (if inlining does not apply)
//
// If a payload is missing, returns an error whose Cause() is
// errSideloadedFileNotFound.
func maybeInlineSideloadedRaftCommand(
	ctx context.Context,
	rangeID roachpb.RangeID,
	ent raftpb.Entry,
	sideloaded SideloadStorage,
	entryCache *raftentry.Cache,
) (*raftpb.Entry, error) {
	// Non-sideloaded entries need no inlining; nil signals "does not apply".
	if !sniffSideloadedRaftCommand(ent.Data) {
		return nil, nil
	}
	log.Event(ctx, "inlining sideloaded SSTable")
	// We could unmarshal this yet again, but if it's committed we
	// are very likely to have appended it recently, in which case
	// we can save work.
	cachedSingleton, _, _, _ := entryCache.Scan(
		nil, rangeID, ent.Index, ent.Index+1, 1<<20,
	)
	if len(cachedSingleton) > 0 {
		log.Event(ctx, "using cache hit")
		return &cachedSingleton[0], nil
	}

	// Make a shallow copy.
	// NOTE(review): ent is already a by-value copy of the caller's entry, so
	// this copy looks redundant — presumably retained for clarity/safety;
	// confirm before removing.
	entCpy := ent
	ent = entCpy

	log.Event(ctx, "inlined entry not cached")
	// Out of luck, for whatever reason the inlined proposal isn't in the cache.
	cmdID, data := DecodeRaftCommand(ent.Data)
	var command kvserverpb.RaftCommand
	if err := protoutil.Unmarshal(data, &command); err != nil {
		return nil, err
	}
	if len(command.ReplicatedEvalResult.AddSSTable.Data) > 0 {
		// The entry we started out with was already "fat". This happens when
		// the entry reached us through a preemptive snapshot (when we didn't
		// have a ReplicaID yet).
		log.Event(ctx, "entry already inlined")
		return &ent, nil
	}

	// Load the detached payload from sideloaded storage and splice it back
	// into the command.
	sideloadedData, err := sideloaded.Get(ctx, ent.Index, ent.Term)
	if err != nil {
		return nil, errors.Wrap(err, "loading sideloaded data")
	}
	command.ReplicatedEvalResult.AddSSTable.Data = sideloadedData
	{
		// Re-encode the (now fat) command into the entry.
		data := make([]byte, raftCommandPrefixLen+command.Size())
		encodeRaftCommandPrefix(data[:raftCommandPrefixLen], raftVersionSideloaded, cmdID)
		_, err := protoutil.MarshalTo(&command, data[raftCommandPrefixLen:])
		if err != nil {
			return nil, err
		}
		ent.Data = data
	}
	return &ent, nil
}

// assertSideloadedRaftCommandInlined asserts that if the provided entry is a
// sideloaded entry, then its payload has already been inlined. Doing so
// requires unmarshalling the raft command, so this assertion should be kept out
// of performance critical paths.
func assertSideloadedRaftCommandInlined(ctx context.Context, ent *raftpb.Entry) {
	// Entries that are not sideloaded carry their payload inline by
	// construction; nothing to check.
	if !sniffSideloadedRaftCommand(ent.Data) {
		return
	}

	var command kvserverpb.RaftCommand
	_, data := DecodeRaftCommand(ent.Data)
	if err := protoutil.Unmarshal(data, &command); err != nil {
		log.Fatalf(ctx, "%v", err)
	}

	if len(command.ReplicatedEvalResult.AddSSTable.Data) == 0 {
		// The entry is "thin" (payload stripped and not re-inlined), which is
		// exactly the state this assertion rejects.
		log.Fatalf(ctx, "found thin sideloaded raft command: %+v", command)
	}
}

// maybePurgeSideloaded removes [firstIndex, ..., lastIndex] at the given term
// and returns the total number of bytes removed. Nonexistent entries are
// silently skipped over.
func maybePurgeSideloaded(
	ctx context.Context, ss SideloadStorage, firstIndex, lastIndex uint64, term uint64,
) (int64, error) {
	var totalSize int64
	for i := firstIndex; i <= lastIndex; i++ {
		size, err := ss.Purge(ctx, i, term)
		// A missing file is expected (see the doc comment) and not an error;
		// its size contribution is zero.
		if err != nil && !errors.Is(err, errSideloadedFileNotFound) {
			return totalSize, err
		}
		totalSize += size
	}
	return totalSize, nil
}
// Package types provides helpers that convert values of arbitrary type to
// common Go types (slices, maps, bools, integers), using type switches for
// the common cases and reflection as a fallback.
package types

import (
	"fmt"
	"reflect"
	"strconv"
)

var (
	// ErrNotSliceOrArray is returned when the value is not a slice.
	ErrNotSliceOrArray = fmt.Errorf("the value is not a slice or array")

	// ErrNotMap is returned when the value is not a map.
	ErrNotMap = fmt.Errorf("the value is not a map")

	// ErrNotString is returned when the type of the key is not string.
	ErrNotString = fmt.Errorf("the type of the key is not string")
)

// bool2Int64 converts bool to int64: true maps to 1, false to 0.
func bool2Int64(b bool) int64 {
	if b {
		return 1
	}
	return 0
}

// ToSlice converts any slice or array type to []interface{}.
//
// Return nil and an error if v is not a slice type.
//
// For []interface{}, []string and []int, they have already been optimized.
func ToSlice(v interface{}) ([]interface{}, error) {
	switch vs := v.(type) {
	case nil:
		return nil, ErrNotSliceOrArray
	case []interface{}:
		// Already the target type; returned as-is (no copy).
		return vs, nil
	case []string:
		results := make([]interface{}, len(vs))
		for i, v := range vs {
			results[i] = v
		}
		return results, nil
	case []int:
		results := make([]interface{}, len(vs))
		for i, v := range vs {
			results[i] = v
		}
		return results, nil
	}

	// Reflection fallback for every other slice/array type.
	_v := reflect.ValueOf(v)
	kind := _v.Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, ErrNotSliceOrArray
	}

	_len := _v.Len()
	results := make([]interface{}, _len)
	for i := 0; i < _len; i++ {
		results[i] = _v.Index(i).Interface()
	}
	return results, nil
}

// MustToSlice is equal to ToSlice, but panic if there is an error.
func MustToSlice(v interface{}) []interface{} {
	_v, err := ToSlice(v)
	if err != nil {
		panic(err)
	}
	return _v
}

// ToMap converts any map type that the key is string to map[string]interface{}.
//
// Return nil and an error if v is not a map type or its key is not the string
// type.
//
// If you ensure that v is a map, and its key is the string type, you can ignore
// the error.
//
// For map[string]interface{}, it has already been optimized.
func ToMap(v interface{}) (map[string]interface{}, error) { if v == nil { return nil, ErrNotMap } if _v, ok := v.(map[string]interface{}); ok { return _v, nil } _v := reflect.ValueOf(v) if !_v.IsValid() || _v.Kind() != reflect.Map { return nil, ErrNotMap } results := make(map[string]interface{}, _v.Len()) for _, key := range _v.MapKeys() { if key.Kind() != reflect.String { return nil, ErrNotString } results[key.String()] = _v.MapIndex(key).Interface() } return results, nil } // MustToMap is equal to ToMap, but panic if there is an error. func MustToMap(v interface{}) map[string]interface{} { _v, err := ToMap(v) if err != nil { panic(err) } return _v } // ToMapKeys returns all the keys of a map. // // If the value is not a map or the key is not string, it returns an error. // But if the value is nil, it will return a empty slice, not an error instead. // // If you ensure that v is a map, and its key is the string type, you can ignore // the error. // // For map[string]interface{}, map[string]string and map[string]int, they have // already been optimized. func ToMapKeys(v interface{}) ([]string, error) { switch _v := v.(type) { case nil: return []string{}, nil case map[string]interface{}: results := make([]string, len(_v)) for k := range _v { results = append(results, k) } return results, nil case map[string]string: results := make([]string, len(_v)) for k := range _v { results = append(results, k) } return results, nil case map[string]int: results := make([]string, len(_v)) for k := range _v { results = append(results, k) } return results, nil } _v := reflect.ValueOf(v) if !_v.IsValid() || _v.Kind() != reflect.Map { return nil, ErrNotMap } results := make([]string, _v.Len()) for i, key := range _v.MapKeys() { if key.Kind() != reflect.String { return nil, ErrNotString } results[i] = key.String() } return results, nil } // MustToMapKeys is equal to ToMapKeys, but panic if there is an error. 
func MustToMapKeys(v interface{}) []string { _v, err := ToMapKeys(v) if err != nil { panic(err) } return _v } // ToMapValues returns all the values of a map. // // If the value is not a map, it returns an error. // But if the value is nil, it will return a empty slice, not an error instead. // // If you ensure that v is a map, you can ignore the error. // // For map[string]interface{}, map[string]string and map[string]int, they have // already been optimized. func ToMapValues(v interface{}) ([]interface{}, error) { switch _v := v.(type) { case nil: return []interface{}{}, nil case map[string]interface{}: results := make([]interface{}, len(_v)) for k := range _v { results = append(results, k) } return results, nil case map[string]string: results := make([]interface{}, len(_v)) for k := range _v { results = append(results, k) } return results, nil case map[string]int: results := make([]interface{}, len(_v)) for k := range _v { results = append(results, k) } return results, nil } _v := reflect.ValueOf(v) if !_v.IsValid() || _v.Kind() != reflect.Map { return nil, ErrNotMap } results := make([]interface{}, _v.Len()) for i, key := range _v.MapKeys() { results[i] = _v.MapIndex(key).Interface() } return results, nil } // MustToMapValues is equal to ToMapValues, but panic if there is an error. func MustToMapValues(v interface{}) []interface{} { _v, err := ToMapValues(v) if err != nil { panic(err) } return _v } // ToBool does the best to convert any certain value to bool. // // When the value is string, for "t", "T", "1", "on", "On", "ON", "true", // "True", "TRUE", it's true, for "f", "F", "0", "off", "Off", "OFF", "false", // "False", "FALSE", "", it's false. // // For other types, if the value is ZERO of the type, it's false. Or it's true. 
func ToBool(v interface{}) (bool, error) { switch _v := v.(type) { case string: switch _v { case "t", "T", "1", "on", "On", "ON", "true", "True", "TRUE": return true, nil case "f", "F", "0", "off", "Off", "OFF", "false", "False", "FALSE", "": return false, nil default: return false, fmt.Errorf("unrecognized bool string: %s", _v) } } return !IsZero(v), nil } // MustToBool is equal to ToBool, but panic if there is an error. func MustToBool(v interface{}) bool { _v, err := ToBool(v) if err != nil { panic(err) } return _v } // ToInt64 does the best to convert any certain value to int64. func ToInt64(_v interface{}) (v int64, err error) { switch _v.(type) { case nil: case complex64, complex128: v = int64(real(reflect.ValueOf(_v).Complex())) case bool: v = int64(bool2Int64(_v.(bool))) case int, int8, int16, int32, int64: v = reflect.ValueOf(_v).Int() case uint, uint8, uint16, uint32, uint64: v = int64(reflect.ValueOf(_v).Uint()) case float32, float64: v = int64(reflect.ValueOf(_v).Float()) case string: return strconv.ParseInt(_v.(string), 10, 64) default: err = fmt.Errorf("unknown type of %t", _v) } return } // MustToInt64 is equal to ToInt64, but panic if there is an error. func MustToInt64(v interface{}) int64 { _v, err := ToInt64(v) if err != nil { panic(err) } return _v } // ToUint64 does the best to convert any certain value to uint64. func ToUint64(_v interface{}) (v uint64, err error) { switch _v.(type) { case nil: case complex64, complex128: v = uint64(real(reflect.ValueOf(_v).Complex())) case bool: v = uint64(bool2Int64(_v.(bool))) case int, int8, int16, int32, int64: v = reflect.ValueOf(_v).Uint() case uint, uint8, uint16, uint32, uint64: v = uint64(reflect.ValueOf(_v).Uint()) case float32, float64: v = uint64(reflect.ValueOf(_v).Float()) case string: return strconv.ParseUint(_v.(string), 10, 64) default: err = fmt.Errorf("unknown type of %t", _v) } return } // MustToUint64 is equal to ToUint64, but panic if there is an error. 
func MustToUint64(v interface{}) uint64 { _v, err := ToUint64(v) if err != nil { panic(err) } return _v } // ToString does the best to convert any certain value to string. func ToString(_v interface{}) (v string, err error) { switch _v.(type) { case nil: case string: v = _v.(string) case []byte: v = string(_v.([]byte)) case bool, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: v = fmt.Sprintf("%d", _v) case float32, float64: v = fmt.Sprintf("%f", _v) default: err = fmt.Errorf("unknown type of %t", _v) } return } // MustToString is equal to ToString, but panic if there is an error. func MustToString(v interface{}) string { _v, err := ToString(v) if err != nil { panic(err) } return _v } // ToFloat64 does the best to convert any certain value to float64. func ToFloat64(_v interface{}) (v float64, err error) { switch _v.(type) { case nil: case complex64, complex128: v = float64(real(reflect.ValueOf(_v).Complex())) case bool: v = float64(bool2Int64(_v.(bool))) case int, int8, int16, int32, int64: v = float64(reflect.ValueOf(_v).Int()) case uint, uint8, uint16, uint32, uint64: v = float64(reflect.ValueOf(_v).Uint()) case float32, float64: v = reflect.ValueOf(_v).Float() case string: return strconv.ParseFloat(_v.(string), 64) default: err = fmt.Errorf("unknown type of %t", _v) } return } // MustToFloat64 is equal to ToFloat64, but panic if there is an error. func MustToFloat64(v interface{}) float64 { _v, err := ToFloat64(v) if err != nil { panic(err) } return _v } // ToComplex128 does the best to convert any certain value to complex128. 
func ToComplex128(_v interface{}) (v complex128, err error) { switch _v.(type) { case nil: case complex64, complex128: v = complex128(reflect.ValueOf(_v).Complex()) case bool: v = complex(float64(bool2Int64(_v.(bool))), 0) case int, int8, int16, int32, int64: v = complex(float64(reflect.ValueOf(_v).Int()), 0) case uint, uint8, uint16, uint32, uint64: v = complex(float64(reflect.ValueOf(_v).Uint()), 0) case float32, float64: v = complex(reflect.ValueOf(_v).Float(), 0) default: err = fmt.Errorf("unknown type of %t", _v) } return } // MustToComplex128 is equal to ToComplex128, but panic if there is an error. func MustToComplex128(v interface{}) complex128 { _v, err := ToComplex128(v) if err != nil { panic(err) } return _v } // ToInt does the best to convert any certain value to int. func ToInt(v interface{}) (int, error) { _v, err := ToInt64(v) return int(_v), err } // MustToInt is equal to ToInt, but panic if there is an error. func MustToInt(v interface{}) int { _v, err := ToInt(v) if err != nil { panic(err) } return _v } // ToUint does the best to convert any certain value to uint. func ToUint(v interface{}) (uint, error) { _v, err := ToUint64(v) return uint(_v), err } // MustToUint is equal to ToUint, but panic if there is an error. func MustToUint(v interface{}) uint { _v, err := ToUint(v) if err != nil { panic(err) } return _v } // ToInt32 does the best to convert any certain value to int32. func ToInt32(v interface{}) (int32, error) { _v, err := ToInt64(v) return int32(_v), err } // MustToInt32 is equal to ToInt32, but panic if there is an error. func MustToInt32(v interface{}) int32 { _v, err := ToInt32(v) if err != nil { panic(err) } return _v } // ToUint32 does the best to convert any certain value to uint32. func ToUint32(v interface{}) (uint32, error) { _v, err := ToUint64(v) return uint32(_v), err } // MustToUint32 is equal to ToUint32, but panic if there is an error. 
func MustToUint32(v interface{}) uint32 { _v, err := ToUint32(v) if err != nil { panic(err) } return _v }
// Package nanovgo is a Go port of the NanoVG antialiased 2D vector drawing
// library.
package nanovgo

import (
	"image"
	"image/color"
	"image/draw"
	"log"

	"github.com/shibukawa/nanovgo/fontstashmini"
)

// Context is the drawing context holding render state, path command buffers
// and font atlas bookkeeping for one frame of vector drawing.
type Context struct {
	gl             *glContext // backend renderer
	commands       []float32  // flattened path command stream (opcode + coords)
	commandX       float32    // last command x coordinate
	commandY       float32    // last command y coordinate
	states         []nvgState // render state stack (top = current)
	cache          nvgPathCache
	tessTol        float32 // bezier tessellation tolerance, scaled by device pixel ratio
	distTol        float32 // point-equality tolerance, scaled by device pixel ratio
	fringeWidth    float32 // antialiasing fringe width (1 device pixel)
	devicePxRatio  float32
	fs             *fontstashmini.FontStash
	fontImages     []int // font atlas texture handles
	fontImageIdx   int   // index of the currently active font atlas
	drawCallCount  int
	fillTriCount   int
	strokeTriCount int
	textTriCount   int
}

// Delete is called when tearing down NanoVGo context
func (c *Context) Delete() {
	// Release all font atlas textures before dropping the GL context.
	for i, fontImage := range c.fontImages {
		if fontImage != 0 {
			c.DeleteImage(fontImage)
			c.fontImages[i] = 0
		}
	}
	c.gl.renderDelete()
	c.gl = nil
}

// BeginFrame begins drawing a new frame
// Calls to NanoVGo drawing API should be wrapped in Context.BeginFrame() & Context.EndFrame()
// Context.BeginFrame() defines the size of the window to render to in relation currently
// set viewport (i.e. glViewport on GL backends). Device pixel ration allows to
// control the rendering on Hi-DPI devices.
// For example, GLFW returns two dimension for an opened window: window size and
// frame buffer size. In that case you would set windowWidth/Height to the window size
// devicePixelRatio to: frameBufferWidth / windowWidth.
func (c *Context) BeginFrame(windowWidth, windowHeight int, devicePixelRatio float32) {
	// Reset the state stack to a single default state.
	c.states = c.states[:0]
	c.Save()
	c.getState().reset()

	c.setDevicePixelRatio(devicePixelRatio)
	c.gl.renderViewport(windowWidth, windowHeight)

	// Reset per-frame statistics.
	c.drawCallCount = 0
	c.fillTriCount = 0
	c.strokeTriCount = 0
	c.textTriCount = 0
}

// EndFrame ends drawing flushing remaining render state.
func (c *Context) EndFrame() {
	c.gl.renderFlush()
	if c.fontImageIdx != 0 {
		fontImage := c.fontImages[c.fontImageIdx]
		if fontImage == 0 {
			return
		}
		iw, ih, _ := c.ImageSize(fontImage)
		// Delete atlases smaller than the current one; compact the rest.
		j := 0
		for i := 0; i < c.fontImageIdx; i++ {
			nw, nh, _ := c.ImageSize(c.fontImages[i])
			if nw < iw || nh < ih {
				c.DeleteImage(c.fontImages[i])
			} else {
				c.fontImages[j] = c.fontImages[i]
				j++
			}
		}
		// make current font image to first
		c.fontImages[j] = c.fontImages[0]
		j++
		c.fontImages[0] = fontImage
		c.fontImageIdx = 0
		// clear all image after j
		for i := j; i < nvgMaxFontImages; i++ {
			c.fontImages[i] = 0
		}
	}
}

// Save pushes and saves the current render state into a state stack.
// A matching Restore() must be used to restore the state.
func (c *Context) Save() {
	// Silently ignore pushes beyond the fixed stack limit.
	if len(c.states) >= nvgMaxStates {
		return
	}
	// Duplicate the top state, or push a zero state for an empty stack.
	if len(c.states) > 0 {
		c.states = append(c.states, c.states[len(c.states)-1])
	} else {
		c.states = append(c.states, nvgState{})
	}
}

// Restore pops and restores current render state.
func (c *Context) Restore() {
	// The bottom state is never popped.
	nStates := len(c.states)
	if nStates > 1 {
		c.states = c.states[:nStates-1]
	}
}

// Block makes Save/Restore block.
func (c *Context) Block(block func()) {
	c.Save()
	defer c.Restore()
	block()
}

// SetStrokeWidth sets the stroke width of the stroke style.
func (c *Context) SetStrokeWidth(width float32) {
	c.getState().strokeWidth = width
}

// SetTransformByValue premultiplies current coordinate system by specified matrix.
// The parameters are interpreted as matrix as follows:
//   [a c e]
//   [b d f]
//   [0 0 1]
func (c *Context) SetTransformByValue(a, b, cc, d, e, f float32) {
	t := TransformMatrix{a, b, cc, d, e, f}
	state := c.getState()
	state.xform = state.xform.PreMultiply(t)
}

// ResetTransform resets current transform to a identity matrix.
func (c *Context) ResetTransform() {
	state := c.getState()
	state.xform = IdentityMatrix()
}

// Translate translates current coordinate system.
func (c *Context) Translate(x, y float32) {
	state := c.getState()
	state.xform = state.xform.PreMultiply(TranslateMatrix(x, y))
}

// CurrentTransform returns the top part (a-f) of the current transformation matrix.
//   [a c e]
//   [b d f]
//   [0 0 1]
// There should be space for 6 floats in the return buffer for the values a-f.
func (c *Context) CurrentTransform() TransformMatrix {
	return c.getState().xform
}

// SetStrokeColor sets current stroke style to a solid color.
func (c *Context) SetStrokeColor(color color.Color) {
	c.getState().stroke.setPaintColor(color)
}

// SetFillColor sets current fill style to a solid color.
func (c *Context) SetFillColor(color color.Color) {
	c.getState().fill.setPaintColor(color)
}

// SetFillImage is not implemented.
// NOTE(review): empty stub — the commented assignment below suggests it was
// intended to set an image fill; confirm before use.
func (c *Context) SetFillImage() {
	//c.getState().fill.image =
}

// CreateImage creates image by loading it from the specified image.Image object.
// Returns handle to the image.
func (c *Context) CreateImage(img image.Image) int {
	bounds := img.Bounds()
	size := bounds.Size()
	var rgba *image.RGBA
	// Reuse the pixel buffer when the input is already RGBA; otherwise
	// convert by drawing into a fresh RGBA image.
	switch i := img.(type) {
	case *image.RGBA:
		rgba = i
	default:
		rgba = image.NewRGBA(bounds)
		draw.Draw(rgba, bounds, img, bounds.Min, draw.Src)
	}
	return c.gl.renderCreateTexture(nvgTextureRGBA, size.X, size.Y, rgba.Pix)
}

// ImageSize returns the dimensions of a created image.
func (c *Context) ImageSize(img int) (int, int, error) {
	return c.gl.renderGetTextureSize(img)
}

// DeleteImage deletes created image.
func (c *Context) DeleteImage(img int) {
	c.gl.renderDeleteTexture(img)
}

// Scissor sets the current scissor rectangle.
// The scissor rectangle is transformed by the current transform.
func (c *Context) Scissor(x, y, w, h float32) {
	state := c.getState()

	// Negative sizes are clamped to zero.
	w = maxF(0.0, w)
	h = maxF(0.0, h)

	// Store the rectangle as a center transform plus half-extents.
	state.scissor.xform = TranslateMatrix(x+w*0.5, y+h*0.5).Multiply(state.xform)
	state.scissor.extent = [2]float32{w * 0.5, h * 0.5}
}

// IntersectScissor calculates intersects current scissor rectangle with the specified rectangle.
// The scissor rectangle is transformed by the current transform. // Note: in case the rotation of previous scissor rect differs from // the current one, the intersection will be done between the specified // rectangle and the previous scissor rectangle transformed in the current // transform space. The resulting shape is always rectangle. func (c *Context) IntersectScissor(x, y, w, h float32) { state := c.getState() if state.scissor.extent[0] < 0 { c.Scissor(x, y, w, h) return } pXform := state.scissor.xform.Multiply(state.xform.Inverse()) ex := state.scissor.extent[0] ey := state.scissor.extent[1] teX := ex * absF(pXform[0]) * ey * absF(pXform[2]) teY := ex * absF(pXform[1]) * ey * absF(pXform[3]) rect := intersectRects(pXform[4]-teX, pXform[5]-teY, teX*2, teY*2, x, y, w, h) c.Scissor(rect[0], rect[1], rect[2], rect[3]) } // ResetScissor resets and disables scissoring. func (c *Context) ResetScissor() { state := c.getState() state.scissor.xform = TransformMatrix{0, 0, 0, 0, 0, 0} state.scissor.extent = [2]float32{-1.0, -1.0} } // BeginPath clears the current path and sub-paths. func (c *Context) BeginPath() { c.commands = c.commands[:0] c.cache.clearPathCache() } // Rect creates new rectangle shaped sub-path. func (c *Context) Rect(x, y, w, h float32) { c.appendCommand([]float32{ float32(nvgMOVETO), x, y, float32(nvgLINETO), x, y + h, float32(nvgLINETO), x + w, y + h, float32(nvgLINETO), x + w, y, float32(nvgCLOSE), }) } // RoundedRect creates new rounded rectangle shaped sub-path. 
// RoundedRect creates new rounded rectangle shaped sub-path.
func (c *Context) RoundedRect(x, y, w, h, r float32) {
	// Degenerate radius: fall back to a plain rectangle.
	if r < 0.1 {
		c.Rect(x, y, w, h)
	} else {
		// Clamp the radius to half the rectangle size, keeping the sign of
		// w/h so negative sizes still work.
		rx := minF(r, absF(w)*0.5) * signF(w)
		ry := minF(r, absF(h)*0.5) * signF(h)
		// Four edges joined by four quarter-circle bezier arcs
		// (Kappa90 approximates a 90-degree arc with a cubic bezier).
		c.appendCommand([]float32{
			float32(nvgMOVETO), x, y + ry,
			float32(nvgLINETO), x, y + h - ry,
			float32(nvgBEZIERTO), x, y + h - ry*(1-Kappa90), x + rx*(1-Kappa90), y + h, x + rx, y + h,
			float32(nvgLINETO), x + w - rx, y + h,
			float32(nvgBEZIERTO), x + w - rx*(1-Kappa90), y + h, x + w, y + h - ry*(1-Kappa90), x + w, y + h - ry,
			float32(nvgLINETO), x + w, y + ry,
			float32(nvgBEZIERTO), x + w, y + ry*(1-Kappa90), x + w - rx*(1-Kappa90), y, x + w - rx, y,
			float32(nvgLINETO), x + rx, y,
			float32(nvgBEZIERTO), x + rx*(1-Kappa90), y, x, y + ry*(1-Kappa90), x, y + ry,
			float32(nvgCLOSE),
		})
	}
}

// Ellipse creates new ellipse shaped sub-path.
func (c *Context) Ellipse(cx, cy, rx, ry float32) {
	// Four quarter-circle bezier arcs approximating the ellipse.
	c.appendCommand([]float32{
		float32(nvgMOVETO), cx - rx, cy,
		float32(nvgBEZIERTO), cx - rx, cy + ry*Kappa90, cx - rx*Kappa90, cy + ry, cx, cy + ry,
		float32(nvgBEZIERTO), cx + rx*Kappa90, cy + ry, cx + rx, cy + ry*Kappa90, cx + rx, cy,
		float32(nvgBEZIERTO), cx + rx, cy - ry*Kappa90, cx + rx*Kappa90, cy - ry, cx, cy - ry,
		float32(nvgBEZIERTO), cx - rx*Kappa90, cy - ry, cx - rx, cy - ry*Kappa90, cx - rx, cy,
		float32(nvgCLOSE),
	})
}

// Circle creates new circle shaped sub-path.
func (c *Context) Circle(cx, cy, r float32) {
	c.Ellipse(cx, cy, r, r)
}

// ClosePath closes current sub-path with a line segment.
func (c *Context) ClosePath() {
	c.appendCommand([]float32{float32(nvgCLOSE)})
}

// PathWinding sets the current sub-path winding, see Winding.
func (c *Context) PathWinding(winding Winding) {
	c.appendCommand([]float32{float32(nvgWINDING), float32(winding)})
}

// DebugDumpPathCache prints cached path information to console
func (c *Context) DebugDumpPathCache() {
	log.Printf("Dumping %d cached paths\n", len(c.cache.paths))
	for i := 0; i < len(c.cache.paths); i++ {
		path := &c.cache.paths[i]
		log.Printf(" - Path %d\n", i)
		if len(path.fills) > 0 {
			log.Printf("   - fill: %d\n", len(path.fills))
			for _, fill := range path.fills {
				log.Printf("%f\t%f\n", fill.x, fill.y)
			}
		}
		if len(path.strokes) > 0 {
			log.Printf("   - strokes: %d\n", len(path.strokes))
			for _, stroke := range path.strokes {
				log.Printf("%f\t%f\n", stroke.x, stroke.y)
			}
		}
	}
}

// Fill fills the current path with current fill style.
func (c *Context) Fill() {
	state := c.getState()
	fillPaint := state.fill

	c.flattenPaths()
	// With antialiasing, expand the fill by the fringe width so edges blend.
	if c.gl.edgeAntiAlias() {
		c.cache.expandFill(c.fringeWidth, Miter, 2.4, c.fringeWidth)
	} else {
		c.cache.expandFill(0.0, Miter, 2.4, c.fringeWidth)
	}
	c.gl.renderFill(&fillPaint, &state.scissor, c.fringeWidth, c.cache.bounds, c.cache.paths)

	// Count triangles
	for i := 0; i < len(c.cache.paths); i++ {
		path := &c.cache.paths[i]
		c.fillTriCount += len(path.fills) - 2
		c.strokeTriCount += len(path.strokes) - 2
		c.drawCallCount += 2
	}
}

// Stroke draws the current path with current stroke style.
func (c *Context) Stroke() {
	state := c.getState()
	// Stroke width is applied in device space, scaled by the current
	// transform's average scale and clamped to a sane range.
	scale := state.xform.getAverageScale()
	strokeWidth := clampF(state.strokeWidth*scale, 0.0, 200.0)
	strokePaint := state.stroke

	if strokeWidth < c.fringeWidth {
		// If the stroke width is less than pixel size, use alpha to emulate coverage.
		strokeWidth = c.fringeWidth
	}
	c.flattenPaths()
	// NOTE(review): debug invariant check left in — a single-point path
	// panics with an empty message. Confirm whether this should be removed
	// or turned into a descriptive error.
	for _, path := range c.cache.paths {
		if path.count == 1 {
			panic("")
		}
	}
	const miterLimit = 10 // TODO: remove
	const lineCap = Butt
	const lineJoin = Miter // or Round
	// With antialiasing, widen the stroke by half a fringe for edge blending.
	if c.gl.edgeAntiAlias() {
		c.cache.expandStroke(strokeWidth*0.5+c.fringeWidth*0.5, lineCap, lineJoin, miterLimit, c.fringeWidth, c.tessTol)
	} else {
		c.cache.expandStroke(strokeWidth*0.5, lineCap, lineJoin, miterLimit, c.fringeWidth, c.tessTol)
	}
	c.gl.renderStroke(&strokePaint, &state.scissor, c.fringeWidth, strokeWidth, c.cache.paths)

	// Count triangles
	for i := 0; i < len(c.cache.paths); i++ {
		path := &c.cache.paths[i]
		c.strokeTriCount += len(path.strokes) - 2
		c.drawCallCount += 2
	}
}

// CreateFont creates font by loading it from the disk from specified file name.
// Returns handle to the font.
func (c *Context) CreateFont(name, filePath string) int {
	return c.fs.AddFont(name, filePath)
}

// CreateFontFromMemory creates image by loading it from the specified memory chunk.
// Returns handle to the font.
func (c *Context) CreateFontFromMemory(name string, data []byte, freeData uint8) int {
	return c.fs.AddFontFromMemory(name, data, freeData)
}

// FindFont finds a loaded font of specified name, and returns handle to it, or -1 if the font is not found.
func (c *Context) FindFont(name string) int {
	return c.fs.GetFontByName(name)
}

// SetFontSize sets the font size of current text style.
func (c *Context) SetFontSize(size float32) {
	c.getState().fontSize = size
}

// SetTextLetterSpacing sets the letter spacing of current text style.
func (c *Context) SetTextLetterSpacing(spacing float32) {
	c.getState().letterSpacing = spacing
}

// SetTextLineHeight sets the line height of current text style.
func (c *Context) SetTextLineHeight(lineHeight float32) {
	c.getState().lineHeight = lineHeight
}

// SetTextAlign sets the text align of current text style.
func (c *Context) SetTextAlign(align Align) {
	c.getState().textAlign = align
}

// SetFontFaceID sets the font face based on specified id of current text style.
func (c *Context) SetFontFaceID(font int) {
	c.getState().fontID = font
}

// SetFontFace sets the font face based on specified name of current text style.
func (c *Context) SetFontFace(font string) {
	c.getState().fontID = c.fs.GetFontByName(font)
}

// Text draws text string at specified location. If end is specified only the sub-string up to the end is drawn.
func (c *Context) Text(x, y float32, str string) float32 {
	return c.TextRune(x, y, []rune(str))
}

// TextRune is an alternate version of Text that accepts rune slice.
// It returns the x position where the next glyph would be drawn.
func (c *Context) TextRune(x, y float32, runes []rune) float32 {
	state := c.getState()
	scale := state.getFontScale() * c.devicePxRatio
	invScale := 1.0 / scale
	if state.fontID == fontstashmini.INVALID {
		return 0
	}

	// Push current text style into the font stash (font-space units).
	c.fs.SetSize(state.fontSize * scale)
	c.fs.SetSpacing(state.letterSpacing * scale)
	c.fs.SetBlur(0)
	c.fs.SetAlign(fontstashmini.FONSAlign(state.textAlign))
	c.fs.SetFont(state.fontID)

	vertexCount := maxI(2, len(runes)) * 4 // conservative estimate.
	vertexes := c.cache.allocVertexes(vertexCount)

	iter := c.fs.TextIterForRunes(x*scale, y*scale, runes)
	prevIter := iter
	index := 0

	for {
		quad, ok := iter.Next()
		if !ok {
			break
		}
		// Missing glyph: try growing the font atlas and re-iterating the
		// failed glyph once.
		if iter.PrevGlyph == nil || iter.PrevGlyph.Index == -1 {
			if !c.allocTextAtlas() {
				break // no memory :(
			}
			// Flush what has been rendered so far against the old atlas.
			if index != 0 {
				c.renderText(vertexes[:index])
				index = 0
			}
			iter = prevIter
			quad, _ = iter.Next() // try again
			if iter.PrevGlyph == nil || iter.PrevGlyph.Index == -1 {
				// still can not find glyph?
				break
			}
		}
		prevIter = iter
		// Transform corners.
		c0, c1 := state.xform.TransformPoint(quad.X0*invScale, quad.Y0*invScale)
		c2, c3 := state.xform.TransformPoint(quad.X1*invScale, quad.Y0*invScale)
		c4, c5 := state.xform.TransformPoint(quad.X1*invScale, quad.Y1*invScale)
		c6, c7 := state.xform.TransformPoint(quad.X0*invScale, quad.Y1*invScale)
		//log.Printf("quad(%c) x0=%d, x1=%d, y0=%d, y1=%d, s0=%d, s1=%d, t0=%d, t1=%d\n", iter.CodePoint, int(quad.X0), int(quad.X1), int(quad.Y0), int(quad.Y1), int(1024*quad.S0), int(quad.S1*1024), int(quad.T0*1024), int(quad.T1*1024))
		// Create triangles
		if index+4 <= vertexCount {
			(&vertexes[index]).set(c2, c3, quad.S1, quad.T0)
			(&vertexes[index+1]).set(c0, c1, quad.S0, quad.T0)
			(&vertexes[index+2]).set(c4, c5, quad.S1, quad.T1)
			(&vertexes[index+3]).set(c6, c7, quad.S0, quad.T1)
			index += 4
		}
	}
	c.flushTextTexture()
	c.renderText(vertexes[:index])
	return iter.X
}

// TextBounds measures the specified text string. Parameter bounds should be a pointer to float[4],
// if the bounding box of the text should be returned. The bounds value are [xmin,ymin, xmax,ymax]
// Returns the horizontal advance of the measured text (i.e. where the next character should drawn).
// Measured values are returned in local coordinate space.
func (c *Context) TextBounds(x, y float32, str string) (float32, []float32) {
	state := c.getState()
	scale := state.getFontScale() * c.devicePxRatio
	invScale := 1.0 / scale
	if state.fontID == fontstashmini.INVALID {
		return 0, nil
	}

	c.fs.SetSize(state.fontSize * scale)
	c.fs.SetSpacing(state.letterSpacing * scale)
	c.fs.SetBlur(0)
	c.fs.SetAlign(fontstashmini.FONSAlign(state.textAlign))
	c.fs.SetFont(state.fontID)

	width, bounds := c.fs.TextBounds(x*scale, y*scale, str)
	if bounds != nil {
		// Use line bounds for the vertical extent, then convert everything
		// back to local coordinates.
		bounds[1], bounds[3] = c.fs.LineBounds(y * scale)
		bounds[0] *= invScale
		bounds[1] *= invScale
		bounds[2] *= invScale
		bounds[3] *= invScale
	}
	return width * invScale, bounds
}

// TextMetrics returns the vertical metrics based on the current text style.
// Measured values are returned in local coordinate space.
func (c *Context) TextMetrics() (float32, float32, float32) {
	state := c.getState()
	scale := state.getFontScale() * c.devicePxRatio
	invScale := 1.0 / scale
	if state.fontID == fontstashmini.INVALID {
		return 0, 0, 0
	}

	c.fs.SetSize(state.fontSize * scale)
	c.fs.SetSpacing(state.letterSpacing * scale)
	c.fs.SetBlur(0)
	c.fs.SetAlign(fontstashmini.FONSAlign(state.textAlign))
	c.fs.SetFont(state.fontID)

	ascender, descender, lineH := c.fs.VerticalMetrics()
	return ascender * invScale, descender * invScale, lineH * invScale
}

// setDevicePixelRatio derives the tessellation/AA tolerances from the ratio
// between device pixels and logical units.
func (c *Context) setDevicePixelRatio(ratio float32) {
	c.tessTol = 0.25 / ratio
	c.distTol = 0.01 / ratio
	c.fringeWidth = 1.0 / ratio
	c.devicePxRatio = ratio
}

// getState returns the render state at the top of the state stack.
func (c *Context) getState() *nvgState {
	return &c.states[len(c.states)-1]
}

// appendCommand transforms the coordinates of the given command stream by the
// current transform and appends it to the path command buffer. It also tracks
// the last pen position (commandX/commandY).
func (c *Context) appendCommand(vals []float32) {
	xForm := c.getState().xform

	// Remember the last coordinate pair, except for commands that carry no
	// end position.
	if nvgCommands(vals[0]) != nvgCLOSE && nvgCommands(vals[0]) != nvgWINDING {
		c.commandX = vals[len(vals)-2]
		c.commandY = vals[len(vals)-1]
	}

	// Transform each command's control points in place.
	i := 0
	for i < len(vals) {
		switch nvgCommands(vals[i]) {
		case nvgMOVETO:
			vals[i+1], vals[i+2] = xForm.TransformPoint(vals[i+1], vals[i+2])
			i += 3
		case nvgLINETO:
			vals[i+1], vals[i+2] = xForm.TransformPoint(vals[i+1], vals[i+2])
			i += 3
		case nvgBEZIERTO:
			vals[i+1], vals[i+2] = xForm.TransformPoint(vals[i+1], vals[i+2])
			vals[i+3], vals[i+4] = xForm.TransformPoint(vals[i+3], vals[i+4])
			vals[i+5], vals[i+6] = xForm.TransformPoint(vals[i+5], vals[i+6])
			i += 7
		case nvgCLOSE:
			i++
		case nvgWINDING:
			i += 2
		default:
			i++
		}
	}
	c.commands = append(c.commands, vals...)
}

// flattenPaths converts the buffered path commands into tessellated point
// lists in the path cache, computes per-segment directions/lengths and the
// overall bounds. It is a no-op when the cache is already populated.
func (c *Context) flattenPaths() {
	cache := &c.cache
	if len(cache.paths) > 0 {
		return
	}
	// Flatten
	i := 0
	for i < len(c.commands) {
		switch nvgCommands(c.commands[i]) {
		case nvgMOVETO:
			cache.addPath()
			cache.addPoint(c.commands[i+1], c.commands[i+2], nvgPtCORNER, c.distTol)
			i += 3
		case nvgLINETO:
			cache.addPoint(c.commands[i+1], c.commands[i+2], nvgPtCORNER, c.distTol)
			i += 3
		case nvgBEZIERTO:
			last := cache.lastPoint()
			if last != nil {
				cache.tesselateBezier(
					last.x, last.y,
					c.commands[i+1], c.commands[i+2],
					c.commands[i+3], c.commands[i+4],
					c.commands[i+5], c.commands[i+6], 0, nvgPtCORNER, c.tessTol, c.distTol)
			}
			i += 7
		case nvgCLOSE:
			cache.closePath()
			i++
		case nvgWINDING:
			cache.pathWinding(Winding(c.commands[i+1]))
			i += 2
		default:
			i++
		}
	}

	cache.bounds = [4]float32{1e6, 1e6, -1e6, -1e6}

	// Calculate the direction and length of line segments.
	for j := 0; j < len(cache.paths); j++ {
		path := &cache.paths[j]
		points := cache.points[path.first:]
		p0 := &points[path.count-1]
		p1Index := 0
		p1 := &points[p1Index]
		// If the first and last points are the same, remove the last and
		// mark the path as closed.
		if ptEquals(p0.x, p0.y, p1.x, p1.y, c.distTol) && path.count > 2 {
			path.count--
			p0 = &points[path.count-1]
			path.closed = true
		}
		// Enforce winding.
		if path.count > 2 {
			area := polyArea(points, path.count)
			if path.winding == Solid && area < 0.0 {
				polyReverse(points, path.count)
			} else if path.winding == Hole && area > 0.0 {
				polyReverse(points, path.count)
			}
		}
		for i := 0; i < path.count; i++ {
			// Calculate segment direction and length
			p0.len, p0.dx, p0.dy = normalize(p1.x-p0.x, p1.y-p0.y)
			// Update bounds
			cache.bounds = [4]float32{
				minF(cache.bounds[0], p0.x),
				minF(cache.bounds[1], p0.y),
				maxF(cache.bounds[2], p0.x),
				maxF(cache.bounds[3], p0.y),
			}
			// Advance
			p1Index++
			p0 = p1
			if len(points) != p1Index {
				p1 = &points[p1Index]
			}
		}
	}
}

// flushTextTexture uploads any dirty region of the font stash texture to the
// current font atlas image.
func (c *Context) flushTextTexture() {
	dirty := c.fs.ValidateTexture()
	if dirty != nil {
		fontImage := c.fontImages[c.fontImageIdx]
		// Update texture
		if fontImage != 0 {
			data, _, _ := c.fs.GetTextureData()
			x := dirty[0]
			y := dirty[1]
			w := dirty[2] - x
			h := dirty[3] - y
			c.gl.renderUpdateTexture(fontImage, x, y, w, h, data)
		}
	}
}

// allocTextAtlas switches to the next (larger) font atlas, creating it if
// needed. Returns false when the maximum number of atlases is exhausted.
func (c *Context) allocTextAtlas() bool {
	c.flushTextTexture()
	if c.fontImageIdx >= nvgMaxFontImages-1 {
		return false
	}
	var iw, ih int
	// if next fontImage already have a texture
	if c.fontImages[c.fontImageIdx+1] != 0 {
		iw, ih, _ = c.ImageSize(c.fontImages[c.fontImageIdx+1])
	} else { // calculate the new font image size and create it.
		// Grow the smaller dimension, capped at the maximum atlas size.
		iw, ih, _ = c.ImageSize(c.fontImages[c.fontImageIdx])
		if iw > ih {
			ih *= 2
		} else {
			iw *= 2
		}
		if iw > nvgMaxFontImageSize || ih > nvgMaxFontImageSize {
			iw = nvgMaxFontImageSize
			ih = nvgMaxFontImageSize
		}
		c.fontImages[c.fontImageIdx+1] = c.gl.renderCreateTexture(nvgTextureALPHA, iw, ih, nil)
	}
	c.fontImageIdx++
	c.fs.ResetAtlas(iw, ih)
	return true
}

// renderText submits the given glyph quad vertexes as a triangle strip using
// the current fill paint and font atlas texture.
func (c *Context) renderText(vertexes []nvgVertex) {
	state := c.getState()
	paint := state.fill

	// Render triangles
	paint.image = c.fontImages[c.fontImageIdx]

	c.gl.renderTriangleStrip(&paint, &state.scissor, vertexes)

	c.drawCallCount++
	c.textTriCount += len(vertexes) / 3
}
/*
Copyright 2019 NetApp.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE!  THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.

// CloudProvider is an enum that is used to determine which cloud a cluster belongs to
type CloudProvider string

const (
	// CloudProviderAWS represents Amazon Web Services
	CloudProviderAWS CloudProvider = "aws"
	// CloudProviderGCP represents Google Cloud Platform
	CloudProviderGCP CloudProvider = "gcp"
	// CloudProviderAzure represents Azure services
	CloudProviderAzure CloudProvider = "azure"
)

// NKSClusterProviderSpecSpec defines the desired state of NKSClusterProviderSpec
type NKSClusterProviderSpecSpec struct {
	NetworkID      string        `json:"networkID,omitempty"`
	NodeSubnetID   string        `json:"nodeSubnetID,omitempty"`
	MasterSubnetID string        `json:"masterSubnetID,omitempty"`
	Region         string        `json:"region,omitempty"`
	CloudProvider  CloudProvider `json:"cloudProvider"`
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file
}

// NKSClusterProviderSpecStatus defines the observed state of NKSClusterProviderSpec
type NKSClusterProviderSpecStatus struct {
	State string `json:"state,omitempty"`
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NKSClusterProviderSpec is the Schema for the nksclusterproviderspecs API
// +k8s:openapi-gen=true
type NKSClusterProviderSpec struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   NKSClusterProviderSpecSpec   `json:"spec,omitempty"`
	Status NKSClusterProviderSpecStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NKSClusterProviderSpecList contains a list of NKSClusterProviderSpec
type NKSClusterProviderSpecList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []NKSClusterProviderSpec `json:"items"`
}

func init() {
	SchemeBuilder.Register(&NKSClusterProviderSpec{}, &NKSClusterProviderSpecList{})
}
/* @File : grpcServer.go @Time : 2022/02/08 10:13:28 @Author : lpp @Version : 1.0.0 @Contact : golpp@qq.com @Desc : grpc服务 */ package grpcServer import ( "context" "net" "grpc-rest/protos" "google.golang.org/grpc" ) // type OrderServiceImpl struct { } func (OrderServiceImpl) Create(context.Context, *protos.CreateOrderRequest) (*protos.CreateOrderResponse, error) { return nil, nil } func (OrderServiceImpl) Retrieve(context.Context, *protos.RetrieveOrderRequest) (*protos.RetrieveOrderResponse, error) { return nil, nil } func (OrderServiceImpl) Update(context.Context, *protos.UpdateOrderRequest) (*protos.UpdateOrderResponse, error) { return nil, nil } func (OrderServiceImpl) Delete(context.Context, *protos.DeleteOrderRequest) (*protos.DeleteOrderResponse, error) { return nil, nil } func (OrderServiceImpl) List(context.Context, *protos.ListOrderRequest) (*protos.ListOrderResponse, error) { return nil, nil } // GrpcServer 为订单服务实现 gRPC 服务 type GrpcServer struct { server *grpc.Server listener net.Listener errCh chan error } //NewGrpcServer 是一个创建 GrpcServer 的便捷函数 func NewGrpcServer(service protos.OrderServiceServer, port string) (GrpcServer, error) { lis, err := net.Listen("tcp", ":"+port) if err != nil { return GrpcServer{}, err } server := grpc.NewServer() orderService := OrderServiceImpl{} protos.RegisterOrderServiceServer(server, &orderService) return GrpcServer{ server: server, listener: lis, errCh: make(chan error), }, nil } // Start 在后台启动服务,将任何错误传入错误通道 func (g GrpcServer) Start() { go func() { g.errCh <- g.server.Serve(g.listener) }() } // Stop 停止 gRPC 服务 func (g GrpcServer) Stop() { g.server.GracefulStop() } //Error 返回服务的错误通道 func (g GrpcServer) Error() chan error { return g.errCh }
package main import ( "bytes" "flag" "fmt" "github.com/bwmarrin/discordgo" "github.com/spf13/viper" "goed/cyborg" "io" "io/ioutil" "log" "net/http" _ "net/http/pprof" "os" "os/signal" "syscall" ) type CyborgBotConfig struct { DiscordConf cyborg.CyborgBotDiscordConfig GalaxyInfoCenter struct { Address string } } func (c *CyborgBotConfig) check() error { return c.DiscordConf.CheckConfig() } func loadConfig(path string) (*CyborgBotConfig, error) { var cfg CyborgBotConfig data, err := ioutil.ReadFile(path) if err != nil { return nil, err } viper.SetConfigType("yaml") err = viper.ReadConfig(bytes.NewBuffer(data)) if err != nil { return nil, err } viper.Unmarshal(&cfg) err = cfg.check() return &cfg, err } func main() { logFile, err := os.OpenFile("log.txt", os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666) if err == nil { mw := io.MultiWriter(os.Stdout, logFile) log.SetOutput(mw) } else { fmt.Println("Failer to create log file %v", err) } debug := flag.Bool("debug", false, "switch on debuging mode") silent := flag.Bool("silent", false, "be silent") pprofAddr := flag.String("pprof", "", "host:port for pprof") flag.Parse() loglevel := discordgo.LogInformational if *debug { loglevel = discordgo.LogDebug } if *silent { loglevel = discordgo.LogWarning } cfg, err := loadConfig(flag.Arg(0)) if err != nil { log.Fatalf("Failed to read config file %s: %v\n", flag.Arg(0), err) return } bot := cyborg.NewCybordBot(&cfg.DiscordConf, cfg.GalaxyInfoCenter.Address) err = bot.Connect(loglevel) if err != nil { fmt.Println("error creating bot,", err) return } if len(*pprofAddr) > 4 { go func() { log.Println(http.ListenAndServe(*pprofAddr, nil)) }() } // Wait here until CTRL-C or other term signal is received. log.Println("Bot is now running. Press CTRL-C to exit.") sc := make(chan os.Signal, 1) signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill) <-sc // Cleanly close down the Discord session. bot.Close() }
package alphabet_soup import ( "fmt" "testing" ) func Test_AlphabetSoup(t *testing.T) { fmt.Println(AlphabetSoup("hello")) fmt.Println(AlphabetSoup("world")) fmt.Println(AlphabetSoup("coderbyte")) fmt.Println(AlphabetSoup("hooplah")) }
package monito import ( "fmt" "io" "time" "github.com/PI-Victor/monito/pkg" "github.com/PI-Victor/monito/pkg/log" ) // Server - Main monitoring service core struct type Server struct { configDir string ConfigFile string output io.Writer MainNode bool } func New(confFile string) *Server { return &Server{ConfigFile: confFile} } //MainService validate the loading of assets. func (m *Server) loadService() error { validateConfig(m.ConfigFile) return nil } // Start - Starts the main service func (m *Server) Start() { log.Info("Loading Services for monito...") if err := m.loadService(); err != nil { log.Panic("Failed to start monito... ") } for { log.Info("Logging System metrics...") ReadSystemMetrics() time.Sleep(10 * time.Second) } } // ValidateAssets - load and test configured components for runtime func (m *Server) ValidateAssets() error { return nil } func validateConfig(confFile string) error { // if there's no config file passed we generate a default one. if len(confFile) == 0 { return nil } configFile, err := util.ReadConfigFile(confFile) if err != nil { return err } fmt.Println(configFile) return nil }
// method forwading in the form of struct embedding - specify a type without field name // embed structure in line 9,10 package main import "fmt" type report struct { sol int temperature location } type temperature struct { high, low celsius } type location struct { lat, long float64 } type celsius float64 func (t temperature) average() celsius { return (t.high + t.low) / 2 } func main() { report := report{ sol: 15, location: location{-4.5895, 137.4417}, temperature: temperature{high: -1.0, low: -78.0}, } fmt.Printf("average %v° C\n", report.average()) // access the same data of other struct fmt.Printf("%v° C\n", report.high) report.high = 32 fmt.Printf("%v° C\n", report.temperature.high) } // average -39.5° C // -1° C // 32° C
package runtime_test import ( . "github.com/d11wtq/bijou/runtime" "testing" ) func TestStringType(t *testing.T) { s := String("example") if s.Type() != StringType { t.Fatalf(`expected s.Type() == StringType, got %s`, s.Type()) } } func TestStringEvalToSelf(t *testing.T) { s := String("example") env := FakeEnv() if v, err := s.Eval(env); err != nil { t.Fatalf(`expected err == nil, got %s`, err) } else if v != String("example") { t.Fatalf(`expected v == String("example"), got %s`, v) } } func TestStringEq(t *testing.T) { if !String("foo").Eq(String("foo")) { t.Fatalf(`expected String("foo").Eq(String("foo")), got false`) } if String("foo").Eq(String("bar")) { t.Fatalf(`expected !String("foo").Eq(String("bar")), got true`) } } func TestStringGt(t *testing.T) { if String("foo").Gt(String("foo")) { t.Fatalf(`expected !String("foo").Gt(String("foo")), got true`) } if !String("foo").Gt(String("bar")) { t.Fatalf(`expected String("foo").Gt(String("bar")), got false`) } if !String("foo").Gt(String("fo")) { t.Fatalf(`expected String("foo").Gt(String("fo")), got false`) } if String("foo").Gt(EmptyList) { t.Fatalf(`expected !String("foo").Gt(EmptyList), got true`) } if !String("41").Gt(Int(42)) { t.Fatalf(`expected String("41").Gt(Int(42)), got false`) } } func TestStringLt(t *testing.T) { if String("foo").Lt(String("foo")) { t.Fatalf(`expected !String("foo").Lt(String("foo")), got true`) } if !String("bar").Lt(String("foo")) { t.Fatalf(`expected String("bar").Lt(String("foo")), got false`) } if !String("fo").Lt(String("foo")) { t.Fatalf(`expected String("fo").Lt(String("foo")), got false`) } if !String("foo").Lt(EmptyList) { t.Fatalf(`expected String("foo").Lt(EmptyList), got false`) } if String("41").Lt(Int(42)) { t.Fatalf(`expected !String("41").Lt(Int(42)), got true`) } } func TestStringHead(t *testing.T) { if v := String("foo").Head(); v != Int('f') { t.Fatalf(`expected String("foo").Head() == Int('f'), got`, v) } if v := String("").Head(); v != Nil { t.Fatalf(`expected 
String("").Head() == Nil, got`, v) } } func TestStringTail(t *testing.T) { if v := String("foo").Tail(); v != String("oo") { t.Fatalf(`expected String("foo").Tail() == String("oo"), got`, v) } if v := String("").Tail(); v != String("") { t.Fatalf(`expected String("").Tail() == String(""), got`, v) } } func TestStringPut(t *testing.T) { v, err := String("foo").Put(Int(100)) if err != nil { t.Fatalf(`expected err == nil, got %s`, err) } if v != String("food") { t.Fatalf(`expected v == String("food"), got`, v) } v, err = String("foo").Put(String("d")) if err == nil { t.Fatalf(`expected err != nil, got nil`) } if v != nil { t.Fatalf(`expected v == nil, got`, v) } } func TestStringEmpty(t *testing.T) { if v := String("foo").Empty(); v == true { t.Fatalf(`expected !String("foo").Empty(), got true`) } if v := String("").Empty(); v == false { t.Fatalf(`expected String("").Empty(), got false`) } } func TestStringString(t *testing.T) { v := String("hello \"world\" \\ \n \r \t") s := v.String() if s != `"hello \"world\" \\ \n \r \t"` { t.Fatalf(`expected s == `+"`"+`"hello \"world\" \\ \n \r \t"`+"` got %s", s) } }
package main import ( "encoding/json" "github.com/go-redis/redis" "github.com/gorilla/mux" "github.com/satori/go.uuid" "log" "net/http" "strings" ) type Person struct { ID string `json:"id,omitempty"` Firstname string `json:"firstname,omitempty"` Lastname string `json:"lastname,omitempty"` Social []SocialMedia `json:"socialmedia,omitempty"` } type SocialMedia struct { Title string `json:"title"` Link string `json:"link"` } const ( db = "people" ) var client *redis.Client func GetPeopleEndpoint(w http.ResponseWriter,req *http.Request){ all,_ := client.HGetAll(db).Result() var people []string for _,v := range all { people = append(people,v) } var p []Person json.Unmarshal([]byte("["+strings.Join(people,",")+"]"),&p) json.NewEncoder(w).Encode(p) } func GetPersonEndpoint(w http.ResponseWriter,req *http.Request){ params := mux.Vars(req) p,_ := client.HGet(db, params["id"]).Bytes() var person Person json.Unmarshal(p,&person) json.NewEncoder(w).Encode(person) } func CreatePersonEndpoint(w http.ResponseWriter,req *http.Request){ var person Person json.NewDecoder(req.Body).Decode(&person) if person.ID == "" { person.ID = uuid.Must(uuid.NewV4()).String() } bytes, _ := json.Marshal(person) err := client.HSet(db, person.ID, bytes).Err() if err != nil{ w.WriteHeader(401) w.Write([]byte(err.Error())) return } json.NewEncoder(w).Encode(person.ID) } func UpdatePersonEndpoint(w http.ResponseWriter,req *http.Request){ params := mux.Vars(req) data,err := client.HGet(db,params["id"]).Bytes() if err != nil{ w.WriteHeader(404) if len(data) == 0 { w.Write([]byte("用户不存在")) return } w.Write([]byte(err.Error())) return } var person,old Person json.NewDecoder(req.Body).Decode(&person) json.Unmarshal(data,&old) person.ID = old.ID bytes, _ := json.Marshal(person) client.HSet(db,person.ID,bytes) json.NewEncoder(w).Encode(&person) } func DeletePersonEndpoint(w http.ResponseWriter,req *http.Request){ params := mux.Vars(req) _, e := client.HDel(db, params["id"]).Result() if e != nil{ 
w.WriteHeader(401) w.Write([]byte(e.Error())) return } json.NewEncoder(w).Encode("删除成功!") } func GetRedisClient() *redis.Client { client := redis.NewClient(&redis.Options{ Addr: "localhost:6379", Password: "", // no password set DB: 0, // use default DB }) return client } func ExampleNewClient() { client := redis.NewClient(&redis.Options{ Addr: "localhost:6379", Password: "", // no password set DB: 0, // use default DB }) person := Person{ ID: "1", Firstname: "刘国", Lastname: "李菲蓉", Social: []SocialMedia{ {Title: "Github", Link: "http://gslg.github.com"}, {Title: "Twitter", Link: "http://www.twitter.com/liuguo"}, }, } data, _ := json.Marshal(person) err := client.HSet("people", "1", data).Err() if err!=nil{ log.Fatalf("Put Error:%s",err) } res,err := client.HGet("people","1").Result() if err!=nil{ log.Fatalf("Get Error:%s",err) } var p Person json.Unmarshal([]byte(res), &p) log.Println(p) } func main() { client=GetRedisClient() router := mux.NewRouter() router.HandleFunc("/people",GetPeopleEndpoint).Methods("GET") router.HandleFunc("/person/{id}",GetPersonEndpoint).Methods("GET") router.HandleFunc("/person",CreatePersonEndpoint).Methods("PUT") router.HandleFunc("/person/{id}",UpdatePersonEndpoint).Methods("POST") router.HandleFunc("/person/{id}",DeletePersonEndpoint).Methods("DELETE") log.Fatal(http.ListenAndServe(":12345",router)) //ExampleNewClient() }
package utils import ( cryptorand "crypto/rand" "encoding/hex" "fmt" ) func NewNamespaceName() (string, error) { p := make([]byte, 12) _, err := cryptorand.Read(p) if err != nil { return "", err } return fmt.Sprintf("test-%s", hex.EncodeToString(p))[:12], nil }
package cmd import "strings" // validateString compares the two strings. func validateString(expected string, actual string) bool { if strings.Compare(expected, actual) != 0 { return false } return true }
// Copyright 2018 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Package p2p contains the common code for P2P tests. // // The most challenging part of P2P tests is that the P2P protocol uses // multicasts for DNS-SD, but multicasts do not work properly with loopback // network interfaces. // // To tackle with the problem, we use network namespaces. We set up an isolated // network namespace named "tastns", create virtual network interface pair // connecting tastns and the default network namespace, and run avahi-daemon in // tastns. Tast test process runs in the default namespace. This way we can make // sure multicast packets go through routing instead of loopback. // // Note that p2p_server and p2p_client also run in the default namespace, but // they communicate with avahi-daemon running in tastns via D-Bus IPC, so they // work just as if they are in tastns. // // Below is a diagram illustrating the setup. // // [Default network namespace] +------------------+-------------+ // | | | // +--------+ +--------------+ +--------------+ | // | Tast |----->| p2p_server | | p2p_client | | // +--------+ HTTP +--------------+ +--------------+ | // | | D-Bus // +------------------------------------------------+ | IPC // | veth-default (169.254.100.1) | | // +------------------------------------------------+ | // | mDNS/DNS-SD | // ----------------------------+----------------------------------------- // | | // +------------------------------------------------+ | // | veth-isolated (169.254.100.2) | | // +------------------------------------------------+ | // | | // +----------------+ | // | avahi-daemon |<----------------------+ // +----------------+ // // [Isolated network namespace "tastns"] // // Note that it would be more preferable to run the Tast test process, instead // of avahi-daemon, in an isolated network namespace, because it is more similar // to the real configurations (e.g. 
iptables rules apply to avahi-daemon). But // it is difficult to make the Tast test process enter a network namespace since // setns(2) works per-thread but Go programs do not have a way to call system // calls on all threads at once. See the following issues for details: // // https://github.com/vishvananda/netns/issues/17 // https://github.com/golang/go/issues/1435 // package p2p import ( "context" "os" "chromiumos/tast/common/testexec" "chromiumos/tast/errors" "chromiumos/tast/local/upstart" "chromiumos/tast/shutil" "chromiumos/tast/testing" ) const ( // ServiceType is the type of the P2P service (in terms of DNS-SD). ServiceType = "_cros_p2p._tcp" // ServicePort is the TCP port number where p2p-http-server serves HTTP // requests by default. ServicePort = 16725 // SharedDir is the directory to hold the files served by p2p-http-server. SharedDir = "/var/cache/p2p" // NSName is the name of an isolated network namespace P2P tests create to // run avahi-daemon in. NSName = "tastns" // DefaultNSIP is the primary IPv4 address of the virtual network interface in // the default network namespace where Tast test process runs. DefaultNSIP = "169.254.100.1" // IsolatedNSIP is the IPv4 address of the virtual network interface in the // "tastns" network namespace where avahi-daemon runs. IsolatedNSIP = "169.254.100.2" defaultIFName = "veth-default" isolatedIFName = "veth-isolated" ) // SetUp does some setup for P2P tests. func SetUp(ctx context.Context) error { if err := upstart.StopJob(ctx, "p2p"); err != nil { return errors.Wrap(err, "failed to stop p2p") } if err := clearSharedDir(); err != nil { return errors.Wrap(err, "failed to clear the p2p shared directory") } if err := createVirtualNetwork(ctx); err != nil { return errors.Wrap(err, "failed to create the virtual network") } // Restart avahi in the network namespace and wait to be ready. 
if err := upstart.RestartJob(ctx, "avahi", upstart.WithArg("NETNS", NSName)); err != nil { return errors.Wrap(err, "failed to restart avahi") } if err := testing.Poll(ctx, func(ctx context.Context) error { return testexec.CommandContext(ctx, "p2p-client", "--num-connections").Run() }, nil); err != nil { return errors.Wrap(err, "failed to wait avahi startup") } return nil } // CleanUp does some cleanup for P2P tests. func CleanUp(ctx context.Context) error { // Restart avahi in the default network namespace. if err := upstart.RestartJob(ctx, "avahi"); err != nil { return errors.Wrap(err, "failed to restart avahi") } if err := destroyVirtualNetwork(ctx, failOnErrors); err != nil { return errors.Wrap(err, "failed to destroy the virtual network") } return nil } func clearSharedDir() error { if err := os.RemoveAll(SharedDir); err != nil { return err } if err := os.Mkdir(SharedDir, 0755); err != nil { return err } return os.Chmod(SharedDir, 0755) } func createVirtualNetwork(ctx context.Context) error { destroyVirtualNetwork(ctx, ignoreErrors) const multicastSubnet = "224.0.0.0/4" for _, args := range [][]string{ // Create an isolated network namespace. {"netns", "add", NSName}, // Create a virtual network interface pair, and put one of them to the isolated // network namespace. {"link", "add", defaultIFName, "type", "veth", "peer", "name", isolatedIFName}, {"link", "set", isolatedIFName, "netns", NSName}, // Set up the network interface in the default network namespace. {"addr", "add", DefaultNSIP + "/24", "dev", defaultIFName}, {"link", "set", defaultIFName, "up"}, {"route", "add", multicastSubnet, "dev", defaultIFName}, // Set up the network interface in the isolated network namespace. 
{"netns", "exec", NSName, "ip", "addr", "add", IsolatedNSIP + "/24", "dev", isolatedIFName}, {"netns", "exec", NSName, "ip", "link", "set", isolatedIFName, "up"}, {"netns", "exec", NSName, "ip", "route", "add", multicastSubnet, "dev", isolatedIFName}, } { cmd := testexec.CommandContext(ctx, "ip", args...) if err := cmd.Run(); err != nil { cmd.DumpLog(ctx) return errors.Wrapf(err, "ip %s failed", shutil.EscapeSlice(args)) } } return nil } type errorMode int const ( failOnErrors errorMode = iota ignoreErrors ) func destroyVirtualNetwork(ctx context.Context, mode errorMode) error { for _, args := range [][]string{ // Delete the network interface in the default network namespace. This automatically // deletes the peer in the isolated network namespace. {"link", "del", defaultIFName}, // Delete the isolated network namespace. {"netns", "del", NSName}, } { cmd := testexec.CommandContext(ctx, "ip", args...) if err := cmd.Run(); err != nil && mode == failOnErrors { cmd.DumpLog(ctx) return errors.Wrapf(err, "ip %s failed", shutil.EscapeSlice(args)) } } return nil }
// 138. Writer interface 各種 寫與印 的方法(func) // Encorder 144. main 和 看圖 // 扯到file 都會友 Write的type //https://golang.org/pkg/os/#NewFile // func WriteFile ¶ https://golang.org/pkg/io/ioutil/#WriteFile // func (*File) Write ¶ https://golang.org/pkg/os/#File.Write package main import ( "fmt" "io" "os" ) func main() { fmt.Println("hello playword") fmt.Fprint(os.Stdout, "hello playword") io.WriteString(os.Stdout, "\nhello playword") }
package wyre import ( "fmt" ) type APIError struct { Language string `json:"language"` ExceptionID string `json:"exceptionId"` CompositeType string `json:"compositeType"` SubType APIErrorSubType `json:"subType"` Message string `json:"message"` Type APIErrorType `json:"type"` Transient bool `json:"transient"` } func (e *APIError) Error() string { if e.Type == ERR_TYPE_VALIDATION { return fmt.Sprintf("[%s] %s(%s): %s", e.ExceptionID, e.Type, e.SubType, e.Message) } else { return fmt.Sprintf("[%s] %s: %s", e.ExceptionID, e.Type, e.Message) } } type APIErrorType string const ( ERR_TYPE_VALIDATION APIErrorType = "ValidationException" ERR_TYPE_UNKNOWN APIErrorType = "UnknownException" ERR_TYPE_INSUFFICIENT_FUNDS APIErrorType = "InsufficientFundsException" ERR_TYPE_RATE_LIMIT APIErrorType = "RateLimitException" ERR_TYPE_ACCESS_DENIED APIErrorType = "AccessDeniedException" ERR_TYPE_TRANSFER APIErrorType = "TransferException" ERR_TYPE_NOT_FOUND APIErrorType = "NotFoundException" ERR_TYPE_CUSTOMER_SUPPORT APIErrorType = "CustomerSupportException" ERR_TYPE_MFA_REQUIRED APIErrorType = "MFARequiredException" ) type APIErrorSubType string const ( ERR_SUBTYPE_FIELD_REQUIRED APIErrorSubType = "FIELD_REQUIRED" ERR_SUBTYPE_INVALID_VALUE APIErrorSubType = "INVALID_VALUE" ERR_SUBTYPE_TRANSACTION_AMOUNT_TOO_SMALL APIErrorSubType = "TRANSACTION_AMOUNT_TOO_SMALL" ERR_SUBTYPE_UNSUPPORTED_SOURCE_CURRENCY APIErrorSubType = "UNSUPPORTED_SOURCE_CURRENCY" ERR_SUBTYPE_SENDER_PROVIDED_ID_IN_USE APIErrorSubType = "SENDER_PROVIDED_ID_IN_USE" ERR_SUBTYPE_CANNOT_SEND_SELF_FUNDS APIErrorSubType = "CANNOT_SEND_SELF_FUNDS" ERR_SUBTYPE_INVALID_PAYMENT_METHOD APIErrorSubType = "INVALID_PAYMENT_METHOD" ERR_SUBTYPE_PAYMENT_METHOD_INACTIVE APIErrorSubType = "PAYMENT_METHOD_INACTIVE" ERR_SUBTYPE_PAYMENT_METHOD_UNSUPPORTED_CHARGE_CURRENCY APIErrorSubType = "PAYMENT_METHOD_UNSUPPORTED_CHARGE_CURRENCY" ERR_SUBTYPE_PAYMENT_METHOD_UNCHARGEABLE APIErrorSubType = "PAYMENT_METHOD_UNCHARGEABLE" 
ERR_SUBTYPE_PAYMENT_METHOD_UNSUPPORTED_DEPOSIT_CURRENCY APIErrorSubType = "PAYMENT_METHOD_UNSUPPORTED_DEPOSIT_CURRENCY" ERR_SUBTYPE_PAYMENT_METHOD_UNDEPOSITABLE APIErrorSubType = "PAYMENT_METHOD_UNDEPOSITABLE" ERR_SUBTYPE_PAYMENT_METHOD_DOESNT_SUPPORT_FOLLOWUPS APIErrorSubType = "PAYMENT_METHOD_DOESNT_SUPPORT_FOLLOWUPS" ERR_SUBTYPE_PAYMENT_METHOD_DOESNT_SUPPORT_MICRODEPOSIT_VERIFICATION APIErrorSubType = "PAYMENT_METHOD_DOESNT_SUPPORT_MICRODEPOSIT_VERIFICATION" )
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "fmt" "strings" exv1beta1 "k8s.io/api/extensions/v1beta1" "github.com/tmax-cloud/registry-operator/controllers/repoctl" "github.com/tmax-cloud/registry-operator/controllers/signctl" "github.com/tmax-cloud/registry-operator/internal/common/config" "github.com/tmax-cloud/registry-operator/internal/schemes" "github.com/tmax-cloud/registry-operator/pkg/image" "github.com/tmax-cloud/registry-operator/internal/utils" corev1 "k8s.io/api/core/v1" "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" regv1 "github.com/tmax-cloud/registry-operator/api/v1" tmaxiov1 "github.com/tmax-cloud/registry-operator/api/v1" ) const ( DefaultHarborCoreIngress = "tmax-harbor-ingress" DefaultHarborNotaryIngress = "tmax-harbor-ingress-notary" DefaultHarborNamespace = "harbor" ) // ImageSignRequestReconciler reconciles a ImageSignRequest object type ImageSignRequestReconciler struct { client.Client Log logr.Logger Scheme *runtime.Scheme } // +kubebuilder:rbac:groups=tmax.io,resources=imagesignrequests,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=tmax.io,resources=imagesignrequests/status,verbs=get;update;patch // +kubebuilder:rbac:groups=tmax.io,resources=signerkeys,verbs=get;list;watch;create;update;patch;delete // 
+kubebuilder:rbac:groups=tmax.io,resources=signerkeys/status,verbs=get;update;patch // +kubebuilder:rbac:groups=apiregistration.k8s.io,resourceNames=v1.registry.tmax.io,resources=apiservices,verbs=get;update;patch // +kubebuilder:rbac:groups=admissionregistration.k8s.io,resourceNames=registry-operator-webhook-cfg,resources=mutatingwebhookconfigurations,verbs=get;update;patch // +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,resourceNames=extension-apiserver-authentication,resources=configmaps,verbs=get // +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch;delete func (r *ImageSignRequestReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { _ = context.Background() log := r.Log.WithValues("imagesignrequest", req.NamespacedName) // get image sign request log.Info("get image sign request") signReq := &tmaxiov1.ImageSignRequest{} if err := r.Get(context.TODO(), req.NamespacedName, signReq); err != nil { log.Error(err, "") return ctrl.Result{}, nil } defer func() { if err := response(r.Client, signReq); err != nil { log.Error(err, "") } }() if signReq.Status.ImageSignResponse == nil { makeInitResponse(signReq) return ctrl.Result{}, nil } if signReq.Status.ImageSignResponse != nil && signReq.Status.ImageSignResponse.Result != regv1.ResponseResultSigning { return ctrl.Result{}, nil } // get image signer log.Info("get image signer") signer := &tmaxiov1.ImageSigner{} if err := r.Get(context.TODO(), types.NamespacedName{Name: signReq.Spec.Signer}, signer); err != nil { log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } // get sign key log.Info("get sign key") 
signerKey := &tmaxiov1.SignerKey{} if err := r.Get(context.TODO(), types.NamespacedName{Name: signReq.Spec.Signer}, signerKey); err != nil { log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } // Get secret regSecret := &corev1.Secret{} if signReq.Spec.DcjSecretName != "" { if err := r.Get(context.TODO(), types.NamespacedName{Name: signReq.Spec.DcjSecretName, Namespace: signReq.Namespace}, regSecret); err != nil { log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } } regCert := &corev1.Secret{} var ca []byte if signReq.Spec.CertSecretName != "" { if err := r.Get(context.TODO(), types.NamespacedName{Name: signReq.Spec.CertSecretName, Namespace: signReq.Namespace}, regCert); err != nil { log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } ca = regCert.Data[schemes.TLSCert] } // Start signing procedure img, err := image.NewImage(signReq.Spec.Image, "", "", ca) if err != nil { log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } // Check if it's Harbor registry isHarbor := false regIng := &exv1beta1.Ingress{} harborNamespace := config.Config.GetString(config.ConfigHarborNamespace) if harborNamespace == "" { harborNamespace = DefaultHarborNamespace } harborCoreIngress := config.Config.GetString(config.ConfigHarborCoreIngress) if harborCoreIngress == "" { harborCoreIngress = DefaultHarborCoreIngress } harborNotaryIngress := config.Config.GetString(config.ConfigHarborNotaryIngress) if harborNotaryIngress == "" { harborNotaryIngress = DefaultHarborNotaryIngress } if err := r.Client.Get(context.Background(), types.NamespacedName{Name: harborCoreIngress, Namespace: harborNamespace}, regIng); err != nil { log.Error(err, "") } notaryURL := "" if regIng.ResourceVersion != "" && len(regIng.Spec.Rules) == 1 && img.Host == regIng.Spec.Rules[0].Host { isHarbor = true notIng := &exv1beta1.Ingress{} if err := 
r.Client.Get(context.Background(), types.NamespacedName{Name: harborNotaryIngress, Namespace: harborNamespace}, notIng); err != nil { log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } if len(notIng.Spec.Rules) == 0 { err := fmt.Errorf("harbor notary ingress is misconfigured") log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } coreScheme := "https" if len(regIng.Spec.TLS) == 0 { coreScheme = "http" } img.ServerURL = fmt.Sprintf("%s://%s", coreScheme, regIng.Spec.Rules[0].Host) notScheme := "https" if len(notIng.Spec.TLS) == 0 { notScheme = "http" } notaryURL = fmt.Sprintf("%s://%s", notScheme, notIng.Spec.Rules[0].Host) } var targetReg *regv1.Registry // List registries and filter target registry - if it's not harbor registry if !isHarbor { log.Info("list registries") targetReg, err = r.findRegistryByHost(img.Host) if err != nil { log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } // Initialize Sign controller signCtl := signctl.NewSigningController(r.Client, r.Scheme, signer, targetReg.Name, targetReg.Namespace) img.ServerURL = signCtl.Regctl.GetEndpoint() notaryURL = signCtl.Regctl.GetNotaryEndpoint() // Verify if registry is valid now if len(img.ServerURL) == 0 { makeResponse(signReq, false, "RegistryMisconfigured", "serverUrl is not set for the registry") return ctrl.Result{}, nil } if len(notaryURL) == 0 { makeResponse(signReq, false, "RegistryMisconfigured", "notaryUrl is not set for the registry, maybe notary is disabled for the registry") return ctrl.Result{}, nil } } if regSecret.ResourceVersion != "" { basicAuth, err := utils.ParseBasicAuth(regSecret, img.Host) if err != nil { log.Error(err, "") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } img.BasicAuth = basicAuth } // Sign image log.Info("sign image") signCtl := signctl.NewSigningController(r.Client, r.Scheme, signer, "", "") if err := 
signCtl.SignImage(signerKey, img, notaryURL, ca); err != nil { log.Error(err, "sign image") makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } if !isHarbor { // Update repository with signer log.Info(fmt.Sprintf("update repository with signer %s", signer.Name)) repoCtl := repoctl.New() repo, err := repoCtl.Get(r.Client, targetReg, img.Name) if err != nil { log.Error(err, fmt.Sprintf("failed to update repository with signer %s", signer.Name)) makeResponse(signReq, false, err.Error(), "") return ctrl.Result{}, nil } for i, v := range repo.Spec.Versions { if v.Version == img.Tag { repo.Spec.Versions[i].Signer = signer.Name break } } if err := repoCtl.Update(r.Client, repo); err != nil { log.Error(err, fmt.Sprintf("failed to update repository with signer %s", signer.Name)) makeResponse(signReq, false, err.Error(), "") } } makeResponse(signReq, true, "", "") return ctrl.Result{}, nil } func (r *ImageSignRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&tmaxiov1.ImageSignRequest{}). Complete(r) } func (r *ImageSignRequestReconciler) findRegistryByHost(hostname string) (*tmaxiov1.Registry, error) { regList := &tmaxiov1.RegistryList{} if err := r.List(context.TODO(), regList); err != nil { return nil, err } var targetReg tmaxiov1.Registry targetFound := false for _, r := range regList.Items { log.Info(r.Name) serverUrl := strings.TrimPrefix(r.Status.ServerURL, "https://") serverUrl = strings.TrimPrefix(serverUrl, "http://") serverUrl = strings.TrimSuffix(serverUrl, "/") if serverUrl == hostname { targetReg = r targetFound = true } } if !targetFound { return nil, fmt.Errorf("target registry is not an internal registry") } return &targetReg, nil }
package daemon import ( "io" "os/exec" ) // Service is a process wrapper for 3rd party binaries. It will spawn an instance // of the binary and manage the life-cycle and IO of the process. type Service interface { Setup() Stop(pid int) (bool, error) List() map[int]*exec.Cmd Command() *exec.Cmd Start() *exec.Cmd Run(io.Writer) (*exec.Cmd, error) NewService(args []string) Service }
package controllers import ( "encoding/json" "fmt" "net/http" "unit-tested-controllers.com/externals" ) func CallApiPlaceholder(apiClient externals.IApiClient) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { posts, err := apiClient.GetPosts() if err != nil { fmt.Fprint(w, "Oops! Something went wrong!") } else { jData, err := json.Marshal(posts) if err != nil { fmt.Fprint(w, "Oops! Something went wrong!") } w.Write(jData) } } }
package model import ( "encoding/json" "github.com/caos/zitadel/internal/errors" "github.com/caos/zitadel/internal/eventstore/models" es_models "github.com/caos/zitadel/internal/eventstore/models" org_model "github.com/caos/zitadel/internal/org/model" ) type OrgIamPolicy struct { models.ObjectRoot Description string `json:"description,omitempty"` State int32 `json:"-"` UserLoginMustBeDomain bool `json:"userLoginMustBeDomain"` } func OrgIamPolicyToModel(policy *OrgIamPolicy) *org_model.OrgIamPolicy { return &org_model.OrgIamPolicy{ ObjectRoot: policy.ObjectRoot, State: org_model.PolicyState(policy.State), UserLoginMustBeDomain: policy.UserLoginMustBeDomain, } } func OrgIamPolicyFromModel(policy *org_model.OrgIamPolicy) *OrgIamPolicy { return &OrgIamPolicy{ ObjectRoot: policy.ObjectRoot, State: int32(policy.State), UserLoginMustBeDomain: policy.UserLoginMustBeDomain, } } func (o *Org) appendAddOrgIamPolicyEvent(event *es_models.Event) error { o.OrgIamPolicy = new(OrgIamPolicy) err := o.OrgIamPolicy.SetData(event) if err != nil { return err } o.OrgIamPolicy.ObjectRoot.CreationDate = event.CreationDate return nil } func (o *Org) appendChangeOrgIamPolicyEvent(event *es_models.Event) error { return o.OrgIamPolicy.SetData(event) } func (o *Org) appendRemoveOrgIamPolicyEvent() { o.OrgIamPolicy = nil } func (p *OrgIamPolicy) Changes(changed *OrgIamPolicy) map[string]interface{} { changes := make(map[string]interface{}, 2) if changed.Description != p.Description { changes["description"] = changed.Description } if changed.UserLoginMustBeDomain != p.UserLoginMustBeDomain { changes["userLoginMustBeDomain"] = changed.UserLoginMustBeDomain } return changes } func (p *OrgIamPolicy) SetData(event *es_models.Event) error { err := json.Unmarshal(event.Data, p) if err != nil { return errors.ThrowInternal(err, "EVENT-7JS9d", "unable to unmarshal data") } return nil }
package main import ( "context" "crypto/sha256" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "os" "strconv" "strings" "time" "github.com/olivere/elastic/v7" "github.com/pimmytrousers/pastescraper/parse" "github.com/pimmytrousers/pastescraper/parse/individualparsers" log "github.com/sirupsen/logrus" "github.com/williballenthin/govt" "gopkg.in/sohlich/elogrus.v7" ) const ( SCRAPINGURL = "https://scrape.pastebin.com/api_scraping.php?limit=%d" RAWURL = "https://scrape.pastebin.com/api_scrape_item.php?i=%s" VTAPI = "https://www.virustotal.com/vtapi/v2/" ) func New(c *config, parser *parse.Parser) (*Scraper, error) { s := &Scraper{} s.outputDir = c.OutputDir s.debug = c.Debug s.parser = parser s.seenKeys = newKeyQueue(c.MaxQueueSize * 10) s.maxQueue = c.MaxQueueSize s.scrapingUrl = SCRAPINGURL s.rawUrl = RAWURL s.pastesPerQuery = c.MaxQueueSize s.logger = log.New() if s.debug { s.logger.SetLevel(log.DebugLevel) } if c.Elastic.Host != "" { var proto string if c.Elastic.HTTPS { proto = "https://" } else { proto = "http://" } var err error var client *elastic.Client url := proto + c.Elastic.Host + ":" + strconv.Itoa(c.Elastic.Port) if c.Elastic.Password != "" && c.Elastic.Username != "" { client, err = elastic.NewSimpleClient(elastic.SetURL(url), elastic.SetBasicAuth(c.Elastic.Username, c.Elastic.Password)) } else { client, err = elastic.NewSimpleClient(elastic.SetURL(url)) } if err != nil { s.logger.Fatal(err) } hook, err := elogrus.NewAsyncElasticHook(client, c.Elastic.Host, log.DebugLevel, c.Elastic.Index) if err != nil { s.logger.Fatal(err) } s.logger.WithFields(log.Fields{ "url": url, }).Debug("connected to ELK instance") s.logger.Hooks.Add(hook) } if c.VTKey != "" { vtc, err := govt.New(govt.SetApikey(c.VTKey), govt.SetUrl(VTAPI)) if err != nil { s.logger.Fatal(err) } s.vtClient = vtc s.logger.Debug("connected to VT") } return s, nil } func (s *Scraper) GetRawPaste(key string) ([]byte, error) { client := http.Client{ Timeout: time.Second * 3, } rawUrlWithKey 
:= fmt.Sprintf(s.rawUrl, key) resp, err := client.Get(rawUrlWithKey) if err != nil { return nil, err } buf, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } if len(buf) == 0 { return nil, errors.New("paste contents has length 0") } if s.debug { s.logger.WithFields(log.Fields{ "rawContentsURL": rawUrlWithKey, "key": key, }).Debug("got raw contents of paste") } return buf, nil } func unmarshalPasteStream(data []byte) ([]PasteMetadata, error) { var r []PasteMetadata err := json.Unmarshal(data, &r) return r, err } func (s *Scraper) getStreamChannel() ([]PasteMetadata, error) { client := http.Client{ Timeout: time.Second * 3, } resp, err := client.Get(fmt.Sprintf(s.scrapingUrl, s.pastesPerQuery)) if err != nil { return nil, err } defer resp.Body.Close() buf, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } stream, err := unmarshalPasteStream(buf) if err != nil { return nil, err } if len(stream) == 0 { return nil, errors.New("unable to acquire a paste stream - most likely due to unwhitelisted IP") } if s.debug { s.logger.WithFields(log.Fields{ "pastesAdded": len(stream), }).Debug("acquired pastes from pastebin API") } return stream, nil } func (s *Scraper) start(ctx context.Context, waitDuration time.Duration) error { for { var stream []PasteMetadata var err error for i := 0; i < 5; i++ { stream, err = s.getStreamChannel() if err != nil { s.logger.WithFields(log.Fields{ "error": err, }).Warning("unable to get paste stream, trying again") } if stream != nil { break } time.Sleep(time.Second * 3) } if stream == nil { s.logger.WithFields(log.Fields{ "error": err, }).Warning("unable to get paste stream") return errors.New("invalid stream") } for _, pasteMetaData := range stream { //start a goroutine to match each paste in the stream go func(metadata PasteMetadata) { pasteKey := metadata.Key if s.seenKeys.doesExist(pasteKey) { if s.debug { s.logger.WithFields(log.Fields{ "full-url": metadata.FullURL, "key": pasteKey, }).Debug("already parsed 
paste") } return } pasteContent, err := s.GetRawPaste(pasteKey) if err != nil { s.logger.WithFields(log.Fields{ "full-url": metadata.FullURL, "key": pasteKey, "error": err, }).Warning("unable to get raw paste") return } matchedSig, action, normalizedContent, err := s.parser.MatchAndNormalize(pasteContent) if err != nil { s.logger.WithFields(log.Fields{ "full-url": metadata.FullURL, "key": pasteKey, "error": err, }).Warning("unable to match against parsers") return } s.seenKeys.add(pasteKey) size, err := strconv.Atoi(metadata.Size) if err != nil { s.logger.WithFields(log.Fields{ "error": err, }).Warning("invalid size") return } if matchedSig != "" { s.logger.WithFields(log.Fields{ "signature match": matchedSig, "author": metadata.User, "size": size, "title": metadata.Title, "full-url": metadata.FullURL, "key": pasteKey, }).Info("matched a paste") filename, err := s.postActionExec(action, normalizedContent) if err != nil { s.logger.WithFields(log.Fields{ "full-url": metadata.FullURL, "key": pasteKey, "error": err, }).Warning("unable to execute post actions, using key as filename") filename = pasteKey } // if a new filename was passed by a post action, then use that otherwise stick to the paste key if filename == "" { filename = pasteKey } err = s.writePaste(matchedSig, filename, normalizedContent) if err != nil { s.logger.WithFields(log.Fields{ "full-url": metadata.FullURL, "key": pasteKey, "error": err, }).Warning("unable to write paste") return } } else { if s.debug { s.logger.WithFields(log.Fields{ "author": metadata.User, "size": size, "title": metadata.Title, "full-url": metadata.FullURL, "key": pasteKey, }).Info("unable to match paste") } } }(pasteMetaData) } select { case <-ctx.Done(): return ctx.Err() default: } time.Sleep(waitDuration) } } func (s *Scraper) writePaste(key string, pasteKey string, content []byte) error { if _, err := os.Stat(s.outputDir); os.IsNotExist(err) { err = os.Mkdir(s.outputDir, 0755) if err != nil { return err } } parseSpecificPath 
:= s.outputDir + "/" + key if _, err := os.Stat(parseSpecificPath); os.IsNotExist(err) { err = os.Mkdir(parseSpecificPath, 0755) if err != nil { return err } } if s.debug { s.logger.WithFields(log.Fields{ "pastekey": pasteKey, "paste location": parseSpecificPath + "/" + pasteKey, }).Debug("wrote paste contents to disk") } err := ioutil.WriteFile(parseSpecificPath+"/"+pasteKey, content, 0644) if err != nil { return err } return nil } func (s *Scraper) postActionExec(action int, content []byte) (string, error) { switch action { case individualparsers.KeyRawExecutable: // TODO: does not upload a byte slice, rather takes a file written to disk and uploads that hexDigest := sha256.Sum256(content) hash := fmt.Sprintf("%x", hexDigest) report, err := s.vtClient.GetFileReport(hash) if err != nil { return "", err } filename := &strings.Builder{} var signatureMatches []string filename.WriteString("p" + strconv.Itoa(int(report.Positives))) for _, scan := range report.Scans { splitSig := strings.Split(scan.Result, ".") for _, word := range splitSig { signatureMatches = append(signatureMatches, word) } } wordsWithCount := wordCountWithBlacklist(signatureMatches, []string{"a", "potentially", "variant", "agent", "gen", "linux", "win32", "generic", "unsafe", "malicious", "heuristic", "application", "suspicious", "win64"}) //p16_elf_go_trojan for i := 0; i < 3; i++ { word := maxCount(wordsWithCount) filename.WriteString("_") filename.WriteString(word) delete(wordsWithCount, word) } // add a timestamp to deal with any collisions filename.WriteString("_" + strconv.Itoa(int(time.Now().Unix()))) s.logger.WithFields(log.Fields{ "filename": filename.String(), "positives": report.Positives, "url": report.Permalink, "MD5": report.Md5, "SHA1": report.Sha1, "SHA256": report.Sha256, }).Info("Successfully retrieved report for Sample") return filename.String(), nil default: return "", nil } }
package mvc

import (
	"github.com/rcrowley/go-metrics"
	"github.com/zerolinke/pudge/src/pudge/proto"
)

// UpdateStatus describes the client self-update state.
type UpdateStatus int

const (
	// TODO enum
	UpdateNone       = -1 * iota // no update: -0
	UpdateInstalling             // update installing: -1
	UpdateReady                  // update ready: -2
	UpdateAvailable              // update available: -3
)

// ConnStatus describes the connection state.
type ConnStatus int

const (
	ConnConnecting   = iota // connecting: 0
	ConnReconnecting        // reconnecting: 1
	ConnOnline              // online: 2
)

// State exposes read access to the client's runtime status: versions,
// tunnels, protocols, update/connection state and traffic metrics.
type State interface {
	GetClientVersion() string // client version info
	GetServerVersion() string // server version info
	GetTunnels() []Tunnel // communication tunnels
	GetProtocols() []proto.Protocol // network protocols
	GetUpdateStatus() UpdateStatus // int, custom update status
	GetConnStatus() ConnStatus // int, custom connection status
	GetConnectionMetrics() (metrics.Meter, metrics.Timer) // TODO metrics.Meter metrics.Timer
	GetBytesInMetrics() (metrics.Counter, metrics.Histogram) // TODO metrics.Counter metrics.Histogram
	GetBytesOutMetrics() (metrics.Counter, metrics.Histogram) //
	SetUpdateStatus(UpdateStatus) //
}

// Tunnel is a communication tunnel: a public URL forwarded over a protocol
// to a local address.
type Tunnel struct {
	PublicUrl string //
	Protocol  proto.Protocol // network protocol
	LocalAddr string //
}

// ConnectionContext pairs a tunnel with the remote client address using it.
type ConnectionContext struct {
	Tunnel     Tunnel
	ClientAddr string
}
package yaraparser_test import ( "reflect" "strings" "testing" "github.com/nbareil/yaraparser-go" ) func TestParser_Test(t *testing.T) { var tests = []struct { s string rule *yaraparser.YaraRule err string }{ // Single field statement { s: `rule foobar {}`, rule: &yaraparser.YaraRule{ Name: "foobar", }, }, { s: `rule foobar { meta: author = "Roger"}`, rule: &yaraparser.YaraRule{ Name: "foobar", Metas: map[string]string{"author": "Roger"}, }, }, { s: `rule foobar { strings: $a = "b"}`, rule: &yaraparser.YaraRule{ Name: "foobar", Strings: map[string]yaraparser.YaraPattern{ "$a": yaraparser.YaraPattern{ Type: yaraparser.RegularString, String: "b", }, }, }, }, { s: `rule foobar { meta: author = "Roger"}`, rule: &yaraparser.YaraRule{ Name: "foobar", Metas: map[string]string{"author": "Roger"}, }, }, // quoted string { s: `rule foobar { meta: author = "she said \"hello\""}`, rule: &yaraparser.YaraRule{ Name: "foobar", Metas: map[string]string{"author": `she said \"hello\"`}, }, }, // comment { s: `/* embedded comment */rule foobar {}`, rule: &yaraparser.YaraRule{ Name: "foobar", }, }, { s: "// simple comment\nrule foobar {}", rule: &yaraparser.YaraRule{ Name: "foobar", }, }, // Errors {s: `foo`, err: `found "foo", expected keyword "rule"`}, {s: `rule * {}`, err: `found "*", expected a valid rule identifier`}, {s: `rule foo {bar}`, err: `found "}", expecting ':'`}, //{s: `rule foo {meta: bar}`, err: `found "}", expecting a value assignment`}, {s: `rule foo : {}`, err: `invalid tag name`}, {s: `rule foo : * {}`, err: `invalid tag name`}, {s: `rule foo * {}`, err: `found "*" after rulename, expecting "{"`}, {s: `rule !`, err: `found "!", expected a valid rule identifier`}, {s: `rule foo { meta: author="foobar" `, err: `found EOF, expecting '}'`}, {s: `rule foo { meta: author="foobar" XXX `, err: `found "", expecting a value assignment`}, } for i, tt := range tests { rule, err := yaraparser.NewParser(strings.NewReader(tt.s)).Parse() if !reflect.DeepEqual(tt.err, 
errstring(err)) { t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err) } else if tt.err == "" && !reflect.DeepEqual(tt.rule, rule) { t.Errorf("%d. %q\n\nYaraRule mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.rule, rule) } } } // errstring returns the string representation of an error. func errstring(err error) string { if err != nil { return err.Error() } return "" }
// Copyright 2021 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package arc import ( "context" "time" "chromiumos/tast/local/arc" "chromiumos/tast/local/arc/optin" "chromiumos/tast/local/chrome" "chromiumos/tast/local/chrome/familylink" "chromiumos/tast/testing" ) func init() { testing.AddTest(&testing.Test{ Func: UnicornPlaystoreOn, LacrosStatus: testing.LacrosVariantUnneeded, Desc: "Checks if PlayStore is Enabled for Unicorn Acccount", Contacts: []string{"cpiao@google.com", "cros-arc-te@google.com"}, Attr: []string{"group:mainline", "informational", "group:arc-functional"}, SoftwareDeps: []string{"chrome"}, Timeout: 4 * time.Minute, Params: []testing.Param{{ ExtraSoftwareDeps: []string{"android_p"}, }, { Name: "vm", ExtraSoftwareDeps: []string{"android_vm"}, }}, Fixture: "familyLinkUnicornArcLogin", }) } func UnicornPlaystoreOn(ctx context.Context, s *testing.State) { cr := s.FixtValue().(chrome.HasChrome).Chrome() tconn := s.FixtValue().(familylink.HasTestConn).TestConn() st, err := arc.GetState(ctx, tconn) if err != nil { s.Fatal("Failed to get ARC state: ", err) } if st.Provisioned { s.Log("ARC is already provisioned. Skipping the Play Store setup") } else { if err := optin.PerformAndClose(ctx, cr, tconn); err != nil { s.Fatal("Failed to optin to Play Store and Close: ", err) } } }
package application import ( "encoding/json" "fmt" "github.com/brooklyncentral/brooklyn-cli/models" "github.com/brooklyncentral/brooklyn-cli/net" ) //WIP func Fetch(network *net.Network) (string, error) { url := "/v1/applications/fetch" body, err := network.SendGetRequest(url) if err != nil { return "", err } // TODO return model return string(body), nil } func Applications(network *net.Network) ([]models.ApplicationSummary, error) { url := fmt.Sprintf("/v1/applications") var appSummary []models.ApplicationSummary body, err := network.SendGetRequest(url) if err != nil { return appSummary, err } err = json.Unmarshal(body, &appSummary) return appSummary, err } func Create(network *net.Network, filePath string) (models.TaskSummary, error) { url := "/v1/applications" var response models.TaskSummary body, err := network.SendPostFileRequest(url, filePath, "application/json") if err != nil { return response, err } err = json.Unmarshal(body, &response) return response, err } func CreateFromBytes(network *net.Network, blueprint []byte) (models.TaskSummary, error) { url := "/v1/applications" var response models.TaskSummary body, err := network.SendPostRequest(url, blueprint) if err != nil { return response, err } err = json.Unmarshal(body, &response) return response, err } // WIP func Descendants(network *net.Network, app string) (string, error) { url := fmt.Sprintf("/v1/applications/%s/descendants", app) body, err := network.SendGetRequest(url) // TODO return model if nil != err { return "", err } return string(body), nil } // WIP func DescendantsSensor(network *net.Network, app, sensor string) (string, error) { url := fmt.Sprintf("/v1/applications/%s/descendants/sensor/%s", app, sensor) body, err := network.SendGetRequest(url) // TODO return model if nil != err { return "", err } return string(body), nil } func Tree(network *net.Network) ([]models.Tree, error) { url := "/v1/applications/fetch" var tree []models.Tree body, err := network.SendGetRequest(url) if err != nil { 
return tree, err } err = json.Unmarshal(body, &tree) return tree, err } func Application(network *net.Network, app string) (models.ApplicationSummary, error) { url := fmt.Sprintf("/v1/applications/%s", app) var appSummary models.ApplicationSummary body, err := network.SendGetRequest(url) if err != nil { return appSummary, err } err = json.Unmarshal(body, &appSummary) return appSummary, err } func Delete(network *net.Network, application string) (models.TaskSummary, error) { url := fmt.Sprintf("/v1/applications/%s", application) var response models.TaskSummary body, err := network.SendDeleteRequest(url) if err != nil { return response, err } err = json.Unmarshal(body, &response) return response, err } // WIP func CreateLegacy(network *net.Network) (string, error) { url := fmt.Sprintf("/v1/applications/createLegacy") body, err := network.SendEmptyPostRequest(url) if err != nil { return "", err } // TODO return model return string(body), nil }
package main import ( "crypto/hmac" "crypto/sha256" "fmt" "html/template" "log" "net/http" "regexp" "sort" "strings" ) const POW_BITS = 12 const POW_FIELD = "__pow__" var indexTemplate *template.Template var enrollTemplate *template.Template var flagTemplate *template.Template var cookieName = "1337_AUTH" type LoginPageData struct { LoggedIn bool Register bool Error string MFA bool Username string } func serveStaticFile(fname string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { http.ServeFile(w, r, fname) } } func serveIndex(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/" { http.NotFoundHandler().ServeHTTP(w, r) return } if err := indexTemplate.ExecuteTemplate(w, "base", nil); err != nil { log.Printf("Error rendering index: %s", err) http.Error(w, "Server Error", http.StatusInternalServerError) } } func serveLogin(w http.ResponseWriter, r *http.Request) { username := r.FormValue("username") password := r.FormValue("password") renderLoginError := func(msg string) { errs := LoginPageData{Error: msg} if err := indexTemplate.ExecuteTemplate(w, "base", errs); err != nil { log.Printf("Error rendering index: %s", err) http.Error(w, "Server Error", http.StatusInternalServerError) } } // Validate username if m, err := regexp.MatchString(`^[a-zA-Z0-9]+$`, username); err != nil { log.Printf("Error regexp: %s", err) http.Error(w, "Server Error", http.StatusInternalServerError) return } else { if !m { renderLoginError("Username must be alphanumeric.") return } } if !verifyProofOfWork(w, r, POW_FIELD, []string{"username", "password"}) { return } valid := verifyUsernamePassword(username, password) if valid == LoginFailed { renderLoginError("Username/password mismatch.") return } // Set cookie session := &Session{ Username: username, LoginPassed: true, } var dest string if valid == LoginNeedsMFA { session.MFAState = Needed dest = "/2fa" } else { session.MFAState = NotNeeded dest = "/flag" } saveSessionCookie(w, session) http.Redirect(w, 
r, dest, http.StatusFound) } func saveSessionCookie(w http.ResponseWriter, s *Session) { cookie := &http.Cookie{ Name: cookieName, Value: s.ToString(), HttpOnly: true, } http.SetCookie(w, cookie) } func getSession(w http.ResponseWriter, r *http.Request) *Session { failed := func() *Session { log.Printf("No session for user when required.") http.Error(w, "Forbidden", http.StatusForbidden) return nil } if val, err := r.Cookie(cookieName); err != nil { if err == http.ErrNoCookie { return failed() } log.Printf("Unexpected error getting cookie: %s", err) return failed() } else { s, err := SessionFromCookie(val.Value) if err != nil { log.Printf("Error getting session: %s", err) return failed() } return s } } func serveRegister(w http.ResponseWriter, r *http.Request) { if _, err := r.Cookie(cookieName); err != http.ErrNoCookie { http.Error(w, "Forbidden", http.StatusForbidden) return } renderRegister := func(estr string) { tmpData := LoginPageData{Register: true} if estr != "" { tmpData.Error = estr } if err := indexTemplate.ExecuteTemplate(w, "base", tmpData); err != nil { log.Printf("Error rendering register: %s", err) http.Error(w, "Server Error", http.StatusInternalServerError) } } if r.Method != "POST" { renderRegister("") return } username := r.FormValue("username") password := r.FormValue("password") if username == "" || password == "" || username == "flag" { renderRegister("Must specify a username/password") return } if m, err := regexp.MatchString(`^[a-zA-Z0-9]+$`, username); err != nil { log.Printf("Error regexp: %s", err) http.Error(w, "Server Error", http.StatusInternalServerError) return } else { if !m { renderRegister("Username must be alphanumeric.") return } } if !verifyProofOfWork(w, r, POW_FIELD, []string{"username", "password"}) { return } if err := dbRegister(username, password); err != nil { renderRegister(fmt.Sprintf("Error registering: %s", err)) return } session := &Session{ Username: username, LoginPassed: true, MFAState: NotNeeded, } 
saveSessionCookie(w, session) http.Redirect(w, r, "/flag", http.StatusFound) } func serve2FA(w http.ResponseWriter, r *http.Request) { session := getSession(w, r) if session == nil { return } continueAndRedir := func() { saveSessionCookie(w, session) http.Redirect(w, r, "/flag", http.StatusFound) } if session.AuthnDone() { continueAndRedir() return } if !session.LoginPassed { http.Error(w, "Forbidden", http.StatusForbidden) return } renderMFA := func(estr string) { tmpData := LoginPageData{MFA: true, Username: session.Username} if estr != "" { tmpData.Error = estr } if err := indexTemplate.ExecuteTemplate(w, "base", tmpData); err != nil { log.Printf("Error rendering 2FA: %s", err) http.Error(w, "Server Error", http.StatusInternalServerError) } } if r.Method != "POST" { renderMFA("") return } username := r.FormValue("username") if session.Username != username { log.Printf("Expected username %s, got %s", session.Username, username) session.Username = username } passcode := r.FormValue("passcode") if passcode == "" { renderMFA("Passcode is required.") return } if !verifyProofOfWork(w, r, POW_FIELD, []string{"username", "passcode"}) { return } if !validateTOTP(username, passcode) { renderMFA("Invalid OTP.") return } session.MFAState = Fulfilled continueAndRedir() } func serveSetup(w http.ResponseWriter, r *http.Request) { session := getSession(w, r) if session == nil { return } if !session.AuthnDone() { http.Redirect(w, r, "/", http.StatusFound) return } var message string if r.Method == "POST" { message = "2FA Enrolled" if err := dbEnroll2FA(session.Username); err != nil { log.Printf("Error enrolling: %s", err) message = "2FA Enrollment Failed!" 
} else { session.MFAState = Fulfilled saveSessionCookie(w, session) } } tmpData := struct { LoggedIn bool Username string Enrolled bool Message string }{ LoggedIn: true, Username: session.Username, Enrolled: session.MFAState == Fulfilled, Message: message, } if err := enrollTemplate.ExecuteTemplate(w, "base", tmpData); err != nil { log.Printf("Error rendering enrollment: %s", err) http.Error(w, "Server Error", http.StatusInternalServerError) } } func serveFlag(w http.ResponseWriter, r *http.Request) { session := getSession(w, r) if session == nil { return } if !session.AuthnDone() { http.Redirect(w, r, "/", http.StatusFound) return } fd := getFlagDataForUser(session.Username) if fd == nil { log.Printf("Error getting flag data.") http.Error(w, "Not Found", http.StatusNotFound) return } if r.Method == "POST" { if fd.Readonly { log.Printf("Update attempt for readonly!") http.Error(w, "Bad Request", http.StatusBadRequest) return } fd.Flag = r.FormValue("flag") fd.Save() } tmpData := struct { LoggedIn bool Username string Flag string Readonly bool }{ LoggedIn: true, Username: session.Username, Flag: fd.Flag, Readonly: fd.Readonly, } if err := flagTemplate.ExecuteTemplate(w, "base", tmpData); err != nil { log.Printf("Error rendering flag page: %s", err) http.Error(w, "Server Error", http.StatusInternalServerError) } } func serveLogout(w http.ResponseWriter, r *http.Request) { cookie := &http.Cookie{ Name: cookieName, Value: "", HttpOnly: true, } http.SetCookie(w, cookie) http.Redirect(w, r, "/", http.StatusFound) } func verifyProofOfWork(w http.ResponseWriter, r *http.Request, powField string, coveredFields []string) bool { sort.Strings(coveredFields) fieldValues := make([]string, 0, len(coveredFields)) for _, k := range coveredFields { fieldValues = append(fieldValues, r.FormValue(k)) } provenString := strings.Join(fieldValues, ";") mac := hmac.New(sha256.New, []byte(r.FormValue(powField))) mac.Write([]byte(provenString)) sum := mac.Sum(nil) if countLeadingZeroBits(sum) 
< POW_BITS { log.Printf("Proof of work failed!") http.Error(w, "Forbidden", http.StatusForbidden) return false } return true } func countLeadingZeroBits(v []byte) int { count := 0 for _, b := range v { if b == byte(0) { count += 8 continue } for i := uint32(7); i >= 0; i-- { if (b & byte(1<<i)) != 0 { break } count++ } break } return count } func loadTemplates() { indexTemplate = template.Must(template.ParseFiles( "templates/base.html", "templates/index.html")) enrollTemplate = template.Must(template.ParseFiles( "templates/base.html", "templates/enroll.html")) flagTemplate = template.Must(template.ParseFiles( "templates/base.html", "templates/flag.html")) } func main() { // Load templates loadTemplates() // Setup handlers fs := http.FileServer(http.Dir("./static/")) http.Handle("/static/", http.StripPrefix("/static/", fs)) http.HandleFunc("/", serveIndex) http.HandleFunc("/login", serveLogin) http.HandleFunc("/logout", serveLogout) http.HandleFunc("/register", serveRegister) http.HandleFunc("/2fa", serve2FA) http.HandleFunc("/setup2fa", serveSetup) http.HandleFunc("/flag", serveFlag) // Prepare to serve port := ":8081" log.Printf("Starting serving on %s", port) log.Fatal(http.ListenAndServe(port, nil)) }