text
stringlengths
11
4.05M
package config

import (
	"errors"
	"net/url"
	"strings"
)

// URL stands for URL from configuration.
type URL string

// String returns the URL as a plain string.
func (u URL) String() string {
	return string(u)
}

// IsEmpty returns true if URL contains the empty string.
func (u URL) IsEmpty() bool {
	return len(u) == 0
}

// ToGoURL converts URL into a parsed Go *url.URL.
func (u URL) ToGoURL() (*url.URL, error) {
	return url.Parse(u.String())
}

// Slashed appends a trailing slash to the URL unless it already has one.
func (u URL) Slashed() URL {
	if strings.HasSuffix(u.String(), "/") {
		return u
	}
	return URL(u + "/")
}

// Validate performs value validation: the URL must be non-empty, parseable,
// and use the http or https scheme.
func (u URL) Validate() error {
	if u.IsEmpty() {
		return errors.New("empty URL")
	}
	g, err := u.ToGoURL()
	if err != nil {
		return err
	}
	// Fix: the original message started with a stray leading space.
	if g.Scheme != "http" && g.Scheme != "https" {
		return errors.New("expected http(s) scheme, but got " + g.Scheme)
	}
	return nil
}
// Package taco_box held taco-box identifier constants and membership helpers.
// NOTE(review): every declaration below was commented out by a previous
// author; it is preserved verbatim (only reformatted) rather than deleted.
// If the package is truly dead, consider removing the file entirely.
package taco_box

// const TacoBoxMyDaily = "@MY_DAILY"
// const TacoBoxImportant = "@IMPORTANT"
// const TacoBoxTask = "@TASK"
// const TacoBoxSchedule = "@SCHEDULE"
// const TacoBoxAll = "@ALL"

// var CommonTacoBoxes = [...]string{TacoBoxMyDaily, TacoBoxImportant, TacoBoxTask, TacoBoxSchedule, TacoBoxAll}
// var TypeTacoBoxes = [...]string{TacoBoxMyDaily, TacoBoxImportant, TacoBoxTask, TacoBoxSchedule}

// func ContainCommonTacoBox(e string) bool {
// 	for _, a := range CommonTacoBoxes[:] {
// 		if a == e {
// 			return true
// 		}
// 	}
// 	return false
// }

// func ContainTypeTacoBox(e string) bool {
// 	for _, a := range TypeTacoBoxes[:] {
// 		if a == e {
// 			return true
// 		}
// 	}
// 	return false
// }
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package distsql

import (
	"context"
	"math"
	"math/rand"
	"sort"
	"strconv"
	"strings"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/col/coldata"
	"github.com/cockroachdb/cockroach/pkg/col/coldataext"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/sql/colcontainer"
	"github.com/cockroachdb/cockroach/pkg/sql/colexec"
	"github.com/cockroachdb/cockroach/pkg/sql/colexec/colbuilder"
	"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs"
	"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
	"github.com/cockroachdb/cockroach/pkg/sql/colmem"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
	"github.com/cockroachdb/cockroach/pkg/sql/rowexec"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/util/randutil"
	"github.com/cockroachdb/errors"
)

// verifyColOperatorArgs bundles the configuration for a single
// processor-vs-columnar-operator verification run.
type verifyColOperatorArgs struct {
	// anyOrder determines whether the results should be matched in order (when
	// anyOrder is false) or as sets (when anyOrder is true).
	anyOrder bool
	// colIdxsToCheckForEquality determines which columns of the rows to use
	// for equality check. If left unset, full rows are compared. Use this
	// with caution and leave a comment that justifies using this knob.
	colIdxsToCheckForEquality []int
	// inputTypes holds the schema of each input; parallel to inputs.
	inputTypes [][]*types.T
	// inputs holds the rows fed to both the processor and the operator.
	inputs []rowenc.EncDatumRows
	// pspec describes the processor under test (core, post-processing, result
	// types).
	pspec *execinfrapb.ProcessorSpec
	// forceDiskSpill, if set, will force the operator to spill to disk.
	forceDiskSpill bool
	// forcedDiskSpillMightNotOccur determines whether we error out if
	// forceDiskSpill is true but the spilling doesn't occur. Please leave an
	// explanation for why that could be the case.
	forcedDiskSpillMightNotOccur bool
	// numForcedRepartitions specifies a number of "repartitions" that a
	// disk-backed operator should be forced to perform. "Repartition" can mean
	// different things depending on the operator (for example, for hash joiner
	// it is dividing original partition into multiple new partitions; for sorter
	// it is merging already created partitions into new one before proceeding
	// to the next partition from the input).
	numForcedRepartitions int
	// rng (if set) will be used to randomize batch size.
	rng *rand.Rand
}

// verifyColOperator passes inputs through both the processor defined by pspec
// and the corresponding columnar operator and verifies that the results match.
func verifyColOperator(t *testing.T, args verifyColOperatorArgs) error {
	// Tolerance for float comparison; see datumsMatch below.
	const floatPrecision = 0.0000001
	rng := args.rng
	if rng == nil {
		rng, _ = randutil.NewPseudoRand()
	}
	// Half of the time, shrink the batch size to exercise batching edge cases.
	if rng.Float64() < 0.5 {
		randomBatchSize := 1 + rng.Intn(3)
		if err := coldata.SetBatchSizeForTests(randomBatchSize); err != nil {
			return err
		}
	}

	// --- Test environment: temp storage, eval context, disk monitor. ---
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	tempEngine, tempFS, err := storage.NewTempEngine(ctx, base.DefaultTestTempStorageConfig(st), base.DefaultTestStoreSpec)
	if err != nil {
		return err
	}
	defer tempEngine.Close()
	evalCtx := tree.MakeTestingEvalContext(st)
	defer evalCtx.Stop(ctx)
	diskMonitor := execinfra.NewTestDiskMonitor(ctx, st)
	defer diskMonitor.Stop(ctx)
	flowCtx := &execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg: &execinfra.ServerConfig{
			Settings:    st,
			TempStorage: tempEngine,
		},
		DiskMonitor: diskMonitor,
	}
	flowCtx.Cfg.TestingKnobs.ForceDiskSpill = args.forceDiskSpill

	// Each engine gets its own repeatable copy of the same input rows.
	inputsProc := make([]execinfra.RowSource, len(args.inputs))
	inputsColOp := make([]execinfra.RowSource, len(args.inputs))
	for i, input := range args.inputs {
		inputsProc[i] = execinfra.NewRepeatableRowSource(args.inputTypes[i], input)
		inputsColOp[i] = execinfra.NewRepeatableRowSource(args.inputTypes[i], input)
	}

	// --- Row-by-row processor under test. ---
	proc, err := rowexec.NewProcessor(
		ctx, flowCtx, 0, &args.pspec.Core, &args.pspec.Post,
		inputsProc, []execinfra.RowReceiver{nil}, nil,
	)
	if err != nil {
		return err
	}
	outProc, ok := proc.(execinfra.RowSource)
	if !ok {
		return errors.New("processor is unexpectedly not a RowSource")
	}

	// --- Equivalent columnar operator. ---
	acc := evalCtx.Mon.MakeBoundAccount()
	defer acc.Close(ctx)
	testAllocator := colmem.NewAllocator(ctx, &acc, coldataext.NewExtendedColumnFactory(&evalCtx))
	columnarizers := make([]colexecop.Operator, len(args.inputs))
	for i, input := range inputsColOp {
		c, err := colexec.NewBufferingColumnarizer(ctx, testAllocator, flowCtx, int32(i)+1, input)
		if err != nil {
			return err
		}
		columnarizers[i] = c
	}

	constructorArgs := &colexecargs.NewColOperatorArgs{
		Spec:                args.pspec,
		Inputs:              columnarizers,
		StreamingMemAccount: &acc,
		DiskQueueCfg: colcontainer.DiskQueueCfg{
			FS:        tempFS,
			GetPather: colcontainer.GetPatherFunc(func(context.Context) string { return "" }),
		},
		FDSemaphore: colexecop.NewTestingSemaphore(256),
		// TODO(yuzefovich): adjust expression generator to not produce
		// mixed-type timestamp-related expressions and then disallow the
		// fallback again.
		ProcessorConstructor: rowexec.NewProcessor,
	}
	// spilled is flipped by the testing knob callback when disk spilling
	// actually happens; checked at the end when forceDiskSpill is set.
	var spilled bool
	if args.forceDiskSpill {
		constructorArgs.TestingKnobs.SpillingCallbackFn = func() { spilled = true }
	}
	constructorArgs.TestingKnobs.NumForcedRepartitions = args.numForcedRepartitions
	result, err := colbuilder.NewColOperator(ctx, flowCtx, constructorArgs)
	if err != nil {
		return err
	}
	defer func() {
		for _, memAccount := range result.OpAccounts {
			memAccount.Close(ctx)
		}
		for _, memMonitor := range result.OpMonitors {
			memMonitor.Stop(ctx)
		}
	}()

	outColOp, err := colexec.NewMaterializer(
		flowCtx,
		int32(len(args.inputs))+2,
		result.Op,
		args.pspec.ResultTypes,
		nil, /* output */
		nil, /* getStats */
		result.MetadataSources,
		result.ToClose,
		nil, /* cancelFlow */
	)
	if err != nil {
		return err
	}

	outProc.Start(ctx)
	outColOp.Start(ctx)
	defer outProc.ConsumerClosed()
	defer outColOp.ConsumerClosed()

	// Rows are compared via their string representations.
	printRowForChecking := func(r rowenc.EncDatumRow) []string {
		res := make([]string, len(args.pspec.ResultTypes))
		for i, col := range r {
			res[i] = col.String(args.pspec.ResultTypes[i])
		}
		return res
	}

	// --- Drain both sources, collecting rows and metadata. ---
	var procRows, colOpRows [][]string
	var procMetas, colOpMetas []execinfrapb.ProducerMetadata
	for {
		rowProc, metaProc := outProc.Next()
		if rowProc != nil {
			procRows = append(procRows, printRowForChecking(rowProc))
		}
		if metaProc != nil {
			if metaProc.Err == nil {
				return errors.Errorf("unexpectedly processor returned non-error "+
					"meta\n%+v", metaProc)
			}
			procMetas = append(procMetas, *metaProc)
		}
		rowColOp, metaColOp := outColOp.Next()
		if rowColOp != nil {
			colOpRows = append(colOpRows, printRowForChecking(rowColOp))
		}
		if metaColOp != nil {
			if metaColOp.Err == nil {
				return errors.Errorf("unexpectedly columnar operator returned "+
					"non-error meta\n%+v", metaColOp)
			}
			colOpMetas = append(colOpMetas, *metaColOp)
		}
		if rowProc == nil && metaProc == nil && rowColOp == nil && metaColOp == nil {
			break
		}
	}

	// --- Compare metadata (errors) first. ---
	if len(procMetas) != len(colOpMetas) {
		return errors.Errorf("different number of metas returned:\n"+
			"processor returned\n%+v\n\ncolumnar operator returned\n%+v",
			procMetas, colOpMetas)
	}
	// It is possible that a query will hit an error (for example, integer out of
	// range). We then expect that both the processor and the operator returned
	// such error.
	if len(procMetas) > 1 {
		return errors.Errorf("unexpectedly multiple metas returned:\n"+
			"processor returned\n%+v\n\ncolumnar operator returned\n%+v",
			procMetas, colOpMetas)
	} else if len(procMetas) == 1 {
		procErr := procMetas[0].Err.Error()
		colOpErr := colOpMetas[0].Err.Error()
		if procErr != colOpErr {
			return errors.Errorf("different errors returned:\n"+
				"processor returned\n%+v\ncolumnar operator returned\n%+v",
				procMetas[0].Err, colOpMetas[0].Err)
		}
		// The errors are the same, so the rows that were returned do not matter.
		return nil
	}

	// --- Compare rows. ---
	if len(procRows) != len(colOpRows) {
		return errors.Errorf("different number of rows returned:\n"+
			"processor returned\n%+v\n\ncolumnar operator returned\n%+v\n"+
			"processor metas\n%+v\ncolumnar operator metas\n%+v\n",
			procRows, colOpRows, procMetas, colOpMetas)
	}

	datumsMatch := func(expected, actual string, typ *types.T) (bool, error) {
		switch typ.Family() {
		case types.FloatFamily:
			// Some operations on floats (for example, aggregation) can produce
			// slightly different results in the row-by-row and vectorized engines.
			// That's why we handle them separately.
			// We first try direct string matching. If that succeeds, then great!
			if expected == actual {
				return true, nil
			}
			// If only one of the values is NULL, then the datums do not match.
			if expected == `NULL` || actual == `NULL` {
				return false, nil
			}
			// Now we will try parsing both strings as floats and check whether they
			// are within allowed precision from each other.
			expFloat, err := strconv.ParseFloat(expected, 64)
			if err != nil {
				return false, err
			}
			actualFloat, err := strconv.ParseFloat(actual, 64)
			if err != nil {
				return false, err
			}
			return math.Abs(expFloat-actualFloat) < floatPrecision, nil
		default:
			return expected == actual, nil
		}
	}

	// Default to comparing every result column.
	colIdxsToCheckForEquality := args.colIdxsToCheckForEquality
	if len(colIdxsToCheckForEquality) == 0 {
		colIdxsToCheckForEquality = make([]int, len(args.pspec.ResultTypes))
		for i := range colIdxsToCheckForEquality {
			colIdxsToCheckForEquality[i] = i
		}
	}

	if args.anyOrder {
		// The rows are allowed to be in any order, so in order to use the
		// ordered comparison below we will sort rows from both the processor
		// and the operator lexicographically.
		getLessFn := func(rows [][]string) func(int, int) bool {
			return func(i, j int) bool {
				for _, colIdx := range colIdxsToCheckForEquality {
					if cmp := strings.Compare(rows[i][colIdx], rows[j][colIdx]); cmp != 0 {
						return cmp < 0
					}
				}
				return false
			}
		}
		sort.Slice(procRows, getLessFn(procRows))
		sort.Slice(colOpRows, getLessFn(colOpRows))
	}

	for i, expStrRow := range procRows {
		retStrRow := colOpRows[i]
		for _, colIdx := range colIdxsToCheckForEquality {
			match, err := datumsMatch(expStrRow[colIdx], retStrRow[colIdx], args.pspec.ResultTypes[colIdx])
			if err != nil {
				return errors.Errorf("error while parsing datum in rows\n%v\n%v\n%s",
					expStrRow, retStrRow, err.Error())
			}
			if !match {
				return errors.Errorf(
					"different results on row %d;\nexpected:\n%s\ngot:\n%s",
					i, expStrRow, retStrRow,
				)
			}
		}
	}

	if args.forceDiskSpill {
		// Check that the spilling did occur.
		if !spilled && !args.forcedDiskSpillMightNotOccur {
			return errors.Errorf("expected spilling to disk but it did *not* occur")
		}
	}
	return nil
}
package main

import (
	"fmt"
	"strconv"
)

// Person demonstrates value vs pointer receivers on a simple struct.
type Person struct {
	first, last, gender string
	age                 int
}

// greet returns a short self-introduction.
// Value receiver: it only reads fields.
func (p Person) greet() string {
	return "Hello, my name is " + p.first + " " + p.last + " and I'm " + strconv.Itoa(p.age)
}

// hasBirthday increments the person's age.
// Pointer receiver: it mutates the struct.
func (p *Person) hasBirthday() {
	p.age++
}

// getMarried changes the person's last name to spouseLastName,
// except for persons with gender "M", who keep their name.
func (p *Person) getMarried(spouseLastName string) {
	// Guard clause instead of if/else-after-return (idiomatic Go).
	if p.gender == "M" {
		return
	}
	p.last = spouseLastName
}

func main() {
	// Long way (field names, like a map literal).
	person := Person{first: "Kelly", last: "Deng", age: 21, gender: "F"}
	// Short way (positional: first, last, gender, age).
	person2 := Person{"Tammy", "Chan", "F", 21}
	fmt.Println(person)
	fmt.Println(person.first)
	person.first = "Avacadro"
	fmt.Println(person.first)
	fmt.Println(person2.last)
	fmt.Println(person.greet())
	person.hasBirthday()
	person.getMarried("Crunk")
	fmt.Println(person.greet())
}
package main

import (
	"fmt"
	"math"
)

// Square is a shape with four equal sides.
type Square struct {
	side float64
}

// area returns the square's surface area (side squared).
func (s Square) area() float64 {
	return s.side * s.side
}

// Circle is a shape defined by its radius.
type Circle struct {
	radius float64
}

// area returns the circle's surface area (pi * r^2).
func (c Circle) area() float64 {
	return math.Pi * c.radius * c.radius
}

// Shape is satisfied by any value that can report its area.
type Shape interface {
	area() float64
}

// info prints any value implementing Shape together with its area.
// Only the methods declared on Shape are reachable through sh here.
func info(sh Shape) {
	fmt.Println("z...(struct)", sh)
	fmt.Println("z area...(area)", sh.area())
}

func main() {
	sq := Square{10}
	ci := Circle{5}
	info(sq)
	info(ci)
}

// Interfaces give us polymorphism: info accepts any struct whose method set
// satisfies Shape, and inside info only the Shape methods are visible —
// the concrete type's extra fields and methods are hidden.
package qcloud

import "yunion.io/x/pkg/errors"

// SElasticcacheTask mirrors the task description returned by Tencent Cloud's
// Redis DescribeTaskInfo API (field names follow the upstream JSON keys).
type SElasticcacheTask struct {
	Status      string `json:"Status"`
	StartTime   string `json:"StartTime"`
	TaskType    string `json:"TaskType"`
	InstanceID  string `json:"InstanceId"`
	TaskMessage string `json:"TaskMessage"`
	RequestID   string `json:"RequestId"`
}

// DescribeTaskInfo fetches the status of the async Redis task identified by
// taskId via the region's redis API endpoint.
// API doc: https://cloud.tencent.com/document/product/239/30601
func (self *SRegion) DescribeTaskInfo(taskId string) (*SElasticcacheTask, error) {
	params := map[string]string{}
	params["TaskId"] = taskId
	resp, err := self.redisRequest("DescribeTaskInfo", params)
	if err != nil {
		return nil, errors.Wrap(err, "DescribeTaskInfo")
	}
	ret := &SElasticcacheTask{}
	err = resp.Unmarshal(ret)
	if err != nil {
		return nil, errors.Wrap(err, "Unmarshal")
	}
	return ret, nil
}
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"time"

	"urfu-abiturient-api/ent"
	"urfu-abiturient-api/ent/abituriententry"

	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
	_ "github.com/lib/pq"
)

// main wires up the abiturient API: opens the Postgres-backed ent client,
// ensures the schema, and serves three endpoints on :8000.
func main() {
	// DB_URL overrides the local default connection string.
	DBURL := os.Getenv("DB_URL")
	if DBURL == "" {
		DBURL = "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable"
	}
	client, err := ent.Open("postgres", DBURL)
	if err != nil {
		// NOTE(review): message says "sqlite" but the driver is postgres —
		// left unchanged here; consider fixing the text.
		log.Fatalf("failed opening connection to sqlite: %v", err)
	}
	defer client.Close()
	// Auto-migrate the ent schema on startup.
	if err := client.Schema.Create(context.Background()); err != nil {
		log.Fatalf("failed creating schema resources: %v", err)
	}
	e := echo.New()
	e.HideBanner = true
	e.Use(middleware.Logger(), middleware.Recover(), middleware.Gzip())
	// Dictionary cache is shared across requests via the echo context.
	dictCache := NewDictionaryCache(GetDictFromDB(client))
	e.Use(func(h echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			c.Set("dict_cache", dictCache)
			return h(c)
		}
	})
	// GET /last_update_time: timestamp of the last data refresh (RFC3339).
	e.GET("/last_update_time", func(c echo.Context) error {
		return c.JSON(http.StatusOK, LastUpdateTimeResponse{LastUpdateTime: client.LastUpdated.GetX(context.TODO(), 1).LastUpdated.Format(time.RFC3339)})
	})
	// POST /query: filtered search over abiturient entries, ordered by sum
	// descending. Requires at least one of: program, name (>= 3 chars), number.
	e.POST("/query", func(c echo.Context) error {
		var request QueryRequest
		err := c.Bind(&request)
		if err != nil {
			return err
		}
		if request.Program == "" && len(request.Name) < 3 && request.Number == 0 {
			return echo.NewHTTPError(400, "program or name more that 3 symbols or number is required")
		}
		q := client.AbiturientEntry.Query().
			Order(ent.Desc(abituriententry.FieldSum))
		// Each filter below is applied only when the corresponding request
		// field is set.
		if len(request.Basis) > 0 {
			q = q.Where(abituriententry.BasisIn(request.Basis...))
		}
		if len(request.Form) > 0 {
			forms := make([]abituriententry.Form, 0, len(request.Form))
			for _, f := range request.Form {
				forms = append(forms, abituriententry.Form(f))
			}
			q = q.Where(abituriententry.FormIn(forms...))
		}
		if request.Sum.LTE != nil {
			q = q.Where(abituriententry.SumLTE(*request.Sum.LTE))
		}
		if request.Sum.GTE != nil {
			q = q.Where(abituriententry.SumGTE(*request.Sum.GTE))
		}
		if request.Program != "" {
			q = q.Where(abituriententry.Program(request.Program))
		}
		// NOTE(review): the two boolean filters only apply when true, so
		// "false" cannot be queried explicitly — presumably intentional.
		if request.StatementGiven {
			q = q.Where(abituriententry.StatementGiven(request.StatementGiven))
		}
		if request.OriginalGiven {
			q = q.Where(abituriententry.OriginalGiven(request.OriginalGiven))
		}
		if len(request.Type) > 0 {
			q = q.Where(abituriententry.TypeIn(request.Type...))
		}
		if len(request.Status) > 0 {
			q = q.Where(abituriententry.StatusIn(request.Status...))
		}
		if len(request.Name) > 0 {
			q = q.Where(abituriententry.NameContainsFold(request.Name))
		}
		if request.Number != 0 {
			q = q.Where(abituriententry.Number(request.Number))
		}
		// AllX panics on query error; echo's Recover middleware catches it.
		entries := q.AllX(context.Background())
		return c.JSON(http.StatusOK, entries)
	})
	// GET /dictionary: cached dictionary, refreshed from the DB when stale.
	e.GET("/dictionary", func(c echo.Context) error {
		dict, needUpdate := c.Get("dict_cache").(*DictionaryCache).Get()
		if needUpdate {
			dict = GetDictFromDB(client)
			c.Get("dict_cache").(*DictionaryCache).Update(dict)
		}
		return c.JSON(http.StatusOK, dict)
	})
	e.Logger.Fatal(e.Start(":8000"))
}
package main

import (
	"fmt"
)

// status is a named string type representing a process state; using a
// distinct type prevents arbitrary strings from being assigned accidentally.
type status string

// printStatus writes the given status to standard output.
func printStatus(s status) {
	fmt.Println("The status is:", s)
}

const (
	running status = "running"
	// NOTE(review): constant is named "waiting" but its value is "blocked" —
	// confirm which one is intended.
	waiting status = "blocked"
)

func main() {
	var s status
	s = running
	// An untyped string constant like "undefined" would not compile here
	// unless convertible; typed constants keep the value set constrained.
	fmt.Println(s)
	fmt.Printf("%v, %T\n", s, s)
}
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import "github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"

// NewSet builds a Set from the given elements, keyed by each element's
// Info.Name. Later duplicates overwrite earlier ones.
func NewSet(elements ...*Element) Set {
	s := make(Set)
	for _, e := range elements {
		s[e.Info.Name] = e
	}
	return s
}

// List returns the elements of the set as a slice.
// NOTE: map iteration order is random, so the slice order is unspecified.
func (s Set) List() []*Element {
	list := make([]*Element, len(s))
	i := 0
	for _, e := range s {
		list[i] = e
		i++
	}
	return list
}

// RawList returns the underlying raw ConfigElement of every element in the
// set (order unspecified, see List).
func (s Set) RawList() []*v1beta1.ConfigElement {
	list := make([]*v1beta1.ConfigElement, len(s))
	i := 0
	for _, e := range s {
		list[i] = e.Info
		i++
	}
	return list
}

// Copy returns a shallow copy of the set: the map is new, but the *Element
// pointers are shared with the original.
func (s Set) Copy() Set {
	newSet := make(Set)
	for key, value := range s {
		newSet[key] = value
	}
	return newSet
}

// Set adds an element to the set.
// If the element already exist this element will be overwritten
func (s Set) Set(e *Element) {
	s[e.Info.Name] = e
}

// Add adds elements to the set if the element does not exist
// or the element level is higher than the existing one
func (s Set) Add(elements ...*Element) {
	for _, e := range elements {
		if !s.Has(e) {
			s.Set(e)
			continue
		}
		// Same name already present: higher level wins.
		if s[e.Info.Name].Level < e.Level {
			s.Set(e)
		}
	}
}

// Has checks if the element is already in the set (by Info.Name).
func (s Set) Has(e *Element) bool {
	_, ok := s[e.Info.Name]
	return ok
}
package leetcode //func removeElement(nums []int, val int) int { // if len(nums) == 0 { // return 0 // } // // ans := 0 // for _, v := range nums { // if v != val { // nums[ans] = v // ans++ // } // } // return ans //} func removeElement(nums []int, val int) int { left, right := 0, len(nums) if right == 0 { return 0 } for left < right { if nums[left] == val { nums[left] = nums[right-1] right-- } else { left++ } } return left }
package utils

import (
	"fmt"
	"reflect"
	"strings"
)

// FillStruct set the field value of ptr according data kv map.
// It binds by field name (no tag) and panics on any bind error.
func FillStruct(ptr interface{}, data map[string]interface{}) {
	err := Bind(ptr, "", data)
	if err != nil {
		panic(err)
	}
}

// defaultStructFieldTag is the struct tag key consulted by FillStructByTag.
var defaultStructFieldTag = "field"

// FillStructByTag sets the fields of the struct pointed to by ptr from input,
// but only for fields whose `field:"..."` tag (comma-separated) contains tag.
// Values are looked up in input by the Go field name. It returns the names of
// the fields actually filled. Panics if ptr is not a pointer to a struct.
func FillStructByTag(ptr interface{}, tag string, input map[string]interface{}) (filled []string, err error) {
	val := reflect.ValueOf(ptr)
	ind := reflect.Indirect(val)
	typ := ind.Type()
	fullName := typ.PkgPath() + "." + typ.Name()
	if val.Kind() != reflect.Ptr {
		panic(fmt.Errorf("FillStructByTag: cannot use non-ptr struct `%s`", fullName))
	}
	if typ.Kind() != reflect.Struct {
		panic(fmt.Errorf("FillStructByTag: only allow ptr of struct"))
	}
	filled = make([]string, 0, len(input))
	numField := ind.NumField()
	for i := 0; i < numField; i++ {
		structField := typ.Field(i)
		field := ind.Field(i)
		// Unexported fields cannot be set via reflection.
		if !field.CanSet() {
			continue
		}
		fieldTagStr := structField.Tag.Get(defaultStructFieldTag)
		if fieldTagStr == "" {
			continue
		}
		// The tag value is a comma-separated list; the field matches when any
		// entry equals the requested tag.
		match := false
		fieldTags := strings.Split(fieldTagStr, ",")
		for _, v := range fieldTags {
			if tag == v {
				match = true
				break
			}
		}
		if !match {
			continue
		}
		value, ok := input[structField.Name]
		if !ok {
			continue
		}
		if err := bindValue(field, value); err != nil {
			return nil, err
		}
		filled = append(filled, structField.Name)
	}
	return filled, nil
}

// BindSliceSep the separator for parsing slice field
var BindSliceSep = ","

// BindUnmarshaler the bind unmarshal interface
type BindUnmarshaler interface {
	UnmarshalBind(value string) error
}

// Bind binds values from input to the struct pointed to by ptr. When tag is
// non-empty, fields are matched by that struct tag; otherwise by field name.
// Panics if ptr is not a pointer to a struct.
func Bind(ptr interface{}, tag string, input map[string]interface{}) error {
	val := reflect.ValueOf(ptr)
	ind := reflect.Indirect(val)
	typ := ind.Type()
	fullName := typ.PkgPath() + "." + typ.Name()
	if val.Kind() != reflect.Ptr {
		panic(fmt.Errorf("bind: cannot use non-ptr struct `%s`", fullName))
	}
	if typ.Kind() != reflect.Struct {
		panic(fmt.Errorf("bind: only allow ptr of struct"))
	}
	for i := 0; i < ind.NumField(); i++ {
		structField := ind.Type().Field(i)
		field := ind.Field(i)
		if !field.CanSet() {
			continue
		}
		// Resolve the lookup key: tag value when a tag is given, otherwise
		// the Go field name. Untagged fields are skipped in tag mode.
		name := ""
		if tag != "" {
			name = structField.Tag.Get(tag)
			if name == "" {
				continue
			}
		} else {
			name = structField.Name
		}
		value, ok := input[name]
		if !ok {
			continue
		}
		if err := bindValue(field, value); err != nil {
			return err
		}
	}
	return nil
}

// bindSlice binds value into a slice field. A string value is split on
// BindSliceSep and each piece is bound element-wise (supporting pointer
// element types); a non-string value is assigned directly.
func bindSlice(field reflect.Value, value interface{}) error {
	strValue, ok := value.(string)
	if !ok {
		field.Set(reflect.ValueOf(value))
		return nil
	}
	vals := strings.Split(strValue, BindSliceSep)
	if len(vals) == 0 {
		return nil
	}
	ind := reflect.Indirect(field)
	typ := ind.Type().Elem()
	isPtr := typ.Kind() == reflect.Ptr
	if isPtr {
		typ = typ.Elem()
	}
	// Build a fresh slice and assign it wholesale at the end.
	slice := reflect.New(ind.Type()).Elem()
	for _, val := range vals {
		elem := reflect.New(typ)
		elemInd := reflect.Indirect(elem)
		if err := bindValue(elemInd, val); err != nil {
			return err
		}
		if isPtr {
			slice = reflect.Append(slice, elemInd.Addr())
		} else {
			slice = reflect.Append(slice, elemInd)
		}
	}
	ind.Set(slice)
	return nil
}

// bindValuePtr binds value into a pointer field: a pointer value is assigned
// directly, otherwise a new pointee is allocated and bound recursively.
func bindValuePtr(field reflect.Value, value interface{}) error {
	vv := reflect.ValueOf(value)
	if vv.Kind() == reflect.Ptr {
		field.Set(vv)
		return nil
	}
	typ := field.Type().Elem()
	newValue := reflect.New(typ)
	err := bindValue(newValue.Elem(), value)
	if err != nil {
		return err
	}
	field.Set(newValue)
	return nil
}

// bindValue binds a single value into field, trying BindUnmarshaler first and
// then converting by kind via the package's Get* coercion helpers
// (defined elsewhere in this package).
// nolint:gocyclo
func bindValue(field reflect.Value, value interface{}) error {
	ok, err := unmarshalBind(field, value)
	if err != nil {
		return err
	}
	if ok {
		return nil
	}
	switch field.Kind() {
	case reflect.Ptr:
		return bindValuePtr(field, value)
	case reflect.Bool:
		field.SetBool(GetBool(value))
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		field.SetInt(GetInt64(value))
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		field.SetUint(GetUint64(value))
	case reflect.Float32, reflect.Float64:
		field.SetFloat(GetFloat64(value))
	case reflect.String:
		field.SetString(GetString(value))
	case reflect.Slice:
		if err := bindSlice(field, value); err != nil {
			return err
		}
	default:
		// Last resort: direct assignment; panics if types are incompatible.
		field.Set(reflect.ValueOf(value))
	}
	return nil
}

// unmarshalBind attempts to bind a string value through the field type's
// BindUnmarshaler implementation. It reports ok=true when the interface was
// used (successfully); (false, nil) means "not applicable, fall through".
func unmarshalBind(field reflect.Value, value interface{}) (ok bool, err error) {
	strValue, ok := value.(string)
	if !ok {
		return false, nil
	}
	ptr := reflect.New(field.Type())
	if !ptr.CanInterface() {
		return false, nil
	}
	unmarshaler, ok := ptr.Interface().(BindUnmarshaler)
	if !ok {
		return false, nil
	}
	if err = unmarshaler.UnmarshalBind(strValue); err != nil {
		return false, err
	}
	field.Set(reflect.Indirect(ptr))
	return true, nil
}
package bd

import (
	"context"
	"log"
	"time"

	"github.com/Estiven9644/twittor-backend/models"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// LeoTweets ("read tweets") returns one page (up to 20) of the tweets of the
// user with the given ID, newest first. pagina is 1-based. The boolean result
// reports success; on failure the (possibly partial) results are returned
// alongside false.
func LeoTweets(ID string, pagina int64) ([]*models.DevuelvoTweets, bool) {
	// 15-second budget for the whole query; cancel releases it via defer.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	// MongoCN is the package-level Mongo connection (declared elsewhere in
	// this package).
	db := MongoCN.Database("twittor")
	col := db.Collection("tweet")
	var results []*models.DevuelvoTweets
	// Filter: only tweets belonging to this user.
	condicion := bson.M{
		"userid": ID,
	}
	opciones := options.Find()
	opciones.SetLimit(20)
	// Sort by date descending (newest first).
	opciones.SetSort(bson.D{{Key: "fecha", Value: -1}})
	// Skip full pages before this one (page size 20, pagina starts at 1).
	opciones.SetSkip((pagina - 1) * 20)
	cursor, err := col.Find(ctx, condicion, opciones)
	if err != nil {
		// NOTE(review): log.Fatal exits the process, making the return below
		// unreachable — probably log.Println was intended here.
		log.Fatal(err.Error())
		return results, false
	}
	// Iterate the cursor, decoding each document into the result slice.
	// context.TODO() is used as an empty placeholder context.
	for cursor.Next(context.TODO()) {
		var registro models.DevuelvoTweets
		err := cursor.Decode(&registro)
		if err != nil {
			return results, false
		}
		results = append(results, &registro)
	}
	return results, true
}
package distributed

import (
	"time"

	"github.com/dbogatov/dac-lib/dac"
	"github.com/dbogatov/fabric-amcl/amcl"
	"github.com/dbogatov/fabric-amcl/amcl/FP256BN"
	"github.com/dbogatov/fabric-simulator/helpers"
)

// RPCRevocation exposes the revocation authority over RPC: it advances a
// global epoch counter and signs non-revocation handles for the current epoch.
type RPCRevocation struct {
	keys KeysHolder
}

// epoch is the current revocation epoch, advanced by the background goroutine
// started in MakeRPCRevocation.
// NOTE(review): epoch is read by the RPC methods and written by that
// goroutine without synchronization — this is a data race; consider
// atomic.Int64 (would change the variable's type for the whole package).
var epoch int = 1

// MakeRPCRevocation creates the revocation authority with a fresh Groth
// key pair and starts a goroutine that increments the epoch every
// sysParams.Epoch seconds. The goroutine has no stop mechanism and runs for
// the lifetime of the process.
func MakeRPCRevocation(prg *amcl.RAND) (rpcRevocation *RPCRevocation) {
	groth := dac.MakeGroth(helpers.NewRand(), true, sysParams.Ys[1])
	sk, pk := groth.Generate()

	rpcRevocation = &RPCRevocation{
		keys: KeysHolder{
			pk: pk,
			sk: sk,
		},
	}

	go func() {
		// Idiom fix: a single-case select with continue is just a sleep loop.
		for {
			time.Sleep(time.Duration(sysParams.Epoch) * time.Second)
			epoch++
			logger.Debugf("Epoch advanced to %d", epoch)
		}
	}()

	return
}

// GetEpoch returns the current epoch via reply.
func (rpcRevocation *RPCRevocation) GetEpoch(args *int, reply *int) (e error) {
	*reply = epoch
	logger.Debug("Epoch read")
	return
}

// GetPK returns the revocation authority's public key, serialized to bytes.
func (rpcRevocation *RPCRevocation) GetPK(args *int, reply *[]byte) (e error) {
	*reply = dac.PointToBytes(rpcRevocation.keys.pk)
	logger.Debug("PK requested")
	return
}

// ProcessNRR signs a non-revocation request for the current epoch and returns
// the serialized handle.
func (rpcRevocation *RPCRevocation) ProcessNRR(args *NonRevocationRequest, reply *NonRevocationHandle) (e error) {
	prg := helpers.NewRand()
	nrr, _ := dac.PointFromBytes(args.PK)
	nrh := dac.SignNonRevoke(prg, rpcRevocation.keys.sk, nrr, FP256BN.NewBIGint(epoch), sysParams.Ys[1])
	// Fix: was `*&reply.Handle = ...` — dereferencing an address-of is a no-op.
	reply.Handle = nrh.ToBytes()
	logger.Debug("Non-revocation handle granted")
	return
}
package handlers

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/tmaesaka/cellar/config"
)

// TestIndexConfigHandler verifies that the /config endpoint returns HTTP 200,
// a JSON content type, and a body that decodes into config.ApiConfig.
func TestIndexConfigHandler(t *testing.T) {
	cfg := config.NewApiConfig()
	handler := IndexConfigHandler(cfg)

	req, _ := http.NewRequest("GET", "/config", nil)
	recorder := httptest.NewRecorder()
	handler.ServeHTTP(recorder, req)

	if recorder.Code != http.StatusOK {
		// Fix: original message had the typo "Exepected".
		t.Errorf("Expected status code 200; got %d", recorder.Code)
	}

	// Fix: use Header().Get instead of indexing the deprecated HeaderMap
	// field (also avoids a panic when the header is absent).
	contentType := recorder.Header().Get("Content-Type")
	if contentType != "application/json" {
		t.Errorf("Expected Content-Type to be application/json; got %v", contentType)
	}

	var resp config.ApiConfig
	if err := json.NewDecoder(recorder.Body).Decode(&resp); err != nil {
		t.Error(err)
	}
}
package utils

import (
	"github.com/wcharczuk/go-chart"
	"github.com/wcharczuk/go-chart/drawing"
	"os"
	"time"
)

// DrawChart renders one time-series line per (timestamps[i], datas[i]) pair
// into "chart-<chartName>.png" in the working directory. seriesNames must be
// at least as long as datas; timestamps is parallel to datas.
// NOTE(review): the signature returns nothing, so render errors are dropped;
// consider returning error in a future API revision.
func DrawChart(timestamps [][]time.Time, datas [][]float64, chartName string, seriesNames []string) {
	var timeseries []chart.Series
	for i := 0; i < len(datas); i++ {
		timeseries = append(timeseries, chart.TimeSeries{
			Name:    chartName + " " + seriesNames[i],
			XValues: timestamps[i],
			YValues: datas[i],
		})
	}
	graph := chart.Chart{
		Background: chart.Style{
			Padding:   chart.Box{Top: 50, Left: 25, Right: 25, Bottom: 10},
			FillColor: drawing.ColorFromHex("efefef"),
		},
		XAxis: chart.XAxis{Name: "Time", NameStyle: chart.StyleShow(), Style: chart.StyleShow(), ValueFormatter: chart.TimeValueFormatterWithFormat("02/01 3:04PM")},
		YAxis: chart.YAxis{Name: chartName, AxisType: chart.YAxisSecondary, NameStyle: chart.StyleShow(), Style: chart.StyleShow()},

		Series: timeseries,
	}
	graph.Elements = []chart.Renderable{chart.Legend(&graph)}

	// Fix: the original ignored the Create error (Render would then write to
	// a nil *os.File) and never closed the file handle.
	file, err := os.Create("chart-" + chartName + ".png")
	if err != nil {
		return
	}
	defer file.Close()
	// Render error intentionally dropped — see NOTE above.
	_ = graph.Render(chart.PNG, file)
}
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
)

// Comm describes one command loaded from the JSON config file.
type Comm struct {
	// Key is the command name.
	Key string `json:"key"`
	// Num is the expected number of arguments.
	Num int `json:"num"`
	// Param lists the type of each argument.
	Param []string `json:"param"`
}

// Commend maps command name -> command definition, loaded by initComm.
var Commend map[string]Comm

// main is a terminal-input exercise: it reads lines from stdin, validates
// them against the configured commands, and echoes valid input.
func main() {
	initComm()
	input := bufio.NewScanner(os.Stdin) // scanner over stdin
	fmt.Print(">> ")
	for input.Scan() { // read the next input line
		fmt.Print(">> ")
		line := input.Text() // current line as a string
		com := strings.Fields(line)
		// Fix: guard against a blank line — com[0] used to panic with
		// "index out of range" on empty input.
		if len(com) == 0 {
			continue
		}
		if com[0] == "Exit" {
			os.Exit(1)
		}
		b, m := checkComm(com)
		if !b {
			fmt.Print(m)
		} else {
			for _, v := range com {
				fmt.Print(v) // echo to stdout
			}
		}
		fmt.Println("")
		fmt.Print(">> ")
	}
}

// initComm loads the command definitions from the JSON config file into
// Commend. It exits the process if the file cannot be read.
func initComm() {
	// Paths are resolved relative to the working directory, so a relative
	// "com.json" is not found when run from GOPATH root; an absolute path is
	// used instead.
	// NOTE(review): hard-coded absolute Windows path — make this configurable.
	//data, err := ioutil.ReadFile("./src/ospro/com.json")
	data, err := ioutil.ReadFile("E:/workspace/go/pro1/src/ospro/com.json")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	var config []Comm
	err2 := json.Unmarshal(data, &config)
	if err2 != nil {
		fmt.Println("error:", err2)
	}
	// Build the command lookup map.
	Commend = make(map[string]Comm)
	for _, v := range config {
		Commend[v.Key] = v
	}
}

// checkComm validates a tokenized input line: the first token must be a known
// command and the remaining tokens must match its declared argument count.
// It returns (false, reason) on failure and (true, "ok") on success.
func checkComm(c []string) (bool, string) {
	if _, ok := Commend[c[0]]; !ok {
		return false, "不存在" + c[0] + "命令"
	}
	if len(c)-1 != Commend[c[0]].Num {
		return false, "参数丢失"
	}
	return true, "ok"
}
// Package domain describes the (simplified) models used in the system and their relations to each other
package domain

// User represents a user in the system.
// It can have many Subscriptions.
type User struct {
	Username      string
	Email         string
	Subscriptions []Subscription
}

// Subscription represents a subscription in the system.
// It belongs to a User and a Plan, and can have many Apps.
type Subscription struct {
	User   User
	Plan   Plan
	Public bool
	Apps   []App
}

// Plan represents an offering to which a User can subscribe.
// It has many Limits, which are the default limits for an App belonging to a Subscription with that Plan.
type Plan struct {
	Name   string
	Price  int // NOTE(review): unit (cents? dollars?) is not specified — confirm.
	Limits []Limit
}

// Limit represents an abstract resource limit in the system, as a key/value pair.
type Limit struct {
	Key   string
	Value int
}

// App represents a registered app in the system.
// It belongs to the owner User and to a Subscription which defines the default Limits which can be overridden by many LimitOverrides.
type App struct {
	Name          string
	Owner         User
	Public        bool
	Subscription  Subscription
	LimitOverride []Limit
	Builds        []Build
}

// Build represents a build job for an app and currently serves as a placeholder to illustrate the data model.
type Build struct{}
package list import ( "context" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" ) type ListAdapterInterface interface { ListNs(ctx context.Context) (*corev1.NamespaceList, error) ListPod(ctx context.Context, nameSpace string) (*corev1.PodList, error) ListDeployment(ctx context.Context, nameSpace string) (*appsv1.DeploymentList, error) ListService(ctx context.Context, nameSpace string) (*corev1.ServiceList, error) } type ListAdapter struct { ClientSet *kubernetes.Clientset } func (a ListAdapter) ListNs(ctx context.Context) (*corev1.NamespaceList, error) { return a.ClientSet.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) } func (a ListAdapter) ListPod(ctx context.Context, nameSpace string) (*corev1.PodList, error) { return a.ClientSet.CoreV1().Pods(nameSpace).List(ctx, metav1.ListOptions{}) } func (a ListAdapter) ListDeployment(ctx context.Context, nameSpace string) (*appsv1.DeploymentList, error) { return a.ClientSet.AppsV1().Deployments(nameSpace).List(ctx, metav1.ListOptions{}) } func (a ListAdapter) ListService(ctx context.Context, nameSpace string) (*corev1.ServiceList, error) { return a.ClientSet.CoreV1().Services(nameSpace).List(ctx, metav1.ListOptions{}) }
package main

import "fmt"

// largest returns the maximum of the provided ints.
// It panics when called with no arguments (args[0] on an empty slice) —
// preserved from the original behavior.
// The accumulator was previously named "smallest", which contradicted what
// the function computes; renamed for clarity.
func largest(args ...int) int {
	maxVal := args[0]
	for _, v := range args[1:] {
		if v > maxVal {
			maxVal = v
		}
	}
	return maxVal
}

func main() {
	fmt.Println(largest(34, 534, 3, 133, 90))
}
package info import ( "encoding/json" "net/http" "sort" "strings" "github.com/golang/glog" "github.com/julienschmidt/httprouter" "github.com/prebid/prebid-server/config" ) var invalidEnabledOnly = []byte(`Invalid value for 'enabledonly' query param, must be of boolean type`) // NewBiddersEndpoint builds a handler for the /info/bidders endpoint. func NewBiddersEndpoint(bidders config.BidderInfos, aliases map[string]string) httprouter.Handle { responseAll, err := prepareBiddersResponseAll(bidders, aliases) if err != nil { glog.Fatalf("error creating /info/bidders endpoint all bidders response: %v", err) } responseEnabledOnly, err := prepareBiddersResponseEnabledOnly(bidders, aliases) if err != nil { glog.Fatalf("error creating /info/bidders endpoint enabled only response: %v", err) } return func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { var writeErr error switch readEnabledOnly(r) { case "true": w.Header().Set("Content-Type", "application/json") _, writeErr = w.Write(responseEnabledOnly) case "false": w.Header().Set("Content-Type", "application/json") _, writeErr = w.Write(responseAll) default: w.WriteHeader(http.StatusBadRequest) _, writeErr = w.Write(invalidEnabledOnly) } if writeErr != nil { glog.Errorf("error writing response to /info/bidders: %v", writeErr) } } } func readEnabledOnly(r *http.Request) string { q := r.URL.Query() v, exists := q["enabledonly"] if !exists || len(v) == 0 { // if the enabledOnly query parameter is not specified, default to false to match // previous behavior of returning all adapters regardless of their enabled status. 
return "false" } return strings.ToLower(v[0]) } func prepareBiddersResponseAll(bidders config.BidderInfos, aliases map[string]string) ([]byte, error) { bidderNames := make([]string, 0, len(bidders)+len(aliases)) for name := range bidders { bidderNames = append(bidderNames, name) } for name := range aliases { bidderNames = append(bidderNames, name) } sort.Strings(bidderNames) return json.Marshal(bidderNames) } func prepareBiddersResponseEnabledOnly(bidders config.BidderInfos, aliases map[string]string) ([]byte, error) { bidderNames := make([]string, 0, len(bidders)+len(aliases)) for name, info := range bidders { if info.IsEnabled() { bidderNames = append(bidderNames, name) } } for name, bidder := range aliases { if info, ok := bidders[bidder]; ok && info.IsEnabled() { bidderNames = append(bidderNames, name) } } sort.Strings(bidderNames) return json.Marshal(bidderNames) }
// Copyright 2019 The Kubernetes Authors. // SPDX-License-Identifier: Apache-2.0 package commands import ( "fmt" "github.com/go-openapi/spec" "github.com/spf13/cobra" "sigs.k8s.io/kustomize/cmd/config/ext" "sigs.k8s.io/kustomize/kyaml/errors" "sigs.k8s.io/kustomize/kyaml/fieldmeta" "sigs.k8s.io/kustomize/kyaml/openapi" "sigs.k8s.io/kustomize/kyaml/setters2/settersutil" ) // NewCreateSubstitutionRunner returns a command runner. func NewCreateSubstitutionRunner(parent string) *CreateSubstitutionRunner { r := &CreateSubstitutionRunner{} cs := &cobra.Command{ Use: "create-subst DIR NAME", Args: cobra.ExactArgs(2), PreRunE: r.preRunE, RunE: r.runE, } cs.Flags().StringVar(&r.CreateSubstitution.FieldName, "field", "", "name of the field to set -- e.g. --field image") cs.Flags().StringVar(&r.CreateSubstitution.FieldValue, "field-value", "", "value of the field to create substitution for -- e.g. --field-value nginx:0.1.0") cs.Flags().StringVar(&r.CreateSubstitution.Pattern, "pattern", "", `substitution pattern -- e.g. 
--pattern \${my-image-setter}:\${my-tag-setter}`) _ = cs.MarkFlagRequired("pattern") _ = cs.MarkFlagRequired("field-value") fixDocs(parent, cs) r.Command = cs return r } func CreateSubstitutionCommand(parent string) *cobra.Command { return NewCreateSubstitutionRunner(parent).Command } type CreateSubstitutionRunner struct { Command *cobra.Command CreateSubstitution settersutil.SubstitutionCreator OpenAPIFile string Values []string } func (r *CreateSubstitutionRunner) runE(c *cobra.Command, args []string) error { return handleError(c, r.CreateSubstitution.Create(r.OpenAPIFile, args[0])) } func (r *CreateSubstitutionRunner) preRunE(c *cobra.Command, args []string) error { var err error r.CreateSubstitution.Name = args[1] if err != nil { return err } r.OpenAPIFile, err = ext.GetOpenAPIFile(args) if err != nil { return err } if err := openapi.AddSchemaFromFile(r.OpenAPIFile); err != nil { return err } // check if substitution with same name exists and throw error ref, err := spec.NewRef(fieldmeta.DefinitionsPrefix + fieldmeta.SubstitutionDefinitionPrefix + r.CreateSubstitution.Name) if err != nil { return err } subst, _ := openapi.Resolve(&ref) // if substitution already exists with the input substitution name, throw error if subst != nil { return errors.Errorf("substitution with name %s already exists", r.CreateSubstitution.Name) } // check if setter with same name exists and throw error ref, err = spec.NewRef(fieldmeta.DefinitionsPrefix + fieldmeta.SetterDefinitionPrefix + r.CreateSubstitution.Name) if err != nil { return err } setter, _ := openapi.Resolve(&ref) // if setter already exists with input substitution name, throw error if setter != nil { return errors.Errorf(fmt.Sprintf("setter with name %s already exists, "+ "substitution and setter can't have same name", r.CreateSubstitution.Name)) } return nil }
package compose import ( "os" "path/filepath" "strings" "testing" "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" "github.com/loft-sh/devspace/pkg/util/log" "gopkg.in/yaml.v3" "gotest.tools/assert" "gotest.tools/assert/cmp" ) func TestLoad(t *testing.T) { dirs, err := os.ReadDir("testdata") if err != nil { t.Error(err) } if len(dirs) == 0 { t.Error("No test cases found. Add some!") } focused := []string{} for _, dir := range dirs { if strings.HasPrefix(dir.Name(), "f_") { focused = append(focused, dir.Name()) } } if len(focused) > 0 { for _, focus := range focused { testLoad(focus, t) } } else { for _, dir := range dirs { if !strings.HasPrefix(dir.Name(), "x_") { testLoad(dir.Name(), t) } } } } func testLoad(dir string, t *testing.T) { wd, err := os.Getwd() if err != nil { t.Error(err) } err = os.Chdir(filepath.Join(wd, "testdata", dir)) if err != nil { t.Error(err) } defer func() { err := os.Chdir(wd) if err != nil { t.Error(err) } }() dockerComposePath := GetDockerComposePath() dockerCompose, err := LoadDockerComposeProject(dockerComposePath) if err != nil { t.Errorf("Unexpected error occurred loading the docker-compose.yaml: %s", err.Error()) } loader := NewComposeManager(dockerCompose) actualError := loader.Load(log.Discard) if actualError != nil { expectedError, err := os.ReadFile("error.txt") if err != nil { t.Errorf("Unexpected error occurred loading the docker-compose.yaml: %s", err.Error()) } assert.Equal(t, string(expectedError), actualError.Error(), "Expected error:\n%s\nbut got:\n%s\n in testCase %s", string(expectedError), actualError.Error(), dir) } for path, actualConfig := range loader.Configs() { data, err := os.ReadFile(path) if err != nil { t.Errorf("Please create the expected DevSpace configuration by creating a %s in the testdata/%s folder", path, dir) } expectedConfig := &latest.Config{} err = yaml.Unmarshal(data, expectedConfig) if err != nil { t.Errorf("Error unmarshaling the expected configuration: %s", err.Error()) } 
assert.Check( t, cmp.DeepEqual(expectedConfig, actualConfig), "configs did not match in test case %s", dir, ) } }
/* Copyright 2021 The KodeRover Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package gerrit import ( "fmt" "os" "path" "strings" "github.com/koderover/zadig/lib/microservice/aslan/config" "github.com/koderover/zadig/lib/microservice/aslan/core/common/dao/models" "github.com/koderover/zadig/lib/microservice/aslan/core/common/service/codehost" "github.com/koderover/zadig/lib/tool/gerrit" "github.com/koderover/zadig/lib/tool/xlog" "github.com/koderover/zadig/lib/util" ) func DeleteGerritWebhook(workflow *models.Workflow, log *xlog.Logger) error { if workflow != nil && workflow.HookCtl != nil { for _, workflowWebhook := range workflow.HookCtl.Items { if workflowWebhook == nil { continue } detail, err := codehost.GetCodeHostInfoByID(workflowWebhook.MainRepo.CodehostID) if err != nil { log.Errorf("DeleteGerritWebhook GetCodehostDetail err:%v", err) continue } if detail.Type == gerrit.CodehostTypeGerrit { webhookURLPrefix := fmt.Sprintf("%s/%s/%s/%s", detail.Address, "a/config/server/webhooks~projects", gerrit.Escape(workflowWebhook.MainRepo.RepoName), "remotes") _, _ = gerrit.Do(fmt.Sprintf("%s/%s", webhookURLPrefix, gerrit.RemoteName), "DELETE", detail.AccessToken, util.GetRequestBody(&gerrit.GerritWebhook{})) _, err = gerrit.Do(fmt.Sprintf("%s/%s", webhookURLPrefix, workflow.Name), "DELETE", detail.AccessToken, util.GetRequestBody(&gerrit.GerritWebhook{})) if err != nil { log.Errorf("DeleteGerritWebhook err:%v", err) } } } } return nil } func GetGerritWorkspaceBasePath(repoName string) 
(string, error) { if strings.Contains(repoName, "/") { repoName = strings.Replace(repoName, "/", "-", -1) } base := path.Join(config.S3StoragePath(), repoName) if _, err := os.Stat(base); os.IsNotExist(err) { return base, err } return base, nil }
package auth import ( "dena-hackathon21/entity" "fmt" jwt "github.com/dgrijalva/jwt-go" "os" "strconv" "time" ) type JWTHandler struct { SigninKey string } func NewJWTHandler() (*JWTHandler, error) { return &JWTHandler{}, nil } func (j JWTHandler) GenerateJWTToken(userID uint64) (string, error) { claims := entity.Claims{ Sub: strconv.Itoa(int(userID)), Iat: time.Now().Unix(), Exp: time.Now().Add(time.Hour * 24).Unix(), } jwtEntity := entity.JWT{ SigninMethod: jwt.SigningMethodHS256, Claims: claims, } // 電子署名 tokenString, _ := jwtEntity.ToTokenString(os.Getenv("SIGNINGKEY")) return tokenString, nil } func (j JWTHandler) GetClaimsFromToken(tokenStr string) (*entity.Claims, error) { claims, err := entity.ParseJwtToken(tokenStr, jwt.SigningMethodHS256, os.Getenv("SIGNINGKEY")) return claims, err } func (j JWTHandler) Valid(tokenStr string) (bool, error) { claims, err := j.GetClaimsFromToken(tokenStr) if err != nil { return false, err } if claims.Exp < time.Now().Unix() { return false, fmt.Errorf("timeout") } return true, nil } func (j JWTHandler) GetUserIDFromToken(tokenStr string) (uint64, error) { claims, err := j.GetClaimsFromToken(tokenStr) if err != nil { return 0, err } i, err := strconv.Atoi(claims.Sub) if err != nil { return 0, err } return uint64(i), nil }
// Copyright 2021 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package security import ( "context" "strings" chk "chromiumos/tast/local/bundles/cros/security/filecheck" "chromiumos/tast/testing" ) func init() { testing.AddTest(&testing.Test{ Func: RunFiles, Desc: "Checks ownership and permissions of files in /run", Contacts: []string{ "jorgelo@chromium.org", // Security team "yusukes@chromium.org", // Initial author "chromeos-security@google.com", }, Attr: []string{"group:mainline"}, }) } func RunFiles(ctx context.Context, s *testing.State) { const ( root = "/run" ) patterns := []*chk.Pattern{ // ARC/ARCVM files (crbug.com/1163122) chk.NewPattern(chk.PathRegexp("arc(vm)?/host_generated/.*\\.prop"), chk.UID(0), chk.GID(0), chk.Mode(0644)), // ARCVM-specific files (ignored on ARC builds) chk.NewPattern(chk.Path("arcvm/host_generated/fstab"), chk.UID(0), chk.GID(0), chk.Mode(0644)), chk.NewPattern(chk.Path("arcvm/host_generated/oem/etc/media_profiles.xml"), chk.Users("arc-camera"), chk.Groups("arc-camera"), chk.Mode(0644)), chk.NewPattern(chk.Path("arcvm/host_generated/oem/etc/permissions/platform.xml"), chk.Users("crosvm"), chk.Groups("crosvm"), chk.Mode(0644)), } problems, _, err := chk.Check(ctx, root, patterns) if err != nil { s.Errorf("Failed to check %v: %v", root, err) } for path, msgs := range problems { s.Errorf("%v: %v", path, strings.Join(msgs, ", ")) } }
// SPDX-License-Identifier: MIT

package ast

import (
	"strconv"
	"testing"

	"github.com/issue9/assert/v3"
	"github.com/issue9/version"
)

// TestVersion verifies that the package's Version constant is valid semver
// and that MajorVersion matches its major component.
func TestVersion(t *testing.T) {
	a := assert.New(t, false)
	a.True(version.SemVerValid(Version))

	v := &version.SemVersion{}
	a.NotError(version.Parse(v, Version))
	// MajorVersion carries a leading prefix character (e.g. "v6"); strip it
	// before converting to an int.
	major, err := strconv.Atoi(MajorVersion[1:])
	a.NotError(err)
	a.Equal(major, v.Major)
}

// TestParseType checks splitting a composite type constant into its
// primitive type and sub-type parts.
func TestParseType(t *testing.T) {
	a := assert.New(t, false)

	p, s := ParseType(TypeString)
	a.Equal(p, TypeString).Empty(s)

	p, s = ParseType(TypeURL)
	a.Equal(p, TypeString).Equal(s, "url")

	p, s = ParseType(TypeInt)
	a.Equal(p, TypeNumber).Equal(s, "int")
}

// TestTrimLeftSpace exercises removal of the longest whitespace prefix
// shared by all lines of the input.
func TestTrimLeftSpace(t *testing.T) {
	a := assert.New(t, false)

	data := []*struct {
		input, output string
	}{
		{},
		{
			input:  `abc`,
			output: `abc`,
		},
		{
			input:  ` abc`,
			output: `abc`,
		},
		{
			input:  " abc\n",
			output: "abc\n",
		},
		{ // first line indented by one extra space
			input:  "  abc\n abc\n",
			output: " abc\nabc\n",
		},
		{ // same, with a leading blank line
			input:  "\n  abc\n abc\n",
			output: "\n abc\nabc\n",
		},
		{ // indentation styles differ (tab vs space), so nothing is trimmed
			input:  "\t abc\n abc\n",
			output: "\t abc\n abc\n",
		},
		{
			input:  "\t  abc\n\t abc\n\t xx\n",
			output: " abc\nabc\nxx\n",
		},
		{
			input:  "\t abc\n\t abc\nxx\n",
			output: "\t abc\n\t abc\nxx\n",
		},
		{ // all lines share the same "\t " prefix
			input:  "\t abc\n\t abc\n\t xx\n",
			output: "abc\nabc\nxx\n",
		},
		{ // only part of the whitespace prefix is shared
			input:  "\t\t abc\n\t abc\n\t xx\n",
			output: "\t abc\n abc\n xx\n",
		},
	}

	for i, item := range data {
		output := trimLeftSpace(item.input)
		a.Equal(output, item.output, "not equal @ %d\nv1=%#v\nv2=%#v\n", i, output, item.output)
	}
}
package pkg

// MySQLStatus mirrors one row of MySQL's `SHOW MASTER STATUS`.
// Field names (underscores included) appear to match the MySQL column names
// — presumably so result scanning can map columns by name; verify before
// renaming anything here.
type MySQLStatus struct {
	File     string
	Position uint32
	Binlog_Do_DB string
	// NOTE(review): "lgnore" (lowercase L) looks like a typo of the MySQL
	// column `Binlog_Ignore_DB`. It is left as-is because renaming an
	// exported field could break name-based column mapping — confirm
	// against the scanning code before fixing.
	Binlog_lgnore_DB  string
	Executed_Gtid_Set string
}

// MySQLSchema mirrors one row of `SHOW FULL COLUMNS` for a table.
// Pointer fields represent columns that can be NULL in the result set.
type MySQLSchema struct {
	Field      string
	Type       string
	Collation  *string
	Null       string
	Key        *string
	Default    *string
	Extra      *string
	Privileges string
	Comment    *string
}

// HistorySchemas maps schema state between memory and local storage
// (mem <=> db). There is no need to over-distinguish binlog schema,
// history deltas and new deltas here.
type HistorySchemas struct {
	Db     string `json:"db"`
	Table  string `json:"table"`
	Deltas Deltas `json:"deltas"`
}

// Deltas pairs the previous (Old) and current (Def) definition of a table.
type Deltas struct {
	Old DeltasItem `json:"old"`
	Def DeltasItem `json:"def"`
}

// DeltasItem is one table definition snapshot: its location plus columns.
type DeltasItem struct {
	Database string    `json:"database"`
	Table    string    `json:"table"`
	Columns  []Columns `json:"columns"`
}

// Columns describes a single column in a table definition.
type Columns struct {
	Type    string `json:"type"`
	Name    string `json:"name"`
	NotNull bool   `json:"not_null"` // NOT NULL constraint
}
package counter import ( "math" "sync/atomic" ) type CASFloatCounter struct { number uint64 } func NewCASFloatCounter() *CASFloatCounter { return &CASFloatCounter{0} } func (c *CASFloatCounter) Add(num float64) { for { v := atomic.LoadUint64(&c.number) newValue := math.Float64bits(math.Float64frombits(v) + num) if atomic.CompareAndSwapUint64(&c.number, v, newValue) { return } } } func (c *CASFloatCounter) Read() float64 { return math.Float64frombits(atomic.LoadUint64(&c.number)) }
/* Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tips import ( "io" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/config" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output" ) // PrintForRun prints tips to the user who has run `skaffold run`. func PrintForRun(out io.Writer, opts config.SkaffoldOptions) { if !opts.Tail { printTip(out, "You can also run [skaffold run --tail] to get the logs") } } // PrintForInit prints tips to the user who has run `skaffold init`. func PrintForInit(out io.Writer, opts config.SkaffoldOptions) { printTip(out, "You can now run [skaffold build] to build the artifacts") printTip(out, "or [skaffold run] to build and deploy") printTip(out, "or [skaffold dev] to enter development mode, with auto-redeploy") } // PrintForTest prints tips on when to use skaffold test. func PrintForTest(out io.Writer) { printTip(out, "You need to:") printTip(out, "run [skaffold test] with [--build-artifacts <file-output>] for running tests on artifacts from a given file.") } // PrintUseRunVsDeploy prints tips on when to use skaffold run vs deploy. func PrintUseRunVsDeploy(out io.Writer) { printTip(out, "You either need to:") printTip(out, "run [skaffold deploy] with [--images TAG] for each pre-built artifact") printTip(out, "or [skaffold run] instead, to let Skaffold build, tag and deploy artifacts.") } // PrintUseBuildAndExec prints tip to use artifacts from previous build in skaffold exec. 
func PrintUseBuildAndExec(out io.Writer) { printTip(out, "Check all the images have a tag assigned:") printTip(out, "run [skaffold exec] with [--build-artifacts <file-output>] for running an action using images from a previous build") } func printTip(out io.Writer, message string) { output.Green.Fprintln(out, message) }
package groto import ( "errors" "log" "net" ) type Client struct { user string password string id []byte pwHashKey []byte } func NewClient(user, password string) *Client { return &Client{ user: user, password: password, } } func (c *Client) Do(conn net.Conn) error { if err := c.stepHandshake(conn); err != nil { return err } if err := c.stepAuthN(conn); err != nil { return err } return nil } func (c *Client) stepHandshake(conn net.Conn) error { _, err := conn.Write([]byte{byte(Init)}) if err != nil { return err } b := make([]byte, initLen) _, err = conn.Read(b) if err != nil { return err } i, err := UnmarshalHandshake(b) if err != nil { return err } if i.status != OK { return errors.New("failed init") } log.Println("OK Handshake") c.id = i.id c.pwHashKey = i.pwHashKey return nil } func (c *Client) stepAuthN(conn net.Conn) error { hPw := HashPw(c.pwHashKey, []byte(c.password)) proto := NewProtoConfirm(c.id, []byte(c.user), hPw[:]) _, err := conn.Write(proto.Marshal()) if err != nil { return err } b := make([]byte, confirmLen) _, err = conn.Read(b) if err != nil { return err } r, err := UnmarshalAuthNResult(b) if err != nil { return err } if !r.IsOk() { return errors.New("user/password do not match") } log.Println("OK Confirm") return nil }
package csigc

import (
	"context"
	"os"
	"testing"

	"github.com/Dynatrace/dynatrace-operator/src/controllers/csi/metadata"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	// testImageDigest is the fake image digest used to name shared binary dirs.
	testImageDigest = "5f50f658891613c752d524b72fc"
)

var (
	// testPathResolver roots all CSI paths under "test" for the in-memory fs.
	testPathResolver = metadata.PathResolver{
		RootDir: "test",
	}
)

// TestRunSharedImagesGarbageCollection covers the top-level GC pass that
// removes shared image directories with no referencing metadata.
func TestRunSharedImagesGarbageCollection(t *testing.T) {
	ctx := context.TODO()
	t.Run("bad database", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		gc := CSIGarbageCollector{
			fs:   fs,
			db:   &metadata.FakeFailDB{},
			path: testPathResolver,
		}

		err := gc.runSharedImagesGarbageCollection(ctx)
		require.Error(t, err)
	})
	t.Run("no error on empty fs", func(t *testing.T) {
		gc := CSIGarbageCollector{
			fs: afero.NewMemMapFs(),
		}

		err := gc.runSharedImagesGarbageCollection(ctx)
		require.NoError(t, err)
	})
	t.Run("deletes unused", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		gc := CSIGarbageCollector{
			fs:   fs,
			db:   metadata.FakeMemoryDB(),
			path: testPathResolver,
		}

		err := gc.runSharedImagesGarbageCollection(ctx)
		require.NoError(t, err)

		// With no metadata referencing the digest, the dir must be gone.
		_, err = fs.Stat(gc.path.AgentSharedBinaryDirForImage(testImageDigest))
		require.Error(t, err)
		assert.True(t, os.IsNotExist(err))
	})
	t.Run("deletes nothing, because of dynakube metadata present", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		// NOTE(review): gc.path is left at its zero value here, unlike the
		// cases above — confirm the pass still inspects the directory
		// created under testPathResolver, or whether path should be set.
		gc := CSIGarbageCollector{
			fs: fs,
			db: metadata.FakeMemoryDB(),
		}
		// InsertDynakube's error return is ignored; the in-memory fake
		// presumably cannot fail here — TODO confirm.
		gc.db.InsertDynakube(ctx, &metadata.Dynakube{
			Name:          "test",
			TenantUUID:    "test",
			LatestVersion: "test",
			ImageDigest:   testImageDigest,
		})

		err := gc.runSharedImagesGarbageCollection(ctx)
		require.NoError(t, err)

		_, err = fs.Stat(testPathResolver.AgentSharedBinaryDirForImage(testImageDigest))
		require.NoError(t, err)
	})
	t.Run("deletes nothing, because of volume metadata present", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		gc := CSIGarbageCollector{
			fs: fs,
			db: metadata.FakeMemoryDB(),
		}
		gc.db.InsertVolume(ctx, &metadata.Volume{
			VolumeID:   "test",
			TenantUUID: "test",
			Version:    testImageDigest,
			PodName:    "test",
		})

		err := gc.runSharedImagesGarbageCollection(ctx)
		require.NoError(t, err)

		_, err = fs.Stat(testPathResolver.AgentSharedBinaryDirForImage(testImageDigest))
		require.NoError(t, err)
	})
}

// TestGetSharedImageDirs checks enumeration of the shared image cache dirs.
func TestGetSharedImageDirs(t *testing.T) {
	t.Run("no error on empty fs", func(t *testing.T) {
		fs := afero.NewMemMapFs()
		gc := CSIGarbageCollector{
			fs:   fs,
			path: testPathResolver,
		}
		dirs, err := gc.getSharedImageDirs()
		require.NoError(t, err)
		assert.Nil(t, dirs)
	})
	t.Run("get image cache dirs", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		gc := CSIGarbageCollector{
			fs:   fs,
			path: testPathResolver,
		}
		dirs, err := gc.getSharedImageDirs()
		require.NoError(t, err)
		assert.Len(t, dirs, 1)
	})
}

// TestCollectUnusedImageDirs checks filtering of dirs not referenced by any
// dynakube or volume metadata.
func TestCollectUnusedImageDirs(t *testing.T) {
	ctx := context.TODO()
	t.Run("bad database", func(t *testing.T) {
		gc := CSIGarbageCollector{
			db:   &metadata.FakeFailDB{},
			path: testPathResolver,
		}
		_, err := gc.collectUnusedImageDirs(ctx, nil)
		require.Error(t, err)
	})
	t.Run("no error on empty db", func(t *testing.T) {
		gc := CSIGarbageCollector{
			db:   metadata.FakeMemoryDB(),
			path: testPathResolver,
		}
		dirs, err := gc.collectUnusedImageDirs(ctx, nil)
		require.NoError(t, err)
		assert.Nil(t, dirs)
	})
	t.Run("get unused", func(t *testing.T) {
		gc := CSIGarbageCollector{
			db:   metadata.FakeMemoryDB(),
			path: testPathResolver,
		}
		fs := createTestSharedImageDir(t)
		testDir := testPathResolver.AgentSharedBinaryDirForImage(testImageDigest)
		fileInfo, err := fs.Stat(testDir)
		require.NoError(t, err)

		dirs, err := gc.collectUnusedImageDirs(ctx, []os.FileInfo{fileInfo})
		require.NoError(t, err)
		assert.Len(t, dirs, 1)
		assert.Equal(t, testDir, dirs[0])
	})
	t.Run("gets nothing", func(t *testing.T) {
		gc := CSIGarbageCollector{
			db:   metadata.FakeMemoryDB(),
			path: testPathResolver,
		}
		// InsertDynakube's error return is ignored (fake in-memory DB).
		gc.db.InsertDynakube(ctx, &metadata.Dynakube{
			Name:          "test",
			TenantUUID:    "test",
			LatestVersion: "test",
			ImageDigest:   testImageDigest,
		})
		fs := createTestSharedImageDir(t)
		fileInfo, err := fs.Stat(testPathResolver.AgentSharedBinaryDirForImage(testImageDigest))
		require.NoError(t, err)

		dirs, err := gc.collectUnusedImageDirs(ctx, []os.FileInfo{fileInfo})
		require.NoError(t, err)
		assert.Len(t, dirs, 0)
	})
}

// TestGetUsedImageDigests checks collection of digests referenced by
// dynakube or volume metadata.
func TestGetUsedImageDigests(t *testing.T) {
	ctx := context.TODO()
	t.Run("bad database", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		gc := CSIGarbageCollector{
			fs:   fs,
			db:   &metadata.FakeFailDB{},
			path: testPathResolver,
		}

		_, err := gc.getUsedImageDigests(ctx)
		require.Error(t, err)
	})
	t.Run("no error on db", func(t *testing.T) {
		gc := CSIGarbageCollector{
			db: metadata.FakeMemoryDB(),
		}

		usedDigests, err := gc.getUsedImageDigests(ctx)
		require.NoError(t, err)
		assert.Empty(t, usedDigests)
	})
	t.Run("finds used digest, because of dynakube metadata present", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		gc := CSIGarbageCollector{
			fs: fs,
			db: metadata.FakeMemoryDB(),
		}
		gc.db.InsertDynakube(ctx, &metadata.Dynakube{
			Name:          "test",
			TenantUUID:    "test",
			LatestVersion: "test",
			ImageDigest:   testImageDigest,
		})

		usedDigests, err := gc.getUsedImageDigests(ctx)
		require.NoError(t, err)
		assert.True(t, usedDigests[testImageDigest])
	})
	// NOTE(review): the double comma in this subtest name looks like a typo;
	// it is a runtime string, so it is left untouched here.
	t.Run("finds used digest,, because of volume metadata present", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		gc := CSIGarbageCollector{
			fs: fs,
			db: metadata.FakeMemoryDB(),
		}
		gc.db.InsertVolume(ctx, &metadata.Volume{
			VolumeID:   "test",
			TenantUUID: "test",
			Version:    testImageDigest,
			PodName:    "test",
		})

		usedDigests, err := gc.getUsedImageDigests(ctx)
		require.NoError(t, err)
		assert.True(t, usedDigests[testImageDigest])
	})
}

// TestDeleteImageDirs checks removal of image dirs, including the
// already-absent case.
func TestDeleteImageDirs(t *testing.T) {
	t.Run("deletes, no panic/error", func(t *testing.T) {
		fs := createTestSharedImageDir(t)
		testDir := testPathResolver.AgentSharedBinaryDirForImage(testImageDigest)

		err := deleteImageDirs(fs, []string{testDir})
		require.NoError(t, err)

		_, err = fs.Stat(testDir)
		assert.True(t, os.IsNotExist(err))
	})
	t.Run("not exists, no panic/error", func(t *testing.T) {
		fs := afero.NewMemMapFs()
		testDir := testPathResolver.AgentSharedBinaryDirForImage(testImageDigest)

		err := deleteImageDirs(fs, []string{testDir})
		require.NoError(t, err)
	})
}

// createTestSharedImageDir returns an in-memory fs pre-populated with the
// shared binary directory for testImageDigest.
func createTestSharedImageDir(t *testing.T) afero.Fs {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll(testPathResolver.AgentSharedBinaryDirForImage(testImageDigest), 0755))
	return fs
}
package main import ( "fmt" "kafka_study/conf" "gopkg.in/ini.v1" ) func main() { p := new(conf.AppConf) ini.MapTo(&p, "conf/config.ini") fmt.Println(p) }
package eventstore import ( _ "bytes" "crypto/rand" "github.com/FoundationDB/fdb-go/fdb" "github.com/FoundationDB/fdb-go/fdb/subspace" _ "sync" "time" ) func nextRandom() []byte { b := make([]byte, 20) if _, err := rand.Read(b); err == nil { return b } else { panic(err) } } type EventRecord struct { contract string Data []byte Meta []byte } type EventStore struct { space subspace.Subspace } func (es *EventStore) Clear(db fdb.Database) { db.Transact(func(tr fdb.Transaction) (interface{}, error) { tr.ClearRange(es.space) return nil, nil }) } func (es *EventStore) Append(db fdb.Database, stream string, records []EventRecord) { rand := nextRandom() globalSpace := es.space.Sub("glob", rand) // TODO add random key to reduce contention _, err := db.Transact(func(tr fdb.Transaction) (interface{}, error) { // TODO : use get next index to sort them more nicely for _, evt := range records { gKey := globalSpace.Sub(time.Now().Unix(), evt.contract) //sKey := streamSpace.Item(tuple.Tuple{time.Now().Unix(), evt.contract}) // TODO - join data and meta tr.Set(gKey.Sub("data"), evt.Data) tr.Set(gKey.Sub("meta"), evt.Meta) //tr.Set(sKey.Item(tuple.Tuple{"data"}).AsFoundationDbKey(), evt.Data) //tr.Set(sKey.Item(tuple.Tuple{"meta"}).AsFoundationDbKey(), evt.Meta) } return nil, nil }) if err != nil { panic(err) } }
package application import ( metricapi "alauda.io/diablo/src/backend/integration/metric/api" "alauda.io/diablo/src/backend/resource/dataselect" ) type ApplicationCell Application func (self ApplicationCell) GetProperty(name dataselect.PropertyName) dataselect.ComparableValue { switch name { case dataselect.NameProperty: return dataselect.StdComparableString(self.ObjectMeta.Name) case dataselect.ExactNameProperty: return dataselect.StdExactString(self.ObjectMeta.Name) default: } // if name is not supported then just return a constant dummy value, sort will have no effect. return nil } func (self ApplicationCell) GetResourceSelector() *metricapi.ResourceSelector { return &metricapi.ResourceSelector{ Namespace: self.ObjectMeta.Namespace, ResourceName: self.ObjectMeta.Name, } } func toCells(std []Application) []dataselect.DataCell { cells := make([]dataselect.DataCell, len(std)) for i := range std { cells[i] = ApplicationCell(std[i]) } return cells } func fromCells(cells []dataselect.DataCell) []Application { std := make([]Application, len(cells)) for i := range std { std[i] = Application(cells[i].(ApplicationCell)) } return std }
package main import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/typed/apps/v1beta1" ) func getDeployments(client v1beta1.AppsV1beta1Interface, namespace string) ([]string, error) { objects, err := client.Deployments(namespace).List(metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf(err.Error()) } var names []string for _, object := range objects.Items { names = append(names, object.ObjectMeta.Name) } return names, nil }
package users import ( "encoding/json" "log" "net/http" "github.com/zuhrulumam/learning-go/pkg/database" "github.com/go-chi/chi" "github.com/go-chi/render" ) func v1UpdateUsersHandler(w http.ResponseWriter, r *http.Request) { log.Println("updating user") id := chi.URLParam(r, "id") var req v1CreateUsersPayload if err := json.NewDecoder(r.Body).Decode(&req); err != nil { log.Println("error on reading json") return } query := ` update users set name = ?, address = ?, updated_at = now() where id = ? ` err := database.Exec(query, req.Name, req.Address, id) if err != nil { log.Println("error updating " + err.Error()) } render.Status(r, 200) render.JSON(w, r, "success updating user") }
package main

import (
	"errors"
	"fmt"
	"testing"
)

// TestRecover demonstrates that a deferred recover intercepts a panic raised
// in the same goroutine: the deferred function runs, logs the panic value,
// and the test completes normally.
func TestRecover(t *testing.T) {
	fmt.Println("Enter function main")
	defer func() {
		if p := recover(); p != nil {
			fmt.Printf("panic: %s\n", p)
		}
		fmt.Println("Exit function defer")
	}()
	panic(errors.New("something error"))
	// The original printed "Exit function main" here; that statement was
	// unreachable after panic (flagged by go vet) and has been removed.
}
// Package kubernetes implements the platform service for running the EDP
// Admin Console on plain Kubernetes (as opposed to OpenShift).
package kubernetes

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"

	"github.com/epmd-edp/admin-console-operator/v2/pkg/apis/edp/v1alpha1"
	"github.com/epmd-edp/admin-console-operator/v2/pkg/client/admin_console"
	adminConsoleSpec "github.com/epmd-edp/admin-console-operator/v2/pkg/service/admin_console/spec"
	platformHelper "github.com/epmd-edp/admin-console-operator/v2/pkg/service/platform/helper"
	edpCompApi "github.com/epmd-edp/edp-component-operator/pkg/apis/v1/v1alpha1"
	edpCompClient "github.com/epmd-edp/edp-component-operator/pkg/client"
	keycloakV1Api "github.com/epmd-edp/keycloak-operator/pkg/apis/v1/v1alpha1"
	"github.com/pkg/errors"
	appsV1Api "k8s.io/api/apps/v1"
	coreV1Api "k8s.io/api/core/v1"
	"k8s.io/api/extensions/v1beta1"
	authV1Api "k8s.io/api/rbac/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	appsV1Client "k8s.io/client-go/kubernetes/typed/apps/v1"
	coreV1Client "k8s.io/client-go/kubernetes/typed/core/v1"
	extensionsV1Client "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	authV1Client "k8s.io/client-go/kubernetes/typed/rbac/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)

var log = logf.Log.WithName("platform")

// K8SService bundles the typed Kubernetes clients and the EDP CRD clients
// needed to provision Admin Console resources. Populate it via Init.
type K8SService struct {
	Scheme                *runtime.Scheme
	CoreClient            coreV1Client.CoreV1Client
	ExtensionsV1Client    extensionsV1Client.ExtensionsV1beta1Client
	EdpClient             admin_console.EdpV1Client
	k8sUnstructuredClient client.Client
	AppsClient            appsV1Client.AppsV1Client
	AuthClient            authV1Client.RbacV1Client
	edpCompClient         edpCompClient.EDPComponentV1Client
}

// CreateDeployConf creates the Admin Console Deployment if it does not exist
// yet. url becomes the HOST environment variable of the container.
func (service K8SService) CreateDeployConf(ac v1alpha1.AdminConsole, url string) error {
	// Pointer-able locals: k8s API fields take *bool/*int pointers.
	k := "false"
	t := true
	f := false
	var rc int32 = 1
	var id int64 = 1001
	// Optional base path: empty unless Spec.BasePath is set.
	basePath := ""
	if len(ac.Spec.BasePath) != 0 {
		basePath = fmt.Sprintf("/%v", ac.Spec.BasePath)
	}
	l := platformHelper.GenerateLabels(ac.Name)
	do := &appsV1Api.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      ac.Name,
			Namespace: ac.Namespace,
			Labels:    l,
		},
		Spec: appsV1Api.DeploymentSpec{
			Replicas: &rc,
			Selector: &metav1.LabelSelector{
				MatchLabels: l,
			},
			Template: coreV1Api.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: l,
				},
				Spec: coreV1Api.PodSpec{
					ImagePullSecrets: ac.Spec.ImagePullSecrets,
					Containers: []coreV1Api.Container{
						{
							// Locked-down container: non-privileged, read-only root FS.
							SecurityContext: &coreV1Api.SecurityContext{
								Privileged:               &f,
								ReadOnlyRootFilesystem:   &t,
								AllowPrivilegeEscalation: &f,
							},
							Name:            ac.Name,
							Image:           fmt.Sprintf("%s:%s", ac.Spec.Image, ac.Spec.Version),
							ImagePullPolicy: coreV1Api.PullAlways,
							Env: []coreV1Api.EnvVar{
								{
									Name: "NAMESPACE",
									ValueFrom: &coreV1Api.EnvVarSource{
										FieldRef: &coreV1Api.ObjectFieldSelector{
											FieldPath: "metadata.namespace",
										},
									},
								},
								{
									Name:  "HOST",
									Value: url,
								},
								{
									Name:  "BASE_PATH",
									Value: basePath,
								},
								{
									Name:  "EDP_ADMIN_CONSOLE_VERSION",
									Value: ac.Spec.Version,
								},
								{
									// Cluster-wide settings come from the edp-config ConfigMap.
									Name: "EDP_VERSION",
									ValueFrom: &coreV1Api.EnvVarSource{
										ConfigMapKeyRef: &coreV1Api.ConfigMapKeySelector{
											LocalObjectReference: coreV1Api.LocalObjectReference{
												Name: "edp-config",
											},
											Key: "edp_version",
										},
									},
								},
								{
									// Keycloak is disabled at creation time; PatchDeploymentEnv
									// flips this later when Keycloak integration is configured.
									Name:  "AUTH_KEYCLOAK_ENABLED",
									Value: k,
								},
								{
									Name: "DNS_WILDCARD",
									ValueFrom: &coreV1Api.EnvVarSource{
										ConfigMapKeyRef: &coreV1Api.ConfigMapKeySelector{
											LocalObjectReference: coreV1Api.LocalObjectReference{
												Name: "edp-config",
											},
											Key: "dns_wildcard",
										},
									},
								},
								{
									// DB credentials come from the db-admin-console Secret.
									Name: "PG_USER",
									ValueFrom: &coreV1Api.EnvVarSource{
										SecretKeyRef: &coreV1Api.SecretKeySelector{
											LocalObjectReference: coreV1Api.LocalObjectReference{
												Name: "db-admin-console",
											},
											Key: "username",
										},
									},
								},
								{
									Name: "PG_PASSWORD",
									ValueFrom: &coreV1Api.EnvVarSource{
										SecretKeyRef: &coreV1Api.SecretKeySelector{
											LocalObjectReference: coreV1Api.LocalObjectReference{
												Name: "db-admin-console",
											},
											Key: "password",
										},
									},
								},
								{
									Name:  "INTEGRATION_STRATEGIES",
									Value: ac.Spec.EdpSpec.IntegrationStrategies,
								},
								{
									Name:  "BUILD_TOOLS",
									Value: "maven",
								},
								{
									Name:  "TEST_REPORT_TOOLS",
									Value: ac.Spec.EdpSpec.TestReportTools,
								},
								{
									Name:  "DEPLOYMENT_SCRIPT",
									Value: "helm-chart",
								},
								{
									Name:  "PLATFORM_TYPE",
									Value: "kubernetes",
								},
								{
									Name:  "VERSIONING_TYPES",
									Value: "default,edp",
								},
								{
									Name:  "CI_TOOLS",
									Value: "Jenkins,GitLab CI",
								},
								{
									Name:  "PERF_DATA_SOURCES",
									Value: "Sonar,Jenkins,GitLab",
								},
								{
									Name: "VCS_INTEGRATION_ENABLED",
									ValueFrom: &coreV1Api.EnvVarSource{
										ConfigMapKeyRef: &coreV1Api.ConfigMapKeySelector{
											LocalObjectReference: coreV1Api.LocalObjectReference{
												Name: "edp-config",
											},
											Key: "vcs_integration_enabled",
										},
									},
								},
								{
									Name: "EDP_NAME",
									ValueFrom: &coreV1Api.EnvVarSource{
										ConfigMapKeyRef: &coreV1Api.ConfigMapKeySelector{
											LocalObjectReference: coreV1Api.LocalObjectReference{
												Name: "edp-config",
											},
											Key: "edp_name",
										},
									},
								},
							},
							Ports: []coreV1Api.ContainerPort{
								{
									ContainerPort: adminConsoleSpec.AdminConsolePort,
								},
							},
							// TCP probes against the console port; liveness waits
							// longer (180s) than readiness (60s) before first check.
							LivenessProbe: &coreV1Api.Probe{
								FailureThreshold:    5,
								InitialDelaySeconds: 180,
								PeriodSeconds:       20,
								SuccessThreshold:    1,
								Handler: coreV1Api.Handler{
									TCPSocket: &coreV1Api.TCPSocketAction{
										Port: intstr.FromInt(adminConsoleSpec.AdminConsolePort),
									},
								},
							},
							ReadinessProbe: &coreV1Api.Probe{
								FailureThreshold:    5,
								InitialDelaySeconds: 60,
								PeriodSeconds:       20,
								SuccessThreshold:    1,
								Handler: coreV1Api.Handler{
									TCPSocket: &coreV1Api.TCPSocketAction{
										Port: intstr.FromInt(adminConsoleSpec.AdminConsolePort),
									},
								},
							},
							TerminationMessagePath: "/dev/termination-log",
							Resources: coreV1Api.ResourceRequirements{
								Requests: map[coreV1Api.ResourceName]resource.Quantity{
									coreV1Api.ResourceMemory: resource.MustParse(adminConsoleSpec.MemoryRequest),
								},
							},
						},
					},
					ServiceAccountName: ac.Name,
					// Run the whole pod as the non-root uid/gid 1001.
					SecurityContext: &coreV1Api.PodSecurityContext{
						RunAsUser:    &id,
						RunAsGroup:   &id,
						RunAsNonRoot: &t,
						FSGroup:      &id,
					},
				},
			},
			Strategy: appsV1Api.DeploymentStrategy{
				Type: appsV1Api.RollingUpdateDeploymentStrategyType,
			},
		},
	}
	// Owner reference makes the Deployment garbage-collected with the CR.
	if err := controllerutil.SetControllerReference(&ac, do, service.Scheme); err != nil {
		return err
	}
	d, err := service.AppsClient.Deployments(do.Namespace).Get(do.Name, metav1.GetOptions{})
	// Only proceed to create when the Deployment is missing. Note that when
	// it already exists (err == nil) this returns nil without updating it,
	// and any other Get error is returned as-is.
	if !k8serrors.IsNotFound(err) {
		return err
	}
	dbEnvVars, err := service.GenerateDbSettings(ac)
	if err != nil {
		return errors.Wrap(err, "Failed to generate environment variables for shared database!")
	}
	do.Spec.Template.Spec.Containers[0].Env = append(do.Spec.Template.Spec.Containers[0].Env, dbEnvVars...)
	d, err = service.AppsClient.Deployments(do.Namespace).Create(do)
	if err != nil {
		return err
	}
	// NOTE(review): the "Namespace" key is paired with d.Name here — looks
	// like it should be d.Namespace; verify and fix in a follow-up.
	log.Info("Deployment has been created", "Namespace", d.Name, "Name", d.Name)
	return nil
}

// CreateSecurityContext is a no-op on plain Kubernetes (security contexts are
// set inline on the Deployment); it exists to satisfy the platform interface.
func (service K8SService) CreateSecurityContext(ac v1alpha1.AdminConsole) error {
	return nil
}

// CreateRole creates the namespaced "edp-resources-admin" Role granting the
// console write access to EDP custom resources. Idempotent: returns nil when
// the Role already exists.
func (service K8SService) CreateRole(ac v1alpha1.AdminConsole) error {
	ro := &authV1Api.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "edp-resources-admin",
			Namespace: ac.Namespace,
		},
		Rules: []authV1Api.PolicyRule{
			{
				APIGroups: []string{"*"},
				Resources: []string{"codebases", "codebasebranches", "cdpipelines", "stages", "codebases/finalizers", "codebasebranches/finalizers", "cdpipelines/finalizers", "stages/finalizers"},
				Verbs:     []string{"get", "create", "update", "delete", "patch"},
			},
		},
	}
	if err := controllerutil.SetControllerReference(&ac, ro, service.Scheme); err != nil {
		return err
	}
	r, err := service.AuthClient.Roles(ro.Namespace).Get(ro.Name, metav1.GetOptions{})
	// Already present: nothing to do.
	if err == nil {
		return nil
	}
	if !k8serrors.IsNotFound(err) {
		return err
	}
	log.V(1).Info("Creating Role for Admin Console", "Namespace", ro.Namespace, "Name", ro.Name)
	r, err = service.AuthClient.Roles(ro.Namespace).Create(ro)
	if err != nil {
		return err
	}
	log.Info("Role for Admin Console created", "Namespace", r.Namespace, "Name", r.Name)
	return nil
}

// CreateClusterRole creates the cluster-scoped "admin-console-sc-access" role
// allowing the console to read storage classes. Idempotent.
// NOTE(review): a ClusterRole is cluster-scoped while the owning CR is
// namespaced, so the SetControllerReference owner link may be ineffective
// for garbage collection — verify.
func (service K8SService) CreateClusterRole(ac v1alpha1.AdminConsole) error {
	cro := &authV1Api.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "admin-console-sc-access",
		},
		Rules: []authV1Api.PolicyRule{
			{
				APIGroups: []string{"storage.k8s.io"},
				Resources: []string{"storageclasses"},
				Verbs:     []string{"get", "list"},
			},
		},
	}
	if err := controllerutil.SetControllerReference(&ac, cro, service.Scheme); err != nil {
		return err
	}
	cr, err := service.AuthClient.ClusterRoles().Get(cro.Name, metav1.GetOptions{})
	if err == nil {
		return nil
	}
	if !k8serrors.IsNotFound(err) {
		return err
	}
	log.V(1).Info("Creating Role for Admin Console", "Name", cro.Name, "ClusterRoleName", cro.Name)
	cr, err = service.AuthClient.ClusterRoles().Create(cro)
	if err != nil {
		return err
	}
	log.Info("Role for Admin Console created", "Name", cr.Name, "ClusterRoleName", cro.Name)
	return nil
}

// CreateClusterRoleBinding binds the console ServiceAccount to the named
// ClusterRole. The binding name is "<name>-<namespace>". Idempotent.
func (service K8SService) CreateClusterRoleBinding(ac v1alpha1.AdminConsole, binding string) error {
	crbo := &authV1Api.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("%s-%s", ac.Name, ac.Namespace),
		},
		RoleRef: authV1Api.RoleRef{
			Kind: "ClusterRole",
			Name: binding,
		},
		Subjects: []authV1Api.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      ac.Name,
				Namespace: ac.Namespace,
			},
		},
	}
	if err := controllerutil.SetControllerReference(&ac, crbo, service.Scheme); err != nil {
		return err
	}
	crb, err := service.AuthClient.ClusterRoleBindings().Get(crbo.Name, metav1.GetOptions{})
	if err == nil {
		return nil
	}
	if !k8serrors.IsNotFound(err) {
		return err
	}
	log.V(1).Info("Creating a new ClusterRoleBinding for Admin Console", "Namespace", ac.Namespace, "Name", ac.Name)
	crb, err = service.AuthClient.ClusterRoleBindings().Create(crbo)
	if err != nil {
		return err
	}
	log.Info("ClusterRoleBinding has been created", "Namespace", crb.Namespace, "Name", crb.Name)
	return nil
}

// CreateRoleBinding binds the console ServiceAccount to a Role or ClusterRole
// (kind selects which) in the CR's namespace. Idempotent.
func (service K8SService) CreateRoleBinding(ac v1alpha1.AdminConsole, name string, binding string, kind string) error {
	rbo := &authV1Api.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ac.Namespace,
		},
		RoleRef: authV1Api.RoleRef{
			Kind: kind,
			Name: binding,
		},
		Subjects: []authV1Api.Subject{
			{
				Kind: "ServiceAccount",
				Name: ac.Name,
			},
		},
	}
	if err := controllerutil.SetControllerReference(&ac, rbo, service.Scheme); err != nil {
		return err
	}
	rb, err := service.AuthClient.RoleBindings(rbo.Namespace).Get(rbo.Name, metav1.GetOptions{})
	if err == nil {
		return nil
	}
	if !k8serrors.IsNotFound(err) {
		return err
	}
	log.V(1).Info("Creating a new RoleBinding for Admin Console", "Namespace", ac.Namespace, "Name", ac.Name)
	rb, err = service.AuthClient.RoleBindings(rbo.Namespace).Create(rbo)
	if err != nil {
		return err
	}
	log.Info("RoleBinding has been created", "Namespace", rb.Namespace, "Name", rb.Name)
	return nil
}

// GetDisplayName is not applicable on plain Kubernetes; always returns "".
func (service K8SService) GetDisplayName(ac v1alpha1.AdminConsole) (string, error) {
	return "", nil
}

// GenerateDbSettings builds the PG_* / DB_ENABLED environment variables for
// the shared database. With DbSpec disabled it returns only DB_ENABLED=false;
// otherwise Name, Hostname, and Port must all be non-empty.
func (service K8SService) GenerateDbSettings(ac v1alpha1.AdminConsole) ([]coreV1Api.EnvVar, error) {
	if !ac.Spec.DbSpec.Enabled {
		return []coreV1Api.EnvVar{
			{
				Name:  "DB_ENABLED",
				Value: "false",
			},
		}, nil
	}

	log.V(1).Info("Generating DB settings for Admin Console ", "Namespace", ac.Namespace, "Name", ac.Name)
	if platformHelper.ContainsEmptyString(ac.Spec.DbSpec.Name, ac.Spec.DbSpec.Hostname, ac.Spec.DbSpec.Port) {
		return nil, errors.New("One or many DB settings field are empty!")
	}
	return []coreV1Api.EnvVar{
		{
			Name:  "PG_HOST",
			Value: ac.Spec.DbSpec.Hostname,
		},
		{
			Name:  "PG_PORT",
			Value: ac.Spec.DbSpec.Port,
		},
		{
			Name:  "PG_DATABASE",
			Value: ac.Spec.DbSpec.Name,
		},
		{
			Name:  "DB_ENABLED",
			Value: strconv.FormatBool(ac.Spec.DbSpec.Enabled),
		},
	}, nil
}

// GenerateKeycloakSettings builds the KEYCLOAK_* environment variables.
// Client credentials are referenced from the "admin-console-client" Secret.
// Returns an empty slice when Keycloak integration is disabled.
func (service K8SService) GenerateKeycloakSettings(ac v1alpha1.AdminConsole, keycloakUrl string) ([]coreV1Api.EnvVar, error) {
	log.V(1).Info("Generating Keycloak settings for Admin Console", "Namespace", ac.Namespace, "Name", ac.Name)
	if !ac.Spec.KeycloakSpec.Enabled {
		return []coreV1Api.EnvVar{}, nil
	}
	return []coreV1Api.EnvVar{
		{
			Name: "KEYCLOAK_CLIENT_ID",
			ValueFrom: &coreV1Api.EnvVarSource{
				SecretKeyRef: &coreV1Api.SecretKeySelector{
					LocalObjectReference: coreV1Api.LocalObjectReference{
						Name: "admin-console-client",
					},
					Key: "username",
				},
			},
		},
		{
			Name: "KEYCLOAK_CLIENT_SECRET",
			ValueFrom: &coreV1Api.EnvVarSource{
				SecretKeyRef: &coreV1Api.SecretKeySelector{
					LocalObjectReference: coreV1Api.LocalObjectReference{
						Name: "admin-console-client",
					},
					Key: "password",
				},
			},
		},
		{
			Name:  "KEYCLOAK_URL",
			Value: keycloakUrl,
		},
		{
			Name:  "AUTH_KEYCLOAK_ENABLED",
			Value: strconv.FormatBool(ac.Spec.KeycloakSpec.Enabled),
		},
	}, nil
}

// PatchDeploymentEnv merges env into the console container's environment and
// applies the result as a strategic-merge patch. A missing Deployment is
// treated as a no-op.
func (service K8SService) PatchDeploymentEnv(ac v1alpha1.AdminConsole, env []coreV1Api.EnvVar) error {
	if len(env) == 0 {
		return nil
	}

	dc, err := service.AppsClient.Deployments(ac.Namespace).Get(ac.Name, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			log.Info("Deployment not found!", "Namespace", ac.Namespace, "Name", ac.Name)
			return nil
		}
		return err
	}

	container, err := platformHelper.SelectContainer(dc.Spec.Template.Spec.Containers, ac.Name)
	if err != nil {
		return err
	}

	container.Env = platformHelper.UpdateEnv(container.Env, env)

	// NOTE(review): this appends the modified container to the slice instead
	// of replacing the original entry, so the marshalled object carries two
	// containers with the same name. Strategic merge patch keys containers by
	// name, which may make this work in practice — confirm, and consider
	// replacing in place instead.
	dc.Spec.Template.Spec.Containers = append(dc.Spec.Template.Spec.Containers, container)

	jsonDc, err := json.Marshal(dc)
	if err != nil {
		return err
	}

	_, err = service.AppsClient.Deployments(dc.Namespace).Patch(dc.Name, types.StrategicMergePatchType, jsonDc)
	if err != nil {
		return err
	}
	return err
}

// GetExternalUrl builds the https URL for the console from its Ingress.
// Returns (nil, nil) when the Ingress does not exist.
// NOTE(review): indexes Rules[0] and Paths[0] without length checks — an
// Ingress with no rules would panic; verify this cannot happen here.
func (service K8SService) GetExternalUrl(namespace string, name string) (*string, error) {
	ingress, err := service.ExtensionsV1Client.Ingresses(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			log.Info("Ingress not found", "Namespace", namespace, "Name", name)
			return nil, nil
		}
		return nil, err
	}

	routeScheme := "https"
	u := fmt.Sprintf("%s://%s%s", routeScheme, ingress.Spec.Rules[0].Host, strings.TrimRight(ingress.Spec.Rules[0].HTTP.Paths[0].Path, platformHelper.UrlCutset))
	return &u, nil
}

// IsDeploymentReady reports whether exactly one updated and available replica
// is running (the Deployment is created with Replicas=1).
func (service K8SService) IsDeploymentReady(instance v1alpha1.AdminConsole) (bool, error) {
	deploymentConfig, err := service.AppsClient.Deployments(instance.Namespace).Get(instance.Name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	if deploymentConfig.Status.UpdatedReplicas == 1 && deploymentConfig.Status.AvailableReplicas == 1 {
		return true, nil
	}
	return false, nil
}

// CreateSecret creates an Opaque Secret owned by the CR. Idempotent: an
// existing Secret of the same name is left untouched.
func (service K8SService) CreateSecret(ac v1alpha1.AdminConsole, name string, data map[string][]byte) error {
	labels := platformHelper.GenerateLabels(ac.Name)

	consoleSecretObject := &coreV1Api.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ac.Namespace,
			Labels:    labels,
		},
		Data: data,
		Type: "Opaque",
	}

	if err := controllerutil.SetControllerReference(&ac, consoleSecretObject, service.Scheme); err != nil {
		return err
	}

	consoleSecret, err := service.CoreClient.Secrets(consoleSecretObject.Namespace).Get(consoleSecretObject.Name, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			msg := fmt.Sprintf("Creating a new Secret %s/%s for Admin Console", consoleSecretObject.Namespace, consoleSecretObject.Name)
			log.V(1).Info(msg)
			consoleSecret, err = service.CoreClient.Secrets(consoleSecretObject.Namespace).Create(consoleSecretObject)
			if err != nil {
				return err
			}
			log.Info(fmt.Sprintf("Secret %s/%s has been created", consoleSecret.Namespace, consoleSecret.Name))
			// Successfully created
			return nil
		}
		// Some error occurred
		return err
	}
	// Nothing to do
	return nil
}

// CreateService creates the console ClusterIP Service (port 8080, target port
// named after the container). Idempotent.
func (service K8SService) CreateService(ac v1alpha1.AdminConsole) error {
	labels := platformHelper.GenerateLabels(ac.Name)

	consoleServiceObject := &coreV1Api.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      ac.Name,
			Namespace: ac.Namespace,
			Labels:    labels,
		},
		Spec: coreV1Api.ServiceSpec{
			Selector: labels,
			Ports: []coreV1Api.ServicePort{
				{
					// Target port is addressed by name (the container name).
					TargetPort: intstr.IntOrString{StrVal: ac.Name},
					Port:       8080,
				},
			},
		},
	}

	if err := controllerutil.SetControllerReference(&ac, consoleServiceObject, service.Scheme); err != nil {
		return err
	}

	consoleService, err := service.CoreClient.Services(ac.Namespace).Get(ac.Name, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			msg := fmt.Sprintf("Creating a new service %s/%s for Admin Console %s", consoleServiceObject.Namespace, consoleServiceObject.Name, ac.Name)
			log.V(1).Info(msg)

			consoleService, err = service.CoreClient.Services(consoleServiceObject.Namespace).Create(consoleServiceObject)
			if err != nil {
				return err
			}

			log.Info(fmt.Sprintf("Service %s/%s has been created", consoleService.Namespace, consoleService.Name))
			return nil
		}
		return err
	}
	return nil
}

// CreateServiceAccount creates the console ServiceAccount. Idempotent.
func (service K8SService) CreateServiceAccount(ac v1alpha1.AdminConsole) error {
	labels := platformHelper.GenerateLabels(ac.Name)

	consoleServiceAccountObject := &coreV1Api.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:      ac.Name,
			Namespace: ac.Namespace,
			Labels:    labels,
		},
	}

	if err := controllerutil.SetControllerReference(&ac, consoleServiceAccountObject, service.Scheme); err != nil {
		return err
	}

	consoleServiceAccount, err := service.CoreClient.ServiceAccounts(consoleServiceAccountObject.Namespace).Get(consoleServiceAccountObject.Name, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			msg := fmt.Sprintf("Creating ServiceAccount %s/%s for Admin Console %s", consoleServiceAccountObject.Namespace, consoleServiceAccountObject.Name, ac.Name)
			log.V(1).Info(msg)
			consoleServiceAccount, err = service.CoreClient.ServiceAccounts(consoleServiceAccountObject.Namespace).Create(consoleServiceAccountObject)
			if err != nil {
				return err
			}
			msg = fmt.Sprintf("ServiceAccount %s/%s has been created", consoleServiceAccount.Namespace, consoleServiceAccount.Name)
			log.Info(msg)
			// Successfully created
			return nil
		}
		// Some error occurred
		return err
	}
	// Nothing to do
	return nil
}

// CreateExternalEndpoint creates the Ingress that exposes the console under
// "<name>-<namespace>.<dns_wildcard>". The Service must already exist.
func (service K8SService) CreateExternalEndpoint(ac v1alpha1.AdminConsole) error {
	c, err := service.GetConfigmapData(ac.Namespace, "edp-config")
	if err != nil {
		return err
	}

	log.V(1).Info("Creating Admin Console external endpoint.", "Namespace", ac.Namespace, "Name", ac.Name)

	l := platformHelper.GenerateLabels(ac.Name)

	so, err := service.CoreClient.Services(ac.Namespace).Get(ac.Name, metav1.GetOptions{})
	if err != nil {
		log.Info("Console Service has not been found", "Namespace", ac.Namespace, "Name", ac.Name)
		return err
	}

	// NOTE(review): default is "/" here but "" in CreateDeployConf's
	// BASE_PATH — confirm the asymmetry is intentional.
	basePath := "/"
	if len(ac.Spec.BasePath) != 0 {
		basePath = fmt.Sprintf("/%v", ac.Spec.BasePath)
	}

	io := &v1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      ac.Name,
			Namespace: ac.Namespace,
			Labels:    l,
		},
		Spec: v1beta1.IngressSpec{
			Rules: []v1beta1.IngressRule{
				{
					Host: fmt.Sprintf("%s-%s.%s", ac.Name, ac.Namespace, c["dns_wildcard"]),
					IngressRuleValue: v1beta1.IngressRuleValue{
						HTTP: &v1beta1.HTTPIngressRuleValue{
							Paths: []v1beta1.HTTPIngressPath{
								{
									Path: basePath,
									Backend: v1beta1.IngressBackend{
										ServiceName: ac.Name,
										ServicePort: intstr.IntOrString{
											// NOTE(review): the Service's TargetPort is created
											// with StrVal set (CreateService), so IntVal is likely
											// 0 here — verify the intended backend port.
											IntVal: so.Spec.Ports[0].TargetPort.IntVal,
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	if err := controllerutil.SetControllerReference(&ac, io, service.Scheme); err != nil {
		return err
	}

	i, err := service.ExtensionsV1Client.Ingresses(io.Namespace).Get(io.Name, metav1.GetOptions{})
	// Only create when missing; an existing Ingress (err == nil) returns nil.
	if !k8serrors.IsNotFound(err) {
		return err
	}

	i, err = service.ExtensionsV1Client.Ingresses(io.Namespace).Create(io)
	if err != nil {
		return err
	}

	log.Info("Ingress has been created", "Namespace", i.Namespace, "Name", i.Name)
	return nil
}

// GetConfigmapData returns the Data of the named ConfigMap. A missing
// ConfigMap yields an empty map and no error.
func (service K8SService) GetConfigmapData(namespace string, name string) (map[string]string, error) {
	c, err := service.CoreClient.ConfigMaps(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			log.Info("Config Map not found", "Namespace", namespace, "Name", name)
			return map[string]string{}, nil
		}
		return map[string]string{}, err
	}
	return c.Data, nil
}

// GetSecret returns the Data of the named Secret. A missing Secret yields
// (nil, nil) — note the asymmetry with GetConfigmapData, which returns an
// empty map.
func (service K8SService) GetSecret(namespace string, name string) (map[string][]byte, error) {
	out := map[string][]byte{}
	adminDBSecret, err := service.CoreClient.Secrets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			log.Info(fmt.Sprintf("Secret %v in namespace %v not found", name, namespace))
			return nil, nil
		}
		return out, err
	}
	out = adminDBSecret.Data
	return out, nil
}

// GetAdminConsole fetches the AdminConsole CR matching ac's name/namespace.
func (service K8SService) GetAdminConsole(ac v1alpha1.AdminConsole) (*v1alpha1.AdminConsole, error) {
	AdminConsoleInstance, err := service.EdpClient.Get(ac.Name, ac.Namespace, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return AdminConsoleInstance, nil
}

// GetPods lists all pods in the given namespace.
func (service K8SService) GetPods(namespace string) (*coreV1Api.PodList, error) {
	PodList, err := service.CoreClient.Pods(namespace).List(metav1.ListOptions{})
	if err != nil {
		return &coreV1Api.PodList{}, err
	}
	return PodList, nil
}

// UpdateAdminConsole persists changes to the AdminConsole CR.
func (service K8SService) UpdateAdminConsole(ac v1alpha1.AdminConsole) (*v1alpha1.AdminConsole, error) {
	instance, err := service.EdpClient.Update(&ac)
	if err != nil {
		return nil, err
	}
	return instance, nil
}

// Init constructs all typed clients from the given REST config and wires them
// into the receiver. Pointer receiver: this is the only mutating method.
func (service *K8SService) Init(config *rest.Config, scheme *runtime.Scheme, k8sClient *client.Client) error {
	coreClient, err := coreV1Client.NewForConfig(config)
	if err != nil {
		return errors.Wrap(err, "Core Client initialization failed!")
	}
	edpClient, err := admin_console.NewForConfig(config)
	if err != nil {
		return errors.Wrap(err, "EDP Client initialization failed!")
	}
	appsClient, err := appsV1Client.NewForConfig(config)
	if err != nil {
		return errors.New("appsV1 client initialization failed!")
	}
	extensionsClient, err := extensionsV1Client.NewForConfig(config)
	if err != nil {
		return errors.New("extensionsV1 client initialization failed!")
	}
	rbacClient, err := authV1Client.NewForConfig(config)
	if err != nil {
		// NOTE(review): copy-pasted message — this is the RBAC client, not
		// extensionsV1; the underlying err is also dropped here (errors.New
		// vs errors.Wrap used above). Consider fixing both.
		return errors.New("extensionsV1 client initialization failed!")
	}
	compCl, err := edpCompClient.NewForConfig(config)
	if err != nil {
		return errors.Wrap(err, "failed to init edp component client")
	}

	service.EdpClient = *edpClient
	service.CoreClient = *coreClient
	service.Scheme = scheme
	service.k8sUnstructuredClient = *k8sClient
	service.AppsClient = *appsClient
	service.ExtensionsV1Client = *extensionsClient
	service.AuthClient = *rbacClient
	service.edpCompClient = *compCl

	return nil
}

// CreateKeycloakClient creates the KeycloakClient CR if it does not already
// exist. Idempotent.
func (service K8SService) CreateKeycloakClient(kc *keycloakV1Api.KeycloakClient) error {
	nsn := types.NamespacedName{
		Namespace: kc.Namespace,
		Name:      kc.Name,
	}

	err := service.k8sUnstructuredClient.Get(context.TODO(), nsn, kc)
	if err != nil {
		if k8serrors.IsNotFound(err) {
			err := service.k8sUnstructuredClient.Create(context.TODO(), kc)
			if err != nil {
				return errors.Wrapf(err, "Failed to create Keycloak client %s/%s", kc.Namespace, kc.Name)
			}
			log.Info(fmt.Sprintf("Keycloak client %s/%s created", kc.Namespace, kc.Name))
			// Successfully created
			return nil
		}
		// Some error occurred
		return errors.Wrapf(err, "Failed to create Keycloak client %s/%s", kc.Namespace, kc.Name)
	}
	// Nothing to do
	return nil
}

// GetKeycloakClient fetches a KeycloakClient CR by name and namespace.
func (service K8SService) GetKeycloakClient(name string, namespace string) (keycloakV1Api.KeycloakClient, error) {
	out := keycloakV1Api.KeycloakClient{}
	nsn := types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	}

	err := service.k8sUnstructuredClient.Get(context.TODO(), nsn, &out)
	if err != nil {
		return out, err
	}
	// Success
	return out, nil
}

// CreateEDPComponentIfNotExist registers the console in the EDP components
// list (used by the EDP overview UI) unless it is already registered.
func (service K8SService) CreateEDPComponentIfNotExist(ac v1alpha1.AdminConsole, url string, icon string) error {
	comp, err := service.edpCompClient.
		EDPComponents(ac.Namespace).
		Get(ac.Name, metav1.GetOptions{})
	if err == nil {
		log.Info("edp component already exists", "name", comp.Name)
		return nil
	}
	if k8serrors.IsNotFound(err) {
		return service.createEDPComponent(ac, url, icon)
	}
	return errors.Wrapf(err, "failed to get edp component: %v", ac.Name)
}

// createEDPComponent builds and creates the EDPComponent CR for the console.
func (service K8SService) createEDPComponent(ac v1alpha1.AdminConsole, url string, icon string) error {
	obj := &edpCompApi.EDPComponent{
		ObjectMeta: metav1.ObjectMeta{
			Name: ac.Name,
		},
		Spec: edpCompApi.EDPComponentSpec{
			Type:    "admin-console",
			Url:     url,
			Icon:    icon,
			Visible: true,
		},
	}
	if err := controllerutil.SetControllerReference(&ac, obj, service.Scheme); err != nil {
		return err
	}
	_, err := service.edpCompClient.
		EDPComponents(ac.Namespace).
		Create(obj)
	return err
}
package main

import "fmt"

// search performs a linear scan of array and returns the index of the first
// element equal to target, or -1 when no element matches.
func search(array []int, target int) int {
	for i := 0; i < len(array); i++ {
		if array[i] == target {
			return i
		}
	}
	return -1
}

// main exercises search with one present and one absent value.
func main() {
	values := []int{1, 3, 4, 6, 7, 9, 10, 11, 13}

	// Positive case: 7 is present, so its index is printed.
	fmt.Println(search(values, 7))

	// Negative case: 5 is absent, so -1 is printed.
	fmt.Println(search(values, 5))
}
package main

import "fmt"

// Vertex is a 2-D point with integer coordinates.
type Vertex struct {
	X, Y int
}

// ShowX prints the X coordinate to stdout.
// Receiver renamed from "this" to "v": Go convention uses short receiver
// names, never this/self.
func (v *Vertex) ShowX() {
	fmt.Println(v.X)
}

func main() {
	v := Vertex{2, 4}
	v.ShowX()
}
/*
 * Copyright 1999-2020 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"context"
	"testing"

	"github.com/chaosblade-io/chaosblade-spec-go/channel"
	"github.com/chaosblade-io/chaosblade-spec-go/spec"

	"github.com/chaosblade-io/chaosblade-exec-os/exec/bin"
)

// Test_startDropNet_failed verifies that startDropNet (defined elsewhere in
// this package) exits with code 1 when given no match criteria at all.
// bin.ExitFunc is stubbed so the exit code can be observed without
// terminating the test process.
func Test_startDropNet_failed(t *testing.T) {
	var exitCode int
	bin.ExitFunc = func(code int) {
		exitCode = code
	}

	tests := []struct {
		sourceIp        string
		destinationIp   string
		sourcePort      string
		destinationPort string
		stringPattern   string
		networkTraffic  string
	}{
		// All-empty input: no filter criteria supplied, must be rejected.
		{"", "", "", "", "", ""},
	}
	for _, tt := range tests {
		startDropNet(tt.sourceIp, tt.destinationIp, tt.sourcePort, tt.destinationPort, tt.stringPattern, tt.networkTraffic)
		if exitCode != 1 {
			t.Errorf("unexpected result: %d, expected result: %d", exitCode, 1)
		}
	}
}

// Test_handleDropSpecifyPort drives handleDropSpecifyPort (defined elsewhere
// in this package) against a mocked local channel whose Run returns a canned
// response, and checks the resulting exit code.
func Test_handleDropSpecifyPort(t *testing.T) {
	type input struct {
		sourceIp        string
		destinationIp   string
		sourcePort      string
		destinationPort string
		stringPattern   string
		networkTraffic  string
		response        *spec.Response
	}
	type expect struct {
		exitCode   int
		invokeTime int
	}

	tests := []struct {
		input  input
		expect expect
	}{
		{input{"", "", "80", "", "", "", spec.ReturnFail(spec.Code[spec.CommandNotFound], "iptables command not found")}, expect{1, 1}},
		{input{"", "", "", "80", "", "", spec.ReturnFail(spec.Code[spec.CommandNotFound], "iptables command not found")}, expect{1, 1}},
		// NOTE(review): exitCode is not reset between iterations; this last
		// case expecting 0 presumably relies on handleDropSpecifyPort calling
		// bin.ExitFunc(0) on success — confirm.
		{input{"", "", "80", "", "", "", spec.ReturnSuccess("success")}, expect{0, 0}},
	}

	var exitCode int
	bin.ExitFunc = func(code int) {
		exitCode = code
	}
	// stopDropNetFunc is a package-level seam; count how often the rollback
	// path is invoked.
	// NOTE(review): expect.invokeTime is declared per case but never
	// asserted against invokeTime below — consider adding that check.
	var invokeTime int
	stopDropNetFunc = func(sourceIp, destinationIp, sourcePort, destinationPort, stringPattern, networkTraffic string) {
		invokeTime++
	}
	for _, tt := range tests {
		// Fresh mock channel per case; its RunFunc returns the canned response.
		cl = channel.NewMockLocalChannel()
		mockChannel := cl.(*channel.MockLocalChannel)
		mockChannel.RunFunc = func(ctx context.Context, script, args string) *spec.Response {
			return tt.input.response
		}
		handleDropSpecifyPort(tt.input.sourceIp, tt.input.destinationIp, tt.input.sourcePort, tt.input.destinationPort, tt.input.stringPattern, tt.input.networkTraffic, context.Background())
		if exitCode != tt.expect.exitCode {
			t.Errorf("unexpected result: %d, expected result: %d", exitCode, tt.expect.exitCode)
		}
	}
}
package main

import (
	"bytes"
	"fmt"
)

func main() {
	s := "1234567"
	s = comma(s)
	fmt.Println(s)
}

// comma inserts commas every three digits counting from the right, e.g.
// "1234567" becomes "1,234,567". Strings of three characters or fewer are
// returned unchanged.
func comma(s string) string {
	n := len(s)
	if n <= 3 {
		return s
	}
	var buf bytes.Buffer
	// Width of the leading group: 1..3 characters.
	head := n % 3
	if head == 0 {
		head = 3
	}
	buf.WriteString(s[:head])
	// Everything after the leading group falls into exact groups of three.
	for i := head; i < n; i += 3 {
		buf.WriteByte(',')
		buf.WriteString(s[i : i+3])
	}
	return buf.String()
}
package main

import (
	"container/ring"
	"encoding/json"
	"errors"
	"github.com/satori/go.uuid"
	"sync"
)

// BalanceStrategy names the load-balancing algorithm a Service uses to pick
// an endpoint.
type BalanceStrategy string

const (
	Round_Robin    BalanceStrategy = "round-robin"
	Source_Hashing BalanceStrategy = "source-hashing"
)

// Service is a load-balanced upstream with a set of endpoint ids. The
// embedded RWMutex guards the exported state (Endpoints, CurrentState);
// slb guards the endpoint ring/list used by the balancer.
type Service struct {
	UniqueId        string          `json:"id" bson:"_id,omitempty"`
	ServiceUrl      string          `json:"serviceUrl"`
	Nested          bool            `json:"nested"`
	Endpoints       map[string]bool `json:"endpoints"`
	Tps             int             `json:"tps"`
	CurrentState    State           `json:"state" bson:"state"`
	BalanceStrategy BalanceStrategy `json:"balanceStrategy"`
	sync.RWMutex    `json:"-" bson:"-"`
	serviceEPRing   *ring.Ring
	serviceEPList   []*Endpoint
	slb             sync.RWMutex
}

// Id returns the service's unique identifier (set by Init).
func (s *Service) Id() string {
	return s.UniqueId
}

// EndpointsSet returns a copy of the endpoint-id set so callers can iterate
// without holding the service lock.
func (s *Service) EndpointsSet() map[string]bool {
	s.RLock()
	defer s.RUnlock()
	result := make(map[string]bool, len(s.Endpoints))
	for k, v := range s.Endpoints {
		result[k] = v
	}
	return result
}

// AddEndpoint registers an endpoint id. Returns AlreadyPresentError (defined
// elsewhere in this package) when the id is already registered.
func (s *Service) AddEndpoint(id string) error {
	s.Lock()
	defer s.Unlock()
	if s.Endpoints[id] {
		return AlreadyPresentError
	}
	s.Endpoints[id] = true
	return nil
}

// RemoveEndpoint unregisters an endpoint id; removing an absent id is a no-op.
func (s *Service) RemoveEndpoint(id string) {
	s.Lock()
	defer s.Unlock()
	delete(s.Endpoints, id)
}

// State returns the service's current lifecycle state.
func (s *Service) State() State {
	s.RLock()
	defer s.RUnlock()
	return s.CurrentState
}

// SetState replaces the service's lifecycle state.
func (s *Service) SetState(state State) {
	s.Lock()
	defer s.Unlock()
	s.CurrentState = state
}

// Init assigns a fresh UUID and fills in defaults (Active state, round-robin
// balancing, empty endpoint set). Fails if ServiceUrl is unset.
// NOTE(review): uuid.NewV4() used as a single value — satori/go.uuid changed
// NewV4 to return (UUID, error) in some versions; confirm the pinned version.
func (s *Service) Init() error {
	s.UniqueId = uuid.NewV4().String()
	if s.CurrentState == "" {
		s.SetState(Active)
	}
	if s.BalanceStrategy == "" {
		s.BalanceStrategy = Round_Robin
	}
	if s.Endpoints == nil {
		s.Endpoints = make(map[string]bool)
	}
	if s.ServiceUrl == "" {
		return errors.New("Missing serviceUrl definition")
	}
	return nil
}

// String renders the service as JSON; on marshalling failure it returns the
// error text instead.
func (s *Service) String() string {
	ss, err := json.Marshal(s)
	if err != nil {
		return err.Error()
	}
	return string(ss)
}
package cmd import ( "bufio" "fmt" "io" "io/ioutil" "log" "os" "regexp" "strconv" "strings" "sync" "github.com/btm6084/utilities/fileutil" "github.com/spf13/cobra" "github.com/spf13/viper" "golang.org/x/crypto/ssh/terminal" ) const ( // VERSION is the current version of goack VERSION = `1.0.0` ) var ( printLock sync.Mutex c chan bool openSearches = 0 searchLimit = 10 filesProcessed = 0 extensions = regexp.MustCompile(`[.]svg|jpg|png$`) ) type options struct { After int Before int Config Config FileNameOnly bool FollowSyms bool Help bool Insensitive bool Inverse bool // IsTerminal records whether we're writing to a terminal or not. e.g. when piping output. IsTerminal bool MatchOnly bool NoColor bool AllowBinary bool Regex *regexp.Regexp Skip string Term string } func init() { searchCmd.Flags().BoolP("help", "h", false, "Display Help Text") viper.BindPFlag("help", searchCmd.Flags().Lookup("help")) searchCmd.Flags().BoolP("nameonly", "l", false, "Display File Name Only") viper.BindPFlag("nameonly", searchCmd.Flags().Lookup("nameonly")) searchCmd.Flags().BoolP("follow", "f", false, "Follow Symlinks") viper.BindPFlag("follow", searchCmd.Flags().Lookup("follow")) searchCmd.Flags().BoolP("insensitive", "i", false, "Case Insensitive Search") viper.BindPFlag("insensitive", searchCmd.Flags().Lookup("insensitive")) searchCmd.Flags().BoolP("inverse", "v", false, "Print Only Lines that Do Not Match") viper.BindPFlag("inverse", searchCmd.Flags().Lookup("inverse")) searchCmd.Flags().BoolP("match-only", "m", false, "Print Only the Matching Text") viper.BindPFlag("match-only", searchCmd.Flags().Lookup("match-only")) searchCmd.Flags().BoolP("no-color", "", false, "Print Lines without Color") viper.BindPFlag("no-color", searchCmd.Flags().Lookup("no-color")) searchCmd.Flags().IntP("after", "A", 0, "Number of Lines to Print After Matches") viper.BindPFlag("after", searchCmd.Flags().Lookup("after")) searchCmd.Flags().IntP("before", "B", 0, "Number of Lines to Print Before Matches") 
viper.BindPFlag("before", searchCmd.Flags().Lookup("before")) searchCmd.Flags().IntP("context", "C", 0, "Number of Lines to Print Before and After Matches. Overrides Before and After Values") viper.BindPFlag("context", searchCmd.Flags().Lookup("context")) searchCmd.Flags().StringP("skip", "k", "", "Skip searching files whose filenames contain this string.") viper.BindPFlag("skip", searchCmd.Flags().Lookup("skip")) searchCmd.Flags().BoolP("binary", "b", false, "Allow searching binary files") viper.BindPFlag("binary", searchCmd.Flags().Lookup("binary")) c = make(chan bool) } var searchCmd = &cobra.Command{ Args: cobra.RangeArgs(1, 2), Use: "goack [flags] <search term> [directory]", Short: "Search for patterns in text files", Long: `Use regular expressions to search text. Defaults to current directory Version: ` + VERSION, Run: search, } func search(cmd *cobra.Command, args []string) { opts := options{ After: viper.GetInt("after"), Before: viper.GetInt("before"), Config: loadConfig(), FileNameOnly: viper.GetBool("nameonly"), FollowSyms: viper.GetBool("follow"), Help: viper.GetBool("help"), Insensitive: viper.GetBool("insensitive"), Inverse: viper.GetBool("inverse"), IsTerminal: terminal.IsTerminal(int(os.Stdout.Fd())), MatchOnly: viper.GetBool("match-only"), AllowBinary: viper.GetBool("binary"), NoColor: viper.GetBool("no-color"), Skip: viper.GetString("skip"), Term: args[0], } ctx := viper.GetInt("context") if ctx > 0 { opts.After = ctx opts.Before = ctx } if opts.Insensitive { opts.Regex = regexp.MustCompile("((?i)" + args[0] + ")") } else { opts.Regex = regexp.MustCompile("(" + args[0] + ")") } // Don't color match-only results. if opts.MatchOnly { opts.NoColor = true } file := "." if len(args) > 1 { file = args[1] } if opts.Help { cmd.Help() return } stat, _ := os.Stdin.Stat() if (stat.Mode() & os.ModeCharDevice) == 0 { processFile(os.Stdin, "stdin", &opts, false) } else { fileSystemSearch(file, &opts) // Wait for any open searches to wrap up. 
for i := 0; i < openSearches; i++ { <-c } } } // Recursively decend through a directory and search each regular file found. // If file is a regular file, search it and return. func fileSystemSearch(file string, opts *options) { filesProcessed++ if !opts.FollowSyms && fileutil.IsSymlink(file) { return } if fileutil.IsDir(file) { if opts.Config.IgnoreDir(file) { return } files, err := ioutil.ReadDir(file) if err != nil { log.Println(err) return } for _, f := range files { if opts.Skip != "" && strings.Contains(f.Name(), opts.Skip) { continue } if opts.Config.IgnoreDir(f.Name()) || opts.Config.IgnoreExt(f.Name()) { continue } fileSystemSearch(strings.TrimRight(file, "/")+"/"+strings.TrimLeft(f.Name(), "/"), opts) } return } if fileutil.IsFile(file) { openSearches++ if openSearches > searchLimit { <-c openSearches-- } go fileSearch(file, opts) return } } func fileSearch(file string, opts *options) { f, err := os.Open(file) if err != nil { c <- false return } defer f.Close() processFile(f, file, opts, true) } func processFile(f *os.File, fileName string, opts *options, async bool) { var lines []string var lineNums []int var lineNum int reader := bufio.NewReader(f) for { // Read the next line of the file. s, err := reader.ReadString('\n') if err != nil && err != io.EOF { if async { c <- false } return } // We don't search binary files. if !opts.AllowBinary && fileutil.IsBinaryData([]byte(s)) { if async { c <- false } return } // Break if there's nothing to do. if len(s) == 0 && err == io.EOF { break } lineNum++ lines = append(lines, s) // Find any matches in a non-inverse situation. if !opts.Inverse && opts.Regex.MatchString(s) { lineNums = append(lineNums, lineNum) if err == io.EOF { break } continue } // Find any matches in an inverse situation. 
if opts.Inverse && !opts.Regex.MatchString(s) { lineNums = append(lineNums, lineNum) if err == io.EOF { break } continue } } if len(lineNums) > 0 { Print(fileName, lines, lineNums, opts) } if async { c <- true } } // Execute performs the root command. func Execute() { if err := searchCmd.Execute(); err != nil { fmt.Println(err) os.Exit(1) } } // Print the matching items from a given file. func Print(file string, lines []string, lineNums []int, opts *options) { file = strings.TrimPrefix(file, "./") if opts.IsTerminal && !opts.NoColor { file = "\x1b[96m" + file + "\x1b[0m" } printLock.Lock() // Suppress output of the filename when piping / non-terminal. if opts.IsTerminal { fmt.Println(file) } // Don't process contents if FilenameOnly if opts.FileNameOnly { printLock.Unlock() return } for _, n := range lineNums { // Before for i := opts.Before; i > 0; i-- { if n-i-1 >= 0 { if opts.MatchOnly { matches := getMatchingText(lines[n-i-1], opts.Regex) for _, m := range matches { writeLine(m, strconv.Itoa(n-i)+"-", opts) } } else { writeLine(lines[n-i-1], strconv.Itoa(n-i)+"-", opts) } } } if opts.MatchOnly { matches := getMatchingText(lines[n-1], opts.Regex) for _, m := range matches { writeLine(m, strconv.Itoa(n)+"-", opts) } } else { writeLine(lines[n-1], strconv.Itoa(n)+":", opts) } // After for i := 1; i <= opts.After; i++ { if n+i < len(lines) { if opts.MatchOnly { matches := getMatchingText(lines[n+1], opts.Regex) for _, m := range matches { writeLine(m, strconv.Itoa(n+i)+"-", opts) } } else { writeLine(lines[n+i], strconv.Itoa(n+i)+"+", opts) } } } if opts.Before > 0 || opts.After > 0 { fmt.Println() } } fmt.Println() printLock.Unlock() } // getMatchingText returns ONLY the matching text. 
func getMatchingText(s string, re *regexp.Regexp) []string { var matches []string for _, m := range re.FindAllStringSubmatch(s, -1) { matches = append(matches, m[0]) } return matches } func writeLine(s, l string, opts *options) { s = strings.TrimRight(s, "\n") if opts.IsTerminal && !opts.NoColor { s = opts.Regex.ReplaceAllString(s, "\x1b[30;42m$1\x1b[0m") l = "\x1b[93m" + l + "\x1b[0m" } // Suppress output of line numbers when piping / non-terminal. if opts.IsTerminal { fmt.Println(l, s) } else { fmt.Println(s) } }
package lexers

import (
	"embed"
	"io/fs"

	"github.com/alecthomas/chroma/v2"
)

//go:embed embedded
var embedded embed.FS

// GlobalLexerRegistry is the global LexerRegistry of Lexers. It is built
// once at init time by registering every XML lexer definition embedded
// under embedded/.
var GlobalLexerRegistry = func() *chroma.LexerRegistry {
	reg := chroma.NewLexerRegistry()
	// index(reg)
	paths, err := fs.Glob(embedded, "embedded/*.xml")
	if err != nil {
		// Embedded FS glob can only fail on a bad pattern — programmer error.
		panic(err)
	}
	for _, path := range paths {
		reg.Register(chroma.MustNewXMLLexer(embedded, path))
	}
	return reg
}()

// Names of all lexers, optionally including aliases.
func Names(withAliases bool) []string {
	return GlobalLexerRegistry.Names(withAliases)
}

// Get a Lexer by name, alias or file extension.
func Get(name string) chroma.Lexer {
	return GlobalLexerRegistry.Get(name)
}

// MatchMimeType attempts to find a lexer for the given MIME type.
func MatchMimeType(mimeType string) chroma.Lexer {
	return GlobalLexerRegistry.MatchMimeType(mimeType)
}

// Match returns the first lexer matching filename.
func Match(filename string) chroma.Lexer {
	return GlobalLexerRegistry.Match(filename)
}

// Register a Lexer with the global registry.
func Register(lexer chroma.Lexer) chroma.Lexer {
	return GlobalLexerRegistry.Register(lexer)
}

// Analyse the text content and return the best-matching lexer.
func Analyse(text string) chroma.Lexer {
	return GlobalLexerRegistry.Analyse(text)
}

// PlaintextRules is used for the fallback lexer as well as the explicit
// plaintext lexer.
func PlaintextRules() chroma.Rules {
	return chroma.Rules{
		"root": []chroma.Rule{
			{`.+`, chroma.Text, nil},
			{`\n`, chroma.Text, nil},
		},
	}
}

// Fallback lexer if no other is found.
var Fallback chroma.Lexer = chroma.MustNewLexer(&chroma.Config{
	Name:      "fallback",
	Filenames: []string{"*"},
	Priority:  -1,
}, PlaintextRules)
// gridIndexer: a two-level spatial index — a fixed top-level grid over
// China, refined by a quadtree inside each top-level cell.
package GridSearch

import (
	"github.com/rcrowley/go-metrics"
	"log"
	"os"
)

// gridIndexer fans incoming GridData out to per-cell memory indexers.
type gridIndexer struct {
	gridTopArray  []gridTop       // top grid array, one entry per top-level cell
	InputDataFlow chan []GridData // Data Index Flow (batches to be indexed)
	memIxr        []*memIndexer   // Memory Index, one per top-level cell
	indexMeter    metrics.Meter   // Indexing speed monitoring
	el            *EngineLog      // Engine backup system (write-ahead log)
}

// NewGridIndexer builds the top-level grid geometry, one memIndexer per
// cell, and registers the indexing-rate meter.
func NewGridIndexer() *gridIndexer {
	gi := gridIndexer{
		gridTopArray:  make([]gridTop, gridNum, gridNum),
		InputDataFlow: make(chan []GridData, 1000),
		memIxr:        make([]*memIndexer, gridNum, gridNum),
		indexMeter:    metrics.NewMeter(),
		el:            NewEngineLog(),
	}

	//Divide the top grid: cell i sits at row i/GRID_COL_NUM, col i%GRID_COL_NUM,
	// measured from the top-left of the China bounding rect.
	var i int32 = 0
	for i = 0; i < gridNum; i++ {
		tRow := i / GRID_COL_NUM
		tCol := i % GRID_COL_NUM
		gi.gridTopArray[i].pRect = &rect{
			chinaRect().Left + GRID_TOP_WIDTH*tCol,
			chinaRect().Top - GRID_TOP_HEIGHT*tRow,
			chinaRect().Left + GRID_TOP_WIDTH*tCol + GRID_TOP_WIDTH,
			chinaRect().Top - GRID_TOP_HEIGHT*tRow - GRID_TOP_HEIGHT,
		}
		//Memory indexed array
		gi.memIxr[i] = NewMemIndexer(i)
	}
	metrics.Register("indexing", gi.indexMeter)
	return &gi
}

// close shuts the input channel and closes every per-cell indexer.
func (gi *gridIndexer) close() {
	close(gi.InputDataFlow)
	var i int32 = 0
	for i = 0; i < gridNum; i++ {
		gi.memIxr[i].close()
	}
}

// worker drains batches from inFlow, logs each datum, resolves its
// top-level cell and bottom (quadtree) grid id, and forwards it to the
// matching memIndexer.
func (gi *gridIndexer) worker(inFlow <-chan []GridData) {
	for data := range inFlow {
		for _, ix := range data {
			//Indexers single data
			gi.el.LogData(&ix)
			gridTopID := getGridTopIndexKey(ix.LO, ix.LA)
			if gridTopID < 0 || gridTopID >= gridNum {
				continue
			}
			// NOTE(review): always true — the range check above already
			// excludes -1; this inner condition is redundant.
			if gridTopID != -1 {
				bid := gi.getBottomGridID(gridTopID, &ix)
				gi.indexMeter.Mark(1)
				gi.memIxr[gridTopID].dataFlow <- mData{ix.ID, bid}
			}
		}
	}
}

// indexing starts the metrics logger plus INDEXTHREADNUM worker goroutines.
func (gi *gridIndexer) indexing() {
	go metrics.Log(metrics.DefaultRegistry, 60e9,
		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
	for i := 0; i < INDEXTHREADNUM; i++ {
		go gi.worker(gi.InputDataFlow)
	}
}

// indexDocs submits a batch for asynchronous indexing.
func (gi *gridIndexer) indexDocs(pts []GridData) {
	gi.InputDataFlow <- pts
}

/*
The point is mapped to the underlying quadtree mesh and returns the
underlying grid quadtree id number
*/
func (gi *gridIndexer) getBottomGridID(topGridID int32, d *GridData) int32 {
	var layer, curID int32 = 0, 0
	return get4TreeBottomGridID(gi.gridTopArray[topGridID].pRect, &point{d.LO, d.LA}, layer, curID)
}

// get4TreeBottomGridID recursively descends the quadtree: at each layer the
// point's quadrant qd is found and the child id is curID*4+1+qd (heap-style
// numbering), stopping one short of TREEDEPTH.
func get4TreeBottomGridID(curRect *rect, p *point, curLayer, curID int32) int32 {
	if curLayer >= TREEDEPTH-1 {
		return curID
	}
	qd := curRect.getQD(p)
	nextRect := curRect.getQDRect(qd)
	return get4TreeBottomGridID(&nextRect, p, curLayer+1, curID*4+1+qd)
}
package eventbus

import (
	"time"

	uuid "github.com/satori/go.uuid"
)

// Event is the contract every bus event satisfies: a unique identity,
// a type name, and the time it was triggered.
type Event interface {
	// TriggeredAt reports when the event occurred.
	TriggeredAt() time.Time
	// EventID returns the event's unique identifier.
	EventID() uuid.UUID
	// EventName returns the event's type name.
	EventName() string
}
package core

import (
	"flag"
	"fmt"
	"mqtts/utils"
	"os"
	"strconv"
	"strings"
)

// ScanArgs carries every resolved scan option for one run.
type ScanArgs struct {
	CommonScan bool // NOTE(review): never set by CmdArgsParser — always false; confirm intended.
	UnauthScan bool // unauthenticated-access scan
	AnyPwdScan bool // any-credentials login scan
	SystemInfo bool // $SYS topic info scan (single target only)
	BruteScan  bool // username/password brute force
	AutoScan   bool // pick scan type automatically per target
	TopicsList bool // topic list scan (single target only)
	WaitTime   int  // seconds to listen during info/topic scans
	UserPath   string
	PwdPath    string
	TargetFile string
	GoroutinePoolSize int
}

// CmdArgsParser defines and parses all CLI flags, validates the
// target/scan-type combination (exiting on misuse), registers the
// single target or target file, and returns the resolved ScanArgs.
func CmdArgsParser() *ScanArgs {
	var host string
	var port int
	var username string
	var password string
	var protocol string
	var clientId string
	var unauthScan bool
	var anyPwdScan bool
	var autoScan bool
	var systemInfo bool
	var topicsList bool
	var userPath string
	var pwdPath string
	var waitTime int
	var bruteScan bool
	var targetFile string
	var goroutinePoolSize int
	var showVersion bool

	flag.StringVar(&host, "t", "", "input target ip or host")
	flag.IntVar(&port, "p", 1883, "input target port default is 1883/tcp")
	flag.StringVar(&username, "username", "", "input username")
	flag.StringVar(&password, "password", "", "input password")
	flag.StringVar(&protocol, "protocol", "", "input protocol tcp/ssl/ws/wss")
	flag.StringVar(&clientId, "clientid", "", "input password default is 6 random string")
	flag.BoolVar(&unauthScan, "u", false, "unauth scan (support batch scanning)")
	flag.BoolVar(&anyPwdScan, "a", false, "any username/password login scan for EMQX emqx_plugin_template plugin (support batch scanning)")
	flag.BoolVar(&bruteScan, "b", false, "username and password brute force")
	flag.BoolVar(&autoScan, "au", false, "automatic scanning according to service conditions")
	flag.BoolVar(&systemInfo, "s", false, "mqtt server system topic info scan (batch scanning is not supported)")
	flag.BoolVar(&topicsList, "ts", false, "mqtt server topic list scan (batch scanning is not supported)")
	flag.StringVar(&userPath, "nf", "", "brute force username list file path, default is ./username.txt")
	flag.StringVar(&pwdPath, "pf", "", "brute force password list file path, default is ./password.txt")
	flag.IntVar(&waitTime, "w", 15, "systemInfo scan and topics scan wait time, unit: seconds, default 15s")
	flag.StringVar(&targetFile, "tf", "", "batch scan target file, line format split with \\t host port [protocol | clientId | username | password]")
	flag.IntVar(&goroutinePoolSize, "g", 10, "batch scan goroutine pool size")
	flag.BoolVar(&showVersion, "v", false, "show version")
	flag.Parse()

	if showVersion {
		fmt.Println(utils.Version)
		os.Exit(0)
	}

	// A target is required: either a single host or a target file, not both.
	if strings.EqualFold(host, "") && strings.EqualFold(targetFile, "") {
		flag.Usage()
		os.Exit(0)
	}
	if !strings.EqualFold(host, "") && !strings.EqualFold(targetFile, "") {
		utils.OutputErrorMessageWithoutOption("Single targets and batch targets cannot be set at the same time")
		os.Exit(0)
	}
	if !strings.EqualFold(host, "") {
		setSingleTarget(protocol, host, port, clientId, username, password)
	}
	if !strings.EqualFold(targetFile, "") {
		setTargets(targetFile)
	}

	// At least one scan type must be chosen.
	if !unauthScan && !anyPwdScan && !systemInfo && !topicsList && !bruteScan && !autoScan {
		utils.OutputErrorMessageWithoutOption("Must specify the type of scan")
		os.Exit(0)
	}
	// Info/topic scans listen on one broker and cannot run in batch mode.
	if !strings.EqualFold(targetFile, "") && (systemInfo || topicsList) {
		utils.OutputErrorMessageWithoutOption("Topic info scanning and topic list scanning do not support batch")
		os.Exit(0)
	}

	return &ScanArgs{
		UnauthScan:        unauthScan,
		AnyPwdScan:        anyPwdScan,
		BruteScan:         bruteScan,
		AutoScan:          autoScan,
		SystemInfo:        systemInfo,
		TopicsList:        topicsList,
		WaitTime:          waitTime,
		UserPath:          userPath,
		PwdPath:           pwdPath,
		GoroutinePoolSize: goroutinePoolSize,
	}
}

// setSingleTarget registers one target from the individual CLI flags.
func setSingleTarget(protocol string, host string, port int, clientId string, username string, password string) {
	SetTargetInfo(protocol, host, port, clientId, username, password)
}

// setTargets loads targets from file, one per line:
// host port [protocol clientId username password], missing trailing
// fields are padded with "".
// NOTE(review): lines are split on a single space although the -tf help
// text says fields are separated with \t — confirm the intended separator.
func setTargets(targetFile string) {
	lines, err := utils.ReadFileByLine(targetFile)
	if err != nil {
		utils.OutputErrorMessageWithoutOption("Load target file failed")
		os.Exit(0)
	}
	for num, line := range lines {
		targetInfo := strings.Split(line, " ")
		// NOTE(review): no `continue` after this error — the line still falls
		// through; it is only skipped later because Atoi("") fails. Confirm.
		if len(targetInfo) < 2 {
			utils.OutputErrorMessageWithoutOption("Target format or data error in line " + strconv.Itoa(num+1) + ": " + line)
		}
		// Pad optional fields out to the full six columns.
		if len(targetInfo) < 6 {
			for range utils.IterRange(6 - len(targetInfo)) {
				targetInfo = append(targetInfo, "")
			}
		}
		host := targetInfo[0]
		port, err := strconv.Atoi(targetInfo[1])
		if err != nil {
			utils.OutputErrorMessageWithoutOption("Target port parse error in line " + strconv.Itoa(num+1) + ": " + line)
			continue
		}
		protocol := targetInfo[2]
		clientId := targetInfo[3]
		username := targetInfo[4]
		password := targetInfo[5]
		SetTargetInfo(protocol, host, port, clientId, username, password)
	}
}
package main

import (
	"fmt"
	"net"
	"sync"
	"time"

	st "github.com/TheSmallBoat/carlo/streaming_transmit"
)

// Demo/stress driver: starts a streaming-transmit server on :4444, then
// hammers it through a single client from 4 goroutines sending 256Ki
// messages each, printing pool metrics along the way.
func main() {
	// check panics on any error — acceptable for a throwaway demo binary.
	check := func(err error) {
		if err != nil {
			panic(err)
		}
	}

	st.StartPoolMetrics()

	ln, err := net.Listen("tcp", ":4444")
	check(err)

	client := &st.Client{Addr: ln.Addr().String()}

	var server st.Server
	go func() {
		// Tear everything down once Serve returns.
		defer ln.Close()
		defer server.Shutdown()
		defer client.Shutdown()
		check(server.Serve(ln))
	}()

	var wg sync.WaitGroup
	n := 4 // number of concurrent senders
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < 1024*256; j++ {
				check(client.Send([]byte(fmt.Sprintf("[%d %d] Hello from Go!", i, j))))
			}
			fmt.Println(st.JsonStringPoolMetrics())
		}(i)
	}
	wg.Wait()

	st.ReleasePoolMetrics()
	// Give in-flight work a moment to drain before the final metrics dump.
	time.Sleep(200 * time.Millisecond)
	fmt.Println(st.JsonStringPoolMetrics())
}
package postgresql

import (
	"context"
	"database/sql"
	"database/sql/driver"
	"fmt"

	"github.com/adamluzsi/frameless/ports/guard"
)

// Locker is a PG-based shared mutex implementation.
// It depends on the existence of the frameless_locker_locks table.
// Locker is safe to call from different application instances,
// ensuring that only one of them can hold the lock concurrently.
//
// Mechanism: Lock INSERTs the lock name inside an open transaction; the
// primary-key conflict makes a competing INSERT block until that
// transaction ends, so holding the open tx IS holding the lock.
type Locker struct {
	Name       string
	Connection Connection
}

const queryLock = `INSERT INTO frameless_locker_locks (name) VALUES ($1);`

// Lock acquires the named lock and returns a context carrying it.
// Locking a context that already holds this lock is a no-op
// (re-entrant per context).
func (l Locker) Lock(ctx context.Context) (context.Context, error) {
	if ctx == nil {
		return nil, fmt.Errorf("missing context.Context")
	}
	if _, ok := l.lookup(ctx); ok {
		return ctx, nil
	}
	ctx, err := l.Connection.BeginTx(ctx)
	if err != nil {
		return nil, err
	}
	// Blocks here until any competing holder releases (tx ends).
	_, err = l.Connection.ExecContext(ctx, queryLock, l.Name)
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithCancel(ctx)
	return context.WithValue(ctx, lockerCtxKey{}, &lockerCtxValue{
		ctx:    ctx,
		cancel: cancel,
	}), nil
}

// Unlock releases the lock by rolling back the transaction opened in
// Lock. It returns guard.ErrNoLock when ctx does not hold the lock;
// unlocking twice is a no-op.
func (l Locker) Unlock(ctx context.Context) error {
	if ctx == nil {
		return guard.ErrNoLock
	}
	lck, ok := l.lookup(ctx)
	if !ok {
		return guard.ErrNoLock
	}
	if lck.done {
		return nil
	}
	if err := l.Connection.RollbackTx(lck.ctx); err != nil {
		// A dead connection after ctx cancellation reports the cancellation
		// cause instead of the driver error.
		// NOTE(review): errors.Is(err, driver.ErrBadConn) would also match
		// wrapped errors — confirm the driver returns it unwrapped.
		if driver.ErrBadConn == err && ctx.Err() != nil {
			return ctx.Err()
		}
		return err
	}
	lck.done = true
	err := ctx.Err()
	lck.cancel()
	return err
}

type (
	lockerCtxKey struct{}
	// lockerCtxValue is what Lock stashes in the context: the tx-bearing
	// context, its cancel func, and a released flag.
	lockerCtxValue struct {
		tx     *sql.Tx // NOTE(review): appears unused — tx state travels in ctx; confirm before removing.
		done   bool
		cancel func()
		ctx    context.Context
	}
)

// lockerMigrationConfig creates the lock table and renames it to the
// newer "guard" name, keeping the old name alive as a view.
var lockerMigrationConfig = MigratorGroup{
	ID: "frameless_locker_locks",
	Steps: []MigratorStep{
		MigrationStep{UpQuery: queryCreateLockerTable},
		MigrationStep{UpQuery: queryRenameLockerTable},
	},
}

const queryCreateLockerTable = `
CREATE TABLE IF NOT EXISTS frameless_locker_locks (
	name TEXT PRIMARY KEY
);
`

const queryRenameLockerTable = `
ALTER TABLE "frameless_locker_locks"
	RENAME TO "frameless_guard_locks";
CREATE VIEW "frameless_locker_locks" AS SELECT * FROM "frameless_guard_locks";
`

// Migrate ensures the locker schema exists and is up to date.
func (l Locker) Migrate(ctx context.Context) error {
	return Migrator{Connection: l.Connection, Group: lockerMigrationConfig}.Migrate(ctx)
}

// lookup extracts the lock value attached by Lock, if any.
func (l Locker) lookup(ctx context.Context) (*lockerCtxValue, bool) {
	v, ok := ctx.Value(lockerCtxKey{}).(*lockerCtxValue)
	return v, ok
}

// LockerFactory yields a named Locker per key value.
type LockerFactory[Key comparable] struct{ Connection Connection }

// Migrate ensures the shared locker schema exists.
func (lf LockerFactory[Key]) Migrate(ctx context.Context) error {
	return Locker{Connection: lf.Connection}.Migrate(ctx)
}

// LockerFor returns a Locker whose name encodes the key's type and value.
func (lf LockerFactory[Key]) LockerFor(key Key) guard.Locker {
	return Locker{Name: fmt.Sprintf("%T:%v", key, key), Connection: lf.Connection}
}
package logger import ( "context" ) type ctxKeyDetails struct{} type ctxValue struct { Super *ctxValue Details []LoggingDetail } func ContextWith(ctx context.Context, lds ...LoggingDetail) context.Context { if len(lds) == 0 { return ctx } var v ctxValue if prev, ok := lookupValue(ctx); ok { v.Super = prev } v.Details = lds return context.WithValue(ctx, ctxKeyDetails{}, &v) } // getLoggingDetailsFromContext returns the details attached to the context func getLoggingDetailsFromContext(ctx context.Context, l *Logger) []LoggingDetail { if ctx == nil { return nil } var details []LoggingDetail if v, ok := lookupValue(ctx); ok { for { details = append(append([]LoggingDetail{}, v.Details...), details...) // unshift if v.Super == nil { break } v = v.Super } } return details } func lookupValue(ctx context.Context) (*ctxValue, bool) { if ptr, ok := ctx.Value(ctxKeyDetails{}).(*ctxValue); ok { return ptr, true } return nil, false }
package login

import (
	"errors"
	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/jrapoport/gothic/core/tokens"
	"github.com/jrapoport/gothic/core/users"
	"github.com/jrapoport/gothic/core/validate"
	"github.com/jrapoport/gothic/models/types/provider"
	"github.com/jrapoport/gothic/models/user"
	"github.com/jrapoport/gothic/store"
)

// UserLogin authenticates a user by email and returns the user on success.
// The whole check-and-stamp sequence runs in one transaction: the user
// must exist, be active, and belong to provider p. For non-external
// (password) providers the password is verified; external providers skip
// the password check. On success LoginAt is stamped with current UTC time.
func UserLogin(conn *store.Connection, p provider.Name, email, pw string) (*user.User, error) {
	email, err := validate.Email(email)
	if err != nil {
		return nil, err
	}
	var u *user.User
	err = conn.Transaction(func(tx *store.Connection) error {
		u, err = users.GetUserWithEmail(tx, email)
		if err != nil {
			return err
		}
		if !u.IsActive() {
			return errors.New("inactive user")
		}
		if u.Provider != p {
			return errors.New("invalid provider")
		}
		if !p.IsExternal() {
			err = u.Authenticate(pw)
			if err != nil {
				err = fmt.Errorf("incorrect password %w", err)
				return err
			}
		}
		now := time.Now().UTC()
		u.LoginAt = &now
		// Persist only the login_at column.
		return tx.Model(u).Update("login_at", u.LoginAt).Error
	})
	if err != nil {
		return nil, err
	}
	return u, nil
}

// UserLogout logs a user out and revokes all their refresh tokens.
func UserLogout(conn *store.Connection, userID uuid.UUID) error {
	return tokens.RevokeAllRefreshTokens(conn, userID)
}
package service import ( "encoding/json" "fmt" patternUtils "github.com/layer5io/meshery/models/pattern/utils" "github.com/layer5io/meshkit/models/oam/core/v1alpha1" meshkube "github.com/layer5io/meshkit/utils/kubernetes" v1 "k8s.io/api/core/v1" ) func Deploy(kubeClient *meshkube.Client, oamComp v1alpha1.Component, oamConfig v1alpha1.Configuration, isDel bool) error { svc, err := createSvcFromComp(oamComp) if err != nil { return err } if isDel { return patternUtils.DeleteK8sResource( kubeClient.DynamicKubeClient, "", "v1", "services", svc.Namespace, svc.Name, ) } return patternUtils.CreateK8sResource( kubeClient.DynamicKubeClient, "", "v1", "services", svc, false, ) } func createSvcFromComp(oamComp v1alpha1.Component) (v1.Service, error) { svc := v1.Service{} svc.SetName(oamComp.Name) svc.SetNamespace(oamComp.Namespace) byt, err := json.Marshal(oamComp.Spec.Settings) if err != nil { return svc, fmt.Errorf("failed to construct service from the settings") } if err := json.Unmarshal(byt, &svc.Spec); err != nil { return svc, fmt.Errorf("failed to construct service from the settings") } return svc, nil }
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//

package store

import (
	"testing"
	"time"

	"github.com/mattermost/mattermost-cloud/internal/testlib"
	"github.com/mattermost/mattermost-cloud/model"
	"github.com/pborman/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestIsInstallationBackupRunning verifies that an installation reports a
// running backup only after a backup in a requested state exists.
func TestIsInstallationBackupRunning(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)

	installation := setupStableInstallation(t, sqlStore)

	running, err := sqlStore.IsInstallationBackupRunning(installation.ID)
	require.NoError(t, err)
	require.False(t, running)

	backup := &model.InstallationBackup{
		InstallationID: installation.ID,
		State:          model.InstallationBackupStateBackupRequested,
	}
	err = sqlStore.CreateInstallationBackup(backup)
	require.NoError(t, err)

	running, err = sqlStore.IsInstallationBackupRunning(installation.ID)
	require.NoError(t, err)
	require.True(t, running)
}

// TestIsInstallationBackupBeingUsed verifies that a backup counts as "in
// use" only while a restoration or migration that references it is in a
// non-terminal state.
func TestIsInstallationBackupBeingUsed(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)

	installation := setupStableInstallation(t, sqlStore)

	// Create restoration and migration operations not associated with backup.
	notConnectedRestoration := &model.InstallationDBRestorationOperation{
		InstallationID: installation.ID,
		State:          model.InstallationStateDBRestorationInProgress,
	}
	err := sqlStore.CreateInstallationDBRestorationOperation(notConnectedRestoration)
	require.NoError(t, err)
	// NOTE(review): this migration op reuses an installation *restoration*
	// state constant — confirm whether a migration state was intended.
	notConnectedMigration := &model.InstallationDBMigrationOperation{
		InstallationID: installation.ID,
		State:          model.InstallationStateDBRestorationInProgress,
	}
	err = sqlStore.CreateInstallationDBMigrationOperation(notConnectedMigration)
	require.NoError(t, err)

	backup := &model.InstallationBackup{
		InstallationID: installation.ID,
		State:          model.InstallationBackupStateBackupRequested,
	}
	err = sqlStore.CreateInstallationBackup(backup)
	require.NoError(t, err)

	isUsed, err := sqlStore.IsInstallationBackupBeingUsed(backup.ID)
	require.NoError(t, err)
	require.False(t, isUsed)

	// Restoration in progress.
	restorationOp := &model.InstallationDBRestorationOperation{
		InstallationID: installation.ID,
		BackupID:       backup.ID,
		State:          model.InstallationDBRestorationStateRequested,
	}
	err = sqlStore.CreateInstallationDBRestorationOperation(restorationOp)
	require.NoError(t, err)

	isUsed, err = sqlStore.IsInstallationBackupBeingUsed(backup.ID)
	require.NoError(t, err)
	require.True(t, isUsed)

	restorationOp.State = model.InstallationDBRestorationStateSucceeded
	err = sqlStore.UpdateInstallationDBRestorationOperation(restorationOp)
	require.NoError(t, err)

	isUsed, err = sqlStore.IsInstallationBackupBeingUsed(backup.ID)
	require.NoError(t, err)
	require.False(t, isUsed)

	// Migration in progress.
	migrationOp := &model.InstallationDBMigrationOperation{
		InstallationID: installation.ID,
		BackupID:       backup.ID,
		State:          model.InstallationDBMigrationStateRefreshSecrets,
	}
	err = sqlStore.CreateInstallationDBMigrationOperation(migrationOp)
	require.NoError(t, err)

	isUsed, err = sqlStore.IsInstallationBackupBeingUsed(backup.ID)
	require.NoError(t, err)
	require.True(t, isUsed)

	migrationOp.State = model.InstallationDBMigrationStateFailed
	err = sqlStore.UpdateInstallationDBMigrationOperation(migrationOp)
	require.NoError(t, err)

	isUsed, err = sqlStore.IsInstallationBackupBeingUsed(backup.ID)
	require.NoError(t, err)
	require.False(t, isUsed)
}

// TestCreateInstallationBackup checks that creating a backup assigns an ID.
func TestCreateInstallationBackup(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)

	installation := setupStableInstallation(t, sqlStore)

	backup := &model.InstallationBackup{
		InstallationID:       installation.ID,
		State:                model.InstallationBackupStateBackupRequested,
		BackedUpDatabaseType: model.InstallationDatabaseMultiTenantRDSPostgres,
	}

	err := sqlStore.CreateInstallationBackup(backup)
	require.NoError(t, err)
	assert.NotEmpty(t, backup.ID)
}

// TestGetInstallationBackup checks fetch-by-ID, including the miss case.
func TestGetInstallationBackup(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)

	installation1 := setupStableInstallation(t, sqlStore)
	installation2 := setupStableInstallation(t, sqlStore)

	backup1 := &model.InstallationBackup{
		InstallationID: installation1.ID,
		State:          model.InstallationBackupStateBackupRequested,
	}
	err := sqlStore.CreateInstallationBackup(backup1)
	require.NoError(t, err)

	backup2 := &model.InstallationBackup{
		InstallationID: installation2.ID,
		State:          model.InstallationBackupStateBackupRequested,
	}
	err = sqlStore.CreateInstallationBackup(backup2)
	require.NoError(t, err)

	fetchedMeta, err := sqlStore.GetInstallationBackup(backup1.ID)
	require.NoError(t, err)
	assert.Equal(t, backup1, fetchedMeta)

	t.Run("backup not found", func(t *testing.T) {
		// A miss returns (nil, nil), not an error.
		fetchedMeta, err = sqlStore.GetInstallationBackup("non-existent")
		require.NoError(t, err)
		assert.Nil(t, fetchedMeta)
	})
}

// TestGetInstallationBackupsMetadata exercises the list endpoint's filters:
// paging/deleted, installation ID, cluster installation ID, state, and IDs.
// Results are expected newest-first.
func TestGetInstallationBackupsMetadata(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)

	installation1 := setupStableInstallation(t, sqlStore)
	installation2 := setupStableInstallation(t, sqlStore)

	clusterInstallation := &model.ClusterInstallation{
		InstallationID: installation1.ID,
	}
	err := sqlStore.CreateClusterInstallation(clusterInstallation)
	require.NoError(t, err)

	backupsMeta := []*model.InstallationBackup{
		{InstallationID: installation1.ID, State: model.InstallationBackupStateBackupRequested, ClusterInstallationID: clusterInstallation.ID},
		{InstallationID: installation1.ID, State: model.InstallationBackupStateBackupInProgress, ClusterInstallationID: clusterInstallation.ID},
		{InstallationID: installation1.ID, State: model.InstallationBackupStateBackupFailed},
		{InstallationID: installation2.ID, State: model.InstallationBackupStateBackupRequested},
		{InstallationID: installation2.ID, State: model.InstallationBackupStateBackupInProgress},
	}

	for i := range backupsMeta {
		err = sqlStore.CreateInstallationBackup(backupsMeta[i])
		require.NoError(t, err)
		time.Sleep(1 * time.Millisecond) // Ensure RequestAt is different for all installations.
	}

	// Soft-delete one backup so the deleted filters have something to hide.
	err = sqlStore.DeleteInstallationBackup(backupsMeta[2].ID)
	require.NoError(t, err)

	for _, testCase := range []struct {
		description string
		filter      *model.InstallationBackupFilter
		fetchedIds  []string
	}{
		{
			description: "fetch all not deleted",
			filter:      &model.InstallationBackupFilter{Paging: model.AllPagesNotDeleted()},
			fetchedIds:  []string{backupsMeta[4].ID, backupsMeta[3].ID, backupsMeta[1].ID, backupsMeta[0].ID},
		},
		{
			description: "fetch all for installation 1",
			filter:      &model.InstallationBackupFilter{InstallationID: installation1.ID, Paging: model.AllPagesWithDeleted()},
			fetchedIds:  []string{backupsMeta[2].ID, backupsMeta[1].ID, backupsMeta[0].ID},
		},
		{
			description: "fetch all for cluster installation ",
			filter:      &model.InstallationBackupFilter{ClusterInstallationID: clusterInstallation.ID, Paging: model.AllPagesNotDeleted()},
			fetchedIds:  []string{backupsMeta[1].ID, backupsMeta[0].ID},
		},
		{
			description: "fetch for installation 1 without deleted",
			filter:      &model.InstallationBackupFilter{InstallationID: installation1.ID, Paging: model.AllPagesNotDeleted()},
			fetchedIds:  []string{backupsMeta[1].ID, backupsMeta[0].ID},
		},
		{
			description: "fetch requested installations",
			filter:      &model.InstallationBackupFilter{States: []model.InstallationBackupState{model.InstallationBackupStateBackupRequested}, Paging: model.AllPagesNotDeleted()},
			fetchedIds:  []string{backupsMeta[3].ID, backupsMeta[0].ID},
		},
		{
			description: "fetch with IDs",
			filter:      &model.InstallationBackupFilter{IDs: []string{backupsMeta[0].ID, backupsMeta[3].ID, backupsMeta[4].ID}, Paging: model.AllPagesNotDeleted()},
			fetchedIds:  []string{backupsMeta[4].ID, backupsMeta[3].ID, backupsMeta[0].ID},
		},
	} {
		t.Run(testCase.description, func(t *testing.T) {
			fetchedBackups, err := sqlStore.GetInstallationBackups(testCase.filter)
			require.NoError(t, err)

			assert.Equal(t, len(testCase.fetchedIds), len(fetchedBackups))
			for i, b := range fetchedBackups {
				assert.Equal(t, testCase.fetchedIds[i], b.ID)
			}
		})
	}
}

// TestGetUnlockedInstallationBackupPendingWork verifies that only
// unlocked backups in pending states are returned as pending work.
func TestGetUnlockedInstallationBackupPendingWork(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)

	installation := setupStableInstallation(t, sqlStore)

	backup1 := &model.InstallationBackup{
		InstallationID: installation.ID,
		State:          model.InstallationBackupStateBackupRequested,
	}
	err := sqlStore.CreateInstallationBackup(backup1)
	require.NoError(t, err)
	assert.NotEmpty(t, backup1.ID)

	// Succeeded backups are terminal and should not be returned.
	backup2 := &model.InstallationBackup{
		InstallationID: installation.ID,
		State:          model.InstallationBackupStateBackupSucceeded,
	}
	err = sqlStore.CreateInstallationBackup(backup2)
	require.NoError(t, err)
	assert.NotEmpty(t, backup1.ID)

	backupsMeta, err := sqlStore.GetUnlockedInstallationBackupPendingWork()
	require.NoError(t, err)
	assert.Equal(t, 1, len(backupsMeta))
	assert.Equal(t, backup1.ID, backupsMeta[0].ID)

	// Locking the backup removes it from the pending-work set.
	// NOTE(review): variable name "locaked" is a typo for "locked".
	locaked, err := sqlStore.LockInstallationBackup(backup1.ID, "abc")
	require.NoError(t, err)
	assert.True(t, locaked)

	backupsMeta, err = sqlStore.GetUnlockedInstallationBackupPendingWork()
	require.NoError(t, err)
	assert.Equal(t, 0, len(backupsMeta))
}

// TestUpdateInstallationBackup verifies that each partial-update helper
// touches only its own columns (state, scheduling data, start time).
func TestUpdateInstallationBackup(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)

	installation := setupStableInstallation(t, sqlStore)

	backup := &model.InstallationBackup{
		InstallationID: installation.ID,
		State:          model.InstallationBackupStateBackupRequested,
	}
	err := sqlStore.CreateInstallationBackup(backup)
	require.NoError(t, err)
	assert.NotEmpty(t, backup.ID)

	t.Run("update state only", func(t *testing.T) {
		backup.State = model.InstallationBackupStateBackupSucceeded
		backup.StartAt = -1 // sentinel: must NOT be persisted by a state-only update

		errTest := sqlStore.UpdateInstallationBackupState(backup)
		require.NoError(t, errTest)

		fetched, errTest := sqlStore.GetInstallationBackup(backup.ID)
		require.NoError(t, errTest)
		assert.Equal(t, model.InstallationBackupStateBackupSucceeded, fetched.State)
		assert.Equal(t, int64(0), fetched.StartAt)        // Assert start time not updated
		assert.Equal(t, "", fetched.ClusterInstallationID) // Assert CI ID not updated
	})

	t.Run("update data residency only", func(t *testing.T) {
		updatedResidence := &model.S3DataResidence{URL: "s3.amazon.com"}
		clusterInstallationID := "cluster-installation-1"

		backup.StartAt = -1 // sentinel: must NOT be persisted by a scheduling-data update
		backup.DataResidence = updatedResidence
		backup.ClusterInstallationID = clusterInstallationID

		errTest := sqlStore.UpdateInstallationBackupSchedulingData(backup)
		require.NoError(t, errTest)

		fetched, errTest := sqlStore.GetInstallationBackup(backup.ID)
		require.NoError(t, errTest)
		assert.Equal(t, updatedResidence, fetched.DataResidence)
		assert.Equal(t, clusterInstallationID, fetched.ClusterInstallationID)
		assert.Equal(t, int64(0), fetched.StartAt) // Assert start time not updated
	})

	t.Run("update start time", func(t *testing.T) {
		var startTime int64 = 10000
		originalCIId := backup.ClusterInstallationID

		backup.StartAt = startTime
		backup.ClusterInstallationID = "modified-ci-id" // must NOT be persisted by a start-time update

		errTest := sqlStore.UpdateInstallationBackupStartTime(backup)
		require.NoError(t, errTest)

		fetched, errTest := sqlStore.GetInstallationBackup(backup.ID)
		require.NoError(t, errTest)
		assert.Equal(t, startTime, fetched.StartAt)
		assert.Equal(t, originalCIId, fetched.ClusterInstallationID) // Assert ClusterInstallationID not updated
	})
}

// setupStableInstallation creates an installation in the stable state.
func setupStableInstallation(t *testing.T, sqlStore *SQLStore) *model.Installation {
	return setupInstallation(t, sqlStore, model.InstallationStateStable)
}

// setupHibernatingInstallation creates an installation in the hibernating
// state. (Not used in this file's visible tests; kept for siblings.)
func setupHibernatingInstallation(t *testing.T, sqlStore *SQLStore) *model.Installation {
	return setupInstallation(t, sqlStore, model.InstallationStateHibernating)
}

// setupInstallation creates a minimal installation in the given state with
// a random 5-character name.
func setupInstallation(t *testing.T, sqlStore *SQLStore, state string) *model.Installation {
	installation := &model.Installation{
		Name:  uuid.New()[:5],
		State: state,
	}
	createAndCheckInstallation(t, sqlStore, installation)
	return installation
}
/***** Partie commande.go : liste des commandes et traitement en fonction *****/ package serveurchat import ( "./../db" // importation du package db contenant la structure Message notamment "fmt" "runtime" "strings" "time" ) /*** Fonction qui permet d'obtenir le résultat de la commande "/info" ***/ func getInformations() string { var resultat string fmt.Print("Le serveur tourne sous ") switch os := runtime.GOOS; os { case "darwin": resultat = "OS X" case "linux": resultat = "Linux" default: resultat = os } resultat += " avec une architecture " + runtime.GOARCH + " et la version de Go : " + runtime.Version() + "." return resultat } /*** Fonction principale qui en fonction de la commande en paramètre effectue le traitement correspondant ***/ func commande(cmd db.Message, s *Serveur) (string, error) { var resultat string var err error //On casse la chaine en un tableau de string elems := strings.Split(cmd.Contenu, " ") //On regarde le premier élément qui est censé être le nom de la commande et on traitre en fonction switch elems[0] { case "/time": t := time.Now() resultat = t.Format("Monday _2 January 2006 - 15:04") case "/list": resultat = "</br>Liste des commandes :" resultat = resultat + "</br>/time : Donne la date et l'heure courante </br>/list : Liste les commandes disponibles </br>/info : Affiche des informations sur le serveur</br>/names : Affiche la liste des personnes connectees </br>/nbclients : Affiche le nombre de clients connectes actuellement </br>/allUsers : Affiche l'historique des utilisateurs du chat </br>/msg &#39pseudo&#39 &#39contenu&#39 : Envoie un message privé &#39contenu&#39 à l'utilisateur &#39pseudo&#39" case "/info": resultat = getInformations() case "/nbclients": resultat = fmt.Sprintf("Nombre de clients connectes : %d", len(s.ClientActif)) case "/names": resultat = "</br>Liste des clients connectes : </br>" for cl, _ := range s.ClientActif { resultat = resultat + cl + "</br>" } case "/join": if cmd.Type == "salon" { //Si l'on veut 
rejoindre un salon //On vérifie l'existance du salon var salonExistant bool = false for i := 0; i < len(s.Salons); i++ { if elems[1] == s.Salons[i] { salonExistant = true break } } // On stocke le nom du salon ou etait connecte le client nomSalon := cmd.Salon // On change le salon auquel l'utilisateur appartient s.ClientActif[cmd.Createur].Salon = elems[1] // On verifie si le salon ou etait le client n'est pas vide if nomSalon != "general" { s.verificationSalonUtilise(nomSalon, cmd) } //Si le salon existe alors on envoie l'historique des messages du salon, sinon on ajoute le nouveau salon et on envoie la nouvelle liste de salons à l'ensemble des utilisateurs if salonExistant { err = s.envoiHistoriqueMessagesSalon(cmd, elems[1]) } else { s.Salons = append(s.Salons, elems[1]) err = s.envoiListeSalons(cmd) } } case "/allUsers": allUsersEver := s.getHistoriqueUtilisateurs() contentAllUsersEverSplitted := strings.Split(allUsersEver, " ") for i := 0; i < len(contentAllUsersEverSplitted); i++ { resultat += contentAllUsersEverSplitted[i] + ", " } default: resultat = "Commande incorrecte. Taper /list pour voir les commandes disponibles." } return resultat, err }
package tx

import (
	"bytes"
	"encoding/hex"
	"math/big"
	"os"
	"strings"
	"testing"

	"github.com/VIVelev/btcd/crypto/ecdsa"
	"github.com/VIVelev/btcd/crypto/elliptic"
	"github.com/VIVelev/btcd/script"
)

// Shared fixtures, initialized once in TestMain:
// tx/txBytes hold a legacy (pre-segwit) transaction; txBip143 holds the
// transaction used by the BIP-143 (segwit sighash) tests.
var (
	tx       Tx
	txBytes  []byte
	txBip143 Tx
)

// TestMain decodes the two fixture transactions before running the tests.
func TestMain(m *testing.M) {
	txBytes, _ = hex.DecodeString("0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600")
	tx = Tx{}
	tx.Unmarshal(bytes.NewReader(txBytes))
	txBip143 = Tx{}
	txBip143.Unmarshal(hex.NewDecoder(strings.NewReader("0100000002fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f0000000000eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac11000000")))
	os.Exit(m.Run())
}

// TestUnmarshalVersion checks the parsed transaction version.
func TestUnmarshalVersion(t *testing.T) {
	if tx.Version != 1 {
		t.Errorf("FAIL")
	}
}

// TestUnmarshalInputs checks the single input: previous txid (note: the hex
// below is the byte-reversed form of the txid in the raw transaction),
// previous index, scriptSig bytes, and sequence.
func TestUnmarshalInputs(t *testing.T) {
	if len(tx.TxIns) != 1 {
		t.Errorf("FAIL")
	}
	want, _ := hex.DecodeString("d1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81")
	if !bytes.Equal(tx.TxIns[0].PrevTxId[:], want) {
		t.Errorf("FAIL")
	}
	if tx.TxIns[0].PrevIndex != 0 {
		t.Errorf("FAIL")
	}
	want, _ = hex.DecodeString("6b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278a")
	b, _ := tx.TxIns[0].ScriptSig.Marshal()
	if !bytes.Equal(b, want) {
		t.Errorf("FAIL")
	}
	if tx.TxIns[0].Sequence != 0xfffffffe {
		t.Errorf("FAIL")
	}
}

// TestUnmarshalOutputs checks both outputs' amounts (in satoshi) and
// serialized scriptPubKeys.
func TestUnmarshalOutputs(t *testing.T) {
	if len(tx.TxOuts) != 2 {
		t.Errorf("FAIL")
	}
	if tx.TxOuts[0].Amount != 32454049 {
		t.Errorf("FAIL")
	}
	want, _ := hex.DecodeString("1976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac")
	b, _ := tx.TxOuts[0].ScriptPubKey.Marshal()
	if !bytes.Equal(b, want) {
		t.Errorf("FAIL")
	}
	if tx.TxOuts[1].Amount != 10011545 {
		t.Errorf("FAIL")
	}
	want, _ = hex.DecodeString("1976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac")
	b, _ = tx.TxOuts[1].ScriptPubKey.Marshal()
	if !bytes.Equal(b, want) {
		t.Errorf("FAIL")
	}
}

// TestUnmarshalLocktime checks the parsed nLockTime field.
func TestUnmarshalLocktime(t *testing.T) {
	if tx.LockTime != 410393 {
		t.Errorf("FAIL")
	}
}

// TestMarshal checks that re-serializing the fixture round-trips byte-for-byte.
func TestMarshal(t *testing.T) {
	b, _ := tx.Marshal()
	if !bytes.Equal(b, txBytes) {
		t.Errorf("FAIL")
	}
}

// TestInputValue looks up the value of the referenced previous output.
// NOTE(review): TxIn.Value() presumably fetches the previous transaction —
// this test likely needs network access or a local cache; confirm.
func TestInputValue(t *testing.T) {
	b, _ := hex.DecodeString("d1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81")
	in := TxIn{}
	copy(in.PrevTxId[:], b)
	in.PrevIndex = 0
	val, err := in.Value()
	if err != nil {
		t.Error(err)
	}
	if val != 42505594 {
		t.Errorf("FAIL")
	}
}

// TestInputScriptPubKey looks up the scriptPubKey of the referenced previous output.
func TestInputScriptPubKey(t *testing.T) {
	b, _ := hex.DecodeString("d1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81")
	in := TxIn{}
	copy(in.PrevTxId[:], b)
	in.PrevIndex = 0
	spk, err := in.ScriptPubKey()
	if err != nil {
		t.Error(err)
	}
	want, _ := hex.DecodeString("1976a914a802fc56c704ce87c42d7c92eb75e7896bdc41ae88ac")
	b, _ = spk.Marshal()
	if !bytes.Equal(b, want) {
		t.Errorf("FAIL")
	}
}

// TestFee checks the fee (inputs minus outputs) of the fixture transaction
// and of a second, four-input transaction.
func TestFee(t *testing.T) {
	if fee, _ := tx.Fee(); fee != 40000 {
		t.Errorf("FAIL")
	}
	newTx := Tx{}
	newTx.Unmarshal(hex.NewDecoder(strings.NewReader("010000000456919960ac691763688d3d3bcea9ad6ecaf875df5339e148a1fc61c6ed7a069e010000006a47304402204585bcdef85e6b1c6af5c2669d4830ff86e42dd205c0e089bc2a821657e951c002201024a10366077f87d6bce1f7100ad8cfa8a064b39d4e8fe4ea13a7b71aa8180f012102f0da57e85eec2934a82a585ea337ce2f4998b50ae699dd79f5880e253dafafb7feffffffeb8f51f4038dc17e6313cf831d4f02281c2a468bde0fafd37f1bf882729e7fd3000000006a47304402207899531a52d59a6de200179928ca900254a36b8dff8bb75f5f5d71b1cdc26125022008b422690b8461cb52c3cc30330b23d574351872b7c361e9aae3649071c1a7160121035d5c93d9ac96881f19ba1f686f15f009ded7c62efe85a872e6a19b43c15a2937feffffff567bf40595119d1bb8a3037c356efd56170b64cbcc160fb028fa10704b45d775000000006a47304402204c7c7818424c7f7911da6cddc59655a70af1cb5eaf17c69dadbfc74ffa0b662f02207599e08bc8023693ad4e9527dc42c34210f7a7d1d1ddfc8492b654a11e7620a0012102158b46fbdff65d0172b7989aec8850aa0dae49abfb84c81ae6e5b251a58ace5cfeffffffd63a5e6c16e620f86f375925b21cabaf736c779f88fd04dcad51d26690f7f345010000006a47304402200633ea0d3314bea0d95b3cd8dadb2ef79ea8331ffe1e61f762c0f6daea0fabde022029f23b3e9c30f080446150b23852028751635dcee2be669c2a1686a4b5edf304012103ffd6f4a67e94aba353a00882e563ff2722eb4cff0ad6006e86ee20dfe7520d55feffffff0251430f00000000001976a914ab0c0b2e98b1ab6dbf67d4750b0a56244948a87988ac005a6202000000001976a9143c82d7df364eb6c75be8c80df2b3eda8db57397088ac46430600")))
	if fee, _ := newTx.Fee(); fee != 140500 {
		t.Errorf("FAIL")
	}
}

// TestSighashLegacy checks the legacy (pre-BIP-143) signature hash of input 0
// of a fetched transaction.
func TestSighashLegacy(t *testing.T) {
	newTx, err := Fetch(
		"452c629d67e41baec3ac6f04fe744b4b9617f8f859c63b3002f8684e7a4fee03",
		false, false,
	)
	if err != nil {
		t.Error(err)
	}
	want, _ := hex.DecodeString("27e0c5994dec7824e56dec6b2fcb342eb7cdb0d0957c2fce9882f715e85d81a6")
	b, _ := newTx.SighashLegacy(0)
	if !bytes.Equal(b[:], want) {
		t.Errorf("FAIL")
	}
}

// TestHashPrevouts checks the BIP-143 hashPrevouts intermediate digest.
func TestHashPrevouts(t *testing.T) {
	want, _ := hex.DecodeString("96b827c8483d4e9b96712b6713a7b68d6e8003a781feba36c31143470b4efd37")
	h := txBip143.hashPrevouts()
	if !bytes.Equal(h[:], want) {
		t.Errorf("FAIL")
	}
}

// TestHashSequence checks the BIP-143 hashSequence intermediate digest.
func TestHashSequence(t *testing.T) {
	want, _ := hex.DecodeString("52b0a642eea2fb7ae638c36f6252b6750293dbe574a806984b8e4d8548339a3b")
	h := txBip143.hashSequence()
	if !bytes.Equal(h[:], want) {
		t.Errorf("FAIL")
	}
}

// TestHashOutputs checks the BIP-143 hashOutputs intermediate digest.
func TestHashOutputs(t *testing.T) {
	want, _ := hex.DecodeString("863ef3e1a92afbfdb97f31ad0fc7683ee943e9abcf2501590ff8f6551f47e5e5")
	h, err := txBip143.hashOutputs()
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(h[:], want) {
		t.Errorf("FAIL")
	}
}

// TestSighashBip143 checks the full BIP-143 signature hash for input 1
// against the example from the BIP-143 specification.
func TestSighashBip143(t *testing.T) {
	spk := new(script.Script).Unmarshal(hex.NewDecoder(strings.NewReader("1600141d0f172a0ecb48aee1be1f2687d2963ae33f71a1")))
	h, err := txBip143.SighashBip143(1, *spk, uint64(600000000))
	if err != nil {
		t.Error(err)
	}
	want, _ := hex.DecodeString("c37af31116d1b27caf68aae9e3ac82f1477929014d5b917657d0eb49478cb670")
	if !bytes.Equal(h[:], want) {
		t.Errorf("FAIL")
	}
}

// TestVerifyP2PKH verifies signature checks on two fetched P2PKH transactions
// (mainnet and testnet).
func TestVerifyP2PKH(t *testing.T) {
	newTx, err := Fetch(
		"452c629d67e41baec3ac6f04fe744b4b9617f8f859c63b3002f8684e7a4fee03",
		false, false,
	)
	if err != nil {
		t.Error(err)
	}
	if ok, _ := newTx.Verify(); !ok {
		t.Errorf("FAIL")
	}
	newTx, err = Fetch(
		"5418099cc755cb9dd3ebc6cf1a7888ad53a1a3beb5a025bce89eb1bf7f1650a2",
		true, false,
	)
	if err != nil {
		t.Error(err)
	}
	if ok, _ := newTx.Verify(); !ok {
		t.Errorf("FAIL")
	}
}

// TestVerifyP2WPKH verifies signature checks on a fetched segwit (P2WPKH) transaction.
func TestVerifyP2WPKH(t *testing.T) {
	newTx, err := Fetch(
		"d869f854e1f8788bcff294cc83b280942a8c728de71eb709a2c29d10bfe21b7c",
		true, false,
	)
	if err != nil {
		t.Error(err)
	}
	if ok, _ := newTx.Verify(); !ok {
		t.Errorf("FAIL")
	}
}

// TestSignInput signs input 0 of an unsigned testnet transaction with a fixed
// secret and compares the fully serialized result against a known-good encoding.
func TestSignInput(t *testing.T) {
	priv := ecdsa.GenerateKeyFromSecret(elliptic.Secp256k1, new(big.Int).SetUint64(8675309))
	newTx := Tx{}
	newTx.TestNet = true
	newTx.Unmarshal(hex.NewDecoder(strings.NewReader("010000000199a24308080ab26e6fb65c4eccfadf76749bb5bfa8cb08f291320b3c21e56f0d0d00000000ffffffff02408af701000000001976a914d52ad7ca9b3d096a38e752c2018e6fbc40cdf26f88ac80969800000000001976a914507b27411ccf7f16f10297de6cef3f291623eddf88ac00000000")))
	if ok, _ := newTx.SignInput(0, priv); !ok {
		t.Errorf("FAIL")
	}
	want, _ := hex.DecodeString("010000000199a24308080ab26e6fb65c4eccfadf76749bb5bfa8cb08f291320b3c21e56f0d0d0000006b4830450221008ed46aa2cf12d6d81065bfabe903670165b538f65ee9a3385e6327d80c66d3b502203124f804410527497329ec4715e18558082d489b218677bd029e7fa306a72236012103935581e52c354cd2f484fe8ed83af7a3097005b2f9c60bff71d35bd795f54b67ffffffff02408af701000000001976a914d52ad7ca9b3d096a38e752c2018e6fbc40cdf26f88ac80969800000000001976a914507b27411ccf7f16f10297de6cef3f291623eddf88ac00000000")
	b, _ := newTx.Marshal()
	if !bytes.Equal(b, want) {
		t.Errorf("FAIL")
	}
}
package connection

import (
	"errors"
	"fmt"
	"github.com/rbroggi/tlayer/acknowledge"
	"github.com/rbroggi/tlayer/pkg"
	"time"
)

// NewMockConnection returns a new mockConnection with
// a receiver bound to it and already listening in a separate goroutine.
func NewMockConnection(conParam ConParam) Connection {
	c := &mockConnection{
		segmentTopic: make(chan *pkg.Pkg),
		ackTopic:     make(chan acknowledge.Acknowledge),
	}
	// create receiver and start listening in a separate goroutine
	rec := NewDumpReceiver(conParam)
	go rec.Listen(c)
	return c
}

// Connection models both ends of a mock transport: the sender side
// (SendData/ReceiveAck/Close) and the receiver side (Receive/SendAck).
type Connection interface {
	// To be used by sender.
	// Blocks until a receiver consumes the segment (unbuffered channel).
	SendData(data *pkg.Pkg)
	// Waits up to timeout for an acknowledge, else returns an error
	// (signalling the sender to retransmit).
	ReceiveAck(timeout time.Duration) (*acknowledge.Acknowledge, error)
	// To be used by receiver.
	// Blocking method.
	Receive() *pkg.Pkg
	SendAck(ack acknowledge.Acknowledge)
	Close()
}

// mockConnection implements Connection with two unbuffered in-process
// channels: one for data segments, one for acknowledges.
type mockConnection struct {
	segmentTopic chan *pkg.Pkg
	ackTopic     chan acknowledge.Acknowledge
}

// SendData publishes a segment; blocks until the receiver reads it.
func (c *mockConnection) SendData(data *pkg.Pkg) {
	c.segmentTopic <- data
}

// ReceiveAck returns the next acknowledge, or an error if none arrives
// within timeout.
func (c *mockConnection) ReceiveAck(timeout time.Duration) (*acknowledge.Acknowledge, error) {
	select {
	case ret := <-c.ackTopic:
		return &ret, nil
	case <-time.After(timeout):
		return nil, errors.New("no acknowledge within timeout, retransmitting")
	}
}

// Receive blocks until a segment is available.
func (c *mockConnection) Receive() *pkg.Pkg {
	return <-c.segmentTopic
}

// SendAck publishes an acknowledge; blocks until the sender reads it.
func (c *mockConnection) SendAck(ack acknowledge.Acknowledge) {
	c.ackTopic <- ack
}

// Close signals end-of-transmission by sending an ack with SegIDX -1.
// NOTE(review): receiver.Listen signals completion with acknowledge.ClosingAck
// instead — presumably the same sentinel value; confirm the two stay in sync.
func (c *mockConnection) Close() {
	closingAck := acknowledge.Acknowledge{
		SegIDX: -1,
	}
	c.ackTopic <- closingAck
}

// ConParam groups the transmission parameters shared by sender and receiver.
type ConParam struct {
	// #segments by window
	WinSize int32
	// #bytes by segment
	SegSize int32
	// #total segments
	SegNum int32
	// retransmission timeout
	RetransmissionTimeout time.Duration
}

// Receiver consumes segments from a Connection until the transfer completes.
type Receiver interface {
	// Blocking method.
	Listen(conn Connection)
}

// NewDumpReceiver returns a Receiver that accumulates all segment payloads
// into a single in-memory buffer sized SegNum*SegSize.
func NewDumpReceiver(conParam ConParam) Receiver {
	return &receiver{
		data:        make([]byte, conParam.SegNum*conParam.SegSize),
		segSize:     conParam.SegSize,
		totalSegNum: conParam.SegNum,
		windowSize:  conParam.WinSize,
	}
}

// receiver is the buffer-backed Receiver implementation.
type receiver struct {
	data        []byte // reassembled payload, indexed by segment number * segSize
	segSize     int32
	totalSegNum int32
	windowSize  int32
}

// Listen reads segments window by window, copies each payload into its slot
// in r.data, and acknowledges each full (or final partial) window with the
// next expected segment index. When all segments have arrived it sends the
// closing ack and dumps the reassembled data.
func (r *receiver) Listen(conn Connection) {
	fmt.Printf("Receiver: starting listening\n")
	var lastReceivedSegIDX int32 = 0
	// Listen to the segment channel for incoming messages.
	for lastReceivedSegIDX < r.totalSegNum {
		var windowIDX int32 = 0
		for (windowIDX < r.windowSize) && (lastReceivedSegIDX < r.totalSegNum) {
			// Read a pkg from the connection.
			segment := conn.Receive()
			windowIDX++
			lastReceivedSegIDX++
			fmt.Printf("Receiver: Received segment number %v, window idx %v\n", segment.SegNum, windowIDX)
			// Transcribe the segment payload to its destination offset.
			// NOTE(review): assumes in-order delivery matching segment.SegNum;
			// out-of-order segments would still land at the right offset but
			// lastReceivedSegIDX would no longer equal "next expected".
			startByteIDX := segment.SegNum * r.segSize
			for i := 0; i < len(segment.Data); i++ {
				r.data[startByteIDX+int32(i)] = segment.Data[i]
			}
		}
		// Send an acknowledge carrying the next expected segment index.
		ack := acknowledge.Acknowledge{
			SegIDX: lastReceivedSegIDX,
		}
		fmt.Printf("Receiver: sending acknowledge %v\n", ack.SegIDX)
		conn.SendAck(ack)
	}
	// Transmission complete.
	conn.SendAck(acknowledge.ClosingAck)
	fmt.Printf("Received str: \n")
	fmt.Printf("%v\n", string(r.data))
}
package main import "github.com/bjatkin/golf-engine/golf" // conversation with joe gopher var gopherConvo *convo func initGopher() { for x := 0; x < 128; x++ { for y := 0; y < 128; y++ { if g.Mget(x, y) == 201 { // The gopher sprite // erase the gopher tiles g.Mset(x, y, 0) g.Mset(x+1, y, 0) g.Mset(x, y+1, 0) g.Mset(x+1, y+1, 0) // replace it the gopher character newMob(float64(x*8), float64(y*8), []drawable{ &ani{ //Waiting frames: []int{201, 203}, speed: 0.05, o: golf.SOp{W: 2, H: 2}, }, &ani{ //Talking frames: []int{137, 201}, speed: 0.05, o: golf.SOp{W: 2, H: 2}, }, }) break } } } }
package folder3 import ( "fmt" "github.com/hyperledger/fabric/core/chaincode/shim" pb "github.com/hyperledger/fabric/protos/peer" ) // F3Chaincode definition type F3Chaincode struct { } // F3Method1 returns a successful message from the current method func (t *F3Chaincode) F3Method1(stub shim.ChaincodeStubInterface, args []string) pb.Response { message := fmt.Sprintf("F3Method1 - chaincode method called successfully") fmt.Println(message) return shim.Success([]byte(message)) }
package wire

import (
	"errors"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// mockFIIntermediaryFIAdvice creates a FIIntermediaryFIAdvice with every
// advice line populated.
func mockFIIntermediaryFIAdvice() *FIIntermediaryFIAdvice {
	fiifia := NewFIIntermediaryFIAdvice()
	fiifia.Advice.AdviceCode = AdviceCodeLetter
	fiifia.Advice.LineOne = "Line One"
	fiifia.Advice.LineTwo = "Line Two"
	fiifia.Advice.LineThree = "Line Three"
	fiifia.Advice.LineFour = "Line Four"
	fiifia.Advice.LineFive = "Line Five"
	fiifia.Advice.LineSix = "Line Six"
	return fiifia
}

// TestMockFIIntermediaryFIAdvice validates mockFIIntermediaryFIAdvice
func TestMockFIIntermediaryFIAdvice(t *testing.T) {
	fiifia := mockFIIntermediaryFIAdvice()

	require.NoError(t, fiifia.Validate(), "mockFIIntermediaryFIAdvice does not validate and will break other tests")
}

// TestFIIntermediaryFIAdviceAdviceCodeValid validates FIIntermediaryFIAdvice AdviceCode is alphanumeric
func TestFIIntermediaryFIAdviceAdviceCodeValid(t *testing.T) {
	fiifia := mockFIIntermediaryFIAdvice()
	fiifia.Advice.AdviceCode = "Z"

	err := fiifia.Validate()

	require.EqualError(t, err, fieldError("AdviceCode", ErrAdviceCode, fiifia.Advice.AdviceCode).Error())
}

// TestFIIntermediaryFIAdviceLineOneAlphaNumeric validates FIIntermediaryFIAdvice LineOne is alphanumeric
func TestFIIntermediaryFIAdviceLineOneAlphaNumeric(t *testing.T) {
	fiifia := mockFIIntermediaryFIAdvice()
	fiifia.Advice.LineOne = "®"

	err := fiifia.Validate()

	require.EqualError(t, err, fieldError("LineOne", ErrNonAlphanumeric, fiifia.Advice.LineOne).Error())
}

// TestFIIntermediaryFIAdviceLineTwoAlphaNumeric validates FIIntermediaryFIAdvice LineTwo is alphanumeric
func TestFIIntermediaryFIAdviceLineTwoAlphaNumeric(t *testing.T) {
	fiifia := mockFIIntermediaryFIAdvice()
	fiifia.Advice.LineTwo = "®"

	err := fiifia.Validate()

	require.EqualError(t, err, fieldError("LineTwo", ErrNonAlphanumeric, fiifia.Advice.LineTwo).Error())
}

// TestFIIntermediaryFIAdviceLineThreeAlphaNumeric validates FIIntermediaryFIAdvice LineThree is alphanumeric
func TestFIIntermediaryFIAdviceLineThreeAlphaNumeric(t *testing.T) {
	fiifia := mockFIIntermediaryFIAdvice()
	fiifia.Advice.LineThree = "®"

	err := fiifia.Validate()

	require.EqualError(t, err, fieldError("LineThree", ErrNonAlphanumeric, fiifia.Advice.LineThree).Error())
}

// TestFIIntermediaryFIAdviceLineFourAlphaNumeric validates FIIntermediaryFIAdvice LineFour is alphanumeric
func TestFIIntermediaryFIAdviceLineFourAlphaNumeric(t *testing.T) {
	fiifia := mockFIIntermediaryFIAdvice()
	fiifia.Advice.LineFour = "®"

	err := fiifia.Validate()

	require.EqualError(t, err, fieldError("LineFour", ErrNonAlphanumeric, fiifia.Advice.LineFour).Error())
}

// TestFIIntermediaryFIAdviceLineFiveAlphaNumeric validates FIIntermediaryFIAdvice LineFive is alphanumeric
func TestFIIntermediaryFIAdviceLineFiveAlphaNumeric(t *testing.T) {
	fiifia := mockFIIntermediaryFIAdvice()
	fiifia.Advice.LineFive = "®"

	err := fiifia.Validate()

	require.EqualError(t, err, fieldError("LineFive", ErrNonAlphanumeric, fiifia.Advice.LineFive).Error())
}

// TestFIIntermediaryFIAdviceLineSixAlphaNumeric validates FIIntermediaryFIAdvice LineSix is alphanumeric
func TestFIIntermediaryFIAdviceLineSixAlphaNumeric(t *testing.T) {
	fiifia := mockFIIntermediaryFIAdvice()
	fiifia.Advice.LineSix = "®"

	err := fiifia.Validate()

	require.EqualError(t, err, fieldError("LineSix", ErrNonAlphanumeric, fiifia.Advice.LineSix).Error())
}

// TestParseFIIntermediaryFIAdviceWrongLength parses a wrong FIIntermediaryFIAdvice record length
func TestParseFIIntermediaryFIAdviceWrongLength(t *testing.T) {
	var line = "{6210}LTRLine One Line Two Line Three Line Four Line Five Line Six "
	r := NewReader(strings.NewReader(line))
	r.line = line

	err := r.parseFIIntermediaryFIAdvice()

	require.EqualError(t, err, r.parseError(fieldError("LineSix", ErrValidLength)).Error())
}

// TestParseFIIntermediaryFIAdviceReaderParseError parses a wrong FIIntermediaryFIAdvice reader parse error
func TestParseFIIntermediaryFIAdviceReaderParseError(t *testing.T) {
	var line = "{6210}LTRLine ®ne Line Two Line Three Line Four Line Five Line Six "
	r := NewReader(strings.NewReader(line))
	r.line = line

	err := r.parseFIIntermediaryFIAdvice()

	expected := r.parseError(fieldError("LineOne", ErrNonAlphanumeric, "Line ®ne")).Error()
	require.EqualError(t, err, expected)

	_, err = r.Read()

	expected = r.parseError(fieldError("LineOne", ErrNonAlphanumeric, "Line ®ne")).Error()
	require.EqualError(t, err, expected)
}

// TestFIIntermediaryFIAdviceTagError validates a FIIntermediaryFIAdvice tag
func TestFIIntermediaryFIAdviceTagError(t *testing.T) {
	// Bug fix: this test previously built a FIIntermediaryFI via
	// mockFIIntermediaryFI() (copy-paste from the sibling test file), so it
	// exercised the wrong type. Use the Advice mock that this file tests.
	fiifia := mockFIIntermediaryFIAdvice()
	fiifia.tag = "{9999}"

	err := fiifia.Validate()

	require.EqualError(t, err, fieldError("tag", ErrValidTagForType, fiifia.tag).Error())
}

// TestStringFIIntermediaryFIAdviceVariableLength parses using variable length
func TestStringFIIntermediaryFIAdviceVariableLength(t *testing.T) {
	var line = "{6210}HLD"
	r := NewReader(strings.NewReader(line))
	r.line = line

	err := r.parseFIIntermediaryFIAdvice()
	require.Nil(t, err)

	line = "{6210}HLD NNN"
	r = NewReader(strings.NewReader(line))
	r.line = line

	err = r.parseFIIntermediaryFIAdvice()
	require.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(""))).Error())

	line = "{6210}HLD********"
	r = NewReader(strings.NewReader(line))
	r.line = line

	err = r.parseFIIntermediaryFIAdvice()
	require.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(""))).Error())

	line = "{6210}HLD*"
	r = NewReader(strings.NewReader(line))
	r.line = line

	err = r.parseFIIntermediaryFIAdvice()
	require.Equal(t, err, nil)
}

// TestStringFIIntermediaryFIAdviceOptions validates Format() formatted according to the FormatOptions
func TestStringFIIntermediaryFIAdviceOptions(t *testing.T) {
	var line = "{6210}HLD*"
	r := NewReader(strings.NewReader(line))
	r.line = line

	err := r.parseFIIntermediaryFIAdvice()
	require.Equal(t, err, nil)

	record := r.currentFEDWireMessage.FIIntermediaryFIAdvice
	require.Equal(t, record.String(), "{6210}HLD ")
	require.Equal(t, record.Format(FormatOptions{VariableLengthFields: true}), "{6210}HLD*")
	require.Equal(t, record.String(), record.Format(FormatOptions{VariableLengthFields: false}))
}
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package webcodecs

import (
	"context"
	"fmt"
	"io"
	"math"
	"net/http"
	"os"
	"path/filepath"

	"chromiumos/tast/common/perf"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/chrome/ash"
	"chromiumos/tast/local/chrome/browser"
	"chromiumos/tast/local/coords"
	"chromiumos/tast/local/media/devtools"
	"chromiumos/tast/local/media/encoding"
	"chromiumos/tast/local/media/videotype"
	"chromiumos/tast/testing"
)

// TestEncodeArgs is the arguments used in RunEncodeTest.
type TestEncodeArgs struct {
	// Codec is the codec of a bitstream produced by an encoder.
	Codec videotype.Codec
	// ScalabilityMode is a "scalabilityMode" identifier.
	// https://www.w3.org/TR/webrtc-svc/#scalabilitymodes
	ScalabilityMode string
	// BitrateMode is a bitrate mode identifier.
	// https://www.w3.org/TR/mediastream-recording/#bitratemode
	BitrateMode string
	// Acceleration denotes which encoder is used, hardware or software.
	Acceleration HardwareAcceleration
	// BrowserType indicates the type of Chrome browser to be used,
	// Ash Chrome or Lacros Chrome.
	BrowserType browser.Type
}

// encodeHTML is the test page driving the WebCodecs encoder.
const encodeHTML = "webcodecs_encode.html"

// EncodeDataFiles returns the HTML and JS files used in RunEncodeTest.
func EncodeDataFiles() []string {
	return []string{
		encodeHTML,
		"webcodecs_common.js",
		"webcodecs_encode.js",
	}
}

// Crowd720p is 720p video data used in RunEncodeTest.
const Crowd720p = "crowd-1280x720_30frames.vp9.webm"

// crowd720pMP4 is the H.264 variant whose decoded frames feed the encoder under test.
const crowd720pMP4 = "crowd-1280x720_30frames.h264.mp4"

// VideoDataFiles returns the webm and mp4 files used in RunEncodeTest.
func VideoDataFiles() []string {
	return []string{
		Crowd720p,
		crowd720pMP4,
	}
}

// computeBitstreamQuality computes SSIM and PSNR of bitstreams comparing with yuvFile.
// If numTemporalLayers is more than 1, then this computes SSIM and PSNR of bitstreams
// whose represented frames are in temporal layers up to tid.
func computeBitstreamQuality(ctx context.Context, yuvFile, outDir string, bitstreams [][]byte, codec videotype.Codec, w, h, framerate, tid, numTemporalLayers int, tids []int) (psnr, ssim float64, err error) {
	var bitstreamFile string
	if tid == numTemporalLayers-1 {
		// Top layer: all frames are included, so compare against the full streams.
		bitstreamFile, err = saveBitstream(bitstreams, codec, w, h, framerate)
		if err != nil {
			return psnr, ssim, errors.Wrap(err, "failed preparing bitstream")
		}
		defer os.Remove(bitstreamFile)
	} else {
		// Lower layer: drop frames above tid from both the reference YUV and
		// the bitstream so the comparison is frame-aligned.
		yuvFile, err = peelLayersFromYUVFile(ctx, yuvFile, w, h, tid, tids)
		if err != nil {
			return psnr, ssim, errors.Wrap(err, "failed preparing yuv")
		}
		defer os.Remove(yuvFile)
		bitstreamFile, err = saveTemporalLayerBitstream(bitstreams, codec, w, h, framerate, tid, tids)
		if err != nil {
			return psnr, ssim, errors.Wrap(err, "failed preparing bitstream")
		}
		defer os.Remove(bitstreamFile)
	}
	// Pick the reference software decoder matching the codec.
	// NOTE(review): an unknown codec leaves decoder as its zero value here;
	// presumably CompareFiles fails in that case — confirm.
	var decoder encoding.Decoder
	switch codec {
	case videotype.H264:
		decoder = encoding.OpenH264Decoder
	case videotype.VP8, videotype.VP9:
		decoder = encoding.LibvpxDecoder
	case videotype.AV1:
		decoder = encoding.LibaomDecoder
	}
	psnr, ssim, err = encoding.CompareFiles(ctx, decoder, yuvFile, bitstreamFile, outDir, coords.NewSize(w, h))
	if err != nil {
		return psnr, ssim, errors.Wrap(err, "failed to decode and compare results")
	}
	return psnr, ssim, nil
}

// verifyTLStruct verifies temporalLayerIDs matches the expected temporal layer structures.
// See https://www.w3.org/TR/webrtc-svc/#dependencydiagrams* for the expected temporal layer structures.
func verifyTLStruct(numTemporalLayers int, temporalLayerIDs []int) error {
	var expectedTLIDs []int
	switch numTemporalLayers {
	case 2:
		expectedTLIDs = []int{0, 1}
	case 3:
		expectedTLIDs = []int{0, 2, 1, 2}
	default:
		// No defined pattern for other layer counts; nothing to verify.
		return nil
	}
	// The layer-id pattern repeats with the period of expectedTLIDs.
	for i, tid := range temporalLayerIDs {
		expectedTID := expectedTLIDs[i%len(expectedTLIDs)]
		if tid != expectedTID {
			return errors.Errorf("unexpected temporal layer structure: %v", temporalLayerIDs)
		}
	}
	return nil
}

// RunEncodeTest tests encoding in WebCodecs API. It verifies a specified encoder is used and
// the produced bitstream.
func RunEncodeTest(ctx context.Context, cs ash.ConnSource, fileSystem http.FileSystem, testArgs TestEncodeArgs, videoFile, outDir string) error {
	var crowd720pVideoConfig = videoConfig{width: 1280, height: 720, numFrames: 30, framerate: 30}
	cleanupCtx, server, conn, observer, deferFunc, err := prepareWebCodecsTest(ctx, cs, fileSystem, encodeHTML)
	if err != nil {
		return err
	}
	defer deferFunc()
	codec := toMIMECodec(testArgs.Codec)
	if codec == "" {
		return errors.Errorf("unknown codec: %s", testArgs.Codec)
	}
	// Decode video frames of crowd720pMP4. The decoded video frames are input of the following encoding.
	config := crowd720pVideoConfig
	if err := conn.Call(ctx, nil, "DecodeFrames", server.URL+"/"+crowd720pMP4, config.numFrames); err != nil {
		return outputJSLogAndError(cleanupCtx, conn, errors.Wrap(err, "failed executing DecodeFrames"))
	}
	// 0.1 bit per pixel per frame.
	bitrate := config.width * config.height * config.framerate / 10
	if err := conn.Call(ctx, nil, "EncodeAndSave", codec, testArgs.Acceleration, config.width, config.height, bitrate, config.framerate, testArgs.ScalabilityMode, testArgs.BitrateMode); err != nil {
		return outputJSLogAndError(cleanupCtx, conn, errors.Wrap(err, "failed executing EncodeAndSave"))
	}
	var success bool
	// NOTE(review): a non-nil Eval error is collapsed into the generic
	// message below; the underlying error is dropped.
	if err := conn.Eval(ctx, "TEST.success()", &success); err != nil || !success {
		return outputJSLogAndError(cleanupCtx, conn, errors.New("WebCodecs encoding is not successfully done"))
	}
	// Check if a preferred encoder is used.
	isPlatform, name, err := devtools.GetVideoEncoder(ctx, observer, server.URL+"/"+encodeHTML)
	if err != nil {
		return errors.Wrap(err, "failed getting encoder type")
	}
	if testArgs.Acceleration == PreferHardware && !isPlatform {
		return errors.Errorf("video is encoded by a software encoder, %s", name)
	} else if testArgs.Acceleration == PreferSoftware && isPlatform {
		return errors.Errorf("video is encoded by a hardware encoder, %s", name)
	}
	// We can get the bitstream at once because the expected bitstream size, 0.34MB (= bitrate * config.numFrames / config.framerate),
	// is under the tast websocket limitation, 1MB declared in session.go in package cdputil.
	var bitstreams [][]byte
	if err := conn.Eval(ctx, "bitstreamSaver.getBitstream()", &bitstreams); err != nil {
		return outputJSLogAndError(cleanupCtx, conn, errors.Wrap(err, "error getting bitstream"))
	}
	// Map the scalability mode to its number of temporal layers.
	var numTemporalLayers int
	switch testArgs.ScalabilityMode {
	case "":
		numTemporalLayers = 1
	case "L1T2":
		numTemporalLayers = 2
	case "L1T3":
		numTemporalLayers = 3
	default:
		return errors.Errorf("unknown scalabilityMode: %s", testArgs.ScalabilityMode)
	}
	tlEncoding := numTemporalLayers > 1
	var temporalLayerIds []int
	if tlEncoding {
		// With temporal layers, also fetch and validate the per-frame layer ids.
		if err := conn.Eval(ctx, "bitstreamSaver.getTemporalLayerIds()", &temporalLayerIds); err != nil {
			return outputJSLogAndError(cleanupCtx, conn, errors.Wrap(err, "error getting temporal layer ids"))
		}
		if len(temporalLayerIds) != config.numFrames {
			return errors.Errorf("temporal layer ids mismatch: expected=%d, actual=%d", config.numFrames, len(temporalLayerIds))
		}
		if err := verifyTLStruct(numTemporalLayers, temporalLayerIds); err != nil {
			return err
		}
	}
	yuvFile, err := encoding.PrepareYUV(ctx, videoFile, videotype.I420, coords.NewSize(0, 0) /* placeholder size */)
	if err != nil {
		return errors.Wrap(err, "failed to prepare YUV file")
	}
	defer os.Remove(yuvFile)
	// Compute and record PSNR/SSIM for each temporal layer (or once for
	// non-layered encoding).
	p := perf.NewValues()
	for tid := 0; tid < numTemporalLayers; tid++ {
		psnr, ssim, err := computeBitstreamQuality(ctx, yuvFile, outDir, bitstreams, testArgs.Codec, config.width, config.height, config.framerate, tid, numTemporalLayers, temporalLayerIds)
		if err != nil {
			if tlEncoding {
				return errors.Wrapf(err, "failed computing bitstream quality: tid=%d", tid)
			}
			return errors.Wrap(err, "failed computing bitstream quality")
		}
		psnrStr := "PSNR"
		ssimStr := "SSIM"
		if tlEncoding {
			// +1 because tid is 0-indexed and scalabilityMode identifier
			// (https://www.w3.org/TR/webrtc-svc/#scalabilitymodes) is 1-indexed.
			psnrStr = fmt.Sprintf("%s.L1T%d", psnrStr, tid+1)
			ssimStr = fmt.Sprintf("%s.L1T%d", ssimStr, tid+1)
		}
		testing.ContextLogf(ctx, "%s: %f", psnrStr, psnr)
		testing.ContextLogf(ctx, "%s: %f", ssimStr, ssim)
		p.Set(perf.Metric{
			Name:      ssimStr,
			Unit:      "percent",
			Direction: perf.BiggerIsBetter,
		}, ssim*100)
		p.Set(perf.Metric{
			Name:      psnrStr,
			Unit:      "dB",
			Direction: perf.BiggerIsBetter,
		}, psnr)
	}
	if err := p.Save(outDir); err != nil {
		return errors.Wrap(err, "failed to save perf results")
	}
	// TODO: Save bitstream always, if SSIM or PSNR is bad or never?
	return nil
}

// peelLayersFromYUVFile creates a file that contains YUV frames whose temporal layer id is not more than tid.
// yuvFilePath is the source of YUV frames and tids are the temporal layer ids of them.
// The filepath of the created file is returned. A caller has a responsibility to remove the file.
func peelLayersFromYUVFile(ctx context.Context, yuvFilePath string, w, h, tid int, tids []int) (createdFilePath string, err error) {
	yuvFile, err := os.Open(yuvFilePath)
	if err != nil {
		return "", errors.Wrap(err, "failed to open YUV file")
	}
	defer yuvFile.Close()
	newYUVFilePrefix := fmt.Sprintf("%s.L1T%d", filepath.Base(yuvFilePath), tid+1)
	newYUVFile, err := encoding.CreatePublicTempFile(newYUVFilePrefix)
	if err != nil {
		return "", errors.Wrap(err, "failed to create a temporary YUV file")
	}
	// Remove the temp file unless we reach the success path below.
	keep := false
	defer func() {
		newYUVFile.Close()
		if !keep {
			os.Remove(newYUVFile.Name())
		}
	}()
	// This code assumes yuvFile contains YUV 4:2:0.
	// planeLen = luma plane + two half-resolution chroma planes per frame.
	planeLen := int(w*h) + int(math.RoundToEven(float64(w))*math.RoundToEven(float64(h))/2.0)
	numYUVFrames := len(tids)
	// Sanity-check the source file holds exactly one frame per layer id.
	if stat, err := yuvFile.Stat(); err != nil {
		return "", errors.Wrap(err, "failed to getting a YUV file size")
	} else if stat.Size() != int64(planeLen*numYUVFrames) {
		return "", errors.Errorf("unexpected file size: expected=%d, actual=%d", planeLen*numYUVFrames, stat.Size())
	}
	buf := make([]byte, planeLen)
	for i := 0; i < numYUVFrames; i++ {
		readSize, err := yuvFile.Read(buf)
		if err == io.EOF {
			return "", errors.Errorf("failed to less yuv frames: yuv frames=%d", i)
		}
		if err != nil {
			return "", err
		}
		if readSize != planeLen {
			return "", errors.Errorf("unexpected read size, expected=%d, actual=%d", planeLen, readSize)
		}
		// Skip frames belonging to higher temporal layers than tid.
		if tids[i] > tid {
			continue
		}
		writeSize, err := newYUVFile.Write(buf)
		if err != nil {
			return "", err
		}
		if writeSize != planeLen {
			return "", errors.Errorf("invalid writing size, got=%d, want=%d", writeSize, planeLen)
		}
	}
	keep = true
	createdFilePath = newYUVFile.Name()
	return
}
package html5_test import ( . "github.com/bytesparadise/libasciidoc/testsupport" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) var _ = Describe("quoted texts", func() { Context("bold content", func() { It("bold content alone", func() { source := "*bold content*" expected := `<div class="paragraph"> <p><strong>bold content</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("bold content in sentence", func() { source := "some *bold content*." expected := `<div class="paragraph"> <p>some <strong>bold content</strong>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("bold content across newline", func() { source := "some *bold\ncontent*." expected := `<div class="paragraph"> <p>some <strong>bold content</strong>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("bold content across paragraph", func() { source := "some *bold\n\ncontent*." expected := `<div class="paragraph"> <p>some *bold</p> </div> <div class="paragraph"> <p>content*.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("double bold content across newline", func() { source := "some **bold\ncontent**." expected := `<div class="paragraph"> <p>some <strong>bold content</strong>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("double bold content across paragraph", func() { source := "some **bold\n\ncontent**." expected := `<div class="paragraph"> <p>some **bold</p> </div> <div class="paragraph"> <p>content**.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("italic content", func() { It("italic content alone", func() { source := "_italic content_" expected := `<div class="paragraph"> <p><em>italic content</em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("italic content in sentence", func() { source := "some _italic content_." 
expected := `<div class="paragraph"> <p>some <em>italic content</em>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("italic content across newline", func() { source := "some _italic\ncontent_." expected := `<div class="paragraph"> <p>some <em>italic content</em>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("italic content across paragraph", func() { source := "some _italic\n\ncontent_." expected := `<div class="paragraph"> <p>some _italic</p> </div> <div class="paragraph"> <p>content_.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("double italic content across newline", func() { source := "some __italic\ncontent__." expected := `<div class="paragraph"> <p>some <em>italic content</em>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("double italic content across paragraph", func() { source := "some __italic\n\ncontent__." expected := `<div class="paragraph"> <p>some __italic</p> </div> <div class="paragraph"> <p>content__.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("monospace content", func() { It("monospace content alone", func() { source := "`monospace content`" expected := `<div class="paragraph"> <p><code>monospace content</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("monospace content in sentence", func() { source := "some `monospace content`." expected := `<div class="paragraph"> <p>some <code>monospace content</code>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("monospace content across newline", func() { source := "some `monospace\ncontent`." expected := `<div class="paragraph"> <p>some <code>monospace content</code>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("monospace content across paragraph", func() { source := "some `monospace\n\ncontent`." 
expected := "<div class=\"paragraph\">\n" + "<p>some `monospace</p>\n" + "</div>\n" + "<div class=\"paragraph\">\n" + "<p>content`.</p>\n</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("double monospace content across newline", func() { source := "some ``monospace\ncontent``." expected := `<div class="paragraph"> <p>some <code>monospace content</code>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("double monospace content across paragraph", func() { source := "some ``monospace\n\ncontent``." expected := "<div class=\"paragraph\">\n" + "<p>some ``monospace</p>\n" + "</div>\n" + "<div class=\"paragraph\">\n" + "<p>content``.</p>\n</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("marked content", func() { It("marked content alone", func() { source := "#marked content#" expected := `<div class="paragraph"> <p><mark>marked content</mark></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("marked content in sentence", func() { source := "some #marked content#." expected := `<div class="paragraph"> <p>some <mark>marked content</mark>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("marked content across newline", func() { source := "some #marked\ncontent#." expected := `<div class="paragraph"> <p>some <mark>marked content</mark>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("marked content across paragraph", func() { source := "some #marked\n\ncontent#." expected := `<div class="paragraph"> <p>some #marked</p> </div> <div class="paragraph"> <p>content#.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("double marked content across newline", func() { source := "some ##marked\ncontent##." expected := `<div class="paragraph"> <p>some <mark>marked content</mark>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("double marked content across paragraph", func() { source := "some ##marked\n\ncontent##." 
expected := `<div class="paragraph"> <p>some ##marked</p> </div> <div class="paragraph"> <p>content##.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("subscript content", func() { It("subscript content alone", func() { source := "~subscriptcontent~" expected := `<div class="paragraph"> <p><sub>subscriptcontent</sub></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("subscript content in sentence", func() { source := "some ~subscriptcontent~." expected := `<div class="paragraph"> <p>some <sub>subscriptcontent</sub>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("superscript content", func() { It("superscript content alone", func() { source := "^superscriptcontent^" expected := `<div class="paragraph"> <p><sup>superscriptcontent</sup></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("superscript content in sentence", func() { source := "some ^superscriptcontent^." expected := `<div class="paragraph"> <p>some <sup>superscriptcontent</sup>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("attributes", func() { It("simple role italics", func() { source := "[myrole]_italics_" expected := `<div class="paragraph"> <p><em class="myrole">italics</em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("simple role italics unconstrained", func() { source := "it[uncle]__al__ic" expected := `<div class="paragraph"> <p>it<em class="uncle">al</em>ic</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("simple role bold", func() { source := "[myrole]*bold*" expected := `<div class="paragraph"> <p><strong class="myrole">bold</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("simple role bold unconstrained", func() { source := "it[uncle]**al**ic" expected := `<div class="paragraph"> <p>it<strong class="uncle">al</strong>ic</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) 
It("simple role mono", func() { source := "[.myrole]`true`" expected := `<div class="paragraph"> <p><code class="myrole">true</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("short-hand role with special characters", func() { source := `[."a <role>"]**bold**` // wrapping quotes are not preserved expected := `<div class="paragraph"> <p><strong class="a <role>">bold</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("simple role mono unconstrained", func() { source := "int[.uncle]``eg``rate" expected := `<div class="paragraph"> <p>int<code class="uncle">eg</code>rate</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("role with comma truncates", func() { source := "[.myrole,and=nothing_else]_italics_" expected := `<div class="paragraph"> <p><em class="myrole">italics</em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("short-hand ID only", func() { source := "[#here]*bold*" expected := `<div class="paragraph"> <p><strong id="here">bold</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("short-hand role only", func() { source := "[.bob]**bold**" expected := `<div class="paragraph"> <p><strong class="bob">bold</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("marked role (span) only", func() { source := "[.bob]##bold##" expected := `<div class="paragraph"> <p><span class="bob">bold</span></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("marked role id only", func() { source := "[#link]##content##" expected := `<div class="paragraph"> <p><mark id="link">content</mark></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("empty role", func() { source := "[]**bold**" expected := `<div class="paragraph"> <p><strong>bold</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("short-hand multiple roles and id", func() { source := 
"[.r1#anchor.r2.r3]**bold**[#here.second.class]_text_" expected := `<div class="paragraph"> <p><strong id="anchor" class="r1 r2 r3">bold</strong><em id="here" class="second class">text</em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("quoted role", func() { source := "[.\"something <wicked>\"]**bold**" // TODO: do we need to parse SpecialCharacters in inline attributes? expected := `<div class="paragraph"> <p><strong class="something <wicked>">bold</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("quoted short-hand role", func() { source := "[.'something \"wicked\"']**bold**" expected := `<div class="paragraph"> <p><strong class="something "wicked"">bold</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) // This demonstrates that we cannot inject malicious data in these attributes. // Note that this is a divergence from asciidoctor, which lets you put whatever you want here. It("bad syntax", func() { source := "[.<something \"wicked>]**bold**" expected := `<div class="paragraph"> <p>[.&lt;something "wicked&gt;]<strong>bold</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("nested content", func() { It("nested bold quote within bold quote with same punctuation", func() { // kinda invalid content, and Asciidoc has the same way of parsing this content source := "*some *nested bold* content*." expected := `<div class="paragraph"> <p><strong>some <strong>nested bold</strong> content</strong>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("italic content within bold quote in sentence", func() { source := "some *bold and _italic content_* together." expected := `<div class="paragraph"> <p>some <strong>bold and <em>italic content</em></strong> together.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("marked content within bold quote in sentence", func() { source := "some *bold and #marked content#* together." 
expected := `<div class="paragraph"> <p>some <strong>bold and <mark>marked content</mark></strong> together.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("span content within italic quote in sentence", func() { source := "some *bold and [.strikeout]#span content#* together." expected := `<div class="paragraph"> <p>some <strong>bold and <span class="strikeout">span content</span></strong> together.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("invalid content", func() { It("italic content within invalid bold quote in sentence", func() { source := "some *bold and _italic content_ * together." expected := `<div class="paragraph"> <p>some *bold and <em>italic content</em> * together.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("invalid italic content within bold quote in sentence", func() { source := "some *bold and _italic content _ together*." expected := `<div class="paragraph"> <p>some <strong>bold and _italic content _ together</strong>.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("prevented substitution", func() { It("escaped bold content in sentence", func() { source := "some \\*bold content*." expected := `<div class="paragraph"> <p>some *bold content*.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("italic content within escaped bold quote in sentence", func() { source := "some \\*bold and _italic content_* together." 
expected := `<div class="paragraph"> <p>some *bold and <em>italic content</em>* together.</p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) Context("mixed content", func() { It("unbalanced bold in monospace - case 1", func() { source := "`*a`" expected := `<div class="paragraph"> <p><code>*a</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("unbalanced bold in monospace - case 2", func() { source := "`a*b`" expected := `<div class="paragraph"> <p><code>a*b</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("italic in monospace", func() { source := "`_a_`" expected := `<div class="paragraph"> <p><code><em>a</em></code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("unbalanced italic in monospace", func() { source := "`a_b`" expected := `<div class="paragraph"> <p><code>a_b</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("unbalanced italic in bold", func() { source := `*a_b* _c_` expected := `<div class="paragraph"> <p><strong>a_b</strong> <em>c</em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("unparsed bold in monospace", func() { source := "`a*b*`" expected := `<div class="paragraph"> <p><code>a*b*</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("parsed subscript in monospace", func() { source := "`a~b~`" expected := `<div class="paragraph"> <p><code>a<sub>b</sub></code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("multiline in monospace - case 1", func() { source := "`a\nb`" expected := `<div class="paragraph"> <p><code>a b</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("multiline in monospace - case 2", func() { source := "`a\n*b*`" expected := `<div class="paragraph"> <p><code>a <strong>b</strong></code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("link in bold", func() { source := "*a link:/[b]*" 
expected := `<div class="paragraph"> <p><strong>a <a href="/">b</a></strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("image in bold", func() { source := "*a image:foo.png[]*" expected := `<div class="paragraph"> <p><strong>a <span class="image"><img src="foo.png" alt="foo"></span></strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("singleplus passthrough in bold", func() { source := "*a +image:foo.png[]+*" expected := `<div class="paragraph"> <p><strong>a image:foo.png[]</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("tripleplus passthrough in bold", func() { source := "*a +++image:foo.png[]+++*" expected := `<div class="paragraph"> <p><strong>a image:foo.png[]</strong></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("link in italic", func() { source := "_a link:/[b]_" expected := `<div class="paragraph"> <p><em>a <a href="/">b</a></em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("image in italic", func() { source := "_a image:foo.png[]_" expected := `<div class="paragraph"> <p><em>a <span class="image"><img src="foo.png" alt="foo"></span></em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("singleplus passthrough in italic", func() { source := "_a +image:foo.png[]+_" expected := `<div class="paragraph"> <p><em>a image:foo.png[]</em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("tripleplus passthrough in italic", func() { source := "_a +++image:foo.png[]+++_" expected := `<div class="paragraph"> <p><em>a image:foo.png[]</em></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("link in monospace", func() { source := "`a link:/[b]`" expected := `<div class="paragraph"> <p><code>a <a href="/">b</a></code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("image in monospace", func() { source := "`a image:foo.png[]`" expected := `<div 
class="paragraph"> <p><code>a <span class="image"><img src="foo.png" alt="foo"></span></code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("singleplus passthrough in monospace", func() { source := "`a +image:foo.png[]+`" expected := `<div class="paragraph"> <p><code>a image:foo.png[]</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("tripleplus passthrough in monospace", func() { source := "`a +++image:foo.png[]+++`" expected := `<div class="paragraph"> <p><code>a image:foo.png[]</code></p> </div> ` Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("apostrophes in single bold", func() { source := "this *mother's mothers' mothers`'*\n" expected := "<div class=\"paragraph\">\n" + "<p>this <strong>mother&#8217;s mothers' mothers&#8217;</strong></p>\n" + "</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("apostrophes in double bold", func() { source := "this **mother's mothers' mothers`'**\n" expected := "<div class=\"paragraph\">\n" + "<p>this <strong>mother&#8217;s mothers' mothers&#8217;</strong></p>\n" + "</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("apostrophes in single italic", func() { source := "this _mother's mothers' mothers`'_\n" expected := "<div class=\"paragraph\">\n" + "<p>this <em>mother&#8217;s mothers' mothers&#8217;</em></p>\n" + "</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("apostrophes in double italic", func() { source := "this __mother's mothers' mothers`'__\n" expected := "<div class=\"paragraph\">\n" + "<p>this <em>mother&#8217;s mothers' mothers&#8217;</em></p>\n" + "</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("apostrophes in single mono", func() { source := "this `mother's mothers' day`\n" // no typographic quotes here expected := "<div class=\"paragraph\">\n" + "<p>this <code>mother&#8217;s mothers' day</code></p>\n" + "</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("apostrophes 
in double mono", func() { source := "this ``mother's mothers' mothers`' day``\n" expected := "<div class=\"paragraph\">\n" + "<p>this <code>mother&#8217;s mothers' mothers&#8217; day</code></p>\n" + "</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("apostrophes in single marked", func() { source := "this #mother's mothers' mothers`'#\n" expected := "<div class=\"paragraph\">\n" + "<p>this <mark>mother&#8217;s mothers' mothers&#8217;</mark></p>\n" + "</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) It("apostrophes in double marked", func() { source := "this ##mother's mothers' mothers`'##\n" expected := "<div class=\"paragraph\">\n" + "<p>this <mark>mother&#8217;s mothers' mothers&#8217;</mark></p>\n" + "</div>\n" Expect(RenderHTML(source)).To(MatchHTML(expected)) }) }) })
package admin_console

import (
	"github.com/epmd-edp/admin-console-operator/v2/pkg/apis/edp/v1alpha1"
	_ "github.com/lib/pq"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

//var k8sConfig clientcmd.ClientConfig

// SchemeGroupVersion identifies the EDP custom-resource API group/version
// served under /apis/v2.edp.epam.com/v1alpha1.
var SchemeGroupVersion = schema.GroupVersion{Group: "v2.edp.epam.com", Version: "v1alpha1"}

// EdpV1Client is a typed REST client for AdminConsole custom resources.
type EdpV1Client struct {
	crClient *rest.RESTClient
}

// NewForConfig builds an EdpV1Client from the given rest.Config.
// NOTE(review): createCrdClient mutates config in place (group version,
// API path, serializer); RESTClientFor relies on those mutations, so the
// two calls must stay in this order.
func NewForConfig(config *rest.Config) (*EdpV1Client, error) {
	if err := createCrdClient(config); err != nil {
		return nil, err
	}
	crClient, err := rest.RESTClientFor(config)
	if err != nil {
		return nil, err
	}
	return &EdpV1Client{crClient: crClient}, nil
}

// Get fetches the AdminConsole resource with the given name in namespace.
func (c *EdpV1Client) Get(name string, namespace string, options metav1.GetOptions) (result *v1alpha1.AdminConsole, err error) {
	result = &v1alpha1.AdminConsole{}
	err = c.crClient.Get().
		Namespace(namespace).
		Resource("adminconsoles").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// Update replaces (PUT) the given AdminConsole resource and returns the
// server's updated copy.
func (c *EdpV1Client) Update(ac *v1alpha1.AdminConsole) (result *v1alpha1.AdminConsole, err error) {
	result = &v1alpha1.AdminConsole{}
	err = c.crClient.Put().
		Namespace(ac.Namespace).
		Resource("adminconsoles").
		Name(ac.Name).
		Body(ac).
		Do().
		Into(result)
	return
}

// createCrdClient registers the AdminConsole types in a fresh scheme and
// configures cfg for the CRD API.
// NOTE(review): despite its name this does not create a client — it only
// prepares cfg. `config := cfg` copies the *pointer*, so the caller's
// rest.Config is modified in place; confirm that side effect is intended.
func createCrdClient(cfg *rest.Config) error {
	scheme := runtime.NewScheme()
	SchemeBuilder := runtime.NewSchemeBuilder(addKnownTypes)
	if err := SchemeBuilder.AddToScheme(scheme); err != nil {
		return err
	}
	config := cfg
	config.GroupVersion = &SchemeGroupVersion
	config.APIPath = "/apis"
	config.ContentType = runtime.ContentTypeJSON
	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)}
	return nil
}

// addKnownTypes registers AdminConsole and AdminConsoleList under
// SchemeGroupVersion so the serializer can encode/decode them.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&v1alpha1.AdminConsole{},
		&v1alpha1.AdminConsoleList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
package clusters import "github.com/cohesity/management-sdk-go/models" import "github.com/cohesity/management-sdk-go/configuration" /* * Interface for the CLUSTERS_IMPL */ type CLUSTERS interface { GetExternalClientSubnets () (*models.SpecifiesTheExternalClientSubnetsThatCanCommunicateWithThisCluster, error) GetClusterKeys () (*models.ClusterPublicKeys, error) UpdateExternalClientSubnets (*models.SpecifiesTheExternalClientSubnetsThatCanCommunicateWithThisCluster) (*models.SpecifiesTheExternalClientSubnetsThatCanCommunicateWithThisCluster, error) } /* * Factory for the CLUSTERS interaface returning CLUSTERS_IMPL */ func NewCLUSTERS(config configuration.CONFIGURATION) *CLUSTERS_IMPL { client := new(CLUSTERS_IMPL) client.config = config return client }
package main import ( "bufio" "fmt" "log" "net/http" ) // doesn't work // moby dick returns a lot of data // and a panic index out of range error occurs func main() { // get the book moby dick res, err := http.Get("http://www.gutenberg.org/files/2701/2701-0.txt") if err != nil { log.Fatal(err) } // scan the page scanner := bufio.NewScanner(res.Body) // close res body right before function ends/exits // res.Body is a reader type // and will stay open and keep memory space if you don't close it defer res.Body.Close() // Set the split function for the scanning operation scanner.Split(bufio.ScanWords) // Create slice to hold counts buckets := make([]int, 200) // Loop over the words for scanner.Scan() { n := HashBucket(scanner.Text()) buckets[n]++ } // fmt.Println(buckets[65:123]) // each symbol/character has a position in the array which // its ASCII number corresponds to // each time the loop scanner.Scan() ran the value for each // character incremented fmt.Println(buckets) } // HashBucket exported func // which converts the first letter of each word to its // ASCII number equivalent func HashBucket(word string) int { return int(word[0]) }
package stdlib

import (
	"context"
	"encoding/json"
	"time"

	"github.com/niolabs/gonio-framework"
	"github.com/niolabs/gonio-framework/props"
)

// IdentityIntervalSimulatorBlock emits batches of empty signals on a fixed
// interval, optionally stopping after a total signal limit is reached.
type IdentityIntervalSimulatorBlock struct {
	nio.Producer
	Config   IdentityIntervalSimulatorConfig
	duration time.Duration // tick interval, resolved from Config.Interval
	limit    int64         // total signal cap; <= 0 means unlimited
	total    int64         // signals emitted so far
	count    int64         // signals emitted per tick
}

// IdentityIntervalSimulatorConfig is the raw JSON configuration schema.
type IdentityIntervalSimulatorConfig struct {
	nio.BlockConfigAtom
	Interval props.TimeDeltaProperty `json:"interval"`
	Limit    *props.IntProperty      `json:"limit"`
	Count    *props.IntProperty      `json:"num_signals"`
}

// Configure parses the raw block config and resolves the interval, limit
// (default -1 = unlimited) and per-tick count (default 1).
func (iis *IdentityIntervalSimulatorBlock) Configure(config nio.RawBlockConfig) error {
	iis.Producer.Configure()
	if err := json.Unmarshal(config, &iis.Config); err != nil {
		return err
	}
	if err := iis.Config.Interval.Assign(&iis.duration, nil); err != nil {
		return err
	}
	if err := iis.Config.Limit.AssignToDefault(&iis.limit, nil, -1); err != nil {
		return err
	}
	if err := iis.Config.Count.AssignToDefault(&iis.count, nil, 1); err != nil {
		return err
	}
	return nil
}

// Enqueue rejects incoming signals: this block is a pure source.
func (iis *IdentityIntervalSimulatorBlock) Enqueue(terminal nio.Terminal, signals nio.SignalGroup) error {
	return iis.NoEnqueue(terminal)
}

// Start runs the tick loop until the limit is exhausted or ctx is
// cancelled. On each tick a group of `count` empty signals is emitted;
// the final batch is truncated so `total` never exceeds `limit`.
func (iis *IdentityIntervalSimulatorBlock) Start(ctx context.Context) {
	t := time.NewTicker(iis.duration)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			num := iis.count
			hasLimit := iis.limit > 0
			isComplete := hasLimit && (iis.total+num) > iis.limit
			if isComplete {
				// Truncate the last batch to exactly reach the limit.
				num = iis.limit - iis.total
			}
			iis.total += num
			iis.ChOut <- make(nio.SignalGroup, num)
			if isComplete {
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

// IdentityIntervalSimulator is the block-type registration entry: factory
// plus the UI/metadata definition (terminals, property schema, commands).
var IdentityIntervalSimulator = nio.BlockTypeEntry{
	Create: func() nio.Block { return &IdentityIntervalSimulatorBlock{} },
	Definition: nio.BlockTypeDefinition{
		Version: "0.2.0",
		BlockAttributes: nio.BlockAttributes{
			Outputs: []nio.TerminalDefinition{
				{Label: "default", Type: "output", Visible: true, Order: 0, ID: "__default_terminal_value", Default: true},
			},
			Inputs: []nio.TerminalDefinition{},
		},
		Namespace: "goblocks.simulator.blocks.IdentityIntervalSimulator",
		Properties: map[nio.Property]nio.PropertyDefinition{
			"interval":      {"order": 0, "type": "TimeDeltaType", "advanced": false, "visible": true, "default": map[string]float64{"seconds": 1}, "allow_none": false, "title": "Interval"},
			"num_signals":   {"order": 3, "type": "IntType", "advanced": false, "visible": true, "default": 1, "allow_none": false, "title": "Number of Signals"},
			"total_signals": {"order": 4, "type": "IntType", "advanced": false, "visible": true, "default": -1, "allow_none": false, "title": "Total Number of Signals"},
			"version":       {"order": nil, "type": "StringType", "advanced": true, "visible": true, "default": "0.2.0", "allow_none": false, "title": "Version"},
			"type":          {"order": nil, "advanced": false, "visible": false, "title": "Type", "type": "StringType", "readonly": true, "allow_none": false, "default": nil},
			"id":            {"order": nil, "type": "StringType", "advanced": false, "visible": false, "default": nil, "allow_none": false, "title": "Id"},
			"name":          {"order": nil, "type": "StringType", "advanced": false, "visible": false, "default": nil, "allow_none": true, "title": "Name"},
			"log_level":     {"order": nil, "options": map[string]int{"WARNING": 30, "NOTSET": 0, "ERROR": 40, "INFO": 20, "DEBUG": 10, "CRITICAL": 50}, "advanced": true, "visible": true, "title": "Log Level", "type": "SelectType", "enum": "LogLevel", "allow_none": false, "default": "NOTSET"},
		},
		Commands: map[nio.Command]nio.CommandDefinition{},
		Name:     "IdentityIntervalSimulator",
	},
}
package rbn import ( "math" "math/rand" ) // RBNNode : data type that represents a node in the network type RBNNode struct { id int links []int value bool layers int } func (node *RBNNode) flip() { node.value = !node.value } func (node *RBNNode) getIntFromLinks(rbn RandomBooleanNetwork) int { result := 0 for i, neighbor := range node.links { result += int(math.Pow(2, float64(rbn.layers-i-1))) * bool2int(rbn.network[neighbor].value) } return result } // NewRBNNode : constructor func NewRBNNode(id, maxN, layers int, randGen *rand.Rand) RBNNode { links := make([]int, layers) for i := range links { links[i] = -1 } for layer := 0; layer < layers; layer++ { neighbor := randGen.Intn(maxN) for neighbor == id || in(neighbor, links) { neighbor = randGen.Intn(maxN) } links[layer] = neighbor } return RBNNode{id, links, randGen.Intn(100) > 50, layers} }
//go:build e2e package environment import ( "os" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/e2e-framework/klient/conf" "sigs.k8s.io/e2e-framework/pkg/env" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/envfuncs" ) const ( useKind = "TEST_ENV_USE_KIND" ) func Get() env.Environment { if os.Getenv(useKind) == "true" { environment := env.New() environment.Setup(envfuncs.CreateKindCluster(envconf.RandomName("operator-test", 10))) return environment } kubeConfigPath := conf.ResolveKubeConfigFile() envConfig := envconf.NewWithKubeConfig(kubeConfigPath) return env.NewWithConfig(envConfig) }
// Copyright 2020 Red Hat, Inc. and/or its affiliates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package deploy

import (
	"fmt"
	"github.com/kiegroup/kogito-operator/cmd/kogito/command/context"
	"github.com/kiegroup/kogito-operator/cmd/kogito/command/service"
	"github.com/kiegroup/kogito-operator/cmd/kogito/command/shared"
	"github.com/kiegroup/kogito-operator/core/logger"
	"github.com/kiegroup/kogito-operator/core/operator"
	"github.com/kiegroup/kogito-operator/internal/app"
	"github.com/kiegroup/kogito-operator/meta"
	"github.com/spf13/cobra"
)

// deleteServiceFlags holds the CLI flags/args for `delete-service`.
type deleteServiceFlags struct {
	name    string // positional arg: service name to delete
	project string // --project flag: namespace/project of the service
}

// initDeleteServiceCommand wires the `delete-service` subcommand into
// parent, building its operator context and backing services.
func initDeleteServiceCommand(ctx *context.CommandContext, parent *cobra.Command) context.KogitoCommand {
	context := operator.Context{
		Client: ctx.Client,
		Scheme: meta.GetRegisteredSchema(),
		Log:    logger.GetLogger("delete_service"),
	}
	buildHandler := app.NewKogitoBuildHandler(context)
	cmd := &deleteServiceCommand{
		CommandContext:       *ctx,
		Parent:               parent,
		resourceCheckService: shared.NewResourceCheckService(),
		buildService:         service.NewBuildService(context, buildHandler),
		runtimeService:       service.NewRuntimeService(),
	}
	cmd.RegisterHook()
	cmd.InitHook()
	return cmd
}

// deleteServiceCommand implements the `delete-service` cobra command.
type deleteServiceCommand struct {
	context.CommandContext
	command              *cobra.Command
	flags                *deleteServiceFlags
	Parent               *cobra.Command
	resourceCheckService shared.ResourceCheckService
	buildService         service.BuildService
	runtimeService       service.RuntimeService
}

// RegisterHook creates the cobra command definition (usage, docs, arg
// validation). It must run before InitHook attaches flags.
func (i *deleteServiceCommand) RegisterHook() {
	i.command = &cobra.Command{
		Example: "delete-service example-drools --project kogito",
		Use:     "delete-service NAME [flags]",
		Short:   "Deletes a Kogito service deployed in the given Project context",
		Long: `delete-service will exclude every OpenShift/Kubernetes resource created to deploy the Kogito Service into the Project context.
Project context is the namespace (Kubernetes) or project (OpenShift) where the Service will be deployed.
To know what's your context, use "kogito project". To set a new Project in the context use "kogito use-project NAME".
Please note that this command requires the Kogito Operator installed in the cluster.
For more information about the Kogito Operator installation please refer to https://github.com/kiegroup/kogito-operator#kogito-operator-installation.`,
		RunE:    i.Exec,
		PreRun:  i.CommonPreRun,
		PostRun: i.CommonPostRun,
		// Exactly one positional argument (the service name) is required.
		Args: func(cmd *cobra.Command, args []string) error {
			if len(args) != 1 {
				return fmt.Errorf("requires 1 arg, received %v", len(args))
			}
			return nil
		},
	}
}

// Command returns the underlying cobra command.
func (i *deleteServiceCommand) Command() *cobra.Command {
	return i.command
}

// InitHook attaches this command to its parent and registers its flags.
func (i *deleteServiceCommand) InitHook() {
	i.flags = &deleteServiceFlags{}
	i.Parent.AddCommand(i.command)
	i.command.Flags().StringVarP(&i.flags.project, "project", "p", "", "The project name from where the service needs to be deleted")
}

// Exec resolves the target project, then deletes the runtime service and
// its build resources (in that order).
func (i *deleteServiceCommand) Exec(_ *cobra.Command, args []string) (err error) {
	i.flags.name = args[0]
	if i.flags.project, err = i.resourceCheckService.EnsureProject(i.Client, i.flags.project); err != nil {
		return err
	}
	if err = i.runtimeService.DeleteRuntimeService(i.Client, i.flags.name, i.flags.project); err != nil {
		return err
	}
	return i.buildService.DeleteBuildService(i.flags.name, i.flags.project)
}
// Package bench_test measures the overhead of spawning goroutines for
// trivially small units of work.
package bench_test

import (
	"sync"
	"testing"
)

// =============================================================
// Starting a goroutine is not free: the lighter the work item,
// the more a plain sequential call wins.
// =============================================================

// workers is the number of work items dispatched per benchmark iteration.
const workers = 10

// BenchmarkGoroutine runs each (empty) work item in its own goroutine.
func BenchmarkGoroutine(b *testing.B) {
	var group sync.WaitGroup
	b.ResetTimer()
	for iter := 0; iter < b.N; iter++ {
		group.Add(workers)
		for w := 0; w < workers; w++ {
			go func() { group.Done() }()
		}
		group.Wait()
	}
}

// BenchmarkSequential runs the same work items as plain function calls.
func BenchmarkSequential(b *testing.B) {
	var group sync.WaitGroup
	b.ResetTimer()
	for iter := 0; iter < b.N; iter++ {
		group.Add(workers)
		for w := 0; w < workers; w++ {
			func() { group.Done() }()
		}
		group.Wait()
	}
}
// A minimal TCP proxy: accepts connections on 127.0.0.1:8080 and forwards
// them to an origin server on 127.0.0.1:80.
package main

import (
	"fmt"
	"io"
	"net"
	"os"
	"sync"
)

// main listens on 127.0.0.1:8080 and spawns one proxy session per accepted
// connection.
func main() {
	fmt.Printf("[+] Listening on 127.0.0.1:8080\n")
	lAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	oAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:80")
	if err != nil {
		panic(err)
	}
	ln, err := net.ListenTCP("tcp", lAddr)
	if err != nil {
		fmt.Printf("Listen(): %s\n", err)
		os.Exit(1)
	}
	for {
		conn, err := ln.AcceptTCP()
		if err != nil {
			fmt.Printf("Accept(): %s\n", err)
			os.Exit(1)
		}
		go proxyTcp(conn, oAddr)
	}
}

// proxyTcp dials the origin and shuttles bytes in both directions until both
// half-duplex pumps finish, then closes both connections.
func proxyTcp(conn *net.TCPConn, oAddr *net.TCPAddr) {
	fmt.Printf("[+] %s opened\n", conn.RemoteAddr())
	orig, err := net.DialTCP("tcp", nil, oAddr)
	if err != nil {
		fmt.Printf("[!] origin Connect(): %s\n", err)
		conn.Close()
		return
	}
	var wg sync.WaitGroup
	wg.Add(2)
	go proxyHalfDuplex(conn, orig, &wg)
	go proxyHalfDuplex(orig, conn, &wg)
	wg.Wait()
	fmt.Printf("[-] %s closed\n", conn.RemoteAddr())
	conn.Close()
	orig.Close()
}

// proxyHalfDuplex copies bytes from a to b until EOF or error, then
// half-closes both ends (read side of a, write side of b) so the peer pump
// can drain independently.
//
// Fixes over the previous version: the write-path log said "Read()" and
// printed the *net.TCPConn struct instead of its remote address; cleanup now
// uses defer instead of goto so every exit path is covered.
func proxyHalfDuplex(a, b *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	defer a.CloseRead()
	defer b.CloseWrite()

	var buf [4096]byte
	for {
		n, err := a.Read(buf[:])
		if err != nil {
			if err != io.EOF {
				fmt.Printf("[!] %s error Read(): %s\n", a.RemoteAddr(), err)
			}
			return
		}
		if n == 0 {
			// Read may legally return (0, nil); treat it as suspicious EOF
			// like the original did.
			fmt.Printf("[!] %s error Read(): EOF?\n", a.RemoteAddr())
			return
		}
		// Handle short writes: keep writing until the chunk is fully sent.
		s := buf[:n]
		for len(s) > 0 {
			x, err := b.Write(s)
			if err != nil {
				fmt.Printf("[!] %s error Write(): %s\n", b.RemoteAddr(), err)
				return
			}
			s = s[x:]
		}
	}
}
/* Copyright 2022 Docker Compose CLI authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "testing" "gotest.tools/v3/icmd" ) func TestRecreateWithNoDeps(t *testing.T) { c := NewParallelCLI(t, WithEnv( "COMPOSE_PROJECT_NAME=recreate-no-deps", )) res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/dependencies/recreate-no-deps.yaml", "up", "-d") res.Assert(t, icmd.Success) res = c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/dependencies/recreate-no-deps.yaml", "up", "-d", "--force-recreate", "--no-deps", "my-service") res.Assert(t, icmd.Success) RequireServiceState(t, c, "my-service", "running") c.RunDockerComposeCmd(t, "down") }
// A beego application that can also install/run itself as a Windows service
// via github.com/chai2010/winsvc.
package main

import (
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/astaxie/beego"
	"github.com/chai2010/winsvc"

	_ "smartapp/initial/common"
	_ "smartapp/initial/plugins"
	_ "smartapp/routers"
)

var (
	// serve is the default Windows service name.
	serve string = "beegoTest"
	// appPath is the absolute path of this executable (set in init).
	appPath string

	flagServiceName      = flag.String("service-name", serve, "Set service name")
	flagServiceDesc      = flag.String("service-desc", serve+" service", "Set service description")
	flagServiceInstall   = flag.Bool("service-install", false, "Install service")
	flagServiceUninstall = flag.Bool("service-remove", false, "Remove service")
	flagServiceStart     = flag.Bool("service-start", false, "Start service")
	flagServiceStop      = flag.Bool("service-stop", false, "Stop service")
	// Query the installed service's status.
	flagServiceQuery = flag.Bool("service-query", false, "Query service")
)

// init resolves the executable path and switches the working directory to it,
// so relative resources load correctly when launched by the service manager.
func init() {
	var err error
	if appPath, err = winsvc.GetAppPath(); err != nil {
		log.Fatal(err)
	}
	if err := os.Chdir(filepath.Dir(appPath)); err != nil {
		log.Fatalf("chdir: %v", err)
	}
}

// main dispatches on the service-management flags; with no flag set it runs
// the server, either under the service control manager or as a console app.
func main() {
	flag.Parse()

	// Install service.
	if *flagServiceInstall {
		params := ""
		if err := winsvc.InstallService(appPath, *flagServiceName, *flagServiceDesc, params); err != nil {
			log.Fatalf("installService(%s, %s): %v\n", *flagServiceName, *flagServiceDesc, err)
		}
		fmt.Printf("Done\n")
		return
	}

	// Query service.
	if *flagServiceQuery {
		// BUG FIX: this branch previously logged "removeService:" on failure.
		if serName, err := winsvc.QueryService(*flagServiceName); err != nil {
			log.Fatalln("queryService:", err)
		} else {
			fmt.Printf(serName + "\n")
		}
		fmt.Printf("Done\n")
		return
	}

	// Remove service.
	if *flagServiceUninstall {
		if err := winsvc.RemoveService(*flagServiceName); err != nil {
			log.Fatalln("removeService:", err)
		}
		fmt.Printf("Done\n")
		return
	}

	// Start service.
	if *flagServiceStart {
		if err := winsvc.StartService(*flagServiceName); err != nil {
			log.Fatalln("startService:", err)
		}
		fmt.Printf("Done\n")
		return
	}

	// Stop service.
	if *flagServiceStop {
		if err := winsvc.StopService(*flagServiceName); err != nil {
			log.Fatalln("stopService:", err)
		}
		fmt.Printf("Done\n")
		return
	}

	// Run as a Windows service when launched by the service manager.
	if !winsvc.InServiceMode() {
		log.Println("main:", "runService")
		if err := winsvc.RunAsService(*flagServiceName, StartServer, StopServer, false); err != nil {
			log.Fatalf("svc.Run: %v\n", err)
		}
		return
	}

	// Run as a normal console process.
	StartServer()
}

// StartServer boots the beego HTTP server (blocks until shutdown).
func StartServer() {
	beego.Run()
}

// StopServer is invoked by the service manager on stop; the panic aborts the
// blocked beego.Run.
func StopServer() {
	log.Println("StopServer")
	panic("服务被停止")
}
// Package ast defines the abstract syntax tree produced by the Monkey parser.
package ast

import (
	"strings"

	"github.com/makramkd/go-monkey/token"
)

// Node is the interface implemented by every AST node.
type Node interface {
	TokenLiteral() string
	String() string
}

// Statement is a Node that appears in statement position.
type Statement interface {
	Node
	statementNode()
}

// Expression is a Node that produces a value.
type Expression interface {
	Node
	expressionNode()
}

// Program represents a Monkey program.
// Monkey programs are a sequence of statements that are executed in the order
// in which they are written.
type Program struct {
	Statements []Statement
}

func (p *Program) TokenLiteral() string {
	if len(p.Statements) > 0 {
		return p.Statements[0].TokenLiteral()
	}
	return ""
}

func (p *Program) String() string {
	builder := strings.Builder{}
	for _, s := range p.Statements {
		builder.WriteString(s.String())
	}
	return builder.String()
}

// Identifier is a name that refers to a bound value, e.g. `x`.
type Identifier struct {
	Token token.Token
	Value string
}

func (i *Identifier) expressionNode()      {}
func (i *Identifier) TokenLiteral() string { return i.Token.Literal }
func (i *Identifier) String() string       { return i.Value }

// LetStatement represents a Monkey let statement.
// e.g let a = b;
// A let statement consists of an identifier, which appears on the left hand side
// of the assignment operator, and an expression, which appears on the right hand side
// of the assignment operator.
type LetStatement struct {
	Token token.Token
	Name  *Identifier
	Value Expression
}

func (l *LetStatement) statementNode()       {}
func (l *LetStatement) TokenLiteral() string { return l.Token.Literal }

func (l *LetStatement) String() string {
	builder := strings.Builder{}
	builder.WriteString(l.TokenLiteral() + " ")
	builder.WriteString(l.Name.String())
	builder.WriteString(" = ")
	if l.Value != nil {
		builder.WriteString(l.Value.String())
	}
	builder.WriteRune(';')
	return builder.String()
}

// ReturnStatement represents `return <expr>;`.
type ReturnStatement struct {
	Token       token.Token
	ReturnValue Expression
}

func (r *ReturnStatement) statementNode()       {}
func (r *ReturnStatement) TokenLiteral() string { return r.Token.Literal }

func (r *ReturnStatement) String() string {
	builder := strings.Builder{}
	builder.WriteString(r.TokenLiteral() + " ")
	if r.ReturnValue != nil {
		builder.WriteString(r.ReturnValue.String())
	}
	builder.WriteRune(';')
	return builder.String()
}

// ExpressionStatement wraps a bare expression used as a statement.
type ExpressionStatement struct {
	// The first token of the expression
	Token      token.Token
	Expression Expression
}

func (e *ExpressionStatement) statementNode()       {}
func (e *ExpressionStatement) TokenLiteral() string { return e.Token.Literal }

func (e *ExpressionStatement) String() string {
	if e.Expression != nil {
		return e.Expression.String()
	}
	return ""
}

// IntegerLiteral is an integer constant, e.g. `5`.
type IntegerLiteral struct {
	Token token.Token
	Value int64
}

func (i *IntegerLiteral) expressionNode()      {}
func (i *IntegerLiteral) TokenLiteral() string { return i.Token.Literal }
func (i *IntegerLiteral) String() string       { return i.Token.Literal }

// PrefixExpression is a unary expression, e.g. `!x` or `-x`.
type PrefixExpression struct {
	Token    token.Token // the prefix token, e.g ! or -
	Operator string
	Right    Expression
}

func (p *PrefixExpression) expressionNode()      {}
func (p *PrefixExpression) TokenLiteral() string { return p.Token.Literal }

func (p *PrefixExpression) String() string {
	builder := strings.Builder{}
	builder.WriteRune('(')
	builder.WriteString(p.Operator)
	builder.WriteString(p.Right.String())
	builder.WriteRune(')')
	return builder.String()
}

// InfixExpression is a binary expression, e.g. `a + b`.
type InfixExpression struct {
	Token    token.Token // the operator token
	Left     Expression
	Operator string
	Right    Expression
}

func (i *InfixExpression) expressionNode()      {}
func (i *InfixExpression) TokenLiteral() string { return i.Token.Literal }

func (i *InfixExpression) String() string {
	builder := strings.Builder{}
	builder.WriteRune('(')
	builder.WriteString(i.Left.String() + " ")
	builder.WriteString(i.Operator + " ")
	builder.WriteString(i.Right.String())
	builder.WriteRune(')')
	return builder.String()
}

// BooleanLiteral is `true` or `false`.
type BooleanLiteral struct {
	Token token.Token
	Value bool
}

func (b *BooleanLiteral) expressionNode()      {}
func (b *BooleanLiteral) TokenLiteral() string { return b.Token.Literal }
func (b *BooleanLiteral) String() string       { return b.Token.Literal }

// BlockStatement is a brace-delimited sequence of statements.
type BlockStatement struct {
	Token      token.Token // the '{' token
	Statements []Statement
}

func (b *BlockStatement) statementNode()       {}
func (b *BlockStatement) TokenLiteral() string { return b.Token.Literal }

func (b *BlockStatement) String() string {
	builder := strings.Builder{}
	for _, stmt := range b.Statements {
		builder.WriteString(stmt.String())
	}
	return builder.String()
}

// IfExpression is `if (<cond>) { <consequence> } else { <alternative> }`;
// the else branch is optional (Alternative may be nil).
type IfExpression struct {
	Token       token.Token // the 'if' token
	Condition   Expression
	Consequence *BlockStatement
	Alternative *BlockStatement
}

func (i *IfExpression) expressionNode()      {}
func (i *IfExpression) TokenLiteral() string { return i.Token.Literal }

func (i *IfExpression) String() string {
	builder := strings.Builder{}
	builder.WriteString("if")
	builder.WriteString(i.Condition.String())
	builder.WriteString(" ")
	builder.WriteString(i.Consequence.String())
	if i.Alternative != nil {
		builder.WriteString("else")
		builder.WriteString(i.Alternative.String())
	}
	return builder.String()
}

// FunctionLiteral is `fn(<params>) { <body> }`.
type FunctionLiteral struct {
	Token      token.Token // the 'fn' token
	Parameters []*Identifier
	Body       *BlockStatement
}

func (f *FunctionLiteral) expressionNode()      {}
func (f *FunctionLiteral) TokenLiteral() string { return f.Token.Literal }

func (f *FunctionLiteral) String() string {
	builder := strings.Builder{}
	builder.WriteString("fn")
	builder.WriteRune('(')
	for i, param := range f.Parameters {
		builder.WriteString(param.String())
		if i < len(f.Parameters)-1 {
			builder.WriteRune(',')
		}
	}
	builder.WriteRune(')')
	builder.WriteRune('{')
	builder.WriteString(f.Body.String())
	builder.WriteRune('}')
	return builder.String()
}

// CallExpression is a function invocation, e.g. `add(1, 2)`.
type CallExpression struct {
	Token     token.Token // the '(' token
	Function  Expression  // identifier or function literal
	Arguments []Expression
}

func (c *CallExpression) expressionNode()      {}
func (c *CallExpression) TokenLiteral() string { return c.Token.Literal }

func (c *CallExpression) String() string {
	builder := strings.Builder{}
	builder.WriteByte('(')
	builder.WriteString(c.Function.String())
	builder.WriteRune('(')
	for i, arg := range c.Arguments {
		builder.WriteString(arg.String())
		if i < len(c.Arguments)-1 {
			builder.WriteRune(',')
		}
	}
	builder.WriteRune(')')
	builder.WriteByte(')')
	return builder.String()
}

// StringLiteral is a double-quoted string constant.
type StringLiteral struct {
	Token token.Token
	Value string
}

func (s *StringLiteral) expressionNode()      {}
func (s *StringLiteral) TokenLiteral() string { return s.Token.Literal }
func (s *StringLiteral) String() string       { return s.Token.Literal }

// ArrayLiteral is `[e1, e2, ...]`.
type ArrayLiteral struct {
	Token    token.Token
	Elements []Expression
}

func (a *ArrayLiteral) expressionNode()      {}
func (a *ArrayLiteral) TokenLiteral() string { return a.Token.Literal }

func (a *ArrayLiteral) String() string {
	builder := strings.Builder{}
	builder.WriteByte('[')
	for i, e := range a.Elements {
		builder.WriteString(e.String())
		if i < len(a.Elements)-1 {
			builder.WriteByte(',')
		}
	}
	builder.WriteByte(']')
	return builder.String()
}

type IndexAccessExpression struct {
	Token token.Token // The '[' token
	Left  Expression  // can either be an array literal, hash literal or an identifier that refers to either type
	Index Expression  // The index to access from the array
}

func (a *IndexAccessExpression) expressionNode()      {}
func (a *IndexAccessExpression) TokenLiteral() string { return a.Token.Literal }

func (a *IndexAccessExpression) String() string {
	builder := strings.Builder{}
	builder.WriteByte('(')
	builder.WriteString(a.Left.String())
	builder.WriteByte('[')
	builder.WriteString(a.Index.String())
	builder.WriteByte(']')
	builder.WriteByte(')')
	return builder.String()
}

type ImportStatement struct {
	Token  token.Token // The 'import' token
	Module *Identifier // The module to import, will be an identifier
}

func (i *ImportStatement) statementNode()       {}
func (i *ImportStatement) TokenLiteral() string { return i.Token.Literal }

func (i *ImportStatement) String() string {
	builder := strings.Builder{}
	builder.WriteString("import ")
	builder.WriteString(i.Module.Value)
	builder.WriteString(";")
	return builder.String()
}

// HashLiteral is `{k1: v1, k2: v2}`. Note: Pairs is a Go map, so String()
// emits entries in nondeterministic order.
type HashLiteral struct {
	Token token.Token // The '{' token
	Pairs map[Expression]Expression
}

func (h *HashLiteral) expressionNode()      {}
func (h *HashLiteral) TokenLiteral() string { return h.Token.Literal }

func (h *HashLiteral) String() string {
	builder := strings.Builder{}
	builder.WriteByte('{')
	pairs := []string{}
	for k, v := range h.Pairs {
		pairs = append(pairs, k.String()+":"+v.String())
	}
	builder.WriteString(strings.Join(pairs, ", "))
	builder.WriteByte('}')
	return builder.String()
}

type ForEachStatement struct {
	Token       token.Token     // The 'for' token
	Identifiers []*Identifier   // The variables to capture into
	Collection  Expression      // The collection being looped over
	Body        *BlockStatement // The block to execute
}

func (f *ForEachStatement) statementNode()       {}
func (f *ForEachStatement) TokenLiteral() string { return f.Token.Literal }

func (f *ForEachStatement) String() string {
	builder := strings.Builder{}
	builder.WriteString("for ")
	ids := []string{}
	for _, i := range f.Identifiers {
		ids = append(ids, i.Value)
	}
	builder.WriteString(strings.Join(ids, ", "))
	// BUG FIX: was "in " with no leading space, which rendered
	// "for i, vin coll" instead of "for i, v in coll".
	builder.WriteString(" in ")
	builder.WriteString(f.Collection.String())
	builder.WriteString(f.Body.String())
	return builder.String()
}

type BreakStatement struct {
	Token token.Token // The 'break' token
}

func (b *BreakStatement) statementNode()       {}
func (b *BreakStatement) TokenLiteral() string { return b.Token.Literal }
func (b *BreakStatement) String() string       { return b.Token.Literal }
// Package io provides native-method stubs for java.io classes.
package io

import (
	"jean/constants"
	"jean/native"
	"jean/rtda/jvmstack"
)

// fdInitIDs implements java.io.FileDescriptor:
// private static native void initIDs();
// Currently a no-op stub.
func fdInitIDs(frame *jvmstack.Frame) {
	// todo
}

// set implements java.io.FileDescriptor:
// private static native long set(int d);
// (I)J
// Stub: always pushes 0 as the result.
func set(frame *jvmstack.Frame) {
	// todo
	frame.OperandStack().PushLong(0)
}

// init registers both natives with the JVM's native-method registry.
func init() {
	native.Register(constants.JavaIoFileDescriptor, "initIDs", "()V", fdInitIDs)
	native.Register(constants.JavaIoFileDescriptor, "set", "(I)J", set)
}
// monicore server: serves static docs, a REST API, and a websocket hub that
// pushes fresh data for a set of registered data types.
package main

import (
	"encoding/json"
	"github.com/gorilla/mux"
	"github.com/playgrunge/monicore/control"
	"github.com/playgrunge/monicore/core/api"
	"github.com/playgrunge/monicore/core/hub"
	"github.com/playgrunge/monicore/service"
	"log"
	"net/http"
	"time"
)

// h is the process-wide websocket hub shared by all handlers.
var h = hub.GetHub()

// main wires HTTP routes, starts the hub and the periodic refresh tasks, and
// blocks serving on :3000.
func main() {
	r := mux.NewRouter()
	http.HandleFunc("/websocket", hub.ServeWs)
	http.HandleFunc("/wsSend", wsSend)
	http.HandleFunc("/wsSendJSON", wsSendJSON)
	r.HandleFunc("/api/{key}", renderApi)
	r.PathPrefix("/").Handler(NoCacheFileServer(http.Dir("./doc/")))
	http.Handle("/", r)

	go h.Run()
	go runTaskUpdateData(control.HockeyName, time.Minute*10)
	go runTaskUpdateData(control.TwitterName, time.Minute*2)
	go listenForNewTypes()

	log.Println("Listening...")
	// NOTE(review): the error from ListenAndServe is ignored — a bind
	// failure exits main silently once the goroutines are orphaned.
	http.ListenAndServe(":3000", nil)
}

// runTaskUpdateData periodically refreshes one data type and broadcasts it
// when new. NOTE(review): the range over ticker.C never terminates, so the
// deferred Stop and the "Ticker stopped" log are unreachable in practice.
func runTaskUpdateData(dataType string, tickerTime time.Duration) {
	ticker := time.NewTicker(tickerTime)
	defer ticker.Stop()
	log.Println("Ticker started")
	for _ = range ticker.C {
		if val, ok := routes[dataType]; ok {
			if t, ok := val.(api.ApiRequest); ok {
				broadcastMessageIfNew(dataType, t)
			}
		}
	}
	log.Println("Ticker stopped")
}

// listenForNewTypes serves each newly-subscribed connection: for every type
// the connection asks for, send cached data if present, otherwise fetch,
// cache, and send fresh data.
func listenForNewTypes() {
	for {
		c := <-h.ReceiveNewTypes
		go func() {
			for i := range c.Types {
				if val, ok := routes[c.Types[i]]; ok {
					if t, ok := val.(api.ApiRequest); ok {
						if cachedData := service.GetLastData(c.Types[i]); cachedData != nil {
							//Return cached data
							message := hub.Message{c.Types[i], cachedData}
							pairConMessage := &hub.PairConMessage{c.Con, &message}
							h.SendToConnection <- pairConMessage
						} else if val, _ := t.GetApi(); val != nil {
							//Cache data and return real data
							service.UpdateNewData(c.Types[i], val)
							var d map[string]interface{}
							// NOTE(review): the Unmarshal error is ignored;
							// a malformed payload sends a nil data map.
							json.Unmarshal(val, &d)
							message := hub.Message{c.Types[i], d}
							pairConMessage := &hub.PairConMessage{c.Con, &message}
							h.SendToConnection <- pairConMessage
						}
					}
				}
			}
		}()
	}
}

// broadcastMessageIfNew fetches the API payload and broadcasts it to every
// connection, but only when the cache reports it changed.
func broadcastMessageIfNew(dataType string, a api.ApiRequest) {
	if val, _ := a.GetApi(); val != nil {
		if isNew := service.UpdateNewData(dataType, val); isNew {
			lastData := service.GetLastData(dataType)
			message := hub.Message{dataType, lastData}
			hub.GetHub().Broadcast <- &message
		}
	}
}

// renderApi serves /api/{key}: routes entries are either api.ApiRequest
// implementations or raw http handler funcs.
func renderApi(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	log.Println("RequestURI: " + r.Host + r.RequestURI)
	key := mux.Vars(r)["key"]
	if val, ok := routes[key]; ok {
		switch v := val.(type) {
		case api.ApiRequest:
			v.SendApi(w, r)
		default:
			// NOTE(review): this type assertion panics if a routes value is
			// neither an ApiRequest nor a handler func — confirm acceptable.
			v.(func(http.ResponseWriter, *http.Request))(w, r)
		}
	} else {
		notFound(w, r)
	}
}

// routes maps data-type keys to their API implementations.
var routes = map[string]interface{}{
	control.HockeyName:  new(control.HockeyApi),
	control.AirportName: new(control.AirportApi),
	control.WeatherName: new(control.WeatherApi),
	control.HydroName:   new(control.HydroApi),
	control.TwitterName: new(control.TwitterApi),
}

// notFound answers unknown /api/{key} values with a 400.
func notFound(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(400)
	w.Write([]byte("400: Bad Request"))
}

// noCacheFileServer wraps http.FileServer and disables client-side caching.
type noCacheFileServer struct {
	root http.FileSystem
}

// NoCacheFileServer returns a static file handler that always sends
// no-cache headers.
func NoCacheFileServer(root http.FileSystem) http.Handler {
	return &noCacheFileServer{root}
}

func (n *noCacheFileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")
	http.FileServer(n.root).ServeHTTP(w, r)
}

// wsSend broadcasts a chat message taken from the "m" form value (or a
// default text) to all websocket connections.
func wsSend(w http.ResponseWriter, r *http.Request) {
	message := hub.Message{}
	apiType := "chat"
	if r.FormValue("m") != "" {
		message = hub.Message{apiType, r.FormValue("m")}
	} else {
		message = hub.Message{apiType, "New message send from the server"}
	}
	h.Broadcast <- &message
}

// wsSendJSON triggers an immediate hockey-data refresh/broadcast.
// NOTE(review): w and r are unused; the handler ignores its request.
func wsSendJSON(w http.ResponseWriter, r *http.Request) {
	if val, ok := routes[control.HockeyName]; ok {
		if t, ok := val.(api.ApiRequest); ok {
			broadcastMessageIfNew(control.HockeyName, t)
		}
	}
}

// Message is an unused local sample type.
// NOTE(review): hub.Message appears to be the one actually used — confirm
// whether this type can be removed.
type Message struct {
	Name string
	Body string
	Time int64
}
package ernie

// Version is the current release version of the ernie package.
var Version = "v2.0.0"
/** * Created by: Jianyi * Date: 2019/1/4 * Time: 13:37 * Description: **/ package models import ( "github.com/astaxie/beego/orm" ) type Leader struct{ Id int `pk:"auto"` Name string `orm:"size(40)"` ProField string `orm:"size(200)"` Wechat string `orm:"size(100)"` Status int `orm:"size(11)"` Avatar string `orm:"size(255)"` WechatImg string `orm:"size(255)"` } func (this *Leader) TableName() string { return TableName("leader") } func LeaderAdd(record *Leader) (int64, error) { return orm.NewOrm().Insert(record) } func (this *Leader) LeaderUpdate(fields ... string) error { if _, err := orm.NewOrm().Update(this, fields...); err != nil { return err } return nil } func LeaderGetList(page, pageSize int, filters ...interface{}) ([]*Leader, int64) { offset := (page - 1) * pageSize list := make([]*Leader, 0) query := orm.NewOrm().QueryTable(TableName("leader")) if len(filters) > 0 && filters != nil { l := len(filters) for k := 0; k < l; k += 2 { query = query.Filter(filters[k].(string), filters[k+1]) } } total, _ := query.Count() query.OrderBy("-id").Limit(pageSize, offset).All(&list) return list, total } //只显示正常状态 func LeaderAll() []*Leader { list := make([]*Leader, 0) orm.NewOrm().QueryTable(TableName("leader")).Filter("status",1).All(&list) return list } func LeaderGetById(id int) (*Leader, error) { leader := new(Leader) err := orm.NewOrm().QueryTable(TableName("leader")).Filter("id", id).One(leader) if err != nil { return nil, err } return leader, nil }
package main

// Simple program to list databases and the tables

import (
	"context"
	"database/sql"
	"log"

	impala "github.com/bippio/go-impala"
)

// main connects to Impala over LDAP+TLS, enumerates all databases, then
// lists the tables of each one.
func main() {
	opts := impala.DefaultOptions
	opts.Host = "<impala host>"
	opts.Port = "21050"

	// enable LDAP authentication:
	opts.UseLDAP = true
	opts.Username = "<ldap username>"
	opts.Password = "<ldap password>"

	// enable TLS
	opts.UseTLS = true
	opts.CACertPath = "/path/to/cacert"

	connector := impala.NewConnector(&opts)
	db := sql.OpenDB(connector)
	defer db.Close()

	ctx := context.Background()

	rows, err := db.QueryContext(ctx, "SHOW DATABASES")
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: the result sets were never closed, leaking the underlying
	// connection for every iteration below.
	defer rows.Close()

	r := struct {
		name    string
		comment string
	}{}

	databases := make([]string, 0) // databases will contain all the DBs to enumerate later
	for rows.Next() {
		if err := rows.Scan(&r.name, &r.comment); err != nil {
			log.Fatal(err)
		}
		databases = append(databases, r.name)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
	log.Println("List of Databases", databases)

	stmt, err := db.PrepareContext(ctx, "SHOW TABLES IN @p1")
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()

	tbl := struct {
		name string
	}{}

	for _, d := range databases {
		tableRows, err := stmt.QueryContext(ctx, d)
		if err != nil {
			log.Printf("error in querying database %s: %s", d, err.Error())
			continue
		}

		tables := make([]string, 0)
		for tableRows.Next() {
			if err := tableRows.Scan(&tbl.name); err != nil {
				log.Println(err)
				continue
			}
			tables = append(tables, tbl.name)
		}
		if err := tableRows.Err(); err != nil {
			log.Println(err)
		}
		// Close promptly so the connection is reusable on the next loop
		// iteration (defer would hold all of them until main returns).
		tableRows.Close()
		log.Printf("List of Tables in Database %s: %v\n", d, tables)
	}
}
// Small net/http demo: form POST handling and a JSON endpoint.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	//http.HandleFunc("/getTest", getRequest)
	//http.HandleFunc("/postTest", postRequest)
	http.HandleFunc("/postForm", postRequest2)
	http.HandleFunc("/getJson", getJsonData)
	log.Fatal(http.ListenAndServe(":8080", nil))
}

// getRequest demonstrates reading query parameters two ways.
func getRequest(writer http.ResponseWriter, request *http.Request) {
	query := request.URL.Query()
	// Way 1: index the values slice directly.
	name := query["name"][0]
	// Way 2: Get returns the first value (or "").
	name2 := query.Get("name")
	fmt.Print("获取到name2:" + name2)
	writer.Write([]byte(name + "一起学习Go Web编程吧"))
}

// postRequest handles an application/json body.
func postRequest(w http.ResponseWriter, r *http.Request) {
	decoder := json.NewDecoder(r.Body)
	var params map[string]string
	if err := decoder.Decode(&params); err != nil {
		// Best effort, as before: log and fall through with empty params.
		log.Println("decode body:", err)
	}
	// BUG FIX: fmt.Print does not interpret verbs — "%s" was printed
	// literally; use Printf.
	fmt.Printf("username=%s,password=%s", params["username"], params["password"])
	w.Write([]byte("username:" + params["username"] + ",password:" + params["password"]))
}

// postRequest2 handles an application/x-www-form-urlencoded body.
func postRequest2(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	username := r.Form.Get("username")
	password := r.Form.Get("password")
	// BUG FIX: same Print-with-verbs mistake as above.
	fmt.Printf("username=%s,password=%s", username, password)
	w.Write([]byte("username:" + username + ",password:" + password))
}

// Person is the payload element of the JSON response.
type Person struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

// Response is the JSON envelope returned by /getJson.
type Response struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
	Data Person `json:"data"`
}

// getJsonData writes a fixed success response as JSON.
func getJsonData(writer http.ResponseWriter, request *http.Request) {
	res := Response{
		0,
		"success",
		Person{
			"Jack",
			20,
		},
	}
	json.NewEncoder(writer).Encode(res)
}
// Tests for the configuration parser: concurrent load safety and typed
// getters (plain and *Safely variants).
package configuration

import (
	"fmt"
	"github.com/stretchr/testify/assert"
	"runtime"
	"sync"
	"testing"
)

// TestParseKeyOrder hammers LoadConfig from two goroutines and checks that
// the .order values of each group come back in declaration order.
func TestParseKeyOrder(t *testing.T) {
	wg := &sync.WaitGroup{}
	fn := func() {
		defer func() {
			wg.Done()
		}()
		for i := 0; i < 100000; i++ {
			conf := LoadConfig("tests/configs.conf")
			for g := 1; g < 3; g++ {
				for i := 1; i < 4; i++ {
					key := fmt.Sprintf("test.out.a.b.c.d.groups.g%d.o%d.order", g, i)
					order := conf.GetInt32(key, -1)
					if order != int32(i) {
						fmt.Println(conf)
						// NOTE(review): t.Fatalf from a non-test goroutine is
						// not guaranteed to stop the test — confirm intended.
						t.Fatalf("order not match,group %d, except: %d, real order: %d", g, i, order)
						return
					}
				}
			}
			conf = nil
			runtime.Gosched()
		}
	}
	wg.Add(2)
	go fn()
	go fn()
	wg.Wait()
}

// Boolean parsing accepts true/false, yes/no, and on/off in any case.
func TestConfig_GetBoolean(t *testing.T) {
	conf := ParseString("{k1:TRUE, k2:faLSE, k3: yes,k4:no, k5 : on , k6:oFf}")
	assert.Equal(t, true, conf.GetBoolean("k1"))
	assert.Equal(t, false, conf.GetBoolean("k2"))
	assert.Equal(t, true, conf.GetBoolean("k3"))
	assert.Equal(t, false, conf.GetBoolean("k4"))
	assert.Equal(t, true, conf.GetBoolean("k5"))
	assert.Equal(t, false, conf.GetBoolean("k6"))
}

// Int32 parsing at the exact int32 boundaries.
func TestConfig_GetInt32(t *testing.T) {
	conf := ParseString("{k1:2147483647, k2:-2147483648,}")
	assert.Equal(t, int32(2147483647), conf.GetInt32("k1"))
	assert.Equal(t, int32(-2147483648), conf.GetInt32("k2"))
}

// Int64 parsing at the exact int64 boundaries.
func TestConfig_GetInt64(t *testing.T) {
	conf := ParseString("{k1:9223372036854775807, k2:-9223372036854775808,}")
	assert.Equal(t, int64(9223372036854775807), conf.GetInt64("k1"))
	assert.Equal(t, int64(-9223372036854775808), conf.GetInt64("k2"))
}

// Float32 parsing of scientific notation.
func TestConfig_GetFloat32(t *testing.T) {
	conf := ParseString("{k1:1e3, k2:1e-3,}")
	assert.Equal(t, float32(1000), conf.GetFloat32("k1"))
	assert.Equal(t, float32(0.001), conf.GetFloat32("k2"))
}

// Float64 parsing of scientific notation.
func TestConfig_GetFloat64(t *testing.T) {
	conf := ParseString("{k1:1e3, k2:1e-3,}")
	assert.Equal(t, 1000., conf.GetFloat64("k1"))
	assert.Equal(t, 0.001, conf.GetFloat64("k2"))
}

// Safely variant: valid booleans return no error.
func TestConfig_GetBooleanSafely(t *testing.T) {
	conf := ParseString("{k1:TRUE, k2:faLSE, k3: yes,k4:no, k5 : on , k6:oFf}")
	var v bool
	var err error
	v, err = conf.GetBooleanSafely("k1")
	if assert.Nil(t, err) {
		assert.Equal(t, true, v)
	}
	v, err = conf.GetBooleanSafely("k2")
	if assert.Nil(t, err) {
		assert.Equal(t, false, v)
	}
	v, err = conf.GetBooleanSafely("k3")
	if assert.Nil(t, err) {
		assert.Equal(t, true, v)
	}
	v, err = conf.GetBooleanSafely("k4")
	if assert.Nil(t, err) {
		assert.Equal(t, false, v)
	}
	v, err = conf.GetBooleanSafely("k5")
	if assert.Nil(t, err) {
		assert.Equal(t, true, v)
	}
	v, err = conf.GetBooleanSafely("k6")
	if assert.Nil(t, err) {
		assert.Equal(t, false, v)
	}
}

// Safely variant: a non-boolean token must error.
func TestConfig_GetBooleanSafelyError(t *testing.T) {
	conf := ParseString("{k1:qwerty,}")
	_, err := conf.GetBooleanSafely("k1")
	assert.Error(t, err)
}

func TestConfig_GetInt32Safely(t *testing.T) {
	conf := ParseString("{k1:2147483647, k2:-2147483648,}")
	var v int32
	var err error
	v, err = conf.GetInt32Safely("k1")
	if assert.Nil(t, err) {
		assert.Equal(t, int32(2147483647), v)
	}
	v, err = conf.GetInt32Safely("k2")
	if assert.Nil(t, err) {
		assert.Equal(t, int32(-2147483648), v)
	}
}

// Out-of-range and non-numeric values must error for int32.
func TestConfig_GetInt32SafelyError(t *testing.T) {
	conf := ParseString("{k1:2147483648, k2:-2147483649, k3: qwerty}")
	var err error
	_, err = conf.GetInt32Safely("k1")
	assert.Error(t, err)
	_, err = conf.GetInt32Safely("k2")
	assert.Error(t, err)
	_, err = conf.GetInt32Safely("k3")
	assert.Error(t, err)
}

func TestConfig_GetInt64Safely(t *testing.T) {
	conf := ParseString("{k1:9223372036854775807, k2:-9223372036854775808,}")
	var v int64
	var err error
	v, err = conf.GetInt64Safely("k1")
	if assert.Nil(t, err) {
		assert.Equal(t, int64(9223372036854775807), v)
	}
	v, err = conf.GetInt64Safely("k2")
	if assert.Nil(t, err) {
		assert.Equal(t, int64(-9223372036854775808), v)
	}
}

// Out-of-range and non-numeric values must error for int64.
func TestConfig_GetInt64SafelyError(t *testing.T) {
	conf := ParseString("{k1:9223372036854775808, k2:-9223372036854775809, k3: qwerty}")
	var err error
	_, err = conf.GetInt64Safely("k1")
	assert.Error(t, err)
	_, err = conf.GetInt64Safely("k2")
	assert.Error(t, err)
	_, err = conf.GetInt64Safely("k3")
	assert.Error(t, err)
}

func TestConfig_GetFloat32Safely(t *testing.T) {
	conf := ParseString("{k1:1e3, k2:1e-3,}")
	var v float32
	var err error
	v, err = conf.GetFloat32Safely("k1")
	if assert.Nil(t, err) {
		assert.Equal(t, float32(1000), v)
	}
	v, err = conf.GetFloat32Safely("k2")
	if assert.Nil(t, err) {
		assert.Equal(t, float32(0.001), v)
	}
}

func TestConfig_GetFloat32SafelyError(t *testing.T) {
	conf := ParseString("{k1:qwerty,}")
	_, err := conf.GetFloat32Safely("k1")
	assert.Error(t, err)
}

func TestConfig_GetFloat64Safely(t *testing.T) {
	conf := ParseString("{k1:1e3, k2:1e-3,}")
	var v float64
	var err error
	v, err = conf.GetFloat64Safely("k1")
	if assert.Nil(t, err) {
		assert.Equal(t, 1000., v)
	}
	v, err = conf.GetFloat64Safely("k2")
	if assert.Nil(t, err) {
		assert.Equal(t, 0.001, v)
	}
}

func TestConfig_GetFloat64SafelyError(t *testing.T) {
	conf := ParseString("{k1:qwerty,}")
	_, err := conf.GetFloat64Safely("k1")
	assert.Error(t, err)
}
package main

import (
	"bytes"
	"fmt"

	"github.com/adiabat/btcd/wire"
	"github.com/adiabat/goodelivery/extract"
	"github.com/mit-dci/lit/portxo"
)

// extractmany parses a bitcoind `listunspent` dump from the input file and
// emits one hex-encoded portxo per line.
func (g *GDsession) extractmany() error {
	if *g.inFileName == "" {
		return fmt.Errorf("extract needs input file (-in)")
	}

	filetext, err := g.inputText()
	if err != nil {
		return err
	}

	ptxos, err := extract.ParseBitcoindListUnspent(filetext)
	if err != nil {
		return err
	}

	var outstring string
	// go through each portxo, convert to bytes, then hex, then make a line
	for _, p := range ptxos {
		b, err := p.Bytes()
		if err != nil {
			return err
		}
		outstring += fmt.Sprintf("%x\n", b)
	}
	return g.output(outstring)
}

// extract takes in a hex-encoded transaction, and returns a portxo.
// or if it's a listunspent, then make a bunch of portxos
func (g *GDsession) extractfromtx() error {
	if *g.inFileName == "" {
		return fmt.Errorf("extract needs input file (-in)")
	}

	tx := wire.NewMsgTx()

	fileslice, err := g.inputHex()
	if err != nil {
		return err
	}

	// make buffer
	txbuf := bytes.NewBuffer(fileslice)
	err = tx.Deserialize(txbuf)
	if err != nil {
		return err
	}

	// tx did work, get index and try extracting utxo
	// make sure it has, like, inputs and outputs
	// BUG FIX: the error was previously constructed and discarded, so an
	// empty tx fell through to ExtractFromTx.
	if len(tx.TxIn) < 1 || len(tx.TxOut) < 1 {
		return fmt.Errorf("tx has no inputs (or outputs)")
	}

	idx := uint32(*g.index)
	// (removed the dead `u := new(portxo.PorTxo)` — it was immediately
	// overwritten here)
	u, err := portxo.ExtractFromTx(tx, idx)
	if err != nil {
		return err
	}

	// assume PKH for now... detect based on pkScript later
	u.Mode = portxo.TxoP2PKHComp

	if *g.verbose {
		fmt.Printf("%s\n", u.String())
	}

	b, err := u.Bytes()
	if err != nil {
		return err
	}

	outString := fmt.Sprintf("%x", b)
	return g.output(outString)
}
package main

import (
	"fmt"
	"io"
	"log"
	"net/url"
	"time"
)

//Common atributes of a crawler.
type crawlerInternals struct {
	finishTime time.Time    // absolute deadline for the crawl
	fetcher    fetcher      // retrieves page content
	rules      accessPolicy // decides which URLs may be visited
	frontier   urlFrontier  // queue of URLs still to visit
	store      urlStore     // cache of visited URLs and their bodies
	sitemap    sitemap      // accumulated site structure
}

// initCommonAttributes populates c and seeds the frontier with the
// canonical root URL of every seed domain.
func initCommonAttributes(c *crawlerInternals, seed []string, fet fetcher,
	rules accessPolicy, uf urlFrontier, duration time.Duration, s urlStore,
	sm sitemap) {
	c.rules = rules
	c.finishTime = time.Now().Add(duration)
	c.fetcher = fet
	c.frontier = uf
	for _, domain := range seed {
		// NOTE(review): both Parse and canonicalization errors are silently
		// ignored; a malformed seed enqueues a zero-value URL — confirm.
		domainURL, _ := url.Parse("http://" + domain + "/") // Causes redirect if https.
		curl, _ := getCanonicalURLString("/", domainURL)
		c.frontier.addURLString(curl)
	}
	c.store = s
	c.sitemap = sm
}

// Checks if access policy allows this URL.
func (c *crawlerInternals) canProcess(curl string) bool {
	return c.rules.checkURL(curl)
}

// Checks if url has been added to cache. I.e. it has been visited, or is in
// urlFrontier
func (c *crawlerInternals) seen(curl string) bool {
	if _, exists := c.store.get(curl); exists {
		return true
	}
	return false
}

// Store url in cache.
func (c *crawlerInternals) storeURL(curl string, body []byte) {
	c.store.put(curl, body)
}

// Check execution timeout.
func (c *crawlerInternals) isTimeout() bool {
	return c.finishTime.Before(time.Now())
}

// Find urls in a page and returns the body of the document.
func (c *crawlerInternals) findURLLinksGetBody(url *url.URL) ([]string, io.Reader, error) {
	content, err := c.fetcher.getURLContent(url)
	// NOTE(review): despite its name, "err" is compared against 200, so
	// getURLContent apparently returns an HTTP-status-like int — confirm
	// and consider renaming.
	if err != 200 {
		log.Printf("Error fetching url: %v %v", url, err)
		return nil, nil, fmt.Errorf("Error fetching %v: %v ", url, err)
	}
	// NOTE(review): content.Body is read by getAllTagAttr and then also
	// returned to the caller — verify the reader is still consumable
	// afterwards (a plain stream would already be drained).
	return getAllTagAttr(crawlTags, content.Body), content.Body, nil
}

// printSitemap writes the given sitemap to out.
func (c *crawlerInternals) printSitemap(s sitemap, out io.Writer) {
	s.printSitemap(out)
}
package main

import (
	"fmt"

	// Blank-import the package purely for its init side effects; the
	// underscore avoids an "imported and not used" error. Replacing the
	// underscore with a name would instead alias (rename) the package.
	_ "go-demo/_import/package"
)

// main prints a marker line; the interesting work happens in the
// blank-imported package's init function.
func main() {
	fmt.Println("666")
}
// Copyright 2020 cloudeng llc. All rights reserved.
// Use of this source code is governed by the Apache-2.0
// license that can be found in the LICENSE file.

package signals_test

import (
	"context"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"

	"cloudeng.io/cmdutil/expect"
	"cloudeng.io/cmdutil/signals"
)

// runSubprocess launches testdata/signal_main.go via `go run` with the
// given extra arguments, wiring both stdout and stderr into one pipe so
// the test can scan the child's combined output line by line.
func runSubprocess(t *testing.T, args []string) (*exec.Cmd, io.Reader) {
	rd, wr, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	cl := []string{"run", filepath.Join("testdata", "signal_main.go")}
	cl = append(cl, args...)
	cmd := exec.Command("go", cl...)
	cmd.Stdout = wr
	cmd.Stderr = wr
	if err := cmd.Start(); err != nil {
		t.Fatalf("failed to run %v: %v", strings.Join(cmd.Args, " "), err)
	}
	return cmd, rd
}

// TestSignal verifies the debounce behavior of the signal handler in a
// real subprocess: rapid repeated SIGINTs within the debounce window do
// not terminate the process, while a second SIGINT after the window does.
func TestSignal(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, time.Second*5)
	defer cancel()

	// runCmd starts the subprocess and waits until it reports its PID so
	// signals can be delivered to the right process (the `go run` child,
	// not the `go` tool itself).
	runCmd := func(args ...string) (*exec.Cmd, int, *expect.Lines) {
		cmd, rd := runSubprocess(t, args)
		st := expect.NewLineStream(rd)
		if err := st.ExpectEventuallyRE(ctx, regexp.MustCompile(`PID=\d+`)); err != nil {
			t.Fatal(err)
		}
		_, line := st.LastMatch()
		// Parse the number following "PID=".
		pid, err := strconv.ParseInt(line[strings.Index(line, "=")+1:], 10, 64)
		if err != nil {
			t.Fatal(err)
		}
		return cmd, int(pid), st
	}

	var wg sync.WaitGroup
	wg.Add(1)
	cmd, pid, st := runCmd("--debounce=5s")
	go func() {
		// Make sure that multiple signals in quick succession do not
		// cause the process to exit.
		syscall.Kill(pid, syscall.SIGINT)
		syscall.Kill(pid, syscall.SIGINT)
		syscall.Kill(pid, syscall.SIGINT)
		wg.Done()
	}()
	if err := st.ExpectEventuallyRE(ctx, regexp.MustCompile(`CANCEL PID=\d+`)); err != nil {
		t.Fatal(err)
	}
	if err := st.ExpectNext(ctx, "interrupt"); err != nil {
		t.Fatal(err)
	}
	// The subprocess should exit cleanly after the debounced interrupt.
	if err := cmd.Wait(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	wg.Wait()

	// Make sure that a second signal after the debounce period leads to
	// an exit.
	cmd, pid, st = runCmd("--debounce=250ms")
	go func() {
		syscall.Kill(pid, syscall.SIGINT)
		// Wait out the debounce window before the second signal.
		time.Sleep(time.Millisecond * 250)
		syscall.Kill(pid, syscall.SIGINT)
	}()
	if err := st.ExpectEventuallyRE(ctx, regexp.MustCompile(`CANCEL PID=\d+`)); err != nil {
		t.Fatal(err)
	}
	if err := st.ExpectNext(ctx, "exit status 1"); err != nil {
		t.Fatal(err)
	}
	// This time the process must terminate with a non-zero status.
	err := cmd.Wait()
	if err == nil || err.Error() != "exit status 1" {
		t.Errorf("unexpected error: %v", err)
	}
}

// TestCancel verifies that cancelling the parent context unblocks
// WaitForSignal with a synthetic "context canceled" signal.
func TestCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	_, handler := signals.NotifyWithCancel(ctx, os.Interrupt)
	go func() {
		cancel()
	}()
	sig := handler.WaitForSignal()
	if got, want := sig.String(), "context canceled"; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}

// TestMultipleCancel verifies that registered cancel callbacks run in
// registration order after the context is cancelled.
func TestMultipleCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	_, handler := signals.NotifyWithCancel(ctx, os.Interrupt)
	// out records callback invocations; guarded by mu because callbacks
	// may run on another goroutine.
	out := []string{}
	mu := sync.Mutex{}
	writeString := func(m string) {
		mu.Lock()
		defer mu.Unlock()
		out = append(out, m)
	}
	getString := func() string {
		mu.Lock()
		defer mu.Unlock()
		return strings.Join(out, "..")
	}
	handler.RegisterCancel(
		func() { writeString("a") },
		func() { writeString("b") },
	)
	go func() {
		cancel()
	}()
	sig := handler.WaitForSignal()
	if got, want := sig.String(), "context canceled"; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	// Give the asynchronous callbacks time to run before asserting order.
	time.Sleep(time.Second)
	if got, want := getString(), "a..b"; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}
package main

import (
	"bufio"
	"fmt"
	"io"
	"log"
	"net"
	"strings"
)

// serve handles one connection: it reads the HTTP request line, extracts
// the method and URI, and writes a plain-text response echoing them back.
func serve(conn net.Conn) {
	defer conn.Close()

	const bodyTemplate = "Hello World! \nMethod: %s \nURI: %s \n"

	var method, uri string
	scanner := bufio.NewScanner(conn)
	for i := 0; scanner.Scan(); i++ {
		ln := scanner.Text()
		if ln == "" {
			// Blank line terminates the request headers.
			break
		}
		if i == 0 {
			// Request line: METHOD URI PROTO. Guard against malformed
			// input before indexing (the original panicked on a request
			// line with fewer than two fields).
			fields := strings.Fields(ln)
			if len(fields) >= 2 {
				method = fields[0]
				uri = fields[1]
			}
		}
	}

	fmt.Println("Method: " + method)
	fmt.Println("URI: " + uri)

	// Format the body before computing Content-Length so the advertised
	// length matches the bytes actually sent. (The original summed the
	// template, method and URI lengths, counting the two "%s"
	// placeholders as well — 4 bytes too many.)
	body := fmt.Sprintf(bodyTemplate, method, uri)
	io.WriteString(conn, "HTTP/1.1 200 OK\r\n")
	fmt.Fprintf(conn, "Content-Length: %d\r\n", len(body))
	fmt.Fprint(conn, "Content-Type: text/plain\r\n")
	io.WriteString(conn, "\r\n")
	io.WriteString(conn, body)
}

// main listens on :8080 and serves each accepted connection concurrently.
func main() {
	li, err := net.Listen("tcp", ":8080")
	if err != nil {
		// Check the error before deferring Close: on failure li is nil
		// and the original's defer would have been registered on it.
		log.Fatal(err)
	}
	defer li.Close()
	for {
		conn, err := li.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go serve(conn)
	}
}
package handlers

// ArchLinux re-exports the package-private archLinux handler type so
// other packages can refer to it by name.
type ArchLinux = archLinux

// Darwin re-exports the package-private darwin handler type.
type Darwin = darwin
package helper

import (
	"encoding/base64"
	"math/rand"
	"net/http"
	"strconv"
	"strings"
	"time"

	"regexp"

	"github.com/asaskevich/govalidator"
	"golang.org/x/crypto/bcrypt"
)

// init registers the custom govalidator tags and seeds the PRNG once.
func init() {
	// Seed once at startup. The original reseeded inside getLetterRandom
	// on every call, so calls landing in the same nanosecond produced
	// identical "random" letters.
	rand.Seed(time.Now().UTC().UnixNano())

	// MustCompile is safe for a constant pattern; the original ignored
	// the Compile error, which would have left a nil regexp that panics
	// on first use.
	alphaSpaces := regexp.MustCompile("^[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð ,.'-]+$")
	govalidator.TagMap["alphaSpaces"] = govalidator.Validator(func(str string) bool {
		return alphaSpaces.MatchString(str)
	})
	govalidator.TagMap["password"] = govalidator.Validator(func(str string) bool {
		return true
	})
	govalidator.TagMap["encript"] = govalidator.Validator(func(str string) bool {
		return true
	})
}

// ValidateStruct runs govalidator's struct validation on obj.
func ValidateStruct(obj interface{}) (bool, error) {
	return govalidator.ValidateStruct(obj)
}

// PuedoVer reports whether a user with the given relationship may access
// a resource with the given permission level.
func PuedoVer(relacion int8, permiso string) bool {
	if permiso == "private" {
		return false
	}
	if permiso == "public" {
		return true
	}
	// Remaining levels (e.g. "friends"): only relationship 1 may view.
	if relacion == 1 {
		return true
	}
	return false
}

// getLetterRandom returns one pseudo-random character from 0-9, A-Y or
// a-y. (Note: 'Z' and 'z' are unreachable with the current ranges;
// behavior preserved as-is.)
func getLetterRandom() string {
	var r = rand.Intn(60)
	var b = make([]byte, 1)
	if r < 10 {
		b[0] = byte(r + 48) // '0'..'9'
	} else if r < 35 {
		b[0] = byte(r + 55) // 'A'..'Y'
	} else {
		b[0] = byte(r + 62) // 'a'..'y'
	}
	return string(b)
}

// GenerateRandomStringWithowTime generates s random characters.
func GenerateRandomStringWithowTime(s int) string {
	var sb strings.Builder
	for i := 0; i < s; i++ {
		sb.WriteString(getLetterRandom())
	}
	return sb.String()
}

// GenerateRandomString generates a string of at least s characters: a
// base64-encoded timestamp prefix padded with random characters up to s.
// NOTE(review): when the prefix alone is >= s characters the result is
// longer than s — confirm callers expect "at least" semantics.
func GenerateRandomString(s int) string {
	var r = base64.StdEncoding.EncodeToString([]byte(strconv.Itoa(int(time.Now().Unix()))))
	r = strings.Replace(r, "==", "", 1)
	l := len(r)
	for i := 0; i < s-l; i++ {
		r += getLetterRandom()
	}
	return r
}

// Encript hashes the password with bcrypt at the default cost.
func Encript(password string) string {
	hashedPassword, _ := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	return string(hashedPassword)
}

// IsValid reports whether the password matches the stored bcrypt hash,
// i.e. whether it is the password recorded in the database.
func IsValid(hash string, password string) bool {
	return bcrypt.CompareHashAndPassword(
		[]byte(hash),
		[]byte(password),
	) == nil
}

// IsValidPermision reports whether permiso is one of the supported
// permission levels.
func IsValidPermision(permiso string) bool {
	return permiso == "private" || permiso == "friends" || permiso == "public"
}

// GetToken returns the auth token, checking in order: the query string,
// the "token" cookie, then the "token" header.
func GetToken(r *http.Request) string {
	var _token = r.URL.Query().Get("token")
	if _token == "" {
		_token = GetCookie(r, "token")
	}
	if _token == "" {
		_token = r.Header.Get("token")
	}
	return _token
}
package workers

import (
	"log"

	"github.com/nicholasjackson/sorcery/data"
	"github.com/nicholasjackson/sorcery/logging"
)

// DeadLetterQueueWorkerFactory builds dead-letter-queue workers from
// collaborators supplied by the dependency-injection container (see the
// `inject` struct tags).
type DeadLetterQueueWorkerFactory struct {
	EventDispatcher EventDispatcher `inject:"eventdispatcher"`
	Dal             data.Dal        `inject:"dal"`
	StatsD          logging.StatsD  `inject:"statsd"`
	Log             *log.Logger     `inject:""`
}

// Create satisfies the worker-factory contract by wiring the injected
// dependencies into a new dead-letter worker.
func (f *DeadLetterQueueWorkerFactory) Create() Worker {
	return NewDeadLetterWorker(f.EventDispatcher, f.Dal, f.Log, f.StatsD)
}
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// main demonstrates two ways of synchronizing goroutines: draining a
// buffered channel, and a sync.WaitGroup.
func main() {
	const workers = 100
	done := make(chan int, workers)
	defer close(done)

	// 1. Channel-based synchronization: each goroutine reports its id.
	for i := 1; i <= workers; i++ {
		go func(id int) {
			fmt.Println("开启线程", id)
			done <- id
		}(i)
	}
	// Draining with `range done` would deadlock the main goroutine here
	// because the channel is never closed before all receives complete,
	// so receive a fixed count instead.
	for i := 0; i < workers; i++ {
		fmt.Println(<-done, "线程关闭")
	}
	fmt.Println("执行完毕")

	// 2. WaitGroup-based synchronization.
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			fmt.Println(id, "开始执行")
		}(i)
	}
	wg.Wait()
}

// foo runs bar in the background and loops until the context is
// cancelled, reporting a timeout message every two seconds.
func foo(ctx context.Context, name string) {
	go bar(ctx, name)
	for {
		select {
		case <-time.After(2 * time.Second):
			fmt.Println("foo 超时")
		case <-ctx.Done():
			fmt.Println(name, "foo exit")
			return
		}
	}
}

// bar loops until the context is cancelled, doing periodic work.
func bar(ctx context.Context, name string) {
	for {
		select {
		case <-time.After(2 * time.Second):
			fmt.Println(name, "B do something")
		case <-ctx.Done():
			fmt.Println(name, "B Exit")
			return
		}
	}
}

// Test demonstrates context cancellation: after five seconds the cancel
// func is called, which propagates through ctx.Done() to foo and bar.
func Test() {
	ctx, cancel := context.WithCancel(context.Background())
	go foo(ctx, "foobar")
	fmt.Println("client release connection, need to notify A, B exit")
	time.Sleep(5 * time.Second)
	cancel() //mock client exit, and pass the signal, ctx.Done() gets the signal
	time.Sleep(3 * time.Second)
	time.Sleep(3 * time.Second)
}
package attacker

import (
	"io/ioutil"
	"net/http"

	"github.com/AmyangXYZ/barbarian"

	"../logger"
	"../utils"

	"github.com/gorilla/websocket"
)

// BasicSQLi checks basic sqli vuls.
// WebSocket API.
type BasicSQLi struct {
	mconn         *utils.MuxConn // websocket connection for streaming findings
	fuzzableURLs  []string       // candidate URLs supplied via Set
	payload0      string         // URL-encoded `/**/&&1=2#` — false condition
	payload1      string         // URL-encoded `/**/&&1=1#` — true condition
	InjectableURL []string       // URLs judged injectable
}

// NewBasicSQLi returns a new basicSQli Attacker with the two boolean
// payloads pre-encoded.
func NewBasicSQLi() *BasicSQLi {
	return &BasicSQLi{
		mconn:    &utils.MuxConn{},
		payload0: "/**/%26%261%3d2%23",
		payload1: "/**/%26%261%3d1%23",
	}
}

// Set implements Attacker interface.
// Params should be {conn *websocket.Conn, fuzzableURLs []string}
func (bs *BasicSQLi) Set(v ...interface{}) {
	bs.mconn.Conn = v[0].(*websocket.Conn)
	bs.fuzzableURLs = v[1].([]string)
}

// Report implements Attacker interface. It returns the URLs found
// injectable so far.
func (bs *BasicSQLi) Report() map[string]interface{} {
	return map[string]interface{}{
		"sqli_urls": bs.InjectableURL,
	}
}

// Run implements Attacker interface. It fans the candidate URLs out to
// 20 concurrent check workers via the barbarian pool.
func (bs *BasicSQLi) Run() {
	logger.Green.Println("Basic SQLi Checking...")
	bb := barbarian.New(bs.check, bs.onResult, bs.fuzzableURLs, 20)
	bb.Run()
	if len(bs.InjectableURL) == 0 {
		logger.Blue.Println("no sqli vuls found")
	}
}

// onResult records a positive finding and streams it over the websocket.
func (bs *BasicSQLi) onResult(res interface{}) {
	logger.Blue.Println(res)
	ret := map[string]string{
		"sqli_url": res.(string),
	}
	bs.mconn.Send(ret)
	bs.InjectableURL = append(bs.InjectableURL, res.(string))
}

// check probes one URL with the true and false boolean payloads and
// reports the URL when the response bodies differ in length.
// NOTE(review): length difference is a heuristic; dynamic pages can
// produce false positives — confirm acceptable for this tool.
func (bs *BasicSQLi) check(URL string) interface{} {
	body0 := bs.fetch(URL + bs.payload0)
	body1 := bs.fetch(URL + bs.payload1)
	if len(body0) != len(body1) {
		return URL
	}
	return nil
}

// fetch GETs the URL with a browser User-Agent and returns the body,
// or "" on request failure.
// NOTE(review): the http.Client has no Timeout set, so a hanging target
// can stall a worker indefinitely — consider adding one.
func (bs *BasicSQLi) fetch(URL string) string {
	client := &http.Client{}
	req, _ := http.NewRequest("GET", URL, nil)
	req.Header.Set("User-Agent", "Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0")
	resp, err := client.Do(req)
	if err != nil {
		return ""
	}
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	return string(body)
}
package lc // Time: O(n) // Benchmark: 108ms 7.4mb | 100% type ListNode struct { Val int Next *ListNode } func mergeInBetween(list1 *ListNode, a int, b int, list2 *ListNode) *ListNode { n := list1 var startRef, endRef *ListNode for i := 0; i <= b; i++ { if i == a-1 { startRef = n } if i == b { endRef = n } n = n.Next } startRef.Next = list2 n = list2 for n.Next != nil { n = n.Next } n.Next = endRef.Next return list1 }
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Command couples a console command name with its handler and usage text.
type Command struct {
	name    string
	Execute func(args []string) bool
	usage   string
}

// commands is the registry of dispatchable console commands.
var commands []Command

// commandManager reads one line from stdin and dispatches it, signalling
// the command queue when done.
func commandManager() {
	defer commandQueue.Done()
	inputCommand(consoleRead())
}

// registerCommand adds a command to the dispatch registry.
func registerCommand(command Command) {
	commands = append(commands, command)
}

// inputCommand routes the parsed arguments to the built-ins ("help",
// "stop") or to a registered command matching args[0].
func inputCommand(args []string) {
	switch args[0] {
	case "help":
		showHelp()
		return
	case "stop":
		go shutdown()
		return
	}
	for _, c := range commands {
		if c.name != args[0] {
			continue
		}
		if !c.Execute(args) {
			info("Error while executing the command!")
		}
		return
	}
	info("Command doesn't exist !")
}

// showHelp prints the built-in help followed by each registered
// command's usage line (when one is set).
func showHelp() {
	fmt.Printf(InfoColor, "Help commands: ")
	fmt.Printf(InfoColor, "stop : allow to stop the application")
	for _, c := range commands {
		if usage := c.usage; usage != "" {
			fmt.Printf(InfoColor, usage)
		}
	}
}

// consoleRead reads a line from stdin, strips the newline and splits it
// into whitespace-free fields on single spaces.
func consoleRead() []string {
	reader := bufio.NewReader(os.Stdin)
	line, err := reader.ReadString('\n')
	failOnError(err, "Error when send message")
	return strings.Split(strings.ReplaceAll(line, "\n", ""), " ")
}
package main

import "fmt"

// main0901 demonstrates first- and second-level pointers.
func main0901() {
	a := 10
	//b := 20
	// A first-level pointer holds the address of a variable.
	p := &a
	// A second-level pointer holds the address of a first-level pointer.
	var pp **int = &p
	// Redirecting the first-level pointer through the second-level one:
	//*pp = &b
	// Modify the variable indirectly through the second-level pointer.
	**pp = 100
	fmt.Printf("%T\n", p)
	fmt.Printf("%T\n", pp)
}

// main0902 demonstrates a third-level pointer and the dereference chain
// ***ppp == **pp == *p == a.
func main0902() {
	a := 10
	var p *int = &a
	var pp **int = &p
	// A third-level pointer holds the address of a second-level pointer.
	var ppp ***int = &pp
	// The original left ppp unused, which is a compile error in Go
	// ("declared and not used"); reference it so the file builds.
	_ = ppp
	// The address-of operator cannot be applied repeatedly in one
	// expression (&& parses as logical AND):
	// third level:   ppp  = &pp
	// second level: *ppp  = pp = &p
	// first level:  **ppp = *pp = p = &a
	// value:       ***ppp = **pp = *p = a
}
package main

import (
	"context"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"strings"
	"sync"
	"testing"
	"time"
)

// NOTE(review): TestSimpleRequest, TestIncorrectRequest and
// TestCancelRequest are integration tests — they assume the service
// under test is already listening on localhost:8080 and that
// jsonplaceholder.typicode.com is reachable.

// TestSimpleRequest posts three fetchable URLs and expects a
// url -> body map covering all of them.
func TestSimpleRequest(t *testing.T) {
	jsonUrls := `
	[
		"http://jsonplaceholder.typicode.com/posts/1",
		"http://jsonplaceholder.typicode.com/posts/2",
		"http://jsonplaceholder.typicode.com/posts/3"
	]
	`
	resp, err := http.Post("http://localhost:8080/", "aplication/json", strings.NewReader(jsonUrls))
	require.NoError(t, err)
	require.Equal(t, http.StatusOK, resp.StatusCode)

	var res map[string]string
	d, err := ioutil.ReadAll(resp.Body)
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(d, &res))

	// Expected bodies exactly as jsonplaceholder serves them.
	expRes := map[string]string{
		"http://jsonplaceholder.typicode.com/posts/1": `{
  "userId": 1,
  "id": 1,
  "title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit",
  "body": "quia et suscipit\nsuscipit recusandae consequuntur expedita et cum\nreprehenderit molestiae ut ut quas totam\nnostrum rerum est autem sunt rem eveniet architecto"
}`,
		"http://jsonplaceholder.typicode.com/posts/2": `{
  "userId": 1,
  "id": 2,
  "title": "qui est esse",
  "body": "est rerum tempore vitae\nsequi sint nihil reprehenderit dolor beatae ea dolores neque\nfugiat blanditiis voluptate porro vel nihil molestiae ut reiciendis\nqui aperiam non debitis possimus qui neque nisi nulla"
}`,
		"http://jsonplaceholder.typicode.com/posts/3": `{
  "userId": 1,
  "id": 3,
  "title": "ea molestias quasi exercitationem repellat qui ipsa sit aut",
  "body": "et iusto sed quo iure\nvoluptatem occaecati omnis eligendi aut ad\nvoluptatem doloribus vel accusantium quis pariatur\nmolestiae porro eius odio et labore et velit aut"
}`,
	}
	require.Equal(t, expRes, res)
}

// TestIncorrectRequest includes malformed URLs (and a trailing comma,
// which the service's reader rejects) and expects a 500.
func TestIncorrectRequest(t *testing.T) {
	jsonUrls := `
	[
		"http://jsonplaceholder.typicode.com/posts/1",
		"http://jsonplaceholder.typicode.com/posts/2",
		"http://jsonplaceholder.typicode.com/posts/3",
		"http:/incorrect.qwerty.ytre",
		"another-incorrect-url",
	]
	`
	resp, err := http.Post("http://localhost:8080/", "aplication/json", strings.NewReader(jsonUrls))
	require.NoError(t, err)
	require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
}

// TestReadEmbeddedStrings unit-tests readEmbeddedStrings: empty array,
// unterminated string, over-limit count, and the happy path.
func TestReadEmbeddedStrings(t *testing.T) {
	const maxUrlsCount = 4
	{
		// Empty array parses to zero strings.
		r := strings.NewReader(`[]`)
		res, err := readEmbeddedStrings(r, maxUrlsCount)
		require.NoError(t, err)
		require.Len(t, res, 0)
	}
	{
		// Unterminated quoted string is an error.
		r := strings.NewReader(`["url1", "url2", "ur]`)
		res, err := readEmbeddedStrings(r, maxUrlsCount)
		require.EqualError(t, err, "not completed word exists")
		require.Len(t, res, 0)
	}
	{
		// One more string than maxUrlsCount is rejected.
		r := strings.NewReader(`["url1", "url2", "url3", "url4", "url5"]`)
		res, err := readEmbeddedStrings(r, maxUrlsCount)
		require.EqualError(t, err, "request max urls count exceeded")
		require.Len(t, res, 0)
	}
	{
		// Exactly maxUrlsCount strings are returned in order.
		r := strings.NewReader(`["url1", "url2", "url3", "url4"]`)
		res, err := readEmbeddedStrings(r, maxUrlsCount)
		require.NoError(t, err)
		require.Equal(t, []string{"url1", "url2", "url3", "url4"}, res)
	}
}

// TestLimitHandlersConcurrency checks, via wall-clock timing against a
// sleeping handler, that limitedClientsHandler serves at most
// `concurrency` requests in parallel.
func TestLimitHandlersConcurrency(t *testing.T) {
	const concurrency = 4
	const sleepTime = 200 * time.Millisecond
	ts := httptest.NewServer(limitedClientsHandler(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(sleepTime)
	}, concurrency))
	defer ts.Close()
	{ //without concurrency
		// Sequential requests: total time is at least 4 sleeps.
		start := time.Now()
		for i := 0; i < 4; i++ {
			_, err := http.Get(ts.URL)
			require.NoError(t, err)
		}
		require.GreaterOrEqual(t, time.Since(start).Nanoseconds(), (sleepTime * 4).Nanoseconds())
	}
	{ //with minimal concurrency
		// 4 parallel requests fit inside the limit: roughly one sleep.
		wg := sync.WaitGroup{}
		start := time.Now()
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func() {
				_, err := http.Get(ts.URL)
				require.NoError(t, err)
				wg.Done()
			}()
		}
		wg.Wait()
		require.GreaterOrEqual(t, time.Since(start).Nanoseconds(), (sleepTime).Nanoseconds())
		require.LessOrEqual(t, time.Since(start).Nanoseconds(), (sleepTime * 2).Nanoseconds())
	}
	{ //requests more than concurrency
		// 9 requests at limit 4 need three waves: about three sleeps.
		wg := sync.WaitGroup{}
		start := time.Now()
		for i := 0; i < 9; i++ {
			wg.Add(1)
			go func() {
				_, err := http.Get(ts.URL)
				require.NoError(t, err)
				wg.Done()
			}()
		}
		wg.Wait()
		require.GreaterOrEqual(t, time.Since(start).Nanoseconds(), (sleepTime * 3).Nanoseconds())
		require.LessOrEqual(t, time.Since(start).Nanoseconds(), (sleepTime * 4).Nanoseconds())
	}
}

// TestCancelRequest verifies that a request whose context is cancelled
// before it is sent fails client-side with "context canceled".
func TestCancelRequest(t *testing.T) {
	jsonUrls := `
	[
		"http://jsonplaceholder.typicode.com/posts/1",
		"http://jsonplaceholder.typicode.com/posts/2",
		"http://jsonplaceholder.typicode.com/posts/3"
	]
	`
	ctx, cancel := context.WithCancel(context.Background())
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://localhost:8080/", strings.NewReader(jsonUrls))
	require.NoError(t, err)
	req.Header.Set("Content-Type", "aplication/json")
	// Cancel before issuing the request.
	cancel()
	resp, err := http.DefaultClient.Do(req)
	require.EqualError(t, err, "Post \"http://localhost:8080/\": context canceled")
	require.Nil(t, resp)
}
package dbslite

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-oci8" // registers the "oci8" driver with database/sql
)

// Db is the shared Oracle connection pool, opened at package init.
var Db *sql.DB

// init opens the Oracle connection and verifies it with a ping.
// NOTE(review): credentials are hard-coded in source — consider loading
// them from configuration. A failed Ping only prints a message and
// leaves Db in place, so callers will see errors on first use.
func init() {
	var err error
	// sql.Open only validates its arguments; the actual connection is
	// established lazily (hence the Ping below).
	Db, err = sql.Open("oci8", "wang/1824611967")
	if err != nil {
		// log.Fatal exits the process; the Print surfaces the error text
		// on stdout as well as the log output.
		fmt.Print(err.Error())
		log.Fatal(err)
	}
	err = Db.Ping()
	if err != nil {
		// "未连接" means "not connected" (runtime string left as-is).
		fmt.Print("未连接")
	}
}
// Package main initializes and runs the server. // This package is small, because it is main. package main import ( "log" "github.com/MangoHacks/Mango2019-API/server" ) func main() { s, err := server.New() if err != nil { log.Fatal(err) } if err := s.Start(); err != nil { log.Fatal(err) } }
package client_route

import (
	"ehsan_esmaeili/route/client_route/v1_client_route"

	"github.com/julienschmidt/httprouter"
)

// ClientInit registers all client-facing routes on the router by
// delegating to each API version's initializer (currently only v1).
func ClientInit(r *httprouter.Router) {
	v1_client_route.V1ClientInit(r)
}
package portfolio

import (
	"sort"
)

// Join merges the transactions of all given portfolios into one new
// Portfolio (holdings are rebuilt as the transactions are re-added).
func Join(portfolios ...Portfolio) Portfolio {
	jp := Portfolio{}
	for _, p := range portfolios {
		for _, t := range p.Transactions() {
			jp.Add(t)
		}
	}
	return jp
}

// Portfolio accumulates transactions and indexes them into holdings
// keyed by (stock, buy date).
type Portfolio struct {
	Name         string
	transactions []Transaction
	holdings     map[key]Holding
}

// key identifies a holding: one buy of one stock on one date.
type key struct {
	Stock   string
	BuyDate string
}

// Add records a transaction and updates the holdings index: a buy opens
// a new holding; a sell is appended to the holding it references via
// its BuyDate.
func (p *Portfolio) Add(t Transaction) {
	// append handles a nil slice, so no explicit initialization of
	// p.transactions is needed (the original nil-checked redundantly).
	p.transactions = append(p.transactions, t)

	if p.holdings == nil {
		p.holdings = map[key]Holding{}
	}

	switch t.Action {
	case ActionBuy:
		k := key{Stock: t.Stock, BuyDate: t.Date.String()}
		h := Holding{
			Stock: t.Stock,
			Buy:   t,
			Sells: []Transaction{},
		}
		p.holdings[k] = h
	case ActionSell:
		k := key{Stock: t.Stock, BuyDate: t.BuyDate.String()}
		h := p.holdings[k]
		h.Sells = append(h.Sells, t)
		p.holdings[k] = h
	}
}

// Holdings returns all holdings ordered by stock, then buy date.
func (p Portfolio) Holdings() []Holding {
	holdings := make([]Holding, 0, len(p.holdings))
	for _, h := range p.holdings {
		holdings = append(holdings, h)
	}
	// Break ties on buy date: map iteration order is random, so sorting
	// by stock alone left same-stock holdings in nondeterministic order.
	sort.SliceStable(holdings, func(i, j int) bool {
		if holdings[i].Stock != holdings[j].Stock {
			return holdings[i].Stock < holdings[j].Stock
		}
		return holdings[i].Buy.Date.String() < holdings[j].Buy.Date.String()
	})
	return holdings
}

// AggregateHoldings folds all holdings of the same stock into one
// AggregateHolding per stock, ordered by stock.
// NOTE(review): holdings are folded in map-iteration order; if
// AggregateHolding.Add is order-sensitive the result may vary — verify.
func (p Portfolio) AggregateHoldings() []AggregateHolding {
	m := map[string]AggregateHolding{}
	for _, h := range p.holdings {
		ah := m[h.Stock]
		ah.Stock = h.Stock
		ah.Add(h)
		m[h.Stock] = ah
	}
	s := make([]AggregateHolding, 0, len(m))
	for _, ah := range m {
		s = append(s, ah)
	}
	sort.SliceStable(s, func(i, j int) bool {
		return s[i].Stock < s[j].Stock
	})
	return s
}

// Transactions returns the raw transactions in insertion order.
func (p Portfolio) Transactions() []Transaction {
	return p.transactions
}
package main

import "testing"

// BenchmarkParallelRequest measures ParallelRequest with a fixed
// fan-out of 5 per iteration.
func BenchmarkParallelRequest(b *testing.B) {
	for n := 0; n < b.N; n++ {
		ParallelRequest(5)
	}
}