text
stringlengths
11
4.05M
package profiler import ( "fmt" "math" "sync" "sync/atomic" "time" ) const MaxTryCntSwap = 3 var m sync.Mutex var mapProfiler map[string]*profiler type Profiler interface { Start() time.Time End(time.Time) time.Duration Info() string } type profiler struct { Name string // By priority atomics operations from max >> to min CntStart uint64 CntEnd uint64 SumTime uint64 MinTime uint64 MaxTime uint64 } func init() { mapProfiler = map[string]*profiler{} } func GetProfiler(name string) Profiler { emptyProfiler := &profiler{} emptyProfiler.MinTime = math.MaxUint64 if name == "" { return emptyProfiler } emptyProfiler.Name = name m.Lock() defer m.Unlock() if profiler, ok := mapProfiler[name]; ok { return profiler } else { mapProfiler[name] = emptyProfiler return emptyProfiler } } func (p *profiler) Start() time.Time { atomic.AddUint64(&p.CntStart, 1) return time.Now() } func (p *profiler) End(startTime time.Time) time.Duration { atomic.AddUint64(&p.CntEnd, 1) delta := time.Now().Sub(startTime) deltaUint64 := uint64(delta) atomic.AddUint64(&p.SumTime, uint64(delta)) for i := 0; i < MaxTryCntSwap; i++ { if minTime := p.MinTime; deltaUint64 < minTime { if atomic.CompareAndSwapUint64(&p.MinTime, minTime, deltaUint64) { break } } } for i := 0; i < MaxTryCntSwap; i++ { if maxTime := p.MaxTime; deltaUint64 > maxTime { if atomic.CompareAndSwapUint64(&p.MaxTime, maxTime, deltaUint64) { break } } } return delta } func (p *profiler) String() string { return fmt.Sprintf( "\nName: %v"+ "\nCntStart: %v"+ "\nCntEnd: %v"+ "\nSumTime: %v"+ "\nMinTime: %v"+ "\nMaxTime: %v\n"+ "=========\nAvgTime: %v\n", p.Name, p.CntStart, p.CntEnd, time.Duration(p.SumTime), time.Duration(p.MinTime), time.Duration(p.MaxTime), time.Duration(p.SumTime/p.CntEnd)) } func (p *profiler) Info() string { return fmt.Sprintf("%+v", p) }
package main import ( "log" "fmt" "github.com/viper" ) func main() { viper.SetConfigName("config") // name of config file (without extension) viper.AddConfigPath(".") // optionally look for config in the working directory err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file panic(fmt.Errorf("Fatal error config file: %s \n", err)) } // Read config log.Println(viper.GetString("isActivated")) // Update config }
package memtable type Node struct { key uint32 value uint32 } type KeyValue struct { key string value uint32 }
package main import "fmt" import "reflect" // slice 有两个特殊的属性 len, cap func main() { var s []int fmt.Println(s) s2 := make([]int, 50, 100) i := 10240000 fmt.Println(cap(s)) fmt.Println(cap(s2)) for j := 1; j <= i; j++ { s = append(s, j) } s2 = append(s2, 1) fmt.Println(cap(s2)) fmt.Println(cap(s)) // 这里加 ... 说明定义的是一个数组而不是一个 slice nums := [...]string{1:"one", 2:"two", 3:"three", 4:"four"} nums2 := []string{1:"one", 2:"two", 3:"three", 4:"four"} fmt.Println(reflect.TypeOf(nums)) fmt.Println(reflect.TypeOf(nums2)) // 这里会有一个 panic // nums2[5] = "five" // 使用 append 的话就没有问题 在底层会判断 cap 如果 // cap 不够的话就需要添加新的空间 nums2 = append(nums2, "five") }
package main

import (
	"fmt"
	"io"
	"os"
)

// main reads whitespace-separated triples (int, float, string) from stdin
// until EOF, then prints the integer sum, the float sum, and the collected
// strings. Any other scan failure is reported to stderr and exits with 1.
func main() {
	var (
		intTotal   int
		floatTotal float64
		words      = make([]string, 0)
	)
	for {
		var (
			n int
			f float64
			w string
		)
		count, err := fmt.Scan(&n, &f, &w)
		switch {
		case count == 3:
			intTotal += n
			floatTotal += f
			words = append(words, w)
		case count == 0 && err == io.EOF:
			fmt.Println(intTotal, floatTotal, words)
			return
		default:
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
}
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

// main posts a canned AMF packet to a BlazeDS test endpoint and prints the
// response. The original was pre-Go1 code (bare "http" import, statement
// semicolons, bytes.NewBuffer(&buf) on an array pointer) and no longer
// compiles; this is the same program in current Go.
func main() {
	fmt.Printf("***start http test***\n")

	// TODO body data, AMF3 format Message
	// test for BlazeDS AMF data: version 0x0003, one message "null" "/0".
	buf := []byte{
		0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04,
		0x6E, 0x75, 0x6C, 0x6C, 0x00, 0x02, 0x2F, 0x30,
	}
	fmt.Printf("##%s\n", buf)

	//body := bytes.NewBuffer([]byte{0x00, 0x03})
	body := bytes.NewBuffer(buf)

	// amf test
	resp, err := http.Post("http://192.168.2.16:8989/t2-piano/t2.amf", "application/x-amf", body)
	if err != nil {
		fmt.Printf("########err:%s\n", err)
		os.Exit(1)
	}
	defer resp.Body.Close() // release the connection for reuse
	fmt.Printf("%v\n", resp)
}
package container

import "reflect"

// BindValue bind a value to container
func (c *containerImpl) BindValue(key string, value interface{}) error {
	return c.bindValueOverride(key, value, false)
}

// HasBoundValue return whether the key has bound to a value
func (c *containerImpl) HasBoundValue(key string) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	_, ok := c.objects[key]
	return ok
}

// bindValueOverride stores value under key, replacing an existing binding
// only when the existing entity permits it. It validates inputs before
// taking the write lock, and keeps c.objects and c.objectSlices in step
// (the slice preserves registration order; index points into it).
func (c *containerImpl) bindValueOverride(key string, value interface{}, override bool) error {
	if value == nil {
		return buildInvalidArgsError("value is nil")
	}
	if key == "" || key == "@" {
		return buildInvalidArgsError("key can not be empty or reserved words(@)")
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	entity := Entity{
		initializeFunc: nil,
		key:            key,
		typ:            reflect.TypeOf(value),
		value:          value,
		override:       override,
		index:          len(c.objectSlices),
		c:              c,
		prototype:      false,
	}
	if original, ok := c.objects[key]; ok {
		// Replacement keeps the original slot so registration order is stable.
		if !original.override {
			return buildRepeatedBindError("key repeated, override is not allowed for this key")
		}
		entity.index = original.index
		c.objects[key] = &entity
		c.objectSlices[original.index] = &entity
		return nil
	}
	c.objects[key] = &entity
	c.objectSlices = append(c.objectSlices, &entity)
	return nil
}

// BindValueOverride bind a value to container, if key already exist, then replace it
func (c *containerImpl) BindValueOverride(key string, value interface{}) error {
	return c.bindValueOverride(key, value, true)
}

// MustBindValueOverride bind a value to container, if key already exist, then replace it, if failed, panic it
func (c *containerImpl) MustBindValueOverride(key string, value interface{}) {
	c.Must(c.BindValueOverride(key, value))
}

// MustBindValue bind a value to container, if failed, panic it
func (c *containerImpl) MustBindValue(key string, value interface{}) {
	c.Must(c.BindValue(key, value))
}

// HasBound return whether a key's type has bound to an object
func (c *containerImpl) HasBound(key interface{}) bool {
	keyTyp := reflect.ValueOf(key).Type()
	c.lock.RLock()
	defer c.lock.RUnlock()
	// NOTE(review): lookups here use the key's reflect.Type, while
	// HasBoundValue uses the raw string key — both live in c.objects,
	// which therefore mixes string and reflect.Type keys by design.
	_, ok := c.objects[keyTyp]
	return ok
}

// BindWithKey bind a initialize for object with a key
// initialize func(...) (value, error)
func (c *containerImpl) BindWithKey(key interface{}, initialize interface{}, prototype bool, override bool) error {
	// Wrap a bare initializer in an always-true Conditional so the rest of
	// the flow can treat everything uniformly.
	if _, ok := initialize.(Conditional); !ok {
		initialize = WithCondition(initialize, func() bool { return true })
	}
	initF := initialize.(Conditional).getInitFunc()
	if !reflect.ValueOf(initF).IsValid() {
		return buildInvalidArgsError("initialize is nil")
	}
	if err := c.isValidKeyKind(reflect.TypeOf(key).Kind()); err != nil {
		return err
	}
	initializeType := reflect.ValueOf(initF).Type()
	if initializeType.Kind() == reflect.Func {
		if initializeType.NumOut() <= 0 {
			return buildInvalidArgsError("expect func return values count greater than 0, but got 0")
		}
		// The first return value's type is the bound type.
		return c.bindWithOverride(key, initializeType.Out(0), initialize, prototype, override)
	}
	// Non-func initializer: wrap the value itself in a constant init func.
	// NOTE(review): this passes the Conditional's matched method, whereas
	// Bind's equivalent branch passes getOnCondition() — confirm whether the
	// asymmetry is intentional.
	initFunc := WithCondition(func() interface{} { return initF }, initialize.(Conditional).matched)
	return c.bindWithOverride(key, initializeType, initFunc, prototype, override)
}

// MustBindWithKey bind a initialize for object with a key, if failed then panic
func (c *containerImpl) MustBindWithKey(key interface{}, initialize interface{}, prototype bool, override bool) {
	c.Must(c.BindWithKey(key, initialize, prototype, override))
}

// Bind bind a initialize for object
// initialize func(...) (value, error)
func (c *containerImpl) Bind(initialize interface{}, prototype bool, override bool) error {
	if _, ok := initialize.(Conditional); !ok {
		initialize = conditional{init: initialize, on: func() bool { return true }}
	}
	initF := initialize.(Conditional).getInitFunc()
	if !reflect.ValueOf(initF).IsValid() {
		return buildInvalidArgsError("initialize is nil")
	}
	initializeType := reflect.ValueOf(initF).Type()
	if initializeType.Kind() == reflect.Func {
		if initializeType.NumOut() <= 0 {
			return buildInvalidArgsError("expect func return values count greater than 0, but got 0")
		}
		// Unlike BindWithKey, the key here is the produced type itself.
		typ := initializeType.Out(0)
		if err := c.isValidKeyKind(typ.Kind()); err != nil {
			return err
		}
		return c.bindWithOverride(typ, typ, initialize, prototype, override)
	}
	if err := c.isValidKeyKind(initializeType.Kind()); err != nil {
		return err
	}
	initFunc := WithCondition(func() interface{} { return initF }, initialize.(Conditional).getOnCondition())
	return c.bindWithOverride(initializeType, initializeType, initFunc, prototype, override)
}

// MustBind bind a initialize, if failed then panic
func (c *containerImpl) MustBind(initialize interface{}, prototype bool, override bool) {
	c.Must(c.Bind(initialize, prototype, override))
}

// bindWithOverride is the shared tail of Bind/BindWithKey: it evaluates the
// optional condition (a non-matching condition is a silent no-op, not an
// error), builds the Entity, and inserts it under the container write lock
// with the same replace-in-place semantics as bindValueOverride.
func (c *containerImpl) bindWithOverride(key interface{}, typ reflect.Type, initialize interface{}, prototype bool, override bool) error {
	var entity *Entity
	if cond, ok := initialize.(Conditional); ok {
		matched, err := cond.matched(c)
		if err != nil {
			return err
		}
		if !matched {
			// Condition not met: skip the binding entirely.
			return nil
		}
		entity = c.newEntity(key, typ, cond.getInitFunc(), prototype, override)
	} else {
		entity = c.newEntity(key, typ, initialize, prototype, override)
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	if original, ok := c.objects[key]; ok {
		if !original.override {
			return buildRepeatedBindError("key repeated, override is not allowed for this key")
		}
		entity.index = original.index
		c.objects[key] = entity
		c.objectSlices[original.index] = entity
		return nil
	}
	entity.index = len(c.objectSlices)
	c.objects[key] = entity
	c.objectSlices = append(c.objectSlices, entity)
	return nil
}
// Package envkit reads and writes KEY=VALUE environment files and applies
// them to the process environment.
package envkit

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"sort"
	"strings"
)

// Read parses KEY=VALUE lines from r into a map. Lines without '=' are
// skipped; everything after the first '=' is the value.
// NOTE(review): a scanner read error is silently ignored and yields a
// partial map — confirm callers can tolerate that.
func Read(r io.Reader) map[string]string {
	m := make(map[string]string)
	sc := bufio.NewScanner(r)
	for sc.Scan() {
		line := sc.Text()
		if i := strings.IndexRune(line, '='); i >= 0 {
			m[line[:i]] = line[i+1:]
		}
	}
	return m
}

// ReadFile read file to get environment map
func ReadFile(source string) (map[string]string, error) {
	f, err := os.Open(source)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return Read(f), nil
}

// Save writes the map to w as KEY=VALUE lines in sorted key order.
func Save(m map[string]string, w io.Writer) error {
	for _, key := range SortedKeys(m) {
		if _, err := fmt.Fprintf(w, "%s=%s\n", key, m[key]); err != nil {
			return err
		}
	}
	return nil
}

// SaveFile save envmap to file. O_TRUNC is required: without it (as in the
// original) writing a shorter map over an existing file left stale trailing
// bytes from the previous content.
func SaveFile(m map[string]string, target string) error {
	// NOTE(review): 0777 makes the env file world-writable and executable —
	// probably should be 0644, but preserved to avoid changing behavior.
	f, err := os.OpenFile(target, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
	if err != nil {
		return err
	}
	defer f.Close()
	return Save(m, f)
}

// Setenv set environment variable based on map; empty values are skipped.
func Setenv(m map[string]string) error {
	for k, v := range m {
		if v != "" {
			if err := os.Setenv(k, v); err != nil {
				return err
			}
		}
	}
	return nil
}

// Unsetenv unset environment variable
func Unsetenv(m map[string]string) error {
	for k := range m {
		if err := os.Unsetenv(k); err != nil {
			return err
		}
	}
	return nil
}

// SortedKeys returns m's keys in ascending order.
func SortedKeys(m map[string]string) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}
package main import ( "encoding/json" "log" "testing" ) type JsonUser struct { Id int `json:"id"` Name string `json:"name"` Address string `json:"address"` } func BenchmarkEncodingJson(b *testing.B) { var ( user JsonUser str string = `{"id":5,"name":"hoge","address":"東京"}` ) b.ResetTimer() for i := 0; i < b.N; i++ { if err := json.Unmarshal([]byte(str), &user); err != nil { log.Fatal(err) } } } func BenchmarkJsonConverter(b *testing.B) { var ( user User str string = `{"id":5,"name":"hoge","address":"東京"}` ) b.ResetTimer() for i := 0; i < b.N; i++ { Decode(&user, str) } }
package callbacks

import (
	"regexp"
	"strconv"
	"strings"

	"../db"
	"../state"

	"github.com/bwmarrin/discordgo"
)

// botID is this bot's own Discord user ID, used to filter reactions.
const botID string = "439164276058488843"

// MessageReactionAdd handles a MessageReactionAdd Discord event.
// In a DM it treats thumbs up/down reactions on a numbered prompt as an
// up/downvote of that prompt; in a guild channel it tallies 1/2 keycap
// reactions on a vote announcement, records the winner's score, and posts
// the result.
//
// NOTE(review): the delimiter-scanning loops below index one byte past the
// last match and will panic with an out-of-range slice if '#', ')', '\n',
// or '>' is missing from the message — assumes the bot's own message format
// always contains them.
func MessageReactionAdd(s *discordgo.Session, m *discordgo.MessageReactionAdd) {
	// Ignore reactions on messages not from this bot
	message, err := s.ChannelMessage(m.ChannelID, m.MessageID)
	if err != nil || message.Author.ID != botID || m.UserID == botID {
		return
	}

	// If the reaction is in a DM, it should be a thumbs up or down
	if m.GuildID == "" {
		// NOTE(review): regexes are recompiled on every event; package-level
		// regexp.MustCompile would be cheaper.
		promptRegex, err := regexp.Compile(`^[0-9]+`)
		if err == nil && promptRegex.FindString(message.Content) != "" {
			// Read the question index: digits between '#' and ')'.
			var start, numLen int
			for message.Content[start:start+1] != "#" {
				start++
			}
			start++
			for message.Content[start+numLen:start+numLen+1] != ")" {
				numLen++
			}
			index, err := strconv.ParseInt(message.Content[start:start+numLen], 10, 64)
			if err != nil {
				return
			}

			// Upvote or downvote (👍 / 👎, spelled as UTF-8 byte escapes).
			switch m.Emoji.Name {
			case "\xF0\x9F\x91\x8D":
				db.Upvote(int(index))
			case "\xF0\x9F\x91\x8E":
				db.Downvote(int(index))
			default:
			}
		}
	} else {
		// Otherwise it is a vote
		voteRegex, err := regexp.Compile(`^\*\*It's time to vote\*\*`)
		if err == nil && voteRegex.FindString(message.Content) != "" {
			// Count the votes for keycap-1 and keycap-2 reactions.
			votes1, err1 := s.MessageReactions(m.ChannelID, m.MessageID, "\x31\xE2\x83\xA3", 3)
			votes2, err2 := s.MessageReactions(m.ChannelID, m.MessageID, "\x32\xE2\x83\xA3", 3)
			if err1 != nil || err2 != nil {
				return
			}

			// If one response has 3 votes, find the winner
			if len(votes1) < 3 && len(votes2) < 3 {
				return
			}
			var winner string
			if winner = "\x31\xE2\x83\xA3"; len(votes2) > len(votes1) {
				winner = "\x32\xE2\x83\xA3"
			}

			// Announce the winner
			// Get the prompt string: text after ") " up to the next newline.
			promptIndex := strings.Index(message.Content, ")")
			promptIndex += 2
			promptLength := 0
			for message.Content[promptIndex+promptLength:promptIndex+promptLength+1] != "\n" {
				promptLength++
			}
			prompt := message.Content[promptIndex : promptIndex+promptLength]

			// Get the response string: the line after the winning emoji
			// (+6 skips the emoji bytes and separator).
			responseIndex := strings.Index(message.Content, winner)
			responseIndex += 6
			responseLength := 0
			for message.Content[responseIndex+responseLength:responseIndex+responseLength+1] != "\n" {
				responseLength++
			}
			response := message.Content[responseIndex : responseIndex+responseLength]

			// Get the user ID from the <@...> mention inside the response.
			userIndex := strings.Index(response, "<@")
			userIndex += 2
			userLength := 0
			for response[userIndex+userLength:userIndex+userLength+1] != ">" {
				userLength++
			}
			userID := response[userIndex : userIndex+userLength]

			// Calculate and record the score: 200 points per vote of margin,
			// absolute value.
			var score int
			if score = (len(votes1) - len(votes2)) * 200; score < 0 {
				score = -score
			}
			state.RecordScore(m.GuildID, userID, score)

			// Send the win message
			reply := winner + "wins!\n\n" + prompt + "\n" + response + "\n\n" + "Scores have been updated."
			s.ChannelMessageSend(m.ChannelID, reply)
		}
	}
}
package utils import ( "fmt" ) type Tree struct { Left *Tree Data int Right *Tree } func NewTree(v int) *Tree { return &Tree{nil, v, nil} } func InsertNodeToTree(t *Tree, v int) *Tree { if t == nil { return NewTree(v) } if v < t.Data { t.Left = InsertNodeToTree(t.Left, v) } else { t.Right = InsertNodeToTree(t.Right, v) } return t } func FindNodeInTree(t *Tree, v int) bool { if t == nil { return false } if t.Data == v { return true } else if t.Data > v { return FindNodeInTree(t.Left, v) } else { return FindNodeInTree(t.Right, v) } } // reference: https://blog.csdn.net/monster_ii/article/details/82115772 // PreOrder func PreOrder(t *Tree) { if t == nil { return } fmt.Printf("%d ", t.Data) PreOrder(t.Left) PreOrder(t.Right) } // 栈中元素都是自己和自己的左孩子都访问过了,而右孩子还没有访问到的节点 func PreOrderNonRecursive(t *Tree) { stack := make([]*Tree, 0) for t != nil || len(stack) != 0 { for t != nil { fmt.Printf("%d ", t.Data) stack = append(stack, t) t = t.Left } top := stack[len(stack)-1] stack = stack[:len(stack)-1] t = top.Right } } // InOrder func InOrder(t *Tree) { if t == nil { return } InOrder(t.Left) fmt.Printf("%d ", t.Data) InOrder(t.Right) } // 节点自身和它的右子树都没有被访问到的节点地址。 func InOrderNonRecursive(t *Tree) { stack := make([]*Tree, 0) cur := t for cur != nil || len(stack) != 0 { for cur != nil { stack = append(stack, cur) cur = cur.Left } top := stack[len(stack)-1] stack = stack[:len(stack)-1] fmt.Printf("%d ", top.Data) cur = top.Right } } // PastOrder func PastOrder(t *Tree) { if t == nil { return } PastOrder(t.Left) PastOrder(t.Right) fmt.Printf("%d ", t.Data) } // 右子树和自身都没有被遍历到的节点,但是多一个last指针指向上一次访问到的节点, // 用来确认是从根节点的左子树返回的还是从右子树返回的 func PastOrderNonRecursive(t *Tree) { stack := make([]*Tree, 0) cur := t var last *Tree for cur != nil || len(stack) != 0 { for cur != nil { stack = append(stack, cur) cur = cur.Left } top := stack[len(stack)-1] if top.Right == nil || top.Right == last { stack = stack[:len(stack)-1] fmt.Printf("%d ", top.Data) last = top } else { cur = top.Right } } } // 
levelOrder func LevelOrder(t *Tree) { if t == nil { return } queue := make([]*Tree, 0) queue = append(queue, t) for len(queue) != 0 { top := queue[len(queue)-1] queue = queue[:len(queue)-1] if top.Left != nil { queue = append([]*Tree{top.Left}, queue...) } if top.Right != nil { queue = append([]*Tree{top.Right}, queue...) } fmt.Printf("%d ", top.Data) } }
package qsort import "testing" func TestQuickSort1(t *testing.T) { values := [5]int {2, 3, 4, 1, 5} QuickSort(values[0:]) for i := 0; i < len(values) - 1; i++ { if values[i] > values[i+1] { t.Error("sorted1, result", values); } } } func TestQuickSort2(t *testing.T) { values := [6]int {2, 2, 3, 5, 1, 4} QuickSort(values[0:]) for i := 0; i < len(values) - 1; i++ { if values[i] > values[i+1] { t.Error("sorted2, result", values); } } }
// Copyright (C) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package record import ( "context" "encoding/binary" "io" "os" "github.com/golang/protobuf/proto" "github.com/google/gapid/core/event" "github.com/google/gapid/core/log" ) // pbbHandler is an implementation of fileType that stores it's records in binary proto format. type pbbFileType struct{} type pbbHandler struct { f *os.File null proto.Message } type pbbReader struct { buf []byte f io.Reader null proto.Message } func (pbbFileType) Ext() string { return ".pb" } func (pbbFileType) Open(ctx context.Context, f *os.File, null interface{}) (LedgerInstance, error) { m, ok := null.(proto.Message) if !ok { return nil, log.Err(ctx, nil, "Cannot create proto ledger with non proto type") } return &pbbHandler{f: f, null: m}, nil } func (h *pbbHandler) Write(ctx context.Context, record interface{}) error { buf, err := proto.Marshal(record.(proto.Message)) if err != nil { return err } size := int32(len(buf)) if err := binary.Write(h.f, binary.LittleEndian, &size); err != nil { return err } _, err = h.f.Write(buf) return err } func (h *pbbHandler) Reader(ctx context.Context) event.Source { return &pbbReader{f: &readAt{f: h.f}, null: h.null} } func (h *pbbHandler) Close(ctx context.Context) { h.f.Close() } func (h *pbbHandler) New(ctx context.Context) interface{} { return proto.Clone(h.null) } func (r *pbbReader) Next(ctx context.Context) interface{} { size := int32(0) if err := binary.Read(r.f, 
binary.LittleEndian, &size); err != nil { if err != io.EOF { log.E(ctx, "Invalid proto record header in ledger. Error: %v", err) } return nil } if cap(r.buf) < int(size) { r.buf = make([]byte, size*2) // TODO: very naive growth algorithm } r.buf = r.buf[0:size] io.ReadFull(r.f, r.buf) message := proto.Clone(r.null) err := proto.Unmarshal(r.buf, message) if err != nil { log.E(ctx, "Invalid proto in ledger. Error: %v", err) return nil } return message } func (h *pbbReader) Close(ctx context.Context) {}
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
	medium "github.com/medium/medium-sdk-go"
	"github.com/rs/cors"
)

// ResponseResult is the generic status payload returned by the API.
type ResponseResult struct {
	Code string `json:"code,omitempty"`
	Desc string `json:"desc,omitempty"`
}

var responseResult []ResponseResult

// MediumResponse carries the Medium OAuth authorization URL to the client.
type MediumResponse struct {
	URL string `json:"url,omitempty"`
}

// SECURITY NOTE(review): Medium client ID/secret are hardcoded in source —
// these should be moved to environment variables or a secrets store and the
// committed values rotated.
var client = medium.NewClient("bfd5f510809e", "988e2718d4fe4cb78fd936333f5e4ee73b40bb59")

var redirectURL string

// main wires the HTTP routes, enables permissive CORS for the local Angular
// frontend, and serves on :3000.
func main() {
	redirectURL = "http://127.0.0.1:4200/callback/medium"
	router := mux.NewRouter()
	responseResult = append(responseResult, ResponseResult{Code: "200", Desc: "KK"})
	router.HandleFunc("/get", getHandle).Methods("GET")
	router.HandleFunc("/api/medium/init", mediumInit).Methods("POST", "OPTIONS")
	router.HandleFunc("/api/medium/user", getMediumUser).Methods("POST", "OPTIONS")
	router.Headers("Content-Type", "application/json")
	// CORS is handled by the rs/cors wrapper rather than gorilla/handlers.
	c := cors.New(cors.Options{
		AllowedOrigins:   []string{"*"},
		AllowCredentials: true,
		AllowedHeaders:   []string{"Content-Type"},
	})
	handler := c.Handler(router)
	// headersAllow := handlers.AllowedHeaders([]string{"Content-Type"})
	// originsOk := handlers.AllowedOrigins([]string{"*"})
	// log.Fatal(http.ListenAndServe(":3000", handlers.CORS()(router)))
	log.Fatal(http.ListenAndServe(":3000", handler))
}

// getHandle returns the canned status payload.
func getHandle(w http.ResponseWriter, r *http.Request) {
	json.NewEncoder(w).Encode(responseResult)
}

// mediumInit returns the Medium OAuth authorization URL for the frontend to
// redirect the user to.
func mediumInit(w http.ResponseWriter, r *http.Request) {
	url := client.GetAuthorizationURL("secretstate", redirectURL,
		medium.ScopeBasicProfile, medium.ScopePublishPost)
	mediumResponse := MediumResponse{URL: url}
	// w.Header().Set("Access-Control-Allow-Origin", "*")
	json.NewEncoder(w).Encode(mediumResponse)
}

// getMediumUser is a stub: it logs the (currently unused) authen_code route
// variable and echoes the canned payload.
// NOTE(review): mux.Vars only yields path variables; this route declares
// none, so params["authen_code"] is always empty — confirm intended source
// of the code (body or query?).
func getMediumUser(w http.ResponseWriter, r *http.Request) {
	// r.HeadersRegexp("Content-Type", "application/json")
	fmt.Println("getMediumUser")
	params := mux.Vars(r)
	fmt.Println("this val:: " + params["authen_code"])
	// w.Header().Set("Access-Control-Allow-Origin", "*")
	// w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
	json.NewEncoder(w).Encode(responseResult) // test
}
package semaphore // IPC_RMID: // Immediately remove the semaphore set and its associated semid_ds data // structure. Any processes blocked in semop() calls waiting on semaphores in // this set are immediately awakened, with semop() reporting the error EIDRM . // The arg argument is not required. //--------------- // IPC_STAT // Place a copy of the semid_ds data structure associated with this semaphore // set in the buffer pointed to by arg.buf. We describe the semid_ds structure // in Section 47.4. //--------------- // IPC_SET // Update selected fields of the semid_ds data structure associate
package sys

import (
	//"github.com/gen2brain/raylib-go/raylib"
	//"math/rand"
)

// AllCollision reports whether obj is within distance 8 of any object in
// list.
// NOTE(review): `pre_list := &list[0]` panics on an empty list (same pattern
// throughout this file), and the `break` after `return true` is unreachable.
func AllCollision(obj GameObject, list []GameObject) bool {
	pre_list := &list[0]
	pre_obj := &obj
	for i := 0; i <= len(list)-1; i++ {
		pre_list = &list[i]
		if DistanceCal(pre_list.x, pre_list.y, pre_obj.x, pre_obj.y) <= 8 {
			return true
			break;
		}
	}
	return false
}

// AllDraw draws every object in list.
func AllDraw(list []GameObject) {
	pre_list := &list[0]
	for i := 0; i <= len(list)-1; i++ {
		pre_list = &list[i]
		pre_list.Draw()
	}
}

// AllMove moves every object in list by (x, y).
func AllMove(x int32, y int32, list []GameObject) {
	pre_list := &list[0]
	for i := 0; i <= len(list)-1; i++ {
		pre_list = &list[i]
		pre_list.Move(x, y)
	}
}

// AllRandomMove moves every object by a random offset bounded by x and y,
// with random sign per axis.
func AllRandomMove(x int32, y int32, list []GameObject) {
	pre_list := &list[0]
	for i := 0; i <= len(list)-1; i++ {
		r := Random(1)
		b := Random(1)
		if r == 0 {
			r = Random(x)
		} else if r == 1 {
			r = -Random(x)
		}
		if b == 0 {
			b = Random(y)
		} else if b == 1 {
			b = -Random(y)
		}
		pre_list = &list[i]
		pre_list.Move(r, b)
	}
}

// AllMoveTo teleports every object in list to (x, y).
func AllMoveTo(x int32, y int32, list []GameObject) {
	pre_list := &list[0]
	for i := 0; i <= len(list)-1; i++ {
		pre_list = &list[i]
		pre_list.x = x
		pre_list.y = y
	}
}

// CheckOutScreen counts, via countdown counters, how many objects are past
// each window edge; the bool result is true when every object is out on some
// edge. The GameMove flags are true when all objects are past that edge.
func CheckOutScreen(list []GameObject) (bool, GameMove) {
	leftboolean := len(list) - 1
	rightboolean := len(list) - 1
	downboolean := len(list) - 1
	upboolean := len(list) - 1
	boolean := len(list) - 1
	for i := 0; i <= len(list)-1; i++ {
		if list[i].x > WinWidth-list[i].size[0] {
			leftboolean -= 1
			boolean -= 1
		}
		if list[i].x < 0 {
			rightboolean -= 1
			boolean -= 1
		}
		if list[i].y > WinHeight-list[i].size[1] {
			downboolean -= 1
			boolean -= 1
		}
		if list[i].y < 0 {
			upboolean -= 1
			boolean -= 1
		}
	}
	if boolean == 0 {
		return true, GameMove{leftboolean == 0, rightboolean == 0, upboolean == 0, downboolean == 0}
	}
	return false, GameMove{leftboolean == 0, rightboolean == 0, upboolean == 0, downboolean == 0}
}

// CheckOutScreenSimple is CheckOutScreen with a hardcoded 600x600 screen and
// only the bool result.
func CheckOutScreenSimple(list []GameObject) bool {
	leftboolean := len(list) - 1
	rightboolean := len(list) - 1
	downboolean := len(list) - 1
	upboolean := len(list) - 1
	boolean := len(list) - 1
	for i := 0; i <= len(list)-1; i++ {
		if list[i].x > 600-list[i].size[0] {
			leftboolean -= 1
			boolean -= 1
		}
		if list[i].x < 0 {
			rightboolean -= 1
			boolean -= 1
		}
		if list[i].y > 600-list[i].size[1] {
			downboolean -= 1
			boolean -= 1
		}
		if list[i].y < 0 {
			upboolean -= 1
			boolean -= 1
		}
	}
	if boolean == 0 {
		return true
	}
	return false
}

// CheckOutScreenX checks only the horizontal edges (600-wide screen) and
// returns edge flags with a 10% tolerance on the counters.
func CheckOutScreenX(list []GameObject) GameMove {
	leftboolean := len(list) - 1
	rightboolean := len(list) - 1
	for i := 0; i <= len(list)-1; i++ {
		if list[i].x > 600-list[i].size[0] {
			leftboolean -= 1
		}
		if list[i].x < 0 {
			rightboolean -= 1
		}
	}
	return GameMove{leftboolean-leftboolean/10 < 0, rightboolean-rightboolean/10 < 0, false, false}
}

// CheckOutScreenNumber returns the raw countdown counters per edge as
// [left, right, up, down].
func CheckOutScreenNumber(list []GameObject) []int32 {
	leftboolean := len(list) - 1
	rightboolean := len(list) - 1
	downboolean := len(list) - 1
	upboolean := len(list) - 1
	for i := 0; i <= len(list)-1; i++ {
		if list[i].x > 600-list[i].size[0] {
			leftboolean -= 1
		}
		if list[i].x < 0 {
			rightboolean -= 1
		}
		if list[i].y > 600-list[i].size[1] {
			downboolean -= 1
		}
		if list[i].y < 0 {
			upboolean -= 1
		}
	}
	return []int32{int32(leftboolean), int32(rightboolean), int32(upboolean), int32(downboolean)}
}

// MoveLogicFollow steps each object one vel unit toward obj on each axis.
// NOTE(review): the condition `i < len(list) - 1` skips the last element,
// unlike the other loops in this file — looks like an off-by-one; confirm.
// `*&list[i].x` is equivalent to plain `list[i].x` (also below).
func MoveLogicFollow(list []GameObject, obj GameObject) {
	for i := 0; i < len(list)-1; i++ {
		if obj.x > list[i].x {
			*&list[i].x += list[i].vel
		}
		if obj.x < list[i].x {
			*&list[i].x -= list[i].vel
		}
		if obj.y > list[i].y {
			*&list[i].y += list[i].vel
		}
		if obj.y < list[i].y {
			*&list[i].y -= list[i].vel
		}
	}
}

// MoveLogic1 applies the arithmetic operator named by oper ("+", "-", "*",
// "/") between each object's x and its vel.
func MoveLogic1(list []GameObject, oper string) {
	for i := 0; i < len(list); i++ {
		if oper == "+" {
			*&list[i].x += list[i].vel
		}
		if oper == "-" {
			*&list[i].x -= list[i].vel
		}
		if oper == "*" {
			*&list[i].x *= list[i].vel
		}
		if oper == "/" {
			*&list[i].x /= list[i].vel
		}
	}
}

// Shoot (method) aligns every object's x with obj's x.
func (obj GameObject) Shoot(list []GameObject) {
	for i := 0; i <= len(list)-1; i++ {
		*&list[i].x = obj.x
	}
}

// Shoot (function) teleports every object in list onto obj's position.
func Shoot(list []GameObject, obj GameObject) {
	for i := 0; i < len(list); i++ {
		*&list[i].x = obj.x
		*&list[i].y = obj.y
	}
}

// MoveLogicFollow2 steps objects toward obj until they come within dis (plus
// obj's width) of it.
// NOTE(review): the second distance check `break`s out of the whole loop as
// soon as one object is close enough, leaving the rest unmoved — confirm
// intended.
func MoveLogicFollow2(list []GameObject, obj GameObject, dis int32) {
	for i := 0; i < len(list); i++ {
		if int32(DistanceCal(list[i].x, list[i].y, obj.x, obj.y)) >= dis+obj.size[0] {
			if obj.x > list[i].x {
				*&list[i].x += list[i].vel
			}
			if obj.x < list[i].x {
				*&list[i].x -= list[i].vel
			}
			if obj.y > list[i].y {
				*&list[i].y += list[i].vel
			}
			if obj.y < list[i].y {
				*&list[i].y -= list[i].vel
			}
		}
		if int32(DistanceCal(list[i].x, list[i].y, obj.x, obj.y)) <= dis+obj.size[0] {
			break
		}
	}
}

// MoveLogicFollow3 is MoveLogicFollow2 but objects already within range
// jitter randomly, scaled by x and y, instead of stopping.
func MoveLogicFollow3(y int32, x int32, list []GameObject, obj GameObject, dis int32) {
	for i := 0; i < len(list); i++ {
		if int32(DistanceCal(list[i].x, list[i].y, obj.x, obj.y)) >= dis+obj.size[0] {
			if obj.x > list[i].x {
				*&list[i].x += list[i].vel
			}
			if obj.x < list[i].x {
				*&list[i].x -= list[i].vel
			}
			if obj.y > list[i].y {
				*&list[i].y += list[i].vel
			}
			if obj.y < list[i].y {
				*&list[i].y -= list[i].vel
			}
		} else {
			r := Random(1)
			a := Random(1)
			if r == 1 {
				list[i].x += Random(list[i].vel * x)
			}
			if r == 0 {
				list[i].x -= Random(list[i].vel * x)
			}
			if a == 1 {
				list[i].y += Random(list[i].vel * y)
			}
			if a == 0 {
				list[i].y -= Random(list[i].vel * y)
			}
		}
	}
}
package logger import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" ) // NewProductionEncoderConfig returns an opinionated EncoderConfig for // production environments. func NewProductionEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ LevelKey: "level", MessageKey: "msg", StacktraceKey: "stacktrace", LineEnding: zapcore.DefaultLineEnding, EncodeLevel: zapcore.LowercaseLevelEncoder, } } // NewProductionConfig is a reasonable production logging configuration. // Logging is enabled at InfoLevel and above. // // It uses a JSON encoder, writes to standard error, and enables sampling. // Stacktraces are automatically included on logs of ErrorLevel and above. func NewProductionConfig() zap.Config { return zap.Config{ Level: zap.NewAtomicLevelAt(zap.InfoLevel), Development: false, Sampling: &zap.SamplingConfig{ Initial: 100, Thereafter: 100, }, Encoding: "json", EncoderConfig: NewProductionEncoderConfig(), OutputPaths: []string{"stderr"}, ErrorOutputPaths: []string{"stderr"}, } } // New creates package specific loging pipeline. func New(name string) *zap.SugaredLogger { logger, _ := NewProductionConfig().Build() defer logger.Sync() // nolint:errcheck return logger.Named(name).Sugar() }
package api import ( "github.com/jiangmitiao/cali/app/models" "github.com/jiangmitiao/cali/app/rcali" "github.com/revel/revel" "strconv" ) type Tag struct { *revel.Controller } func (c Tag) Index() revel.Result { return c.RenderJSONP(c.Request.FormValue("callback"), models.NewOKApi()) } //all tags count func (c Tag) TagsCount() revel.Result { return c.RenderJSONP( c.Request.FormValue("callback"), models.NewOKApiWithInfo(tagService.QueryTagsCount())) } //all tags info func (c Tag) Tags() revel.Result { limit, _ := strconv.Atoi(rcali.ValueOrDefault(c.Request.FormValue("limit"), rcali.ClassNumsStr)) start, _ := strconv.Atoi(rcali.ValueOrDefault(c.Request.FormValue("start"), "0")) return c.RenderJSONP( c.Request.FormValue("callback"), models.NewOKApiWithInfo(tagService.QueryTags(limit, start)), ) }
// Package encrypt provides signing helpers over common private key types.
package encrypt

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/rsa"
	"errors"
)

// Sign signs message with the given private key, dispatching on key type.
// Supported: *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey; any
// other type returns an error.
//
// NOTE(review): the RSA path calls rsa.SignPKCS1v15 with crypto.SHA256, so
// it expects message to already be a SHA-256 digest, while the Ed25519 path
// signs the raw message — confirm callers hash per key type.
func Sign(key interface{}, message []byte) ([]byte, error) {
	// Idiom fix: bind the asserted value in the type switch instead of
	// re-asserting in each case.
	switch k := key.(type) {
	case *rsa.PrivateKey:
		return signRsa(k, message)
	case *ecdsa.PrivateKey:
		return signEcdsa(k, message)
	case ed25519.PrivateKey:
		return signEd25519(k, message)
	}
	return nil, errors.New("Unknown key type for signing")
}

// signEd25519 signs the raw message (Ed25519 hashes internally).
func signEd25519(key ed25519.PrivateKey, message []byte) ([]byte, error) {
	return ed25519.Sign(key, message), nil
}

// signEcdsa signs via the crypto.Signer interface; ECDSA treats message as
// a pre-computed digest, and nil opts are accepted by the stdlib signer.
func signEcdsa(key *ecdsa.PrivateKey, message []byte) ([]byte, error) {
	return key.Sign(rand.Reader, message, nil)
}

// signRsa produces a PKCS#1 v1.5 signature over a SHA-256 digest.
func signRsa(key *rsa.PrivateKey, message []byte) ([]byte, error) {
	return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, message)
}
// Copyright 2015 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package kvserver import ( "context" "fmt" "time" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" ) const ( // splitQueueTimerDuration is the duration between splits of queued ranges. splitQueueTimerDuration = 0 // zero duration to process splits greedily. // splitQueuePurgatoryCheckInterval is the interval at which replicas in // purgatory make split attempts. Purgatory is used by the splitQueue to // store ranges that are large enough to require a split but are // unsplittable because they do not contain a suitable split key. Purgatory // prevents them from repeatedly attempting to split at an unbounded rate. splitQueuePurgatoryCheckInterval = 1 * time.Minute // splits should be relatively isolated, other than requiring expensive // RocksDB scans over part of the splitting range to recompute stats. We // allow a limitted number of splits to be processed at once. splitQueueConcurrency = 4 ) // splitQueue manages a queue of ranges slated to be split due to size // or along intersecting zone config boundaries. 
type splitQueue struct {
	*baseQueue
	db       *kv.DB
	purgChan <-chan time.Time

	// loadBasedCount counts the load-based splits performed by the queue.
	loadBasedCount telemetry.Counter
}

// newSplitQueue returns a new instance of splitQueue.
func newSplitQueue(store *Store, db *kv.DB, gossip *gossip.Gossip) *splitQueue {
	var purgChan <-chan time.Time
	// Tests may inject their own purgatory channel via the testing knobs;
	// otherwise purgatory retries are driven by a fixed-interval ticker.
	if c := store.TestingKnobs().SplitQueuePurgatoryChan; c != nil {
		purgChan = c
	} else {
		purgTicker := time.NewTicker(splitQueuePurgatoryCheckInterval)
		purgChan = purgTicker.C
	}

	sq := &splitQueue{
		db:             db,
		purgChan:       purgChan,
		loadBasedCount: telemetry.GetCounter("kv.split.load"),
	}
	sq.baseQueue = newBaseQueue(
		"split", sq, store, gossip,
		queueConfig{
			maxSize:              defaultQueueMaxSize,
			maxConcurrency:       splitQueueConcurrency,
			needsLease:           true,
			needsSystemConfig:    true,
			acceptsUnsplitRanges: true,
			successes:            store.metrics.SplitQueueSuccesses,
			failures:             store.metrics.SplitQueueFailures,
			pending:              store.metrics.SplitQueuePending,
			processingNanos:      store.metrics.SplitQueueProcessingNanos,
			purgatory:            store.metrics.SplitQueuePurgatory,
		},
	)
	return sq
}

// shouldSplitRange returns whether the range described by desc should be
// queued for splitting, along with a priority: zone-config-mandated splits
// contribute a base priority of 1, oversized ranges add their
// size/max-size ratio, and ranges that would backpressure writes get a
// large additional boost.
func shouldSplitRange(
	ctx context.Context,
	desc *roachpb.RangeDescriptor,
	ms enginepb.MVCCStats,
	maxBytes int64,
	shouldBackpressureWrites bool,
	sysCfg *config.SystemConfig,
) (shouldQ bool, priority float64) {
	if sysCfg.NeedsSplit(ctx, desc.StartKey, desc.EndKey) {
		// Set priority to 1 in the event the range is split by zone configs.
		priority = 1
		shouldQ = true
	}

	// Add priority based on the size of range compared to the max
	// size for the zone it's in.
	if ratio := float64(ms.Total()) / float64(maxBytes); ratio > 1 {
		priority += ratio
		shouldQ = true
	}

	// additionalPriorityDueToBackpressure is a mechanism to prioritize splitting
	// ranges which will actively backpressure writes.
	//
	// NB: This additional weight is totally arbitrary. The priority in the split
	// queue is usually 1 plus the ratio of the current size over the max size.
	// When a range is much larger than it is allowed to be given the
	// backpressureRangeSizeMultiplier and the zone config, backpressure is
	// not going to be applied because of the backpressureByteTolerance (see the
	// comment there for more details). However, when the range size is close to
	// the limit, we will backpressure. We strongly prefer to split over
	// backpressure.
	const additionalPriorityDueToBackpressure = 50
	if shouldQ && shouldBackpressureWrites {
		priority += additionalPriorityDueToBackpressure
	}

	return shouldQ, priority
}

// shouldQueue determines whether a range should be queued for
// splitting. This is true if the range is intersected by a zone config
// prefix or if the range's size in bytes exceeds the limit for the zone,
// or if the range has too much load on it.
func (sq *splitQueue) shouldQueue(
	ctx context.Context, now hlc.ClockTimestamp, repl *Replica, sysCfg *config.SystemConfig,
) (shouldQ bool, priority float64) {
	shouldQ, priority = shouldSplitRange(ctx, repl.Desc(), repl.GetMVCCStats(),
		repl.GetMaxBytes(), repl.shouldBackpressureWrites(), sysCfg)

	// Even if size/zone config don't demand a split, queue the range when
	// the load-based splitter has found a split key.
	if !shouldQ && repl.SplitByLoadEnabled() {
		if splitKey := repl.loadBasedSplitter.MaybeSplitKey(timeutil.Now()); splitKey != nil {
			shouldQ, priority = true, 1.0 // default priority
		}
	}

	return shouldQ, priority
}

// unsplittableRangeError indicates that a split attempt failed because no
// suitable split key could be found.
type unsplittableRangeError struct{}

func (unsplittableRangeError) Error() string { return "could not find valid split key" }

// purgatoryErrorMarker marks this error type so the base queue routes the
// replica into purgatory instead of retrying immediately.
func (unsplittableRangeError) purgatoryErrorMarker() {}

var _ purgatoryError = unsplittableRangeError{}

// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(
	ctx context.Context, r *Replica, sysCfg *config.SystemConfig,
) (processed bool, err error) {
	processed, err = sq.processAttempt(ctx, r, sysCfg)
	if errors.HasType(err, (*roachpb.ConditionFailedError)(nil)) {
		// ConditionFailedErrors are an expected outcome for range split
		// attempts because splits can race with other descriptor modifications.
		// On seeing a ConditionFailedError, don't return an error and enqueue
		// this replica again in case it still needs to be split.
		log.Infof(ctx, "split saw concurrent descriptor modification; maybe retrying")
		sq.MaybeAddAsync(ctx, r, sq.store.Clock().NowAsClockTimestamp())
		return false, nil
	}
	return processed, err
}

// processAttempt performs a single split attempt for r, trying in order:
// a zone-config-mandated split, a size-based split, and a load-based
// split. It returns true if a split was carried out.
func (sq *splitQueue) processAttempt(
	ctx context.Context, r *Replica, sysCfg *config.SystemConfig,
) (processed bool, err error) {
	desc := r.Desc()
	// First handle the case of splitting due to zone config maps.
	if splitKey := sysCfg.ComputeSplitKey(ctx, desc.StartKey, desc.EndKey); splitKey != nil {
		if _, err := r.adminSplitWithDescriptor(
			ctx,
			roachpb.AdminSplitRequest{
				RequestHeader: roachpb.RequestHeader{
					Key: splitKey.AsRawKey(),
				},
				SplitKey:       splitKey.AsRawKey(),
				ExpirationTime: hlc.Timestamp{},
			},
			desc,
			false, /* delayable */
			"zone config",
		); err != nil {
			return false, errors.Wrapf(err, "unable to split %s at key %q", r, splitKey)
		}
		return true, nil
	}

	// Next handle case of splitting due to size. Note that we don't perform
	// size-based splitting if maxBytes is 0 (happens in certain test
	// situations).
	size := r.GetMVCCStats().Total()
	maxBytes := r.GetMaxBytes()
	if maxBytes > 0 && float64(size)/float64(maxBytes) > 1 {
		_, err := r.adminSplitWithDescriptor(
			ctx,
			roachpb.AdminSplitRequest{},
			desc,
			false, /* delayable */
			fmt.Sprintf("%s above threshold size %s", humanizeutil.IBytes(size), humanizeutil.IBytes(maxBytes)),
		)
		return err == nil, err
	}

	// Finally, the load-based case.
	now := timeutil.Now()
	if splitByLoadKey := r.loadBasedSplitter.MaybeSplitKey(now); splitByLoadKey != nil {
		batchHandledQPS := r.QueriesPerSecond()
		raftAppliedQPS := r.WritesPerSecond()
		splitQPS := r.loadBasedSplitter.LastQPS(now)
		reason := fmt.Sprintf(
			"load at key %s (%.2f splitQPS, %.2f batches/sec, %.2f raft mutations/sec)",
			splitByLoadKey,
			splitQPS,
			batchHandledQPS,
			raftAppliedQPS,
		)

		// Add a small delay (default of 5m) to any subsequent attempt to merge
		// this range split away. While the merge queue does take into account
		// load to avoid merging ranges that would be immediately re-split due
		// to load-based splitting, it doesn't take into account historical
		// load. So this small delay is the only thing that prevents split
		// points created due to load from being immediately merged away after
		// load is stopped, which can be a problem for benchmarks where data is
		// first imported and then the workload begins after a small delay.
		var expTime hlc.Timestamp
		if expDelay := SplitByLoadMergeDelay.Get(&sq.store.cfg.Settings.SV); expDelay > 0 {
			expTime = sq.store.Clock().Now().Add(expDelay.Nanoseconds(), 0)
		}
		if _, pErr := r.adminSplitWithDescriptor(
			ctx,
			roachpb.AdminSplitRequest{
				RequestHeader: roachpb.RequestHeader{
					Key: splitByLoadKey,
				},
				SplitKey:       splitByLoadKey,
				ExpirationTime: expTime,
			},
			desc,
			false, /* delayable */
			reason,
		); pErr != nil {
			return false, errors.Wrapf(pErr, "unable to split %s at key %q", r, splitByLoadKey)
		}
		telemetry.Inc(sq.loadBasedCount)

		// Reset the splitter now that the bounds of the range changed.
		r.loadBasedSplitter.Reset()
		return true, nil
	}
	return false, nil
}

// timer returns interval between processing successive queued splits.
func (*splitQueue) timer(_ time.Duration) time.Duration {
	return splitQueueTimerDuration
}

// purgatoryChan returns the split queue's purgatory channel.
func (sq *splitQueue) purgatoryChan() <-chan time.Time {
	return sq.purgChan
}
/* Copyright © 2021 Author : mehtaarn000 Email : arnavm834@gmail.com */ package core import ( "bufio" "io/ioutil" "os" "ssc/utils" "strings" "github.com/glenn-brown/golang-pkg-pcre/src/pkg/pcre" ) func validateBranchName(name string) bool { newmatcher, err := pcre.Compile("^(?!/|.*([/.]\\.|//|@\\{|\\\\))[^\040\177 ~^:?*\\[]+(?<!\\.lock|[/.])$", 0) match := newmatcher.Matcher([]byte(name), 0).MatchString(name, 0) if err != nil { utils.Exit(err) } return match } func CreateBranch(name string) { if _, err := os.Stat(".ssc/branches/" + name); err != nil { if os.IsExist(err) { utils.Exit("Branch '" + name + "' already exists.") } } currentbranch, err := ioutil.ReadFile(".ssc/branch") othercommitlog, err := ioutil.ReadFile(".ssc/branches/" + string(currentbranch) + "/commitlog") array := strings.Split(string(othercommitlog), "\n") head := array[0] if head == "" || head == "\n" { utils.Exit("At least 1 commit must be made on the default branch before new branches can be created.") } match := validateBranchName(name) if !match { utils.Exit("Invalid branch name: '" + name + "'") } err = os.Mkdir(".ssc/branches/"+name, 0777) f, err := os.Create(".ssc/branches/" + name + "/commitlog") defer f.Close() f.WriteString(head + "\n") if err != nil { utils.Exit(err) } } func SwitchBranch(name string) { // ALL UNCOMMITTED CHANGES WILL BE LOST // TODO Add feature that stores uncommitted changes when switching branches if _, err := os.Stat(".ssc/branches/" + name); err != nil { if os.IsNotExist(err) { utils.Exit("Branch '" + name + "' does not exist.") } } currentbranch, err := ioutil.ReadFile(".ssc/branch") writer, err := os.Create(".ssc/branch") writer.WriteString(name) othercommitlog, err := ioutil.ReadFile(".ssc/branches/" + name + "/commitlog") array1 := strings.Split(string(othercommitlog), "\n") head1 := array1[0] thiscommitlog, err := ioutil.ReadFile(".ssc/branches/" + string(currentbranch) + "/commitlog") array2 := strings.Split(string(thiscommitlog), "\n") head2 := array2[0] 
if head1 != head2 { RevertTo(head1) } if err != nil { utils.Exit(err) } println("Switched to branch '" + name + "'") } var confirm string func DeleteBranch(name string, force bool) { if _, err := os.Stat(".ssc/branches/" + name); err != nil { if os.IsNotExist(err) { utils.Exit("Branch '" + name + "' does not exist.") } } branch, err := ioutil.ReadFile(".ssc/branch") if name == string(branch) { utils.Exit("Cannot delete current branch. Run ssc branch -s [branch name] to move to another branch or run ssc branch -ns [branch name] to create and switch to a new branch.") } if !force { scanner := bufio.NewScanner(os.Stdin) for { print("Are you sure you want to delete branch: " + name + " [y/n]?") scanner.Scan() confirm = scanner.Text() if confirm == "Y" || confirm == "N" || confirm == "y" || confirm == "n" { break } } if confirm == "Y" || confirm == "y" { err := os.RemoveAll(".ssc/branches/" + name) if err != nil { utils.Exit(err) } } else { return } } err = os.RemoveAll(".ssc/branches/" + name) if err != nil { utils.Exit(err) } }
// Created by: Infinity de Guzman // Created on: May 2021 // // This program calculates your actual pay and what the government takes package main import ( "fmt" "github.com/leekchan/accounting" ) func main() { var hoursWorked float64 var hourlyRate float64 // input fmt.Println("This program gets a user's actual pay and what the government takes.") fmt.Println() fmt.Print("Enter the hours you've worked: ") fmt.Scanln(&hoursWorked) fmt.Print("Enter your hourly rate: ") fmt.Scanln(&hourlyRate) // process var pay = (hoursWorked * hourlyRate) * 0.82 var government = (hoursWorked * hourlyRate) * 0.18 // This function displays currency accountingFormater := accounting.Accounting{Symbol: "$", Precision: 2} fmt.Println("The pay will be:", accountingFormater.FormatMoney(pay)) accountingFormater2 := accounting.Accounting{Symbol: "$", Precision: 2} fmt.Println("The government will take:", accountingFormater2.FormatMoney(government)) }
package model

import (
	"time"
)

// Income is the gorm model for table "incomes": one income record per
// username. (The previous comment said "Logging model for table
// loggings" — a copy-paste leftover from another model.)
type Income struct {
	ID       uint   `gorm:"primary_key"`
	Username string `sql:"column:username" json:"username"`
	Income   int    `sql:"column:income" json:"income"`
	// Standard gorm timestamp fields; DeletedAt enables soft deletes.
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time `sql:"index"`
}

// TableName overrides gorm's default table-name derivation.
func (Income) TableName() string {
	return "incomes"
}
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package arc

import (
	"context"
	"io"
	"net/http"
	"net/http/httptest"
	"regexp"
	"time"

	"github.com/mafredri/cdp/protocol/target"

	"chromiumos/tast/common/testexec"
	"chromiumos/tast/local/arc"
	"chromiumos/tast/local/chrome/browser"
	"chromiumos/tast/local/chrome/browser/browserfixt"
	"chromiumos/tast/testing"
)

func init() {
	testing.AddTest(&testing.Test{
		Func:         IntentForward,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checks Android intents are forwarded to Chrome",
		Contacts:     []string{"djacobo@google.com", "arc-core@google.com"},
		SoftwareDeps: []string{"chrome"},
		Timeout:      5 * time.Minute,
		Attr:         []string{"group:mainline", "group:arc-functional"},
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
			Val:               browser.TypeAsh,
			Fixture:           "arcBooted",
		}, {
			Name: "lacros",
			// TODO(b/239469085): Remove "informational" attribute.
			ExtraAttr:         []string{"informational"},
			ExtraSoftwareDeps: []string{"android_p", "lacros"},
			Val:               browser.TypeLacros,
			Fixture:           "lacrosWithArcBooted",
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
			Val:               browser.TypeAsh,
			Fixture:           "arcBooted",
		}, {
			Name:              "lacros_vm",
			ExtraSoftwareDeps: []string{"android_vm", "lacros"},
			Val:               browser.TypeLacros,
			Fixture:           "lacrosWithArcBooted",
		}},
	})
}

// IntentForward sends Android intents through ARC and verifies that each
// opens the expected page: a VIEW intent against a local test web server,
// SET_WALLPAPER against the wallpaper picker, and VIEW_DOWNLOADS against
// the Files app (the latter two are checked in the Ash browser).
func IntentForward(ctx context.Context, s *testing.State) {
	const (
		viewAction          = "android.intent.action.VIEW"
		viewDownloadsAction = "android.intent.action.VIEW_DOWNLOADS"
		setWallpaperAction  = "android.intent.action.SET_WALLPAPER"

		filesAppURL        = "chrome://file-manager/"
		wallpaperPickerURL = "chrome://personalization/wallpaper"
	)

	d := s.FixtValue().(*arc.PreData)
	a := d.ARC
	cr := d.Chrome

	if err := a.WaitIntentHelper(ctx); err != nil {
		s.Fatal("ArcIntentHelper did not come up: ", err)
	}

	// Local HTTP server whose URL the VIEW intent should open.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "It worked!")
	}))
	defer server.Close()
	localWebURL := server.URL + "/" // Must end with a slash

	// checkIntent sends the given intent through ARC and verifies that a
	// browser target whose URL matches url (treated as a regexp) appears
	// in the browser of type bt. Failures are reported via s.Error* so
	// the remaining intents are still exercised.
	checkIntent := func(action, data, url string, bt browser.Type) {
		ctx, cancel := context.WithTimeout(ctx, time.Minute)
		defer cancel()

		testing.ContextLogf(ctx, "Testing: %s(%s) -> %s", action, data, url)

		if err := a.SendIntentCommand(ctx, action, data).Run(testexec.DumpLogOnError); err != nil {
			s.Errorf("Failed to send an intent %q: %v", action, err)
			return
		}

		br, brCleanUp, err := browserfixt.Connect(ctx, cr, bt)
		if err != nil {
			s.Error("Failed to connect to browser: ", err)
			return
		}
		defer brCleanUp(ctx)

		urlMatcher := func(t *target.Info) bool {
			matched, _ := regexp.MatchString(url, t.URL)
			return matched
		}
		conn, err := br.NewConnForTarget(ctx, urlMatcher)
		if err != nil {
			s.Errorf("%s(%s) -> %s: %v", action, data, url, err)
			return
		}
		defer conn.Close()
	}

	checkIntent(viewAction, localWebURL, localWebURL, s.Param().(browser.Type))
	checkIntent(setWallpaperAction, "", wallpaperPickerURL, browser.TypeAsh)
	checkIntent(viewDownloadsAction, "", filesAppURL, browser.TypeAsh)
}
package datetime

import (
	"fmt"
	"strings"

	"github.com/project-flogo/core/data"
	"github.com/project-flogo/core/data/coerce"
	"github.com/project-flogo/core/data/expression/function"
	"github.com/project-flogo/core/support/log"
)

// FormatDate implements the "formatDate" expression function: it formats
// a datetime value using a yyyy/mm/dd-style pattern.
//
// Deprecated: retained for backwards compatibility.
type FormatDate struct {
}

func init() {
	function.Register(&FormatDate{})
}

// Name returns the expression-function name.
func (s *FormatDate) Name() string {
	return "formatDate"
}

// GetCategory returns the function category.
func (s *FormatDate) GetCategory() string {
	return "datetime"
}

// Sig declares the signature: (datetime, string) -> string, non-variadic.
func (s *FormatDate) Sig() (paramTypes []data.Type, isVariadic bool) {
	return []data.Type{data.TypeDateTime, data.TypeString}, false
}

// Eval coerces params[0] to a datetime and params[1] to a format pattern
// and returns the formatted string.
func (s *FormatDate) Eval(params ...interface{}) (interface{}, error) {
	// ROBUSTNESS FIX: the original indexed params[0]/params[1] without a
	// length check and would panic on too few arguments.
	if len(params) < 2 {
		return nil, fmt.Errorf("formatDate requires a datetime and a format string")
	}
	date, err := coerce.ToDateTime(params[0])
	if err != nil {
		// BUG FIX: the original message claimed the first argument "must
		// be string" although it is coerced to a datetime.
		return nil, fmt.Errorf("formatDate first argument must be a datetime")
	}
	format, err := coerce.ToString(params[1])
	if err != nil {
		return nil, fmt.Errorf("formatDate second argument must be a string")
	}
	layout := convertDateFormater(format)
	log.RootLogger().Debugf("Format date %s to format %s", date, layout)
	return date.Format(layout), nil
}

// convertDateFormater translates a yyyy/mm/dd-style pattern into Go's
// reference-time layout (2006/01/02). The redundant Contains guards of
// the original are gone — Replace on an absent substring is a no-op.
//
// NOTE(review): the pattern is lowercased first, so literal text loses
// its case and minutes ("mm") cannot be expressed; preserved as-is for
// backwards compatibility.
func convertDateFormater(format string) string {
	layout := strings.ToLower(format)
	layout = strings.Replace(layout, "yyyy", "2006", -1)
	layout = strings.Replace(layout, "mm", "01", -1)
	layout = strings.Replace(layout, "dd", "02", -1)
	return layout
}
package main

import (
	"flag"
	"fmt"
	"strconv"

	"github.com/astaxie/beego/httplib"

	"tripod/convert"
	"webserver/common"
	"webserver/models"
	"webserver/models/dimension"
	"webserver/models/maccount"
)

// Command-line options (parsed in init).
var Online bool
var ItemName string
var ItemType int
var Opt int
var UserId int
var ItemId int

func init() {
	flag.BoolVar(&Online, "online", false, "")
	flag.StringVar(&ItemName, "name", "", "")
	flag.IntVar(&ItemType, "type", 0, "") // 1: community 2:officebuilding
	flag.IntVar(&Opt, "operation", 0, "") // 1:Add Item 2:Correct Info
	flag.IntVar(&UserId, "user", 0, "")
	flag.IntVar(&ItemId, "item", 0, "")
	flag.Parse()
	fmt.Println("ItemName:", ItemName, "ItemType:", ItemType, "operation:", Opt, "UserId:", UserId, "ItemId:", ItemId)
}

// Useage prints the accepted flag names.
// NOTE(review): name kept for compatibility, though "Usage" is the
// conventional spelling.
func Useage() {
	fmt.Println("online, name, type, operation, user, item")
}

// AddCommunity inserts a community dimension record named ItemName and
// back-fills CommunityId/Nearby with the generated row id.
func AddCommunity() {
	comm := &dimension.Community{}
	comm.CommunityId = 0
	comm.ProvinceId = 3
	comm.CityId = 3
	comm.DistrictId = 0
	comm.Name = ItemName
	comm.Extra = ""
	comm.Location = MapSearch(ItemName)
	comm.Nearby = ""
	comm.Status = 1
	if err := models.CreateRecord(comm); err == nil {
		comm.CommunityId = comm.Id
		comm.Nearby = strconv.Itoa(comm.Id)
		err := models.SaveRecord(comm)
		fmt.Println("AddCommunity", comm, err)
	} else {
		// BUG FIX: creation errors were silently dropped.
		fmt.Println("AddCommunity create failed:", err)
	}
}

// AddOfficeBuilding inserts an office-building dimension record named
// ItemName and back-fills OfficebuildingId/Nearby with the generated id.
// NOTE(review): ProvinceId is 0 here but 3 in AddCommunity — confirm
// whether that asymmetry is intended.
func AddOfficeBuilding() {
	office := &dimension.OfficeBuilding{}
	office.OfficebuildingId = 0
	office.ProvinceId = 0
	office.CityId = 3
	office.DistrictId = 0
	office.Name = ItemName
	office.Extra = ""
	office.Location = MapSearch(ItemName)
	office.Nearby = ""
	office.Status = 1
	if err := models.CreateRecord(office); err == nil {
		office.OfficebuildingId = office.Id
		office.Nearby = strconv.Itoa(office.Id)
		err := models.SaveRecord(office)
		fmt.Println("AddOfficeBuilding", office, err)
	} else {
		// BUG FIX: creation errors were silently dropped.
		fmt.Println("AddOfficeBuilding create failed:", err)
	}
}

// UpdateUserIno updates one of the user's item references (community,
// office building, school or hometown) depending on ItemType.
// NOTE(review): name kept for compatibility; "UpdateUserInfo" appears to
// be the intended spelling. The function always returns nil, as before.
func UpdateUserIno() error {
	user, err := maccount.FindUserById(UserId)
	if err != nil {
		// BUG FIX: lookup failures were silently ignored.
		fmt.Println("FindUserById failed:", err)
		return nil
	}
	fmt.Println("Before Update", user)
	switch ItemType {
	case 1:
		user.CommunityId = ItemId
	case 2:
		user.OfficebuildingId = ItemId
	case 3:
		user.SchoolId = ItemId
	case 4:
		user.HometownId = ItemId
	default:
		return nil
	}
	err = models.SaveRecord(user)
	fmt.Println("After Update", user, err)
	return nil
}

// MapSearch queries the AMap place-search API for key and returns the
// "location" field of the first hit, or "" when nothing was found or the
// item type is unsupported.
// NOTE(review): the API key is hard-coded in the URL; consider moving it
// to configuration.
func MapSearch(key string) string {
	var url string
	switch ItemType {
	case 1:
		url = fmt.Sprintf("http://restapi.amap.com/v3/place/text?key=7eea1d43b01c712da71c24ce54727dc9&keywords=%s&types=住宅区&city=北京&children=1&offset=20&page=1&extensions=base", key)
	case 2:
		url = fmt.Sprintf("http://restapi.amap.com/v3/place/text?key=7eea1d43b01c712da71c24ce54727dc9&keywords=%s&types=商务写字楼&city=北京&children=1&offset=20&page=1&extensions=base", key)
	default:
		// BUG FIX: the original fell through with an empty URL and issued
		// a bogus HTTP request for unsupported item types.
		return ""
	}

	request := httplib.Get(url)
	resp, err := request.String()
	if err != nil {
		// BUG FIX: the request error was previously discarded.
		fmt.Println("MapSearch request failed:", err, url)
		return ""
	}
	data, err := convert.StringToJson(resp)
	if err != nil {
		fmt.Println("MapSearch bad response:", err, url)
		return ""
	}
	if data["status"] == nil || data["count"] == nil {
		fmt.Println(url)
	} else if data["status"].(string) == "1" && data["count"].(string) != "0" {
		if values, ok := data["pois"].([]interface{}); ok && len(values) > 0 {
			// BUG FIX: this ambiguity warning sat after a return statement
			// and was unreachable.
			if len(values) > 1 {
				fmt.Println("multiname", key)
			}
			if mdata, ok := values[0].(map[string]interface{}); ok {
				// BUG FIX: guard the assertion instead of panicking when
				// "location" is missing or not a string.
				if loc, ok := mdata["location"].(string); ok {
					return loc
				}
			}
		}
	}
	return ""
}

// main dispatches on -operation: 1 adds a community/office-building
// dimension record, 2 corrects a user's item reference.
// NOTE(review): database credentials are hard-coded below.
func main() {
	Useage()
	common.Source = common.DbSource{"127.0.0.1", 3306, "test001", "datatest", "test001"}
	if Online {
		common.Source = common.DbSource{"10.25.112.53", 3306, "test001", "youquaner", "test001"}
	}
	models.InitGormDb()

	switch Opt {
	case 1:
		switch ItemType {
		case 1:
			AddCommunity()
		case 2:
			AddOfficeBuilding()
		default:
			fmt.Println("wrong item type")
		}
	case 2:
		UpdateUserIno()
	default:
		fmt.Println("wrong opt type")
	}
}
package dataingest_mutation import ( "github.com/Dynatrace/dynatrace-operator/src/config" corev1 "k8s.io/api/core/v1" ) func addWorkloadInfoEnvs(container *corev1.Container, workload *workloadInfo) { container.Env = append(container.Env, corev1.EnvVar{Name: config.EnrichmentWorkloadKindEnv, Value: workload.kind}, corev1.EnvVar{Name: config.EnrichmentWorkloadNameEnv, Value: workload.name}, corev1.EnvVar{Name: config.EnrichmentInjectedEnv, Value: "true"}, ) }
package types // Task struct represent a Task =] type Task struct { Text string `json:"text"` MissedWord string `json:"missed-word"` Options []string `json:"options"` }
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

// main downloads an English word list over HTTP and prints it to stdout.
func main() {
	// http.Get returns the response plus a non-nil error on failure.
	// BUG FIX: the error variable was previously named `error`, shadowing
	// the builtin interface type.
	res, err := http.Get("http://www-01.sil.org/linguistics/wordlists/english/wordlist/wordsEn.txt")
	if err != nil {
		log.Fatalln(err)
	}
	// BUG FIX: the response body was never closed.
	defer res.Body.Close()

	// BUG FIX: the read error was previously discarded with a blank
	// identifier.
	byteSlice, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatalln(err)
	}

	// ReadAll yields a byte slice; convert it to a string for printing.
	str := string(byteSlice)
	fmt.Println(str)
}
package htmlparser // AttrStatus indicate a status of an attribute type AttrStatus uint8 const ( ASValid AttrStatus = iota ASDeprecated ASUnknown ) // Type of HTML Element according to the HTML 5.0 spec type HtmlElementType uint8 const ( HETPhrasing HtmlElementType = 0x1 // former "inline element" HETFlow = 0x2 // former "block element" HETMeta = 0x4 // control elements HETText = 0x8 // text block HETNRCharData = 0x10 // Non-Replaceable Char Data HETAnyContent = HETPhrasing | HETFlow | HETText HETTransparent = HETPhrasing | HETFlow HETNone = 0 ) type HtmlTagFormatting uint8 const ( HTFSingle HtmlTagFormatting = iota // Has no closing tag, e.g. <br> HTFOptionalClosing // has an optional closing tag, e.g. <li> HTFComplete // must have a closing tag )
package compliancescan

import (
	"context"
	"time"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
	"sigs.k8s.io/controller-runtime/pkg/source"

	compv1alpha1 "github.com/openshift/compliance-operator/pkg/apis/compliance/v1alpha1"
	"github.com/openshift/compliance-operator/pkg/controller/common"
	"github.com/openshift/compliance-operator/pkg/utils"
)

// log is the package-level logger for the scan controller.
var log = logf.Log.WithName("scanctrl")

// oneReplica and the values below are presumably taken by address where
// the Kubernetes API requires pointer fields — confirm at usage sites.
var oneReplica int32 = 1

var (
	trueVal     = true
	hostPathDir = corev1.HostPathDirectory
)

const (
	// OpenSCAPScanContainerName defines the name of the container that will run OpenSCAP
	OpenSCAPScanContainerName = "openscap-ocp"
	// NodeHostnameLabel is the well-known node label carrying the hostname.
	NodeHostnameLabel = "kubernetes.io/hostname"
	// AggregatorPodAnnotation identifies a scan's aggregator pod.
	AggregatorPodAnnotation = "scan-aggregator"
	// The default time we should wait before requeuing
	requeueAfterDefault = 10 * time.Second
)

// Add creates a new ComplianceScan Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileComplianceScan{client: mgr.GetClient(), scheme: mgr.GetScheme()}
}

// add wires the controller into mgr with r as the reconcile.Reconciler:
// it watches ComplianceScan objects (primary resource) and Pods owned by
// a ComplianceScan (secondary resource).
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("compliancescan-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// Watch for changes to primary resource ComplianceScan
	err = c.Watch(&source.Kind{Type: &compv1alpha1.ComplianceScan{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}

	// TODO(user): Modify this to be the types you create that are owned by the primary resource
	// Watch for changes to secondary resource Pods and requeue the owner ComplianceScan
	err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &compv1alpha1.ComplianceScan{},
	})
	if err != nil {
		return err
	}

	return nil
}

// blank assignment to verify that ReconcileComplianceScan implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileComplianceScan{}

// ReconcileComplianceScan reconciles a ComplianceScan object
type ReconcileComplianceScan struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	scheme *runtime.Scheme
}

// Reconcile reads that state of the cluster for a ComplianceScan object and makes changes based on the state read
// and what is in the ComplianceScan.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileComplianceScan) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
	reqLogger.Info("Reconciling ComplianceScan")

	// Fetch the ComplianceScan instance
	instance := &compv1alpha1.ComplianceScan{}
	err := r.client.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}

	// At this point, we make a copy of the instance, so we can modify it in the functions below.
	scanToBeUpdated := instance.DeepCopy()

	// If no phase set, default to pending (the initial phase):
	if scanToBeUpdated.Status.Phase == "" {
		scanToBeUpdated.Status.Phase = compv1alpha1.PhasePending
	}

	// Dispatch on the scan's phase; the handlers below drive the state
	// machine PENDING -> LAUNCHING -> RUNNING -> AGGREGATING -> DONE.
	switch scanToBeUpdated.Status.Phase {
	case compv1alpha1.PhasePending:
		return r.phasePendingHandler(scanToBeUpdated, reqLogger)
	case compv1alpha1.PhaseLaunching:
		return r.phaseLaunchingHandler(scanToBeUpdated, reqLogger)
	case compv1alpha1.PhaseRunning:
		return r.phaseRunningHandler(scanToBeUpdated, reqLogger)
	case compv1alpha1.PhaseAggregating:
		return r.phaseAggregatingHandler(scanToBeUpdated, reqLogger)
	case compv1alpha1.PhaseDone:
		return r.phaseDoneHandler(scanToBeUpdated, reqLogger)
	}

	// the default catch-all, just remove the request from the queue
	return reconcile.Result{}, nil
}

// phasePendingHandler creates the scan's script/env configmaps and moves
// the scan to the LAUNCHING phase.
func (r *ReconcileComplianceScan) phasePendingHandler(instance *compv1alpha1.ComplianceScan, logger logr.Logger) (reconcile.Result, error) {
	logger.Info("Phase: Pending")

	err := createConfigMaps(r, scriptCmForScan(instance), envCmForScan(instance), instance)
	if err != nil {
		logger.Error(err, "Cannot create the configmaps")
		return reconcile.Result{}, err
	}

	// Update the scan instance, the next phase is running
	instance.Status.Phase = compv1alpha1.PhaseLaunching
	instance.Status.Result = compv1alpha1.ResultNotAvailable
	err = r.client.Status().Update(context.TODO(), instance)
	if err != nil {
		logger.Error(err, "Cannot update the status")
		return reconcile.Result{}, err
	}

	// TODO: It might be better to store the list of eligible nodes in the CR so that if someone edits the CR or
	// adds/removes nodes while the scan is running, we just work on the same set?
	return reconcile.Result{}, nil
}

// phaseLaunchingHandler creates the scan infrastructure (CA and cert
// secrets, the result server, per-node scan pods) and, once everything is
// created, moves the scan to the RUNNING phase. Non-retriable pod
// creation errors are surfaced on the CR and terminate the scan.
func (r *ReconcileComplianceScan) phaseLaunchingHandler(instance *compv1alpha1.ComplianceScan, logger logr.Logger) (reconcile.Result, error) {
	var nodes corev1.NodeList
	var err error

	logger.Info("Phase: Launching")

	if nodes, err = getTargetNodes(r, instance); err != nil {
		log.Error(err, "Cannot get nodes")
		return reconcile.Result{}, err
	}

	if err = r.handleRootCASecret(instance, logger); err != nil {
		log.Error(err, "Cannot create CA secret")
		return reconcile.Result{}, err
	}

	if err = r.handleResultServerSecret(instance, logger); err != nil {
		log.Error(err, "Cannot create result server cert secret")
		return reconcile.Result{}, err
	}

	if err = r.handleResultClientSecret(instance, logger); err != nil {
		log.Error(err, "Cannot create result client cert secret")
		return reconcile.Result{}, err
	}

	if err = r.createResultServer(instance, logger); err != nil {
		log.Error(err, "Cannot create result server")
		return reconcile.Result{}, err
	}

	if err = r.createScanPods(instance, nodes, logger); err != nil {
		if !common.IsRetriable(err) {
			// Surface non-retriable errors to the CR
			log.Info("Updating scan status due to unretriable error")
			scanCopy := instance.DeepCopy()
			scanCopy.Status.ErrorMessage = err.Error()
			scanCopy.Status.Result = compv1alpha1.ResultError
			scanCopy.Status.Phase = compv1alpha1.PhaseDone
			if updateerr := r.client.Status().Update(context.TODO(), scanCopy); updateerr != nil {
				log.Error(updateerr, "Failed to update a scan")
				return reconcile.Result{}, updateerr
			}
		}
		return common.ReturnWithRetriableError(logger, err)
	}

	// if we got here, there are no new pods to be created, move to the next phase
	instance.Status.Phase = compv1alpha1.PhaseRunning
	err = r.client.Status().Update(context.TODO(), instance)
	if err != nil {
		return reconcile.Result{}, err
	}

	return reconcile.Result{}, nil
}

// phaseRunningHandler waits for every per-node scan pod to finish. A
// missing pod sends the scan back to LAUNCHING; once no pod is running,
// the scan moves to the AGGREGATING phase.
func (r *ReconcileComplianceScan) phaseRunningHandler(instance *compv1alpha1.ComplianceScan, logger logr.Logger) (reconcile.Result, error) {
	var nodes corev1.NodeList
	var err error

	logger.Info("Phase: Running")

	if nodes, err = getTargetNodes(r, instance); err != nil {
		log.Error(err, "Cannot get nodes")
		return reconcile.Result{}, err
	}

	if len(nodes.Items) == 0 {
		log.Info("Warning: No eligible nodes. CheckResult the nodeSelector.")
	}

	// On each eligible node..
	for _, node := range nodes.Items {
		running, err := isPodRunningInNode(r, instance, &node, logger)
		if errors.IsNotFound(err) {
			// Let's go back to the previous state and make sure all the nodes are covered.
			logger.Info("Phase: Running: A pod is missing. Going to state LAUNCHING to make sure we launch it", "Node.Name", node.Name)
			instance.Status.Phase = compv1alpha1.PhaseLaunching
			err = r.client.Status().Update(context.TODO(), instance)
			if err != nil {
				return reconcile.Result{}, err
			}
			return reconcile.Result{}, nil
		} else if err != nil {
			return reconcile.Result{}, err
		}
		if running {
			// at least one pod is still running, just go back to the queue
			// NOTE(review): err is nil here, so this returns an empty
			// result with no error — confirm that the controller actually
			// re-queues in this case as the comment implies.
			return reconcile.Result{}, err
		}
	}

	// if we got here, there are no pods running, move to the Aggregating phase
	instance.Status.Phase = compv1alpha1.PhaseAggregating
	err = r.client.Status().Update(context.TODO(), instance)
	if err != nil {
		return reconcile.Result{}, err
	}

	return reconcile.Result{}, nil
}

// phaseAggregatingHandler gathers per-node results, records them on the
// CR, launches the aggregator pod, and — once the aggregator has finished
// — moves the scan to the DONE phase.
func (r *ReconcileComplianceScan) phaseAggregatingHandler(instance *compv1alpha1.ComplianceScan, logger logr.Logger) (reconcile.Result, error) {
	logger.Info("Phase: Aggregating")
	var nodes corev1.NodeList
	var err error

	if nodes, err = getTargetNodes(r, instance); err != nil {
		log.Error(err, "Cannot get nodes")
		return reconcile.Result{}, err
	}

	result, isReady, err := gatherResults(r, instance, nodes)

	// We only wait if there are no errors.
	if err == nil && !isReady {
		log.Info("ConfigMap missing (not ready). Requeuing.")
		return reconcile.Result{Requeue: true, RequeueAfter: requeueAfterDefault}, nil
	}

	instance.Status.Result = result
	if err != nil {
		instance.Status.ErrorMessage = err.Error()
	}

	logger.Info("Creating an aggregator pod for scan")
	aggregator := newAggregatorPod(instance, logger)
	if err = controllerutil.SetControllerReference(instance, aggregator, r.scheme); err != nil {
		log.Error(err, "Failed to set aggregator pod ownership", "aggregator", aggregator)
		return reconcile.Result{}, err
	}

	err = r.launchAggregatorPod(instance, aggregator, logger)
	if err != nil {
		log.Error(err, "Failed to launch aggregator pod", "aggregator", aggregator)
		return reconcile.Result{}, err
	}

	running, err := isAggregatorRunning(r, instance, logger)
	if err != nil {
		log.Error(err, "Failed to check if aggregator pod is running", "aggregator", aggregator)
		return reconcile.Result{}, err
	}

	if running {
		log.Info("Remaining in the aggregating phase")
		instance.Status.Phase = compv1alpha1.PhaseAggregating
		// NOTE(review): the error from this status update is assigned but
		// not returned — confirm dropping it is intentional.
		err = r.client.Status().Update(context.TODO(), instance)
		return reconcile.Result{Requeue: true, RequeueAfter: requeueAfterDefault}, nil
	}

	log.Info("Moving on to the Done phase")
	instance.Status.Phase = compv1alpha1.PhaseDone
	err = r.client.Status().Update(context.TODO(), instance)
	return reconcile.Result{}, err
}

// phaseDoneHandler tears down the scan infrastructure (scan pods, result
// server, aggregator, secrets) unless debug mode keeps it around.
func (r *ReconcileComplianceScan) phaseDoneHandler(instance *compv1alpha1.ComplianceScan, logger logr.Logger) (reconcile.Result, error) {
	var nodes corev1.NodeList
	var err error

	logger.Info("Phase: Done")

	if !instance.Spec.Debug {
		if nodes, err = getTargetNodes(r, instance); err != nil {
			log.Error(err, "Cannot get nodes")
			return reconcile.Result{}, err
		}

		if err := r.deleteScanPods(instance, nodes, logger); err != nil {
			log.Error(err, "Cannot delete scan pods")
			return reconcile.Result{}, err
		}

		if err := r.deleteResultServer(instance, logger); err != nil {
			log.Error(err, "Cannot delete result server")
			return reconcile.Result{}, err
		}

		if err := r.deleteAggregator(instance, logger); err != nil {
			log.Error(err, "Cannot delete aggregator")
			return reconcile.Result{}, err
		}

		if err = r.deleteResultServerSecret(instance, logger); err != nil {
			log.Error(err, "Cannot delete result server cert secret")
			return reconcile.Result{}, err
		}

		if err = r.deleteResultClientSecret(instance, logger); err != nil {
			log.Error(err, "Cannot delete result client cert secret")
			return reconcile.Result{}, err
		}

		if err = r.deleteRootCASecret(instance, logger); err != nil {
			log.Error(err, "Cannot delete CA secret")
			return reconcile.Result{}, err
		}
	}

	return reconcile.Result{}, nil
}

// getTargetNodes lists the nodes matching the scan's nodeSelector.
func getTargetNodes(r *ReconcileComplianceScan, instance *compv1alpha1.ComplianceScan) (corev1.NodeList, error) {
	var nodes corev1.NodeList

	listOpts := client.ListOptions{
		LabelSelector: labels.SelectorFromSet(instance.Spec.NodeSelector),
	}

	if err := r.client.List(context.TODO(), &nodes, &listOpts); err != nil {
		return nodes, err
	}

	return nodes, nil
}

// createPVCForScan creates the scan's PVC, owned by the scan so it is
// garbage-collected with it; an AlreadyExists error is tolerated, making
// the call idempotent.
func (r *ReconcileComplianceScan) createPVCForScan(instance *compv1alpha1.ComplianceScan) error {
	pvc := getPVCForScan(instance)
	if err := controllerutil.SetControllerReference(instance, pvc, r.scheme); err != nil {
		log.Error(err, "Failed to set pvc ownership", "pvc", pvc.Name)
		return err
	}
	if err := r.client.Create(context.TODO(), pvc); err != nil && !errors.IsAlreadyExists(err) {
		return err
	}
	return nil
}

// returns true if the pod is still running, false otherwise
func isPodRunningInNode(r *ReconcileComplianceScan, scanInstance *compv1alpha1.ComplianceScan, node *corev1.Node, logger logr.Logger) (bool, error) {
	podName := getPodForNodeName(scanInstance.Name, node.Name)
	return isPodRunning(r, podName, scanInstance.Namespace, logger)
}

// isPodRunning looks up the named pod: Failed/Succeeded phases count as
// finished; anything else (including still being created) counts as
// running.
func isPodRunning(r *ReconcileComplianceScan, podName, namespace string, logger logr.Logger) (bool, error) {
	foundPod := &corev1.Pod{}
	err := r.client.Get(context.TODO(), types.NamespacedName{Name: podName, Namespace: namespace}, foundPod)
	if err != nil {
		logger.Error(err, "Cannot retrieve pod", "Pod.Name", podName)
		return false, err
	} else if foundPod.Status.Phase == corev1.PodFailed || foundPod.Status.Phase == corev1.PodSucceeded {
		logger.Info("Pod has finished")
		return false, nil
	}

	// the pod is still running or being created etc
	logger.Info("Pod still running", "Pod.Name", podName)
	return true, nil
}

// gatherResults will iterate the nodes in the scan and get the results
// for the OpenSCAP check. If the results haven't yet been persisted in
// the relevant ConfigMap, a requeue will be requested since the
// results are not ready.
func gatherResults(r *ReconcileComplianceScan, instance *compv1alpha1.ComplianceScan, nodes corev1.NodeList) (compv1alpha1.ComplianceScanStatusResult, bool, error) {
	var lastNonCompliance compv1alpha1.ComplianceScanStatusResult
	var result compv1alpha1.ComplianceScanStatusResult
	compliant := true
	isReady := true

	for _, node := range nodes.Items {
		targetCM := types.NamespacedName{
			Name:      getConfigMapForNodeName(instance.Name, node.Name),
			Namespace: instance.Namespace,
		}

		foundCM := &corev1.ConfigMap{}
		err := r.client.Get(context.TODO(), targetCM, foundCM)

		// Could be a transient error, so we requeue if there's any
		// error here.
		if err != nil {
			isReady = false
		}

		// NOTE: err is only set if there is an error in the scan run
		result, err = getScanResult(foundCM)

		// we output the last result if it was an error
		if result == compv1alpha1.ResultError {
			return result, true, err
		}
		// Store the last non-compliance, so we can output that if
		// there were no errors.
if result == compv1alpha1.ResultNonCompliant { lastNonCompliance = result compliant = false } } if !compliant { return lastNonCompliance, isReady, nil } return result, isReady, nil } func getPVCForScan(instance *compv1alpha1.ComplianceScan) *corev1.PersistentVolumeClaim { return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: getPVCForScanName(instance), Namespace: instance.Namespace, Labels: map[string]string{ "complianceScan": instance.Name, }, }, Spec: corev1.PersistentVolumeClaimSpec{ // NOTE(jaosorior): Currently we don't set a StorageClass // so the default will be taken into use. // TODO(jaosorior): Make StorageClass configurable StorageClassName: nil, AccessModes: []corev1.PersistentVolumeAccessMode{ "ReadWriteOnce", }, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ // TODO(jaosorior): Make this configurable corev1.ResourceStorage: resource.MustParse("1Gi"), }, }, }, } } // pod names are limited to 63 chars, inclusive. Try to use a friendly name, if that can't be done, // just use a hash. Either way, the node would be present in a label of the pod. func getPodForNodeName(scanName, nodeName string) string { return utils.DNSLengthName("openscap-pod-", "%s-%s-pod", scanName, nodeName) } func getConfigMapForNodeName(scanName, nodeName string) string { return utils.DNSLengthName("openscap-pod-", "%s-%s-pod", scanName, nodeName) } func getPVCForScanName(instance *compv1alpha1.ComplianceScan) string { return instance.Name } func getInitContainerImage(scanSpec *compv1alpha1.ComplianceScanSpec, logger logr.Logger) string { image := DefaultContentContainerImage if scanSpec.ContentImage != "" { image = scanSpec.ContentImage } logger.Info("Content image", "image", image) return image }
// Package socks implements message parsing for the SOCKS5 protocol
// (https://tools.ietf.org/html/rfc1928).
package socks

import (
	"fmt"
	"io"
	"net"
	"strconv"
)

const (
	// version
	socks5Version = uint8(5)

	// commands https://tools.ietf.org/html/rfc1928#section-4
	connectCommand   = uint8(1)
	bindCommand      = uint8(2)
	associateCommand = uint8(3)

	// address types
	ipv4Address = uint8(1)
	fqdnAddress = uint8(3)
	ipv6Address = uint8(4)
)

// https://tools.ietf.org/html/rfc1928#section-6
const (
	successReply uint8 = iota
	serverFailure
	ruleFailure
	networkUnreachable
	hostUnreachable
	connectionRefused
	ttlExpired
	commandNotSupported
	addrTypeNotSupported
)

// AddrSpec is used to return the target IPv4, IPv6, or a FQDN
type AddrSpec struct {
	FQDN string
	IP   net.IP
	Port int
}

// String gives a host version of the Address
func (a *AddrSpec) String() string {
	if a.FQDN != "" {
		return fmt.Sprintf("%s (%s):%d", a.FQDN, a.IP, a.Port)
	}
	return fmt.Sprintf("%s:%d", a.IP, a.Port)
}

// Address returns a string suitable to dial; prefer returning IP-based
// address, fallback to FQDN
func (a AddrSpec) Address() string {
	if len(a.IP) != 0 {
		return net.JoinHostPort(a.IP.String(), strconv.Itoa(a.Port))
	}
	return net.JoinHostPort(a.FQDN, strconv.Itoa(a.Port))
}

// Request is a SOCKS5 command with supporting field of the connection
type Request struct {
	// Protocol version
	Version uint8
	// Requested command
	Command uint8
	// AddrSpec of the destination
	DestAddr *AddrSpec
	// reading from the connection
	bufConn io.Reader
}

// NewRequest creates a new request from the connection data stream.
// It reads the 3-byte header (version, command, reserved) followed by the
// destination address, and fails if the version byte is not SOCKS5.
func NewRequest(bufConn io.Reader) (*Request, error) {
	// Read the version byte
	header := []byte{0, 0, 0}
	if _, err := io.ReadAtLeast(bufConn, header, 3); err != nil {
		return nil, fmt.Errorf("Failed to get command version: %v", err)
	}

	// ensure compatibility
	if header[0] != socks5Version {
		return nil, fmt.Errorf("Unsupported command version: %v", header[0])
	}

	// Read in the destination address
	dest, err := readAddrSpec(bufConn)
	if err != nil {
		return nil, err
	}

	return &Request{
		Version:  socks5Version,
		Command:  header[1],
		DestAddr: dest,
		bufConn:  bufConn,
	}, nil
}

// sendReply writes a SOCKS5 reply message with the given reply code and
// bound address to w. A nil addr is encoded as the IPv4 zero address.
func sendReply(w io.Writer, resp uint8, addr *AddrSpec) error {
	var addrType uint8
	var addrBody []byte
	var addrPort uint16
	switch {
	case addr == nil:
		addrType = ipv4Address
		addrBody = []byte{0, 0, 0, 0}
		addrPort = 0

	case addr.FQDN != "":
		// BUG FIX: the domain is length-prefixed with a single byte, so a
		// name longer than 255 bytes previously truncated into a bogus
		// length prefix; reject it instead.
		if len(addr.FQDN) > 255 {
			return fmt.Errorf("FQDN too long: %v", addr.FQDN)
		}
		addrType = fqdnAddress
		addrBody = append([]byte{byte(len(addr.FQDN))}, addr.FQDN...)
		addrPort = uint16(addr.Port)

	case addr.IP.To4() != nil:
		addrType = ipv4Address
		addrBody = []byte(addr.IP.To4())
		addrPort = uint16(addr.Port)

	case addr.IP.To16() != nil:
		addrType = ipv6Address
		addrBody = []byte(addr.IP.To16())
		addrPort = uint16(addr.Port)

	default:
		return fmt.Errorf("Failed to format address: %v", addr)
	}

	// Format the message: VER, REP, RSV, ATYP, BND.ADDR, BND.PORT
	msg := make([]byte, 6+len(addrBody))
	msg[0] = socks5Version
	msg[1] = resp
	msg[2] = 0 // Reserved
	msg[3] = addrType
	copy(msg[4:], addrBody)
	msg[4+len(addrBody)] = byte(addrPort >> 8)
	msg[4+len(addrBody)+1] = byte(addrPort & 0xff)

	// Send the message
	_, err := w.Write(msg)
	return err
}

// readAddrSpec is used to read AddrSpec.
// Expects an address type byte, followed by the address and port
func readAddrSpec(r io.Reader) (*AddrSpec, error) {
	d := &AddrSpec{}

	// Get the address type.
	// BUG FIX: a bare r.Read may legally return fewer bytes than requested
	// (even 0) with a nil error; io.ReadFull guarantees the byte arrives.
	addrType := []byte{0}
	if _, err := io.ReadFull(r, addrType); err != nil {
		return nil, err
	}

	// Handle on a per type basis
	switch addrType[0] {
	case ipv4Address:
		addr := make([]byte, 4)
		if _, err := io.ReadFull(r, addr); err != nil {
			return nil, err
		}
		d.IP = net.IP(addr)

	case ipv6Address:
		addr := make([]byte, 16)
		if _, err := io.ReadFull(r, addr); err != nil {
			return nil, err
		}
		d.IP = net.IP(addr)

	case fqdnAddress:
		// one-byte length prefix, then the name itself
		// BUG FIX: was a bare r.Read into a reused buffer; short reads
		// would yield a wrong length and a garbled FQDN.
		lenByte := []byte{0}
		if _, err := io.ReadFull(r, lenByte); err != nil {
			return nil, err
		}
		fqdn := make([]byte, int(lenByte[0]))
		if _, err := io.ReadFull(r, fqdn); err != nil {
			return nil, err
		}
		d.FQDN = string(fqdn)

	default:
		return nil, fmt.Errorf("Unrecognized address type")
	}

	// Read the port (big-endian uint16)
	port := []byte{0, 0}
	if _, err := io.ReadFull(r, port); err != nil {
		return nil, err
	}
	d.Port = (int(port[0]) << 8) | int(port[1])

	return d, nil
}
package model

import (
	"strings"
)

// TrySquash merges each maximal run of two or more consecutive
// shell-standard-form commands into a single shell invocation, leaving all
// other commands untouched and in order.
func TrySquash(runs []Cmd) []Cmd {
	result := make([]Cmd, 0)
	i := 0
	for i < len(runs) {
		// Find where the run of squashable commands starting at i ends.
		end := i
		for end < len(runs) && runs[end].IsShellStandardForm() {
			end++
		}
		// A run shorter than two commands gains nothing from squashing.
		if end-i < 2 {
			result = append(result, runs[i])
			i++
			continue
		}
		result = append(result, squashHelper(runs[i:end]))
		i = end
	}
	return result
}

// Create a new shell script that combines the individual runs.
// We know all the scripts are in shell standard form.
func squashHelper(runs []Cmd) Cmd {
	scripts := make([]string, 0, len(runs))
	for _, run := range runs {
		scripts = append(scripts, run.ShellStandardScript())
	}
	// This could potentially break things (because it converts normal shell
	// scripts to scripts run with -ex). We're not too worried about it right
	// now. In the future, we might need to do manual exit code checks for
	// correctness.
	return Cmd{
		Argv: []string{"sh", "-exc", strings.Join(scripts, ";\n")},
	}
}
package actiontime

import (
	"fmt"
	"testing"
)

// runBoth drives one CSV test case through both the internal implementation
// wrapper and the public stats wrapper.
func runBoth(t *testing.T, csvFn string) {
	testStatsImpl(t, csvFn)
	testStats(t, csvFn)
}

// testStatsImpl runs the case against the implementation-level wrapper.
func testStatsImpl(t *testing.T, csvFn string) {
	var obj statsImplWrap
	runner := testRunner{CsvFn: csvFn, Obj: &obj}
	t.Run(csvFn, runner.Run)
}

// testStats runs the case against the public stats wrapper.
func testStats(t *testing.T, csvFn string) {
	var obj statsWrap
	runner := testRunner{CsvFn: csvFn, Obj: &obj}
	t.Run(csvFn, runner.Run)
}

// runGenerated handles the large generated cases: they are skipped under
// -short, and when the CSV has not been generated yet the test fails,
// printing the exact generator command to run.
func runGenerated(t *testing.T, csvFn, balance string, count int, actions string) {
	if testing.Short() {
		t.SkipNow()
	}
	if !fileExists(csvFn) {
		cmd := fmt.Sprintf("python3 tools/testgenerator.py --balance %s --csv %s --add %d %s", balance, csvFn, count, actions)
		t.Errorf("Please generate %s with...\n%s", csvFn, cmd)
		return
	}
	runBoth(t, csvFn)
}

func TestEmpty(t *testing.T)       { runBoth(t, "testdata/tc_empty.csv") }
func TestWrOne(t *testing.T)       { runBoth(t, "testdata/tc_wr_one_one.csv") }
func TestWrFew(t *testing.T)       { runBoth(t, "testdata/tc_wr_few_few.csv") }
func TestWrFewAsync(t *testing.T)  { runBoth(t, "testdata/tc_wr_few_few_async.csv") }
func TestBalFewAsync(t *testing.T) { runBoth(t, "testdata/tc_bal_few_few_async.csv") }

func TestWrMilAsync(t *testing.T) {
	runGenerated(t, "testdata/gen/tc_wr_mil_few_async.csv", "write", 1000000, "jump run sit stand")
}

func TestRdMilAsync(t *testing.T) {
	runGenerated(t, "testdata/gen/tc_rd_mil_few_async.csv", "read", 1000000, "jump run sit stand")
}

func TestBal100kAsync(t *testing.T) {
	runGenerated(t, "testdata/gen/tc_bal_100k_few_async.csv", "balanced", 100000, "jump run sit stand")
}

func TestWrMilOneAsync(t *testing.T) {
	runGenerated(t, "testdata/gen/tc_wr_mil_one_async.csv", "write", 1000000, "jump")
}

func TestRdMilOneAsync(t *testing.T) {
	runGenerated(t, "testdata/gen/tc_rd_mil_one_async.csv", "read", 1000000, "jump")
}

func TestBal100kOneAsync(t *testing.T) {
	runGenerated(t, "testdata/gen/tc_bal_100k_one_async.csv", "balanced", 100000, "jump")
}
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

// Package publish handles publishing of events for add-on services,
// e.g. to maintain a database of transactions.
package publish
package Contains_Duplicate_III // similar to bucket sort func containsNearbyAlmostDuplicate(nums []int, k int, t int) bool { if t < 0 || k <= 0 || len(nums) < 2 { return false } buckets := make(map[int]int, len(nums)) for i := 0; i < len(nums); i++ { c := nums[i] if c < 0 { c -= t + 1 } key := c / (t + 1) if _, ok := buckets[key]; ok { return true } if v, ok := buckets[key+1]; ok && Abs(v-nums[i]) < t+1 { return true } if v, ok := buckets[key-1]; ok && Abs(v-nums[i]) < t+1 { return true } buckets[key] = nums[i] if i-k >= 0 { d := nums[i-k] if d < 0 { d -= t + 1 } delete(buckets, d/(t+1)) } } return false } // sliding window func containsNearbyAlmostDuplicate2(nums []int, k int, t int) bool { if k <= 0 || len(nums) < 2 { return false } left, right := 0, 1 for left < right && right < len(nums) { for c := left; c < right; c++ { sub := nums[right] - nums[c] if Abs(sub) <= t { return true } } if right-left == k { right++ left++ } else if right-left < k { right++ } else { left++ } } return false } func Abs(a int) int { if a < 0 { return -a } return a }
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package safemem

import (
	"bytes"
	"io"
	"testing"
)

// makeBlocks wraps each byte slice in a Block so the slices can be addressed
// as a BlockSeq by the tests below.
func makeBlocks(slices ...[]byte) []Block {
	blocks := make([]Block, 0, len(slices))
	for _, s := range slices {
		blocks = append(blocks, BlockFromSafeSlice(s))
	}
	return blocks
}

func TestFromIOReaderFullRead(t *testing.T) {
	r := FromIOReader{bytes.NewBufferString("foobar")}
	dsts := makeBlocks(make([]byte, 3), make([]byte, 3))
	n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts))
	if wantN := uint64(6); n != wantN || err != nil {
		t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
	}
	for i, want := range [][]byte{[]byte("foo"), []byte("bar")} {
		if got := dsts[i].ToSlice(); !bytes.Equal(got, want) {
			t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want)
		}
	}
}

// eofHidingReader wraps an io.Reader and converts io.EOF into a nil error,
// so the end of data surfaces only as a short read.
type eofHidingReader struct {
	Reader io.Reader
}

func (r eofHidingReader) Read(dst []byte) (int, error) {
	n, err := r.Reader.Read(dst)
	if err == io.EOF {
		return n, nil
	}
	return n, err
}

func TestFromIOReaderPartialRead(t *testing.T) {
	r := FromIOReader{eofHidingReader{bytes.NewBufferString("foob")}}
	dsts := makeBlocks(make([]byte, 3), make([]byte, 3))
	n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts))
	// FromIOReader should stop after the eofHidingReader returns (1, nil)
	// for a 3-byte read.
	if wantN := uint64(4); n != wantN || err != nil {
		t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
	}
	// Only the first byte of the second block is filled; the rest stays zero.
	for i, want := range [][]byte{[]byte("foo"), []byte("b\x00\x00")} {
		if got := dsts[i].ToSlice(); !bytes.Equal(got, want) {
			t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want)
		}
	}
}

// singleByteReader delegates to an io.Reader but never returns more than one
// byte per Read call.
type singleByteReader struct {
	Reader io.Reader
}

func (r singleByteReader) Read(dst []byte) (int, error) {
	if len(dst) == 0 {
		return r.Reader.Read(dst)
	}
	return r.Reader.Read(dst[:1])
}

func TestSingleByteReader(t *testing.T) {
	r := FromIOReader{singleByteReader{bytes.NewBufferString("foobar")}}
	dsts := makeBlocks(make([]byte, 3), make([]byte, 3))
	n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts))
	// FromIOReader should stop after the singleByteReader returns (1, nil)
	// for a 3-byte read.
	if wantN := uint64(1); n != wantN || err != nil {
		t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
	}
	for i, want := range [][]byte{[]byte("f\x00\x00"), []byte("\x00\x00\x00")} {
		if got := dsts[i].ToSlice(); !bytes.Equal(got, want) {
			t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want)
		}
	}
}

func TestReadFullToBlocks(t *testing.T) {
	r := FromIOReader{singleByteReader{bytes.NewBufferString("foobar")}}
	dsts := makeBlocks(make([]byte, 3), make([]byte, 3))
	n, err := ReadFullToBlocks(r, BlockSeqFromSlice(dsts))
	// ReadFullToBlocks should call into FromIOReader => singleByteReader
	// repeatedly until dsts is exhausted.
	if wantN := uint64(6); n != wantN || err != nil {
		t.Errorf("ReadFullToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
	}
	for i, want := range [][]byte{[]byte("foo"), []byte("bar")} {
		if got := dsts[i].ToSlice(); !bytes.Equal(got, want) {
			t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want)
		}
	}
}

func TestFromIOWriterFullWrite(t *testing.T) {
	srcs := makeBlocks([]byte("foo"), []byte("bar"))
	var dst bytes.Buffer
	w := FromIOWriter{&dst}
	n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs))
	if wantN := uint64(6); n != wantN || err != nil {
		t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
	}
	if got, want := dst.Bytes(), []byte("foobar"); !bytes.Equal(got, want) {
		t.Errorf("dst: got %q, wanted %q", got, want)
	}
}

// limitedWriter delegates to an io.Writer but stops accepting data after
// Limit total bytes; Done tracks how many have been written so far.
type limitedWriter struct {
	Writer io.Writer
	Done   int
	Limit  int
}

func (w *limitedWriter) Write(src []byte) (int, error) {
	count := len(src)
	if count > (w.Limit - w.Done) {
		count = w.Limit - w.Done
	}
	n, err := w.Writer.Write(src[:count])
	w.Done += n
	return n, err
}

func TestFromIOWriterPartialWrite(t *testing.T) {
	srcs := makeBlocks([]byte("foo"), []byte("bar"))
	var dst bytes.Buffer
	w := FromIOWriter{&limitedWriter{&dst, 0, 4}}
	n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs))
	// FromIOWriter should stop after the limitedWriter returns (1, nil) for a
	// 3-byte write.
	if wantN := uint64(4); n != wantN || err != nil {
		t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
	}
	if got, want := dst.Bytes(), []byte("foob"); !bytes.Equal(got, want) {
		t.Errorf("dst: got %q, wanted %q", got, want)
	}
}

// singleByteWriter delegates to an io.Writer but never accepts more than one
// byte per Write call.
type singleByteWriter struct {
	Writer io.Writer
}

func (w singleByteWriter) Write(src []byte) (int, error) {
	if len(src) == 0 {
		return w.Writer.Write(src)
	}
	return w.Writer.Write(src[:1])
}

func TestSingleByteWriter(t *testing.T) {
	srcs := makeBlocks([]byte("foo"), []byte("bar"))
	var dst bytes.Buffer
	w := FromIOWriter{singleByteWriter{&dst}}
	n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs))
	// FromIOWriter should stop after the singleByteWriter returns (1, nil)
	// for a 3-byte write.
	if wantN := uint64(1); n != wantN || err != nil {
		t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
	}
	if got, want := dst.Bytes(), []byte("f"); !bytes.Equal(got, want) {
		t.Errorf("dst: got %q, wanted %q", got, want)
	}
}

func TestWriteFullToBlocks(t *testing.T) {
	srcs := makeBlocks([]byte("foo"), []byte("bar"))
	var dst bytes.Buffer
	w := FromIOWriter{singleByteWriter{&dst}}
	n, err := WriteFullFromBlocks(w, BlockSeqFromSlice(srcs))
	// WriteFullToBlocks should call into FromIOWriter => singleByteWriter
	// repeatedly until srcs is exhausted.
	if wantN := uint64(6); n != wantN || err != nil {
		t.Errorf("WriteFullFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
	}
	if got, want := dst.Bytes(), []byte("foobar"); !bytes.Equal(got, want) {
		t.Errorf("dst: got %q, wanted %q", got, want)
	}
}
// Copyright 2015 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. See the AUTHORS file // for names of contributors. // // Author: Peter Mattis (peter@cockroachlabs.com) package cli import ( "flag" "go/build" "strings" "testing" "github.com/cockroachdb/cockroach/util/leaktest" ) func TestStdFlagToPflag(t *testing.T) { defer leaktest.AfterTest(t) cf := cockroachCmd.PersistentFlags() flag.VisitAll(func(f *flag.Flag) { if strings.HasPrefix(f.Name, "test.") { return } n := normalizeStdFlagName(f.Name) if pf := cf.Lookup(n); pf == nil { t.Errorf("unable to find \"%s\"", n) } }) } func TestNoLinkTesting(t *testing.T) { defer leaktest.AfterTest(t) if build.Default.GOPATH == "" { t.Skip("GOPATH isn't set") } imports := make(map[string]struct{}) var addImports func(string) addImports = func(root string) { pkg, err := build.Import(root, build.Default.GOPATH, 0) if err != nil { t.Fatal(err) } for _, imp := range pkg.Imports { // https: //github.com/golang/tools/blob/master/refactor/importgraph/graph.go#L115 if imp == "C" { continue // "C" is fake } if _, ok := imports[imp]; !ok { imports[imp] = struct{}{} addImports(imp) } } } addImports("github.com/cockroachdb/cockroach") for _, forbidden := range []string{ "testing", "github.com/cockroachdb/cockroach/security/securitytest", } { if _, ok := imports[forbidden]; ok { t.Errorf("%s is included in the main cockroach binary!", forbidden) } } }
package currency

import (
	"bufio"
	"encoding/csv"
	"os"
	"strings"

	"github.com/defaulteg/api/modules/core"
	"github.com/defaulteg/api/utils"
)

// FetchBase downloads the base-currency CSV archive, unzips it, parses the
// two-record CSV (a header row of currency codes and a row of rate values)
// and pushes the resulting rates to the database.
func FetchBase() error {
	// Get id of elements from database for further push to it
	currencyMap, err := core.GetElementIds(core.BaseCurrencyTypeId, core.QueryWhere, "")
	if err != nil {
		return err
	}

	// Download csv rates data file
	if err := utils.DownloadFile(core.PathToSaveCurrencyZipCsv, core.CurrencyDownloadUrl); err != nil {
		return err
	}

	// Unzip csv file
	if err := utils.UnzipFile(core.PathToSaveCurrencyZipCsv, core.PathToUnzipCurrencyCsv); err != nil {
		return err
	}

	// Open csv
	f, err := os.Open(core.PathToCurrencyCsv)
	if err != nil {
		return err
	}
	// BUG FIX: the file was previously never closed.
	defer f.Close()

	reader := csv.NewReader(bufio.NewReader(f))

	// Read the two records: currency codes, then rate values.
	// BUG FIX: read errors were previously discarded, which could lead to
	// indexing nil slices below.
	currencies, err := reader.Read()
	if err != nil {
		return err
	}
	values, err := reader.Read()
	if err != nil {
		return err
	}
	// Guard against a short value row so the parallel indexing below
	// cannot panic.
	if len(values) < len(currencies) {
		currencies = currencies[:len(values)]
	}

	// Append all elements from records to rate slice. Zero element is date.
	// NOTE(review): the last column is intentionally skipped, presumably a
	// trailing empty field in the source CSV — verify against the feed.
	rates := make([]core.Selector, 0, len(currencies))
	var rate core.Selector
	for i := 1; i < len(currencies)-1; i++ {
		rate.Name = strings.TrimSpace(currencies[i])
		rate.Rate = strings.TrimSpace(values[i])
		rate.ElementId = currencyMap[rate.Name]
		rates = append(rates, rate)
	}

	// Make pseudo sources slice with selector slice of currency rates for
	// pushing into database.
	var s core.Source
	s.Selectors = rates
	s.Site = "csv"
	sources := []core.Source{s}

	// Push to database
	return core.PushToDatabase(sources, core.BaseCurrencyTable)
}
// Package JsRedis provides hash-table helpers on top of a redigo connection
// pool, with the Redis logical DB (0-15) selectable per call.
package JsRedis

import (
	. "JsGo/JsLogger"
	"encoding/json"
	"errors"
	"github.com/gomodule/redigo/redis"
)

// Redis_hdbget reads the JSON value stored at field k of hash t in logical
// DB db and unmarshals it into v (v must be a pointer).
func Redis_hdbget(db int8, t, k string, v interface{}) error {
	c := g_pool.Get()
	if c == nil {
		Error("connect is nil")
		return errors.New("connect is nil")
	}
	// BUG FIX: pooled connections must be Closed to be returned to the pool;
	// previously every call leaked its connection.
	defer c.Close()
	c.Send("SELECT", db)
	c.Send("HGET", t, k)
	c.Flush()
	c.Receive() // discard the reply to SELECT
	b, e := redis.Bytes(c.Receive())
	if e != nil {
		// BUG FIX: %d for the integer db argument (was %s, which printed
		// %!s(int8=N)); also dropped a copy-pasted "hdbsize" token.
		Error("hdbget DB【%d】 Table【%s】,Key【%s】,error:%v", db, t, k, e.Error())
		return e
	}
	e = json.Unmarshal(b, v)
	if e != nil {
		Error(e.Error())
		return e
	}
	return nil
}

// Redis_hdbset JSON-encodes v and stores it at field k of hash t in DB db.
func Redis_hdbset(db int8, t, k string, v interface{}) error {
	c := g_pool.Get()
	if c == nil {
		Error("connect is nil")
		return errors.New("connect is nil")
	}
	defer c.Close() // BUG FIX: return the connection to the pool
	b, e := json.Marshal(v)
	if e != nil {
		Error(e.Error())
		return e
	}
	c.Send("SELECT", db)
	c.Send("HSET", t, k, b)
	c.Flush()
	c.Receive() // discard the reply to SELECT
	_, e = c.Receive()
	if e != nil {
		Error("hdbset DB【%d】 Table【%s】,Key【%s】,error:%v", db, t, k, e.Error())
		return e
	}
	return nil
}

// Redis_hdbexists reports whether field k exists in hash t of DB db.
func Redis_hdbexists(db int8, t, k string) (bool, error) {
	c := g_pool.Get()
	if c == nil {
		Error("connect is nil")
		return false, errors.New("connect is nil")
	}
	defer c.Close() // BUG FIX: return the connection to the pool
	c.Send("SELECT", db)
	c.Send("HEXISTS", t, k)
	c.Flush()
	c.Receive() // discard the reply to SELECT
	b, e := redis.Bool(c.Receive())
	if e != nil {
		Error("hdbexists DB【%d】 Table【%s】,Key【%s】,error:%v", db, t, k, e.Error())
		return false, e
	}
	return b, nil
}

// Redis_hdbsize returns the number of fields in hash t of DB db
// (-1 on error).
func Redis_hdbsize(db int8, t string) (int64, error) {
	c := g_pool.Get()
	if c == nil {
		Error("connect is nil")
		return -1, errors.New("connect is nil")
	}
	defer c.Close() // BUG FIX: return the connection to the pool
	c.Send("SELECT", db)
	c.Send("HLEN", t)
	c.Flush()
	c.Receive() // discard the reply to SELECT
	b, e := redis.Int64(c.Receive())
	if e != nil {
		Error("hdbsize DB【%d】 Table【%s】,error:%v", db, t, e.Error())
		return -1, e
	}
	return b, nil
}

// Redis_hdbkeys returns all field names of hash t in DB db.
func Redis_hdbkeys(db int8, t string) ([]string, error) {
	c := g_pool.Get()
	if c == nil {
		Error("connect is nil")
		return nil, errors.New("connect is nil")
	}
	defer c.Close() // BUG FIX: return the connection to the pool
	c.Send("SELECT", db)
	c.Send("HKEYS", t)
	c.Flush()
	c.Receive() // discard the reply to SELECT
	b, e := redis.Strings(c.Receive())
	if e != nil {
		Error("hdbkeys DB【%d】 Table【%s】,error:%v", db, t, e.Error())
		return nil, e
	}
	return b, nil
}

// Redis_hdbdel removes field k from hash t in DB db.
func Redis_hdbdel(db int8, t, k string) error {
	c := g_pool.Get()
	if c == nil {
		Error("connect is nil")
		return errors.New("connect is nil")
	}
	defer c.Close() // BUG FIX: return the connection to the pool
	c.Send("SELECT", db)
	c.Send("HDEL", t, k)
	c.Flush()
	c.Receive() // discard the reply to SELECT
	_, e := c.Receive()
	if e != nil {
		Error("hdbdel DB【%d】 Table【%s】,error:%v", db, t, e.Error())
		return e
	}
	return nil
}

// Redis_hdbmset JSON-encodes every value in data and stores each at its key
// in hash t of DB db, pipelining all HSETs in one round trip.
func Redis_hdbmset(db int8, t string, data map[string]interface{}) error {
	c := g_pool.Get()
	if c == nil {
		Error("connect is nil")
		return errors.New("connect is nil")
	}
	defer c.Close() // BUG FIX: return the connection to the pool
	c.Send("SELECT", db)
	for k, v := range data {
		b, e := json.Marshal(v)
		if e != nil {
			Error(e.Error())
			return e
		}
		c.Send("HSET", t, k, b)
	}
	c.Flush()
	c.Receive() // discard the reply to SELECT
	for k := range data {
		if _, e := c.Receive(); e != nil {
			Error("k[%s],%s", k, e.Error())
			return e
		}
	}
	return nil
}

// Redis_hdbmget fetches the fields named by the keys of *ret from hash t in
// DB db and unmarshals each value into the corresponding map entry.
// NOTE(review): json.Unmarshal needs a pointer target, so the map values are
// presumably pre-populated with pointers by the caller — verify call sites.
func Redis_hdbmget(db int8, t string, ret *map[string]interface{}) error {
	c := g_pool.Get()
	if c == nil {
		Error("connect is nil")
		return errors.New("connect is nil")
	}
	defer c.Close() // BUG FIX: return the connection to the pool
	c.Send("SELECT", db)
	// Remember the pipelining order: map iteration order is random, so the
	// replies must be matched against the same key sequence.
	keys := make([]string, 0, len(*ret))
	for k := range *ret {
		c.Send("HGET", t, k)
		keys = append(keys, k)
	}
	c.Flush()
	c.Receive() // discard the reply to SELECT
	for _, k := range keys {
		b, e := redis.Bytes(c.Receive())
		if e != nil {
			Error(e.Error())
			return e
		}
		if e = json.Unmarshal(b, (*ret)[k]); e != nil {
			Error(e.Error())
			return e
		}
	}
	return nil
}
package lowlevel /* #include <stdlib.h> #include <fmod.h> */ import "C" import ( "runtime" "unsafe" ) // The main object for the FMOD Low Level System. // When using FMOD Studio, this system object will be automatically instantiated as part of `StudioSystem.Initialize()`. type System struct { cptr *C.FMOD_SYSTEM } /* 'System' API */ // FMOD System creation function. // This must be called to create an FMOD System object before you can do anything else. // Use this function to create 1, or multiple instances of FMOD System objects. func SystemCreate() (*System, error) { var s System res := C.FMOD_System_Create(&s.cptr) runtime.SetFinalizer(&s, (*System).Release) return &s, errs[res] } // Closes and frees a system object and its resources. // This function also calls "System.Close()", so calling close before this function is not necessary. func (s *System) Release() error { res := C.FMOD_System_Release(s.cptr) return errs[res] } /* Setup functions. */ // This function selects the output mode for the platform. // This is for selecting different OS specific APIs which might have different features. // See "OutputType" for different output types you can select. func (s *System) SetOutput(output OutputType) error { res := C.FMOD_System_SetOutput(s.cptr, C.FMOD_OUTPUTTYPE(output)) return errs[res] } // Retrieves the current output system FMOD is using to address the hardware. func (s *System) Output() (OutputType, error) { var output C.FMOD_OUTPUTTYPE res := C.FMOD_System_GetOutput(s.cptr, &output) return OutputType(output), errs[res] } // Retrieves the number of soundcard devices on the machine, specific to the output mode set with "System.SetOutput". // If "System.SetOutput" is not called it will return the number of drivers available for the default output type. Use this for enumerating sound devices. // Use "System.DriverInfo()" to get the device's name. 
func (s *System) NumDrivers() (int, error) { var numdrivers C.int res := C.FMOD_System_GetNumDrivers(s.cptr, &numdrivers) return int(numdrivers), errs[res] } // Retrieves identification information about a sound device specified by its index, and specific to the output mode set with "System.SetOutput". func (s *System) DriverInfo(id int, name string) (Guid, int, SpeakerMode, int, error) { var guid C.FMOD_GUID var systemrate C.int var speakermode C.FMOD_SPEAKERMODE var speakermodechannels C.int cname := C.CString(name) defer C.free(unsafe.Pointer(cname)) namelen := len(name) res := C.FMOD_System_GetDriverInfo(s.cptr, C.int(id), cname, C.int(namelen), &guid, &systemrate, &speakermode, &speakermodechannels) return Guid(guid), int(systemrate), SpeakerMode(speakermode), int(speakermodechannels), errs[res] } // Selects a soundcard driver. // This function is used when an output mode has enumerated more than one output device, and you need to select between them. // If this function is called after FMOD is already initialized with "System.Init", the current driver will be shutdown and the newly selected driver will be initialized / started. // The driver that you wish to change to must support the current output format, sample rate, and number of channels. // If it does not, FMOD_ERR_OUTPUT_INIT is returned and driver state is cleared. // You should now call "System.SetDriver" with your original driver index to restore driver state (providing that driver is still available / connected) or make another selection. // driver: Driver number to select. 0 = primary or main sound device as selected by the operating system settings. // Use "System.NumDrivers" and "System.DriverInfo" to determine available devices. func (s *System) SetDriver(driver int) error { res := C.FMOD_System_SetDriver(s.cptr, C.int(driver)) return errs[res] } // Returns the currently selected driver number. 
// Drivers are enumerated when selecting a driver with "System.SetDriver" or other driver related functions such as "System.NumDrivers" or "System.DriverInfo".
// ID. 0 = primary or main sound device as selected by the operating system settings.
func (s *System) Driver() (int, error) {
	var driver C.int
	res := C.FMOD_System_GetDriver(s.cptr, &driver)
	return int(driver), errs[res]
}

// Sets the maximum number of software mixed channels possible.
//
// numsoftwarechannels: The maximum number of mixable voices to be allocated by FMOD, default = 64.
//
// This function cannot be called after FMOD is already activated, it must be called before "System.Init", or after "System.Close".
func (s *System) SetSoftwareChannels(numsoftwarechannels int) error {
	res := C.FMOD_System_SetSoftwareChannels(s.cptr, C.int(numsoftwarechannels))
	return errs[res]
}

// Retrieves the maximum number of software mixed channels possible.
func (s *System) SoftwareChannels() (int, error) {
	var numsoftwarechannels C.int
	res := C.FMOD_System_GetSoftwareChannels(s.cptr, &numsoftwarechannels)
	return int(numsoftwarechannels), errs[res]
}

// Sets the output format for the software mixer.
// If loading Studio banks, this must be called with speakermode corresponding to the project's output format if there is a possibility of the output audio device not matching the project's format.
// Any differences between the project format and the system's speakermode will cause the mix to sound wrong.
// If not loading Studio banks, do not call this unless you explicitly want to change a setting from the default.
// FMOD will default to the speaker mode and sample rate that the OS / output prefers.
//
// samplerate: Sample rate in Hz, that the software mixer will run at. Specify values between 8000 and 192000.
//
// speakermode: Speaker setup for the software mixer.
//
// numrawspeakers: Number of output channels / speakers to initialize the sound card to in FMOD_SPEAKERMODE_RAW mode. Optional. Specify 0 to ignore. Maximum of FMOD_MAX_CHANNEL_WIDTH.
//
// This function cannot be called after FMOD is already activated, it must be called before "System.Init", or after "System.Close".
func (s *System) SetSoftwareFormat(samplerate int, speakermode SpeakerMode, numrawspeakers int) error {
	res := C.FMOD_System_SetSoftwareFormat(s.cptr, C.int(samplerate), C.FMOD_SPEAKERMODE(speakermode), C.int(numrawspeakers))
	return errs[res]
}

// Retrieves the output format for the software mixer.
// Note that the settings returned here may differ from the settings provided by the user with "System.SetSoftwareFormat".
// This is because the driver may require certain settings to initialize.
func (s *System) SoftwareFormat() (int, SpeakerMode, int, error) {
	var samplerate C.int
	var speakermode C.FMOD_SPEAKERMODE
	var numrawspeakers C.int
	res := C.FMOD_System_GetSoftwareFormat(s.cptr, &samplerate, &speakermode, &numrawspeakers)
	return int(samplerate), SpeakerMode(speakermode), int(numrawspeakers), errs[res]
}

// Sets the FMOD internal mixing buffer size. This function is used if you need to control mixer latency or granularity.
// Smaller buffersizes lead to smaller latency, but can lead to stuttering/skipping/unstable sound on slower machines or soundcards with bad drivers.
//
// bufferlength: The mixer engine block size in samples. Use this to adjust mixer update granularity. Default = 1024.
// (milliseconds = 1024 at 48khz = 1024 / 48000 * 1000 = 21.33ms). This means the mixer updates every 21.33ms.
//
// numbuffers: The mixer engine number of buffers used. Use this to adjust mixer latency. Default = 4.
// To get the total buffersize multiply the bufferlength by the numbuffers value. By default this would be 4*1024.
//
// The FMOD software mixer mixes to a ringbuffer.
// The size of this ringbuffer is determined here.
// It mixes a block of sound data every 'bufferlength' number of samples, and there are 'numbuffers' number of these blocks that make up the entire ringbuffer.
// Adjusting these values can lead to extremely low latency performance (smaller values), or greater stability in sound output (larger values).
//
// Warning! The 'buffersize' is generally best left alone. Making the granularity smaller will just increase CPU usage (cache misses and DSP network overhead).
// Making it larger affects how often you hear commands update such as volume/pitch/pan changes.
// Anything above 20ms will be noticable and sound parameter changes will be obvious instead of smooth.
//
// FMOD chooses the most optimal size by default for best stability, depending on the output type, and if the drivers are emulated or not (for example DirectSound is emulated using waveOut on NT).
// It is not recommended changing this value unless you really need to. You may get worse performance than the default settings chosen by FMOD.
// To convert from milliseconds to 'samples', simply multiply the value in milliseconds by the sample rate of the output (ie 48000 if that is what it is set to), then divide by 1000.
//
// This function cannot be called after FMOD is already activated, it must be called before "System.Init", or after "System.Close".
func (s *System) SetDSPBufferSize(bufferlength uint32, numbuffers int) error {
	res := C.FMOD_System_SetDSPBufferSize(s.cptr, C.uint(bufferlength), C.int(numbuffers))
	return errs[res]
}

// Retrieves the buffer size settings for the FMOD software mixing engine.
// See documentation on "System.SetDSPBufferSize" for more information about these values.
func (s *System) DSPBufferSize() (uint32, int, error) {
	var bufferlength C.uint
	var numbuffers C.int
	res := C.FMOD_System_GetDSPBufferSize(s.cptr, &bufferlength, &numbuffers)
	return uint32(bufferlength), int(numbuffers), errs[res]
}

// TODO: add more docs
// NOTE: Not implement yet
// Specify user callbacks for FMOD's internal file manipulation functions. This function is useful for replacing FMOD's file system with a game system's own file reading API.
func (s *System) SetFileSystem(useropen C.FMOD_FILE_OPEN_CALLBACK, userclose C.FMOD_FILE_CLOSE_CALLBACK, userread C.FMOD_FILE_READ_CALLBACK, userseek C.FMOD_FILE_SEEK_CALLBACK, userasyncread C.FMOD_FILE_ASYNCREAD_CALLBACK, userasynccancel C.FMOD_FILE_ASYNCCANCEL_CALLBACK, blockalign C.int) error {
	// C prototype kept as a reference for the eventual implementation:
	//FMOD_RESULT F_API FMOD_System_SetFileSystem (FMOD_SYSTEM *system, FMOD_FILE_OPEN_CALLBACK useropen, FMOD_FILE_CLOSE_CALLBACK userclose, FMOD_FILE_READ_CALLBACK userread, FMOD_FILE_SEEK_CALLBACK userseek, FMOD_FILE_ASYNCREAD_CALLBACK userasyncread, FMOD_FILE_ASYNCCANCEL_CALLBACK userasynccancel, int blockalign);
	return ErrNoImpl
}

// TODO: add more docs
// NOTE: Not implement yet
// Function to allow a user to 'piggyback' on FMOD's file reading routines.
// This allows users to capture data as FMOD reads it, which may be useful for ripping the raw data that FMOD reads for hard to support sources (for example internet streams).
//
// NOTE! Do not use this to 'override' FMOD's file system! That is what setFileSystem is for.
// This function is purely for 'snooping' and letting FMOD do its own file access, but if you want to capture what FMOD is reading you can do it with this function.
func (s *System) AttachFileSystem(useropen C.FMOD_FILE_OPEN_CALLBACK, userclose C.FMOD_FILE_CLOSE_CALLBACK, userread C.FMOD_FILE_READ_CALLBACK, userseek C.FMOD_FILE_SEEK_CALLBACK) error {
	// C prototype kept as a reference for the eventual implementation:
	//FMOD_RESULT F_API FMOD_System_AttachFileSystem (FMOD_SYSTEM *system, FMOD_FILE_OPEN_CALLBACK useropen, FMOD_FILE_CLOSE_CALLBACK userclose, FMOD_FILE_READ_CALLBACK userread, FMOD_FILE_SEEK_CALLBACK userseek);
	return ErrNoImpl
}

// Sets advanced features like configuring memory and cpu usage for FMOD_CREATECOMPRESSEDSAMPLE usage.
func (s *System) SetAdvancedSettings(settings *AdvancedSettings) error { return ErrNoImpl var csettings *C.FMOD_ADVANCEDSETTINGS = settings.toC() res := C.FMOD_System_SetAdvancedSettings(s.cptr, csettings) return errs[res] } // Retrieves the advanced settings value set for the system object. func (s *System) AdvancedSettings() (*AdvancedSettings, error) { return nil, ErrNoImpl var settings C.FMOD_ADVANCEDSETTINGS settings.cbSize = C.int(unsafe.Sizeof(settings)) res := C.FMOD_System_GetAdvancedSettings(s.cptr, &settings) /*ॐ*/ as := new(AdvancedSettings) as.fromC(settings) return as, errs[res] } // TODO: add more docs // NOTE: Not implement yet // Sets a system callback to catch various fatal or informational events. func (s *System) SetCallback(callback C.FMOD_SYSTEM_CALLBACK, callbackmask C.FMOD_SYSTEM_CALLBACK_TYPE) error { //FMOD_RESULT F_API FMOD_System_SetCallback (FMOD_SYSTEM *system, FMOD_SYSTEM_CALLBACK callback, FMOD_SYSTEM_CALLBACK_TYPE callbackmask); return ErrNoImpl } /* Plug-in support. */ // NOTE: Not implement yet // Specify a base search path for plugins so they can be placed somewhere else than the directory of the main executable. func (s *System) SetPluginPath(path *C.char) error { //FMOD_RESULT F_API FMOD_System_SetPluginPath (FMOD_SYSTEM *system, const char *path); return ErrNoImpl } // NOTE: Not implement yet // Loads an FMOD plugin. This could be a DSP, file format or output plugin. func (s *System) LoadPlugin(filename *C.char, handle *C.uint, priority C.uint) error { //FMOD_RESULT F_API FMOD_System_LoadPlugin (FMOD_SYSTEM *system, const char *filename, unsigned int *handle, unsigned int priority); return ErrNoImpl } // NOTE: Not implement yet // Unloads a plugin from memory. func (s *System) UnloadPlugin(handle C.uint) error { //FMOD_RESULT F_API FMOD_System_UnloadPlugin (FMOD_SYSTEM *system, unsigned int handle); return ErrNoImpl } // Retrieves the number of available plugins loaded into FMOD at the current time. 
// plugintype: Plugin type such as PLUGINTYPE_OUTPUT, PLUGINTYPE_CODEC or PLUGINTYPE_DSP.
func (s *System) NumPlugins(plugintype PluginType) (int, error) {
	var numplugins C.int
	res := C.FMOD_System_GetNumPlugins(s.cptr, C.FMOD_PLUGINTYPE(plugintype), &numplugins)
	return int(numplugins), errs[res]
}

// NOTE: Not implement yet
// Retrieves the handle of a plugin based on its type and relative index. Use "System.NumPlugins" to enumerate plugins.
func (s *System) PluginHandle(plugintype C.FMOD_PLUGINTYPE, index C.int, handle *C.uint) error {
	//FMOD_RESULT F_API FMOD_System_GetPluginHandle (FMOD_SYSTEM *system, FMOD_PLUGINTYPE plugintype, int index, unsigned int *handle);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Retrieves information to display for the selected plugin.
func (s *System) PluginInfo(handle C.uint, plugintype *C.FMOD_PLUGINTYPE, name *C.char, version *C.int) error {
	//FMOD_RESULT F_API FMOD_System_GetPluginInfo (FMOD_SYSTEM *system, unsigned int handle, FMOD_PLUGINTYPE *plugintype, char *name, int namelen, unsigned int *version);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Selects an output type based on the enumerated list of outputs including FMOD and 3rd party output plugins.
func (s *System) SetOutputByPlugin(handle C.uint) error {
	//FMOD_RESULT F_API FMOD_System_SetOutputByPlugin (FMOD_SYSTEM *system, unsigned int handle);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Returns the currently selected output as an id in the list of output plugins.
// This function can be called after FMOD is already activated. You can use it to change the output mode at runtime.
// If SYSTEM_CALLBACK_DEVICELISTCHANGED is specified use the setOutput call to change to "OUTPUTTYPE_NOSOUND" if no more sound card drivers exist.
func (s *System) OutputByPlugin(handle *C.uint) error {
	//FMOD_RESULT F_API FMOD_System_GetOutputByPlugin (FMOD_SYSTEM *system, unsigned int *handle);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Creates a DSP unit object which is either built in or loaded as a plugin, to be inserted into a DSP network, for the purposes of sound filtering or sound generation.
// This function creates a DSP unit that can be enumerated by using "System.NumPlugins" and "System.PluginInfo".
//
// A DSP unit can generate or filter incoming data.
// To be active, a unit must be inserted into the FMOD DSP network to be heard.
// Use functions such as "ChannelGroup.AddDSP", "Channel.AddDSP" or "DSP.AddInput" to do this.
func (s *System) CreateDSPByPlugin(handle C.uint, dsp **C.FMOD_DSP) error {
	//FMOD_RESULT F_API FMOD_System_CreateDSPByPlugin (FMOD_SYSTEM *system, unsigned int handle, FMOD_DSP **dsp);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Retrieve the description structure for a pre-existing DSP plugin.
func (s *System) DSPInfoByPlugin(handle C.uint, description **C.FMOD_DSP_DESCRIPTION) error {
	//FMOD_RESULT F_API FMOD_System_GetDSPInfoByPlugin (FMOD_SYSTEM *system, unsigned int handle, const FMOD_DSP_DESCRIPTION **description);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Creates a file format codec to be used by FMOD for opening custom file types.
func (s *System) RegisterCodec(description *C.FMOD_CODEC_DESCRIPTION, handle *C.uint, priority C.uint) error {
	//FMOD_RESULT F_API FMOD_System_RegisterCodec (FMOD_SYSTEM *system, FMOD_CODEC_DESCRIPTION *description, unsigned int *handle, unsigned int priority);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Register a user-defined DSP effect for use with the System.
// This function allows you to register statically-linked DSP effects.
// Once registered, you can create instances of the DSP effect by using System::createDSPByPlugin.
func (s *System) RegisterDSP(description *C.FMOD_DSP_DESCRIPTION, handle *C.uint) error {
	//FMOD_RESULT F_API FMOD_System_RegisterDSP (FMOD_SYSTEM *system, const FMOD_DSP_DESCRIPTION *description, unsigned int *handle);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Register a user-defined output mode for use with the System.
// This function allows you to register statically-linked output modes.
// Once registered, you can use the output mode with "System.SetOutputByPlugin".
func (s *System) RegisterOutput(description *C.FMOD_OUTPUT_DESCRIPTION, handle *C.uint) error {
	//FMOD_RESULT F_API FMOD_System_RegisterOutput (FMOD_SYSTEM *system, const FMOD_OUTPUT_DESCRIPTION *description, unsigned int *handle);
	return ErrNoImpl
}

/*
   Init/Close.
*/

// Initializes the system object, and the sound device. This has to be called at the start of the user's program.
// You must create a system object with "SystemCreate".
//
// maxchannels: The maximum number of channels to be used in FMOD.
// They are also called 'virtual channels' as you can play as many of these as you want, even if you only have a small number of software voices. See remarks for more.
//
// flags: See "InitFlags". This can be a selection of flags bitwise OR'ed together to change the behaviour of FMOD at initialization time.
//
// extradriverdata: Driver specific data that can be passed to the output plugin.
// For example the filename for the wav writer plugin. See FMOD_OUTPUTTYPE for what each output mode might take here. Optional. Specify 0 or NULL to ignore.
//
// Virtual channels.
// These types of voices are the ones you work with using the Channel API.
// The advantage of virtual channels are, unlike older versions of FMOD, you can now play as many sounds as you like without fear of ever running out of voices, or playsound failing.
// You can also avoid 'channel stealing' if you specify enough virtual voices.
//
// As an example, you can play 1000 sounds at once, even on a 32 channel soundcard.
// // FMOD will only play the most important/closest/loudest (determined by volume/distance/geometry and priority settings) voices, and the other 968 voices will be virtualized without expense to the CPU. // The voice's cursor positions are updated. // // When the priority of sounds change or emulated sounds get louder than audible ones, they will swap the actual voice resource over and play the voice from its correct position in time as it should be heard. // // What this means is you can play all 1000 sounds, if they are scattered around the game world, and as you move around the world you will hear the closest or most important 32, // and they will automatically swap in and out as you move. // // Currently the maximum channel limit is 4093. func (s *System) Init(maxchannels int, flags InitFlags, extradriverdata interface{}) error { res := C.FMOD_System_Init(s.cptr, C.int(maxchannels), C.FMOD_INITFLAGS(flags), unsafe.Pointer(uintptr(extradriverdata.(int)))) return errs[res] } // Closes the system object without freeing the object's memory, so the system handle will still be valid. // Closing the output renders objects created with this system object invalid. // Make sure any sounds, channelgroups, geometry and dsp objects are released before closing the system object. func (s *System) Close() error { res := C.FMOD_System_Close(s.cptr) return errs[res] } /* General post-init system functions. */ // TODO: add more docs // Updates the FMOD system. This should be called once per 'game' tick, or once per frame in your application. func (s *System) Update() error { res := C.FMOD_System_Update(s.cptr) return errs[res] } // TODO: add more docs // This function allows the user to specify the position of their actual physical speaker to account for non standard setups. // It also allows the user to disable speakers from 3D consideration in a game. // The funtion is for describing the 'real world' speaker placement to provide a more natural panning solution for 3d sound. 
// Graphical configuration screens in an application could draw icons for speaker placement that the user could position at their will.
func (s *System) SetSpeakerPosition(speaker Speaker, x, y float32, active bool) error {
	// getBool presumably converts the Go bool to a C.FMOD_BOOL for the
	// native call — it is defined elsewhere in the package.
	res := C.FMOD_System_SetSpeakerPosition(s.cptr, C.FMOD_SPEAKER(speaker), C.float(x), C.float(y), getBool(active))
	return errs[res]
}

// Retrieves the current speaker position information for the selected speaker.
func (s *System) SpeakerPosition(speaker Speaker) (float32, float32, bool, error) {
	var x, y C.float
	var active C.FMOD_BOOL
	res := C.FMOD_System_GetSpeakerPosition(s.cptr, C.FMOD_SPEAKER(speaker), &x, &y, &active)
	return float32(x), float32(y), setBool(active), errs[res]
}

// Sets the internal buffersize for streams opened after this call.
// Larger values will consume more memory, whereas smaller values may cause buffer under-run/starvation/stuttering caused by large delays in disk access (ie netstream),
// or cpu usage in slow machines, or by trying to play too many streams at once.
//
// filebuffersize: Size of stream file buffer. Default is 16384 (TIMEUNIT_RAWBYTES).
//
// filebuffersizetype: Type of unit for stream file buffer size.
// Must be TIMEUNIT_MS, TIMEUNIT_PCM, TIMEUNIT_PCMBYTES or TIMEUNIT_RAWBYTES. Default is TIMEUNIT_RAWBYTES.
//
// Note this function does not affect streams created with OPENUSER, as the buffer size is specified in "System.CreateSound".
// This function does not affect latency of playback. All streams are pre-buffered (unless opened with OPENONLY), so they will always start immediately.
// Seek and Play operations can sometimes cause a reflush of this buffer.
//
// If TIMEUNIT_RAWBYTES is used, the memory allocated is 2 * the size passed in, because fmod allocates a double buffer.
// If TIMEUNIT_MS, TIMEUNIT_PCM or TIMEUNIT_PCMBYTES is used, and the stream is infinite (such as a shoutcast netstream), or VBR,
// then FMOD cannot calculate an accurate compression ratio to work with when the file is opened.
// This means it will then base the buffersize on TIMEUNIT_PCMBYTES, or in other words the number of PCM bytes, but this will be incorrect for some compressed formats.
// Use TIMEUNIT_RAWBYTES for these type (infinite / undetermined length) of streams for more accurate read sizes.
//
// Note to determine the actual memory usage of a stream, including sound buffer and other overhead, use "MemoryGetStats" before and after creating a sound.
// Note that the stream may still stutter if the codec uses a large amount of cpu time, which impacts the smaller, internal 'decode' buffer.
// The decode buffer size is changeable via CREATESOUNDEXINFO.
func (s *System) SetStreamBufferSize(filebuffersize uint32, filebuffersizetype TimeUnit) error {
	res := C.FMOD_System_SetStreamBufferSize(s.cptr, C.uint(filebuffersize), C.FMOD_TIMEUNIT(filebuffersizetype))
	return errs[res]
}

// Returns the current internal buffersize settings for streamable sounds.
func (s *System) StreamBufferSize() (uint32, TimeUnit, error) {
	var filebuffersize C.uint
	var filebuffersizetype C.FMOD_TIMEUNIT
	res := C.FMOD_System_GetStreamBufferSize(s.cptr, &filebuffersize, &filebuffersizetype)
	return uint32(filebuffersize), TimeUnit(filebuffersizetype), errs[res]
}

// Sets the global doppler scale, distance factor and log rolloff scale for all 3D sound in FMOD.
//
// dopplerscale: Scaling factor for doppler shift. Default = 1.0.
//
// distancefactor: Relative distance factor to FMOD's units. Default = 1.0. (1.0 = 1 metre).
//
// rolloffscale: Scaling factor for 3D sound rolloff or attenuation for FMOD_3D_INVERSEROLLOFF based sounds only (which is the default type). Default = 1.0.
//
// The doppler scale is a general scaling factor for how much the pitch varies due to doppler shifting in 3D sound.
// Doppler is the pitch bending effect when a sound comes towards the listener or moves away from it, much like the effect you hear when a train goes past you with its horn sounding.
// With "dopplerscale" you can exaggerate or diminish the effect. FMOD's effective speed of sound at a doppler factor of 1.0 is 340 m/s.
//
// The distance factor is the FMOD 3D engine relative distance factor, compared to 1.0 meters.
// Another way to put it is that it equates to "how many units per meter does your engine have".
// For example, if you are using feet then "scale" would equal 3.28.
//
// Note! This only affects doppler!
// If you keep your min/max distance, custom rolloff curves and positions in scale relative to each other the volume rolloff will not change.
// If you set this, the mindistance of a sound will automatically set itself to this value when it is created in case the user forgets to set the mindistance to match the new distancefactor.
//
// The rolloff scale sets the global attenuation rolloff factor for FMOD_3D_INVERSEROLLOFF based sounds only (which is the default).
// Volume for a sound set to FMOD_3D_INVERSEROLLOFF will scale at mindistance / distance.
// This gives an inverse attenuation of volume as the source gets further away (or closer).
// Setting this value makes the sound drop off faster or slower. The higher the value, the faster volume will attenuate, and conversely the lower the value, the slower it will attenuate.
// For example a rolloff factor of 1 will simulate the real world, whereas a value of 2 will make sounds attenuate 2 times quicker.
//
// Note! "rolloffscale" has no effect when using FMOD_3D_LINEARROLLOFF, FMOD_3D_LINEARSQUAREROLLOFF or FMOD_3D_CUSTOMROLLOFF.
func (s *System) Set3DSettings(dopplerscale, distancefactor, rolloffscale float32) error {
	res := C.FMOD_System_Set3DSettings(s.cptr, C.float(dopplerscale), C.float(distancefactor), C.float(rolloffscale))
	return errs[res]
}

// Retrieves the global doppler scale, distance factor and rolloff scale for all 3D sound in FMOD.
func (s *System) Get3DSettings() (float32, float32, float32, error) {
	var dopplerscale, distancefactor, rolloffscale C.float
	res := C.FMOD_System_Get3DSettings(s.cptr, &dopplerscale, &distancefactor, &rolloffscale)
	return float32(dopplerscale), float32(distancefactor), float32(rolloffscale), errs[res]
}

// Sets the number of 3D 'listeners' in the 3D sound scene. This function is useful mainly for split-screen game purposes.
//
// numlisteners: Number of listeners in the scene. Valid values are from 1 to MAX_LISTENERS inclusive. Default = 1.
//
// If the number of listeners is set to more than 1, then panning and doppler are turned off.
// All sound effects will be mono. FMOD uses a 'closest sound to the listener' method to determine what should be heard in this case.
func (s *System) Set3DNumListeners(numlisteners int) error {
	res := C.FMOD_System_Set3DNumListeners(s.cptr, C.int(numlisteners))
	return errs[res]
}

// Retrieves the number of 3D listeners.
func (s *System) Get3DNumListeners() (int, error) {
	var numlisteners C.int
	res := C.FMOD_System_Get3DNumListeners(s.cptr, &numlisteners)
	return int(numlisteners), errs[res]
}

// This updates the position, velocity and orientation of the specified 3D sound listener.
//
// listener: Listener ID in a multi-listener environment. Specify 0 if there is only 1 listener.
//
// pos: The position of the listener in world space, measured in distance units.
// You can specify 0 or NULL to not update the position.
//
// vel: The velocity of the listener measured in distance units per second.
// You can specify 0 or NULL to not update the velocity of the listener.
//
// forward: The forwards orientation of the listener. This vector must be of unit length and perpendicular to the up vector.
// You can specify 0 or NULL to not update the forwards orientation of the listener.
//
// up: The upwards orientation of the listener. This vector must be of unit length and perpendicular to the forwards vector.
// You can specify 0 or NULL to not update the upwards orientation of the listener.
//
// By default, FMOD uses a left-handed co-ordinate system. This means +X is right, +Y is up, and +Z is forwards.
// To change this to a right-handed coordinate system, use FMOD_INIT_3D_RIGHTHANDED. This means +X is right, +Y is up, and +Z is backwards or towards you.
//
// To map to another coordinate system, flip/negate and exchange these values.
//
// Orientation vectors are expected to be of UNIT length. This means the magnitude of the vector should be 1.0.
//
// A 'distance unit' is specified by "System.Set3DSettings". By default this is set to meters which is a distance scale of 1.0.
//
// Always remember to use units per second, not units per frame as this is a common mistake and will make the doppler effect sound wrong.
//
// For example, Do not just use (pos - lastpos) from the last frame's data for velocity, as this is not correct.
// You need to time compensate it so it is given in units per second.
//
// NOTE(review): this Go signature takes Vector values, not pointers, so the
// C API's "specify 0 or NULL to skip an attribute" option is not expressible
// here — all four attributes are always updated.
func (s *System) Set3DListenerAttributes(listener int, pos, vel, forward, up Vector) error {
	// Copy each Vector into a local C struct so stable addresses can be
	// passed to the native call.
	var cpos C.FMOD_VECTOR = pos.toC()
	var cvel C.FMOD_VECTOR = vel.toC()
	var cforward C.FMOD_VECTOR = forward.toC()
	var cup C.FMOD_VECTOR = up.toC()
	res := C.FMOD_System_Set3DListenerAttributes(s.cptr, C.int(listener), &cpos, &cvel, &cforward, &cup)
	return errs[res]
}

// This retrieves the position, velocity and orientation of the specified 3D sound listener.
func (s *System) Get3DListenerAttributes(listener int) (pos, vel, forward, up Vector, err error) {
	var cpos, cvel, cforward, cup C.FMOD_VECTOR
	res := C.FMOD_System_Get3DListenerAttributes(s.cptr, C.int(listener), &cpos, &cvel, &cforward, &cup)
	err = errs[res]
	// Convert the C structs back into the named Go result values.
	pos.fromC(cpos)
	vel.fromC(cvel)
	forward.fromC(cforward)
	up.fromC(cup)
	return
}

// NOTE: Not implement yet
// When FMOD wants to calculate 3d volume for a channel, this callback can be used to override the internal volume calculation based on distance.
//
// callback: Pointer to a C function of type FMOD_3D_ROLLOFF_CALLBACK, that is used to override the FMOD volume calculation.
// Default is 0 or NULL. Setting the callback to null will return 3d calculation back to FMOD.
func (s *System) Set3DRolloffCallback(callback C.FMOD_3D_ROLLOFF_CALLBACK) error {
	//FMOD_RESULT F_API FMOD_System_Set3DRolloffCallback (FMOD_SYSTEM *system, FMOD_3D_ROLLOFF_CALLBACK callback);
	return ErrNoImpl
}

// Suspend mixer thread and relinquish usage of audio hardware while maintaining internal state.
// Used on mobile platforms when entering a backgrounded state to reduce CPU to 0%.
// All internal state will be maintained, i.e. created sound and channels will stay available in memory.
func (s *System) MixerSuspend() error {
	res := C.FMOD_System_MixerSuspend(s.cptr)
	return errs[res]
}

// Resume mixer thread and reacquire access to audio hardware.
// Used on mobile platforms when entering the foreground after being suspended.
// All internal state will resume, i.e. created sound and channels are still valid and playback will continue.
func (s *System) MixerResume() error {
	res := C.FMOD_System_MixerResume(s.cptr)
	return errs[res]
}

// NOTE: Not implement yet
// Gets the default matrix used to convert from one speaker mode to another.
// The gain for source channel 's' to target channel 't' is matrix[t * matrixhop + s].
// If 'sourcespeakermode' or 'targetspeakermode' is SPEAKERMODE_RAW, this function will return error.
func (s *System) DefaultMixMatrix(sourcespeakermode, targetspeakermode C.FMOD_SPEAKERMODE, matrix *C.float, matrixhop C.int) error {
	//FMOD_RESULT F_API FMOD_System_GetDefaultMixMatrix (FMOD_SYSTEM *system, FMOD_SPEAKERMODE sourcespeakermode, FMOD_SPEAKERMODE targetspeakermode, float *matrix, int matrixhop);
	return ErrNoImpl
}

// NOTE: Not implement yet
// Gets a speaker mode's channel count.
func (s *System) SpeakerModeChannels(mode C.FMOD_SPEAKERMODE, channels *C.int) error {
	//FMOD_RESULT F_API FMOD_System_GetSpeakerModeChannels (FMOD_SYSTEM *system, FMOD_SPEAKERMODE mode, int *channels);
	return ErrNoImpl
}

/*
   System information functions.
*/

// Returns the current version of FMOD Studio being used.
func (s *System) Version() (uint32, error) {
	var version C.uint
	res := C.FMOD_System_GetVersion(s.cptr, &version)
	return uint32(version), errs[res]
}

// NOTE: Not implement yet
// Retrieves a pointer to the system level output device module.
// This means a pointer to a DirectX "LPDIRECTSOUND", or a WINMM handle, or with something like with OUTPUTTYPE_NOSOUND output, the handle will be null or 0.
// Must be called after "System.Init".
func (s *System) OutputHandle(handle **interface{}) error {
	//FMOD_RESULT F_API FMOD_System_GetOutputHandle (FMOD_SYSTEM *system, void **handle);
	return ErrNoImpl
}

// Retrieves the number of currently playing channels.
func (s *System) ChannelsPlaying() (int, error) {
	var channels C.int
	res := C.FMOD_System_GetChannelsPlaying(s.cptr, &channels)
	return int(channels), errs[res]
}

// TODO: Redo to native output instead map
// Retrieves in percent of CPU time - the amount of cpu usage that FMOD is taking for streaming/mixing and "System.Update" combined.
// This value is slightly smoothed to provide more stable readout (and to round off spikes that occur due to multitasking/operating system issues).
//
// NOTE! On ps3 and xbox360, the dsp and stream figures are NOT main cpu/main thread usage.
// On PS3 this is the percentage of SPU being used. On Xbox 360 it is the percentage of a hardware thread being used which is on a totally different CPU than the main one.
//
// Do not be alarmed if the usage for these platforms reaches over 50%, this is normal and should be ignored if you are playing a lot of compressed sounds and are using effects.
// The only value on the main cpu / main thread to take note of here that will impact your framerate is the update value, and this is typically very low (ie less than 1%).
//
// The returned map has the keys "dsp", "stream", "geometry", "update" and "total".
func (s *System) CPUUsage() (map[string]float32, error) {
	var dsp, stream, geometry, update, total C.float
	res := C.FMOD_System_GetCPUUsage(s.cptr, &dsp, &stream, &geometry, &update, &total)
	cpu := map[string]float32{
		"dsp":      float32(dsp),
		"stream":   float32(stream),
		"geometry": float32(geometry),
		"update":   float32(update),
		"total":    float32(total),
	}
	return cpu, errs[res]
}

// TODO: Redo to native output instead map
// Retrieves the amount of dedicated sound ram available if the platform supports it.
// Most platforms use main ram to store audio data, so this function usually isn't necessary.
//
// The returned map has the keys "currentalloced", "maxalloced" and "total".
func (s *System) SoundRAM() (map[string]int, error) {
	var currentalloced, maxalloced, total C.int
	res := C.FMOD_System_GetSoundRAM(s.cptr, &currentalloced, &maxalloced, &total)
	ram := map[string]int{
		"currentalloced": int(currentalloced),
		"maxalloced":     int(maxalloced),
		"total":          int(total),
	}
	return ram, errs[res]
}

/*
   Sound/DSP/Channel/FX creation and retrieval.
*/

// TODO: add more docs
// Loads a sound into memory, or opens it for streaming.
//
// name_or_data: Name of the file or URL to open encoded in a UTF-8 string, or a pointer to a preloaded sound memory block
// if FMOD_OPENMEMORY/FMOD_OPENMEMORY_POINT is used. For CD playback the name should be a drive letter with a colon, example "D:" (windows only).
//
// mode: Behaviour modifier for opening the sound. See FMOD_MODE.
//
// exinfo: Pointer to a FMOD_CREATESOUNDEXINFO which lets the user provide extended information while playing the sound. Optional. Specify 0 or NULL to ignore.
//
// NOTE(review): the exinfo parameter is currently IGNORED — null is always
// passed to the C call below. Callers supplying exinfo get default behaviour.
func (s *System) CreateSound(name_or_data string, mode Mode, exinfo *CreatesSoundExInfo) (*Sound, error) {
	var sound Sound
	defer runtime.SetFinalizer(&sound, (*Sound).Release)
	cname_or_data := C.CString(name_or_data)
	defer C.free(unsafe.Pointer(cname_or_data))

	// FIXME: exinfo is dropped; a CreatesSoundExInfo -> C.FMOD_CREATESOUNDEXINFO
	// conversion is needed here.
	res := C.FMOD_System_CreateSound(s.cptr, cname_or_data, C.FMOD_MODE(mode), (*C.FMOD_CREATESOUNDEXINFO)(null), &sound.cptr)
	return &sound, errs[res]
}

// TODO: add more docs
// Opens a sound for streaming.
// This function is a helper function that is the same as "System.CreateSound" but has the CREATESTREAM flag added internally.
//
// NOTE(review): as with CreateSound, the exinfo parameter is currently ignored.
func (s *System) CreateStream(name_or_data string, mode Mode, exinfo *CreatesSoundExInfo) (*Sound, error) {
	var sound Sound
	defer runtime.SetFinalizer(&sound, (*Sound).Release)
	cname_or_data := C.CString(name_or_data)
	defer C.free(unsafe.Pointer(cname_or_data))

	// FIXME: exinfo is dropped; see CreateSound.
	res := C.FMOD_System_CreateStream(s.cptr, cname_or_data, C.FMOD_MODE(mode), (*C.FMOD_CREATESOUNDEXINFO)(null), &sound.cptr)
	return &sound, errs[res]
}

// Creates a user defined DSP unit object to be inserted into a DSP network, for the purposes of sound filtering or sound generation.
//
// description: Pointer of an DSP_DESCRIPTION structure containing information about the unit to be created.
// Some members of DSP_DESCRIPTION are referenced directly inside FMOD so the structure should be allocated statically or at least remain in memory for the lifetime of the system.
//
// A DSP unit can generate or filter incoming data.
// The data is created or filtered through use of the read callback that is defined by the user.
// See the definition for the DSP_DESCRIPTION structure to find out what each member means.
// To be active, a unit must be inserted into the FMOD DSP network to be heard.
// Use functions such as "ChannelGroup.AddDSP", "Channel.AddDSP" or "DSP.AddInput" to do this.
func (s *System) CreateDSP(description *DSPDesc) (*DSP, error) {
	var dsp DSP
	defer runtime.SetFinalizer(&dsp, (*DSP).Release)
	res := C.FMOD_System_CreateDSP(s.cptr, (*C.FMOD_DSP_DESCRIPTION)(description), &dsp.cptr)
	return &dsp, errs[res]
}

// Creates an FMOD defined built in DSP unit object to be inserted into a DSP network, for the purposes of sound filtering or sound generation.
// This function is used to create special effects that come built into FMOD.
//
// typ: A pre-defined DSP effect or sound generator described by a DSP_TYPE.
//
// Note! Winamp DSP and VST plugins will only return the first plugin of this type that was loaded!
// To access all VST or Winamp DSP plugins the "System.CreateDSPByPlugin" function!
// Use the index returned by "System.LoadPlugin" if you don't want to enumerate them all.
func (s *System) CreateDSPByType(typ DSPType) (*DSP, error) {
	// TODO Finalizer — unlike CreateDSP, no Release finalizer is registered here.
	var dsp DSP
	res := C.FMOD_System_CreateDSPByType(s.cptr, C.FMOD_DSP_TYPE(typ), &dsp.cptr)
	return &dsp, errs[res]
}

// Creates a channel group object. These objects can be used to assign channels to for group channel settings, such as volume.
// Channel groups are also used for sub-mixing. Any channels that are assigned to a channel group get submixed into that channel group's DSP.
//
// name: Label to give to the channel group for identification purposes. Optional (can be null).
//
// See the channel group class definition for the types of operations that can be performed on 'groups' of channels.
// The channel group can for example be used to have 2 seperate groups of master volume, instead of one global master volume.
// A channel group can be used for sub-mixing, ie so that a set of channels can be mixed into a channel group, then can have effects applied to it without affecting other channels.
func (s *System) CreateChannelGroup(name string) (*ChannelGroup, error) {
	var channelgroup ChannelGroup
	cname := C.CString(name)
	defer C.free(unsafe.Pointer(cname))
	res := C.FMOD_System_CreateChannelGroup(s.cptr, cname, &channelgroup.cptr)
	defer runtime.SetFinalizer(&channelgroup, (*ChannelGroup).Release)
	return &channelgroup, errs[res]
}

// Creates a sound group, which can store handles to multiple Sound pointers.
//
// name: Name of sound group.
//
// Once a SoundGroup is created, "Sound.SetSoundGroup" is used to put a sound in a SoundGroup.
func (s *System) CreateSoundGroup(name string) (*SoundGroup, error) {
	var soundgroup = SoundGroup{name: name}
	cname := C.CString(name)
	defer C.free(unsafe.Pointer(cname))
	res := C.FMOD_System_CreateSoundGroup(s.cptr, cname, &soundgroup.cptr)
	defer runtime.SetFinalizer(&soundgroup, (*SoundGroup).Release)
	return &soundgroup, errs[res]
}

// Creates a 'virtual reverb' object. This object reacts to 3d location and morphs the reverb environment based on how close it is to the reverb object's center.
// Multiple reverb objects can be created to achieve a multi-reverb environment.
// 1 Physical reverb object is used for all 3d reverb objects (slot 0 by default).
//
// The 3D reverb object is a sphere having 3D attributes (position, minimum distance, maximum distance) and reverb properties.
// The properties and 3D attributes of all reverb objects collectively determine, along with the listener's position, the settings of and input gains into a single 3D reverb DSP.
// When the listener is within the sphere of effect of one or more 3d reverbs, the listener's 3D reverb properties are a weighted combination of such 3d reverbs.
// When the listener is outside all of the reverbs, no reverb is applied.
//
// In FMOD Ex a special 'ambient' reverb setting was used when outside the influence of all reverb spheres. This function no longer exists.
// In FMOD Studio "System.SetReverbProperties" can be used to create an alternative reverb that can be used for 2D and background global reverb.
// To avoid this reverb intefering with the reverb slot used by the 3d reverb, 2d reverb should use a different slot id with "System.SetReverbProperties",
// otherwise ADVANCEDSETTINGS::reverb3Dinstance can also be used to place 3d reverb on a different physical reverb slot.
//
// Creating multiple reverb objects does not impact performance. These are 'virtual reverbs'.
// There will still be only 1 physical reverb DSP running that just morphs between the different virtual reverbs.
//
// Note about phsyical reverb DSP unit allocation.
// To remove the DSP unit and the associated CPU cost, first make sure all 3d reverb objects are released.
// Then call "System.SetReverbProperties" with the 3d reverb's slot ID (default is 0) with a property point of 0 or NULL, to signal that the physical reverb instance should be deleted.
// If a 3d reverb is still present, and "System.SetReverbProperties" function is called to free the physical reverb,
// the 3D reverb system will immediately recreate it upon the next "System.Update" call.
func (s *System) CreateReverb3D() (*Reverb3D, error) {
	// TODO Finalizer — NOTE(review): a finalizer IS registered on the next line;
	// the stale TODO probably predates it.
	var reverb3d Reverb3D
	defer runtime.SetFinalizer(&reverb3d, (*Reverb3D).Release)
	res := C.FMOD_System_CreateReverb3D(s.cptr, &reverb3d.cptr)
	return &reverb3d, errs[res]
}

// Plays a sound object on a particular channel and ChannelGroup if desired.
//
// sound: Pointer to the sound to play. This is opened with "System.CreateSound".
//
// channelgroup: Pointer to a channelgroup become a member of.
// This is more efficient than using "Channel.SetChannelGroup", as it does it during the channel setup, rather than connecting to the master channel group,
// then later disconnecting and connecting to the new channelgroup when specified. Use 0/NULL to ignore.
//
// paused: true or false flag to specify whether to start the channel paused or not.
// Starting a channel paused allows the user to alter its attributes without it being audible, and unpausing with "Channel.SetPaused" actually starts the sound.
//
// When a sound is played, it will use the sound's default frequency and priority.
//
// A sound defined as FMOD_3D will by default play at the position of the listener.
// To set the 3D position of the channel before the sound is audible, start the channel paused by setting the paused flag to true,
// and calling "Channel.Set3DAttributes". Following that, unpause the channel with "Channel.SetPaused".
//
// Channels are reference counted. If a channel is stolen by the FMOD priority system, then the handle to the stolen voice becomes invalid,
// and Channel based commands will not affect the new sound playing in its place.
// If all channels are currently full playing a sound, FMOD will steal a channel with the lowest priority sound.
//
// If more channels are playing than are currently available on the soundcard/sound device or software mixer, then FMOD will 'virtualize' the channel.
// This type of channel is not heard, but it is updated as if it was playing.
// When its priority becomes high enough or another sound stops that was using a real hardware/software channel, it will start playing from where it should be.
// This technique saves CPU time (thousands of sounds can be played at once without actually being mixed or taking up resources), and also removes the need for the user to manage voices themselves.
// An example of virtual channel usage is a dungeon with 100 torches burning, all with a looping crackling sound, but with a soundcard that only supports 32 hardware voices.
// If the 3D positions and priorities for each torch are set correctly, FMOD will play all 100 sounds without any 'out of channels' errors,
// and swap the real voices in and out according to which torches are closest in 3D space.
// Priority for virtual channels can be changed in the sound's defaults, or at runtime with "Channel.SetPriority". func (s *System) PlaySound(sound *Sound, channelgroup *ChannelGroup, paused bool) (*Channel, error) { var channel Channel res := C.FMOD_System_PlaySound(s.cptr, sound.cptr, (*C.FMOD_CHANNELGROUP)(null), getBool(paused), &channel.cptr) return &channel, errs[res] } // Plays a DSP unit object and its input network on a particular channel. // // dsp: Pointer to the dsp unit to play. This is opened with "System.CreateDSP", "System.CreateDSPByType", "System.CreateDSPByPlugin". // // channelgroup: Pointer to a channelgroup become a member of. // This is more efficient than using "Channel.SetChannelGroup", as it does it during the channel setup, // rather than connecting to the master channel group, then later disconnecting and connecting to the new channelgroup when specified. Use 0/NULL to ignore. // // paused: true or false flag to specify whether to start the channel paused or not. // Starting a channel paused allows the user to alter its attributes without it being audible, and unpausing with "Channel.SetPaused" actually starts the dsp running. // // When a dsp is played, it will use the dsp's default frequency, volume, pan, levels and priority. // // A dsp defined as FMOD_3D will by default play at the position of the listener. // To change channel attributes before the dsp is audible, start the channel paused by setting the paused flag to true, and calling the relevant channel based functions. // Following that, unpause the channel with "Channel.SetPaused". // Channels are reference counted. If a channel is stolen by the FMOD priority system, then the handle to the stolen voice becomes invalid, // and Channel based commands will not affect the new channel playing in its place. // If all channels are currently full playing a dsp or sound, FMOD will steal a channel with the lowest priority dsp or sound. 
// If more channels are playing than are currently available on the soundcard/sound device or software mixer, then FMOD will 'virtualize' the channel. // This type of channel is not heard, but it is updated as if it was playing. When its priority becomes high enough or another sound stops that was using a real hardware/software channel, // it will start playing from where it should be. This technique saves CPU time (thousands of sounds can be played at once without actually being mixed or taking up resources), // and also removes the need for the user to manage voices themselves. // An example of virtual channel usage is a dungeon with 100 torches burning, all with a looping crackling sound, but with a soundcard that only supports 32 hardware voices. // If the 3D positions and priorities for each torch are set correctly, FMOD will play all 100 sounds without any 'out of channels' errors, and swap the real voices in // and out according to which torches are closest in 3D space. // Priority for virtual channels can be changed in the sound's defaults, or at runtime with "Channel.SetPriority". func (s *System) PlayDSP(dsp *DSP, channelgroup *ChannelGroup, paused bool) (*Channel, error) { var channel Channel res := C.FMOD_System_PlayDSP(s.cptr, dsp.cptr, (*C.FMOD_CHANNELGROUP)(null), getBool(paused), &channel.cptr) return &channel, errs[res] } // Retrieves a handle to a channel by ID. // This function is mainly for getting handles to existing (playing) channels and setting their attributes. func (s *System) Channel(channelid int) (*Channel, error) { var channel Channel res := C.FMOD_System_GetChannel(s.cptr, C.int(channelid), &channel.cptr) return &channel, errs[res] } // Retrieves a handle to the internal master channel group. This is the default channel group that all channels play on. // This channel group can be used to do things like set the master volume for all playing sounds. // See the ChannelGroup API for more functionality. 
func (s *System) MasterChannelGroup() (*ChannelGroup, error) {
	var channelgroup ChannelGroup
	res := C.FMOD_System_GetMasterChannelGroup(s.cptr, &channelgroup.cptr)
	return &channelgroup, errs[res]
}

// Retrieves the default sound group, where all sounds are placed when they are created.
// If a user based soundgroup is deleted/released, the sounds will be put back into this sound group.
func (s *System) MasterSoundGroup() (*SoundGroup, error) {
	var soundgroup SoundGroup
	res := C.FMOD_System_GetMasterSoundGroup(s.cptr, &soundgroup.cptr)
	return &soundgroup, errs[res]
}

/*
   Routing to ports.
*/

// NOTE: Not implemented yet
// Route the signal from a channel group into a seperate audio port on the output driver.
//
// portType: Output driver specific audio port type
//
// portIndex: Output driver specific index of the audio port
//
// channelgroup: Channel group to route away to the new port
//
// passThru: If true the signal will continue to be passed through to the main mix, if false the signal will be entirely to the designated port.
func (s *System) AttachChannelGroupToPort(portType C.FMOD_PORT_TYPE, portIndex C.FMOD_PORT_INDEX, channelgroup *C.FMOD_CHANNELGROUP, passThru C.FMOD_BOOL) error {
	//FMOD_RESULT F_API FMOD_System_AttachChannelGroupToPort  (FMOD_SYSTEM *system, FMOD_PORT_TYPE portType, FMOD_PORT_INDEX portIndex, FMOD_CHANNELGROUP *channelgroup, FMOD_BOOL passThru);
	return ErrNoImpl
}

// NOTE: Not implemented yet
// Disconnect a channel group from a and route audio back to the default port of the output driver
func (s *System) DetachChannelGroupFromPort(channelgroup *C.FMOD_CHANNELGROUP) error {
	//FMOD_RESULT F_API FMOD_System_DetachChannelGroupFromPort(FMOD_SYSTEM *system, FMOD_CHANNELGROUP *channelgroup);
	return ErrNoImpl
}

/*
   Reverb API.
*/

// Sets parameters for the global reverb environment.
// Reverb parameters can be set manually, or automatically using the pre-defined presets given in the fmod.h header.
//
// NOTE(review): the reverb instance is hard-coded to 1 here; callers cannot
// target other reverb slots. Confirm whether slot 1 (not 0) is intentional.
func (s *System) SetReverbProperties(props *ReverbProperties) error {
	rp := props.toC()
	res := C.FMOD_System_SetReverbProperties(s.cptr, C.int(1), *(**C.FMOD_REVERB_PROPERTIES)(unsafe.Pointer(&rp)))
	return errs[res]
}

// Retrieves the current reverb environment for the specified reverb instance.
// You must specify the 'Instance' value (usually 0 unless you are using multiple reverbs) before calling this function.
// Note! It is important to specify the 'Instance' value in the REVERB_PROPERTIES structure correctly, otherwise you will get an FMOD_ERR_REVERB_INSTANCE error.
//
// NOTE(review): instance is hard-coded to 1, and the cast below assumes
// ReverbProperties and C.FMOD_REVERB_PROPERTIES are layout-identical — verify.
func (s *System) ReverbProperties() (*ReverbProperties, error) {
	props := new(ReverbProperties)
	cReverbProperties := *(*C.FMOD_REVERB_PROPERTIES)(unsafe.Pointer(props))
	res := C.FMOD_System_GetReverbProperties(s.cptr, C.int(1), &cReverbProperties)
	props.fromC(cReverbProperties)
	return props, errs[res]
}

/*
   System level DSP functionality.
*/

// Mutual exclusion function to lock the FMOD DSP engine (which runs asynchronously in another thread), so that it will not execute.
// If the FMOD DSP engine is already executing, this function will block until it has completed.
// The function may be used to synchronize DSP network operations carried out by the user.
// An example of using this function may be for when the user wants to construct a DSP sub-network,
// without the DSP engine executing in the background while the sub-network is still under construction.
//
// Once the user no longer needs the DSP engine locked, it must be unlocked with "System.UnlockDSP()".
// Note that the DSP engine should not be locked for a significant amount of time, otherwise inconsistency in the audio output may result. (audio skipping/stuttering).
func (s *System) LockDSP() error {
	res := C.FMOD_System_LockDSP(s.cptr)
	return errs[res]
}

// Mutual exclusion function to unlock the FMOD DSP engine (which runs asynchronously in another thread) and let it continue executing.
// The DSP engine must be locked with "System.LockDSP()" before this function is called.
func (s *System) UnlockDSP() error {
	res := C.FMOD_System_UnlockDSP(s.cptr)
	return errs[res]
}

/*
   Recording API.
*/

// NOTE: Not implemented yet
// Retrieves the number of recording devices available for this output mode.
// Use this to enumerate all recording devices possible so that the user can select one.
func (s *System) RecordNumDrivers(numdrivers, numconnected *C.int) error {
	//FMOD_RESULT F_API FMOD_System_GetRecordNumDrivers      (FMOD_SYSTEM *system, int *numdrivers, int *numconnected);
	return ErrNoImpl
}

// NOTE: Not implemented yet
// TODO: add more docs
// Retrieves identification information about a sound device specified by its index, and specific to the output mode set with "System.SetOutput".
func (s *System) RecordDriverInfo(id C.int, name *C.char, namelen C.int, guid *C.FMOD_GUID, systemrate *C.int, speakermode *C.FMOD_SPEAKERMODE, speakermodechannels *C.int, state *C.FMOD_DRIVER_STATE) error {
	//FMOD_RESULT F_API FMOD_System_GetRecordDriverInfo      (FMOD_SYSTEM *system, int id, char *name, int namelen, FMOD_GUID *guid, int *systemrate, FMOD_SPEAKERMODE *speakermode, int *speakermodechannels, FMOD_DRIVER_STATE *state);
	return ErrNoImpl
}

// NOTE: Not implemented yet
// Retrieves the current recording position of the record buffer in PCM samples.
//
// id: Enumerated driver ID. This must be in a valid range delimited by "System.RecordNumDrivers".
func (s *System) RecordPosition(id C.int, position *C.uint) error {
	//FMOD_RESULT F_API FMOD_System_GetRecordPosition        (FMOD_SYSTEM *system, int id, unsigned int *position);
	return ErrNoImpl
}

// NOTE: Not implemented yet
// Starts the recording engine recording to the specified recording sound.
//
// id: Enumerated driver ID. This must be in a valid range delimited by "System.RecordNumDrivers".
//
// sound: User created sound for the user to record to.
//
// loop: Boolean flag to tell the recording engine whether to continue recording to the provided sound from the start again, after it has reached the end.
// If this is set to true the data will be continually be overwritten once every loop.
func (s *System) RecordStart(id C.int, sound *C.FMOD_SOUND, loop C.FMOD_BOOL) error {
	//FMOD_RESULT F_API FMOD_System_RecordStart              (FMOD_SYSTEM *system, int id, FMOD_SOUND *sound, FMOD_BOOL loop);
	return ErrNoImpl
}

// NOTE: Not implemented yet
// Stops the recording engine from recording to the specified recording sound.
func (s *System) RecordStop(id C.int) error {
	//FMOD_RESULT F_API FMOD_System_RecordStop               (FMOD_SYSTEM *system, int id);
	return ErrNoImpl
}

// NOTE: Not implemented yet
// Retrieves the state of the FMOD recording API, ie if it is currently recording or not.
func (s *System) IsRecording(id C.int, recording *C.FMOD_BOOL) error {
	//FMOD_RESULT F_API FMOD_System_IsRecording              (FMOD_SYSTEM *system, int id, FMOD_BOOL *recording);
	return ErrNoImpl
}

/*
   Geometry API.
*/

// Geometry creation function. This function will create a base geometry object which can then have polygons added to it.
//
// maxpolygons: Maximum number of polygons within this object.
//
// maxvertices: Maximum number of vertices within this object.
//
// Polygons can be added to a geometry object using "Geometry.AddPolygon".
// A geometry object stores its list of polygons in a structure optimized for quick line intersection testing and efficient insertion and updating.
// The structure works best with regularly shaped polygons with minimal overlap.
// Many overlapping polygons, or clusters of long thin polygons may not be handled efficiently.
// Axis aligned polygons are handled most efficiently.
//
// The same type of structure is used to optimize line intersection testing with multiple geometry objects.
// It is important to set the value of maxworldsize to an appropriate value using "System.SetGeometrySettings".
// Objects or polygons outside the range of maxworldsize will not be handled efficiently.
// Conversely, if maxworldsize is excessively large, the structure may lose precision and efficiency may drop.
//
// NOTE(review): the finalizer below is registered on a stack-local Geometry
// that is then returned BY VALUE; the caller's copy has no finalizer — verify
// the intended ownership/release semantics.
func (s *System) CreateGeometry(maxpolygons, maxvertices int) (Geometry, error) {
	var geom Geometry
	defer runtime.SetFinalizer(&geom, (*Geometry).Release)
	res := C.FMOD_System_CreateGeometry(s.cptr, C.int(maxpolygons), C.int(maxvertices), &geom.cptr)
	return geom, errs[res]
}

// Sets the maximum world size for the geometry engine for performance / precision reasons.
// Setting maxworldsize should be done first before creating any geometry.
// It can be done any time afterwards but may be slow in this case.
// Objects or polygons outside the range of maxworldsize will not be handled efficiently.
// Conversely, if maxworldsize is excessively large, the structure may lose precision and efficiency may drop.
func (s *System) SetGeometrySettings(maxworldsize float64) error {
	res := C.FMOD_System_SetGeometrySettings(s.cptr, C.float(maxworldsize))
	return errs[res]
}

// Retrieves the maximum world size for the geometry engine.
func (s *System) GeometrySettings() (float64, error) {
	var maxworldsize C.float
	res := C.FMOD_System_GetGeometrySettings(s.cptr, &maxworldsize)
	return float64(maxworldsize), errs[res]
}

// NOTE: Not implemented yet
// Creates a geometry object from a block of memory which contains pre-saved geometry data, saved by "Geometry.Save".
func (s *System) LoadGeometry(data *interface{}, datasize C.int, geometry **C.FMOD_GEOMETRY) error {
	//FMOD_RESULT F_API FMOD_System_LoadGeometry             (FMOD_SYSTEM *system, const void *data, int datasize, FMOD_GEOMETRY **geometry);
	return ErrNoImpl
}

// NOTE: Not implemented yet
// Calculates geometry occlusion between a listener and a sound source.
// If single sided polygons have been created, it is important to get the source and listener positions round the right way,
// as the occlusion from point A to point B may not be the same as the occlusion from point B to point A.
func (s *System) GeometryOcclusion(listener, source *C.FMOD_VECTOR, direct, reverb C.float) error {
	//FMOD_RESULT F_API FMOD_System_GetGeometryOcclusion     (FMOD_SYSTEM *system, const FMOD_VECTOR *listener, const FMOD_VECTOR *source, float *direct, float *reverb);
	return ErrNoImpl
}

/*
   Network functions.
*/

// Set a proxy server to use for all subsequent internet connections.
// Basic authentication is supported. To use it, this parameter must be in
// user:password@host:port format e.g. bob:sekrit123@www.fmod.org:8888 Set this parameter to 0 / NULL if no proxy is required.
func (s *System) SetNetworkProxy(proxy string) error {
	cproxy := C.CString(proxy)
	defer C.free(unsafe.Pointer(cproxy))
	res := C.FMOD_System_SetNetworkProxy(s.cptr, cproxy)
	return errs[res]
}

// NOTE: Not implemented yet
// Retrieves the URL of the proxy server used in internet streaming.
func (s *System) NetworkProxy(proxy *C.char, proxylen C.int) error {
	//FMOD_RESULT F_API FMOD_System_GetNetworkProxy          (FMOD_SYSTEM *system, char *proxy, int proxylen);
	return ErrNoImpl
}

// Set the timeout for network streams.
// timeout: The timeout value in ms.
func (s *System) SetNetworkTimeout(timeout int) error {
	res := C.FMOD_System_SetNetworkTimeout(s.cptr, C.int(timeout))
	return errs[res]
}

// Retrieve the timeout value for network streams
func (s *System) NetworkTimeout() (int, error) {
	var timeout C.int
	res := C.FMOD_System_GetNetworkTimeout(s.cptr, &timeout)
	return int(timeout), errs[res]
}

/*
   Userdata set/get.
*/

// Sets a user value that the System object will store internally. Can be retrieved with "System.UserData".
// This function is primarily used in case the user wishes to 'attach' data to an FMOD object.
// It can be useful if an FMOD callback passes an object of this type as a parameter, and the user does not know which object it is (if many of these types of objects exist).
// Using "System.UserData" would help in the identification of the object.
//
// NOTE(review): this stores the address of a function-local variable (`data`)
// into the C side; once this call returns, that pointer is dangling from Go's
// point of view and the reinterpret-cast of interface{} to []*C.char is not a
// meaningful conversion — verify against working callers before relying on it.
func (s *System) SetUserData(userdata interface{}) error {
	data := *(*[]*C.char)(unsafe.Pointer(&userdata))
	res := C.FMOD_System_SetUserData(s.cptr, unsafe.Pointer(&data))
	return errs[res]
}

//Retrieves the user value that that was set by calling the System.SetUserData function.
//
// NOTE(review): dereferences the raw pointer returned by FMOD as an
// interface{}; if no userdata was ever set (NULL) this dereference would be
// invalid — confirm expected usage.
func (s *System) UserData() (interface{}, error) {
	var userdata *interface{}
	cUserdata := unsafe.Pointer(userdata)
	res := C.FMOD_System_GetUserData(s.cptr, &cUserdata)
	return *(*interface{})(cUserdata), errs[res]
}
package user import ( "time" ) type AppUser struct { ID *string Username *string CreatedAt *time.Time UpdatedAt *time.Time passwordHash *string avatar *Avatar synced bool } func (a *AppUser) IsSynced() bool { return a.synced } func (a *AppUser) MarkSynced() { a.synced = true } func (a *AppUser) GetAvatar() *Avatar { return a.avatar } func (a *AppUser) SetAvatar(fileName string) { if fileName == "" { a.avatar = nil } else { a.avatar = &Avatar{FileName: fileName} } } func (a *AppUser) GetPasswordHash() string { return *a.passwordHash } func (a *AppUser) SetPasswordHash(hash string) { a.passwordHash = &hash }
package config import ( "github.com/go-errors/errors" "gopkg.in/yaml.v2" ) type Configuration struct { Host string `yaml:"host"` Port string `yaml:"port"` User string `yaml:"user"` Password string `yaml:"password"` Tls string `yaml:"tls"` Workers int `yaml:"workers"` } func InitConfig(f []byte) (*Configuration,error) { var conf Configuration err := yaml.Unmarshal(f, &conf) if err != nil { return nil, errors.Wrap(err, -1) } return &conf, nil }
package main import ( "fmt" "math" ) func main() { var r float64 fmt.Scan(&r) p := 2 * math.Pi * r s := math.Pi * r * r fmt.Printf("%f %f", s, p) }
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package cm

import "github.com/m3db/m3x/pool"

// Sample represents a sampled value.
// Samples form a doubly-linked list (prev/next) inside a stream.
type Sample struct {
	value    float64 // sampled value
	numRanks int64   // number of ranks represented
	delta    int64   // delta between min rank and max rank
	prev     *Sample // previous sample
	next     *Sample // next sample
}

// SamplePool is a pool of samples.
type SamplePool interface {
	// Init initializes the pool.
	Init()

	// Get returns a sample from the pool.
	Get() *Sample

	// Put returns a sample to the pool.
	Put(sample *Sample)
}

// Stream represents a data sample stream for floating point numbers.
type Stream interface {
	// Add adds a sample value.
	Add(value float64)

	// Flush flushes the internal buffer.
	Flush()

	// Min returns the minimum value.
	Min() float64

	// Max returns the maximum value.
	Max() float64

	// Quantile returns the quantile value.
	Quantile(q float64) float64

	// Close closes the stream.
	Close()

	// ResetSetData resets the stream and sets data.
	ResetSetData(quantiles []float64)
}

// StreamAlloc allocates a stream.
type StreamAlloc func() Stream

// StreamPool provides a pool for streams.
type StreamPool interface {
	// Init initializes the pool.
	Init(alloc StreamAlloc)

	// Get provides a stream from the pool.
	Get() Stream

	// Put returns a stream to the pool.
	Put(value Stream)
}

// Options represent various options for computing quantiles.
// Setters return Options to allow fluent chaining.
type Options interface {
	// SetEps sets the desired epsilon for errors.
	SetEps(value float64) Options

	// Eps returns the desired epsilon for errors.
	Eps() float64

	// SetCapacity sets the initial heap capacity.
	SetCapacity(value int) Options

	// Capacity returns the initial heap capacity.
	Capacity() int

	// SetInsertAndCompressEvery sets how frequently the timer values are
	// inserted into the stream and compressed to reduce write latency for
	// high frequency timers.
	SetInsertAndCompressEvery(value int) Options

	// InsertAndCompressEvery returns how frequently the timer values are
	// inserted into the stream and compressed to reduce write latency for
	// high frequency timers.
	InsertAndCompressEvery() int

	// SetFlushEvery sets how frequently the underlying stream is flushed
	// to reduce processing time when computing aggregated statistics from
	// the stream.
	SetFlushEvery(value int) Options

	// FlushEvery returns how frequently the underlying stream is flushed
	// to reduce processing time when computing aggregated statistics from
	// the stream.
	FlushEvery() int

	// SetStreamPool sets the stream pool.
	SetStreamPool(value StreamPool) Options

	// StreamPool returns the stream pool.
	StreamPool() StreamPool

	// SetSamplePool sets the sample pool.
	SetSamplePool(value SamplePool) Options

	// SamplePool returns the sample pool.
	SamplePool() SamplePool

	// SetFloatsPool sets the floats pool.
	SetFloatsPool(value pool.FloatsPool) Options

	// FloatsPool returns the floats pool.
	FloatsPool() pool.FloatsPool

	// Validate validates the options.
	Validate() error
}
// // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package controller import ( "errors" "github.com/naili-xing/singa_auto_scheduler/pkg/sanodemonitor/log" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) var Config *rest.Config func InitInClusterConfig() { err := errors.New("") // InClusterConfig log.Print("Init kubernetes Config. ") Config, err = rest.InClusterConfig() if err != nil { log.ErrPrint("InitInClusterConfig", err) } } func InitOutOfClusterConfig() { err := errors.New("") log.Print("Init kubernetes Config. ") Config, err = clientcmd.BuildConfigFromFlags( "", "/root/.kube/config") if err != nil { log.ErrPrint("InitOutOfClusterConfig", err) } }
package provider import ( "encoding/base64" "fmt" "github.com/kenlabs/pando/pkg/api/v1/model" "os" "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/peer" "github.com/spf13/cobra" "github.com/kenlabs/pando/cmd/client/command/api" ) const registerPath = "/register" type providerInfo struct { peerID string privateKey string addresses []string miner string name string onlyEnvelop bool } var registerInfo = &providerInfo{} func registerCmd() *cobra.Command { cmd := &cobra.Command{ Use: "register", Short: "register a provider to pando server", RunE: func(cmd *cobra.Command, args []string) error { if err := registerInfo.validateFlags(); err != nil { return err } peerID, err := peer.Decode(registerInfo.peerID) if err != nil { return err } privateKeyEncoded, err := base64.StdEncoding.DecodeString(registerInfo.privateKey) if err != nil { return err } privateKey, err := crypto.UnmarshalPrivateKey(privateKeyEncoded) if err != nil { return err } data, err := model.MakeRegisterRequest(peerID, privateKey, registerInfo.addresses, registerInfo.miner, registerInfo.name) if err != nil { return err } if registerInfo.onlyEnvelop { envelopFile, err := os.OpenFile("./envelop.data", os.O_RDWR|os.O_CREATE, 0755) if err != nil { return err } _, err = envelopFile.Write(data) if err != nil { return err } fmt.Println("envelop data saved at ./envelop.data") return nil } res, err := api.Client.R(). SetBody(data). SetHeader("Content-Type", "application/octet-stream"). 
Post(joinAPIPath(registerPath)) if err != nil { return err } return api.PrintResponseData(res) }, } registerInfo.setFlags(cmd) return cmd } func (f *providerInfo) setFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&f.peerID, "peer-id", "", "peerID of provider, required") cmd.Flags().StringVar(&f.privateKey, "private-key", "", "private key of provider, required") cmd.Flags().StringSliceVar(&f.addresses, "addresses", []string{}, "address array of provider") cmd.Flags().StringVar(&f.miner, "miner", "", "miner of provider") cmd.Flags().StringVar(&f.name, "name", "", "name of provider") cmd.Flags().BoolVarP(&f.onlyEnvelop, "only-envelop", "e", false, "only generate envelop body") } func (f *providerInfo) validateFlags() error { if f.peerID == "" || f.privateKey == "" { return fmt.Errorf("peerID and privateKey are requied, given:\n\taddresses: %v\n\tpeerID%v\n\tprivateKey%v", f.addresses, f.peerID, f.privateKey) } return nil }
/** * @website: https://vvotm.github.io * @author luowen<bigpao.luo@gmail.com> * @date 2017/12/16 10:19 * @description: */ package errhandle type AbsErrer interface { GetCode() int Error() string ErrorMsg() string } type SuperError struct { Code int `json:"code"` Message string `json:"message"` ErrMsg string `json:"errmsg"` } func (p *SuperError) GetCode() int { return p.Code } func (p *SuperError) Error() string { return p.Message } func (p *SuperError) ErrorMsg() string { return p.ErrMsg } type PDOError struct { *SuperError } func NewPDOError(message string, code int, errmsg string) AbsErrer { return &PDOError{ &SuperError{code, message, errmsg}, } }
// Copyright 2018 TriggerMesh, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package build

import (
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/triggermesh/tm/pkg/client"
)

// TestList verifies that builds can be listed in the target namespace
// (taken from $NAMESPACE, defaulting to "test-namespace").
// NOTE(review): this is an integration test — it needs a reachable
// cluster configured via the default client config.
func TestList(t *testing.T) {
	namespace := "test-namespace"
	if ns, ok := os.LookupEnv("NAMESPACE"); ok {
		namespace = ns
	}
	buildClient, err := client.NewClient(client.ConfigPath(""))
	assert.NoError(t, err)

	build := &Build{Namespace: namespace}
	_, err = build.List(&buildClient)
	assert.NoError(t, err)
}

// TestBuild deploys a build from each test case, checks the produced
// image name and the round-tripped build arguments, then deletes it.
func TestBuild(t *testing.T) {
	namespace := "test-namespace"
	if ns, ok := os.LookupEnv("NAMESPACE"); ok {
		namespace = ns
	}
	buildClient, err := client.NewClient(client.ConfigPath(""))
	assert.NoError(t, err)
	testCases := []struct {
		Name          string
		Source        string
		Revision      string
		Buildtemplate string
		Args          []string
		ImageName     string
		ErrMSG        error
	}{
		// Single happy-path case: a kaniko build of the knative docs sample.
		{"kaniko-build", "https://github.com/knative/docs", "master", "https://raw.githubusercontent.com/triggermesh/build-templates/master/kaniko/kaniko.yaml", []string{"DIRECTORY=docs/serving/samples/hello-world/helloworld-go", "FOO:BAR"}, "", nil},
	}
	for _, tt := range testCases {
		build := &Build{
			Wait:          true,
			Name:          tt.Name,
			Namespace:     namespace,
			Source:        tt.Source,
			Revision:      tt.Revision,
			Buildtemplate: tt.Buildtemplate,
			Args:          tt.Args,
		}
		image, err := build.Deploy(&buildClient)
		assert.NoError(t, err)
		assert.Contains(t, image, build.Name)

		b, err := build.Get(&buildClient)
		assert.NoError(t, err)
		assert.Equal(t, tt.Name, b.Name)

		// Every arg passed in must appear (name and value) in the
		// deployed build's template arguments.
		for k, v := range mapFromSlice(tt.Args) {
			present := false
			for _, buildArgs := range b.Spec.Template.Arguments {
				if buildArgs.Name == k && buildArgs.Value == v {
					present = true
					break
				}
			}
			if !present {
				assert.Error(t, fmt.Errorf("Build is missing passed arg %q", k))
				break
			}
		}
		err = build.Delete(&buildClient)
		assert.NoError(t, err)
	}
}
package main import ( "flag" "log" "github.com/valyala/fasthttp" ) // BAZINGA BAZINGA! const BAZINGA = "Bazinga!" var ( // version is a variable set while build to a known git branch and timestamp. version string // listener is the host:port to listen to listenAddr = flag.String("listener", ":8080", "host listener <host>:<port> (default :8080)") // enable compression compress = flag.Bool("compress", false, "enable compression (default: false)") ) func main() { // parse given flags flag.Parse() // Run the daemon, we are in the psudo main loop run() } // Frun represents the main run loop func run() { // create request handler h := requestHandler if *compress { h = fasthttp.CompressHandler(h) } // check if listener is configured if len(*listenAddr) > 0 { if err := fasthttp.ListenAndServe(*listenAddr, h); err != nil { log.Fatalf("Error: %s", err) } } // wait forever select {} } // FrequestHandler is the main handler passed to fasthttp func requestHandler(ctx *fasthttp.RequestCtx) { // Logger may be cached in local variables. logger := ctx.Logger() logger.Printf("%s", ctx.Request.Header.UserAgent()) switch string(ctx.Path()) { default: defaultHandler(ctx) } } // FgraphqlHandler is GraphQL request handler func graphqlHandler(ctx *fasthttp.RequestCtx) { } // FdefaultHandler is the default request handler func defaultHandler(ctx *fasthttp.RequestCtx) { ctx.Error(BAZINGA, fasthttp.StatusNotFound) }
package samplepackage import ( "testing" "github.com/stretchr/testify/assert" ) func Test_binarySearch(t *testing.T) { tests := []struct { name string array []int64 key int64 expectedValue int }{ { name: "When key present at start of an array", array: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}, key: 1, expectedValue: 0, }, { name: "When key present at end of an array", array: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}, key: 9, expectedValue: 8, }, { name: "When key is present in array", array: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}, key: 7, expectedValue: 6, }, { name: "When key is not present in array", array: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}, key: 10, expectedValue: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { actualValue := binarySearch(tt.array, 0, len(tt.array)-1, tt.key) assert.Equal(t, tt.expectedValue, actualValue) }) } }
package game import ( "log" "net/http" ) type Req interface { Resolve() []byte } type GameAnswer struct { Message string `json:"message"` Data interface{} `json:"data"` } func (this *Game) handleConnections(w http.ResponseWriter, r *http.Request) { ws, err := upgrader.Upgrade(w, r, nil) if err != nil { log.Fatal(err) } defer ws.Close() player := NewPlayer(this.Map.GetChunk(0, 0), ws) this.Players[ws] = player for { var req map[string]interface{} err := ws.ReadJSON(&req) if err != nil { log.Printf("Read error: %v", err) player.UnsubscribeAll() delete(this.Players, ws) break } this.handleIncomingMessage(player, req) } } func (this *Game) Broadcast(obj Object) { chunks := this.Map.GetAllChunksAround(obj) players := make(map[int]*Player) for _, chunk := range chunks { for _, player := range chunk.Subscribers { players[player.Id] = player } } for _, player := range players { this.SendTo(player, obj) } } func (this *Game) SendTo(player *Player, obj interface{}) { err := player.Socket.WriteJSON(obj) if err != nil { log.Printf("Send error: %v", err) } } func (this *Game) handleIncomingMessage(player *Player, req map[string]interface{}) { switch req["message"] { case "ready": chunks := this.Map.GetAllChunksAround(player) this.SendTo(player, player) for _, chunk := range chunks { this.SendTo(player, chunk) } case "player_move": tile := player.Chunk.GetAdjacentTile(player, Orientation(req["data"].(map[string]interface{})["ori"].(float64))) player.X = tile.X player.Y = tile.Y player.Chunk = tile.Chunk this.Broadcast(player) } }
package runtime // A Module represents a group of Values in an Env type Module struct { // The environment in which the values are contained Env Env } // Wrap a given Env in a Module. func ModuleFrom(env Env) *Module { return &Module{env} } // Lookup a value in this module, or error if not found func (module *Module) Lookup(key Value) (Value, error) { if key, ok := key.(Symbol); ok == true { return key.Eval(module.Env) } return nil, BadType(SymbolType, key.Type()) } // Modules are callable and return their members by name func (module *Module) Call(site Env, args Sequence) (Value, error) { var key Value if err := ReadArgs(args, &key); err != nil { return nil, err } return module.Lookup(key) } func (module *Module) Type() Type { return ModuleType } func (module *Module) Eval(env Env) (Value, error) { return module, nil } func (module *Module) String() string { return "#<module>" }
package errors // ServiceError should be used to return business error message type ServiceError struct { Message string `json: "message"` }
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// main edits an AES-CFB-encrypted file in place: it decrypts the file (if
// it exists) into a temp file, opens the temp file in $EDITOR, then
// re-encrypts the edited content back to the original path. The AES key
// is read, hex-encoded, from the ENCRYPTION_KEY environment variable.
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Usage: encrypt path")
		return
	}
	key := os.Getenv("ENCRYPTION_KEY")
	if key == "" {
		fmt.Println("Missing ENCRYPTION_KEY environment variable")
		return
	}
	path := os.Args[1]
	var content string
	// Generate a fresh random IV; it is only used as-is when the target
	// file does not exist yet (otherwise the file's own IV replaces it).
	iv := make([]byte, aes.BlockSize)
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		panic(err)
	}
	_, err := os.Stat(path)
	if err != nil {
		if !os.IsNotExist(err) {
			panic(err)
		}
	} else {
		// Existing file: decrypt it and keep its original IV so the
		// re-encrypted file uses the same one.
		iv, content = decryptFile(key, path)
	}
	tmpfile, err := os.CreateTemp("", "scrt")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(tmpfile.Name())
	tmpfile.WriteString(content)
	tmpfile.Close()
	editor := os.Getenv("EDITOR")
	if editor == "" {
		editor = "nvim"
	}
	var cmd *exec.Cmd
	if strings.Contains(editor, "vi") {
		// vi-family editors get JSON filetype highlighting for the buffer.
		cmd = exec.Command(editor, "-c", "set ft=json", tmpfile.Name())
	} else {
		cmd = exec.Command(editor, tmpfile.Name())
	}
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	err = cmd.Run()
	if err != nil {
		panic(err)
	}
	contentB, err := os.ReadFile(tmpfile.Name())
	if err != nil {
		panic(err)
	}
	encryptFile(key, string(contentB), path, iv)
}

// decryptFile reads the base64-encoded file at path, decrypts it with the
// hex-encoded key using AES-CFB, and returns the IV (taken from the first
// aes.BlockSize bytes of the ciphertext) and the plaintext. Any failure
// panics.
func decryptFile(keyS, path string) ([]byte, string) {
	key, err := hex.DecodeString(keyS)
	if err != nil {
		panic(err)
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	ciphertext, err := base64.StdEncoding.DecodeString(string(raw))
	if err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	if len(ciphertext) < aes.BlockSize {
		panic("ciphertext too short")
	}
	// Layout on disk: IV || ciphertext, base64 encoded.
	iv := ciphertext[:aes.BlockSize]
	ciphertext = ciphertext[aes.BlockSize:]
	stream := cipher.NewCFBDecrypter(block, iv)
	// CFB decryption in place.
	stream.XORKeyStream(ciphertext, ciphertext)
	return iv, string(ciphertext)
}

// encryptFile encrypts text with AES-CFB under the hex-encoded key and the
// given IV, and writes base64(IV || ciphertext) to path (creating parent
// directories as needed). Any failure panics.
func encryptFile(keyS, text, path string, iv []byte) {
	key, err := hex.DecodeString(keyS)
	if err != nil {
		panic(err)
	}
	plaintext := []byte(text)
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	// Prepend the IV so decryptFile can recover it.
	ciphertext := make([]byte, aes.BlockSize+len(plaintext))
	copy(ciphertext[:aes.BlockSize], iv)
	stream := cipher.NewCFBEncrypter(block, iv)
	stream.XORKeyStream(ciphertext[aes.BlockSize:], plaintext)
	dir := filepath.Dir(path)
	os.MkdirAll(dir, 0755)
	b64 := base64.StdEncoding.EncodeToString(ciphertext)
	err = os.WriteFile(path, []byte(b64), 0600)
	if err != nil {
		panic(err)
	}
}
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.

package wire

import (
	"encoding/json"
	"strings"
	"unicode/utf8"
)

// SenderToReceiver is the remittance information
type SenderToReceiver struct {
	// tag is the fixed WIRE tag identifying this record type
	tag string
	// CoverPayment holds the SWIFT field tag and the six free-text lines
	CoverPayment CoverPayment `json:"coverPayment,omitempty"`

	// validator is composed for data validation
	validator
	// converters is composed for WIRE to GoLang Converters
	converters
}

// NewSenderToReceiver returns a new SenderToReceiver
func NewSenderToReceiver() *SenderToReceiver {
	str := &SenderToReceiver{
		tag: TagSenderToReceiver,
	}
	return str
}

// Parse takes the input string and parses the SenderToReceiver values
//
// Parse provides no guarantee about all fields being filled in. Callers should make a Validate() call to confirm
// successful parsing and data validity.
func (str *SenderToReceiver) Parse(record string) error {
	if utf8.RuneCountInString(record) < 6 {
		return NewTagMinLengthErr(6, len(record))
	}

	// The first 6 characters are the tag; the variable-length fields follow
	// in a fixed order, each capped at its WIRE maximum width.
	str.tag = record[:6]
	length := 6

	value, read, err := str.parseVariableStringField(record[length:], 5)
	if err != nil {
		return fieldError("SwiftFieldTag", err)
	}
	str.CoverPayment.SwiftFieldTag = value
	length += read

	value, read, err = str.parseVariableStringField(record[length:], 35)
	if err != nil {
		return fieldError("SwiftLineOne", err)
	}
	str.CoverPayment.SwiftLineOne = value
	length += read

	value, read, err = str.parseVariableStringField(record[length:], 35)
	if err != nil {
		return fieldError("SwiftLineTwo", err)
	}
	str.CoverPayment.SwiftLineTwo = value
	length += read

	value, read, err = str.parseVariableStringField(record[length:], 35)
	if err != nil {
		return fieldError("SwiftLineThree", err)
	}
	str.CoverPayment.SwiftLineThree = value
	length += read

	value, read, err = str.parseVariableStringField(record[length:], 35)
	if err != nil {
		return fieldError("SwiftLineFour", err)
	}
	str.CoverPayment.SwiftLineFour = value
	length += read

	value, read, err = str.parseVariableStringField(record[length:], 35)
	if err != nil {
		return fieldError("SwiftLineFive", err)
	}
	str.CoverPayment.SwiftLineFive = value
	length += read

	value, read, err = str.parseVariableStringField(record[length:], 35)
	if err != nil {
		return fieldError("SwiftLineSix", err)
	}
	str.CoverPayment.SwiftLineSix = value
	length += read

	if err := str.verifyDataWithReadLength(record, length); err != nil {
		return NewTagMaxLengthErr(err)
	}

	return nil
}

// UnmarshalJSON decodes the JSON payload into the SenderToReceiver and then
// restores the tag, which is unexported and therefore not present in JSON.
func (str *SenderToReceiver) UnmarshalJSON(data []byte) error {
	type Alias SenderToReceiver
	aux := struct {
		*Alias
	}{
		(*Alias)(str),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	str.tag = TagSenderToReceiver
	return nil
}

// String returns a fixed-width SenderToReceiver record
func (str *SenderToReceiver) String() string {
	return str.Format(FormatOptions{
		VariableLengthFields: false,
	})
}

// Format returns a SenderToReceiver record formatted according to the FormatOptions
func (str *SenderToReceiver) Format(options FormatOptions) string {
	var buf strings.Builder
	// 221 = 6 (tag) + 5 (field tag) + 6*35 (lines); pre-size to avoid growth.
	buf.Grow(221)

	buf.WriteString(str.tag)
	buf.WriteString(str.FormatSwiftFieldTag(options))
	buf.WriteString(str.FormatSwiftLineOne(options))
	buf.WriteString(str.FormatSwiftLineTwo(options))
	buf.WriteString(str.FormatSwiftLineThree(options))
	buf.WriteString(str.FormatSwiftLineFour(options))
	buf.WriteString(str.FormatSwiftLineFive(options))
	buf.WriteString(str.FormatSwiftLineSix(options))

	if options.VariableLengthFields {
		return str.stripDelimiters(buf.String())
	} else {
		return buf.String()
	}
}

// Validate performs WIRE format rule checks on SenderToReceiver and returns an error if not Validated
// The first error encountered is returned and stops that parsing.
func (str *SenderToReceiver) Validate() error {
	if str.tag != TagSenderToReceiver {
		return fieldError("tag", ErrValidTagForType, str.tag)
	}
	if err := str.isAlphanumeric(str.CoverPayment.SwiftFieldTag); err != nil {
		return fieldError("SwiftFieldTag", err, str.CoverPayment.SwiftFieldTag)
	}
	if err := str.isAlphanumeric(str.CoverPayment.SwiftLineOne); err != nil {
		return fieldError("SwiftLineOne", err, str.CoverPayment.SwiftLineOne)
	}
	if err := str.isAlphanumeric(str.CoverPayment.SwiftLineTwo); err != nil {
		return fieldError("SwiftLineTwo", err, str.CoverPayment.SwiftLineTwo)
	}
	if err := str.isAlphanumeric(str.CoverPayment.SwiftLineThree); err != nil {
		return fieldError("SwiftLineThree", err, str.CoverPayment.SwiftLineThree)
	}
	if err := str.isAlphanumeric(str.CoverPayment.SwiftLineFour); err != nil {
		return fieldError("SwiftLineFour", err, str.CoverPayment.SwiftLineFour)
	}
	if err := str.isAlphanumeric(str.CoverPayment.SwiftLineFive); err != nil {
		return fieldError("SwiftLineFive", err, str.CoverPayment.SwiftLineFive)
	}
	if err := str.isAlphanumeric(str.CoverPayment.SwiftLineSix); err != nil {
		return fieldError("SwiftLineSix", err, str.CoverPayment.SwiftLineSix)
	}
	return nil
}

// SwiftFieldTagField gets a string of the SwiftFieldTag field
func (str *SenderToReceiver) SwiftFieldTagField() string {
	return str.alphaField(str.CoverPayment.SwiftFieldTag, 5)
}

// SwiftLineOneField gets a string of the SwiftLineOne field
func (str *SenderToReceiver) SwiftLineOneField() string {
	return str.alphaField(str.CoverPayment.SwiftLineOne, 35)
}

// SwiftLineTwoField gets a string of the SwiftLineTwo field
func (str *SenderToReceiver) SwiftLineTwoField() string {
	return str.alphaField(str.CoverPayment.SwiftLineTwo, 35)
}

// SwiftLineThreeField gets a string of the SwiftLineThree field
func (str *SenderToReceiver) SwiftLineThreeField() string {
	return str.alphaField(str.CoverPayment.SwiftLineThree, 35)
}

// SwiftLineFourField gets a string of the SwiftLineFour field
func (str *SenderToReceiver) SwiftLineFourField() string {
	return str.alphaField(str.CoverPayment.SwiftLineFour, 35)
}

// SwiftLineFiveField gets a string of the SwiftLineFive field
func (str *SenderToReceiver) SwiftLineFiveField() string {
	return str.alphaField(str.CoverPayment.SwiftLineFive, 35)
}

// SwiftLineSixField gets a string of the SwiftLineSix field
func (str *SenderToReceiver) SwiftLineSixField() string {
	return str.alphaField(str.CoverPayment.SwiftLineSix, 35)
}

// FormatSwiftFieldTag returns CoverPayment.SwiftFieldTag formatted according to the FormatOptions
func (str *SenderToReceiver) FormatSwiftFieldTag(options FormatOptions) string {
	return str.formatAlphaField(str.CoverPayment.SwiftFieldTag, 5, options)
}

// FormatSwiftLineOne returns CoverPayment.SwiftLineOne formatted according to the FormatOptions
func (str *SenderToReceiver) FormatSwiftLineOne(options FormatOptions) string {
	return str.formatAlphaField(str.CoverPayment.SwiftLineOne, 35, options)
}

// FormatSwiftLineTwo returns CoverPayment.SwiftLineTwo formatted according to the FormatOptions
func (str *SenderToReceiver) FormatSwiftLineTwo(options FormatOptions) string {
	return str.formatAlphaField(str.CoverPayment.SwiftLineTwo, 35, options)
}

// FormatSwiftLineThree returns CoverPayment.SwiftLineThree formatted according to the FormatOptions
func (str *SenderToReceiver) FormatSwiftLineThree(options FormatOptions) string {
	return str.formatAlphaField(str.CoverPayment.SwiftLineThree, 35, options)
}

// FormatSwiftLineFour returns CoverPayment.SwiftLineFour formatted according to the FormatOptions
func (str *SenderToReceiver) FormatSwiftLineFour(options FormatOptions) string {
	return str.formatAlphaField(str.CoverPayment.SwiftLineFour, 35, options)
}

// FormatSwiftLineFive returns CoverPayment.SwiftLineFive formatted according to the FormatOptions
func (str *SenderToReceiver) FormatSwiftLineFive(options FormatOptions) string {
	return str.formatAlphaField(str.CoverPayment.SwiftLineFive, 35, options)
}

// FormatSwiftLineSix returns CoverPayment.SwiftLineSix formatted according to the FormatOptions
func (str *SenderToReceiver) FormatSwiftLineSix(options FormatOptions) string {
	return str.formatAlphaField(str.CoverPayment.SwiftLineSix, 35, options)
}
package main // I've named my bluetooth device 'Muse', so to get the muse device // to stream to port 5000, I use: // muse-io.exe --device Muse --osc osc.udp://localhost:5000 import ( "fmt" "github.com/hypebeast/go-osc/osc" "golang.org/x/net/websocket" "net/http" // "runtime" "time" ) const ( // From testing with MuseLab, eeg[0] only drops under about 700 when I blink. // NOTE: can't use /muse/elements/blink, it's too laggy. BLINK_EEG_THRESHOLD = 700 // Avoid double-counting blinks by forcing a time gap between them. BLINK_TIME_THRESHOLD_MS = 150.0 ) // A server that forwards OSC events to clients over websockets. type Server struct { activeClients []*websocket.Conn activeMessages []chan string } // Utility to serve the static files used for the app. func serveStaticFiles(fromDirectory string, toHttpPrefix string) { asPath := fmt.Sprintf("/%s/", toHttpPrefix) fmt.Printf("Serving %s as %s\n", fromDirectory, toHttpPrefix) fs := http.FileServer(http.Dir(fromDirectory)) http.Handle(asPath, disableCache(http.StripPrefix(asPath, fs))) } func disableCache(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Set("Pragma", "no-cache") w.Header().Set("Expires", "0") h.ServeHTTP(w, r) }) } // Save the connection, replacing any previous one. func (s *Server) socketHandler(ws *websocket.Conn) { fmt.Printf("Client connected!\n") // PICK: Properly clean up on client disconnect instead? if len(s.activeClients) == 1 { s.activeClients[0].Close() close(s.activeMessages[0]) s.activeClients[0] = ws s.activeMessages[0] = make(chan string) } else { s.activeClients = append(s.activeClients, ws) s.activeMessages = append(s.activeMessages, make(chan string)) } // Continually wait for messages to forward to client. for msg := range s.activeMessages[0] { ws.Write([]byte(msg)) } } // Launches a server to serve the static files and respond to websocket connections. 
func (s *Server) openWebServer(port int) { fmt.Printf("Opening WebServer on :%d...\n", port) serveStaticFiles("./static", "file") http.Handle("/sock", websocket.Handler(s.socketHandler)) // NOTE: blocking. err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil) if err != nil { panic("ListenAndServe: " + err.Error()) } } // Launches a server to stream OSC data, find events, and forward to clients. func (s *Server) openOscServer(port int) { fmt.Printf("Opening OSCserver on :%d...\n", port) server := &osc.Server{ Addr: fmt.Sprintf("127.0.0.1:%d", port), } lastBlinkTime := time.Now() server.Handle("/muse/eeg", func(msg *osc.Message) { maybeBlink := (msg.Arguments[0].(float32) < BLINK_EEG_THRESHOLD) if maybeBlink { blinkTime := time.Now() msSinceLast := blinkTime.Sub(lastBlinkTime).Seconds() * 1000 if msSinceLast > BLINK_TIME_THRESHOLD_MS { lastBlinkTime = blinkTime s.HandleBlink() } } }) server.ListenAndServe() } // Process a single blink identified by the server. func (s *Server) HandleBlink() { fmt.Printf("Blinked!\n") msg := fmt.Sprintf("%d", 0) // Blink = '0' for _, client := range s.activeMessages { client <- msg } } func main() { server := &Server { make([]*websocket.Conn, 0, 1), make([]chan string, 0, 1), } go server.openOscServer(5000) server.openWebServer(8888) }
package translated import "github.com/stephens2424/php/passes/togo/internal/phpctx" func If(ctx phpctx.PHPContext) { if "hello" == "world" { } }
package v1 import ( kommonsv1 "github.com/flanksource/kommons/api/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +kubebuilder:object:root=true type ElasticsearchDB struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec ElasticsearchDBSpec `json:"spec,omitempty"` Status ElasticsearchDBStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true type ElasticsearchDBList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []ElasticsearchDB `json:"items"` } type ElasticsearchDBSpec struct { PodResources `json:",inline"` Domain string `json:"domain,omitempty"` Version string `json:"version,omitempty"` // +kubebuilder:validation:Optional Storage Storage `json:"storage,omitempty"` // +kubebuilder:validation:Optional Heap string `json:"heap,omitempty"` // +kubebuilder:validation:Optional Ingress Ingress `json:"ingress,omitempty"` } type ElasticsearchDBStatus struct { Conditions kommonsv1.ConditionList `json:"conditions"` } type Ingress struct { Annotations map[string]string `json:"annotations,omitempty"` } func init() { SchemeBuilder.Register(&ElasticsearchDB{}, &ElasticsearchDBList{}) }
package option var defaultAccountLevel = []int{1, 10, 100, 500} // AccountLevel is used for rank the accounts type AccountLevel struct { // account balance Threshold []int `yaml:"Threshold"` }
package tiered_cache import ( "fmt" "time" "github.com/minotar/imgd/pkg/cache" ) const MIN_RECACHE_TTL = time.Duration(1) * time.Minute type TieredCache struct { *TieredCacheConfig } type TieredCacheConfig struct { cache.CacheConfig Caches []cache.Cache } var _ cache.Cache = new(TieredCache) func NewTieredCache(cfg *TieredCacheConfig) (*TieredCache, error) { cfg.Logger.Infof("initializing TieredCache with %d cache(s)", len(cfg.Caches)) tc := &TieredCache{TieredCacheConfig: cfg} cfg.Logger.Infof("initialized TieredCache \"%s\"", tc.Name()) return tc, nil } func (tc *TieredCache) Name() string { return tc.CacheConfig.Name } func (tc *TieredCache) Insert(key string, value []byte) error { // InsertTTL with a TTL of 0 is added with no expiry return tc.InsertTTL(key, value, 0) } func (tc *TieredCache) cacheInsert(endCacheID int, key string, value []byte, ttl time.Duration) []error { var errors []error // endCacheID is how many of the caches to insert into for i, c := range tc.Caches[:endCacheID] { tc.Logger.Debugf("Inserting key \"%s\" into cache %d (%s)", key, i, c.Name()) err := c.InsertTTL(key, value, ttl) if err != nil { tc.Logger.Errorf("Error inserting key \"%s\" into cache %d (%s): %s", key, i, c.Name(), err) errors = append(errors, err) } } return errors } func (tc *TieredCache) InsertTTL(key string, value []byte, ttl time.Duration) error { // We want to insert into all caches, so the end ID is the number of caches endCacheID := len(tc.Caches) errors := tc.cacheInsert(endCacheID, key, value, ttl) if errors != nil { return fmt.Errorf("error(s) inserting \"%s\" into cache(s): %+v", key, errors) } return nil } func (tc *TieredCache) updateCaches(cacheID int, key string, value []byte) { validCache := tc.Caches[cacheID] ttl, err := validCache.TTL(key) if err == cache.ErrNotFound { // Likely that the key expired within the split second since? 
tc.Logger.Infof("Cache %d (%s) reports key \"%s\" is now a cache.ErrNotFound", cacheID, validCache.Name(), key) return } else if err == cache.ErrNoExpiry { // It's a key which doesn't have an expiry set - possibly badly added to the Cache? tc.Logger.Warnf("Cache %d (%s) reports key \"%s\" had no TTL/Expiry - not re-adding", cacheID, validCache.Name(), key) return } else if err != nil { // This is a cache related error (vs. a missing key/expiry) tc.Logger.Errorf("Cache %d (%s) reports key \"%s\" with TTL err: %s\n", cacheID, validCache.Name(), key, err) return } if ttl < MIN_RECACHE_TTL { tc.Logger.Debugf("TTL of key \"%s\" was less than a minute - not re-adding", key) return } // cacheID was the cache the data came from, so we insert in the caches before that tc.cacheInsert(cacheID, key, value, ttl) } func (tc *TieredCache) Retrieve(key string) ([]byte, error) { var errors []error for i, c := range tc.Caches { tc.Logger.Debugf("Retrieving \"%s\" from cache %d \"%s\"", key, i, c.Name()) value, err := c.Retrieve(key) if err == cache.ErrNotFound { // errors logic at end handles ErrNotFound continue } else if err != nil { // This is a cache related error (vs. 
a missing key) tc.Logger.Errorf("Error retrieving key \"%s\" from cache %d (%s): %s", key, i, c.Name(), err) errors = append(errors, err) continue } // We had a hit - we should update the earlier caches go tc.updateCaches(i, key, value) return value, nil } // If we had a genuine error, `errors` would be populated, otherwise, it must just be ErrNotFound if errors != nil { return nil, fmt.Errorf("error(s) retrieving \"%s\" from cache(s): %+v", key, errors) } return nil, cache.ErrNotFound } // Probably won't be used too much func (tc *TieredCache) TTL(key string) (time.Duration, error) { var errors []error for i, c := range tc.Caches { tc.Logger.Debugf("Getting TTL of key \"%s\" from cache %d (%s)", key, i, c.Name()) ttl, err := c.TTL(key) if err == cache.ErrNoExpiry { // Record has no expiry - we trust the first cache with this response // It's important the cache signified between NoExpiry vs. NotFound! return ttl, err } else if err != nil { // Todo: Probably just print here? errors = append(errors, err) continue } return ttl, err } return 0, errors[len(errors)-1] } func (tc *TieredCache) Remove(key string) error { var errors []error for i, c := range tc.Caches { tc.Logger.Debugf("Removing key \"%s\" from cache %d (%s)", key, i, c.Name()) err := c.Remove(key) if err != nil { tc.Logger.Errorf("Error removing key \"%s\" from cache %d (%s): %s", key, i, c.Name(), err) errors = append(errors, err) } } if errors != nil { return fmt.Errorf("error(s) removing \"%s\" from cache(s): %+v", key, errors) } return nil } func (tc *TieredCache) Flush() error { var errors []error for i, c := range tc.Caches { tc.Logger.Debugf("Flushing cache %d (%s)", i, c.Name()) err := c.Flush() if err != nil { tc.Logger.Errorf("Error flushing cache %d (%s): %s", i, c.Name(), err) errors = append(errors, err) } } if errors != nil { return fmt.Errorf("error(s) flushing cache(s): %+v", errors) } return nil } func (tc *TieredCache) Len() uint { var maxLen uint for i, c := range tc.Caches { 
tc.Logger.Debugf("Getting length of cache %d (%s)", i, c.Name()) cacheLen := c.Len() tc.Logger.Debugf("Length of cache %d (%s) is %d", i, c.Name(), cacheLen) if cacheLen > maxLen { maxLen = cacheLen } } return maxLen } func (tc *TieredCache) Size() uint64 { var maxSize uint64 for i, c := range tc.Caches { tc.Logger.Debugf("Getting size of cache %d (%s)", i, c.Name()) cacheSize := c.Size() tc.Logger.Debugf("Size of cache %d (%s) is %d", i, c.Name(), cacheSize) if cacheSize > maxSize { maxSize = cacheSize } } return maxSize } func (tc *TieredCache) Start() { tc.Logger.Info("starting TieredCache") for i, c := range tc.Caches { tc.Logger.Infof("starting cache %d \"%s\"", i, c.Name()) c.Start() } } func (tc *TieredCache) Stop() { tc.Logger.Info("stopping TieredCache") for i, c := range tc.Caches { tc.Logger.Infof("stopping cache %d \"%s\"", i, c.Name()) c.Stop() } } func (tc *TieredCache) Close() { tc.Logger.Debug("closing TieredCache") tc.Stop() for i, c := range tc.Caches { tc.Logger.Debugf("closing cache %d \"%s\"", i, c.Name()) c.Close() } }
package config import ( "encoding/json" "io/ioutil" "log" ) var Config struct { VerifyURL string FromEmail string EmailSendingPasswd string AdminUser string Token string } func SetConfig(file string) { conf, err := ioutil.ReadFile(file) if err != nil { log.Fatalln(err.Error()) } err = json.Unmarshal(conf, &Config) if err != nil { log.Fatalln(err.Error()) } }
package transform

import (
	"encoding/json"
	"fmt"

	snapshot "github.com/pganalyze/collector/output/pganalyze_collector"
	"github.com/pganalyze/collector/state"
	uuid "github.com/satori/go.uuid"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// LogStateToLogSnapshot converts the collected transient log state into the
// compact log snapshot wire format, returning the snapshot together with the
// base references (roles, databases, queries) its index fields point into.
func LogStateToLogSnapshot(server *state.Server, logState state.TransientLogState) (snapshot.CompactLogSnapshot, snapshot.CompactSnapshot_BaseRefs) {
	var s snapshot.CompactLogSnapshot
	var r snapshot.CompactSnapshot_BaseRefs
	s, r = transformPostgresQuerySamples(server, s, r, logState)
	s, r = transformSystemLogs(server, s, r, logState)
	return s, r
}

// transformPostgresQuerySamples appends one snapshot.QuerySample per collected
// query sample, upserting role/database/query references into r as needed.
func transformPostgresQuerySamples(server *state.Server, s snapshot.CompactLogSnapshot, r snapshot.CompactSnapshot_BaseRefs, logState state.TransientLogState) (snapshot.CompactLogSnapshot, snapshot.CompactSnapshot_BaseRefs) {
	for _, sampleIn := range logState.QuerySamples {
		occurredAt := timestamppb.New(sampleIn.OccurredAt)
		// A sample without query text cannot be attributed to a query - skip it.
		if sampleIn.Query == "" {
			continue
		}
		// Fall back to the configured connection user/database when the
		// sample did not carry them.
		if sampleIn.Username == "" {
			sampleIn.Username = server.Config.GetDbUsername()
		}
		if sampleIn.Database == "" {
			sampleIn.Database = server.Config.GetDbName()
		}
		var roleIdx, databaseIdx, queryIdx int32
		roleIdx, r.RoleReferences = upsertRoleReference(r.RoleReferences, sampleIn.Username)
		databaseIdx, r.DatabaseReferences = upsertDatabaseReference(r.DatabaseReferences, sampleIn.Database)
		// NOTE(review): the trailing -1 appears to be a "not known" sentinel -
		// confirm its meaning against upsertQueryReferenceAndInformationSimple.
		queryIdx, r.QueryReferences, r.QueryInformations = upsertQueryReferenceAndInformationSimple(
			server,
			r.QueryReferences,
			r.QueryInformations,
			roleIdx,
			databaseIdx,
			sampleIn.Query,
			-1,
		)
		// Parameters are emitted in two shapes: NullString (distinguishes SQL
		// NULL from empty string) and the legacy plain-string list.
		var parameters []*snapshot.NullString
		var parametersLegacy []string
		for _, param := range sampleIn.Parameters {
			if param.Valid {
				parameters = append(parameters, &snapshot.NullString{Valid: true, Value: param.String})
				parametersLegacy = append(parametersLegacy, param.String)
			} else {
				parameters = append(parameters, &snapshot.NullString{Valid: false, Value: ""})
				parametersLegacy = append(parametersLegacy, "")
			}
		}
		var explainOutput string
		if sampleIn.ExplainFormat == snapshot.QuerySample_JSON_EXPLAIN_FORMAT && sampleIn.ExplainOutputJSON != nil {
			explainJSON, err := json.Marshal(sampleIn.ExplainOutputJSON)
			if err != nil {
				sampleIn.ExplainError = fmt.Sprintf("failed to marshal EXPLAIN JSON during collector output phase: %s", err)
			} else {
				// Reformat JSON so it's the same as when using EXPLAIN (FORMAT JSON)
				explainOutput = "[" + string(explainJSON) + "]"
			}
		} else if sampleIn.ExplainFormat == snapshot.QuerySample_TEXT_EXPLAIN_FORMAT && sampleIn.ExplainOutputText != "" {
			explainOutput = sampleIn.ExplainOutputText
		}
		sample := snapshot.QuerySample{
			QueryIdx:         queryIdx,
			OccurredAt:       occurredAt,
			RuntimeMs:        sampleIn.RuntimeMs,
			LogLineUuid:      sampleIn.LogLineUUID.String(),
			QueryText:        sampleIn.Query,
			Parameters:       parameters,
			ParametersLegacy: parametersLegacy,
			HasExplain:       sampleIn.HasExplain,
			ExplainSource:    sampleIn.ExplainSource,
			ExplainFormat:    sampleIn.ExplainFormat,
			ExplainOutput:    explainOutput,
			ExplainError:     sampleIn.ExplainError,
		}
		s.QuerySamples = append(s.QuerySamples, &sample)
	}
	return s, r
}

// transformSystemLogs appends one LogFileReference per collected log file
// (mapping internal secret-filter kinds to their protobuf equivalents) and
// one LogLineInformation per log line within each file.
func transformSystemLogs(server *state.Server, s snapshot.CompactLogSnapshot, r snapshot.CompactSnapshot_BaseRefs, logState state.TransientLogState) (snapshot.CompactLogSnapshot, snapshot.CompactSnapshot_BaseRefs) {
	for _, logFileIn := range logState.LogFiles {
		// Log lines reference their file by position in LogFileReferences.
		fileIdx := int32(len(s.LogFileReferences))
		logFileReference := &snapshot.LogFileReference{
			Uuid:         logFileIn.UUID.String(),
			S3Location:   logFileIn.S3Location,
			S3CekAlgo:    logFileIn.S3CekAlgo,
			S3CmkKeyId:   logFileIn.S3CmkKeyID,
			ByteSize:     logFileIn.ByteSize,
			OriginalName: logFileIn.OriginalName,
		}
		// Translate each internal secret-filter kind to the wire enum.
		for _, kind := range logFileIn.FilterLogSecret {
			switch kind {
			case state.CredentialLogSecret:
				logFileReference.FilterLogSecret = append(logFileReference.FilterLogSecret, snapshot.LogFileReference_CREDENTIAL_LOG_SECRET)
			case state.ParsingErrorLogSecret:
				logFileReference.FilterLogSecret = append(logFileReference.FilterLogSecret, snapshot.LogFileReference_PARSING_ERROR_LOG_SECRET)
			case state.StatementTextLogSecret:
				logFileReference.FilterLogSecret = append(logFileReference.FilterLogSecret, snapshot.LogFileReference_STATEMENT_TEXT_LOG_SECRET)
			case state.StatementParameterLogSecret:
				logFileReference.FilterLogSecret = append(logFileReference.FilterLogSecret, snapshot.LogFileReference_STATEMENT_PARAMETER_LOG_SECRET)
			case state.TableDataLogSecret:
				logFileReference.FilterLogSecret = append(logFileReference.FilterLogSecret, snapshot.LogFileReference_TABLE_DATA_LOG_SECRET)
			case state.OpsLogSecret:
				logFileReference.FilterLogSecret = append(logFileReference.FilterLogSecret, snapshot.LogFileReference_OPS_LOG_SECRET)
			case state.UnidentifiedLogSecret:
				logFileReference.FilterLogSecret = append(logFileReference.FilterLogSecret, snapshot.LogFileReference_UNIDENTIFIED_LOG_SECRET)
			}
		}
		s.LogFileReferences = append(s.LogFileReferences, logFileReference)
		for _, logLineIn := range logFileIn.LogLines {
			logLine := transformSystemLogLine(server, &r, fileIdx, logLineIn)
			s.LogLineInformations = append(s.LogLineInformations, &logLine)
		}
	}
	return s, r
}

// transformSystemLogLine converts one parsed log line to its wire form,
// upserting role/database/relation/query references into r. Each optional
// index is paired with a Has*Idx flag so 0 can be distinguished from unset.
func transformSystemLogLine(server *state.Server, r *snapshot.CompactSnapshot_BaseRefs, logFileIdx int32, logLineIn state.LogLine) snapshot.LogLineInformation {
	occurredAt := timestamppb.New(logLineIn.OccurredAt)
	logLine := snapshot.LogLineInformation{
		LogFileIdx:       logFileIdx,
		Uuid:             logLineIn.UUID.String(),
		ByteStart:        logLineIn.ByteStart,
		ByteContentStart: logLineIn.ByteContentStart,
		ByteEnd:          logLineIn.ByteEnd,
		OccurredAt:       occurredAt,
		BackendPid:       logLineIn.BackendPid,
		Level:            logLineIn.LogLevel,
		Classification:   logLineIn.Classification,
		RelatedPids:      logLineIn.RelatedPids,
	}
	// uuid.Nil means "no parent" (e.g. not a DETAIL/CONTEXT continuation line).
	if logLineIn.ParentUUID != uuid.Nil {
		logLine.ParentUuid = logLineIn.ParentUUID.String()
	}
	if logLineIn.Details != nil {
		// Marshal failures intentionally leave DetailsJson empty rather than
		// dropping the whole line.
		detailsJson, err := json.Marshal(logLineIn.Details)
		if err == nil {
			logLine.DetailsJson = string(detailsJson)
		}
	}
	if logLineIn.Username != "" {
		logLine.RoleIdx, r.RoleReferences = upsertRoleReference(r.RoleReferences, logLineIn.Username)
		logLine.HasRoleIdx = true
	}
	if logLineIn.Database != "" {
		logLine.DatabaseIdx, r.DatabaseReferences = upsertDatabaseReference(r.DatabaseReferences, logLineIn.Database)
		logLine.HasDatabaseIdx = true
		// Relations are scoped to a database, so this requires DatabaseIdx.
		if logLineIn.SchemaName != "" && logLineIn.RelationName != "" {
			logLine.RelationIdx, r.RelationReferences = upsertRelationReference(r.RelationReferences, logLine.DatabaseIdx, logLineIn.SchemaName, logLineIn.RelationName)
			logLine.HasRelationIdx = true
		}
	}
	// Query references need both a role and a database to be attributable.
	if logLine.HasRoleIdx && logLine.HasDatabaseIdx && logLineIn.Query != "" {
		logLine.QueryIdx, r.QueryReferences, r.QueryInformations = upsertQueryReferenceAndInformationSimple(
			server,
			r.QueryReferences,
			r.QueryInformations,
			logLine.RoleIdx,
			logLine.DatabaseIdx,
			logLineIn.Query,
			-1,
		)
		logLine.HasQueryIdx = true
	}
	return logLine
}
package odoo

import (
	"fmt"
)

// AccountPayment represents account.payment model.
// NOTE(review): every xmlrpc tag spells the option "omptempty" - presumably
// "omitempty" was intended, in which case the option never takes effect.
// Tags are runtime strings shared across this generated package, so they are
// left untouched here - confirm against the xmlrpc encoder before fixing.
type AccountPayment struct {
	LastUpdate               *Time      `xmlrpc:"__last_update,omptempty"`
	Amount                   *Float     `xmlrpc:"amount,omptempty"`
	Communication            *String    `xmlrpc:"communication,omptempty"`
	CompanyId                *Many2One  `xmlrpc:"company_id,omptempty"`
	CreateDate               *Time      `xmlrpc:"create_date,omptempty"`
	CreateUid                *Many2One  `xmlrpc:"create_uid,omptempty"`
	CurrencyId               *Many2One  `xmlrpc:"currency_id,omptempty"`
	DestinationAccountId     *Many2One  `xmlrpc:"destination_account_id,omptempty"`
	DestinationJournalId     *Many2One  `xmlrpc:"destination_journal_id,omptempty"`
	DisplayName              *String    `xmlrpc:"display_name,omptempty"`
	HasInvoices              *Bool      `xmlrpc:"has_invoices,omptempty"`
	HidePaymentMethod        *Bool      `xmlrpc:"hide_payment_method,omptempty"`
	Id                       *Int       `xmlrpc:"id,omptempty"`
	InvoiceIds               *Relation  `xmlrpc:"invoice_ids,omptempty"`
	JournalId                *Many2One  `xmlrpc:"journal_id,omptempty"`
	MessageChannelIds        *Relation  `xmlrpc:"message_channel_ids,omptempty"`
	MessageFollowerIds       *Relation  `xmlrpc:"message_follower_ids,omptempty"`
	MessageIds               *Relation  `xmlrpc:"message_ids,omptempty"`
	MessageIsFollower        *Bool      `xmlrpc:"message_is_follower,omptempty"`
	MessageLastPost          *Time      `xmlrpc:"message_last_post,omptempty"`
	MessageNeedaction        *Bool      `xmlrpc:"message_needaction,omptempty"`
	MessageNeedactionCounter *Int       `xmlrpc:"message_needaction_counter,omptempty"`
	MessagePartnerIds        *Relation  `xmlrpc:"message_partner_ids,omptempty"`
	MessageUnread            *Bool      `xmlrpc:"message_unread,omptempty"`
	MessageUnreadCounter     *Int       `xmlrpc:"message_unread_counter,omptempty"`
	MoveLineIds              *Relation  `xmlrpc:"move_line_ids,omptempty"`
	MoveName                 *String    `xmlrpc:"move_name,omptempty"`
	MoveReconciled           *Bool      `xmlrpc:"move_reconciled,omptempty"`
	Name                     *String    `xmlrpc:"name,omptempty"`
	PartnerId                *Many2One  `xmlrpc:"partner_id,omptempty"`
	PartnerType              *Selection `xmlrpc:"partner_type,omptempty"`
	PaymentDate              *Time      `xmlrpc:"payment_date,omptempty"`
	PaymentDifference        *Float     `xmlrpc:"payment_difference,omptempty"`
	PaymentDifferenceHandling *Selection `xmlrpc:"payment_difference_handling,omptempty"`
	PaymentMethodCode        *String    `xmlrpc:"payment_method_code,omptempty"`
	PaymentMethodId          *Many2One  `xmlrpc:"payment_method_id,omptempty"`
	PaymentReference         *String    `xmlrpc:"payment_reference,omptempty"`
	PaymentTokenId           *Many2One  `xmlrpc:"payment_token_id,omptempty"`
	PaymentTransactionId     *Many2One  `xmlrpc:"payment_transaction_id,omptempty"`
	PaymentType              *Selection `xmlrpc:"payment_type,omptempty"`
	State                    *Selection `xmlrpc:"state,omptempty"`
	WebsiteMessageIds        *Relation  `xmlrpc:"website_message_ids,omptempty"`
	WriteDate                *Time      `xmlrpc:"write_date,omptempty"`
	WriteUid                 *Many2One  `xmlrpc:"write_uid,omptempty"`
	WriteoffAccountId        *Many2One  `xmlrpc:"writeoff_account_id,omptempty"`
	WriteoffLabel            *String    `xmlrpc:"writeoff_label,omptempty"`
}

// AccountPayments represents array of account.payment model.
type AccountPayments []AccountPayment

// AccountPaymentModel is the odoo model name.
const AccountPaymentModel = "account.payment"

// Many2One convert AccountPayment to *Many2One.
func (ap *AccountPayment) Many2One() *Many2One {
	return NewMany2One(ap.Id.Get(), "")
}

// CreateAccountPayment creates a new account.payment model and returns its id.
func (c *Client) CreateAccountPayment(ap *AccountPayment) (int64, error) {
	ids, err := c.CreateAccountPayments([]*AccountPayment{ap})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}

// CreateAccountPayments creates new account.payment models and returns their ids.
func (c *Client) CreateAccountPayments(aps []*AccountPayment) ([]int64, error) {
	var vv []interface{}
	for _, v := range aps {
		vv = append(vv, v)
	}
	return c.Create(AccountPaymentModel, vv)
}

// UpdateAccountPayment updates an existing account.payment record.
// UpdateAccountPayment updates an existing account.payment record.
func (c *Client) UpdateAccountPayment(ap *AccountPayment) error {
	ids := []int64{ap.Id.Get()}
	return c.UpdateAccountPayments(ids, ap)
}

// UpdateAccountPayments updates existing account.payment records.
// All records (represented by ids) will be updated by ap values.
func (c *Client) UpdateAccountPayments(ids []int64, ap *AccountPayment) error {
	return c.Update(AccountPaymentModel, ids, ap)
}

// DeleteAccountPayment deletes an existing account.payment record.
func (c *Client) DeleteAccountPayment(id int64) error {
	ids := []int64{id}
	return c.DeleteAccountPayments(ids)
}

// DeleteAccountPayments deletes existing account.payment records.
func (c *Client) DeleteAccountPayments(ids []int64) error {
	return c.Delete(AccountPaymentModel, ids)
}

// GetAccountPayment gets account.payment existing record.
func (c *Client) GetAccountPayment(id int64) (*AccountPayment, error) {
	result, err := c.GetAccountPayments([]int64{id})
	if err != nil {
		return nil, err
	}
	if result != nil && len(*result) > 0 {
		return &((*result)[0]), nil
	}
	return nil, fmt.Errorf("id %v of account.payment not found", id)
}

// GetAccountPayments gets account.payment existing records.
func (c *Client) GetAccountPayments(ids []int64) (*AccountPayments, error) {
	result := &AccountPayments{}
	err := c.Read(AccountPaymentModel, ids, nil, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}

// FindAccountPayment finds account.payment record by querying it with criteria.
func (c *Client) FindAccountPayment(criteria *Criteria) (*AccountPayment, error) {
	result := &AccountPayments{}
	err := c.SearchRead(AccountPaymentModel, criteria, NewOptions().Limit(1), result)
	if err != nil {
		return nil, err
	}
	if result != nil && len(*result) > 0 {
		return &((*result)[0]), nil
	}
	return nil, fmt.Errorf("account.payment was not found with criteria %v", criteria)
}

// FindAccountPayments finds account.payment records by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountPayments(criteria *Criteria, options *Options) (*AccountPayments, error) { aps := &AccountPayments{} if err := c.SearchRead(AccountPaymentModel, criteria, options, aps); err != nil { return nil, err } return aps, nil } // FindAccountPaymentIds finds records ids by querying it // and filtering it with criteria and options. func (c *Client) FindAccountPaymentIds(criteria *Criteria, options *Options) ([]int64, error) { ids, err := c.Search(AccountPaymentModel, criteria, options) if err != nil { return []int64{}, err } return ids, nil } // FindAccountPaymentId finds record id by querying it with criteria. func (c *Client) FindAccountPaymentId(criteria *Criteria, options *Options) (int64, error) { ids, err := c.Search(AccountPaymentModel, criteria, options) if err != nil { return -1, err } if len(ids) > 0 { return ids[0], nil } return -1, fmt.Errorf("account.payment was not found with criteria %v and options %v", criteria, options) }
package main

import (
	"flag"
	"fmt"
	"net"
	"time"

	"github.com/khunafin/magazine/internal/producer"
)

// main dials the configured TCP server and produces the requested number of
// messages through the producer package, then exits.
func main() {
	fmt.Println("Start client...")

	addr := flag.String("server", ":7000", "server addr")
	msgCount := flag.Int("count", 5, "message count")
	flag.Parse()

	// One-second dial timeout; an unreachable server is fatal.
	conn, err := net.DialTimeout("tcp", *addr, time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	producer.New(conn, *msgCount).Produce()
	fmt.Println("Client stopped")
}
package controller

import (
	"net/http"
	"strings"
)

// KnownNodesRepository exposes the list of node addresses known to this instance.
type KnownNodesRepository interface {
	Get() []string
}

// KnownNodesController serves the known-node list over HTTP.
type KnownNodesController struct {
	repository KnownNodesRepository
}

// NewKnownNodesController builds a controller backed by the given repository.
func NewKnownNodesController(repository KnownNodesRepository) *KnownNodesController {
	return &KnownNodesController{repository: repository}
}

// Handle writes all known nodes to the response as a single
// comma-separated line; the write error is deliberately ignored.
func (k *KnownNodesController) Handle(w http.ResponseWriter, r *http.Request) {
	nodes := k.repository.Get()
	_, _ = w.Write([]byte(strings.Join(nodes, ",")))
}
package myloader import "github.com/mholt/caddy" func init(){ caddy.RegisterCaddyfileLoader("myloader", caddy.LoaderFunc(myLoader)) } func myLoader(serverType string)(caddy.Input, error){ return nil, nil }
package main

import "fmt"

// rangeBitwiseAnd returns the bitwise AND of every integer in [m, n].
// Only the bit prefix shared by m and n survives the AND: any bit position
// that can change within the range is zeroed somewhere in it. A power of two
// bounding n-m is used to mask away all such low bits.
func rangeBitwiseAnd(m int, n int) int {
	lo, hi := uint(m), uint(n)
	span := hi - lo

	// Smallest power of two >= span (stays 1 when span <= 1).
	mask := uint(1)
	for mask < span {
		mask <<= 1
	}

	// &^ clears every bit below mask, leaving the common prefix.
	return int(lo & hi &^ (mask - 1))
}

func main() {
	m, n := 0, 1
	fmt.Println(rangeBitwiseAnd(m, n))
}
package storage

import (
	"errors"
	"fmt"
	"io"
	"path/filepath"

	"github.com/768bit/promethium/lib/common"
	"github.com/768bit/promethium/lib/images"
	"github.com/768bit/vutils"
	"github.com/gobuffalo/envy"
	gozfs "github.com/mistifyio/go-zfs"
)

// ZFS_ROOT_PATH is the parent dataset for per-VM volumes; overridable via
// the PROMETHIUM_ZFS_ROOT_PATH environment variable.
var ZFS_ROOT_PATH = envy.Get("PROMETHIUM_ZFS_ROOT_PATH", "nvmepool0/promethium")

// ZfsStorage is a storage backend rooted at a ZFS dataset: images are .prk
// files under <mountpoint>/images; disks live in a separate dataset.
type ZfsStorage struct {
	sm          *StorageManager
	id          string
	rootZfsPath string
	mountPoint  string
	ds          *gozfs.Dataset
	disksDs     *gozfs.Dataset
	// imagesCache maps image ID -> file name without the ".prk" extension.
	imagesCache   map[string]string
	imagesFolder  string
	imagesEnabled bool
	disksEnabled  bool
}

// LoadZfsStorage opens the dataset at zfsPath and wires up a ZfsStorage.
// Images/disks support is disabled (not an error) when the images folder or
// the disks dataset is missing.
func LoadZfsStorage(sm *StorageManager, id string, zfsPath string) (*ZfsStorage, error) {
	//check the dataset exists
	ds, err := gozfs.GetDataset(zfsPath)
	if err != nil {
		return nil, err
	}
	//it exists...
	imagesFolder := filepath.Join(ds.Mountpoint, "images")
	zfsStorage := &ZfsStorage{
		sm:           sm,
		id:           id,
		mountPoint:   ds.Mountpoint,
		imagesCache:  map[string]string{},
		rootZfsPath:  zfsPath,
		ds:           ds,
		imagesFolder: imagesFolder,
	}
	// NOTE(review): this looks up the same zfsPath as the root dataset -
	// presumably a "disks"/"vms" child dataset was intended; confirm.
	vmsDs, err := gozfs.GetDataset(zfsPath)
	if err != nil {
		zfsStorage.disksEnabled = false
	} else {
		zfsStorage.disksEnabled = true
		zfsStorage.disksDs = vmsDs
	}
	if !vutils.Files.CheckPathExists(imagesFolder) {
		zfsStorage.imagesEnabled = false
	} else {
		zfsStorage.imagesEnabled = true
	}
	return zfsStorage, nil
}

// isMounted reports whether the root dataset has a mountpoint that exists
// on the local filesystem.
func (zfs *ZfsStorage) isMounted() bool {
	//checks if the zfs dataset is mounted
	if zfs.ds.Mountpoint == "" {
		//it isnt mounted..
		return false
	} else if !vutils.Files.CheckPathExists(zfs.ds.Mountpoint) {
		return false
	} else {
		return true
	}
}

// GetURI returns the zfs:// URI identifying this storage backend.
func (zfs *ZfsStorage) GetURI() string {
	return "zfs://" + zfs.id
}

// LookupPath resolves a path within this backend.
// NOTE(review): currently ignores `path` and always returns the backend URI
// with found=false - appears to be a stub; confirm intended semantics.
func (zfs *ZfsStorage) LookupPath(path string) (string, bool, error) {
	return "zfs://" + zfs.id, false, nil
}

// GetImages lists all .prk images in the images folder, rebuilding the
// id -> file-name cache as a side effect.
func (zfs *ZfsStorage) GetImages() ([]common.Image, error) {
	//in zfs images are stored under a filesystem path..
	//vm disks are block devices (volumes)
	zfs.imagesCache = map[string]string{}
	if !zfs.isMounted() {
		return nil, errors.New("The ZFS Dataset is not mounted")
	} else if !zfs.imagesEnabled {
		//lets get all the images from the folder... if it exists
		return nil, errors.New("Unable to get images from this storage medium as it doesnt support images")
	} else {
		files := vutils.Files.GetFilesInDirWithExtension(zfs.imagesFolder, "prk")
		imagesList := []common.Image{}
		for _, file := range files {
			imageFilePath := filepath.Join(zfs.imagesFolder, file)
			img, err := images.LoadImageFromPrk(imageFilePath, zfs.sm.imagesCache)
			if err != nil {
				return nil, err
			}
			// Cache the file name minus its ".prk" extension (4 chars).
			zfs.imagesCache[img.ID] = file[:len(file)-4]
			imagesList = append(imagesList, img)
		}
		return imagesList, nil
	}
}

// GetImage loads a single image by its file name (without extension).
func (zfs *ZfsStorage) GetImage(name string) (common.Image, error) {
	//in zfs images are stored under a filesystem path..
	//vm disks are block devices (volumes)
	if !zfs.isMounted() {
		return nil, errors.New("The ZFS Dataset is not mounted")
	} else if !zfs.imagesEnabled {
		//lets get all the images from the folder... if it exists
		return nil, errors.New("Unable to get images from this storage medium as it doesnt support images")
	} else {
		imageFilePath := filepath.Join(zfs.imagesFolder, name+".prk")
		if !vutils.Files.CheckPathExists(imageFilePath) {
			return nil, errors.New("Unable to find image with name " + name)
		} else {
			img, err := images.LoadImageFromPrk(imageFilePath, zfs.sm.imagesCache)
			if err != nil {
				return nil, err
			}
			return img, nil
		}
	}
}

// GetImageById loads an image by its ID, consulting the id -> name cache and
// rebuilding it via GetImages on a miss.
func (zfs *ZfsStorage) GetImageById(id string) (common.Image, error) {
	//in zfs images are stored under a filesystem path..
	//vm disks are block devices (volumes)
	if !zfs.isMounted() {
		return nil, errors.New("The ZFS Dataset is not mounted")
	} else if !zfs.imagesEnabled {
		//lets get all the images from the folder... if it exists
		return nil, errors.New("Unable to get images from this storage medium as it doesnt support images")
	} else if v, ok := zfs.imagesCache[id]; !ok || v == "" {
		//not in cache - maybe we need to go get the list of items and rebuild cache..
		_, err := zfs.GetImages()
		if err != nil {
			return nil, err
		}
		if v, ok := zfs.imagesCache[id]; !ok || v == "" {
			return nil, errors.New("Unable to find image with id " + id)
		} else {
			return zfs.GetImage(zfs.imagesCache[id])
		}
	} else {
		imageFilePath := filepath.Join(zfs.imagesFolder, zfs.imagesCache[id]+".prk")
		if !vutils.Files.CheckPathExists(imageFilePath) {
			return nil, errors.New("Unable to find image with id " + id)
		} else {
			img, err := images.LoadImageFromPrk(imageFilePath, zfs.sm.imagesCache)
			if err != nil {
				return nil, err
			}
			return img, nil
		}
	}
}

// CreateDiskFromImage is not implemented yet for the ZFS backend.
func (zfs *ZfsStorage) CreateDiskFromImage(id string, img common.Image, size uint64) (*common.VmmStorageDisk, *common.VmmKernel, error) {
	return nil, nil, nil
}

// WriteKernel is not implemented yet for the ZFS backend.
func (zfs *ZfsStorage) WriteKernel(id string, source io.Reader) (string, error) {
	return "", nil
}

// WriteRootDisk is not implemented yet for the ZFS backend.
func (zfs *ZfsStorage) WriteRootDisk(id string, source io.Reader, sourceIsRaw bool, growPart bool) (string, error) {
	return "", nil
}

// WriteAdditionalDisk is not implemented yet for the ZFS backend.
func (zfs *ZfsStorage) WriteAdditionalDisk(id string, index int, source io.Reader, sourceIsRaw bool, growPart bool) (string, error) {
	return "", nil
}

// WriteCloudInit is not implemented yet for the ZFS backend.
func (zfs *ZfsStorage) WriteCloudInit(id string, source io.Reader) (string, error) {
	return "", nil
}

// ImportImageFromRdr is not implemented yet for the ZFS backend.
func (zfs *ZfsStorage) ImportImageFromRdr(stream io.ReadCloser) error {
	return nil
}

// SetZfsRootPath overrides the package-level root dataset path.
func SetZfsRootPath(newRootPath string) {
	ZFS_ROOT_PATH = newRootPath
}

// NewZfsStorageDrive creates (or loads, if it already exists) a ZFS volume
// of sizeMb megabytes under ZFS_ROOT_PATH/<id>.
func NewZfsStorageDrive(id string, sizeMb int) (*Zfs, error) {
	//make an instance of zfs and lets see if it exists or not...
	datasetPath := fmt.Sprintf("%s/%s", ZFS_ROOT_PATH, id)
	size := uint64(sizeMb) * 1024 * 1024
	zfs := &Zfs{
		id:          id,
		datasetPath: datasetPath,
		size:        size,
	}
	if err := zfs.createOrLoad(); err != nil {
		return nil, err
	}
	return zfs, nil
}

// Zfs wraps a single ZFS volume used as a VM drive.
type Zfs struct {
	id          string
	datasetPath string
	size        uint64 // requested size in bytes
	ds          *gozfs.Dataset
}

// createOrLoad creates the volume when absent, then loads it (which also
// reconciles the on-disk size with the requested size).
func (zfs *Zfs) createOrLoad() error {
	//if it exists load it up - check sizes - auto expand?
	if !zfs.exists() {
		//create the dataset...
		_, err := gozfs.CreateVolume(zfs.getDataSetPath(), zfs.size, map[string]string{})
		if err != nil {
			return err
		}
	}
	return zfs.load()
}

// getMbSize converts the requested byte size to whole megabytes.
func (zfs *Zfs) getMbSize() int {
	val := zfs.size / 1024 / 1024
	return int(val)
}

// exists reports whether the dataset is present.
func (zfs *Zfs) exists() bool {
	ds, err := gozfs.GetDataset(zfs.getDataSetPath())
	if err != nil || ds == nil {
		return false
	}
	return true
}

// getDataSetPath returns the full ZFS path of this volume.
func (zfs *Zfs) getDataSetPath() string {
	return zfs.datasetPath
}

// load opens the dataset and verifies/adjusts its size.
func (zfs *Zfs) load() error {
	ds, err := gozfs.GetDataset(zfs.getDataSetPath())
	if err != nil {
		return err
	}
	//process ds
	zfs.ds = ds
	return zfs.checkSize()
}

// checkSize grows the volume when the requested size exceeds the current
// one; shrinking is never attempted.
func (zfs *Zfs) checkSize() error {
	//check the size is correct...
	currMbSize := int(zfs.ds.Volsize / 1024 / 1024)
	selMbSize := zfs.getMbSize()
	if currMbSize < selMbSize {
		//attempt the resize
		return zfs.resize(selMbSize)
	} else if currMbSize == selMbSize {
		//all good...
	} else {
		//the requested size is less than image size - use the actual size instead..
	}
	return nil
}

// resize sets the volsize property and reloads the dataset.
func (zfs *Zfs) resize(newSizeMb int) error {
	//check the size is correct...
	if err := zfs.ds.SetProperty("volsize", fmt.Sprintf("%dM", newSizeMb)); err != nil {
		return err
	}
	return zfs.load()
}

// Destroy removes the volume (non-recursive).
func (zfs *Zfs) Destroy() error {
	return zfs.ds.Destroy(gozfs.DestroyDefault)
}

// DestroyRecursive removes the volume and all descendants/snapshots.
func (zfs *Zfs) DestroyRecursive() error {
	return zfs.ds.Destroy(gozfs.DestroyRecursive)
}
package main

import (
	"reflect"
	"testing"
)

// TestDecomp verifies the prime-factorization string of n! (e.g. 5! = 120 =
// 2^3 * 3 * 5). Expected exponents follow Legendre's formula:
// v_p(n!) = sum over k of floor(n/p^k).
func TestDecomp(t *testing.T) {
	type args struct {
		n int
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{name: "5", args: args{n: 5}, want: "2^3 * 3 * 5"},
		// BUG FIX: the expected string here was the decomposition of 22!
		// (it even contained the factor 19 > 14, and duplicated the
		// commented-out "22" case below). 14! = 2^11 * 3^5 * 5^2 * 7^2 * 11 * 13.
		{name: "14", args: args{n: 14}, want: "2^11 * 3^5 * 5^2 * 7^2 * 11 * 13"},
		{name: "17", args: args{n: 17}, want: "2^15 * 3^6 * 5^3 * 7^2 * 11 * 13 * 17"},
		// {name: "22", args: args{n: 22}, want: "2^19 * 3^9 * 5^4 * 7^3 * 11^2 * 13 * 17 * 19"},
		// {name: "25", args: args{n: 25}, want: "2^22 * 3^10 * 5^6 * 7^3 * 11^2 * 13 * 17 * 19 * 23"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := Decomp(tt.args.n); got != tt.want {
				t.Errorf("Decomp() = %v, want %v", got, tt.want)
			}
		})
	}
}

// Test_getSmallestPrimeFactor checks the smallest-prime-factor helper.
func Test_getSmallestPrimeFactor(t *testing.T) {
	type args struct {
		n int64
	}
	tests := []struct {
		name string
		args args
		want int64
	}{
		{name: "2", args: args{n: 2}, want: 2},
		{name: "3", args: args{n: 3}, want: 3},
		{name: "25", args: args{n: 25}, want: 5},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := getSmallestPrimeFactor(tt.args.n); got != tt.want {
				t.Errorf("getSmallestPrimeFactor() = %v, want %v", got, tt.want)
			}
		})
	}
}

// Test_decomposeIntoPrimes checks full prime decomposition of a single int.
func Test_decomposeIntoPrimes(t *testing.T) {
	type args struct {
		n int64
	}
	tests := []struct {
		name string
		args args
		want []int64
	}{
		{name: "2", args: args{n: 2}, want: []int64{2}},
		{name: "3", args: args{n: 3}, want: []int64{3}},
		{name: "8", args: args{n: 8}, want: []int64{2, 2, 2}},
		{name: "25", args: args{n: 25}, want: []int64{5, 5}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := decomposeIntoPrimes(tt.args.n); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("decomposeIntoPrimes() = %v, want %v", got, tt.want)
			}
		})
	}
}

// Test_toFactorialString checks rendering of a sorted prime multiset as
// "p^e * q * ..." (exponent omitted when it is 1).
func Test_toFactorialString(t *testing.T) {
	type args struct {
		arr []int64
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{name: "0", args: args{arr: []int64{2, 3}}, want: "2 * 3"},
		{name: "2", args: args{arr: []int64{2, 2, 2, 2}}, want: "2^4"},
		{name: "3", args: args{arr: []int64{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 7, 7, 7, 11, 11, 13, 17, 19}}, want: "2^19 * 3^9 * 5^4 * 7^3 * 11^2 * 13 * 17 * 19"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := toFactorialString(tt.args.arr); got != tt.want {
				t.Errorf("toFactorialString() = %v, want %v", got, tt.want)
			}
		})
	}
}

// Test_factorial checks the plain factorial helper, including 0! = 1.
func Test_factorial(t *testing.T) {
	type args struct {
		n int
	}
	tests := []struct {
		name string
		args args
		want int64
	}{
		{name: "0", args: args{n: 0}, want: 1},
		{name: "1", args: args{n: 1}, want: 1},
		{name: "5", args: args{n: 5}, want: 120},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := factorial(tt.args.n); got != tt.want {
				t.Errorf("factorial() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package binaryxml

import (
	"bytes"
	"strings"

	"github.com/google/gapid/core/data/binary"
)

// xmlAttributeList is the ordered set of attributes on one XML element.
type xmlAttributeList []xmlAttribute

// forName returns a pointer to the first attribute whose name matches s,
// and whether one was found.
func (l xmlAttributeList) forName(s stringPoolRef) (*xmlAttribute, bool) {
	for idx, at := range l {
		if at.name == s {
			return &l[idx], true
		}
	}
	return nil, false
}

// xml renders every attribute, one per line, indented two levels deeper than
// the current element indent.
func (l xmlAttributeList) xml(ctx *xmlContext) string {
	b := bytes.Buffer{}
	for _, a := range l {
		b.WriteRune('\n')
		b.WriteString(strings.Repeat(ctx.tab, ctx.indent+2))
		b.WriteString(a.xml(ctx))
	}
	return b.String()
}

// xmlAttribute is a single decoded attribute: optional namespace, name, and
// either a raw string value or a typed value.
type xmlAttribute struct {
	namespace  stringPoolRef
	name       stringPoolRef
	rawValue   stringPoolRef
	typedValue typedValue
}

// xml renders the attribute as `ns:name="value"`, preferring a registered
// namespace prefix over the full namespace URI, and the raw string value
// over the typed value's formatting when both are present.
func (a xmlAttribute) xml(ctx *xmlContext) string {
	b := bytes.Buffer{}
	if a.namespace.isValid() {
		ns := a.namespace.get()
		if prefix, ok := ctx.namespaces[ns]; ok {
			b.WriteString(prefix)
		} else {
			b.WriteString(a.namespace.get())
		}
		b.WriteRune(':')
	}
	b.WriteString(a.name.get())
	b.WriteRune('=')
	b.WriteRune('"')
	if a.rawValue.isValid() {
		b.WriteString(a.rawValue.get())
	} else {
		b.WriteString(a.typedValue.String())
	}
	b.WriteRune('"')
	return b.String()
}

// xmlAttributeSize is the encoded byte size of one attribute record.
const xmlAttributeSize = 20

// decode reads one attribute record (namespace, name, raw value, typed
// value) from r, resolving string references through root's string pool.
func (a *xmlAttribute) decode(r binary.Reader, root *xmlTree) error {
	a.namespace = root.decodeString(r)
	a.name = root.decodeString(r)
	a.rawValue = root.decodeString(r)
	typedValue, err := decodeValue(r, root)
	a.typedValue = typedValue
	return err
}

// encode writes the attribute record in the same field order decode reads it.
func (a *xmlAttribute) encode(w binary.Writer) {
	a.namespace.encode(w)
	a.name.encode(w)
	a.rawValue.encode(w)
	a.typedValue.encode(w)
}

// attributesByResourceId sorts an attribute list by the resource id mapped
// to each attribute name (via the tree's resource map).
type attributesByResourceId struct {
	attributes xmlAttributeList
	xml        *xmlTree
}

func (as attributesByResourceId) Len() int {
	return len(as.attributes)
}

func (as attributesByResourceId) Swap(i, j int) {
	as.attributes[i], as.attributes[j] = as.attributes[j], as.attributes[i]
}

// Less orders by resource id; names without a mapped id get 0xffffffff so
// they sort last, and ties are broken by attribute name.
func (as attributesByResourceId) Less(i, j int) bool {
	rm := as.xml.resourceMap
	a := as.attributes[i].name
	r1 := uint32(0xffffffff)
	if a.stringPoolIndex() < uint32(len(rm.ids)) {
		r1 = rm.ids[a.stringPoolIndex()]
	}
	b := as.attributes[j].name
	r2 := uint32(0xffffffff)
	if b.stringPoolIndex() < uint32(len(rm.ids)) {
		r2 = rm.ids[b.stringPoolIndex()]
	}
	return r1 < r2 || ((r1 == r2) && a.get() < b.get())
}
package main

import "fmt"

func main() {
	nums1 := []int{1, 2, 3, 0, 0, 0}
	nums2 := []int{2, 5, 6}
	m, n := 3, 3
	merge(nums1, m, nums2, n)
	fmt.Println(nums1)
}

// merge merges nums2 (length n) into nums1 (first m elements valid, total
// capacity m+n) in place. Both inputs are sorted ascending; writing from the
// back avoids overwriting unread elements of nums1.
func merge(nums1 []int, m int, nums2 []int, n int) {
	w := m + n - 1 // next write position, at the true end of nums1
	a, b := m-1, n-1 // last unread element of each input
	for a >= 0 && b >= 0 {
		if nums1[a] > nums2[b] {
			nums1[w] = nums1[a]
			a--
		} else {
			nums1[w] = nums2[b]
			b--
		}
		w--
	}
	// Drain any leftovers of nums2; leftovers of nums1 are already in place.
	for b >= 0 {
		nums1[w] = nums2[b]
		b--
		w--
	}
}
/*
	Name : Kamil KAPLAN
	Date : 25.07.2019
*/

package models

// Location describes a geographic place and its metadata.
// NOTE(review): the field set (Key, LocalizedName, AdministrativeArea,
// GeoPosition, DataSets, ...) matches a weather-provider locations API
// response - confirm which API this mirrors before relying on semantics.
type Location struct {
	Version           int32  `json:"Version,omitempty"`
	Key               string `json:"Key,omitempty"` // unique location identifier used for lookups
	Type              string `json:"Type,omitempty"`
	Rank              int32  `json:"Rank,omitempty"`
	LocalizedName     string `json:"LocalizedName,omitempty"`
	EnglishName       string `json:"EnglishName,omitempty"`
	PrimaryPostalCode string `json:"PrimaryPostalCode,omitempty"`
	// Nested objects are always emitted (no omitempty) so consumers see the
	// full structure even when zero-valued.
	Region                 Regions                  `json:"Region"`
	Country                Country                  `json:"Country"`
	AdministrativeArea     AdministrativeArea       `json:"AdministrativeArea"`
	TimeZone               TimeZone                 `json:"TimeZone"`
	GeoPosition            GeoPosition              `json:"GeoPosition"`
	IsAlias                bool                     `json:"IsAlias,omitempty"`
	SupplementalAdminAreas []SupplementalAdminAreas `json:"SupplementalAdminAreas"`
	DataSets               []string                 `json:"DataSets,omitempty"`
	Details                Details                  `json:"Details"`
}
package netutil

import (
	"math/big"
	"net"
)

// IPAdd adds `val` to `ip`.
// If `ip` is IPv4 address, the value returned is IPv4, or nil when over/underflowed.
// If `ip` is IPv6 address, the value returned is IPv6, or nil when over/underflowed.
func IPAdd(ip net.IP, val int64) net.IP {
	sum := big.NewInt(val)
	if v4 := ip.To4(); v4 != nil {
		sum.Add(sum, new(big.Int).SetBytes(v4))
		if sum.Sign() < 0 {
			return nil // underflow below 0.0.0.0
		}
		// Overflow yields >4 bytes, which To4 reports as nil.
		return net.IP(padLeft(sum.Bytes(), net.IPv4len)).To4()
	}
	sum.Add(sum, new(big.Int).SetBytes(ip))
	if sum.Sign() < 0 {
		return nil // underflow below ::
	}
	// Overflow yields >16 bytes, which To16 reports as nil.
	return net.IP(padLeft(sum.Bytes(), net.IPv6len)).To16()
}

// padLeft zero-pads b on the left to exactly size bytes; inputs already at
// or beyond size are returned unchanged.
func padLeft(b []byte, size int) []byte {
	if len(b) >= size {
		return b
	}
	padded := make([]byte, size)
	copy(padded[size-len(b):], b)
	return padded
}

// IPDiff calculates the numeric difference between two IP addresses.
// Intuitively, the calculation is done as `ip2 - ip1`.
// `ip1` and `ip2` must be the same IP version (4 or 6).
func IPDiff(ip1, ip2 net.IP) int64 {
	// Normalize both to 4-byte form when ip1 is IPv4, matching widths.
	if v4 := ip1.To4(); v4 != nil {
		ip1, ip2 = v4, ip2.To4()
	}
	minuend := new(big.Int).SetBytes(ip2)
	subtrahend := new(big.Int).SetBytes(ip1)
	return minuend.Sub(minuend, subtrahend).Int64()
}
package service

import (
	"database/sql"
	"time"

	"github.com/NYTimes/gizmo/config/mysql"
	"github.com/NYTimes/sqliface"
)

type (
	// SavedItemsRepo is an interface layer between
	// our service and our database. Abstracting these methods
	// out of a pure implementation helps with testing.
	SavedItemsRepo interface {
		Get(uint64) ([]*SavedItem, error)
		Put(uint64, string) error
		Delete(uint64, string) error
	}

	// MySQLSavedItemsRepo is an implementation of the repo
	// interface built on top of MySQL.
	MySQLSavedItemsRepo struct {
		db *sql.DB
	}

	// SavedItem represents an article, blog, interactive, etc.
	// that a user wants to save for reading later.
	SavedItem struct {
		UserID    uint64    `json:"user_id"`
		URL       string    `json:"url"`
		Timestamp time.Time `json:"timestamp"`
	}
)

// NewSavedItemsRepo will attempt to connect to MySQL and
// return a SavedItemsRepo implementation.
func NewSavedItemsRepo(cfg *mysql.Config) (SavedItemsRepo, error) {
	db, err := cfg.DB()
	if err != nil {
		return nil, err
	}
	return &MySQLSavedItemsRepo{db}, nil
}

// Get will attempt to query the underlying MySQL database for saved items
// for a single user. Items are returned newest first.
func (r *MySQLSavedItemsRepo) Get(userID uint64) ([]*SavedItem, error) {
	query := `SELECT user_id, url, timestamp FROM saved_items WHERE user_id = ? ORDER BY timestamp DESC`
	rows, err := r.db.Query(query, userID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	return scanItems(rows)
}

// scanItems converts a result set into SavedItems. It accepts the
// sqliface.Rows interface (rather than *sql.Rows) so tests can supply fakes.
func scanItems(rows sqliface.Rows) ([]*SavedItem, error) {
	var err error
	// initializing so we return an empty array in case of 0
	items := []*SavedItem{}
	for rows.Next() {
		item := &SavedItem{}
		err = rows.Scan(&item.UserID, &item.URL, &item.Timestamp)
		if err != nil {
			return nil, err
		}
		items = append(items, item)
	}
	return items, nil
}

// Put will attempt to insert a new saved item for the user.
func (r *MySQLSavedItemsRepo) Put(userID uint64, url string) error { query := `INSERT INTO saved_items (user_id, url, timestamp) VALUES (?, ?, NOW()) ON DUPLICATE KEY UPDATE timestamp = NOW()` _, err := r.db.Exec(query, userID, url) return err } // Delete will attempt to remove an item from a user's saved items. func (r *MySQLSavedItemsRepo) Delete(userID uint64, url string) error { query := `DELETE FROM saved_items WHERE user_id = ? AND url = ?` _, err := r.db.Exec(query, userID, url) return err }
package main

import (
	"fmt"
	"time"
)

// counter prints "prefix: i" for i in [0, n), sleeping 50ms between
// prints so interleaving with concurrent goroutines is visible.
func counter(n int, prefix string) {
	for tick := 0; tick < n; tick++ {
		fmt.Println(prefix+":", tick)
		time.Sleep(50 * time.Millisecond)
	}
}

func main() {
	// Synchronous call: finishes before anything below starts.
	counter(5, "Counter-A")

	// The same ordinary function becomes concurrent with `go`.
	go counter(5, "Counter-B")

	// Anonymous functions can be goroutines too; this echo may land in
	// the middle of Counter-B's output.
	go func(msg string) {
		fmt.Println("Echo", msg)
	}("Hello")

	// Block on manual input so main does not exit before the
	// goroutines above get a chance to run.
	fmt.Scanln()
	fmt.Println("done")
}
package pubsub

import (
	"time"

	"github.com/mylxsw/adanos-alert/internal/repository"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// EventType identifies the kind of change an event describes.
type EventType string

const (
	// EventTypeAdd marks a creation event.
	EventTypeAdd EventType = "added"
	// EventTypeUpdate marks an update event.
	EventTypeUpdate EventType = "updated"
	// EventTypeDelete marks a deletion event.
	EventTypeDelete EventType = "deleted"
)

// RuleChangedEvent is published when an alerting rule changes.
type RuleChangedEvent struct {
	Rule      repository.Rule
	Type      EventType
	CreatedAt time.Time
}

// DingdingRobotEvent is published when a DingTalk robot configuration changes.
type DingdingRobotEvent struct {
	DingDingRobot repository.DingdingRobot
	Type          EventType
	CreatedAt     time.Time
}

// UserChangedEvent is published when a user record changes.
type UserChangedEvent struct {
	User      repository.User
	Type      EventType
	CreatedAt time.Time
}

// SystemUpDownEvent is published on system start/stop (Up reports which).
type SystemUpDownEvent struct {
	Up        bool
	CreatedAt time.Time
}

// MessageGroupPendingEvent is published when an event group transitions to
// the Pending state.
type MessageGroupPendingEvent struct {
	Group     repository.EventGroup
	CreatedAt time.Time
}

// MessageGroupTriggeredEvent is published when a rule's trigger fires for
// an event group.
type MessageGroupTriggeredEvent struct {
	Action    string
	Rule      repository.Rule
	Trigger   repository.Trigger
	Group     repository.EventGroup
	CreatedAt time.Time
}

// EventGroupReduceEvent is published when an event group is shrunk:
// KeepCount events are retained and DeleteCount events are removed.
type EventGroupReduceEvent struct {
	GroupID     primitive.ObjectID
	KeepCount   int64
	DeleteCount int64
	CreatedAt   time.Time
}
package controller import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "github.com/nickchou/learn-go/app" ) //ZhuzherController zhuzher控制器 type ZhuzherController struct { app.App } //Project 获取zhuzher的project信息 func (con *ZhuzherController) Project() { var bf bytes.Buffer //table相关 bf.WriteString("<style>table { border-collapse:collapse; } table,th, td { border: 1px solid black;</style>") bf.WriteString("zhuzher project list<br/>") bf.WriteString("<table>") bf.WriteString("<tr><td>index</td><td>city</td><td>project code</td><td>project name</td><td>stage</td></tr>") //zhuzher所有的城市code 66 items,from=https://flyingdutchman.4009515151.com/api/zhuzher/cities var citys = [...]int{210300, 469029, 110000, 220100, 430100, 320400, 510100, 500000, 210200, 469007, 441900, 440600, 210400, 350100, 440100, 520100, 230100, 460100, 330100, 340100, 441300, 330400, 220200, 370100, 140700, 530100, 131000, 469024, 360100, 511300, 320100, 450100, 320600, 330200, 350300, 370200, 441800, 130300, 350500, 350400, 460200, 310000, 330600, 210100, 440300, 320500, 140100, 331000, 130200, 120000, 330300, 420100, 340200, 650100, 320200, 350200, 610100, 320300, 321000, 370600, 210800, 350600, 410100, 321100, 442000, 440400} index := 0 //遍历所有城市 for _, city := range citys { //根据city_code拿到project信息 resp, err := http.Get(fmt.Sprintf("https://flyingdutchman.4009515151.com/api/zhuzher/projects?city_code=%v", city)) //判断err if err != nil { defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) //json反序列化项目信息 var pro Project if err := json.Unmarshal(body, &pro); err == nil { if pro.Code == 0 { //遍历json的返回结果 for _, p := range pro.Results { index++ bf.WriteString(fmt.Sprintf("<tr><td>%v</td><td>%v</td><td>%v</td><td>%v</td><td>%v</td></tr>", index, city, p.Code1, p.Name, p.Stage)) } } } else { fmt.Println(err) } } } //table bf.WriteString("</table>") io.WriteString(con.W(), bf.String()) } //Project zhuzher的小区实体信息 type Project struct { Code int32 `json:"code"` Results []Result 
`json:"result"` } //Result zhuzher接口返回的Result实体定义 type Result struct { Code1 string `json:"code"` Name string `json:"name"` Stage int32 `json:"stage"` }
package persistence import ( "database/sql" "github.com/Okaki030/hinagane-scraping/domain/repository" ) // wordCountPresistence はまとめ記事のワードの出現回数をカウントするための構造体 type wordCountDBPresistence struct { DB *sql.DB } // NewWordCountPersistence はwordCountPresistence型のインスタンスを生成するための関数 func NewWordCountDBPersistence(db *sql.DB) repository.WordCountRepository { return &wordCountDBPresistence{ DB: db, } } // InsertWordCountInThreeDays は直近3日間のまとめ記事へのワードの出現回数をカウントするためのメソッド func (wcp wordCountDBPresistence) InsertWordCountInThreeDays() error { var wordCnt, appearCnt int var row *sql.Row var err error // 単語の総数を取得 row = wcp.DB.QueryRow(`SELECT count(*) FROM word`) err = row.Scan(&(wordCnt)) if err != nil { return err } // 直近3日間のメンバーの記事数を取得する for wordId := 1; wordId <= wordCnt; wordId++ { row = wcp.DB.QueryRow(` SELECT count(*) FROM article_word_link INNER JOIN article on article_word_link.article_id=article.id WHERE (NOW( ) - INTERVAL 3 DAY)<article.date_time and article_word_link.word_id=?`, wordId) err = row.Scan(&(appearCnt)) if err != nil { return err } _, err = wcp.DB.Exec(` INSERT INTO word_counter (word_id, counter, date_time) VALUES (?,?,now())`, wordId, appearCnt) if err != nil { return err } } return nil }
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package ash

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"image"
	"image/png"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"

	"chromiumos/tast/errors"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/chrome/internal/cdputil"
	"chromiumos/tast/local/chrome/internal/extension"
	"chromiumos/tast/testing"
)

// AppListBubbleClassName is the automation API class name of the bubble launcher.
const AppListBubbleClassName = "AppListBubbleView"

// LauncherState represents the launcher (a.k.a AppList) state.
type LauncherState string

// LauncherState values as defined in
// https://cs.chromium.org/chromium/src/ash/public/cpp/app_list/app_list_types.h
const (
	FullscreenAllApps LauncherState = "FullscreenAllApps"
	FullscreenSearch  LauncherState = "FullscreenSearch"
	Closed            LauncherState = "Closed"
)

// Accelerator represents the accelerator key to trigger certain actions.
// The JSON tags match the chrome.autotestPrivate accelerator format.
type Accelerator struct {
	KeyCode string `json:"keyCode"`
	Shift   bool   `json:"shift"`
	Control bool   `json:"control"`
	Alt     bool   `json:"alt"`
	Search  bool   `json:"search"`
}

// Accelerator keys used to trigger launcher state changes.
var (
	AccelSearch      = Accelerator{KeyCode: "search", Shift: false, Control: false, Alt: false, Search: false}
	AccelShiftSearch = Accelerator{KeyCode: "search", Shift: true, Control: false, Alt: false, Search: false}
)

// WaitForLauncherState waits until the launcher state becomes state. It waits
// up to 10 seconds and fails if the launcher doesn't have the desired state.
// Expected to fail with "Not supported for bubble launcher" error when waiting
// for state different from "Closed" if called for clamshell productivity (bubble)
// launcher. Note that the autotest API is expected to return immediately, but still
// asynchronously, in this case.
// NOTE: Waiting for "Closed" state will always wait for the fullscreen launcher to // hide, even if one would otherwise expect bubble launcher to be used for the current // session state - this supports waiting for launcher UI hide animation to complete // after transitioning from tablet mode to clamshell. func WaitForLauncherState(ctx context.Context, tconn *chrome.TestConn, state LauncherState) error { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if err := tconn.Call(ctx, nil, "tast.promisify(chrome.autotestPrivate.waitForLauncherState)", state); err != nil { return errors.Wrap(err, "failed to wait for launcher state") } return nil } // TriggerLauncherStateChange will cause the launcher state change via accelerator. func TriggerLauncherStateChange(ctx context.Context, tconn *chrome.TestConn, accel Accelerator) error { // Send the press event to store it in the history. It'll not be handled, so ignore the result. if err := tconn.Call(ctx, nil, `async (acceleratorKey) => { acceleratorKey.pressed = true; chrome.autotestPrivate.activateAccelerator(acceleratorKey, () => {}); acceleratorKey.pressed = false; await tast.promisify(chrome.autotestPrivate.activateAccelerator)(acceleratorKey); }`, accel); err != nil { return errors.Wrap(err, "failed to execute accelerator") } return nil } func scaleImage(src image.Image, siz int) image.Image { srcSize := src.Bounds().Size().X scaled := image.NewRGBA(image.Rect(0, 0, siz, siz)) for x := 0; x < siz; x++ { for y := 0; y < siz; y++ { scaled.Set(x, y, src.At(x*srcSize/siz, y*srcSize/siz)) } } return scaled } func saveImageAsPng(filename string, img image.Image) error { w, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { return err } defer w.Close() return png.Encode(w, img) } // generateFakeAppNames generates default names for fake apps. 
func generateFakeAppNames(numFakeApps int) []string { fakeAppNames := make([]string, numFakeApps) for i := 0; i < numFakeApps; i++ { fakeAppNames[i] = fmt.Sprintf("fake app %d", i) } return fakeAppNames } // GeneratePrepareFakeAppsWithNamesOptions calls PrepareDefaultFakeApps() and // returns options to be used by chrome.New() for logging in with the newly // created fake apps. baseDir is the path to the directory for keeping app data. // The function caller should always clean baseDir regardless of function // execution results. names specify app names. func GeneratePrepareFakeAppsWithNamesOptions(baseDir string, names []string) ([]chrome.Option, error) { dirs, err := PrepareDefaultFakeApps(baseDir, names, true) if err != nil { return nil, errors.Wrap(err, "failed to create fake apps") } opts := make([]chrome.Option, 0, len(names)) for _, dir := range dirs { opts = append(opts, chrome.UnpackedExtension(dir)) } return opts, nil } // GeneratePrepareFakeAppsWithIconDataOptions is similar with GeneratePrepareFakeAppsWithNamesOptions, // with a difference that GeneratePrepareFakeAppsWithIconDataOptions allows the // caller to specify both app names and icon data. The caller has the duty to // clean baseDir. func GeneratePrepareFakeAppsWithIconDataOptions(baseDir string, names []string, iconData [][]byte) ([]chrome.Option, error) { if len(names) != len(iconData) { return nil, errors.Errorf("unexpected count of icon data: got %d, expecting %d", len(iconData), len(names)) } dirs, err := prepareFakeAppsWithIconData(baseDir, names, iconData) if err != nil { return nil, errors.Wrap(err, "failed to prepare data for fake apps") } opts := make([]chrome.Option, 0, len(names)) for _, dir := range dirs { opts = append(opts, chrome.UnpackedExtension(dir)) } return opts, nil } // GeneratePrepareFakeAppsOptions is similar with GeneratePrepareFakeAppsWithNamesOptions, // with a difference that GeneratePrepareFakeAppsOptions accepts the fake app // count as the parameter. 
func GeneratePrepareFakeAppsOptions(baseDir string, numFakeApps int) ([]chrome.Option, error) {
	return GeneratePrepareFakeAppsWithNamesOptions(baseDir, generateFakeAppNames(numFakeApps))
}

// prepareFakeApp creates data for a fake app with the specified app name and
// icon (if any), returning the path of the created extension directory.
// iconDir and iconFileMap may be empty/nil for an icon-less app; when set,
// iconFileMap maps icon pixel size to the file name inside iconDir.
func prepareFakeApp(baseDir, appName, iconDir string, iconFileMap map[int]string) (string, error) {
	// The manifest.json data for the fake hosted app; it just opens google.com
	// page on launch.
	const manifestTmpl = `{
	"description": "fake",
	"name": "%s",
	"manifest_version": 2,
	"version": "0",
	%s
	"app": {
		"launch": {
			"web_url": "https://www.google.com/"
		}
	}
}`
	extDir := filepath.Join(baseDir, appName)
	if err := os.Mkdir(extDir, 0755); err != nil {
		return "", errors.Wrapf(err, "failed to create the directory for %s", appName)
	}

	var iconJSON string
	if iconDir != "" {
		// Link the shared icon files into the extension directory instead of
		// copying them.
		for _, iconFileName := range iconFileMap {
			if err := os.Symlink(filepath.Join(iconDir, iconFileName), filepath.Join(extDir, iconFileName)); err != nil {
				return "", errors.Wrapf(err, "failed to create link of icon %s", iconFileName)
			}
		}
		iconJSONData, err := json.Marshal(iconFileMap)
		if err != nil {
			// FIX: corrected "mapptings" typo in the error message.
			return "", errors.Wrap(err, "failed to turn the mappings between icon sizes and icon names into a JSON string")
		}
		iconJSON = fmt.Sprintf(`"icons": %s,`, string(iconJSONData))
	}

	if err := ioutil.WriteFile(filepath.Join(extDir, "manifest.json"), []byte(fmt.Sprintf(manifestTmpl, appName, iconJSON)), 0644); err != nil {
		return "", errors.Wrapf(err, "failed to prepare manifest.json for %s", appName)
	}
	return extDir, nil
}

// prepareFakeAppIcon creates icon images in different scales with the given
// icon data. These images are stored in a directory created under baseDir.
// iconFolder specifies the directory's name.
func prepareFakeAppIcon(baseDir, iconFolder string, iconData []byte) (string, map[int]string, error) { iconDir := filepath.Join(baseDir, iconFolder) if err := os.Mkdir(iconDir, 0755); err != nil { return "", nil, errors.Wrapf(err, "failed to create the icon directory %q", iconDir) } img, err := png.Decode(bytes.NewReader(iconData)) if err != nil { return "", nil, errors.Wrap(err, "failed to decode icon data") } iconFiles := map[int]string{} for _, siz := range []int{32, 48, 64, 96, 128, 192} { var imgToSave image.Image if siz == img.Bounds().Size().X { imgToSave = img } else { imgToSave = scaleImage(img, siz) } iconFile := fmt.Sprintf("icon%d.png", siz) iconFileFullPath := filepath.Join(iconDir, iconFile) if err := saveImageAsPng(iconFileFullPath, imgToSave); err != nil { return "", nil, errors.Wrapf(err, "failed to save the icon file to %q", iconFileFullPath) } iconFiles[siz] = iconFile } return iconDir, iconFiles, nil } // PrepareDefaultFakeApps creates directories for fake apps (hosted apps) under // the directory of baseDir and returns their path names. Fake app names are // specified by the parameter. hasIcon specifies whether a default icon should // be used. The intermediate data may remain even when an error is returned. It // is the caller's responsibility to clean up the contents under the baseDir. // This also may update the ownership of baseDir. 
func PrepareDefaultFakeApps(baseDir string, appNames []string, hasIcon bool) ([]string, error) { if err := extension.ChownContentsToChrome(baseDir); err != nil { return nil, errors.Wrapf(err, "failed to change ownership of %q", baseDir) } var iconDir string var iconFiles map[int]string var err error if hasIcon { iconDir, iconFiles, err = prepareFakeAppIcon(baseDir, "defaultIcons", fakeIconData) if err != nil { return nil, errors.Wrap(err, "failed to parepare the shared icon for fake apps") } } var dirs []string for _, appName := range appNames { dir, err := prepareFakeApp(baseDir, appName, iconDir, iconFiles) if err != nil { return nil, errors.Wrapf(err, "failed to prepare data for %q", appName) } dirs = append(dirs, dir) } return dirs, nil } // prepareFakeAppsWithIconData is similar with PrepareDefaultFakeApps, but with // the difference that app icons are specified by the parameter. func prepareFakeAppsWithIconData(baseDir string, appNames []string, iconData [][]byte) ([]string, error) { if len(appNames) != len(iconData) { return nil, errors.Errorf("unexpected count of icon data: got %d, expecting %d", len(iconData), len(appNames)) } if err := extension.ChownContentsToChrome(baseDir); err != nil { return nil, errors.Wrapf(err, "failed to change ownership of %q", baseDir) } var dirs []string for index, appName := range appNames { iconDir, iconFiles, err := prepareFakeAppIcon(baseDir, appName+"Icons", iconData[index]) if err != nil { return nil, errors.Wrapf(err, "failed to parepare icons for the fake app %q", appName) } dir, err := prepareFakeApp(baseDir, appName, iconDir, iconFiles) if err != nil { return nil, errors.Wrapf(err, "failed to prepare data for %q", appName) } dirs = append(dirs, dir) } return dirs, nil } // InstalledFakeApps returns a list of the installed fake apps. func InstalledFakeApps(ctx context.Context, tconn *chrome.TestConn) ([]*ChromeApp, error) { // Poll until the fake apps are installed. 
var installedApps []*ChromeApp if err := testing.Poll(ctx, func(ctx context.Context) error { var err error installedApps, err = ChromeApps(ctx, tconn) if err != nil { return errors.Wrap(err, "failed to obtain the list of the installed apps") } var fakeApps []*ChromeApp for _, app := range installedApps { if strings.HasPrefix(app.Name, "fake") { fakeApps = append(fakeApps, app) } } installedApps = fakeApps if len(installedApps) == 0 { return errors.New("no fake apps found") } return nil }, &testing.PollOptions{Interval: 2 * time.Second}); err != nil { return nil, errors.Wrap(err, "failed to wait for fake apps to be installed") } return installedApps, nil } // The remaining definitions are needed only for faillog & CaptureCDP. // TODO(crbug.com/1271473): Get rid of them. // They expose cdputil types and values. See the cdputil package for details. // DebuggingPortPath is a file where Chrome writes debugging port. const DebuggingPortPath = cdputil.DebuggingPortPath // DevtoolsConn is the connection to a web content view, e.g. a tab. type DevtoolsConn = cdputil.Conn // Session maintains the connection to talk to the browser in Chrome DevTools Protocol // over WebSocket. type Session = cdputil.Session // PortWaitOption controls whether the NewSession should wait for the port file // to be created. type PortWaitOption = cdputil.PortWaitOption // PortWaitOption values. const ( NoWaitPort PortWaitOption = cdputil.NoWaitPort WaitPort PortWaitOption = cdputil.WaitPort ) // NewDevtoolsSession establishes a Chrome DevTools Protocol WebSocket connection to the browser. func NewDevtoolsSession(ctx context.Context, debuggingPortPath string, portWait PortWaitOption) (sess *Session, retErr error) { return cdputil.NewSession(ctx, debuggingPortPath, portWait) }
package main import ( "github.com/gorilla/mux" "log" "net/http" ) func main() { r := mux.NewRouter() r.HandleFunc("/api/select/tbluser", tbluser_All_Handler).Methods("GET") r.HandleFunc("/api/select/tbluser/{name}", tbluser_Single_Handler).Methods("GET") r.HandleFunc("/api/insert/tbluser/{name}/{age}", tbluser_Insert_Handler).Methods("POST") r.HandleFunc("/api/update/tbluser/{name}/{age}", tbluser_Update_Handler).Methods("PUT") r.HandleFunc("/api/delete/tbluser/{name}", tbluser_Delete_Handler).Methods("DELETE") log.Fatal(http.ListenAndServe(":8080", r)) }
package main import "fmt" type Pill int // defining constants of the new type Pill // iota counts from 0 and increases in steps by 1 (in case you didn't know this feature) //go:generate stringer -type=Pill const ( Placebo Pill = iota Aspirin Ibuprofen Paracetamol NewPill Acetaminophen = Paracetamol // Acetaminophen is the same as Paracetamol ) func main() { var p Pill = Ibuprofen n := NewPill fmt.Printf("Taking a %s pill and a %s pill\n", p, n) }
/*------------------------------------------------------------------------- * * cycle_container_test.go * Test case for RingBuffer * * * Copyright (c) 2021, Alibaba Group Holding Limited * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * IDENTIFICATION * common/utils/cycle_container_test.go *------------------------------------------------------------------------- */ package utils import ( "bytes" "fmt" "testing" "github.com/stretchr/testify/assert" ) // BenchmarkPadding-8 100000000 11.1 ns/op func BenchmarkPadding(b *testing.B) { cc := NewCycleContainer(2) for i := 0; i < b.N; i++ { cc.GetRead() cc.NextReadSlot() } } func TestCycleContainer_Get(t *testing.T) { cc := NewCycleContainer(2) cc.GetWrite().WriteByte(3) cc.NextWriteSlot() v := cc.GetRead().Bytes() cc.GetRead().Reset() cc.NextReadSlot() cc.GetWrite().WriteByte(5) assert.Equal(t, uint8(3), v[0]) assert.Equal(t, uint8(5), cc.GetRead().Bytes()[0]) } func TestCycleContainer_Write(t *testing.T) { cc := NewCycleContainer(2) w := cc.GetWrite() w.WriteByte(3) t.Log(cc.GetRead().Bytes()) cc.NextWriteSlot() w.WriteByte(6) t.Log(cc.GetRead().Bytes()) } func TestGetInsName(t *testing.T) { var buf bytes.Buffer insname, err := GetInsName("/tmp", &buf) fmt.Println(insname, err) } func TestGetUserName(t *testing.T) { var buf bytes.Buffer insname, err := GetUserName("/tmp", &buf) fmt.Println(insname, err) }
package intset import ( "math/rand" "testing" ) func newIntSets() []IntSet { return []IntSet{NewMapIntSet(), NewBitIntSet(), NewBitInt32Set()} } func TestLenZeroInitially(t *testing.T) { for _, s := range newIntSets() { len := s.Len() if len != 0 { t.Errorf("%T.Len(): got %d, want 0", s, len) } } } func TestAdd(t *testing.T) { for _, s := range newIntSets() { s.Add(1) s.Add(3) if s.Len() != 2 { t.Errorf("%T.Len(): got %d, want 2", s, s.Len()) } if !s.Has(1) { t.Errorf("%T.Has(%d): got %t, want %t", s, 1, s.Has(1), !s.Has(1)) } if !s.Has(3) { t.Errorf("%T.Has(%d): got %t, want %t", s, 1, s.Has(3), !s.Has(3)) } } } func TestRemove(t *testing.T) { for _, s := range newIntSets() { s.Add(0) s.Remove(0) if s.Has(0) { t.Errorf("%T: want zero removed, got %s", s, s) } } } // General benchmark function const max = 32000 func addRandom(set IntSet, n int) { for i := 0; i < n; i++ { set.Add(rand.Intn(max)) } } func benchHas(b *testing.B, set IntSet, n int) { addRandom(set, n) for i := 0; i < b.N; i++ { set.Has(rand.Intn(max)) } } func benchAdd(b *testing.B, set IntSet, n int) { for i := 0; i < b.N; i++ { for j := 0; j < n; j++ { set.Add(rand.Intn(max)) } set.Clear() } } func benchUnionWith(bm *testing.B, a, b IntSet, n int) { addRandom(a, n) addRandom(b, n) for i := 0; i < bm.N; i++ { a.UnionWith(b) } } func benchString(b *testing.B, set IntSet, n int) { addRandom(set, n) for i := 0; i < b.N; i++ { set.String() } } // benchmark MapIntSet func BenchmarkMapIntSetAdd10(b *testing.B) { benchAdd(b, NewMapIntSet(), 10) } func BenchmarkMapIntSetAdd100(b *testing.B) { benchAdd(b, NewMapIntSet(), 100) } func BenchmarkMapIntSetAdd1000(b *testing.B) { benchAdd(b, NewMapIntSet(), 1000) } func BenchmarkMapIntSetHas10(b *testing.B) { benchHas(b, NewMapIntSet(), 10) } func BenchmarkMapIntSetHas100(b *testing.B) { benchHas(b, NewMapIntSet(), 100) } func BenchmarkMapIntSetHas1000(b *testing.B) { benchHas(b, NewMapIntSet(), 1000) } func BenchmarkMapIntSetUnionWith10(b *testing.B) { 
benchUnionWith(b, NewMapIntSet(), NewMapIntSet(), 10) } func BenchmarkMapIntSetUnionWith100(b *testing.B) { benchUnionWith(b, NewMapIntSet(), NewMapIntSet(), 100) } func BenchmarkMapIntSetUnionWith1000(b *testing.B) { benchUnionWith(b, NewMapIntSet(), NewMapIntSet(), 1000) } func BenchmarkMapIntSetString10(b *testing.B) { benchString(b, NewMapIntSet(), 10) } func BenchmarkMapIntSetString100(b *testing.B) { benchString(b, NewMapIntSet(), 100) } func BenchmarkMapIntSetString1000(b *testing.B) { benchString(b, NewMapIntSet(), 1000) } // benchmark bitIntSet32 func BenchmarkBitIntSet32Add10(b *testing.B) { benchAdd(b, NewBitInt32Set(), 10) } func BenchmarkBitIntSet32Add100(b *testing.B) { benchAdd(b, NewBitInt32Set(), 100) } func BenchmarkBitIntSet32Add1000(b *testing.B) { benchAdd(b, NewBitInt32Set(), 1000) } func BenchmarkBitIntSet32Has10(b *testing.B) { benchHas(b, NewBitInt32Set(), 10) } func BenchmarkBitIntSet32Has100(b *testing.B) { benchHas(b, NewBitInt32Set(), 100) } func BenchmarkBitIntSet32Has1000(b *testing.B) { benchHas(b, NewBitIntSet(), 1000) } func BenchmarkBitInt32SetUnionWith10(b *testing.B) { benchUnionWith(b, NewBitInt32Set(), NewBitInt32Set(), 10) } func BenchmarkBitInt32SetUnionWith100(b *testing.B) { benchUnionWith(b, NewBitInt32Set(), NewBitInt32Set(), 100) } func BenchmarkBitInt32SetUnionWith1000(b *testing.B) { benchUnionWith(b, NewBitInt32Set(), NewBitInt32Set(), 1000) } func BenchmarkBitInt32SetString10(b *testing.B) { benchString(b, NewBitInt32Set(), 10) } func BenchmarkBitInt32SetString100(b *testing.B) { benchString(b, NewBitInt32Set(), 100) } func BenchmarkBitInt32SetString1000(b *testing.B) { benchString(b, NewBitInt32Set(), 1000) } // benchmark bitIntSet func BenchmarkBitIntSetAdd10(b *testing.B) { benchAdd(b, NewBitIntSet(), 10) } func BenchmarkBitIntSetAdd100(b *testing.B) { benchAdd(b, NewBitIntSet(), 100) } func BenchmarkBitIntSetAdd1000(b *testing.B) { benchAdd(b, NewBitIntSet(), 1000) } func BenchmarkBitIntSetHas10(b *testing.B) { 
benchHas(b, NewBitIntSet(), 10) } func BenchmarkBitIntSetHas100(b *testing.B) { benchHas(b, NewBitIntSet(), 100) } func BenchmarkBitIntSetHas1000(b *testing.B) { benchHas(b, NewBitIntSet(), 1000) } func BenchmarkBitIntSetUnionWith10(b *testing.B) { benchUnionWith(b, NewBitIntSet(), NewBitIntSet(), 10) } func BenchmarkBitIntSetUnionWith100(b *testing.B) { benchUnionWith(b, NewBitIntSet(), NewBitIntSet(), 100) } func BenchmarkBitIntSetUnionWith1000(b *testing.B) { benchUnionWith(b, NewBitIntSet(), NewBitIntSet(), 1000) } func BenchmarkBitIntSetString10(b *testing.B) { benchString(b, NewBitIntSet(), 10) } func BenchmarkBitIntSetString100(b *testing.B) { benchString(b, NewBitIntSet(), 100) } func BenchmarkBitIntSetString1000(b *testing.B) { benchString(b, NewBitIntSet(), 1000) }
/* Copyright 2021-2023 ICS-FORTH. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package lifecycle import ( "context" "fmt" "github.com/carv-ics-forth/frisbee/api/v1alpha1" "github.com/carv-ics-forth/frisbee/controllers/common" "github.com/carv-ics-forth/frisbee/pkg/structure" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8errors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" ) // CreateVirtualJob wraps a call into a virtual object. This is used for operations that do not create external resources. // Examples: Deletions, Calls, ... // If the callback function fails, it will be reflected in the created virtual jobs and should be captured // by the parent's lifecycle. // If this function cannot create a virtual object (e.g, cannot create a virtual object), it will return an error. func CreateVirtualJob(ctx context.Context, reconciler common.Reconciler, parent client.Object, jobName string, callback func(vobj *v1alpha1.VirtualObject) error, ) error { /*--------------------------------------------------- * Create a Virtual Object to host the job *---------------------------------------------------*/ var vJob v1alpha1.VirtualObject vJob.SetGroupVersionKind(v1alpha1.GroupVersion.WithKind("VirtualObject")) vJob.SetNamespace(parent.GetNamespace()) vJob.SetName(jobName) // Set default metadata for the virtual object. 
v1alpha1.SetScenarioLabel(&vJob.ObjectMeta, parent.GetName()) v1alpha1.SetComponentLabel(&vJob.ObjectMeta, v1alpha1.ComponentSUT) // Copy parent's metadata (defaults will be overwritten). v1alpha1.PropagateLabels(&vJob, parent) if err := common.Create(ctx, reconciler, parent, &vJob); err != nil { return errors.Wrapf(err, "cannot create virtual resource for vJob '%s'", jobName) } reconciler.GetEventRecorderFor(parent.GetName()).Event(parent, corev1.EventTypeNormal, "VExecBegin", jobName) /*--------------------------------------------------- * Retrieve the created virtual object *---------------------------------------------------*/ // dirty solution to get the ResourceVersion is order to avoid update failing with // 'Invalid value: 0x0: must be specified for an update' // retry to until we get information about the service. vObjKey := client.ObjectKeyFromObject(&vJob) retryCond := func(ctx context.Context) (done bool, err error) { err = reconciler.GetClient().Get(ctx, vObjKey, &vJob) // Retry if k8errors.IsNotFound(err) { reconciler.Info("Object not found. Retry", "virtualobject", vObjKey) return false, nil } // Abort if err != nil { reconciler.Info("Error. 
Retry", "virtualobject", vObjKey, "err", err) return false, err } // OK return true, nil } if err := wait.ExponentialBackoffWithContext(ctx, common.DefaultBackoffForServiceEndpoint, retryCond); err != nil { return errors.Wrapf(err, "failed to retrieve virtual object '%s']", vObjKey) } /*--------------------------------------------------- * Run the callback function asynchronously *---------------------------------------------------*/ go func() { callbackJobErr := callback(&vJob) // resolve the status if callbackJobErr != nil { vJob.Status.Lifecycle.Phase = v1alpha1.PhaseFailed vJob.Status.Lifecycle.Reason = "VExecFailed" vJob.Status.Lifecycle.Message = errors.Wrapf(callbackJobErr, "Job failed").Error() reconciler.GetEventRecorderFor(parent.GetName()).Event(parent, corev1.EventTypeWarning, "VExecFailed", jobName) } else { vJob.Status.Lifecycle.Phase = v1alpha1.PhaseSuccess vJob.Status.Lifecycle.Reason = "VExecSuccess" vJob.Status.Lifecycle.Message = "Job completed" reconciler.GetEventRecorderFor(parent.GetName()).Event(parent, corev1.EventTypeNormal, "VExecSuccess", jobName) } // Append information for stored data, if any if len(vJob.Status.Data) > 0 { vJob.Status.Lifecycle.Message = fmt.Sprintf("%s. <StoredData>: '%s'", vJob.Status.Message, structure.SortedMapKeys(vJob.Status.Data)) } /*--------------------------------------------------- * Update the status of the Virtual Job *---------------------------------------------------*/ if err := common.UpdateStatus(ctx, reconciler, &vJob); err != nil { reconciler.GetEventRecorderFor(parent.GetName()).Event(parent, corev1.EventTypeWarning, "VExecUpdateError", err.Error()) } }() return nil }
package typeutils import ( "fmt" "reflect" "strings" ) const ( TypeField = "<type>" TypeFieldEscaped = "<type>" ) type FromMapFn func(from map[string]interface{}, to interface{}) error type ToMapFn func(from interface{}, to map[string]interface{}) error // Registry is the type registry interface. // A type registry tracks specific types by name, a facility not native to Go. // A type name in the registry is made up of package path and local type name. // Aliases may be specified to shorten the path to manageable lengths. // // Deprecated: This functionality has been rewritten in madkins23/go-type type Registry interface { // Deprecated: This functionality has been rewritten in madkins23/go-type Alias(alias string, example interface{}) error // Deprecated: This functionality has been rewritten in madkins23/go-type Register(example interface{}) error // Deprecated: This functionality has been rewritten in madkins23/go-type Make(name string) (interface{}, error) // Deprecated: This functionality has been rewritten in madkins23/go-type NameFor(item interface{}) (string, error) // Deprecated: This functionality has been rewritten in madkins23/go-type GenNames(item interface{}, aliased bool) (string, []string, error) // Deprecated: This functionality has been rewritten in madkins23/go-type ConvertItemToMap(item interface{}) (map[string]interface{}, error) // Deprecated: This functionality has been rewritten in madkins23/go-type CreateItemFromMap(in map[string]interface{}) (interface{}, error) } // RegistryItem contains methods for pushing fields to or pulling fields from a map. // A Registry will work with any kind of struct, but won't copy field data without this interface. // This is used by ConvertItemToMap and CreateItemFromMap (as called from marshal/unmarshal code). // Note that both methods must be provided for either to work. 
//
// Deprecated: This functionality has been rewritten in madkins23/go-type
type RegistryItem interface {
	// Deprecated: This functionality has been rewritten in madkins23/go-type
	PushToMap(toMap map[string]interface{}) error

	// Deprecated: This functionality has been rewritten in madkins23/go-type
	PullFromMap(fromMap map[string]interface{}) error
}

// NewRegistry creates a new Registry object of the default internal type.
// Registries created via this function are not safe for concurrent access,
// manage this access or use NewRegistrar() to create a concurrent safe version.
// Developers might be able to write more efficient concurrency code using Registry.
//
// Deprecated: This functionality has been rewritten in madkins23/go-type
func NewRegistry() Registry {
	return &registry{
		byName:  make(map[string]*registration),
		byType:  make(map[reflect.Type]*registration),
		aliases: make(map[string]string),
	}
}

//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////

// Default Registry implementation.
type registry struct {
	// byName supports lookup of registrations by 'name'.
	// Full names and aliases are both entered herein.
	byName map[string]*registration

	// byType supports lookup of registrations by type.
	byType map[reflect.Type]*registration

	// alias maps shortened 'alias' strings to path prefix to shorten names.
	aliases map[string]string
}

// Registration structure groups data from indexes.
// NOTE(review): the field comments below reference old names typeName/typeNames;
// the actual fields are defaultName/allNames.
type registration struct {
	// typeName includes package path and type name.
	// (Stored in defaultName.)
	defaultName string

	// typeNames is the set of all possible (i.e. aliased) type names.
	// The best one will always be in typeName.
	// (Stored in allNames; the chosen default is also allNames[0].)
	allNames []string

	// typeObj is the reflect.Type object for the example object.
	typeObj reflect.Type
}

//////////////////////////////////////////////////////////////////////////

// Alias creates an alias to be used to shorten names.
// Alias must exist prior to registering applicable types.
// Redefining a pre-existing alias is an error.
//
// Deprecated: This functionality has been rewritten in madkins23/go-type
func (reg *registry) Alias(alias string, example interface{}) error {
	if _, ok := reg.aliases[alias]; ok {
		return fmt.Errorf("can't redefine alias %s", alias)
	}
	exampleType := reflect.TypeOf(example)
	if exampleType == nil {
		return fmt.Errorf("no type for alias %s (%v)", alias, example)
	}
	// Dereference a pointer example so the alias maps to the element type's package.
	if exampleType.Kind() == reflect.Ptr {
		exampleType = exampleType.Elem()
		if exampleType == nil {
			return fmt.Errorf("no elem type for alias %s (%v)", alias, example)
		}
	}
	pkgPath := exampleType.PkgPath()
	if pkgPath == "" {
		return fmt.Errorf("no package path for alias %s (%v)", alias, example)
	}
	reg.aliases[alias] = pkgPath
	return nil
}

// Register a type by providing an example object.
//
// Deprecated: This functionality has been rewritten in madkins23/go-type
func (reg *registry) Register(example interface{}) error {
	// Get reflected type for example object.
	exType := reflect.TypeOf(example)
	if exType != nil && exType.Kind() == reflect.Ptr {
		exType = exType.Elem()
	}
	if exType == nil {
		return fmt.Errorf("no reflected type for %v", example)
	}
	// Check for previous record.
	if _, ok := reg.byType[exType]; ok {
		return fmt.Errorf("previous registration for type %v", exType)
	}
	// Get type name without any pointer asterisks.
	typeName := exType.String()
	if strings.HasPrefix(typeName, "*") {
		typeName = strings.TrimLeft(typeName, "*")
	}
	// Create registration record for this type.
	// allNames is sized with one slot for the default name plus capacity
	// for one aliased name per registered alias.
	item := &registration{
		defaultName: typeName,
		allNames:    make([]string, 1, len(reg.aliases)+1),
		typeObj:     exType,
	}
	// Initialize default name to full name with package and type.
	name, aliases, err := reg.GenNames(example, true)
	if err != nil {
		return fmt.Errorf("getting type name of example: %w", err)
	}
	item.defaultName = name
	item.allNames[0] = name
	for _, alias := range aliases {
		item.allNames = append(item.allNames, alias)
	}
	// Add name lookups for all default and aliased names.
	// NOTE(review): the first assignment is redundant — name is also
	// allNames[0] and is re-entered by the loop below.
	reg.byName[name] = item
	for _, name := range item.allNames {
		reg.byName[name] = item
	}
	// Add type lookup.
	reg.byType[exType] = item
	return nil
}

// GenNames creates the possible names for the type represented by the example object.
// Returns the 'canonical' name, an optional array of aliased names per current aliases, and any error.
// If the aliased argument is true a possibly empty array will be returned for the second argument otherwise nil.
//
// Deprecated: This functionality has been rewritten in madkins23/go-type
func (reg *registry) GenNames(example interface{}, aliased bool) (string, []string, error) {
	// Initialize default name to full name with package and type.
	name, err := genNameFromInterface(example)
	if err != nil {
		return "", nil, fmt.Errorf("generating basic name: %w", err)
	}
	var aliases []string
	if aliased {
		aliases = make([]string, 0, len(reg.aliases))
		// Look for any possible aliases for the type and add them to the list of all names.
		// An aliased name has the form "[alias]<rest-of-path>/<TypeName>".
		for alias, prefixPath := range reg.aliases {
			if strings.HasPrefix(name, prefixPath) {
				aliases = append(aliases, "["+alias+"]"+name[len(prefixPath)+1:])
			}
		}
		// Choose default name again from shortest, therefore most likely an aliased name if there are any.
		nameLen := len(name)
		for _, alias := range aliases {
			// Using <= favors later aliases of same size.
			if len(alias) <= nameLen {
				name = alias
			}
		}
	}
	return name, aliases, nil
}

// NameFor returns a name for the specified object.
//
// Deprecated: This functionality has been rewritten in madkins23/go-type
func (reg *registry) NameFor(item interface{}) (string, error) {
	itemType := reflect.TypeOf(item)
	// Dereference a pointer item; registrations are stored by element type.
	if itemType.Kind() == reflect.Ptr {
		itemType = itemType.Elem()
	}
	registration, ok := reg.byType[itemType]
	if !ok {
		return "", fmt.Errorf("no registration for type %s", itemType)
	}
	return registration.defaultName, nil
}

// Make creates a new instance of the example object with the specified name.
// The new instance will be created with fields filled with zero values.
// Note: reflect.New yields a pointer, so the returned interface holds *T.
//
// Deprecated: This functionality has been rewritten in madkins23/go-type
func (reg *registry) Make(name string) (interface{}, error) {
	item, found := reg.byName[name]
	if !found {
		return nil, fmt.Errorf("no example for '%s'", name)
	}
	return reflect.New(item.typeObj).Interface(), nil
}

// ConvertItemToMap converts a registry typed item into a map for further processing.
// If the item is not of a Registry type an error is returned.
// Field data is only copied if the item implements RegistryItem;
// otherwise the map contains just the type marker.
//
// Deprecated: This functionality has been rewritten in madkins23/go-type
func (reg *registry) ConvertItemToMap(item interface{}) (map[string]interface{}, error) {
	value := reflect.ValueOf(item)
	if value.Kind() == reflect.Ptr {
		value = value.Elem()
	}
	if value.Kind() != reflect.Struct {
		return nil, fmt.Errorf("item %s is not a struct", item)
	}
	itemType := value.Type()
	registration, ok := reg.byType[itemType]
	if !ok {
		return nil, fmt.Errorf("no registration for type %s", itemType)
	}
	result := make(map[string]interface{})
	// Add the special marker for the type of the object.
	// This should work with both JSON and YAML.
	result[TypeField] = registration.defaultName
	if mapper, ok := item.(RegistryItem); ok {
		if err := mapper.PushToMap(result); err != nil {
			return nil, fmt.Errorf("pushing item fields to map: %w", err)
		}
	}
	return result, nil
}

// CreateItemFromMap attempts to return a new item of the type specified in the map.
// An error is returned if this is impossible.
// // Deprecated: This functionality has been rewritten in madkins23/go-type func (reg *registry) CreateItemFromMap(in map[string]interface{}) (interface{}, error) { typeField, found := in[TypeField] if !found { _ = fmt.Errorf("no object type in map") } typeName, ok := typeField.(string) if !ok { _ = fmt.Errorf("converting type field %v to string", typeField) } item, err := reg.Make(typeName) if err != nil { return nil, fmt.Errorf("making item of type %s: %w", typeField, err) } if mapper, ok := item.(RegistryItem); ok { if err := mapper.PullFromMap(in); err != nil { return nil, fmt.Errorf("pulling item fields from map: %w", err) } } return item, nil } ////////////////////////////////////////////////////////////////////////// func genNameFromInterface(example interface{}) (string, error) { itemType := reflect.TypeOf(example) if itemType == nil { return "", fmt.Errorf("no type for item %v", example) } if itemType.Kind() == reflect.Ptr { itemType = itemType.Elem() } path := itemType.PkgPath() if path == "" { return "", fmt.Errorf("no path for type %s", itemType.Name()) } last := strings.LastIndex(path, "/") if last < 0 { return "", fmt.Errorf("no slash in %s", path) } final := path[last:] name := itemType.Name() if strings.HasPrefix(name, final+".") { name = name[len(final)+1:] } return path + "/" + name, nil }
package state

// PostgresDatabase - A database in the PostgreSQL system, with multiple schemas and tables contained in it
type PostgresDatabase struct {
	Oid              Oid    // ID of this database
	Name             string // Database name
	OwnerRoleOid     Oid    // Owner of the database, usually the user who created it
	Encoding         string // Character encoding for this database
	Collate          string // LC_COLLATE for this database
	CType            string // LC_CTYPE for this database
	IsTemplate       bool   // If true, then this database can be cloned by any user with CREATEDB privileges; if false, then only superusers or the owner of the database can clone it.
	AllowConnections bool   // If false then no one can connect to this database. This is used to protect the template0 database from being altered.
	ConnectionLimit  int32  // Sets maximum number of concurrent connections that can be made to this database. -1 means no limit.

	// All transaction IDs before this one have been replaced with a permanent ("frozen") transaction ID in this database.
	// This is used to track whether the database needs to be vacuumed in order to prevent transaction ID wraparound or to
	// allow pg_clog to be shrunk. It is the minimum of the per-table pg_class.relfrozenxid values.
	FrozenXID Xid

	// All multixact IDs before this one have been replaced with a transaction ID in this database.
	// This is used to track whether the database needs to be vacuumed in order to prevent multixact ID wraparound or to
	// allow pg_multixact to be shrunk. It is the minimum of the per-table pg_class.relminmxid values.
	MinimumMultixactXID Xid
}

// PostgresDatabaseStats - Database statistics for a single database
type PostgresDatabaseStats struct {
	FrozenXIDAge int32 // Age of FrozenXID
	MinMXIDAge   int32 // Age of MinimumMultixactXID
	XactCommit   int64 // Number of transactions in this database that have been committed
	XactRollback int64 // Number of transactions in this database that have been rolled back
}

// PostgresDatabaseStatsMap - Map of database statistics (key = database Oid)
type PostgresDatabaseStatsMap map[Oid]PostgresDatabaseStats

// DiffedPostgresDatabaseStat - Database statistics for a single database as a diff
type DiffedPostgresDatabaseStats struct {
	FrozenXIDAge int32
	MinMXIDAge   int32
	XactCommit   int32
	XactRollback int32
}

// DiffedDatabaseStats - Map of diffed database statistics (key = database Oid)
type DiffedPostgresDatabaseStatsMap map[Oid]DiffedPostgresDatabaseStats

// DiffSince - Calculate the diff between two stats runs.
// The XID ages are point-in-time values, so the current value is carried
// over unchanged; the transaction counters are cumulative and are diffed
// (narrowed from int64 to int32 for the delta).
func (curr PostgresDatabaseStats) DiffSince(prev PostgresDatabaseStats) DiffedPostgresDatabaseStats {
	return DiffedPostgresDatabaseStats{
		FrozenXIDAge: curr.FrozenXIDAge,
		MinMXIDAge:   curr.MinMXIDAge,
		XactCommit:   int32(curr.XactCommit - prev.XactCommit),
		XactRollback: int32(curr.XactRollback - prev.XactRollback),
	}
}
package purchasepersister

import (
	"context"

	"github.com/diegoholiveira/bookstore-sample/purchases"
)

// PurchaserWithNewUser handles purchases made by users that do not exist yet.
type PurchaserWithNewUser Purchaser

// PurchaserWithRegisteredUser handles purchases made by existing users.
type PurchaserWithRegisteredUser Purchaser

// PurchaserService dispatches a purchase to the persister that matches
// the kind of user attached to it.
type PurchaserService struct {
	newUser        PurchaserWithNewUser
	registeredUser PurchaserWithRegisteredUser
}

// NewPurchaserService wires both user-specific persisters into a single Purchaser.
func NewPurchaserService(registeredUser PurchaserWithRegisteredUser, newUser PurchaserWithNewUser) Purchaser {
	return PurchaserService{
		registeredUser: registeredUser,
		newUser:        newUser,
	}
}

// MakePurchase routes the purchase to the appropriate persister, preferring
// the new-user path; a purchase with neither user kind is rejected.
func (s PurchaserService) MakePurchase(ctx context.Context, p purchases.Purchase) error {
	switch {
	case p.HasNewUser():
		return s.newUser.MakePurchase(ctx, p)
	case p.HasRegisteredUser():
		return s.registeredUser.MakePurchase(ctx, p)
	default:
		return ErrPurchaseInvalid{
			Message: "A purchase must have a registered user or a new user",
		}
	}
}
package main

import "fmt"

func main() {
	var avg = calculate(2, 4, 3, 5, 4, 3, 3, 5, 5, 3)
	var msg = fmt.Sprintf("Rata-rata : %.2f", avg)
	fmt.Println(msg)
}

// calculate returns the arithmetic mean of its arguments.
// BUG FIX: with no arguments the original divided 0.0 by 0.0 and
// returned NaN; an empty argument list now yields 0.
func calculate(numbers ...int) float64 {
	if len(numbers) == 0 {
		return 0
	}
	var total int = 0
	for _, number := range numbers {
		total += number
	}
	// Cast to float64 before dividing so the mean keeps its fraction.
	var avg = float64(total) / float64(len(numbers))
	return avg
}

/*
	fmt.Sprintf() works like fmt.Printf(), except that instead of printing
	the value it returns it as a string. In the case above, the return value
	of fmt.Sprintf() is stored in the msg variable.

	Besides fmt.Sprintf(), there are also fmt.Sprint() and fmt.Sprintln().
*/
package main

import "fmt"

// Package-level strings: str takes the zero value, str2 is explicitly empty.
var (
	str  string
	str2 = ""
)

// main prints both (empty) globals on one line.
func main() {
	fmt.Println(str, str2)
}
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package server

import (
	"context"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/stretchr/testify/require"
)

// TestStickyEngines verifies the sticky in-memory engine registry lifecycle:
// creation by spec, refusal to re-fetch an open engine, re-fetching the same
// engine after close, and shutdown via CloseAllStickyInMemEngines.
func TestStickyEngines(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	ctx := context.Background()
	attrs := roachpb.Attributes{}
	cacheSize := int64(1 << 20)

	registry := NewStickyInMemEnginesRegistry()

	spec1 := base.StoreSpec{
		StickyInMemoryEngineID: "engine1",
		Attributes:             attrs,
		Size:                   base.SizeSpec{InBytes: cacheSize},
	}
	engine1, err := registry.GetOrCreateStickyInMemEngine(ctx, spec1)
	require.NoError(t, err)
	require.False(t, engine1.Closed())

	spec2 := base.StoreSpec{
		StickyInMemoryEngineID: "engine2",
		Attributes:             attrs,
		Size:                   base.SizeSpec{InBytes: cacheSize},
	}
	engine2, err := registry.GetOrCreateStickyInMemEngine(ctx, spec2)
	require.NoError(t, err)
	require.False(t, engine2.Closed())

	// Regetting the engine whilst it is not closed will fail.
	_, err = registry.GetOrCreateStickyInMemEngine(ctx, spec1)
	require.EqualError(t, err, "sticky engine engine1 has not been closed")

	// Close the engine, which allows it to be refetched.
	// The wrapper reports closed while the underlying engine stays open.
	engine1.Close()
	require.True(t, engine1.Closed())
	require.False(t, engine1.(*stickyInMemEngine).Engine.Closed())

	// Refetching the engine should give back the same engine.
	engine1Refetched, err := registry.GetOrCreateStickyInMemEngine(ctx, spec1)
	require.NoError(t, err)
	require.Equal(t, engine1, engine1Refetched)
	require.False(t, engine1.Closed())

	// Cleaning up everything asserts everything is closed.
	registry.CloseAllStickyInMemEngines()
	for _, engine := range []storage.Engine{engine1, engine2} {
		require.True(t, engine.Closed())
		require.True(t, engine.(*stickyInMemEngine).Engine.Closed())
	}
}
package main

import (
	"testing"

	"github.com/maximepeschard/adventofcode2020/08_handheld/code"
)

// testLines is the sample handheld program from the puzzle description.
var testLines = []string{
	"nop +0",
	"acc +1",
	"jmp +4",
	"acc +3",
	"jmp -3",
	"acc -99",
	"acc +1",
	"jmp -4",
	"acc +6",
}

// TestPart1 runs the sample program and expects an infinite loop to be
// detected with the accumulator at 5.
func TestPart1(t *testing.T) {
	prog, _ := code.ParseProgram(testLines)
	exec := code.NewExecution(prog)

	looped, err := exec.Run()
	if err != nil {
		t.Error(err)
	}
	if !looped {
		t.Errorf("expected loop detection")
	}

	const want = 5
	if got := exec.Accumulator(); got != want {
		t.Errorf("expected accumulator = %d, found %d", want, got)
	}
}

// TestPart2 fixes the sample program and expects it to terminate with the
// accumulator at 8.
func TestPart2(t *testing.T) {
	prog, _ := code.ParseProgram(testLines)

	got, err := prog.Fix()
	if err != nil {
		t.Error(err)
	}

	const want = 8
	if got != want {
		t.Errorf("expected accumulator = %d, found %d", want, got)
	}
}
/* Copyright 2020 Docker Compose CLI authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package formatter import ( "fmt" "io" "strings" "text/tabwriter" ) // PrintPrettySection prints a tabbed section on the writer parameter func PrintPrettySection(out io.Writer, printer func(writer io.Writer), headers ...string) error { w := tabwriter.NewWriter(out, 20, 1, 3, ' ', 0) _, _ = fmt.Fprintln(w, strings.Join(headers, "\t")) printer(w) return w.Flush() }
package m2go

// AttributeSet mirrors a Magento attribute-set resource as returned/accepted
// by the REST API.
type AttributeSet struct {
	AttributeSetID     int         `json:"attribute_set_id,omitempty"`
	AttributeSetName   string      `json:"attribute_set_name"`
	SortOrder          int         `json:"sort_order"`
	EntityTypeID       int         `json:"entity_type_id,omitempty"`
	ExtensionAttributes interface{} `json:"extension_attributes,omitempty"`
}

// Group mirrors a Magento attribute-group resource.
// NOTE(review): AttributeGroupID and the extension SortOrder are strings here
// while AttributeSet uses ints for the analogous fields — presumably matching
// the API's JSON; confirm against the Magento schema.
type Group struct {
	AttributeGroupID   string `json:"attribute_group_id,omitempty"`
	AttributeGroupName string `json:"attribute_group_name"`
	AttributeSetID     int    `json:"attribute_set_id"`
	ExtensionAttributes struct {
		AttributeGroupCode string `json:"attribute_group_code,omitempty"`
		SortOrder          string `json:"sort_order,omitempty"`
	} `json:"extension_attributes,omitempty"`
}

// attributeSetSearchQueryResponse is the envelope for attribute-set search results.
type attributeSetSearchQueryResponse struct {
	AttributeSets  []AttributeSet `json:"items"`
	SearchCriteria struct {
		FilterGroups []struct {
			Filters []struct {
				Field         string `json:"field"`
				Value         string `json:"value"`
				ConditionType string `json:"condition_type"`
			} `json:"filters"`
		} `json:"filter_groups"`
	} `json:"search_criteria"`
}

// groupSearchQueryResponse is the envelope for attribute-group search results.
type groupSearchQueryResponse struct {
	Groups         []Group `json:"items"`
	SearchCriteria struct {
		FilterGroups []struct {
			Filters []struct {
				Field         string `json:"field"`
				Value         string `json:"value"`
				ConditionType string `json:"condition_type"`
			} `json:"filters"`
		} `json:"filter_groups"`
	} `json:"search_criteria"`
}

// createAttributeSetPayload is the request body for creating an attribute set
// from a skeleton set.
type createAttributeSetPayload struct {
	AttributeSet AttributeSet `json:"attributeSet"`
	SkeletonID   int          `json:"skeletonId"`
}

// assignAttributePayload is the request body for assigning an attribute to a
// set/group pair.
type assignAttributePayload struct {
	AttributeSetID      int    `json:"attributeSetId"`
	AttributeSetGroupID int    `json:"attributeGroupId"`
	AttributeCode       string `json:"attributeCode"`
	SortOrder           int    `json:"sortOrder"`
}

// createGroupPayload is the request body for creating an attribute group.
type createGroupPayload struct {
	Group Group `json:"group"`
}
package commons func AddNums(num1, num2 int) int { return num1 + num2 }
package proxy

import (
	"log"
	"net"

	"github.com/lflxp/goproxys/protocol"
)

// RunProxy starts the proxy selected by types. "httprp" delegates to the
// HTTP reverse proxy; every other type listens on :8081 and dispatches each
// accepted connection to the matching protocol handler.
func RunProxy(types string) {
	if types == "httprp" {
		protocol.RunHttpProxy()
		return
	}

	// cer, err := tls.LoadX509KeyPair("server.crt", "server.key")
	// if err != nil {
	// 	log.Fatal(err)
	// }
	// config := &tls.Config{Certificates: []tls.Certificate{cer}}
	listener, err := net.Listen("tcp", ":8081")
	// listener, err := tls.Listen("tcp", ":8081", config)
	if err != nil {
		log.Panic(err)
	}
	log.Println("Started Proxy")

	switch types {
	case "http":
		log.Println("Http Proxy Listening port: 8081")
	case "socket5":
		log.Println("Socket5 Proxy Listening port: 8081")
	case "mysql":
		log.Println("Mysql Proxy Listening port: 8081")
	case "ss":
		log.Println("Socket5 Cipher Proxy Listening port: 8081")
	}

	// Accept loop: one goroutine per client connection.
	for {
		client, err := listener.Accept()
		if err != nil {
			log.Panic(err)
		}
		log.Println(client.RemoteAddr().String())
		switch types {
		case "http":
			go protocol.HandleHttpRequestTCP(client)
		case "socket5":
			go protocol.HandleSocket5RequestTCP(client)
		case "mysql":
			go protocol.HandleMysqlRequestTCP(client)
		case "ss":
			go protocol.HandleSocket5CipherRequestTCP(client)
		}
	}
}
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package accountmanager provides functions to manage accounts in-session.
package accountmanager

import (
	"context"
	"time"

	"chromiumos/tast/common/action"
	androidui "chromiumos/tast/common/android/ui"
	"chromiumos/tast/ctxutil"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/accountmanager"
	"chromiumos/tast/local/arc"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/chrome/browser"
	"chromiumos/tast/local/chrome/uiauto"
	"chromiumos/tast/local/chrome/uiauto/faillog"
	"chromiumos/tast/local/chrome/uiauto/nodewith"
	"chromiumos/tast/local/chrome/uiauto/role"
	"chromiumos/tast/testing"
)

func init() {
	testing.AddTest(&testing.Test{
		Func:         ARCAccountPicker,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Verify ARC account picker behavior",
		Contacts:     []string{"anastasiian@chromium.org", "team-dent@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome", "lacros"},
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
			Fixture:           "loggedInToChromeAndArcWithLacros",
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
			Fixture:           "loggedInToChromeAndArcWithLacros",
		}},
		VarDeps: []string{
			"accountmanager.ARCAccountPicker.username",
			"accountmanager.ARCAccountPicker.password",
		},
		Timeout: 6 * time.Minute,
	})
}

// ARCAccountPicker adds a secondary account with the "use with Android apps"
// toggle unchecked, verifies the account is absent from ARC, and then adds it
// to ARC through the account picker, verifying it becomes present.
func ARCAccountPicker(ctx context.Context, s *testing.State) {
	username := s.RequiredVar("accountmanager.ARCAccountPicker.username")
	password := s.RequiredVar("accountmanager.ARCAccountPicker.password")

	// Reserve one minute for various cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, time.Minute)
	defer cancel()

	cr := s.FixtValue().(accountmanager.FixtureData).Chrome()

	// Connect to Test API to use it with the UI library.
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect Test API: ", err)
	}

	defer func(ctx context.Context) {
		s.Log("Running test cleanup")
		if err := accountmanager.TestCleanup(ctx, tconn, cr); err != nil {
			s.Fatal("Failed to do cleanup: ", err)
		}
	}(cleanupCtx)
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "arc_account_picker")

	ui := uiauto.New(tconn).WithTimeout(time.Minute)

	a := s.FixtValue().(accountmanager.FixtureData).ARC
	defer a.DumpUIHierarchyOnError(ctx, s.OutDir(), s.HasError)
	d, err := a.NewUIDevice(ctx)
	if err != nil {
		s.Fatal("Failed initializing UI Automator: ", err)
	}
	defer d.Close(ctx)

	addAccountButton := nodewith.Name("Add Google Account").Role(role.Button)
	moreActionsButton := nodewith.Name("More actions, " + username).Role(role.Button)
	addAccountDialog := accountmanager.AddAccountDialog()
	arcToggle := nodewith.NameStartingWith("Use this account with Android apps").Role(role.ToggleButton).Ancestor(addAccountDialog)

	// Open Account Manager page in OS Settings and click Add Google Account button.
	if err := uiauto.Combine("click add Google Account button",
		accountmanager.OpenAccountManagerSettingsAction(tconn, cr),
		ui.LeftClickUntil(addAccountButton, ui.Exists(accountmanager.AddAccountDialog())),
		// Uncheck ARC toggle.
		ui.LeftClickUntil(arcToggle, accountmanager.CheckARCToggleStatusAction(tconn, browser.TypeLacros, false /*expectedVal*/)),
	)(ctx); err != nil {
		s.Fatal("Failed to click add Google Account button: ", err)
	}

	s.Log("Adding a secondary Account")
	if err := accountmanager.AddAccount(ctx, tconn, username, password); err != nil {
		s.Fatal("Failed to add a secondary Account: ", err)
	}

	if err := uiauto.Combine("confirm account addition",
		// Make sure that the settings page is focused again.
		ui.WaitUntilExists(addAccountButton),
		// Find "More actions, <email>" button to make sure that account was added.
		ui.WaitUntilExists(moreActionsButton),
		// Check that account is not present in ARC.
		accountmanager.CheckIsAccountPresentInARCAction(tconn, d,
			accountmanager.NewARCAccountOptions(username).ExpectedPresentInARC(false)),
	)(ctx); err != nil {
		s.Fatal("Failed to confirm account addition: ", err)
	}

	accountPickerItem := nodewith.NameContaining(username).Role(role.Button).Focusable().Ancestor(addAccountDialog)
	if err := uiauto.Combine("add account to ARC from account picker",
		openAddAccountDialogFromARCAction(d, tconn),
		ui.WaitUntilExists(accountPickerItem),
		// Click on account to add it to ARC.
		ui.LeftClick(accountPickerItem),
		// Check that account is present in ARC.
		accountmanager.CheckIsAccountPresentInARCAction(tconn, d,
			accountmanager.NewARCAccountOptions(username).ExpectedPresentInARC(true)),
	)(ctx); err != nil {
		s.Fatal("Failed to add account to ARC from account picker: ", err)
	}
}

// openAddAccountDialogFromARCAction returns an action that clicks 'Add account' button in ARC settings.
func openAddAccountDialogFromARCAction(d *androidui.Device, tconn *chrome.TestConn) action.Action {
	return func(ctx context.Context) error {
		if err := arc.ClickAddAccountInSettings(ctx, d, tconn); err != nil {
			return errors.Wrap(err, "failed to open Add account dialog from ARC")
		}
		return nil
	}
}
package notification

import (
	"context"

	"github.com/caos/logging"
	sd "github.com/caos/zitadel/internal/config/systemdefaults"
	"github.com/caos/zitadel/internal/notification/repository/eventsourcing"
	"github.com/rakyll/statik/fs"

	_ "github.com/caos/zitadel/internal/notification/statik"
)

// Config holds the notification repository configuration.
type Config struct {
	Repository eventsourcing.Config
}

// Start loads the embedded "notification" statik filesystem and boots the
// eventsourcing repository with it; either failure is fatal (panics via the
// logger).
func Start(ctx context.Context, config Config, systemDefaults sd.SystemDefaults) {
	statikFS, fsErr := fs.NewWithNamespace("notification")
	logging.Log("CONFI-7usEW").OnError(fsErr).Panic("unable to start listener")

	_, repoErr := eventsourcing.Start(config.Repository, statikFS, systemDefaults)
	logging.Log("MAIN-9uBxp").OnError(repoErr).Panic("unable to start app")
}
package main

import (
	"log"
	"os"
)

// main exercises file-permission operations on fixed /tmp paths.
// BUG FIX: the original discarded every error and leaked the *os.File
// handles returned by os.OpenFile; errors are now logged and the
// descriptors closed.
// NOTE(review): 0777 and 0666 are world-writable modes — kept as in the
// original, but confirm they are intentional (e.g. a linter fixture).
func main() {
	if err := os.Chmod("/tmp/somefile", 0777); err != nil {
		log.Println(err)
	}
	if err := os.Chmod("/tmp/someotherfile", 0600); err != nil {
		log.Println(err)
	}

	if f, err := os.OpenFile("/tmp/thing", os.O_CREATE|os.O_WRONLY, 0666); err != nil {
		log.Println(err)
	} else {
		f.Close()
	}
	if f, err := os.OpenFile("/tmp/thing", os.O_CREATE|os.O_WRONLY, 0600); err != nil {
		log.Println(err)
	} else {
		f.Close()
	}
}
package main

import (
	"log"

	"github.com/GO/Hafta2/2TwoDay/02-GO_GIN_REST_API/02/http/handler"
	"github.com/GO/Hafta2/2TwoDay/02-GO_GIN_REST_API/02/platform/newsfeed"
	"github.com/gin-gonic/gin"
)

// main wires the newsfeed store into a gin router and serves it on the
// default address.
func main() {
	feed := newsfeed.New()

	r := gin.Default()
	// r.GET("/ping", handler.PingGet) 1
	r.GET("/ping", handler.PingGet()) // 2
	r.GET("/newsfeedGET", handler.NewsfeedGet(feed))
	r.POST("/newsfeedPOST", handler.NewsfeedPost(feed))

	// BUG FIX: Run's error was silently dropped; a failed bind/serve now
	// terminates with a logged error instead of exiting status 0.
	if err := r.Run(); err != nil {
		log.Fatal(err)
	}

	// feed := newsfeed.New()
	// fmt.Println(feed)

	// feed.Add(newsfeed.Item{"Hello","what do you do?"})
	// fmt.Println(feed)
}